content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from rubygems_utils import RubyGemsTestUtils
| [
6738,
6437,
35641,
5232,
62,
26791,
1330,
10888,
38,
5232,
14402,
18274,
4487,
628
] | 3.285714 | 14 |
# Core
from fsociety.core.menu import tools_cli
from .gitgraber import gitgraber
from .hydrarecon import hydrarecon
from .s3scanner import s3scanner
from .sherlock import sherlock
from .sqlmap import sqlmap
from .striker import striker
from .sublist3r import sublist3r
__tools__ = [sqlmap, striker, sublist3r, sherlock, s3scanner, gitgraber, hydrarecon]
| [
2,
7231,
198,
6738,
277,
35634,
1905,
13,
7295,
13,
26272,
1330,
4899,
62,
44506,
198,
198,
6738,
764,
18300,
2164,
27359,
1330,
17606,
2164,
27359,
198,
6738,
764,
15511,
430,
260,
1102,
1330,
25039,
260,
1102,
198,
6738,
764,
82,
18... | 3 | 119 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.logging_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import sys
import tempfile
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
if __name__ == "__main__":
test.main()
| [
2,
15069,
1853,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.846523 | 417 |
""" Unit Tests for Py-ART's graph/gridmapdisplay.py module. """
# execute this script to create figure_gridmapdisplay_*.png files.
# TODO use matplotlib's @image_comparison decorator to compare to file
# in baseline_images directory. Current this test only determines if files can
# be created, not that they are correct.
import matplotlib.pyplot as plt
import pyart
from numpy.testing import assert_raises
from numpy.testing.decorators import skipif
RESOLUTION = 'c' # crude resolution to speed up tests
@skipif('GridMapDisplay' not in dir(pyart.graph))
@skipif('GridMapDisplay' not in dir(pyart.graph))
@skipif('GridMapDisplay' not in dir(pyart.graph))
if __name__ == "__main__":
test_gridmapdisplay_simple('figure_gridmapdisplay_simple.png')
test_gridmapdisplay_fancy('figure_gridmapdisplay_fancy.png')
| [
37811,
11801,
30307,
329,
9485,
12,
7227,
338,
4823,
14,
25928,
8899,
13812,
13,
9078,
8265,
13,
37227,
198,
2,
12260,
428,
4226,
284,
2251,
3785,
62,
25928,
8899,
13812,
62,
24620,
11134,
3696,
13,
198,
198,
2,
16926,
46,
779,
2603,
... | 3.196911 | 259 |
# coding: utf-8
from email import encoders
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import parseaddr, formataddr
import smtplib
# from_addr = input('From: ')
from_addr = '176462329@qq.com'
# password = input('Password: ')
password = ''
# to_addr = input('To: ')
to_addr = from_addr
# smtp_server = input('SMTP server: ')
# 发送纯文本邮件
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# 发送HTML邮件
# msg = MIMEText('<html><body><h1>Hello</h1>' +
# '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
# '</body></html>', 'html', 'utf-8')
# 发送附件
msg = MIMEMultipart()
# 同时支持HTML和Plain格式
# msg = MIMEMultipart('alternative')
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候。。。', 'utf-8').encode()
# msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# 图片嵌入到邮件正文 cid:x
msg.attach(MIMEText('<html><body><h1>Hello</h1>' +
'<p><img src="cid:0"></p>' +
'</body></html>', 'html', 'utf-8'))
# 添加附件就是加上一个MIMEBase, 从本地读取一个图片
with open('../test.jpg', 'rb') as f:
# 设置附件的MIME和文件名,这里是PNG类型
mime = MIMEBase('image', 'jpg', filename='test.jpg')
# 加上必要的头信息
mime.add_header('Content-Disposition', 'attachment', filename='test.jpg')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
# 把附件的内容读进来
mime.set_payload(f.read())
# 用Base64编码
encoders.encode_base64(mime)
# 添加到MIMEMultipart
msg.attach(mime)
server = smtplib.SMTP('smtp.qq.com', 25)
server.starttls()
# server = smtplib.SMTP_SSL('smtp.qq.com', 465)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
3053,
1330,
2207,
375,
364,
198,
6738,
3053,
13,
25677,
1330,
48900,
198,
6738,
3053,
13,
76,
524,
13,
8692,
1330,
337,
12789,
14881,
198,
6738,
3053,
13,
76,
524,
13,
5239,
1330,
33... | 1.880834 | 1,007 |
"""Management command to check defined domains."""
from datetime import datetime
from datetime import timedelta
import ipaddress
import dns.resolver
import gevent
from gevent import socket
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils import timezone
from modoboa.admin import constants
from modoboa.admin import models
from modoboa.lib import email_utils
from modoboa.parameters import tools as param_tools
class CheckMXRecords(BaseCommand):
"""Command class."""
help = "Check defined domains."
@cached_property
def providers(self):
"""Return a list of DNSBL providers."""
if not hasattr(settings, "DNSBL_PROVIDERS"):
return constants.DNSBL_PROVIDERS
return settings.DNSBL_PROVIDERS
@cached_property
def sender(self):
"""Return sender address for notifications."""
return param_tools.get_global_parameter("sender_address", app="core")
@cached_property
def valid_mxs(self):
"""Return valid MXs set in admin."""
valid_mxs = param_tools.get_global_parameter("valid_mxs")
return [ipaddress.ip_network(u"{}".format(v.strip()))
for v in valid_mxs.split() if v.strip()]
def add_arguments(self, parser):
"""Add extra arguments to command."""
parser.add_argument(
"--no-dnsbl", action="store_true", default=False,
help="Skip DNSBL queries.")
parser.add_argument(
"--email", type=str, action="append", default=[],
help="One or more email to notify")
parser.add_argument(
"--skip-admin-emails", action="store_true",
default=False,
help="Skip domain's admins email notification.")
parser.add_argument(
"--domain", type=str, action="append", default=[],
help="Domain name or id to update.")
parser.add_argument(
"--timeout", type=int, default=3,
help="Timeout used for queries.")
parser.add_argument(
"--ttl", type=int, default=7200,
help="TTL for dns query.")
def get_mx_records_for_domain(self, domain, ttl=7200):
"""Return one or more `models.MXRecord` for `domain`.
DNS queries are not performed while `ttl` (in seconds) is still valid.
"""
now = timezone.now()
records = models.MXRecord.objects.filter(domain=domain,
updated__gt=now)
if records.exists():
for record in records:
yield record
raise StopIteration()
models.MXRecord.objects.filter(domain=domain).delete()
try:
answers = dns.resolver.query(domain.name, "MX")
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN,
dns.resolver.NoNameservers):
raise StopIteration()
delta = timedelta(seconds=ttl)
for answer in answers:
try:
# work if .exchange is a name or IP
address = socket.gethostbyname(str(answer.exchange))
except socket.gaierror:
pass
else:
try:
# we must have a valid IP
address = ipaddress.ip_address(u"{}".format(address))
except ValueError:
pass
else:
record = models.MXRecord.objects.create(
domain=domain,
name=u"{}".format(str(answer.exchange).strip(".")),
address=u"{}".format(address),
updated=now + delta)
yield record
def query_dnsbl(self, mx_list, provider):
"""Check given IP against given DNSBL provider."""
results = {}
for mx in mx_list:
reverse = ".".join(reversed(mx.address.split(".")))
pattern = "{}.{}.".format(reverse, provider)
try:
results[mx] = socket.gethostbyname(pattern)
except socket.gaierror:
results[mx] = False
return provider, results
def store_dnsbl_result(self, domain, provider, results, **options):
"""Store DNSBL provider results for domain."""
alerts = {}
to_create = []
for mx in results.keys():
result = "" if not results[mx] else results[mx]
dnsbl_result = models.DNSBLResult.objects.filter(
domain=domain, provider=provider, mx=mx).first()
if dnsbl_result is None:
to_create.append(
models.DNSBLResult(
domain=domain, provider=provider, mx=mx,
status=result)
)
else:
if not dnsbl_result.status and result:
if domain not in alerts:
alerts[domain] = []
alerts[domain].append((provider, mx))
dnsbl_result.status = result
dnsbl_result.save()
models.DNSBLResult.objects.bulk_create(to_create)
if not alerts:
return
emails = options["email"]
if not options["skip_admin_emails"]:
emails.extend(
domain.admins.exclude(email="").values_list("email", flat=True)
)
if not len(emails):
return
content = render_to_string(
"admin/notifications/domain_in_dnsbl.html", {
"domain": domain, "alerts": alerts
})
subject = _("[modoboa] DNSBL issue(s) for domain {}").format(
domain.name)
for email in emails:
status, msg = email_utils.sendmail_simple(
self.sender, email,
subject=subject, content=content)
if not status:
print(msg)
def check_valid_mx(self, domain, mx_list, **options):
"""Check that domain's MX record exist.
If `valid_mx` is provided, retrieved MX records must be
contained in it.
"""
alerts = []
check = False
mxs = [(mx, ipaddress.ip_address(u"%s" % mx.address))
for mx in mx_list]
valid_mxs = self.valid_mxs
if not mxs:
alerts.append(_("Domain {} has no MX record").format(domain))
elif valid_mxs:
for subnet in valid_mxs:
for mx, addr in mxs:
if addr in subnet:
mx.managed = check = True
mx.save()
if check is False:
mx_names = [
"{0.name} ({0.address})".format(mx) for mx in mx_list]
alerts.append(
_("MX record for domain {0} is invalid: {1}").format(
domain, ", ".join(mx_names))
)
if not alerts:
return
emails = options["email"]
if not options["skip_admin_emails"]:
emails.extend(
domain.admins.exclude(email="").values_list("email", flat=True)
)
if not len(emails):
return
content = render_to_string(
"admin/notifications/domain_invalid_mx.html", {
"domain": domain, "alerts": alerts
})
subject = _("[modoboa] MX issue(s) for domain {}").format(
domain.name)
for email in emails:
status, msg = email_utils.sendmail_simple(
self.sender, email,
subject=subject, content=content)
if not status:
print(msg)
def check_domain(self, domain, timeout=3, ttl=7200, **options):
"""Check specified domain."""
mx_list = list(self.get_mx_records_for_domain(domain, ttl=ttl))
if param_tools.get_global_parameter("enable_mx_checks"):
self.check_valid_mx(domain, mx_list, **options)
condition = (
not param_tools.get_global_parameter("enable_dnsbl_checks") or
options["no_dnsbl"] is True)
if condition or not mx_list:
return
jobs = [
gevent.spawn(self.query_dnsbl, mx_list, provider)
for provider in self.providers]
gevent.joinall(jobs, timeout)
for job in jobs:
if not job.successful():
continue
provider, results = job.value
self.store_dnsbl_result(domain, provider, results, **options)
def handle(self, *args, **options):
"""Command entry point."""
# Remove deprecated records first
models.DNSBLResult.objects.exclude(
provider__in=self.providers).delete()
if options["domain"]:
domains = []
for domain in options["domain"]:
try:
if domain.isdigit():
domains.append(models.Domain.objects.get(pk=domain))
else:
domains.append(models.Domain.objects.get(name=domain))
except models.Domain.DoesNotExist:
pass
else:
domains = models.Domain.objects.filter(
enabled=True, enable_dns_checks=True)
options.pop("domain")
for domain in domains:
if domain.uses_a_reserved_tld:
continue
self.check_domain(domain, **options)
| [
37811,
48032,
3141,
284,
2198,
5447,
18209,
526,
15931,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
20966,
21975,
198,
198,
11748,
288,
5907,
13,
411,
14375,
198,
198,
11748,
4903,
... | 2.030811 | 4,771 |
from __future__ import unicode_literals
def select(composer):
"""
binds the django_productline base feature
"""
# no introductions or refinements necessary -
# django_productline acts as base feature
pass
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
628,
198,
4299,
2922,
7,
785,
1930,
263,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
37354,
262,
42625,
14208,
62,
11167,
1370,
2779,
3895,
198,
220,
220,
220,
37227... | 3.08 | 75 |
import logging
from spaceone.core.service import *
from spaceone.statistics.error import *
from spaceone.statistics.manager.resource_manager import ResourceManager
from spaceone.statistics.manager.schedule_manager import ScheduleManager
from spaceone.statistics.manager.history_manager import HistoryManager
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
| [
11748,
18931,
198,
198,
6738,
2272,
505,
13,
7295,
13,
15271,
1330,
1635,
198,
6738,
2272,
505,
13,
14269,
3969,
13,
18224,
1330,
1635,
198,
6738,
2272,
505,
13,
14269,
3969,
13,
37153,
13,
31092,
62,
37153,
1330,
20857,
13511,
198,
6... | 3.77193 | 114 |
import datetime
import json
import logging
import logging.config
import os
import traceback
from os.path import join as pjoin
import unittest2 as unittest
from observation.obs_virtual.ObsVirtualUtils import is_unique_points
| [
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
28686,
198,
11748,
12854,
1891,
198,
6738,
28686,
13,
6978,
1330,
4654,
355,
279,
22179,
198,
198,
11748,
555,
715,
395,
17,
355,
555,
71... | 3.66129 | 62 |
# Time: O(n^2)
# Space: O(n)
| [
2,
3862,
25,
220,
440,
7,
77,
61,
17,
8,
198,
2,
4687,
25,
440,
7,
77,
8,
628
] | 1.631579 | 19 |
"""
@author: Andrea Domenico Giuliano
@contact: andreadomenico.giuliano@studenti.unipd.it
@organization: University of Padua
"""
import Time_Calcolation as t_c
#File contenente le funzioni riguardanti la creazione di una lista degli item utili per un determinato user
# e per l'attribuizione degli score relaitivi alle tipologie di interactions
| [
37811,
201,
198,
31,
9800,
25,
23174,
360,
3674,
3713,
35267,
10115,
201,
198,
31,
32057,
25,
290,
961,
3674,
3713,
13,
12397,
377,
10115,
31,
50139,
72,
13,
403,
541,
67,
13,
270,
201,
198,
31,
9971,
1634,
25,
2059,
286,
15744,
6... | 2.870968 | 124 |
import sqlite3
# connect to database
conn = sqlite3.connect('customer.db')
# Create a cursor
c = conn.cursor()
c = conn.execute("""CREATE TABLE customers (
first TEXT,
last TEXT,
email text)""")
print("Sucessfully Did what I had to!!")
# Commit our command
conn.commit()
# Close our connection
conn.close()
| [
11748,
44161,
578,
18,
198,
198,
2,
2018,
284,
6831,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
23144,
263,
13,
9945,
11537,
198,
198,
2,
13610,
257,
23493,
198,
66,
796,
48260,
13,
66,
21471,
3419,
198,
198,
66,
796,
48260,
... | 2.445946 | 148 |
from django.urls import path
from donate import views
urlpatterns = [
# Donate
path('donate/', views.DonateCreateView.as_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
16565,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
1303,
2094,
378,
198,
220,
220,
220,
3108,
10786,
9099,
378,
14,
3256,
5009,
13,
3987,
378,
16... | 2.679245 | 53 |
import csv
import os
import json
from googleapiclient.discovery import build
# * IMPORTANT
query = r'python tutorial'
# read secret api key
with open('./SECRETS.json', 'r') as f:
secrets = json.load(f)
api_token = secrets['api_token']
# Functions
# creating data directory to save data
if not os.path.exists('./data'):
os.mkdir('data')
###########################
# # Start fetching data # #
###########################
# building connection
youtube = build('youtube', 'v3', developerKey=api_token)
# fetching top 5 channels
if not os.path.exists('./data/channels.csv'):
print('Start collecting top 5 playlists in search for "python"...')
playlist_response = youtube.search().list(
part='snippet',
q=query,
maxResults=5,
type='playlist'
).execute()
# parsing results and saving to csv
with open('./data/channels.csv', 'w', encoding='utf-8', newline='') as channels_CSV:
csv_writer = csv.writer(channels_CSV, quoting=csv.QUOTE_NONNUMERIC)
csv_writer.writerow(['channeltitle','channelId'])
for item in playlist_response['items']:
row = [
item['snippet']['channelTitle'],
item['snippet']['channelId']
]
csv_writer.writerow(row)
print('Done collecting top 5 playlists!')
else:
print('Already Satisfied playlists!')
# collecting channels statistics
if not os.path.exists('./data/channel_statistics.csv'):
print('Start fetching channels Statistics...')
with open('./data/channel_statistics.csv', 'w', encoding='utf-8', newline='') as channel_statistics_CSV:
csv_writer = csv.writer(channel_statistics_CSV, quoting=csv.QUOTE_NONNUMERIC)
csv_writer.writerow(['channelTitle','channelId','uploadsId','videoCount','subscriberCount','viewCount','commentCount'])
with open('./data/channels.csv', 'r', encoding='utf-8') as channels_CSV:
csv_reader = csv.reader(channels_CSV)
next(csv_reader) # skip header
for playlist in csv_reader:
channelResponse = youtube.channels().list(
part='contentDetails,statistics',
id=playlist[1]
).execute()
for item in channelResponse['items']:
row = [
playlist[0],
playlist[1],
item['contentDetails']['relatedPlaylists']['uploads'],
item['statistics'].get('videoCount'),
item['statistics'].get('subscriberCount'),
item['statistics'].get('viewCount'),
item['statistics'].get('commentCount')
]
csv_writer.writerow(row)
print('Done fetching channels Statistics!')
else:
print('Already Satisfied channels Statistics!')
# getting all videos for each channel
# creating video_ids.csv to store results
if not os.path.exists('./data/video_ids.csv'):
print('Start fetching Videos...')
with open('./data/video_ids.csv', 'w', encoding='utf-8', newline='') as videos_CSV:
csv_writer = csv.writer(videos_CSV, quoting=csv.QUOTE_NONNUMERIC)
csv_writer.writerow(['channelTitle','videoTitle','videoId','publishedDateTime'])
with open('./data/channel_statistics.csv', 'r', encoding='utf-8') as channel_statistics_CSV:
csv_reader = csv.reader(channel_statistics_CSV)
next(csv_reader) # skip header
for channel in csv_reader:
nextPageToken = None
# collecting videos then appending to video_ids.csv
while True:
# fetching result
videosResponse = youtube.playlistItems().list(
part='snippet',
playlistId=channel[2],
maxResults=50,
pageToken=nextPageToken
).execute()
# appending results to video_ids.csv
for item in videosResponse['items']:
row = [
channel[0],
item['snippet']['title'],
item['snippet']['resourceId']['videoId'],
item['snippet']['publishedAt']
]
csv_writer.writerow(row)
# retriving next page token
nextPageToken = videosResponse.get('nextPageToken')
# break while, when no more videos
if not nextPageToken: break
print(f'Done fetching videos from channel: {channel[0]}...')
print(f'Done fetching videos!')
else:
print('Already Satisfied videos!')
# fetching video data
if not os.path.exists('./data/videos_data.csv'):
print('Start fetching Videos Data...')
with open('./data/videos_data.csv', 'w', encoding='utf-8', newline='') as videos_data_CSV:
csv_writer = csv.writer(videos_data_CSV, quoting=csv.QUOTE_NONNUMERIC)
csv_writer.writerow(['channelTitle','videoId','videoTitle','videoViews','videoLike','videoDislike','videoComment','publishedDateTime'])
with open('./data/video_ids.csv', 'r', encoding='utf-8') as videos_CSV:
csv_reader = csv.reader(videos_CSV)
next(csv_reader) # skip header
lastLoop = True
counter = 1
while lastLoop:
# read 50 videos at a time
video_ids = []
for _ in range(50):
try: videoId = next(csv_reader)
except StopIteration:
lastLoop = False
break
video_ids.append(videoId[2])
videos_dataResponse = youtube.videos().list(
part='snippet,statistics',
id=','.join(video_ids),
maxResults=50
).execute()
# appending results to videos_data.csv
for item in videos_dataResponse['items']:
row = [
item['snippet']['channelTitle'],
item['id'],
item['snippet']['title'],
item['statistics'].get('viewCount'),
item['statistics'].get('likeCount'),
item['statistics'].get('dislikeCount'),
item['statistics'].get('commentCount'),
item['snippet']['publishedAt']
]
csv_writer.writerow(row)
print(f'Done fetching Videos Data: {counter}...')
counter += 1
print(f'Done fetching videos!')
else:
print('Already Satisfied videos Data!') | [
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
6738,
23645,
499,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
198,
198,
2,
1635,
30023,
9863,
8643,
198,
22766,
796,
374,
6,
29412,
11808,
6,
198,
198,
2,
1100,
3200,
... | 2.029058 | 3,407 |
import os
import binascii
import logging
import typing
import asyncio
from lbrynet.blob import MAX_BLOB_SIZE
from lbrynet.stream.descriptor import StreamDescriptor
if typing.TYPE_CHECKING:
from lbrynet.blob.blob_manager import BlobFileManager
from lbrynet.blob.blob_info import BlobInfo
from lbrynet.blob.blob_file import BlobFile
log = logging.getLogger(__name__)
| [
11748,
28686,
198,
11748,
9874,
292,
979,
72,
198,
11748,
18931,
198,
11748,
19720,
198,
11748,
30351,
952,
198,
6738,
18360,
563,
3262,
13,
2436,
672,
1330,
25882,
62,
9148,
9864,
62,
33489,
198,
6738,
18360,
563,
3262,
13,
5532,
13,
... | 2.82963 | 135 |
from .get_report import get_report
from ..call_rest import call_rest
| [
6738,
764,
1136,
62,
13116,
1330,
651,
62,
13116,
198,
6738,
11485,
13345,
62,
2118,
1330,
869,
62,
2118,
628
] | 3.5 | 20 |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from abc import ABCMeta
from sagemaker_pyspark import SageMakerJavaWrapper
class NamePolicy(SageMakerJavaWrapper):
"""
Provides names for SageMaker entities created during fit in
:class:`~sagemaker_pyspark.JavaSageMakerEstimator`.
"""
__metaclass__ = ABCMeta
_wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.NamePolicy"
class RandomNamePolicy(NamePolicy):
"""
Provides random, unique SageMaker entity names that begin with the specified prefix.
Args:
prefix (str): The common name prefix for all SageMaker entities named with this NamePolicy.
"""
_wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.RandomNamePolicy"
class NamePolicyFactory(SageMakerJavaWrapper):
"""
Creates a NamePolicy upon a call to createNamePolicy
:class:`~sagemaker_pyspark.JavaSageMakerEstimator`.
"""
__metaclass__ = ABCMeta
_wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.NamePolicyFactory"
class RandomNamePolicyFactory(NamePolicyFactory):
"""
Creates a RandomNamePolicy upon a call to createNamePolicy
Args:
prefix (str): The common name prefix for all SageMaker entities named with this NamePolicy.
"""
_wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.RandomNamePolicyFactory"
| [
2,
15069,
2177,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
... | 3.184746 | 590 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Wifi Rx Rftap
# Generated: Sun Feb 18 16:11:24 2018
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import forms
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import ieee802_11
import osmosdr
import rftap
import time
import wx
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
14468,
2235,
198,
2,
22961,
8829,
11361,
27782,
29681,
198,
2,
11851,
25,
370,
22238,
49715,
371,
701,
499,
... | 2.710227 | 352 |
#price = float(input('Enter The Price: ')) # int() to convert the inputs into float so we can compare it with tax
#if price >= 2.0:
#tax = 0.07
#else:
# tax = 0.00
#print(f'The Tax for this price is: ', {tax}, '\n \n') # note that the print statements without indentations so it will wait the if and else tesults to print the final tax
#country = 'CANADA'
#if country.lower() == 'canada': # we add .lower() cause python is case senstive, so when working with string becarful
#print('You Are Canadian')
#else:
#print('You Are Not Canadian \n \n')
#province = input('What is your province : ')
#if province.capitalize() == 'Alberta':
#tax_1 = 0.05
#elif province.capitalize() == 'Nunavut':
#tax_1 = 0.05
#elif province.capitalize() == 'Ontario':
# tax_1 = 0.13
#else:
# tax_1 = 0.15
#print(f'The Tax for Your province is: {tax_1} \n \n ')
# capitalizing the user input so we can compare the results
# how to use or/ and
#province = input('What is your province : ')
#if province.capitalize() == 'Alberta'\
#or province.capitalize() == 'Nunavut':
# tax_2 = 0.05
#elif province.capitalize() == 'Ontario':
# tax_2 = 0.13
#else:
# tax_2 = 0.15
#print(f'The Tax for Your province is: {tax_2} \n \n ')
# instead of using or/ and python provide IN so we can use it to compare the user input with a list:
province = input('What is your province : ')
if province in ('Alberta', 'Nunavut', 'Ontario'):
tax_3 = 0.05
elif province == 'Ontario':
tax_3 = 0.13
else:
tax_3 = 0.15
print(f'The Tax for your province is : {tax_3} \n \n')
# as well as you can use nest if statements :
# for example we have combination of conditions so we need to check the country firt and then to check the province
country = input('Your Country is : ')
if country.capitalize() == 'Canada':
province = input('Your province is : ')
if province in ('Alberta', 'Nunavut', 'Yukon'):
tax_4 = 0.05
elif province == 'Ontario':
tax_4 = 0.13
else:
tax_4 =0.15
else:
tax_4 = 0.00
print(f'The tax in Your Case is : {tax_4} \n \n')
# if the cinditions are complicated and needed lots of if statements then you can use boolean flag :
gpa = float(input('What is your GPA : '))
lowest_grade = float(input('WHat is you lowest grade among the whole grades : '))
#if gpa >= 0.85:
#if lowest_grade >= 0.70:
#print('Well Done You are in the honour roll')
if gpa >= 0.85 and lowest_grade>= 0.70:
honour_roll: True # we created this var and we assigned a True value to it
else:
honour_roll: False
# somewhere later in your code :
if honour_roll:
print('Well Done \n \n ')
| [
2,
20888,
796,
12178,
7,
15414,
10786,
17469,
383,
7886,
25,
705,
4008,
220,
1303,
493,
3419,
284,
10385,
262,
17311,
656,
12178,
523,
356,
460,
8996,
340,
351,
1687,
201,
198,
201,
198,
2,
361,
2756,
18189,
362,
13,
15,
25,
201,
... | 2.581527 | 1,061 |
##Unpackers and Emulator of BMTF-TwinMux
import FWCore.ParameterSet.Config as cms
process = cms.Process("TwinMuxRawToDigi")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = "80X_dataRun2_Express_v10"
process.load("EventFilter.RPCRawToDigi.rpcUnpackingModule_cfi")
process.load("CondTools.RPC.RPCLinkMap_sqlite_cff")
process.load("EventFilter.L1TXRawToDigi.twinMuxStage2Digis_cfi")
process.load("EventFilter.RPCRawToDigi.RPCTwinMuxRawToDigi_cfi")
process.RPCTwinMuxRawToDigi.bxMin = cms.int32(-5)
process.RPCTwinMuxRawToDigi.bxMax = cms.int32(5)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
process.source = cms.Source ("NewEventStreamFileReader", #"PoolSource",
#process.source = cms.Source ("PoolSource",
fileNames=cms.untracked.vstring(
'/store/t0streamer/Data/PhysicsMuons/000/280/307/run280307_ls0016_streamPhysicsMuons_StorageManager.dat',
),
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000))
# PostLS1 geometry used
process.load('Configuration.Geometry.GeometryExtended2015Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2015_cff')
############################
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
####Event Setup Producer
process.load('L1Trigger.L1TTwinMux.fakeTwinMuxParams_cff')
process.esProd = cms.EDAnalyzer("EventSetupRecordDataGetter",
toGet = cms.VPSet(
cms.PSet(record = cms.string('L1TwinMuxParamsRcd'),
data = cms.vstring('L1TwinMuxParams'))
),
verbose = cms.untracked.bool(True)
)
###TwinMux Emulator
process.load('L1Trigger.L1TTwinMux.simTwinMuxDigis_cfi')
process.simTwinMuxDigis.DTDigi_Source = cms.InputTag("twinMuxStage2Digis:PhIn")
process.simTwinMuxDigis.DTThetaDigi_Source = cms.InputTag("twinMuxStage2Digis:ThIn")
process.simTwinMuxDigis.RPC_Source = cms.InputTag("RPCTwinMuxRawToDigi")
process.dumpED = cms.EDAnalyzer("EventContentAnalyzer")
process.dumpES = cms.EDAnalyzer("PrintEventSetupContent")
#########################################
##########Ntuples Block#################
########################################
#process.load("UIoannina.TrUpS.L1TwinMuxProducer")
#
#process.L1TwinMuxProducer = process.L1TwinMuxProducer.clone(
# twinmuxOutputDigis = cms.InputTag("twinMuxStage2Digis:PhOut"),
# twinmuxInputPhDigis = cms.InputTag("twinMuxStage2Digis:PhIn"),
# twinmuxInputThDigis = cms.InputTag("twinMuxStage2Digis:ThIn"),
# twinmuxInputRPCDigis = cms.InputTag("RPCTwinMuxRawToDigi")
#
#)
#
#process.L1TwinMuxProducerEmulator = process.L1TwinMuxProducer.clone(
# twinmuxOutputDigis = cms.InputTag("simTwinMuxDigis"),
# twinmuxInputPhDigis = cms.InputTag("twinMuxStage2Digis:PhIn"),
# twinmuxInputThDigis = cms.InputTag("twinMuxStage2Digis:ThIn"),
# twinmuxInputRPCDigis = cms.InputTag("RPCTwinMuxRawToDigi")
#
#)
#process.load("UIoannina.TrUpS.EVRProducer_cfi")
#
#
# output file
#process.TFileService = cms.Service("TFileService",
# fileName = cms.string('Ntuple_l1ttwinmux_data_run280307.root')
#)
############################
process.L1TMuonSeq = cms.Sequence(process.RPCTwinMuxRawToDigi
+ process.twinMuxStage2Digis
+ process.rpcUnpackingModule
+ process.esProd
+ process.simTwinMuxDigis
# + process.EVRTProducer
# + process.L1TwinMuxProducer
# + process.L1TwinMuxProducerEmulator
)
process.L1TMuonPath = cms.Path(process.L1TMuonSeq)
process.out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring(
'drop *',
#'keep *CSC*_*_*_*',
'keep *RPC*_*_*_*',
'keep *DT*_*_*_*',
'keep *L1Mu*_*_*_*',
'keep *_*Muon*_*_*',
'keep *_*gen*_*_*',
'keep *_*TwinMux*_*_*',
'keep *_*Bmtf*_*_*',
'keep GenEventInfoProduct_generator_*_*'),
fileName = cms.untracked.string("l1ttwinmux.root")
)
process.output_step = cms.EndPath(process.out)
process.schedule = cms.Schedule(process.L1TMuonPath)
process.schedule.extend([process.output_step])
| [
2235,
3118,
8002,
364,
290,
2295,
8927,
286,
29944,
10234,
12,
5080,
259,
44,
2821,
198,
198,
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
14681,
796,
269,
907,
13,
18709,
7203,
5080,
259,
44,
2821,
27369,... | 2.251117 | 2,015 |
from __future__ import annotations
import logging
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
11748,
18931,
628
] | 5.1 | 10 |
# Copyright (C) 2019-2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
class ParamError(ValueError):
"""
Param of interface is illegal
"""
class ResultError(ValueError):
"""
Result of interface is illegal
"""
class ConnectError(ValueError):
"""
Connect server failed
"""
class NotConnectError(ConnectError):
"""
Disconnect error
"""
class RepeatingConnectError(ConnectError):
"""
Try to connect repeatedly
"""
class ConnectionPoolError(ConnectError):
"""
Waiting timeout error
"""
class FutureTimeoutError(TimeoutError):
"""
Future timeout
"""
class DeprecatedError(AttributeError):
"""
Deprecated
"""
class VersionError(AttributeError):
"""
Version not match
"""
| [
2,
15069,
357,
34,
8,
13130,
12,
1238,
2481,
1168,
359,
528,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
779,
428,
2393,
2845,
198,... | 3.277641 | 407 |
test = { 'name': 'q3d',
'points': 2,
'suites': [ { 'cases': [ { 'code': ">>> np.all(sorted(top_5_acc) == sorted(np.array(['ROSELAND COLLEGIATE PREP', 'GRANGE SCHOOL THE', 'FELICITAS GONZALO MENDEZ HS', 'ALLIANCE M&E STERN "
"MATH SCI SCH', 'SOUTH HIGH SCHOOL'])))\n"
'True',
'hidden': False,
'locked': False},
{ 'code': ">>> np.all(sorted(bottom_5_acc) == sorted(np.array(['NANJING FOREIGN LANGUAGE SCH', 'BEIJING NATIONAL DAY SCHOOL', 'HIGH SCH @RENMIN UNIV OF CHINA', "
"'SKYLINE HIGH SCHOOL', 'SHANGHAI FOREIGN LANGUAGE SCHL'])))\n"
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
9288,
796,
1391,
220,
220,
705,
3672,
10354,
705,
80,
18,
67,
3256,
198,
220,
220,
220,
705,
13033,
10354,
362,
11,
198,
220,
220,
220,
705,
2385,
2737,
10354,
685,
220,
220,
1391,
220,
220,
705,
33964,
10354,
685,
220,
220,
1391,
... | 1.516734 | 747 |
from .orm import Category
| [
6738,
764,
579,
1330,
21743,
198
] | 4.333333 | 6 |
import random as rnd
import re
import string
import requests
from xeger import Xeger
from bs4 import BeautifulSoup
def create_strings_from_file(filename, count):
"""
Create all strings by reading lines in specified files
"""
strings = []
with open(filename, "r", encoding="utf8") as f:
lines = [l[0:200] for l in f.read().splitlines() if len(l) > 0]
if len(lines) == 0:
raise Exception("No lines could be read in file")
while len(strings) < count:
if len(lines) >= count - len(strings):
strings.extend(lines[0 : count - len(strings)])
else:
strings.extend(lines)
return strings
def create_strings_from_dict(length, allow_variable, count, lang_dict):
"""
Create all strings by picking X random word in the dictionnary
"""
dict_len = len(lang_dict)
strings = []
for _ in range(0, count):
current_string = ""
for _ in range(0, rnd.randint(1, length) if allow_variable else length):
current_string += lang_dict[rnd.randrange(dict_len)]
current_string += " "
strings.append(current_string[:-1])
return strings
def create_strings_from_wikipedia(minimum_length, count, lang):
"""
Create all string by randomly picking Wikipedia articles and taking sentences from them.
"""
sentences = []
while len(sentences) < count:
# We fetch a random page
page = requests.get("https://{}.wikipedia.org/wiki/Special:Random".format(lang))
soup = BeautifulSoup(page.text, "html.parser")
for script in soup(["script", "style"]):
script.extract()
# Only take a certain length
lines = list(
filter(
lambda s: len(s.split(" ")) > minimum_length
and not "Wikipedia" in s
and not "wikipedia" in s,
[
" ".join(re.findall(r"[\w']+", s.strip()))[0:200]
for s in soup.get_text().splitlines()
],
)
)
# Remove the last lines that talks about contributing
sentences.extend(lines[0 : max([1, len(lines) - 5])])
return sentences[0:count]
def create_strings_randomly(
length, allow_variable, count, let, num, sym, lang, min_seq_len, max_seq_len
):
"""
Create all strings by randomly sampling from a pool of characters.
"""
# If none specified, use all three
if True not in (let, num, sym):
let, num, sym = True, True, True
pool = ""
if let:
if lang == "cn":
pool += "".join(
[chr(i) for i in range(19968, 40908)]
) # Unicode range of CHK characters
else:
pool += string.ascii_letters
if num:
pool += "0123456789"
if sym:
pool += "!\"#$%&'()*+,-./:;?@[\\]^_`{|}~"
if lang == "cn":
min_seq_len = 1
max_seq_len = 2
strings = []
for _ in range(0, count):
current_string = ""
for _ in range(0, rnd.randint(1, length) if allow_variable else length):
seq_len = rnd.randint(min_seq_len, max_seq_len)
current_string += "".join([rnd.choice(pool) for _ in range(seq_len)])
current_string += " "
strings.append(current_string[:-1])
return strings
| [
11748,
4738,
355,
374,
358,
198,
11748,
302,
198,
11748,
4731,
198,
11748,
7007,
198,
198,
6738,
2124,
11893,
1330,
1395,
11893,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
4299,
2251,
62,
37336,
62,
6738,
62,
7753,
7,
... | 2.240132 | 1,520 |
import logging
from collections import defaultdict
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Count, Field, Q
from unplugged import JSONAPIObject, command
from .models import BaseMetadataResolutionLink, BaseUpdatable
logger = logging.getLogger(__name__)
class ListingItemRelinkingMixin:
"""
Mixin to link Metadata and ListingItems according to the metadata_mapping.
Tries to do it intelligently.
"""
listing_item_relation_model = None
| [
11748,
18931,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
7663,
13921,
3673,
3109,
396,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
2764,
11,
7663,
11,
1195,
198,
6738,
555... | 3.320261 | 153 |
import torch
from torch import nn
from torch import optim
from collections import OrderedDict
from PIL import Image
import torchvision.transforms as transforms
import torchvision.models as models
densenet121 = models.densenet121(pretrained=True)
vgg16 = models.vgg16(pretrained=True)
models = {
"densenet": {
"name": densenet121,
"input": 1024
},
"vgg": {
"name": vgg16,
"input": 25088
},
}
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
1330,
6436,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
28034,
10178,
... | 2.566474 | 173 |
'''
__author__ : Priyank Parmar
__tested_on__ : 4th June, 2017
__description__ : To test different encoding and decoding methods
'''
## Taking the Input from user
str = raw_input('Enter String :')
print "=================================================="
## Encoded String
print "Encoded Strings"
# UTF-8 Encoding
encoded_str_utf_8 = str.encode('UTF-8','strict')
print "Encoded String [UTF-8] :", encoded_str_utf_8
# UTF-16 Encoding
encoded_str_utf_16 = str.encode('UTF-16','strict')
print "Encoded String [UTF-16] :", encoded_str_utf_16
# base-64 Encoding
encoded_str_base_64 = str.encode('base-64','strict')
print "Encoded String [Base-64] :", encoded_str_base_64
# hex Encoding
encoded_str_hex = str.encode('hex','strict')
print "Encoded String [Hex] :", encoded_str_hex
# rot13 Encoding
encoded_str_rot_13 = str.encode('rot13','strict')
print "Encoded String [rot13] :", encoded_str_rot_13
print "=================================================="
## Decoded String
print "Decoded Strings"
# UTF-8 Decoding
print "Decoded String [UTF-8] :", encoded_str_utf_8.decode('UTF-8','strict')
# UTF-16 Decoding
print "Decoded String [UTF-16] :", encoded_str_utf_16.decode('UTF-16','strict')
# base-64 Decoding
print "Decoded String [Base-64] :", encoded_str_base_64.decode('base-64','strict')
# hex Decoding
print "Decoded String [Hex] :", encoded_str_hex.decode('hex','strict')
# rot13 Decoding
print "Decoded String [rot13] :", encoded_str_rot_13.decode('rot13','strict')
print "=================================================="
## Encryption combination of (Base-64 --> Hex --> rot13
print "Encryption"
encrypted_str = str.encode('base-64','strict').encode('hex','strict').encode('rot13','strict')
print encrypted_str
## Decryption combination of (rot13 --> Hex --> Base-64)
print "Decryption"
decrypted_str = encrypted_str.decode('rot13','strict').decode('hex','strict').decode('base-64','strict')
print decrypted_str
| [
7061,
6,
198,
834,
9800,
834,
220,
197,
25,
4389,
88,
962,
47796,
283,
198,
834,
39612,
62,
261,
834,
197,
25,
604,
400,
2795,
11,
2177,
198,
834,
11213,
834,
1058,
1675,
1332,
1180,
21004,
290,
39938,
5050,
198,
7061,
6,
198,
223... | 2.937785 | 659 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014 Bryant E. McDonnell
#
# Licensed under the terms of the BSD2 License
# See LICENSE.txt for details
# -----------------------------------------------------------------------------
"""SWMM5 compiled libraries. This module provides the user with some options
for selecting the SWMM5 engine. """
# Standard library imports
import os
import sys
# Local Path
HERE = os.path.abspath(os.path.dirname(__file__))
# Platform Detection
def _platform():
"""Folder based on platform."""
if os.name == 'nt':
return 'windows'
if sys.platform == 'darwin':
return 'macos'
if sys.platform.startswith('linux'):
return 'linux'
# Library paths
if os.name == 'nt':
if sys.maxsize > 2**32:
LIB_SWMM = os.path.join(
HERE, _platform(), 'swmm5-x64.dll').replace('\\', '/')
else:
LIB_SWMM = os.path.join(
HERE,
_platform(),
'swmm5.dll').replace(
'\\',
'/')
elif sys.platform == 'darwin':
LIB_SWMM = os.path.join(
HERE,
_platform(),
'swmm5.dylib').replace(
'\\',
'/')
elif sys.platform.startswith('linux'):
LIB_SWMM = os.path.join(HERE, _platform(), 'swmm5.so').replace('\\', '/')
else:
LIB_SWMM = ''
class _DllPath(object):
"""DllPath Object."""
@property
def dll_loc(self):
"""Get/set DLL Name."""
return self._dll_loc
@dll_loc.setter
def dll_loc(self, value):
"""Set DLL Name."""
self._dll_loc = value
def __call__(self):
"""Caller returns DLL Name."""
return self._dll_loc
# Initialize dll path object
DLL_SELECTION = _DllPath()
def use(arg):
"""
Set the SWMM5 DLL.
This method allows the user to define the engine they would
like to use for the simulation. It is important to understand
that previous verisons of EPA-SWMM5 do not have the expanded
toolkit functionality. Therefore, only basic functionality for
running a simulation is available.
To use this, the user should copy and rename their SWMM5 DLL into
the :file:`site-packages/pyswmm/lib/windows` directory.
The example below outlines the steps. This should be done
before Simulation is imported.
Examples:
>>> import pyswmm
>>> pyswmm.lib.use("swmm5")
>>>
>>> from pyswmm import Simulation
"""
if os.name == 'nt':
if not arg.endswith('.dll'):
arg = arg + ".dll"
if os.path.isfile(
os.path.join(HERE, _platform(), arg).replace('\\', '/')):
DLL_SELECTION.dll_loc = os.path.join(HERE, _platform(),
arg).replace('\\', '/')
else:
raise (Exception("Library Not Found"))
elif sys.platform == 'darwin':
if not arg.endswith('.dylib'):
arg = arg + ".dylib"
if os.path.isfile(
os.path.join(HERE, _platform(), arg).replace('\\', '/')):
DLL_SELECTION.dll_loc = os.path.join(HERE, _platform(),
arg).replace('\\', '/')
else:
raise (Exception("Library Not Found"))
elif sys.platform.startswith('linux'):
if not arg.endswith('.so'):
arg = arg + ".so"
if os.path.isfile(
os.path.join(HERE, _platform(), arg).replace('\\', '/')):
DLL_SELECTION.dll_loc = os.path.join(HERE, _platform(),
arg).replace('\\', '/')
else:
raise (Exception("Library Not Found"))
else:
raise (Exception("Operating System not Supported"))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
32501,
198,
2,
15069,
357,
66,
8,
1946,
16754,
412,
13,
39441,
198,
2,
198,
2,
49962,
739,
262,
2846,
286,
262,
347,
10305,
17,
13789,
198,
2,
4091,
38559,... | 2.280072 | 1,671 |
from enum import Enum
from typing import Dict, Union
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
360,
713,
11,
4479,
628,
198
] | 3.666667 | 15 |
"""empty message
Revision ID: 7ef3c4f2ba0c
Revises:
Create Date: 2020-12-28 10:48:53.012083
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '7ef3c4f2ba0c'
down_revision = None
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
767,
891,
18,
66,
19,
69,
17,
7012,
15,
66,
198,
18009,
2696,
25,
220,
198,
16447,
7536,
25,
12131,
12,
1065,
12,
2078,
838,
25,
2780,
25,
4310,
13,
486,
1238,
5999,
198,
198,
... | 2.572581 | 124 |
pkgname = "ffmpeg"
pkgver = "4.4.1"
pkgrel = 0
build_style = "configure"
configure_args = [
"--prefix=/usr",
"--enable-shared",
"--enable-static",
"--enable-gpl",
"--enable-version3",
"--enable-runtime-cpudetect",
"--enable-lto",
"--enable-openssl",
"--enable-librtmp",
"--enable-postproc",
"--enable-libjack",
"--enable-libpulse",
"--enable-libxvid",
"--enable-libx264",
"--enable-libx265",
"--enable-libvpx",
"--enable-libaom",
"--enable-libdav1d",
"--enable-libvidstab",
"--enable-libmp3lame",
"--enable-libmodplug",
"--enable-libbs2b",
"--enable-libtheora",
"--enable-libvorbis",
"--enable-libopus",
"--enable-libcdio",
"--enable-libbluray",
"--enable-libfreetype",
"--enable-libopenjpeg",
"--enable-libwebp",
"--enable-libass",
"--enable-libv4l2",
"--enable-libxcb",
"--enable-librubberband",
"--enable-libxml2",
"--enable-opencl",
"--enable-vaapi",
"--enable-vdpau",
"--enable-vulkan",
"--enable-libdrm",
"--disable-debug",
"--disable-stripping",
"--disable-alsa",
"--disable-sndio",
"--disable-libopencore_amrnb",
"--disable-libopencore_amrwb",
"--disable-libcelt",
"--disable-libspeex",
]
make_cmd = "gmake"
make_install_args = ["install-man"]
make_check_args = ["-j1"]
hostmakedepends = ["gmake", "pkgconf", "perl", "yasm", "texinfo"]
makedepends = [
"zlib-devel", "libbz2-devel", "openssl-devel", "librtmp-devel",
"freetype-devel", "harfbuzz-devel",
"libxfixes-devel", "libxext-devel", "libxvmc-devel", "libxcb-devel",
"x264-devel", "x265-devel", "xvidcore-devel", "dav1d-devel",
"libvpx-devel", "libaom-devel", "libvidstab-devel",
"libtheora-devel", "libvorbis-devel", "opus-devel",
"libwebp-devel", "openjpeg-devel", "libbluray-devel",
"libass-devel", "libcdio-devel", "libcdio-paranoia-devel",
"libmodplug-devel", "lame-devel", "libbs2b-devel",
"libpulse-devel", "pipewire-jack-devel",
"libva-devel", "libvdpau-devel", "v4l-utils-devel",
"vulkan-loader", "vulkan-headers", "libdrm-devel", "sdl-devel",
"rubberband-devel", "libxml2-devel", "ocl-icd-devel",
]
depends = [f"ffplay={pkgver}-r{pkgrel}"]
pkgdesc = "Decoding, encoding and streaming software"
maintainer = "q66 <q66@chimera-linux.org>"
# we use --enable-gpl; it enables useful filters
license = "GPL-3.0-or-later"
url = "https://ffmpeg.org"
source = f"{url}/releases/{pkgname}-{pkgver}.tar.xz"
sha256 = "eadbad9e9ab30b25f5520fbfde99fae4a92a1ae3c0257a8d68569a4651e30e02"
# seems to need rpath?
options = ["!check"]
if self.profile().cross:
_archmap = {
"aarch64": "aarch64",
"riscv64": "riscv64",
"ppc64le": "ppc64",
"ppc64": "ppc64",
"x86_64": "x8_64",
}
if self.profile().arch not in archmap:
broken = f"unknown architecture: {self.profile().arch}"
configure_args += [
"--enable-cross-compile",
"--target-os=linux",
"--arch=" + _archmap.get(self.profile().arch, "unknown"),
f"--sysroot={self.profile().sysroot}",
]
for lname, ldesc in [
("avcodec", "codec"),
("avdevice", "device handling"),
("avformat", "file format"),
("avutil", "utility"),
("avfilter", "audio/video filter"),
("postproc", "video postprocessing"),
("swscale", "video scaling"),
("swresample", "video resampling"),
]:
_genlib(lname, ldesc)
@subpackage("ffmpeg-devel")
@subpackage("ffplay")
| [
35339,
3672,
796,
366,
487,
43913,
1,
198,
35339,
332,
796,
366,
19,
13,
19,
13,
16,
1,
198,
35339,
2411,
796,
657,
198,
11249,
62,
7635,
796,
366,
11250,
495,
1,
198,
11250,
495,
62,
22046,
796,
685,
198,
220,
220,
220,
366,
43... | 2.133414 | 1,649 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
198
] | 3 | 7 |
import boto3
import time
import os
import csv
from datetime import datetime
import json
if __name__ == '__main__':
main()
| [
11748,
275,
2069,
18,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
... | 2.953488 | 43 |
import importlib, warnings
from pyinstrument.vendor.decorator import decorator
@decorator
def deprecated(func, *args, **kwargs):
''' Marks a function as deprecated. '''
warnings.warn(
'{} is deprecated and should no longer be used.'.format(func),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs)
def deprecated_option(option_name, message=''):
''' Marks an option as deprecated. '''
return decorator(caller)
| [
11748,
1330,
8019,
11,
14601,
198,
6738,
12972,
259,
43872,
13,
85,
18738,
13,
12501,
273,
1352,
1330,
11705,
1352,
198,
198,
31,
12501,
273,
1352,
198,
4299,
39224,
7,
20786,
11,
1635,
22046,
11,
12429,
46265,
22046,
2599,
198,
220,
... | 2.782353 | 170 |
# Generated by Django 4.0.2 on 2022-02-22 17:05
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
17,
319,
33160,
12,
2999,
12,
1828,
1596,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import json
import numbers
from pathlib import Path
import numpy as np
import samplerate
import soundfile
from cached_property import cached_property
from lazy_dataset import Dataset, FilterException
from lazy_dataset.core import DynamicTimeSeriesBucket
from scipy.interpolate import make_interp_spline
from scipy.signal import stft
from skimage.transform import warp
from tqdm import tqdm
def split_dataset(dataset, fold, nfolfds=5, seed=0):
"""
Args:
dataset:
fold:
nfolfds:
seed:
Returns:
>>> split_dataset(np.array([1,2,3,4,5]), 0, nfolfds=2)
[array([1, 3]), array([2, 4, 5])]
>>> split_dataset(np.array([1,2,3,4,5]), 1, nfolfds=2)
[array([1, 3]), array([2, 4, 5])]
"""
indices = np.arange(len(dataset))
np.random.RandomState(seed).shuffle(indices)
folds = np.split(
indices,
np.linspace(0, len(dataset), nfolfds + 1)[1:-1].astype(np.int64)
)
validation_indices = folds.pop(fold)
training_indices = np.concatenate(folds)
return [
dataset[sorted(indices.tolist())]
for indices in (training_indices, validation_indices)
]
def read_moments(
dataset=None, key=None, center_axis=None, scale_axis=None,
filepath=None, verbose=False
):
"""
Loads or computes the global mean (center) and scale over a dataset.
Args:
dataset: lazy dataset providing example dicts
key: example dict key of the features to compute the moments from
center_axis: axis of the feature array over which the mean (center) is computed
scale_axis: axis of the feature array over which the scale is computed
filepath: file to load/store the moments from/at
verbose:
Returns:
"""
if filepath and Path(filepath).exists():
with filepath.open() as fid:
mean, scale = json.load(fid)
if verbose:
print(f'Restored moments from {filepath}')
else:
assert dataset is not None
mean = 0
mean_count = 0
energy = 0
energy_count = 0
for example in tqdm(dataset, disable=not verbose):
x = example[key]
if center_axis is not None:
if not mean_count:
mean = np.sum(x, axis=center_axis, keepdims=True)
else:
mean += np.sum(x, axis=center_axis, keepdims=True)
mean_count += np.prod(
np.array(x.shape)[np.array(center_axis)]
)
if scale_axis is not None:
if not energy_count:
energy = np.sum(x**2, axis=scale_axis, keepdims=True)
else:
energy += np.sum(x**2, axis=scale_axis, keepdims=True)
energy_count += np.prod(
np.array(x.shape)[np.array(scale_axis)]
)
if center_axis is not None:
mean /= mean_count
if scale_axis is not None:
energy /= energy_count
scale = np.sqrt(np.mean(
energy - mean ** 2, axis=scale_axis, keepdims=True
))
else:
scale = np.array(1.)
if filepath:
with filepath.open('w') as fid:
json.dump(
(mean.tolist(), scale.tolist()), fid,
sort_keys=True, indent=4
)
if verbose:
print(f'Saved moments to {filepath}')
return np.array(mean), np.array(scale)
| [
198,
11748,
33918,
198,
11748,
3146,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
6072,
20053,
378,
198,
11748,
2128,
7753,
198,
6738,
39986,
62,
26745,
1330,
39986,
62,
26745,
198,
6738,
16931,... | 2.051963 | 1,732 |
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export the source code comments into the API Reference website."""
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import tensorflow_decision_forests as tfdf
FLAGS = flags.FLAGS
flags.DEFINE_string("output_dir", "/tmp/tfdf_api", "Where to output the docs")
flags.DEFINE_string("code_url_prefix", "", "The url prefix for links to code.")
flags.DEFINE_bool("search_hints", True,
"Include metadata search hints in the generated files")
flags.DEFINE_string("site_path", "/decision_forests/api_docs/python",
"Path prefix in the _toc.yaml")
flags.DEFINE_bool('gen_report', False,
('Generate an API report containing the health of the'
'docstrings of the public API.'))
if __name__ == "__main__":
app.run(main)
| [
2,
15069,
33448,
3012,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,... | 3.057613 | 486 |
#!/usr/bin/env python3
import argparse
from Gaugi import Pool
from Gaugi import Logger
mainLogger = Logger.getModuleLogger("prun.job")
parser = argparse.ArgumentParser(description = '', add_help = False)
parser = argparse.ArgumentParser()
parser.add_argument('-o','--outputFile', action='store',
dest='outputFile', required = True,
help = "Output file")
parser.add_argument('-i','--inputFiles', action='store',
dest='inputFiles', required = False, nargs='+', default = None,
help = "Input files")
parser.add_argument('-c','--command', action='store',
dest='command', required = True,
help = "The command job")
parser.add_argument('-mt','--numberOfThreads', action='store',
dest='numberOfThreads', required = False, default = 1, type=int,
help = "The number of threads")
parser.add_argument('-m','--merge', action='store_true', dest='merge', required = False,
help = "Merge all output files.")
import sys,os
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
from Gaugi import expand_folders
files = expand_folders( args.inputFiles )
prun = Pool( func, args.command, args.numberOfThreads, files, args.outputFile )
prun.run()
if args.merge:
prun.merge() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
6738,
402,
7493,
72,
1330,
19850,
198,
6738,
402,
7493,
72,
1330,
5972,
1362,
628,
198,
12417,
11187,
1362,
796,
5972,
1362,
13,
1136,
26796,
11187,
1362,
... | 2.791574 | 451 |
from pedal.assertions.feedbacks import AssertionFeedback
from pedal.core.report import MAIN_REPORT
class missing_function(AssertionFeedback):
""" Unconditionally asserts that the function is missing. """
title = "Missing Function"
message_template = "No function named {name_message} was found."
class duplicate_function_definition(AssertionFeedback):
""" Unconditionally assert that the function is redefined somewhere. """
title = "Duplicate Function Definition"
message_template = ("The function {name_message} was defined multiple times, "
"on lines {lines_message}.")
class too_few_parameters(AssertionFeedback):
""" Too Few Parameters """
title = "Too Few Parameters"
message_template = ("The function named {name_message} has fewer "
"parameters ({found}) than expected ({expected}).")
class too_many_parameters(AssertionFeedback):
""" Too Many Parameters """
title = "Too Many Parameters"
message_template = ("The function named {name_message} has more "
"parameters ({found}) than expected ({expected}).")
class invalid_parameter_type(AssertionFeedback):
""" Invalid parameter type """
title = "Invalid Parameter Type"
message_template = ("The function named {name_message} has a parameter "
"named {parameter_message} with an invalid type.")
class missing_parameter_type(AssertionFeedback):
""" Invalid parameter type """
title = "Missing Parameter Type"
message_template = ("The function named {name_message} has a parameter "
"named {parameter_message}, but that parameter does "
"not have a type specified.")
class wrong_parameter_type(AssertionFeedback):
""" Wrong Parameter Type """
title = "Wrong Parameter Type"
message_template = ("The function named {name_message} has a parameter "
"named {parameter_message} that is {actual_message},"
" but should be {expected_message}.")
class missing_return_type(AssertionFeedback):
""" Missing Returns Type """
title = "Missing Return Type"
message_template = ("The function named {name_message} does not have a "
"return type specified in its header.")
class invalid_return_type(AssertionFeedback):
""" Invalid Return Type """
title = "Invalid Return Type"
message_template = ("The function named {name_message} has an invalid "
"return type in its header.")
class wrong_return_type(AssertionFeedback):
""" Wrong Return Type """
title = "Wrong Return Type"
message_template = ("The function named {name_message} was expected to "
"return {expected_message}, but instead its header "
"specifies that it returns {actual_message}. ")
| [
6738,
26667,
13,
30493,
507,
13,
12363,
10146,
1330,
2195,
861,
295,
18332,
1891,
198,
6738,
26667,
13,
7295,
13,
13116,
1330,
8779,
1268,
62,
2200,
15490,
628,
198,
4871,
4814,
62,
8818,
7,
8021,
861,
295,
18332,
1891,
2599,
198,
220... | 2.827519 | 1,032 |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
| [
2,
24700,
1137,
11617,
11050,
509,
2662,
6981,
26144,
532,
8410,
5626,
48483,
198,
11748,
11281,
8443,
62,
33803,
62,
43282,
198,
11748,
33918,
628,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
628
] | 2.833333 | 36 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
import json
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.test.utils import override_settings
from geonode import geoserver
from geonode.base.models import Link
from geonode.layers.models import Layer
from geonode.decorators import on_ogc_backend
from geonode.tests.base import GeoNodeBaseTestSupport
from geonode.base.populate_test_data import create_models
TEST_DOMAIN = '.github.com'
TEST_URL = f'https://help{TEST_DOMAIN}/'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
2,
198,
2,
198,
2,
15069,
357,
34,
8,
1584,
7294,
10082,
78,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
1... | 3.674208 | 442 |
from .api import MediaFileHistory
| [
198,
6738,
764,
15042,
1330,
6343,
8979,
18122,
628
] | 4 | 9 |
#This was not written by sonicrules1234, look in namecoindnsserver.py at the top for the actual author
import struct
| [
2,
1212,
373,
407,
3194,
416,
36220,
38785,
1065,
2682,
11,
804,
287,
1438,
1073,
521,
77,
824,
18497,
13,
9078,
379,
262,
1353,
329,
262,
4036,
1772,
198,
11748,
2878,
628
] | 3.6875 | 32 |
from setuptools import setup
setup(
name='pylrc',
packages=['pylrc'],
version='0.1.0',
description='A library for parsing .lrc files',
long_description=open('README.rst').read(),
author='doakey3, SimonIT',
author_email='pylrc.dmodo@spamgourmet.com',
url='https://github.com/doakey3/pylrc',
download_url='https://github.com/doakey3/pylrc/releases/download/0.1.0/pylrc.tar.gz',
license='MIT',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
79,
2645,
6015,
3256,
198,
220,
220,
220,
10392,
28,
17816,
79,
2645,
6015,
6,
4357,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
32... | 2.308511 | 188 |
# -*- coding: utf-8 -*-
from modules_lib.bleextop.models import roledao, permissiondao
import datetime
import re
#/**
#* Return the application permissions for the given user
#* @params data An array containing the user model and the application_id
#* @return Array containing the permissions and success property
#**/
#/**
#* Return the permissions for the given application
#* @params application_k The id application
#* @return Array containing the application permissions and a success property
#**/
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
13103,
62,
8019,
13,
903,
2302,
404,
13,
27530,
1330,
686,
992,
5488,
11,
7170,
67,
5488,
198,
11748,
4818,
8079,
198,
11748,
302,
628,
198,
220,
220,
220,
1303,... | 3.41875 | 160 |
import logging
from typing import BinaryIO, Optional
import requests
from injector import Module, ProviderOf, inject, singleton
from werkzeug import Request, Response
from werkzeug.routing import Rule
from rep0st.db.post import PostRepository, PostRepositoryModule
from rep0st.framework.data.transaction import transactional
from rep0st.framework.web import endpoint, request_data
from rep0st.service.media_service import ImageDecodeException, NoMediaFoundException
from rep0st.service.post_search_service import PostSearchService, PostSearchServiceModule
from rep0st.web import MediaHelper
from rep0st.web.templates import IndexTemplate
log = logging.getLogger(__name__)
@singleton
| [
11748,
18931,
198,
6738,
19720,
1330,
45755,
9399,
11,
32233,
198,
198,
11748,
7007,
198,
6738,
8677,
273,
1330,
19937,
11,
32549,
5189,
11,
8677,
11,
2060,
1122,
198,
6738,
266,
9587,
2736,
1018,
1330,
19390,
11,
18261,
198,
6738,
266,... | 3.724324 | 185 |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
| [
11748,
28686,
198,
198,
11748,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
198,
198,
9288,
10745,
430,
62,
4774,
82,
796,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
13,
2025,
82,
856,
49493,
7,
198,
220,
220,
... | 2.452055 | 73 |
##########################################################################
# Hopla - Copyright (C) AGrigis, 2015 - 2016
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
This module proposes a local worker and a distant TORQUE worker. The two
proposed workers are able to follow a '__hopla__' list of parameter
names to keep trace on. All specified parameters values are stored in the
execution status.
""" # pragma: no cover
# System import
from __future__ import print_function # pragma: no cover
import os # pragma: no cover
import copy # pragma: no cover
import subprocess # pragma: no cover
import traceback # pragma: no cover
from socket import getfqdn # pragma: no cover
import sys # pragma: no cover
import glob # pragma: no cover
import time # pragma: no cover
import json # pragma: no cover
import random # pragma: no cover
# Hopla import
from .signals import FLAG_ALL_DONE # pragma: no cover
from .signals import FLAG_WORKER_FINISHED_PROCESSING # pragma: no cover
def worker(tasks, returncodes, python_cmd="python", delay_upto=0,
use_subprocess=False):
""" The worker function for a script.
If the script contains a '__hopla__' list of parameter names to keep
trace on, all the specified parameters values are stored in the return
code.
Parameters
----------
tasks, returncodes: multiprocessing.Queue
the input (commands) and output (results) queues.
python_cmd: str (optional, default 'python')
the path to the python binary. Only usefull in the subprocess mode.
delay_upto: int (optional, default 0)
the process execution will be delayed randomly by [0, <delay_upto>[
seconds.
use_subprocess: bool, default False
use a subprocess (for instance in case of memory leak). In this
particular case the __hopla__ setting is deactivated.
"""
while True:
signal = tasks.get()
if signal == FLAG_ALL_DONE:
returncodes.put(FLAG_WORKER_FINISHED_PROCESSING)
break
job_name, command = signal
returncode = {}
returncode[job_name] = {}
returncode[job_name]["info"] = {}
returncode[job_name]["debug"] = {}
returncode[job_name]["info"]["cmd"] = command
returncode[job_name]["debug"]["hostname"] = getfqdn()
# COMPATIBILITY: dict in python 2 becomes structure in pyton 3
python_version = sys.version_info
if python_version[0] < 3:
environ = copy.deepcopy(os.environ.__dict__)
else:
environ = copy.deepcopy(os.environ._data)
returncode[job_name]["debug"]["environ"] = environ
# Execution with a random delay expressed in seconds
try:
time.sleep(random.random() * abs(delay_upto))
sys.argv = command
job_status = {}
if use_subprocess:
if python_cmd is not None:
subprocess.check_call([python_cmd] + command)
else:
command = command[0].split(" ") + command[1:]
subprocess.check_call(command)
else:
with open(command[0]) as ofile:
exec(ofile.read(), job_status)
returncode[job_name]["info"]["exitcode"] = "0"
# Error
except:
returncode[job_name]["info"]["exitcode"] = (
"1 - '{0}'".format(traceback.format_exc().rstrip("\n")))
# Follow '__hopla__' script parameters
finally:
if "__hopla__" in job_status:
for parameter_name in job_status["__hopla__"]:
if parameter_name in job_status:
returncode[job_name]["info"][
parameter_name] = job_status[parameter_name]
returncodes.put(returncode)
PBS_TEMPLATE = """
#!/bin/bash
#PBS -l mem={memory}gb,nodes=1:ppn={threads},walltime={hwalltime}:00:00
#PBS -N {name}
#PBS -e {errfile}
#PBS -o {logfile}
echo $PBS_JOBID
echo $HOSTNAME
{command}
""" # pragma: no cover
PY_TEMPLATE = """
from __future__ import print_function
import sys
import json
# Execute the command line in the 'job_status' environment
try:
command = {cmd}
sys.argv = command
job_status = dict()
with open(command[0]) as ofile:
exec(ofile.read(), job_status)
# Error
except:
raise
# Follow '__hopla__' script parameters: print the parameters to keep trace on
# in '<hopla>...</hopla>' div in order to communicate with the scheduler and
# in order to generate a complete log
finally:
parameters = dict()
if "__hopla__" in job_status:
for parameter_name in job_status["__hopla__"]:
if parameter_name in job_status:
parameters[parameter_name] = job_status[parameter_name]
print("<hopla>")
print(json.dumps(parameters))
print("</hopla>")
""" # pragma: no cover
def qsub_worker(tasks, returncodes, logdir, queue,
memory=1, walltime=24, nb_threads=1, python_cmd="python",
delay_upto=0, sleep=40):
""" A cluster worker function for a script.
Use the TORQUE resource manager provides control over batch jobs and
distributed computing resources. It is an advanced open-source product
based on the original PBS project.
Use a double script strategy in order to manage the '__hopla__' list of
parameter names to keep trace on: a '.pbs' script calling another '.py'
script that print the '__hopla__' parameters. All the specified parameters
values are stored in the return code.
Parameters
----------
tasks, returncodes: multiprocessing.Queue
the input (commands) and output (results) queues.
logdir: str
a path where the qsub error and output files will be stored.
queue: str
the name of the queue where the jobs will be submited.
memory: float (optional, default 1)
the memory allocated to each qsub (in GB).
walltime: int (optional, default 24)
the walltime used for each job submitted on the cluster (in hours).
nb_threads: int (optional, default 1)
the number of cores allocated for each node.
python_cmd: str (optional, default 'python')
the path to the python binary. If None consider the command directly in
the PBS batch.
delay_upto: int (optional, default 0)
the process execution will be delayed randomly by [0, <delay_upto>[
seconds.
sleep: float (optional, default 40)
time rate to check the termination of the submited jobs.
"""
while True:
signal = tasks.get()
if signal == FLAG_ALL_DONE:
returncodes.put(FLAG_WORKER_FINISHED_PROCESSING)
break
job_name, command = signal
returncode = {}
returncode[job_name] = {}
returncode[job_name]["info"] = {}
returncode[job_name]["debug"] = {}
returncode[job_name]["info"]["cmd"] = command
returncode[job_name]["debug"]["hostname"] = getfqdn()
# COMPATIBILITY: dict in python 2 becomes structure in python 3
python_version = sys.version_info
if python_version[0] < 3:
environ = copy.deepcopy(os.environ.__dict__)
else:
environ = copy.deepcopy(os.environ._data)
returncode[job_name]["debug"]["environ"] = environ
# Torque-PBS execution
fname_pbs = os.path.join(logdir, job_name + ".pbs")
fname_py = os.path.join(logdir, job_name + ".py")
errfile = os.path.join(logdir, "error." + job_name)
logfile = os.path.join(logdir, "output." + job_name)
try:
# Random delay expressed in seconds
time.sleep(random.random() * abs(delay_upto))
# Edit the job to be submitted
if python_cmd is not None:
with open(fname_py, "w") as open_file:
open_file.write(PY_TEMPLATE.format(cmd=command))
with open(fname_pbs, "w") as open_file:
pbs_cmd = " ".join([python_cmd, fname_py])
open_file.write(PBS_TEMPLATE.format(
memory=memory,
hwalltime=walltime,
threads=nb_threads,
name=job_name,
errfile=errfile,
logfile=logfile,
command=pbs_cmd))
else:
with open(fname_pbs, "w") as open_file:
open_file.write(PBS_TEMPLATE.format(
memory=memory,
hwalltime=walltime,
threads=nb_threads,
name=job_name,
errfile=errfile,
logfile=logfile,
command=" ".join(command)))
# Submit the job
# subprocess.check_call(["qsub", "-q", queue, fname_pbs])
process = subprocess.Popen(["qsub", "-q", queue, fname_pbs],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout = stdout.decode("utf8")
job_id = stdout.rstrip("\n")
print(job_id)
exitcode = process.returncode
if exitcode != 0:
raise Exception(stderr)
# Lock everything until the submitted command has not terminated
while True:
terminated = os.path.isfile(errfile) or os.path.isfile(logfile)
with_log = terminated
process = subprocess.Popen("qstat | grep {0}".format(job_id),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout, stderr = process.communicate()
exitcode = process.returncode
# terminated = terminated or (exitcode == 1)
if terminated:
break
time.sleep(sleep)
# Check that no error was produced during the submission
if with_log:
with open(errfile) as open_file:
stderr = open_file.readlines()
if len(stderr) > 0:
raise Exception("\n".join(stderr))
# Update the return code
if with_log:
returncode[job_name]["info"]["exitcode"] = "0"
else:
returncode[job_name]["info"]["exitcode"] = "-1"
# Error
except:
if os.path.isfile(errfile):
with open(errfile) as openfile:
error_message = openfile.readlines()
else:
error_message = traceback.format_exc()
returncode[job_name]["info"]["exitcode"] = (
"1 - '{0}'".format(error_message))
# Follow '__hopla__' script parameters in pbs '<hopla>...</hopla>'
# output
finally:
if os.path.isfile(logfile):
with open(logfile) as open_file:
stdout = open_file.read()
hopla_start = stdout.rfind("<hopla>")
hopla_end = stdout.rfind("</hopla>")
parameters_repr = stdout[
hopla_start + len("<hopla>"): hopla_end]
try:
parameters = json.loads(parameters_repr)
except:
parameters = {}
for name, value in parameters.items():
returncode[job_name]["info"][name] = value
returncodes.put(returncode)
| [
29113,
29113,
7804,
2235,
198,
2,
9996,
5031,
532,
15069,
357,
34,
8,
13077,
4359,
271,
11,
1853,
532,
1584,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
20101,
34,
8267,
12,
33,
5964,
11,
355,
3199,
416,
198,
2,
262,
327,
16412,... | 2.147295 | 5,601 |
from simx.base.testutil import import_assert_functions
import_assert_functions(globals())
from simx.base import TossimBase
from simx.base import Node
from TOSSIM import Tossim
_tossim = Tossim([])
tossim = None
a = None
b = None
c = None
# PST- due to overflow bugs in Tossim (and that there is no way to
# clear the noise traces) each test that uses noise trace readings
# works on its own discrete node.
# NODE 0
# NODE 1
# NODE 2
# NODE 3
# linking tests
| [
6738,
985,
87,
13,
8692,
13,
9288,
22602,
1330,
1330,
62,
30493,
62,
12543,
2733,
198,
11748,
62,
30493,
62,
12543,
2733,
7,
4743,
672,
874,
28955,
198,
198,
6738,
985,
87,
13,
8692,
1330,
309,
793,
320,
14881,
198,
6738,
985,
87,
... | 2.914634 | 164 |
from tests.utils.runtest import makesuite, run
from tests.utils.testcase import TestCase
from tests.utils.gc import gcwait
from System import NullReferenceException, WeakReference
from System.Runtime.InteropServices import Marshal
from Ironclad import BadMappingException, CPyMarshal, InterestingPtrMap, PtrFunc
from Ironclad.Structs import PyObject
# NOTE: certain tests wrap some of their execution in a do() function;
# either to ensure that things-which-should-be-GCed really do get GCed,
# rather than hanging around until out of scope, or to ensure that
# inappropriate gc gets a chance to happen and cause a failure.
suite = makesuite(InterestingPtrMapTest)
if __name__ == '__main__':
run(suite) | [
201,
198,
6738,
5254,
13,
26791,
13,
81,
2797,
395,
1330,
1838,
84,
578,
11,
1057,
201,
198,
6738,
5254,
13,
26791,
13,
9288,
7442,
1330,
6208,
20448,
201,
198,
201,
198,
6738,
5254,
13,
26791,
13,
36484,
1330,
308,
66,
17077,
201,
... | 2.681967 | 305 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 2.954545 | 44 |
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from portfolio.portfolioapp.tests.factories import PortfolioProjectFactory
from portfolio.templatematching.tests.factories import UploadedImageFactory
from portfolio.tftchampions.tests.factories import ChampionFactory
from portfolio.users.models import User
from portfolio.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
@pytest.fixture
@pytest.fixture(name="project")
@pytest.fixture(name="photo")
@pytest.fixture(name="champion")
@pytest.fixture(name="uploaded_image")
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
25850,
276,
7753,
1330,
17427,
41592,
276,
8979,
198,
198,
6738,
15320,
13,
634,
13652,
1324,
13,
41989,
13,
22584,
1749,
1330,
4347,
13652,
16775,
22810,
198,
6738,
... | 3.369942 | 173 |
from django.urls import path
from app1.dateview.addDepartments import *
from app1.dateview.showDepartments import *
from app1.dateview.updateDepartment import *
from app1.dateview.delDepartment import *
urlpatterns =[
path("addDepartments",addDepartments,name="addDepartments"),
path("showDepartments",showDepartments,name="showDepartments"),
path("updateDepartment",updateDepartment,name="updateDepartment"),
path("delDepartment",delDepartment,name="delDepartment"),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
598,
16,
13,
4475,
1177,
13,
2860,
12156,
32514,
1330,
1635,
198,
6738,
598,
16,
13,
4475,
1177,
13,
12860,
12156,
32514,
1330,
1635,
198,
6738,
598,
16,
13,
4475,
1177,
13,
1... | 3.446809 | 141 |
from openpnm.algorithms import TransientReactiveTransport, IonicConduction
from openpnm.utils import logging, Docorator, GenericSettings
logger = logging.getLogger(__name__)
docstr = Docorator()
@docstr.get_sections(base='TransientIonicConductionSettings',
sections=['Parameters'])
@docstr.dedent
class TransientIonicConductionSettings(GenericSettings):
r"""
Parameters
----------
##
Other Parameters
----------------
**The following parameters pertain to steady-state IonicConduction**
%(IonicConductionSettings.parameters)s
----
**The following parameters pertain to the ReactiveTransport class**
%(ReactiveTransportSettings.other_parameters)s
----
**The following parameters pertain to the GenericTransport class**
%(GenericTransportSettings.other_parameters)s
"""
quantity = 'pore.potential'
conductance = 'throat.ionic_conductance'
charge_conservation = 'electroneutrality'
cache_A = False
cache_b = False
class TransientIonicConduction(TransientReactiveTransport,
IonicConduction):
r"""
A subclass of GenericTransport to perform steady and transient simulations
of pure diffusion and advection-diffusion problems.
"""
| [
6738,
1280,
79,
21533,
13,
282,
7727,
907,
1330,
3602,
1153,
3041,
5275,
8291,
634,
11,
314,
9229,
25559,
8110,
198,
6738,
1280,
79,
21533,
13,
26791,
1330,
18931,
11,
14432,
273,
1352,
11,
42044,
26232,
198,
6404,
1362,
796,
18931,
1... | 2.947368 | 437 |
import tenseal as ts
| [
11748,
20170,
282,
355,
40379,
628
] | 3.666667 | 6 |
#!/usr/bin/env python
# -- coding: utf-8 --
"""
Copyright (c) 2021. All rights reserved.
Created by C. L. Wang on 6.12.21
"""
import os
import math
import random
from PIL import Image
from myutils.cv_utils import *
from root_dir import DATA_DIR
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
1377,
19617,
25,
3384,
69,
12,
23,
1377,
198,
37811,
198,
15269,
357,
66,
8,
33448,
13,
1439,
2489,
10395,
13,
198,
41972,
416,
327,
13,
406,
13,
15233,
319,
718,
13,
1065,
13,
... | 2.204216 | 759 |
from bentso import CachingDataClient
from bentso.db import get_database
import numpy as np
import os
import pandas as pd
import pytest
FIXTURES = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"fixtures",
)
| [
6738,
17157,
568,
1330,
327,
8103,
6601,
11792,
198,
6738,
17157,
568,
13,
9945,
1330,
651,
62,
48806,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
198,
47084... | 2.704545 | 88 |
# coding: utf-8
from __future__ import unicode_literals
from io import StringIO
from mock import Mock, patch
# pylint:disable=import-error
import pytest
# pylint:enable=import-error
from flaky import flaky
from flaky import _flaky_plugin
from flaky.flaky_pytest_plugin import (
CallInfo,
FlakyPlugin,
FlakyXdist,
PLUGIN,
)
from flaky.names import FlakyNames
from flaky.utils import unicode_type
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture(params=['instance', 'module', 'parent'])
@pytest.fixture
@pytest.fixture
@pytest.fixture(params=(
{},
{'flaky_report': ''},
{'flaky_report': 'ŝȁḿҏľȅ ƭȅхƭ'},
))
@pytest.fixture(params=(None, object()))
@pytest.mark.parametrize('assign_slaveoutput', (True, False))
_REPORT_TEXT1 = 'Flaky report text'
_REPORT_TEXT2 = 'Ḿőŕȅ ƒľȁƙŷ ŕȅҏőŕƭ ƭȅхƭ'
@pytest.mark.parametrize('initial_report,stream_report,expected_report', (
('', '', ''),
('', _REPORT_TEXT1, _REPORT_TEXT1),
(_REPORT_TEXT1, '', _REPORT_TEXT1),
(_REPORT_TEXT1, _REPORT_TEXT2, _REPORT_TEXT1 + _REPORT_TEXT2),
(_REPORT_TEXT2, _REPORT_TEXT1, _REPORT_TEXT2 + _REPORT_TEXT1),
))
def test_flaky_plugin_raises_errors_in_fixture_setup(
flaky_test,
flaky_plugin,
string_io,
mock_io,
):
"""
Test for Issue #57 - fixtures which raise an error should show up as
test errors.
This test ensures that exceptions occurring when running a test
fixture are copied into the call info's excinfo field.
"""
flaky()(flaky_test)
flaky_test.ihook = Mock()
flaky_test.ihook.pytest_runtest_setup = error_raising_setup_function
flaky_plugin._call_infos[flaky_test] = {} # pylint:disable=protected-access
call_info = flaky_plugin.call_runtest_hook(flaky_test, 'setup')
assert flaky_test.ran_setup
assert string_io.getvalue() == mock_io.getvalue()
assert call_info.excinfo.type is ZeroDivisionError
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
15290,
1330,
44123,
11,
8529,
198,
2,
279,
2645,
600,
25,
40223,
28,
11748,
12,
18... | 2.354415 | 838 |
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gRPC's Asynchronous Python API."""
import abc
import types
import six
import grpc
from grpc._cython import cygrpc
from grpc._cython.cygrpc import init_grpc_aio
class Channel(six.with_metaclass(abc.ABCMeta)):
"""Asynchronous Channel implementation."""
@abc.abstractmethod
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
"""Creates a UnaryUnaryMultiCallable for a unary-unary method.
Args:
method: The name of the RPC method.
request_serializer: Optional behaviour for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional behaviour for deserializing the
response message. Response goes undeserialized in case None
is passed.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary method.
"""
raise NotImplementedError()
@abc.abstractmethod
async def close(self):
"""Closes this Channel and releases all resources held by it.
Closing the Channel will proactively terminate all RPCs active with the
Channel and it is not valid to invoke new RPCs with the Channel.
This method is idempotent.
"""
raise NotImplementedError()
@abc.abstractmethod
async def __aenter__(self):
"""Starts an asynchronous context manager.
Returns:
Channel the channel that was instantiated.
"""
raise NotImplementedError()
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Finishes the asynchronous context manager by closing gracefully the channel."""
raise NotImplementedError()
class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-unary RPC from client-side in an asynchronous way."""
@abc.abstractmethod
async def __call__(self,
request,
*,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow
for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC. Only valid for
secure Channel.
wait_for_ready: This is an EXPERIMENTAL argument. An optional
flag to enable wait for ready mechanism
compression: An element of grpc.compression, e.g.
grpc.compression.Gzip. This is an EXPERIMENTAL option.
Returns:
The response value for the RPC.
Raises:
RpcError: Indicating that the RPC terminated with non-OK status. The
raised RpcError will also be a Call for the RPC affording the RPC's
metadata, status code, and details.
"""
raise NotImplementedError()
def insecure_channel(target, options=None, compression=None):
"""Creates an insecure asynchronous Channel to a server.
Args:
target: The server address
options: An optional list of key-value pairs (channel args
in gRPC Core runtime) to configure the channel.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel. This is an EXPERIMENTAL option.
Returns:
A Channel.
"""
from grpc.experimental.aio import _channel # pylint: disable=cyclic-import
return _channel.Channel(target, ()
if options is None else options, None, compression)
class _AioRpcError:
"""Private implementation of AioRpcError"""
class AioRpcError:
"""An RpcError to be used by the asynchronous API.
Parent classes: (cygrpc._AioRpcError, RpcError)
"""
# Dynamically registered as subclass of _AioRpcError and RpcError, because the former one is
# only available after the cython code has been compiled.
_class_built = _AioRpcError
| [
2,
15069,
13130,
308,
49,
5662,
7035,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 2.612223 | 1,898 |
import json
import ntpath
from modeci_mdf.functions.standard import mdf_functions, create_python_expression
from typing import List, Tuple, Dict, Optional, Set, Any, Union
from modeci_mdf.utils import load_mdf, print_summary
from modeci_mdf.mdf import *
from modeci_mdf.full_translator import *
from modeci_mdf.execution_engine import EvaluableGraph
import argparse
import sys
if __name__ == "__main__":
main()
| [
11748,
33918,
198,
11748,
299,
83,
6978,
198,
198,
6738,
953,
721,
72,
62,
76,
7568,
13,
12543,
2733,
13,
20307,
1330,
285,
7568,
62,
12543,
2733,
11,
2251,
62,
29412,
62,
38011,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
360... | 2.916667 | 144 |
__author__ = 'dimd'
from NetCatKS.Validators.api.interfaces.message import IMessage
from NetCatKS.Validators.api.implementers.validators.default import BaseValidator
from zope.component import adapts
from zope.component import getGlobalSiteManager
from lxml import etree
gsm = getGlobalSiteManager()
gsm.registerSubscriptionAdapter(XMLValidator) | [
834,
9800,
834,
796,
705,
27740,
67,
6,
198,
198,
6738,
3433,
21979,
27015,
13,
47139,
2024,
13,
15042,
13,
3849,
32186,
13,
20500,
1330,
8959,
7589,
198,
6738,
3433,
21979,
27015,
13,
47139,
2024,
13,
15042,
13,
320,
26908,
364,
13,
... | 3.398058 | 103 |
# Generated by Django 3.1.7 on 2021-03-24 09:36
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
3070,
12,
1731,
7769,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import asyncio
import time
import kopf
# Sync daemons in threads are non-interruptable, they must check for the `stopped` flag.
# This daemon exits after 3 attempts and then 30 seconds of running (unless stopped).
@kopf.daemon('zalando.org', 'v1', 'kopfexamples', backoff=3)
# Async daemons do not need the `stopped` signal, they can rely on `asyncio.CancelledError` raised.
# This daemon runs forever (until stopped, i.e. cancelled). Yet fails to start for 3 first times.
@kopf.daemon('zalando.org', 'v1', 'kopfexamples', backoff=3,
cancellation_backoff=1.0, cancellation_timeout=0.5)
E2E_CREATION_STOP_WORDS = ["=> Ping from"]
E2E_DELETION_STOP_WORDS = ["'background_async' is cancelled", "'background_sync' is cancelled", "'background_async' has exited"]
| [
11748,
30351,
952,
198,
11748,
640,
198,
198,
11748,
479,
404,
69,
628,
198,
2,
35908,
12379,
368,
684,
287,
14390,
389,
1729,
12,
3849,
3622,
540,
11,
484,
1276,
2198,
329,
262,
4600,
301,
38333,
63,
6056,
13,
198,
2,
770,
33386,
... | 2.870849 | 271 |
from django.core import checks
from . import settings as djpaypal_settings
VALID_MODES = ("live", "sandbox")
@checks.register("djpaypal")
def check_paypal_api_key(app_configs=None, **kwargs):
"""Check that the Paypal API keys are configured correctly"""
messages = []
mode = getattr(djpaypal_settings, "PAYPAL_MODE", None)
if mode not in VALID_MODES:
msg = "Invalid PAYPAL_MODE specified: {}.".format(repr(mode))
hint = "PAYPAL_MODE must be one of {}".format(", ".join(repr(k) for k in VALID_MODES))
messages.append(checks.Critical(msg, hint=hint, id="djpaypal.C001"))
for setting in "PAYPAL_CLIENT_ID", "PAYPAL_CLIENT_SECRET":
if not getattr(djpaypal_settings, setting, None):
msg = "Invalid value specified for {}".format(setting)
hint = "Add PAYPAL_CLIENT_ID and PAYPAL_CLIENT_SECRET to your settings."
messages.append(checks.Critical(msg, hint=hint, id="djpaypal.C002"))
return messages
| [
6738,
42625,
14208,
13,
7295,
1330,
8794,
198,
198,
6738,
764,
1330,
6460,
355,
42625,
15577,
18596,
62,
33692,
628,
198,
23428,
2389,
62,
33365,
1546,
796,
5855,
12583,
1600,
366,
38142,
3524,
4943,
628,
198,
31,
42116,
13,
30238,
7203... | 2.554124 | 388 |
# Script to clean response into one-response-per-line format | [
2,
12327,
284,
3424,
2882,
656,
530,
12,
26209,
12,
525,
12,
1370,
5794
] | 4.285714 | 14 |
__all__ = [
'character',
'common',
'corpus',
'document',
]
from .common import *
from .character import Character
from .corpus import Corpus
from .document import Document
| [
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
22769,
3256,
198,
220,
220,
220,
705,
11321,
3256,
198,
220,
220,
220,
705,
10215,
79,
385,
3256,
198,
220,
220,
220,
705,
22897,
3256,
198,
60,
198,
198,
6738,
764,
11321,
1330,
1... | 2.907692 | 65 |
import anybadge
| [
11748,
597,
14774,
469,
628
] | 3.4 | 5 |
import os
from vent.api.plugins import Plugin
from vent.api.templates import Template
def test_add():
""" Test the add function """
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False)
assert isinstance(status, tuple)
assert status[0] == True
status = instance.add('https://github.com/cyberreboot/vent.git', build=False)
assert isinstance(status, tuple)
assert status[0] == True
bad_instance = Plugin()
status = bad_instance.add('https://github.com/cyberreboot/vent', build=False)
assert isinstance(status, tuple)
assert status[0] == True
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False, user='foo', pw='bar')
assert isinstance(status, tuple)
assert status[0] == True
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False, overrides=[('.', 'HEAD')])
assert isinstance(status, tuple)
assert status[0] == True
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False, tools=[('vent/', 'HEAD')], overrides=[('vent', 'HEAD')])
assert isinstance(status, tuple)
assert status[0] == True
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False, overrides=[('.', 'HEAD')], user='foo', pw='foo')
assert isinstance(status, tuple)
assert status[0] == True
def test_add_image():
""" Test the add_image function """
instance = Plugin()
status = instance.add_image('quay/redis', 'redis', registry='quay.io')
assert isinstance(status, tuple)
assert status[0] == True
status = instance.add_image('alpine', 'alpine', tag='latest', groups='alpine')
assert isinstance(status, tuple)
assert status[0] == True
def test_builder():
""" Test the builder function """
instance = Plugin()
template = Template(instance.manifest)
template = instance.builder(template, os.getcwd()+'/plugins/cyberreboot/vent', 'image_name', 'section')
template = instance.builder(template, 'bad_path', 'image_name', 'section', build=True, branch='master', version='HEAD')
def test_build_tools():
""" Test the _build_tools function """
instance = Plugin()
status = instance._build_tools(False)
assert status[0] == False
def test_list_tools():
""" Test the tools function """
instance = Plugin()
tools = instance.list_tools()
def test_remove():
""" Test the remove function """
instance = Plugin()
status = instance.remove(groups='core', built='no')
assert status[0] == True
def test_versions():
""" Test the versions function """
instance = Plugin()
status = instance.add('https://github.com/cyberreboot/vent', build=False, branch='master')
assert status[0] == True
versions = instance.versions('elasticsearch', branch='master')
assert isinstance(versions, list)
assert isinstance(versions[0], tuple)
assert isinstance(versions[0][1], list)
assert versions[0][0] == 'cyberreboot:vent:/vent/core/elasticsearch:master:HEAD'
assert 'HEAD' in versions[0][1]
def test_current_version():
""" Test the current_version function """
instance = Plugin()
versions = instance.current_version('elasticsearch', branch='master')
assert versions == [('cyberreboot:vent:/vent/core/elasticsearch:master:HEAD', 'HEAD')]
def test_state():
""" Test the state function """
instance = Plugin()
states = instance.state('elasticsearch', branch='master')
assert states == [('cyberreboot:vent:/vent/core/elasticsearch:master:HEAD', 'enabled')]
def test_enable():
""" Test the enable function """
instance = Plugin()
status = instance.enable('elasticsearch', branch='master')
assert status[0] == True
def test_disable():
""" Test the disable function """
instance = Plugin()
status = instance.disable('elasticsearch', branch='master')
assert status[0] == True
def test_update():
""" Test the update function """
instance = Plugin()
status = instance.update()
assert isinstance(status, tuple)
assert status[0] == False
| [
11748,
28686,
198,
198,
6738,
7435,
13,
15042,
13,
37390,
1330,
42636,
198,
6738,
7435,
13,
15042,
13,
11498,
17041,
1330,
37350,
198,
198,
4299,
1332,
62,
2860,
33529,
198,
220,
220,
220,
37227,
6208,
262,
751,
2163,
37227,
198,
220,
... | 3.010065 | 1,391 |
from dashboard.models import Table, Dataset
import json
import csv
import zipfile
| [
6738,
30415,
13,
27530,
1330,
8655,
11,
16092,
292,
316,
198,
11748,
33918,
198,
11748,
269,
21370,
198,
11748,
19974,
7753,
198
] | 3.727273 | 22 |
#! /usr/bin/python
import json
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
33918,
628
] | 2.538462 | 13 |
#!/usr/bin/python3
from aip import AipNlp
import numpy as np
APP_ID = '10791703'
API_KEY = 'YNMY6eSAz6QlDCeM7KB9GKHC'
SECRET_KEY = 'tSqTt0OikFiYXVIt5kAXZyL0FdciRp5t'
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
def get_dependency(text):
"""
:param text: prepare to analysis the dependecy of tokens
:return word_list: a list which id is the subscript and the content is a dict with word's information
options represent:
mode 0: web model, for written text, default
mode 1: query model, for colloquial language
"""
# later, we need cut long sentence into multiply short sentences.
# dependency doesn't consider about ':'.
text = text.replace(':', '为')
word_list = []
for i, word in enumerate(client.depParser(text, options={"mode": 0})["items"], 1): # id from 1
print(i, word)
word_list.append(word) # id will increase 1
return word_list
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
257,
541,
1330,
317,
541,
45,
34431,
198,
11748,
299,
32152,
355,
45941,
198,
198,
24805,
62,
2389,
796,
705,
940,
3720,
1558,
3070,
6,
198,
17614,
62,
20373,
796,
705,
40760,
267... | 2.451444 | 381 |
from collections import ChainMap
from typing import Dict, List, Optional
import pandas as pd
from graphrole.features.prune import FeaturePruner
from graphrole.graph import interface
from graphrole.types import DataFrameDict, DataFrameLike
class RecursiveFeatureExtractor:
""" Compute recursive features for nodes of a graph """
supported_graph_libs = interface.get_supported_graph_libraries()
default_aggs = [
pd.DataFrame.sum,
pd.DataFrame.mean,
]
def __init__(
self,
G: interface.GraphLibInstance,
max_generations: int = 10,
aggs: Optional[List] = None
) -> None:
"""
:param G: graph object from supported graph package
:param max_generations: maximum levels of recursion
:param aggs: optional list of aggregations for each recursive generation
"""
graph = interface.get_interface(G)
if graph is None:
raise TypeError(f'Input graph G must be from one of the following '
f'supported libraries: {self.supported_graph_libs}')
if graph.get_num_edges() == 0:
raise ValueError('Input graph G must contain at least one edge')
self.graph = graph
self.max_generations = max_generations
self.aggs = aggs if aggs else self.default_aggs
# current generation
self.generation_count = 0
# distance threshold for grouping (binned) features; incremented
# by one at each generation, so although it always matches
# self.generation_count, we maintain it as a separate instance
# variable for clarity
self._feature_group_thresh = 0
# pd.DataFrame holding current features
self._features = pd.DataFrame()
# dict of generation number to dict dict representation dataframe
# of features retained at each generation to be emitted
self._final_features: Dict[int, DataFrameDict] = {}
def extract_features(self) -> DataFrameLike:
"""
Perform recursive feature extraction to return DataFrame of features
"""
# return already calculated features if stored in state
if self._final_features:
return self._finalize_features()
# initialization: generation 0 features are neighborhood features
features = self.graph.get_neighborhood_features()
self._update(features)
for generation in range(1, self.max_generations):
self.generation_count = generation
self._feature_group_thresh = generation
features = self._get_next_features()
self._update(features)
# stop if an iteration results in no features retained
if not self._final_features[generation]:
break
return self._finalize_features()
def _finalize_features(self) -> DataFrameLike:
"""
Return DataFrame of final features
"""
all_features_dict = dict(ChainMap(*self._final_features.values()))
return pd.DataFrame(all_features_dict)
def _get_next_features(self) -> DataFrameLike:
"""
Return next level of recursive features (aggregations of node
features from previous generation)
"""
# get nodes neighbors and aggregate their previous generation features
prev_features = self._final_features[self.generation_count - 1].keys()
features = {
node: (
self._features
# select previous generation features for neighbors of current node
.reindex(index=self.graph.get_neighbors(node), columns=prev_features)
# aggregate
.agg(self.aggs)
# fill nans that result from dangling nodes with 0
.fillna(0)
# store new aggregations as dict
.pipe(self._aggregated_df_to_dict)
)
for node in self.graph.get_nodes()
}
return pd.DataFrame.from_dict(features, orient='index')
def _update(self, features: DataFrameLike) -> None:
"""
Add current generation features and prune across all features to emit final
features from current generation
:param features: candidate features from current recursive generation
"""
# add features
self._features = (
pd.concat([self._features, features], axis=1, sort=True)
# fill nans resulting from concatenation where features does not
# contain neighborless nodes (out-degree=0) on its axis
.fillna(0)
)
# prune redundant features
pruner = FeaturePruner(self._final_features, self._feature_group_thresh)
features_to_drop = pruner.prune_features(self._features)
self._features = self._features.drop(features_to_drop, axis=1)
# save features that remain after pruning and that
# have not previously been saved as final features
retained = features.columns.difference(features_to_drop)
feature_dict = as_frame(self._features[retained]).to_dict()
self._final_features[self.generation_count] = feature_dict
@staticmethod
def _aggregated_df_to_dict(agg_df: DataFrameLike) -> Dict[str, float]:
"""
Transform DataFrame of aggregated features to dict formatted for
concatenation with self._features DataFrame
:param agg_df: agregated features resulting from df.agg(self.aggs)
"""
try:
agg_dicts = agg_df.to_dict(orient='index')
except TypeError:
# pd.Series objects do not have to_dict method with orient kwarg
# so cast to pd.DataFrame and transpose for correct shape
agg_dicts = agg_df.to_frame().T.to_dict(orient='index')
formatted_agg_dict = {
f'{key}({idx})': val
for idx, row in agg_dicts.items()
for key, val in row.items()
}
return formatted_agg_dict
# Helper functions
def as_frame(df_like: DataFrameLike) -> pd.DataFrame:
"""
Helper to safely cast a pd.Series to pd.DataFrame without throwing
an exception if input is already a pd.DataFrame
:param df_like: pd.Series or pd.DataFrame
"""
try:
return df_like.to_frame()
except AttributeError:
return df_like
| [
6738,
17268,
1330,
21853,
13912,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
4823,
18090,
13,
40890,
13,
1050,
1726,
1330,
27018,
47,
5143,
263,
198,
6738,
4823,
18... | 2.505859 | 2,560 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-08-16 03:36
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
19,
319,
2177,
12,
2919,
12,
1433,
7643,
25,
2623,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
from datetime import datetime
from random import random
import random
import webbrowser
import pyttsx3
import datetime
import urllib.request
import re
import speech_recognition as sr
import wikipedia
import os
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
engine.setProperty('voice',voices[0].id)
"""def alarm():
Hh=int(input("set hour"))
mm=int(input("set minute"))
hour=int(datetime.datetime.now().hour)
if (Hh==hour):
speak("MASTER enthuringa")"""
if __name__=="__main__":
wishme()
whoamI()
while True:
query=mycommand().lower()
if "wikipedia"in query:
speak("Searching in wikipeia")
query=query.replace("wikipedia","")
results=wikipedia.summary(query,sentences=1)
speak("according to wikipedia")
speak(results)
print(results)
break;
elif"tell me a joke"in query:
joke()
break;
elif "tell joke"in query:
joke()
break;
elif "joke"in query:
joke()
break;
elif"hai"in query:
speak("hi master")
break;
elif "open youtube"in query:
speak("opening youtube")
webbrowser.open("youtube.com")
break;
elif "open google"in query:
speak("opening google")
webbrowser.open("google.com")
break;
elif "open geeks for geeks"in query:
speak("opening geeks for geeks ")
webbrowser.open_new_tab("geeksforgeeks.org")
break;
elif "play music"in query:
speak("opening music player")
music="C:\\Users\\home\\Desktop\\songs"
songs=os.listdir(music)
print(songs)
a=random.choice(songs)
print(a)
os.startfile(os.path.join(music,a))
break;
elif "open whatsapp"in query:
speak("opening whatsapp")
webbrowser.open("web.whatsapp.com")
break;
elif "play movie"in query:
speak("playing a movie")
kmovie="C:\\Users\\home\\Desktop\\sanjay"
movie="C:\\Users\\home\\Desktop\\movie\\movie"
k=[kmovie,movie]
c=random.choice(k)
film=os.listdir(c)
print(film)
b=random.choice(film)
print(b)
os.startfile(os.path.join(movie,b))
break;
elif "open chrome"in query:
speak("opening chrome" )
codepath="C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe "
os.startfile(codepath)
break;
elif "time now"in query:
time=datetime.datetime.now().strftime("%H:%M")
speak("THE TIME IS")
speak(time)
break;
elif "nothing"in query:
speak("Bye master")
exit()
elif "search in youtube"in query:
speak("what to search in youtube")
search=mycommand()
speak("searching for"+search)
webbrowser.open("https://www.youtube.com/results?search_query="+search)
break;
elif"play in youtube" in query:
playinyt()
break;
elif "play youtube songs"in query:
playinyt()
break;
elif "play youtube"in query:
playinyt()
break;
elif"youtube"in query:
playinyt()
break;
elif"search in google"in query:
speak("what to search in google")
search=mycommand()
speak("searching for"+search)
webbrowser.open("https://www.google.com/search?q="+search)
break;
| [
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
6738,
4738,
1330,
4738,
201,
198,
11748,
4738,
201,
198,
11748,
3992,
40259,
201,
198,
201,
198,
11748,
12972,
83,
912,
87,
18,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
2956,
297,
571... | 1.867515 | 2,121 |
"""
Command peer
"""
from .command_base import CommandBase
from .help_command import HelpCommand
from .peer_create_command import PeerCreateCommand
| [
37811,
628,
220,
220,
220,
9455,
12720,
198,
198,
37811,
198,
198,
6738,
764,
21812,
62,
8692,
1330,
9455,
14881,
198,
6738,
764,
16794,
62,
21812,
1330,
10478,
21575,
198,
6738,
764,
33350,
62,
17953,
62,
21812,
1330,
41139,
16447,
215... | 3.714286 | 42 |
# System
import json,re
# SBaaS
from .stage01_quantification_peakInformation_query import stage01_quantification_peakInformation_query
from .stage01_quantification_MQResultsTable_query import stage01_quantification_MQResultsTable_query
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from matplotlib_utilities.matplot import matplot
from SBaaS_base.sbaas_template_io import sbaas_template_io
from ddt_python.ddt_container import ddt_container
| [
171,
119,
123,
2,
4482,
198,
11748,
33918,
11,
260,
198,
2,
18056,
7252,
50,
198,
6738,
764,
14247,
486,
62,
40972,
2649,
62,
36729,
21918,
62,
22766,
1330,
3800,
486,
62,
40972,
2649,
62,
36729,
21918,
62,
22766,
198,
6738,
764,
14... | 3.343949 | 157 |
"""
Balance Sheet Entries
"""
from datetime import timedelta
from matilda import config
from matilda.data_pipeline.db_crud import read_financial_statement_entry, companies_in_classification
def cash_and_cash_equivalents(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'Q'):
"""
**Cash and Cash Equivalents** is the amount of money on deposit in the bank. It is composed of
* Short-term investments:sfsf
* Cash: fh;ohif
:param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
:param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, date=datetime.now().
:param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
:param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
:return:
"""
return read_financial_statement_entry(financial_statement='BalanceSheet', stock=stock,
entry_name=['Assets', 'CurrentAssets', 'Cash and Cash Equivalents'],
date=date, lookback_period=lookback_period, period=period)
def current_marketable_securities(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'Q'):
"""
Hello Paola!
:param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
:param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, date=datetime.now().
:param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
:param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
:return:
"""
return read_financial_statement_entry(financial_statement='BalanceSheet', stock=stock,
entry_name=['Assets', 'CurrentAssets', 'CashAndShortTermInvestments',
'MarketableSecurities'],
date=date, lookback_period=lookback_period, period=period)
def net_accounts_receivable(stock, date=None, lookback_period: timedelta = timedelta(days=0), period: str = 'Q'):
"""
Invoices
:param stock:
:param date:
:param lookback_period:
:param period:
:return:
"""
return read_financial_statement_entry(financial_statement='BalanceSheet', stock=stock,
entry_name=['Assets', 'CurrentAssets', 'AccountsReceivable',
'NetAccountsReceivable'],
date=date, lookback_period=lookback_period, period=period)
def deferred_revenue_non_current_liabilities(stock, date=None, lookback_period: timedelta = timedelta(days=0),
period: str = 'Q'):
"""
Also known as *long-term unearned revenue*
:param stock:
:param date:
:param lookback_period:
:param period:
:return:
"""
return read_financial_statement_entry(financial_statement='BalanceSheet', stock=stock,
entry_name=['LiabilitiesAndShareholdersEquity', 'Liabilities',
'NonCurrentLiabilities', 'DeferredRevenue'],
date=date, lookback_period=lookback_period, period=period)
def total_shares_outstanding(stock, diluted_shares: bool = False, date=None,
lookback_period: timedelta = timedelta(days=0), period: str = 'Q'):
"""
:param stock:
:param diluted_shares: Share dilution is when a company issues additional stock, reducing the ownership proportion
of a current shareholder. Shares can be diluted through a conversion by holders of optionable securities, secondary
offerings to raise additional capital, or offering new shares in exchange for acquisitions or services.
:param date:
:param lookback_period:
:param period:
:return:
"""
entry = ['LiabilitiesAndShareholdersEquity', 'ShareholdersEquity', 'CommonStockAndAdditionalPaidInCapital',
'WeightedAverageNumberOfSharesOutstandingDiluted'] if diluted_shares \
else ['LiabilitiesAndShareholdersEquity', 'ShareholdersEquity', 'CommonStockAndAdditionalPaidInCapital',
'WeightedAverageNumberOfSharesOutstandingBasic']
return read_financial_statement_entry(financial_statement='BalanceSheet', stock=stock, entry_name=entry, date=date,
lookback_period=lookback_period, period=period)
| [
37811,
198,
45866,
21616,
7232,
1678,
198,
37811,
198,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
2603,
27281,
1330,
4566,
198,
6738,
2603,
27281,
13,
7890,
62,
79,
541,
4470,
13,
9945,
62,
6098,
463,
1330,
1100,
62,
4... | 2.440654 | 2,081 |
import serial
ANGLE_ERROR_CODE = 65535 # -1 signed int
if __name__ == '__main__':
arduino = SerialCommunication('COM3', 9600)
arduino.setGrabber(GrabberState.CLOSE)
print arduino.getRawBeta(), arduino.getRawAlpha() | [
11748,
11389,
198,
198,
15567,
2538,
62,
24908,
62,
34,
16820,
796,
45021,
2327,
197,
2,
532,
16,
4488,
493,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
610,
24493,
796,
23283,
30813,
3299,
... | 2.694118 | 85 |
import importlib
import pickle
import glob
from excut.utils.logging import logger
def restore_model(model_name_path=None, module_name="ampligraph.latent_features"):
"""Restore a saved model from disk.
See also :meth:`save_model`.
Parameters
----------
model_name_path: string
The name of saved model to be restored. If not specified,
the library will try to find the default model in the working directory.
Returns
-------
model: EmbeddingModel
the neural knowledge graph embedding model restored from disk.
"""
if model_name_path is None:
logger.warning("There is no model name specified. \
We will try to lookup \
the latest default saved model...")
default_models = glob.glob("*.model.pkl")
if len(default_models) == 0:
raise Exception("No default model found. Please specify \
model_name_path...")
else:
model_name_path = default_models[len(default_models) - 1]
logger.info("Will will load the model: {0} in your \
current dir...".format(model_name_path))
model = None
logger.info('Will load model {}.'.format(model_name_path))
try:
with open(model_name_path, 'rb') as fr:
restored_obj = pickle.load(fr)
logger.debug('Restoring model ...')
module = importlib.import_module(module_name)
class_ = getattr(module, restored_obj['class_name'].replace('Continue',''))
model = class_(**restored_obj['hyperparams'])
model.is_fitted = restored_obj['is_fitted']
model.ent_to_idx = restored_obj['ent_to_idx']
model.rel_to_idx = restored_obj['rel_to_idx']
try:
model.is_calibrated = restored_obj['is_calibrated']
except KeyError:
model.is_calibrated = False
model.restore_model_params(restored_obj)
except pickle.UnpicklingError as e:
msg = 'Error unpickling model {} : {}.'.format(model_name_path, e)
logger.debug(msg)
raise Exception(msg)
except (IOError, FileNotFoundError):
msg = 'No model found: {}.'.format(model_name_path)
logger.debug(msg)
raise FileNotFoundError(msg)
return model
# Not needed since
# def copy_model(in_model):
# model_params=dict()
# in_model.get_embedding_model_params(model_params)
#
# model_params_copy=deepcopy(model_params)
#
# logger.debug('Copying model ...')
#
# all_params_copy=deepcopy(in_model.all_params)
#
# print(all_params_copy)
#
# model = in_model.__class__(**in_model.all_params_copy)
# model.is_fitted = in_model.is_fitted
# model.ent_to_idx = dict(in_model.ent_to_idx)
# model.rel_to_idx = dict(in_model.rel_to_idx)
#
# try:
# model.is_calibrated = in_model.is_calibrated
# except KeyError:
# model.is_calibrated = False
#
# model.restore_model_params(model_params_copy)
#
# return model
| [
11748,
1330,
8019,
198,
11748,
2298,
293,
198,
198,
11748,
15095,
198,
198,
6738,
2859,
315,
13,
26791,
13,
6404,
2667,
1330,
49706,
628,
198,
4299,
11169,
62,
19849,
7,
19849,
62,
3672,
62,
6978,
28,
14202,
11,
8265,
62,
3672,
2625,
... | 2.270588 | 1,360 |
from src.analysis.adjacencyAnalysis import *
from src.vendor.Xilinx.S7.x7s import *
from src.vendor.Xilinx.S7.x7s_filters import *
from src.vendor.Xilinx.S7.x7s_clusterings import *
| [
6738,
12351,
13,
20930,
13,
324,
30482,
1387,
32750,
1330,
1635,
198,
6738,
12351,
13,
85,
18738,
13,
55,
346,
28413,
13,
50,
22,
13,
87,
22,
82,
1330,
1635,
198,
198,
6738,
12351,
13,
85,
18738,
13,
55,
346,
28413,
13,
50,
22,
... | 2.38961 | 77 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------#
# Import Librairies #
#-----------------------------------------------------------------------------#
import tool_kit as tk
#-----------------------------------------------------------------------------#
# get urls #
#-----------------------------------------------------------------------------#
urla = ["http://www.bjorg.fr/recettes/crepes-avec-boissons-vegetales.html",
"http://www.bjorg.fr/recherche/recette-video.html",
"http://www.bjorg.fr/produits/gamme-galettes.html",
"http://www.bjorg.fr/produits/sirop-agave.html",
"http://www.bjorg.fr/recherche/recettes-vegetariennes.html",
"http://www.bjorg.fr/recherche/recettes-sans-gluten.html",
"http://www.bjorg.fr/recherche/recettes-minceur.html"]
urlb = ["http://www.bjorg.fr/recettes/tofu.html"]
urls = tk.dispatch(textfilename="dispatch.txt", listename=urla, ratio=100)
url_file = open("url_file.txt", "w")
for url in urls :
print "url : ", url, "\n"
pool =tk.curl(url)
tk.oldnew_ipadress()
url_list = []
for c in pool :
data = c.body.getvalue()
soup = tk.BeautifulSoup(data)
s = soup.findAll('h2')
for i in s :
a = i.findAll('a')
if a == [] :
pass
else :
a = a[0].get('href')
a = a.split("?")[0]
a = "http://www.bjorg.fr/" + a
url_list.append(a)
print a
print '\n'
for i in url_list :
url_file.write(i + "\n")
url_file.close()
print "\n"
#-----------------------------------------------------------------------------#
# get url #
#-----------------------------------------------------------------------------#
recipe = open("url_file.txt", "r").readlines()
print "len(recipe) : ", len(recipe)
#-----------------------------------------------------------------------------#
# get data #
#-----------------------------------------------------------------------------#
urls = [i.replace('\n', '') for i in recipe]
urls = tk.dispatch(textfilename="dispatch_niv2.txt", listename=urls, ratio=1)
counta = 0
for url in urls :
print "url : ", url
counta += 1
print counta, "/", len(urls)
pool = tk.curl(url)
tk.oldnew_ipadress()
countb = 0
for c in pool :
data = c.body.getvalue()
soup = tk.BeautifulSoup(data)
s = soup.findAll('div', {'id' : 'recette'})
#-------------------------------------------------------------------------#
# Title #
#-------------------------------------------------------------------------#
title = s[0].findAll('h1')
title = title[0].text
#-------------------------------------------------------------------------#
# Nbr_person #
#-------------------------------------------------------------------------#
person = s[0].findAll('div', {'class' : 'infos'})
person = person[0].findAll('span', {'class' : 'yield'})
person = person[0].text
person = person.replace(' personne', '')
person = person.replace('s', '')
#-------------------------------------------------------------------------#
# Temps_prepa #
#-------------------------------------------------------------------------#
prep_time = s[0].findAll('span', {'class' : 'prepTime'})
prep_time = prep_time[0].text
print "title : ", title
print "person : ", person
print "prep_time : ", prep_time, "minutes"
#-------------------------------------------------------------------------#
# Ingredients #
#-------------------------------------------------------------------------#
print "ingredients : ",
ingredient = s[0].findAll('div', {'class' : 'ingredient'})
ingredient = ingredient[0].findAll('div')
ingredient = ingredient[0].findAll('li')
avoid_list = ["<br />", ".", "- "]
for i in ingredient :
if str(i) in avoid_list :
pass
else :
a = str(i).replace('• ', '')
a = a.replace(' ', '')
a = a.replace("à", "a")
a = a.replace('é', 'e')
a = a.replace(' de', '')
a = a.replace(" d'", "")
a = a.replace(".", "")
a = a.replace("- ", "")
a = a.replace("«", "")
a = a.replace("»", "")
a = a.replace('û', 'u')
a = a.replace("â", "a")
a = a.replace("<li>", "").replace("</li>", "")
a = a.split("<a")[0]
print a, ";",
print "preparation : "
prepa = s[0].findAll('div', {'class' : 'preparation instructions'})
prepa = prepa[0].findAll('p')
for i in prepa :
a = i.text
a = a.replace('à', 'a')
a = a.replace('é', 'e')
a = a.replace('â', 'a')
a = a.replace('ê', 'e')
a = a.replace('°', ' degres celcius')
a = a.replace(' ', '')
a = a.replace('!nbsp;', '')
a = a.replace('ï', 'i')
a = a.replace(" rsquo;", "'")
print a
print "\n"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
10097,
32501,
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.071775 | 2,884 |
import preprocess
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import pandas as pd
import plotly.express as px
from sklearn.decomposition import FastICA
import matplotlib.pyplot as plt
import numpy as np
dataPath = r"C:\Users\shalev\Desktop\Introduction_to_AI\Introduction-to-AI\Data\mushrooms_data.csv"
reducedDataPath = r"C:\Users\shalev\Desktop\Introduction_to_AI\Introduction-to-AI\Data\reduced_data.csv"
# if __name__ == '__main__':
# # 28 saves 93% explained variance of the data
# dr = DimantionReduction(101)
# # dr.ICA_reduceDimentionForPlot()
# # dr.plotReducedData()
# dr.reduceDimension() | [
11748,
662,
14681,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
8997,
3351,
36213,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
13,
42712,
355,
279,
87,
1... | 2.798319 | 238 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory tests for Violin Memory 6000 Series All-Flash Array Drivers
by Ryan Lucio
Senior Software Engineer
Violin Memory
"""
import mock
from cinder.db.sqlalchemy import models
from cinder import test
from cinder.volume import configuration as conf
from cinder.tests import fake_vmem_xgtools_client as vxg
from cinder.volume.drivers.violin import v6000_common
from cinder.volume.drivers.violin import v6000_fcp
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant",
'wwpns': [u'50014380186b3f65', u'50014380186b3f67'],
}
mock_client_conf = [
'basic',
'basic.login',
'basic.get_node_values',
'basic.save_config',
'lun',
'lun.export_lun',
'lun.unexport_lun',
'snapshot',
'snapshot.export_lun_snapshot',
'snapshot.unexport_lun_snapshot',
'igroup',
]
class V6000FCPDriverTestCase(test.TestCase):
"""Test case for VMEM FCP driver."""
@mock.patch.object(v6000_common.V6000CommonDriver, 'check_for_setup_error')
def test_check_for_setup_error(self, m_setup_func):
'''No setup errors are found.'''
result = self.driver.check_for_setup_error()
m_setup_func.assert_called_with()
self.assertTrue(result is None)
@mock.patch.object(v6000_common.V6000CommonDriver, 'check_for_setup_error')
def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
'''No wwns were found during setup.'''
self.driver.gateway_fc_wwns = []
self.assertRaises(v6000_common.InvalidBackendConfig,
self.driver.check_for_setup_error)
@mock.patch.object(v6000_common.LunIdList, 'get_lun_id_for_volume')
@mock.patch.object(v6000_common.LunIdList, 'get_lun_id_for_volume')
@mock.patch.object(v6000_common.LunIdList, 'get_lun_id_for_snapshot')
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
198,
2,
15069,
1946,
13085,
259,
14059,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
... | 2.411466 | 1,378 |
# ALLOW_RETRIES: 3
# RUN: false
| [
2,
11096,
3913,
62,
2200,
5446,
11015,
25,
513,
198,
198,
2,
32494,
25,
3991,
198
] | 2.0625 | 16 |
"""
From: https://taxfoundation.org/2017-tax-brackets/
Rate |Taxable Income Bracket |Tax Owed
:--- |:--------------------- |:--------
10% |$0 to $9,325 |10% of Taxable Income
15% |$9,325 to $37,950 |$932.50 plus 15% of the excess over $9325
25% |$37,950 to $91,900 |$5,226.25 plus 25% of the excess over $37,950
28% |$91,900 to $191,650 |$18,713.75 plus 28% of the excess over $91,900
33% |$191,650 to $416,700 |$46,643.75 plus 33% of the excess over $191,650
35% |$416,700 to $418,400 |$120,910.25 plus 35% of the excess over $416,700
39.60% |$418,400+ |$121,505.25 plus 39.6% of the excess over $418,400
"""
def _tax2017_1():
"""
Test the examples from the assignment
>>> import taxes
>>> taxes.tax2017(5000)
500.0
>>> taxes.tax2017(25000)
3283.75
>>> taxes.tax2017(50000)
8238.75
"""
def _tax2017_2(income):
"""
Test values near the breakpoints
>>> _tax2017_2(0)
0.0
0.1
>>> _tax2017_2(9325)
932.4
932.5
932.65
>>> _tax2017_2(37950)
5226.1
5226.25
5226.5
>>> _tax2017_2(91900)
18713.5
18713.75
18714.03
>>> _tax2017_2(191650)
46643.47
46643.75
46644.08
>>> _tax2017_2(416700)
120909.92
120910.25
120910.6
>>> _tax2017_2(418400)
121504.9
121505.25
121505.65
"""
from taxes import tax2017
if income >= 1:
print(round(taxit(income - 1), 2))
print(round(taxit(income), 2))
print(round(taxit(income + 1), 2))
| [
37811,
198,
4863,
25,
3740,
1378,
19290,
42526,
13,
2398,
14,
5539,
12,
19290,
12,
1671,
25180,
14,
198,
198,
32184,
220,
220,
220,
930,
27017,
540,
19003,
1709,
8317,
220,
930,
27017,
11960,
276,
198,
25,
6329,
220,
220,
220,
930,
... | 2.019084 | 786 |
'''
This is how to track a white ball example using SimpleCV
The parameters may need to be adjusted to match the RGB color
of your object.
The demo video can be found at:
http://www.youtube.com/watch?v=jihxqg3kr-g
'''
print __doc__
import SimpleCV
display = SimpleCV.Display()
cam = SimpleCV.Camera()
normaldisplay = True
while display.isNotDone():
if display.mouseRight:
normaldisplay = not(normaldisplay)
print "Display Mode:", "Normal" if normaldisplay else "Segmented"
img = cam.getImage().flipHorizontal()
dist = img.colorDistance(SimpleCV.Color.BLACK).dilate(2)
segmented = dist.stretch(200,255)
blobs = segmented.findBlobs()
if blobs:
circles = blobs.filter([b.isCircle(0.2) for b in blobs])
if circles:
img.drawCircle((circles[-1].x, circles[-1].y), circles[-1].radius(),SimpleCV.Color.BLUE,3)
if normaldisplay:
img.show()
else:
segmented.show()
| [
7061,
6,
198,
1212,
318,
703,
284,
2610,
257,
2330,
2613,
1672,
1262,
17427,
33538,
198,
198,
464,
10007,
743,
761,
284,
307,
12328,
284,
2872,
262,
25228,
3124,
198,
1659,
534,
2134,
13,
198,
198,
464,
13605,
2008,
460,
307,
1043,
... | 2.639881 | 336 |
import sys
import re
import os
# Script entry point: expects exactly three CLI arguments (the tuple unpack
# raises ValueError otherwise): the images.yaml path, the bootstrap CRDs
# yaml path, and the ph-app-templates directory.
# NOTE(review): `section` and `main` are not defined in this chunk -- they
# are presumably defined earlier in the file; confirm before refactoring.
if __name__ == '__main__':
    images, bootstrap_crds, phapptemplates_dir = sys.argv[1:]
    section("Inputs")
    print("images.yaml => {}".format(images))
    print("crds.yaml => {}".format(bootstrap_crds))
    print("ph-app-templates => {}".format(phapptemplates_dir))
    main(images, bootstrap_crds, phapptemplates_dir)
    print("\nEverything is OK.")
    # explicit success exit code
    sys.exit(0)
| [
11748,
25064,
198,
11748,
302,
198,
11748,
28686,
628,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
4263,
11,
6297,
26418,
62,
66,
4372,
82,
11,
872,
1324,
11498,
17041,
62,
... | 2.441176 | 170 |
# Build a Windows executable from example.py via the project-local
# py2exe_converter package.
# NOTE(review): the argument roles (source script, requirements file, icon)
# are inferred from the values at this call site -- confirm against
# py2exe_converter.converter.convert's signature.
from py2exe_converter.converter import convert
convert('example.py', 'requirements.txt', 'icon_one.png')
6738,
12972,
17,
13499,
62,
1102,
332,
353,
13,
1102,
332,
353,
1330,
10385,
198,
198,
1102,
1851,
10786,
20688,
13,
9078,
3256,
705,
8897,
18883,
13,
14116,
3256,
705,
4749,
62,
505,
13,
11134,
11537
] | 2.916667 | 36 |
import Foundation
| [
11748,
5693,
198
] | 6 | 3 |
# Lib
import logging
import pandas as pd
# App
from ..models import (
FG_PROBE_SUBSETS,
ArrayType,
Channel,
ProbeType,
)
from ..files import IdatDataset
from ..utils import inner_join_data
from ..utils.progress_bar import * # checks environment and imports tqdm appropriately.
from collections import Counter
__all__ = ['RawDataset', 'RawMetaDataset', 'get_raw_datasets', 'get_raw_meta_datasets', 'get_array_type']
LOGGER = logging.getLogger(__name__)
def get_raw_datasets(sample_sheet, sample_name=None, from_s3=None, meta_only=False):
    """Generates a collection of RawDataset instances for the samples in a sample sheet.

    Arguments:
        sample_sheet {SampleSheet} -- The SampleSheet from which the data originates.

    Keyword Arguments:
        sample_name {string or list} -- Optional: one sample (or list of samples) to process
            from the sample_sheet. (default: {None} -- process every sample)
        from_s3 {zip_reader} -- pass in a S3ZipReader object to extract idat files from a zipfile hosted on s3.
        meta_only {True/False} -- doesn't read idat files, only parses the meta data about them.
            (RawMetaDataset is same as RawDataset but has no idat probe values stored in object,
            because not needed in pipeline)

    Raises:
        ValueError: If the number of probes between raw datasets differ.

    Returns:
        [RawDataset] -- A list of RawDataset (or RawMetaDataset) instances.
    """
    LOGGER.debug('Generating raw datasets from sample sheet')
    if not sample_name:
        samples = sample_sheet.get_samples()
    elif isinstance(sample_name, list):  # was `type(sample_name) is list`
        samples = [
            sample_sheet.get_sample(sample)
            for sample in sample_name
        ]
    else:
        samples = [sample_sheet.get_sample(sample_name)]
        LOGGER.info("Found sample in SampleSheet: {0}".format(sample_name))
    if meta_only:
        # BUGFIX: was `if from_s3 and meta_only` -- meta data can be parsed
        # without reading idat files regardless of where they live; the old
        # elif chain left `raw_datasets` unbound (NameError at return) when
        # meta_only was requested without an s3 reader.
        parser = RawMetaDataset
        raw_datasets = [parser(sample) for sample in samples]
    elif from_s3:
        parser = RawDataset.from_sample_s3
        zip_reader = from_s3
        raw_datasets = tqdm([parser(zip_reader, sample) for sample in samples], total=len(samples), desc='Getting raw datasets')
    else:
        parser = RawDataset.from_sample
        raw_datasets = tqdm([parser(sample) for sample in samples], total=len(samples), desc='Getting raw datasets')
    if not meta_only:
        # ensure all idat files have same number of probes
        probe_counts = {
            dataset.n_snps_read
            for dataset in raw_datasets
        }
        if len(probe_counts) != 1:
            # also explain which samples have which probes -- for splitting samples up
            probe_sample_counts = Counter([dataset.n_snps_read for dataset in raw_datasets])
            samples_by_probe_count = {probe_count: [] for probe_count in list(probe_counts)}
            for dataset in raw_datasets:
                # BUGFIX: was `{sentrix_id}_{sentrix_id}` (id repeated twice);
                # idat sample names are `<sentrix_id>_<sentrix_position>`.
                sample_name = f"{dataset.sample.sentrix_id}_{dataset.sample.sentrix_position}"
                samples_by_probe_count[dataset.n_snps_read].append(sample_name)
            LOGGER.error(f'Samples grouped by probe count: {probe_sample_counts.most_common()}')
            LOGGER.error(f'{samples_by_probe_count}')
            raise ValueError(f'IDATs with varying number of probes: {probe_counts}')
    return raw_datasets
def get_array_type(raw_datasets):
    """Return the single array type shared by every dataset in `raw_datasets`.

    Raises ValueError when the datasets report no array type at all, or more
    than one distinct type (mixed-array runs are not supported).
    """
    distinct_types = set()
    for candidate in raw_datasets:
        distinct_types.add(candidate.array_type)
    if not distinct_types:
        raise ValueError('could not identify array type from IDATs')
    if len(distinct_types) > 1:
        raise ValueError('IDATs with varying array types')
    return distinct_types.pop()
class RawDataset():
    """Wrapper for a sample and its pair of raw IdatDataset values.
    Arguments:
        sample {Sample} -- A Sample parsed from the sample sheet.
        green_idat {IdatDataset} -- The sample's GREEN channel IdatDataset.
        red_idat {IdatDataset} -- The sample's RED channel IdatDataset.
    Raises:
        ValueError: If the IDAT file pair have differing number of probes.
        TypeError: If an invalid Channel is provided when parsing an IDAT file.
    """
    # NOTE(review): the two stacked bare @classmethod decorators below look
    # like a truncation artifact -- get_raw_datasets() calls
    # RawDataset.from_sample and RawDataset.from_sample_s3, whose defs are not
    # visible here, and get_oob_controls takes `self` and reads instance state,
    # so it should NOT be decorated at all. Confirm against the full source.
    @classmethod
    @classmethod
    def get_oob_controls(self, manifest):
        """ Out-of-bound controls are the mean intensity values for the
        channel in the opposite channel's probes """
        # green OOB signal = the GREEN-channel idat read at RED-channel probe
        # addresses, and vice versa; each result is tagged with its channel.
        oob_green = self.filter_oob_probes(Channel.RED, manifest, self.green_idat)
        oob_red = self.filter_oob_probes(Channel.GREEN, manifest, self.red_idat)
        oob_green['Channel'] = Channel.GREEN.value
        oob_red['Channel'] = Channel.RED.value
        return {
            Channel.GREEN: oob_green,
            Channel.RED: oob_red,
        }
    def filter_oob_probes(self, channel, manifest, idat_dataset):
        """ this is the step where it appears that illumina_id (internal probe numbers)
        are matched to the AddressA_ID / B_IDs from manifest,
        which allows for 'cgXXXXXXX' probe names to be used later. """
        probe_details = manifest.get_probe_details(
            probe_type=ProbeType.ONE,
            channel=channel,
        )
        # 2020-03-25: probe_details was returning an empty DataFrame with mouse,
        # because two new probe types existed (IR, IG) -- note that new types results
        # in this null issue and a huber ZeroDivisionError ultimately in CLI.
        probe_details = probe_details[['AddressA_ID', 'AddressB_ID']]
        probe_means = idat_dataset.probe_means
        # inner-join the manifest addresses against the idat's per-probe mean
        # intensities; probe_means appears to be indexed by illumina_id
        # (right_index=True) -- confirm in IdatDataset.
        set_a = probe_details.merge(
            probe_means,
            how='inner',
            left_on='AddressA_ID',
            right_index=True,
            suffixes=(False, False),
        )
        set_b = probe_details.merge(
            probe_means,
            how='inner',
            left_on='AddressB_ID',
            right_index=True,
            suffixes=(False, False),
        )
        # NOTE(review): DataFrame.append was removed in pandas 2.0; this needs
        # pd.concat([set_a, set_b]) on newer pandas.
        oob_probes = set_a.append(set_b) # will contain duplicates for probes that have both red and grn channels (II)
        return oob_probes
    def get_fg_values(self, manifest, channel):
        """ appears to only be used in NOOB function """
        #LOGGER.info('Preprocessing %s foreground datasets: %s', channel, self.sample)
        probe_subsets = FG_PROBE_SUBSETS[channel]
        channel_foregrounds = [
            self.get_subset_means(probe_subset, manifest)
            for probe_subset in probe_subsets
        ]
        # debug - trying to locate the SNP signal
        #for probe_subset in probe_subsets:
        #    print(probe_subset.probe_address, probe_subset.probe_type, probe_subset.data_channel, probe_subset.probe_channel)
        #    # this has both ProbeAddress.A and IlmnID -- check for rs.
        # test = pd.concat(channel_foregrounds)
        # print('get_fg_values', test.shape, test.index.duplicated())
        # print([rs for rs in test['IlmnID'] if 'rs' in rs])
        return pd.concat(channel_foregrounds)
    def get_subset_means(self, probe_subset, manifest):
        """apparently, not called anywhere """
        # NOTE(review): contradicts the comprehension in get_fg_values, which
        # does call this. Also relies on self.get_channel_means, which is not
        # visible in this chunk -- presumably defined in a truncated method.
        channel_means_df = self.get_channel_means(probe_subset.data_channel)
        probe_details = probe_subset.get_probe_details(manifest)
        column_name = probe_subset.column_name
        merge_df = probe_details[[column_name, 'probe_type']]
        merge_df = merge_df.reset_index()
        merge_df = merge_df.set_index(column_name)
        return inner_join_data(channel_means_df, merge_df)
class RawMetaDataset():
    """Wrapper for a sample and meta data, without its pair of raw IdatDataset values.
    Arguments:
        sample {Sample} -- A Sample parsed from the sample sheet.
    each Sample contains (at a minimum):
        data_dir=self.data_dir
        sentrix_id=sentrix_id
        sentrix_position=sentrix_position
    """
    # NOTE(review): no __init__ is visible in this chunk; presumably it simply
    # stores `sample` (get_raw_datasets constructs it as RawMetaDataset(sample))
    # -- confirm against the full source.
| [
2,
7980,
198,
11748,
18931,
198,
11748,
19798,
292,
355,
279,
67,
198,
2,
2034,
198,
6738,
11485,
27530,
1330,
357,
198,
220,
220,
220,
25503,
62,
4805,
9864,
36,
62,
12564,
4462,
32716,
11,
198,
220,
220,
220,
15690,
6030,
11,
198,... | 2.451623 | 3,235 |
# Assignment driver script: trains a kernel perceptron and a budget kernel
# perceptron on 2-D points from data.csv (columns: x1, x2, label) and saves
# decision-boundary plots.
import numpy as np 
import matplotlib.pyplot as plt
import matplotlib.colors as c
from Perceptron import Perceptron
# Simple kernel
# Do not change these three lines.
data = np.loadtxt("data.csv", delimiter=',')
X = data[:, :2]
Y = data[:, 2]
# These are the parameters for the models. Please play with these and note your observations about speed and successful hyperplane formation.
beta = 0
N = 10
numsamples = 20000
kernel_file_name = 'k.png'
budget_kernel_file_name = 'bk.png'
# Don't change things below this in your final version. Note that you can use the parameters above to generate multiple graphs if you want to include them in your writeup.
# NOTE(review): KernelPerceptron and BudgetKernelPerceptron are not imported
# here (only `Perceptron` is); they are presumably subclasses defined in a
# portion of this assignment not visible in this chunk -- confirm before running.
k = KernelPerceptron(numsamples)
k.fit(X,Y)
k.visualize(kernel_file_name, width=0, show_charts=False, save_fig=False, include_points=True)
bk = BudgetKernelPerceptron(beta, N, numsamples)
bk.fit(X, Y)
bk.visualize(budget_kernel_file_name, width=0, show_charts=True, save_fig=False, include_points=True)
| [
198,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
4033,
669,
355,
269,
198,
6738,
2448,
984,
1313,
1330,
2448,
984,
1313,
628,
198,
2,
17427,... | 3.021807 | 321 |
from a10sdk.common.A10BaseClass import A10BaseClass
class BroadcastRateLimit(A10BaseClass):
    """This class does not support CRUD Operations please use parent.
    :param rate: {"description": "packets per second. Default is 500. (packets per second. Please specify an even number. Default is 500)", "format": "number", "default": 500, "maximum": 5000, "minimum": 50, "type": "number"}
    :param bcast_rate_limit_enable: {"default": 0, "type": "number", "description": "Rate limit the l2 broadcast packet on mgmt port", "format": "flag"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """
    # NOTE(review): auto-generated a10sdk stub; the __init__ that wires these
    # attributes appears truncated in this chunk -- confirm in the full module.
class Ipv6(A10BaseClass):
    """This class does not support CRUD Operations please use parent.
    :param inbound: {"default": 0, "type": "number", "description": "ACL applied on incoming packets to this interface", "format": "flag"}
    :param address_type: {"enum": ["link-local"], "type": "string", "description": "'link-local': Configure an IPv6 link local address; ", "format": "enum"}
    :param default_ipv6_gateway: {"type": "string", "description": "Set default gateway (Default gateway address)", "format": "ipv6-address"}
    :param ipv6_addr: {"type": "string", "description": "Set the IPv6 address of an interface", "format": "ipv6-address-plen"}
    :param v6_acl_name: {"description": "Apply ACL rules to incoming packets on this interface (Named Access List)", "format": "string", "minLength": 1, "maxLength": 32, "type": "string", "$ref": "/axapi/v3/ipv6/access-list"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """
    # NOTE(review): auto-generated a10sdk stub; the __init__ that wires these
    # attributes appears truncated in this chunk -- confirm in the full module.
class Ip(A10BaseClass):
    """This class does not support CRUD Operations please use parent.
    :param dhcp: {"default": 0, "not": "ipv4-address", "type": "number", "description": "Use DHCP to configure IP address", "format": "flag"}
    :param ipv4_address: {"not": "dhcp", "type": "string", "description": "IP address", "format": "ipv4-address"}
    :param default_gateway: {"type": "string", "description": "Set default gateway (Default gateway address)", "format": "ipv4-address"}
    :param control_apps_use_mgmt_port: {"default": 0, "type": "number", "description": "Control applications use management port", "format": "flag"}
    :param ipv4_netmask: {"type": "string", "description": "IP subnet mask", "format": "ipv4-netmask"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """
    # NOTE(review): auto-generated a10sdk stub; the __init__ that wires these
    # attributes appears truncated in this chunk -- confirm in the full module.
class AccessList(A10BaseClass):
    """This class does not support CRUD Operations please use parent.
    :param acl_name: {"minLength": 1, "maxLength": 16, "type": "string", "description": "Apply an access list (Named Access List)", "format": "string"}
    :param acl_id: {"description": "ACL id", "format": "number", "maximum": 199, "minimum": 1, "type": "number", "$ref": "/axapi/v3/access-list/standard"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """
    # NOTE(review): auto-generated a10sdk stub; the __init__ that wires these
    # attributes appears truncated in this chunk -- confirm in the full module.
class Management(A10BaseClass):
    """Class Description::
    Management interface.
    Class management supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param duplexity: {"description": "'Full': Full; 'Half': Half; 'auto': Auto; ", "format": "enum", "default": "auto", "type": "string", "enum": ["Full", "Half", "auto"], "optional": true}
    :param speed: {"description": "'10': 10 Mbs/sec; '100': 100 Mbs/sec; '1000': 1 Gb/sec; 'auto': Auto Negotiate Speed; (Interface Speed)", "format": "enum", "default": "auto", "type": "string", "enum": ["10", "100", "1000", "auto"], "optional": true}
    :param action: {"optional": true, "enum": ["enable", "disable"], "type": "string", "description": "'enable': Enable Management Port; 'disable': Disable Management Port; ", "format": "enum"}
    :param flow_control: {"default": 0, "optional": true, "type": "number", "description": "Enable 802.3x flow control on full duplex port", "format": "flag"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/interface/management`.
    """
    # NOTE(review): auto-generated a10sdk parent class; the __init__ that sets
    # up required attrs / the b_key appears truncated in this chunk -- confirm
    # against the full module before use.
| [
6738,
257,
940,
21282,
74,
13,
11321,
13,
32,
940,
14881,
9487,
1330,
317,
940,
14881,
9487,
628,
198,
4871,
44244,
32184,
39184,
7,
32,
940,
14881,
9487,
2599,
198,
220,
220,
220,
220,
198,
220,
220,
220,
37227,
1212,
1398,
857,
40... | 3.104588 | 1,482 |
# Packaging metadata for the asyncirc library.
# NOTE(review): distutils' setup() silently ignores install_requires; the
# blinker dependency only takes effect when setuptools shims distutils --
# confirm the intended build tool.
# NOTE(review): the distribution name is "asyncio-irc" while the importable
# packages are "asyncirc"/"asyncirc.plugins" -- apparently intentional.
from distutils.core import setup
setup(
    name="asyncio-irc",
    version="0.2.1",
    description="irc based on asyncio",
    author="Fox Wilson",
    author_email="fwilson@fwilson.me",
    url="https://github.com/watchtower/asyncirc",
    install_requires=["blinker"],
    packages=["asyncirc", "asyncirc.plugins"]
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
220,
220,
220,
220,
1438,
2625,
292,
13361,
952,
12,
1980,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
2196,
2625,
15,
13,
17,
13,
16,
1600,
198,
... | 2.240506 | 158 |
from unittest import mock
from unittest.mock import patch
from src.generate_new_site.site_data_structs import site
from pathlib import Path
################
# Test write() #
################
@patch('jinja2.Template.render')
@patch('src.generate_new_site.site_data_structs.site.Index.update_href')
@patch('src.generate_new_site.site_data_structs.site.SitePage.write')
# Mock it with itself just so we can get a count
######################
# Test update_href() #
######################
# Integration test for all update_href methods in site.py
@patch('src.generate_new_site.site_data_structs.site.rel_path', return_value=Path("updated"))
| [
6738,
555,
715,
395,
1330,
15290,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
6738,
12351,
13,
8612,
378,
62,
3605,
62,
15654,
13,
15654,
62,
7890,
62,
7249,
82,
1330,
2524,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
... | 3.197044 | 203 |
"""
fractional_part function divides the numerator by the denominator, and returns just the fractional part (a number between 0 and 1). Complete the body of the function so that it returns the right number. Note: Since division by 0 produces an error, if the denominator is 0, the function should return 0 instead of attempting the division.
"""
print(fractional_part(5, 5)) # Should be 0
print(fractional_part(5, 4)) # Should be 0.25
print(fractional_part(5, 3)) # Should be 0.66...
print(fractional_part(5, 2)) # Should be 0.5
print(fractional_part(5, 0)) # Should be 0
print(fractional_part(0, 5)) # Should be 0 | [
37811,
201,
198,
69,
7861,
282,
62,
3911,
2163,
36319,
262,
5470,
1352,
416,
262,
31457,
1352,
11,
290,
5860,
655,
262,
13390,
282,
636,
357,
64,
1271,
1022,
657,
290,
352,
737,
13248,
262,
1767,
286,
262,
2163,
523,
326,
340,
5860,... | 3.177665 | 197 |
# Packaging metadata for the Open VPP multi-agent system.
from setuptools import setup, find_packages
setup(
    name='openvpp-agents',
    version='0.1.0',
    author='Martin Tröschel',
    author_email='martin.troeschel@gmail.com',
    description=('The Open VPP multi-agent system'),
    # long_description=(open('README.txt').read() + '\n\n' +
    #                   open('CHANGES.txt').read() + '\n\n' +
    #                   open('AUTHORS.txt').read()),
    url='https://particon.de',
    install_requires=[
        'aiomas[mpb]>=1.0.1',
        'arrow>=0.4',
        'click>=4.0',
        'h5py>=2.5',
        'numpy>=1.8',
        'psutil>=2.2',
    ],
    # "src layout": importable code lives under src/
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    # two console executables are created on install
    entry_points={
        'console_scripts': [
            'openvpp-mosaik = openvpp_agents.mosaik:main',
            'openvpp-container = openvpp_agents.container:main',
        ],
    },
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
9654,
85,
381,
12,
49638,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
3256,
198,
220,
220,
220,
1772,
11... | 1.958696 | 460 |
#!/usr/bin/env python
# NOTE(review): original shebang read "#/usr/bin/env python" (missing "!"),
# which a shell would treat as a plain comment; fixed (comment-only change).
import os
from setuptools import setup, find_packages
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)  # join of a single path is the path itself
# Packaging metadata for the haikus library (haiku detection in text).
setup(
    name="haikus",
    description="Some classes for finding haikus in text",
    author="Grant Thomas",
    author_email="grant.thomas@wk.com",
    url="https://github.com/wieden-kennedy/haikus",
    version="0.0.1",
    install_requires=["nltk"],
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| [
2,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
13252,
2394,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
47690,
6... | 2.593023 | 344 |
# perform gwas, use sub functions
import argparse
import torch
import numpy as np
from itertools import repeat
from perform_gwas import model, kin, perm
from preprocessing import load_files, prepare_data
import time
def gwas(arguments: argparse.Namespace, X: torch.tensor, y: torch.tensor, K: torch.tensor, covs: torch.tensor,
         X_index: np.array, m: int):
    """
    Function to perform batch-wise computation of univariate test:
    (1) estimate variance components
    (2) calculate Cholesky decomposition
    (3) linearly transform data
    (4) calculate residual sum of squares of null model
    (5) batch-wise:
        (a) linearly transform marker
        (b) calculate effect size, residual sum of squares and standard error
        (c) calculate test statistic
    (6) calculate p-values
    :param arguments: user input
    :param X: genotype matrix of shape (n,m)
    :param y: phenotype vector of shape (n)
    :param K: kinship matrix of shape (n,n)
    :param covs: vector/matrix of covariates of shape (n,c), optional
    :param X_index: indices of genotype matrix samples to load in batches
    :param m: total number of SNPs to work on
    :return: p-value, test statistic, standard error and effect size of each SNP and minor allele frequencies if X was
    loaded batch-wise
    """
    start = time.time()
    n = y.shape[0]
    fixed = model.get_fixed_effects(arguments, covs, n) # shape: (n,1) or (n,c+1)
    # estimate variance components
    v_g, v_e = kin.estimate_variance_components(y, K, fixed) # shape: (2)
    # Cholesky decomposition
    C = kin.get_cholesky(v_g * K + v_e * torch.eye(n, dtype=K.dtype, device=arguments.device)) # shape: (n,n)
    # linearly transform data, i.e. solve y = Cb
    y_trans = kin.transform_input(y, C) # shape: (n)
    fixed = kin.transform_input(fixed, C) # shape: (n) or (n,c+1)
    # calculate residual sum of squares of null model
    RSS_0 = model.get_rss_h0(y_trans, fixed) # shape: (1)
    # degrees of freedom of the alternative model: fixed-effect columns plus
    # one -- presumably for the tested marker; confirm against model.get_f_score.
    freedom_deg = fixed.shape[1] + 1
    tmp = []
    freq = []
    # in batches:
    for i in range(int(np.ceil(m / arguments.batch))):
        lower_bound = i * arguments.batch
        upper_bound = (i + 1) * arguments.batch
        if upper_bound > m:
            upper_bound = m
        if X is None:
            # load X batch-wise
            X_batch = load_files.load_genotype_matrix(arguments.x, sample_index=X_index, snp_lower_index=lower_bound,
                                                      snp_upper_index=upper_bound)
            # calculate minor allele frequencies
            freq.append(prepare_data.get_maf(X_batch))
            # linearly transform X
            X_batch = kin.transform_input(X_batch.to(arguments.device), C) # shape: (n,b)
        else:
            # linearly transform X_batch if X was completely loaded before
            X_batch = kin.transform_input(X[:, lower_bound:upper_bound].to(arguments.device), C) # shape: (n,b)
        X_batch = model.get_x_batch(X_batch, fixed, lower_bound, upper_bound) # shape: (b,n,2) or (b,n,c+2)
        # calculate effect size, residual sum of squares and standard error
        RSS_1, SE, effSize = model.get_rss_and_se(X_batch, y_trans)
        # calculate test statistic
        F_score = model.get_f_score(RSS_0, RSS_1, n, freedom_deg)
        # accumulate per-batch results on CPU to keep GPU memory bounded
        tmp.append(torch.stack((F_score, SE, effSize), dim=1).to(torch.device("cpu")))
        if arguments.device.type != "cpu":
            # free the per-batch GPU tensors before the next iteration
            with torch.cuda.device(arguments.device):
                del RSS_1
                del F_score
                del X_batch
                del SE
                del effSize
                torch.cuda.empty_cache()
    output = torch.cat(tmp, dim=0) # shape(m,3)
    output = output.to(torch.device("cpu"))
    if X is None:
        freq = torch.cat(freq, dim=0)
    # NOTE(review): when X was fully loaded, `freq` is returned as an empty
    # Python list rather than a tensor (documented above as "if X was loaded
    # batch-wise") -- callers must handle both.
    time_test_stats = time.time()
    print("Have test statistics of %d SNPs. Elapsed time: %f" % (m, time_test_stats-start))
    print("Calculate P-values now")
    # compute p-values
    p_val = list(map(model.get_p_value, output[:, 0], repeat(n), repeat(freedom_deg)))
    print("Have P-values. Elapsed time: ", time.time()-time_test_stats)
    return torch.cat((torch.tensor(p_val).unsqueeze(1), output), dim=1), freq
def perm_gwas(arguments: argparse.Namespace, X: torch.tensor, y: torch.tensor, K: torch.tensor,
              true_test_stats: torch.tensor, covs: torch.tensor, X_index: np.array, m: int):
    """
    Function to perform batch-wise computation of permutation-based test:
    (1) compute permutations of phenotype
    (2) estimate variance components for each permutation
    (3) calculate Cholesky decomposition for each permutation
    (4) linearly transform data
    (5) calculate residual sum of squares of null model for each permutation
    (6) batch-wise:
        (a) linearly transform marker
        (b) calculate residual sum of squares
        (c) calculate test statistic
    (7) calculate permutation-based p-values
    (8) calculate Westfall-Young permutation-based threshold
    :param arguments: user input
    :param X: genotype matrix of shape (n,m)
    :param y: phenotype vector of shape (n)
    :param K: kinship matrix of shape (n,n)
    :param true_test_stats: test statistics of true observations for each SNP
    :param covs: vector/matrix of covariates of shape (n,c), optional
    :param X_index: indices of genotype matrix samples to load in batches
    :param m: total number of SNPs to work on
    :return: adjusted p-value for each SNP, minimal p-value for each permutation, permutation seeds
    """
    start = time.time()
    n = y.shape[0]
    fixed = model.get_fixed_effects_perm(arguments, covs, n) # shape: (p,n,1) or (p,n,c+1)
    # compute permutations of phenotype
    y_perm, my_seeds = perm.permute_phenotype(y, arguments.perm)
    y_perm = y_perm.to(arguments.device) # shape: (n,p)
    var_comps = []
    # estimate variance components for each permutation
    for i in range(arguments.perm):
        # fixed[0, :, :] -- the fixed effects are identical across permutations
        # at this point, so the first slice is representative.
        var_comps.append(kin.estimate_variance_components(y_perm[:, i], K, fixed[0, :, :])) # shape: (p,2)
    var_comps = torch.tensor(np.array(var_comps), device=arguments.device)
    # calculate Cholesky decomposition for each permutation
    C_perm = perm.get_perm_kinships(K, var_comps) # shape: (p,n,n)
    C_perm = kin.get_cholesky(C_perm) # shape: (p,n,n)
    # linearly transform data
    y_perm = kin.transform_input(torch.unsqueeze(torch.t(y_perm), 2), C_perm) # shape: (p,n,1)
    fixed = kin.transform_input(fixed, C_perm) # shape: (p,n,1) or (p,n,c+1)
    # calculate residual sum of squares of null model for each permutation
    RSS0 = model.get_rss_h0_perm(y_perm, fixed) # shape: (p)
    if arguments.device.type != "cpu":
        # free GPU tensors that are no longer needed before the batch loop
        with torch.cuda.device(arguments.device):
            del var_comps
            del K
            del y
            del covs
            torch.cuda.empty_cache()
    # degrees of freedom: fixed-effect columns plus one -- presumably for the
    # tested marker; confirm against model.get_f_score.
    freedom_deg = fixed.shape[2]+1
    test_stats = []
    # in batches:
    for i in range(int(np.ceil(m / arguments.batch_perm))):
        lower_bound = i * arguments.batch_perm
        upper_bound = (i + 1) * arguments.batch_perm
        if upper_bound > m:
            upper_bound = m
        print("Calculate perm test statistics for SNPs %d to %d" % (lower_bound, upper_bound))
        if X is None:
            # load X batch-wise and linearly transform it
            X_batch = load_files.load_genotype_matrix(arguments.x, sample_index=X_index, snp_lower_index=lower_bound,
                                                      snp_upper_index=upper_bound)
            X_batch = kin.transform_input(model.get_v_batch(X_batch.to(arguments.device),
                                                            arguments.perm), C_perm) # shape: (p,n,b)
        else:
            # if X was already loaded completely, linearly transform it
            X_batch = kin.transform_input(model.get_v_batch(X[:, lower_bound:upper_bound].to(arguments.device),
                                                            arguments.perm), C_perm) # shape: (p,n,b)
        X_batch = model.get_x_batch_perm(X_batch, fixed, lower_bound, upper_bound) # shape: (p,b,n,2) or (p,b,n,c+2)
        # calculate residual sum of squares
        RSS = model.get_rss_perm(X_batch, y_perm) # shape: (p,b)
        # calculate test statistics
        F_score = model.get_f_score(torch.t(RSS0.repeat(upper_bound-lower_bound, 1)), RSS, n, freedom_deg) # shape: (p,b)
        # accumulate per-batch results on CPU to keep GPU memory bounded
        test_stats.append(F_score.to(torch.device("cpu")))
        if arguments.device.type != "cpu":
            # free the per-batch GPU tensors before the next iteration
            with torch.cuda.device(arguments.device):
                del RSS
                del F_score
                del X_batch
                torch.cuda.empty_cache()
    test_stats = torch.cat(test_stats, dim=1) # shape: (p,m)
    time_test_stats = time.time()
    print("Have perm test statistics. Elapsed time: ", time_test_stats-start)
    test_stats = test_stats.to(torch.device("cpu"))
    # calculate permutation-based p-values
    perm_p_val = perm.get_perm_p_value(test_stats, true_test_stats) # shape: (m)
    # calculate Westfall-Young permutation-based threshold
    min_p_val = perm.get_min_p_value(test_stats, n, freedom_deg) # shape: (p)
    print("Have adjusted p-values and minimal p-values. Elapsed time: ", time.time()-time_test_stats)
    return perm_p_val, min_p_val, my_seeds
| [
2,
1620,
308,
9776,
11,
779,
850,
5499,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
340,
861,
10141,
1330,
9585,
198,
6738,
1620,
62,
70,
9776,
1330,
2746,
11,
18967,
11,
9943,
198,
6738,... | 2.308578 | 4,080 |
import face_detection as m
import time

# Benchmark script: warm the detector up, then time 100 detections on the
# same 480p image and report per-step and average durations.
IMAGE_PATH = './images_480p/1_854x480.jpg'

m.init('./models/ncnn/')
print('warming up')
for _ in range(5):
    m.detect(IMAGE_PATH)

start = time.time()
for i in range(100):
    step_start = time.time()
    result = m.detect(IMAGE_PATH)
    step_end = time.time()
    print('step {} duration is {}'.format(i, step_end - step_start))
end = time.time()

print(result)
print('average duration is {}'.format((end - start) / 100))
| [
11748,
1986,
62,
15255,
3213,
355,
285,
198,
11748,
640,
198,
198,
76,
13,
15003,
7,
4458,
14,
27530,
14,
10782,
20471,
14,
11537,
198,
4798,
10786,
48133,
510,
11537,
198,
76,
13,
15255,
478,
7,
4458,
14,
17566,
62,
22148,
79,
14,
... | 2.320158 | 253 |