content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from django.contrib import admin
from django.urls import path
from estimation.views import estimate
urlpatterns = [
path('admin/', admin.site.urls),
path('api/estimate/', estimate)
]
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
31850,
13,
33571,
1330,
8636,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
28482,
14,
3256,
13... | 2.969231 | 65 |
# -*- coding: utf-8 -*-
import re
def normalize_version(version):
"""
Helper function to normalize version.
Returns a comparable object.
Args:
version (str) version, e.g. "0.1.0"
"""
rv = []
for x in version.split("."):
try:
rv.append(int(x))
except ValueError:
for y in re.split("([0-9]+)", x):
if y == '':
continue
try:
rv.append(int(y))
except ValueError:
rv.append(y)
return rv
def ver_to_tuple(value):
"""
Convert version like string to a tuple of integers.
"""
return tuple(int(_f) for _f in re.split(r'\D+', value) if _f)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
302,
628,
198,
4299,
3487,
1096,
62,
9641,
7,
9641,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
5053,
525,
2163,
284,
3487,
1096,
2196,
13,
198,
... | 1.877863 | 393 |
import basic
import os
import subprocess
while True:
text = input('Doge shell 0.1 (Beta) > ')
if "import" in text:
importing = text.split(" ")
if importing[0] == "import":
f = open(importing[1], 'r')
imports = f.read()
f2 = open(importing[2], 'r')
toimp = f2.read()
aimp = imports + "\n" + toimp
print(aimp)
f2 = open(importing[2], 'w')
f2.truncate()
f2.write(aimp)
f2.close()
f.close()
else:
if text.strip() == "":
continue
result, error = basic.run('<stdin>', text)
if error:
print(error.as_string())
elif result:
if len(result.elements) == 1:
print(repr(result.elements[0]))
else:
print(repr(result))
else:
if text.strip() == "":
continue
result, error = basic.run('<stdin>', text)
if error:
print(error.as_string())
elif result:
if len(result.elements) == 1:
print(repr(result.elements[0]))
else:
print(repr(result))
| [
11748,
4096,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
2420,
796,
5128,
10786,
5211,
469,
7582,
657,
13,
16,
357,
43303,
8,
1875,
705,
8,
198,
220,
220,
220,
611,
366,
11748,
1,
287,
... | 1.729023 | 727 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.898678 | 227 |
#!/usr/bin/env python
import json
import logging
import requests
from s3vaultlib import __application__
from .base import MetadataBase
__author__ = "Giuseppe Chiesa"
__copyright__ = "Copyright 2017-2021, Giuseppe Chiesa"
__credits__ = ["Giuseppe Chiesa"]
__license__ = "BSD"
__maintainer__ = "Giuseppe Chiesa"
__email__ = "mail@giuseppechiesa.it"
__status__ = "PerpetualBeta"
class EC2Metadata(MetadataBase):
"""
Object that retrieve metadata from within an EC2 instance
"""
def _get_data(self, url_path):
"""
Query the metadata
"""
url = '{b}/{p}'.format(b=self._uri, p=url_path)
try:
response = requests.get(url, timeout=5)
except Exception:
self.logger.error('Error while getting metadata. Perhaps you want to use --no-ec2 flag?')
raise
if not response.ok:
raise EC2MetadataException('Error while reading metadata from path')
return response.text.strip()
@property
def role(self):
"""
Return the role associated to the instance
"""
data = self._get_data('meta-data/iam/security-credentials/')
if not data:
raise EC2MetadataException('Role not associated')
return data
@property
def account_id(self):
"""
Return the account_id associated to the instance
:return: account_id
:rtype: basestring
"""
return self._get_instance_identity_document()['accountId']
@property
def region(self):
"""
Return the region associated to the instance
:return: region
:rtype: basestring
"""
return self._get_instance_identity_document()['availabilityZone'][:-1]
@property
def instance_id(self):
"""
Return the instance_id associated to the instance
:return: instance_id
:rtype: basestring
"""
return self._get_instance_identity_document()['instanceId']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
33918,
198,
11748,
18931,
198,
198,
11748,
7007,
198,
198,
6738,
264,
18,
85,
1721,
8019,
1330,
11593,
31438,
834,
198,
6738,
764,
8692,
1330,
3395,
14706,
14881,
198,
198,
834,
... | 2.403341 | 838 |
import zroya
import unittest
if __name__ == "__main__":
unittest.main()
| [
11748,
1976,
3287,
64,
198,
11748,
555,
715,
395,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
628
] | 2.4375 | 32 |
import discord.ext.commands as commands
from config.settings import *
from cogs.const import *
bot = commands.Bot(commands.when_mentioned_or(prefix_list))
bot.load_extension('cogs.music')
@bot.command()
@commands.has_permissions(manage_guild=True)
bot.run(settings["token"])
| [
11748,
36446,
13,
2302,
13,
9503,
1746,
355,
9729,
198,
6738,
4566,
13,
33692,
1330,
1635,
198,
6738,
269,
18463,
13,
9979,
1330,
1635,
198,
198,
13645,
796,
9729,
13,
20630,
7,
9503,
1746,
13,
12518,
62,
17181,
62,
273,
7,
40290,
6... | 2.926316 | 95 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mailgun API wrapper."""
from __future__ import annotations
import urllib
from core import feconf
from core import utils
from core.platform.email import mailgun_email_services
from core.tests import test_utils
from typing import Dict, Tuple
MailgunQueryType = Tuple[str, bytes, Dict[str, str]]
class EmailTests(test_utils.GenericTestBase):
"""Tests for sending emails."""
class Response:
"""Class to mock utils.url_open responses."""
def getcode(self) -> int:
"""Gets the status code of this url_open mock.
Returns:
int. 200 to signify status is OK. 500 otherwise.
"""
return 200 if self.url == self.expected_url else 500
def test_send_email_to_mailgun(self) -> None:
"""Test for sending HTTP POST request."""
# Test sending email without bcc, reply_to or recipient_variables.
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=b%40b.com&'
b'recipient_variables=%7B%7D',
{'Authorization': 'Basic YXBpOmtleQ=='}
)
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
self.assertTrue(resp)
# Test sending email with single bcc and single recipient email.
expected_query_url = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=b%40b.com&'
b'bcc=c%40c.com&'
b'h%3AReply-To=abc&'
b'recipient_variables=%7B%27b%40b.com'
b'%27%3A+%7B%27first%27%3A+%27Bob%27%2C+%27id%27%3A+1%7D%7D',
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com'],
reply_to='abc',
recipient_variables={'b@b.com': {'first': 'Bob', 'id': 1}})
self.assertTrue(resp)
# Test sending email with single bcc, and multiple recipient emails
# differentiated by recipient_variables ids.
expected_query_url = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=b%40b.com&'
b'bcc=%5B%27c%40c.com%27%2C+%27d%40d.com%27%5D&'
b'h%3AReply-To=abc&'
b'recipient_variables=%7B%27b%40b.com'
b'%27%3A+%7B%27first%27%3A+%27Bob%27%2C+%27id%27%3A+1%7D%7D',
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂',
bcc=['c@c.com', 'd@d.com'],
reply_to='abc',
recipient_variables=({'b@b.com': {'first': 'Bob', 'id': 1}}))
self.assertTrue(resp)
def test_batch_send_to_mailgun(self) -> None:
"""Test for sending HTTP POST request."""
expected_query_url: MailgunQueryType = (
'https://api.mailgun.net/v3/domain/messages',
b'from=a%40a.com&'
b'subject=Hola+%F0%9F%98%82+-+invitation+to+collaborate&'
b'text=plaintext_body+%F0%9F%98%82&'
b'html=Hi+abc%2C%3Cbr%3E+%F0%9F%98%82&'
b'to=%5B%27b%40b.com%27%2C+%27c%40c.com%27%2C+%27d%40d.com%27%5D&'
b'recipient_variables=%7B%7D',
{'Authorization': 'Basic YXBpOmtleQ=='})
swapped_urlopen = lambda x: self.Response(x, expected_query_url)
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
urllib.request, 'Request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
resp = mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
self.assertTrue(resp)
def test_mailgun_key_or_domain_name_not_set_raises_exception(self) -> None:
"""Test that exceptions are raised when API key or domain name are
unset.
"""
# Testing no mailgun api key.
mailgun_exception = self.assertRaisesRegex( # type: ignore[no-untyped-call]
Exception, 'Mailgun API key is not available.')
with mailgun_exception:
mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
# Testing no mailgun domain name.
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
mailgun_exception = self.assertRaisesRegex( # type: ignore[no-untyped-call]
Exception, 'Mailgun domain name is not set.')
with swap_api, mailgun_exception:
mailgun_email_services.send_email_to_recipients(
'a@a.com',
['b@b.com', 'c@c.com', 'd@d.com'],
'Hola 😂 - invitation to collaborate',
'plaintext_body 😂',
'Hi abc,<br> 😂')
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
15069,
1946,
383,
9385,
544,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743... | 1.943751 | 4,409 |
import sys
sys.setrecursionlimit(10 ** 5)
cache = [0 for _ in range(100000)]
if __name__ == '__main__':
v = [0] + list(map(int, input().split()))
# print(solve(v, len(v) - 1))
print(solve_memo(v, len(v) - 1))
| [
11748,
25064,
198,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
940,
12429,
642,
8,
628,
198,
198,
23870,
796,
685,
15,
329,
4808,
287,
2837,
7,
3064,
830,
15437,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
103... | 2.171429 | 105 |
#couning values
all_nums=[]
f = open ("COSC450_P1_Data.txt", "r")
num = 0
for line in f.read().split():
num += 1
all_nums.append(line)
f.close()
firstArr =[]
aLenght = int(num/5)
print(aLenght)
f = open ("COSC450_P1_Data.txt", "r")
for line in f.read().split():
for i in range(1,5):
for j in range(1,aLenght):
firstArr[i][j]=line
f.close()
print(firstArr)
| [
2,
66,
280,
768,
3815,
201,
198,
439,
62,
77,
5700,
28,
21737,
201,
198,
69,
796,
1280,
5855,
34,
2640,
34,
17885,
62,
47,
16,
62,
6601,
13,
14116,
1600,
366,
81,
4943,
201,
198,
22510,
796,
657,
201,
198,
1640,
1627,
287,
277,
... | 1.881818 | 220 |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 ZhicongYan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import os
import sys
sys.path.append('.')
sys.path.append("../")
import tensorflow as tf
import tensorflow.contrib.layers as tcl
import numpy as np
from netutils.loss import get_loss
from netutils.metric import get_metric
from .base_model import BaseModel
class Classification(BaseModel):
'''
train operations
'''
'''
test operations
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
10511,
291,
506,
49664,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
... | 3.644186 | 430 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-10-05 08:35
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
23,
319,
12131,
12,
940,
12,
2713,
8487,
25,
2327,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.726027 | 73 |
# -*- coding: utf-8 -*-
from django.conf.urls import url # noqa
from django.views.generic import TemplateView # noqa
app_name = 'django_approval'
urlpatterns = []
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
220,
1303,
645,
20402,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
220,
1303,
645,
20402,
... | 2.634921 | 63 |
import numpy as np
import matplotlib.pyplot as plt
path = r'D:\data\20190718\155221_Qubit smart_two_tone2'
cavity_path = r'D:\data\20190718\155221_cavity smart_two_tone2'
data_name = path+path[16:]+r'.dat'
cavity_data_name = cavity_path+cavity_path[16:]+r'.dat'
data = np.loadtxt(data_name, unpack=True)
cavity_data = np.loadtxt(cavity_data_name, unpack=True)
n = 5
power= np.array_split(data[0],n)
freq = np.array_split(data[1],n)[0]
absol = np.array_split(data[4],n)
cavity_absol = np.array_split(cavity_data[2],n)
cavity_freq = np.array_split(cavity_data[1],n)
print(cavity_freq[-1][np.argmax(cavity_absol[-1])]/1e9)
plt.title(path[8:])
plt.imshow(absol, aspect='auto',extent=[freq[0]/1e9, freq[-1]/1e9, cavity_freq[-1][np.argmax(cavity_absol[-1])]/1e9, cavity_freq[0][np.argmax(cavity_absol[0])]/1e9], cmap = 'RdBu')
plt.xlabel('Drive Frequency (GHz)')
plt.ylabel(r'Probe Frequency (GHz)')
plt.colorbar()
plt.show()
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
201,
198,
201,
198,
6978,
796,
374,
6,
35,
7479,
7890,
59,
23344,
2998,
1507,
59,
18742,
26115,
62,
48,
549,
270,
4451,
62,
... | 1.969262 | 488 |
"""
main driver module
"""
# Copyright 2021, Blast Analytics & Marketing
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pathlib import Path
from blast_datetime import get_timestamp
from blast_files import generate_random_data
from blast_logging import setup_logger
from blast_init_cmd_args import parse_cmd_args, parse_toml, get_relevant_env_vars
MODULE_NAME = Path(__file__).resolve().name
CWD_PATH = Path(__file__).resolve().parent
# ./blast_developer_tools/data/
DATA_PATH = Path(CWD_PATH, "data")
LOGGER = setup_logger(MODULE_NAME)
if __name__ == "__main__":
print(f"{MODULE_NAME} started: {get_timestamp()}")
start = time.perf_counter()
parse_cmd_args(logger=LOGGER)
parse_toml(logger=LOGGER)
get_relevant_env_vars()
generate_random_data(
file_path=DATA_PATH.joinpath(*["ascii_data.txt"]),
binary=False, megabytes=0.02, logger=LOGGER,
)
print(f"{MODULE_NAME} finished: {time.perf_counter() - start:0.2f} seconds")
| [
37811,
198,
12417,
4639,
8265,
198,
37811,
198,
2,
15069,
33448,
11,
20641,
30437,
1222,
22137,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 2.985944 | 498 |
# A function with Behavior That varies Over Time
# A function compound value have a body and a parent frame
# The parent frame contains the balance, the local state of the withdraw function
# Non-Local Assignment & Persistent Local State
# Work for python 3
def make_withdraw(balance):
""" Return a withdraw function with a starting balance."""
return withdraw, deposit
# nonlocal <name>
# Effect: future assignments to that name change its pre-existing binding in the first non-local frame of the current environment
# Alternative 2.7
def make_withdraw(balance):
""" Return a withdraw function with a starting balance."""
balance = [balance]
return withdraw, deposit
| [
2,
317,
2163,
351,
20181,
1320,
17806,
3827,
3862,
198,
198,
2,
317,
2163,
13061,
1988,
423,
257,
1767,
290,
257,
2560,
5739,
198,
198,
2,
383,
2560,
5739,
4909,
262,
5236,
11,
262,
1957,
1181,
286,
262,
8399,
2163,
198,
198,
2,
8... | 3.921348 | 178 |
import os
#os.chdir("C:\\Users\\Anna Huang\\Desktop\\cse583\\Movie_Income_Prediction")
import requests
import json
import pandas as pd
from datetime import datetime
TMDB_KEY = "60027f35df522f00e57a79b9d3568423"
OMDB_KEY = "d3941272"
def get_tmdb_id_list():
"""function to get all Tmdb_id between 06-16"""
import requests
import json
year = [2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016]
page_num = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
id_list = []
tmdb_id_query = "https://api.themoviedb.org/3/discover/movie?" \
+ "api_key=%s" \
+ "&language=en-US&sort_by=release_date.asc" \
+ "&include_adult=false&include_video=false" \
+ "&page=%d" \
+ "&primary_release_year=%d"
for n in page_num:
for yr in year:
rq = requests.get(tmdb_id_query % (TMDB_KEY,n,yr)).json()
for item in rq['results']:
id_list.append(item['id'])
return id_list
def get_profit():
"""call procedure to get profit data of a particular movie (id)"""
TMDB_ID_LIST = get_tmdb_id_list()
query = "https://api.themoviedb.org/3/movie/%d?" \
+"api_key=%s" \
+"&language=en-US"
profit_dict_list = []
for id in TMDB_ID_LIST:
request = requests.get(query %(id,TMDB_KEY)).json()
profit_dict_list.append({'imdb_id':request['imdb_id'], 'profit': request['revenue'],'budget': request['budget']})
profit = pd.DataFrame(profit_dict_list)
profit_df=profit
#profit_df = profit[profit['profit']>0]
profit_df.to_csv('profit_by_imdb_id.csv')
get_profit() | [
11748,
28686,
198,
2,
418,
13,
354,
15908,
7203,
34,
25,
6852,
14490,
6852,
31160,
31663,
6852,
36881,
6852,
66,
325,
46239,
6852,
25097,
62,
818,
2958,
62,
39156,
2867,
4943,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
19798,
292,... | 2.004657 | 859 |
# Link -
'''
Given a chocolate bar, two children, Lily and Ron, are determining how to share it. Each of the squares has an integer on it.
Lily decides to share a contiguous segment of the bar selected such that:
The length of the segment matches Ron's birth month, and,
The sum of the integers on the squares is equal to his birth day.
You must determine how many ways she can divide the chocolate.
Consider the chocolate bar as an array of squares, . She wants to find segments summing to Ron's birth day, with a length equalling his birth month, . In this case, there are two segments meeting her criteria: and .
Function Description
Complete the birthday function in the editor below. It should return an integer denoting the number of ways Lily can divide the chocolate bar.
birthday has the following parameter(s):
s: an array of integers, the numbers on each of the squares of chocolate
d: an integer, Ron's birth day
m: an integer, Ron's birth month
Input Format
The first line contains an integer , the number of squares in the chocolate bar.
The second line contains space-separated integers , the numbers on the chocolate squares where .
The third line contains two space-separated integers, and , Ron's birth day and his birth month.
Constraints
, where ()
Output Format
Print an integer denoting the total number of ways that Lily can portion her chocolate bar to share with Ron.
Sample Input 0
5
1 2 1 3 2
3 2
Sample Output 0
2
Explanation 0
Lily wants to give Ron squares summing to . The following two segments meet the criteria:
image
Sample Input 1
6
1 1 1 1 1 1
3 2
Sample Output 1
0
Explanation 1
Lily only wants to give Ron consecutive squares of chocolate whose integers sum to . There are no possible pieces satisfying these constraints:
image
Thus, we print as our answer.
Sample Input 2
1
4
4 1
Sample Output 2
1
Explanation 2
Lily only wants to give Ron square of chocolate with an integer value of . Because the only square of chocolate in the bar satisfies this constraint, we print as our answer.
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the birthday function below.
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
s = list(map(int, input().rstrip().split()))
dm = input().rstrip().split()
d = int(dm[0])
m = int(dm[1])
result = birthday(s, d, m)
fptr.write(str(result) + '\n')
fptr.close()
| [
2,
7502,
532,
220,
198,
198,
7061,
6,
198,
15056,
257,
11311,
2318,
11,
734,
1751,
11,
20037,
290,
6575,
11,
389,
13213,
703,
284,
2648,
340,
13,
5501,
286,
262,
24438,
468,
281,
18253,
319,
340,
13,
198,
198,
43,
813,
13267,
284,... | 3.527504 | 709 |
#!/usr/bin/env python2.7
# Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys, os, subprocess, shutil
target = None
channel = None
for arg in sys.argv:
if "--channel" in arg:
channel = arg.split("=")[1]
elif "--target" in arg:
target = arg.split("=")[1]
print
print "target: " + str(target)
print "channel: " + str(channel)
print
if channel is None:
print "specify --channel"
sys.exit(1)
if target is None:
print "specify --target"
sys.exit(1)
SERVER_ADDRESS = os.getenv("RUST_DIST_SERVER", "https://static.rust-lang.org")
RUST_DIST_FOLDER = "dist"
CARGO_DIST_FOLDER = "cargo-dist"
TEMP_DIR = "./tmp"
IN_DIR = "./in"
# Create the temp directory
if os.path.isdir(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
os.mkdir(TEMP_DIR)
# Create the in directory
if os.path.isdir(IN_DIR):
shutil.rmtree(IN_DIR)
os.mkdir(IN_DIR)
# Download rust manifest
rust_manifest_name = "channel-rustc-" + channel
remote_rust_dir = SERVER_ADDRESS + "/" + RUST_DIST_FOLDER
remote_rust_manifest = remote_rust_dir + "/" + rust_manifest_name
print "rust manifest: " + remote_rust_manifest
cwd = os.getcwd()
os.chdir(TEMP_DIR)
retval = subprocess.call(["curl", "-f", "-O", remote_rust_manifest])
if retval != 0:
print "downlading rust manifest failed"
sys.exit(1)
os.chdir(cwd)
# Get list of rust artifacts for target
rust_artifacts = []
rustc_installer = None
for line in open(os.path.join(TEMP_DIR, rust_manifest_name)):
if target in line and ".tar.gz" in line:
rust_artifacts.append(line.rstrip())
if line.startswith("rustc-") and "-src" not in line:
rustc_installer = line.rstrip()
assert len(rust_artifacts) > 0
print "rust artifacts: " + str(rust_artifacts)
assert rustc_installer is not None
# We'll use the source checksum as a fingerprint for synchronizing
# dist builds across platforms on the buildbot prior to uploading.
# FIXME: Would be nice to get this fingerprint from the 'version' file
# but that requires some buildbot changes.
rust_source = None
for line in open(os.path.join(TEMP_DIR, rust_manifest_name)):
if "-src" in line:
rust_source = line.rstrip()
assert rust_source is not None
print "rust source: " + rust_source
# Download the source
cwd = os.getcwd()
os.chdir(IN_DIR)
full_rust_source = remote_rust_dir + "/" + rust_source
retval = subprocess.call(["curl", "-f", "-O", full_rust_source])
if retval != 0:
print "downloading source failed"
sys.exit(1)
os.chdir(cwd)
# Download the rust artifacts
full_rust_artifacts = [remote_rust_dir + "/" + x for x in rust_artifacts]
for artifact in full_rust_artifacts:
cwd = os.getcwd()
os.chdir(IN_DIR)
retval = subprocess.call(["curl", "-f", "-O", artifact])
if retval != 0:
print "downlading " + artifact + " failed"
sys.exit(1)
os.chdir(cwd)
# Figure out corresponding cargo nightly. If the channel is nightly, then it's just the cargo nightly.
# If it's beta or stable then it's paired with a specific revision from the cargo-snap.txt file.
cargo_archive_date = None
if channel != "nightly":
retval = subprocess.call(["tar", "xzf", IN_DIR + "/" + rustc_installer, "-C", TEMP_DIR])
if retval != 0:
print "untarring source failed"
sys.exit(1)
rustc_installer_dir = os.path.join(TEMP_DIR, rustc_installer.replace(".tar.gz", ""))
# Pull the version number out of the version file
version = None
for line in open(os.path.join(rustc_installer_dir, "version")):
print "reported version: " + line
version = line.split(" ")[0].split("-")[0]
assert version is not None
print "cargo version key: " + version
# Search the cargo snap database for this version
for line in open("cargo-revs.txt"):
if version in line:
cargo_archive_date = line.split(":")[1].strip()
assert len(cargo_archive_date) > 0
break
assert cargo_archive_date is not None
print "cargo date: " + str(cargo_archive_date)
# Download cargo manifest
remote_cargo_dir = SERVER_ADDRESS + "/" + CARGO_DIST_FOLDER
if cargo_archive_date is not None:
remote_cargo_dir += "/" + cargo_archive_date
cargo_manifest_name = "channel-cargo-nightly"
remote_cargo_manifest = remote_cargo_dir + "/" + cargo_manifest_name
print "cargo manifest: " + remote_cargo_manifest
cwd = os.getcwd()
os.chdir(TEMP_DIR)
retval = subprocess.call(["curl", "-f", "-O", remote_cargo_manifest])
if retval != 0:
print "downlading rust manifest failed"
sys.exit(1)
os.chdir(cwd)
# Get list of cargo artifacts for target
cargo_artifacts = []
for line in open(os.path.join(TEMP_DIR, cargo_manifest_name)):
if target in line:
cargo_artifacts.append(line.rstrip())
assert len(cargo_artifacts) > 0
print "cargo artifacts: " + str(cargo_artifacts)
# Download the cargo artifacts
full_cargo_artifacts = [remote_cargo_dir + "/" + x for x in cargo_artifacts]
for artifact in full_cargo_artifacts:
cwd = os.getcwd()
os.chdir(IN_DIR)
retval = subprocess.call(["curl", "-f", "-O", artifact])
if retval != 0:
print "downlading " + artifact + " failed"
sys.exit(1)
os.chdir(cwd)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
15069,
1853,
383,
17103,
4935,
34152,
13,
4091,
262,
27975,
38162,
9947,
198,
2,
2393,
379,
262,
1353,
12,
5715,
8619,
286,
428,
6082,
290,
379,
198,
2,
2638,
1378,
... | 2.612512 | 2,142 |
# Generated by Django 2.1 on 2018-08-28 02:35
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
319,
2864,
12,
2919,
12,
2078,
7816,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
"""
Unbounded (unrestricted) type system.
"""
from .base import TypeSystem
| [
37811,
198,
198,
3118,
65,
6302,
357,
403,
49343,
8,
2099,
1080,
13,
198,
198,
37811,
198,
198,
6738,
764,
8692,
1330,
5994,
11964,
628
] | 3.16 | 25 |
"""Helper functions for datascience
-By Natasha Upchurch
"""
class HelperFunctions:
"""Base Class
"""
@staticmethod
def null_count(df):
""":param df pandas.DataFrame to count nulls on
"""
return df.isnull().sum().sum()
@staticmethod
def list_2_series(list_2_series, df):
""" adds a new column named "list" to the dataframe with contents being list_2_series
:param df pandas.DataFrame to count nulls on
:param list_2_series list to add as feature/ column of the dataframe
"""
df['list'] = list_2_series
return df
| [
37811,
47429,
5499,
329,
19395,
4234,
198,
220,
220,
220,
532,
3886,
41875,
3205,
36964,
198,
37811,
198,
4871,
5053,
525,
24629,
2733,
25,
198,
220,
220,
220,
37227,
14881,
5016,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
2488,
1... | 2.526749 | 243 |
from conans import ConanFile, CMake, tools
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
4899,
198
] | 3.583333 | 12 |
#!/usr/bin/env python
try:
from pyNLPQL import NLPQL
__all__ = ['NLPQL']
except:
__all__ = []
#end
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
12972,
45,
19930,
9711,
1330,
399,
19930,
9711,
198,
220,
220,
220,
11593,
439,
834,
796,
37250,
45,
19930,
9711,
20520,
198,
16341,
25,
198,
... | 2.074074 | 54 |
#! /usr/bin/python3 -B
# Map each source image to the conversion options (ImageMagick-style
# "-quality/-colors" flags) used to recompress it.
conv = {}
conv['2012-SK-02_1-original.png'] = '-quality 50 -colors 50'
conv['2012-SK-02_2-original.png'] = '-quality 50 -colors 50'
conv['2012-SK-02_3-original.png'] = '-quality 50 -colors 50'
conv['2012-SK-02_solution-original.png'] = '-quality 50 -colors 50'
if __name__ == '__main__':
    import sys, os
    # Make the shared helpers in ../scripts importable.
    sys.path.append(os.path.join('..', 'scripts'))
    from common import *
    # `execute` comes from the star-import above; presumably it applies the
    # listed options to each file -- TODO confirm against common.py.
    execute(conv)
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
532,
33,
198,
42946,
796,
23884,
198,
42946,
17816,
6999,
12,
18831,
12,
2999,
62,
16,
12,
14986,
13,
11134,
20520,
796,
705,
12,
13237,
2026,
532,
4033,
669,
2026,
6,
198,
42946,
17816,
... | 2.467836 | 171 |
from uuid import UUID
from pydantic import BaseModel
from ynab_api.api_client import ApiClient
from ynab_api.apis import AccountsApi, TransactionsApi
from ynab_api.configuration import Configuration
from ynab_api.model.save_transaction import SaveTransaction
from ynab_api.model.save_transactions_wrapper import SaveTransactionsWrapper
from ..models import AccountConfig, FintsTransaction
from .base import BaseApp
API_URL = "https://api.youneedabudget.com/v1"
Config = NewYnabConfig
App = NewYnabApp
| [
6738,
334,
27112,
1330,
471,
27586,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
6738,
331,
77,
397,
62,
15042,
13,
15042,
62,
16366,
1330,
5949,
72,
11792,
198,
6738,
331,
77,
397,
62,
15042,
13,
499,
271,
1330,
35584,
... | 3.21519 | 158 |
# Copyright (c) 2017 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import threading
import traceback
from keystoneauth1.exceptions import HttpError
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from common.exceptions import VimDriverNewtonException
from newton_base.util import VimDriverUtils
from common.msapi import extsys
logger = logging.getLogger(__name__)
running_threads = {}
running_thread_lock = threading.Lock()
# assume volume is attached on server creation
| [
2,
15069,
357,
66,
8,
2177,
3086,
5866,
11998,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.722408 | 299 |
from typing import Any
from torch import Tensor
from tsts.cfg import CfgNode as CN
from tsts.core import LOSSES
from .loss import Loss
__all__ = ["MAPE"]
@LOSSES.register()
class MAPE(Loss):
    """MAPE (mean absolute percentage error) implementation.
    Example
    -------
    .. code-block:: yaml
        LOSSES:
          NAMES: ["MAPE"]
    """
    # NOTE(review): declared as @classmethod yet the first parameter is named
    # `self`; the parameter is never used so the loss still computes either
    # way, but this is unconventional -- TODO confirm it is intentional.
    @classmethod
    def forward(self, Z: Tensor, y: Tensor, y_mask: Tensor) -> Tensor:
        """Return loss value.
        Parameters
        ----------
        Z : Tensor
            Prediction
        y : Tensor
            Target time series
        y_mask : Tensor
            Target time series mask (nonzero entries mark valid positions)
        Returns
        -------
        Tensor
            Loss value (scalar)
        """
        # Keep only the positions flagged valid by the mask.
        Z = Z[y_mask.bool()]
        y = y[y_mask.bool()]
        # Remove invalid elements (zero targets would divide by zero below)
        Z = Z[y != 0.0]
        y = y[y != 0.0]
        # Absolute percentage error, summed over the surviving elements...
        loss_val = ((Z - y) / y).abs()
        loss_val = loss_val.sum()
        # ...but normalized by the total mask count, so masked-in positions
        # with a zero target still enlarge the denominator.
        loss_val /= y_mask.sum()
        return loss_val
| [
6738,
19720,
1330,
4377,
198,
198,
6738,
28034,
1330,
309,
22854,
198,
6738,
256,
6448,
13,
37581,
1330,
327,
40616,
19667,
355,
31171,
198,
6738,
256,
6448,
13,
7295,
1330,
406,
18420,
1546,
198,
198,
6738,
764,
22462,
1330,
22014,
198... | 2.01833 | 491 |
import os, sys, urllib.request, json
import PySide6.QtQml
from PySide6.QtQuick import QQuickView
from PySide6.QtCore import QStringListModel, Qt, QUrl
from PySide6.QtGui import QGuiApplication
# Demo: fetch a JSON map of country codes -> names from country.io and show
# the sorted names in a QML list via a QStringListModel.
if __name__ == '__main__':
    url = "http://country.io/names.json"
    # Blocking HTTP fetch; raises urllib.error.URLError if unreachable.
    response = urllib.request.urlopen(url)
    data = json.loads(response.read().decode('utf-8'))
    print("Type ", type(data))
    #data= {1:"Court", 2:"Decision", 3:"Julian"}
    #Format and sort the data
    data_list = list(data.values())
    data_list.sort()
    #Set up the application window
    app = QGuiApplication(sys.argv)
    view = QQuickView()
    view.setResizeMode(QQuickView.SizeRootObjectToView)
    #Expose the list to the Qml code
    my_model = QStringListModel()
    my_model.setStringList(data_list)
    view.rootContext().setContextProperty("myModel",my_model)
    #Load the QML file (view.qml is expected next to this script)
    qml_file = os.path.join(os.path.dirname(__file__),"view.qml")
    view.setSource(QUrl.fromLocalFile(os.path.abspath(qml_file)))
    #Show the window
    if view.status() == QQuickView.Error:
        sys.exit(-1)
    view.show()
    #execute and cleanup
    app.exec_()
    del view
| [
11748,
28686,
11,
25064,
11,
2956,
297,
571,
13,
25927,
11,
33918,
198,
11748,
9485,
24819,
21,
13,
48,
83,
48,
4029,
198,
6738,
9485,
24819,
21,
13,
48,
83,
21063,
1330,
1195,
21063,
7680,
198,
6738,
9485,
24819,
21,
13,
48,
83,
... | 2.498929 | 467 |
#!/usr/bin/env python3
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, DiscreteHMCGibbs, Predictive
from jax import random
import jax
import os
import sys
import preprocessing
# from . import preprocessing
from ipynb.fs.full.models_NumPyro import model_infiniteSBP, model_finiteDPM, model_finiteDPM_extended
# post processing
import scipy
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
def convert_h_to_seq(h, alphabet):
    """Map a 1-D numeric haplotype vector to its DNA string.

    Each entry of ``h`` is cast to int and used as an index into
    ``alphabet`` (e.g. ``'ACGT-'``); the looked-up characters are joined
    in order into a single string.
    """
    chars = []
    for value in h:
        chars.append(alphabet[int(value)])
    return ''.join(chars)
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5], sys.argv[6])
#(freads_in, fref_in, output_dir, num_clusters, model, alphabet = 'ACGT-')
# ./run_mcmc.py ../../../test_data/super_small_ex2/seqs.fasta ../../../test_data/super_small_ex2/ref.fasta ./Output/ 10 finiteDPM ACGT-
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
299,
32152,
305,
198,
11748,
299,
32152,
305,
13,
17080,
2455,
507,
355,
1233,
198,
6738,
299,
32152,
305,
13,
259,
2232,
1330,
13122,
9655,
11,
399,
3843,
50,
11,
8444,
8... | 2.563131 | 396 |
"""Add Aggregate*.last_build_id
Revision ID: 5896e31725d
Revises: 166d65e5a7e3
Create Date: 2013-12-05 13:50:57.818995
"""
# revision identifiers, used by Alembic.
revision = '5896e31725d'
down_revision = '166d65e5a7e3'
from alembic import op
import sqlalchemy as sa
| [
37811,
4550,
19015,
49373,
24620,
12957,
62,
11249,
62,
312,
198,
198,
18009,
1166,
4522,
25,
642,
48712,
68,
34125,
1495,
67,
198,
18009,
2696,
25,
26753,
67,
2996,
68,
20,
64,
22,
68,
18,
198,
16447,
7536,
25,
2211,
12,
1065,
12,
... | 2.394737 | 114 |
# -*- coding: UTF-8 -*-
from plc800stg.profiles import PlcProfilesParser
from plc800stg.utils import assert_list_dict_eq
from ast import literal_eval
# Mamba-style spec: parse a PLC800 profile dump and compare it against the
# expected result stored alongside the input fixture.
with description('Parse profiles from PLC800 with plc800stg'):
    with it('Parsing profiles'):
        input_profiles_plc = open('spec/data/02CHBascul2002110245.txt').read()
        # "Bascul" presumably selects which profile set to parse -- TODO confirm.
        profiles = PlcProfilesParser(input_profiles_plc, "Bascul")
        expected_result = literal_eval(open('spec/data/02CHBascul2002110245_result.txt').read())
        # Compare two lists of dicts; 'name' is presumably the matching key.
        assert_list_dict_eq(expected_result, profiles.profiles, 'name')
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
6738,
458,
66,
7410,
301,
70,
13,
5577,
2915,
1330,
1345,
66,
15404,
2915,
46677,
198,
6738,
458,
66,
7410,
301,
70,
13,
26791,
1330,
6818,
62,
4868,
62,
11600,
62,
27363... | 2.626168 | 214 |
'''
Copyright (C) 2014-2016 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import socket
import select
import time
import sys
buffer_size = 4096
delay = 0.0001
forward_to = ('dmdsoftware.net', 80)
#
# This class is contains code contributed from http://voorloopnul.com/blog/a-python-proxy-in-less-than-100-lines-of-code/
#
| [
7061,
6,
198,
220,
220,
220,
15069,
357,
34,
8,
1946,
12,
5304,
49427,
2799,
293,
628,
220,
220,
220,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
220,
220,
220,
340,
739,
262,
2846,
286,
... | 3.348592 | 284 |
"""
"""
from typing import Any, NoReturn, Sequence, Union, Optional, List, Tuple
from numbers import Real
import numpy as np
import torch
from torch import Tensor
from .base import Augmenter
__all__ = ["RandomFlip",]
class RandomFlip(Augmenter):
    """
    Randomly flips the sign of ECG signals (multiplies by -1),
    optionally per channel (lead).
    """
    __name__ = "RandomFlip"
    # NOTE(review): the mutable default `prob=[0.4,0.2]` is never mutated
    # (it is converted to a fresh np.array in __init__), so it is safe here,
    # though a tuple default would be more conventional.
    def __init__(self,
                 fs:Optional[int]=None,
                 per_channel:bool=True,
                 prob:Union[Sequence[float],float]=[0.4,0.2],
                 inplace:bool=True,
                 **kwargs:Any) -> None:
        """ finished, checked,
        Parameters
        ----------
        fs: int, optional,
            sampling frequency of the ECGs to be augmented
        per_channel: bool, default True,
            whether to flip each channel independently.
        prob: sequence of float or float, default [0.4,0.2],
            probability of performing flip,
            the first probability is for the batch dimension,
            the second probability is for the lead dimension.
        inplace: bool, default True,
            if True, ECG signal tensors will be modified inplace
        kwargs: Keyword arguments.
        """
        super().__init__()
        self.fs = fs
        self.per_channel = per_channel
        self.inplace = inplace
        self.prob = prob
        # Broadcast a scalar probability to (batch_prob, lead_prob).
        if isinstance(self.prob, Real):
            self.prob = np.array([self.prob, self.prob])
        else:
            self.prob = np.array(self.prob)
        assert (self.prob >= 0).all() and (self.prob <= 1).all(), \
            "Probability must be between 0 and 1"
    def forward(self, sig:Tensor, label:Optional[Tensor], *extra_tensors:Sequence[Tensor], **kwargs:Any) -> Tuple[Tensor, ...]:
        """ finished, checked,
        Parameters
        ----------
        sig: Tensor,
            the ECGs to be augmented, of shape (batch, lead, siglen)
        label: Tensor, optional,
            label tensor of the ECGs,
            not used, but kept for consistency with other augmenters
        extra_tensors: sequence of Tensors, optional,
            not used, but kept for consistency with other augmenters
        kwargs: keyword arguments,
            not used, but kept for consistency with other augmenters
        Returns
        -------
        sig: Tensor,
            the augmented ECGs
        label: Tensor,
            the label tensor of the augmented ECGs, unchanged
        extra_tensors: sequence of Tensors, optional,
            if set in the input arguments, unchanged
        """
        batch, lead, siglen = sig.shape
        if not self.inplace:
            sig = sig.clone()
        # A zero batch-level probability disables the augmentation entirely.
        if self.prob[0] == 0:
            return (sig, label, *extra_tensors)
        if self.per_channel:
            # (batch, lead, 1) sign mask: -1 where a flip is applied.
            # get_indices (from Augmenter) presumably draws a random subset of
            # indices with the given probability -- TODO confirm.
            flip = torch.ones((batch,lead,1), dtype=sig.dtype, device=sig.device)
            for i in self.get_indices(prob=self.prob[0], pop_size=batch):
                flip[i, self.get_indices(prob=self.prob[1], pop_size=lead), ...] = -1
            sig = sig.mul_(flip)
        else:
            # One sign per batch element, applied to every lead.
            flip = torch.ones((batch,1,1), dtype=sig.dtype, device=sig.device)
            flip[self.get_indices(prob=self.prob[0], pop_size=batch), ...] = -1
            sig = sig.mul_(flip)
        return (sig, label, *extra_tensors)
    def extra_repr_keys(self) -> List[str]:
        """
        Keys shown in repr, extending those of the superclass.
        """
        return ["per_channel", "prob", "inplace",] + super().extra_repr_keys()
| [
37811,
198,
37811,
198,
198,
6738,
19720,
1330,
4377,
11,
1400,
13615,
11,
45835,
11,
4479,
11,
32233,
11,
7343,
11,
309,
29291,
198,
6738,
3146,
1330,
6416,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
... | 2.162231 | 1,578 |
import config
from tlgbotcore import TlgBotCore
import logging
logging.basicConfig(level=logging.INFO)
# Build the Telegram bot: all credentials, admin ids, proxy and DB settings
# are read from the local `config` module; plugins load from ./plugins_bot.
tlg = TlgBotCore(session=config.TLG_APP_NAME,
                 plugin_path='plugins_bot',
                 connection_retries=None,
                 api_id=config.TLG_APP_API_ID,
                 api_hash=config.TLG_APP_API_HASH,
                 bot_token=config.I_BOT_TOKEN,
                 admins=config.TLG_ADMIN_ID_CLIENT,
                 proxy_key=config.TLG_PROXY_KEY,
                 proxy_server=config.TLG_PROXY_SERVER,
                 proxy_port=config.TLG_PROXY_PORT,
                 type_db=config.TYPE_DB)
# Block the main thread until the Telegram session disconnects.
tlg.run_until_disconnected()
| [
11748,
4566,
198,
6738,
256,
75,
70,
13645,
7295,
1330,
309,
75,
70,
20630,
14055,
198,
198,
11748,
18931,
198,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
8,
198,
198,
28781,
70,
796,
309,
75,
70,
20630,
... | 1.80109 | 367 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#PYTHON PROGRAM TO SWAP TWO VARIABLES
#DESIGNED BY: SRIJITH R
#REFERENCE NUMBER: 21004191
# Read the two integers to swap.
x, y = int(input("enter num1")), int(input("enter num2"))
print("before swapping", x, y)
# Pythonic swap: tuple packing/unpacking -- no temporary variable needed.
x, y = y, x
print("after swapping", x, y)
| [
2,
47,
56,
4221,
1340,
46805,
5390,
12672,
2969,
35288,
569,
1503,
3539,
9148,
1546,
198,
2,
30910,
16284,
1961,
11050,
25,
311,
7112,
41,
10554,
371,
198,
2,
2200,
24302,
18310,
36871,
13246,
25,
2310,
22914,
26492,
198,
198,
87,
11,... | 2.276596 | 94 |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# NOTE(review): `path` is not defined in this file -- it is presumably
# injected by the grading platform before this block runs.
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
# look at the first five columns
dataset.head(5)
# Check if there's any column which is not useful and remove it like the column id
dataset.drop(columns='Id',inplace=True)
# check the statistical description
dataset.describe()
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns
#number of attributes (exclude target)
size = len(dataset['Cover_Type'])
#x-axis has target attribute to distinguish between classes
x = dataset['Cover_Type']
#y-axis shows values of an attribute
y = dataset.drop(['Cover_Type'], 1)
#Plot violin for all attributes
# NOTE(review): `size` is an int, so passing it as the first positional (x)
# argument of sns.violinplot looks wrong -- TODO confirm intent.
ax = sns.violinplot(size,data=dataset)
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
# Correlation analysis over the first ten (numeric) columns.
subset_train = dataset.iloc[:, 0:10]
data_corr = subset_train.corr()
sns.heatmap(data_corr)
correlation = data_corr.unstack().sort_values(ascending=False, kind = 'quicksort')
# Keep only strong correlations (|r| > 0.5), excluding the self-correlation 1s.
corr_var_list = correlation[(correlation != 1) & ((correlation > upper_threshold) | (correlation < lower_threshold))]
print(corr_var_list)
# --------------
#Import libraries
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern versions expose train_test_split via sklearn.model_selection --
# TODO confirm the targeted scikit-learn version.
from sklearn import cross_validation
from sklearn.preprocessing import StandardScaler
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X = dataset.drop(['Cover_Type'],axis=1)
Y = dataset['Cover_Type'].copy()
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size = 0.2, random_state = 0)
scaler = StandardScaler()
#Standardized
#Apply transform only for non-categorical data (the first ten columns)
X_train_temp = scaler.fit_transform(X_train.iloc[:, :10])
X_test_temp = scaler.transform(X_test.iloc[:, :10])
#Concatenate non-categorical data and categorical
X_train1 = numpy.concatenate((X_train_temp, X_train.iloc[:, 10:len(dataset.columns) - 1]), axis = 1)
X_test1 = numpy.concatenate((X_test_temp, X_test.iloc[:,10:len(dataset.columns) - 1]), axis = 1)
scaled_features_train_df = pd.DataFrame(X_train1, index = X_train.index, columns = X_train.columns)
scaled_features_test_df = pd.DataFrame(X_test1, index = X_test.index, columns = X_test.columns)
# --------------
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif
# Write your solution here:
# Univariate feature selection: keep the top 20% of features by ANOVA F-score.
skb = SelectPercentile(score_func = f_classif, percentile = 20)
predictors = skb.fit_transform(X_train1, Y_train)
scores = skb.scores_
Features = X_train.columns
# Rank every feature by its score and keep the 11 best names.
top_k_predictors=list(pd.DataFrame({"Features": Features, "Scores": scores}).sort_values(by = "Scores",ascending = False).iloc[:11, 0])
print(top_k_predictors)
# --------------
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score
clf = OneVsRestClassifier(LogisticRegression())
clf1 = OneVsRestClassifier(LogisticRegression())
##Computing values for the all-features model
model_fit_all_features = clf1.fit(X_train, Y_train)
predictions_all_features = clf1.predict(X_test)
score_all_features = accuracy_score(Y_test, predictions_all_features)
print(score_all_features)
##Computing values for the top-features model
model_fit_top_features = clf.fit(scaled_features_train_df[top_k_predictors], Y_train)
predictions_top_features = model_fit_top_features.predict(scaled_features_test_df[top_k_predictors])
score_top_features = accuracy_score(Y_test, predictions_top_features)
print(score_top_features)
| [
2,
220,
26171,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
1330,
662,
36948,
198,
198,
2,
6978,
1058,
9220,
3108,
198,
198,
2,
6127,
4940,
994,
198,
198,
2,
1100,
262,
27039,
198,
198,
19608,
292,
316,
796,
279,
... | 2.960843 | 1,328 |
'''
Temperature Scaling
https://github.com/gpleiss/temperature_scaling
Modified
'''
import torch
from torch import nn, optim
from torch.nn import functional as F
from sklearn.calibration import calibration_curve
| [
7061,
6,
198,
42492,
1446,
4272,
198,
5450,
1378,
12567,
13,
785,
14,
70,
1154,
747,
14,
11498,
21069,
62,
1416,
4272,
198,
5841,
1431,
198,
7061,
6,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
11,
6436,
198,
6738,
28034,
13... | 3.435484 | 62 |
import sys
import pandas as pd
sys.path.insert(0, "../")
from oam.score.isolation_path import IsolationPath
from oam.search.simple_combination import SimpleCombination
from oam.visualization import visualize_oam_results
# Outlier aspect mining demo: score rows of the outliers dataset with
# isolation-path depth and search feature subspaces for the best explanation.
df = pd.read_csv("../datasets/df_outliers.csv")
# NOTE(review): `correlation_list` is never used below -- leftover?
correlation_list = []
# Isolation-path scorer: 256-row subsamples, 600 random paths per estimate.
ipath = IsolationPath(subsample_size=256, number_of_paths=600)
# Search over 2- to 4-dimensional subspaces drawn from the listed features.
search = SimpleCombination(
    ipath,
    min_items_per_subspace=2,
    max_items_per_subspace=4,
    dimensions=[
        "variation_mean",
        "variation_std",
        "up_count",
        "down_count",
        "top_15_variation_mean",
        "top_15_variation_std",
    ],
)
# 41 is presumably the row index of the outlier to explain -- TODO confirm;
# then plot the top 5 resulting subspaces.
result_df = search.search(df, 41)
visualize_oam_results(result_df, 5)
| [
11748,
25064,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
366,
40720,
4943,
198,
6738,
267,
321,
13,
26675,
13,
271,
21417,
62,
6978,
1330,
1148,
21417,
15235,
198,
6738,
267,
321,
13,... | 2.360927 | 302 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import cairo
from object import Object
from scale import Scale
from objects import *
class Image(Object):
    """This class represents an image"""
    # NOTE(review): overrides the class-level __name__ attribute -- presumably
    # used by the framework for display/serialization; confirm against Object.
    __name__ = "Image"
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
1275,
7058,
198,
6738,
2134,
1330,
9515,
198,
6738,
5046,
1330,
21589,
198,
6738,
5563,
1330,
1... | 3.013514 | 74 |
#!/usr/bin/env python
# Initial Date: June 2020
# Author: Rahul Bhadani
# Copyright (c) Rahul Bhadani, Arizona Board of Regents
# All rights reserved.
""" This script helps launch a fleet of n cars along x-axis. """
import time
import signal
import signal
import numpy as np
import glob
import os
from .layout import layout
'''
Summary of Class layout:
This class requires a ros package 'Sparkle'
Attributes:
1. R: Radius of the Circle
2. theta: angular separation of two consecutive vehicle on the circle
and all attributes of super class
Functions:
1. __init__(circumference, n_vehicles): basically a constructor
'''
class lane(layout, object):
    '''
    `lane`: Simulation of Connected-and-Intelligent Vehicle on a straight line.
    Inherits from its superclass `layout`.
    Parameters
    -------------
    kwargs
        variable keyword arguments
        n_vehicles: `integer`
            Number of vehicles on lane in simulation
            Default Value: 1
        vehicle_spacing: `double`
            Space between two vehicles
        include_laser: `bool array`
            Whether to include laser sensor or not
    Attributes
    ------------
    theta: `double`
        Angular Separation of Cars on Circular Trajectory
    L: `double`
        Wheelbase of each car
    car_to_bumper: `double`
        Length of car from bumper to bumper
    See Also
    ---------
    layout: superclass of `lane`
    '''
    def simulate(self, leader_vel, logdir, **kwargs):
        '''
        Class method `simulate` specifies state-based model for simulation of vehicles on circular trajectory.
        - `super.create()` -> `super.spawn()` -> `super.control()` -> `super.rviz()`
        - Simulation runs for specified `log_time`
        - Retrieves the bag file recorded.
        Parameters
        ------------
        leader_vel: `double`
            Velocity of the lead vehicle, forwarded to `control()`.
        logdir: `string`
            Directory/Path where bag files and other data files recording simulation statistics will be saved.
        kwargs
            control_method: `string`, default "ovftl"
                Controller type, forwarded to `control()`.
            logdata: `bool`, default False
                Whether to record data, forwarded to `control()`.
        Returns
        -----------
        `string`:
            The full path of the bag file recorded for the simulation.
        '''
        self.create()
        # spawn all the vehicles
        self.spawn() # spawn calls relevant functions to start roscore, gzclient, and rviz
        # give the spawned processes time to come up before controlling them
        time.sleep(4)
        # spacing minus car length: the free (bumper-to-bumper) gap
        initial_distance = self.vehicle_spacing - self.car_to_bumper
        control_method = kwargs.get("control_method", "ovftl")
        logdata = kwargs.get("logdata", False)
        # self.control(leader_vel= leader_vel, str_angle = 0.0, control_method = "uniform" ,logdir=logdir)
        self.control(leader_vel=leader_vel, str_angle=0.0 , control_method=control_method, initial_distance =initial_distance , logdir = logdir, logdata=logdata)
        self.rviz()
        # let the simulation (and its recording) run for `log_time` seconds
        time.sleep(self.log_time)
        print('Simulation complete, time to terminate')
        self.destroy(signal.SIGINT)
        return self.bagfile
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
20768,
7536,
25,
2795,
12131,
198,
2,
6434,
25,
48543,
16581,
324,
3216,
198,
2,
15069,
357,
66,
8,
220,
48543,
16581,
324,
3216,
11,
7943,
5926,
286,
3310,
658,
198,
2,
1439,
2... | 2.594017 | 1,170 |
if __name__=='__main__':
a=int(input("请输入一个整数:"))
zs(a)
| [
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
197,
64,
28,
600,
7,
15414,
7203,
46237,
115,
164,
122,
241,
17739,
98,
31660,
10310,
103,
46763,
112,
46763,
108,
171,
120,
248,
48774,
198,
197,
89,
82,
7,
64,
8,
198
] | 1.318182 | 44 |
#!/usr/bin/env python3
##############################################################################################
# #
# Program purpose: Counts the occurrences of character of a given text in a text #
# file, that is, it finds occurrences of each character in file. # #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : September 5, 2019 #
# #
##############################################################################################
if __name__ == "__main__":
file_name = input("Enter file name: ")
file = open(file_name, "r")
if file.mode == "r":
contents = file.readlines()
new_data = process_data(list_string=contents)
print(f"Processed data:\n{new_data}")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
220,
21015,
18,
198,
198,
29113,
29113,
14468,
7804,
4242,
2235,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.887946 | 589 |
from enum import IntEnum
from eth_typing.evm import HexAddress
from eth_utils.units import units
from raiden_contracts.utils.signature import private_key_to_address
MAX_UINT256 = 2 ** 256 - 1
MAX_UINT192 = 2 ** 192 - 1
MAX_UINT32 = 2 ** 32 - 1
FAKE_ADDRESS = "0x03432"
EMPTY_ADDRESS = HexAddress("0x0000000000000000000000000000000000000000")
EMPTY_BALANCE_HASH = b"\x00" * 32
EMPTY_ADDITIONAL_HASH = b"\x00" * 32
EMPTY_LOCKSROOT = b"\x00" * 32
EMPTY_SIGNATURE = b"\x00" * 65
passphrase = "0"
FAUCET_PRIVATE_KEY = "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
FAUCET_ADDRESS = private_key_to_address(FAUCET_PRIVATE_KEY)
FAUCET_ALLOWANCE = 100 * int(units["ether"])
CONTRACT_DEPLOYER_ADDRESS = FAUCET_ADDRESS
| [
6738,
33829,
1330,
2558,
4834,
388,
198,
198,
6738,
4555,
62,
774,
13886,
13,
1990,
76,
1330,
22212,
20231,
198,
6738,
4555,
62,
26791,
13,
41667,
1330,
4991,
198,
198,
6738,
9513,
268,
62,
28484,
82,
13,
26791,
13,
12683,
1300,
1330,... | 2.513699 | 292 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
gdaltindex.py
---------------------
Date : February 2015
Copyright : (C) 2015 by Pedro Venancio
Email : pedrongvenancio at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Pedro Venancio'
__date__ = 'February 2015'
__copyright__ = '(C) 2015, Pedro Venancio'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsMapLayer,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingException,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterDefinition,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterVectorDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
17174,
17174,
4557,
8162,
198,
220,
220,
220,
308,
67,
2501,
9630,
13,
9078,
198,
220,
220,
220,
41436,
12,
198,
220,
220,
220,
7536,
220,
220,
220,
... | 2.174888 | 892 |
from ipywidgets import interact
@interact(max_depth=(1, 10))
| [
198,
6738,
20966,
88,
28029,
11407,
1330,
9427,
198,
198,
31,
3849,
529,
7,
9806,
62,
18053,
16193,
16,
11,
838,
4008,
198
] | 2.73913 | 23 |
import time
import random
import itertools
if __name__ == '__main__':
zagadka = 'jedziemy sobie pociągiem i jestem trochę zmęczony, ale jednak wesoło'
print(f'Hasło to:\n{zagadka}\n')
print('Losowe przetasowania jego liter to:')
# for p in regular_permuts(zagadka):
for p in random_permuts(zagadka):
print(p)
time.sleep(0.1)
| [
11748,
640,
198,
11748,
4738,
198,
11748,
340,
861,
10141,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1976,
363,
324,
4914,
796,
705,
73,
276,
17027,
3065,
523,
12590,
279,
1733,
128,
... | 2.085714 | 175 |
import os
import sys
# TODO make this less brittle
sys.path = [os.path.join(os.path.dirname(__file__), '../../build/src/python/')] + sys.path
import corrade
import corrade.containers
import corrade.pluginmanager
import magnum
import magnum.gl
import magnum.meshtools
import magnum.platform
import magnum.platform.egl
import magnum.platform.glx
import magnum.platform.glfw
import magnum.platform.sdl2
import magnum.primitives
import magnum.shaders
import magnum.scenegraph
import magnum.trade
# So the doc see everything
# TODO: use just +=, m.css should reorder this on its own
corrade.__all__ = ['containers', 'pluginmanager', 'BUILD_STATIC', 'BUILD_MULTITHREADED', 'TARGET_UNIX', 'TARGET_APPLE', 'TARGET_IOS', 'TARGET_IOS_SIMULATOR', 'TARGET_WINDOWS', 'TARGET_WINDOWS_RT', 'TARGET_EMSCRIPTEN', 'TARGET_ANDROID']
magnum.__all__ = ['math', 'gl', 'meshtools', 'platform', 'primitives', 'shaders', 'scenegraph', 'trade', 'BUILD_STATIC', 'TARGET_GL', 'TARGET_GLES', 'TARGET_GLES2', 'TARGET_WEBGL', 'TARGET_VK'] + magnum.__all__
# hide values of the preprocessor defines to avoid confusion by assigning a
# class without __repr__ to them
# TODO: more systematic solution directly in m.css
corrade.BUILD_STATIC = DoNotPrintValue()
corrade.BUILD_MULTITHREADED = DoNotPrintValue()
corrade.TARGET_UNIX = DoNotPrintValue()
corrade.TARGET_APPLE = DoNotPrintValue()
corrade.TARGET_IOS = DoNotPrintValue()
corrade.TARGET_IOS_SIMULATOR = DoNotPrintValue()
corrade.TARGET_WINDOWS = DoNotPrintValue()
corrade.TARGET_WINDOWS_RT = DoNotPrintValue()
corrade.TARGET_EMSCRIPTEN = DoNotPrintValue()
corrade.TARGET_ANDROID = DoNotPrintValue()
magnum.BUILD_STATIC = DoNotPrintValue()
magnum.TARGET_GL = DoNotPrintValue()
magnum.TARGET_GLES = DoNotPrintValue()
magnum.TARGET_GLES2 = DoNotPrintValue()
magnum.TARGET_WEBGL = DoNotPrintValue()
magnum.TARGET_VK = DoNotPrintValue()
# TODO ugh... can this be expressed directly in pybind?
corrade.__annotations__ = {
'BUILD_STATIC': bool,
'BUILD_MULTITHREADED': bool,
'TARGET_UNIX': bool,
'TARGET_APPLE': bool,
'TARGET_IOS': bool,
'TARGET_IOS_SIMULATOR': bool,
'TARGET_WINDOWS': bool,
'TARGET_WINDOWS_RT': bool,
'TARGET_EMSCRIPTEN': bool,
'TARGET_ANDROID': bool
}
magnum.__annotations__ = {
'BUILD_STATIC': bool,
'TARGET_GL': bool,
'TARGET_GLES': bool,
'TARGET_GLES2': bool,
'TARGET_WEBGL': bool,
'TARGET_VK': bool
}
magnum.gl.__annotations__ = {
'default_framebuffer': magnum.gl.DefaultFramebuffer
}
magnum.shaders.Flat2D.__annotations__ = {
'POSITION': magnum.gl.Attribute,
'TEXTURE_COORDINATES': magnum.gl.Attribute,
'COLOR3': magnum.gl.Attribute,
'COLOR4': magnum.gl.Attribute
}
magnum.shaders.Flat3D.__annotations__ = {
'POSITION': magnum.gl.Attribute,
'TEXTURE_COORDINATES': magnum.gl.Attribute,
'COLOR3': magnum.gl.Attribute,
'COLOR4': magnum.gl.Attribute
}
magnum.shaders.VertexColor2D.__annotations__ = {
'POSITION': magnum.gl.Attribute,
'COLOR3': magnum.gl.Attribute,
'COLOR4': magnum.gl.Attribute
}
magnum.shaders.VertexColor3D.__annotations__ = {
'POSITION': magnum.gl.Attribute,
'COLOR3': magnum.gl.Attribute,
'COLOR4': magnum.gl.Attribute
}
magnum.shaders.Phong.__annotations__ = {
'POSITION': magnum.gl.Attribute,
'NORMAL': magnum.gl.Attribute,
'TANGENT': magnum.gl.Attribute,
'TEXTURE_COORDINATES': magnum.gl.Attribute,
'COLOR3': magnum.gl.Attribute,
'COLOR4': magnum.gl.Attribute
}
PROJECT_TITLE = 'Magnum'
PROJECT_SUBTITLE = 'Python docs'
MAIN_PROJECT_URL = 'https://magnum.graphics'
INPUT_MODULES = [corrade, magnum]
INPUT_PAGES = [
'pages/index.rst',
'pages/building.rst',
'pages/api-conventions.rst',
'pages/changelog.rst',
'pages/credits.rst',
'../../../magnum-examples/doc/python/examples.rst'
]
INPUT_DOCS = [
'corrade.rst',
'corrade.containers.rst',
'corrade.pluginmanager.rst',
'magnum.rst',
'magnum.gl.rst',
'magnum.math.rst',
'magnum.platform.rst',
'magnum.scenegraph.rst',
'magnum.shaders.rst',
'magnum.trade.rst',
]
LINKS_NAVBAR2 = [
('C++ API', '../../../../magnum/build/doc-mcss/html/index.html', [])
]
PLUGINS = [
'm.code',
'm.components',
'm.dox',
'm.gh',
'm.htmlsanity',
'm.images',
'm.link',
'm.math',
'm.sphinx'
]
STYLESHEETS = [
'https://fonts.googleapis.com/css?family=Source+Sans+Pro:400,400i,600,600i%7CSource+Code+Pro:400,400i,600&subset=latin-ext',
'../css/m-dark+documentation.compiled.css'
]
FAVICON = '../favicon.ico'
M_DOX_TAGFILES = [
('../../../corrade/build/doc-mcss/corrade.tag', '../../../../corrade/build/doc-mcss/html/', ['Corrade::'], ['m-doc-external']),
('../../../magnum/build/doc-mcss/magnum.tag', '../../../../magnum/build/doc-mcss/html/', ['Magnum::'], ['m-doc-external'])
]
M_SPHINX_INVENTORY_OUTPUT = 'objects.inv'
M_SPHINX_INVENTORIES = [
('python.inv', 'https://docs.python.org/3/', [], ['m-doc-external']),
('numpy.inv', 'https://docs.scipy.org/doc/numpy/', [], ["m-doc-external"])
]
M_HTMLSANITY_SMART_QUOTES = True
PYBIND11_COMPATIBILITY = True
OUTPUT = '../../build/doc/python/'
PAGE_HEADER = """
.. container:: m-note m-success
Welcome to Python-flavored Magnum! Please note that, while already being
rather stable, this functionality is still considered *experimental* and
some APIs might get changed without preserving full backwards compatibility.
"""
FINE_PRINT = """
| Magnum Python docs. Part of the `Magnum project <https://magnum.graphics/>`_,
copyright © `Vladimír Vondruš <http://mosra.cz/>`_ and contributors, 2010–2019.
| Generated by `m.css Python doc generator <https://mcss.mosra.cz/documentation/python/>`_.
Contact the team via `GitHub <https://github.com/mosra/magnum>`_,
`Gitter <https://gitter.im/mosra/magnum>`_,
`e-mail <mailto:info@magnum.graphics>`_ or
`Twitter <https://twitter.com/czmosra>`_"""
| [
11748,
28686,
198,
11748,
25064,
198,
198,
2,
16926,
46,
787,
428,
1342,
49307,
198,
17597,
13,
6978,
796,
685,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
40720,
40720,
11249,
14,
10677... | 2.402187 | 2,469 |
# IMPORTS --------------------------------------------------------------
# Standard packages
import unittest
# Third party packages
import pandas as pd
# Local packages
from ccat.model.database.bucket import Bucket
import ccat.controller.feature.height as height
import ccat.controller.indicator.sma as sma
import ccat.controller.helper.df_x_df as df_x_df
# TESTS ----------------------------------------------------------------
# MAIN --------------------------------------------------------------
if __name__ == '__main__':
unittest.main() | [
2,
30023,
33002,
20368,
1783,
26171,
198,
198,
2,
8997,
10392,
198,
11748,
555,
715,
395,
198,
198,
2,
10467,
2151,
10392,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
10714,
10392,
198,
6738,
269,
9246,
13,
19849,
13,
48806,
... | 4.19697 | 132 |
class DeployerRepositoryTemplate:
"""Create a repository for continuous integration of deployer"""
@classmethod
@classmethod
@classmethod
| [
4871,
34706,
263,
6207,
13264,
30800,
25,
198,
220,
220,
220,
37227,
16447,
257,
16099,
329,
12948,
11812,
286,
6061,
263,
37811,
628,
220,
220,
220,
2488,
4871,
24396,
628,
220,
220,
220,
2488,
4871,
24396,
628,
220,
220,
220,
2488,
... | 3.568182 | 44 |
import json
from django.core.validators import RegexValidator
from django.http import HttpResponse, JsonResponse
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.forms.models import model_to_dict
from django.utils.decorators import method_decorator
from django import forms
from .models import Item, Review
#[\s\w+][\S]*
@method_decorator(csrf_exempt, name='dispatch')
class AddItemView(View):
"""View для создания товара."""
@method_decorator(csrf_exempt, name='dispatch')
class PostReviewView(View):
"""View для создания отзыва о товаре."""
| [
11748,
33918,
198,
6738,
220,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
797,
25636,
47139,
1352,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
449,
1559,
31077,
198,
6738,
42625,
14208,
13,
33571,
1330,
3582,
198,
... | 2.54661 | 236 |
"""
binclf xgbopt
==================================
Utils library for XGBoost optimization.
Author: Casokaks (https://github.com/Casokaks/)
Created on: Nov 1st 2018
"""
import math
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
def xgb_opt_no_estimators(alg, dtrain, target, predictors, eval_metric='logloss',
num_boost_round=1000, early_stopping_rounds=50,
cv_folds=5, cv_stratified=False, cv_shuffle=True,
seed=8888, verbose=None):
'''Find the optimal number of weak estimators (trees)
Parameters
----------
Returns
-------
'''
print('\n--- n_estimators optimization started @ {} ---\n'.format(
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
if cv_stratified == True:
cv = StratifiedKFold(n_splits=cv_folds, shuffle=cv_shuffle, random_state=seed)
print('StratifiedKFold cross-validation instantiated...')
else:
cv = KFold(n_splits=cv_folds, shuffle=cv_shuffle, random_state=seed)
print('KFold cross-validation instantiated...')
print('Search started...\n')
cvresult = xgb.cv(params=xgb_param,
dtrain=xgtrain,
metrics=eval_metric,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
folds = cv,
nfold=cv_folds,
stratified=cv_stratified,
shuffle=cv_shuffle,
seed=seed,
verbose_eval=verbose)
no_estimators = cvresult.shape[0]
score = cvresult.iloc[-1]
print('\nOptimal number of estimators = {}'.format(no_estimators))
print('Train {} = {:.4f} +/- {:.4f}'.format(eval_metric, score[2], score[3]))
print('Test {} = {:.4f} +/- {:.4f}'.format(eval_metric, score[0],score[1]))
print('\n--- n_estimators optimization completed @ {} ---\n'.format(
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return no_estimators, score
def print_opt_params_report(results, n_top=3):
'''Print top best results and parameters.
Parameters
----------
Returns
-------
'''
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
print('')
for candidate in candidates:
print('Model with rank: {0}'.format(i))
print('Mean validation score: {0:.4f} (std: {1:.4f})'.format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print('Parameters: {0}'.format(results['params'][candidate]))
print('')
def _update_params(params, results, keep_top, params_dict):
'''Re-Initialize parameters for search, based on given parameters dictionary and last results to keep.
Parameters
----------
Returns
-------
'''
# update min and max based on last results to keep
for k in params.keys():
param_temp = np.array([])
for n in range(1,keep_top+1):
idx = np.flatnonzero(results['rank_test_score'] == n)[0]
param_temp = np.append(param_temp, results['params'][idx][k])
params_dict[k]['min'] = min(param_temp)
params_dict[k]['max'] = max(param_temp)
# re-initialize params
return _init_params(params_dict)
def _init_params(params_dict):
'''Initialize parameters for search, based on given parameters dictionary.
Parameters
----------
Returns
-------
'''
params = {}
for k in params_dict.keys():
# addictive range
if params_dict[k]['type'] == 'add':
params[k] = np.arange(params_dict[k]['min'], params_dict[k]['max'], params_dict[k]['step'])
# multiplication range
elif params_dict[k]['type'] == 'multi':
params[k] = np.array([])
next_value = params_dict[k]['min']
while next_value <= params_dict[k]['max']:
params[k] = np.append(params[k], next_value)
next_value = next_value * params_dict[k]['step']
# error
else:
print('ERROR: params type can only be either add or multi')
sys.exit(-1)
# last check to add range max if not already included
if params_dict[k]['max'] not in params[k]:
params[k] = np.append(params[k], params_dict[k]['max'])
# if range of integers then tranform the array into an integer array
if sum(params[k] == np.array(params[k], dtype=int)) == len(params[k]):
params[k] = np.array(params[k], dtype=int)
return params
def xgb_opt_params(X, y, estimator, scoring='neg_log_loss', search_type='random',
params_dict = {'max_depth':{'init':5,'min':2,'max':10,'step':1,'type':'add'},
'min_child_weight':{'init':1,'min':1,'max':20,'step':1,'type':'add'},
'gamma':{'init':0.00,'min':0.00,'max':1.00,'step':0.01,'type':'add'},
'subsample':{'init':0.80,'min':0.50,'max':1.00,'step':0.10,'type':'add'},
'colsample_bytree':{'init':0.80,'min':0.50,'max':1.00,'step':0.10,'type':'add'},
'reg_alpha':{'init':0,'min':1e-7,'max':1e+1,'step':10,'type':'multi'},
'reg_lambda':{'init':1,'min':1e-4,'max':1e+3,'step':10,'type':'multi'},
'learning_rate':{'init':0.1,'min':1e-4,'max':1e+1,'step':10,'type':'multi'},
},
n_iter_max=5, keep_top_perc=0.20, n_iter_rnd=15,
cv_folds=5, cv_stratified=False, cv_shuffle=True, iid=False,
n_jobs=1, seed=8888, verbose=True):
'''Find optimal parameter(s) values exploring the defined search space through random or grid search.
Basic search algorithms (i.e. sklearn GridSearchCV, RandomizedSearchCV) are re-iterated for n_iter_max times,
narrowing the search boundaries according to the last keep_top_perc best solutions.
Parameters
----------
Returns
-------
'''
print('\n--- {} optimization started @ {} ---\n'.format(
list(params_dict.keys()), datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
if verbose == True:
verbose = cv_folds
else:
verbose = 0
# init parmeters
params = _init_params(params_dict)
search_space = np.prod(np.array([len(list(params[k])) for k in params.keys()]))
n_iter = 0
while n_iter < n_iter_max and search_space > 1:
print('Starting iteration {}, with a search space of {}'.format(n_iter+1, search_space))
for k in params.keys():
print('{} = {}'.format(k,params[k]))
# cross validation setup
if cv_stratified == True:
cv = StratifiedKFold(n_splits=cv_folds, shuffle=cv_shuffle, random_state=seed)
print('StratifiedKFold cross-validation instantiated...')
else:
cv = KFold(n_splits=cv_folds, shuffle=cv_shuffle, random_state=seed)
print('KFold cross-validation instantiated...')
# search
if search_type == 'random':
print('RANDOM search in progress...\n')
n_iter_rnd = min(n_iter_rnd,search_space)
search = RandomizedSearchCV(estimator=estimator,
param_distributions=params,
n_iter=n_iter_rnd,
scoring=scoring,
cv=cv,
iid=iid,
random_state=seed,
n_jobs=n_jobs,
verbose=verbose)
elif search_type == 'grid':
print('GRID search in progress...\n')
search = GridSearchCV(estimator=estimator,
param_grid=params,
scoring=scoring,
cv=cv,
iid=iid,
n_jobs=n_jobs,
verbose=verbose)
else:
print('ERROR: search_type must be either random or grid')
sys.exit(-1)
search.fit(X,y)
results = search.cv_results_
best_result = {'params':search.best_params_,
'score': search.best_score_}
# define solutions to prepare next itaration
keep_top = int(math.ceil(keep_top_perc * len(results['rank_test_score'])))
keep_top = min(max(keep_top, 1),len(results['rank_test_score']))
print_opt_params_report(results=results, n_top=keep_top)
# re-define params for next itaration
params = _update_params(params, results, keep_top, params_dict)
search_space = np.prod(np.array([len(list(params[k])) for k in params.keys()]))
n_iter += 1
print('\n--- {} optimization completed @ {} in {} iterations, with left {} iterations and a search space of {} ---\n'.format(
list(params_dict.keys()), datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
n_iter, n_iter_max-n_iter, search_space))
return best_result, results
def xgb_full_params_optimization(dtrain, target, predictors, eval_metric='logloss', search_type='random',
params_dict = {'max_depth':{'init':5,'min':2,'max':10,'step':1,'type':'add'},
'min_child_weight':{'init':1,'min':1,'max':20,'step':1,'type':'add'},
'gamma':{'init':0.00,'min':0.00,'max':1.00,'step':0.01,'type':'add'},
'subsample':{'init':0.80,'min':0.50,'max':1.00,'step':0.10,'type':'add'},
'colsample_bytree':{'init':0.80,'min':0.50,'max':1.00,'step':0.10,'type':'add'},
'reg_alpha':{'init':0,'min':1e-7,'max':1e+1,'step':10,'type':'multi'},
'reg_lambda':{'init':1,'min':1e-4,'max':1e+3,'step':10,'type':'multi'},
'learning_rate':{'init':0.1,'min':1e-4,'max':1e+1,'step':10,'type':'multi'},
},
num_boost_round=1000, early_stopping_rounds=50,
cv_folds=5, cv_stratified=True, cv_shuffle=True, iid=False,
n_iter_max=5, keep_top_perc=0.20, n_iter_rnd=15,
n_jobs=1, seed=8888, verbose=False):
'''XGBoost full optimization pipeline:
Step 0: Set initial guess for parameters (high learning rate);
Step 1: Tune number of estimators;
Step 2: Tune max_depth and min_child_weight;
Step 3: Tune gamma;
Step 4: Tune subsample and colsample_bytree;
Step 5: Tuning Regularization Parameters reg_alpha, reg_lambda;
Step 6: Tune learning rate;
Step 7: Optimize no_estimators again.
Parameters
----------
Returns
-------
Dictionary of best parameters
'''
print('\n> Optimization pipeline started @ {}\n'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# Step 0: Set initial guess for parameters (high learning rate)
xgb_opt = XGBClassifier(n_estimators=num_boost_round,
max_depth=params_dict['max_depth']['init'],
min_child_weight=params_dict['min_child_weight']['init'],
gamma=params_dict['gamma']['init'],
subsample=params_dict['subsample']['init'],
colsample_bytree=params_dict['colsample_bytree']['init'],
reg_alpha=params_dict['reg_alpha']['init'],
reg_lambda=params_dict['reg_lambda']['init'],
learning_rate=params_dict['learning_rate']['init'],
n_jobs=n_jobs,
seed=seed)
# Step 1: Tune number of estimators
no_estimators, _ = xgb_opt_no_estimators(alg=xgb_opt, dtrain=dtrain, target=target, predictors=predictors,
eval_metric=eval_metric, num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
seed=seed, verbose=verbose)
# Step 2: Tune max_depth and min_child_weight
xgb_opt = XGBClassifier(n_estimators=no_estimators,
max_depth=params_dict['max_depth']['init'],
min_child_weight=params_dict['min_child_weight']['init'],
gamma=params_dict['gamma']['init'],
subsample=params_dict['subsample']['init'],
colsample_bytree=params_dict['colsample_bytree']['init'],
reg_alpha=params_dict['reg_alpha']['init'],
reg_lambda=params_dict['reg_lambda']['init'],
learning_rate=params_dict['learning_rate']['init'],
n_jobs=n_jobs,
seed=seed)
scoring_metric = _map_eval_scoring_metric(eval_metric)
params_to_opt = {}
for k in ['max_depth','min_child_weight']:
params_to_opt[k] = params_dict[k]
best_result, _ = xgb_opt_params(X=dtrain[predictors], y=dtrain[target], estimator=xgb_opt,
scoring=scoring_metric, search_type=search_type, params_dict=params_to_opt,
n_iter_max=n_iter_max, keep_top_perc=keep_top_perc, n_iter_rnd=n_iter_rnd,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
iid=iid, n_jobs=n_jobs, seed=seed, verbose=verbose)
max_depth = best_result['params']['max_depth']
min_child_weight = best_result['params']['min_child_weight']
# Step 3: Tune gamma
xgb_opt = XGBClassifier(n_estimators=no_estimators,
max_depth=max_depth,
min_child_weight=min_child_weight,
gamma=params_dict['gamma']['init'],
subsample=params_dict['subsample']['init'],
colsample_bytree=params_dict['colsample_bytree']['init'],
reg_alpha=params_dict['reg_alpha']['init'],
reg_lambda=params_dict['reg_lambda']['init'],
learning_rate=params_dict['learning_rate']['init'],
n_jobs=n_jobs,
seed=seed)
scoring_metric = _map_eval_scoring_metric(eval_metric)
params_to_opt = {}
for k in ['gamma']:
params_to_opt[k] = params_dict[k]
best_result, _ = xgb_opt_params(X=dtrain[predictors], y=dtrain[target], estimator=xgb_opt,
scoring=scoring_metric, search_type=search_type, params_dict=params_to_opt,
n_iter_max=n_iter_max, keep_top_perc=keep_top_perc, n_iter_rnd=n_iter_rnd,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
iid=iid, n_jobs=n_jobs, seed=seed, verbose=verbose)
gamma = best_result['params']['gamma']
# Step 4: Tune subsample and colsample_bytree
xgb_opt = XGBClassifier(n_estimators=no_estimators,
max_depth=max_depth,
min_child_weight=min_child_weight,
gamma=gamma,
subsample=params_dict['subsample']['init'],
colsample_bytree=params_dict['colsample_bytree']['init'],
reg_alpha=params_dict['reg_alpha']['init'],
reg_lambda=params_dict['reg_lambda']['init'],
learning_rate=params_dict['learning_rate']['init'],
n_jobs=n_jobs,
seed=seed)
scoring_metric = _map_eval_scoring_metric(eval_metric)
params_to_opt = {}
for k in ['subsample','colsample_bytree']:
params_to_opt[k] = params_dict[k]
best_result, _ = xgb_opt_params(X=dtrain[predictors], y=dtrain[target], estimator=xgb_opt,
scoring=scoring_metric, search_type=search_type, params_dict=params_to_opt,
n_iter_max=n_iter_max, keep_top_perc=keep_top_perc, n_iter_rnd=n_iter_rnd,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
iid=iid, n_jobs=n_jobs, seed=seed, verbose=verbose)
subsample = best_result['params']['subsample']
colsample_bytree = best_result['params']['colsample_bytree']
# Step 5: Tuning Regularization Parameters reg_alpha, reg_lambda
xgb_opt = XGBClassifier(n_estimators=no_estimators,
max_depth=max_depth,
min_child_weight=min_child_weight,
gamma=gamma,
subsample=subsample,
colsample_bytree=colsample_bytree,
reg_alpha=params_dict['reg_alpha']['init'],
reg_lambda=params_dict['reg_lambda']['init'],
learning_rate=params_dict['learning_rate']['init'],
n_jobs=n_jobs,
seed=seed)
scoring_metric = _map_eval_scoring_metric(eval_metric)
params_to_opt = {}
for k in ['reg_alpha','reg_lambda']:
params_to_opt[k] = params_dict[k]
best_result, _ = xgb_opt_params(X=dtrain[predictors], y=dtrain[target], estimator=xgb_opt,
scoring=scoring_metric, search_type=search_type, params_dict=params_to_opt,
n_iter_max=n_iter_max, keep_top_perc=keep_top_perc, n_iter_rnd=n_iter_rnd,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
iid=iid, n_jobs=n_jobs, seed=seed, verbose=verbose)
reg_alpha = best_result['params']['reg_alpha']
reg_lambda = best_result['params']['reg_lambda']
# Step 6: Tune Learning Rate
xgb_opt = XGBClassifier(n_estimators=no_estimators,
max_depth=max_depth,
min_child_weight=min_child_weight,
gamma=gamma,
subsample=subsample,
colsample_bytree=colsample_bytree,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
learning_rate=params_dict['learning_rate']['init'],
n_jobs=n_jobs,
seed=seed)
scoring_metric = _map_eval_scoring_metric(eval_metric)
params_to_opt = {}
for k in ['learning_rate']:
params_to_opt[k] = params_dict[k]
best_result, _ = xgb_opt_params(X=dtrain[predictors], y=dtrain[target], estimator=xgb_opt,
scoring=scoring_metric, search_type=search_type, params_dict=params_to_opt,
n_iter_max=n_iter_max, keep_top_perc=keep_top_perc, n_iter_rnd=n_iter_rnd,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
iid=iid, n_jobs=n_jobs, seed=seed, verbose=verbose)
learning_rate = best_result['params']['learning_rate']
# Step 7: Tune number of estimators again
xgb_opt = XGBClassifier(n_estimators=num_boost_round,
max_depth=max_depth,
min_child_weight=min_child_weight,
gamma=gamma,
subsample=subsample,
colsample_bytree=colsample_bytree,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
learning_rate=learning_rate,
n_jobs=n_jobs,
seed=seed)
no_estimators, _ = xgb_opt_no_estimators(alg=xgb_opt, dtrain=dtrain, target=target, predictors=predictors,
eval_metric=eval_metric, num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
cv_folds=cv_folds, cv_stratified=cv_stratified, cv_shuffle=cv_shuffle,
seed=seed, verbose=verbose)
# Return optimal params
print('\n> Optimization pipeline completed @ {}\n'.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return {'max_depth':max_depth,
'min_child_weight':min_child_weight,
'gamma':gamma,
'subsample':subsample,
'colsample_bytree':colsample_bytree,
'reg_alpha':reg_alpha,
'reg_lambda':reg_lambda,
'n_estimators':no_estimators,
'learning_rate':learning_rate}
| [
37811,
201,
198,
8800,
565,
69,
2124,
22296,
8738,
201,
198,
10052,
855,
201,
198,
18274,
4487,
5888,
329,
1395,
4579,
78,
455,
23989,
13,
201,
198,
201,
198,
13838,
25,
11294,
482,
4730,
357,
5450,
1378,
12567,
13,
785,
14,
35155,
... | 1.75691 | 13,205 |
from flask import Flask
from model import clf
from model import X_test, y_test
# set FLASK_APP=app.py flask run
app = Flask(__name__)
@app.route("/")
@app.route('/score')
def score():
'''Scores the accuracy of the regression'''
return str(clf.score(X_test, y_test))
@app.route('/predict')
def predict():
'''Predicts the y values of the test data'''
return str(clf.predict(X_test))
@app.route('/iris')
def iris():
'''Displays the data within X_test'''
return str(X_test)
| [
6738,
42903,
1330,
46947,
198,
6738,
2746,
1330,
537,
69,
198,
6738,
2746,
1330,
1395,
62,
9288,
11,
331,
62,
9288,
198,
198,
2,
900,
9977,
1921,
42,
62,
24805,
28,
1324,
13,
9078,
42903,
1057,
198,
198,
1324,
796,
46947,
7,
834,
... | 2.55102 | 196 |
"""
To use this component, add the following to iterm2_print_user_vars:
iterm2_set_user_var python_version $(pyenv_prompt_info)
"""
import asyncio
import iterm2
iterm2.run_forever(main)
| [
37811,
198,
2514,
779,
428,
7515,
11,
751,
262,
1708,
284,
340,
7780,
17,
62,
4798,
62,
7220,
62,
85,
945,
25,
628,
220,
220,
220,
340,
7780,
17,
62,
2617,
62,
7220,
62,
7785,
21015,
62,
9641,
29568,
9078,
24330,
62,
16963,
457,
... | 2.635135 | 74 |
# -*- coding: utf-8 -*-
"""
Service test.
"""
import pytest
from flask import Flask
from flask.testing import FlaskClient
from werkzeug.exceptions import NotFound
from pheweb.serve.components.chip.fs_storage import FileChipDAO
from pheweb.serve.components.chip.model import JeevesContext, ChipDAO
from pheweb.serve.components.chip.service import chip, development, get_dao
from tests.pheweb.serve.components.chip.fs_storage_test import (
CHIP_CODING_FILE,
PLOT_ROOT_DIRECTORY,
)
@pytest.fixture(name="chip_dao")
def fixture_chip_dao() -> ChipDAO:
"""
Create chip DAO.
:return: chip DAO
"""
chip_data = CHIP_CODING_FILE
plot_root = PLOT_ROOT_DIRECTORY
return FileChipDAO(chip_data=chip_data, plot_root=plot_root)
@pytest.fixture(name="client")
def fixture_client(chip_dao: ChipDAO) -> FlaskClient:
"""
Create flask client.
:param chip_dao: chip dao
:return: flask client
"""
app = Flask(__name__, instance_relative_config=True)
app.register_blueprint(chip)
app.register_blueprint(development)
app.jeeves = JeevesContext(chip_dao=chip_dao)
with app.test_client() as client:
yield client
@pytest.fixture(name="bad_client")
def fixture_bad_client() -> FlaskClient:
"""
Create flask client.
:return: flask client
"""
app = Flask(__name__, instance_relative_config=True)
app.register_blueprint(chip)
app.register_blueprint(development)
app.jeeves = JeevesContext(chip_dao=None)
with app.test_client() as client:
yield client
def test_get_dao_missing() -> None:
"""
Test get doa with dao missing.
:return: None
"""
app = Flask(__name__, instance_relative_config=True)
app.jeeves = JeevesContext(chip_dao=None)
with pytest.raises(NotFound):
get_dao(current_app=app)
def test_get_dao(chip_dao) -> None:
"""
Test get DAO with DAO available.
:param chip_dao: chip DAO.
:return: None
"""
app = Flask(__name__, instance_relative_config=True)
app.jeeves = JeevesContext(chip_dao=chip_dao)
assert get_dao(current_app=app) == chip_dao
def test_chip_data(client) -> None:
"""
Test chip data.
:param client: client
:return: None
"""
response_value = client.get("/api/v1/chip_data")
assert response_value.status_code == 200
def test_chip_data_bad(bad_client) -> None:
"""
Test with bad chip data.
:param bad_client: bad client data.
:return: None
"""
response_value = bad_client.get("/api/v1/chip_data")
assert response_value.status_code == 404
def test_cluster_plot(client) -> None:
"""
Test cluster plot
:param client:
:return: None
"""
response_value = client.get("/api/v1/cluster_plot/1:1:A:C")
assert response_value.status_code == 200
response_value = client.get("/api/v1/cluster_plot/variant")
assert response_value.status_code == 404
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
16177,
1332,
13,
198,
37811,
198,
198,
11748,
12972,
9288,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
13,
33407,
1330,
46947,
11792,
198,
6738,
266,
9587... | 2.510693 | 1,169 |
#!/usr/bin/env python3
import optparse
import os
import sys
import chpl_bin_subdir, chpl_arch, chpl_compiler, chpl_platform, overrides
from chpl_home_utils import get_chpl_third_party
from utils import memoize, run_command
@memoize
@memoize
@memoize
if __name__ == '__main__':
_main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
2172,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
442,
489,
62,
8800,
62,
7266,
15908,
11,
442,
489,
62,
998,
11,
442,
489,
62,
5589,
5329,
11,
442,
489,... | 2.573913 | 115 |
#!/usr/bin/env python3
import socket
import os
import sys
import traceback
import random
from threading import Thread
PORT = int(os.environ.get("PORT", 24000))
flag = open('flag', 'r').readline()
start_server()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
17802,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
11748,
4738,
198,
6738,
4704,
278,
1330,
14122,
628,
198,
15490,
796,
493,
7,
418,
13,
268,
2... | 3.013889 | 72 |
# Copyright 2021 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
import datetime
| [
2,
15069,
33448,
26182,
198,
2,
49962,
739,
262,
347,
10305,
513,
12,
2601,
682,
13789,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
18,
12,
2601,
682,
198,
198,
11748,
4818,
8079,
628
] | 3.1 | 40 |
# 3x2 NN (populated random values)
inputs = [1.2, 5.1, 2.0] # (m1, m2, m3)
w1 = [0.2, 1.2, 1.6] # weights for n1 (from m1, m2, m3) → (w11, w21, w31)
w2 = [3.7, 1.0, 0.6] # weights for n2 (from m1, m2, m3) → (w12, w22, w32)
b1 = 1.2 # bias for n1
b2 = 0.3 # bias for n2
n1 = inputs[0] * w1[0] + inputs[1] * w1[1] + inputs[2] * w1[2] + b1
n2 = inputs[0] * w2[0] + inputs[1] * w2[1] + inputs[2] * w2[2] + b2
outputs = [n1, n2]
print(outputs) # calculated values for layer 2 (n1 & n2)
# [10.76, 11.04] | [
2,
513,
87,
17,
399,
45,
357,
12924,
4817,
4738,
3815,
8,
198,
198,
15414,
82,
796,
685,
16,
13,
17,
11,
642,
13,
16,
11,
362,
13,
15,
60,
1303,
357,
76,
16,
11,
285,
17,
11,
285,
18,
8,
198,
86,
16,
796,
685,
15,
13,
17... | 1.821818 | 275 |
from typing import Dict, List
from pydantic import BaseModel
| [
6738,
19720,
1330,
360,
713,
11,
7343,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
628
] | 3.875 | 16 |
import arcpy
import os
import math
fc = r'D:\Pythoning\288205C\Database\288205C.gdb\Sheets\sheets_1'
print "a"
input_gdb = r'D:\Pythoning\288205C\Database\288205C.gdb'
print "b.."
output_folder = r'D:\Pythoning\outPut'
print "c...."
xmlFolder=r'D:\Pythoning'
prj_file = r'D:\Pythoning\Nepal_Nagarkot_TM_81.prj'
sr = arcpy.SpatialReference(prj_file)
i = 1
print str(i) + "igzo..."
fields = ['SHAPE@','SHEET_NO']
with arcpy.da.SearchCursor(fc, fields) as rows:
for row in rows:
print "Starting Sheet : "
clipper = row[0]
gdb_name = str(row[1]) + '.gdb'
xml_name= "Nagarkot25000TM81GridGraticule"+".xml"
arcpy.CreateFileGDB_management(output_folder, gdb_name)
arcpy.env.workspace = input_gdb
x=output_folder + "/" + gdb_name
grid_dataset=arcpy.CreateFeatureDataset_management(x,"grid_graticule",sr)
feature_class=row[0]
area_of_interest= feature_class
outputlayer=str(row[1])
template=xmlFolder+"\\"+xml_name
arcpy.MakeGridsAndGraticulesLayer_cartography (template, area_of_interest, grid_dataset, outputlayer)
input_datasets = arcpy.ListDatasets('*', 'Feature')
for ds in input_datasets:
print str(ds).lower()
gdb = output_folder + '/' + gdb_name
sr = arcpy.SpatialReference(prj_file)
if str(ds) == "Admin_layer":
input_location = input_gdb + '/' + str(ds)
out_location = gdb + "/" + str(ds)
print "Copying Admin_layer...."
arcpy.Copy_management(input_location, out_location)
elif str(ds).lower() == "grid_graticule" or str(ds).lower() == "sheets" :
continue
else:
out_dataset = arcpy.CreateFeatureDataset_management(gdb, str(ds), sr)
in_dataset = input_gdb + '/' + str(ds)
arcpy.env.workspace = in_dataset
in_feature_class = arcpy.ListFeatureClasses()
for fc in in_feature_class:
print "Clipping " + str(fc)
out_fc = str(out_dataset) + '/' + str(fc)
arcpy.Clip_analysis(str(fc), clipper, out_fc)
print "Yatta..."
del rows
print " All done !"
| [
11748,
10389,
9078,
201,
198,
11748,
28686,
201,
198,
11748,
10688,
201,
198,
16072,
796,
374,
6,
35,
7479,
37906,
278,
59,
25270,
21261,
34,
59,
38105,
59,
25270,
21261,
34,
13,
70,
9945,
59,
3347,
1039,
59,
42011,
62,
16,
6,
201,
... | 1.903251 | 1,261 |
# -*- coding: utf-8 -*-
import json
import scrapy
from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
11748,
15881,
88,
198,
198,
6738,
7064,
13,
24425,
1330,
25522,
39792,
198,
6738,
7064,
13,
23814,
1330,
2269,
13210,
1559,
12727,
7449,
628
] | 3.309524 | 42 |
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""The names of the paths in a scheme.
"""
# end_pymotw_header
import sysconfig
for name in sysconfig.get_path_names():
print(name)
| [
2,
15069,
357,
66,
8,
3050,
15115,
5783,
9038,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
37811,
464,
3891,
286,
262,
13532,
287,
257,
7791,
13,
198,
37811,
198,
198,
2,
886,
62,
79,
4948,
313,
86,
62,
25677,
198,
11748,
25064,
... | 2.911765 | 68 |
from .search import SearchAPI
__version__ = "2.0.0"
__author__ = "Ed Kohlwey"
__all__ = ["SearchAPI"]
if __name__ == "__main__":
print("This module cannot be run on its own. Please use by running ",
"\"from yanytapi import SearchAPI\"")
exit(0)
| [
6738,
764,
12947,
1330,
11140,
17614,
198,
198,
834,
9641,
834,
796,
366,
17,
13,
15,
13,
15,
1,
198,
834,
9800,
834,
796,
366,
7407,
24754,
75,
732,
88,
1,
198,
834,
439,
834,
796,
14631,
18243,
17614,
8973,
198,
198,
361,
11593,... | 2.572816 | 103 |
"""Example 03: Writing LAMMPS Data File for a periodic system"""
from quick import write_lmpdat
# path to the xyz file
from constants import PACKAGE_DIR # just the package directory
xyz = PACKAGE_DIR + "/examples/rubber.xyz"
# you can open rubber.xyz as a text file
# notice how the Lattice is defined in the second line
# the software Ovito can write xyz files with Lattice for you
#
# you can also open it in Ovito or another visualization software
# it's a little chaotic, since it's an amorphous polymer
# (about natural rubber: https://en.wikipedia.org/wiki/Natural_rubber)
# path where the LAMMPS Data File will be writen
lmp = PACKAGE_DIR + "/examples/rubber.lmp"
write_lmpdat(xyz, lmp, periodic="xyz")
# notice that now we specify that the system is periodic in all directions
#
# this will take a while, since there are 2608 atoms in the system
#
# complete the par file with reasonable parameters (see ex01 for examples)
# pressing any key will give you the LAMMPS Data File
| [
37811,
16281,
7643,
25,
22183,
406,
2390,
44,
3705,
6060,
9220,
329,
257,
27458,
1080,
37811,
198,
198,
6738,
2068,
1330,
3551,
62,
75,
3149,
19608,
198,
198,
2,
3108,
284,
262,
2124,
45579,
2393,
198,
6738,
38491,
1330,
47035,
11879,
... | 3.412371 | 291 |
from unittest import TestCase, TestLoader
from sqlite3 import IntegrityError
from copy import copy, deepcopy
import os
from shutil import copytree, rmtree
from random import randint, random
import uuid
from tempfile import gettempdir
from shapely.geometry import Point
import shapely.wkb
from aequilibrae.project import Project
from ...data import siouxfalls_project
TestLoader.sortTestMethodsUsing = None
| [
6738,
555,
715,
395,
1330,
6208,
20448,
11,
6208,
17401,
198,
198,
6738,
44161,
578,
18,
1330,
39348,
12331,
198,
6738,
4866,
1330,
4866,
11,
2769,
30073,
198,
11748,
28686,
198,
6738,
4423,
346,
1330,
4866,
21048,
11,
374,
16762,
631,
... | 3.693694 | 111 |
# This Python file uses the following encoding: utf-8
''' genRAW.py tool for generating linguistic reference material
Usage: "python genRAW.py <text files directory> <output directory> <language>"
Example (on Windows): "python genRAW.py C:/TextCorpus/English/Financial/ C:/Raw/English/ en
'''
# import the usual suspects...
import sys, os, pprint, time
# run "pip install iknowpy" if "import iknowpy" fails.
import iknowpy
#
# Following are default runtime parameters if no command line parameters are present.
#
in_path_par = "C:/P4/Users/jdenys/text_input_data/en/" # input directory with text files
out_path_par = "C:/tmp/" # output directory to write the RAW file
language_par = "en" # language selector
OldStyle = True # mimics the old-style RAW file format
# print(sys.argv)
if (len(sys.argv)>1):
in_path_par = sys.argv[1]
if (len(sys.argv)>2):
out_path_par = sys.argv[2]
if (len(sys.argv)>3):
language_par = sys.argv[3]
print('genRAW input_dir=\"'+in_path_par+'\" output_dir=\"'+out_path_par+'\" language=\"'+language_par+'\"')
#
# collect text documents in 'in_path_par'
#
from os import walk
f = [] # non-recursive list of files, .txt only
for (dirpath, dirnames, filenames) in walk(in_path_par):
for single_file in filenames:
if (single_file.endswith('.txt')):
full_path = dirpath + single_file
f.append(full_path)
break
f_rec = [] # recursive list of files, .txt only
collect_files_recursive(in_path_par)
engine = iknowpy.iKnowEngine()
f_raw = open(out_path_par + "i.Know." + time.strftime("%Y.%m.%d (%Hu%M)", time.gmtime()) + ".raw", 'wb')
f_raw.write(b'\xef\xbb\xbf') # Utf8 BOM
write_ln(f_raw,'#')
write_ln(f_raw,"# in_path_par:"+in_path_par)
write_ln(f_raw,"# out_path_par:"+out_path_par)
write_ln(f_raw,"# language_par:"+language_par)
write_ln(f_raw,"#\n#\n#")
for text_file in f_rec:
print(text_file)
# do file.WriteLine("D"_$char(1)_$p(name,tSep,$l(name,tSep))_$char(1)_filename)
if OldStyle:
write_ln(f_raw,'\nD\x01'+text_file+'\x01'+in_path_par+text_file)
else:
write_ln(f_raw,'\n<D name=\"'+text_file+'\" file=\"'+in_path_par+text_file+'\">') # D050_sentences.txtC:\P4\Users\jdenys\text_input_data\ja\050_sentences.txt
f_text = open(text_file,"r",True,"utf8") # open text file, must be utf8 encoded
text = f_text.read() # read text
f_text.close()
engine.index(text, language_par)
for sent in engine.m_index['sentences']: # S志望学部の決定時期について経営学部生に関しては表7 ( a ) ( b )に、経済学部生に関しては表8 ( a ) ( b )に示す。
#
# reconstruct sentence literal
#
if OldStyle:
sentence_raw = 'S\x01'
ent_stop = ''
for entity in sent['entities']:
ent_type = entity['type']
lit_text = text[entity['offset_start']:entity['offset_stop']]
ent_start = entity['offset_start']
if ent_type == 'NonRelevant':
if (ent_start != ent_stop):
sentence_raw = sentence_raw + lit_text
else:
sentence_raw = sentence_raw.rstrip() + lit_text
ent_stop = entity['offset_stop']
if ent_type == 'Concept':
if(ent_start != ent_stop):
sentence_raw = sentence_raw + '\x02' + lit_text + '\x02'
else:
sentence_raw = sentence_raw.rstrip() + '\x02' + lit_text + '\x02'
ent_stop = entity['offset_stop']
if ent_type == 'Relation':
if(ent_start != ent_stop):
sentence_raw = sentence_raw + '\x03' + lit_text + '\x03'
else:
sentence_raw = sentence_raw.rstrip() + '\x03' + lit_text + '\x03'
ent_stop = entity['offset_stop']
if ent_type == 'PathRelevant':
if(ent_start != ent_stop):
sentence_raw = sentence_raw + (' ' if entity == sent['entities'][0] else '') + '<' + lit_text + '>'
else:
sentence_raw = sentence_raw.rstrip() + (' ' if entity == sent['entities'][0] else '') + '<' + lit_text + '>'
ent_stop = entity['offset_stop']
if entity != sent['entities'][len(sent['entities'])-1]: # not for the last one
sentence_raw = sentence_raw + ' '
else:
sentence_raw = '<S '
for entity in sent['entities']:
ent_type = entity['type']
lit_start = entity['offset_start']
lit_stop = entity['offset_stop']
lit_text = text[lit_start:lit_stop].replace("\n","") # literal representation of sentence, with newlines removed
sentence_raw = sentence_raw + ent_type + '= \"' + lit_text + '\" '
sentence_raw = sentence_raw + '>'
write_ln(f_raw, sentence_raw)
#
# sentence attributes
#
if (len(sent['sent_attributes'])):
for sent_attribute in sent['sent_attributes']:
attr_name = sent_attribute['type'].lower()
attr_marker = sent_attribute['marker'] # corresponds to lexreps.csv match
attr_entity = sent['entities'][sent_attribute['entity_ref']]['index'] # corresponding entity index value
attr_marker_literal = text[sent_attribute['offset_start']:sent_attribute['offset_stop']].replace("\n","") # literal version of the marker, remove newlines
attr_entity_literal = text[sent['entities'][sent_attribute['entity_ref']]['offset_start']:sent['entities'][sent_attribute['entity_ref']]['offset_stop']].replace("\n","") # corresponding entity index literal value, remove newlines
if (attr_name == 'datetime'):
attr_name = 'time'
sent_attribute_raw = '<attr type=\"' + attr_name + '\" literal=\"' + attr_entity_literal + ('\" marker=\"' if OldStyle==False else '\" token=\"') + sent_attribute['marker'].lstrip() + '\"'
if attr_name == 'certainty':
if sent_attribute['value']:
sent_attribute_raw = sent_attribute_raw + ' level=\"' + sent_attribute['value'] + '\"'
else:
if sent_attribute['value']:
sent_attribute_raw = sent_attribute_raw + ' value=\"' + sent_attribute['value'] + '\"'
if sent_attribute['unit']:
sent_attribute_raw = sent_attribute_raw + ' unit=\"' + sent_attribute['unit'] + '\"'
if sent_attribute['value2']:
sent_attribute_raw = sent_attribute_raw + ' value2=\"' + sent_attribute['value2'] + '\"'
if sent_attribute['unit2']:
sent_attribute_raw = sent_attribute_raw + ' unit2=\"' + sent_attribute['unit2'] + '\"'
sent_attribute_raw = sent_attribute_raw + '>'
# print(sent_attribute_raw)
write_ln(f_raw, sent_attribute_raw)
#
# if path not empty and language is Japanese, emit as attribute : "entity_vector"
#
if (len(sent['path']) and language_par=='ja'):
# <attr type="time" literal="経済学部2年のセンター利用入試" token="2年">
entity_vec_raw = '<attr type=\"entity_vector\"'
for sent_index in sent['path']:
entity = sent['entities'][sent_index]
lit_text = text[entity['offset_start']:entity['offset_stop']]
entity_vec_raw = entity_vec_raw + ' \"' + lit_text + '\"'
entity_vec_raw = entity_vec_raw + '>'
write_ln(f_raw, entity_vec_raw)
#
# write entities
#
for entity in sent['entities']:
if OldStyle:
if entity['type'] == 'Concept': # C2only sound
write_ln(f_raw, 'C\x01' + str(entity['index'].count(' ')+1) + '\x01' + entity['index'])
if entity['type'] == 'Relation':
write_ln(f_raw, 'R\x01' + entity['index'])
if entity['type'] == 'PathRelevant': # PR<we>
write_ln(f_raw, 'PR<' + entity['index'] + '>')
else:
write_ln(f_raw, "<"+entity['type']+' \"'+entity['index']+'\">')
#
# write path
#
if (len(sent['path'])):
if OldStyle:
path_raw = 'P\x01'
for sent_index in sent['path']:
path_raw = path_raw + sent['entities'][sent_index]['index'] + ' '
else:
path_raw = '<P '
for sent_index in sent['path']:
path_raw = path_raw + ' \"' + sent['entities'][sent_index]['index'] + '\"'
path_raw = path_raw + '>'
write_ln(f_raw, path_raw.rstrip())
#
# path attributes
# <attr type="negation" span="no sign here of hustle and bustle of french alpine tourist honeypots">
#
if (len(sent['path_attributes'])):
for path_attribute in sent['path_attributes']:
attr_name = path_attribute['type']
if (attr_name == "DateTime"):
attr_name = "time"
if (attr_name == "Measurement"):
attr_name = "measurement"
if (attr_name == "Negation"):
attr_name = "negation"
if (attr_name == "Certainty"):
attr_name = "certainty"
start_position = path_attribute['pos']
attribute_span = path_attribute['span']
attr_path = sent['path'][start_position:start_position+attribute_span]
path_attribute_raw = '<attr type=\"' + attr_name + '\" span=\"'
for sent_index in attr_path:
path_attribute_raw = path_attribute_raw + sent['entities'][sent_index]['index'] + ' '
path_attribute_raw = path_attribute_raw.strip() + '\">'
write_ln(f_raw,path_attribute_raw)
f_raw.close()
| [
2,
770,
11361,
2393,
3544,
262,
1708,
21004,
25,
3384,
69,
12,
23,
198,
7061,
6,
2429,
20530,
13,
9078,
2891,
329,
15453,
29929,
4941,
2587,
198,
220,
220,
220,
29566,
25,
366,
29412,
2429,
20530,
13,
9078,
1279,
5239,
3696,
8619,
2... | 1.963798 | 5,276 |
from asyncio import sleep as asyncio_sleep
from sage_utils.amqp.clients import RpcAmqpClient
from sage_utils.constants import NOT_FOUND_ERROR, TOKEN_ERROR, VALIDATION_ERROR
from sage_utils.wrappers import Response
from app.token.api.workers.generate_token import GenerateTokenWorker
from app.token.api.workers.refresh_token import RefreshTokenWorker
from app.users.documents import User
REQUEST_TOKEN_QUEUE = GenerateTokenWorker.QUEUE_NAME
REQUEST_TOKEN_EXCHANGE = GenerateTokenWorker.REQUEST_EXCHANGE_NAME
RESPONSE_TOKEN_EXCHANGE = GenerateTokenWorker.RESPONSE_EXCHANGE_NAME
REQUEST_QUEUE = RefreshTokenWorker.QUEUE_NAME
REQUEST_EXCHANGE = RefreshTokenWorker.REQUEST_EXCHANGE_NAME
RESPONSE_EXCHANGE = RefreshTokenWorker.RESPONSE_EXCHANGE_NAME
| [
6738,
30351,
952,
1330,
3993,
355,
30351,
952,
62,
42832,
198,
198,
6738,
35021,
62,
26791,
13,
321,
80,
79,
13,
565,
2334,
1330,
371,
14751,
5840,
80,
79,
11792,
198,
6738,
35021,
62,
26791,
13,
9979,
1187,
1330,
5626,
62,
37,
1591... | 2.888889 | 261 |
"""Letter Combinations of a Phone Number
Given a string containing digits from 2-9 inclusive, return all possible letter combinations that
the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below.
Note that 1 does not map to any letters.
2 -> ['a', 'b', 'c']
3 -> ['d', 'e', 'f']
4 -> ['g', 'h', 'i']
5 -> ['j', 'k', 'l']
6 -> ['m', 'n', 'o']
7 -> ['p', 'q', 'r', 's']
8 -> ['t', 'u', 'v']
9 -> ['w', 'x', 'y', 'z']
Example:
Input: "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
Note:
Although the above answer is in lexicographical order, your answer could be in any order you want.
Seen this question in a real interview before?
Refer https://leetcode.com/problems/letter-combinations-of-a-phone-number/description/
"""
if __name__ == '__main__':
cases = [
("23", ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]),
]
s = Solution()
for case in cases:
result = s.letterCombinations(case[0])
for item in result:
if item not in case[1]:
print(item, result, case[1])
assert False
| [
37811,
45708,
14336,
7352,
286,
257,
14484,
7913,
198,
15056,
257,
4731,
7268,
19561,
422,
362,
12,
24,
19889,
11,
1441,
477,
1744,
3850,
17790,
326,
198,
1169,
1271,
714,
2380,
13,
198,
198,
32,
16855,
286,
16839,
284,
7475,
357,
313... | 2.526201 | 458 |
import requests
text_query = "a vanilla milk shake would be lovely"
r = requests.get(f'URL YOU COPIED &query={text_query}')
message = r.json()
print(message['prediction']['topIntent'])
for entity in message['prediction']["entities"]['$instance']:
print(entity)
| [
11748,
7007,
198,
198,
5239,
62,
22766,
796,
366,
64,
16858,
7545,
13279,
561,
307,
14081,
1,
198,
81,
796,
7007,
13,
1136,
7,
69,
6,
21886,
7013,
27975,
19767,
1222,
22766,
34758,
5239,
62,
22766,
92,
11537,
198,
20500,
796,
374,
1... | 2.977778 | 90 |
"""
zmail.info
~~~~~~~~~~
This module provide supported server information.
'Server_provider_address':{
'protocol':('protocol_server_address', port, use_ssl,use_tls),
}
"""
SUPPORTED_SERVER = {
'163.com': {
'smtp': ('smtp.163.com', 994, True, False),
'pop3': ('pop.163.com', 995, True, False),
'imap': ('imap.163.com', 993, True, False)
},
'126.com': {
'smtp': ('smtp.126.com', 994, True, False),
'pop3': ('pop.126.com', 995, True, False),
'imap': ('imap.126.com', 993, True, False)
},
'yeah.net': {
'smtp': ('smtp.yeah.net', 994, True, False),
'pop3': ('pop.yeah.net', 995, True, False),
'imap': ('imap.yeah.net', 993, True, False)
},
'qq.com': {
'smtp': ('smtp.qq.com', 465, True, False),
'pop3': ('pop.qq.com', 995, True, False),
},
'gmail.com': {
'smtp': ('smtp.gmail.com', 587, False, True),
'pop3': ('pop.gmail.com', 995, True, False),
},
'sina.com': {
'smtp': ('smtp.sina.com', 465, True, False),
'pop3': ('pop.sina.com', 995, True, False),
},
'outlook.com': {
'smtp': ('smtp-mail.outlook.com', 587, False, True),
'pop3': ('pop.outlook.com', 995, True, False),
},
}
SUPPORTED_ENTERPRISE_SERVER_CONFIG = {
'qq': {
'smtp_host': 'smtp.exmail.qq.com',
'smtp_port': 465,
'smtp_ssl': True,
'smtp_tls': False,
'pop_host': 'pop.exmail.qq.com',
'pop_port': 995,
'pop_ssl': True,
'pop_tls': False
},
'ali': {
'smtp_host': 'smtp.mxhichina.com',
'smtp_port': 465,
'smtp_ssl': True,
'smtp_tls': False,
'pop_host': 'pop3.mxhichina.com',
'pop_port': 995,
'pop_ssl': True,
'pop_tls': False
},
}
def get_supported_server_info(mail_address: str, protocol: str) -> tuple:
"""Use user address to get server address and port.
:param mail_address: str
:param protocol: str
:return: ('protocol_server_address', port, use_ssl)
"""
provider = mail_address.split('@')[1]
if provider in SUPPORTED_SERVER:
server_info = SUPPORTED_SERVER[provider]
if protocol in server_info:
return server_info[protocol]
if protocol == 'smtp':
return 'smtp.' + provider, 465, True, False
elif protocol == 'pop3':
return 'pop3.' + provider, 995, True, False
def get_enterprise_server_config(config: str):
"""Get user-defined config.
:param config: str
:return: ('protocol_server_address', port, use_ssl)
"""
if config in SUPPORTED_ENTERPRISE_SERVER_CONFIG:
return SUPPORTED_ENTERPRISE_SERVER_CONFIG[config]
return False
| [
37811,
198,
89,
4529,
13,
10951,
198,
15116,
4907,
198,
1212,
8265,
2148,
4855,
4382,
1321,
13,
628,
198,
6,
10697,
62,
15234,
1304,
62,
21975,
10354,
90,
198,
220,
220,
220,
705,
11235,
4668,
10354,
10786,
11235,
4668,
62,
15388,
62,... | 2.082576 | 1,320 |
import asyncio
from aionetworking.actions.base import BaseAction
from aionetworking.types.formats import MessageObjectType
from dataclasses import dataclass, field
from aionetworking.factories import queue_defaultdict
from typing import AsyncGenerator, DefaultDict
@dataclass
| [
11748,
30351,
952,
198,
6738,
257,
295,
316,
16090,
13,
4658,
13,
8692,
1330,
7308,
12502,
198,
6738,
257,
295,
316,
16090,
13,
19199,
13,
687,
1381,
1330,
16000,
10267,
6030,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2... | 3.636364 | 77 |
import argparse
import asyncio
from contextlib import asynccontextmanager
from functools import wraps
import logging
from rsp1570serial.commands import messages
from rsp1570serial.icons import icon_list_to_flags
from rsp1570serial.messages import (
decode_message_stream,
CommandMessage,
DEVICE_ID_RSP1570,
MSGTYPE_FEEDBACK_STRING,
MSGTYPE_VOLUME_DIRECT_COMMANDS,
MSGTYPE_ZONE_2_VOLUME_DIRECT_COMMANDS,
MSGTYPE_ZONE_3_VOLUME_DIRECT_COMMANDS,
MSGTYPE_ZONE_4_VOLUME_DIRECT_COMMANDS,
)
from rsp1570serial.protocol import encode_payload
MIN_VOLUME = 0
MAX_VOLUME = 96
INITIAL_VOLUME = 50
INITIAL_SOURCE = "VIDEO 1"
VOLUME_DIRECT_MESSAGE_TYPES = set(
[
MSGTYPE_VOLUME_DIRECT_COMMANDS,
MSGTYPE_ZONE_2_VOLUME_DIRECT_COMMANDS,
MSGTYPE_ZONE_3_VOLUME_DIRECT_COMMANDS,
MSGTYPE_ZONE_4_VOLUME_DIRECT_COMMANDS,
]
)
# A set of realistic looking attributes to apply when each source is selected
SOURCE_ATTRIB_MAP = {
" CD": SourceAttribs(
"STEREO 44.1K",
["A", "Standby LED", "SW", "FR", "FL"],
["--cd", "--alias_cd"],
"alias_cd",
),
"TUNER": SourceAttribs(
"STEREO 44.1K",
["A", "Standby LED", "SW", "FR", "FL"],
["--tuner", "--alias_tuner"],
"alias_tuner",
),
"TAPE": SourceAttribs(
"STEREO 44.1K",
["A", "Standby LED", "SW", "FR", "FL"],
["--tape", "--alias_tape"],
"alias_tape",
),
"VIDEO 1": SourceAttribs(
"DOLBY PL\x19 C 48K ",
["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
["--video_1", "--alias_video_1"],
"alias_video_1",
),
"VIDEO 2": SourceAttribs(
"DOLBY PL\x19 M 48K ",
["II", "HDMI", "Pro Logic", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
["--video_2", "--alias_video_2"],
"alias_video_2",
),
"VIDEO 3": SourceAttribs(
"DOLBY DIGITAL 48K ",
[
"HDMI",
"Optical",
"1",
"Dolby Digital",
"Standby LED",
"SW",
"SR",
"SL",
"FR",
"C",
"FL",
],
["--video_3", "--alias_video_3"],
"alias_video_3",
),
"VIDEO 4": SourceAttribs(
"5CH STEREO 48K ",
["A", "Standby LED", "SW", "SR", "SL", "FR", "C", "FL"],
["--video_4", "--alias_video_4"],
"alias_video_4",
),
"VIDEO 5": SourceAttribs(
"PCM 2CH 48K ",
["A", "Standby LED", "SW", "FR", "FL"],
["--video_5", "--alias_video_5"],
"alias_video_5",
),
"MULTI": SourceAttribs(
"BYPASS 48K ",
["Standby LED", "CBL", "CBR", "SW", "SR", "SL", "FR", "C", "FL"],
["--multi", "--alias_multi"],
"alias_multi",
),
}
async def heartbeat():
"""
Tells you that the loop is still running
Also keeps the KeyboardInterrupt running on Windows until
Python 3.8 comes along (https://bugs.python.org/issue23057)
"""
try:
count = 0
while True:
await asyncio.sleep(2)
count += 1
logging.info("Heartbeat number {}".format(count))
except asyncio.CancelledError:
logging.info("Heartbeat cancelled")
@asynccontextmanager
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--port", type=int, default=50001, help="port to serve on"
)
parser.add_argument(
"-o", "--is_on", action="store_true", help="emulator starts up in the on state"
)
for name, attribs in SOURCE_ATTRIB_MAP.items():
parser.add_argument(
*attribs.alias_args,
type=str,
dest=attribs.argparse_dest,
help="alias for '{}' input".format(name)
)
args = parser.parse_args()
aliases = {}
for name, attribs in SOURCE_ATTRIB_MAP.items():
alias = getattr(args, attribs.argparse_dest, None)
if alias is not None:
aliases[name] = alias
logging.info("Source '%s' aliased to '%s'", name, alias)
asyncio.run(run_server(args.port, aliases, args.is_on))
| [
11748,
1822,
29572,
198,
11748,
30351,
952,
198,
6738,
4732,
8019,
1330,
355,
2047,
535,
261,
5239,
37153,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
11748,
18931,
198,
6738,
374,
2777,
1314,
2154,
46911,
13,
9503,
1746,
1330,
6218,
... | 1.9954 | 2,174 |
import numpy as np
import copy
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4866,
628,
628,
198,
4871,
12440,
7,
13414,
16886,
13247,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
49850,
257,
2685,
326,
318,
23606,
19482,
284,
262,
4238,
8718,
40296,
1333,
648,
... | 3.21875 | 96 |
import random
import time
import pygame
from agagla import ship
from pygame.math import Vector2
import os
import math
from enum import Enum
from agagla import shared_objects
| [
11748,
4738,
198,
11748,
640,
198,
11748,
12972,
6057,
198,
6738,
556,
363,
5031,
1330,
4074,
198,
6738,
12972,
6057,
13,
11018,
1330,
20650,
17,
198,
11748,
28686,
198,
11748,
10688,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
556,
3... | 3.6875 | 48 |
from .... pyaz_utils import _call_az
def add(account_name, name, policy_option_name, resource_group, alt_rsa_token_keys=None, alt_symmetric_token_keys=None, alt_x509_token_keys=None, ask=None, audience=None, clear_key_configuration=None, fair_play_pfx=None, fair_play_pfx_password=None, fp_playback_duration_seconds=None, fp_storage_duration_seconds=None, issuer=None, open_id_connect_discovery_document=None, open_restriction=None, play_ready_template=None, rental_and_lease_key_type=None, rental_duration=None, token_claims=None, token_key=None, token_key_type=None, token_type=None, widevine_template=None):
'''
Add a new option to an existing content key policy.
Required Parameters:
- account_name -- The name of the Azure Media Services account.
- name -- The content key policy name.
- policy_option_name -- The content key policy option name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- alt_rsa_token_keys -- Space-separated list of alternate rsa token keys.
- alt_symmetric_token_keys -- Space-separated list of alternate symmetric token keys.
- alt_x509_token_keys -- Space-separated list of alternate x509 certificate token keys.
- ask -- The key that must be used as FairPlay Application Secret Key, which is a 32 character hex string.
- audience -- The audience for the token.
- clear_key_configuration -- Use Clear Key configuration, a.k.a AES encryption. It's intended for non-DRM keys.
- fair_play_pfx -- The filepath to a FairPlay certificate file in PKCS 12 (pfx) format (including private key).
- fair_play_pfx_password -- The password encrypting FairPlay certificate in PKCS 12 (pfx) format.
- fp_playback_duration_seconds -- Playback duration
- fp_storage_duration_seconds -- Storage duration
- issuer -- The token issuer.
- open_id_connect_discovery_document -- The OpenID connect discovery document.
- open_restriction -- Use open restriction. License or key will be delivered on every request. Not recommended for production environments.
- play_ready_template -- JSON PlayReady license template. Use @{file} to load from a file.
- rental_and_lease_key_type -- The rental and lease key type. Available values: Undefined, DualExpiry, PersistentUnlimited, PersistentLimited.
- rental_duration -- The rental duration. Must be greater than or equal to 0.
- token_claims -- Space-separated required token claims in '[key=value]' format.
- token_key -- Either a string (for symmetric key) or a filepath to a certificate (x509) or public key (rsa). Must be used in conjunction with --token-key-type.
- token_key_type -- The type of the token key to be used for the primary verification key. Allowed values: Symmetric, RSA, X509
- token_type -- The type of token. Allowed values: Jwt, Swt.
- widevine_template -- JSON Widevine license template. Use @{file} to load from a file.
'''
return _call_az("az ams content-key-policy option add", locals())
def remove(account_name, name, policy_option_id, resource_group):
'''
Remove an option from an existing content key policy.
Required Parameters:
- account_name -- The name of the Azure Media Services account.
- name -- The content key policy name.
- policy_option_id -- The content key policy option identifier. This value can be obtained from "policyOptionId" property by running a show operation on a content key policy resource.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az ams content-key-policy option remove", locals())
def update(account_name, name, policy_option_id, resource_group, add_alt_token_key=None, add_alt_token_key_type=None, ask=None, audience=None, fair_play_pfx=None, fair_play_pfx_password=None, fp_playback_duration_seconds=None, fp_storage_duration_seconds=None, issuer=None, open_id_connect_discovery_document=None, play_ready_template=None, policy_option_name=None, rental_and_lease_key_type=None, rental_duration=None, token_claims=None, token_key=None, token_key_type=None, token_type=None, widevine_template=None):
'''
Update an option from an existing content key policy.
Required Parameters:
- account_name -- The name of the Azure Media Services account.
- name -- The content key policy name.
- policy_option_id -- The content key policy option identifier. This value can be obtained from "policyOptionId" property by running a show operation on a content key policy resource.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- add_alt_token_key -- Creates an alternate token key with either a string (for symmetric key) or a filepath to a certificate (x509) or public key (rsa). Must be used in conjunction with --add-alt-token-key-type.
- add_alt_token_key_type -- The type of the token key to be used for the alternate verification key. Allowed values: Symmetric, RSA, X509
- ask -- The key that must be used as FairPlay Application Secret Key, which is a 32 character hex string.
- audience -- The audience for the token.
- fair_play_pfx -- The filepath to a FairPlay certificate file in PKCS 12 (pfx) format (including private key).
- fair_play_pfx_password -- The password encrypting FairPlay certificate in PKCS 12 (pfx) format.
- fp_playback_duration_seconds -- Playback duration
- fp_storage_duration_seconds -- Storage duration
- issuer -- The token issuer.
- open_id_connect_discovery_document -- The OpenID connect discovery document.
- play_ready_template -- JSON PlayReady license template. Use @{file} to load from a file.
- policy_option_name -- The content key policy option name.
- rental_and_lease_key_type -- The rental and lease key type. Available values: Undefined, DualExpiry, PersistentUnlimited, PersistentLimited.
- rental_duration -- The rental duration. Must be greater than or equal to 0.
- token_claims -- Space-separated required token claims in '[key=value]' format.
- token_key -- Either a string (for symmetric key) or a filepath to a certificate (x509) or public key (rsa). Must be used in conjunction with --token-key-type.
- token_key_type -- The type of the token key to be used for the primary verification key. Allowed values: Symmetric, RSA, X509
- token_type -- The type of token. Allowed values: Jwt, Swt.
- widevine_template -- JSON Widevine license template. Use @{file} to load from a file.
'''
return _call_az("az ams content-key-policy option update", locals())
| [
6738,
19424,
12972,
1031,
62,
26791,
1330,
4808,
13345,
62,
1031,
198,
198,
4299,
751,
7,
23317,
62,
3672,
11,
1438,
11,
2450,
62,
18076,
62,
3672,
11,
8271,
62,
8094,
11,
5988,
62,
3808,
64,
62,
30001,
62,
13083,
28,
14202,
11,
5... | 3.371344 | 2,017 |
from flask_sqlalchemy import SQLAlchemy
from app import db
from model.members import Members | [
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
598,
1330,
20613,
198,
6738,
2746,
13,
30814,
1330,
12688
] | 4.181818 | 22 |
"""
Unit tests for the MNIST model
==============================
"""
import os
import shutil
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
from pipeline.dags.train_model import get_mnist_data, fit_estimator
| [
37811,
198,
26453,
5254,
329,
262,
29060,
8808,
2746,
198,
4770,
25609,
855,
198,
37811,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
6738,
1341,
35720,
13,
1069,
759,
874,
1330,
1693,
8019,
198,
6738,
1341,
35720,
13,
4164,
1046... | 3.536232 | 69 |
import requests
import os
import zipfile
import wget
from bs4 import BeautifulSoup
URL = 'https://data.coat.no'
DATASET = 'v_snowdepth_intensive_v1'
CWD = os.getcwd()
OUTPUT_PATH = os.path.join(CWD, 'raw_data')
class Scraper:
"""
Creates a Scraper Object
:param root_url: Root URL of page being scraped
"""
def navigate(self, href_elem: str) -> str:
"""
Navigate href links on the page
:param href_elem: the href element you want to find
:return: resulting URL
"""
try:
# Parse HTML and find href elements
self.print_action(action='NAVIGATING', object=self.current_url)
response = requests.get(self.current_url)
soup = BeautifulSoup(response.content, 'html.parser')
href_elems = soup.find_all('a', href=True)
href_elem = list(filter(lambda x: href_elem in x['href'], href_elems))[0]
# Set current URL
self.current_url = self.root_url + href_elem['href']
return self.current_url
except requests.exceptions.ConnectionError as e:
print(e)
def download(self, url: str):
"""
Download file locally
:param url: download URL
"""
self.print_action('DOWNLOADING',object=url)
wget.download(f'{url}')
self.print_action('DOWNLOADED', object=url)
def unzip(self, file: str, output_path: str):
"""
Unzip file to a folder and deletes file
:param file: zipfile name
:param output_path: output file path
"""
self.print_action(action='UNZIPPING', object=file)
try:
with zipfile.ZipFile(os.path.join(CWD, file)) as _zip:
_zip.extractall(output_path)
except Exception as e:
print(e)
os.remove(os.path.join(CWD, file))
self.print_action(action='UNZIPPED', object=file)
if __name__ == '__main__':
main() | [
11748,
7007,
198,
11748,
28686,
198,
11748,
19974,
7753,
198,
11748,
266,
1136,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
21886,
796,
705,
5450,
1378,
7890,
13,
31434,
13,
3919,
6,
198,
35,
1404,
1921,
2767,
796,
705,
... | 2.130753 | 956 |
"""Test helper script that reads from stdin"""
import dnaio
if __name__ == '__main__':
with dnaio.open('-') as f:
records = list(f)
print(len(records))
| [
37811,
14402,
31904,
4226,
326,
9743,
422,
14367,
259,
37811,
198,
198,
11748,
288,
2616,
952,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
351,
288,
2616,
952,
13,
9654,
10786,
12,
11537,
35... | 2.428571 | 70 |
from ioscreen.coremedia.CMTime import CMTimeConst
from ioscreen.coremedia.CMTime import CMTime as CM
from ioscreen.sync import *
| [
6738,
1312,
17500,
1361,
13,
7295,
11431,
13,
24187,
7575,
1330,
16477,
7575,
34184,
198,
6738,
1312,
17500,
1361,
13,
7295,
11431,
13,
24187,
7575,
1330,
16477,
7575,
355,
16477,
198,
6738,
1312,
17500,
1361,
13,
27261,
1330,
1635,
628,
... | 3.113636 | 44 |
import os
# used for JSs
path0='/home/azamhamidinekoo/Documents/projects/Azam_HDGAN_feb21/results/results3_1may2021/original_he'
path1 = '/home/azamhamidinekoo/Documents/projects/Azam_HDGAN_feb21/dataset/cohort3/original_he'
for case in os.listdir(path0):
x_sr = os.path.join(path0,case,'he_erg_1may21/test_latest/images')
x_ds = os.path.join(path1,case,'test_A')
path00, dirs00, files00 = next(os.walk(x_sr))
file_count0 = len(files00)/2
path11, dirs11, files11 = next(os.walk(x_ds))
file_count1 = len(files11)
if file_count1 != file_count0:
print(case)
'''
p0 = '/home/azamhamidinekoo/Documents/projects/Azam_HDGAN_feb21/dataset/cohort3/original_he/'
p1 = '/home/azamhamidinekoo/Documents/projects/Azam_HDGAN_feb21/dataset/cohort3/original_he_2/'
import shutil
s = ['2503_RMS.czi','2795_RMS.czi','2395_RMS.czi','2836_RMS.czi','3501_RMS.czi','2888_RMS.czi','3476_RMS.czi','2848_RMS.czi','2520_RMS.czi','2238_RMS.czi','2404_RMS.czi','2889_RMS.czi','2783_RMS.czi','2790_RMS.czi','3521_RMS.czi','2392_RMS.czi','2255_RMS.czi','2815_RMS.czi','2846_RMS.czi','2839_RMS.czi','2873_RMS.czi','2504_RMS.czi','2865_RMS.czi','2797_RMS.czi','2505_RMS.czi','2892_RMS.czi','2532_RMS.czi','2840_RMS.czi','2406_RMS.czi','2243_RMS.czi','2799_RMS.czi','2802_RMS.czi','3510_RMS.czi','2831_RMS.czi','2546_RMS.czi','2822_RMS.czi']
for i in s:
path1 = p0 + i + '/test_A'
path2 = p1 + i + '/test_A'
print(path1,path2)
if not os.path.isdir(path1):
os.mkdir(path1)
shutil.copytree(path1,path2)
''' | [
11748,
28686,
220,
198,
198,
2,
973,
329,
26755,
82,
198,
6978,
15,
11639,
14,
11195,
14,
1031,
321,
2763,
39422,
74,
2238,
14,
38354,
14,
42068,
14,
26903,
321,
62,
10227,
45028,
62,
69,
1765,
2481,
14,
43420,
14,
43420,
18,
62,
... | 1.974901 | 757 |
# -*- coding: utf-8 -*-
import copy
import re
import sqlite3
import types
from malibu.database import dbtypeconv
__doc__ = """
malibu.database.dbmapper
------------------------
This is a small, hackish ORM for SQLite3.
Note from the author: (01 / 14 / 2016)
--------------------------------------
I've got to be honest, this is probably the worst code I have ever written and read.
At this point, this code is so difficult to maintain and keep up to date for 2/3 compat that
it is almost not worth the work.
Especially considering that there are things like Peewee, SQLAlchemy, etc, this is not worth
using or maintaining.
From this point forward, I recommend using some other, cleaner, better maintained solution
such as Peewee.
This DBMapper code will no longer be maintained and will be deprecated starting
with the 0.1.6 release.
The code will be removed as the 1.0.0 release approaches.
There may be plans to replace this with a SQLite adapter for the malibu.design.brine series
of classes that behave similar to this, just without all the cruft.
"""
class DBMapper(object):
""" This is code for a relatively small ORM for SQLite built
on top of the python-sqlite3 module.
"""
# FETCH Constants for __execute()
FETCH_ONE = 'one'
FETCH_MANY = 'many'
FETCH_ALL = 'all'
# INDEX Constants for options dictionary.
INDEX_PRIMARY = 'primaryIndex'
INDEX_AUTOINCR = 'autoincrIndex'
INDEX_UNIQUE = 'uniqueIndices'
GENERATE_FTS_VT = 'genFTSVTs'
# Global variables for static database methods
_options = None
__default_options = {
INDEX_PRIMARY: 0,
INDEX_AUTOINCR: True,
INDEX_UNIQUE: set(),
GENERATE_FTS_VT: False # Do NOT generate FTS by default.
}
@staticmethod
def get_default_options():
""" DBMapper.get_default_options()
Returns a deep copy of the default options dictionary for
modification in subclasses.
"""
return copy.deepcopy(DBMapper.__default_options)
@staticmethod
def connect_database(dbpath):
""" DBMapper.connect_database(dbpath)
Connects to a database at 'dbpath' and installs the json
type converter "middleware" into the database system.
"""
dbtypeconv.install_json_converter()
__db = sqlite3.connect(dbpath, detect_types=sqlite3.PARSE_DECLTYPES)
return __db
@classmethod
def set_db_options(cls, db, keys, ktypes, options=__default_options):
""" DBMapper.set_db_options(db => database instance
keys => list of keys
ktypes => list of key types
options => options dictionary (optional))
Sets options for a subclasses DBMapper context.
"""
if cls._options is None:
cls._options = {}
cls._options['database'] = db
cls._options['keys'] = keys
cls._options['keytypes'] = ktypes
cls._options['options'] = options
else:
cls._options['database'] = db
cls._options['keys'] = keys
cls._options['keytypes'] = ktypes
@classmethod
def load(cls, **kw):
""" DBMapper.load(**kw)
Loads a *single* row from the database and populates it into
the context cls this method was called under.
If the database returns more than one row for the kwarg query,
this method will only return the first result! If you want a
list of matching rows, use find() or search().
"""
if cls._options is None:
raise DBMapperException(
'Static database options have not been set.')
dbo = cls._options
obj = cls(dbo['database'])
cur = dbo['database'].cursor()
keys = []
vals = []
for key, val in kw.items():
keys.append(key)
vals.append(val)
whc = []
for pair in zip(keys, vals):
whc.append("%s=?" % (pair[0]))
query = "select * from %s where (%s)" % (obj._table, ' and '.join(whc))
result = obj.__execute(cur, query, args=vals)
if result is None:
for key in dbo['keys']:
setattr(obj, "_%s" % (key), None)
return
for key, dbv in zip(dbo['keys'], result):
setattr(obj, "_%s" % (key), dbv)
return obj
@classmethod
def new(cls, **kw):
""" DBMapper.new(**kw)
Creates a new contextual instance and returns the object.
Only parameters defined in the kwargs will be passed in to
the record creation query, as there is no support for default
values yet. (06/11/15)
"""
if cls._options is None:
raise DBMapperException(
'Static database options have not been set.')
dbo = cls._options
obj = cls(dbo['database'])
cur = dbo['database'].cursor()
keys = []
vals = []
for key, val in kw.items():
keys.append(key)
vals.append(val)
anonvals = []
for val in vals:
anonvals.append('?')
query = "insert into %s (%s) values (%s)" % (
obj._table, ','.join(keys), ','.join(anonvals))
obj.__execute(cur, query, args=vals)
res = cls.find(**kw)
if len(res) == 0:
return None
else:
return res[0]
@classmethod
def find(cls, **kw):
""" DBMapper.find(**kw)
Searches for a set of records that match the query built by
the contents of the kwargs and returns a filterable list of
contextualized results that can be modified.
"""
if cls._options is None:
raise DBMapperException(
'Static database options have not been set.')
dbo = cls._options
obj = cls(dbo['database'])
cur = dbo['database'].cursor()
primaryKey = dbo['keys'][dbo['options'][DBMapper.INDEX_PRIMARY]]
keys = []
vals = []
for key, val in kw.items():
keys.append(key)
vals.append(val)
whc = []
for pair in zip(keys, vals):
whc.append('%s=?' % (pair[0]))
query = "select %s from %s where (%s)" % (
primaryKey, obj._table, ' and '.join(whc))
result = obj.__execute(cur, query, args=vals, fetch=DBMapper.FETCH_ALL)
load_pairs = []
for row in result:
load_pairs.append(
{primaryKey: row[dbo['options'][DBMapper.INDEX_PRIMARY]]}
)
return DBResultList([cls.load(**pair) for pair in load_pairs])
@classmethod
def find_all(cls):
""" DBMapper.find_all()
Finds all rows that belong to a table and returns a filterable
list of contextualized results. Please note that the list that
is returned can be empty, but it should never be none.
"""
if cls._options is None:
raise DBMapperException(
'Static database options have not been set.')
dbo = cls._options
obj = cls(dbo['database'])
cur = dbo['database'].cursor()
primaryKey = dbo['keys'][dbo['options'][DBMapper.INDEX_PRIMARY]]
query = "select %s from %s" % (primaryKey, obj._table)
result = obj.__execute(cur, query, fetch=DBMapper.FETCH_ALL)
load_pairs = []
for row in result:
load_pairs.append(
{primaryKey: row[dbo['options'][DBMapper.INDEX_PRIMARY]]}
)
return DBResultList([cls.load(**pair) for pair in load_pairs])
@classmethod
def search(cls, param):
""" DBMapper.search(param)
This function will return a list of results that match the given
param for a full text query. The search parameter should be in the
form of a sqlite full text query, as defined here:
http://www.sqlite.org/fts3.html#section_3
As an example, suppose your table looked like this:
+----+---------+----------------+
| id | name | description |
+----+---------+----------------+
| 1 | linux | some magic |
| 2 | freebsd | daemonic magic |
| 3 | windows | tomfoolery |
+----+---------+----------------+
A full text query for "name:linux magic" would return the first
row because the name is linux and the description contains "magic".
A full text query just for "description:magic" would return both
rows one and two because the descriptions contain the word "magic".
"""
if cls._options is None:
raise DBMapperException(
'Static database options have not been set.')
if not cls._options['options'][DBMapper.GENERATE_FTS_VT]:
raise DBMapperException(
'Full-text search table not enabled on this table.')
dbo = cls._options
obj = cls(dbo['database'])
cur = dbo['database'].cursor()
query = """select docid from _search_%s where _search_%s match \"?\"""" % \
(obj._table, obj._table)
result = obj.__execute(
cur,
query,
args=[param],
fetch=DBMapper.FETCH_ALL)
load_pairs = []
for row in result:
load_pairs.append({cls._options['keys'][0]: row[0]})
return DBResultList([cls.load(**pair) for pair in load_pairs])
@classmethod
def join(cls, cond, a, b):
""" DBMapper.join(cond => other table to join on
a => left column to join
b => right column to join)
Performs a sqlite join on two tables. Returns the join results
in a filterable list.
"""
if cls._options is None or cond._options is None:
raise DBMapperException(
'Static database options have not been set.')
dba = cls._options
obja = cls(dba['database'])
dbb = cond._options
objb = cond(dbb['database'])
cur = dba['database'].cursor()
primaryKeyA = dba['keys'][dba['options'][DBMapper.INDEX_PRIMARY]]
primaryKeyB = dbb['keys'][dba['options'][DBMapper.INDEX_PRIMARY]]
query = "select A.%s, B.%s from %s as A join %s as B on A.%s=B.%s" % (
primaryKeyA, primaryKeyB, obja._table, objb._table, a, b)
result = obja.__execute(cur, query, fetch=DBMapper.FETCH_ALL)
load_pair_a = []
load_pair_b = []
for row in result:
load_pair_a.append({primaryKeyA: row[0]})
load_pair_b.append({primaryKeyB: row[1]})
return (
DBResultList([cls.load(**pair) for pair in load_pair_a]),
DBResultList([cond.load(**pair) for pair in load_pair_b]),
)
def __execute(self, cur, sql, fetch=FETCH_ONE, limit=-1, args=()):
""" __execute(self,
cur => pointer to database cursor
sql => sql query to execute
fetch => amount of results to fetch
limit => query limit if not use FETCH_ONE
args => query arguments to parse in)
Filters, quotes, and executes the provided sql query and returns
a list of database rows.
"""
query = sql
try:
if len(args) >= 1:
cur.execute("select " + ", ".join(["quote(?)" for i in args]),
args)
quoted_values = cur.fetchone()
for quoted_value in quoted_values:
query = query.replace('?', str(quoted_value), 1)
except:
pass
try:
cur.execute(query)
except (sqlite3.ProgrammingError):
try:
cur.execute(query, args)
except Exception as e:
raise DBMapperException(
"Error while executing query [%s]" % (query), cause=e)
except Exception as e:
raise DBMapperException(
"Error while executing query [%s]" % (query), cause=e)
if fetch == DBMapper.FETCH_ONE:
return cur.fetchone()
elif fetch == DBMapper.FETCH_MANY:
if limit == -1:
limit = cur.arraysize
return cur.fetchmany(size=limit)
elif fetch == DBMapper.FETCH_ALL:
return cur.fetchall()
else:
return cur.fetchall()
def __get_table_info(self, table=None):
""" __get_table_info(self, table)
Returns pragma information for a table.
"""
table = self._table if table is None else table
cur = self._db.cursor()
query = "pragma table_info(%s)" % (table)
return self.__execute(cur, query, fetch=DBMapper.FETCH_ALL)
def __generate_structure(self):
""" __generate_structure(self)
Generates table structure for determining column updates and
search information.
"""
# use pragma constructs to get table into
tblinfo = self.__get_table_info()
# create the table if the statement does not exist
if len(tblinfo) == 0:
ins = zip(self._keys, self._keytypes)
typarr = []
for pair in ins:
if pair[0] == self._primary:
# identifier type primary key
if self._autoincr_ind:
typarr.append("%s %s primary key autoincrement" % (
pair[0], pair[1]))
else:
typarr.append("%s %s primary key" % (pair[0], pair[1]))
elif pair[0] in self._unique_keys:
typarr.append("%s %s unique" % (pair[0], pair[1]))
else:
# identifier type
typarr.append("%s %s" % (pair[0], pair[1]))
cur = self._db.cursor()
# create table if not exists <table> (<typarr>)
query = "create table if not exists %s (%s)" % \
(self._table, ', '.join(typarr))
self.__execute(cur, query)
# make sure table columns are up to date.
if len(tblinfo) > 0:
# use pragma table info to build database schema
schema_ids = []
schema_types = []
for col in tblinfo:
schema_ids.append(col[1])
schema_types.append(col[2])
# use schema to determine / apply database updates
schema_updates = []
for pair in zip(self._keys, self._keytypes):
if pair[0] in schema_ids:
continue
else:
schema_updates.append("%s %s" % (pair[0], pair[1]))
for defn in schema_updates:
query = "alter table %s add column %s" % (self._table, defn)
cur = self._db.cursor()
self.__execute(cur, query)
# generate full text search table that corresponds with this dbo
if self._options[DBMapper.GENERATE_FTS_VT]:
if len(self.__get_table_info("_search_%s" % (self._table))) == 0:
cur = self._db.cursor()
# fts4 table doesn't exist, make it.
query = "create virtual table _search_%s using fts4(%s, content='%s')" % \
(self._table, ','.join(self._keys), self._table)
self.__execute(cur, query)
# create pre/post update/delete triggers for cascading updates
# XXX - [trigger warning] DO WE NEED THE TRIGGERS
query = "create trigger _%s_bu before update on %s begin delete from _search_%s where docid=old.rowid; end;" % \
(self._table, self._table, self._table)
self.__execute(cur, query)
query = "create trigger _%s_bd before delete on %s begin delete from _search_%s where docid=old.rowid; end;" % \
(self._table, self._table, self._table)
self.__execute(cur, query)
search_keys = ','.join(['docid'] + self._keys[1:])
target_keys = ','.join(['new.' + vkey for vkey in self._keys])
query = "create trigger _%s_au after update on %s begin insert into _search_%s(%s) values(%s); end;" % \
(self._table, self._table, self._table, search_keys,
target_keys)
self.__execute(cur, query)
query = "create trigger _%s_ai after insert on %s begin insert into _search_%s(%s) values(%s); end;" % \
(self._table, self._table, self._table, search_keys,
target_keys)
self.__execute(cur, query)
self._db.commit()
def __generate_getters(self):
""" __generate_getters(self)
Generates magical getter methods for pull data from the
underlying database.
"""
for _key in self._keys:
setattr(self, "get_" + _key, types.MethodType(getter_templ, self))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4866,
198,
11748,
302,
198,
11748,
44161,
578,
18,
198,
11748,
3858,
198,
6738,
6428,
33828,
13,
48806,
1330,
20613,
4906,
42946,
198,
198,
834,
15390,
834,
796,
... | 2.103432 | 8,334 |
# Content autogenerated. Do not edit.
syscalls_ia64 = {
"_sysctl": 1150,
"accept": 1194,
"accept4": 1334,
"access": 1049,
"acct": 1064,
"add_key": 1271,
"adjtimex": 1131,
"bdflush": 1138,
"bind": 1191,
"bpf": 1341,
"brk": 1060,
"capget": 1185,
"capset": 1186,
"chdir": 1034,
"chmod": 1038,
"chown": 1039,
"chroot": 1068,
"clock_adjtime": 1328,
"clock_getres": 1255,
"clock_gettime": 1254,
"clock_nanosleep": 1256,
"clock_settime": 1253,
"clone": 1128,
"clone2": 1213,
"close": 1029,
"close_range": 1460,
"connect": 1192,
"copy_file_range": 1347,
"creat": 1030,
"delete_module": 1134,
"dup": 1057,
"dup2": 1070,
"dup3": 1316,
"epoll_create": 1243,
"epoll_create1": 1315,
"epoll_ctl": 1244,
"epoll_pwait": 1305,
"epoll_pwait2": 1465,
"epoll_wait": 1245,
"eventfd": 1309,
"eventfd2": 1314,
"execve": 1033,
"execveat": 1342,
"exit": 1025,
"exit_group": 1236,
"faccessat": 1293,
"faccessat2": 1463,
"fadvise64": 1234,
"fallocate": 1303,
"fanotify_init": 1323,
"fanotify_mark": 1324,
"fchdir": 1035,
"fchmod": 1099,
"fchmodat": 1292,
"fchown": 1100,
"fchownat": 1284,
"fcntl": 1066,
"fdatasync": 1052,
"fgetxattr": 1222,
"finit_module": 1335,
"flistxattr": 1225,
"flock": 1145,
"fremovexattr": 1228,
"fsconfig": 1455,
"fsetxattr": 1219,
"fsmount": 1456,
"fsopen": 1454,
"fspick": 1457,
"fstat": 1212,
"fstatfs": 1104,
"fstatfs64": 1257,
"fsync": 1051,
"ftruncate": 1098,
"futex": 1230,
"futex_waitv": 1473,
"futimesat": 1285,
"get_mempolicy": 1260,
"get_robust_list": 1299,
"getcpu": 1304,
"getcwd": 1184,
"getdents": 1144,
"getdents64": 1214,
"getegid": 1063,
"geteuid": 1047,
"getgid": 1062,
"getgroups": 1077,
"getitimer": 1119,
"getpeername": 1196,
"getpgid": 1079,
"getpid": 1041,
"getpmsg": 1188,
"getppid": 1042,
"getpriority": 1101,
"getrandom": 1339,
"getresgid": 1075,
"getresuid": 1073,
"getrlimit": 1085,
"getrusage": 1086,
"getsid": 1082,
"getsockname": 1195,
"getsockopt": 1204,
"gettid": 1105,
"gettimeofday": 1087,
"getuid": 1046,
"getunwind": 1215,
"getxattr": 1220,
"init_module": 1133,
"inotify_add_watch": 1278,
"inotify_init": 1277,
"inotify_init1": 1318,
"inotify_rm_watch": 1279,
"io_cancel": 1242,
"io_destroy": 1239,
"io_getevents": 1240,
"io_pgetevents": 1351,
"io_setup": 1238,
"io_submit": 1241,
"io_uring_enter": 1450,
"io_uring_register": 1451,
"io_uring_setup": 1449,
"ioctl": 1065,
"ioprio_get": 1275,
"ioprio_set": 1274,
"kcmp": 1345,
"kexec_load": 1268,
"keyctl": 1273,
"kill": 1053,
"landlock_add_rule": 1469,
"landlock_create_ruleset": 1468,
"landlock_restrict_self": 1470,
"lchown": 1124,
"lgetxattr": 1221,
"link": 1031,
"linkat": 1289,
"listen": 1193,
"listxattr": 1223,
"llistxattr": 1224,
"lookup_dcookie": 1237,
"lremovexattr": 1227,
"lseek": 1040,
"lsetxattr": 1218,
"lstat": 1211,
"madvise": 1209,
"mbind": 1259,
"membarrier": 1344,
"memfd_create": 1340,
"migrate_pages": 1280,
"mincore": 1208,
"mkdir": 1055,
"mkdirat": 1282,
"mknod": 1037,
"mknodat": 1283,
"mlock": 1153,
"mlock2": 1346,
"mlockall": 1154,
"mmap": 1151,
"mmap2": 1172,
"mount": 1043,
"mount_setattr": 1466,
"move_mount": 1453,
"move_pages": 1276,
"mprotect": 1155,
"mq_getsetattr": 1267,
"mq_notify": 1266,
"mq_open": 1262,
"mq_timedreceive": 1265,
"mq_timedsend": 1264,
"mq_unlink": 1263,
"mremap": 1156,
"msgctl": 1112,
"msgget": 1109,
"msgrcv": 1111,
"msgsnd": 1110,
"msync": 1157,
"munlock": 1158,
"munlockall": 1159,
"munmap": 1152,
"name_to_handle_at": 1326,
"nanosleep": 1168,
"newfstatat": 1286,
"nfsservctl": 1169,
"old_getpagesize": 1171,
"open": 1028,
"open_by_handle_at": 1327,
"open_tree": 1452,
"openat": 1281,
"openat2": 1461,
"pciconfig_read": 1173,
"pciconfig_write": 1174,
"perf_event_open": 1352,
"personality": 1140,
"pidfd_getfd": 1462,
"pidfd_open": 1458,
"pidfd_send_signal": 1448,
"pipe": 1058,
"pipe2": 1317,
"pivot_root": 1207,
"pkey_alloc": 1355,
"pkey_free": 1356,
"pkey_mprotect": 1354,
"poll": 1090,
"ppoll": 1295,
"prctl": 1170,
"pread64": 1148,
"preadv": 1319,
"preadv2": 1348,
"prlimit64": 1325,
"process_madvise": 1464,
"process_mrelease": 1472,
"process_vm_readv": 1332,
"process_vm_writev": 1333,
"pselect6": 1294,
"ptrace": 1048,
"pwrite64": 1149,
"pwritev": 1320,
"pwritev2": 1349,
"quotactl": 1137,
"quotactl_fd": 1467,
"read": 1026,
"readahead": 1216,
"readlink": 1092,
"readlinkat": 1291,
"readv": 1146,
"reboot": 1096,
"recv": 1200,
"recvfrom": 1201,
"recvmmsg": 1322,
"recvmsg": 1206,
"remap_file_pages": 1125,
"removexattr": 1226,
"rename": 1054,
"renameat": 1288,
"renameat2": 1338,
"request_key": 1272,
"restart_syscall": 1246,
"rmdir": 1056,
"rseq": 1357,
"rt_sigaction": 1177,
"rt_sigpending": 1178,
"rt_sigprocmask": 1179,
"rt_sigqueueinfo": 1180,
"rt_sigreturn": 1181,
"rt_sigsuspend": 1182,
"rt_sigtimedwait": 1183,
"rt_tgsigqueueinfo": 1321,
"sched_get_priority_max": 1165,
"sched_get_priority_min": 1166,
"sched_getaffinity": 1232,
"sched_getattr": 1337,
"sched_getparam": 1160,
"sched_getscheduler": 1162,
"sched_rr_get_interval": 1167,
"sched_setaffinity": 1231,
"sched_setattr": 1336,
"sched_setparam": 1161,
"sched_setscheduler": 1163,
"sched_yield": 1164,
"seccomp": 1353,
"select": 1089,
"semctl": 1108,
"semget": 1106,
"semop": 1107,
"semtimedop": 1247,
"send": 1198,
"sendfile": 1187,
"sendmmsg": 1331,
"sendmsg": 1205,
"sendto": 1199,
"set_mempolicy": 1261,
"set_robust_list": 1298,
"set_tid_address": 1233,
"setdomainname": 1129,
"setfsgid": 1143,
"setfsuid": 1142,
"setgid": 1061,
"setgroups": 1078,
"sethostname": 1083,
"setitimer": 1118,
"setns": 1330,
"setpgid": 1080,
"setpriority": 1102,
"setregid": 1072,
"setresgid": 1076,
"setresuid": 1074,
"setreuid": 1071,
"setrlimit": 1084,
"setsid": 1081,
"setsockopt": 1203,
"settimeofday": 1088,
"setuid": 1045,
"setxattr": 1217,
"shmat": 1114,
"shmctl": 1116,
"shmdt": 1115,
"shmget": 1113,
"shutdown": 1202,
"sigaltstack": 1176,
"signalfd": 1307,
"signalfd4": 1313,
"socket": 1190,
"socketpair": 1197,
"splice": 1297,
"stat": 1210,
"statfs": 1103,
"statfs64": 1258,
"statx": 1350,
"swapoff": 1095,
"swapon": 1094,
"symlink": 1091,
"symlinkat": 1290,
"sync": 1050,
"sync_file_range": 1300,
"syncfs": 1329,
"sysfs": 1139,
"sysinfo": 1127,
"syslog": 1117,
"tee": 1301,
"tgkill": 1235,
"timer_create": 1248,
"timer_delete": 1252,
"timer_getoverrun": 1251,
"timer_gettime": 1250,
"timer_settime": 1249,
"timerfd": 1308,
"timerfd_create": 1310,
"timerfd_gettime": 1312,
"timerfd_settime": 1311,
"times": 1059,
"tkill": 1229,
"truncate": 1097,
"umask": 1067,
"umount": 1044,
"umount2": 1044,
"uname": 1130,
"unlink": 1032,
"unlinkat": 1287,
"unshare": 1296,
"uselib": 1093,
"userfaultfd": 1343,
"ustat": 1069,
"utimensat": 1306,
"utimes": 1036,
"vhangup": 1123,
"vmsplice": 1302,
"wait4": 1126,
"waitid": 1270,
"write": 1027,
"writev": 1147,
}
| [
2,
14041,
1960,
519,
877,
515,
13,
2141,
407,
4370,
13,
198,
198,
17597,
66,
5691,
62,
544,
2414,
796,
1391,
198,
220,
220,
220,
45434,
17597,
34168,
1298,
1367,
1120,
11,
198,
220,
220,
220,
366,
13635,
1298,
15136,
19,
11,
198,
... | 1.870788 | 4,303 |
from typing import Any
from uuid import UUID
from sqlalchemy.dialects import postgresql as pg
from sqlalchemy.types import TypeDecorator
class PostgresUUID(TypeDecorator):
"""
Decorated `sqlalchemy.dialects.postgresql.UUID` type that ensures we are only
dealing with stdlib's `uuid.UUID` outsdide of SQLAlchemy.
This allows the type to play nicely with orjson.
"""
cache_ok = True
impl = pg.UUID
| [
6738,
19720,
1330,
4377,
198,
6738,
334,
27112,
1330,
471,
27586,
198,
198,
6738,
44161,
282,
26599,
13,
38969,
478,
82,
1330,
1281,
34239,
13976,
355,
23241,
198,
6738,
44161,
282,
26599,
13,
19199,
1330,
5994,
10707,
273,
1352,
628,
1... | 2.86 | 150 |
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
POI.py
"""
import numpy as np
import cvxpy as cvx
import pandas as pd
from storagevet.ErrorHandling import *
class POI:
"""
This class holds the load data for the case described by the user defined model parameter. It will also
impose any constraints that should be opposed at the microgrid's POI.
"""
def __init__(self, params, technology_inputs_map, technology_class_map):
""" Initialize POI object to initialize the DER technologies given by the user input parameters
Args:
params(Dict): dictionary of all the user input parameters including all technologies, value streams,
finance, scenario, and poi
technology_inputs_map (Dict): dict of active technology prepared by Params
"""
self.max_export = params['max_export'] # > 0
self.max_import = params['max_import'] # < 0
self.apply_poi_constraints = params['apply_poi_constraints']
# types of DER technology, and their dictionary of active specific technologies
self.der_list = []
self.der_summary = {} # keys= names of DERs, values= DER type (basically tech summary output)
self.active_ders = []
# initialize all DERs
for der, params_input in technology_inputs_map.items():
if params_input is not None: # then Params class found an input
for id_val, id_der_input in params_input.items():
# add ID to der input dictionary
id_der_input['ID'] = id_val
TellUser.info(f"Initializing: {der}-{id_val}")
der_object = technology_class_map[der](id_der_input)
self.der_list.append(der_object)
self.active_ders.append(der_object)
def calculate_system_size(self):
""" Determines the maximum controllable power that the system can export, needs to import, and
energy that can be stored.
Returns: ((max power import),
(max power export),
(max energy allowed to be stored, min energy allowed to be stored))
"""
min_ene = 0
max_ene = 0
max_ch = 0
max_dis = 0
for der_instance in self.active_ders:
min_ene += der_instance.operational_min_energy()
max_ene += der_instance.operational_max_energy()
max_ch += der_instance.charge_capacity()
max_dis += der_instance.discharge_capacity()
return max_ch, max_dis, (max_ene, min_ene)
def initialize_optimization_variables(self, size):
"""Function should be called at the beginning of each optimization loop. Initializes optimization variables
Args:
size (int): length of optimization variables_df to create
"""
for der_instance in self.active_ders:
# initialize variables
der_instance.initialize_variables(size)
def get_state_of_system(self, mask):
""" POI method to measure the state of POI depending on available types of DERs. used in SET_UP_OPTIMIZATION
Args:
mask (DataFrame): DataFrame of booleans used, the same length as time_series. The value is true if the
corresponding column in time_series is included in the data to be optimized.
Returns:
aggregation of loads
aggregation of generation from variable resources
aggregation of generation from other sources
total net power from ESSs
total state of energy stored in the system
aggregation of all the power flows into the POI
aggregation of all the power flows out if the POI
aggregation of steam thermal heating power (heat recovered)
aggregation of hotwater thermal heating power (heat recovered)
aggregation of thermal cooling power (cold recovered)
"""
opt_var_size = sum(mask)
load_sum = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
var_gen_sum = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
gen_sum = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero')
tot_net_ess = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero')
total_soe = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero')
agg_power_flows_in = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
agg_power_flows_out = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
agg_steam_heating_power = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
agg_hotwater_heating_power = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
agg_thermal_cooling_power = cvx.Parameter(value=np.zeros(opt_var_size), shape=opt_var_size, name='POI-Zero') # at POI
for der_instance in self.active_ders:
# add the state of the der's power over time & stored energy over time to system's
agg_power_flows_in += der_instance.get_charge(mask)
agg_power_flows_out += der_instance.get_discharge(mask)
if der_instance.technology_type == 'Load':
load_sum += der_instance.get_charge(mask)
if der_instance.technology_type == 'Energy Storage System':
total_soe += der_instance.get_state_of_energy(mask)
tot_net_ess += der_instance.get_net_power(mask)
if der_instance.technology_type == 'Generator':
gen_sum += der_instance.get_discharge(mask)
if der_instance.technology_type == 'Intermittent Resource':
var_gen_sum += der_instance.get_discharge(mask)
return load_sum, var_gen_sum, gen_sum, tot_net_ess, total_soe, agg_power_flows_in, agg_power_flows_out, agg_steam_heating_power, agg_hotwater_heating_power, agg_thermal_cooling_power
def combined_discharge_rating_for_reliability(self):
""" Used to create the Reliability power constraint.
Returns: combined rating of ESS and ICE in the system
"""
combined_rating = 0
for der_instance in self.active_ders:
if der_instance.technology_type == 'Energy Storage System':
combined_rating += der_instance.dis_max_rated
if der_instance.technology_type == 'Generator':
combined_rating += der_instance.rated_power
return combined_rating
def load_balance_constraint_for_reliability(self):
""" Used to create the Reliability power constraint.
Returns: combined rating of ESS and ICE in the system
"""
combined_rating = 0
for der_instance in self.active_ders:
if der_instance.technology_type == 'Energy Storage System':
combined_rating += der_instance.dis_max_rated
if der_instance.technology_type == 'Generator':
combined_rating += der_instance.rated_power
return combined_rating
    def optimization_problem(self, mask, power_in, power_out, steam_in, hotwater_in, cold_in, annuity_scalar=1):
        """ Builds the master POI constraint list for the subset of time series data being optimized.

        Collects each active DER's operational constraints and objective costs,
        then layers on POI-level coupling rules: the PV grid-charge restriction,
        the shared inverter rating for DC-coupled PV + ESS, and the max
        import/export limits at the POI.

        Args:
            mask (DataFrame): A boolean array that is true for indices corresponding to time_series data included
                in the subs data set
            power_in (cvx.Expression): aggregate power flowing into the POI
            power_out (cvx.Expression): aggregate power flowing out of the POI
            steam_in (cvx.Expression): NOTE(review) -- unused in this method;
                presumably reserved for thermal balancing. Confirm with callers.
            hotwater_in (cvx.Expression): unused here (see steam_in)
            cold_in (cvx.Expression): unused here (see steam_in)
            annuity_scalar (float): a scalar value to be multiplied by any yearly cost or benefit that helps capture
                the cost/benefit over the entire project lifetime (only to be set iff sizing)

        Returns:
            A dictionary with the portion of the objective function that it affects, labeled by the expression's key.
            A list of constraints being set by the POI: power reservations, control constraints requirements,
            max import, max export, etc.
        """
        constraint_list = []
        opt_size = sum(mask)
        obj_expression = {}  # dict of objective costs
        # deal with grid_charge constraint btw ESS and PV:
        # if ANY PV disallows grid charging, total ESS charging is limited to PV output
        total_pv_out_ess_can_charge_from = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POIZero')
        total_ess_charge = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POIZero')
        allow_charge_from_grid = True
        # deal with inverter constraint btw ess and any PV marked as dc coupled:
        # DC-coupled PV and ESS share inverter capacity, so their combined power is capped
        total_pv_out_dc = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POIZero')
        net_ess_power = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POIZero')
        agg_inv_max = 0  # summed inverter rating of all DC-coupled PV
        dc_coupled_pvs = False
        for der_instance in self.active_ders:
            # add all operational constraints
            constraint_list += der_instance.constraints(mask)
            # add DER cost funcs
            obj_expression.update(der_instance.objective_function(mask, annuity_scalar))
            if der_instance.tag == 'PV':
                if not der_instance.grid_charge:
                    # a single grid_charge=False PV restricts ALL ESS charging to PV output
                    allow_charge_from_grid = False
                    total_pv_out_ess_can_charge_from += der_instance.get_discharge(mask)
                if der_instance.loc == 'dc':
                    dc_coupled_pvs = True
                    total_pv_out_dc += der_instance.get_discharge(mask)
                    agg_inv_max += der_instance.inv_max
            if der_instance.technology_type == 'Energy Storage System':
                net_ess_power += der_instance.get_net_power(mask)
                total_ess_charge += der_instance.get_charge(mask)
        if not allow_charge_from_grid:  # add grid charge constraint
            # total ESS charging must be covered by PV output: ess_charge <= pv_out
            constraint_list += [cvx.NonPos(total_ess_charge - total_pv_out_ess_can_charge_from)]
        if dc_coupled_pvs:  # add dc coupling constraints
            # bound (pv_out - net_ess_power) within +/- aggregate inverter rating
            constraint_list += [cvx.NonPos(total_pv_out_dc + (-1) * net_ess_power - agg_inv_max)]
            constraint_list += [cvx.NonPos(-agg_inv_max - total_pv_out_dc + net_ess_power)]
        # power import/export constraints
        if self.apply_poi_constraints:
            # -(power_in) >= max_import  (max_import is negative by convention)
            constraint_list += [cvx.NonPos(self.max_import + power_in + (-1)*power_out)]
            # max_export >= power_out
            constraint_list += [cvx.NonPos(power_out + (-1)*power_in - self.max_export)]
        return obj_expression, constraint_list
def aggregate_p_schedules(self, mask):
""" POI method to add up the discharge power schedules from DERs. The 'power schedule'
defines how much of the technology's discharge capacity can be bid to increase the amount
of power being delivered (UP) or can be bid to decrease the amount of power being delivered
(DOWN) in the electrical grid that the POI is connected to.
DERVET's logic does not allow intermittent resources to participate in ancillary markets
Args:
mask (DataFrame): DataFrame of booleans used, the same length as time_series. The value is true if the
corresponding column in time_series is included in the data to be optimized.
Returns:
total ability to pull power down from the grid by discharging less
total ability to push power up into the grid by discharging more
total ability to pull power down from the grid by charging more
total ability to push power up into the grid by charging less
total energy stored/delivered during sub-time-step activities
"""
opt_size = sum(mask)
agg_dis_up = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POI-Zero')
agg_dis_down = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POI-Zero')
agg_ch_up = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POI-Zero')
agg_ch_down = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POI-Zero')
uenergy_incr = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POI-Zero')
uenergy_decr = cvx.Parameter(value=np.zeros(opt_size), shape=opt_size, name='POI-Zero')
uenergy_thru = cvx.Parameter(value=np.zeros(sum(mask)), shape=sum(mask), name='ServiceAggZero')
for der_in_market_participation in self.active_ders:
if der_in_market_participation.can_participate_in_market_services:
agg_ch_up += der_in_market_participation.get_charge_up_schedule(mask)
agg_ch_down += der_in_market_participation.get_charge_down_schedule(mask)
agg_dis_up += der_in_market_participation.get_discharge_up_schedule(mask)
agg_dis_down += der_in_market_participation.get_discharge_down_schedule(mask)
uenergy_incr += der_in_market_participation.get_uenergy_increase(mask)
uenergy_decr += der_in_market_participation.get_uenergy_decrease(mask)
uenergy_thru += der_in_market_participation.get_delta_uenegy(mask)
return agg_dis_down, agg_dis_up, agg_ch_down, agg_ch_up, uenergy_decr, uenergy_incr, uenergy_thru
def merge_reports(self, is_dispatch_opt, index):
    """Collect and merge the per-DER optimization results into one timeseries frame.

    Args:
        is_dispatch_opt: flag passed in by the caller.
            NOTE(review): not referenced anywhere in this body -- confirm whether
            it can be dropped or was meant to gate part of the aggregation.
        index: index (time stamps) for the resulting DataFrame.

    Returns:
        tuple: (results, monthly_data) where ``results`` is a timeseries DataFrame
        with user-friendly column headers summarizing all DERs, and
        ``monthly_data`` concatenates each DER's monthly report side by side.
    """
    results = pd.DataFrame(index=index)
    monthly_data = pd.DataFrame()
    # initialize all the data columns that will ALWAYS be present in our results
    results.loc[:, 'Total Load (kW)'] = 0
    results.loc[:, 'Total Generation (kW)'] = 0
    results.loc[:, 'Total Storage Power (kW)'] = 0
    results.loc[:, 'Aggregated State of Energy (kWh)'] = 0
    for der_instance in self.der_list:
        # each DER contributes its own columns; concat keeps them alongside the totals
        report_df = der_instance.timeseries_report()
        results = pd.concat([report_df, results], axis=1)
        # accumulate per-technology totals into the summary columns
        if der_instance.technology_type in ['Generator', 'Intermittent Resource']:
            results.loc[:, 'Total Generation (kW)'] += results[f'{der_instance.unique_tech_id()} Electric Generation (kW)']
        if der_instance.technology_type == 'Energy Storage System':
            results.loc[:, 'Total Storage Power (kW)'] += results[f'{der_instance.unique_tech_id()} Power (kW)']
            results.loc[:, 'Aggregated State of Energy (kWh)'] += results[f'{der_instance.unique_tech_id()} State of Energy (kWh)']
        if der_instance.technology_type == 'Load':
            results.loc[:, 'Total Load (kW)'] += results[f'{der_instance.unique_tech_id()} Original Load (kW)']
        report = der_instance.monthly_report()
        monthly_data = pd.concat([monthly_data, report], axis=1, sort=False)
    # assumes the original net load only does not contain the Storage system
    # net load is the load seen at the POI
    results.loc[:, 'Net Load (kW)'] = results.loc[:, 'Total Load (kW)'] - results.loc[:, 'Total Generation (kW)'] - results.loc[:, 'Total Storage Power (kW)']
    return results, monthly_data
def technology_summary(self):
    """Summarize the DERs at this POI as a table of (name, technology type).

    Returns:
        pd.DataFrame: one row per DER in ``self.der_list``, indexed by the
        DER's name, with a single 'Type' column holding its technology type.
    """
    der_summary = {'Type': [], 'Name': []}
    for der_object in self.der_list:
        der_summary['Type'].append(der_object.technology_type)
        der_summary['Name'].append(der_object.name)
    technology_summary = pd.DataFrame(der_summary)
    # BUG FIX: DataFrame.set_index returns a NEW frame; the original code
    # discarded that result, so the returned table was never actually
    # indexed by name. Return the indexed frame instead.
    return technology_summary.set_index('Name')
def drill_down_dfs(self, **kwargs):
    """Gather the value-stream-specific drill-down reports of every DER.

    Args:
        kwargs: dictionary of dataframes produced by COLLECT_RESULTS, passed
            through to each DER's ``drill_down_reports``.

    Returns:
        dict: merged mapping of file name -> DataFrame across all DERs; later
        DERs overwrite earlier ones on key collisions.
    """
    reports = {}
    for der_instance in self.der_list:
        reports.update(der_instance.drill_down_reports(**kwargs))
    return reports
| [
37811,
198,
15269,
357,
66,
8,
33448,
11,
13944,
4333,
4992,
5136,
628,
1439,
2489,
10395,
13,
628,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
17613,
11,
198,
389,
10431,
2810,
326,
262,
1708,
3403,
... | 2.473149 | 7,374 |
from graph_ter_cls.transforms.global_rotate import GlobalRotate
from graph_ter_cls.transforms.global_shear import GlobalShear
from graph_ter_cls.transforms.global_translate import GlobalTranslate
from graph_ter_cls.transforms.local_rotate import LocalRotate
from graph_ter_cls.transforms.local_shear import LocalShear
from graph_ter_cls.transforms.local_translate import LocalTranslate
from graph_ter_cls.transforms.transformer import Transformer
| [
6738,
4823,
62,
353,
62,
565,
82,
13,
7645,
23914,
13,
20541,
62,
10599,
378,
1330,
8060,
24864,
378,
198,
6738,
4823,
62,
353,
62,
565,
82,
13,
7645,
23914,
13,
20541,
62,
7091,
283,
1330,
8060,
3347,
283,
198,
6738,
4823,
62,
35... | 3.262774 | 137 |
from Bio import AlignIO

# Load multiple-sequence alignments from NEXUS files under ../input.
# NOTE(review): paths are relative to the working directory -- these reads
# fail unless the script is launched from its own folder; confirm intent.
pah_paralogues = AlignIO.read("../input/Human_PAH_paralogues.nex", "nexus")
h2bfs_paralogues = AlignIO.read("../input/Human_H2BFS_paralogues.nex", "nexus")
pah_orthologues_30 = AlignIO.read("../input/Human_PAH_orthologues_30.nex", "nexus")
| [
6738,
16024,
1330,
978,
570,
9399,
628,
198,
79,
993,
62,
1845,
11794,
947,
796,
978,
570,
9399,
13,
961,
7203,
40720,
15414,
14,
20490,
62,
4537,
39,
62,
1845,
11794,
947,
13,
12413,
1600,
366,
44520,
4943,
198,
71,
17,
65,
9501,
... | 2.440367 | 109 |
import sqlite3
import cabalutil
from sopel import formatting
from urllib.parse import urlparse
| [
11748,
44161,
578,
18,
198,
11748,
47548,
22602,
198,
6738,
264,
404,
417,
1330,
33313,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
628,
628,
628,
628,
198
] | 3.433333 | 30 |
#!/usr/bin/python3
# -*- coding: utf8 -*-
import tkinter as tk
# Instance
import tk_gui_tools.manage_widget as manage_widget
# Heritage
from tk_gui_tools.command_gui.mouse import Mouse
from tk_gui_tools.command_gui.keyboard import Keyboard
from tk_gui_tools.command_gui.command import CommandGUI
from tk_gui_tools.tool.scroolbar import ScrollBar
class Base(Mouse, Keyboard, CommandGUI, ScrollBar):
    """
    Base is a pre-filled template to inherit and use command functionality.

    Mixes in mouse, keyboard and GUI-command handling plus a scrollbar; the
    mixins are expected to provide ``self.canvas`` (a tkinter canvas) --
    nothing in this class creates it.
    """

    def update(self):
        """
        Update the template by flushing tkinter's pending geometry/idle tasks
        on the canvas (does not trigger a full event-loop iteration).
        """
        self.canvas.update_idletasks()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
198,
2,
2262,
590,
198,
11748,
256,
74,
62,
48317,
62,
31391,
13,
805,
496,
... | 2.71875 | 224 |
from kubernetes import client, config, watch
from kubernetes.client.rest import ApiException
import os,sys
import re
import argparse
# Command-line interface: name of the Secret/ConfigMap to create, the source
# directory to read files from, and optional file pattern / namespace filters.
parser = argparse.ArgumentParser(sys.argv[0])
#parser.add_argument("--configmap", help="Create configmap (default: 'Secret')", action='store_true',default=False)
parser.add_argument("--name", help="Name of Secret/Configmap", type=str,required=True)
parser.add_argument("--dir", help="Source directory path", type=str,required=True)
parser.add_argument("--file_pattern", help="File pattern for match files (default: '.*')", type=str,default=".*")
parser.add_argument("--namespace", help="Name of Namespace", type=str)
args = parser.parse_args()
#print(args.configmap)
# Echo the parsed arguments (runs at import time, not only under __main__).
print("name: "+ args.name)
if args.namespace:
    print("namespace: "+args.namespace)
print("dir: "+args.dir)
print("file_pattern: "+args.file_pattern)

if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this module as shown --
    # running the script directly would raise NameError. Confirm whether main
    # was meant to wrap the logic above or is defined elsewhere.
    main()
6738,
479,
18478,
3262,
274,
1330,
5456,
11,
4566,
11,
2342,
198,
6738,
479,
18478,
3262,
274,
13,
16366,
13,
2118,
1330,
5949,
72,
16922,
198,
11748,
28686,
11,
17597,
198,
11748,
302,
198,
11748,
1822,
29572,
198,
48610,
796,
1822,
... | 3.071918 | 292 |
# Note: After all the names add the word STOP on the next row in the first column
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import pandas as pd
# Initialize firebase admin sdk
# NOTE(review): both paths below are placeholders -- fill in before running.
cred = credentials.Certificate('path-to-acm-core-json')
firebase_admin.initialize_app(cred)
# read excel
officer_doc_path = 'path-to-officer-spreadsheet'
df = pd.read_excel(officer_doc_path, header=0)
# Initialize firestore
db = firestore.client()
for index, entry in df.iterrows():
    # Only read until where valid data is present
    # ('STOP' in the first column is the sentinel that ends the sheet).
    if(entry['First Name'] == 'STOP'):
        break
    # search up officer that has the correct first + last name
    query_results = db.collection(u'officer').where(u'name', u'==', entry['First Name'] + " " + entry['Last Name']).get()
    # save linkedin and email fields to variables
    linkedin_url = entry['LinkedIn Profile URL']
    email = entry['Personal Email']
    # Check whether query returned results and if so write to that document
    # (only the first match is updated; rows with no match are silently skipped).
    if (len(query_results) > 0):
        doc_ref = query_results[0]
        db.collection(u'officer').document(doc_ref.id).update({u'linkedin': linkedin_url, u'email': email})
| [
2,
5740,
25,
2293,
477,
262,
3891,
751,
262,
1573,
44934,
319,
262,
1306,
5752,
287,
262,
717,
5721,
198,
198,
11748,
2046,
8692,
62,
28482,
198,
6738,
2046,
8692,
62,
28482,
1330,
18031,
198,
6738,
2046,
8692,
62,
28482,
1330,
2046,
... | 3.002506 | 399 |
from __future__ import print_function
import math
from compas.geometry import cross_vectors
from compas.geometry import subtract_vectors
from compas.geometry import matrix_from_basis_vectors
from compas.geometry import basis_vectors_from_matrix
from compas.geometry import quaternion_from_matrix
from compas.geometry import matrix_from_quaternion
from compas.geometry import axis_angle_vector_from_matrix
from compas.geometry import matrix_from_axis_angle_vector
from compas.geometry import euler_angles_from_matrix
from compas.geometry import matrix_from_euler_angles
from compas.geometry import decompose_matrix
from compas.geometry import Transformation
from compas.geometry import argmax
from compas.geometry.primitives import Primitive
from compas.geometry.primitives import Point
from compas.geometry.primitives import Vector
from compas.geometry.primitives import Quaternion
__all__ = ['Frame']
class Frame(Primitive):
    """A frame is defined by a base point and two orthonormal base vectors.

    Parameters
    ----------
    point : point
        The origin of the frame.
    xaxis : vector
        The x-axis of the frame.
    yaxis : vector
        The y-axis of the frame.

    Attributes
    ----------
    data : dict
        The data representation of the frame.
    point : :class:`compas.geometry.Point`
        The base point of the frame.
    xaxis : :class:`compas.geometry.Vector`
        The local X axis of the frame.
    yaxis : :class:`compas.geometry.Vector`
        The local Y axis of the frame.
    zaxis : :class:`compas.geometry.Vector`
        The local Z axis of the frame.
    normal : :class:`compas.geometry.Vector`
        The normal vector of the base plane of the frame.
    quaternion : :class:`compas.geometry.Quaternion`
        The quaternion representing the rotation of the frame.
    axis_angle_vector : :class:`compas.geometry.Vector`
        The rotation vector of the frame.

    Notes
    -----
    All input vectors are orthonormalized when creating a frame, with the first
    vector as starting point.

    Examples
    --------
    >>> from compas.geometry import Point
    >>> from compas.geometry import Vector
    >>> f = Frame([0, 0, 0], [1, 0, 0], [0, 1, 0])
    >>> f = Frame(Point(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0))
    """

    # NOTE(review): no ``__init__`` is visible in this copy of the class, yet
    # the alternative constructors below all call ``cls(point, xaxis, yaxis)``.
    # The constructor (and several property-setter bodies below) appear to have
    # been dropped from this excerpt -- confirm against the upstream source.

    @property
    def data(self):
        """dict : The data dictionary that represents the frame."""
        return {'point': list(self.point),
                'xaxis': list(self.xaxis),
                'yaxis': list(self.yaxis)}

    @data.setter
    # NOTE(review): the body of this setter (``def data(self, data): ...``) is
    # missing in this copy; as written the decorator stacks onto the next
    # property, which cannot be intended.
    @property
    def point(self):
        """:class:`compas.geometry.Point` : The base point of the frame."""
        return self._point

    @point.setter
    # NOTE(review): setter body missing in this copy -- see note on data.setter.
    @property
    def xaxis(self):
        """:class:`compas.geometry.Vector` : The local X axis of the frame."""
        return self._xaxis

    @xaxis.setter
    # NOTE(review): setter body missing in this copy -- see note on data.setter.
    @property
    def yaxis(self):
        """:class:`compas.geometry.Vector` : The local Y axis of the frame."""
        return self._yaxis

    @yaxis.setter
    # NOTE(review): setter body missing in this copy -- see note on data.setter.
    @property
    def normal(self):
        """:class:`compas.geometry.Vector` : The normal of the base plane of the frame."""
        return Vector(*cross_vectors(self.xaxis, self.yaxis))

    @property
    def zaxis(self):
        """:class:`compas.geometry.Vector` : The Z axis of the frame."""
        # Z is the normal of the XY base plane by definition.
        return self.normal

    @property
    def quaternion(self):
        """:class:`compas.geometry.Quaternion` : The quaternion from the rotation given by the frame.
        """
        R = matrix_from_basis_vectors(self.xaxis, self.yaxis)
        return Quaternion(*quaternion_from_matrix(R))

    @property
    def axis_angle_vector(self):
        """:class:`compas.geometry.Vector` : The axis-angle vector representing the rotation of the frame."""
        R = matrix_from_basis_vectors(self.xaxis, self.yaxis)
        return Vector(*axis_angle_vector_from_matrix(R))

    # ==========================================================================
    # customization
    # ==========================================================================
    # NOTE(review): this section is empty in this copy -- dunder methods
    # (__repr__, __eq__, ...) appear to have been elided by the extraction.

    # ==========================================================================
    # constructors
    # ==========================================================================

    @classmethod
    def from_data(cls, data):
        """Construct a frame from its data representation.

        Parameters
        ----------
        data : :obj:`dict`
            The data dictionary.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> data = {'point': [0.0, 0.0, 0.0], 'xaxis': [1.0, 0.0, 0.0], 'yaxis': [0.0, 1.0, 0.0]}
        >>> frame = Frame.from_data(data)
        >>> frame.point
        Point(0.000, 0.000, 0.000)
        >>> frame.xaxis
        Vector(1.000, 0.000, 0.000)
        >>> frame.yaxis
        Vector(0.000, 1.000, 0.000)
        """
        frame = cls(data['point'], data['xaxis'], data['yaxis'])
        return frame

    @classmethod
    def worldXY(cls):
        """Construct the world XY frame.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The world XY frame.

        Examples
        --------
        >>> frame = Frame.worldXY()
        >>> frame.point
        Point(0.000, 0.000, 0.000)
        >>> frame.xaxis
        Vector(1.000, 0.000, 0.000)
        >>> frame.yaxis
        Vector(0.000, 1.000, 0.000)
        """
        return cls([0, 0, 0], [1, 0, 0], [0, 1, 0])

    @classmethod
    def worldZX(cls):
        """Construct the world ZX frame.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The world ZX frame.

        Examples
        --------
        >>> frame = Frame.worldZX()
        >>> frame.point
        Point(0.000, 0.000, 0.000)
        >>> frame.xaxis
        Vector(0.000, 0.000, 1.000)
        >>> frame.yaxis
        Vector(1.000, 0.000, 0.000)
        """
        return cls([0, 0, 0], [0, 0, 1], [1, 0, 0])

    @classmethod
    def worldYZ(cls):
        """Construct the world YZ frame.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The world YZ frame.

        Examples
        --------
        >>> frame = Frame.worldYZ()
        >>> frame.point
        Point(0.000, 0.000, 0.000)
        >>> frame.xaxis
        Vector(0.000, 1.000, 0.000)
        >>> frame.yaxis
        Vector(0.000, 0.000, 1.000)
        """
        return cls([0, 0, 0], [0, 1, 0], [0, 0, 1])

    @classmethod
    def from_points(cls, point, point_xaxis, point_xyplane):
        """Constructs a frame from 3 points.

        Parameters
        ----------
        point : point
            The origin of the frame.
        point_xaxis : point
            A point on the x-axis of the frame.
        point_xyplane : point
            A point within the xy-plane of the frame.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> frame = Frame.from_points([0, 0, 0], [1, 0, 0], [0, 1, 0])
        >>> frame.point
        Point(0.000, 0.000, 0.000)
        >>> frame.xaxis
        Vector(1.000, 0.000, 0.000)
        >>> frame.yaxis
        Vector(0.000, 1.000, 0.000)
        """
        xaxis = subtract_vectors(point_xaxis, point)
        xyvec = subtract_vectors(point_xyplane, point)
        # double cross product re-orthogonalizes the in-plane vector against xaxis
        yaxis = cross_vectors(cross_vectors(xaxis, xyvec), xaxis)
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_rotation(cls, rotation, point=[0, 0, 0]):
        # NOTE(review): mutable default argument (list). Harmless only while it
        # is never mutated; the conventional fix is ``point=None``. The same
        # pattern recurs in from_quaternion/from_axis_angle_vector/from_euler_angles.
        """Constructs a frame from a ``Rotation``.

        Parameters
        ----------
        rotation : :class:`Rotation`
            The rotation defines the orientation of the frame.
        point : list of float, optional
            The origin of the frame.
            Defaults to ``[0, 0, 0]``.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> from compas.geometry import Rotation
        >>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> R = Rotation.from_frame(f1)
        >>> f2 = Frame.from_rotation(R, point=f1.point)
        >>> f1 == f2
        True
        """
        xaxis, yaxis = rotation.basis_vectors
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_transformation(cls, transformation):
        """Constructs a frame from a ``Transformation``.

        Parameters
        ----------
        transformation : :class:`compas.geometry.Transformation`
            The transformation defines the orientation of the frame through the
            rotation and the origin through the translation.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> from compas.geometry import Transformation
        >>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> T = Transformation.from_frame(f1)
        >>> f2 = Frame.from_transformation(T)
        >>> f1 == f2
        True
        """
        xaxis, yaxis = transformation.basis_vectors
        point = transformation.translation_vector
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_matrix(cls, matrix):
        """Construct a frame from a matrix.

        Parameters
        ----------
        matrix : list of list of float
            The 4x4 transformation matrix in row-major order.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> from compas.geometry import matrix_from_euler_angles
        >>> ea1 = [0.5, 0.4, 0.8]
        >>> M = matrix_from_euler_angles(ea1)
        >>> f = Frame.from_matrix(M)
        >>> ea2 = f.euler_angles()
        >>> allclose(ea1, ea2)
        True
        """
        # decompose to recover translation + Euler angles, then rebuild a pure
        # rotation so scale/shear components of the input matrix are discarded
        _, _, angles, point, _ = decompose_matrix(matrix)
        R = matrix_from_euler_angles(angles, static=True, axes='xyz')
        xaxis, yaxis = basis_vectors_from_matrix(R)
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_list(cls, values):
        """Construct a frame from a list of 12 or 16 float values.

        Parameters
        ----------
        values : list of float
            The list of 12 or 16 values representing a 4x4 matrix.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Raises
        ------
        ValueError
            If the length of the list is neither 12 nor 16.

        Notes
        -----
        Since the transformation matrix follows the row-major order, the
        translational components must be at the list's indices 3, 7, 11.

        Examples
        --------
        >>> l = [-1.0, 0.0, 0.0, 8110, 0.0, 0.0, -1.0, 7020, 0.0, -1.0, 0.0, 1810]
        >>> f = Frame.from_list(l)
        """
        if len(values) == 12:
            # pad a 3x4 matrix with the homogeneous bottom row [0 0 0 1]
            values.extend([0., 0., 0., 1.])
        if len(values) != 16:
            raise ValueError(
                'Expected 12 or 16 floats but got %d' %
                len(values))
        matrix = [[0. for i in range(4)] for j in range(4)]
        for i in range(4):
            for j in range(4):
                matrix[i][j] = float(values[i * 4 + j])
        return cls.from_matrix(matrix)

    @classmethod
    def from_quaternion(cls, quaternion, point=[0, 0, 0]):
        """Construct a frame from a rotation represented by quaternion coefficients.

        Parameters
        ----------
        quaternion : list of float
            Four numbers that represent the four coefficient values of a quaternion.
        point : list of float, optional
            The point of the frame.
            Defaults to [0, 0, 0].

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> q1 = [0.945, -0.021, -0.125, 0.303]
        >>> f = Frame.from_quaternion(q1, point=[1., 1., 1.])
        >>> q2 = f.quaternion
        >>> allclose(q1, q2, tol=1e-03)
        True
        """
        R = matrix_from_quaternion(quaternion)
        xaxis, yaxis = basis_vectors_from_matrix(R)
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_axis_angle_vector(cls, axis_angle_vector, point=[0, 0, 0]):
        """Construct a frame from an axis-angle vector representing the rotation.

        Parameters
        ----------
        axis_angle_vector : list of float
            Three numbers that represent the axis of rotation and angle of
            rotation by its magnitude.
        point : list of float, optional
            The point of the frame.
            Defaults to [0, 0, 0].

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> aav1 = [-0.043, -0.254, 0.617]
        >>> f = Frame.from_axis_angle_vector(aav1, point=[0, 0, 0])
        >>> aav2 = f.axis_angle_vector
        >>> allclose(aav1, aav2)
        True
        """
        R = matrix_from_axis_angle_vector(axis_angle_vector)
        xaxis, yaxis = basis_vectors_from_matrix(R)
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_euler_angles(cls, euler_angles, static=True,
                          axes='xyz', point=[0, 0, 0]):
        """Construct a frame from a rotation represented by Euler angles.

        Parameters
        ----------
        euler_angles : list of float
            Three numbers that represent the angles of rotations about the defined axes.
        static : bool, optional
            If true the rotations are applied to a static frame.
            If not, to a rotational.
            Defaults to true.
        axes : str, optional
            A 3 character string specifying the order of the axes.
            Defaults to 'xyz'.
        point : list of float, optional
            The point of the frame.
            Defaults to ``[0, 0, 0]``.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> ea1 = 1.4, 0.5, 2.3
        >>> f = Frame.from_euler_angles(ea1, static=True, axes='xyz')
        >>> ea2 = f.euler_angles(static=True, axes='xyz')
        >>> allclose(ea1, ea2)
        True
        """
        R = matrix_from_euler_angles(euler_angles, static, axes)
        xaxis, yaxis = basis_vectors_from_matrix(R)
        return cls(point, xaxis, yaxis)

    @classmethod
    def from_plane(cls, plane):
        """Constructs a frame from a plane.

        Xaxis and yaxis are arbitrarily selected based on the plane's normal.

        Parameters
        ----------
        plane : :class:`compas.geometry.Plane`
            A plane.

        Returns
        -------
        :class:`compas.geometry.Frame`
            The constructed frame.

        Examples
        --------
        >>> from compas.geometry import Plane
        >>> plane = Plane([0,0,0], [0,0,1])
        >>> frame = Frame.from_plane(plane)
        >>> allclose(frame.normal, plane.normal)
        True
        """
        point, normal = plane
        # To construct a frame we need to find a vector v that is perpendicular
        # to the plane's normal. This means that the dot-product of v with the
        # normal must be equal to 0, which is true for the following vectors:
        vectors = [Vector(-normal[1], normal[0], 0),
                   Vector(0, -normal[2], normal[1]),
                   Vector(normal[2], 0, -normal[0])]
        # But if we are unlucky, one of these vectors is (0, 0, 0), so we
        # choose the vector with the longest length as xaxis.
        idx = argmax([v.length for v in vectors])
        xaxis = vectors[idx]
        yaxis = cross_vectors(normal, xaxis)
        return cls(point, xaxis, yaxis)

    # ==========================================================================
    # methods
    # ==========================================================================

    def euler_angles(self, static=True, axes='xyz'):
        """The Euler angles from the rotation given by the frame.

        Parameters
        ----------
        static : bool, optional
            If true the rotations are applied to a static frame.
            If not, to a rotational.
            Defaults to True.
        axes : str, optional
            A 3 character string specifying the order of the axes.
            Defaults to 'xyz'.

        Returns
        -------
        list of float
            Three numbers that represent the angles of rotations about the defined axes.

        Examples
        --------
        >>> ea1 = 1.4, 0.5, 2.3
        >>> f = Frame.from_euler_angles(ea1, static=True, axes='xyz')
        >>> ea2 = f.euler_angles(static=True, axes='xyz')
        >>> allclose(ea1, ea2)
        True
        """
        R = matrix_from_basis_vectors(self.xaxis, self.yaxis)
        return euler_angles_from_matrix(R, static, axes)

    def to_local_coordinates(self, object_in_wcf):
        """Returns the object's coordinates in the local coordinate system of the frame.

        Parameters
        ----------
        object_in_wcf : :class:`compas.geometry.Point` or :class:`compas.geometry.Vector` or :class:`compas.geometry.Frame` or list of float
            An object in the world coordinate frame.

        Returns
        -------
        :class:`compas.geometry.Point` or :class:`compas.geometry.Vector` or :class:`compas.geometry.Frame`
            The object in the local coordinate system of the frame.

        Notes
        -----
        If you pass a list of float, it is assumed to represent a point.

        Examples
        --------
        >>> from compas.geometry import Point
        >>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> pw = Point(2, 2, 2) # point in wcf
        >>> pl = frame.to_local_coordinates(pw) # point in frame
        >>> frame.to_world_coordinates(pl)
        Point(2.000, 2.000, 2.000)
        """
        T = Transformation.from_change_of_basis(Frame.worldXY(), self)
        if isinstance(object_in_wcf, list):
            return Point(*object_in_wcf).transformed(T)
        else:
            return object_in_wcf.transformed(T)

    def to_world_coordinates(self, object_in_lcf):
        """Returns the object's coordinates in the global coordinate frame.

        Parameters
        ----------
        object_in_lcf : :class:`compas.geometry.Point` or :class:`compas.geometry.Vector` or :class:`compas.geometry.Frame` or list of float
            An object in local coordinate system of the frame.

        Returns
        -------
        :class:`compas.geometry.Point` or :class:`compas.geometry.Vector` or :class:`compas.geometry.Frame`
            The object in the world coordinate frame.

        Notes
        -----
        If you pass a list of float, it is assumed to represent a point.

        Examples
        --------
        >>> from compas.geometry import Point
        >>> frame = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> pl = Point(1.632, -0.090, 0.573) # point in frame
        >>> pw = frame.to_world_coordinates(pl) # point in wcf
        >>> frame.to_local_coordinates(pw)
        Point(1.632, -0.090, 0.573)
        """
        T = Transformation.from_change_of_basis(self, Frame.worldXY())
        if isinstance(object_in_lcf, list):
            return Point(*object_in_lcf).transformed(T)
        else:
            return object_in_lcf.transformed(T)

    # ?!
    @staticmethod
    def local_to_local_coordinates(frame1, frame2, object_in_frame1):
        """Returns the object's coordinates in frame1 in the local coordinates of frame2.

        Parameters
        ----------
        frame1 : :class:`compas.geometry.Frame`
            A frame representing one local coordinate system.
        frame2 : :class:`compas.geometry.Frame`
            A frame representing another local coordinate system.
        object_in_frame1 : :class:`compas.geometry.Point` or :class:`compas.geometry.Vector` or :class:`compas.geometry.Frame` or list of float
            An object in the coordinate frame1. If you pass a list of float, it is assumed to represent a point.

        Returns
        -------
        :class:`compas.geometry.Point` or :class:`compas.geometry.Vector` or :class:`compas.geometry.Frame`
            The object in the local coordinate system of frame2.

        Examples
        --------
        >>> from compas.geometry import Point
        >>> frame1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> frame2 = Frame([2, 1, 3], [1., 0., 0.], [0., 1., 0.])
        >>> p1 = Point(2, 2, 2) # point in frame1
        >>> p2 = Frame.local_to_local_coordinates(frame1, frame2, p1) # point in frame2
        >>> Frame.local_to_local_coordinates(frame2, frame1, p2)
        Point(2.000, 2.000, 2.000)
        """
        T = Transformation.from_change_of_basis(frame1, frame2)
        if isinstance(object_in_frame1, list):
            return Point(*object_in_frame1).transformed(T)
        return object_in_frame1.transformed(T)

    def transform(self, T):
        """Transform the frame.

        Parameters
        ----------
        T : :class:`compas.geometry.Transformation`
            The transformation.

        Examples
        --------
        >>> from compas.geometry import Transformation
        >>> f1 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
        >>> T = Transformation.from_frame(f1)
        >>> f2 = Frame.worldXY()
        >>> f2.transform(T)
        >>> f1 == f2
        True
        """
        # replace this by function call
        # Compose T with this frame's own transformation, then read the frame
        # back out of the resulting matrix.
        X = T * Transformation.from_frame(self)
        point = X.translation_vector
        xaxis, yaxis = X.basis_vectors
        self.point = point
        self.xaxis = xaxis
        self.yaxis = yaxis
# ==============================================================================
# Main
# ==============================================================================

if __name__ == '__main__':
    import doctest
    # allclose is imported (unused locally) so the doctests above can resolve
    # it via globs=globals().
    from compas.geometry import allclose  # noqa: F401
    doctest.testmod(globs=globals())
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
10688,
198,
198,
6738,
552,
292,
13,
469,
15748,
1330,
3272,
62,
303,
5217,
198,
6738,
552,
292,
13,
469,
15748,
1330,
34128,
62,
303,
5217,
198,
6738,
552,
292,
13,
46... | 2.264938 | 9,874 |
from .base_tests import UnitTest, CompositeTest, MethodTest, test_results_to_score
import numpy as np
import math
def rel_error(x, y):
    """Return the maximum elementwise relative error between *x* and *y*.

    The denominator is floored at 1e-8 to avoid division by zero when both
    inputs are (near) zero.
    """
    denom = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / denom)
class SolverStepTest(UnitTest):
    """Test whether Solver._step() updates the model parameter correctly"""
    # NOTE(review): no test logic is visible here in this copy -- the class
    # body appears truncated by the extraction; confirm against the original.
def test_solver(Solver):
    """Run the SolverTest suite against *Solver* and return its score."""
    suite = SolverTest(Solver)
    outcome = suite()
    return test_results_to_score(outcome)
| [
6738,
764,
8692,
62,
41989,
1330,
11801,
14402,
11,
49355,
14402,
11,
11789,
14402,
11,
1332,
62,
43420,
62,
1462,
62,
26675,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
628,
198,
4299,
823,
62,
18224,
7,
87,
11,
331,
2599,... | 2.813953 | 172 |
# Standard libraries.
import ctypes
import threading
# Third-party libraries.
# None
# Internal libraries.
from maprclient.maprclient import * # All functions are prefixed with "hb_".
# For table scans.
# Shared state between the main thread and the libhbase callback thread:
# the condition variable signals when the scan has fully drained.
scan_mutex = threading.Lock()
scan_cv = threading.Condition(scan_mutex)
is_scan_done = False


# ctypes prototype must match the C signature exactly (see docstring below);
# the decorated Python function becomes a C-callable function pointer.
@ctypes.CFUNCTYPE(None,             # void (return)
                  ctypes.c_int,     # int32_t err
                  ctypes.c_void_p,  # hb_client_t client
                  ctypes.c_void_p)  # void *extra
def client_disconnection_callback(err, client, extra):
    """
    static void
    client_disconnection_callback(
      int32_t err,
      hb_client_t client,
      void *extra
    )
    """
    # Intentionally a no-op: nothing to clean up on disconnect.
    pass
# C-callable completion callback for hb_scanner_next; re-arms itself until the
# scanner reports no more rows.
@ctypes.CFUNCTYPE(None,             # void (return)
                  ctypes.c_int,     # int32_t err
                  ctypes.c_void_p,  # hb_scanner_t scanner
                  ctypes.c_void_p,  # hb_result_t results[]
                  ctypes.c_size_t,  # size_t num_results
                  ctypes.c_void_p)  # void *extra
def scan_callback(err, scanner, results, num_results, extra):
    """
    void scan_callback(
      int32_t err,
      hb_scanner_t scanner,
      hb_result_t results[],
      size_t num_results,
      void *extra
    )
    """
    global is_scan_done
    global scan_cv
    global scan_mutex
    if num_results <= 0:
        # End of scan: release the native scanner and wake the waiting thread.
        hb_scanner_destroy(scanner, None, None)
        scan_mutex.acquire()
        is_scan_done = True
        scan_cv.notify()
        scan_mutex.release()
        return
    # Reinterpret the opaque results pointer as an array of hb_result_t.
    result_array = ctypes.cast(results, ctypes.POINTER(ctypes.c_void_p))
    for i in range(0, num_results):
        print_row(result_array[i])
        hb_result_destroy(result_array[i])
    # Request the next batch; this callback fires again when it arrives.
    hb_scanner_next(scanner, scan_callback, None)
if __name__ == '__main__':
    # main is not defined in this file; presumably it comes from the star
    # import of maprclient.maprclient above -- TODO confirm.
    main()

# EOF
| [
2,
8997,
12782,
13,
198,
11748,
269,
19199,
198,
11748,
4704,
278,
198,
198,
2,
10467,
12,
10608,
12782,
13,
198,
2,
6045,
198,
198,
2,
18628,
12782,
13,
198,
6738,
3975,
81,
16366,
13,
8899,
81,
16366,
1330,
1635,
1303,
1439,
5499,... | 2.031008 | 903 |
# Fireworks Damage Skin
# Script context: `sm` is the script manager object injected by the game
# server runtime -- TODO confirm; it is not defined in this file.
success = sm.addDamageSkin(2435046)  # 2435046: item ID of the Fireworks damage skin
if success:
    sm.chat("The Fireworks Damage Skin has been added to your account's damage skin collection.")
| [
2,
3764,
5225,
8995,
17847,
198,
13138,
796,
895,
13,
2860,
22022,
42455,
7,
1731,
14877,
3510,
8,
198,
361,
1943,
25,
198,
220,
220,
220,
895,
13,
17006,
7203,
464,
3764,
5225,
8995,
17847,
468,
587,
2087,
284,
534,
1848,
338,
2465... | 3.617021 | 47 |
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 16:03:53 2019
@author: kuche_000
Get Started with TF Ranking
"""
# Import needed Packages:
import tensorflow as tf
import tensorflow_ranking as tfr
# TF1.x-style eager setup; enable_eager_execution does not exist in TF2.
tf.enable_eager_execution()
# NOTE(review): the boolean returned here is discarded -- presumably meant as
# a sanity check; wrap in print()/assert to make it effective.
tf.executing_eagerly()

# (1) Define all flexible Parameters-------------------------------------------
# Get the [dummy] dataset and save its paths! [Train- & Test-Set]
# ['https://github.com/tensorflow/ranking/blob/master/tensorflow_ranking/examples/data/train.txt']
# Store the paths to files containing training and test instances.
# !!! Data in LibSVM format and content of each file is sorted by query ID !!!
# (machine-specific absolute paths -- adjust before running elsewhere)
_TRAIN_DATA_PATH = "C:/Users/kuche_000/Desktop/KDD - Own/ranking-master/tensorflow_ranking/examples/data/train.txt"
_TEST_DATA_PATH = "C:/Users/kuche_000/Desktop/KDD - Own/ranking-master/tensorflow_ranking/examples/data/test.txt"

# Define a loss function.
# complete list of available functions or add own function:
# refer to the tensorflow_ranking.losses module.
_LOSS = "pairwise_logistic_loss"

# A training instance is represented by a Tensor that contains features from a
# list of documents associated with a single query.
# For simplicity, we fix the shape of these Tensors to a maximum
# list size ["list_size"], the maximum number of documents per query in the dataset.
# In this demo, we take the following approach:
#   * If a query has fewer documents, its Tensor will be padded
#     appropriately.
#   * If a query has more documents, we shuffle its list of
#     documents and trim the list down to the prescribed list_size.
_LIST_SIZE = 4

# The total number of features per query-document pair.
# We set this number to the number of features in the MSLR-Web30K
# dataset.
_NUM_FEATURES = 136

# Parameters to the scoring function
_BATCH_SIZE = 12
_HIDDEN_LAYER_DIMS = ["5", "12"]  # layer widths as strings; cast to int at use site
# (2) Input Pipeline-----------------------------------------------------------
# input pipeline that reads your dataset and produces a tensorflow.data.Dataset object
# parameterize function w( a path argument so it can be used to read in train & test set!
# LibSVM parser that is included in the tensorflow_ranking.data module to
# generate a Dataset from a given file
def input_fn(path):
    """Read a LibSVM file and return the next batch of (features, labels).

    Args:
        path (str): path to the LibSVM-formatted data file, sorted by query ID.

    Returns:
        A (features, labels) pair from a shuffled, repeated, batched
        tf.data.Dataset; features is a dict {feature-id string: tensor of
        shape [_LIST_SIZE, 1]} and labels has shape [_LIST_SIZE].
    """
    train_dataset = tf.data.Dataset.from_generator(
        tfr.data.libsvm_generator(path, _NUM_FEATURES, _LIST_SIZE),
        # Define the Types of the Input:
        output_types=(
            {str(k): tf.float32 for k in range(1,_NUM_FEATURES+1)},
            tf.float32
        ),
        # Define the Shapes of the Dataset!
        output_shapes=(
            {str(k): tf.TensorShape([_LIST_SIZE, 1])
             for k in range(1,_NUM_FEATURES+1)},
            tf.TensorShape([_LIST_SIZE])
        )
    )
    # shuffle buffer of 1000 queries, repeat indefinitely, then batch
    train_dataset = train_dataset.shuffle(1000).repeat().batch(_BATCH_SIZE)
    # TF1-style iterator API (deprecated in TF2; iterate the dataset directly there)
    return train_dataset.make_one_shot_iterator().get_next()

# NOTE(review): module-level smoke-test call; t2 is never used afterwards and
# this reads the training file at import time -- consider removing.
t2 = input_fn(_TRAIN_DATA_PATH)
# Scoring Function ------------------------------------------------------------
# compute a relevance score for a (set of) query-document pair(s).
# The TF-Ranking model will use training data to learn this function.
# Function to convert features to 'tf.feature_column' [so they have right type!]
def example_feature_columns():
    """Build the per-document feature columns.

    Returns a mapping from feature name ("1" .. str(_NUM_FEATURES)) to a
    numeric tf.feature_column of shape (1,) with default value 0.0.
    Only numeric features are supported for now.
    """
    names = [str(i) for i in range(1, _NUM_FEATURES + 1)]
    return {
        name: tf.feature_column.numeric_column(
            name, shape=(1,), default_value=0.0)
        for name in names
    }
def make_score_fn():
    """Return a scoring function to build an `EstimatorSpec`.

    The returned function implements a small feed-forward tanh network that
    maps a document's flattened feature tensors to one relevance logit.
    """
    def _score_fn(context_features, group_features, mode, params, config):
        """Score a group of documents with a dense tanh network."""
        # This scorer is stateless w.r.t. params/config; drop them explicitly.
        del params
        del config
        # Flatten each per-feature tensor and concatenate into a single
        # input row per document (feature order fixed by sorted names).
        example_input = [
            tf.layers.flatten(group_features[name])
            for name in sorted(example_feature_columns())
        ]
        cur_layer = tf.concat(example_input, 1)
        # Hidden stack. Fix: the original used `enumerate` but never read
        # the index, so iterate over the layer widths directly.
        for layer_width in (int(d) for d in _HIDDEN_LAYER_DIMS):
            cur_layer = tf.layers.dense(
                cur_layer,
                units=layer_width,
                activation="tanh")
        # Final linear projection to a single relevance logit per document.
        logits = tf.layers.dense(cur_layer, units=1)
        return logits
    return _score_fn
# (3) Evaluation Metric--------------------------------------------------------
#
def eval_metric_fns():
    """Build the evaluation metrics used during `Estimator.evaluate`.

    Additional metrics can be registered here. When doing so, take care to
    mask out padded list entries first, e.g.::

        def _auc(labels, predictions, features):
            is_label_valid = tf.reshape(tf.greater_equal(labels, 0.), [-1, 1])
            clean_labels = tf.boolean_mask(
                tf.reshape(labels, [-1, 1]), is_label_valid)
            clean_pred = tf.boolean_mask(
                tf.reshape(predictions, [-1, 1]), is_label_valid)
            return tf.metrics.auc(clean_labels, tf.sigmoid(clean_pred), ...)
        metric_fns["auc"] = _auc

    Returns:
        A dict mapping metric names (e.g. "metric/ndcg@5") to metric
        functions with the standard TF-Ranking metric signature.
    """
    # NDCG at several cutoffs is the only metric family registered for now.
    cutoffs = [1, 3, 5, 10]
    return {
        "metric/ndcg@%d" % cutoff: tfr.metrics.make_ranking_metric_fn(
            tfr.metrics.RankingMetricKey.NDCG, topn=cutoff)
        for cutoff in cutoffs
    }
# PUT IT ALL TOGETHER ---------------------------------------------------------
# ready to put all of the components above together and create an Estimator that
# can be used to train and evaluate a model
def get_estimator(hparams):
    """Assemble a groupwise TF-Ranking estimator.

    Args:
        hparams: (tf.contrib.training.HParams) hyperparameters object; only
            `learning_rate` is read here.

    Returns:
        A configured `tf.estimator.Estimator`.
    """
    def _make_train_op(loss):
        """Create the Adagrad train op attached to the ranking head."""
        return tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.train.get_global_step(),
            learning_rate=hparams.learning_rate,
            optimizer="Adagrad")

    # Ranking head ties together the loss, the eval metrics and the train op.
    head = tfr.head.create_ranking_head(
        loss_fn=tfr.losses.make_loss_fn(_LOSS),
        eval_metric_fns=eval_metric_fns(),
        train_op_fn=_make_train_op)
    # Groupwise model with group_size=1, i.e. documents scored one at a time.
    model_fn = tfr.model.make_groupwise_ranking_fn(
        group_score_fn=make_score_fn(),
        group_size=1,
        transform_fn=None,
        ranking_head=head)
    return tf.estimator.Estimator(model_fn=model_fn, params=hparams)
# Instantiate the Estimator defined above; 0.05 is the Adagrad learning rate.
hparams = tf.contrib.training.HParams(learning_rate=0.05)
ranker = get_estimator(hparams)
# input_fn(PATH) builds the (features, labels) pipeline for a given file.
ranker.train(input_fn = lambda: input_fn(_TRAIN_DATA_PATH), steps = 25)
lol = ranker.evaluate(input_fn=lambda: input_fn(_TEST_DATA_PATH), steps=15)
# Inspect results — NOTE(review): the next two bare expressions discard
# their return values; presumably left over from interactive exploration.
lol.values()
lol.keys()
lol["labels_mean"] # observed 0.77777780 with steps=10
# observed 0.77800936 with steps=15
lol["loss"] # observed 0.89 with trainset2 [only 1 query-ID pair]
ranker.model_dir
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
1737,
220,
642,
1467,
25,
3070,
25,
4310,
13130,
198,
198,
31,
9800,
25,
479,
1229,
258,
62,
830,
198,
198,
3855,
31026,
351,
24958,
4540... | 2.73623 | 2,578 |
import csv
import matplotlib.animation as animation
'''
timestamo: data[11]
linearacceleration x,y,z[12,13,14]
'''
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Signal index to load and visualize.
    number = 6
    # load_data() returns the linear-acceleration components and their
    # timestamps — defined elsewhere in this file (not visible here).
    _ax, _ay, _az, _time = load_data(number)
    # Fix: Python-2 `print _ax` statement replaced with the function-call
    # form, which behaves identically in Python 2 and 3 for one argument.
    print(_ax)
    fig = plt.figure(figsize=(8, 6), dpi=100)
    # One scatter subplot per acceleration axis, stacked vertically.
    ax = fig.add_subplot(3, 1, 1)
    ax.scatter(_time, _ax)
    ay = fig.add_subplot(3, 1, 2)
    ay.scatter(_time, _ay)
    az = fig.add_subplot(3, 1, 3)
    az.scatter(_time, _az)
    ax.set_title('signal ' + str(number))
    plt.show()
| [
11748,
269,
21370,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
7061,
6,
198,
16514,
395,
18811,
25,
1366,
58,
1157,
60,
198,
29127,
330,
7015,
341,
2124,
11,
88,
11,
89,
58,
1065,
11,
1485,
11,
1415,
60,
198,
7... | 2.071429 | 280 |
# Environment bootstrap: load variables from a `.env` file and make the
# directory containing this file importable.
import sys
import os
from dotenv import load_dotenv
from pathlib import Path  # pathlib requires Python 3
# NOTE(review): the `.env` path is relative to the current working
# directory, not to this file — confirm that callers run from the
# expected directory.
env_path = Path('..') / '.env'
load_dotenv(dotenv_path=env_path)
# Absolute directory of this file; prepended to sys.path so sibling
# modules import regardless of the invocation directory.
ROOT = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, ROOT)
| [
2,
10669,
284,
1330,
10392,
198,
11748,
25064,
198,
11748,
28686,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
3108,
8019,
1330,
10644,
220,
1303,
21015,
18,
691,
198,
24330,
62,
6978,
796,
10644,
10786,
492,
11537,
... | 2.747368 | 95 |