content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pytest
from django.contrib.contenttypes.models import ContentType
from constants import user_system
from event_manager.event import Attribute, Event
from event_manager.events import (
bookmark,
build_job,
chart_view,
cluster,
experiment,
experiment_group,
experiment_job,
job,
notebook,
permission,
project,
repo,
search,
superuser,
tensorboard,
user
)
from event_manager.events.experiment import ExperimentSucceededEvent
from factories.factory_experiments import ExperimentFactory
from libs.json_utils import loads
from tests.utils import BaseTest
@pytest.mark.events_mark
| [
11748,
12972,
9288,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
198,
6738,
38491,
1330,
2836,
62,
10057,
198,
6738,
1785,
62,
37153,
13,
15596,
1330,
3460,
4163,
11,
8558,
198,
6738,... | 3.018605 | 215 |
from ldp import DirectContainer
from pycdm import Collection as PcdmCollection
from pycdm import Object as PcdmObject
from pycdm import PcdmReader as PcdmReaderBase
| [
198,
6738,
300,
26059,
1330,
4128,
29869,
198,
6738,
12972,
10210,
76,
1330,
12251,
355,
350,
10210,
76,
36307,
198,
6738,
12972,
10210,
76,
1330,
9515,
355,
350,
10210,
76,
10267,
198,
6738,
12972,
10210,
76,
1330,
350,
10210,
76,
3363... | 3.458333 | 48 |
#!/usr/bin/env python
from miflora_server.srv import *
from miflora_msgs.msg import *
import rospkg
import rospy
import sys
import subprocess
import json
sensor_pub = rospy.Publisher('/miflora', miflora, queue_size=10)
sensor_msg = miflora()
def callback_msg(msg):
''' Read sensor and return measurements '''
# Check to see is MAC has been set
if msg.MAC == '00:00:00:00:00:00':
rospy.logwarn("MAC address not set. Run $ rosrun miflora_server discover_devices.py")
break
# Run sensor driver. Requires python >=3.6
# HACK: Done this way to remove conflicts with ROS python version.
path = rospkg.RosPack().get_path('miflora_server')
miflora_out = subprocess.check_output(["python3.7", path+"/src/miflora_driver.py", msg.MAC])
try:
miflora_data = json.loads(miflora_out.replace("'", '"'))
response = read_mifloraResponse()
response.sensor.header.stamp = rospy.Time.now()
response.sensor.header.frame_id = msg.FrameID
response.sensor.moisture = miflora_data['soil-moisture']
response.sensor.battery = miflora_data['battery']
response.sensor.illuminance = miflora_data['light']
response.sensor.temperature = miflora_data['soil-temperature']
response.sensor.conductivity = miflora_data['soil-ec']
# Publish reading to topic
sensor_msg = response.sensor
sensor_pub.publish(sensor_msg)
# Return reading to service client
return response
except:
# Return blank measurements on error.
rospy.logerr(miflora_out)
rospy.logwarn("Try resetting BLE: $ sudo hciconfig hci0 down && sudo hciconfig hci0 up")
rospy.logwarn("Or add reset to crontab: $ sudo crontab -e ")
rospy.logwarn("add to bottom: 1 * * * * hciconfig hci0 reset")
return read_mifloraResponse()
if __name__ == "__main__":
service_server() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
285,
361,
4685,
64,
62,
15388,
13,
27891,
85,
1330,
1635,
198,
6738,
285,
361,
4685,
64,
62,
907,
14542,
13,
19662,
1330,
1635,
220,
198,
11748,
686,
2777,
10025,
198,
117... | 2.548252 | 715 |
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from django.contrib.auth import views as auth_views
from hospital.views import *
#########
#########
######
######
######
######
########
#######
#######
####### | [
6738,
42625,
14208,
13,
9288,
1330,
17427,
14402,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
11,
10568,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
5009,
355,
6284,
62,
33571,
198,
6738,
4436,
13,
33571,
1330... | 2.484127 | 126 |
from .models import User, Vaccine, User_Vaccine, Agendamento, Estabelecimento, EstabelecimentoAtendimento, Uf, Municipio
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
| [
6738,
764,
27530,
1330,
11787,
11,
31749,
500,
11,
11787,
62,
53,
4134,
500,
11,
2449,
437,
3263,
78,
11,
10062,
397,
11129,
66,
3681,
78,
11,
10062,
397,
11129,
66,
3681,
78,
2953,
437,
3681,
78,
11,
471,
69,
11,
27338,
952,
198,... | 3.323077 | 65 |
__version__ = VERSION = '0.6.0'
| [
834,
9641,
834,
796,
44156,
2849,
796,
705,
15,
13,
21,
13,
15,
6,
198
] | 2.133333 | 15 |
"""retrieve dns addresses from router and write to local csv file"""
import os.path
import csv;
from paramiko import SSHClient
from scp import SCPClient
router_ip = 'router.mvanvuren.local'
ip_range = '192.168.0.'
remote_hosts_file = '/tmp/etc/hosts.dnsmasq'
local_hosts_file = './hosts.dnsmasq'
remote_dnsmasq_file = '/tmp/etc/dnsmasq.conf'
local_dnsmasq_file = './dnsmasq.conf'
dns_file = './dns.csv'
field_names = ['ip', 'mac', 'name', 'comments']
main() | [
37811,
1186,
30227,
288,
5907,
9405,
422,
20264,
290,
3551,
284,
1957,
269,
21370,
2393,
37811,
198,
11748,
28686,
13,
6978,
198,
11748,
269,
21370,
26,
198,
6738,
5772,
12125,
1330,
33825,
11792,
198,
6738,
629,
79,
1330,
17527,
11792,
... | 2.460733 | 191 |
from mWindowsSDK import *;
from .fThrowLastError import fThrowLastError;
from mRegistry import cRegistryValue;
oSystemInfo = cSystemInfo(); | [
6738,
285,
11209,
10305,
42,
1330,
1635,
26,
198,
6738,
764,
69,
39431,
5956,
12331,
1330,
277,
39431,
5956,
12331,
26,
198,
6738,
285,
8081,
4592,
1330,
269,
8081,
4592,
11395,
26,
198,
198,
78,
11964,
12360,
796,
269,
11964,
12360,
... | 3.333333 | 42 |
from typing import List, Union
from pystratis.api import APIRequest, EndpointRegister, endpoint
from pystratis.api.federationgateway.responsemodels import *
from pystratis.api.federationgateway.requestmodels import *
from pystratis.core import DestinationChain
from pystratis.core.types import Address, Money, uint256
from pystratis.core.networks import StraxMain, StraxTest, StraxRegTest, CirrusMain, CirrusTest, CirrusRegTest, Ethereum
class FederationGateway(APIRequest, metaclass=EndpointRegister):
"""Implements the federationgateway api endpoints."""
route = '/api/federationgateway'
@endpoint(f'{route}/deposits')
def deposits(self, block_height: int, **kwargs) -> List[MaturedBlockDepositsModel]:
"""Retrieves block deposits
Args:
block_height (int): The block height at which to obtain deposits.
**kwargs: Extra keyword arguments.
Returns:
List[MaturedBlockDepositsModel]: A list of matured block deposits.
Raises:
APIError: Error thrown by node API. See message for details.
"""
request_model = DepositsRequest(block_height=block_height)
data = self.get(request_model, **kwargs)
serializable_result = SerializableResult(**data)
matured_block_deposit_models = []
for item in serializable_result.value:
data = {'deposits': [], 'blockInfo': item['blockInfo']}
for deposit in item['deposits']:
deposit['amount'] = Money.from_satoshi_units(deposit['amount'])
if 'targetChain' in deposit:
if deposit['targetChain'] == DestinationChain.ETH.value:
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=Ethereum())
else:
chain_name = DestinationChain(deposit['targetChain']).name
raise RuntimeWarning(f'Validation for {chain_name} not implemented.')
else:
if self._network == StraxMain():
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=CirrusMain())
if self._network == CirrusMain():
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=StraxMain())
if self._network == StraxTest():
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=CirrusTest())
if self._network == CirrusTest():
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=StraxTest())
if self._network == StraxRegTest() or self._network == CirrusRegTest():
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=StraxRegTest())
if self._network == StraxRegTest() or self._network == CirrusRegTest():
deposit['targetAddress'] = Address(address=deposit['targetAddress'], network=StraxRegTest())
data['deposits'].append(deposit)
matured_block_deposit_models.append(MaturedBlockDepositsModel(**data))
return matured_block_deposit_models
@endpoint(f'{route}/transfer/pending')
def pending_transfer(self,
deposit_id: Union[str, uint256],
transaction_id: Union[str, uint256],
**kwargs) -> List[CrossChainTransferModel]:
"""Gets pending transfers.
Args:
deposit_id (uint256, str): The deposit id hash.
transaction_id (uint256, str): The transaction id hash.
**kwargs: Extra keyword arguments.
Returns:
List[CrossChainTransferModel]: A list of cross chain transfers.
Raises:
APIError: Error thrown by node API. See message for details.
"""
if isinstance(deposit_id, str):
deposit_id = uint256(deposit_id)
if isinstance(transaction_id, str):
transaction_id = uint256(transaction_id)
request_model = PendingTransferRequest(deposit_id=deposit_id, transaction_id=transaction_id)
data = self.get(request_model, **kwargs)
return [CrossChainTransferModel(**x) for x in data]
@endpoint(f'{route}/transfer/fullysigned')
def fullysigned_transfer(self,
deposit_id: Union[str, uint256],
transaction_id: Union[str, uint256],
**kwargs) -> List[CrossChainTransferModel]:
"""Get fully signed transfers.
Args:
deposit_id (uint256, str): The deposit id hash.
transaction_id (uint256, str): The transaction id hash.
**kwargs: Extra keyword arguments.
Returns:
List[CrossChainTransferModel]: A list of cross chain transfers.
Raises:
APIError: Error thrown by node API. See message for details.
"""
if isinstance(deposit_id, str):
deposit_id = uint256(deposit_id)
if isinstance(transaction_id, str):
transaction_id = uint256(transaction_id)
request_model = FullySignedTransferRequest(deposit_id=deposit_id, transaction_id=transaction_id)
data = self.get(request_model, **kwargs)
return [CrossChainTransferModel(**x) for x in data]
@endpoint(f'{route}/member/info')
def member_info(self, **kwargs) -> FederationMemberInfoModel:
"""Gets info on the state of a multisig member.
Args:
**kwargs: Extra keyword arguments.
Returns:
FederationMemberInfoModel: Information on the current multisig member.
Raises:
APIError: Error thrown by node API. See message for details.
"""
data = self.get(**kwargs)
return FederationMemberInfoModel(**data)
@endpoint(f'{route}/info')
def info(self, **kwargs) -> FederationGatewayInfoModel:
"""Gets info on the state of the federation.
Args:
**kwargs: Extra keyword arguments.
Returns:
FederationGatewayInfoModel: Information on the federation gateway.
Raises:
APIError: Error thrown by node API. See message for details.
"""
data = self.get(**kwargs)
data['multisigAddress'] = Address(address=data['multisigAddress'], network=self._network)
return FederationGatewayInfoModel(**data)
@endpoint(f'{route}/member/ip/add')
def ip_add(self, ipaddr: str, **kwargs) -> str:
"""Add a federation member's IP address to the federation IP list
Args:
ipaddr (str): The endpoint.
**kwargs: Extra keyword arguments.
Returns:
str: Response to ip add request.
Raises:
APIError: Error thrown by node API. See message for details.
"""
request_model = MemberIPAddRequest(ipaddr=ipaddr)
data = self.put(request_model, **kwargs)
return data
@endpoint(f'{route}/member/ip/remove')
def ip_remove(self, ipaddr: str, **kwargs) -> str:
"""Remove a federation member's IP address to the federation IP list
Args:
ipaddr (str): The endpoint.
**kwargs: Extra keyword arguments.
Returns:
str: response to ip remove request.
Raises:
APIError: Error thrown by node API. See message for details.
"""
request_model = MemberIPRemoveRequest(ipaddr=ipaddr)
data = self.put(request_model, **kwargs)
return data
@endpoint(f'{route}/member/ip/replace')
def ip_replace(self, ipaddrtouse: str, ipaddr: str, **kwargs) -> str:
"""Replace a federation member's IP from the federation IP list with another.
Args:
ipaddrtouse (str): The new endpoint.
ipaddr (str): The endpoint being replaced.
**kwargs: Extra keyword arguments.
Returns:
str: Response to ip replace request.
Raises:
APIError: Error thrown by node API. See message for details.
"""
request_model = MemberIPReplaceRequest(ipaddrtouse=ipaddrtouse, ipaddr=ipaddr)
data = self.put(request_model, **kwargs)
return data
@endpoint(f'{route}/transfer/verify')
def verify_transfer(self,
deposit_id_transaction_id: Union[str, uint256],
**kwargs) -> Union[str, ValidateTransactionResultModel]:
"""Validate a transfer transaction.
Args:
deposit_id_transaction_id (uint256, str): The transaction id containing the deposit with the deposit id.
**kwargs: Extra keyword arguments.
Returns:
Union[str, ValidateTransactionResultModel]: A model describing the validity of the transfer.
Raises:
APIError: Error thrown by node API. See message for details.
"""
if isinstance(deposit_id_transaction_id, str):
deposit_id_transaction_id = uint256(deposit_id_transaction_id)
request_model = VerifyTransferRequest(deposit_id_transaction_id=deposit_id_transaction_id)
data = self.get(request_model, **kwargs)
if isinstance(data, str):
return data
return ValidateTransactionResultModel(**data)
@endpoint(f'{route}/transfers/deletesuspended')
def transfers_delete_suspended(self, **kwargs) -> str:
"""Delete a suspended transfer transaction.
Args:
**kwargs: Extra keyword arguments.
Returns:
str: A message about the deletion request.
Raises:
APIError: Error thrown by node API. See message for details.
"""
data = self.delete(**kwargs)
return data
@endpoint(f'{route}/transfer')
def transfer(self,
deposit_id: Union[str, uint256],
**kwargs) -> CrossChainTransferModel:
"""Gets pending transfers.
Args:
deposit_id (uint256, str): The deposit id hash.
**kwargs: Extra keyword arguments.
Returns:
CrossChainTransferModel: A cross chain transfer.
Raises:
APIError: Error thrown by node API. See message for details.
"""
if isinstance(deposit_id, str):
deposit_id = uint256(deposit_id)
request_model = TransferRequest(deposit_id=deposit_id)
data = self.get(request_model, **kwargs)
return CrossChainTransferModel(**data)
| [
6738,
19720,
1330,
7343,
11,
4479,
198,
6738,
12972,
2536,
37749,
13,
15042,
1330,
7824,
18453,
11,
5268,
4122,
38804,
11,
36123,
198,
6738,
12972,
2536,
37749,
13,
15042,
13,
69,
9748,
10494,
1014,
13,
26209,
27530,
1330,
1635,
198,
67... | 2.344903 | 4,552 |
from json import loads
from pathlib import PosixPath as Path
from galaxy_swift.exceptions import InvalidPlugin
from galaxy_swift.types import Manifest
| [
6738,
33918,
1330,
15989,
198,
6738,
3108,
8019,
1330,
18574,
844,
15235,
355,
10644,
198,
198,
6738,
16161,
62,
2032,
2135,
13,
1069,
11755,
1330,
17665,
37233,
198,
6738,
16161,
62,
2032,
2135,
13,
19199,
1330,
36757,
628
] | 4.026316 | 38 |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from seaborn._core.groupby import GroupBy
from seaborn._stats.aggregation import Agg
| [
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
12972,
9288,
198,
6738,
19798,
292,
13,
33407,
1330,
6818,
62,
14535,
62,
40496,
198,
198,
6738,
384,
397,
1211,
13557,
7295,
13,
8094,
1525,
1330,
4912,
3886,
198,
6738,
384,
397... | 3.313725 | 51 |
from django.db.models.signals import post_delete
from django.dispatch import Signal, receiver
from baserow.contrib.database.table.cache import (
invalidate_table_model_cache_and_related_models,
)
from baserow.contrib.database.table.models import Table
table_created = Signal()
table_updated = Signal()
table_deleted = Signal()
tables_reordered = Signal()
@receiver(post_delete, sender=Table)
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
33678,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
26484,
11,
9733,
198,
198,
6738,
1615,
263,
322,
13,
3642,
822,
13,
48806,
13,
11487,
13,
23870,
1330,
357... | 3.174603 | 126 |
import subprocess
import sys
member_list = [
"ZbT8sM1VLUA",
"ZbT8sM1VLUA"
]
#この辺りでvideokeyやタイトルをごにょごにょする。
def main():
"""
main関数
"""
for video_id in member_list:
cmd = 'chat_downloader https://www.youtube.com/watch?v=' + video_id + ' --cookies ../cookie.txt --output "result_member/' + video_id + '.json" > result_member/' + video_id + '.txt' # 実行するコマンド
print(cmd)
for result in command(cmd):
print(result)
if __name__ == "__main__":
main()
| [
11748,
850,
14681,
198,
11748,
25064,
198,
198,
19522,
62,
4868,
796,
685,
198,
220,
220,
220,
366,
57,
65,
51,
23,
82,
44,
16,
47468,
34970,
1600,
198,
220,
220,
220,
366,
57,
65,
51,
23,
82,
44,
16,
47468,
34970,
1,
198,
60,
... | 1.98062 | 258 |
from django.db.models.signals import post_save
from django.dispatch import receiver
import short_url
import logging
from .models import Publication
_log = logging.getLogger(__name__)
@receiver(post_save, sender=Publication)
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
9733,
198,
11748,
1790,
62,
6371,
198,
11748,
18931,
198,
198,
6738,
764,
27530,
1330,
45065,
198,
198,
62,
... | 3.304348 | 69 |
from ruamel import yaml
from jinja2 import Environment, FileSystemLoader, Template
import datetime
import os
# Edit ls output
now = datetime.datetime.now()
file = open('./config.yaml', 'r')
config_data = yaml.load(file, Loader=yaml.Loader)
# # List main server
os.system('lftp -c "source conf/original.conf && find -l . && exit" > lists/original.txt')
txt_editor("lists/original.txt", "lists/original_list.txt")
# Verify mirrors diff and update config data
mirrors = config_data['mirrors']
for mirror in mirrors:
os.system('lftp -c "source conf/' + mirror + '.conf && find -l . && exit" > lists/' + mirror + '.txt')
txt_editor("lists/" + mirror + ".txt", "lists/" + mirror + "_list.txt")
os.system("diff -u lists/original_list.txt lists/" + mirror + "_list.txt > webpage/" + mirrors[mirror]['diffFile'])
if os.stat(mirrors[mirror]['diffFile']).st_size==0:
if mirrors[mirror]['updated'] == False:
mirrors[mirror]['lastUpdate'] = now.strftime("%Y-%m-%d %H:%M")
mirrors[mirror]['updated'] = True
else:
mirrors[mirror]['updated'] = False
# Update time
config_data['lastRun'] = now.strftime("%Y-%m-%d %H:%M")
template_name = config_data['template']
# Save data
file = open('./config.yaml', 'w')
yaml.dump(config_data, file, Dumper=yaml.RoundTripDumper)
# Load Jinja2 template
env = Environment(loader = FileSystemLoader('./'), trim_blocks=True, lstrip_blocks=True)
template = env.get_template('templates/' + template_name +'/template.j2')
# Update template files in the webpage folder
os.system('cp -r templates/' + template_name +'/* webpage/ ; rm webpage/template.j2')
# Render the template with data and save in webpage/index.html
file = open('webpage/index.html', 'w')
file.write(template.render(config_data))
| [
6738,
7422,
17983,
1330,
331,
43695,
198,
6738,
474,
259,
6592,
17,
1330,
9344,
11,
9220,
11964,
17401,
11,
37350,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
2,
5312,
43979,
5072,
628,
198,
2197,
796,
4818,
8079,
13,
19608,
... | 2.789969 | 638 |
#
# Copyright 2020 Jaroslav Chmurny
#
# This file is part of Library of Graph Algorithms for Python.
#
# Library of Graph Algorithms for Python is free software developed for
# educational # and experimental purposes. It is licensed under the Apache
# License, Version 2.0 # (the "License"); you may not use this file except
# in compliance with the # License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the graphlib.util module.
"""
from pytest import raises
from graphlib.util import QueueableItem, RepriorizablePriorityQueue, SimplePriorityQueue
from graphlib.util import UnionFind
class TestSimplePriorityQueue: # pylint: disable=R0201,C0116,C0121
"""Collection of test methods exercising the class :class:
graphlib.util.SimplePriorityQueue.
"""
class TestRepriorizablePriorityQueue: # pylint: disable=R0201,C0116,C0121
"""Collection of test methods exercising the class :class:
graphlib.util.RepriorizablePriorityQueue.
"""
class TestUnionFind: # pylint: disable=R0201,C0116,C0301,C0121
"""Collection of test methods exercising the class :class:
graphlib.util.UnionFind.
"""
| [
2,
201,
198,
2,
15069,
12131,
15374,
26388,
609,
76,
700,
88,
201,
198,
2,
201,
198,
2,
770,
2393,
318,
636,
286,
10074,
286,
29681,
978,
7727,
907,
329,
11361,
13,
201,
198,
2,
201,
198,
2,
10074,
286,
29681,
978,
7727,
907,
32... | 3.186475 | 488 |
import locale
class NumberUtil:
""" Number Utility
"""
def toCurrency(n):
""" Convert given number into currency format.
Args:
n (float): Number to convert.
Returns:
string
"""
if not isinstance(n, float):
n = float(n)
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
return locale.currency(n, grouping = True)
| [
11748,
36693,
198,
198,
4871,
7913,
18274,
346,
25,
198,
220,
220,
220,
220,
198,
220,
220,
220,
37227,
7913,
34030,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
198,
220,
220,
220,
825,
284,
34,
13382,
7,
77,
2599,
198,
22... | 1.977778 | 225 |
# Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
env = Environment()
# <46>1 2021-12-08T21:07:19.100000Z sysloghost CylancePROTECT - - - Event Type: ExploitAttempt, Event Name: none, Device Name: DEVICENAME, IP Address: (), Action: None, Process ID: 72724, Process Name: C:\Program Files (x86)\Medcon\Medcon Common\Dicom2Avi_App.exe, User Name: tcsadmin, Violation Type: Stack Pivot, Zone Names: (Windows Server 2008), Device Id: a603a6e8-cac7-4d06-970c-24671e5af6cc, Policy Name: Servers Complete Policy
| [
2,
15069,
13130,
13341,
2954,
11,
3457,
13,
198,
2,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
17,
12,
565,
682,
12,
7635,
198,
2,
5964,
326,
460,
307,
1043,
287,
262,
38559,
24290,
12,
21800,
17,
... | 2.962264 | 265 |
from django.contrib.gis.measure import D
from django.contrib.gis.db.models.functions import Distance
from froide.publicbody.models import PublicBody
from .amenity import AmenityProvider
class AmenityLocalProvider(AmenityProvider):
'''
Like Amenity provider but tries to find the public body
for the amenity at its location
'''
NEARBY_RADIUS = 200
| [
6738,
42625,
14208,
13,
3642,
822,
13,
70,
271,
13,
1326,
5015,
1330,
360,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
70,
271,
13,
9945,
13,
27530,
13,
12543,
2733,
1330,
34600,
198,
198,
6738,
8400,
485,
13,
11377,
2618,
13,
27530... | 3.04918 | 122 |
#!/usr/bin/python
import argparse
import os
import subprocess
import tempfile
from itertools import chain
from scipy.sparse import csc_matrix, save_npz
from CMash import MinHash as MH
## TODO: adjust for very large files by only loading portions of hdf5 database file at a time
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Forms the sensing matrix `A` when given a database of WGS genomes in FASTA format. Also"
"runs KMC on a temporary FASTA file consisting of each kmer counted by CMash for intersection"
"with sample FASTA files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-k', '--k_size', type=int,
help="k-mer size to use (Default: 21)",
default=21)
parser.add_argument('-c', '--cmash_loc', type=str,
help="Location of CMash for accessing MakeStreamingDNADatabase.py.",
required=True)
parser.add_argument('-i', '--input_file', type=str,
help="Path to file containing absolute file names of training genomes",
required=True)
parser.add_argument('-o', '--output_file', type=str,
help="Output file of sparse representation of sensing matrix `A` in .mat format.",
default="TrainingDatabase",
required=True)
## Read in the arguments
args = parser.parse_args()
k_size = args.k_size
input_file_names = os.path.abspath(args.input_file)
if not os.path.exists(input_file_names):
raise Exception("Input file %s does not exist." % input_file_names)
output_file_name = os.path.abspath(args.output_file)
cmash_loc = args.cmash_loc
## Run CMash and load database
print("Running CMash.")
database_file_name = output_file_name + ".h5"
to_run = f"python {os.path.join(cmash_loc, 'scripts/MakeStreamingDNADatabase.py')} -k {k_size} {input_file_names} {database_file_name}"
res = subprocess.run(to_run, shell=True, stdout=subprocess.DEVNULL)
if res.returncode != 0:
raise Exception("Error running CMash (MakeStreamingDNADatabase.py)")
print("Loading in database.")
database = MH.import_multiple_from_single_hdf5(database_file_name)
## Extract list of kmers
print("Gathering kmers.")
# (convert to set for removing duplicates, sort for consistent ordering)
kmers = sorted(set(chain.from_iterable(genome._kmers for genome in database)))
## Gather data for, create, and save csc_matrix, each column representing the counts of a particular genome's kmers
print("Creating matrix.")
data = []
indices = []
indptr = [0]
for genome in database:
column_indices = [kmers.index(kmer) for kmer in genome._kmers] # Find indices of a particular genome's kmers
sorter = sorted(range(len(column_indices)), key=column_indices.__getitem__) # argsort the indices
# Concatenate sorted indices and corresponding data (counts) for canonical csc_matrix format
indices += [column_indices[i] for i in sorter]
data += [genome._counts[i] for i in sorter]
indptr.append(len(indices))
save_npz(output_file_name, csc_matrix((data, indices, indptr), shape=(len(kmers), len(database))), compressed=True)
## Run KMC on artificial FASTA file for future intersection with y vectors
print("Creating FASTA and running KMC.")
# Check if kmc is installed
res = subprocess.run("kmc", shell=True, stdout=subprocess.DEVNULL)
if res.returncode != 0:
raise Exception(
"It appears that kmc is not installed. Please consult the README, install kmc, and try again.")
with tempfile.TemporaryDirectory() as temp_dir:
with open("test.fasta", "w+") as training_out_file:
to_write = ""
iterator = 0
for kmer in kmers:
to_write += ">seq%d\n" % iterator
to_write += "%s\n" % kmer
iterator += 1
training_out_file.write(to_write)
# Run KMC on training fasta
to_run = f"kmc -k{k_size} -sm -fm -ci0 -cs3 {training_out_file.name} {output_file_name} {temp_dir}"
res = subprocess.run(to_run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
if res.returncode != 0:
raise Exception("An unexpected error was encountered while running kmc.")
print("Completed.")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
20218,
7753,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
6738,
629,
541,
88,
13,
82,
29572,
1330,
269,
1416,
62,
67... | 2.466739 | 1,849 |
#!/usr/bin/env python3
# Fabio Zanarello, Sanger Institute, 2019
# this script reads a table of precomputed score (space delimited, no header):
# 2 103060300 intronic IL18RAP 0.03276616 0.3697275149820258
# and a results table from NCBoost and produce an output in which the NCBoost results
# have four new columns to corresponding to:
# precomp_region
# precomp_closest_gene_name
# precomp_NCBoost_Score
# precomp_NCboost_chr_rank_perc
# NA if the variant is not precomputed
import os
import sys
import argparse
from collections import defaultdict
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
14236,
952,
47022,
533,
18798,
11,
311,
2564,
5136,
11,
13130,
198,
198,
2,
428,
4226,
9743,
257,
3084,
286,
662,
785,
17128,
4776,
357,
13200,
46728,
863,
11,
645,
13639,
259... | 3.115789 | 190 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ...testing import assert_equal
from ..utility import Select
| [
2,
47044,
46,
12,
35353,
1137,
11617,
416,
4899,
14,
42116,
431,
6359,
13,
9078,
532,
8410,
5626,
48483,
198,
6738,
2644,
33407,
1330,
6818,
62,
40496,
198,
6738,
11485,
315,
879,
1330,
9683,
628,
198
] | 3.361111 | 36 |
import streamlit as st
import pandas as pd
import numpy as np | [
11748,
4269,
18250,
355,
336,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941
] | 3.263158 | 19 |
import pytest
from s2v_similarity import S2vSimilarity
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
6738,
264,
17,
85,
62,
38610,
414,
1330,
311,
17,
85,
18925,
414,
198,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628,
198,
220,
220,
220,
220
] | 2.45 | 40 |
# https://leetcode.com/problems/unique-paths/
from math import comb
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
34642,
12,
6978,
82,
14,
198,
198,
6738,
10688,
1330,
1974,
628
] | 2.916667 | 24 |
# Copyright (c) 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
import pytest
from nose.plugins.skip import SkipTest
import logging
from ansible.modules.cloud.oracle import oci_db_node
from ansible.module_utils.oracle import oci_db_utils, oci_utils
try:
import oci
from oci.util import to_dict
from oci.database.models import DbNode
from oci.exceptions import ServiceError, ClientError
except ImportError:
raise SkipTest("test_oci_db_node.py requires `oci` module")
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
@pytest.fixture()
| [
2,
15069,
357,
66,
8,
2864,
11,
18650,
290,
14,
273,
663,
29116,
13,
198,
2,
770,
3788,
318,
925,
1695,
284,
345,
739,
262,
2846,
286,
262,
38644,
513,
13,
15,
5964,
393,
262,
24843,
362,
13,
15,
5964,
13,
198,
2,
22961,
3611,
... | 2.918149 | 281 |
import sys
import pandas as pd
import numpy as np
import re
import nltk
from sqlalchemy import create_engine
nltk.download(['punkt', 'wordnet'])
from sklearn.ensemble import GradientBoostingClassifier
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import LogisticRegression
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, f1_score,\
precision_score, recall_score, make_scorer
import pickle
if __name__ == '__main__':
main()
| [
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
302,
198,
11748,
299,
2528,
74,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
77,
2528,
74,
13,
15002,
7,
17816,
30354,
83... | 3.414141 | 297 |
import jax
import jax.numpy as jnp
import snow
from collections import namedtuple
import numpy as np
Exploration = namedtuple("Exploration", ['type', 'param'])
if __name__ == '__main__':
import numpy as np
k = 1.15
l = 2
dmin = 0.8
dmax = dmin * l ** 0.6
hierarchization_config = (
(5, dmin * l ** 2, dmax * l ** 2, 1 / k ** 0, 1 / k ** 0),
(5, dmin * l ** 1, dmax * l ** 1, 1 / k ** 1, 1 / k ** 1),
(5, dmin * l ** 0, dmax * l ** 0, 1 / k ** 2, 1 / k ** 2),
)
N_ACTORS = int(np.prod(tuple(dim for dim, _, _, _, _ in hierarchization_config)))
# recomputed_returns = np.random.uniform(size=(N_ACTORS,), high=12)
recomputed_returns = np.linspace(1, 0, N_ACTORS)
recomputed_returns = np.stack([np.linspace(1, 0, N_ACTORS)] * 2, axis=1)
recomputed_actions = np.stack([np.random.uniform(size=(N_ACTORS, 7), low=-1, high=1)], axis=1)
ps = get_actions_probabilities(recomputed_returns, hierarchization_config, upsilon=0.0)
# print(f'{ps=}\n\n')
key = jax.random.PRNGKey(3)
actions = sample(recomputed_actions, ps, key)
| [
11748,
474,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
11748,
6729,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
11748,
299,
32152,
355,
45941,
628,
198,
18438,
6944,
796,
3706,
83,
29291,
7203,
18438,
6944,
1600,
... | 2.241449 | 497 |
#_ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2018 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import argparse
import collections
import logging
import re
import fnmatch
import shutil
import functools
import os
import json
from collections import OrderedDict
from .. import api, display
from ..subfolder import BaseFolderNode, try_resolve_path, find_folders
from ..record import Record
from .base import user_choice, suppress_exit, raise_parse_exception, Command
from ..params import LAST_SHARED_FOLDER_UID, LAST_FOLDER_UID
from ..error import CommandError
ls_parser = argparse.ArgumentParser(prog='ls', description='List folder contents.')
ls_parser.add_argument('-l', '--list', dest='detail', action='store_true', help='show detailed list')
ls_parser.add_argument('-f', '--folders', dest='folders', action='store_true', help='display folders')
ls_parser.add_argument('-r', '--records', dest='records', action='store_true', help='display records')
ls_parser.add_argument('-s', '--short', dest='short', action='store_true',
help='Do not display record details.')
ls_parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output')
ls_parser.add_argument('pattern', nargs='?', type=str, action='store', help='search pattern')
ls_parser.error = raise_parse_exception
ls_parser.exit = suppress_exit
cd_parser = argparse.ArgumentParser(prog='cd', description='Change current folder.')
cd_parser.add_argument('folder', nargs='?', type=str, action='store', help='folder path or UID')
cd_parser.error = raise_parse_exception
cd_parser.exit = suppress_exit
tree_parser = argparse.ArgumentParser(prog='tree', description='Display the folder structure.')
tree_parser.add_argument('folder', nargs='?', type=str, action='store', help='folder path or UID')
tree_parser.error = raise_parse_exception
tree_parser.exit = suppress_exit
rmdir_parser = argparse.ArgumentParser(prog='rmdir', description='Remove a folder and its contents.')
rmdir_parser.add_argument('-f', '--force', dest='force', action='store_true', help='remove folder without prompting')
rmdir_parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help='remove folder without folder info')
rmdir_parser.add_argument('pattern', nargs='*', type=str, action='store', help='folder path or UID')
rmdir_parser.error = raise_parse_exception
rmdir_parser.exit = suppress_exit
mkdir_parser = argparse.ArgumentParser(prog='mkdir', description='Create a folder.')
mkdir_parser.add_argument('-sf', '--shared-folder', dest='shared_folder', action='store_true', help='create shared folder')
mkdir_parser.add_argument('-uf', '--user-folder', dest='user_folder', action='store_true', help='create user folder')
mkdir_parser.add_argument('-a', '--all', dest='grant', action='store_true', help='anyone has all permissions by default')
mkdir_parser.add_argument('-u', '--manage-users', dest='manage_users', action='store_true', help='anyone can manage users by default')
mkdir_parser.add_argument('-r', '--manage-records', dest='manage_records', action='store_true', help='anyone can manage records by default')
mkdir_parser.add_argument('-s', '--can-share', dest='can_share', action='store_true', help='anyone can share records by default')
mkdir_parser.add_argument('-e', '--can-edit', dest='can_edit', action='store_true', help='anyone can edit records by default')
mkdir_parser.add_argument('folder', nargs='?', type=str, action='store', help='folder path')
mkdir_parser.error = raise_parse_exception
mkdir_parser.exit = suppress_exit
mv_parser = argparse.ArgumentParser(prog='mv', description='Move a record or folder to another folder.')
mv_parser.add_argument('-f', '--force', dest='force', action='store_true', help='do not prompt')
mv_parser.add_argument('-s', '--can-reshare', dest='can_reshare', action='store_true', help='anyone can reshare records')
mv_parser.add_argument('-e', '--can-edit', dest='can_edit', action='store_true', help='anyone can edit records')
mv_parser.add_argument('src', nargs='?', type=str, action='store', help='source path to folder/record or UID')
mv_parser.add_argument('dst', nargs='?', type=str, action='store', help='destination folder or UID')
mv_parser.error = raise_parse_exception
mv_parser.exit = suppress_exit
ln_parser = argparse.ArgumentParser(prog='ln', description='Create a link between a record and a folder.')
ln_parser.add_argument('-f', '--force', dest='force', action='store_true', help='do not prompt')
ln_parser.add_argument('-s', '--can-reshare', dest='can_reshare', action='store_true', help='anyone can reshare records')
ln_parser.add_argument('-e', '--can-edit', dest='can_edit', action='store_true', help='anyone can edit records')
ln_parser.add_argument('src', nargs='?', type=str, action='store', help='source path to folder/record or UID')
ln_parser.add_argument('dst', nargs='?', type=str, action='store', help='destination folder or UID')
ln_parser.error = raise_parse_exception
ln_parser.exit = suppress_exit
def get_shared_folder_delete_rq(params, sf_requests, uid):
"""Adds a delete request to given dictionary for specified shared folder uid"""
if uid in params.shared_folder_cache and uid not in sf_requests:
sf = params.shared_folder_cache[uid]
rq = {
'command': 'shared_folder_update',
'operation': 'delete',
'shared_folder_uid': sf['shared_folder_uid']
}
if 'shared_folder_key' not in sf:
if 'teams' in sf:
for team in sf['teams']:
rq['from_team_uid'] = team['team_uid']
break
sf_requests[uid] = rq
def get_shared_subfolder_delete_rq(params, sf_requests, user_folder, user_folder_ids):
"""Recursively searches a user folder for shared folders to delete"""
delete_rq_added = False
user_folder_ids.add(user_folder.uid)
for uid in user_folder.subfolders:
subfolder = params.folder_cache[uid]
if subfolder.type == BaseFolderNode.SharedFolderType:
delete_rq_added = True
get_shared_folder_delete_rq(params, sf_requests, uid)
elif uid not in user_folder_ids:
delete_rq_added = get_shared_subfolder_delete_rq(params, sf_requests, subfolder, user_folder_ids)
return delete_rq_added
| [
2,
62,
220,
11593,
198,
2,
930,
930,
14,
1220,
17569,
46444,
4808,
11593,
220,
46444,
4808,
4808,
38852,
198,
2,
930,
705,
7359,
532,
62,
8,
532,
62,
8,
705,
62,
3467,
14,
532,
62,
8,
705,
62,
91,
198,
2,
930,
62,
91,
59,
62... | 2.847922 | 2,262 |
import datetime
import numpy as np
from pyproj import CRS
from RAiDER.models.ecmwf import ECMWF
| [
11748,
4818,
8079,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
1676,
73,
1330,
327,
6998,
198,
198,
6738,
17926,
72,
14418,
13,
27530,
13,
721,
76,
86,
69,
1330,
13182,
14326,
37,
628
] | 2.75 | 36 |
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
import pandas as pd
import datetime
import tensorflow as tf
from tensortrade.environments.render import BaseRenderer
from tensortrade.environments.utils.helpers import create_auto_file_name, check_path
DEFAULT_LOG_FORMAT = '[%(asctime)-15s] %(message)s'
DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
| [
2,
15069,
13130,
383,
309,
22854,
35965,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 3.340741 | 270 |
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from .models import Group,Profile,Question,Choice
class GroupForm(forms.ModelForm):
'''
Class to create a form for an authenticated user to create Group
'''
#*************************************************************************************************************
class QuestionForm(forms.ModelForm):
'''
Class to create a form for an authenticated user to post a poll
'''
#************************************************** Enter A Choice **********************************************
class ChoiceForm(forms.ModelForm):
'''
Class to create a form for an authenticated user to post a poll
'''
#************************************* SET UP A PROFILE **********************************************************
class ProfileForm(forms.ModelForm):
'''
Class to create a form for an authenticated user to update profile
'''
#***********************************************************ACTUAL VOTING****************************************
class VoteForm(forms.ModelForm):
'''
Class to create a form for an authenticated user to vote
'''
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
48191,
8479,
198,
6738,
764,
27530,
1330,
4912,
11,
37046,
11,
24361,
11,
46770,
628,
198,
198,
4871,
4912,
8479,
7,
23914,
13,
17633,
... | 4.310469 | 277 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.monitoring_dashboard_v1.types import metrics
from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.monitoring.dashboard.v1", manifest={"Scorecard",},
)
class Scorecard(proto.Message):
r"""A widget showing the latest value of a metric, and how this
value relates to one or more thresholds.
Attributes:
time_series_query (google.cloud.monitoring_dashboard_v1.types.TimeSeriesQuery):
Required. Fields for querying time series
data from the Stackdriver metrics API.
gauge_view (google.cloud.monitoring_dashboard_v1.types.Scorecard.GaugeView):
Will cause the scorecard to show a gauge
chart.
spark_chart_view (google.cloud.monitoring_dashboard_v1.types.Scorecard.SparkChartView):
Will cause the scorecard to show a spark
chart.
thresholds (Sequence[google.cloud.monitoring_dashboard_v1.types.Threshold]):
The thresholds used to determine the state of
the scorecard given the time series' current
value. For an actual value x, the scorecard is
in a danger state if x is less than or equal to
a danger threshold that triggers below, or
greater than or equal to a danger threshold that
triggers above. Similarly, if x is above/below a
warning threshold that triggers above/below,
then the scorecard is in a warning state -
unless x also puts it in a danger state. (Danger
trumps warning.)
As an example, consider a scorecard with the
following four thresholds: {
value: 90,
category: 'DANGER',
trigger: 'ABOVE',
},
{
value: 70,
category: 'WARNING',
trigger: 'ABOVE',
},
{
value: 10,
category: 'DANGER',
trigger: 'BELOW',
},
{
value: 20,
category: 'WARNING',
trigger: 'BELOW',
}
Then: values less than or equal to 10 would put
the scorecard in a DANGER state, values greater
than 10 but less than or equal to 20 a WARNING
state, values strictly between 20 and 70 an OK
state, values greater than or equal to 70 but
less than 90 a WARNING state, and values greater
than or equal to 90 a DANGER state.
"""
class GaugeView(proto.Message):
r"""A gauge chart shows where the current value sits within a
pre-defined range. The upper and lower bounds should define the
possible range of values for the scorecard's query (inclusive).
Attributes:
lower_bound (float):
The lower bound for this gauge chart. The
value of the chart should always be greater than
or equal to this.
upper_bound (float):
The upper bound for this gauge chart. The
value of the chart should always be less than or
equal to this.
"""
lower_bound = proto.Field(proto.DOUBLE, number=1,)
upper_bound = proto.Field(proto.DOUBLE, number=2,)
class SparkChartView(proto.Message):
r"""A sparkChart is a small chart suitable for inclusion in a
table-cell or inline in text. This message contains the
configuration for a sparkChart to show up on a Scorecard,
showing recent trends of the scorecard's timeseries.
Attributes:
spark_chart_type (google.cloud.monitoring_dashboard_v1.types.SparkChartType):
Required. The type of sparkchart to show in
this chartView.
min_alignment_period (google.protobuf.duration_pb2.Duration):
The lower bound on data point frequency in
the chart implemented by specifying the minimum
alignment period to use in a time series query.
For example, if the data is published once every
10 minutes it would not make sense to fetch and
align data at one minute intervals. This field
is optional and exists only as a hint.
"""
spark_chart_type = proto.Field(
proto.ENUM, number=1, enum=metrics.SparkChartType,
)
min_alignment_period = proto.Field(
proto.MESSAGE, number=2, message=duration_pb2.Duration,
)
time_series_query = proto.Field(
proto.MESSAGE, number=1, message=metrics.TimeSeriesQuery,
)
gauge_view = proto.Field(
proto.MESSAGE, number=4, oneof="data_view", message=GaugeView,
)
spark_chart_view = proto.Field(
proto.MESSAGE, number=5, oneof="data_view", message=SparkChartView,
)
thresholds = proto.RepeatedField(
proto.MESSAGE, number=6, message=metrics.Threshold,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2... | 2.419124 | 2,374 |
# Generated by Django 2.2.10 on 2020-03-08 20:00
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
940,
319,
12131,
12,
3070,
12,
2919,
1160,
25,
405,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
import unittest
from Statistics.Statistics import Statistics
import pprint
# def test_instantiate_calculator(self):
# self.assertIsInstance(self.statistics, Statistics)
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
14370,
13,
48346,
1330,
14370,
198,
11748,
279,
4798,
628,
198,
2,
220,
220,
220,
825,
1332,
62,
8625,
415,
9386,
62,
9948,
3129,
1352,
7,
944,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,... | 2.8875 | 80 |
import torch
import torch.nn as nn
if __name__ == '__main__':
net = sphere(64)
print(net) | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2010,
796,
16558,
7,
2414,
8,
198,
220,
220,
220,
3601,
7,
3262,
8
] | 2.439024 | 41 |
from __future__ import unicode_literals
from contextlib import closing
import io
import os.path
from shutil import rmtree
import sqlite3
from tempfile import mkdtemp
import unittest
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper, setup_testing_defaults
from wsgiref.validate import validator
import wsgir
if __name__ == '__main__':
unittest.main()
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
4732,
8019,
1330,
9605,
198,
11748,
33245,
198,
11748,
28686,
13,
6978,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
11748,
44161,
578,
18,
198,
6738,
2021... | 3.121951 | 123 |
from django.conf.urls import patterns, include, url
from store_search import views
urlpatterns = (
url(r'^search$', views.search_page, name='store_search'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
2291,
11,
19016,
198,
6738,
3650,
62,
12947,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
357,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
12947,
3,
3256,
5009,
13,
12947,
6... | 3 | 55 |
from __future__ import absolute_import # NOQA
from .celery import app as celery_app # NOQA
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
220,
1303,
8005,
48,
32,
198,
6738,
764,
7015,
88,
1330,
598,
355,
18725,
1924,
62,
1324,
220,
1303,
8005,
48,
32,
198
] | 3 | 31 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RXml(RPackage):
"""Tools for Parsing and Generating XML Within R and S-Plus
Many approaches for both reading and creating XML (and HTML) documents
(including DTDs), both local and accessible via HTTP or FTP. Also offers
access to an 'XPath' "interpreter"."""
homepage = "https://cloud.r-project.org/package=XML"
url = "https://cloud.r-project.org/src/contrib/XML_3.98-1.9.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/XML"
version('3.99-0.5', sha256='60529b7360f162eba07da455eeb9b94a732b2dd623c49e0f04328a2e97bd53a6')
version('3.98-1.20', sha256='46af86376ea9a0fb1b440cf0acdf9b89178686a05c4b77728fcff1f023aa4858')
version('3.98-1.19', sha256='81b1c4a2df24c5747fa8b8ec2d76b4e9c3649b56ca94f6c93fbd106c8a72beab')
version('3.98-1.9', sha256='a3b70169cb2fbd8d61a41ff222d27922829864807e9ecad373f55ba0df6cf3c3')
version('3.98-1.5', sha256='deaff082e4d37931d2dabea3a60c3d6916d565821043b22b3f9522ebf3918d35')
version('3.98-1.4', sha256='9c0abc75312f66aac564266b6b79222259c678aedee9fc347462978354f11126')
depends_on('r@2.13.0:', type=('build', 'run'))
depends_on('r@4.0.0:', when='@3.99-0.5:', type=('build', 'run'))
depends_on('libxml2@2.6.3:')
| [
2,
15069,
2211,
12,
1238,
2481,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.177877 | 669 |
#!/usr/bin/python3 -u
## Copyright (C) 2012 - 2022 ENCRYPTED SUPPORT LP <adrelanos@whonix.org>
## See the file COPYING for copying conditions.
# Since it will be useful to know something about the script,
# for the later tests, the terms are defined here:
# (A discussion of Python language structure is beyond
# the scope of this document)
# [1] http://en.wikipedia.org/wiki/Ipv4
# [2] http://en.wikipedia.org/wiki/Internet_Control_Message_Protocol
# [3] http://en.wikipedia.org/wiki/IP_routing
# [4] http://en.wikipedia.org/wiki/Ping
# [5] http://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#List_of_permitted_control_messages_.28incomplete_list.29
# [6] http://www.secdev.org/projects/scapy/doc/usage.html#send-and-receive-packets-sr
# [7] http://www.secdev.org/projects/scapy/doc/usage.html#stacking-layers
import sys
from scapy.all import *
# define the target gateway & data payload
# IP HARDCODED but only needed during manual leak testing.
target = "10.152.152.10"
#target = "45.33.32.156"
data = "testing"
# define packets
# These define two variables, that are set to the object types IP
# and ICMP respectively. These objects in Scapy define the protocol
# type for IP (default IPv4) [1] and ICMP [2] respectively.
# And will send packets on the wire of these types when used.
ip = IP()
icmp = ICMP()
# define packet parameters
ip.dst = target
# IP packets are used for routing [3] between networks on the Internet.
# So, we assign the destination (dst) in the IP portion of the
# packet we are going to assemble and send out.
icmp.type = 8
icmp.code = 0
# Defines the type of ICMP message to send out. The ..8 type.. is
# a type defined as ..echo request.., e.g. a simple ping [4].
# See a list here of various types of ICMP [5] messages here.
# The sr1() [6] command will ..send and receive network traffic,
# returning the 1st packet received...
# The notation of ..ip/icmp/data.. is the notation for encapsulation
# of various instances of networking protocols [7].
# Read it right to left: ..data encapsulated inside an ICMP message
# and encapsulated inside an IP datagram...
test_ping = sr1(ip/icmp/data)
if isinstance(test_ping, types.NoneType):
print("No response")
else:
# Prints a short report on the packet received (if any).
test_ping.summary()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
532,
84,
198,
198,
2235,
15069,
357,
34,
8,
2321,
532,
33160,
412,
7792,
18276,
11571,
1961,
43333,
18470,
1279,
324,
2411,
40015,
31,
1929,
261,
844,
13,
2398,
29,
198,
2235,
4091,
262,
239... | 3.150068 | 733 |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import pandas as pd
def make_dataset(input_csv_filepath, output_dirpath):
""" Runs data processing scripts to transform raw data from (../raw) into
cleaned and data-defacto view ready to be analyzed/feature-engineered/encoded
(saved in ../interim).
"""
logger = logging.getLogger(__name__)
logger.info(f'making cleaned dataset from {input_csv_filepath}')
# Get data
logger.info('getting data...')
df = pd.read_csv(Path.cwd().joinpath(input_csv_filepath))
# Get Col Types
continuous_cols = [x for x in df.columns if x in ['encounter_id', 'hospital_id', 'patient_id', 'icu_id', 'gcs_eyes_apache', 'gcs_motor_apache', 'gcs_verbal_apache', 'age', 'height', 'pre_icu_los_days', 'weight', 'albumin_apache', 'bilirubin_apache', 'bun_apache', 'creatinine_apache', 'fio2_apache', 'glucose_apache', 'heart_rate_apache', 'hematocrit_apache', 'map_apache', 'paco2_apache', 'paco2_for_ph_apache', 'pao2_apache', 'ph_apache', 'resprate_apache', 'sodium_apache', 'temp_apache', 'urineoutput_apache', 'wbc_apache', 'd1_diasbp_invasive_max', 'd1_diasbp_invasive_min', 'd1_diasbp_max', 'd1_diasbp_min', 'd1_diasbp_noninvasive_max', 'd1_diasbp_noninvasive_min', 'd1_heartrate_max', 'd1_heartrate_min', 'd1_mbp_invasive_max', 'd1_mbp_invasive_min', 'd1_mbp_max', 'd1_mbp_min', 'd1_mbp_noninvasive_max', 'd1_mbp_noninvasive_min', 'd1_resprate_max', 'd1_resprate_min', 'd1_spo2_max', 'd1_spo2_min', 'd1_sysbp_invasive_max', 'd1_sysbp_invasive_min', 'd1_sysbp_max', 'd1_sysbp_min', 'd1_sysbp_noninvasive_max', 'd1_sysbp_noninvasive_min', 'd1_temp_max', 'd1_temp_min', 'h1_diasbp_invasive_max', 'h1_diasbp_invasive_min', 'h1_diasbp_max', 'h1_diasbp_min', 'h1_diasbp_noninvasive_max', 'h1_diasbp_noninvasive_min', 'h1_heartrate_max', 'h1_heartrate_min', 'h1_mbp_invasive_max', 'h1_mbp_invasive_min', 'h1_mbp_max', 'h1_mbp_min', 'h1_mbp_noninvasive_max', 'h1_mbp_noninvasive_min', 'h1_resprate_max', 'h1_resprate_min', 'h1_spo2_max', 'h1_spo2_min', 'h1_sysbp_invasive_max', 'h1_sysbp_invasive_min', 'h1_sysbp_max', 'h1_sysbp_min', 'h1_sysbp_noninvasive_max', 'h1_sysbp_noninvasive_min', 'h1_temp_max', 'h1_temp_min', 'd1_albumin_max', 'd1_albumin_min', 'd1_bilirubin_max', 'd1_bilirubin_min', 'd1_bun_max', 'd1_bun_min', 'd1_calcium_max', 'd1_calcium_min', 'd1_creatinine_max', 'd1_creatinine_min', 'd1_glucose_max', 'd1_glucose_min', 'd1_hco3_max', 'd1_hco3_min', 'd1_hemaglobin_max', 'd1_hemaglobin_min', 'd1_hematocrit_max', 'd1_hematocrit_min', 'd1_inr_max', 'd1_inr_min', 'd1_lactate_max', 
'd1_lactate_min', 'd1_platelets_max', 'd1_platelets_min', 'd1_potassium_max', 'd1_potassium_min', 'd1_sodium_max', 'd1_sodium_min', 'd1_wbc_max', 'd1_wbc_min', 'h1_albumin_max', 'h1_albumin_min', 'h1_bilirubin_max', 'h1_bilirubin_min', 'h1_bun_max', 'h1_bun_min', 'h1_calcium_max', 'h1_calcium_min', 'h1_creatinine_max', 'h1_creatinine_min', 'h1_glucose_max', 'h1_glucose_min', 'h1_hco3_max', 'h1_hco3_min', 'h1_hemaglobin_max', 'h1_hemaglobin_min', 'h1_hematocrit_max', 'h1_hematocrit_min', 'h1_inr_max', 'h1_inr_min', 'h1_lactate_max', 'h1_lactate_min', 'h1_platelets_max', 'h1_platelets_min', 'h1_potassium_max', 'h1_potassium_min', 'h1_sodium_max', 'h1_sodium_min', 'h1_wbc_max', 'h1_wbc_min', 'd1_arterial_pco2_max', 'd1_arterial_pco2_min', 'd1_arterial_ph_max', 'd1_arterial_ph_min', 'd1_arterial_po2_max', 'd1_arterial_po2_min', 'd1_pao2fio2ratio_max', 'd1_pao2fio2ratio_min', 'h1_arterial_pco2_max', 'h1_arterial_pco2_min', 'h1_arterial_ph_max', 'h1_arterial_ph_min', 'h1_arterial_po2_max', 'h1_arterial_po2_min', 'h1_pao2fio2ratio_max', 'h1_pao2fio2ratio_min', 'apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi', 'apache_2_diagnosis', 'apache_3j_diagnosis']] # noqa
categorical_cols = [x for x in df.columns if x in ['ethnicity', 'gender', 'hospital_admit_source', 'icu_admit_source', 'icu_stay_type', 'icu_type', 'apache_3j_bodysystem', 'apache_2_bodysystem']] # noqa
binary_cols = [x for x in df.columns if x in ['elective_surgery', 'readmission_status', 'apache_post_operative', 'arf_apache', 'gcs_unable_apache', 'intubated_apache', 'ventilated_apache', 'aids', 'cirrhosis', 'diabetes_mellitus', 'hepatic_failure', 'immunosuppression', 'leukemia', 'lymphoma', 'solid_tumor_with_metastasis']] # noqa
target_col = [x for x in df.columns if x in ['hospital_death']]
logger.info(f'Target cols: {target_col}')
logger.info(f'Continuous cols: {continuous_cols}')
logger.info(f'Categorical cols: {categorical_cols}')
logger.info(f'Binary cols: {binary_cols}')
logger.info(f'Masking columns')
df = df[target_col + continuous_cols + categorical_cols + binary_cols]
logger.info(f'Dataframe has these cols {list(df.columns)}')
# Typify
logger.info('typifying...')
if 0 < len(continuous_cols):
df[continuous_cols] = df[continuous_cols].astype('float32')
if 0 < len(categorical_cols):
df[categorical_cols] = df[categorical_cols].astype('str').astype('category')
if 0 < len(binary_cols):
df[binary_cols] = df[binary_cols].astype('str').astype('category')
if 0 < len(target_col):
df[target_col] = df[target_col].astype('str').astype('category')
# Export
output_filename = Path.cwd().joinpath(input_csv_filepath).stem + '.feather'
df.to_feather(Path.cwd().joinpath(output_dirpath).joinpath(output_filename))
@click.command()
@click.argument('input_csv_filepath', type=click.Path(exists=True), default='data/raw/training_v2.csv')
@click.argument('output_dirpath', type=click.Path(exists=True), default='data/interim/')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
3904,
198,
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
16605,
24330,
1330,
1064,
62,
26518,
24330,
11,
3440,
62,
26518,
24330,
198,
11748,
19798,
2... | 2.238545 | 2,750 |
#!/usr/bin/python
# coding: utf8
import geocoder
import requests_mock
location = 'New York City'
coordinates = [45.3, -75.4]
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
19617,
25,
3384,
69,
23,
198,
198,
11748,
4903,
420,
12342,
198,
11748,
7007,
62,
76,
735,
198,
198,
24886,
796,
705,
3791,
1971,
2254,
6,
198,
37652,
17540,
796,
685,
2231,
13,
18,
11... | 2.529412 | 51 |
def print_bst_level(root):
"""Prints the tree in level order."""
if root is None:
return
print_bst_level(root.left)
print(root.data, end=' ')
print_bst_level(root.right)
def print_levels_bst(root):
"""Prints the tree in level order."""
if root is None:
return
q = []
q.append(root)
while len(q) > 0:
node = q.pop(0)
print(node.data, end=' ')
if node.left is not None:
q.append(node.left)
if node.right is not None:
q.append(node.right)
| [
198,
198,
4299,
3601,
62,
65,
301,
62,
5715,
7,
15763,
2599,
198,
220,
220,
220,
37227,
18557,
82,
262,
5509,
287,
1241,
1502,
526,
15931,
198,
220,
220,
220,
611,
6808,
318,
6045,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1441,... | 2.017857 | 280 |
#!/usr/bin/env python
"""Scikit-learn trainer package setup."""
import setuptools
REQUIRED_PACKAGES = [
"scikit-learn==0.20.4",
"pandas==0.24.2",
"pandas-gbq==0.13.2",
"cloudml-hypertune",
"gensim==3.8.3",
]
setuptools.setup(
name="hashy",
version="0.1.0",
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
include_package_data=True,
description="Hashtag Suggestions",
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
50,
979,
15813,
12,
35720,
21997,
5301,
9058,
526,
15931,
198,
198,
11748,
900,
37623,
10141,
628,
198,
2200,
10917,
37819,
62,
47,
8120,
25552,
796,
685,
198,
220,
220,
22... | 2.20398 | 201 |
import collections
import json
import logging
import sys
import regex as re
import requests
import discord
log = logging.getLogger(__name__)
| [
11748,
17268,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
11748,
40364,
355,
302,
198,
11748,
7007,
198,
11748,
36446,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
628,
628,
628,
... | 3.581395 | 43 |
from time import time
# Implementation using a decorator
# to store least recently used cache
from functools import lru_cache
@lru_cache(maxsize=1000)
start = time()
(fibonacci_with_decorator(500))
end = time()
runtime = end - start
# runtime=0.0009982585906982422
# explicit implementation of fibonacci function
# here a dict caches/stored the already calculated values of the fibonacci function
# and uses them in the next calculation that would require the fibonacci of a stored number
fibonacci_cached = {}
fibonacci(900)
# basic implementation
num1, num2 = 0, 1
for _ in range(0, 10):
print(num1)
num1, num2 = num2, num1 + num2
# implementation using generator
for number in fibonacci(10):
print(number)
| [
6738,
640,
1330,
640,
198,
2,
46333,
1262,
257,
11705,
1352,
198,
2,
284,
3650,
1551,
2904,
973,
12940,
198,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
628,
198,
31,
75,
622,
62,
23870,
7,
9806,
7857,
28,
12825,
8,
62... | 3.10084 | 238 |
import json
import os
import uuid
import pytest
from prefect.storage import Local
from viadot.flows import SupermetricsToAzureSQLv3
CWD = os.getcwd()
STORAGE = Local(path=CWD)
@pytest.fixture(scope="session")
uuid_4 = uuid.uuid4()
file_name = f"test_file_{uuid_4}.csv"
adls_path = f"raw/supermetrics/{file_name}"
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
334,
27112,
198,
198,
11748,
12972,
9288,
198,
6738,
662,
2309,
13,
35350,
1330,
10714,
198,
198,
6738,
25357,
324,
313,
13,
44041,
1330,
3115,
4164,
10466,
2514,
26903,
495,
17861,
85,
18,
1... | 2.507813 | 128 |
__author__ = "Stephen W. O'Neill Jr. soneill5045@gmail.com"
from .calculate_smoothness import calculate_smoothness | [
198,
198,
834,
9800,
834,
796,
366,
24920,
370,
13,
440,
6,
26538,
7504,
13,
264,
505,
359,
1120,
2231,
31,
14816,
13,
785,
1,
198,
198,
6738,
764,
9948,
3129,
378,
62,
5796,
5226,
1108,
1330,
15284,
62,
5796,
5226,
1108
] | 2.785714 | 42 |
#import json
import numpy as np
import os
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
class NumpyEncoder(json.JSONEncoder):
"""
Special json encoder for numpy types
Original code from: https://github.com/mpld3/mpld3/issues/434#issuecomment-340255689
Then modified by: https://stackoverflow.com/a/49677241
"""
def write_to_json(dic, target_dir):
"""
A function that saves JSON files.
"""
dumped = json.dumps(dic, cls=NumpyEncoder)
file = open(target_dir, 'w')
json.dump(dumped, file)
file.close()
def read_from_json(target_dir, use_dumps=False):
"""
A function that reads JSON files.
"""
f = open(target_dir, 'r')
data = json.load(f)
if use_dumps is True:
data = json.dumps(data)
data = json.loads(data)
f.close()
return data
| [
2,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
628,
198,
28311,
25,
198,
220,
220,
220,
1330,
334,
17752,
355,
33918,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
... | 2.454787 | 376 |
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine, MetaData
from ..databuddy import SqlaQueryBuilder
from flask import current_app
from toolspy import merge, fetch_nested_key_from_dict
| [
6738,
44161,
282,
26599,
13,
2302,
13,
2306,
296,
499,
1330,
3557,
499,
62,
8692,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
11,
30277,
6601,
198,
6738,
11485,
19608,
397,
21584,
1330,
311,
80,
5031,
20746,
32875,
198,
6738,
... | 3.358209 | 67 |
from django.conf import settings
# define the minimal weight of a tag in the tagcloud
TAGCLOUD_MIN = getattr(settings, 'TAGGIT_TAGCLOUD_MIN', 1.0)
# define the maximum weight of a tag in the tagcloud
TAGCLOUD_MAX = getattr(settings, 'TAGGIT_TAGCLOUD_MAX', 6.0) | [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
2,
8160,
262,
10926,
3463,
286,
257,
7621,
287,
262,
7621,
17721,
198,
42197,
5097,
2606,
35,
62,
23678,
796,
651,
35226,
7,
33692,
11,
705,
42197,
38,
2043,
62,
42197,
5097,
2606,
... | 2.778947 | 95 |
#!/usr/bin/env python
__author__ = 'Stephen P. Henrie'
from interface.services.coi.iobject_management_service import BaseObjectManagementService
from pyon.util.containers import is_basic_identifier
from pyon.core.exception import BadRequest, NotFound
from pyon.core.interfaces.interface_util import is_yaml_string_valid
class ObjectManagementService(BaseObjectManagementService):
"""
A service for defining and managing object types used as resource, messages, etc.
"""
def create_object_type(self, object_type=None):
""" Should receive an ObjectType object
"""
# Return Value
# ------------
# {object_type_id: ''}
#
if not is_basic_identifier(object_type.name):
raise BadRequest("Invalid object_type name: %s" % object_type.name)
if not is_yaml_string_valid(object_type.definition):
raise BadRequest("Invalid YAML definition")
object_type_id, version = self.clients.resource_registry.create(object_type)
return object_type_id
def update_object_type(self, object_type=None):
""" Should receive an ObjectType object
"""
# Return Value
# ------------
# {success: true}
#
if not is_basic_identifier(object_type.name):
raise BadRequest("Invalid object_type name: %s" % object_type.name)
if not is_yaml_string_valid(object_type.definition):
raise BadRequest("Invalid YAML definition")
object_id, version = self.clients.resource_registry.update(object_type)
return object_id
def read_object_type(self, object_type_id=''):
""" Should return an ObjectType object
"""
# Return Value
# ------------
# object_type: {}
#
if not object_type_id:
raise BadRequest("The resource_type_id parameter is missing")
return self.clients.resource_registry.read(object_type_id)
def delete_object_type(self, object_type_id=''):
"""method docstring
"""
# Return Value
# ------------
# {success: true}
#
if not object_type_id:
raise BadRequest("The object_type_id parameter is missing")
return self.clients.resource_registry.delete(object_type_id)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
9800,
834,
796,
705,
24920,
350,
13,
6752,
5034,
6,
628,
198,
6738,
7071,
13,
30416,
13,
1073,
72,
13,
72,
15252,
62,
27604,
62,
15271,
1330,
7308,
10267,
48032,
16177,
198... | 2.491435 | 934 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 2.954545 | 44 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Brpc(CMakePackage):
"""An industrial-grade RPC framework used throughout Baidu, with
1,000,000+ instances(not counting clients) and thousands kinds of
services, called "baidu-rpc" inside Baidu. Only C++ implementatioo
on is opensourced right now."""
homepage = "https://github.com/apache/incubator-brpc"
url = "https://github.com/apache/incubator-brpc/archive/0.9.7.tar.gz"
version('0.9.7', sha256='722cd342baf3b05189ca78ecf6c56ea6ffec22e62fc2938335e4e5bab545a49c')
version('0.9.6', sha256='b872ca844999e0ba768acd823b409761f126590fb34cb0183da915a595161446')
version('0.9.5', sha256='11ca8942242a4c542c11345b7463a4aea33a11ca33e91d9a2f64f126df8c70e9')
depends_on('gflags')
depends_on('protobuf')
depends_on('leveldb')
depends_on('openssl')
patch('narrow.patch', sha256='d7393029443853ddda6c09e3d2185ac2f60920a36a8b685eb83b6b80c1535539', when='@:0.9.7')
| [
2,
15069,
2211,
12,
1238,
1828,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.344969 | 487 |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A logging filter to prepend request scoped formatting to all logging
records that use this filter. Request-based values will cause the log
records to have correlation values that can be used to better trace
logs. If no handler that supports a request scope is present, does not attempt
to change the logs in any way
Threads initiated using threading. Thread can be correlated to the request
they came from by setting a kwarg of log_extra, containing a dictionary
of values matching the VALID_ADDL_FIELDS below and any fields that are set
as additional_fields by the setup_logging function. This mechanism assumes
that the thread will maintain the correlation values for the life
of the thread.
"""
import logging
# Import uwsgi to determine if it has been provided to the application.
# Only import the UwsgiLogFilter if uwsgi is present
try:
import uwsgi
from shipyard_airflow.control.logging.uwsgi_filter import UwsgiLogFilter
except ImportError:
uwsgi = None
# BASE_ADDL_FIELDS are fields that will be included in the request based
# logging - these fields need not be set up independently as opposed to the
# additional_fields parameter used below, which allows for more fields beyond
# this default set.
BASE_ADDL_FIELDS = ['req_id', 'external_ctx', 'user', 'user_id']
LOG = logging.getLogger(__name__)
def set_logvar(key, value):
""" Attempts to set the logvar in the request scope, or ignores it
if not running in an environment that supports it.
"""
if value:
if uwsgi:
uwsgi.set_logvar(key, value)
# If there were a different handler than uwsgi, here is where we'd
# need to set the logvar value for its use.
def assign_request_filter(handler, additional_fields=None):
"""Adds the request-scoped filter log filter to the passed handler
:param handler: a logging handler, e.g. a ConsoleHandler
:param additional_fields: fields that will be included in the logging
records (if a matching logging format is used)
"""
handler_cls = handler.__class__.__name__
if uwsgi:
if additional_fields is None:
additional_fields = []
addl_fields = [*BASE_ADDL_FIELDS, *additional_fields]
handler.addFilter(UwsgiLogFilter(uwsgi, addl_fields))
LOG.info("UWSGI present, added UWSGI log filter to handler %s",
handler_cls)
# if there are other handlers that would allow for request scoped logging
# to be set up, we could include those options here.
else:
LOG.info("No request based logging filter in the current environment. "
"No log filter added to handler %s", handler_cls)
| [
2,
15069,
2177,
5161,
5,
51,
42443,
14161,
13,
220,
1439,
584,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,... | 3.267857 | 1,008 |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import sys
# We're going to explicitly use a local installation of Pyserini (as opposed to a pip-installed one).
# Comment these lines out to use a pip-installed one instead.
sys.path.insert(0, './')
from pyserini.search.lucene import LuceneSearcher
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--qrels', type=str, help='qrels file', required=True)
parser.add_argument('--index', type=str, help='index location', required=True)
args = parser.parse_args()
searcher = LuceneSearcher(args.index)
with open(args.qrels, 'r') as reader:
for line in reader.readlines():
arr = line.split('\t')
doc = json.loads(searcher.doc(arr[2]).raw())['contents']
print(f'{arr[2]}\t{doc}')
| [
2,
198,
2,
350,
893,
263,
5362,
25,
36551,
37369,
14826,
2267,
351,
29877,
290,
15715,
24612,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
23... | 3.062099 | 467 |
"""Code snippets vol-45-snippet-223
Subprocess return code.
stevepython.wordpress.com
Download all snippets so far:
https://wp.me/Pa5TU8-1yg
"""
import subprocess
# Change 'notepade.exe' to some other executable if you are no using Windows.
result = subprocess.run(['notepad.exe'], stdout=subprocess.DEVNULL)
print(result.returncode)
if not result.returncode:
print('Returned from process without error')
# 0 is returned to indicate no error, else error.
| [
37811,
10669,
45114,
2322,
12,
2231,
12,
16184,
3974,
316,
12,
22047,
198,
7004,
14681,
1441,
2438,
13,
198,
198,
4169,
303,
29412,
13,
40346,
13,
785,
198,
10002,
477,
45114,
523,
1290,
25,
198,
5450,
1378,
24142,
13,
1326,
14,
28875... | 3.14094 | 149 |
import winograd.o6x6k3x3
import block8x8
from common import _MM_SHUFFLE
for post_operation in ["store", "stream"]:
arg_d_pointer = Argument(ptr(const_float_), name="d_pointer")
arg_wd_pointer = Argument(ptr(float_), name="wd_pointer")
arg_d_stride = Argument(size_t, name="d_stride")
arg_wd_stride = Argument(size_t, name="wd_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
with Function("nnp_iwt8x8_3x3_and_{post_operation}__avx2".format(post_operation=post_operation),
(arg_d_pointer, arg_wd_pointer, arg_d_stride, arg_wd_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset),
target=uarch.default + isa.fma3 + isa.avx2):
reg_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_d, arg_d_pointer)
reg_wd = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_wd, arg_wd_pointer)
reg_stride_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_d, arg_d_stride)
reg_stride_wd = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_wd, arg_wd_stride)
reg_row_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_cnt, arg_row_count)
reg_col_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_cnt, arg_column_count)
reg_row_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_off, arg_row_offset)
reg_col_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_off, arg_column_offset)
ymm_data = [YMMRegister() for _ in range(8)]
block8x8.load_with_padding(ymm_data, reg_d, reg_stride_d, reg_row_off, reg_row_cnt, reg_col_off, reg_col_cnt)
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
winograd.o6x6k3x3.transpose8x8(ymm_data)
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_row in ymm_data:
VSTOREPS([reg_wd], ymm_row)
if ymm_row is not ymm_data[-1]:
ADD(reg_wd, reg_stride_wd)
RETURN()
for reverse_kernel in [False, True]:
for post_operation in ["store", "stream"]:
arg_g_pointer = Argument(ptr(const_float_), name="d_pointer")
arg_wg_pointer = Argument(ptr(float_), name="wd_pointer")
arg_g_stride = Argument(size_t, name="d_stride")
arg_wg_stride = Argument(size_t, name="wd_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
kwt_arguments = (arg_g_pointer, arg_wg_pointer, arg_g_stride, arg_wg_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_kwt8x8_3{reverse}x3{reverse}_and_{post_operation}__avx2".format(
reverse="R" if reverse_kernel else "", post_operation=post_operation),
kwt_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_g, arg_g_pointer)
reg_wg = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_wg, arg_wg_pointer)
reg_stride_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_g, arg_g_stride)
reg_stride_wg = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_wg, arg_wg_stride)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride_g, 2)
xmm_load_mask = XMMRegister()
VMOVAPS(xmm_load_mask.as_ymm, Constant.float32x8(-0.0, -0.0, -0.0, +0.0, +0.0, +0.0, +0.0, +0.0))
xmm_g = [XMMRegister() for _ in range(3)]
for xmm in xmm_g:
VMASKMOVPS(xmm, xmm_load_mask, [reg_g])
if xmm is not xmm_g[-1]:
ADD(reg_g, reg_stride_g)
if reverse_kernel:
xmm_g = xmm_g[::-1]
ymm_wg_rows = winograd.o6x6k3x3.kernel_transform([xmm.as_ymm for xmm in xmm_g], rescale_coefficients=False)
ymm_g_rows = winograd.o6x6k3x3.transpose8x3([ymm.as_xmm for ymm in ymm_wg_rows])
if reverse_kernel:
ymm_g_rows = ymm_g_rows[::-1]
ymm_wg_rows = winograd.o6x6k3x3.kernel_transform(ymm_g_rows, rescale_coefficients=False)
rcp_9 = float.fromhex("0x1.C71C72p-4")
rcp_81 = float.fromhex("0x1.948B10p-7")
rcp_90 = float.fromhex("0x1.6C16C2p-7")
rcp_180 = float.fromhex("0x1.6C16C2p-8")
rcp_810 = float.fromhex("0x1.43A274p-10")
rcp_1620 = float.fromhex("0x1.43A274p-11")
rcp_8100 = float.fromhex("0x1.02E85Cp-13")
rcp_16200 = float.fromhex("0x1.02E85Cp-14")
rcp_32400 = float.fromhex("0x1.02E85Cp-15")
ymm_edge_scale = YMMRegister()
VMOVAPS(ymm_edge_scale, Constant.float32x8( 1.0, -2.0 * rcp_9, -2.0 * rcp_9, rcp_90, rcp_90, rcp_180, rcp_180, 1.0))
VMULPS(ymm_wg_rows[0], ymm_wg_rows[0], ymm_edge_scale)
VMULPS(ymm_wg_rows[7], ymm_wg_rows[7], ymm_edge_scale)
ymm_row12_scale = YMMRegister()
VMOVAPS(ymm_row12_scale, Constant.float32x8(-2.0 * rcp_9, 4.0 * rcp_81, 4.0 * rcp_81, -2.0 * rcp_810, -2.0 * rcp_810, -2.0 * rcp_1620, -2.0 * rcp_1620, -2.0 * rcp_9))
VMULPS(ymm_wg_rows[1], ymm_wg_rows[1], ymm_row12_scale)
VMULPS(ymm_wg_rows[2], ymm_wg_rows[2], ymm_row12_scale)
ymm_row34_scale = YMMRegister()
VMOVAPS(ymm_row34_scale, Constant.float32x8( rcp_90, -2.0 * rcp_810, -2.0 * rcp_810, rcp_8100, rcp_8100, rcp_16200, rcp_16200, rcp_90))
VMULPS(ymm_wg_rows[3], ymm_wg_rows[3], ymm_row34_scale)
VMULPS(ymm_wg_rows[4], ymm_wg_rows[4], ymm_row34_scale)
ymm_row56_scale = YMMRegister()
VMOVAPS(ymm_row56_scale, Constant.float32x8( rcp_180, -2.0 * rcp_1620, -2.0 * rcp_1620, rcp_16200, rcp_16200, rcp_32400, rcp_32400, rcp_180))
VMULPS(ymm_wg_rows[5], ymm_wg_rows[5], ymm_row56_scale)
VMULPS(ymm_wg_rows[6], ymm_wg_rows[6], ymm_row56_scale)
# Write output with stride
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_wg_row in ymm_wg_rows:
VSTOREPS([reg_wg], ymm_wg_row)
if ymm_wg_row is not ymm_wg_rows[-1]:
ADD(reg_wg, reg_stride_wg)
RETURN()
arg_m_pointer = Argument(ptr(const_float_), name="m_pointer")
arg_s_pointer = Argument(ptr(float_), name="s_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_m_stride = Argument(size_t, name="m_stride")
arg_s_stride = Argument(size_t, name="s_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for with_bias in [False, True]:
for with_relu in [False, True]:
if with_bias:
owt8x8_arguments = (arg_m_pointer, arg_s_pointer, arg_bias, arg_m_stride, arg_s_stride, arg_row_count, arg_column_count)
else:
owt8x8_arguments = (arg_m_pointer, arg_s_pointer, arg_m_stride, arg_s_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_owt8x8_3x3{with_bias}{with_relu}__avx2".format(with_bias="_with_bias" if with_bias else "", with_relu="_with_relu" if with_relu else ""),
owt8x8_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_m = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m, arg_m_pointer)
reg_s = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s, arg_s_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
xmm_bias = XMMRegister()
VINSERTPS(xmm_bias, xmm_bias, [reg_bias], 0b1101 | 1<<4)
reg_m_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m_stride, arg_m_stride)
reg_s_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s_stride, arg_s_stride)
reg_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_count, arg_row_count)
reg_column_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_count, arg_column_count)
ymm_m = [YMMRegister() for _ in range(8)]
for ymm in ymm_m:
if with_bias and ymm is ymm_m[1]:
VADDPS(ymm, xmm_bias.as_ymm, [reg_m])
else:
VMOVAPS(ymm, [reg_m])
if ymm is not ymm_m[-1]:
ADD(reg_m, reg_m_stride)
ymm_t = winograd.o6x6k3x3.output_transform(ymm_m)
ymm_tt = winograd.o6x6k3x3.transpose6x8(ymm_t)
ymm_s = winograd.o6x6k3x3.output_transform(ymm_tt)
block8x8.store_packed(ymm_s, reg_s, reg_s_stride, reg_row_count, reg_column_count, None, None, with_relu)
RETURN()
| [
11748,
1592,
519,
6335,
13,
78,
21,
87,
21,
74,
18,
87,
18,
198,
11748,
2512,
23,
87,
23,
198,
6738,
2219,
1330,
4808,
12038,
62,
9693,
47588,
2538,
628,
198,
1640,
1281,
62,
27184,
287,
14631,
8095,
1600,
366,
5532,
1,
5974,
198,... | 1.867928 | 5,179 |
#!/usr/bin/env python
# instance_editor_selection.py -- Example of an instance editor
# with instance selection
from traits.api \
import HasStrictTraits, Int, Instance, List, Regex, Str
from traitsui.api \
import View, Item, InstanceEditor
people = [
Person( name = 'Dave', age = 39, phone = '555-1212' ),
Person( name = 'Mike', age = 28, phone = '555-3526' ),
Person( name = 'Joe', age = 34, phone = '555-6943' ),
Person( name = 'Tom', age = 22, phone = '555-7586' ),
Person( name = 'Dick', age = 63, phone = '555-3895' ),
Person( name = 'Harry', age = 46, phone = '555-3285' ),
Person( name = 'Sally', age = 43, phone = '555-8797' ),
Person( name = 'Fields', age = 31, phone = '555-3547' )
]
if __name__ == '__main__':
Team( name = 'Vultures',
captain = people[0],
roster = people ).configure_traits()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
198,
198,
2,
4554,
62,
35352,
62,
49283,
13,
9078,
1377,
17934,
286,
281,
4554,
5464,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.407407 | 378 |
""" Basic mixin for MongoEngine backed data models """
import logging
from typing import Dict, List, Union, no_type_check
import mongoengine as me
from util.deco import classproperty
logger = logging.getLogger(__name__)
| [
37811,
14392,
5022,
259,
329,
42591,
13798,
9763,
1366,
4981,
37227,
198,
198,
11748,
18931,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
4479,
11,
645,
62,
4906,
62,
9122,
198,
198,
11748,
285,
25162,
18392,
355,
502,
198,
198,
67... | 3.461538 | 65 |
#!/usr/bin/env python
import logging
import flask
import RPi.GPIO as GPIO
PIN_GREEN = 16
PIN_BLUE = 20
PIN_RED = 21
pwms = None
if __name__ == '__main__':
import IPython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('color', help='Color in RGB (0-1) or (0-255). E.g. 255 255 0')
args = parser.parse_args()
GPIO.setmode(GPIO.BCM)
setup()
color(*[float(i) for i in args.color.split(' ')])
IPython.embed()
GPIO.cleanup()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
18931,
198,
198,
11748,
42903,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
628,
198,
44032,
62,
43016,
796,
1467,
198,
44032,
62,
9148,
8924,
796,
1160,
198,
44032,
62,
... | 2.438424 | 203 |
import torch
from torch.nn import functional as fn
def fixed_padding(inputs: torch.Tensor, kernel_size: int, dilation: int) -> torch.Tensor:
"""Computes the effective kernel size given the dilation (3x3 with dilation 2 is in fact a 5x5),
then computes the start and end padding (e.g. for a 5x5: |xx|c|xx| => padding=2 left and right required),
lastly it expands the input by the computed amount in all directions.
:param inputs: input tensor not yet padded
:type inputs: torch.Tensor
:param kernel_size: nominal kernel size, not accounting for dilations
:type kernel_size: int
:param dilation: kernel dilation
:type dilation: int
:return: padded input, with the same shape, except height and width
:rtype: torch.Tensor
"""
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = fn.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
| [
11748,
28034,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
24714,
628,
198,
4299,
5969,
62,
39231,
7,
15414,
82,
25,
28034,
13,
51,
22854,
11,
9720,
62,
7857,
25,
493,
11,
288,
10520,
25,
493,
8,
4613,
28034,
13,
51,
22854,
25,
... | 2.903581 | 363 |
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensornetwork
import pytest
import numpy as np
import tensorflow as tf
import torch
import jax
from jax.config import config
import tensornetwork.config as config_file
config.update("jax_enable_x64", True)
tf.compat.v1.enable_v2_behavior()
np_dtypes = [np.float32, np.float64, np.complex64, np.complex128, np.int32]
tf_dtypes = [tf.float32, tf.float64, tf.complex64, tf.complex128, tf.int32]
torch_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64]
jax_dtypes = [
jax.numpy.float32,
jax.numpy.float64,
jax.numpy.complex64,
jax.numpy.complex128,
jax.numpy.int32
]
@pytest.mark.parametrize("backend, dtype", [
*list(zip(['numpy'] * len(np_dtypes), np_dtypes)),
*list(zip(['tensorflow'] * len(tf_dtypes), tf_dtypes)),
*list(zip(['pytorch'] * len(torch_dtypes), torch_dtypes)),
*list(zip(['jax'] * len(jax_dtypes), jax_dtypes)),
])
| [
2,
15069,
13130,
383,
309,
22854,
26245,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.795262 | 591 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from pyparallel import Downloader
@click.command()
@click.option('--url', required=True, help='Download URL')
@click.option('--conns', required=True, help='Number of parts')
@click.option('--filename', required=True, help='Output filename')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
3904,
198,
198,
6738,
12972,
1845,
29363,
1330,
10472,
263,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976... | 3.091837 | 98 |
# Python3
# Definition for singly-linked list:
# class ListNode(object):
# def __init__(self, x):
# self.value = x
# self.next = None
#
| [
2,
11361,
18,
198,
198,
2,
30396,
329,
1702,
306,
12,
25614,
1351,
25,
198,
2,
1398,
7343,
19667,
7,
15252,
2599,
198,
2,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
2116,
13,
8367,
796,... | 2.45 | 60 |
import yaml
with open("analysis_config.yml", mode="r") as fptr:
analysis_config = yaml.safe_load(fptr)
# Read in the variable dictionary
variable_dict = analysis_config["timeseries"]
# Grab a list of variables
variable_list = list(variable_dict.keys())
# Add the vertical transformations to the list of variables
all_variables = variable_dict
all_variables.update(analysis_config["coordinate_transformations"])
# Determine which variables need to converted
# TODO rename - unit_conversion_var (include units somewhere in here)
variables_to_convert = {}
for variable in all_variables.keys():
if all_variables[variable]["units_out"]:
variables_to_convert.update({variable: all_variables[variable]})
| [
11748,
331,
43695,
198,
198,
4480,
1280,
7203,
20930,
62,
11250,
13,
88,
4029,
1600,
4235,
2625,
81,
4943,
355,
277,
20692,
25,
198,
220,
220,
220,
3781,
62,
11250,
796,
331,
43695,
13,
21230,
62,
2220,
7,
69,
20692,
8,
198,
198,
... | 3.293578 | 218 |
import logging
_DEBUG = True
if _DEBUG:
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s > %(asctime)s > %(message)s [in %(filename)s at %(lineno)d]',
datefmt='%Y-%m-%d %H:%M:%S')
else:
logging.basicConfig(level=logging.INFO,
format='%(levelname)s > %(asctime)s > %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
| [
11748,
18931,
198,
198,
62,
30531,
796,
6407,
198,
198,
361,
4808,
30531,
25,
198,
220,
220,
220,
18931,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 1.714859 | 249 |
"""
———————————————————————
Python3
LC20_validParentheses.py
———————————————————————
个人备注
2020-10-10
需要再次尝试
思路完全理解,利用进出栈及字典思路解题,代码还需熟悉
———————————————————————
题目出处
LC#20 有效的括号
简单
https://leetcode-cn.com/problems/valid-parentheses/
NC#52 括号序列
简单 栈 字符串
https://www.nowcoder.com/practice/37548e94a270412c8b9fb85643c8ccc2
———————————————————————
题目描述
给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。
有效字符串需满足:
1.左括号必须用相同类型的右括号闭合。
2.左括号必须以正确的顺序闭合。
注意空字符串可被认为是有效字符串。
———————————————————————
"""
testCases = ['{[[[(]])]}', '{([(())])}', '', ')(', '([{}']
class Solution:
"""
:type string: str
:rtype: bool
"""
if __name__ == '__main__':
main()
| [
37811,
198,
30542,
8184,
4500,
960,
198,
37906,
18,
220,
198,
5639,
1238,
62,
12102,
24546,
39815,
13,
9078,
198,
30542,
8184,
4500,
960,
198,
10310,
103,
21689,
13783,
229,
37345,
101,
198,
42334,
12,
940,
12,
940,
198,
165,
250,
222... | 1.192982 | 570 |
import json
from schema import And, Schema, Use
# This is what you would see if you uses boto3 client.receive_messages()
# This is what you would see as the input event for a lambda triggered by sqs
# This is what you would see as the input event for a lambda triggered by sqs
# via eventbridge
| [
11748,
33918,
198,
198,
6738,
32815,
1330,
843,
11,
10011,
2611,
11,
5765,
628,
198,
2,
770,
318,
644,
345,
561,
766,
611,
345,
3544,
275,
2069,
18,
5456,
13,
260,
15164,
62,
37348,
1095,
3419,
628,
198,
2,
770,
318,
644,
345,
561... | 3.682927 | 82 |
import os
import numpy as np
import torch
from torch.utils.data import Dataset
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
628
] | 3.375 | 24 |
import json
from decimal import Decimal
from django.conf import settings
from django.core.urlresolvers import reverse
import commonware.log
from rest_framework import response, serializers
from tower import ungettext as ngettext
import mkt
from drf_compound_fields.fields import ListField
from mkt.api.fields import (ESTranslationSerializerField, LargeTextField,
ReverseChoiceField, SemiSerializerMethodField,
TranslationSerializerField)
from mkt.constants.applications import DEVICE_TYPES
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.constants.features import FeatureProfile
from mkt.constants.payments import PROVIDER_BANGO
from mkt.prices.models import AddonPremium, Price
from mkt.search.serializers import BaseESSerializer, es_to_datetime
from mkt.site.helpers import absolutify
from mkt.submit.forms import mark_for_rereview
from mkt.submit.serializers import PreviewSerializer, SimplePreviewSerializer
from mkt.tags.models import attach_tags
from mkt.translations.utils import no_translation
from mkt.versions.models import Version
from mkt.webapps.models import (AddonUpsell, AppFeatures, Geodata, Preview,
Webapp)
from mkt.webapps.utils import dehydrate_content_rating
log = commonware.log.getLogger('z.api')
class ESAppFeedSerializer(BaseESAppFeedSerializer):
"""
App serializer targetted towards the Feed, Fireplace's homepage.
Specifically for Feed Apps/Brands that feature the whole app tile and an
install button rather than just an icon.
"""
class ESAppFeedCollectionSerializer(BaseESAppFeedSerializer):
"""
App serializer targetted towards the Feed, Fireplace's homepage.
Specifically for Feed Apps, Collections, Shelves that only need app icons.
"""
class SimpleAppSerializer(AppSerializer):
"""
App serializer with fewer fields (and fewer db queries as a result).
Used as a base for FireplaceAppSerializer and CollectionAppSerializer.
"""
previews = SimplePreviewSerializer(many=True, required=False,
source='all_previews')
class RocketbarESAppSerializer(serializers.Serializer):
"""Used by Firefox OS's Rocketbar apps viewer."""
name = ESTranslationSerializerField()
@property
class RocketbarESAppSerializerV2(AppSerializer, RocketbarESAppSerializer):
"""
Replaced `icon` key with `icons` for various pixel sizes: 128, 64, 48, 32.
"""
| [
11748,
33918,
198,
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
198,
11748,
2219,
1574,
13,
6404,
198,
6738,
1334,
62,
... | 3.033939 | 825 |
from drf_yasg.utils import swagger_auto_schema
from core.docs import dict_response_schema, param, list_response_schema
# Canned example payloads used only to build the Swagger documentation below.
# They mirror the serializer output of the shipping API endpoints.

# Example of a single shipping item, including the history of the batches it
# has moved through (one completed leg, one still pending transport).
shipping_items_response = {
    "uuid": "45563485-ca00-43e7-b573-7890fd4c3822",
    "tracking_number": "20220309-0003066198",
    "sku": "1234",
    "status": "CREATED",
    "shipping_batches_history": [
        {
            "alias": "20220309-CqaaCZx",
            "completed": True,
            "shipping_transport__uuid": "ef7d79b3-5267-4f02-acde-593844fb02a0",
            "shipping_transport__distribution_center_code_source": "AFK",
            "shipping_transport__distribution_center_code_destination": "S01"
        },
        {
            "alias": "20220309-xWK",
            "completed": False,
            "shipping_transport__uuid": None,
            "shipping_transport__distribution_center_code_source": None,
            "shipping_transport__distribution_center_code_destination": None
        }
    ],
    "current_distribution_center_code": "S01",
    "timestamp_created": "2022-03-01T22:03:00+09:00",
    "timestamp_completed": None
}

# Example of a shipping batch that has not yet been assigned to a transport.
shipping_batch_response = {
    "uuid": "5dd9b051-9c42-417e-9287-71751b9442b2",
    "alias": "20220309-xWK",
    "completed": False,
    "shipping_transport": None,
    "timestamp_created": None,
    "timestamp_completed": None
}

# Example of a transport between distribution centers (destination/driver not
# yet assigned in this sample).
shipping_transport_response = {
    "uuid": "ef7d79b3-5267-4f02-acde-593844fb02a0",
    "completed": False,
    "batch_count": 2,
    "distribution_center_source": {
        "uuid": "48574f05-a29e-4735-a817-839b228c3e82",
        "center_code": "AFK",
        "name": "AFK",
        "staff_names": ""
    },
    "distribution_center_destination": None,
    "driver": None,
    "timestamp_created": "2022-03-09T22:20:15.418651+09:00",
    "timestamp_departed": "2022-03-12T16:15:35.224593+09:00",
    "timestamp_arrived": "2022-03-12T16:24:32.355557+09:00"
}
# drf-yasg schema decorator for the "list batches in a transport" endpoint.
# The operation_description is built from implicitly concatenated string
# literals (rendered as plain text by Swagger UI).
doc_transport_batches = swagger_auto_schema(
    operation_id='transport_batches',
    operation_description=
    'Description: \n'
    ' - list batches in transport \n'
    ' - \n\n'
    'Params: \n'
    '- ordering: timestamp_created, timestamp_completed\n'
    '- completed: 1, 0\n\n'
    'Permission: '
    ' - IsStaff | HasAPIKey\n\n'
    'Link: <link for extra information>',
    operation_summary="list batches in transport",
    manual_parameters=[
        param('ordering', 'timestamp_created, timestamp_completed, -timestamp_created, -timestamp_completed'),
        param('completed', '1, 0')
    ],
    tags=["shipping"],
    responses={
        200: dict_response_schema(
            list_response_schema(
                shipping_batch_response
            )
        ),
    }
)

# drf-yasg schema decorator for the "list shipping items in a batch" endpoint.
# NOTE(review): the description advertises an 'ordering' param, but
# manual_parameters declares 'status' and 'completed' instead — one of the two
# is stale; confirm against the actual view's filter backends.
doc_batch_items = swagger_auto_schema(
    operation_id='batch_items',
    operation_description=
    'Description: \n'
    ' - list shipping items in a batch \n'
    ' - \n\n'
    'Params: \n'
    '- status: \n'
    '- ordering: timestamp_created, timestamp_completed\n\n'
    'Permission: '
    ' - IsStaff | HasAPIKey\n\n'
    'Link: <link for extra information>',
    operation_summary="list shipping items in a batch \n",
    manual_parameters=[
        param('status', 'CREATED, MOVING, COMPLETED, DAMAGED, LOST'),
        param('completed', '1, 0')
    ],
    tags=["shipping"],
    responses={
        200: dict_response_schema(
            list_response_schema(
                shipping_items_response
            )
        )
    }
)
6738,
1553,
69,
62,
88,
292,
70,
13,
26791,
1330,
1509,
7928,
62,
23736,
62,
15952,
2611,
198,
6738,
4755,
13,
31628,
1330,
8633,
62,
26209,
62,
15952,
2611,
11,
5772,
11,
1351,
62,
26209,
62,
15952,
2611,
628,
198,
1477,
4501,
62,
... | 2.156511 | 1,559 |
# Hyper-parameter tuning script: fits an XGBoost classifier on a pre-selected
# feature subset using successive-halving grid search and prints the winner.

# Data manipulation
import pandas as pd
import numpy as np

# Save files
# NOTE(review): pickle and numpy are imported but unused in this excerpt —
# confirm whether a model-persistence step was removed before deleting them.
import pickle

# Machine Learning models
from xgboost import XGBClassifier
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV

# Load the training split produced by an earlier preprocessing step.
x_train = pd.read_csv('data/x_train.csv')
y_train = pd.read_csv('data/y_train.csv')

# Features retained after the (external) feature-selection step.
columns_selected = ['Gender',
                    'Age',
                    'Region_Code',
                    'Previously_Insured',
                    'Vehicle_Damage',
                    'Annual_Premium',
                    'Policy_Sales_Channel',
                    'age_class',
                    'Vehicle_Age_0',
                    'Vehicle_Age_1',
                    'Vehicle_Age_2']

x_train_fs = x_train[columns_selected]

# Base estimator (NOTE: the name is rebound to the search object below).
xgb_tuning = XGBClassifier(random_state=42)

# Search space for the halving grid search.
parameters = {
    'n_estimators': [300, 400, 500],
    'eta': [0.5],
    'max_depth': [10],
    'subsample': [0.9],
    'colsample_bytree': [0.6]
}

# Successive halving: cheap rounds eliminate poor candidates early.
xgb_tuning = HalvingGridSearchCV(xgb_tuning,
                                 parameters,
                                 verbose=1, n_jobs=1, cv=3,
                                 scoring='roc_auc')

# NOTE(review): y_train.values is likely 2-D (single-column DataFrame);
# sklearn expects a 1-D target — consider y_train.values.ravel(). TODO confirm.
xgb_tuning.fit(x_train_fs, y_train.values)

print(xgb_tuning.best_params_)
| [
2,
17267,
262,
5981,
12782,
198,
2,
6060,
17512,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
12793,
3696,
198,
11748,
2298,
293,
198,
198,
2,
10850,
18252,
4981,
198,
6738,
2124,
70,
39521,
... | 1.904011 | 698 |
# Push an element to stack
# Pop an element from stack 1 and push it to stack two.
# BUG FIX: the guard previously compared against '__main' (missing trailing
# underscores), which is never equal to __name__, so the demo body was dead
# code. With the correct dunder the script actually runs.
if __name__ == '__main__':
    # Exercise the stack: push a few values, pop once, and show the result.
    stack = Stack()
    stack.push(3)
    stack.push(1)
    stack.push(6)
    stack.push(10)
    stack.pop()
    print(stack.display())
| [
198,
220,
220,
220,
220,
198,
220,
220,
220,
1303,
23691,
281,
5002,
284,
8931,
628,
220,
220,
220,
1303,
8099,
281,
5002,
422,
8931,
352,
290,
4574,
340,
284,
8931,
734,
13,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
... | 2.304348 | 115 |
# Copyright 2017 Rodrigo Pinheiro Marques de Araujo <fenrrir@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import abc
import codecs
import six
@six.add_metaclass(abc.ABCMeta)
@six.add_metaclass(abc.ABCMeta)
| [
2,
15069,
2177,
46198,
13727,
258,
7058,
1526,
13281,
390,
30574,
84,
7639,
1279,
41037,
21062,
343,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
... | 3.638235 | 340 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import threading
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.signals import template_rendered
from django_nose.runner import NoseTestSuiteRunner, translate_option
from mako import runtime
from mako.template import Template
# Only the runner class is part of this module's public API.
__all__ = ['HueTestRunner']

# Capturing the mako context is not thread safe, so we wrap rendering in a
# mutex. RLock (not Lock) so a render that re-enters on the same thread
# doesn't deadlock.
_MAKO_LOCK = threading.RLock()
def _instrumented_test_render(self, *args, **data):
  """
  An instrumented Template render method, providing a signal
  that can be intercepted by the test system Client.

  Temporarily swaps the template's ``callable_`` for the instrumented
  ``mako_callable_`` while rendering, restoring the original afterwards.
  """
  with _MAKO_LOCK:
    # Keep a *stack* of original callables so nested/re-entrant renders on
    # the same template object unwind correctly.
    if hasattr(self, 'original_callable_'):
      self.original_callable_.append(self.callable_)
    else:
      self.original_callable_ = [self.callable_]
    # NOTE(review): ``mako_callable_`` is defined elsewhere in this module
    # (outside this excerpt); presumably it emits the template_rendered
    # signal — confirm before refactoring.
    self.callable_ = mako_callable_
    try:
      # Render with the *original* callable; the instrumented one is only
      # installed so mako-internal lookups observe it during the render.
      response = runtime._render(self, self.original_callable_[-1], args, data)
    finally:
      # Always restore, even if rendering raised.
      self.callable_ = self.original_callable_.pop()
  return response
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
49962,
284,
1012,
280,
1082,
64,
11,
3457,
13,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198... | 3.372477 | 545 |
# -*- coding: utf-8 -*-
import os
os_env = os.environ
class ProdConfig(Config):
    """Production configuration - aligned to openshift."""
    ENV = 'prod'
    DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable the Flask Debug toolbar in production
class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    # Local SQLite wiring kept here for reference but currently disabled;
    # the db file would live in the project root.
    #DB_NAME = 'dev.db'
    #DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    #SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
198,
418,
62,
24330,
796,
28686,
13,
268,
2268,
628,
198,
4871,
1041,
67,
16934,
7,
16934,
2599,
198,
220,
220,
220,
37227,
35027,
8398,
13,
532,
22... | 2.492537 | 268 |
import aspose.slides as slides | [
11748,
355,
3455,
13,
6649,
1460,
355,
19392
] | 3.75 | 8 |
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2020 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from collections import namedtuple
from sphinxcontrib.confluencebuilder.translator import ConfluenceBaseTranslator
from tests.lib import prepareConfiguration
from tests.lib import prepareDirectories
from tests.lib import prepareSphinx
import os
import unittest
Reporter = namedtuple('Reporter', 'warning')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
25,
22163,
4766,
25,
15069,
1584,
12,
42334,
45368,
28413,
7326,
23079,
35869,
25767,
669,
357,
32,
24318,
20673,
8,
198,
25,
43085,
25,
347,
10305,
12,
17,
... | 3.413534 | 133 |
#pragma out
import dylink_portability

# Hand execution off to the dylink portability shim, which runs the named
# repy test module without the usual restriction sandbox.
dylink_portability.run_unrestricted_repy_code("testchunklib_bigchunk_multisockets.py")
| [
2,
1050,
363,
2611,
503,
198,
11748,
288,
2645,
676,
62,
634,
1799,
198,
30360,
676,
62,
634,
1799,
13,
5143,
62,
403,
49343,
62,
7856,
88,
62,
8189,
7203,
9288,
354,
2954,
8019,
62,
14261,
354,
2954,
62,
16680,
271,
11603,
13,
90... | 2.717391 | 46 |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 The Project GNOME Pomodoro Tracking Authors
import os
import argparse
import configparser
import sys
from datetime import datetime, timedelta
import logging
import gnome_pomodoro_tracking.utils as utils
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
33448,
383,
4935,
40339,
25505,
375,
16522,
37169,
46665,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
4566,
48610,
198,
11748,
25064,
198... | 3.444444 | 72 |
import datetime
import pytest
import aiomysql.cursors
# Sample rows used by the DictCursor tests below: each mirrors a table row
# with name/age/DOB columns.
BOB = {'name': 'bob', 'age': 21,
       'DOB': datetime.datetime(1990, 2, 6, 23, 4, 56)}

JIM = {'name': 'jim', 'age': 56,
       'DOB': datetime.datetime(1955, 5, 9, 13, 12, 45)}

FRED = {'name': 'fred', 'age': 100,
        'DOB': datetime.datetime(1911, 9, 12, 1, 1, 1)}

# Cursor class under test: rows come back as dicts instead of tuples.
CURSOR_TYPE = aiomysql.cursors.DictCursor
@pytest.mark.run_loop
@pytest.mark.run_loop
@pytest.mark.run_loop
| [
11748,
4818,
8079,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
257,
29005,
893,
13976,
13,
66,
1834,
669,
628,
198,
8202,
33,
796,
1391,
6,
3672,
10354,
705,
65,
672,
3256,
705,
496,
10354,
2310,
11,
198,
220,
220,
220,
220,
220,... | 2.050691 | 217 |
import pytest
import sqlalchemy as sa
from sqlalchemy_utils.observer import observes
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.usefixtures('postgresql_dsn')
| [
11748,
12972,
9288,
198,
11748,
44161,
282,
26599,
355,
473,
198,
198,
6738,
44161,
282,
26599,
62,
26791,
13,
672,
15388,
1330,
34526,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
90... | 2.626374 | 91 |
#!c:\users\patrick\documents\github\ojomebeta\ojomebeta\scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Dispatch to Django's management CLI (reads the command from sys.argv).
    management.execute_from_command_line()
| [
2,
0,
66,
7479,
18417,
59,
29615,
59,
15390,
2886,
59,
12567,
59,
13210,
462,
31361,
59,
13210,
462,
31361,
59,
46521,
59,
29412,
13,
13499,
198,
6738,
42625,
14208,
13,
7295,
1330,
4542,
198,
198,
361,
11593,
3672,
834,
6624,
366,
... | 2.967213 | 61 |
# [LICENSE]
# Copyright (c) 2020, Alliance for Sustainable Energy.
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or
# promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# [/LICENSE]
import pytest
@pytest.fixture(scope="session",autouse=True)
| [
2,
685,
43,
2149,
24290,
60,
198,
2,
15069,
357,
66,
8,
12131,
11,
10302,
329,
45276,
6682,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
220,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
220,
198,
2,
351,
... | 3.366 | 500 |
import numpy as np
import argparse
from utils import data_utils
from utils import ml_utils
import pickle
if __name__ == "__main__":
    # Script entry point; ``main`` is defined elsewhere in this module
    # (outside this excerpt).
    main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
198,
6738,
3384,
4487,
1330,
1366,
62,
26791,
198,
6738,
3384,
4487,
1330,
25962,
62,
26791,
198,
11748,
2298,
293,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
... | 2.921569 | 51 |
# Generated by Django 3.0.7 on 2020-06-18 18:57
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
3312,
12,
1507,
1248,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from pymol import cmd
from pymol import util
from luna.wrappers.cgo_arrow import cgo_arrow
from luna.util.exceptions import PymolSessionNotInitialized, IllegalArgumentError
from luna.util.default_values import PYMOL_INTERACTION_COLOR, INTERACTION_SHORT_NAMES
from luna.util.file import get_filename, get_file_format
from Bio.Data.SCOPData import protein_letters_3to1
import logging
# NOTE(review): getLogger() with no name returns the *root* logger; a named
# module logger (logging.getLogger(__name__)) is usually preferred — confirm
# this is intentional before changing.
logger = logging.getLogger()

# Interaction type names in which the first atom group of the interaction
# acts as the nucleophile.
NUCLEOPHILE_INTERS = ["Orthogonal multipolar", "Parallel multipolar", "Antiparallel multipolar", "Tilted multipolar", "Multipolar",
                      "Cation-nucleophile", "Unfavorable anion-nucleophile", "Unfavorable nucleophile-nucleophile"]

# Interaction type names in which one atom group acts as the electrophile.
ELECTROPHILE_INTERS = ["Orthogonal multipolar", "Parallel multipolar", "Antiparallel multipolar", "Tilted multipolar", "Multipolar",
                       "Anion-electrophile", "Unfavorable cation-electrophile", "Unfavorable electrophile-electrophile"]

# Interaction types that represent unfavorable (repulsive) contacts.
UNFAVORABLE_INTERS = ["Repulsive", "Unfavorable anion-nucleophile", "Unfavorable cation-electrophile",
                      "Unfavorable nucleophile-nucleophile", "Unfavorable electrophile-electrophile"]
class PymolWrapper:
    """This class provides functions to provide easy access to common functions from Pymol."""

    # Maps each accepted carbon color to the corresponding ``pymol.util``
    # color-by-atom helper (e.g., ``cbag`` colors by element with green
    # carbons). Insertion order is meaningful: it fixes the order in which
    # the colors are listed in error messages.
    _CARBON_COLOR_FUNCS = {
        'green': util.cbag,
        'cyan': util.cbac,
        'light magenta': util.cbam,
        'yellow': util.cbay,
        'salmon': util.cbas,
        'white': util.cbaw,
        'slate': util.cbab,
        'bright orange': util.cbao,
        'purple': util.cbap,
        'pink': util.cbak,
    }

    def get_cmd(self):
        """Expose the Pymol ``cmd`` object, so that one can call Pymol functions directly."""
        return cmd

    def load(self, input_file, obj_name=None):
        """Load a molecular file (e.g., PDB files).

        Parameters
        ----------
        input_file : str
            The pathname of the molecular file to be loaded.
        obj_name : str, optional
            Pymol object to store the loaded structure.
            If not provided, the filename will be used instead.
        """
        # Remember the last loaded file; cleared again by reinitialize().
        self.input_file = input_file

        if not obj_name:
            obj_name = get_filename(input_file)

        cmd.load(input_file, obj_name)

    def show(self, tuples):
        """Display atom and bond representations for certain selections.

        Parameters
        ----------
        tuples : iterable of tuple
            Each tuple should contain a Pymol representation (e.g., 'sticks') and a selection (e.g., 'hetatm').
        """
        for representation, selection in tuples:
            cmd.show(representation, selection)

    def hide(self, tuples):
        """Hide atom and bond representations for certain selections.

        Parameters
        ----------
        tuples : iterable of tuple
            Each tuple should contain a Pymol representation (e.g., 'sticks') and a selection (e.g., 'hetatm').
        """
        for representation, selection in tuples:
            cmd.hide(representation, selection)

    def hide_all(self):
        """Hide all representations."""
        self.hide([('everything', '')])

    def center(self, selection):
        """Translate the window, the clipping slab, and the origin to a point centered within the selection."""
        cmd.center(selection)

    def label(self, tuples):
        """Draw text labels for PyMOL objects.

        Parameters
        ----------
        tuples : iterable of tuple
            Each tuple should contain a selection (e.g., 'hetatm') and some string to label the given selection.
        """
        for selection, expression in tuples:
            cmd.label(selection, expression)

    def add_pseudoatom(self, name, opts=None):
        """Create a molecular object with a pseudoatom or add a pseudoatom to a molecular object
        if the specified object already exists.

        Parameters
        ----------
        name : str
            The object name to create or modify.
        opts : dict
            A set of options to create the pseudoatom. Check `Pymol <https://pymolwiki.org/index.php/Pseudoatom>`_ to discover which options are available.
        """
        opts = opts or {}
        cmd.pseudoatom(name, **opts)

    def select(self, selection, name="sele", enable=0):
        """Create a named selection from an atom selection.

        Parameters
        ----------
        selection : str
            The expression to select atoms.
        name : str
            The selection name, which by default is 'sele'.
        enable : {0, 1}
            If ``1``, activate the selection, i.e., show selection indicators.
            The default value is 0, which implies the selection indicators won't be shown.
        """
        cmd.select(name, selection, enable=enable)

    def get_names(self, obj_type):
        """Get names of objects, grouped objects, or selections.

        Parameters
        ----------
        obj_type : {'objects', 'selections', 'all', 'public_objects', 'public_selections', 'public_nongroup_objects', 'public_group_objects', 'nongroup_objects', 'group_objects'}
            The target object type.

        Returns
        -------
         : list of str
        """
        return cmd.get_names(obj_type)

    def sel_exists(self, name):
        """Check if a selection exists given by its name ``name``."""
        return name in cmd.get_names("selections")

    def obj_exists(self, name):
        """Check if an object exists given by its name ``name``."""
        return name in cmd.get_names("objects")

    def group_exists(self, name):
        """Check if a group of objects exists given by its name ``name``."""
        return name in cmd.get_names("group_objects")

    def get_coords(self, selection):
        """Get atomic coordinates for a given atom selection.

        Parameters
        ----------
        selection : str
            The expression to select atoms.

        Returns
        -------
         : array_like of float (size 3)
            Atomic coordinates (x, y, z) of each atom selected.
        """
        return cmd.get_coords(selection)

    def distance(self, name, sel1, sel2):
        """Create a new distance object between two atoms given by their selection-expressions.

        Parameters
        ----------
        name : str
            Name of the distance object to create.
        sel1 : str
            The expression to select the first atom.
        sel2 : str
            The expression to select the second atom.
        """
        cmd.distance(name, sel1, sel2)

    def arrow(self, name, atm_sel1, atm_sel2, opts=None):
        """Draw an arrow object between two atoms given by their selection-expressions.

        Parameters
        ----------
        name : str
            Name of the arrow object to create.
        atm_sel1 : str
            The expression to select the first atom.
        atm_sel2 : str
            The expression to select the second atom.
        opts : dict
            A set of options to create the arrow.
            Check `Pymol <https://pymolwiki.org/index.php/Cgo_arrow>`_ to discover which options are available.
        """
        opts = opts or {}
        cgo_arrow(atm_sel1, atm_sel2, name=name, **opts)

    def save_png(self, output_file, width=1200, height=1200, dpi=100, ray=1):
        """Save the current Pymol session as a PNG format image file.

        Parameters
        ----------
        output_file : str
            The output image pathname.
        width : int
            The width in pixels. The default value is 1,200.
        height : int
            The height in pixels. The default value is 1,200.
        dpi : float
            Dots-per-inch. The default value is 100.
        ray : {0, 1}
            If ``1`` (the default), run ray first to make high-resolution photos.
        """
        cmd.png(output_file, width, height, dpi, ray)

    def save_session(self, output_file):
        """Save the current PyMOL state to a PSE format file to later use.

        Parameters
        ----------
        output_file : str
            The output pathname.
        """
        # Pymol only recognizes the session format by its extension.
        if get_file_format(output_file) != 'pse':
            output_file += '.pse'

        cmd.save(output_file)

    def color(self, tuples):
        """Color objects and atoms.

        Parameters
        ----------
        tuples : iterable of tuple
            Each tuple should contain a color (e.g., 'red') and a selection (e.g., 'hetatm').
        """
        for color, selection in tuples:
            cmd.color(color, selection)

    def color_by_element(self, selections, c_color="green"):
        """Color atoms by their default element color (e.g., oxygen in red).

        Parameters
        ----------
        selections : iterable of str
            A sequence of selections to define which atoms will be colored by element.
        c_color : {'green', 'cyan', 'light magenta', 'yellow', 'salmon', 'white', 'slate', 'bright orange', 'purple', 'pink'}
            The carbon color. The default value is 'green'. Case-insensitive.

        Raises
        ------
        IllegalArgumentError
            If ``c_color`` is not one of the accepted colors.
        """
        # Table lookup replaces the previous if/elif chain; the error message
        # (including the color order) is unchanged.
        color_func = self._CARBON_COLOR_FUNCS.get(c_color.lower())
        if color_func is None:
            raise IllegalArgumentError("Invalid color '%s'. The accepted colors are: %s."
                                       % (c_color, ", ".join(self._CARBON_COLOR_FUNCS)))

        for selection in selections:
            color_func(selection)

    def group(self, name, members, action=None):
        """Create or update a group object.

        Parameters
        ----------
        name : str
            The group name to create or update.
        members : iterable of str
            The objects to include in the group.
        action : {'add', 'remove', 'open', 'close', 'toggle', 'auto', 'empty', 'purge', 'excise'}, optional
            An action to take. If not provided, the default value 'auto' will be used instead.

            The description of the actions are described below (source: Pymol documentation):

                * add: add members to group.
                * remove: remove members from group (members will be ungrouped).
                * empty: remove all members from group.
                * purge: remove all members from group and delete them.
                * excise: remove all members from group and delete group.
                * open: expand group display in object menu panel.
                * close: collapse group display in object menu panel.
                * toggle: toggle group display in object menu panel.
                * auto: add or toggle.
        """
        for member in members:
            if action:
                cmd.group(name, member, action)
            else:
                cmd.group(name, member)

    def create(self, name, selection):
        """Create a new molecular object from a selection.

        Note that the selected atoms won't be extracted from the original object.
        Instead, a copy of them will be created in the new object.

        Parameters
        ----------
        name : str
            The object name to be created.
        selection : str
            The expression to select atoms.
        """
        cmd.create(name, selection)

    def alter(self, selection, expression):
        """Modify atomic properties.

        Parameters
        ----------
        selection : str
            The expression to select atoms.
        expression : str
            Expression in Python language to define which properties should be modified.
            This can be used, for instance, to rename an atom or chain.

        Examples
        --------
        Alter the name of a ligand carbon from 'C1' to 'CA'.

        >>> pw_obj.alter("hetatm and name C1", "name='CA'")

        Alter the chain A name to 'Z'.

        >>> pw_obj.alter("chain A", "chain='Z'")
        """
        cmd.alter(selection, expression)

    def set(self, name, value, opts=None):
        """Modify global, object, object-state, or per-atom settings.

        Parameters
        ----------
        name : str
            The setting name to modify.
        value : str
            The new setting value.
        opts : dict
            A set of options. Check `Pymol <https://pymolwiki.org/index.php/Pseudoatom>`_ to discover which options are available.
        """
        opts = opts or {}
        cmd.set(name, value, **opts)

    def align(self, mobile, target, opts=None):
        """Align the structure ``mobile`` to the ``target`` structure.

        Parameters
        ----------
        mobile : str
            The structure to be aligned given by an atomic selection.
        target : str
            The target structure given by an atomic selection.
        opts : dict
            Alignment options. Check `Pymol <https://pymolwiki.org/index.php/Align>`_ to discover which options are available.
        """
        opts = opts or {}
        cmd.align(mobile, target, **opts)

    def delete(self, selections):
        """Delete the provided selections.

        Parameters
        ----------
        selections : iterable of str
            A sequence of selections to be deleted.
            Wildcards can be used to define object or selection names.
        """
        for selection in selections:
            cmd.delete(selection)

    def remove(self, selections):
        """Remove a selection of atoms from models.

        Parameters
        ----------
        selections : iterable of str
            A sequence of selections to define which atoms will be removed.
        """
        for selection in selections:
            cmd.remove(selection)

    def extract(self, tuples):
        """Perform multiple extractions, i.e., extract atoms from an object to another object.

        Parameters
        ----------
        tuples : iterable of tuple
            Each tuple should contain the object name to where atoms will be added
            and the selection itself that defines which atoms will be extracted (e.g., 'hetatm').
        """
        for name, selection in tuples:
            cmd.extract(name, selection)

    def load_mol_from_pdb_block(self, pdb_block, obj_name):
        """Load a molecular file from a PDB block string.

        Parameters
        ----------
        pdb_block : str
            The PDB block string.
        obj_name : str
            Pymol object to store the loaded structure.
        """
        cmd.read_pdbstr(pdb_block, obj_name, state=1)

    def reinitialize(self):
        """Clear all objects and resets all parameters to default."""
        cmd.reinitialize('everything')
        self.input_file = None

    def quit(self):
        """Terminate Pymol."""
        cmd.quit()

    def run(self, func_name, opts):
        """Run a Pymol command ``func_name`` with parameters ``opts``.

        Parameters
        ----------
        func_name : str
            The Pymol command name.
        opts : dict
            Parameters to pass to the command.

        Returns
        -------
            Whatever the underlying Pymol command returns.
        """
        return getattr(cmd, func_name)(**opts)

    def run_cmds(self, commands):
        """Run a set of Pymol commands.

        Parameters
        ----------
        commands : iterable of tuple
            Each tuple should contain a Pymol command and its parameters. See :meth:`run` to more details.
        """
        for func_name, opts in commands:
            getattr(cmd, func_name)(**opts)
class PymolSessionManager:
"""Class to start, manage, and save Pymol sessions.
This class provides useful built-in functions to load PDB/Mol files and show interactions.
.. note::
This class is not intended to be used directly because :meth:`set_view` is not implemented by default.
Instead, you should use a class that inherits from `PymolSessionManager` and implements :meth:`set_view`.
An example is the class :class:`~luna.interaction.view.InteractionViewer` that implements a custom
:meth:`set_view` to show interactions.
Therefore, you should define your own logic beyond :meth:`set_view` to save a Pymol session that meets your goals.
Parameters
----------
show_cartoon : bool
If True, show the protein structure as cartoons.
bg_color : str
The background color. The default value is "white".
Check `Pymol <https://pymolwiki.org/index.php/Color_Values>`_ to discover which colors are available.
add_directional_arrows : bool
If True, show arrows for directional interactions (e.g., hydrogen bonds and multipolar interactions).
show_res_labels : bool
If True (the default), show residue labels.
inter_color : :class:`~luna.util.ColorPallete`
A Pymol-compatible color scheme for interactions.
The default value is :const:`~luna.util.default_values.PYMOL_INTERACTION_COLOR`.
pse_export_version : str
Define a legacy format for saving Pymol sessions (PSE files). The default value os '1.8'.
"""
def new_session(self, data, output_file):
"""Start a new session, which includes the following steps:
* Start a new Pymol session (:meth:`start_session`);
* Set the view (:meth:`set_view`);
* Save the Pymol session to ``output_file``;
* Finish the Pymol session.
Parameters
----------
data : iterable
Data to be processed by :meth:`set_view`.
output_file : str
The pathname to where the Pymol session will be saved.
"""
self.start_session()
self.set_view(data)
self.save_session(output_file)
self.finish_session()
def start_session(self):
"""Start a new session and set Pymol settings, including the
background color and the PSE export version.
"""
self.wrapper = PymolWrapper()
self.wrapper.set("pse_export_version", self.pse_export_version)
self.wrapper.set("transparency_mode", 3)
self.wrapper.set("group_auto_mode", 2)
self.wrapper.run_cmds([("bg_color", {"color": self.bg_color})])
self.wrapper.set("internal_gui_width", 370)
def set_view(self, data):
"""Set the session view. However, this method is not implemented by default.
Instead, you should use a class that inherits from `PymolSessionManager` and
implements :meth:`set_view`. An example is the class :class:`~luna.interaction.view.InteractionViewer`
that implements a custom :meth:`set_view` to show interactions.
Therefore, you should define your own logic beyond :meth:`set_view` to save a Pymol session that meets your goals.
Parameters
----------
data : iterable
The data that will be used to set the Pymol view.
"""
raise NotImplementedError("Use a class that implements this method.")
def load_pdb(self, pdb_file, pdb_obj, mol_block=None, is_ftmap_output=False):
"""Load molecules from PDB files to the current Pymol session.
Optionally, ligands can also be loaded from a separate molecular string block.
This is especially useful when working with docked molecules in which the
protein structure is in a PDB file and ligands are in a separate molecular file.
Parameters
----------
pdb_file : str
The pathname of the PDB file to be loaded.
pdb_obj : str
Pymol object to store the loaded structure.
mol_block : str, optional
A molecular string block to load together with the PDB file.
is_ftmap_output : bool
If the PDB file is an FTMap output.
If so, an additional processing step is performed to
standardize the loaded Pymol objects.
"""
prot_obj = "%s.prot" % pdb_obj
self.wrapper.load(pdb_file, prot_obj)
if is_ftmap_output:
objs = self.wrapper.get_names("objects")
to_merge = []
for obj in objs:
if obj == "protein":
to_merge.append(obj)
sel = "protein and not resn %s" % " and not resn ".join(protein_letters_3to1.keys())
self.wrapper.alter(sel, "type='HETATM'")
elif obj.endswith(".pdb"):
to_merge.append(obj)
self.wrapper.alter(obj, "type='HETATM'")
self.wrapper.create(prot_obj, " | ".join(to_merge))
self.wrapper.delete(objs)
self.wrapper.extract([("%s.hets" % pdb_obj, "hetatm and %s" % prot_obj)])
if mol_block is not None:
self.wrapper.load_mol_from_pdb_block(mol_block, "%s.hets" % pdb_obj)
self.wrapper.color_by_element([pdb_obj])
self.wrapper.hide([("everything", pdb_obj)])
if self.show_cartoon:
self.wrapper.show([("cartoon", pdb_obj)])
    def set_interactions_view(self, interactions, main_grp, secondary_grp=None):
        """Display molecular interactions.

        Each interaction is drawn as a dashed Pymol distance object between two
        pseudoatoms placed at the centroids of the interacting atom groups.
        Compounds involved in the interactions are shown as sticks or spheres
        and, optionally, directional arrows are drawn over the lines.

        Parameters
        ----------
        interactions : iterable of :class:`~luna.interaction.type.InteractionType`
            A sequence of interactions to show.
        main_grp : str
            Main Pymol object to store atom groups.
        secondary_grp : str, optional
            Secondary Pymol object to store interactions. If not provided, ``main_grp`` will be used instead.

        Returns
        -------
         : set of str
            The Pymol selection-expressions of all residues involved in the
            displayed interactions.
        """
        residue_selections = set()
        # Fall back to the main group when no secondary group was provided.
        secondary_grp = secondary_grp or main_grp
        for i, inter in enumerate(interactions):
            #
            # Centroid 1: pseudoatom for the first (source) atom group. The
            # object name hashes the interacting atoms so the same group always
            # maps to the same pseudoatom and can be reused across interactions.
            #
            obj1_name = "%s.centroids.%s" % (main_grp, hash(tuple(sorted(inter.src_interacting_atms))))
            centroid_obj1 = inter.src_centroid
            centroid_obj1_visible = True
            # Define the centroid in a nucleophile with two atoms as the position of its more electronegative atom.
            # Remember that the position in the interaction object matters. We have defined that the first group is always
            # the nucleophile for both dipole-dipole and ion-dipole interactions.
            if inter.type in NUCLEOPHILE_INTERS and len(inter.src_grp.atoms) == 2:
                dipole_atm = inter.src_grp.atoms[0] if (inter.src_grp.atoms[0].electronegativity
                                                        > inter.src_grp.atoms[1].electronegativity) else inter.src_grp.atoms[1]
                obj1_name += "_%s" % hash(dipole_atm.name)
                centroid_obj1 = dipole_atm.coord
                centroid_obj1_visible = False
            # For unfavorable multipolar interactions, it may happen that the first atom group is an electrophile as well.
            elif inter.type == "Unfavorable electrophile-electrophile" and len(inter.src_grp.atoms) == 2:
                dipole_atm = inter.src_grp.atoms[0] if (inter.src_grp.atoms[0].electronegativity
                                                        < inter.src_grp.atoms[1].electronegativity) else inter.src_grp.atoms[1]
                obj1_name += "_%s" % hash(dipole_atm.name)
                centroid_obj1 = dipole_atm.coord
                centroid_obj1_visible = False
            #
            # Centroid 2: pseudoatom for the second (target) atom group.
            #
            obj2_name = "%s.centroids.%s" % (main_grp, hash(tuple(sorted(inter.trgt_interacting_atms))))
            centroid_obj2 = inter.trgt_centroid
            centroid_obj2_visible = True
            # Define the centroid in an electrophile with two atoms as the position of its less electronegative atom.
            # Remember that the position in the interaction object matters. We have defined that the second group is always
            # the electrophile for both dipole-dipole and ion-dipole interactions.
            if inter.type in ELECTROPHILE_INTERS and len(inter.trgt_grp.atoms) == 2:
                dipole_atm = inter.trgt_grp.atoms[0] if (inter.trgt_grp.atoms[0].electronegativity
                                                         < inter.trgt_grp.atoms[1].electronegativity) else inter.trgt_grp.atoms[1]
                obj2_name += "_%s" % hash(dipole_atm.name)
                centroid_obj2 = dipole_atm.coord
                centroid_obj2_visible = False
            # For unfavorable multipolar interactions, it may happen that the second atom group is a nucleophile as well.
            elif inter.type == "Unfavorable nucleophile-nucleophile" and len(inter.trgt_grp.atoms) == 2:
                dipole_atm = inter.trgt_grp.atoms[0] if (inter.trgt_grp.atoms[0].electronegativity
                                                         > inter.trgt_grp.atoms[1].electronegativity) else inter.trgt_grp.atoms[1]
                obj2_name += "_%s" % hash(dipole_atm.name)
                centroid_obj2 = dipole_atm.coord
                centroid_obj2_visible = False
            # Add pseudoatoms, reusing an already-created object when the same
            # atom group appeared in a previous interaction.
            if not self.wrapper.obj_exists(obj1_name):
                self.wrapper.add_pseudoatom(obj1_name, {"vdw": 1, "pos": list(centroid_obj1)})
            if not self.wrapper.obj_exists(obj2_name):
                self.wrapper.add_pseudoatom(obj2_name, {"vdw": 1, "pos": list(centroid_obj2)})
            # Set the representation for each compound in the groups involved in the interaction.
            # Waters and single-heavy-atom hetero compounds read better as spheres;
            # everything else is drawn as sticks.
            for compound in inter.src_grp.compounds.union(inter.trgt_grp.compounds):
                if compound.is_water():
                    comp_repr = "sphere"
                elif compound.is_hetatm():
                    if len(compound.child_list) == 1 or len([atm for atm in compound.child_list if atm.element != "H"]) == 1:
                        comp_repr = "sphere"
                    else:
                        comp_repr = "sticks"
                else:
                    comp_repr = "sticks"
                comp_sel = "%s and %s" % (main_grp, mybio_to_pymol_selection(compound))
                self.wrapper.show([(comp_repr, comp_sel)])
                # Target compounds get green carbons, everything else gray.
                carb_color = "green" if compound.is_target() else "gray"
                self.wrapper.color([(carb_color, comp_sel + " AND elem C")])
                if compound.is_residue():
                    residue_selections.add(comp_sel)
                    # Show residue label if required.
                    if self.show_res_labels:
                        self.wrapper.label([("%s AND name CA" % comp_sel, '"%s-%s" % (resn, resi)')])
            # Check if the interaction involves the same compound: intramolecular interactions.
            inter_grp = "intra" if inter.is_intramol_interaction() else "inter"
            # Human-readable group names: "<chain>-<resname>-<resnum><icode>" per compound.
            src_grp_name = "+".join(["%s-%s-%d%s" % (c.parent.id, c.resname, c.id[1], c.id[2].strip())
                                     for c in sorted(inter.src_grp.compounds)])
            trgt_grp_name = "+".join(["%s-%s-%d%s" % (c.parent.id, c.resname, c.id[1], c.id[2].strip())
                                      for c in sorted(inter.trgt_grp.compounds)])
            inter_name = "%s.all_inters.%s.%s.i%d_%s_and_%s.line" % (secondary_grp, inter_grp, INTERACTION_SHORT_NAMES[inter.type],
                                                                     i, src_grp_name, trgt_grp_name)
            # The interaction itself is a Pymol distance object (dashed line)
            # between the two centroids, with its numeric label hidden.
            self.wrapper.distance(inter_name, obj1_name, obj2_name)
            self.wrapper.hide([("label", inter_name)])
            # Set styles to the interactions.
            self.wrapper.color([(self.inter_color.get_color(inter.type), inter_name)])
            if self.add_directional_arrows:
                if inter.type in UNFAVORABLE_INTERS:
                    arrow_name1 = "%s.all_inters.%s.%s.inter%d.arrow1" % (secondary_grp, inter_grp, INTERACTION_SHORT_NAMES[inter.type], i)
                    arrow_name2 = "%s.all_inters.%s.%s.inter%d.arrow2" % (secondary_grp, inter_grp, INTERACTION_SHORT_NAMES[inter.type], i)
                    square_name = "%s.all_inters.%s.%s.inter%d.block" % (secondary_grp, inter_grp, INTERACTION_SHORT_NAMES[inter.type], i)
                    arrow_opts = {"radius": 0.03, "gap": 0.9, "hlength": 0.5, "hradius": 0.2,
                                  "color": self.inter_color.get_color(inter.type)}
                    square_opts = {"radius": 0.3, "gap": 1.5, "hlength": 0, "hradius": 0,
                                   "color": self.inter_color.get_color(inter.type)}
                    # Two arrows in different directions
                    self.wrapper.arrow(arrow_name1, obj1_name, obj2_name, arrow_opts)
                    if not inter.is_directional():
                        self.wrapper.arrow(arrow_name2, obj2_name, obj1_name, arrow_opts)
                    # Add a square-like object (zero-length head) to flag the
                    # interaction as unfavorable.
                    self.wrapper.arrow(square_name, obj1_name, obj2_name, square_opts)
                # Add arrows over the interaction lines to represent directional interactions
                elif inter.is_directional():
                    arrow_name = "%s.all_inters.%s.%s.inter%d.arrow" % (secondary_grp, inter_grp, INTERACTION_SHORT_NAMES[inter.type], i)
                    arrow_opts = {"radius": 0.03, "gap": 0.9, "hlength": 0.5, "hradius": 0.2,
                                  "color": self.inter_color.get_color(inter.type)}
                    self.wrapper.arrow(arrow_name, obj1_name, obj2_name, arrow_opts)
            # If a group object contains more than one atom.
            if inter.src_grp.size > 1 and centroid_obj1_visible:
                # Add the centroids to the group "centroids" and append them to the main group
                self._set_centroid_style(obj1_name)
            # Otherwise, just remove the centroid as it will not add any new information (the atom represented
            # by the centroid is already the atom itself).
            else:
                self.wrapper.delete([obj1_name])
            # If a group object contains more than one atom.
            if inter.trgt_grp.size > 1 and centroid_obj2_visible:
                # Add the centroids to the group "centroids" and append them to the main group
                self._set_centroid_style(obj2_name)
            # Otherwise, just remove the centroid as it will not add any new information (the atom represented
            # by the centroid has already been displayed).
            else:
                self.wrapper.delete([obj2_name])
        return residue_selections
def set_last_details_to_view(self):
"""This method can be called to apply final modifications to the Pymol session.
In its default version, the following modifications are applied:
* Dash radius for interactions is set to 0.08;
* Labels are set to bold and their size is set to 20;
* Atomic spheres' scale is set to 0.3;
* Hydrogen atoms are hidden;
* The view is centered within the visible objects.
"""
self.wrapper.set("dash_radius", 0.08)
self.wrapper.set("label_font_id", "13")
self.wrapper.set("label_size", "20")
self.wrapper.set("sphere_scale", "0.3", {"selection": "visible and not name PS*"})
self.wrapper.hide([("everything", "elem H+D")])
self.wrapper.center("visible")
def save_session(self, output_file):
"""Save the Pymol session as a PSE file of name ``output_file``."""
if not isinstance(self.wrapper, PymolWrapper):
raise PymolSessionNotInitialized("No session was initialized.")
self.wrapper.save_session(output_file)
def finish_session(self):
"""Clear all objects and resets all parameters to default."""
if not isinstance(self.wrapper, PymolWrapper):
raise PymolSessionNotInitialized("No session was initialized.")
self.wrapper.reinitialize()
self.wrapper = None
def mybio_to_pymol_selection(entity):
    """Transform an :class:`~luna.MyBio.PDB.Entity.Entity` instance into a Pymol selection-expression,
    which can then be used to select atoms in a Pymol session.

    Parameters
    ----------
    entity : :class:`~luna.MyBio.PDB.Entity.Entity`
        An entity to be transformed into a Pymol selection-expression.
        Its ``level`` attribute selects the granularity: 'S' (structure),
        'M' (model), 'C' (chain), 'R' (residue), or 'A' (atom).

    Returns
    -------
     : str
        The Pymol selection-expression. An empty string is returned for
        unrecognized entity levels.

    Examples
    --------
    First, let's parse a PDB file to work with.

    >>> from luna.util.default_values import LUNA_PATH
    >>> from luna.MyBio.PDB.PDBParser import PDBParser
    >>> pdb_parser = PDBParser(PERMISSIVE=True, QUIET=True)
    >>> structure = pdb_parser.get_structure("Protein", f"{LUNA_PATH}/tutorial/inputs/3QQK.pdb")

    Now, let's get the Pymol selection-expression for the chain A.

    >>> from luna.wrappers.pymol import mybio_to_pymol_selection
    >>> print(mybio_to_pymol_selection(structure[0]['A']))
    chain A

    Finally, we can get the Pymol selection-expression for the ligand X02.

    >>> from luna.wrappers.pymol import mybio_to_pymol_selection
    >>> print(mybio_to_pymol_selection(structure[0]["A"][('H_X02', 497, ' ')]))
    resn X02 AND res 497 AND chain A
    """
    params = {}
    if entity.level == 'S' or entity.level == 'M':
        # Whole structure/model: select everything.
        params[''] = 'all'
    elif entity.level == 'C':
        params['chain'] = entity.id
    elif entity.level == 'R':
        # Residue number plus (stripped) insertion code, e.g. "497" or "52A".
        params['resn'] = entity.resname
        params['res'] = str(entity.id[1]) + entity.id[2].strip()
        params['chain'] = entity.get_parent().id
    elif entity.level == 'A':
        residue = entity.get_parent()
        params['id'] = entity.serial_number
        params['name'] = entity.name
        params['resn'] = residue.resname
        params['res'] = str(residue.id[1]) + residue.id[2].strip()
        params['chain'] = residue.get_parent().id
    else:
        # BUG FIX: previously returned {} (a dict) here, breaking the documented
        # str return type for callers that concatenate the result. Return an
        # empty selection-expression instead.
        return ""
    if "resn" in params:
        # Escape characters that can generate problems with Pymol
        params['resn'] = params['resn'].replace("+", "\\+")
        params['resn'] = params['resn'].replace("-", "\\-")
    return (' AND ').join(['%s %s' % (k, str(v)) for k, v in params.items()])
| [
6738,
279,
4948,
349,
1330,
23991,
198,
6738,
279,
4948,
349,
1330,
7736,
628,
198,
6738,
300,
9613,
13,
29988,
11799,
13,
66,
2188,
62,
6018,
1330,
269,
2188,
62,
6018,
198,
6738,
300,
9613,
13,
22602,
13,
1069,
11755,
1330,
350,
4... | 2.274031 | 15,137 |
import os
import sib_api_v3_sdk
from sib_api_v3_sdk.rest import ApiException
from uuid import UUID
from app.api.models.namespaces import NamespacePrimaryKeyTransferEmailInfo, NamespacePrimaryKeyEmailInfo, NamespaceSchemaProjectNameEmailInfo
from app.api.models.projects import ProjectPrimaryKeyName
# Module-level Sendinblue (Brevo) transactional-email client, shared by the
# email helpers in this module. The API key is read from the
# SENDINBLUE_API_KEY environment variable; note os.getenv returns None if the
# variable is unset -- presumably deployment guarantees it exists (TODO confirm).
configuration = sib_api_v3_sdk.Configuration()
configuration.api_key['api-key'] = os.getenv("SENDINBLUE_API_KEY")
api_instance = sib_api_v3_sdk.TransactionalEmailsApi(sib_api_v3_sdk.ApiClient(configuration))
| [
11748,
28686,
198,
11748,
264,
571,
62,
15042,
62,
85,
18,
62,
21282,
74,
198,
6738,
264,
571,
62,
15042,
62,
85,
18,
62,
21282,
74,
13,
2118,
1330,
5949,
72,
16922,
198,
6738,
334,
27112,
1330,
471,
27586,
198,
198,
6738,
598,
13... | 2.870787 | 178 |
"""Control local mopidy."""
import requests
import json
class MopidyError(Exception):
    """Raised when a Mopidy JSON-RPC control request fails or Mopidy is unreachable."""
def mopidy_is_playing():
    """Return True if the local Mopidy instance is currently playing.

    Queries ``core.playback.get_state`` over Mopidy's JSON-RPC HTTP endpoint.

    Returns
    -------
    bool
        True if the playback state is "playing".

    Raises
    ------
    MopidyError
        If the HTTP request fails, returns a non-200 status, or an
        unexpected error occurs.
    """
    json_data = {
        "method": "core.playback.get_state",
        "jsonrpc": "2.0",
        # BUG FIX: key was misspelled "prams"; JSON-RPC 2.0 names it "params".
        "params": {},
        "id": 1,
    }
    payload = json.dumps(json_data)
    try:
        ret = requests.post(
            "http://127.0.0.1:6680/mopidy/rpc",
            headers={"Content-Type": "application/json"},
            data=payload,
        )
    except requests.exceptions.ConnectionError as err:
        raise MopidyError("request failed") from err
    except Exception as err:
        raise MopidyError("unknown error") from err
    if ret.status_code != 200:
        raise MopidyError("request failed")
    return ret.json()["result"] == "playing"
def mopidy_pause():
    """Pause playback on the local Mopidy instance.

    Calls ``core.playback.pause`` over Mopidy's JSON-RPC HTTP endpoint.
    (The original docstring incorrectly said "Play.")

    Raises
    ------
    MopidyError
        If the HTTP request fails, returns a non-200 status, or an
        unexpected error occurs.
    """
    json_data = {
        "method": "core.playback.pause",
        "jsonrpc": "2.0",
        # BUG FIX: key was misspelled "prams"; JSON-RPC 2.0 names it "params".
        "params": {},
        "id": 1,
    }
    payload = json.dumps(json_data)
    try:
        ret = requests.post(
            "http://127.0.0.1:6680/mopidy/rpc",
            headers={"Content-Type": "application/json"},
            data=payload,
        )
    except requests.exceptions.ConnectionError as err:
        raise MopidyError("request failed") from err
    except Exception as err:
        raise MopidyError("unknown error") from err
    if ret.status_code != 200:
        raise MopidyError("request failed")
def mopidy_play():
    """Start (or resume) playback on the local Mopidy instance.

    Calls ``core.playback.play`` over Mopidy's JSON-RPC HTTP endpoint with
    default arguments (``tl_track`` and ``tlid`` both None).

    Raises
    ------
    MopidyError
        If the HTTP request fails, returns a non-200 status, or an
        unexpected error occurs.
    """
    json_data = {
        "method": "core.playback.play",
        "jsonrpc": "2.0",
        # BUG FIX: key was misspelled "prams"; JSON-RPC 2.0 names it "params".
        "params": {"tl_track": None, "tlid": None},
        "id": 1,
    }
    payload = json.dumps(json_data)
    try:
        ret = requests.post(
            "http://127.0.0.1:6680/mopidy/rpc",
            headers={"Content-Type": "application/json"},
            data=payload,
        )
    except requests.exceptions.ConnectionError as err:
        raise MopidyError("request failed") from err
    except Exception as err:
        raise MopidyError("unknown error") from err
    if ret.status_code != 200:
        raise MopidyError("request failed")
| [
37811,
15988,
1957,
285,
404,
19325,
526,
15931,
198,
198,
11748,
7007,
198,
11748,
33918,
628,
198,
4871,
337,
404,
19325,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
44,
404,
19325,
1630,
4049,
526,
15931,
628,
198,
4299,
285,
... | 2.166667 | 960 |
import unittest
from redblacktree.redblacktree import Node, RedBlackTree, RED, BLACK
# Run the red-black tree unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
2266,
13424,
21048,
13,
445,
13424,
21048,
1330,
19081,
11,
2297,
9915,
27660,
11,
23848,
11,
31963,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
71... | 2.87234 | 47 |
import os, sys, platform
# Module-level flag for ANSI escape-sequence support. Defaults to False;
# presumably flipped elsewhere after platform/terminal detection -- TODO confirm.
__SUPPORT_ANSI__ = False
| [
11748,
28686,
11,
25064,
11,
3859,
198,
198,
834,
40331,
15490,
62,
1565,
11584,
834,
796,
10352,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 2.222222 | 27 |
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: context_demo
type: aggregate
short_description: demo callback that adds play/task context
description:
- Displays some play and task context along with normal output
- This is mostly for demo purposes
version_added: "2.1"
requirements:
- whitelist in configuration
'''
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """
    This is a very trivial example of how any callback function can get at play and task objects.
    play will be 'None' for runner invocations, and task will be None for 'setup' invocations.
    """
    # Version of the Ansible callback API this plugin targets.
    CALLBACK_VERSION = 2.0
    # 'aggregate' callbacks add output alongside the default stdout callback.
    CALLBACK_TYPE = 'aggregate'
    # Name under which the plugin is referenced/whitelisted.
    CALLBACK_NAME = 'context_demo'
    # Plugin only runs when explicitly whitelisted in the Ansible configuration.
    CALLBACK_NEEDS_WHITELIST = True
| [
2,
357,
34,
8,
2321,
11,
3899,
1024,
23303,
272,
11,
1279,
76,
40302,
13,
2934,
3099,
272,
31,
14816,
13,
785,
29,
198,
2,
357,
66,
8,
2177,
28038,
856,
4935,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
357,
3826,
2... | 2.979827 | 347 |
from django.conf.urls import url
from rest_framework import routers
from talentmap_api.common.urls import get_retrieve, post_create
from talentmap_api.feature_flags.views import featureflags as views
# Router is declared for consistency with the project's other URL modules;
# no viewsets are registered here, so it contributes no extra routes.
router = routers.SimpleRouter()
urlpatterns = [
    # Root path: GET retrieves the feature flags, POST invokes perform_create.
    url(r'^$', views.FeatureFlagsView.as_view({**get_retrieve, 'post': 'perform_create'}), name='featureflags'),
]
urlpatterns += router.urls
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
6738,
7401,
8899,
62,
15042,
13,
11321,
13,
6371,
82,
1330,
651,
62,
1186,
30227,
11,
1281,
62,
17953,
198,
198,
6738,
7401,
8899,... | 3.086614 | 127 |
# Generated from JSON.g4 by ANTLR 4.7.2
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
| [
2,
2980,
515,
422,
19449,
13,
70,
19,
416,
3537,
14990,
49,
604,
13,
22,
13,
17,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
1885,
14050,
19,
1330,
1635,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
19720,
13,
952,
1330,
... | 2.830189 | 53 |
# New home for the weaponry lists; they shouldn't be in the core file.
# Hopefully, in 1.4 the entire purpose of this file dissolves and these lists go away.
#
| [
2,
3605,
1363,
329,
262,
28479,
8341,
11,
484,
6584,
470,
307,
287,
262,
4755,
2393,
198,
2,
8548,
7549,
287,
352,
13,
19,
262,
2104,
4007,
286,
428,
2393,
6249,
9010,
290,
777,
8341,
467,
1497,
13,
198,
198,
2,
628
] | 3.761905 | 42 |
# Sample profile payload: competitive-programming handles keyed by judge name.
# "uva_handle" is the UVa username; "uva_id" is presumably the numeric UVa
# account id (TODO confirm against the UVa API usage).
profile1 = {
    "name": "testing",
    "codeforces": "aaradhya0707",
    "codechef": "aaradhya0707",
    "atcoder": "aaradhya0707",
    "uva_handle": "aaradhya0707",
    "spoj": "aaradhya777",
    "uva_id": "1143870"
}
# A second sample profile that fills in only a subset of the judges.
profile2 = {
    "name": "testinguser",
    "codeforces": "shivamsinghal1012",
    "codechef": "shivam1012"
}
| [
13317,
16,
796,
1391,
198,
220,
220,
220,
366,
3672,
1298,
366,
33407,
1600,
198,
220,
220,
220,
366,
19815,
891,
273,
728,
1298,
366,
64,
283,
24411,
3972,
15,
24038,
1600,
198,
220,
220,
220,
366,
19815,
721,
258,
69,
1298,
366,
... | 1.913295 | 173 |
# Read the grid dimensions (n rows, m columns) and the list of values.
[n, m] = list(map(int, input().split()))
ai = list(map(int, input().split()))
# One empty bucket per column (kept for compatibility with later code).
aux = [[] for i in range(m)]
# BUG FIX: `dp = [aux for i in range(n)]` stored the SAME `aux` list object in
# every row, so writing dp[r][c] mutated all rows at once. Build an n x m grid
# of independent empty lists instead.
dp = [[[] for _ in range(m)] for _ in range(n)]
#def grundy():
for i in range(n):
for j in range(m):
| [
58,
77,
11,
76,
60,
796,
1351,
7,
8899,
7,
600,
11,
15414,
22446,
35312,
3419,
4008,
198,
198,
1872,
796,
1351,
7,
8899,
7,
600,
11,
15414,
22446,
35312,
3419,
4008,
198,
198,
14644,
796,
16410,
60,
1640,
1312,
287,
2837,
7,
76,
... | 2.136842 | 95 |
# Importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
## Import base class file
from KratosMultiphysics.FluidDynamicsApplication.fluid_solver import FluidSolver
from KratosMultiphysics.FluidDynamicsApplication import check_and_prepare_model_process_fluid | [
2,
17267,
278,
262,
509,
10366,
418,
10074,
198,
11748,
509,
10366,
418,
15205,
13323,
23154,
198,
11748,
509,
10366,
418,
15205,
13323,
23154,
13,
37,
2290,
312,
35,
4989,
873,
23416,
355,
509,
10366,
418,
37,
2290,
312,
198,
198,
22... | 3.125 | 104 |