import common
data = common.read_file('2017/11/data.txt').strip().split(',')
offset_x = 0
offset_y = 0
max_dist = 0
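
# dist() is called below but its definition was missing from this excerpt;
# this is a minimal reconstruction using the standard hex-grid distance
# for the axial offsets tracked above.
def dist():
    return (abs(offset_x) + abs(offset_y) + abs(offset_x + offset_y)) // 2
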
# |nw|n
# sw| |ne
# s |se|
for move in data:
if move == 'sw' or move == 's':
offset_x -= 1
elif move == 'n' or move == 'ne':
offset_x += 1
if move == 'nw' or move == 'n':
offset_y -= 1
elif move == 's' or move == 'se':
offset_y += 1
curr_dist = dist()
if curr_dist > max_dist:
max_dist = curr_dist
last_dist = dist()
print(last_dist)
print(max_dist)
import json
import model
from utils import Error
import test_data
import validation
def test_upvote(client, existing_item):
'''
Tests upvoting.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.upvotes
def test_downvote(client, existing_item):
'''
Tests downvoting.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = -1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.downvotes
def test_undo_upvote(client, existing_item):
'''
Tests undoing an upvote.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.upvotes
direction = 0
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
    assert user not in price.upvotes
    assert user not in price.downvotes
def test_undo_downvote(client, existing_item):
'''
    Tests undoing a downvote.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = -1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.downvotes
direction = 0
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
    assert user not in price.upvotes
    assert user not in price.downvotes
def test_upvote_then_downvote(client, existing_item):
'''
Tests upvoting then changing it to a downvote.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.upvotes
direction = -1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
    assert user not in price.upvotes
assert user in price.downvotes
def test_downvote_then_upvote(client, existing_item):
'''
Tests downvoting then changing it to an upvote.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = -1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.downvotes
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
    assert user not in price.downvotes
assert user in price.upvotes
def test_invalid_unvote(client, existing_item):
'''
Tests undoing a vote without ever voting.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 0
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': False, 'error': Error.NOT_VOTED.value}
def test_double_upvote(client, existing_item):
'''
Tests upvoting twice in a row.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.upvotes
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': False,
'error': Error.ALREADY_UPVOTED.value}
def test_double_downvote(client, existing_item):
'''
Tests downvoting twice in a row.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = -1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': True, 'error': None}
price = model.Item.objects(upc=upc).first().stores[0].prices[-1]
assert user in price.downvotes
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': False,
'error': Error.ALREADY_DOWNVOTED.value}
def test_nonexistent_item(client, nonexistent_item):
'''
Tests upvoting the price of a nonexistent item.
'''
upc = str(nonexistent_item.upc)
store = nonexistent_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': False, 'error': Error.ITEM_DNE.value}
def test_nonexistent_store(client, existing_item):
'''
Tests upvoting the price for a nonexistent store.
'''
upc = str(existing_item.upc)
store = test_data.store10
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': False, 'error': Error.STORE_DNE.value}
def test_missing_direction(client, existing_item):
'''
Tests voting without specifying the vote.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg
}))
response = json.loads(rv.data)
assert response == {'success': False,
'error': Error.MISSING_FIELDS.value}
def test_invalid_dir(client, existing_item):
'''
Tests voting with an invalid vote direction indicator.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = 'newuser'
direction = 2
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
assert response == {'success': False, 'error': Error.INVALID_DIR.value}
def test_invalid_user(client, existing_item):
'''
Tests upvoting with an invalid user.
'''
upc = str(existing_item.upc)
store = existing_item.stores[0]
store_name = str(store.name)
lat = float(store.location['lat'])
long_arg = float(store.location['long'])
user = ''
direction = 1
rv = client.post('/vote', data=json.dumps({
'upc': upc,
'user': user,
'store': store_name,
'lat': lat,
'long': long_arg,
'dir': direction
}))
response = json.loads(rv.data)
    assert response['success'] is False
assert 'ValidationError' in response['error']
assert 'String value is too short' in response['error']
import re
from typing import Dict, Iterable, Optional, Type
from ..shared.patterns import Collection
from . import fields
model_registry: Dict[Collection, Type["Model"]] = {}
class ModelMetaClass(type):
"""
Metaclass for Model base class (see below).
This metaclass ensures that all fields get attributes set so that they
know its own collection and its own field name.
It also creates the registry for models and collections.
"""
class Model(metaclass=ModelMetaClass):
"""
Base class for models in OpenSlides.
"""
collection: Collection
verbose_name: str
# saves all fields with their respective unique prefix for easier access
field_prefix_map: Dict[str, fields.BaseRelationField]
def get_field(self, field_name: str) -> fields.Field:
"""
Returns the requested model field.
"""
field = self.try_get_field(field_name)
if field:
return field
else:
raise ValueError(f"Model {self} has no field {field_name}.")
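
    def try_get_field(self, field_name: str) -> Optional[fields.Field]:
        """
        Returns the requested model field, or None if it does not exist.
        (Reconstructed: get_field() above calls this helper, but it was
        missing from the excerpt.)
        """
        field = getattr(self, field_name, None)
        return field if isinstance(field, fields.Field) else None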
def get_fields(self) -> Iterable[fields.Field]:
"""
        Yields all model fields.
"""
for attr_name in dir(self):
attr = getattr(self, attr_name)
if isinstance(attr, fields.Field):
yield attr
def get_relation_fields(self) -> Iterable[fields.BaseRelationField]:
"""
Yields all relation fields (using BaseRelationField).
"""
for model_field in self.get_fields():
if isinstance(model_field, fields.BaseRelationField):
yield model_field
def get_schema(self, field: str) -> fields.Schema:
"""
Returns JSON schema for the given field.
"""
return getattr(self, field).get_schema()
def get_properties(self, *fields: str) -> Dict[str, fields.Schema]:
"""
Returns a dictionary of field schemas used for the properties keyword in
an action schema.
"""
properties = {}
for field in fields:
try:
properties[field] = self.get_schema(field)
except AttributeError:
raise ValueError(f"{field} is not a field of {self}")
return properties
"""
Tests for `astrofunc` module.
"""
import pytest
from astrofunc import astrofunc
from office365.runtime.client_value_object import ClientValueObject
class TeamFunSettings(ClientValueObject):
"""Settings to configure use of Giphy, memes, and stickers in the team."""
import argparse
import pathlib
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from selenium import webdriver
try:
from TinyTools.LoggingConfigurator import logger
except ModuleNotFoundError:
from sys import path
from os.path import abspath
path.append(abspath(""))
from TinyTools.LoggingConfigurator import logger
parser = argparse.ArgumentParser(description="Auto-reload designated HTML file using selenium and watchdog.")
parser.add_argument("file_location", metavar="FILE_PATH", type=str,
help='path of the HTML file')
args = parser.parse_args()
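

# Minimal reconstruction: html_closure() is called below but was missing from
# this excerpt. It opens the file in a browser (Firefox is an assumption; any
# selenium driver should do) and wires a watchdog handler that refreshes the
# page whenever the file changes.
def html_closure():
    file_path = pathlib.Path(args.file_location).resolve()
    driver = webdriver.Firefox()
    driver.get(file_path.as_uri())

    class ReloadHandler(FileSystemEventHandler):
        def on_modified(self, event):
            if pathlib.Path(event.src_path).resolve() == file_path:
                logger.info("Change detected, reloading %s", file_path)
                driver.refresh()

    observer = Observer()
    observer.schedule(ReloadHandler(), str(file_path.parent), recursive=False)
    return observer, driver
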
if __name__ == '__main__':
observer_instance, driver_ = html_closure()
try:
observer_instance.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
driver_.close()
#!/usr/bin/env python3
# Because it's better not to rely on external libraries
def banner():
"""Print the program's banner"""
banner = """
____ ____ __ __ ____ ___ ____
( _ \(_ _)( ) /__\ ( _ \/ __)( ___)
)___/ _)(_ )(__ /(__)\ )___/\__ \ )__)
(__) (____)(____)(__)(__)(__) (___/(____)
"""
print(f"\033[96m{banner}\033[00m")
print("Capture & record time lapse videos on Raspberry Pi!")
print(
"\033[95m{}\033[00m".format(
"@akihakune, https://github.com/git-akihakune/pilapse"
)
)
print()
def progressBar(duration: int, frequency: int):
"""Set up progress bar while capturing based on verbosity"""
from .arguments import arguments
if arguments["--verbose"]:
from tqdm import tqdm
RANGE = tqdm(range(0, duration, frequency))
else:
RANGE = range(0, duration, frequency)
return {"iter": RANGE, "type": type(RANGE).__name__}
import time
print('')
import random
figlet_ansi_shadow = """
████████╗██╗ ██████╗ ██████╗ ██████╗ ███████╗██████╗
╚══██╔══╝██║ ██╔═══██╗██╔════╝ ██╔════╝ ██╔════╝██╔══██╗
██║ ██║ ██║ ██║██║ ███╗██║ ███╗█████╗ ██████╔╝
██║ ██║ ██║ ██║██║ ██║██║ ██║██╔══╝ ██╔══██╗
██║ ███████╗╚██████╔╝╚██████╔╝╚██████╔╝███████╗██║ ██║
╚═╝ ╚══════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝
"""
figlet_big = """
_______ _ _ _ _
|__ __| | | | \ | | | |
| | ___ ___| |__ | \| | _____ _| | ___ __ _ __ _ ___ _ __
| |/ _ \/ __| '_ \| . ` |/ _ \ \ /\ / / | / _ \ / _` |/ _` |/ _ \ '__|
| | __/ (__| | | | |\ | (_) \ V V /| |___| (_) | (_| | (_| | __/ |
|_|\___|\___|_| |_|_| \_|\___/ \_/\_/ |______\___/ \__, |\__, |\___|_|
__/ | __/ |
|___/ |___/
"""
figlet_bloody = """
▄▄▄█████▓ ██▓ ▒█████ ▄████ ▄████ ▓█████ ██▀███
▓ ██▒ ▓▒▓██▒ ▒██▒ ██▒ ██▒ ▀█▒ ██▒ ▀█▒▓█ ▀ ▓██ ▒ ██▒
▒ ▓██░ ▒░▒██░ ▒██░ ██▒▒██░▄▄▄░▒██░▄▄▄░▒███ ▓██ ░▄█ ▒
░ ▓██▓ ░ ▒██░ ▒██ ██░░▓█ ██▓░▓█ ██▓▒▓█ ▄ ▒██▀▀█▄
▒██▒ ░ ░██████▒░ ████▓▒░░▒▓███▀▒░▒▓███▀▒░▒████▒░██▓ ▒██▒
▒ ░░ ░ ▒░▓ ░░ ▒░▒░▒░ ░▒ ▒ ░▒ ▒ ░░ ▒░ ░░ ▒▓ ░▒▓░
░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ░ ░ ░ ░ ░ ░ ░▒ ░ ▒░
░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░
"""
figlet_doom = """
_____ _ _ _ _
|_ _| | | | \ | | | |
| | ___ ___| |__ | \| | _____ _| | ___ __ _ __ _ ___ _ __
| |/ _ \/ __| '_ \| . ` |/ _ \ \ /\ / / | / _ \ / _` |/ _` |/ _ \ '__|
| | __/ (__| | | | |\ | (_) \ V V /| |___| (_) | (_| | (_| | __/ |
\_/\___|\___|_| |_\_| \_/\___/ \_/\_/ \_____/\___/ \__, |\__, |\___|_|
__/ | __/ |
|___/ |___/
"""
figlet_drpepper = """
___ _ _ _ _
|_ _|___ ___ | |_ | \ | ___ _ _ _ | | ___ ___ ___ ___ _ _
| |/ ._>/ | '| . || |/ . \| | | || |_ / . \/ . |/ . |/ ._>| '_>
|_|\___.\_|_.|_|_||_\_|\___/|__/_/ |___|\___/\_. |\_. |\___.|_|
<___'<___'
"""
figlet_ogre = """
_____ _ __ __
/__ \___ ___| |__ /\ \ \_____ __/ / ___ __ _ __ _ ___ _ __
/ /\/ _ \/ __| '_ \ / \/ / _ \ \ /\ / / / / _ \ / _` |/ _` |/ _ \ '__|
/ / | __/ (__| | | / /\ / (_) \ V V / /__| (_) | (_| | (_| | __/ |
\/ \___|\___|_| |_\_\ \/ \___/ \_/\_/\____/\___/ \__, |\__, |\___|_|
|___/ |___/
"""
figlet_slant = """
______ __ _ __ __
/_ __/__ _____/ /_ / | / /___ _ __/ / ____ ____ _____ ____ _____
/ / / _ \/ ___/ __ \/ |/ / __ \ | /| / / / / __ \/ __ `/ __ `/ _ \/ ___/
/ / / __/ /__/ / / / /| / /_/ / |/ |/ / /___/ /_/ / /_/ / /_/ / __/ /
/_/ \___/\___/_/ /_/_/ |_/\____/|__/|__/_____/\____/\__, /\__, /\___/_/
/____//____/
"""
figlet_small = """
_____ _ _ _ _
|_ _|__ __| |_ | \| |_____ __ _| | ___ __ _ __ _ ___ _ _
| |/ -_) _| ' \| .` / _ \ V V / |__/ _ \/ _` / _` / -_) '_|
|_|\___\__|_||_|_|\_\___/\_/\_/|____\___/\__, \__, \___|_|
|___/|___/
"""
figlet_smslant = """
______ __ _ __ __
/_ __/__ ____/ / / |/ /__ _ __/ / ___ ___ ____ ____ ____
/ / / -_) __/ _ \/ / _ \ |/|/ / /__/ _ \/ _ `/ _ `/ -_) __/
/_/ \__/\__/_//_/_/|_/\___/__,__/____/\___/\_, /\_, /\__/_/
/___//___/
"""
figlet_standard = """
_____ _ _ _ _
|_ _|__ ___| |__ | \ | | _____ _| | ___ __ _ __ _ ___ _ __
| |/ _ \/ __| '_ \| \| |/ _ \ \ /\ / / | / _ \ / _` |/ _` |/ _ \ '__|
| | __/ (__| | | | |\ | (_) \ V V /| |__| (_) | (_| | (_| | __/ |
|_|\___|\___|_| |_|_| \_|\___/ \_/\_/ |_____\___/ \__, |\__, |\___|_|
|___/ |___/
"""
load(
"//:deps.bzl",
"com_google_protobuf",
"io_bazel_rules_rust",
)
# wwwhisper - web access control.
# Copyright (C) 2012-2018 Jan Wrobel <jan@mixedbit.org>
"""wwwhisper admin API.
The package exposes http API for specifying which users can access
which locations and for other admin operations.
"""
default_app_config = 'wwwhisper_admin.appconfig.Config'
#!/usr/bin/env python
'''
The minionswarm script will start a group of salt minions with different ids
on a single system to test scale capabilities
'''
# Import Python Libs
import os
import optparse
import subprocess
import tempfile
import shutil
import random
import hashlib
# Import salt libs
import salt
# Import third party libs
import yaml
def parse():
'''
Parse the cli options
'''
parser = optparse.OptionParser()
parser.add_option('-m',
'--minions',
dest='minions',
default=5,
type='int',
help='The number of minions to make')
parser.add_option('--master',
dest='master',
default='salt',
help='The location of the salt master that this swarm will serve')
parser.add_option('-k',
'--keep-modules',
dest='keep',
default='',
help='A comma delimited list of modules to enable')
parser.add_option('-f',
'--foreground',
dest='foreground',
default=False,
action='store_true',
help=('Run the minions with debug output of the swarm going to '
'the terminal'))
options, args = parser.parse_args()
opts = {}
for key, val in options.__dict__.items():
opts[key] = val
return opts
class Swarm(object):
'''
Create a swarm of minions
'''
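    def __init__(self, opts):
        '''
        Store the parsed options and prepare the set of minion config paths.
        (Reconstructed: the original __init__ was missing; the other methods
        rely on self.opts and self.confs.)
        '''
        self.opts = opts
        self.confs = set()
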
def mkconf(self):
'''
Create a config file for a single minion
'''
fd_, path = tempfile.mkstemp()
path = '{0}{1}'.format(
path,
hashlib.md5(str(random.randint(0, 999999))).hexdigest())
os.close(fd_)
dpath = '{0}.d'.format(path)
os.makedirs(dpath)
data = {'id': os.path.basename(path),
'pki_dir': os.path.join(dpath, 'pki'),
'cachedir': os.path.join(dpath, 'cache'),
'master': self.opts['master'],
}
if self.opts['keep']:
ignore = set()
keep = self.opts['keep'].split(',')
modpath = os.path.join(os.path.dirname(salt.__file__), 'modules')
for fn_ in os.listdir(modpath):
if fn_.split('.')[0] in keep:
continue
ignore.add(fn_.split('.')[0])
data['disable_modules'] = list(ignore)
with open(path, 'w+') as fp_:
yaml.dump(data, fp_)
self.confs.add(path)
def start_minions(self):
'''
Iterate over the config files and start up the minions
'''
for path in self.confs:
cmd = 'salt-minion -c {0} --pid-file {1}'.format(
path,
'{0}.pid'.format(path)
)
if self.opts['foreground']:
cmd += ' -l debug &'
else:
cmd += ' -d &'
subprocess.call(cmd, shell=True)
def prep_configs(self):
'''
Prepare the confs set
'''
for ind in range(self.opts['minions']):
self.mkconf()
def clean_configs(self):
'''
Clean up the config files
'''
for path in self.confs:
try:
os.remove(path)
os.remove('{0}.pid'.format(path))
shutil.rmtree('{0}.d'.format(path))
except:
pass
def start(self):
'''
Start the minions!!
'''
self.prep_configs()
self.start_minions()
if __name__ == '__main__':
swarm = Swarm(parse())
swarm.start()
import sys
sys.path.insert(0, "../src")
import cs50
i = cs50.get_int("Input: ")
print(f"Output: {i}")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by shimeng on 17-8-17
# Spider name
spider_name = 'get_ip'
# Logging settings
log_folder_name = '%s_logs' % spider_name
delete_existed_logs = True
# Request parameter settings
thread_num = 50
sleep_time = 0.5
retry_times = 10
time_out = 5
# When use_proxy is True, ip must be defined in the request args or in this config file, e.g. ip="120.52.72.58:80"; otherwise the program will raise an error
use_proxy = False
ip = None
# For mobile devices, set ua_type = 'mobile'
ua_type = 'pc'
# Queue order
FIFO = 0
# The default browser headers include user_agent and host; if you need richer headers, define your own and assign them to diy_header
diy_header = None
# Accepted status codes; anything not listed is treated as a request error or exception
status_code = [200, 304, 404]
# Storage settings
# Set connect to True only after you have defined host, port and database_name
connect = True
host = 'localhost'
port = 27017
database_name = 'free_ip'
# -*- coding: utf-8 -*-
"""Working_Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/15vMWS3F0cKUYSGMFpdKrZkNjuDhfApRm
"""
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.metrics import *
from xgboost import XGBClassifier
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier
train = pd.read_csv("/content/drive/My Drive/train.csv", encoding="utf-8")
test = pd.read_csv("/content/drive/My Drive/test.csv", encoding="utf-8")
train.dtypes
data_types = ["float32","float64","int32","int64","object","category","datetime64[ns]"]
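
# The notebook cells defining the helpers below were not included in this
# export; these are minimal stand-ins inferred from how they are called.
def data_info(df, types, name):
    # Report how many columns of each dtype the dataframe has.
    for t in types:
        print(name, t, len(df.select_dtypes(include=[t]).columns))

def display_head_tail(df, n_head, n_tail):
    print(df.head(n_head))
    print(df.tail(n_tail))

def remove_duplicates(df):
    df.drop_duplicates(inplace=True)

def handle_missing_values(df, fill_value, fill_types, columns, name):
    # name is only used for reporting in the original; unused here.
    for col in columns:
        if "Forward_Fill" in fill_types:
            df[col] = df[col].ffill()
        df[col] = df[col].fillna(fill_value)
    return df

def unique_values(df):
    print(df.nunique())

def heatmap(df):
    sns.heatmap(df.select_dtypes(include=[np.number]).corr())
    plt.show()

def cntplt(df, col):
    sns.countplot(x=col, data=df)
    plt.show()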
data_info(train, data_types, "train")
display_head_tail(train, 3, 3)
remove_duplicates(train)
fill_types = [ "Forward_Fill"]
fill_value = train["Number_Weeks_Used"].median()
train = handle_missing_values(train, fill_value, fill_types, ["Number_Weeks_Used"],"train")
unique_values(train)
heatmap(train)
cntplt(train, 'Crop_Damage')
sns.catplot(x="Crop_Damage", y="Season", hue="Crop_Damage", kind="bar", data=train);
X = train.drop(labels=['Crop_Damage'], axis=1)
y = train['Crop_Damage']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
y_test.value_counts()
X_train_data = X_train.iloc[:, 1:9]
X_test_data = X_test.iloc[:, 1:9]
X_train_data
lgbm = LGBMClassifier()
lgbm_pred = lgbm.fit(X_train_data, y_train)
y_pred = lgbm_pred.predict(X_test_data)
print(accuracy_score(y_pred, y_test))
test2 = test.iloc[:, 1:9]
test_pred = lgbm_pred.predict(test2)
test2['Crop_Damage'] = test_pred
test2['ID'] = test['ID']
test2
output=pd.DataFrame(data={"ID":test2["ID"],"Crop_Damage":test2["Crop_Damage"]}).to_csv("Sol.csv", index=False)
from google.colab import files
files.download('Sol.csv')
output
output=pd.DataFrame(data={"ID":["ID"],"Crop_Damage":y_pred}).to_csv("Sample.csv", index=False)
from google.colab import files
files.download('Sample.csv')
import numpy
#Initialize the class variables
#The variable names indicate what data is stored in them
#Method to calculate the hinge loss value for the entire data set
#Method to calculate the hinge loss value and gradient for a particular row (used for Stochastic Gradient)
#Method to train classifier using Stochastic Gradient Method
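
# A minimal sketch matching the outline above; the class and variable names
# are assumptions, since the original implementation was not included.
class HingeLossClassifier:
    def __init__(self, features, labels, learning_rate=0.01, reg=0.01):
        # Training rows, labels in {-1, +1}, and hyperparameters.
        self.features = features
        self.labels = labels
        self.learning_rate = learning_rate
        self.reg = reg
        self.weights = numpy.zeros(features.shape[1])

    def total_hinge_loss(self):
        # Average hinge loss over the entire data set.
        margins = self.labels * self.features.dot(self.weights)
        return numpy.mean(numpy.maximum(0.0, 1.0 - margins))

    def row_loss_and_gradient(self, i):
        # Hinge loss and (sub)gradient for a single row (stochastic step).
        margin = self.labels[i] * self.features[i].dot(self.weights)
        if margin >= 1.0:
            return 0.0, self.reg * self.weights
        grad = self.reg * self.weights - self.labels[i] * self.features[i]
        return 1.0 - margin, grad

    def train(self, epochs=10):
        # Stochastic gradient descent over shuffled rows.
        for _ in range(epochs):
            for i in numpy.random.permutation(self.features.shape[0]):
                _, grad = self.row_loss_and_gradient(i)
                self.weights -= self.learning_rate * grad
        return self.weights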
#!/usr/bin/python
import smbus
import os
import time
import math
twowire = smbus.SMBus(1)
cal0 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
cal1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
dig_T = [0,0,0,0,0,0,0,0,0]
dig_P = [0,0,0,0,0,0,0,0,0,0]
dig_H = [0,0,0,0,0,0,0,0,0]
raw_data = [0,0,0,0,0,0,0,0]
raw_values = [0,0,0]
TEMP = 1 ; ALTI = 0 ; HUMI = 2
T1 = 0 ; T2 = 1 ; T3 = 2
P1 = 0; P2 = 1; P3 = 2; P4 = 3; P5 = 4
P6 = 5; P7 = 6; P8 = 7; P9 = 8
H1 = 0; H2 = 1; H3 = 2; H4 = 3; H5 = 4; H6 = 5
ADDR = 0x76 #I2C Address BME280
CALB0 = 0x88 #start address calibration data0 T1-T3 P1-P9 H1
CALB1 = 0xE1 #start address calibration data1 H2-H6
MEASURE = 0xF7 #start address ad-conversion
CONTROL = 0xF4 #config adc temperature and airpressure
CONTROL1 = 0xF2 #config adc humidity
CONTROL2 = 0xF5 #config iir filter to discriminate noise
CONTROL_BYTE = 0x27 #oversampling x 1 forced mode temp and pressure
CONTROL_BYTE1 = 0x01 #oversampling x 1 humidity
CONTROL_BYTE2 = 0x04 #iir filter koefficient 2
#section read out eeprom calibration data0 and data1
#end section
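
# Reconstructed sketches: config_adc() and read_weather_data() are called
# below but were missing from this excerpt, as is the Bosch compensation
# math, so read_weather_data() only returns placeholder values.
def config_adc():
    twowire.write_byte_data(ADDR, CONTROL1, CONTROL_BYTE1) # humidity oversampling first
    twowire.write_byte_data(ADDR, CONTROL2, CONTROL_BYTE2) # iir filter
    twowire.write_byte_data(ADDR, CONTROL, CONTROL_BYTE) # temp/pressure oversampling, mode

def read_weather_data():
    raw = twowire.read_i2c_block_data(ADDR, MEASURE, 8) # burst-read the adc registers
    raw_data[:len(raw)] = raw
    return [0.0, 0.0, 0.0] # indexed by ALTI, TEMP, HUMI below
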
file = "/var/www/html/wetter/bme280.dat"
timestamp = time.asctime(time.localtime(time.time()))
filehandle = open(file, "a")
config_adc() #first of all start this function for proper adc work
test = read_weather_data()
filehandle.write("time+%s+temp+%3.1f+pressure+%5i+humidity+%3i\r\n" %(timestamp, test[TEMP], test[ALTI], test[HUMI]))
filehandle.close()
# print(str(test[TEMP]) + " " + str(test[ALTI]) + " " + str(test[HUMI]))
time.sleep(1)
from django.shortcuts import render, redirect
from .models import Contact
from django.shortcuts import get_object_or_404
from .forms import ContactForm
from django.contrib import messages
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
# Five @login_required view functions followed here; their bodies were not
# included in this excerpt.
import pytest
from share import models
from share.search import SearchIndexer
from tests import factories
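# Reconstructed disjoint-set ("union-find") classes: Node is used below but
# was missing from this excerpt; names and attributes are inferred from usage.
class Item:
    def __init__(self, index):
        self.index = index
        self.parent = self

class Node:
    def makeset(self, n):
        # One singleton set per vertex; n + 1 slots tolerate 1-based input.
        self.nodes = [Item(i) for i in range(n + 1)]

    def findset(self, u):
        # Follow parent pointers up to the representative of u's set.
        item = self.nodes[u]
        while item.parent is not item:
            item = item.parent
        return item.index

    def union(self, a, b):
        # Attach representative b under representative a.
        self.nodes[b].parent = self.nodes[a]
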
n = int(input("Enter number of nodes:"))
node = Node()
node.makeset(n)
edges = int(input("Enter number of edges:"))
lis = []
mst = []
for i in range(edges):
x, y, w = input().split(' ')
lis.append((int(w), int(x), int(y)))
lis.sort()
print (lis)
for j in range(edges):
u = lis[j][1]
v = lis[j][2]
a = node.findset(u)
b = node.findset(v)
# print (str(u) + " parent = " + str(node.nodes[u].parent.index))
# print (str(v) + " parent = " + str(node.nodes[v].parent.index))
if a != b:
node.union(a, b)
mst.append((u, v))
# print (str(u) + " parent = " + str(node.nodes[u].parent.index))
# print (str(v) + " parent = " + str(node.nodes[v].parent.index))
# print ("*****")
print (mst)
"""PRAW exception classes.
Includes two main exceptions: :class:`.RedditAPIException` for when something
goes wrong on the server side, and :class:`.ClientException` when something
goes wrong on the client side. Both of these classes extend
:class:`.PRAWException`.
All other exceptions are subclassed from :class:`.ClientException`.
"""
from typing import List, Optional, Union
from warnings import warn
class PRAWException(Exception):
"""The base Async PRAW Exception that all other exception classes extend."""
class RedditErrorItem:
"""Represents a single error returned from Reddit's API."""
@property
def error_message(self) -> str:
"""Get the completed error message string."""
error_str = f"{self.error_type}: {self.message!r}"
if self.field:
error_str += f" on field {self.field!r}"
return error_str
def __init__(self, error_type: str, message: str, field: Optional[str] = None):
"""Instantiate an error item.
:param error_type: The error type set on Reddit's end.
:param message: The associated message for the error.
:param field: The input field associated with the error, if available.
"""
self.error_type = error_type
self.message = message
self.field = field
def __eq__(self, other: Union["RedditErrorItem", List[str]]):
"""Check for equality."""
if isinstance(other, RedditErrorItem):
return (self.error_type, self.message, self.field) == (
other.error_type,
other.message,
other.field,
)
return super().__eq__(other)
def __repr__(self):
"""Return repr(self)."""
return f"{self.__class__.__name__}(error_type={self.error_type!r}, message={self.message!r}, field={self.field!r})"
def __str__(self):
"""Get the message returned from str(self)."""
return self.error_message
class APIException(PRAWException):
"""Old class preserved for alias purposes.
.. deprecated:: 7.0
Class :class:`.APIException` has been deprecated in favor of
:class:`.RedditAPIException`. This class will be removed in Async PRAW 8.0.
"""
@staticmethod
def parse_exception_list(exceptions: List[Union[RedditErrorItem, List[str]]]):
"""Covert an exception list into a :class:`.RedditErrorItem` list."""
return [
exception
if isinstance(exception, RedditErrorItem)
else RedditErrorItem(
exception[0],
exception[1],
exception[2] if bool(exception[2]) else "",
)
for exception in exceptions
]
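
    def _get_old_attr(self, attrname):
        # Reconstructed: the deprecated properties below call this helper,
        # which was missing from the excerpt; it proxies to the first item.
        warn(
            f"Accessing attribute ``{attrname}`` through APIException is"
            " deprecated. This behavior will be removed in Async PRAW 8.0.",
            category=DeprecationWarning,
            stacklevel=3,
        )
        return getattr(self.items[0], attrname)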
@property
def error_type(self) -> str:
"""Get error_type.
.. deprecated:: 7.0
Accessing attributes through instances of
:class:`.RedditAPIException` is deprecated. This behavior will be
removed in Async PRAW 8.0. Check out the
:ref:`PRAW 7 Migration tutorial <Exception_Handling>` on how to
migrate code from this behavior.
"""
return self._get_old_attr("error_type")
@property
def message(self) -> str:
"""Get message.
.. deprecated:: 7.0
Accessing attributes through instances of
:class:`.RedditAPIException` is deprecated. This behavior will be
removed in Async PRAW 8.0. Check out the
:ref:`PRAW 7 Migration tutorial <Exception_Handling>` on how to
migrate code from this behavior.
"""
return self._get_old_attr("message")
@property
def field(self) -> str:
"""Get field.
.. deprecated:: 7.0
Accessing attributes through instances of
:class:`.RedditAPIException` is deprecated. This behavior will be
removed in Async PRAW 8.0. Check out the
:ref:`PRAW 7 Migration tutorial <Exception_Handling>` on how to
migrate code from this behavior.
"""
return self._get_old_attr("field")
def __init__(
self,
items: Union[List[Union[RedditErrorItem, List[str], str]], str],
*optional_args: str,
):
"""Initialize an instance of RedditAPIException.
:param items: Either a list of instances of :class:`.RedditErrorItem`
or a list containing lists of unformed errors.
:param optional_args: Takes the second and third arguments that
:class:`.APIException` used to take.
"""
if isinstance(items, str):
items = [[items, *optional_args]]
elif isinstance(items, list) and isinstance(items[0], str):
items = [items]
self.items = self.parse_exception_list(items)
super().__init__(*self.items)
class RedditAPIException(APIException):
"""Container for error messages from Reddit's API."""
class ClientException(PRAWException):
"""Indicate exceptions that don't involve interaction with Reddit's API."""
class DuplicateReplaceException(ClientException):
"""Indicate exceptions that involve the replacement of MoreComments."""
def __init__(self):
"""Initialize the class."""
super().__init__(
"A duplicate comment has been detected. Are you attempting to call"
" ``replace_more_comments`` more than once?"
)
class InvalidFlairTemplateID(ClientException):
"""Indicate exceptions where an invalid flair template id is given."""
def __init__(self, template_id: str):
"""Initialize the class."""
super().__init__(
f"The flair template id ``{template_id}`` is invalid. If you are trying "
f"to create a flair, please use the ``add`` method."
)
class InvalidImplicitAuth(ClientException):
"""Indicate exceptions where an implicit auth type is used incorrectly."""
def __init__(self):
"""Instantize the class."""
super().__init__("Implicit authorization can only be used with installed apps.")
class InvalidURL(ClientException):
"""Indicate exceptions where an invalid URL is entered."""
def __init__(self, url: str, message: str = "Invalid URL: {}"):
"""Initialize the class.
:param url: The invalid URL.
:param message: The message to display. Must contain a format
identifier (``{}`` or ``{0}``). (default: ``"Invalid URL: {}"``)
"""
super().__init__(message.format(url))
class MissingRequiredAttributeException(ClientException):
"""Indicate exceptions caused by not including a required attribute."""
class TooLargeMediaException(ClientException):
"""Indicate exceptions from uploading media that's too large."""
def __init__(self, maximum_size: int, actual: int):
"""Initialize a TooLargeMediaException.
:param maximum_size: The maximum_size size of the uploaded media.
:param actual: The actual size of the uploaded media.
"""
self.maximum_size = maximum_size
self.actual = actual
super().__init__(
f"The media that you uploaded was too large (maximum size is "
f"{maximum_size} bytes, uploaded {actual} bytes)"
)
class WebSocketException(ClientException):
"""Indicate exceptions caused by use of WebSockets."""
@property
def original_exception(self) -> Exception:
"""Access the original_exception attribute (now deprecated)."""
warn(
"Accessing the attribute original_exception is deprecated. Please"
" rewrite your code in such a way that this attribute does not"
" need to be used. It will be removed in Async PRAW 8.0.",
category=DeprecationWarning,
stacklevel=2,
)
return self._original_exception
    @original_exception.setter
    def original_exception(self, value: Exception):
        # Reconstructed: the setter/deleter bodies were missing from this
        # excerpt, leaving the decorators dangling over __init__.
        self._original_exception = value

    @original_exception.deleter
    def original_exception(self):
        del self._original_exception
def __init__(self, message: str, exception: Optional[Exception]):
"""Initialize a WebSocketException.
:param message: The exception message.
:param exception: The exception thrown by the websocket library.
.. note:: This parameter is deprecated. It will be removed in Async PRAW
8.0.
"""
super().__init__(message)
self._original_exception = exception
class MediaPostFailed(WebSocketException):
"""Indicate exceptions where media uploads failed.."""
def __init__(self):
"""Instantiate MediaPostFailed."""
super().__init__(
"The attempted media upload action has failed. Possible causes"
" include the corruption of media files. Check that the media "
"file can be opened on your local machine.",
None,
)
""" Modulo para manejo de matrices de transformadas. """
from OpenGL.GL import *
from functools import wraps
# Decorator to keep the matrix stack in order
def matriz_propia(fun):
""" Funcion para manejar espacios de matriz de OpenGL.
La funcion que sea recibida por el decorador
espera manejar matrices de traslacion, rota_
cion o escalamiento de OpenGl. De manera que
este decorador agregara la matriz al inicio
con PushMatrix, haga lo que deba hacer para
despues quitarala al final de las operaciones
con PopMatrix.
"""
    @wraps(fun)
    def ret_fun(*args, **kwargs):
        # Reconstructed body: push a matrix, run the wrapped transform, then
        # pop it so the stack is left as it was found.
        glPushMatrix()
        result = fun(*args, **kwargs)
        glPopMatrix()
        return result
    return ret_fun
#######################################
# Classes for matrix handling #####
#####################################
class Jerarquia:
""" Clase para manejar Jerarquia como contexto.
Esta clase ayudara a manejar los niveles de Jerarquia y
sus transformaciones como cotexto. Esto ayuda a mejorar
la legibilidad del codigo y a reducir los errores.
Al entrar a un nuevo nivel agregamos una matriz a la pila
de transformaciones.
Con el objeto obtenido podemos agregar transformaciones
directas a esta pila como tranladra, rotar y zoom.
Al salir del contexto quitamos la matriz correspondiente.
"""
# -*- coding: utf8 -*-
"""
Copyright 2014-2016 Andreas Würl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
from injector import singleton, inject
from .base import HttpFileTransport, BlitzortungDataPath, BlitzortungDataPathGenerator
from .. import builder
# A @singleton-decorated class followed here; its body was not included in
# this excerpt.
class StagedObject:
"""
Use this class as a mixin to provide an interface for onStage/offStage objects.
The idea here is that a DistributedObject could be present and active due to
simple visibility, but we want to hide or otherwise disable it for some reason.
"""
UNKNOWN = -1
OFF = 0
ON = 1
def __init__(self, initState = UNKNOWN):
"""
Only sets the initial state of this object. This will not
call any "handle" functions.
"""
self.__state = initState
def goOnStage(self, *args, **kw):
"""
If a stage switch is needed, the correct "handle" function
will be called. Otherwise, nothing happens.
"""
# This is the high level function that clients of
# your class should call to set the on/off stage state.
if not self.isOnStage():
self.handleOnStage(*args, **kw)
def handleOnStage(self):
"""
        Override this function to provide your on/off stage functionality.
Don't forget to call down to this one, though.
"""
self.__state = StagedObject.ON
def goOffStage(self, *args, **kw):
"""
If a stage switch is needed, the correct "handle" function
will be called. Otherwise, nothing happens.
"""
# This is the high level function that clients of
# your class should call to set the on/off stage state.
if not self.isOffStage():
self.handleOffStage(*args, **kw)
def handleOffStage(self):
"""
        Override this function to provide your on/off stage functionality.
Don't forget to call down to this one, though.
"""
self.__state = StagedObject.OFF
#!/usr/bin/python
from tweet_archiveur.scrapper import Scrapper
from tweet_archiveur.database import Database
import pandas as pd
from os import getenv
from dotenv import load_dotenv
from pathlib import Path
from sys import exit
import logging
import time
import tweepy
import random
# Logging
logger = logging.getLogger("tweet-archiveur")
logFormatter = logging.Formatter("%(asctime)s - %(name)-12s %(levelname)-8s %(message)s")
logger.setLevel(logging.DEBUG)
if not len(logger.handlers):
# Console logger
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.info(f'Start archiving')
# Load env if not set
if getenv('DATABASE_USER') is None:
logger.warning("No env variable found, loading .env...")
env_path = Path('.') / '.env'
if env_path.is_file():
load_dotenv(dotenv_path=env_path)
else:
logger.error(f"No env set and no {env_path} found !")
exit(1)
logger.info(f'Getting all tweet...')
scrapper = Scrapper()
df_users = scrapper.get_users_accounts()
users_id = df_users.twitter_id.tolist()
database = Database()
database.create_tables_if_not_exist()
database.insert_twitter_users(df_users)
scrapper.get_all_tweet_and_store_them(database, users_id)
del database
del scrapper
logger.info(f'Done archiving')
import requests
import json
import re
# from requests.sessions import _TextMapping
ATHLETES = []
with open('../../assets/nflAthletes.json', 'r') as athFile:
data = athFile.read()
athJson = json.loads(data)
# list of abbreviated names
active_list = []
id_list = []
for ath in athJson:
id_list.append(ath['id'])
active_list.append(ath['name'])
# active_list = [
# 'T.Brady',
# 'P.Mahomes',
# 'J.Allen',
# 'L.Jackson',
# 'D.Adams',
# 'S.Diggs',
# 'C.Ridley',
# 'D.Metcalf',
# 'T.Hill',
# 'D.Henry',
# 'D.Cook',
# 'D.Harris',
# 'N.Harris'
# ]
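
# Reconstructed sketch: the Athlete class is used below but was missing from
# this excerpt; attribute names are inferred from how instances are used.
class Athlete:
    def __init__(self, name):
        self.name = name
        self.team = None
        self.position = None
        self.passingYards = []
        self.passingTouchdowns = []
        self.reception = []
        self.receiveYards = []
        self.receiveTouch = []
        self.rushingYards = []
        self.price = []
        self.time = []

    def make_dict(self):
        # Returning the name as the identifier is an assumption.
        return self.name, vars(self).copy()
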
host = 'https://db.axmarkets.net'
for athlete in active_list:
new_athlete = Athlete(athlete)
try:
sql_query = f"select * from nfl where name = '{athlete}'" # loop through current athletes and select only the data one by one
response = requests.post(host + '/exec', params={'query': sql_query})
json_response = json.loads(response.text)
rows = json_response['dataset']
# print(rows)
new_athlete.team, new_athlete.position = rows[0][1], rows[0][2]
for row in rows:
new_athlete.passingYards.append(row[3])
# print(row[3], 'passing yards')
new_athlete.passingTouchdowns.append(row[4])
# print(row[4], 'passing touch')
new_athlete.reception.append(row[5])
# print(row[5], 'reception')
new_athlete.receiveYards.append(row[6])
# print(row[6], 'receive yards')
new_athlete.receiveTouch.append(row[7])
# print(row[7], 'receive touch')
new_athlete.rushingYards.append(row[8])
            # print(row[8], 'rushing yards')
new_athlete.price.append(row[9])
# print(row[9], 'price')
new_athlete.time.append(row[10])
# print(row[10], 'time')
print(new_athlete.name, new_athlete.price)
except requests.exceptions.RequestException as e:
print("Error: %s" % (e))
ATHLETES.append(new_athlete)
jsondata = []
for athlete in ATHLETES:
id, json_data = athlete.make_dict()
jsondata.append(json_data)
with open('../../assets/data.json', 'w') as f:
json.dump(jsondata, f)
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pathlib
import subprocess # nosec
import sys
import pytest
from setup import SNYK_API, SNYK_TOKEN
from utils.loader import SNYK_URL
from utils.utilities import download_file
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/prediction/proto/prediction_conf.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from modules.perception.proto import perception_obstacle_pb2 as modules_dot_perception_dot_proto_dot_perception__obstacle__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/prediction/proto/prediction_conf.proto',
package='apollo.prediction',
syntax='proto2',
serialized_pb=_b('\n.modules/prediction/proto/prediction_conf.proto\x12\x11\x61pollo.prediction\x1a\x32modules/perception/proto/perception_obstacle.proto\"\xe9\x04\n\x0cObstacleConf\x12\x41\n\robstacle_type\x18\x01 \x01(\x0e\x32*.apollo.perception.PerceptionObstacle.Type\x12G\n\x0fobstacle_status\x18\x02 \x01(\x0e\x32..apollo.prediction.ObstacleConf.ObstacleStatus\x12\x45\n\x0e\x65valuator_type\x18\x03 \x01(\x0e\x32-.apollo.prediction.ObstacleConf.EvaluatorType\x12\x45\n\x0epredictor_type\x18\x04 \x01(\x0e\x32-.apollo.prediction.ObstacleConf.PredictorType\"G\n\x0eObstacleStatus\x12\x0b\n\x07ON_LANE\x10\x00\x12\x0c\n\x08OFF_LANE\x10\x01\x12\x0e\n\nSTATIONARY\x10\x03\x12\n\n\x06MOVING\x10\x04\"I\n\rEvaluatorType\x12\x11\n\rMLP_EVALUATOR\x10\x00\x12\x11\n\rRNN_EVALUATOR\x10\x01\x12\x12\n\x0e\x43OST_EVALUATOR\x10\x02\"\xaa\x01\n\rPredictorType\x12\x1b\n\x17LANE_SEQUENCE_PREDICTOR\x10\x00\x12\x17\n\x13\x46REE_MOVE_PREDICTOR\x10\x01\x12\x16\n\x12REGIONAL_PREDICTOR\x10\x02\x12\x1b\n\x17MOVE_SEQUENCE_PREDICTOR\x10\x03\x12\x13\n\x0f\x45MPTY_PREDICTOR\x10\x04\x12\x19\n\x15SINGLE_LANE_PREDICTOR\x10\x05\"H\n\x0ePredictionConf\x12\x36\n\robstacle_conf\x18\x01 \x03(\x0b\x32\x1f.apollo.prediction.ObstacleConf')
,
dependencies=[modules_dot_perception_dot_proto_dot_perception__obstacle__pb2.DESCRIPTOR,])
_OBSTACLECONF_OBSTACLESTATUS = _descriptor.EnumDescriptor(
name='ObstacleStatus',
full_name='apollo.prediction.ObstacleConf.ObstacleStatus',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ON_LANE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OFF_LANE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATIONARY', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOVING', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=420,
serialized_end=491,
)
_sym_db.RegisterEnumDescriptor(_OBSTACLECONF_OBSTACLESTATUS)
_OBSTACLECONF_EVALUATORTYPE = _descriptor.EnumDescriptor(
name='EvaluatorType',
full_name='apollo.prediction.ObstacleConf.EvaluatorType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MLP_EVALUATOR', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RNN_EVALUATOR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COST_EVALUATOR', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=493,
serialized_end=566,
)
_sym_db.RegisterEnumDescriptor(_OBSTACLECONF_EVALUATORTYPE)
_OBSTACLECONF_PREDICTORTYPE = _descriptor.EnumDescriptor(
name='PredictorType',
full_name='apollo.prediction.ObstacleConf.PredictorType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='LANE_SEQUENCE_PREDICTOR', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FREE_MOVE_PREDICTOR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REGIONAL_PREDICTOR', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOVE_SEQUENCE_PREDICTOR', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMPTY_PREDICTOR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SINGLE_LANE_PREDICTOR', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=569,
serialized_end=739,
)
_sym_db.RegisterEnumDescriptor(_OBSTACLECONF_PREDICTORTYPE)
_OBSTACLECONF = _descriptor.Descriptor(
name='ObstacleConf',
full_name='apollo.prediction.ObstacleConf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_type', full_name='apollo.prediction.ObstacleConf.obstacle_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='obstacle_status', full_name='apollo.prediction.ObstacleConf.obstacle_status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='evaluator_type', full_name='apollo.prediction.ObstacleConf.evaluator_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predictor_type', full_name='apollo.prediction.ObstacleConf.predictor_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_OBSTACLECONF_OBSTACLESTATUS,
_OBSTACLECONF_EVALUATORTYPE,
_OBSTACLECONF_PREDICTORTYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=739,
)
_PREDICTIONCONF = _descriptor.Descriptor(
name='PredictionConf',
full_name='apollo.prediction.PredictionConf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='obstacle_conf', full_name='apollo.prediction.PredictionConf.obstacle_conf', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=741,
serialized_end=813,
)
_OBSTACLECONF.fields_by_name['obstacle_type'].enum_type = modules_dot_perception_dot_proto_dot_perception__obstacle__pb2._PERCEPTIONOBSTACLE_TYPE
_OBSTACLECONF.fields_by_name['obstacle_status'].enum_type = _OBSTACLECONF_OBSTACLESTATUS
_OBSTACLECONF.fields_by_name['evaluator_type'].enum_type = _OBSTACLECONF_EVALUATORTYPE
_OBSTACLECONF.fields_by_name['predictor_type'].enum_type = _OBSTACLECONF_PREDICTORTYPE
_OBSTACLECONF_OBSTACLESTATUS.containing_type = _OBSTACLECONF
_OBSTACLECONF_EVALUATORTYPE.containing_type = _OBSTACLECONF
_OBSTACLECONF_PREDICTORTYPE.containing_type = _OBSTACLECONF
_PREDICTIONCONF.fields_by_name['obstacle_conf'].message_type = _OBSTACLECONF
DESCRIPTOR.message_types_by_name['ObstacleConf'] = _OBSTACLECONF
DESCRIPTOR.message_types_by_name['PredictionConf'] = _PREDICTIONCONF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ObstacleConf = _reflection.GeneratedProtocolMessageType('ObstacleConf', (_message.Message,), dict(
DESCRIPTOR = _OBSTACLECONF,
__module__ = 'modules.prediction.proto.prediction_conf_pb2'
# @@protoc_insertion_point(class_scope:apollo.prediction.ObstacleConf)
))
_sym_db.RegisterMessage(ObstacleConf)
PredictionConf = _reflection.GeneratedProtocolMessageType('PredictionConf', (_message.Message,), dict(
DESCRIPTOR = _PREDICTIONCONF,
__module__ = 'modules.prediction.proto.prediction_conf_pb2'
# @@protoc_insertion_point(class_scope:apollo.prediction.PredictionConf)
))
_sym_db.RegisterMessage(PredictionConf)
# @@protoc_insertion_point(module_scope)
import argparse
from os import path
from distutils.dir_util import copy_tree
TEMPLATE_PATH = "core/conf/projct_template"
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.models import resnet152
import setuptools
import FIP_mirror
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="FIP_mirror",
version=FIP_mirror.__version__,
author="dbeley",
author_email="dbeley@protonmail.com",
description="Mirror the FIP webradios on several services.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dbeley/FIP_mirror",
packages=setuptools.find_packages(),
include_package_data=True,
entry_points={"console_scripts": ["FIP_mirror=FIP_mirror.__main__:main"]},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: POSIX :: Linux",
],
install_requires=[
"pylast",
"requests",
"beautifulsoup4",
"lxml",
"tweepy",
"Mastodon.py",
],
)
| input_ids: [... truncated ...] | 2.304233 | 378
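For reference, the console_scripts entry point declared above maps the FIP_mirror command to the package's main function; after installation the equivalent direct call from Python would be:

from FIP_mirror.__main__ import main
main()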
"""
Settings for achievements app.
"""
DEFAULT_ADMIN = ''
DEFAULT_ADMIN_PASSWORD = ''
DEFAULT_ADMIN_MAIL = '' | input_ids: [... truncated ...] | 2.725 | 40
"""Copy attributes from old data models to GenericAsset
Revision ID: 6cf5b241b85f
Revises: 1ae32ffc8c3f
Create Date: 2021-11-11 17:18:15.395915
"""
import json
from datetime import datetime
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "6cf5b241b85f"
down_revision = "1ae32ffc8c3f"
branch_labels = None
depends_on = None
def upgrade():
"""
Add attributes column to GenericAsset and Sensor tables. Then:
- For each OldModel (Market/WeatherSensor/Asset), get the Sensor with the same id as the OldModel,
and then get the GenericAsset of that Sensor.
- Add the OldModel's display name to the corresponding GenericAsset's and Sensor's attributes,
and other attributes we want to copy. Most go to the Sensor.
- Find the OldModelType (MarketType/WeatherSensorType/AssetType) of the OldModel,
and copy its seasonalities and other attributes to the GenericAsset's or Sensor's attributes.
"""
op.add_column(
"generic_asset",
sa.Column("attributes", sa.JSON(), nullable=True, default={}),
)
op.add_column(
"sensor",
sa.Column("attributes", sa.JSON(), nullable=True, default={}),
)
# Declare ORM table views
t_generic_asset_type = sa.Table(
"generic_asset_type",
sa.MetaData(),
sa.Column("id"),
sa.Column("name"),
)
t_generic_asset = sa.Table(
"generic_asset",
sa.MetaData(),
sa.Column("id"),
sa.Column("generic_asset_type_id"),
sa.Column("latitude"),
sa.Column("longitude"),
sa.Column("attributes"),
)
t_sensor = sa.Table(
"sensor",
sa.MetaData(),
sa.Column("id"),
sa.Column("name"),
sa.Column("attributes"),
sa.Column("generic_asset_id"),
sa.Column("unit"),
sa.Column("event_resolution"),
sa.Column("knowledge_horizon_fnc"),
sa.Column("knowledge_horizon_par"),
)
t_market = sa.Table(
"market",
sa.MetaData(),
sa.Column("id", sa.Integer),
sa.Column("market_type_name", sa.String(80)),
sa.Column("name"), # Copy to Sensor
sa.Column(
"display_name", sa.String(80)
), # Copy to both Sensor and to GenericAsset
sa.Column("unit"), # Copy to Sensor [done]
sa.Column("event_resolution"), # Copy to Sensor [done]
sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done]
sa.Column("knowledge_horizon_par"), # Copy to Sensor [done]
)
t_market_type = sa.Table(
"market_type",
sa.MetaData(),
sa.Column("name", sa.String(80)),
sa.Column("daily_seasonality", sa.Boolean), # Copy to Sensor
sa.Column("weekly_seasonality", sa.Boolean), # Copy to Sensor
sa.Column("yearly_seasonality", sa.Boolean), # Copy to Sensor
)
t_asset = sa.Table(
"asset",
sa.MetaData(),
sa.Column("id"),
sa.Column("asset_type_name"),
sa.Column("name"), # Copy to Sensor
sa.Column("display_name"), # Copy to both Sensor and to GenericAsset
sa.Column("latitude"), # Copy to GenericAsset
sa.Column("longitude"), # Copy to GenericAsset
sa.Column("capacity_in_mw"), # Copy to Sensor
sa.Column("min_soc_in_mwh"), # Copy to GenericAsset [1]
sa.Column("max_soc_in_mwh"), # Copy to GenericAsset [1]
sa.Column("soc_in_mwh"), # Copy to GenericAsset [1]
sa.Column("soc_datetime"), # Copy to GenericAsset [1]
sa.Column("soc_udi_event_id"), # Copy to GenericAsset [2]
sa.Column("market_id"), # Copy to Sensor [3]
sa.Column("unit"), # Copy to Sensor [done]
sa.Column("event_resolution"), # Copy to Sensor [done]
sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done]
sa.Column("knowledge_horizon_par"), # Copy to Sensor [done]
)
# [1] will be moved to a separate sensor later
# [2] deprecated in favour of Redis job id since api v1.3
# [3] will be deprecated in favour of something like a weighed by relationship (could be multiple)
t_asset_type = sa.Table(
"asset_type",
sa.MetaData(),
sa.Column("name", sa.String(80)),
sa.Column("is_consumer"), # Copy to Sensor
sa.Column("is_producer"), # Copy to Sensor
sa.Column("can_curtail"), # Copy to GenericAsset [4]
sa.Column("can_shift"), # Copy to GenericAsset [4]
sa.Column("daily_seasonality", sa.Boolean), # Copy to Sensor
sa.Column("weekly_seasonality", sa.Boolean), # Copy to Sensor
sa.Column("yearly_seasonality", sa.Boolean), # Copy to Sensor
)
# [4] will be deprecated in favour of actuator functionality
t_weather_sensor = sa.Table(
"weather_sensor",
sa.MetaData(),
sa.Column("id"),
sa.Column("weather_sensor_type_name"),
sa.Column("name"), # Copy to Sensor
sa.Column("display_name"), # Copy to both Sensor and to GenericAsset
sa.Column("latitude"), # Copy to GenericAsset
sa.Column("longitude"), # Copy to GenericAsset
sa.Column("unit"), # Copy to Sensor [done]
sa.Column("event_resolution"), # Copy to Sensor [done]
sa.Column("knowledge_horizon_fnc"), # Copy to Sensor [done]
sa.Column("knowledge_horizon_par"), # Copy to Sensor [done]
)
t_weather_sensor_type = sa.Table(
"weather_sensor_type",
sa.MetaData(),
sa.Column("name", sa.String(80)),
)
# Use SQLAlchemy's connection and transaction to go through the data
connection = op.get_bind()
# Set default attributes
connection.execute(
t_sensor.update().values(
attributes=json.dumps({}),
)
)
connection.execute(
t_generic_asset.update().values(
attributes=json.dumps({}),
)
)
copy_attributes(
connection,
t_market,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target=t_sensor,
t_old_model_type=t_market_type,
old_model_attributes=["id", "market_type_name", "display_name"],
old_model_type_attributes=[
"daily_seasonality",
"weekly_seasonality",
"yearly_seasonality",
],
)
copy_attributes(
connection,
t_market,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target=t_generic_asset,
t_old_model_type=t_market_type,
old_model_attributes=["id", "market_type_name", "display_name"],
)
copy_attributes(
connection,
t_weather_sensor,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target=t_sensor,
t_old_model_type=t_weather_sensor_type,
old_model_attributes=[
"id",
"weather_sensor_type_name",
"display_name",
"latitude",
"longitude",
],
extra_attributes={
"daily_seasonality": True,
"weekly_seasonality": False,
"yearly_seasonality": True,
}, # The WeatherSensor table had these hardcoded (d, w, y) seasonalities
)
copy_attributes(
connection,
t_weather_sensor,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target=t_generic_asset,
t_old_model_type=t_weather_sensor_type,
old_model_attributes=["id", "weather_sensor_type_name", "display_name"],
)
copy_attributes(
connection,
t_asset,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target=t_sensor,
t_old_model_type=t_asset_type,
old_model_attributes=[
"id",
"asset_type_name",
"display_name",
"latitude",
"longitude",
"capacity_in_mw",
"market_id",
],
old_model_type_attributes=[
"is_consumer",
"is_producer",
"daily_seasonality",
"weekly_seasonality",
"yearly_seasonality",
],
)
copy_attributes(
connection,
t_asset,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target=t_generic_asset,
t_old_model_type=t_asset_type,
old_model_attributes=[
"id",
"asset_type_name",
"display_name",
"min_soc_in_mwh",
"max_soc_in_mwh",
"soc_in_mwh",
"soc_datetime",
"soc_udi_event_id",
],
old_model_type_attributes=[
"can_curtail",
"can_shift",
],
extra_attributes_depending_on_old_model_type_name={
"solar": {
"correlations": ["radiation"],
},
"wind": {
"correlations": ["wind_speed"],
},
"one-way_evse": {
"correlations": ["temperature"],
},
"two-way_evse": {
"correlations": ["temperature"],
},
"battery": {
"correlations": ["temperature"],
},
"building": {
"correlations": ["temperature"],
},
}, # The GenericAssetType table had these hardcoded weather correlations
)
op.alter_column(
"sensor",
"attributes",
nullable=False,
)
op.alter_column(
"generic_asset",
"attributes",
nullable=False,
)
copy_sensor_columns(connection, t_market, t_sensor)
copy_sensor_columns(connection, t_weather_sensor, t_sensor)
copy_sensor_columns(connection, t_asset, t_sensor)
copy_location_columns_to_generic_asset(
connection, t_weather_sensor, t_generic_asset, t_sensor
)
copy_location_columns_to_generic_asset(
connection, t_asset, t_generic_asset, t_sensor
)
def copy_attributes(
connection,
t_old_model,
t_sensor,
t_generic_asset_type,
t_generic_asset,
t_target,
t_old_model_type,
old_model_attributes,
old_model_type_attributes=[],
extra_attributes={},
extra_attributes_depending_on_old_model_type_name={},
):
"""
:param old_model_attributes: first two attributes should be id and old_model_type_name, then any other columns we want to copy over from the old model
:param old_model_type_attributes: columns we want to copy over from the old model type
:param extra_attributes: any additional attributes we want to set
:param extra_attributes_depending_on_old_model_type_name: any additional attributes we want to set, depending on old model type name
"""
# Get attributes from old model
results = connection.execute(
sa.select([getattr(t_old_model.c, a) for a in old_model_attributes])
).fetchall()
for _id, type_name, *args in results:
# Obtain attributes we want to copy over, from the old model
old_model_attributes_to_copy = {
k: v if not isinstance(v, datetime) else v.isoformat()
for k, v in zip(old_model_attributes[-len(args) :], args)
}
# Obtain seasonality attributes we want to copy over, from the old model type
old_model_type_attributes_to_copy = get_old_model_type_attributes(
connection,
type_name,
t_old_model_type,
old_model_type_attributes=old_model_type_attributes,
)
# Find out where to copy over the attributes and where the old sensor type lives
if t_target.name == "generic_asset":
target_id = get_generic_asset_id(connection, _id, t_sensor)
elif t_target.name == "sensor":
target_id = _id
else:
raise ValueError
# Fill in the target class's attributes: A) first those with extra attributes depending on model type name
generic_asset_type_names_with_extra_attributes = (
extra_attributes_depending_on_old_model_type_name.keys()
)
if t_target.name == "generic_asset":
for gatn in generic_asset_type_names_with_extra_attributes:
connection.execute(
t_target.update()
.where(t_target.c.id == target_id)
.where(
t_generic_asset_type.c.id
== t_generic_asset.c.generic_asset_type_id
)
.where(t_generic_asset_type.c.name == gatn)
.values(
attributes=json.dumps(
{
**old_model_attributes_to_copy,
**old_model_type_attributes_to_copy,
**extra_attributes,
**extra_attributes_depending_on_old_model_type_name[
gatn
],
}
)
)
)
# Fill in the target class's attributes: B) then those without extra attributes depending on model type name
query = (
t_target.update()
.where(t_target.c.id == target_id)
.where(t_generic_asset_type.c.id == t_generic_asset.c.generic_asset_type_id)
.where(
t_generic_asset_type.c.name.not_in(
generic_asset_type_names_with_extra_attributes
)
)
.values(
attributes=json.dumps(
{
**old_model_attributes_to_copy,
**old_model_type_attributes_to_copy,
**extra_attributes,
}
)
)
)
if t_target.name == "generic_asset":
connection.execute(query)
elif t_target.name == "sensor":
connection.execute(
query.where(t_sensor.c.generic_asset_id == t_generic_asset.c.id)
)
def get_generic_asset_id(connection, old_model_id: int, t_sensors) -> int:
"""Get the Sensor with the same id as the OldModel, and then get the id of the GenericAsset of that Sensor."""
(generic_asset_id,) = connection.execute(
sa.select(
[
t_sensors.c.generic_asset_id,
]
).filter(t_sensors.c.id == old_model_id)
).one_or_none()
assert generic_asset_id is not None
return generic_asset_id
def get_old_model_type_attributes(
connection, old_model_type_name, t_old_model_types, old_model_type_attributes
) -> dict:
"""Get the attributes from the OldModelType."""
values = connection.execute(
sa.select(
[getattr(t_old_model_types.c, a) for a in old_model_type_attributes]
).filter(t_old_model_types.c.name == old_model_type_name)
).one_or_none()
assert values is not None
return {k: v for k, v in zip(old_model_type_attributes, values)}
| input_ids: [... truncated ...] | 2.064289 | 7,373
import torch.nn as nn
import torch
from utils.model_utils import init_model
from torchsummary import summary
if __name__ == "__main___":
__main__()
| input_ids: [... truncated ...] | 3.122449 | 49
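A hedged sketch of how these imports are typically exercised (the model and input size are hypothetical):

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.Flatten(), nn.Linear(16 * 32 * 32, 10))
summary(model, input_size=(3, 32, 32), device="cpu")  # prints a per-layer shape/parameter table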
from setuptools import find_packages, setup
setup(
name='acmclient',
version="0.2",
url='https://github.com/yangjiaronga/acmclient/',
license='MIT',
author='yangjiaronga',
author_email='yangjiaronga@gmail.com',
description='Aliyun acm client for Python',
packages=find_packages(),
install_requires=[
'requests',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) | input_ids: [... truncated ...] | 2.594203 | 276
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Test the behaviour of the looping through variables.
This is a series of tests that are defining the interface of the module,
primarily the iteration of the variables."""
import sys
from pathlib import Path
import pytest
from experi.run import process_command, process_jobs, read_file, variable_matrix
test_cases = sorted(Path("test/data/iter").glob("*.yml"))
@pytest.mark.xfail(
sys.version_info < (3, 6), reason="Dictionaries nondeterministic in python < 3.6"
)
@pytest.mark.parametrize("test_file", test_cases, ids=[i.stem for i in test_cases])
| input_ids: [... truncated ...] | 3.012295 | 244
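The decorated test function itself is not shown above; a hypothetical body consistent with the imports (names and assertions are illustrative, not the repository's actual test) might look like:

def test_iteration(test_file):
    experiment = read_file(test_file)
    variables = list(variable_matrix(experiment["variables"]))
    assert len(variables) > 0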
from __future__ import print_function
import sys
class Color(object):
"""A printable and mixable color."""
	# A dictionary of all colors, keyed by their numeric value
	colors = {}

	def __init__(self, value, name, code):
		# Store the numeric value, display name and ANSI SGR escape code,
		# and register the instance so it can be looked up by value later.
		self.value = value
		self.name = name
		self.code = code
		Color.colors[value] = self
Color.NEITHER = Color(0, 'neither', '0')
Color.RED = Color(1, 'red', '31;1')
Color.BLUE = Color(~1, 'blue', '44')
Color.PURPLE = Color(~0, 'purple', '37;45;1')
# RED == ~BLUE
# BLUE == ~RED
# PURPLE == RED | BLUE
# NEITHER == RED & BLUE
| input_ids: [... truncated ...] | 2.595092 | 163
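The identities in the comments above rely on bitwise operator overloads on the Color class that are not shown; a hedged sketch, delegating to Python's two's-complement integer operators so that ~1 == -2, 1 | -2 == -1 and 1 & -2 == 0 line up with the registered values:

def __invert__(self):
    return Color.colors[~self.value]

def __or__(self, other):
    return Color.colors[self.value | other.value]

def __and__(self, other):
    return Color.colors[self.value & other.value]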
from pathlib import PosixPath
import numpy
from osgeo import gdal
from osgeo import osr
from .image_output import SimpleImageOutput
from .resampler import get_resampler
from .utils import ensure_dir_exists, gdal_write
| input_ids: [... truncated ...] | 3.25 | 68
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteXf(unittest.TestCase):
    """
    Test the Styles _write_xf() method. This test case is similar to
    test_write_xf.py but with method calls instead of properties.
    """

    def setUp(self):
        # Each test renders XML into an in-memory handle through the Styles object.
        self.fh = StringIO()
        self.styles = Styles()
        self.styles._set_xml_writer(self.fh)
def test_write_xf_1(self):
"""Test the _write_xf() method. Default properties."""
xf_format = Format()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_2(self):
"""Test the _write_xf() method. Has font but is first XF."""
xf_format = Format()
xf_format.set_has_font()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_3(self):
"""Test the _write_xf() method. Has font but isn't first XF."""
xf_format = Format()
xf_format.set_has_font()
xf_format.set_font_index(1)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="1" fillId="0" borderId="0" xfId="0" applyFont="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_4(self):
"""Test the _write_xf() method. Uses built-in number format."""
xf_format = Format()
xf_format.set_num_format_index(2)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="2" fontId="0" fillId="0" borderId="0" xfId="0" applyNumberFormat="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_5(self):
"""Test the _write_xf() method. Uses built-in number format + font."""
xf_format = Format()
xf_format.set_num_format_index(2)
xf_format.set_has_font()
xf_format.set_font_index(1)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="2" fontId="1" fillId="0" borderId="0" xfId="0" applyNumberFormat="1" applyFont="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_6(self):
"""Test the _write_xf() method. Vertical alignment = top."""
xf_format = Format()
xf_format.set_align('top')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="top"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_7(self):
"""Test the _write_xf() method. Vertical alignment = centre."""
xf_format = Format()
xf_format.set_align('vcenter')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="center"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_8(self):
"""Test the _write_xf() method. Vertical alignment = bottom."""
xf_format = Format()
xf_format.set_align('bottom')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_9(self):
"""Test the _write_xf() method. Vertical alignment = justify."""
xf_format = Format()
xf_format.set_align('vjustify')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="justify"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_10(self):
"""Test the _write_xf() method. Vertical alignment = distributed."""
xf_format = Format()
xf_format.set_align('vdistributed')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment vertical="distributed"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_11(self):
"""Test the _write_xf() method. Horizontal alignment = left."""
xf_format = Format()
xf_format.set_align('left')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="left"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_12(self):
"""Test the _write_xf() method. Horizontal alignment = center."""
xf_format = Format()
xf_format.set_align('center')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="center"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_13(self):
"""Test the _write_xf() method. Horizontal alignment = right."""
xf_format = Format()
xf_format.set_align('right')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="right"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_14(self):
"""Test the _write_xf() method. Horizontal alignment = left + indent."""
xf_format = Format()
xf_format.set_align('left')
xf_format.set_indent()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="left" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_15(self):
"""Test the _write_xf() method. Horizontal alignment = right + indent."""
xf_format = Format()
xf_format.set_align('right')
xf_format.set_indent()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="right" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_16(self):
"""Test the _write_xf() method. Horizontal alignment = fill."""
xf_format = Format()
xf_format.set_align('fill')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="fill"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_17(self):
"""Test the _write_xf() method. Horizontal alignment = justify."""
xf_format = Format()
xf_format.set_align('justify')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="justify"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_18(self):
"""Test the _write_xf() method. Horizontal alignment = center across."""
xf_format = Format()
xf_format.set_align('center_across')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="centerContinuous"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_19(self):
"""Test the _write_xf() method. Horizontal alignment = distributed."""
xf_format = Format()
xf_format.set_align('distributed')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_20(self):
"""Test the _write_xf() method. Horizontal alignment = distributed + indent."""
xf_format = Format()
xf_format.set_align('distributed')
xf_format.set_indent()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_21(self):
"""Test the _write_xf() method. Horizontal alignment = justify distributed."""
xf_format = Format()
xf_format.set_align('justify_distributed')
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed" justifyLastLine="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_22(self):
"""Test the _write_xf() method. Horizontal alignment = indent only."""
xf_format = Format()
xf_format.set_indent()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="left" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_23(self):
"""Test the _write_xf() method. Horizontal alignment = distributed + indent."""
xf_format = Format()
xf_format.set_align('justify_distributed')
xf_format.set_indent()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment horizontal="distributed" indent="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_24(self):
"""Test the _write_xf() method. Alignment = text wrap"""
xf_format = Format()
xf_format.set_text_wrap()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment wrapText="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_25(self):
"""Test the _write_xf() method. Alignment = shrink to fit"""
xf_format = Format()
xf_format.set_shrink()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment shrinkToFit="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_26(self):
"""Test the _write_xf() method. Alignment = reading order"""
xf_format = Format()
xf_format.set_reading_order(1)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment readingOrder="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_27(self):
"""Test the _write_xf() method. Alignment = reading order"""
xf_format = Format()
xf_format.set_reading_order(2)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment readingOrder="2"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_28(self):
"""Test the _write_xf() method. Alignment = rotation"""
xf_format = Format()
xf_format.set_rotation(45)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="45"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_29(self):
"""Test the _write_xf() method. Alignment = rotation"""
xf_format = Format()
xf_format.set_rotation(-45)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="135"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_30(self):
"""Test the _write_xf() method. Alignment = rotation"""
xf_format = Format()
xf_format.set_rotation(270)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="255"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_31(self):
"""Test the _write_xf() method. Alignment = rotation"""
xf_format = Format()
xf_format.set_rotation(90)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="90"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_32(self):
"""Test the _write_xf() method. Alignment = rotation"""
xf_format = Format()
xf_format.set_rotation(-90)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1"><alignment textRotation="180"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_33(self):
"""Test the _write_xf() method. With cell protection."""
xf_format = Format()
xf_format.set_locked(0)
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyProtection="1"><protection locked="0"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_34(self):
"""Test the _write_xf() method. With cell protection."""
xf_format = Format()
xf_format.set_hidden()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyProtection="1"><protection hidden="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_35(self):
"""Test the _write_xf() method. With cell protection."""
xf_format = Format()
xf_format.set_locked(0)
xf_format.set_hidden()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyProtection="1"><protection locked="0" hidden="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_xf_36(self):
"""Test the _write_xf() method. With cell protection + align."""
xf_format = Format()
xf_format.set_align('right')
xf_format.set_locked(0)
xf_format.set_hidden()
self.styles._write_xf(xf_format)
exp = """<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0" applyAlignment="1" applyProtection="1"><alignment horizontal="right"/><protection locked="0" hidden="1"/></xf>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| input_ids: [... truncated ...] | 2.234457 | 7,093
"""
Functions for interface to the SARA client API.
These routines can be used to build client-side applications for searching and
downloading data.
The most obvious way to use these routines is as follows::
urlOpener = saraclient.makeUrlOpener()
sentinel = 2
paramList = ['startDate=2017-05-01', 'completionDate=2017-05-31']
results = saraclient.searchSara(urlOpener, sentinel, paramList)
This would return a list of multi-level dictionary objects created from the JSON output
of the server, one for each matching zipfile. The paramList can be any of the parameters which
the SARA API accepts, these are passed straight through to the API.
The default SARA server is hard-wired in this module. However, the server name, and the protocol
to be used, can both be over-ridden using the following environment variables
| AUSCOPHUB_SARA_PROTOCOL (default https)
| AUSCOPHUB_SARA_SERVERHOST (default copernicus.nci.org.au)
"""
from __future__ import print_function, division
import sys
import os
import json
import copy
import shlex
import subprocess
isPython3 = (sys.version_info.major == 3)
if isPython3:
from urllib.request import build_opener, ProxyHandler
from urllib.error import HTTPError
from urllib.parse import quote as urlquote
else:
from urllib2 import build_opener, ProxyHandler, HTTPError
from urllib import quote as urlquote
SARA_PROTOCOL = os.getenv("AUSCOPHUB_SARA_PROTOCOL", default="https")
SARA_HOST = os.getenv("AUSCOPHUB_SARA_SERVERHOST", default="copernicus.nci.org.au")
SARA_SEARCHSERVER = "{}://{}/sara.server/1.0/api/collections".format(SARA_PROTOCOL, SARA_HOST)
def makeUrlOpener(proxy=None):
"""
Use the crazy urllib2 routines to make a thing which can open a URL, with proxy
handling if required. Return an opener object, which is used as::
reader = opener.open(url)
"""
if proxy is None:
opener = build_opener()
else:
proxyHandler = ProxyHandler({'http':proxy, 'https':proxy})
opener = build_opener(proxyHandler)
return opener
def searchSara(urlOpener, sentinelNumber, paramList):
"""
Search the GA/NCI SARA Resto API, according to a set of parameter
name/value pairs, as given in paramList. The names and values are those
allowed by the API, as described at
| http://copernicus.nci.org.au/sara.server/1.0/api/collections/describe.xml
| http://copernicus.nci.org.au/sara.server/1.0/api/collections/S1/describe.xml
| http://copernicus.nci.org.au/sara.server/1.0/api/collections/S2/describe.xml
| http://copernicus.nci.org.au/sara.server/1.0/api/collections/S3/describe.xml
Each name/value pair is added to a HTTP GET URL as a separate name=value
string, separated by '&', creating a single query.
The overall effect of multiple name/value pairs is that each one further
restricts the results, in other words they are being AND-ed together. Note
that this is not because of the '&' in the constructed URL, that is just the
URL separator character. This means that there is no mechanism for doing an
OR of multiple search conditions.
If sentinelNumber is None, then all Sentinels are searched, using the "all collections"
URL of the API. I am not sure how useful that might be.
Args:
urlOpener: Object as created by the makeUrlOpener() function
sentinelNumber (int): an integer (i.e. 1, 2 or 3), identifying which Sentinel family
paramList (list): List of name=value strings, corresponding to the query parameters
defined by the SARA API.
Returns:
The return value is a list of the matching datasets. Each entry is a feature object,
as given by the JSON output of the SARA API. This list is built up from multiple
queries, because the server pages its output, so the list is just the feature objects,
without all the stuff which would be repeated per page.
"""
url = makeQueryUrl(sentinelNumber, paramList)
(results, httpErrorStr) = readJsonUrl(urlOpener, url)
if httpErrorStr is not None:
print("Error querying URL:", url, file=sys.stderr)
raise SaraClientError(httpErrorStr)
# Start with the first page of results.
allFeatures = results['features']
# The API only gives us a page of results at a time. So, we have to do repeated queries,
# with increasing page numbers, to get all pages. We can't use the totalResults field to work
# out how many pages there ought to be, because Resto does something crazy with that,
# so instead we have to just keep going until we don't get any more results. All a bit
# unsatisfactory, but this is what we have.
finished = False
page = 2
while not finished:
tmpParamList = copy.copy(paramList)
tmpParamList.append('page={}'.format(page))
url = makeQueryUrl(sentinelNumber, tmpParamList)
(results, httpErrorStr) = readJsonUrl(urlOpener, url)
features = results['features']
if len(features) > 0:
allFeatures.extend(features)
page += 1
else:
finished = True
return allFeatures
def makeQueryUrl(sentinelNumber, paramList):
"""
Return a full URL for the query defined by the given parameters
"""
# No URL encoding for these characters
noURLencode = "=:/(),"
queryStr = '&'.join([urlquote(p, safe=noURLencode) for p in paramList])
if sentinelNumber is None:
url = "{}/search.json?{}".format(SARA_SEARCHSERVER, queryStr)
else:
url = "{}/S{}/search.json?{}".format(SARA_SEARCHSERVER, sentinelNumber, queryStr)
return url
def readJsonUrl(urlOpener, url):
"""
Read the contents of the given URL, returning the object created from the
JSON which the server returns
"""
try:
reader = urlOpener.open(url)
jsonStr = reader.read()
# Ensure that we have a str object, but in a robust way. Mainly required in Python-3.
if hasattr(jsonStr, 'decode'):
jsonStr = jsonStr.decode('utf-8')
results = json.loads(jsonStr)
httpErrorStr = None
except HTTPError as e:
results = None
httpErrorStr = str(e)
return (results, httpErrorStr)
def simplifyFullFeature(feature):
"""
Given a full feature object as returned by the server (a GeoJSON-compliant object),
extract just the few interesting pieces, and return a single dictionary of them.
The names are ones I made up, and do not comply with any particular standards or anything.
They are intended purely for internal use within this software.
"""
d = {}
for localName in [FEATUREATTR_DOWNLOADURL, FEATUREATTR_MD5, FEATUREATTR_SIZE,
FEATUREATTR_ESAID, FEATUREATTR_CLOUDCOVER]:
d[localName] = getFeatAttr(feature, localName)
return d
FEATUREATTR_DOWNLOADURL = "downloadurl"
FEATUREATTR_MD5 = "md5"
FEATUREATTR_SIZE = "size"
FEATUREATTR_ESAID = "esaid"
FEATUREATTR_CLOUDCOVER = "cloud"
def getFeatAttr(feature, localName):
"""
Given a feature dictionary as returned by the SARA API, and the local name for some
attribute of interest, this function knows how to navigate through the feature
structures to find the relevant attribute.
The main reason for this function is to give a simple, flat namespace, without requiring
that other parts of the code decompose the multi-level structure of the feature objects.
Note that the local names are NOT the same as the names in the feature structure, but
are simple local names used unambiguously. Only a subset of attributes are handled.
"""
value = None
properties = feature['properties']
download = properties['services']['download']
if localName == FEATUREATTR_DOWNLOADURL:
value = download['url']
elif localName == FEATUREATTR_MD5:
checksum = download['checksum']
checksumParts = checksum.split(':')
if checksumParts[0] == "md5":
value = checksumParts[1]
elif localName == FEATUREATTR_SIZE:
value = download['size']
elif localName == FEATUREATTR_ESAID:
value = properties['productIdentifier']
elif localName == FEATUREATTR_CLOUDCOVER:
value = properties['cloudCover']
return value
def getRemoteFilename(downloadUrl, proxy):
"""
Given the SARA download URL, ask the server what the actual filename is.
At the moment, this uses "curl -I" to do the work, but I would much prefer to do
this directly in Python. In theory this should be possible, but I can't get
the authentication to work. When I do, I will change this code, and thus may
require extra arguments. I also suspect that the re-directs which the SARA server
does will cause me trouble, but I have yet to get to that point.
In the meantime, this is slow, but at least it works.
I am a bit unsure about this approach......
"""
cmdWords = ["curl", "--silent", "-n", "-L", "-I", downloadUrl]
if proxy is not None:
cmdWords.extend(["-x", proxy])
proc = subprocess.Popen(cmdWords, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
(stdout, stderr) = proc.communicate()
# I should really parse this with the standard library tools for doing so. However,
# because of the redirections the server does, this is actually several sets of HTTP headers
# kind of tacked together, which means there are extra traps. So, given that I have
# to handle at least part of it myself, I decided to just handle the whole lot.
stdoutLines = stdout.strip().split('\n')
filename = None
for line in stdoutLines:
if line.startswith('Content-Disposition: '):
words = shlex.split(line)
for word in words:
if word.startswith("filename="):
fields = word.split('=')
filename = fields[1]
return filename
| input_ids: [... truncated ...] | 2.77878 | 3,657
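Continuing the searchSara usage example from the saraclient module docstring above, the per-feature helpers can flatten the results (a sketch using only names defined in that module):

for feature in results:
    info = simplifyFullFeature(feature)
    print(info[FEATUREATTR_ESAID], info[FEATUREATTR_DOWNLOADURL], info[FEATUREATTR_SIZE])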
from collections.abc import Iterable
import numpy as np
import pandas as pd
import warnings
from scipy.sparse import issparse, csr_matrix
from sklearn.decomposition import FastICA
from sklearn.utils import sparsefuncs
import anndata
from anndata import AnnData
from typing import Optional, Union, Callable
from .cell_cycle import cell_cycle_scores
from ..tools.utils import update_dict
from .utils import (
convert2symbol,
pca,
clusters_stats,
cook_dist,
get_layer_keys,
get_shared_counts,
get_svr_filter,
Freeman_Tukey,
merge_adata_attrs,
sz_util,
normalize_util,
get_sz_exprs,
unique_var_obs_adata,
layers2csr,
collapse_adata,
NTR,
detect_datatype,
basic_stats,
add_noise_to_duplicates,
gene_exp_fraction,
)
from ..dynamo_logger import (
main_info,
main_critical,
main_warning,
LoggerManager,
)
from ..utils import copy_adata
from ..configuration import DynamoAdataConfig
def szFactor(
adata_ori: anndata.AnnData,
layers: Union[str, list] = "all",
total_layers: Union[list, None] = None,
splicing_total_layers: bool = False,
X_total_layers: bool = False,
locfunc: Callable = np.nanmean,
round_exprs: bool = False,
method: str = "median",
scale_to: Union[float, None] = None,
use_all_genes_cells: bool = True,
genes_use_for_norm: Union[list, None] = None,
) -> anndata.AnnData:
"""Calculate the size factor of the each cell using geometric mean of total UMI across cells for a AnnData object.
This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata_ori: :class:`~anndata.AnnData`.
AnnData object.
layers: str or list (default: `all`)
The layer(s) to be normalized. Default is `all`, including RNA (X, raw) or spliced, unspliced, protein, etc.
total_layers: list or None (default `None`)
The layer(s) that can be summed up to get the total mRNA. for example, ["spliced", "unspliced"], ["uu", "ul"
, "su", "sl"] or ["new", "old"], etc.
splicing_total_layers: bool (default `False`)
Whether to also normalize spliced / unspliced layers by size factor from total RNA.
X_total_layers: bool (default `False`)
Whether to also normalize adata.X by size factor from total RNA.
locfunc: `function` (default: `np.nanmean`)
The function to normalize the data.
round_exprs: `bool` (default: `False`)
A logic flag to determine whether the gene expression should be rounded into integers.
method: `str` (default: `mean-geometric-mean-total`)
The method used to calculate the expected total reads / UMI used in size factor calculation.
Only `mean-geometric-mean-total` / `geometric` and `median` are supported. When `median` is used, `locfunc`
will be replaced with `np.nanmedian`.
scale_to: `float` or None (default: `None`)
The final total expression for each cell that will be scaled to.
use_all_genes_cells: `bool` (default: `True`)
A logic flag to determine whether all cells and genes should be used for the size factor calculation.
genes_use_for_norm: `list` (default: `None`)
A list of gene names that will be used to calculate total RNA for each cell and then the size factor for
normalization. This is often very useful when you want to use only the host genes to normalize the dataset
in a virus infection experiment (i.e. CMV or SARS-CoV-2 infection).
Returns
-------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with the `Size_Factor` (`layer_` + `Size_Factor`) column(s) added to
        the obs attribute.
"""
if use_all_genes_cells:
# let us ignore the `inplace` parameter in pandas.Categorical.remove_unused_categories warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
adata = adata_ori if genes_use_for_norm is None else adata_ori[:, genes_use_for_norm]
else:
cell_inds = adata_ori.obs.use_for_pca if "use_for_pca" in adata_ori.obs.columns else adata_ori.obs.index
filter_list = ["use_for_pca", "pass_basic_filter"]
filter_checker = [i in adata_ori.var.columns for i in filter_list]
which_filter = np.where(filter_checker)[0]
gene_inds = adata_ori.var[filter_list[which_filter[0]]] if len(which_filter) > 0 else adata_ori.var.index
adata = adata_ori[cell_inds, :][:, gene_inds]
if genes_use_for_norm is not None:
# let us ignore the `inplace` parameter in pandas.Categorical.remove_unused_categories warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
adata = adata[:, adata.var_names.intersection(genes_use_for_norm)]
if total_layers is not None:
if not isinstance(total_layers, list):
total_layers = [total_layers]
if len(set(total_layers).difference(adata.layers.keys())) == 0:
total = None
for t_key in total_layers:
total = adata.layers[t_key] if total is None else total + adata.layers[t_key]
adata.layers["_total_"] = total
if type(layers) is str:
layers = [layers]
layers.extend(["_total_"])
layers = get_layer_keys(adata, layers)
if "raw" in layers and adata.raw is None:
adata.raw = adata.copy()
excluded_layers = []
if not X_total_layers:
excluded_layers.extend(["X"])
if not splicing_total_layers:
excluded_layers.extend(["spliced", "unspliced"])
for layer in layers:
if layer in excluded_layers:
sfs, cell_total = sz_util(
adata,
layer,
round_exprs,
method,
locfunc,
total_layers=None,
scale_to=scale_to,
)
else:
sfs, cell_total = sz_util(
adata,
layer,
round_exprs,
method,
locfunc,
total_layers=total_layers,
scale_to=scale_to,
)
sfs[~np.isfinite(sfs)] = 1
if layer == "raw":
adata.obs[layer + "_Size_Factor"] = sfs
adata.obs["Size_Factor"] = sfs
adata.obs["initial_cell_size"] = cell_total
elif layer == "X":
adata.obs["Size_Factor"] = sfs
adata.obs["initial_cell_size"] = cell_total
elif layer == "_total_":
adata.obs["total_Size_Factor"] = sfs
adata.obs["initial" + layer + "cell_size"] = cell_total
del adata.layers["_total_"]
else:
adata.obs[layer + "_Size_Factor"] = sfs
adata.obs["initial_" + layer + "_cell_size"] = cell_total
adata_ori = merge_adata_attrs(adata_ori, adata, attr="obs")
return adata_ori
def normalize_expr_data(
adata: anndata.AnnData,
layers: str = "all",
total_szfactor: str = "total_Size_Factor",
splicing_total_layers: str = False,
X_total_layers: str = False,
norm_method: Union[Callable, None] = None,
pseudo_expr: int = 1,
relative_expr: bool = True,
keep_filtered: bool = True,
recalc_sz: bool = False,
sz_method: str = "median",
scale_to: Union[float, None] = None,
) -> anndata.AnnData:
"""Normalize the gene expression value for the AnnData object
This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
layers: `str` (default: `all`)
The layer(s) to be normalized. Default is all, including RNA (X, raw) or spliced, unspliced, protein, etc.
total_szfactor: `str` (default: `total_Size_Factor`)
The column name in the .obs attribute that corresponds to the size factor for the total mRNA.
splicing_total_layers: bool (default `False`)
Whether to also normalize spliced / unspliced layers by size factor from total RNA.
X_total_layers: bool (default `False`)
Whether to also normalize adata.X by size factor from total RNA.
norm_method: `function` or None (default: `None`)
The method used to normalize data. Can be either function `np.log1p, np.log2 or any other functions or
string `clr`. By default, only .X will be size normalized and log1p transformed while data in other layers
will only be size normalized.
pseudo_expr: `int` (default: `1`)
A pseudocount added to the gene expression value before log/log2 normalization.
relative_expr: `bool` (default: `True`)
A logic flag to determine whether we need to divide gene expression values first by size factor before
normalization.
keep_filtered: `bool` (default: `True`)
A logic flag to determine whether we will only store feature genes in the adata object. If it is False, size
factor will be recalculated only for the selected feature genes.
recalc_sz: `bool` (default: `False`)
A logic flag to determine whether we need to recalculate size factor based on selected genes before
normalization.
sz_method: `str` (default: `mean-geometric-mean-total`)
The method used to calculate the expected total reads / UMI used in size factor calculation.
Only `mean-geometric-mean-total` / `geometric` and `median` are supported. When `median` is used, `locfunc`
will be replaced with `np.nanmedian`.
scale_to: `float` or None (default: `None`)
The final total expression for each cell that will be scaled to.
Returns
-------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with normalized expression values stored for the requested layers.
if recalc_sz:
if "use_for_pca" in adata.var.columns and keep_filtered is False:
adata = adata[:, adata.var.loc[:, "use_for_pca"]]
adata.obs = adata.obs.loc[:, ~adata.obs.columns.str.contains("Size_Factor")]
layers = get_layer_keys(adata, layers)
layer_sz_column_names = [i + "_Size_Factor" for i in set(layers).difference("X")]
layer_sz_column_names.extend(["Size_Factor"])
layers_to_sz = list(set(layer_sz_column_names).difference(adata.obs.keys()))
if len(layers_to_sz) > 0:
layers = pd.Series(layers_to_sz).str.split("_Size_Factor", expand=True).iloc[:, 0].tolist()
if "Size_Factor" in layers:
layers[np.where(np.array(layers) == "Size_Factor")[0][0]] = "X"
szFactor(
adata,
layers=layers,
locfunc=np.nanmean,
round_exprs=True,
method=sz_method,
scale_to=scale_to,
)
excluded_layers = []
if not X_total_layers:
excluded_layers.extend(["X"])
if not splicing_total_layers:
excluded_layers.extend(["spliced", "unspliced"])
for layer in layers:
if layer in excluded_layers:
szfactors, CM = get_sz_exprs(adata, layer, total_szfactor=None)
else:
szfactors, CM = get_sz_exprs(adata, layer, total_szfactor=total_szfactor)
if norm_method is None and layer == "X":
CM = normalize_util(CM, szfactors, relative_expr, pseudo_expr, np.log1p)
elif norm_method in [np.log1p, np.log, np.log2, Freeman_Tukey, None] and layer != "protein":
CM = normalize_util(CM, szfactors, relative_expr, pseudo_expr, norm_method)
elif layer == "protein": # norm_method == 'clr':
if norm_method != "clr":
main_warning(
"For protein data, log transformation is not recommended. Using clr normalization by default."
)
"""This normalization implements the centered log-ratio (CLR) normalization from Seurat which is computed
for each gene (M Stoeckius, 2017).
"""
CM = CM.T
n_feature = CM.shape[1]
for i in range(CM.shape[0]):
x = CM[i].A if issparse(CM) else CM[i]
res = np.log1p(x / (np.exp(np.nansum(np.log1p(x[x > 0])) / n_feature)))
res[np.isnan(res)] = 0
# res[res > 100] = 100
# no .A is required # https://stackoverflow.com/questions/28427236/set-row-of-csr-matrix
CM[i] = res
CM = CM.T
else:
main_warning(norm_method + " is not implemented yet")
if layer in ["raw", "X"]:
adata.X = CM
elif layer == "protein" and "protein" in adata.obsm_keys():
adata.obsm["X_protein"] = CM
else:
adata.layers["X_" + layer] = CM
adata.uns["pp"]["norm_method"] = norm_method.__name__ if callable(norm_method) else norm_method
return adata
def Gini(adata, layers="all"):
"""Calculate the Gini coefficient of a numpy array.
https://github.com/thomasmaxwellnorman/perturbseq_demo/blob/master/perturbseq/util.py
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object
layers: str (default: None)
The layer(s) to be normalized. Default is all, including RNA (X, raw) or spliced, unspliced, protein, etc.
Returns
-------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with the Gini score of each requested layer (including .X) stored in
        the corresponding var columns (layer + '_gini').
"""
# From: https://github.com/oliviaguest/gini
# based on bottom eq: http://www.statsdirect.com/help/content/image/stat0206_wmf.gif
# from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
layers = get_layer_keys(adata, layers)
for layer in layers:
if layer == "raw":
CM = adata.raw.X
elif layer == "X":
CM = adata.X
elif layer == "protein":
if "protein" in adata.obsm_keys():
CM = adata.obsm[layer]
else:
continue
else:
CM = adata.layers[layer]
n_features = adata.shape[1]
gini = np.zeros(n_features)
for i in np.arange(n_features):
# all values are treated equally, arrays must be 1d
            cur_cm = CM[:, i].A if issparse(CM) else CM[:, i]
            if np.amin(cur_cm) < 0:
                cur_cm -= np.amin(cur_cm)  # shift so that values are non-negative
            cur_cm += 0.0000001  # values cannot be 0
cur_cm = np.sort(cur_cm) # values must be sorted
# index per array element
index = np.arange(1, cur_cm.shape[0] + 1)
n = cur_cm.shape[0] # number of array elements
gini[i] = (np.sum((2 * index - n - 1) * cur_cm)) / (n * np.sum(cur_cm)) # Gini coefficient
if layer in ["raw", "X"]:
adata.var["gini"] = gini
else:
adata.var[layer + "_gini"] = gini
return adata
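# For reference, the loop above implements the "bottom equation" form of the
# Gini coefficient for the sorted, strictly positive values x_1 <= ... <= x_n:
#
#     G = sum_i (2 * i - n - 1) * x_i / (n * sum_i x_i)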
def parametric_dispersion_fit(disp_table: pd.DataFrame, initial_coefs: np.ndarray = np.array([1e-6, 1])):
"""This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
disp_table: :class:`~pandas.DataFrame`
AnnData object
initial_coefs: :class:`~numpy.ndarray`
Initial parameters for the gamma fit of the dispersion parameters.
Returns
-------
fit: :class:`~statsmodels.api.formula.glm`
A statsmodels fitting object.
coefs: :class:`~numpy.ndarray`
The two resulting gamma fitting coefficients.
good: :class:`~pandas.DataFrame`
The subsetted dispersion table that is subjected to Gamma fitting.
"""
import statsmodels.api as sm
coefs = initial_coefs
iter = 0
while True:
residuals = disp_table["disp"] / (coefs[0] + coefs[1] / disp_table["mu"])
good = disp_table.loc[(residuals > initial_coefs[0]) & (residuals < 10000), :]
# https://stats.stackexchange.com/questions/356053/the-identity-link-function-does-not-respect-the-domain-of-the
# -gamma-family
fit = sm.formula.glm(
"disp ~ I(1 / mu)",
data=good,
family=sm.families.Gamma(link=sm.genmod.families.links.identity),
        ).fit(start_params=coefs)
oldcoefs = coefs
coefs = fit.params
if coefs[0] < initial_coefs[0]:
coefs[0] = initial_coefs[0]
if coefs[1] < 0:
main_warning("Parametric dispersion fit may be failed.")
        if np.sum(np.log(coefs / oldcoefs) ** 2) < coefs[0]:
break
iter += 1
if iter > 10:
main_warning("Dispersion fit didn't converge")
break
if not np.all(coefs > 0):
main_warning("Parametric dispersion fit may be failed.")
return fit, coefs, good
def disp_calc_helper_NB(adata: anndata.AnnData, layers: str = "X", min_cells_detected: int = 1) -> pd.DataFrame:
"""This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object
    min_cells_detected: `int` (default: None)
        The minimal required number of cells with expression for selecting genes for dispersion fitting.
layer: `str`
The layer of data used for dispersion fitting.
Returns
-------
res: :class:`~pandas.DataFrame`
A pandas dataframe with mu, dispersion for each gene that passes filters.
"""
layers = get_layer_keys(adata, layers=layers, include_protein=False)
res_list = []
for layer in layers:
if layer == "raw":
CM = adata.raw.X
szfactors = adata.obs[layer + "Size_Factor"][:, None]
elif layer == "X":
CM = adata.X
szfactors = adata.obs["Size_Factor"][:, None]
else:
CM = adata.layers[layer]
szfactors = adata.obs[layer + "Size_Factor"][:, None]
if issparse(CM):
CM.data = np.round(CM.data, 0)
rounded = CM
else:
rounded = CM.round().astype("int")
lowerDetectedLimit = adata.uns["lowerDetectedLimit"] if "lowerDetectedLimit" in adata.uns.keys() else 1
nzGenes = (rounded > lowerDetectedLimit).sum(axis=0)
nzGenes = nzGenes > min_cells_detected
nzGenes = nzGenes.A1 if issparse(rounded) else nzGenes
if layer.startswith("X_"):
x = rounded[:, nzGenes]
else:
x = (
rounded[:, nzGenes].multiply(csr_matrix(1 / szfactors))
if issparse(rounded)
else rounded[:, nzGenes] / szfactors
)
xim = np.mean(1 / szfactors) if szfactors is not None else 1
f_expression_mean = x.mean(axis=0)
# For NB: Var(Y) = mu * (1 + mu / k)
# x.A.var(axis=0, ddof=1)
        f_expression_var = (
            (x.multiply(x).mean(0).A1 - f_expression_mean.A1 ** 2) * x.shape[0] / (x.shape[0] - 1)
            if issparse(x)
            else x.var(axis=0, ddof=1)
        )  # unbiased sample variance (n - 1 in the denominator)
# https://scialert.net/fulltext/?doi=ajms.2010.1.15 method of moments
disp_guess_meth_moments = f_expression_var - xim * f_expression_mean # variance - mu
disp_guess_meth_moments = disp_guess_meth_moments / np.power(
f_expression_mean, 2
) # this is dispersion parameter (1/k)
res = pd.DataFrame(
{
"mu": np.array(f_expression_mean).flatten(),
"disp": np.array(disp_guess_meth_moments).flatten(),
}
)
res.loc[res["mu"] == 0, "mu"] = None
res.loc[res["mu"] == 0, "disp"] = None
res.loc[res["disp"] < 0, "disp"] = 0
res["gene_id"] = adata.var_names[nzGenes]
res_list.append(res)
return layers, res_list
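# The method-of-moments step above inverts the negative-binomial variance
# relation; writing disp = 1/k and xi for the mean reciprocal size factor (xim):
#
#     Var(Y) = mu + mu^2 / k   =>   1/k ~= (Var(Y) - xi * mu) / mu^2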
def top_table(adata: anndata.AnnData, layer: str = "X", mode: str = "dispersion") -> pd.DataFrame:
"""This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object
Returns
-------
disp_df: :class:`~pandas.DataFrame`
The data frame with the gene_id, mean_expression, dispersion_fit and dispersion_empirical as the columns.
"""
layer = get_layer_keys(adata, layers=layer, include_protein=False)[0]
if layer in ["X"]:
key = "dispFitInfo"
else:
key = layer + "_dispFitInfo"
if mode == "dispersion":
if adata.uns[key] is None:
estimate_dispersion(adata, layers=[layer])
if adata.uns[key] is None:
raise KeyError(
"Error: for adata.uns.key=%s, no dispersion model found. Please call estimate_dispersion() before calling this function"
% key
)
top_df = pd.DataFrame(
{
"gene_id": adata.uns[key]["disp_table"]["gene_id"],
"mean_expression": adata.uns[key]["disp_table"]["mu"],
"dispersion_fit": adata.uns[key]["disp_func"](adata.uns[key]["disp_table"]["mu"]),
"dispersion_empirical": adata.uns[key]["disp_table"]["disp"],
}
)
top_df = top_df.set_index("gene_id")
elif mode == "gini":
top_df = adata.var[layer + "_gini"]
return top_df
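# Illustrative usage of top_table (a sketch; assumes `adata` was size-factor
# normalized and estimate_dispersion() below has been run):
#
#     disp_df = top_table(adata, layer="X", mode="dispersion")
#     # genes whose empirical dispersion exceeds the fitted trend:
#     noisy_genes = disp_df.query("dispersion_empirical > dispersion_fit").index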
def vstExprs(
adata: anndata.AnnData,
expr_matrix: Union[np.ndarray, None] = None,
round_vals: bool = True,
) -> np.ndarray:
"""This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object
    expr_matrix: :class:`~numpy.ndarray` or `None` (default: `None`)
        A matrix of values to transform. Must be normalized (e.g. by size factors) already. This function doesn't
        do this for you.
round_vals: `bool`
Whether to round expression values to the nearest integer before applying the transformation.
Returns
-------
res: :class:`~numpy.ndarray`
A numpy array of the gene expression after VST.
"""
fitInfo = adata.uns["dispFitInfo"]
coefs = fitInfo["coefs"]
if expr_matrix is None:
ncounts = adata.X
if round_vals:
if issparse(ncounts):
ncounts.data = np.round(ncounts.data, 0)
else:
ncounts = ncounts.round().astype("int")
else:
ncounts = expr_matrix
res = vst(ncounts.toarray()) if issparse(ncounts) else vst(ncounts)
return res
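# Illustrative usage of vstExprs (a sketch; assumes estimate_dispersion() was
# run so that adata.uns["dispFitInfo"] exists; `M` is a hypothetical, already
# size-normalized count matrix):
#
#     vst_X = vstExprs(adata)                 # variance-stabilize adata.X
#     vst_M = vstExprs(adata, expr_matrix=M)  # transform a user-supplied matrix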
def estimate_dispersion(
adata: anndata.AnnData,
layers: str = "X",
modelFormulaStr: str = "~ 1",
min_cells_detected: int = 1,
removeOutliers: bool = False,
) -> anndata.AnnData:
"""This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object
layers: `str` (default: 'X')
        The layer(s) to be used for calculating dispersion. Default is X if there are no spliced layers.
modelFormulaStr: `str`
The model formula used to calculate dispersion parameters. Not used.
min_cells_detected: `int`
The minimum number of cells detected for calculating the dispersion.
    removeOutliers: `bool` (default: `False`)
Whether to remove outliers when performing dispersion fitting.
Returns
-------
adata: :class:`~anndata.AnnData`
An updated annData object with dispFitInfo added to uns attribute as a new key.
"""
import re
logger = LoggerManager.gen_logger("dynamo-preprocessing")
# mu = None
model_terms = [x.strip() for x in re.compile("~|\\*|\\+").split(modelFormulaStr)]
model_terms = list(set(model_terms) - set([""]))
cds_pdata = adata.obs # .loc[:, model_terms]
cds_pdata["rowname"] = cds_pdata.index.values
layers, disp_tables = disp_calc_helper_NB(adata[:, :], layers, min_cells_detected)
# disp_table['disp'] = np.random.uniform(0, 10, 11)
# disp_table = cds_pdata.apply(disp_calc_helper_NB(adata[:, :], min_cells_detected))
# cds_pdata <- dplyr::group_by_(dplyr::select_(rownames_to_column(pData(cds)), "rowname", .dots=model_terms), .dots
# =model_terms)
# disp_table <- as.data.frame(cds_pdata %>% do(disp_calc_helper_NB(cds[,.$rowname], cds@expressionFamily, min_cells_
# detected)))
for ind in range(len(layers)):
layer, disp_table = layers[ind], disp_tables[ind]
if disp_table is None:
raise Exception("Parametric dispersion fitting failed, please set a different lowerDetectionLimit")
disp_table = disp_table.loc[np.where(disp_table["mu"] != np.nan)[0], :]
res = parametric_dispersion_fit(disp_table)
fit, coefs, good = res[0], res[1], res[2]
if removeOutliers:
# influence = fit.get_influence().cooks_distance()
# #CD is the distance and p is p-value
# (CD, p) = influence.cooks_distance
CD = cook_dist(fit, 1 / good["mu"][:, None], good)
cooksCutoff = 4 / good.shape[0]
print("Removing ", len(CD[CD > cooksCutoff]), " outliers")
outliers = CD > cooksCutoff
# use CD.index.values? remove genes that lost when doing parameter fitting
lost_gene = set(good.index.values).difference(set(range(len(CD))))
outliers[lost_gene] = True
res = parametric_dispersion_fit(good.loc[~outliers, :])
fit, coefs = res[0], res[1]
if layer == "X":
logger.info_insert_adata("dispFitInfo", "uns")
adata.uns["dispFitInfo"] = {
"disp_table": good,
"disp_func": ans,
"coefs": coefs,
}
else:
logger.info_insert_adata(layer + "_dispFitInfo", "uns")
adata.uns[layer + "_dispFitInfo"] = {
"disp_table": good,
"disp_func": ans,
"coefs": coefs,
}
return adata
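# Illustrative usage of estimate_dispersion (a sketch):
#
#     adata = estimate_dispersion(adata, layers="X", removeOutliers=True)
#     adata.uns["dispFitInfo"]["coefs"]   # parametric trend coefficients
#     top_table(adata)                    # tabulated fit vs. empirical dispersion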
def SVRs(
adata_ori: anndata.AnnData,
filter_bool: Union[np.ndarray, None] = None,
layers: str = "X",
relative_expr: bool = True,
total_szfactor: str = "total_Size_Factor",
min_expr_cells: int = 0,
min_expr_avg: int = 0,
max_expr_avg: int = 0,
svr_gamma: Union[float, None] = None,
winsorize: bool = False,
winsor_perc: tuple = (1, 99.5),
sort_inverse: bool = False,
use_all_genes_cells: bool = False,
) -> anndata.AnnData:
"""This function is modified from https://github.com/velocyto-team/velocyto.py/blob/master/velocyto/analysis.py
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
filter_bool: :class:`~numpy.ndarray` (default: None)
A boolean array from the user to select genes for downstream analysis.
layers: `str` (default: 'X')
        The layer(s) to be used for calculating dispersion score via support vector regression (SVR). Default is X
        if there are no spliced layers.
relative_expr: `bool` (default: `True`)
A logic flag to determine whether we need to divide gene expression values first by size factor before run
SVR.
total_szfactor: `str` (default: `total_Size_Factor`)
The column name in the .obs attribute that corresponds to the size factor for the total mRNA.
    min_expr_cells: `int` (default: `0`)
        The minimum number of cells that express a gene for it to be considered in the fit.
    min_expr_avg: `int` (default: `0`)
        The minimum average expression of genes across cells accepted.
    max_expr_avg: `float` (default: `0`)
        The maximum average expression of genes across cells accepted before treating them as house-keeping genes or
        outliers for removal.
svr_gamma: `float` or None (default: `None`)
the gamma hyper-parameter of the SVR.
winsorize: `bool` (default: `False`)
        Whether to winsorize the data for the cv vs mean model.
winsor_perc: `tuple` (default: `(1, 99.5)`)
the up and lower bound of the winsorization.
sort_inverse: `bool` (default: `False`)
if True it sorts genes from less noisy to more noisy (to use for size estimation not for feature selection).
use_all_genes_cells: `bool` (default: `False`)
A logic flag to determine whether all cells and genes should be used for the size factor calculation.
Returns
-------
adata: :class:`~anndata.AnnData`
An updated annData object with `log_m`, `log_cv`, `score` added to .obs columns and `SVR` added to uns
attribute as a new key.
"""
from sklearn.svm import SVR
layers = get_layer_keys(adata_ori, layers)
if use_all_genes_cells:
# let us ignore the `inplace` parameter in pandas.Categorical.remove_unused_categories warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
adata = adata_ori[:, filter_bool].copy() if filter_bool is not None else adata_ori
else:
cell_inds = adata_ori.obs.use_for_pca if "use_for_pca" in adata_ori.obs.columns else adata_ori.obs.index
filter_list = ["use_for_pca", "pass_basic_filter"]
filter_checker = [i in adata_ori.var.columns for i in filter_list]
which_filter = np.where(filter_checker)[0]
gene_inds = adata_ori.var[filter_list[which_filter[0]]] if len(which_filter) > 0 else adata_ori.var.index
# let us ignore the `inplace` parameter in pandas.Categorical.remove_unused_categories warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
adata = adata_ori[cell_inds, gene_inds].copy()
filter_bool = filter_bool[gene_inds]
for layer in layers:
if layer == "raw":
CM = adata.X.copy() if adata.raw is None else adata.raw
szfactors = (
adata.obs[layer + "_Size_Factor"].values[:, None]
if adata.raw.X is not None
else adata.obs["Size_Factor"].values[:, None]
)
elif layer == "X":
CM = adata.X.copy()
szfactors = adata.obs["Size_Factor"].values[:, None]
elif layer == "protein":
if "protein" in adata.obsm_keys():
CM = adata.obsm["protein"].copy()
szfactors = adata.obs[layer + "_Size_Factor"].values[:, None]
else:
continue
else:
CM = adata.layers[layer].copy()
szfactors = (
adata.obs[layer + "_Size_Factor"].values[:, None]
if layer + "_Size_Factor" in adata.obs.columns
else None
)
if total_szfactor is not None and total_szfactor in adata.obs.keys():
szfactors = adata.obs[total_szfactor].values[:, None] if total_szfactor in adata.obs.columns else None
if szfactors is not None and relative_expr:
if issparse(CM):
sparsefuncs.inplace_row_scale(CM, 1 / szfactors)
else:
CM /= szfactors
if winsorize:
            if min_expr_cells <= ((100 - winsor_perc[1]) * CM.shape[0] * 0.01):
                min_expr_cells = int(np.ceil((100 - winsor_perc[1]) * CM.shape[0] * 0.01)) + 2
detected_bool = np.array(
((CM > 0).sum(0) >= min_expr_cells) & (CM.mean(0) <= max_expr_avg) & (CM.mean(0) >= min_expr_avg)
).flatten()
valid_CM = CM[:, detected_bool]
if winsorize:
down, up = (
np.percentile(valid_CM.A, winsor_perc, 0)
if issparse(valid_CM)
else np.percentile(valid_CM, winsor_perc, 0)
)
            Sfw = (
                np.clip(valid_CM.A, down[None, :], up[None, :])
                if issparse(valid_CM)
                else np.clip(valid_CM, down[None, :], up[None, :])
            )
mu = Sfw.mean(0)
sigma = Sfw.std(0, ddof=1)
else:
mu = np.array(valid_CM.mean(0)).flatten()
sigma = (
np.array(
np.sqrt(
(valid_CM.multiply(valid_CM).mean(0).A1 - (mu) ** 2)
# * (adata.n_obs)
# / (adata.n_obs - 1)
)
)
if issparse(valid_CM)
else valid_CM.std(0, ddof=1)
)
cv = sigma / mu
log_m = np.array(np.log2(mu)).flatten()
log_cv = np.array(np.log2(cv)).flatten()
log_m[mu == 0], log_cv[mu == 0] = 0, 0
if svr_gamma is None:
svr_gamma = 150.0 / len(mu)
# Fit the Support Vector Regression
clf = SVR(gamma=svr_gamma)
clf.fit(log_m[:, None], log_cv)
fitted_fun = clf.predict
ff = fitted_fun(log_m[:, None])
score = log_cv - ff
if sort_inverse:
score = -score
prefix = "" if layer == "X" else layer + "_"
(adata.var[prefix + "log_m"], adata.var[prefix + "log_cv"], adata.var[prefix + "score"],) = (
np.nan,
np.nan,
-np.inf,
)
(
adata.var.loc[detected_bool, prefix + "log_m"],
adata.var.loc[detected_bool, prefix + "log_cv"],
adata.var.loc[detected_bool, prefix + "score"],
) = (
np.array(log_m).flatten(),
np.array(log_cv).flatten(),
np.array(score).flatten(),
)
key = "velocyto_SVR" if layer == "raw" or layer == "X" else layer + "_velocyto_SVR"
adata_ori.uns[key] = {"SVR": fitted_fun}
adata_ori = merge_adata_attrs(adata_ori, adata, attr="var")
return adata_ori
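# --- Illustrative sketch (not part of the pipeline) --------------------------
# The core of SVRs() is a support vector regression of log CV on log mean; genes
# above the fitted trend get a positive `score`. A self-contained toy version on
# synthetic Poisson-like data (all names here are illustrative):
def _example_mean_cv_svr():
    import numpy as np
    from sklearn.svm import SVR

    rng = np.random.default_rng(0)
    mu = rng.lognormal(mean=1.0, sigma=1.0, size=500)
    cv = rng.lognormal(sigma=0.2, size=500) / np.sqrt(mu)  # CV ~ 1/sqrt(mu) trend
    log_m, log_cv = np.log2(mu), np.log2(cv)
    clf = SVR(gamma=150.0 / len(mu))  # same gamma heuristic as SVRs() above
    clf.fit(log_m[:, None], log_cv)
    score = log_cv - clf.predict(log_m[:, None])  # > 0: noisier than the trend
    return score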
def filter_cells(
adata: anndata.AnnData,
filter_bool: Union[np.ndarray, None] = None,
layer: str = "all",
keep_filtered: bool = False,
min_expr_genes_s: int = 50,
min_expr_genes_u: int = 25,
min_expr_genes_p: int = 1,
max_expr_genes_s: float = np.inf,
max_expr_genes_u: float = np.inf,
max_expr_genes_p: float = np.inf,
shared_count: Union[int, None] = None,
) -> anndata.AnnData:
"""Select valid cells based on a collection of filters.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
filter_bool: :class:`~numpy.ndarray` (default: `None`)
A boolean array from the user to select cells for downstream analysis.
layer: `str` (default: `all`)
The data from a particular layer (include X) used for feature selection.
keep_filtered: `bool` (default: `False`)
Whether to keep cells that don't pass the filtering in the adata object.
min_expr_genes_s: `int` (default: `50`)
Minimal number of genes with expression for a cell in the data from the spliced layer (also used for X).
min_expr_genes_u: `int` (default: `25`)
Minimal number of genes with expression for a cell in the data from the unspliced layer.
min_expr_genes_p: `int` (default: `1`)
        Minimal number of genes with expression for a cell in the data from the protein layer.
max_expr_genes_s: `float` (default: `np.inf`)
Maximal number of genes with expression for a cell in the data from the spliced layer (also used for X).
max_expr_genes_u: `float` (default: `np.inf`)
Maximal number of genes with expression for a cell in the data from the unspliced layer.
max_expr_genes_p: `float` (default: `np.inf`)
        Maximal number of proteins with expression for a cell in the data from the protein layer.
shared_count: `int` or `None` (default: `None`)
The minimal shared number of counts for each cell across genes between layers.
Returns
-------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with pass_basic_filter as a new column in obs to indicate the selection of cells
        for downstream analysis. adata will be subsetted to only the cells that pass filtering if keep_filtered is
        set to False.
"""
detected_bool = np.ones(adata.X.shape[0], dtype=bool)
detected_bool = (detected_bool) & (
((adata.X > 0).sum(1) >= min_expr_genes_s) & ((adata.X > 0).sum(1) <= max_expr_genes_s)
).flatten()
if ("spliced" in adata.layers.keys()) & (layer == "spliced" or layer == "all"):
detected_bool = (
detected_bool
& (
((adata.layers["spliced"] > 0).sum(1) >= min_expr_genes_s)
& ((adata.layers["spliced"] > 0).sum(1) <= max_expr_genes_s)
).flatten()
)
if ("unspliced" in adata.layers.keys()) & (layer == "unspliced" or layer == "all"):
detected_bool = (
detected_bool
& (
((adata.layers["unspliced"] > 0).sum(1) >= min_expr_genes_u)
& ((adata.layers["unspliced"] > 0).sum(1) <= max_expr_genes_u)
).flatten()
)
if ("protein" in adata.obsm.keys()) & (layer == "protein" or layer == "all"):
detected_bool = (
detected_bool
& (
((adata.obsm["protein"] > 0).sum(1) >= min_expr_genes_p)
& ((adata.obsm["protein"] > 0).sum(1) <= max_expr_genes_p)
).flatten()
)
if shared_count is not None:
layers = get_layer_keys(adata, layer, False)
detected_bool = detected_bool & get_shared_counts(adata, layers, shared_count, "cell")
filter_bool = filter_bool & detected_bool if filter_bool is not None else detected_bool
filter_bool = np.array(filter_bool).flatten()
if keep_filtered:
adata.obs["pass_basic_filter"] = filter_bool
else:
adata._inplace_subset_obs(filter_bool)
adata.obs["pass_basic_filter"] = True
return adata
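# Illustrative usage of filter_cells (a sketch; thresholds are examples only):
#
#     adata = filter_cells(adata, min_expr_genes_s=100, keep_filtered=True)
#     adata.obs["pass_basic_filter"].sum()   # number of cells that passed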
def filter_genes_by_clusters_(
adata: anndata.AnnData,
cluster: str,
min_avg_U: float = 0.02,
min_avg_S: float = 0.08,
size_limit: int = 40,
):
"""Prepare filtering genes on the basis of cluster-wise expression threshold
This function is taken from velocyto in order to reproduce velocyto's DentateGyrus notebook.
Arguments
---------
adata: :class:`~anndata.AnnData`
AnnData object.
cluster: `str`
A column in the adata.obs attribute which will be used for cluster specific expression filtering.
    min_avg_U: float
        Include genes that have an unspliced average bigger than `min_avg_U` in at least one of the clusters.
    min_avg_S: float
        Include genes that have a spliced average bigger than `min_avg_S` in at least one of the clusters.
    Note: the two conditions are combined by the "&" (logical and) operator.
    Returns
    -------
    clu_avg_selected: np.ndarray of bool
        A boolean mask of the genes selected by the cluster-wise thresholds. To perform the actual filtering,
        subset adata with this mask (e.g. `adata[:, clu_avg_selected]`).
    """
"""
U, S, cluster_uid = (
adata.layers["unspliced"],
adata.layers["spliced"],
adata.obs[cluster],
)
cluster_uid, cluster_ix = np.unique(cluster_uid, return_inverse=True)
U_avgs, S_avgs = clusters_stats(U, S, cluster_uid, cluster_ix, size_limit=size_limit)
clu_avg_selected = (U_avgs.max(1) > min_avg_U) & (S_avgs.max(1) > min_avg_S)
return clu_avg_selected
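# Illustrative usage (a sketch; assumes adata.obs["Clusters"] holds cluster
# labels and that spliced/unspliced layers exist):
#
#     clu_selected = filter_genes_by_clusters_(adata, cluster="Clusters")
#     adata = adata[:, clu_selected]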
def filter_genes(
adata: anndata.AnnData,
filter_bool: Union[np.ndarray, None] = None,
layer: str = "all",
min_cell_s: int = 1,
min_cell_u: int = 1,
min_cell_p: int = 1,
min_avg_exp_s: float = 1e-10,
min_avg_exp_u: float = 0,
min_avg_exp_p: float = 0,
    max_avg_exp: float = np.inf,
min_count_s: int = 0,
min_count_u: int = 0,
min_count_p: int = 0,
shared_count: int = 30,
) -> anndata.AnnData:
"""Basic filter of genes based a collection of expression filters.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
filter_bool: :class:`~numpy.ndarray` (default: None)
A boolean array from the user to select genes for downstream analysis.
    layer: `str` (default: `all`)
        The data from a particular layer (include X) used for feature selection.
    min_cell_s: `int` (default: `1`)
        Minimal number of cells with expression for the data in the spliced layer (also used for X).
    min_cell_u: `int` (default: `1`)
        Minimal number of cells with expression for the data in the unspliced layer.
    min_cell_p: `int` (default: `1`)
        Minimal number of cells with expression for the data in the protein layer.
    min_avg_exp_s: `float` (default: `1e-10`)
        Minimal average expression across cells for the data in the spliced layer (also used for X).
    min_avg_exp_u: `float` (default: `0`)
        Minimal average expression across cells for the data in the unspliced layer.
    min_avg_exp_p: `float` (default: `0`)
        Minimal average expression across cells for the data in the protein layer.
    max_avg_exp: `float` (default: `np.inf`)
        Maximal average expression across cells for the data in all layers (also used for X).
    min_count_s: `int` (default: `0`)
        Minimal number of counts (UMI/expression) for the data in the spliced layer (also used for X).
    min_count_u: `int` (default: `0`)
        Minimal number of counts (UMI/expression) for the data in the unspliced layer.
    min_count_p: `int` (default: `0`)
        Minimal number of counts (UMI/expression) for the data in the protein layer.
shared_count: `int` (default: `30`)
        The minimal shared number of counts for each gene across cells between layers.
Returns
-------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with pass_basic_filter as a new column in the .var attribute to indicate which
        genes passed the filters for downstream analysis. This function does not subset adata; combine the mask
        with `select_genes` or subset manually if needed.
"""
detected_bool = np.ones(adata.shape[1], dtype=bool)
detected_bool = (detected_bool) & np.array(
((adata.X > 0).sum(0) >= min_cell_s)
& (adata.X.mean(0) >= min_avg_exp_s)
& (adata.X.mean(0) <= max_avg_exp)
& (adata.X.sum(0) >= min_count_s)
).flatten()
# add our filtering for labeling data below
if "spliced" in adata.layers.keys() and (layer == "spliced" or layer == "all"):
detected_bool = (
detected_bool
& np.array(
((adata.layers["spliced"] > 0).sum(0) >= min_cell_s)
& (adata.layers["spliced"].mean(0) >= min_avg_exp_s)
& (adata.layers["spliced"].mean(0) <= max_avg_exp)
& (adata.layers["spliced"].sum(0) >= min_count_s)
).flatten()
)
if "unspliced" in adata.layers.keys() and (layer == "unspliced" or layer == "all"):
detected_bool = (
detected_bool
& np.array(
((adata.layers["unspliced"] > 0).sum(0) >= min_cell_u)
& (adata.layers["unspliced"].mean(0) >= min_avg_exp_u)
& (adata.layers["unspliced"].mean(0) <= max_avg_exp)
& (adata.layers["unspliced"].sum(0) >= min_count_u)
).flatten()
)
if shared_count is not None:
layers = get_layer_keys(adata, "all", False)
tmp = get_shared_counts(adata, layers, shared_count, "gene")
if tmp.sum() > 2000:
detected_bool &= tmp
else:
# in case the labeling time is very short for pulse experiment or
# chase time is very long for degradation experiment.
tmp = get_shared_counts(
adata,
list(set(layers).difference(["new", "labelled", "labeled"])),
shared_count,
"gene",
)
detected_bool &= tmp
# The following code need to be updated
# just remove genes that are not following the protein criteria
if "protein" in adata.obsm.keys() and layer == "protein":
detected_bool = (
detected_bool
& np.array(
((adata.obsm["protein"] > 0).sum(0) >= min_cell_p)
& (adata.obsm["protein"].mean(0) >= min_avg_exp_p)
& (adata.obsm["protein"].mean(0) <= max_avg_exp)
& (adata.layers["protein"].sum(0) >= min_count_p)
).flatten()
)
filter_bool = filter_bool & detected_bool if filter_bool is not None else detected_bool
adata.var["pass_basic_filter"] = np.array(filter_bool).flatten()
return adata
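# Illustrative usage of filter_genes (a sketch; thresholds are examples only):
#
#     adata = filter_genes(adata, min_cell_s=5, shared_count=30)
#     adata.var["pass_basic_filter"].sum()   # number of genes passing the filters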
def select_genes(
adata: anndata.AnnData,
layer: str = "X",
total_szfactor: str = "total_Size_Factor",
keep_filtered: bool = True,
sort_by: str = "SVR",
n_top_genes: int = 2000,
SVRs_kwargs: dict = {},
only_bools: bool = False,
) -> anndata.AnnData:
"""Select feature genes based on a collection of filters.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
layer: `str` (default: `X`)
The data from a particular layer (include X) used for feature selection.
total_szfactor: `str` (default: `total_Size_Factor`)
The column name in the .obs attribute that corresponds to the size factor for the total mRNA.
keep_filtered: `bool` (default: `True`)
Whether to keep genes that don't pass the filtering in the adata object.
    sort_by: `str` (default: `SVR`)
        Which sorting method, either SVR, dispersion or Gini index, to be used to select genes.
    n_top_genes: `int` (default: `2000`)
        How many top genes based on scoring method (specified by sort_by) will be selected as feature genes.
only_bools: `bool` (default: `False`)
Only return a vector of bool values.
Returns
-------
    adata: :class:`~anndata.AnnData`
        An updated AnnData object with use_for_pca as a new column in the .var attribute to indicate the selection
        of genes for downstream analysis. adata will be subsetted to only the genes that pass the filter if
        keep_filtered is set to False.
"""
filter_bool = (
adata.var["pass_basic_filter"]
if "pass_basic_filter" in adata.var.columns
else np.ones(adata.shape[1], dtype=bool)
)
if adata.shape[1] <= n_top_genes:
filter_bool = np.ones(adata.shape[1], dtype=bool)
else:
if sort_by == "dispersion":
table = top_table(adata, layer, mode="dispersion")
valid_table = table.query("dispersion_empirical > dispersion_fit")
valid_table = valid_table.loc[
set(adata.var.index[filter_bool]).intersection(valid_table.index),
:,
]
gene_id = np.argsort(-valid_table.loc[:, "dispersion_empirical"])[:n_top_genes]
gene_id = valid_table.iloc[gene_id, :].index
filter_bool = adata.var.index.isin(gene_id)
elif sort_by == "gini":
table = top_table(adata, layer, mode="gini")
valid_table = table.loc[filter_bool, :]
gene_id = np.argsort(-valid_table.loc[:, "gini"])[:n_top_genes]
gene_id = valid_table.index[gene_id]
            filter_bool = adata.var.index.isin(gene_id)
elif sort_by == "SVR":
SVRs_args = {
"min_expr_cells": 0,
"min_expr_avg": 0,
"max_expr_avg": np.inf,
"svr_gamma": None,
"winsorize": False,
"winsor_perc": (1, 99.5),
"sort_inverse": False,
}
SVRs_args = update_dict(SVRs_args, SVRs_kwargs)
adata = SVRs(
adata,
layers=[layer],
total_szfactor=total_szfactor,
filter_bool=filter_bool,
**SVRs_args,
)
filter_bool = get_svr_filter(adata, layer=layer, n_top_genes=n_top_genes, return_adata=False)
if keep_filtered:
adata.var["use_for_pca"] = filter_bool
else:
adata._inplace_subset_var(filter_bool)
adata.var["use_for_pca"] = True
return filter_bool if only_bools else adata
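# Illustrative usage of select_genes (a sketch):
#
#     adata = select_genes(adata, sort_by="SVR", n_top_genes=2000)
#     feature_genes = adata.var_names[adata.var["use_for_pca"]]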
def recipe_monocle(
adata: anndata.AnnData,
reset_X: bool = False,
tkey: Union[str, None] = None,
t_label_keys: Union[str, list, None] = None,
experiment_type: Union[str, None] = None,
normalized: Union[bool, None] = None,
layer: Union[str, None] = None,
total_layers: Union[bool, list, None] = None,
splicing_total_layers: bool = False,
X_total_layers: bool = False,
genes_use_for_norm: Union[list, None] = None,
genes_to_use: Union[list, None] = None,
genes_to_append: Union[list, None] = None,
genes_to_exclude: Union[list, None] = None,
exprs_frac_max: float = 1,
method: str = "pca",
num_dim: int = 30,
sz_method: str = "median",
scale_to: Union[float, None] = None,
norm_method: Union[str, None] = None,
pseudo_expr: int = 1,
feature_selection: str = "SVR",
n_top_genes: int = 2000,
maintain_n_top_genes: bool = True,
relative_expr: bool = True,
keep_filtered_cells: Optional[bool] = None,
keep_filtered_genes: Optional[bool] = None,
keep_raw_layers: Optional[bool] = None,
scopes: Union[str, Iterable, None] = None,
fc_kwargs: Union[dict, None] = None,
fg_kwargs: Union[dict, None] = None,
sg_kwargs: Union[dict, None] = None,
copy: bool = False,
feature_selection_layer: Union[list, np.ndarray, np.array, str] = "X",
) -> Union[anndata.AnnData, None]:
"""This function is partly based on Monocle R package (https://github.com/cole-trapnell-lab/monocle3).
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
tkey: `str` or None (default: None)
The column key for the labeling time of cells in .obs. Used for labeling based scRNA-seq data (will also
support for conventional scRNA-seq data). Note that `tkey` will be saved to adata.uns['pp']['tkey'] and used
        in `dyn.tl.dynamics` in which when `group` is None, `tkey` will also be used for calculating 1st/2nd moments
        or covariance. We recommend using hours as the unit of `time`.
t_label_keys: `str`, `list` or None (default: None)
The column key(s) for the labeling time label of cells in .obs. Used for either "conventional" or "labeling
based" scRNA-seq data. Not used for now and `tkey` is implicitly assumed as `t_label_key` (however, `tkey`
should just be the time of the experiment).
experiment_type: `str` {`deg`, `kin`, `one-shot`, `mix_std_stm`, 'mixture'} or None, (default: `None`)
experiment type for labeling single cell RNA-seq. Available options are:
(1) 'conventional': conventional single-cell RNA-seq experiment, if `experiment_type` is `None` and there is
only splicing data, this will be set to `conventional`;
(2) 'deg': chase/degradation experiment. Cells are first labeled with an extended period, followed by chase;
(3) 'kin': pulse/synthesis/kinetics experiment. Cells are labeled for different duration in a time-series;
(4) 'one-shot': one-shot kinetic experiment. Cells are only labeled for a short pulse duration;
        Other possible experiments include:
        (5) 'mix_pulse_chase' or 'mix_kin_deg': This is a mixture chase experiment in which the entire experiment
        lasts for a certain period of time, with an initial pulse followed by washing out at different time points
        but chasing cells at the same time point. This type of labeling strategy was adopted in the scEU-seq paper.
        For this kind of experiment, we need to assume a non-steady state dynamics.
        (6) 'mix_std_stm';
    reset_X: bool (default: `False`)
        Whether to let dynamo reset `adata.X` data based on layers stored in your experiment. One critical
        functionality of dynamo is visualizing RNA velocity vector flows, which requires proper data into which the
        high dimensional RNA velocity vectors will be projected.
        (1) For a `kinetics` experiment, we recommend using the `total` layer as `adata.X`;
        (2) For a `degradation/conventional` scRNA-seq experiment, we recommend using the `spliced` layer as `adata.X`.
        Set `reset_X` to `True` to set those default values if you are not sure.
normalized: `None` or `bool` (default: `None`)
If you already normalized your data (or run recipe_monocle already), set this to be `True` to avoid
renormalizing your data. By default it is set to be `None` and the first 20 values of adata.X (if adata.X is
sparse) or its first column will be checked to determine whether you already normalized your data. This only
works for UMI based or read-counts data.
layer: str (default: `None`)
The layer(s) to be normalized. Default is all, including RNA (X, raw) or spliced, unspliced, protein, etc.
total_layers: bool, list or None (default `None`)
        The layer(s) that can be summed up to get the total mRNA. For example, ["spliced", "unspliced"], ["uu", "ul"
        , "su", "sl"] or ["total"], etc. If total_layers is `True`, total_layers will be set to `total` or ["uu",
        "ul", "su", "sl"] depending on whether you have labeling but no splicing, or labeling and splicing data.
splicing_total_layers: bool (default `False`)
Whether to also normalize spliced / unspliced layers by size factor from total RNA.
X_total_layers: bool (default `False`)
Whether to also normalize adata.X by size factor from total RNA.
genes_use_for_norm: `list` (default: `None`)
A list of gene names that will be used to calculate total RNA for each cell and then the size factor for
normalization. This is often very useful when you want to use only the host genes to normalize the dataset
in a virus infection experiment (i.e. CMV or SARS-CoV-2 infection).
genes_to_use: `list` (default: `None`)
A list of gene names that will be used to set as the feature genes for downstream analysis.
genes_to_append: `list` (default: `None`)
A list of gene names that will be appended to the feature genes list for downstream analysis.
genes_to_exclude: `list` (default: `None`)
A list of gene names that will be excluded to the feature genes list for downstream analysis.
    exprs_frac_max: `float` (default: `1`)
        The maximal fraction of gene counts relative to the total counts across cells that will be used to filter
        genes. By default it is 1, which means we don't filter any genes, but you may change it to 0.005 or so in
        order to remove some highly expressed housekeeping genes.
method: `str` (default: `pca`)
The linear dimension reduction methods to be used.
num_dim: `int` (default: `30`)
The number of linear dimensions reduced to.
    sz_method: `str` (default: `median`)
The method used to calculate the expected total reads / UMI used in size factor calculation.
Only `mean-geometric-mean-total` / `geometric` and `median` are supported. When `median` is used, `locfunc`
will be replaced with `np.nanmedian`.
scale_to: `float` or None (default: `None`)
The final total expression for each cell that will be scaled to.
    norm_method: `function` or None (default: `None`)
The method to normalize the data. Can be any numpy function or `Freeman_Tukey`. By default, only .X will be
size normalized and log1p transformed while data in other layers will only be size factor normalized.
pseudo_expr: `int` (default: `1`)
A pseudocount added to the gene expression value before log/log2 normalization.
feature_selection: `str` (default: `SVR`)
        Which sorting method, either dispersion, SVR or Gini index, to be used to select genes.
n_top_genes: `int` (default: `2000`)
How many top genes based on scoring method (specified by sort_by) will be selected as feature genes.
    maintain_n_top_genes: `bool` (default: `True`)
        Whether to ensure n_top_genes feature genes are selected no matter what genes_to_use, genes_to_append, etc.
        are specified. The only exception is when `genes_to_use` is supplied together with `n_top_genes`.
relative_expr: `bool` (default: `True`)
A logic flag to determine whether we need to divide gene expression values first by size factor before
normalization.
    keep_filtered_cells: `bool` (default: `None`)
        Whether to keep cells that don't pass the filtering in the returned adata object. When `None`, the value is
        resolved from DynamoAdataConfig.
    keep_filtered_genes: `bool` (default: `None`)
        Whether to keep genes that don't pass the filtering in the returned adata object. When `None`, the value is
        resolved from DynamoAdataConfig.
    keep_raw_layers: `bool` (default: `None`)
        Whether to keep layers with raw measurements in the returned adata object. When `None`, the value is
        resolved from DynamoAdataConfig.
    scopes: `str`, `list-like` or `None` (default: `None`)
        Scopes are needed when you use non-official gene names as your gene indices (or adata.var_name). This
        argument corresponds to the type(s) of identifiers, either a list or a comma-separated string of fields to
        specify the type of input qterms, e.g. “entrezgene”, “entrezgene,symbol”, [“ensemblgene”, “symbol”]. Refer
        to the official MyGene.info docs (https://docs.mygene.info/en/latest/doc/query_service.html#available_fields)
        for the full list of fields.
fc_kwargs: `dict` or None (default: `None`)
Other Parameters passed into the filter_cells function.
fg_kwargs: `dict` or None (default: `None`)
Other Parameters passed into the filter_genes function.
sg_kwargs: `dict` or None (default: `None`)
Other Parameters passed into the select_genes function.
copy:
Whether to return a new deep copy of `adata` instead of updating `adata` object passed in arguments.
Returns
-------
    adata: :class:`~anndata.AnnData`
        A new or updated AnnData object, depending on the copy parameter, updated with Size_Factor, normalized
        expression values, X, reduced dimensions, etc.
"""
logger = LoggerManager.gen_logger("dynamo-preprocessing")
logger.log_time()
keep_filtered_cells = DynamoAdataConfig.check_config_var(
keep_filtered_cells, DynamoAdataConfig.RECIPE_MONOCLE_KEEP_FILTERED_CELLS_KEY
)
keep_filtered_genes = DynamoAdataConfig.check_config_var(
keep_filtered_genes, DynamoAdataConfig.RECIPE_MONOCLE_KEEP_FILTERED_GENES_KEY
)
keep_raw_layers = DynamoAdataConfig.check_config_var(
keep_raw_layers, DynamoAdataConfig.RECIPE_MONOCLE_KEEP_RAW_LAYERS_KEY
)
adata = copy_adata(adata) if copy else adata
logger.info("apply Monocole recipe to adata...", indent_level=1)
if "use_for_pca" in adata.var.columns:
del adata.var["use_for_pca"] # avoid use_for_pca was set previously.
adata = convert2symbol(adata, scopes=scopes)
n_cells, n_genes = adata.n_obs, adata.n_vars
# Since convert2symbol may subset adata and generate a new AnnData object,
# we should create all following data after convert2symbol (gene names)
adata.uns["pp"] = {}
if norm_method == "Freeman_Tukey":
norm_method = Freeman_Tukey
basic_stats(adata)
(
has_splicing,
has_labeling,
splicing_labeling,
has_protein,
) = detect_datatype(adata)
logger.info_insert_adata("pp", "uns")
logger.info_insert_adata("has_splicing", "uns['pp']", indent_level=2)
logger.info_insert_adata("has_labling", "uns['pp']", indent_level=2)
logger.info_insert_adata("splicing_labeling", "uns['pp']", indent_level=2)
logger.info_insert_adata("has_protein", "uns['pp']", indent_level=2)
(
adata.uns["pp"]["has_splicing"],
adata.uns["pp"]["has_labeling"],
adata.uns["pp"]["splicing_labeling"],
adata.uns["pp"]["has_protein"],
) = (has_splicing, has_labeling, splicing_labeling, has_protein)
if has_splicing and has_labeling and splicing_labeling:
layer = (
[
"X",
"uu",
"ul",
"su",
"sl",
"spliced",
"unspliced",
"new",
"total",
]
if layer is None
else layer
)
if type(total_layers) != list:
total_layers = ["uu", "ul", "su", "sl"] if total_layers else None
if has_splicing and has_labeling and not splicing_labeling:
layer = ["X", "spliced", "unspliced", "new", "total"] if layer is None else layer
if type(total_layers) != list:
total_layers = ["total"] if total_layers else None
elif has_labeling and not has_splicing:
layer = ["X", "total", "new"] if layer is None else layer
if type(total_layers) != list:
total_layers = ["total"] if total_layers else None
elif has_splicing and not has_labeling:
layer = ["X", "spliced", "unspliced"] if layer is None else layer
logger.info("ensure all cell and variable names unique.", indent_level=1)
adata = unique_var_obs_adata(adata)
logger.info(
"ensure all data in different layers in csr sparse matrix format.",
indent_level=1,
)
adata = layers2csr(adata)
logger.info("ensure all labeling data properly collapased", indent_level=1)
adata = collapse_adata(adata)
# reset adata.X
if has_labeling:
if tkey is None:
main_warning(
"\nWhen analyzing labeling based scRNA-seq without providing `tkey`, dynamo will try to use "
"\n `time` as the key for labeling time. Please correct this via supplying the correct `tkey`"
"\nif needed."
)
tkey = "time"
if tkey not in adata.obs.keys():
raise ValueError(f"`tkey` {tkey} that encodes the labeling time is not existed in your adata.")
if experiment_type is None:
experiment_type = _infer_experiment_type(adata)
main_info("detected experiment type: %s" % experiment_type)
valid_experiment_types = [
"one-shot",
"kin",
"mixture",
"mix_std_stm",
"kinetics",
"mix_pulse_chase",
"mix_kin_deg",
"deg",
]
if experiment_type not in valid_experiment_types:
raise ValueError(
"expriment_type can only be one of ['one-shot', 'kin', 'mixture', 'mix_std_stm', "
"'kinetics', 'mix_pulse_chase','mix_kin_deg', 'deg']"
)
elif experiment_type == "kinetics":
experiment_type = "kin"
elif experiment_type == "degradation":
experiment_type = "deg"
if reset_X:
if has_labeling:
if experiment_type.lower() in [
"one-shot",
"kin",
"mixture",
"mix_std_stm",
"kinetics",
"mix_pulse_chase",
"mix_kin_deg",
]:
adata.X = adata.layers["total"].copy()
if experiment_type.lower() in ["deg", "degradation"] and has_splicing:
adata.X = adata.layers["spliced"].copy()
if experiment_type.lower() in ["deg", "degradation"] and not has_splicing:
main_warning(
"It is not possible to calculate RNA velocity from a degradation experiment which has no "
"splicing information."
)
adata.X = adata.layers["total"].copy()
else:
adata.X = adata.layers["total"].copy()
else:
adata.X = adata.layers["spliced"].copy()
if tkey is not None:
if adata.obs[tkey].max() > 60:
main_warning(
"Looks like you are using minutes as the time unit. For the purpose of numeric stability, "
"we recommend using hour as the time unit."
)
logger.info_insert_adata("tkey", "uns['pp']", indent_level=2)
logger.info_insert_adata("experiment_type", "uns['pp']", indent_level=2)
adata.uns["pp"]["tkey"] = tkey
adata.uns["pp"]["experiment_type"] = "conventional" if experiment_type is None else experiment_type
_szFactor, _logged = (True, True) if normalized else (False, False)
if normalized is None and not has_labeling:
if "raw_data" in adata.uns_keys():
_szFactor, _logged = (
not adata.uns["raw_data"],
not adata.uns["raw_data"],
)
else:
# automatically detect whether the data is size-factor normalized -- no integers (only works for readcounts
# / UMI based data).
_szFactor = not np.allclose(
(adata.X.data[:20] if issparse(adata.X) else adata.X[:, 0]) % 1,
0,
atol=1e-3,
)
# check whether total UMI is the same -- if not the same, logged
if _szFactor:
_logged = not np.allclose(
np.sum(adata.X.sum(1)[np.random.choice(adata.n_obs, 10)] - adata.X.sum(1)[0]),
0,
atol=1e-1,
)
if _szFactor or _logged:
main_warning(
"dynamo detects your data is size factor normalized and/or log transformed. If this is not "
"right, plese set `normalized = False."
)
# filter bad cells
filter_cells_kwargs = {
"filter_bool": None,
"layer": "all",
"min_expr_genes_s": min(50, 0.01 * n_genes),
"min_expr_genes_u": min(25, 0.01 * n_genes),
"min_expr_genes_p": min(2, 0.01 * n_genes),
"max_expr_genes_s": np.inf,
"max_expr_genes_u": np.inf,
"max_expr_genes_p": np.inf,
"shared_count": None,
}
if fc_kwargs is not None:
filter_cells_kwargs.update(fc_kwargs)
logger.info("filtering cells...")
logger.info_insert_adata("pass_basic_filter", "obs")
adata = filter_cells(adata, keep_filtered=keep_filtered_cells, **filter_cells_kwargs)
logger.info(f"{adata.obs.pass_basic_filter.sum()} cells passed basic filters.")
filter_genes_kwargs = {
"filter_bool": None,
"layer": "all",
"min_cell_s": max(5, 0.01 * n_cells),
"min_cell_u": max(5, 0.005 * n_cells),
"min_cell_p": max(5, 0.005 * n_cells),
"min_avg_exp_s": 0,
"min_avg_exp_u": 0,
"min_avg_exp_p": 0,
"max_avg_exp": np.inf,
"min_count_s": 0,
"min_count_u": 0,
"min_count_p": 0,
"shared_count": 30,
}
if fg_kwargs is not None:
filter_genes_kwargs.update(fg_kwargs)
# set pass_basic_filter for genes
logger.info("filtering gene...")
logger.info_insert_adata("pass_basic_filter", "var")
adata = filter_genes(
adata,
**filter_genes_kwargs,
)
logger.info(f"{adata.var.pass_basic_filter.sum()} genes passed basic filters.")
if adata.var.pass_basic_filter.sum() == 0:
        logger.error(
            "No genes pass basic filters. Please check your data, for example, layer names, or other arguments."
        )
raise Exception()
if adata.obs.pass_basic_filter.sum() == 0:
logger.error("No cells pass basic filters. Please check your data or arguments, for example, fc_kwargs.")
raise Exception()
# calculate sz factor
logger.info("calculating size factor...")
if not _szFactor or "Size_Factor" not in adata.obs_keys():
adata = szFactor(
adata,
total_layers=total_layers,
scale_to=scale_to,
splicing_total_layers=splicing_total_layers,
X_total_layers=X_total_layers,
layers=layer if type(layer) is list else "all",
genes_use_for_norm=genes_use_for_norm,
)
# if feature_selection.lower() == "dispersion":
# adata = estimate_dispersion(adata)
# set use_for_pca (use basic_filtered data)
select_genes_dict = {
"min_expr_cells": 0,
"min_expr_avg": 0,
"max_expr_avg": np.inf,
"svr_gamma": None,
"winsorize": False,
"winsor_perc": (1, 99.5),
"sort_inverse": False,
}
if sg_kwargs is not None:
select_genes_dict.update(sg_kwargs)
if genes_to_use is None:
pass_basic_filter_num = adata.var.pass_basic_filter.sum()
if pass_basic_filter_num < n_top_genes:
logger.warning(
f"only {pass_basic_filter_num} genes passed basic filtering, but you requested {n_top_genes} "
f"genes for feature selection. Try lowering the gene selection stringency: "
f"{select_genes_dict}",
)
logger.info("selecting genes in layer: %s, sort method: %s..." % (feature_selection_layer, feature_selection))
adata = select_genes(
adata,
layer=feature_selection_layer,
sort_by=feature_selection,
n_top_genes=n_top_genes,
keep_filtered=True,
SVRs_kwargs=select_genes_dict,
)
else:
if len(adata.var_names.intersection(genes_to_use)) == 0:
logger.error(
"No genes from genes_to_use matches with the gene names from adata. Please ensure you use gene short "
"names!"
)
raise Exception()
logger.info_insert_adata("use_for_pca", "var")
adata.var["use_for_pca"] = adata.var.index.isin(genes_to_use)
logger.info_insert_adata("frac", "var")
adata.var["frac"], invalid_ids = gene_exp_fraction(X=adata.X, threshold=exprs_frac_max)
genes_to_exclude = (
list(adata.var_names[invalid_ids])
if genes_to_exclude is None
else genes_to_exclude + list(adata.var_names[invalid_ids])
)
if genes_to_append is not None:
valid_genes = adata.var.index.intersection(genes_to_append)
if len(valid_genes) > 0:
adata.var.loc[valid_genes, "use_for_pca"] = True
if genes_to_exclude is not None:
valid_genes = adata.var.index.intersection(genes_to_exclude)
if len(valid_genes) > 0:
adata.var.loc[valid_genes, "use_for_pca"] = False
if adata.var.use_for_pca.sum() < 50 and not maintain_n_top_genes:
        main_warning(
            "Fewer than 50 feature genes were selected. Are you sure you want to exclude all "
            "genes passed to the genes_to_exclude argument?"
        )
if maintain_n_top_genes:
if genes_to_append is not None:
n_top_genes = n_top_genes - len(genes_to_append)
valid_ids = adata.var.index.difference(genes_to_exclude + genes_to_append)
else:
valid_ids = adata.var.index.difference(genes_to_exclude)
if n_top_genes > 0:
# let us ignore the `inplace` parameter in pandas.Categorical.remove_unused_categories warning.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filter_bool = select_genes(
adata[:, valid_ids],
sort_by=feature_selection,
n_top_genes=n_top_genes,
keep_filtered=True, # no effect to adata
SVRs_kwargs=select_genes_dict,
only_bools=True,
)
adata.var.loc[valid_ids, "use_for_pca"] = filter_bool
if not keep_filtered_genes:
logger.info("Discarding genes that failed the filtering...")
adata._inplace_subset_var(adata.var["use_for_pca"])
# normalized data based on sz factor
if not _logged:
total_szfactor = "total_Size_Factor" if total_layers is not None else None
logger.info("size factor normalizing the data, followed by log1p transformation.")
adata = normalize_expr_data(
adata,
layers=layer if type(layer) is list else "all",
total_szfactor=total_szfactor,
splicing_total_layers=splicing_total_layers,
X_total_layers=X_total_layers,
norm_method=norm_method,
pseudo_expr=pseudo_expr,
relative_expr=relative_expr,
keep_filtered=keep_filtered_genes,
sz_method=sz_method,
scale_to=scale_to,
)
else:
layers = get_layer_keys(adata, "all")
for layer in layers:
if layer != "X":
logger.info_insert_adata("X_" + layer, "layers")
adata.layers["X_" + layer] = adata.layers[layer].copy()
logger.info_insert_adata("norm_method", "uns['pp']", indent_level=2)
adata.uns["pp"]["norm_method"] = None
# only use genes pass filter (based on use_for_pca) to perform dimension reduction.
if layer is None:
CM = adata.X[:, adata.var.use_for_pca.values]
else:
if "X" in layer:
CM = adata.X[:, adata.var.use_for_pca.values]
elif "total" in layer:
CM = adata.layers["X_total"][:, adata.var.use_for_pca.values]
elif "spliced" in layer:
CM = adata.layers["X_spliced"][:, adata.var.use_for_pca.values]
elif "protein" in layer:
CM = adata.obsm["X_protein"]
elif type(layer) is str:
CM = adata.layers["X_" + layer][:, adata.var.use_for_pca.values]
else:
raise ValueError(
f"your input layer argument should be either a `str` or a list that includes one of `X`, "
f"`total`, `protein` element. `Layer` currently is {layer}."
)
cm_genesums = CM.sum(axis=0)
valid_ind = np.logical_and(np.isfinite(cm_genesums), cm_genesums != 0)
valid_ind = np.array(valid_ind).flatten()
bad_genes = np.where(adata.var.use_for_pca)[0][~valid_ind]
if genes_to_append is not None and len(adata.var.index[bad_genes].intersection(genes_to_append)) > 0:
        raise ValueError(
            f"The gene list passed to argument genes_to_append contains genes with no expression "
            f"across cells or with non-finite values. Please check those genes: "
            f"{set(adata.var.index[bad_genes]).intersection(genes_to_append)}!"
        )
adata.var.iloc[bad_genes, adata.var.columns.tolist().index("use_for_pca")] = False
CM = CM[:, valid_ind]
logger.info("applying %s ..." % (method.upper()))
if method == "pca":
adata = pca(adata, CM, num_dim, "X_" + method.lower())
adata.obsm["X"] = adata.obsm["X_" + method.lower()]
elif method == "ica":
fit = FastICA(
num_dim,
algorithm="deflation",
tol=5e-6,
fun="logcosh",
max_iter=1000,
)
reduce_dim = fit.fit_transform(CM.toarray())
adata.obsm["X_" + method.lower()] = reduce_dim
adata.obsm["X"] = adata.obsm["X_" + method.lower()]
logger.info_insert_adata(method + "_fit", "uns")
adata.uns[method + "_fit"], adata.uns["feature_selection"] = (
{},
feature_selection,
)
# calculate NTR for every cell:
ntr, var_ntr = NTR(adata)
if ntr is not None:
logger.info_insert_adata("ntr", "obs")
logger.info_insert_adata("ntr", "var")
adata.obs["ntr"] = ntr
adata.var["ntr"] = var_ntr
logger.info("cell cycle scoring...")
try:
cell_cycle_scores(adata)
except Exception:
logger.warning(
"\nDynamo is not able to perform cell cycle staging for you automatically. \n"
"Since dyn.pl.phase_diagram in dynamo by default colors cells by its cell-cycle stage, \n"
"you need to set color argument accordingly if confronting errors related to this."
)
if "raw_data" in adata.uns_keys():
logger.info_insert_adata("raw_data", "uns")
adata.uns["raw_data"] = False
if not keep_raw_layers:
layers = list(adata.layers.keys())
for layer in layers:
if not layer.startswith("X_"):
del adata.layers[layer]
logger.finish_progress(progress_name="recipe_monocle preprocess")
if copy:
return adata
return None
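# Illustrative end-to-end usage of recipe_monocle (a sketch; `zebrafish` is one
# of dynamo's bundled sample datasets):
#
#     import dynamo as dyn
#     adata = dyn.sample_data.zebrafish()
#     dyn.pp.recipe_monocle(adata, n_top_genes=2000)
#     adata.obsm["X_pca"]   # reduced dimensions
#     adata.uns["pp"]       # preprocessing metadata (tkey, experiment_type, ...)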
def recipe_velocyto(
adata: anndata.AnnData,
total_layers=None,
method="pca",
num_dim=30,
norm_method=None,
pseudo_expr=1,
feature_selection="SVR",
n_top_genes=2000,
cluster="Clusters",
relative_expr=True,
keep_filtered_genes=None,
):
"""This function is adapted from the velocyto's DentateGyrus notebook.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
total_layers: list or None (default `None`)
The layer(s) that can be summed up to get the total mRNA. for example, ["spliced", "unspliced"], ["uu",
"ul", "su", "sl"] or ["new", "old"], etc.
    method: `str` (default: `pca`)
        The linear dimension reduction methods to be used.
    num_dim: `int` (default: `30`)
        The number of linear dimensions reduced to.
norm_method: `function`, `str` or `None` (default: function `None`)
The method to normalize the data.
pseudo_expr: `int` (default: `1`)
A pseudocount added to the gene expression value before log/log2 normalization.
feature_selection: `str` (default: `SVR`)
Which sorting method, either dispersion, SVR or Gini index, to be used to select genes.
n_top_genes: `int` (default: `2000`)
How many top genes based on scoring method (specified by sort_by) will be selected as feature genes.
cluster: `str`
A column in the adata.obs attribute which will be used for cluster specific expression filtering.
relative_expr: `bool` (default: `True`)
A logic flag to determine whether we need to divide gene expression values first by size factor before
normalization.
keep_filtered_genes: `bool` (default: `True`)
Whether to keep genes that don't pass the filtering in the adata object.
Returns
-------
adata: :class:`~anndata.AnnData`
An updated anndata object that are updated with Size_Factor, normalized expression values, X and reduced
dimensions, etc.
"""
keep_filtered_genes = DynamoAdataConfig.check_config_var(
keep_filtered_genes, DynamoAdataConfig.RECIPE_KEEP_FILTERED_GENES_KEY
)
adata = szFactor(adata, method="mean", total_layers=total_layers)
initial_Ucell_size = adata.layers["unspliced"].sum(1)
filter_bool = initial_Ucell_size > np.percentile(initial_Ucell_size, 0.4)
adata = filter_cells(adata, filter_bool=np.array(filter_bool).flatten())
    adata = filter_genes(adata, min_cell_s=30, min_count_s=40, shared_count=None)
    adata = adata[:, adata.var.pass_basic_filter]
adata = SVRs(
adata,
layers=["spliced"],
min_expr_cells=2,
max_expr_avg=35,
min_expr_avg=0,
)
filter_bool = get_svr_filter(adata, layer="spliced", n_top_genes=n_top_genes)
adata = adata[:, filter_bool]
    adata = filter_genes(
        adata,
        min_cell_s=0,
        min_count_s=0,
        min_count_u=25,
        min_cell_u=20,
        shared_count=None,
    )
    filter_bool_gene = adata.var.pass_basic_filter
    filter_bool_cluster = filter_genes_by_clusters_(adata, min_avg_S=0.08, min_avg_U=0.01, cluster=cluster)
    adata = adata[:, filter_bool_gene.values & filter_bool_cluster]
adata = normalize_expr_data(
adata,
total_szfactor=None,
norm_method=norm_method,
pseudo_expr=pseudo_expr,
relative_expr=relative_expr,
keep_filtered=keep_filtered_genes,
)
CM = adata.X
cm_genesums = CM.sum(axis=0)
valid_ind = np.logical_and(np.isfinite(cm_genesums), cm_genesums != 0)
valid_ind = np.array(valid_ind).flatten()
adata.var.use_for_pca[np.where(adata.var.use_for_pca)[0][~valid_ind]] = False
CM = CM[:, valid_ind]
if method == "pca":
adata, fit, _ = pca(adata, CM, num_dim, "X_" + method.lower(), return_all=True)
# adata.obsm['X_' + method.lower()] = reduce_dim
elif method == "ica":
cm_genesums = CM.sum(axis=0)
        valid_ind = np.logical_and(np.isfinite(cm_genesums), cm_genesums != 0)
valid_ind = np.array(valid_ind).flatten()
CM = CM[:, valid_ind]
fit = FastICA(
num_dim,
algorithm="deflation",
tol=5e-6,
fun="logcosh",
max_iter=1000,
)
reduce_dim = fit.fit_transform(CM.toarray())
adata.obsm["X_" + method.lower()] = reduce_dim
add_noise_to_duplicates(adata, method.lower())
adata.uns[method + "_fit"], adata.uns["feature_selection"] = (
fit,
feature_selection,
)
# calculate NTR for every cell:
    ntr, var_ntr = NTR(adata)
    if ntr is not None:
        adata.obs["ntr"] = ntr
        adata.var["ntr"] = var_ntr
return adata
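# Illustrative usage of recipe_velocyto (a sketch; assumes a loom-derived
# AnnData with spliced/unspliced layers and a "Clusters" column in .obs):
#
#     adata = recipe_velocyto(adata, cluster="Clusters", n_top_genes=2000)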
def highest_frac_genes(
adata: AnnData,
store_key: str = "highest_frac_genes",
n_top: int = 30,
gene_prefix_list: list = None,
show_individual_prefix_gene: bool = False,
layer: Union[str, None] = None,
):
"""
Compute top genes df and store results in `adata.uns`
Parameters
----------
adata : AnnData
[description]
store_key : str, optional
[description], by default "highest_frac_genes"
n_top : int, optional
[description], by default 30
gene_prefix_list : list, optional
[description], by default None
show_individual_prefix_gene : bool, optional
[description], by default False
layer : Union[str, None], optional
[description], by default None
Returns
-------
[type]
[description]
"""
gene_mat = adata.X
if layer is not None:
gene_mat = adata.layers[layer]
# compute gene percents at each cell row
    cell_expression_sum = np.array(gene_mat.sum(axis=1)).flatten()
# get rid of cells that have all zero counts
not_all_zero = cell_expression_sum != 0
adata = adata[not_all_zero, :]
cell_expression_sum = cell_expression_sum[not_all_zero]
main_info("%d rows(cells or subsets) are not zero. zero total RNA cells are removed." % np.sum(not_all_zero))
valid_gene_set = set()
prefix_to_genes = {}
_adata = adata
if gene_prefix_list is not None:
prefix_to_genes = {prefix: [] for prefix in gene_prefix_list}
for name in adata.var_names:
for prefix in gene_prefix_list:
length = len(prefix)
if name[:length] == prefix:
valid_gene_set.add(name)
prefix_to_genes[prefix].append(name)
break
if len(valid_gene_set) == 0:
main_critical("NO VALID GENES FOUND WITH REQUIRED GENE PREFIX LIST, GIVING UP PLOTTING")
return None
if not show_individual_prefix_gene:
# gathering gene prefix set data
df = pd.DataFrame(index=adata.obs.index)
for prefix in prefix_to_genes:
if len(prefix_to_genes[prefix]) == 0:
main_info("There is no %s gene prefix in adata." % prefix)
continue
df[prefix] = adata[:, prefix_to_genes[prefix]].X.sum(axis=1)
# adata = adata[:, list(valid_gene_set)]
_adata = AnnData(X=df)
gene_mat = _adata.X
# compute gene's total percents in the dataset
gene_percents = np.array(gene_mat.sum(axis=0))
gene_percents = (gene_percents / gene_mat.shape[1]).flatten()
# obtain top genes
sorted_indices = np.argsort(-gene_percents)
selected_indices = sorted_indices[:n_top]
gene_names = _adata.var_names[selected_indices]
gene_X_percents = gene_mat / cell_expression_sum.reshape([-1, 1])
# assemble a dataframe
selected_gene_X_percents = np.array(gene_X_percents)[:, selected_indices]
selected_gene_X_percents = np.squeeze(selected_gene_X_percents)
top_genes_df = pd.DataFrame(
selected_gene_X_percents,
index=adata.obs_names,
columns=gene_names,
)
adata.uns[store_key] = {
"top_genes_df": top_genes_df,
"gene_mat": gene_mat,
"layer": layer,
"selected_indices": selected_indices,
"gene_prefix_list": gene_prefix_list,
"show_individual_prefix_gene": show_individual_prefix_gene,
"gene_percents": gene_percents,
}
return adata
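# Illustrative usage of highest_frac_genes (a sketch; the prefixes below are
# common examples for mitochondrial and ribosomal genes):
#
#     adata = highest_frac_genes(adata, n_top=30, gene_prefix_list=["MT-", "RPS", "RPL"])
#     adata.uns["highest_frac_genes"]["top_genes_df"].head()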
| [
6738,
17268,
13,
39305,
1330,
40806,
540,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
14601,
198,
6738,
629,
541,
88,
13,
82,
29572,
1330,
1189,
29572,
11,
269,
27891,
62,
6759,
8609,
198,
673... | 2.193488 | 39,496 |
from coefficients import Coefficients, Constants
from data_models import States, Setpoints, Weather
from equations.utils import total_side_vents_ventilation_rates, total_roof_ventilation_rates, \
thermal_screen_air_flux_rate
from equations.heat_fluxes import sensible_heat_flux_between_direct_air_heater_and_greenhouse_air
| [
6738,
44036,
1330,
1766,
41945,
11,
4757,
1187,
198,
6738,
1366,
62,
27530,
1330,
1829,
11,
5345,
13033,
11,
15615,
198,
6738,
27490,
13,
26791,
1330,
2472,
62,
1589,
62,
85,
658,
62,
1151,
10520,
62,
9700,
11,
2472,
62,
305,
1659,
... | 3.363636 | 99 |
"""
Construct csvs for Figure--Figure Captions/ Tables -- Table Captions
"""
from lxml import etree
import os
from Parser.parse_preprocess import get_words_from_child as get_words
from converters.html2xml import htmlfile2xml
from converters.xml2list import xml2list
import pandas as pd
import multiprocessing as mp
import glob
from bs4 import BeautifulSoup
import codecs
from postprocess.postprocess import not_ocr
import click
def get_cls_list(html_f):
"""
Given an html file, get a list of objects that's easier to reason about
:param html_f: The input html file
:return: [(cls, bb, score)]
"""
htmlfile2xml(html_f, '/tmp')
return xml2list(f'{os.path.join("/tmp", os.path.basename(html_f)[:-5])}.xml')
def get_target_map(html_f, target_cls, target_cls_association):
"""
Get a map with targets and target associations
:param html_f: html file to ingest
:param target_cls: the target class
:param target_cls_association: the target class association
:return: dictionary mapping targets to target associations
"""
cls_list = get_cls_list(html_f)
cls_list = [(x[0], tuple(x[1]), x[2]) for x in cls_list]
targets = [x for x in cls_list if x[0] == target_cls]
if len(targets) == 0:
return None, None
cls_associations = [x for x in cls_list if x[0] == target_cls_association]
target_map = {}
for target in targets:
if len(cls_associations) == 0:
target_map[target] = None
continue
        # compute two distances:
        # 1) from the target's top-left to the bottom-left of each caption candidate
        # 2) from the target's bottom-right to the top-right of each caption candidate
        # (calc_tl_dists / calc_br_dists were referenced but not defined in this
        # file; Euclidean distances from the target corners are assumed here)
        target_tl = (target[1][0], target[1][1])
        target_br = (target[1][2], target[1][3])
        calc_tl_dists = lambda pt: ((pt[0] - target_tl[0]) ** 2 + (pt[1] - target_tl[1]) ** 2) ** 0.5
        calc_br_dists = lambda pt: ((pt[0] - target_br[0]) ** 2 + (pt[1] - target_br[1]) ** 2) ** 0.5
        cls_ass_bl = [(x[1][0], x[1][3]) for x in cls_associations]
        cls_ass_tr = [(x[1][2], x[1][1]) for x in cls_associations]
        cls_ass_bl_dists = [calc_tl_dists(x) for x in cls_ass_bl]
        cls_ass_tr_dists = [calc_br_dists(x) for x in cls_ass_tr]
bl_min_dist = min(cls_ass_bl_dists)
tr_min_dist = min(cls_ass_tr_dists)
cls_assoc = None
if bl_min_dist <= tr_min_dist:
ind = cls_ass_bl_dists.index(bl_min_dist)
cls_assoc = cls_associations[ind]
else:
ind = cls_ass_tr_dists.index(tr_min_dist)
cls_assoc = cls_associations[ind]
target_map[target] = cls_assoc
leftover = target_map.values()
leftover_assocs = [assoc for assoc in cls_associations if assoc not in leftover]
print(leftover_assocs)
return target_map, leftover_assocs
def collect_words(xml_string, target):
"""
Collect the words in an xml
:param xml_string: xml string input
:param target: Target class
:return: String of word list
"""
root = etree.fromstring(xml_string)
word_list = [x['text'] for x in get_words(root, target)]
return ' '.join(word_list)
def construct_single_df(html_f, target_cls, target_cls_association):
"""
Construct a single df of target class and target_class association
:param html_f: Path to html_file
:param target_cls: Target class file
:param target_cls_association: Association object
:return: Df
"""
target_map, leftover_assocs = get_target_map(html_f, target_cls, target_cls_association)
if target_map is None:
return None
with codecs.open(html_f, 'r', 'utf-8') as f:
soup = BeautifulSoup(f, 'html.parser')
df_dict = {'target_img_path': [], 'target_unicode': [], 'target_tesseract': [], 'assoc_img_path': [], 'assoc_unicode': [], 'assoc_tesseract': []}
for target in target_map:
target_assoc = target_map[target]
# The assumption made here is that we just need to extract text information from just the target
# but we'll get the path to the image for both of them
target_cls, target_bb, _ = target
target_img_path = None
target_unic = None
target_tess = None
for target_div in soup.find_all('div', target_cls):
hocr = target_div.find_next('div', 'hocr')
coordinates = hocr['data-coordinates']
spl = coordinates.split(' ')
spl = [int(x) for x in spl]
spl = tuple(spl)
if spl != target_bb:
continue
img = target_div.find_next('img')
target_img_path = str(img['src'])
tdiv = str(target_div)
target_unic = collect_words(tdiv, 'text_unicode')
target_tess = collect_words(tdiv, 'hocr')
break
# Sometimes there is no association to an object (Dangling caption).
# TODO: Decide what to do in this case
# For now, we will just add nans
assoc_img_path = None
assoc_unic = None
assoc_tess = None
if target_assoc is not None:
assoc_cls, assoc_bb, _ = target_assoc
for assoc_div in soup.find_all('div', assoc_cls):
hocr = assoc_div.find_next('div', 'hocr')
coordinates = hocr['data-coordinates']
spl = coordinates.split(' ')
spl = [int(x) for x in spl]
spl = tuple(spl)
if spl != assoc_bb:
continue
img = assoc_div.find_next('img')
assoc_img_path = str(img['src'])
adiv = str(assoc_div)
assoc_unic = collect_words(adiv, 'text_unicode')
assoc_tess = collect_words(adiv, 'hocr')
break
df_dict['target_img_path'].append(target_img_path)
df_dict['assoc_img_path'].append(assoc_img_path)
df_dict['target_unicode'].append(target_unic)
df_dict['assoc_unicode'].append(assoc_unic)
df_dict['target_tesseract'].append(target_tess)
df_dict['assoc_tesseract'].append(assoc_tess)
for assoc in leftover_assocs:
assoc_cls, assoc_bb, _ = assoc
for assoc_div in soup.find_all('div', assoc_cls):
hocr = assoc_div.find_next('div', 'hocr')
coordinates = hocr['data-coordinates']
spl = coordinates.split(' ')
spl = [int(x) for x in spl]
spl = tuple(spl)
if spl != assoc_bb:
continue
img = assoc_div.find_next('img')
assoc_img_path = str(img['src'])
adiv = str(assoc_div)
assoc_unic = collect_words(adiv, 'text_unicode')
assoc_tess = collect_words(adiv, 'hocr')
df_dict['target_img_path'].append(None)
df_dict['assoc_img_path'].append(assoc_img_path)
df_dict['target_unicode'].append(None)
df_dict['assoc_unicode'].append(assoc_unic)
df_dict['target_tesseract'].append(None)
df_dict['assoc_tesseract'].append(assoc_tess)
break
df = pd.DataFrame(df_dict)
df['html_file'] = os.path.basename(html_f)
return df
def construct(html_dir, target_cls, assoc_cls, output_file, processes=160):
"""
Construct the target <=> target association dataframe
:param html_dir: Input html
:param target_cls: Target class
:param assoc_cls: Target association class
:param output_file: Output path
:param processes: Number of processes
"""
results = []
if processes == 1:
results = [construct_single_df(f, target_cls, assoc_cls) for f in glob.glob(os.path.join(html_dir, '*.html'))]
else:
pool = mp.Pool(processes=processes)
ret = [pool.apply_async(construct_single_df, args=(f, target_cls, assoc_cls,)) for f in glob.glob(os.path.join(html_dir, '*.html'))]
results = [r.get() for r in ret]
results = [r for r in results if r is not None]
final_df = None
if len(results) > 0:
final_df = pd.concat(results)
if final_df is None:
print(f'{output_file} was not written as there were not any {target_cls} in the set of htmls')
return
final_df.to_csv(output_file, index=False)
@click.command()
@click.argument('html_dir')
@click.argument('target_cls')
@click.argument('assoc_cls')
@click.argument('output_file')
@click.option('--processes', help='Number of processes to spawn', default=160)
def construct_click(html_dir, target_cls, assoc_cls, output_file, processes):
    # Reconstructed: the decorated function body was missing from this record.
    construct(html_dir, target_cls, assoc_cls, output_file, processes=processes)


if __name__ == '__main__':
    construct_click()
| [
37811,
198,
42316,
50115,
14259,
329,
11291,
438,
11337,
6790,
507,
14,
33220,
1377,
8655,
6790,
507,
198,
37811,
198,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
11748,
28686,
198,
6738,
23042,
263,
13,
29572,
62,
3866,
14681,
1330,
6... | 2.11705 | 4,041 |
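A toy check of the nearest-caption distance heuristic used by get_target_map above (the boxes and the Euclidean metric are illustrative assumptions):

import math

figure_bb = (100, 100, 400, 300)                      # (x0, y0, x1, y1)
captions = [(100, 310, 400, 360), (100, 700, 400, 750)]
tl = (figure_bb[0], figure_bb[1])
# distance from the figure's top-left to each caption's bottom-left corner
dists = [math.hypot(c[0] - tl[0], c[3] - tl[1]) for c in captions]
print(captions[dists.index(min(dists))])              # the nearby caption wins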
import unittest
from exasol_integration_test_docker_environment.testing.utils import find_free_ports
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
409,
292,
349,
62,
18908,
1358,
62,
9288,
62,
45986,
62,
38986,
13,
33407,
13,
26791,
1330,
1064,
62,
5787,
62,
3742,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198... | 2.867925 | 53 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import textwrap
from contextlib import contextmanager
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import chmod_plus_x, touch
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
from twitter.common.collections import maybe_list
from pants.contrib.android.targets.android_binary import AndroidBinary
from pants.contrib.android.targets.android_library import AndroidLibrary
from pants.contrib.android.targets.android_resources import AndroidResources
from pants.contrib.android.targets.android_target import AndroidTarget
class TestAndroidBase(JvmToolTaskTestBase):
"""Base class for Android tests that provides some mock structures useful for testing.
:API: public
"""
@staticmethod
def android_manifest(package_name=None, target_sdk=None):
"""
:API: public
"""
package_name = package_name or 'org.pantsbuild.example.hello'
sdk = target_sdk or 19
manifest = textwrap.dedent(
"""<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="{}" >
<uses-sdk
android:minSdkVersion="8"
android:targetSdkVersion="{}" />
</manifest>
""".format(package_name, sdk))
return manifest
@contextmanager
def android_target(self, target_name=None, package_name=None, target_sdk=None, dependencies=None,
target_type=AndroidTarget, **kwargs):
"""Represent an Android target.
:API: public
"""
with temporary_file() as manifest:
manifest.write(self.android_manifest(package_name=package_name, target_sdk=target_sdk))
manifest.close()
target_name = target_name or 'target'
deps = dependencies or []
target = self.make_target(spec=':{}'.format(target_name),
target_type=target_type,
manifest=manifest.name,
dependencies=deps,
**kwargs)
yield target
@contextmanager
def android_binary(self, target_name=None, dependencies=None, package_name=None, target_sdk=None):
"""Represent an android_binary target.
:API: public
"""
with self.android_target(target_name=target_name or 'binary',
dependencies=dependencies,
package_name=package_name,
target_sdk=target_sdk,
target_type=AndroidBinary) as binary:
yield binary
@contextmanager
def android_resources(self, target_name=None, dependencies=None, package_name=None):
"""Represent an android_resources target.
:API: public
"""
with temporary_dir() as temp:
with self.android_target(target_name=target_name or 'resources',
dependencies=dependencies,
resource_dir=temp,
package_name=package_name,
target_type=AndroidResources) as resources:
yield resources
@contextmanager
def android_library(self, target_name=None, libraries=None, include_patterns=None,
exclude_patterns=None, dependencies=None, package_name=None):
"""Represent an android_library target.
:API: public
"""
with self.android_target(target_name=target_name or 'library',
libraries=libraries,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
dependencies=dependencies,
package_name=package_name,
target_type=AndroidLibrary) as library:
yield library
@contextmanager
def distribution(installed_sdks=('18', '19'),
installed_build_tools=('19.1.0', '20.0.0'),
files=('android.jar',),
executables=('aapt', 'zipalign')):
"""Mock Android SDK Distribution.
:API: public
:param tuple[strings] installed_sdks: SDK versions of the files being mocked.
:param tuple[strings] installed_build_tools: Build tools version of any tools.
:param tuple[strings] files: The files are to mock non-executables and one will be created for
each installed_sdks version.
:param tuple[strings] executables: Executables are any required tools and one is created for
each installed_build_tools version.
"""
with temporary_dir() as sdk:
for sdk_version in installed_sdks:
for android_file in files:
touch(os.path.join(sdk, 'platforms', 'android-' + sdk_version, android_file))
for version in installed_build_tools:
for exe in maybe_list(executables or ()):
path = os.path.join(sdk, 'build-tools', version, exe)
touch(path)
chmod_plus_x(path)
dx_path = os.path.join(sdk, 'build-tools', version, 'lib/dx.jar')
touch(dx_path)
yield sdk
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
1946,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
1... | 2.392377 | 2,230 |
#!/usr/bin/env python
"""
SmallNet.py defines a class that provides a network for person localization
Created 6/13/17.
"""
__author__ = "Alexander Ponamarev"
__email__ = "alex.ponamaryov@gmail.com"
import tensorflow as tf
from collections import namedtuple
from tensorflow import name_scope, variable_scope, stop_gradient
from .ObjectDetection import ObjectDetectionNet
Point = namedtuple('Point',['x', 'y'])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
18712,
7934,
13,
9078,
318,
257,
1398,
326,
3769,
257,
3127,
329,
7755,
42842,
198,
41972,
718,
14,
1485,
14,
1558,
13,
198,
37811,
198,
834,
9800,
834,
796,
366,
38708,
... | 3.178295 | 129 |
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# Classes and functions for reading and replaying log files.
import GUI.loggergui
import gobject
import gtk
import os
import sys
import core
import checkpoint
import loggers
import logutils
## log files being played back are executed in this namespace, so
## functions used in log files have to be defined here.
checkpoint_count = checkpoint.checkpoint_count
_threaded = True
# replaydelay is the delay time (in milliseconds) that is inserted
# between lines in the log file during playback, if there is no
# explicit delay ('pause' statement). The timing is not guaranteed to
# be precise.
replaydelay = 0
# Time in milliseconds to wait before retrying a checkpoint or looking
# for a window.
retrydelay = 100
# maxtries limits the number of times a line that raises a
# GtkLoggerTopFailure exception will be retried.
maxtries = 100
## See the README file for a description of the arguments to replay().
# A GUILogPlayer reads a log file of saved gui events and simulates them.
# A GUILogLineRunner is in charge of executing a single line of the
# gui log file.
_postponed = []
# Special handler for lines of the form "pause <time>". Such
# lines allow threads started by earlier commands to complete.
# The pause is guaranteed to be at least <time> milliseconds long.
####################
## Functions used within log files.
# findWidget. et al are defined in logutils so that they can easily be
# used elsewhere, too.
findWidget = logutils.findWidget
findAllWidgets = logutils.findAllWidgets
findMenu = logutils.findMenu
findCellRenderer = logutils.findCellRenderer
setComboBox = logutils.setComboBox
# Utility function for creating a gtk.gdk.Event object. "etype" must
# be an event type (gtk.gdk.BUTTON_PRESS, for example). "kwargs" can
# contain attributes of the event object. It almost certainly should
# include the "window" attribute, which must be set to a
# gtk.gdk.Window. (For gtk.Widgets, this is just Widget.window. For
# adopted GObjects, it's harder to get the correct gdk.Window into the
# log...)
####################
## replayDefine adds an object to the namespace used while replaying
## log files. If an externally defined GtkLogger needs to invoke an
## externally defined function during replay, that function (or the
## module containing it) should be injected into the namespace using
## replayDefine.
| [
2,
532,
9,
12,
21015,
532,
9,
12,
198,
198,
2,
770,
3788,
373,
4635,
416,
399,
8808,
11,
281,
4086,
286,
262,
471,
13,
50,
13,
1230,
11,
198,
2,
290,
416,
14195,
318,
407,
2426,
284,
6634,
287,
262,
1578,
1829,
13,
198,
2,
3... | 3.703022 | 761 |
from unittest.mock import patch
import pytest
from linnapi import exceptions, inventory
from linnapi.models import InventoryItemImage
from linnapi.requests.inventory import AddImageToInventoryItem
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
300,
3732,
15042,
1330,
13269,
11,
13184,
198,
6738,
300,
3732,
15042,
13,
27530,
1330,
35772,
25502,
198,
6738,
300,
3732,
15042,
13,
8897,
3... | 3.038835 | 103 |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Unlimited developers
"""
Tests the electrum call 'blockchain.transaction.get'
"""
import asyncio
from test_framework.util import assert_equal, p2p_port
from test_framework.electrumutil import ElectrumTestFramework, ElectrumConnection
from test_framework.nodemessages import ToHex
from test_framework.blocktools import create_transaction, pad_tx
from test_framework.script import (
CScript,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
OP_FALSE,
OP_HASH160,
OP_TRUE,
)
from test_framework.nodemessages import COIN
TX_GET = "blockchain.transaction.get"
DUMMY_HASH = 0x1111111111111111111111111111111111111111
if __name__ == '__main__':
ElectrumTransactionGet().main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
12131,
383,
6185,
26774,
6505,
198,
37811,
198,
51,
3558,
262,
1742,
6582,
869,
705,
9967,
7983,
13,
7645,
2673,
13,
1136,
6,
198,
37811,
198,
11748,
30351,... | 2.553459 | 318 |
#!/usr/bin/python
"""
==============================================================================
Author: Tao Li (taoli@ucsd.edu)
Date: Jul 8, 2015
Question: 233-Number-of-Digit-One
Link: https://leetcode.com/problems/number-of-digit-one/
==============================================================================
Given an integer n, count the total number of digit 1 appearing in all
non-negative integers less than or equal to n.
For example:
Given n = 13,
Return 6, because digit 1 occurred in the following numbers: 1, 10, 11, 12, 13.
==============================================================================
Method: math formula
Time Complexity: O(log n)
Space Complexity: O(1)
==============================================================================
"""
# @param {integer} n
# @return {integer}
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
23926,
25609,
855,
198,
13838,
25,
220,
220,
32120,
7455,
357,
8326,
11106,
31,
1229,
21282,
13,
15532,
8,
198,
10430,
25,
220,
220,
220,
220,
5979,
807,
11,
1853,
198,
24361,
2... | 4.153465 | 202 |
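Only the header of the solution survived in this record; a standard O(log n) digit-by-digit counting sketch (a reconstruction, not necessarily the author's exact code):

def count_digit_one(n):
    count, factor = 0, 1
    while factor <= n:
        higher = n // (factor * 10)
        cur = (n // factor) % 10
        lower = n % factor
        if cur == 0:
            count += higher * factor
        elif cur == 1:
            count += higher * factor + lower + 1
        else:
            count += (higher + 1) * factor
        factor *= 10
    return count

print(count_digit_one(13))  # 6, matching the example in the docstring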
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from mistraldashboard import api
| [
2,
15069,
1853,
43208,
21852,
1766,
1539,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
... | 3.97561 | 205 |
"""Split the Urban 3D dataset into train/dev/test and splits each image to 256x256 sub-tiles.
Original images have size (2048, 2048).
We do not resize images, but split them into smaller images i.e. each input image divided into 64 256x256 images
"""
import argparse
import os
from pathlib import Path
import skimage.io
import skimage.external.tifffile as skitiff
import glob
import random
import shutil
import numpy as np
import src.image_tools as it
HOME_PATH = str(Path.home())
DS_NAME = "SpaceNet Urban3D"
SIZE = 256
IMG_PER_DIM = 2048 // SIZE
DATA_ROOT = os.getenv("DATASET_ROOT", f"{HOME_PATH}/data")
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=f'{DATA_ROOT}/spacenet/urban3d', help=f"Directory with {DS_NAME} images, that has 01-Provisional_Train and other folders")
parser.add_argument('--output_dir', default=f'{DATA_ROOT}/spacenet-dataset/urban3d', help="Where we create dataset with of 256x256 images divided into train/dev/test")
parser.add_argument('--seed', default=-1, help="Random generator seed.")
def data_input_files(image_dirs, im_type = "rgb"):
"""
Returns list of files of specified type
im_type: should be RGB, or depth bufers DSM or DTM
"""
im_type = im_type.upper()
assert im_type in {"RGB", "DSM", "DTM", "GTI"}
pattern = os.path.join(image_dirs, f"*_Tile_*_{im_type}.tif")
yield from glob.glob(pattern)
if __name__ == '__main__':
args = parser.parse_args()
    assert os.path.isdir(args.data_dir), "Couldn't find the original SpaceNet Urban3D data at {}. Please download as defined in doc/datasets.md".format(args.data_dir)
# init random number generator
if args.seed != -1:
random.seed(args.seed)
# cleanup output folder
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
# TRAIN
# writing training files (X)
pattern = os.path.join(args.data_dir, "01-Provisional_Train", "Inputs", "*_RGB.tif")
input_files = glob.glob(pattern)
build_dataset_images(input_files, os.path.join(args.output_dir, "train", "inputs"), "train set")
# writing training files (Y labels)
src = os.path.join(args.data_dir, "01-Provisional_Train", "GT")
dest = os.path.join(args.output_dir, "train", "target")
build_dataset_label_images(input_files, src, dest, "train target set")
train_files, src, dest = None, None, None
# test files (we will split them into 2 equal dev and test parts randomly)
input_test_dir = os.path.join(args.data_dir, "02-Provisional_Test", "Inputs")
devtest_files = list(data_input_files(input_test_dir))
random.shuffle(devtest_files)
mid = len(devtest_files) // 2
dev_files, test_files = devtest_files[:mid], devtest_files[mid:]
# DEV
# writing dev set files (X)
dev_dir = os.path.join(args.output_dir, "dev", "inputs")
build_dataset_images(dev_files, dev_dir, "dev set")
# writing dev files (Y)
src = os.path.join(args.data_dir, "02-Provisional_Test", "GT")
dest = os.path.join(args.output_dir, "dev", "target")
build_dataset_label_images(dev_files, src, dest, "dev target set")
dev_files, dev_dir, src, dest = None, None, None, None
# TEST
# writing test set files (X)
test_dir = os.path.join(args.output_dir, "test", "inputs")
build_dataset_images(test_files, test_dir, "test set")
# writing test files (Y)
src = os.path.join(args.data_dir, "02-Provisional_Test", "GT")
dest = os.path.join(args.output_dir, "test", "target")
build_dataset_label_images(test_files, src, dest, "test target set")
| [
37811,
41205,
262,
14665,
513,
35,
27039,
656,
4512,
14,
7959,
14,
9288,
290,
30778,
1123,
2939,
284,
17759,
87,
11645,
850,
12,
83,
2915,
13,
198,
198,
20556,
4263,
423,
2546,
357,
1238,
2780,
11,
36117,
737,
198,
198,
1135,
466,
4... | 2.618909 | 1,375 |
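The 2048 -> 64x(256x256) arithmetic the docstring describes reduces to a double stride loop; a minimal sketch (the real build_dataset_images body was not included in this record):

import numpy as np

def split_into_tiles(img, size=256):
    h, w = img.shape[:2]
    return [img[r:r + size, c:c + size]
            for r in range(0, h, size)
            for c in range(0, w, size)]

tiles = split_into_tiles(np.zeros((2048, 2048, 3), dtype=np.uint8))
assert len(tiles) == (2048 // 256) ** 2  # 64 sub-tiles per input image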
import unicodedata
import codecs
import sys
import scipy as sp
import hmm
tbl = dict.fromkeys(i for i in xrange(sys.maxunicode)
if unicodedata.category(unichr(i)).startswith('P'))
def uniq(listinput):
""" This finds the unique elements of the list listinput. """
""" This will be provided for the student. """
output = []
for x in listinput:
if x not in output:
output.append(x)
return output
| [
11748,
28000,
9043,
1045,
198,
11748,
40481,
82,
198,
11748,
25064,
198,
11748,
629,
541,
88,
355,
599,
198,
11748,
289,
3020,
198,
198,
83,
2436,
796,
8633,
13,
6738,
13083,
7,
72,
329,
1312,
287,
2124,
9521,
7,
17597,
13,
9806,
46... | 2.66875 | 160 |
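For reference, tbl is built to be fed to unicode.translate (Python 2 style, matching the xrange/unichr calls above); a hedged usage sketch:

text = u"hello, world!"
print(text.translate(tbl))    # u'hello world' -- punctuation removed
print(uniq([1, 2, 2, 3, 1]))  # [1, 2, 3]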
# Copyright 2021 Tony Wu +https://github.com/tonywu7/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import pandas as pd
from .implementations.tfidf import Vector, get_similarity
| [
2,
15069,
33448,
8832,
18027,
1343,
5450,
1378,
12567,
13,
785,
14,
1122,
88,
43812,
22,
14,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
239... | 3.658031 | 193 |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import martian
import numpy as np
import sys
import cellranger.analysis.clustering as cr_clustering
import cellranger.analysis.graphclust as cr_graphclust
import cellranger.analysis.io as analysis_io
from cellranger.analysis.singlegenome import SingleGenomeAnalysis
import cellranger.h5_constants as h5_constants
import cellranger.analysis.constants as analysis_constants
from cellranger.logperf import LogPerf
import cellranger.io as cr_io
__MRO__ = """
stage RUN_GRAPH_CLUSTERING(
in h5 matrix_h5,
in h5 pca_h5,
in int num_neighbors "Use this many neighbors",
in float neighbor_a "Use larger of (a+b*log10(n_cells) neighbors or num_neighbors",
in float neighbor_b "Use larger of (a+b*log10(n_cells) neighbors or num_neighbors",
in int num_bcs "Use this many cell-barcodes in clustering",
in int input_pcs "Use top N PCs",
in int balltree_leaf_size,
in string similarity_type "Type of similarity to use (nn or snn)",
in bool skip,
out h5 chunked_neighbors,
out h5 clusters_h5,
out path clusters_csv,
src py "stages/analyzer/run_graph_clustering",
) split using (
in pickle neighbor_index,
in h5 submatrix,
in int row_start,
in int total_rows,
in int k_nearest,
in h5 use_bcs,
)
"""
# 1e6 cells => ~64 chunks
NN_QUERIES_PER_CHUNK = 15000
DEFAULT_BALLTREE_LEAFSIZE = 40
# Memory usage in join, empirically determined
NN_ENTRIES_PER_MEM_GB = 5000000
# Unweighted nearest neighbor (boolean: is-nearest-neighbor)
NN_SIMILARITY = 'nn'
# Shared nearest neighbor (fraction of neighbors shared)
SNN_SIMILARITY = 'snn'
SIMILARITY_TYPES = [NN_SIMILARITY, SNN_SIMILARITY]
# TODO: Martian needs to provide a way to give split more memory.
# Workaround is mrp --overrides
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
838,
55,
5215,
31994,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
11748,
11277,
666,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
... | 2.423792 | 807 |
import os
import logging
logging.basicConfig(
filename=".last_run.log",
filemode="w",
format="%(asctime)s %(levelname)s: %(message)s",
level=logging.DEBUG
)
# Must be done before importing Pygame:
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
os.environ["SDL_VIDEO_CENTERED"] = "1"
from src.game import Game
if __name__ == "__main__":
logging.info("Initialize new game.")
game = Game("main menu")
game.run()
logging.info("Game ended.")
| [
11748,
28686,
198,
11748,
18931,
628,
198,
6404,
2667,
13,
35487,
16934,
7,
198,
220,
220,
220,
29472,
28,
1911,
12957,
62,
5143,
13,
6404,
1600,
198,
220,
220,
220,
2393,
14171,
2625,
86,
1600,
198,
220,
220,
220,
5794,
2625,
4,
7,... | 2.486911 | 191 |
r"""
**apyce.io**
----
This module contains functionality for exporting data to VTK
----
+----------------+------------------------------------------------------------+
| Format | Description |
+================+============================================================+
| VTK | The Visualization Toolkit (VTK) format defined by Kitware |
| | and used by ParaView |
+----------------+------------------------------------------------------------+
"""
from .VTK import VTK | [
81,
37811,
198,
198,
1174,
12826,
344,
13,
952,
1174,
198,
198,
650,
198,
198,
1212,
8265,
4909,
11244,
329,
39133,
1366,
284,
32751,
42,
198,
198,
650,
198,
198,
10,
1783,
10,
47232,
10541,
10,
198,
91,
220,
220,
220,
220,
18980,
... | 2.739726 | 219 |
from utils import filter
| [
6738,
3384,
4487,
1330,
8106,
198
] | 4.166667 | 6 |
import docx
import json
import pyperclip
# Concatenate role/company for keys below.
# Order: URL, img, skills list
# Read bullet points to check for skills.
extra_info = {
"IT Associate [Placement]PwC": [
"http://www.pwc.com/",
"img/experience/pwc.jpeg",
["Python", "HTML5", "CSS3", "Bootstrap", "JavaScript", "JSON", "Chrome Extensions", "APIs", "Postman", "Reverse Engineering", "Google Analytics", "Alteryx", "NLP", "Internet of Things", "Microsoft Azure", "Teaching", "Parallel Programming", "Firebase", "Electronics", "Agile"]
],
"Freelance Technology Support": [
"",
"img/experience/freelance.png",
["Python", "JSON", "Microsoft Excel"]
],
"Camp CounsellorAmeriCamp": [
"https://www.americamp.co.uk",
"img/experience/americamp.png",
[]
],
"Technical AssistantNTSU": [
"https://www.trentstudents.org",
"img/experience/ntsu.jpg",
["Electronics"]
],
"Recruitment InternDriver Hire": [
"https://www.driverhire.co.uk/office/grimsby-scunthorpe/",
"img/experience/driverhire.png",
["HTML5", "CSS3", "Teaching", "Microsoft Excel", "Visual Basic"]
],
"Student MemberInstitute of Directors": [
"https://www.iod.com",
"img/experience/iod.jpg",
[]
],
"National Award WinnerCareer Ready": [
"https://www.careerready.org.uk",
"img/experience/careerready.png",
[]
],
}
doc = docx.Document('CV.docx')
tables = doc.tables
# Creating json
data = {}
def json_cv(section, table_index):
    # Reconstructed helper (original body missing): pulls rows out of a docx
    # table into the data dict; the exact cell layout is an assumption.
    section_data = {}
    for row in tables[table_index].rows:
        cells = [cell.text for cell in row.cells]
        if cells:
            section_data[cells[0]] = cells[1:]
    data[section] = section_data


json_cv('work', 2)
json_cv('achievements', 3)
json_data = json.dumps(data, indent=4)
#print(json_data)
print("\nThe JSON has been copied to your clipboard.")
pyperclip.copy(json_data)
| [
11748,
2205,
87,
198,
11748,
33918,
198,
11748,
12972,
525,
15036,
198,
198,
2,
1482,
9246,
268,
378,
2597,
14,
39722,
329,
8251,
2174,
13,
198,
2,
8284,
25,
10289,
11,
33705,
11,
4678,
1351,
198,
2,
4149,
10492,
2173,
284,
2198,
32... | 2.357047 | 745 |
# Generated by Django 2.1.7 on 2019-07-04 14:33
from django.db import migrations, models
import users.models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
2998,
12,
3023,
1478,
25,
2091,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
2985,
13,
27530,
628
] | 3 | 37 |
from os import path
import pytest
import menu
import ufo
import pygame
@pytest.fixture
| [
6738,
28686,
1330,
3108,
198,
11748,
12972,
9288,
198,
11748,
6859,
198,
11748,
334,
6513,
198,
11748,
12972,
6057,
198,
198,
31,
9078,
9288,
13,
69,
9602,
628
] | 3.178571 | 28 |
import time
| [
11748,
640,
198
] | 4 | 3 |
__all__ = [
'HelpOperation',
'ListOperation',
'UploadOperation',
'DownloadOperation',
'InfoOperation',
'DeleteOperation',
]
from .help import HelpOperation
from .list import ListOperation
from .upload import UploadOperation
from .download import DownloadOperation
from .info import InfoOperation
from .delete import DeleteOperation
def make(command, group_id, args, options):
"""
Generate operation class from arguments.
:param command: command string
:param args: list of argument
:param options: dict of options
:return: Raise AssertionError when failure
"""
if command == 'list':
return ListOperation(group_id, args, options['output'])
if command == 'upload':
return UploadOperation(group_id, args, options['force'], options['print_only'])
if command == 'download':
return DownloadOperation(group_id, args, options['print_only'])
if command == 'info':
return InfoOperation(group_id, args, options['output'])
if command == 'delete':
return DeleteOperation(group_id, args, options['print_only'])
raise AssertionError('Unknown command: %s' % command)
| [
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
22087,
32180,
3256,
198,
220,
220,
220,
705,
8053,
32180,
3256,
198,
220,
220,
220,
705,
41592,
32180,
3256,
198,
220,
220,
220,
705,
10002,
32180,
3256,
198,
220,
220,
220,
705,
123... | 3.117333 | 375 |
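A hedged usage sketch of the make() factory above (how the returned Operation is executed is an assumption from context):

options = {'output': 'json', 'force': False, 'print_only': True}
op = make('upload', 'group-1', ['artifact.bin'], options)
# each *Operation object is then presumably executed by the caller, e.g. op.run()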
"""
Utilities for AAE
"""
import torch
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from visdom import Visdom
from ....common import FloatTensor
class SampleImage(object):
""" Make a grid and plot a class of image on one row """
| [
37811,
198,
18274,
2410,
329,
317,
14242,
198,
37811,
198,
198,
11748,
28034,
198,
6738,
1341,
35720,
13,
805,
361,
727,
1330,
26136,
12161,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
6738,
1490,
3438,
1330,
6911... | 3.35443 | 79 |
_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
# We also need to change the num_classes in head to match the dataset's annotation
classes=('pig', 'person', 'pig_head', 'pig_hip') # The pig/person split is not usable anyway; a dedicated pig annotation would certainly be more accurate, so this pig class is not used (currently the pig class cannot be used at all, because the pigs in the dorm data sit inside the mask)
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=len(classes)),
mask_head=dict(num_classes=len(classes))),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.6,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5))
)
# Modify dataset related settings
dataset_type = 'COCODataset'
dir_1 = "../dataset/1all_dorm+cut_safe/"
dir_1_1 = "../dataset/1all_dorm_BYZ+cut_safe/"
dir_1_2 = "../dataset/1huiyan_dorm_raw+cut_safe/"
dir_2 = "../dataset/2all_passage+cut_safe/"
dir_3 = "../dataset/3all_stage+cut_safe/"
dir_4 = "../dataset/4all_weights+cut_safe/" # not an ROI cut, but heads and hips are annotated
dir_roi = "../dataset/1all_dorm_BYZ_roi+cut_safe/" # heads and hips annotated
dir_roi_1 = "../dataset/1huiyan_dorm_roi+cut_safe/" # heads and hips annotated
dir_roi_3 = "../dataset/4all_weights_BYZ_roi+cut_safe/" # heads and hips annotated
# head and hip
pig_head_hip_dirs = [dir_roi, dir_roi_1, dir_4, dir_roi_3]
safe_head_and_hip_prefix_train = [i+'/train/' for i in pig_head_hip_dirs]
safe_head_and_hip_prefix_val = [i+'/val/' for i in pig_head_hip_dirs]
safe_head_and_hip_ann_train = [i+'/annotation_coco.json' for i in safe_head_and_hip_prefix_train]
safe_head_and_hip_ann_val = [i+'/annotation_coco.json' for i in safe_head_and_hip_prefix_val]
test_dir = "/home/lmw/leemengwei/dataset_others/HGXG/new_baoyu_pic/"
# We can use the pre-trained Mask RCNN model to obtain higher performance
load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
#dict(type='CopyPaste', blend=True, sigma=1, pct_objects_paste=0.5, p=1),
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='CocoDataset',
# ann_file= safe_head_and_hip_ann_train,
# img_prefix=safe_head_and_hip_prefix_train,
ann_file=test_dir + '/test_loop_mannual.json', # loop train
img_prefix=test_dir,
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1080, 1080)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
# dict(
# type='Albu',
# transforms=albu_train_transforms,
# bbox_params=dict(
# type='BboxParams',
# format='pascal_voc',
# label_fields=['gt_labels'],
# min_visibility=0.0,
# filter_lost_elements=True),
# keymap={
# 'img': 'image',
# 'gt_masks': 'masks',
# 'gt_bboxes': 'bboxes'
# },
# update_pad_shape=False,
# skip_img_without_anno=True),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
],
classes=classes),
val=dict(
type='CocoDataset',
#ann_file = safe_pigs_ann_val,
ann_file = safe_head_and_hip_ann_val,
#img_prefix=safe_pigs_prefix_val,
img_prefix=safe_head_and_hip_prefix_val,
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1080, 1080),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
#dict(type='ImageToTensor', keys=['img']),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
],
classes=classes),
test=dict(
type='CocoDataset',
#ann_file = safe_pigs_ann_val,
#ann_file = safe_head_and_hip_ann_val,
#img_prefix=safe_pigs_prefix_val,
#img_prefix=safe_head_and_hip_prefix_val,
ann_file=test_dir + '/test_loop_mannual.json',
img_prefix=test_dir,
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=[(1080,1080)],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[103.53, 116.28, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False),
dict(type='Pad', size_divisor=32),
#dict(type='ImageToTensor', keys=['img']),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
],
classes=classes))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000,
warmup_ratio=0.001,
step=[150, 175, 185])
#fp16=True
runner = dict(type='EpochBasedRunner', max_epochs=200)
checkpoint_config = dict(interval=10)
log_config = dict(
interval=10)
workflow = [('train', 1), ('val', 1)]
| [
62,
8692,
62,
796,
705,
40720,
27932,
62,
6015,
20471,
14,
27932,
62,
6015,
20471,
62,
81,
1120,
62,
66,
21223,
62,
69,
21999,
62,
76,
2536,
391,
12,
35428,
62,
16,
87,
62,
66,
25634,
13,
9078,
6,
201,
198,
2,
775,
635,
761,
2... | 1.68444 | 4,595 |
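This record is an mmdetection-style config; loading it goes through mmcv's Config (the file path here is hypothetical):

from mmcv import Config

cfg = Config.fromfile('configs/pig/mask_rcnn_r50_pig.py')  # hypothetical path
print(cfg.model.roi_head.bbox_head.num_classes)            # 4, from classes above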
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by HazzaCheng on 2020-05-11
import torch
import torch.nn.functional as F
from torch import nn
from torch_geometric.nn import GATConv, ARMAConv, SAGEConv
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15622,
416,
17064,
4496,
7376,
782,
319,
12131,
12,
2713,
12,
1157,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
... | 2.617284 | 81 |
import time
import blockchain.Transaction
from blockchain.Block import Block
| [
11748,
640,
198,
198,
11748,
11779,
13,
48720,
198,
6738,
11779,
13,
12235,
1330,
9726,
198
] | 4.875 | 16 |
# Functions for orbit prediction
import model
import perturbation
import transform
import numpy as np
from scipy.integrate import solve_ivp
#
# function predict:
# input:
# elements - elements of satellite orbit, defined in model.py
# ts - array of time points
# flag - whether to consider perturbation
# return:
# list of elements at each predict point
#
#
# function change:
# input:
# elements - elements of orbit
# t - current time
# flag - whether to consider perturbations
# return:
# derivatives of elements
#
| [
2,
40480,
329,
13066,
17724,
201,
198,
201,
198,
11748,
2746,
201,
198,
11748,
22146,
5945,
341,
201,
198,
11748,
6121,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
629,
541,
88,
13,
18908,
4873,
1330,
8494,
62,
... | 2.316901 | 284 |
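Only the docstrings of predict/change survived in this record; from them and the solve_ivp import, a hedged reconstruction of predict might look like (element layout and perturbation handling are assumptions):

# def predict(elements, ts, flag):
#     sol = solve_ivp(lambda t, y: change(y, t, flag),
#                     (ts[0], ts[-1]), elements, t_eval=ts)
#     return sol.y.T  # one row of orbital elements per time point in ts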
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from azure.cognitiveservices.vision import computervision
from msrest import authentication
from dotenv import load_dotenv
# Load the API key and endpoint from the .env file
load_dotenv()
API_KEY = os.getenv('API_KEY')
ENDPOINT = os.getenv('ENDPOINT')
# Captures an image from a USB web cam and saves it as filename
# This code assumes you only have one web cam attached
# Captures an image from a Raspberry Pi camera and saves it as filename
# The filename to save the image to
# Images are saved instead of just being analysed for text directly
# to help with debugging - check the image.png file saved in the current
# folder to debug what the camera is actually seeing. This is great to
# help with things like making sure the camera is the right way up.
filename = 'image.png'
# Uncomment the function calls depending on what camera you are using
# capture_image_from_usb(filename)
# capture_image_from_picamera(filename)
# Create the computer vision client using the API key and endpoint
credentials = authentication.CognitiveServicesCredentials(API_KEY)
computervision_client = computervision.ComputerVisionClient(ENDPOINT, credentials)
# Open the image file for reading
with open(filename, "rb") as image:
# Look for printed text in the image
result = computervision_client.recognize_printed_text_in_stream(image)
# The results come back in regions - defined areas in the image containing
# text on one or more lines, with one or more words per line
# To make it easier to see the results, this code flattens all the words
# in all the lines in all the regions into one array of words
text_words = []
for region in result.regions:
for line in region.lines:
for word in line.words:
text_words.append(word.text)
# Show the detected words on the console
print(text_words)
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
11748,
28686,
198,
6738,
35560,
495,
13,
66,
2360,
20288,
712,
1063,
13,
10178,
1330,
2653,
712,
1166,
198,
6738,
13845,
2118,
1330,
18239,
1... | 3.451327 | 565 |
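If the line grouping matters downstream, the same result object can be walked without flattening (same attributes as used above):

# for region in result.regions:
#     for line in region.lines:
#         print(' '.join(word.text for word in line.words))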
import hashlib
import logging
import re
logger = logging.getLogger('main.util')
| [
11748,
12234,
8019,
198,
11748,
18931,
198,
11748,
302,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
10786,
12417,
13,
22602,
11537,
628,
628,
628
] | 3.269231 | 26 |
'''
This code works as simulator.
Main values: brightness, time, date, temperature, humidity.
Users values: message.
'''
import datetime
from datetime import datetime
import random
################################## PLANT SETTINGS ##################################
plant_last_watering_day = "2021-01-12"
plant_watering_range_days = 7
plant_temperature = [20, 25]
plant_light_level = 800
plant_temp_limit = 5
plant_light_limit = 100
################################## SYSTEM SETTINGS ##################################
set_lamp = 0
set_pump = 0
################################## HELPER FUNCTIONS ##################################
################################## BEGIN OF STATES ##################################
# 1. Everything is OK, the plant is satisfied. Daytime, bright.
# 2. Too dark, not enough light.
# 3. Cold, action from the user is required.
################################## END OF STATES ##################################
def state_too_dark(enabled):
    # Hedged stub: the original state-handler bodies were not included.
    global set_lamp
    set_lamp = 1 if enabled else 0

state_too_dark(True) | [
7061,
6,
220,
198,
1212,
2438,
2499,
355,
35375,
13,
198,
13383,
3815,
25,
22204,
11,
640,
11,
3128,
11,
5951,
11,
27716,
13,
198,
14490,
3815,
25,
3275,
13,
198,
7061,
6,
198,
11748,
4818,
8079,
198,
6738,
4818,
8079,
1330,
4818,
... | 2.92145 | 331 |
import asyncio
import collections
import gc
import logging
import psycopg2
import pytest
import re
import socket
import sys
import time
import uuid
import warnings
from docker import Client as DockerClient
import aiopg
from aiopg import sa
@pytest.fixture(scope='session')
@pytest.yield_fixture
@pytest.mark.tryfirst
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
"""
Run asyncio marked test functions in an event loop instead of a normal
function call.
"""
if 'run_loop' in pyfuncitem.keywords:
funcargs = pyfuncitem.funcargs
loop = funcargs['loop']
testargs = {arg: funcargs[arg]
for arg in pyfuncitem._fixtureinfo.argnames}
loop.run_until_complete(pyfuncitem.obj(**testargs))
return True
@pytest.fixture(scope='session')
def session_id():
'''Unique session identifier, random string.'''
return str(uuid.uuid4())
@pytest.fixture(scope='session')
@pytest.yield_fixture(scope='session')
@pytest.fixture
@pytest.yield_fixture()
@pytest.yield_fixture()
@pytest.yield_fixture
@pytest.yield_fixture()
class _AssertWarnsContext:
"""A context manager used to implement TestCase.assertWarns* methods."""
_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
["records", "output"])
class _CapturingHandler(logging.Handler):
"""
A logging handler capturing all (raw and formatted) logging output.
"""
class _AssertLogsContext:
"""A context manager used to implement TestCase.assertLogs()."""
LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
@pytest.yield_fixture
@pytest.yield_fixture
| [
11748,
30351,
952,
198,
11748,
17268,
198,
11748,
308,
66,
198,
11748,
18931,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
12972,
9288,
198,
11748,
302,
198,
11748,
17802,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
334,
27112,
198,
... | 2.590214 | 654 |
from django import forms
from django.utils.datetime_safe import datetime
from vacation_schedule.models import EmployeeVacationPeriod
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
19608,
8079,
62,
21230,
1330,
4818,
8079,
198,
6738,
14600,
62,
15952,
5950,
13,
27530,
1330,
36824,
53,
330,
341,
5990,
2101,
628
] | 3.828571 | 35 |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .destination import DestinationHadoop
__all__ = ["DestinationHadoop"]
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
3701,
26327,
11,
3457,
1539,
477,
2489,
10395,
13,
198,
2,
628,
198,
6738,
764,
16520,
1883,
1330,
45657,
39,
4533,
404,
198,
198,
834,
439,
834,
796,
14631,
24159,
1883,
39,
4533,
404,
8973,
1... | 3.159091 | 44 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @author:Spring
import tensorflow as tf
import os
import tarfile
import requests
inception_pretrain_model_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# Download the Inception pretrain model
inception_pretrain_model_dir = "inception_pretrain"
if not os.path.exists(inception_pretrain_model_dir):
os.makedirs(inception_pretrain_model_dir)
filename = inception_pretrain_model_url.split('/')[-1]
filepath = os.path.join(inception_pretrain_model_dir, filename)
if not os.path.exists(filepath):
print("开始下载: ", filename)
r = requests.get(inception_pretrain_model_url, stream=True)
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
print("下载完成, 开始解压: ", filename)
tarfile.open(filepath, 'r:gz').extractall(inception_pretrain_model_dir)
# TensorBoard log directory
log_dir = 'inception_log'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Load the Inception graph
inception_graph_def_file = os.path.join(inception_pretrain_model_dir, 'classify_image_graph_def.pb')
with tf.Session() as sess:
with tf.gfile.FastGFile(inception_graph_def_file, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
writer = tf.summary.FileWriter(log_dir, sess.graph)
# writer = tf.train.SummaryWriter(log_dir, sess.graph)
writer.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
9800,
25,
30387,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
28686,
198,
11748,
13422,
7753,
198,
... | 2.281346 | 654 |
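Once the script has run, the exported graph can be browsed with the standard TensorBoard CLI:

# tensorboard --logdir inception_log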
import json
import logging
import os
from string import Template
from ldap3 import Connection, SIMPLE
from ldap3.core.exceptions import LDAPAttributeError
from auth import auth_base
from model import model_helper
from utils import file_utils
from utils.string_utils import strip
KNOWN_REJECTIONS = [
"invalidCredentials",
"user name is mandatory in simple bind",
"password is mandatory in simple bind"]
LOGGER = logging.getLogger('script_server.LdapAuthorizer')
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
4731,
1330,
37350,
198,
198,
6738,
300,
67,
499,
18,
1330,
26923,
11,
23749,
16437,
198,
6738,
300,
67,
499,
18,
13,
7295,
13,
1069,
11755,
1330,
27178,
2969,
33682,
12331,
... | 3.354167 | 144 |
import numpy as np
from autogluon.features.generators import OneHotEncoderFeatureGenerator
| [
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1960,
49006,
261,
13,
40890,
13,
8612,
2024,
1330,
1881,
21352,
27195,
12342,
38816,
8645,
1352,
628,
198
] | 3.518519 | 27 |
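A hedged usage sketch of the imported generator (fit_transform is the standard entry point on autogluon feature generators; the data is invented):

import pandas as pd

gen = OneHotEncoderFeatureGenerator()
X = pd.DataFrame({'color': ['red', 'blue', 'red']})
print(gen.fit_transform(X))  # one indicator column per category value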
# Generated by Django 3.1.3 on 2020-11-26 00:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
18,
319,
12131,
12,
1157,
12,
2075,
3571,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from finding_parents_method import finding_parents
# import lxml library
from lxml import etree as ElementTree
# generate data for input later
file = open('country_data.xml')
data = file.read()
tree = ElementTree.fromstring(data)
rank = tree.xpath('.//rank')[0]
parents = finding_parents(rank)
print(parents)
| [
6738,
4917,
62,
23743,
62,
24396,
1330,
4917,
62,
23743,
198,
2,
1330,
300,
19875,
5888,
198,
6738,
300,
19875,
1330,
2123,
631,
355,
11703,
27660,
198,
2,
2429,
567,
263,
1366,
410,
2675,
5128,
1568,
198,
7753,
796,
1280,
10786,
1931... | 3.20202 | 99 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['GoogleIdentityProviderArgs', 'GoogleIdentityProvider']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.488189 | 127 |
from .steam import Steam
| [
6738,
764,
21465,
1330,
9094,
198
] | 4.166667 | 6 |
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
student = torch.tensor([[0,1,2],[2,3,4],[2,3,5]], dtype=torch.float)
teacher = torch.tensor([[1,2,2],[3,4,5],[3,4,0]], dtype=torch.float)
student2 = torch.tensor([[[0,1],[0,3]]], dtype=torch.float)
teacher2 = torch.tensor([[[1,2],[4,3]]], dtype=torch.float)
a = torch.tensor([[0,1]], dtype=torch.float)
b = torch.tensor([[1,2]], dtype=torch.float)
def cosine_similarity_loss(student, teacher):
    # Assumed definition (the original body was not included): mean of
    # 1 - cosine similarity across corresponding rows.
    return (1 - F.cosine_similarity(student, teacher, dim=-1)).mean()

cosine_similarity_loss(student, teacher) | [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
279,
9945,
628,
198,
50139,
796,
28034,
13,
83,
22854,
2... | 2.294118 | 221 |
import sys
import tempfile
from dataclasses import asdict
import tensorflow as tf
from absl import logging
from official.utils.misc import distribution_utils
from smart_compose.args import SmartComposeArg
from smart_compose.train import train
from smart_compose.utils import distributed_utils, parsing_utils
def main(argv):
""" This is the main method for training the model.
:param argv: training parameters
"""
argument = SmartComposeArg.__from_argv__(argv[1:], error_on_unknown=False)
logging.set_verbosity(logging.INFO)
logging.info(f"Args:\n {argument}")
hparams = argument
strategy = distribution_utils.get_distribution_strategy(hparams.distribution_strategy, num_gpus=hparams.num_gpu, all_reduce_alg=hparams.all_reduce_alg)
logging.info(f"***********Num replica: {strategy.num_replicas_in_sync}***********")
create_output_dir(hparams.resume_training, hparams.out_dir, strategy)
save_hparams(hparams.out_dir, parsing_utils.HParams(**asdict(argument)), strategy)
logging.info("***********Smart Compose Training***********")
return train.train(strategy, hparams)
def save_hparams(out_dir, hparams, strategy):
"""Saves hparams to out_dir"""
is_chief = distributed_utils.is_chief(strategy)
if not is_chief:
out_dir = tempfile.mkdtemp()
parsing_utils.save_hparams(out_dir, hparams)
if not is_chief:
tf.io.gfile.remove(parsing_utils._get_hparam_path(out_dir))
def create_output_dir(resume_training, out_dir, strategy):
"""Creates output directory if not exists"""
is_chief = distributed_utils.is_chief(strategy)
if not is_chief:
out_dir = tempfile.mkdtemp()
if not resume_training:
if tf.io.gfile.exists(out_dir):
logging.info("Removing previous output directory...")
tf.io.gfile.rmtree(out_dir)
# If output directory deleted or does not exist, create the directory.
if not tf.io.gfile.exists(out_dir):
logging.info('Creating dirs recursively at: {0}'.format(out_dir))
tf.io.gfile.makedirs(out_dir)
if not is_chief:
tf.io.gfile.rmtree(out_dir)
if __name__ == '__main__':
main(sys.argv)
| [
11748,
25064,
198,
11748,
20218,
7753,
198,
6738,
4818,
330,
28958,
1330,
355,
11600,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
2352,
75,
1330,
18931,
198,
6738,
1743,
13,
26791,
13,
44374,
1330,
6082,
62,
26791,
198,
1... | 2.658981 | 824 |
## Greeter
## timed_block
from time import time
## Closer
## Database
## contextlib
from contextlib import contextmanager
@contextmanager
| [
2235,
11955,
2357,
628,
198,
2235,
28805,
62,
9967,
198,
6738,
640,
1330,
640,
628,
198,
198,
2235,
1012,
13416,
628,
198,
2235,
24047,
628,
198,
2235,
4732,
8019,
198,
6738,
4732,
8019,
1330,
4732,
37153,
628,
198,
31,
22866,
37153,
... | 3.547619 | 42 |
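The section headers above suggest a timed_block context manager whose body was dropped; a hedged reconstruction:

from contextlib import contextmanager
from time import time

@contextmanager
def timed_block(name):
    start = time()
    try:
        yield
    finally:
        print('{} took {:.3f}s'.format(name, time() - start))

# with timed_block('work'):
#     sum(range(10 ** 6))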
import math
import sklearn
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.autograd import Variable
import torch.nn.functional as F
import sklearn.metrics
class fcbr(nn.Module):
""" fc-bn-relu
[B, Fin] -> [B, Fout]
"""
class fcdbr(nn.Module):
""" fc-dp-bn-relu
[B, Fin] -> [B, Fout]
"""
class conv1dbr(nn.Module):
""" Conv1d-bn-relu
[B, Fin, N] -> [B, Fout, N]
"""
class conv2dbr(nn.Module):
""" Conv2d-bn-relu
[B, Fin, H, W] -> [B, Fout, H, W]
"""
| [
11748,
10688,
198,
11748,
1341,
35720,
198,
11748,
28034,
220,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
13,
17143,
2357,
1330,
25139,
2357,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748,
... | 2.172691 | 249 |
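The four classes above kept only their docstrings; a hedged reconstruction of the first one (layer choices assumed from the name fc-bn-relu):

import torch.nn as nn
import torch.nn.functional as F

class fcbr_sketch(nn.Module):
    """ fc-bn-relu sketch: [B, Fin] -> [B, Fout] (reconstruction, layers assumed) """
    def __init__(self, Fin, Fout):
        super(fcbr_sketch, self).__init__()
        self.fc = nn.Linear(Fin, Fout)
        self.bn = nn.BatchNorm1d(Fout)

    def forward(self, x):
        return F.relu(self.bn(self.fc(x)))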
import numpy as np
from PIL import Image
import os, sys
import argparse
from sklearn.metrics import mean_absolute_error as compare_mae
from skimage.measure import compare_psnr
from skimage.measure import compare_ssim
from labels import labels
import scipy, skimage
from scipy.spatial import KDTree
from sklearn.metrics import confusion_matrix
import caffe
from util import *
parser = argparse.ArgumentParser()
parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
parser.add_argument("--version", type=str, default='test_latest', help="Version of model")
parser.add_argument("--which_direction", type=str, default='AtoB', help="direction")
parser.add_argument("--caffemodel_dir", type=str, default='./scripts/caffemodel/', help="Where the FCN-8s caffemodel stored")
args = parser.parse_args()
# Set paths
img_path = os.path.join(args.result_dir, args.version, 'images')
output_file = os.path.join(args.result_dir, args.version, 'scores.txt')
# Load model
#caffe.set_mode_cpu();
caffe.set_device(0)
caffe.set_mode_gpu()
net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
caffe.TEST)
with open(output_file, 'w') as f:
# Load data
real_As = sorted([os.path.join(img_path, x) for x in os.listdir(img_path) if 'real_A' in x])
fake_Bs = sorted([os.path.join(img_path, x) for x in os.listdir(img_path) if 'fake_B' in x])
real_Bs = sorted([os.path.join(img_path, x) for x in os.listdir(img_path) if 'real_B' in x])
fake_As = sorted([os.path.join(img_path, x) for x in os.listdir(img_path) if 'fake_A' in x])
is_forward = True
is_backward = True if len(fake_As) > 0 else False
f.write('Model: {}\tVersion: {}\tFwrd: {} \tBwrd: {}\n'.format(args.result_dir, args.version, len(fake_Bs), len(fake_As)))
# Direction
if args.which_direction == 'BtoA':
is_forward, is_backward = is_backward, is_forward
real_As, real_Bs, fake_As, fake_Bs = real_Bs, real_As, fake_Bs, fake_As
# Forward
if is_forward:
print('forward...')
n_cl = 19
hist_perframe = np.zeros((n_cl, n_cl))
for i, (fakeb_path, realb_path) in enumerate(zip(fake_Bs, real_Bs)):
print(i, len(fake_Bs))
fakeb = np.array(Image.open(fakeb_path))
realb = np.array(Image.open(realb_path))
hist_perframe += confusion_matrix(color2id(fakeb).flatten(), color2id(realb).flatten(), labels=range(n_cl))
per_pixel_acc, per_class_acc, class_iou, _, _ = get_scores(hist_perframe)
f.write('\nCLASSIFICATION SCORES\n')
f.write('Per Pixel acc.: %s\n' % per_pixel_acc)
f.write('Per-class acc.: %s\n' % per_class_acc)
f.write('Class IOU: %s\n' % class_iou)
# Backward
if is_backward:
print('backward...')
n_cl = 19
hist_perframe = np.zeros((n_cl, n_cl))
for i, (fakea_path, realb_path) in enumerate(zip(fake_As, real_Bs)):
print(i, len(fake_As))
fakea = np.array(Image.open(fakea_path))
realb = np.array(Image.open(realb_path))
print(fakea.shape)
print('fwd')
fakea_segmented = segment_image(fakea)
print('res')
y_pred = scipy.misc.imresize(fakea_segmented, (128, 128))
y_true = color2id(realb)
print('conf')
hist_perframe += confusion_matrix(y_pred.flatten(), y_true.flatten(), labels=range(n_cl))
per_pixel_acc, per_class_acc, class_iou, _, _ = get_scores(hist_perframe)
f.write('\nFCN-SCORES\n')
f.write('Per Pixel acc.: %s\n' % per_pixel_acc)
f.write('Per-class acc.: %s\n' % per_class_acc)
f.write('Class IOU: %s\n' % class_iou)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28686,
11,
25064,
198,
198,
11748,
1822,
29572,
220,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
1612,
62,
48546,
62,
18224,
355,
8996,
62,
2611,
68,
198,
... | 2.163807 | 1,807 |
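For reference, the per-class IoU that the imported get_scores presumably derives from hist_perframe follows the standard confusion-matrix formula:

# tp = np.diag(hist_perframe)
# class_iou = tp / (hist_perframe.sum(axis=0) + hist_perframe.sum(axis=1) - tp + 1e-12)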
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from inspect import isclass, ismethod
import importlib
import pkgutil
import unittest
import uuid
from cafe.drivers.base import print_exception, get_error
from cafe.drivers.unittest.suite import OpenCafeUnittestTestSuite
from cafe.drivers.unittest.decorators import PARALLEL_TAGS_LIST_ATTR
class SuiteBuilder(object):
"""Builds suites for OpenCafe Unittest Runner"""
def get_suites(self):
"""Creates the suites for testing given the options in init"""
test_suites = self.load_file()
for class_ in self._get_classes(self._get_modules()):
suite = OpenCafeUnittestTestSuite()
for test in self._get_tests(class_):
suite.addTest(class_(test))
if suite._tests:
test_suites.append(suite)
if self.dry_run:
for suite in test_suites:
for test in suite:
print(test)
exit(0)
for suite in test_suites:
suite.cafe_uuid = uuid.uuid4()
return test_suites
def load_file(self):
"""Load a file generated by --dry_run"""
suites = []
for key, value in self.file_.items():
suite = OpenCafeUnittestTestSuite()
module, class_ = key.rsplit(".", 1)
module = importlib.import_module(module)
class_ = getattr(module, class_)
for test in value:
suite.addTest(class_(test))
suites.append(suite)
return suites
def _get_modules(self):
"""Gets modules given the repo paths passed in to init"""
modules = []
error = False
for repo in self.testrepos:
if not repo.__file__.endswith("__init__.pyc"):
modules.append(repo)
continue
prefix = "{0}.".format(repo.__name__)
for _, modname, is_pkg in pkgutil.walk_packages(
path=repo.__path__, prefix=prefix, onerror=lambda x: None):
if not is_pkg:
try:
modules.append(importlib.import_module(modname))
except Exception as exception:
print_exception(
"Suite Builder", "import_module", modname,
exception)
error = True
if self.exit_on_error and error:
exit(get_error(exception))
return modules
@staticmethod
def _get_classes(modules):
"""Gets classes given a list of modules"""
classes = []
for loaded_module in modules:
for objname in dir(loaded_module):
obj = getattr(loaded_module, objname, None)
if (isclass(obj) and issubclass(obj, unittest.TestCase) and
"fixture" not in obj.__name__.lower()):
classes.append(obj)
return classes
def _get_tests(self, class_):
"""Gets tests from a class"""
tests = []
for name in dir(class_):
if name.startswith("test_") and self._check_test(class_, name):
tests.append(name)
return tests
def _check_test(self, class_, test_name):
"""Checks filters for a given test, regex/tags"""
test = getattr(class_, test_name)
full_path = "{0}.{1}.{2}".format(
class_.__module__, class_.__name__, test_name)
ret_val = ismethod(test) and self._check_tags(test)
regex_val = not self.regex_list
for regex in self.regex_list:
regex_val |= bool(regex.search(full_path))
return ret_val & regex_val
def _check_tags(self, test):
"""
Checks to see if the test passed in has matching tags.
if the tags are (foo, bar) this function will match foo or
bar. if a all_tags is true only tests that contain
foo and bar will be matched including a test that contains
(foo, bar, bazz)
"""
test_tags = getattr(test, PARALLEL_TAGS_LIST_ATTR, [])
if self.all_tags:
return all([tag in test_tags for tag in self.tags])
else:
return any([tag in test_tags for tag in self.tags] or [True])
| [
2,
15069,
1853,
37927,
13200,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
... | 2.21634 | 2,191 |
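SuiteBuilder._get_modules above combines importlib with pkgutil.walk_packages to import every module found under a test repo. A self-contained sketch of that discovery pattern using only the standard library (the package passed in is arbitrary):

import importlib
import pkgutil

def discover_modules(package_name):
    """Import every non-package module found under package_name."""
    package = importlib.import_module(package_name)
    prefix = package.__name__ + "."
    modules = []
    for _, modname, is_pkg in pkgutil.walk_packages(
            path=package.__path__, prefix=prefix, onerror=lambda x: None):
        if not is_pkg:
            try:
                modules.append(importlib.import_module(modname))
            except ImportError:
                pass  # SuiteBuilder reports these; here they are just skipped
    return modules

# e.g. print([m.__name__ for m in discover_modules("unittest")])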
import geopandas as gpd
import pandas as pd
import numpy as np
import shared
import sys
from shapely.geometry import Polygon
args = sys.argv[1:]
prefix = args[0] + "_" if len(args) else ""
parcels = gpd.read_geocsv("parcels/%sparcels.csv" % prefix, low_memory=False)
mazs = gpd.read_geocsv("data/mazs.csv")
parcels_centroid = parcels.copy()
parcels_centroid["geometry"] = parcels.centroid
parcels_linked_to_mazs = gpd.sjoin(parcels_centroid, mazs)
parcels["maz_id"] = parcels_linked_to_mazs["maz_id"]
# find_fully_contained_parcels takes a list of parcels and returns a
# dictionary where keys are parcel ids and values are lists of parcel ids
# which are fully contained in the key parcel (a plausible implementation
# is sketched after this file)
# iterate over mazs because the sjoin is too slow without going to
# small geography
fully_contained_parcels = merge_dicts(
find_fully_contained_parcels(grouped_parcels)
for index, grouped_parcels in parcels.groupby("maz_id"))
parcels.set_index("apn", inplace=True)
parcels = merge_parcel_attributes(parcels, fully_contained_parcels)
if len(fully_contained_parcels):
drop_apns = pd.concat([
pd.Series(v) for v in fully_contained_parcels.values()])
else:
drop_apns = []
parcels_no_contains = parcels.drop(drop_apns)
del parcels_no_contains["maz_id"]
num_intersections = len(parcels) - len(parcels_no_contains)
print "%d parcels dropped because of self-intersections" % num_intersections
parcels_no_contains.to_csv(
"cache/%sparcels_no_self_intersections.csv" % prefix)
| [
11748,
30324,
392,
292,
355,
27809,
67,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4888,
198,
11748,
25064,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
12280,
14520,
198,
198,
22046,
796,
25064... | 2.636852 | 559 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from test.multiprocess_test_case import MultiProcessTestCase
import crypten.mpc.primitives.baseOT as baseOT
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
... | 3.194915 | 118 |
from ply import yacc
import MonkeyLex
tokens = MonkeyLex.tokens
def p_program(p):
'''program : program compound_action actions
| compound_action actions
| program actions
| actions
'''
if len(p) == 4:
p[0] = p[1]
if not p[0]: p[0] = []
p[0].append(p[2])
if p[3]:
for act_obj in p[3]:
if act_obj:
p[0].append(act_obj)
elif len(p) == 3:
if isinstance(p[1], dict):
p[0] = p[1]
if not p[0]: p[0] = []
if p[2]:
for act_obj in p[2]:
p[0].append(act_obj)
else:
p[0] = []
p[0].append(p[1])
if p[2] and len(p[2]) > 0:
for act_obj in p[2]:
p[0].append(act_obj)
elif len(p) == 2 and p[1]:
p[0] = []
for act_obj in p[1]:
p[0].append(act_obj)
def p_program_error(p):
'''program : error '''
p[0] = None
p.parser.error = 1
def p_actions(p):
'''actions : actions action
| action
'''
if len(p) == 2:
if not p[0]:
p[0] = []
p[0].append(p[1])
elif len(p) == 3:
p[0] = p[1]
if not p[0]:
p[0] = []
p[0].append(p[2])
def p_action_auth(p):
'''action : Auth STRING STRING NEWLINE
'''
action_dict = {
'move': p[1],
'type': 'auth',
'username': p[2],
'password': p[3]
}
p[0] = action_dict
def p_action_single(p):
'''action : movement BOOL NEWLINE
| movement NEWLINE
'''
action_dict = {
'move': p[1][1],
'type': 'single_action',
'is_wait': True if len(p) == 3 else p[2]
}
p[0] = action_dict
def p_action_target(p):
'''action : movement target STRING BOOL NEWLINE
| movement target STRING NEWLINE
| movement target BOOL NEWLINE
| movement target NEWLINE
'''
if not p[1][0]:
print("%s At Line %d", (p[1][1], p.lineno(1)))
p[0] = None
p.parser.error = 1
else:
action_dict = {}
if len(p) == 6:
action_dict = {
'move':p[1][1],
'target': p[2],
                'value': p[3],
'is_wait': p[4],
'type': 'action_target'
}
elif len(p) == 5:
action_dict = {
'move':p[1][1],
'target': p[2],
'type': 'action_target',
'is_wait' : True
}
if isinstance(p[3], str):
action_dict['value'] = p[3]
action_dict['is_wait'] = True
elif isinstance(p[3], bool):
action_dict['is_wait'] = p[3]
elif len(p) == 4:
action_dict = {
'move':p[1][1],
'target': p[2],
'type': 'action_target',
'is_wait' : True
}
p[0] = action_dict
def p_action_command(p):
'''action : movement STRING NEWLINE
'''
if not p[1][0]:
print("%s At Line %d", (p[1][1], p.lineno(1)))
p[0] = None
p.parser.error = 1
else:
action_dict = {
'move': p[1][1],
'value':p[2],
'type':'action_command',
'is_wait': True
}
p[0] = action_dict
def p_action_judge(p):
'''action : Judge target STRING NEWLINE
| Judge target Not STRING NEWLINE
'''
action_dict = {
'type': 'action_judge',
'target': p[2],
'expect': p[3] if len(p) == 5 else p[4],
'is_equal': True if len(p) == 5 else False,
'is_wait': True
}
p[0] = action_dict
def p_action_empty(p):
'''action : NEWLINE
'''
p[0] = None
def p_action_bad(p):
'''action : error NEWLINE
'''
print("Wrong action at line %d" % p.lineno(0))
p[0] = None
p.parser.error = 1
def p_movement(p):
'''movement : Prefer
| Patient
| Visit
| Blind
| Click
| Input
| Choose
| Back
| Forward
| Accept
| Auth
| Dismiss
| Press
| Switch
'''
p[0] = (True, p[1])
def p_movement_bad(p):
'''movement : error
'''
p[0] = (False, "Wrong movement setting")
p.parser.error = 1
def p_compound_action(p):
'''compound_action : repeat_block
| task_block
'''
p[0] = p[1]
def p_repeat_block(p):
'''repeat_block : Repeat NUMBER NEWLINE actions End NEWLINE
| Repeat NUMBER NEWLINE compound_action End NEWLINE
'''
block_dict = {
'type':'repeat',
'content':p[4],
'times': p[2]
}
p[0] = block_dict
def p_task_block(p):
'''task_block : Task ID NEWLINE actions End
| Task ID NEWLINE compound_action End
'''
block_dict = {
'type': 'task',
'content': p[4],
'name': p[2]
}
p[0] = block_dict
def p_target(p):
'''target : STRING STRING
| STRING
'''
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
MParser = yacc.yacc() | [
6738,
35960,
1330,
1635,
198,
11748,
26997,
45117,
198,
198,
83,
482,
641,
796,
26997,
45117,
13,
83,
482,
641,
198,
198,
4299,
279,
62,
23065,
7,
79,
2599,
198,
220,
220,
220,
705,
7061,
23065,
220,
1058,
1430,
13061,
62,
2673,
402... | 1.700748 | 3,208 |
from agent import Agent
from random import random, sample
from itertools import combinations
from math import comb
from mission import Mission
class Bayes(Agent):
    '''
    Maintains probabilities of all possible worlds.
    Calculates the probability of each player being a spy from the set of
    worlds. World probabilities are based on mission outcomes only.
    '''
def new_game(self, number_of_players, player_number, spies):
'''
initialises the game, spies is empty if player is not a spy
'''
self.number_of_players = number_of_players
self.player_number = player_number
self.spies = spies
self.missions = []
self.failed_teams = [] # teams that betrayed - avoid them
worlds = list(combinations(range(self.number_of_players), self.spy_count[number_of_players]))
self.worlds = {w: 1/len(worlds) for w in worlds}
self.update_suspicions()
self.vote_threshold = 1.0
self.vote_failable_rate = 0.2
self.betray_rate = 0.2
self.risky_betray_rate = 0.2 # if there are more spies on mission than necessary
def possible_teams(self, l):
'''
Returns list of all possible teams of length l including self,
in ascending average suspicion
'''
teams = [t for t in list(combinations(range(self.number_of_players), l))
if self.player_number in t]
return sorted(teams, key=lambda t: sum([self.suspicions[x] for x in t]))
def number_of_spies(self, mission):
'''
Spy method
returns number of spies on mission
'''
return len([x for x in self.spies if x in mission])
def enough_spies(self, mission):
'''
Spy method
returns True iff there are enough spies in mission to fail the mission
'''
return self.number_of_spies(mission) >= self.betrayals_required()
def bad_mission(self, mission):
'''
Returns True iff this mission configuration has already ended in
betrayal
'''
for m in self.failed_teams:
if mission == m or set(mission).issubset(m): return True
return False
def propose_mission(self, team_size, betrayals_required = 1):
'''
Propose the least suspicious team including self.
If spy and two betrayals required, try to return the least suspicious
team containing two spies.
'''
ps = self.possible_teams(team_size)
if not self.is_spy() or betrayals_required == 1:
team = ps[0]
for n in range(1, len(ps)):
if self.bad_mission(team): team = ps[n]
else: return team
elif betrayals_required == 2:
team = ps[0]
for n in range(1, len(ps)):
if self.bad_mission(team) or not self.enough_spies(team): team = ps[n]
else: return team
if self.missions_succeeded() < 2:
team = ps[0]
for n in range(1, len(ps)):
if self.bad_mission(team): team = ps[n]
else: return team
else:
team = ps[0]
for n in range(1, len(ps)):
if not self.enough_spies(team): team = ps[n]
else: return team
        return [self.player_number] + sample([x for x in range(self.number_of_players) if x != self.player_number], team_size-1)
def mission_suspicion(self, mission):
'''
Returns the average suspicion of a mission. Does not include self
'''
others = [self.suspicions[x] for x in mission if x != self.player_number]
return sum(others) / len(others)
def vote_outcome(self, mission, proposer, votes):
'''
Add a new Mission object to our stored info
'''
self.missions.append(Mission(self.number_of_players, self.rnd(), proposer, mission, votes))
def update_suspicions(self):
'''
Updates self.suspicions to reflect the probability of each player being
a spy
'''
suspicions = {x: 0 for x in range(self.number_of_players)}
worlds = self.worlds.items()
for x in range(self.number_of_players):
for s, p in worlds:
if x in s: suspicions[x] += p
self.suspicions = suspicions
def outcome_probability(self, spies_in_mission, betrayals, betray_rate):
'''
Determines the probability of a mission outcome given a world
Assume spy always betrays with probability betray_rate in a round
'''
if spies_in_mission < betrayals: return 0
p = 1
for i in range(betrayals): p *= betray_rate
for i in range(spies_in_mission - betrayals): p *= 1 - betray_rate
if spies_in_mission > 0: p *= comb(spies_in_mission, betrayals)
return p
def mission_outcome(self, mission, proposer, betrayals, mission_success):
'''
Update the last Mission object with mission info
Assumes opponent spies betray with probability of
self.betray_rate * self.rounds_completed()
'''
self.missions[-1].betrayals = betrayals
self.missions[-1].success = mission_success
if not mission_success: self.failed_teams.append(mission)
if len(self.worlds) > 1 and self.rounds_completed() < 5:
prob = 0 # overall probability of this mission outcome
betray_rate = max(0.05, min(0.95, self.betray_rate * self.rounds_completed()))
for w, wp in self.worlds.items():
spies_in_mission = len([x for x in w if x in mission])
prob += self.outcome_probability(spies_in_mission, betrayals, betray_rate) * wp
impossible_worlds = []
for w, wp in self.worlds.items():
spies_in_mission = len([x for x in w if x in mission])
if spies_in_mission == betrayals and len(mission) == betrayals:
self.worlds = {w:1}
break
self.worlds[w] *= self.outcome_probability(spies_in_mission, betrayals, betray_rate)
self.worlds[w] /= prob
if self.worlds[w] == 0: impossible_worlds.append(w)
for w in impossible_worlds: self.worlds.pop(w, None)
self.update_suspicions()
| [
6738,
5797,
1330,
15906,
198,
6738,
4738,
1330,
4738,
11,
6291,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
6738,
10688,
1330,
1974,
198,
6738,
4365,
1330,
12633,
198,
198,
4871,
4696,
274,
7,
36772,
2599,
220,
220,
220,
220,
198,
2... | 2.229044 | 2,899 |
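outcome_probability above is the binomial likelihood of observing b betrayals from s spies who each betray independently with rate r: C(s, b) * r**b * (1-r)**(s-b). An equivalent closed-form restatement, with an arbitrary numeric check:

from math import comb

def outcome_probability(spies_in_mission, betrayals, betray_rate):
    # equivalent to the loop-based version above
    if spies_in_mission < betrayals:
        return 0
    p = betray_rate ** betrayals * (1 - betray_rate) ** (spies_in_mission - betrayals)
    return p * comb(spies_in_mission, betrayals)

# Two spies on the mission, betray rate 0.2:
# P(0 betrayals) = 0.64, P(1) = 0.32, P(2) = 0.04 -- the three sum to 1.
print([outcome_probability(2, b, 0.2) for b in (0, 1, 2)])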
from math import sqrt, atan2, hypot, sin, cos, pi
from random import uniform, gauss
from typing import Optional, Tuple
import numpy as np
from map import Map
from motion import Velocity, Odometry
from obstacle import Obstacle, ObstacleID
from robot import Robot
from sensor_trace import SensorTrace, TimeStep
from simulator import Simulator
from testcase import TestCase
DISTANCE_RATIO = 0.15 # Factor by which to decrease the threshold distance to reject obstacles too close to each other
SENSOR_RANGE = 2000 # Range of the sensor
def generate_testcase(path_length: int,
map_size: int,
map_density: float,
sensor_angle_variance: float,
sensor_angle_bias: float,
sensor_distance_variance: float,
sensor_distance_bias: float,
odometry_position_variance: float,
odometry_heading_bias: float,
odometry_heading_variance: float,
step_length: int,
outlier_probability: float,
rotational_error: int,
add_inactivity: int,
symmetry: float,
directional_traverse: bool) -> TestCase:
"""
Automatically generates a test case based on the given parameters
:param path_length: Total number of steps in the test case
:param map_size: Total side length of the (quadratic) map
:param map_density: Number of landmarks per unit area
:param sensor_angle_variance: Variance in sensor angle measurements (in degrees)
:param sensor_angle_bias: Bias in sensor angle measurements (counter-clockwise rotation; in degrees)
:param sensor_distance_variance: Variance in sensor distance measurements (in mm)
:param sensor_distance_bias: Bias in sensor distance measurements (in mm)
:param odometry_position_variance: Uncertainty in both x- and y-direction after movement
(assumed constant, independent and axis-aligned for simplicity)
:param odometry_heading_bias: Bias in angular heading after movement
:param odometry_heading_variance: Variance in angular heading after movement
:param step_length: Length of a step in the path
:param outlier_probability: The probability that a given measurement is an outlier
:param rotational_error: Multiple of 360° to be added to angular measurements and odometry
:param add_inactivity: Number of initial runs without any information, to check resistance to inactivity
:param symmetry: Fraction of landmarks that are to be placed in a (point) symmetrical manner
:param directional_traverse: Determines whether the robot's trajectory should be unidirectional or random
:return: A test case following the specifications
"""
print('Generating testcase')
test_map = generate_map(map_size,
map_density,
symmetry)
sensor_trace = generate_odometry_sensor_trace(test_map,
path_length,
sensor_angle_variance,
sensor_angle_bias,
sensor_distance_variance,
sensor_distance_bias,
odometry_position_variance,
odometry_heading_bias,
odometry_heading_variance,
step_length,
outlier_probability,
rotational_error,
add_inactivity,
directional_traverse)
return TestCase(test_map, sensor_trace)
def generate_map(map_size: int,
map_density: float,
symmetry: float) -> Map:
"""
Automatically generates a ground truth map with the given specifications
:param map_size: Side length in meters of the (quadratic) map
:param map_density: Number of landmarks per square meter
:param symmetry: Fraction of landmarks that are to be placed in a (point) symmetrical fashion
:return: A map with the given properties
"""
# Convert to millimeters to place the obstacles
x_lower_bound = map_size * -500
x_upper_bound = map_size * 500
y_lower_bound = map_size * -500
y_upper_bound = map_size * 500
number_of_landmarks = int(map_size * map_size * map_density) # round down to avoid collision with minimum distance
symmetrical_landmarks = int(round(symmetry * number_of_landmarks))
# Average (minimum) distance between obstacles
minimum_distance = 1000 * DISTANCE_RATIO/sqrt(map_density)
m = Map(map_size)
landmarks_placed = 0
# First place the symmetrical portion of landmarks
while landmarks_placed < symmetrical_landmarks:
x = int(round(uniform(x_lower_bound, x_upper_bound)))
y = int(round(uniform(y_lower_bound, y_upper_bound)))
if 2 * hypot(x, y) >= minimum_distance:
o1 = Obstacle(ObstacleID(landmarks_placed), x, y)
o2 = Obstacle(ObstacleID(landmarks_placed + 1), -x, -y) # mirrored through (0, 0)
if m.fits(o1, minimum_distance) and m.fits(o2, minimum_distance):
m.add_obstacle(o1)
m.add_obstacle(o2)
landmarks_placed += 2
# Then fill in the rest non-symmetrically
while landmarks_placed < number_of_landmarks:
x = int(round(uniform(x_lower_bound, x_upper_bound)))
y = int(round(uniform(y_lower_bound, y_upper_bound)))
o = Obstacle(ObstacleID(landmarks_placed), x, y)
if m.fits(o, minimum_distance):
m.add_obstacle(o)
landmarks_placed += 1
m.done()
return m
def generate_odometry_sensor_trace(test_map: Map,
path_length: int,
sensor_angle_variance: float,
sensor_angle_bias: float,
sensor_distance_variance: float,
sensor_distance_bias: float,
odometry_position_variance: float,
odometry_heading_bias: float,
odometry_heading_variance: float,
step_length: int,
outlier_probability: float,
rotational_error: int,
add_inactivity: int,
directional_traverse: bool) -> SensorTrace:
"""
Automatically generates a path through the given map and a corresponding sensor trace
:param test_map: Ground truth map
:param path_length: Total number of steps in the test case
:param sensor_angle_variance: Variance in sensor angle measurements (in degrees)
:param sensor_angle_bias: Bias in sensor angle measurements (counter-clockwise rotation; in degrees)
:param sensor_distance_variance: Variance in sensor distance measurements (in mm)
:param sensor_distance_bias: Bias in sensor distance measurements (in mm)
:param odometry_position_variance: Uncertainty in both x- and y-direction after movement
(assumed constant, independent and axis-aligned for simplicity)
:param odometry_heading_bias: Bias in angular heading after movement
:param odometry_heading_variance: Variance in angular heading after movement
:param step_length: Length of a step in the path
:param outlier_probability: The probability that a given measurement is an outlier
:param rotational_error: Multiple of 360° to be added to angular measurements and odometry
:param add_inactivity: Number of initial runs without any information, to check resistance to inactivity
:param directional_traverse: Determines whether the robot's trajectory should be unidirectional or random
:return: The sensor trace corresponding to a path through the given map with the given properties
"""
sim = Simulator(test_map)
robot = Robot(0, 0, 0)
trace = SensorTrace(np.array([robot.x, robot.y, robot.rot]))
# Add an observation run before movement
observations = sim.simulate(robot,
SENSOR_RANGE,
sensor_distance_bias,
sensor_distance_variance,
sensor_angle_bias,
sensor_angle_variance,
outlier_probability,
rotational_error)
test_map.visit([o.obstacle for o in observations])
trace.add_step(TimeStep(Odometry(0, 0, 0), observations), robot.x, robot.y)
print("Start generation")
# Then add the real run
current_destination: Optional[Obstacle] = None
i = 0
for i in range(int((path_length - 1) / 2)):
# Find a destination to head to (an obstacle that has not yet been observed)
if test_map.visited(current_destination):
if directional_traverse:
current_destination = test_map.get_next_destination()
else:
current_destination = test_map.get_random_destination()
add_odometry_step(test_map,
current_destination,
robot,
trace,
sim,
step_length,
sensor_angle_variance,
sensor_angle_bias,
sensor_distance_variance,
sensor_distance_bias,
odometry_position_variance,
odometry_heading_bias,
odometry_heading_variance,
outlier_probability,
rotational_error)
# Stop after half the run to add a period of inactivity
index = i
print("Add inactivity")
# Add inactivity, if desired
for i in range(add_inactivity):
time_step = TimeStep(Odometry(0, 0, 0), [])
trace.add_step(time_step, robot.x, robot.y)
for i in range(index, path_length - 1):
# Find a destination to head to (an obstacle that has not yet been observed)
if test_map.visited(current_destination):
if directional_traverse:
current_destination = test_map.get_next_destination()
else:
current_destination = test_map.get_random_destination()
add_odometry_step(test_map,
current_destination,
robot,
trace,
sim,
step_length,
sensor_angle_variance,
sensor_angle_bias,
sensor_distance_variance,
sensor_distance_bias,
odometry_position_variance,
odometry_heading_bias,
odometry_heading_variance,
outlier_probability,
rotational_error)
print("Done")
return trace
def add_odometry_step(test_map: Map,
current_destination: Obstacle,
robot: Robot,
trace: SensorTrace,
sim: Simulator,
step_length: int,
sensor_angle_variance: float,
sensor_angle_bias: float,
sensor_distance_variance: float,
sensor_distance_bias: float,
odometry_position_variance: float,
odometry_heading_bias: float,
odometry_heading_variance: float,
outlier_probability: float,
rotational_error: int) -> None:
"""
Adds one time step on the given map to the given sensor trace
    :param test_map: Ground truth map
:param current_destination: Destination to drive towards
:param robot: Vehicle to move
:param trace: Sensor trace to add to
:param sim: Simulation object for generating observations
:param step_length: Length of next movement
:param sensor_angle_variance: Variance in sensor angle measurements (in degrees)
:param sensor_angle_bias: Bias in sensor angle measurements (counter-clockwise rotation; in degrees)
:param sensor_distance_variance: Variance in sensor distance measurements (in mm)
:param sensor_distance_bias: Bias in sensor distance measurements (in mm)
:param odometry_position_variance: Uncertainty in both x- and y-direction after movement
(assumed constant, independent and axis-aligned for simplicity)
:param odometry_heading_bias: Bias in angular heading after movement
:param odometry_heading_variance: Variance in angular heading after movement
:param outlier_probability: Probability of an outlier of some sort
:param rotational_error: Determines how often 2*pi should be added to angles
"""
# This is not the angle straight to the obstacle, since the robot moves forward first, but it will eventually
# converge
angle_to_destination = atan2(current_destination.y - robot.y, current_destination.x - robot.x) - robot.rot
measured_forward_movement = step_length
measured_rotational_movement = (angle_to_destination + pi) % (2 * pi) - pi
# Add noise to (actual) robot path
noisy_forward_movement = gauss(measured_forward_movement,
odometry_position_variance)
noisy_rotational_movement = gauss(measured_rotational_movement + odometry_heading_bias,
odometry_heading_variance)
# Create odometry information
odometry = Odometry(0, measured_forward_movement, measured_rotational_movement + rotational_error * 2 * pi)
# Move robot to new location
robot.move_by_odometry(noisy_forward_movement, noisy_rotational_movement)
# Simulate noisy measurements
observations = sim.simulate(robot,
SENSOR_RANGE,
sensor_distance_bias,
sensor_distance_variance,
sensor_angle_bias,
sensor_angle_variance,
outlier_probability,
rotational_error)
# Mark visited obstacles as visited
test_map.visit([o.obstacle for o in observations])
# Create sensor TimeStep
time_step = TimeStep(odometry, observations)
# Add to SensorTrace
trace.add_step(time_step, robot.x, robot.y)
def generate_velocity_sensor_trace(test_map: Map,
path_length: int,
sensor_angle_variance: float,
sensor_angle_bias: float,
sensor_distance_variance: float,
sensor_distance_bias: float,
odometry_position_variance: float,
odometry_heading_bias: float,
odometry_heading_variance: float,
step_length: int,
outlier_probability: float,
rotational_error: int,
add_inactivity: int,
directional_traverse: bool) -> SensorTrace:
"""
Automatically generates a path through the given map and a corresponding sensor trace
:param test_map: Ground truth map
:param path_length: Total number of steps in the test case
:param sensor_angle_variance: Variance in sensor angle measurements (in degrees)
:param sensor_angle_bias: Bias in sensor angle measurements (counter-clockwise rotation; in degrees)
:param sensor_distance_variance: Variance in sensor distance measurements (in mm)
:param sensor_distance_bias: Bias in sensor distance measurements (in mm)
:param odometry_position_variance: Uncertainty in both x- and y-direction after movement
(assumed constant, independent and axis-aligned for simplicity)
:param odometry_heading_bias: Bias in angular heading after movement
:param odometry_heading_variance: Variance in angular heading after movement
:param step_length: Length of a step in the path
:param outlier_probability: The probability that a given measurement is an outlier
:param rotational_error: Multiple of 360° to be added to angular measurements and odometry
:param add_inactivity: Number of initial runs without any information, to check resistance to inactivity
:param directional_traverse: Determines whether the robot's trajectory should be unidirectional or random
:return: The sensor trace corresponding to a path through the given map with the given properties
"""
sim = Simulator(test_map)
robot = Robot(0, 0, 0)
trace = SensorTrace(np.array([robot.x, robot.y, robot.rot]))
# Add an observation run before movement
observations = sim.simulate(robot,
SENSOR_RANGE,
sensor_distance_bias,
sensor_distance_variance,
sensor_angle_bias,
sensor_angle_variance,
outlier_probability,
rotational_error)
test_map.visit([o.obstacle for o in observations])
trace.add_step(TimeStep(Velocity(0, 0), observations), robot.x, robot.y)
# Then add the real run
current_destination: Optional[Obstacle] = None
forward_to_yaw_factor = 0
angle = 0.0
i = 0
for i in range(int((path_length - 1) / 2)):
# Find a destination to head to (an obstacle that has not yet been observed)
if test_map.visited(current_destination):
current_destination, forward_to_yaw_factor, angle = select_new_target(test_map, robot, directional_traverse)
# If the robot points away from the obstacle, it turns around first
if abs(angle) > pi / 2.0:
measured_velocity = 0.0
measured_yaw_velocity = pi
else:
# Assume dt = 1, so velocity is just equivalent to the step size
measured_velocity = step_length
measured_yaw_velocity = measured_velocity * forward_to_yaw_factor
velocities = Velocity(measured_velocity, measured_yaw_velocity)
# Generate noisy (actual) data
noisy_velocity = gauss(measured_velocity, odometry_position_variance)
noisy_yaw_velocity = gauss(measured_yaw_velocity + odometry_heading_bias, odometry_heading_variance)
# Move robot
move_robot_by(robot, noisy_velocity, noisy_yaw_velocity)
# Simulate noisy measurements
observations = sim.simulate(robot,
SENSOR_RANGE,
sensor_distance_bias,
sensor_distance_variance,
sensor_angle_bias,
sensor_angle_variance,
outlier_probability,
rotational_error)
# Mark visited obstacles as visited
test_map.visit([o.obstacle for o in observations])
# Create sensor TimeStep
time_step = TimeStep(velocities, observations)
# Add to SensorTrace
trace.add_step(time_step, robot.x, robot.y)
index = i
# Then add a period of inactivity, if desired
for i in range(add_inactivity):
time_step = TimeStep(Velocity(0, 0), [])
trace.add_step(time_step, robot.x, robot.y)
# Then resume the run
for i in range(index, path_length):
# Find a destination to head to (an obstacle that has not yet been observed)
if test_map.visited(current_destination):
current_destination, forward_to_yaw_factor, angle = select_new_target(test_map, robot, directional_traverse)
# If the robot points away from the obstacle, it turns around first
if abs(angle) > pi / 2.0:
measured_velocity = 0.0
measured_yaw_velocity = pi
else:
# Assume dt = 1, so velocity is just equivalent to the step size
measured_velocity = step_length
measured_yaw_velocity = measured_velocity * forward_to_yaw_factor
velocities = Velocity(measured_velocity, measured_yaw_velocity)
# Generate noisy (actual) data
noisy_velocity = gauss(measured_velocity, odometry_position_variance)
noisy_yaw_velocity = gauss(measured_yaw_velocity + odometry_heading_bias, odometry_heading_variance)
# Move robot
move_robot_by(robot, noisy_velocity, noisy_yaw_velocity)
# Simulate noisy measurements
observations = sim.simulate(robot,
SENSOR_RANGE,
sensor_distance_bias,
sensor_distance_variance,
sensor_angle_bias,
sensor_angle_variance,
outlier_probability,
rotational_error)
# Mark visited obstacles as visited
test_map.visit([o.obstacle for o in observations])
# Create sensor TimeStep
time_step = TimeStep(velocities, observations)
# Add to SensorTrace
trace.add_step(time_step, robot.x, robot.y)
return trace
def select_new_target(test_map: Map, robot: Robot, directional: bool) -> Tuple[Obstacle, float, float]:
"""
Selects a new target from the given map and calculates the ratio of forward to angular velocity necessary to reach
it from the given robot position
:param test_map: Map from which to choose a target
:param robot: Current robot position in the map
:param directional: Determines whether the next target should be selected in order or randomly
:return: A tuple of (1) the obstacle that is the new target, (2) the desired ratio of forward and angular
velocity and (3) the angle to the target
"""
if directional:
next_obstacle = test_map.get_next_destination()
else:
next_obstacle = test_map.get_random_destination()
dx = next_obstacle.x - robot.x
dy = next_obstacle.y - robot.y
d = hypot(dx, dy)
alpha = atan2(dy, dx) - robot.rot
# Velocity factor needed to traverse twice the angle (since it is an arc, the angle difference at the end will be
# -alpha) in the time it takes the robot to traverse the distance d, based on arc length geometry
factor = 2 * sin(alpha) / d
return next_obstacle, factor, alpha
def move_robot_by(robot: Robot, v: float, yaw_rate: float) -> None:
"""
Moves the given robot to the position it will be after driving for t=1s with given forward and angular velocities
:param robot: Robot to move
:param v: Forward velocity
:param yaw_rate: Angular velocity
"""
turn_radius = abs(v / yaw_rate)
distance = abs(2 * turn_radius * sin(yaw_rate / 2)) # since t=1s, alpha = yaw_rate
angle = robot.rot + yaw_rate / 2
dx = int(round(distance * cos(angle)))
dy = int(round(distance * sin(angle)))
robot.move_by(dx, dy, yaw_rate)
| [
6738,
10688,
1330,
19862,
17034,
11,
379,
272,
17,
11,
8813,
11,
7813,
11,
8615,
11,
31028,
198,
6738,
4738,
1330,
8187,
11,
31986,
1046,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198... | 2.183408 | 11,090 |
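The factor = 2 * sin(alpha) / d ratio in select_new_target comes from circle geometry: turning at yaw rate v * factor while driving arc length d * alpha / sin(alpha) traverses a circular arc whose chord has length d at bearing alpha, which is exactly what move_robot_by's chord formula reproduces. A quick numeric check of that round trip (values arbitrary):

from math import sin, cos, hypot, atan2

d, alpha = 1000.0, 0.5                      # chord length and bearing to the target
factor = 2 * sin(alpha) / d                 # select_new_target's velocity ratio
v = d * alpha / sin(alpha)                  # arc length that spans the chord
yaw = v * factor                            # total turn; equals 2 * alpha
R = v / yaw                                 # turn radius, as in move_robot_by
chord = 2 * R * sin(yaw / 2)                # chord of the arc
x, y = chord * cos(yaw / 2), chord * sin(yaw / 2)
print(round(hypot(x, y), 6), round(atan2(y, x), 6))  # -> 1000.0, 0.5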
# Android environment
import time
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from eyes import Eyes
DESIRED_CAPS = dict(
avd='Pixel_4_API_31',
platformName='Android',
deviceName='Pixel_2_API_31',
appPackage='com.globe.gcash.android',
appActivity='gcash.module.splashscreen.mvp.view.SplashScreenActivity',
noReset=True
)
APPIUM_URL = 'http://localhost:4723/wd/hub'
PIN_ELEMENT_ID = 'com.globe.gcash.android:id/mpin'
SERVICES_ITEM_ID = 'com.globe.gcash.android:id/tvItemLabel' | [
2,
5565,
2858,
198,
11748,
640,
198,
6738,
598,
1505,
1330,
3992,
26230,
198,
6738,
598,
1505,
13,
12384,
26230,
13,
11321,
13,
29332,
62,
2673,
1330,
15957,
12502,
198,
6738,
2951,
1330,
18301,
198,
198,
30910,
37819,
62,
33177,
50,
... | 2.620192 | 208 |
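These constants are presumably consumed by an Appium session elsewhere in the file. A hedged sketch of typical usage with the v1-style Appium Python client matching the imports above — the interaction flow itself is invented for illustration:

driver = webdriver.Remote(APPIUM_URL, DESIRED_CAPS)
try:
    # tap the (hypothetical) MPIN field, then list the service tiles
    pin_field = driver.find_element_by_id(PIN_ELEMENT_ID)
    TouchAction(driver).tap(pin_field).perform()
    time.sleep(1)
    for item in driver.find_elements_by_id(SERVICES_ITEM_ID):
        print(item.text)
finally:
    driver.quit()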
#
# PySNMP MIB module CISCO-IF-LOOPBACK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-IF-LOOPBACK-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:44:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
TimeTicks, Counter32, Integer32, ModuleIdentity, Counter64, ObjectIdentity, Gauge32, Bits, iso, IpAddress, Unsigned32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Counter32", "Integer32", "ModuleIdentity", "Counter64", "ObjectIdentity", "Gauge32", "Bits", "iso", "IpAddress", "Unsigned32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
ciscoIfLoopbackMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 9399))
ciscoIfLoopbackMIB.setRevisions(('2001-11-15 00:00',))
if mibBuilder.loadTexts: ciscoIfLoopbackMIB.setLastUpdated('200111150000Z')
if mibBuilder.loadTexts: ciscoIfLoopbackMIB.setOrganization('Cisco Systems, Inc.')
ciscoIfLoopbackMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1))
ciscoIfLoopbackConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1))
cifLConfTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1, 1), )
if mibBuilder.loadTexts: cifLConfTable.setStatus('current')
cifLConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cifLConfEntry.setStatus('current')
cifLLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("farEndLineLoopback", 1), ("farEndPayloadLoopback", 2), ("remoteLineLoopback", 3), ("remotePayloadLoopback", 4), ("localLoopback", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cifLLoopback.setStatus('current')
cifLLoopbackStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("completed", 1), ("inProgress", 2), ("clockOutOfSync", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cifLLoopbackStatus.setStatus('current')
cifLFELoopbackDeviceAndCode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))).clone(namedValues=NamedValues(("nonLatchOCUwith1", 1), ("nonLatchOCUwithout1", 2), ("nonLatchCSU", 3), ("nonLatchDSU", 4), ("latchDS0Drop", 5), ("latchDS0Line", 6), ("latchOCU", 7), ("latchCSU", 8), ("latchDSU", 9), ("latchHL96", 10), ("v54PN127Polynomial", 11), ("lineInband", 12), ("lineLoopbackESF", 13), ("payloadLoopbackESF", 14), ("noCode", 15), ("lineLoopbackFEAC", 16), ("smartJackInband", 17)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cifLFELoopbackDeviceAndCode.setStatus('current')
cifLRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 9399, 1, 1, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cifLRowStatus.setStatus('current')
ciscoIfLoopbackMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 9399, 8))
ciscoIfLoopbackMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 9399, 8, 1))
ciscoIfLoopbackMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 9399, 8, 2))
ciscoIfLoopbackMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 9399, 8, 1, 1)).setObjects(("CISCO-IF-LOOPBACK-MIB", "ciscoIfLoopbackGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoIfLoopbackMIBCompliance = ciscoIfLoopbackMIBCompliance.setStatus('current')
ciscoIfLoopbackGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 9399, 8, 2, 1)).setObjects(("CISCO-IF-LOOPBACK-MIB", "cifLLoopback"), ("CISCO-IF-LOOPBACK-MIB", "cifLLoopbackStatus"), ("CISCO-IF-LOOPBACK-MIB", "cifLFELoopbackDeviceAndCode"), ("CISCO-IF-LOOPBACK-MIB", "cifLRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoIfLoopbackGroup = ciscoIfLoopbackGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-IF-LOOPBACK-MIB", cifLFELoopbackDeviceAndCode=cifLFELoopbackDeviceAndCode, ciscoIfLoopbackMIBGroups=ciscoIfLoopbackMIBGroups, ciscoIfLoopbackMIB=ciscoIfLoopbackMIB, ciscoIfLoopbackMIBObjects=ciscoIfLoopbackMIBObjects, cifLLoopback=cifLLoopback, ciscoIfLoopbackMIBConformance=ciscoIfLoopbackMIBConformance, cifLRowStatus=cifLRowStatus, cifLConfTable=cifLConfTable, ciscoIfLoopbackConfig=ciscoIfLoopbackConfig, ciscoIfLoopbackMIBCompliance=ciscoIfLoopbackMIBCompliance, ciscoIfLoopbackMIBCompliances=ciscoIfLoopbackMIBCompliances, ciscoIfLoopbackGroup=ciscoIfLoopbackGroup, cifLConfEntry=cifLConfEntry, cifLLoopbackStatus=cifLLoopbackStatus, PYSNMP_MODULE_ID=ciscoIfLoopbackMIB)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
36159,
8220,
12,
5064,
12,
21982,
3185,
31098,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
1... | 2.544415 | 2,274 |
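A generated MIB module like this is typically consumed by pointing pysnmp's high-level API at one of its objects. A hedged sketch querying cifLLoopbackStatus for ifIndex 1 — the agent address and community string are placeholders, and the compiled MIB must be on pysnmp's search path:

from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, getCmd)

error_indication, error_status, _, var_binds = next(getCmd(
    SnmpEngine(),
    CommunityData('public'),
    UdpTransportTarget(('demo.snmplabs.com', 161)),  # placeholder agent
    ContextData(),
    ObjectType(ObjectIdentity('CISCO-IF-LOOPBACK-MIB', 'cifLLoopbackStatus', 1))))
if error_indication:
    print(error_indication)
else:
    for name, value in var_binds:
        print(name.prettyPrint(), '=', value.prettyPrint())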
from jsonspec.validators import load
| [
6738,
44804,
684,
43106,
13,
12102,
2024,
1330,
3440,
628
] | 3.8 | 10 |
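jsonspec's load compiles a schema into a reusable validator. A minimal sketch of the usual pattern (the schema here is an arbitrary example):

# compile a draft-04 schema once, then validate documents against it
validator = load({
    'type': 'object',
    'properties': {'name': {'type': 'string'}},
    'required': ['name'],
})
validator.validate({'name': 'example'})  # passes; raises on invalid input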
"""TIL-446 Ny tabell for de dataene som skal arkiveres
Revision ID: 2a2a61dd26dc
Revises: 31c82e4a3348
Create Date: 2014-12-19 12:22:28.513372
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSON, ENUM

# revision identifiers, used by Alembic.
revision = '2a2a61dd26dc'
down_revision = '31c82e4a3348'
| [
37811,
51,
4146,
12,
27260,
220,
17735,
7400,
695,
329,
390,
1366,
1734,
3870,
1341,
282,
610,
74,
425,
411,
198,
198,
18009,
1166,
4522,
25,
362,
64,
17,
64,
5333,
1860,
2075,
17896,
198,
18009,
2696,
25,
3261,
66,
6469,
68,
19,
... | 2.458333 | 144 |
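The upgrade/downgrade functions are cut off in this excerpt. A generic Alembic skeleton consistent with the imports — the table and columns are placeholders, not the original TIL-446 schema:

def upgrade():
    op.create_table(
        'archived_data',  # placeholder name; the original table is not shown
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('payload', JSON, nullable=False),
    )

def downgrade():
    op.drop_table('archived_data')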
import ifaddr
localips = []
| [
11748,
611,
29851,
198,
198,
12001,
2419,
796,
17635,
628
] | 3 | 10 |
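A sketch completing the snippet with ifaddr's usual enumeration loop; IPv4 addresses arrive as strings, IPv6 addresses as tuples:

for adapter in ifaddr.get_adapters():
    for ip in adapter.ips:
        if isinstance(ip.ip, str):  # IPv4; IPv6 is an (addr, flowinfo, scope) tuple
            localips.append(ip.ip)
print(localips)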
# Generated by Django 3.0.6 on 2020-07-05 13:15
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
21,
319,
12131,
12,
2998,
12,
2713,
1511,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.1 | 40 |
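The Migration class itself is missing from this excerpt. Given the settings import, the file very likely depends on the user model; a generic skeleton with placeholder operations:

class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # placeholder -- the original operations are not shown in this excerpt
    ]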
import tinychain as tc
import unittest
from testutils import start_host
if __name__ == "__main__":
unittest.main()
| [
11748,
7009,
7983,
355,
37096,
198,
11748,
555,
715,
395,
198,
198,
6738,
1332,
26791,
1330,
923,
62,
4774,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3... | 2.840909 | 44 |
# @param A : list of list of integers
# @return the same list modified
| [
220,
220,
220,
1303,
2488,
17143,
317,
1058,
1351,
286,
1351,
286,
37014,
201,
198,
220,
220,
220,
1303,
2488,
7783,
262,
976,
1351,
9518,
201
] | 3.076923 | 26 |