content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""The auth service module configures the flask app for the OpenBMS auth service.
The auth service provides an API for managing and authenticating user accounts. Users
may authenticate through a number of supported identity provides using SAML or through a
native OpenBMS account using an email address and password. The authentication service
also maintains user roles and permissions.
The auth service can be run in a development environment with the following command:
$ poetry run python auth_service.py
The auth service can be run in a production environment using gunicorn:
$ poetry run gunicorn auth:app
The auth_service.py script should not be run directly in a production environment due to
security and performance concerns.
"""
import sys
from os import environ
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from flask_mongoengine import MongoEngine
from auth.api import auth_api_v1
from util.logstash import configure_logstash_handler
# create new flask app
app = Flask(__name__)
"""The WSGI Flask application."""
configure_logstash_handler(app)
# expose the auth API
app.register_blueprint(auth_api_v1)
with app.app_context():
# establish a connection to the database
app.config["SQLALCHEMY_DATABASE_URI"] = environ.get("SQLALCHEMY_DATABASE_URI")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
postgres = SQLAlchemy(app)
"""Provides access to the PostgreSQL database."""
try:
# verify the database connection
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
app.logger.info("Connected to the PostgreSQL database.")
except Exception as e:
sys.exit(f"Failed to connect to the PostgreSQL database: {e}")
# establish a connection to the document store
app.config["MONGODB_HOST"] = environ.get("MONGODB_HOST")
mongo = MongoEngine(app)
"""Provides access to the MongoDB database."""
try:
# verify the document store connection
mongo.connection.server_info()
app.logger.info("Connected to the MongoDB database.")
except Exception as e:
sys.exit(f"Failed to connect to the MongoDB database: {e}")
@app.route("/health")
def health_check():
"""Attempt to ping the database and respond with a status code 200.
This endpoint is verify that the server is running and that the database is
accessible.
"""
response = {"service": "OK"}
try:
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
response["database"] = "OK"
except Exception as e:
app.logger.error(e)
response["database"] = "ERROR"
try:
mongo.connection.server_info()
response["document_store"] = "OK"
except Exception as e:
app.logger.error(e)
response["document_store"] = "ERROR"
return response
if __name__ == "__main__" and environ.get("FLASK_ENV") == "development":
app.run(host="0.0.0.0", port=5000, debug=True) # nosec
elif __name__ == "__main__":
sys.exit("Development server can only be run in development mode.")
| [
37811,
464,
6284,
2139,
8265,
4566,
942,
262,
42903,
598,
329,
262,
4946,
33,
5653,
6284,
2139,
13,
198,
198,
464,
6284,
2139,
3769,
281,
7824,
329,
11149,
290,
8323,
12364,
2836,
5504,
13,
18987,
198,
11261,
8323,
5344,
832,
257,
127... | 3.041748 | 1,030 |
"""=============================================================================
Download experimental directory.
============================================================================="""
import argparse
import os
# ------------------------------------------------------------------------------
def mkdir(directory):
"""Make directory if it does not exist. Void return.
"""
if not os.path.exists(directory):
os.makedirs(directory)
# ------------------------------------------------------------------------------
def download(directory):
"""Download directory and save locally.
"""
remote = '/scratch/gpfs/gwg3/fe/experiments/%s' % directory
local = '/Users/gwg/fe/experiments/'
mkdir(local)
cmd = 'rsync --progress -r ' \
'gwg3@tigergpu.princeton.edu:%s %s' % (remote, local)
os.system(cmd)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--directory', type=str, required=True)
args = p.parse_args()
download(args.directory)
| [
37811,
23926,
25609,
28,
198,
10002,
11992,
8619,
13,
198,
23926,
25609,
2625,
15931,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
2,
16529,
26171,
198,
198,
4299,
33480,
15908,
7,
34945,
2599,
198,
220,
220,
220,
37227,
1... | 3.633117 | 308 |
################################################################################
# User Libs
import test_utils
import test.unittest as unittest
import tablature as tab
# Std Libs
import os
################################################################################
################################################################################
if __name__ == '__main__':
unittest.main() | [
29113,
29113,
14468,
201,
198,
201,
198,
2,
11787,
7980,
82,
201,
198,
11748,
1332,
62,
26791,
201,
198,
11748,
1332,
13,
403,
715,
395,
355,
555,
715,
395,
201,
198,
201,
198,
11748,
36148,
1300,
355,
7400,
201,
198,
201,
198,
2,
... | 4.655556 | 90 |
import test_yaml
import test_new
test_yaml.run_tests()
test_new.run_tests()
| [
11748,
1332,
62,
88,
43695,
198,
11748,
1332,
62,
3605,
198,
198,
9288,
62,
88,
43695,
13,
5143,
62,
41989,
3419,
198,
9288,
62,
3605,
13,
5143,
62,
41989,
3419,
198
] | 2.483871 | 31 |
# Copyright The IETF Trust 2015-2020, All Rights Reserved
import itertools
from django.db import models
class ForeignKey(models.ForeignKey):
"A local ForeignKey proxy which provides the on_delete value required under Django 2.0."
class OneToOneField(models.OneToOneField):
"A local OneToOneField proxy which provides the on_delete value required under Django 2.0."
def object_to_dict(instance):
"""
Similar to django.forms.models.model_to_dict() but more comprehensive.
Taken from https://stackoverflow.com/questions/21925671/#answer-29088221
with a minor tweak: .id --> .pk
"""
opts = instance._meta
data = {}
for f in itertools.chain(opts.concrete_fields, opts.private_fields):
data[f.name] = f.value_from_object(instance)
for f in opts.many_to_many:
data[f.name] = [i.pk for i in f.value_from_object(instance)]
return data
| [
2,
15069,
383,
314,
22274,
9870,
1853,
12,
42334,
11,
1439,
6923,
33876,
198,
198,
11748,
340,
861,
10141,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
4871,
8708,
9218,
7,
27530,
13,
33616,
9218,
2599,
198,
220,
220,... | 2.713864 | 339 |
"""Test config utils."""
# pylint: disable=too-many-public-methods,protected-access
import os
import tempfile
import unittest
import unittest.mock as mock
import pytest
from voluptuous import MultipleInvalid
from homeassistant.core import DOMAIN, HomeAssistantError, Config
import homeassistant.config as config_util
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME,
CONF_TIME_ZONE, CONF_ELEVATION, CONF_CUSTOMIZE, __version__,
TEMP_FAHRENHEIT)
from homeassistant.util import location as location_util, dt as dt_util
from homeassistant.helpers.entity import Entity
from tests.common import (
get_test_config_dir, get_test_home_assistant)
CONFIG_DIR = get_test_config_dir()
YAML_PATH = os.path.join(CONFIG_DIR, config_util.YAML_CONFIG_FILE)
VERSION_PATH = os.path.join(CONFIG_DIR, config_util.VERSION_FILE)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
def create_file(path):
"""Create an empty file."""
with open(path, 'w'):
pass
class TestConfig(unittest.TestCase):
"""Test the configutils."""
def tearDown(self): # pylint: disable=invalid-name
"""Clean up."""
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
if os.path.isfile(YAML_PATH):
os.remove(YAML_PATH)
if os.path.isfile(VERSION_PATH):
os.remove(VERSION_PATH)
if hasattr(self, 'hass'):
self.hass.stop()
def test_create_default_config(self):
"""Test creation of default config."""
config_util.create_default_config(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
def test_find_config_file_yaml(self):
"""Test if it finds a YAML config file."""
create_file(YAML_PATH)
self.assertEqual(YAML_PATH, config_util.find_config_file(CONFIG_DIR))
@mock.patch('builtins.print')
def test_ensure_config_exists_creates_config(self, mock_print):
"""Test that calling ensure_config_exists.
If not creates a new config file.
"""
config_util.ensure_config_exists(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
self.assertTrue(mock_print.called)
def test_ensure_config_exists_uses_existing_config(self):
"""Test that calling ensure_config_exists uses existing config."""
create_file(YAML_PATH)
config_util.ensure_config_exists(CONFIG_DIR, False)
with open(YAML_PATH) as f:
content = f.read()
# File created with create_file are empty
self.assertEqual('', content)
def test_load_yaml_config_converts_empty_files_to_dict(self):
"""Test that loading an empty file returns an empty dict."""
create_file(YAML_PATH)
self.assertIsInstance(
config_util.load_yaml_config_file(YAML_PATH), dict)
def test_load_yaml_config_raises_error_if_not_dict(self):
"""Test error raised when YAML file is not a dict."""
with open(YAML_PATH, 'w') as f:
f.write('5')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_malformed_yaml(self):
"""Test error raised if invalid YAML."""
with open(YAML_PATH, 'w') as f:
f.write(':')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_unsafe_yaml(self):
"""Test error raised if unsafe YAML."""
with open(YAML_PATH, 'w') as f:
f.write('hello: !!python/object/apply:os.system')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_preserves_key_order(self):
"""Test removal of library."""
with open(YAML_PATH, 'w') as f:
f.write('hello: 0\n')
f.write('world: 1\n')
self.assertEqual(
[('hello', 0), ('world', 1)],
list(config_util.load_yaml_config_file(YAML_PATH).items()))
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
@mock.patch('builtins.print')
def test_create_default_config_detect_location(self, mock_detect,
mock_elev, mock_print):
"""Test that detect location sets the correct config keys."""
config_util.ensure_config_exists(CONFIG_DIR)
config = config_util.load_yaml_config_file(YAML_PATH)
self.assertIn(DOMAIN, config)
ha_conf = config[DOMAIN]
expected_values = {
CONF_LATITUDE: 32.8594,
CONF_LONGITUDE: -117.2073,
CONF_ELEVATION: 101,
CONF_TEMPERATURE_UNIT: 'F',
CONF_NAME: 'Home',
CONF_TIME_ZONE: 'America/Los_Angeles'
}
assert expected_values == ha_conf
assert mock_print.called
@mock.patch('builtins.print')
def test_create_default_config_returns_none_if_write_error(self,
mock_print):
"""Test the writing of a default configuration.
Non existing folder returns None.
"""
self.assertIsNone(
config_util.create_default_config(
os.path.join(CONFIG_DIR, 'non_existing_dir/'), False))
self.assertTrue(mock_print.called)
def test_entity_customization(self):
"""Test entity customization through configuration."""
self.hass = get_test_home_assistant()
config = {CONF_LATITUDE: 50,
CONF_LONGITUDE: 50,
CONF_NAME: 'Test',
CONF_CUSTOMIZE: {'test.test': {'hidden': True}}}
config_util.process_ha_core_config(self.hass, config)
entity = Entity()
entity.entity_id = 'test.test'
entity.hass = self.hass
entity.update_ha_state()
state = self.hass.states.get('test.test')
assert state.attributes['hidden']
def test_remove_lib_on_upgrade(self):
"""Test removal of library on upgrade."""
with tempfile.TemporaryDirectory() as config_dir:
version_path = os.path.join(config_dir, '.HA_VERSION')
lib_dir = os.path.join(config_dir, 'deps')
check_file = os.path.join(lib_dir, 'check')
with open(version_path, 'wt') as outp:
outp.write('0.7.0')
os.mkdir(lib_dir)
with open(check_file, 'w'):
pass
self.hass = get_test_home_assistant()
self.hass.config.config_dir = config_dir
assert os.path.isfile(check_file)
config_util.process_ha_config_upgrade(self.hass)
assert not os.path.isfile(check_file)
def test_not_remove_lib_if_not_upgrade(self):
"""Test removal of library with no upgrade."""
with tempfile.TemporaryDirectory() as config_dir:
version_path = os.path.join(config_dir, '.HA_VERSION')
lib_dir = os.path.join(config_dir, 'deps')
check_file = os.path.join(lib_dir, 'check')
with open(version_path, 'wt') as outp:
outp.write(__version__)
os.mkdir(lib_dir)
with open(check_file, 'w'):
pass
self.hass = get_test_home_assistant()
self.hass.config.config_dir = config_dir
config_util.process_ha_config_upgrade(self.hass)
assert os.path.isfile(check_file)
def test_loading_configuration(self):
"""Test loading core config onto hass object."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {
'latitude': 60,
'longitude': 50,
'elevation': 25,
'name': 'Huis',
'temperature_unit': 'F',
'time_zone': 'America/New_York',
})
assert config.latitude == 60
assert config.longitude == 50
assert config.elevation == 25
assert config.location_name == 'Huis'
assert config.temperature_unit == TEMP_FAHRENHEIT
assert config.time_zone.zone == 'America/New_York'
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=location_util.LocationInfo(
'0.0.0.0', 'US', 'United States', 'CA', 'California',
'San Diego', '92122', 'America/Los_Angeles', 32.8594,
-117.2073, True))
@mock.patch('homeassistant.util.location.elevation', return_value=101)
def test_discovering_configuration(self, mock_detect, mock_elevation):
"""Test auto discovery for missing core configs."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {})
assert config.latitude == 32.8594
assert config.longitude == -117.2073
assert config.elevation == 101
assert config.location_name == 'San Diego'
assert config.temperature_unit == TEMP_FAHRENHEIT
assert config.time_zone.zone == 'America/Los_Angeles'
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=None)
@mock.patch('homeassistant.util.location.elevation', return_value=0)
def test_discovering_configuration_auto_detect_fails(self, mock_detect,
mock_elevation):
"""Test config remains unchanged if discovery fails."""
config = Config()
hass = mock.Mock(config=config)
config_util.process_ha_core_config(hass, {})
blankConfig = Config()
assert config.latitude == blankConfig.latitude
assert config.longitude == blankConfig.longitude
assert config.elevation == blankConfig.elevation
assert config.location_name == blankConfig.location_name
assert config.temperature_unit == blankConfig.temperature_unit
assert config.time_zone == blankConfig.time_zone
| [
37811,
14402,
4566,
3384,
4487,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
18820,
12,
21834,
12,
11377,
12,
24396,
82,
11,
24326,
12,
15526,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
11748,
55... | 2.165768 | 4,820 |
###############################################################################
# Copyright 2011-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import json
import os
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from ipf.dt import *
from ipf.error import NoMoreInputsError, StepError
from ipf.sysinfo import ResourceName
from ipf.step import Step
from ipf.ipfinfo import IPFInformation, IPFInformationJson, IPFInformationTxt
from .computing_activity import ComputingActivities, ComputingActivityTeraGridXml, ComputingActivityOgfJson
from .computing_manager import ComputingManager, ComputingManagerTeraGridXml, ComputingManagerOgfJson
from .computing_manager_accel_info import ComputingManagerAcceleratorInfo, ComputingManagerAcceleratorInfoOgfJson
from .computing_service import ComputingService, ComputingServiceTeraGridXml, ComputingServiceOgfJson
from .computing_share import ComputingShares, ComputingShareTeraGridXml, ComputingShareOgfJson
from .computing_share_accel_info import ComputingShareAcceleratorInfo, ComputingShareAcceleratorInfoOgfJson
from .execution_environment import ExecutionEnvironments, ExecutionEnvironmentTeraGridXml
from .execution_environment import ExecutionEnvironmentTeraGridXml
from .execution_environment import ExecutionEnvironmentOgfJson
from .accelerator_environment import AcceleratorEnvironments
from .accelerator_environment import AcceleratorEnvironmentsOgfJson
from .accelerator_environment import AcceleratorEnvironment
from .accelerator_environment import AcceleratorEnvironmentOgfJson
from .location import Location, LocationOgfJson, LocationTeraGridXml
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
| [
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
220,
220,
15069,
2813,
12,
4967,
383,
2059,
286,
3936,
379,
9533,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
198,
2,
220,... | 3.588291 | 1,059 |
from .filter import FilterWidget
from .flash import FlashWidget, ShowFlashNotification
from .header import HeaderWidget
from .help import HelpWidget
from .secret_properties import SecretPropertiesWidget
from .secret_versions import SecretVersionsWidget
from .secrets import SecretsWidget
__all__ = (
"SecretsWidget",
"ShowFlashNotification",
"FilterWidget",
"FlashWidget",
"HeaderWidget",
"SecretVersionsWidget",
"SecretPropertiesWidget",
"HelpWidget",
)
| [
6738,
764,
24455,
1330,
25853,
38300,
198,
6738,
764,
34167,
1330,
9973,
38300,
11,
5438,
30670,
3673,
2649,
198,
6738,
764,
25677,
1330,
48900,
38300,
198,
6738,
764,
16794,
1330,
10478,
38300,
198,
6738,
764,
21078,
62,
48310,
1330,
394... | 3.460993 | 141 |
main()
| [
201,
198,
201,
198,
201,
198,
12417,
3419,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 1.125 | 32 |
import numpy as np
from sklearn.base import clone
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted, check_X_y
from pycausal_explorer.base import BaseCausalModel
from ..reweight import PropensityScore
class XLearner(BaseCausalModel):
"""
Implementation of the X-learner.
It consists of estimating heterogeneous treatment effect using four machine learning models.
Details of X-learner theory are available at Kunzel et al. (2018) (https://arxiv.org/abs/1706.03461).
Parameters
----------
learner: base learner to use in all models. Either leaner or (u0, u1, te_u0, te_u1) must be filled
u0: model used to estimate outcome in the control group
u1: model used to estimate outcome in the treatment group
te_u0: model used to estimate treatment effect in the control group
te_u1: model used to estimate treatment effect in the treatment group group
random_state: random state
"""
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1341,
35720,
13,
8692,
1330,
17271,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
14534,
34605,
8081,
44292,
198,
6738,
1341,
35720,
13,
26791,
13,
12102,
341,
1330,
2198,
62,
271,
62,
386... | 3.329966 | 297 |
# coding=utf-8
from collections import OrderedDict
expected = [
OrderedDict(
[
("id", u"par-1"),
(
"source",
OrderedDict(
[
("funderId", u"10.13039/100006978"),
(
"name",
[
u"University of California Berkeley (University of California, Berkeley)"
],
),
]
),
),
("awardId", u"AWS in Education grant"),
(
"recipients",
[
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Eric Jonas"),
("index", u"Jonas, Eric"),
]
),
),
]
)
],
),
]
),
OrderedDict(
[
("id", u"par-2"),
(
"source",
OrderedDict(
[
("funderId", u"10.13039/100000001"),
("name", [u"National Science Foundation"]),
]
),
),
("awardId", u"NSF CISE Expeditions Award CCF-1139158"),
(
"recipients",
[
{
"type": "person",
"name": {"index": "Jonas, Eric", "preferred": "Eric Jonas"},
}
],
),
]
),
OrderedDict(
[
("id", u"par-3"),
(
"source",
OrderedDict(
[
("funderId", u"10.13039/100006235"),
("name", [u"Lawrence Berkely National Laboratory"]),
]
),
),
("awardId", u"Award 7076018"),
(
"recipients",
[
{
"type": "person",
"name": {"index": "Jonas, Eric", "preferred": "Eric Jonas"},
}
],
),
]
),
OrderedDict(
[
("id", u"par-4"),
(
"source",
OrderedDict(
[
("funderId", u"10.13039/100000185"),
("name", [u"Defense Advanced Research Projects Agency"]),
]
),
),
("awardId", u"XData Award FA8750-12-2-0331"),
(
"recipients",
[
{
"type": "person",
"name": {"index": "Jonas, Eric", "preferred": "Eric Jonas"},
}
],
),
]
),
OrderedDict(
[
("id", u"par-5"),
(
"source",
OrderedDict(
[
("funderId", u"10.13039/100000002"),
("name", [u"National Institutes of Health"]),
]
),
),
("awardId", u"R01NS074044"),
(
"recipients",
[
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Konrad Kording"),
("index", u"Kording, Konrad"),
]
),
),
]
)
],
),
]
),
OrderedDict(
[
("id", u"par-6"),
(
"source",
OrderedDict(
[
("funderId", u"10.13039/100000002"),
("name", [u"National Institutes of Health"]),
]
),
),
("awardId", u"R01NS063399"),
(
"recipients",
[
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Konrad Kording"),
("index", u"Kording, Konrad"),
]
),
),
]
)
],
),
]
),
]
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
40319,
796,
685,
198,
220,
220,
220,
14230,
1068,
35,
713,
7,
198,
220,
220,
220,
220,
220,
220,
220,
685,
198,
220,
220,
220,
220,
220,
220,
2... | 1.323837 | 4,107 |
from django.contrib import admin
from apps.web.models import CodeAnnotation
admin.site.register(CodeAnnotation)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
6725,
13,
12384,
13,
27530,
1330,
6127,
2025,
38983,
198,
198,
28482,
13,
15654,
13,
30238,
7,
10669,
2025,
38983,
8,
198
] | 3.454545 | 33 |
from PuppeteerLibrary.ikeywords.imockresponse_async import iMockResponseAsync
from PuppeteerLibrary.base.robotlibcore import keyword
from PuppeteerLibrary.base.librarycomponent import LibraryComponent
| [
6738,
20926,
14471,
263,
23377,
13,
522,
88,
10879,
13,
320,
735,
26209,
62,
292,
13361,
1330,
1312,
44,
735,
31077,
42367,
198,
6738,
20926,
14471,
263,
23377,
13,
8692,
13,
305,
13645,
8019,
7295,
1330,
21179,
198,
6738,
20926,
14471,... | 3.884615 | 52 |
__all__ = ['DataParallel', 'ModelParallel', 'benchmarks', 'dataparallel', 'modelparallel']
from .DataParallel import DataParallel
from .ModelParallel import ModelParallel
import splintr.benchmarks
import splintr.dataparallel
import splintr.modelparallel
| [
834,
439,
834,
796,
37250,
6601,
10044,
29363,
3256,
705,
17633,
10044,
29363,
3256,
705,
26968,
14306,
3256,
705,
19608,
499,
283,
29363,
3256,
705,
19849,
1845,
29363,
20520,
198,
6738,
764,
6601,
10044,
29363,
1330,
6060,
10044,
29363,
... | 3.298701 | 77 |
#!/usr/bin/env/python3
# -*- coding:utf-8 -*-
"""
@project: apiAutoTest
@author: cjw
@file: __init__.py.py
@ide: PyCharm
@time: 2020/7/31
""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
16302,
25,
40391,
27722,
14402,
198,
31,
9800,
25,
269,
73,
86,
198,
31,
7753,
25,
11593,
15003,... | 2.073529 | 68 |
#!/usr/bin/env python
# coding: utf-8
import logging
import argparse
import pydash
from lib.common import USER_EMAIL
from lib.common import API_KEY
from lib.common import API_SECRET
from lib.common import USER_API
from lib.common import TEAM_API
from lib.common import ROLE_API
from lib.common import POLICY_API
from lib.common import APP_API
from lib.common import getToken
from lib.common import booleanString
from lib.purge import getResource
from lib.purge import getResources
from lib.purge import updateResource
from lib.purge import purgeResource
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remove existing user and associated objects')
parser.add_argument('--dryrun', dest='dryrun', type=booleanString, default=True,
required=True, help='In dryrun mode, no objects will be deleted')
parser.add_argument('--debug', dest='debug', type=booleanString, default=False,
required=False, help='Output verbose log')
args = parser.parse_args()
main(vars(args))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
18931,
198,
11748,
1822,
29572,
198,
198,
11748,
279,
5173,
1077,
198,
198,
6738,
9195,
13,
11321,
1330,
1294,
1137,
62,
27630,
4146,
... | 3.091445 | 339 |
from typing import TypeVar, Generic, List
T = TypeVar('T')
if __name__ == '__main__':
discs: int = 5
tower_a: Stack[int] = Stack()
tower_b: Stack[int] = Stack()
tower_c: Stack[int] = Stack()
for i in range(discs, 0, -1):
tower_a.push(i)
print(tower_a, tower_b, tower_c)
hanoi(tower_a, tower_c, tower_b, discs)
| [
6738,
19720,
1330,
5994,
19852,
11,
42044,
11,
7343,
198,
198,
51,
796,
5994,
19852,
10786,
51,
11537,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
31014,
25,
493,
796,
642,
198,
220,
... | 2.213836 | 159 |
import os.path
from unittest import TestCase
from code.cli import PARAMS_DIR, TESTS_DIR
from code.prepare.base import load_data
from code.prepare.params import load_params
from code.prepare.utils import *
FIXTURE_DATASET = os.path.join(TESTS_DIR, 'fixtures/GER.tsv')
FIXTURE_DATASET_ASJP = os.path.join(TESTS_DIR, 'fixtures/Afrasian.tsv')
| [
11748,
28686,
13,
6978,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
2438,
13,
44506,
1330,
29463,
40834,
62,
34720,
11,
309,
1546,
4694,
62,
34720,
198,
198,
6738,
2438,
13,
46012,
533,
13,
8692,
1330,
3440,
62,
... | 2.532847 | 137 |
"""
Process time data set
see create_timed_data to generate files with times for all
Extract a single data set around a cone with TimedData
"""
import os, glob, pickle
import healpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time, TimeDelta
from . import binned_data
mission_start = Time('2001-01-01T00:00:00', scale='utc')
class TimeInfo(object):
"""Read in, process a file generated by binned_data.ConvertFT1.time_record
"""
def select(self, l, b, radius=5, nside=1024):
"""create DataFrame with times, band id, distance from center
parameters:
l,b : position in Galactic
radius : cone radius, deg
nside : for healpy
returns:
DataFrame with columns:
band : from input, energy and event type
time : Mission Elapsed Time in s. (double)
delta : distance from input position (deg, float32)
"""
df = self.df
cart = lambda l,b: healpy.dir2vec(l,b, lonlat=True)
# use query_disc to get photons within given radius of position
center = cart(l,b)
ipix = healpy.query_disc(nside, cart(l,b), np.radians(radius), nest=False)
incone = np.isin(self.df.hpindex, ipix)
# times: convert to double, add to start
t = np.array(df.time[incone],float)+self.tstart
# convert position info to just distance from center
ll,bb = healpy.pix2ang(nside, self.df.hpindex[incone], nest=False, lonlat=True)
t2 = np.array(np.sqrt((1.-np.dot(center, cart(ll,bb)))*2), np.float32)
return pd.DataFrame(np.rec.fromarrays(
[df.band[incone], t, np.degrees(t2)], names='band time delta'.split()))
class TimedData(object):
"""Create a data set at a given position
"""
plt.rc('font', size=12)
def __init__(self, position, name='', radius=5,
file_pattern='$FERMI/data/P8_P305/time_info/month_*.pkl'):
"""Set up combined data from set of monthly files
position : l,b in degrees
name : string, optional name to describe source
radius : float, cone radius for selection
file_pattern : string for glob use
"""
assert hasattr(position, '__len__') and len(position)==2, 'expect position to be (l,b)'
files = sorted(glob.glob(os.path.expandvars(file_pattern)))
assert len(files)>0, 'No files found using pattern {}'.format(file_pattern)
self.name = name
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/2**30
print 'Opening {} files, with {} GB total'.format(len(files), gbtotal)
dflist=[]
for filename in files:
dflist.append(TimeInfo(filename).select(*position))
print '.',
self.df = pd.concat(dflist)
print 'Selected {} photons'.format(len(self.df))
def plot_time(self, delta_max=2, delta_t=1, xlim=None):
"""
"""
df = self.df
t = timed_data.MJD(df.time)
ta,tb=t[0],t[-1]
Nbins = int((tb-ta)/float(delta_t))
fig,ax= plt.subplots(figsize=(15,5))
hkw = dict(bins = np.linspace(ta,tb,Nbins), histtype='step')
ax.hist(t, label='E>100 MeV', **hkw)
ax.hist(t[(df.delta<delta_max) & (df.band>0)], label='delta<{} deg'.format(delta_max), **hkw);
ax.set(xlabel=r'$\mathsf{MJD}$', ylabel='counts per {:.0f} day'.format(delta_t))
if xlim is not None: ax.set(xlim=xlim)
ax.legend()
ax.set_title('{} counts vs. time'.format(self.name))
def create_timed_data(
monthly_ft1_files='/afs/slac/g/glast/groups/catalog/P8_P305/zmax105/*.fits',
outfolder='$FERMI/data/P8_P305/time_info/',
overwrite=False,
test=False,
verbose=1):
"""
"""
files=sorted(glob.glob(monthly_ft1_files))
assert len(files)>0, 'No ft1 files found at {}'.format(monthly_ft1_files)
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/2**30
if verbose>0:
print '{} monthly FT1 files found at {}\n\t {} GB total'.format(len(files), monthly_ft1_files, gbtotal)
outfolder = os.path.expandvars(outfolder)
if not os.path.exists(outfolder):
os.makedirs(outfolder)
os.chdir(outfolder)
if verbose>0:
print 'Writing time files to folder {}\n\toverwrite={}'.format(outfolder, overwrite)
for filename in files:
m = filename.split('_')[-2]
outfile = 'month_{}.pkl'.format(m)
if not overwrite and os.path.exists(outfile) :
if verbose>1:
print 'exists: {}'.format(outfile)
else:
print '.',
continue
tr = binned_data.ConvertFT1(filename).time_record()
if not test:
if verbose>1:
print 'writing {}'.format(outfile),
elif verbose>0:
print '+',
pickle.dump(tr, open(outfile, 'wr'))
else:
if verbose>0:
print 'Test: would have written {}'.format(outfile)
# check how many exist
files=sorted(glob.glob(outfolder+'/*.pkl'))
gbtotal = np.array([os.stat(filename).st_size for filename in files]).sum()/float(2**30)
print '\nThere are {} timed data files, {:.1f} GB total'.format(len(files), gbtotal)
| [
37811,
198,
18709,
640,
1366,
900,
198,
3826,
2251,
62,
16514,
276,
62,
7890,
284,
7716,
3696,
351,
1661,
329,
477,
220,
198,
11627,
974,
257,
2060,
1366,
900,
1088,
257,
27763,
351,
5045,
276,
6601,
198,
37811,
198,
198,
11748,
28686... | 2.158686 | 2,527 |
from discord.ext import commands
import os
from decouple import config
# Create the bot with "!" as its command prefix.
bot = commands.Bot("!")
# Register the command modules (cogs) on the bot.
# NOTE(review): load_cogs is not defined or imported in this chunk --
# presumably provided elsewhere in the module; confirm.
load_cogs(bot)
# Read the bot token from the environment / .env file (python-decouple)
# and start the blocking event loop.
TOKEN = config("TOKEN")
bot.run(TOKEN)
| [
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
28686,
198,
6738,
875,
43846,
1330,
4566,
198,
198,
13645,
796,
9729,
13,
20630,
7203,
2474,
8,
628,
198,
198,
2220,
62,
66,
18463,
7,
13645,
8,
198,
198,
10468,
43959,
796,
4566,
7203,
... | 2.851852 | 54 |
from .grid import Grid
from .random import Random
from .quasirandom import QuasiRandom
| [
6738,
764,
25928,
1330,
24846,
198,
6738,
764,
25120,
1330,
14534,
198,
6738,
764,
421,
292,
343,
3749,
1330,
2264,
17053,
29531,
198
] | 3.782609 | 23 |
# NOTE(review): Enum is imported but not used in this chunk -- it may be
# used further down in the original module; confirm before removing.
from enum import Enum
from .core.vector2 import Vector2
# Window dimensions in pixels; also unpacked into width/height.
screen_size = width, height = 1040, 480
# Extent of the full game map (units defined by the game's coordinate system).
map_size = Vector2(x=10000, y=1000)
# Downward acceleration constant applied by the physics update.
gravity = 1.5
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
764,
7295,
13,
31364,
17,
1330,
20650,
17,
198,
198,
9612,
62,
7857,
796,
9647,
11,
6001,
796,
838,
1821,
11,
23487,
198,
8899,
62,
7857,
796,
20650,
17,
7,
87,
28,
49388,
11,
331,
28,
1282... | 2.846154 | 52 |
# -*- coding: utf-8 -*-
# Search API docs: https://developers.google.com/youtube/v3/docs/search/list
# Search API Python docs: https://developers.google.com/resources/api-libraries/documentation/youtube/v3/python/latest/youtube_v3.search.html
# Examples: https://github.com/youtube/api-samples/tree/master/python
import argparse
import inspect
import math
import os
from pprint import pprint
import sys
try:
    #python2
    from urllib import urlencode
except ImportError:
    #python3
    from urllib.parse import urlencode
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
# go up a second level -- presumably this script lives two directories
# below the repository root; confirm against the repo layout
parentdir = os.path.dirname(parentdir)
sys.path.insert(0,parentdir)
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
# --- command-line input ---------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-key', dest="API_KEY", default="", help="Your API Key. See: https://google-developers.appspot.com/youtube/v3/getting-started")
parser.add_argument('-query', dest="QUERY", default=" location=40.903125,-73.85062&locationRadius=10km&videoLicense=creativeCommon", help="Search query parameters as a query string")
parser.add_argument('-in', dest="INPUT_FILE", default="", help="Input .csv file containing one or more queries; will override individual query")
parser.add_argument('-sort', dest="SORT_BY", default="", help="Sort by string")
parser.add_argument('-lim', dest="LIMIT", default=100, type=int, help="Limit results")
parser.add_argument('-out', dest="OUTPUT_FILE", default="tmp/yt-search/%s.json", help="JSON output file pattern")
parser.add_argument('-verbose', dest="VERBOSE", action="store_true", help="Display search result details")
a = parser.parse_args()
aa = vars(a)
makeDirectories([a.OUTPUT_FILE])
# normalize the query string in place on the parsed-args namespace
aa["QUERY"] = a.QUERY.strip()
# the YouTube Data API caps search results at 50 per page
MAX_YT_RESULTS_PER_PAGE = 50
if len(a.API_KEY) <= 0:
    print("You must pass in your developer API key. See more at https://google-developers.appspot.com/youtube/v3/getting-started")
    sys.exit()
if len(a.QUERY) <= 0:
    print("Please pass in a query.")
    # fix: previously fell through and kept running with an empty query
    sys.exit()
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=a.API_KEY)
# read queries from a .csv file if given, otherwise use the single -query string
queries = []
if len(a.INPUT_FILE) > 0:
    queryKeys, queries = readCsv(a.INPUT_FILE, doParseNumbers=False)
else:
    queries = [parseQueryString(a.QUERY)]
queryCount = len(queries)
for i, q in enumerate(queries):
    ytQuery = q.copy()
    ytQuery["part"] = "id,snippet"
    ytQuery["type"] = "video" # Always get videos back
    ytQuery["videoDimension"] = "2d" # exclude 3d videos
    if len(a.SORT_BY) > 0:
        ytQuery["order"] = a.SORT_BY
    pages = 1
    if a.LIMIT > 0:
        pages = ceilInt(1.0 * a.LIMIT / MAX_YT_RESULTS_PER_PAGE)
        ytQuery["maxResults"] = min(a.LIMIT, MAX_YT_RESULTS_PER_PAGE)
    print("Query %s of %s: %s" % (i+1, queryCount, urlencode(ytQuery)))
    for page in range(pages):
        print("- Page %s..." % (page+1))
        # First request: retrieve matching video ids for this page
        try:
            search_response = youtube.search().list(**ytQuery).execute()
        except HttpError as e:
            print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
            sys.exit()
        nextPageToken = search_response.get('nextPageToken', "")
        ids = [r['id']['videoId'] for r in search_response.get('items', [])]
        print("-- %s results found." % (len(ids)))
        # Only fetch details for videos that are not already cached on disk
        missingIds = [vid for vid in ids if not os.path.isfile(a.OUTPUT_FILE % vid)]
        if len(missingIds) > 0:
            print("-- Getting details for %s videos..." % (len(missingIds)))
            # Second request: retrieve statistics/snippet for the missing ids.
            # fix: previously joined *all* ids here, re-downloading videos the
            # cache check above had already found on disk.
            idString = ",".join(missingIds)
            try:
                search_response = youtube.videos().list(id=idString, part="id,statistics,snippet").execute()
            except HttpError as e:
                print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
                sys.exit()
            if a.VERBOSE:
                print("-----\nResults: ")
            for r in search_response.get('items', []):
                outfile = a.OUTPUT_FILE % r['id']
                writeJSON(outfile, r, verbose=a.VERBOSE)
                if a.VERBOSE:
                    print("%s: %s (%s views)" % (r['id'], r['snippet']['title'], r['statistics']['viewCount']))
            if a.VERBOSE:
                print("-----")
        # Advance to the next page, if any
        if len(nextPageToken) < 1:
            break
        ytQuery["pageToken"] = nextPageToken
print("Done.")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
11140,
7824,
34165,
25,
3740,
1378,
16244,
364,
13,
13297,
13,
785,
14,
11604,
14,
85,
18,
14,
31628,
14,
12947,
14,
4868,
198,
2,
11140,
7824,
11361,
34165,
... | 2.350861 | 2,149 |
import logging
from time import time
from typing import Tuple, Optional
from bubuku.broker import BrokerManager
from bubuku.communicate import sleep_and_operate
from bubuku.env_provider import EnvProvider
from bubuku.zookeeper import BukuExhibitor
# Module-level logger for the bubuku controller.
_LOG = logging.getLogger('bubuku.controller')
#
# Returns a flag indicating if the change should continue running (True).
# In that case time_till_next_run() is called to determine when to schedule the next run.
#
| [
11748,
18931,
198,
6738,
640,
1330,
640,
198,
6738,
19720,
1330,
309,
29291,
11,
32233,
198,
198,
6738,
10015,
33263,
13,
7957,
6122,
1330,
2806,
6122,
13511,
198,
6738,
10015,
33263,
13,
10709,
5344,
1330,
3993,
62,
392,
62,
3575,
378,... | 3.391608 | 143 |
def integer(roman):
    """
    Convert a roman numeral string to its integer value.

    :type roman: str
    :rtype: int
    """
    # Numeric value of each roman symbol.
    symbol_value = {
        'M': 1000,
        'D': 500,
        'C': 100,
        'L': 50,
        'X': 10,
        'V': 5,
        'I': 1
    }
    # Translate the numeral into its sequence of symbol values.
    values = [symbol_value[symbol] for symbol in roman]
    # A symbol written before a larger one is subtractive (e.g. IV = 4);
    # otherwise it is additive. The final symbol is always added.
    total = 0
    for current, following in zip(values, values[1:]):
        total += -current if current < following else current
    return total + values[-1]
if __name__ == '__main__':
    # Known numeral/value pairs used as a smoke test.
    test_set = [
        ('XLV', 45),
        ('MMMMMCMXCV', 5995),
        ('XCV', 95),
        ('DCCC', 800),
        ('CDLXXXII', 482),
    ]
    for numeral, expected in test_set:
        assert integer(numeral) == expected
        print('Test Passed.')
| [
4299,
18253,
7,
47119,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
15553,
284,
10385,
257,
374,
5185,
997,
1691,
284,
18253,
13,
628,
220,
220,
220,
1058,
4906,
374,
5185,
25,
965,
198,
220,
220,
220,
1058,
81,
4906,
25,
... | 2.33452 | 562 |
from fishbase import logger
class PluginsManagerStatic(object):
    """
    1. At this stage, plugins are used to process request or response parameters.
    2. For now, every plugin is required to implement a run method.
    3. Usage example:
       pm = PluginsManager()
       pm.run_plugin('demo.demo_md5',
           {'sign_type':'md5','data_sign_params':'param1, param2'}, {'param1':'1','param2':'2','param3':'3'})
    """
| [
6738,
5916,
8692,
1330,
49706,
628,
198,
4871,
22689,
1040,
13511,
45442,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
352,
13,
13328,
236,
108,
165,
246,
35050,
106,
113,
162,
237,
240,
20015,
114,
42468,
18796,
101... | 1.484018 | 219 |
import unittest
# Discover and run any unittest.TestCase classes defined in this module
# when it is executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.392857 | 28 |
import sklearn.utils.sparsefuncs as sf
from . import q, ut, pd, sp, np, warnings, sc
from .utils import to_vo, to_vn, substr, df_to_dict, sparse_knn, prepend_var_prefix
from samalg import SAM
from scipy.stats import rankdata
def GOEA(target_genes,GENE_SETS,df_key='GO',goterms=None,fdr_thresh=0.25,p_thresh=1e-3):
    """Performs GO term Enrichment Analysis using the hypergeometric distribution.

    Parameters
    ----------
    target_genes - array-like
        List of target genes from which to find enriched GO terms.
    GENE_SETS - dictionary or pandas.DataFrame
        Dictionary where the keys are GO terms and the values are lists of genes associated with each GO term.
        Ex: {'GO:0000001': ['GENE_A','GENE_B'],
             'GO:0000002': ['GENE_A','GENE_C','GENE_D']}
        Make sure to include all available genes that have GO terms in your dataset.
        ---OR---
        Pandas DataFrame with genes as the index and GO terms as values.
        Ex: 'GENE_A','GO:0000001',
            'GENE_A','GO:0000002',
            'GENE_B','GO:0000001',
            'GENE_B','GO:0000004',
            ...
        If `GENE_SETS` is a pandas DataFrame, the `df_key` parameter should be the name of the column in which
        the GO terms are stored.
    df_key - str, optional, default 'GO'
        The name of the column in which GO terms are stored. Only used if `GENE_SETS` is a DataFrame.
    goterms - array-like, optional, default None
        If provided, only these GO terms will be tested.
    fdr_thresh - float, optional, default 0.25
        Filter out GO terms with FDR q value greater than this threshold.
    p_thresh - float, optional, default 1e-3
        Filter out GO terms with p value greater than this threshold.

    Returns
    -------
    enriched_goterms - pandas.DataFrame
        A Pandas DataFrame of enriched GO terms with FDR q values, p values, and associated genes provided.
    """
    # local import: scipy.stats provides the exact hypergeometric tail that
    # was previously accumulated by hand from log-binomial coefficients
    from scipy.stats import hypergeom, rankdata

    # normalize GENE_SETS into a {goterm: array-of-genes} dictionary
    if isinstance(GENE_SETS, pd.DataFrame):
        print('Converting DataFrame into dictionary')
        genes = np.array(list(GENE_SETS.index))
        agt = np.array(list(GENE_SETS[df_key].values))
        idx = np.argsort(agt)
        genes = genes[idx]
        agt = agt[idx]
        # group the (goterm, gene) pairs by goterm via boundaries in the
        # sorted goterm array
        bounds = np.where(agt[:-1] != agt[1:])[0] + 1
        bounds = np.append(np.append(0, bounds), agt.size)
        bounds_left = bounds[:-1]
        bounds_right = bounds[1:]
        genes_lists = [genes[bounds_left[i]:bounds_right[i]] for i in range(bounds_left.size)]
        GENE_SETS = dict(zip(np.unique(agt), genes_lists))

    # identify all genes found in `GENE_SETS`
    all_genes = np.array(np.unique(np.concatenate(list(GENE_SETS.values()))))

    # if goterms is None, test every GO term found in `GENE_SETS`
    if goterms is None:
        goterms = np.unique(list(GENE_SETS.keys()))
    else:
        goterms = goterms[np.in1d(goterms, np.unique(list(GENE_SETS.keys())))]

    # deduplicate target genes (keeping first-occurrence order) and restrict
    # them to genes present in `all_genes`
    _, ix = np.unique(target_genes, return_index=True)
    target_genes = target_genes[np.sort(ix)]
    target_genes = target_genes[np.in1d(target_genes, all_genes)]

    N = all_genes.size      # total number of genes
    n = target_genes.size   # number of genes drawn (the target set)
    probs = []
    probs_genes = []
    for goterm in goterms:
        gene_set = np.array(GENE_SETS[goterm])
        # B -- number of genes associated with this go term
        B = gene_set.size
        # b -- number of genes in target associated with this go term
        gene_set_in_target = gene_set[np.in1d(gene_set, target_genes)]
        b = gene_set_in_target.size
        if b != 0:
            # Enrichment p value = P(X >= b) for X ~ Hypergeom with
            # population N, n successes (target genes), B draws.
            # hypergeom.sf(b-1, N, n, B) is exactly the tail sum the previous
            # implementation built from _log_binomial terms, but computed in
            # one vectorized, numerically careful call.
            probs.append(hypergeom.sf(b - 1, N, n, B))
        else:
            probs.append(1.0)
        # record the associated genes for the output table
        probs_genes.append(gene_set_in_target)
    probs = np.array(probs)
    probs_genes = np.array([';'.join(x) for x in probs_genes])

    # adjust p values for multiple testing (Benjamini-Hochberg style q values)
    fdr_q_probs = probs.size * probs / rankdata(probs, method='ordinal')

    # filter out go terms based on the FDR q value and p value thresholds
    filt = np.logical_and(fdr_q_probs < fdr_thresh, probs < p_thresh)
    enriched_goterms = goterms[filt]
    p_values = probs[filt]
    fdr_q_probs = fdr_q_probs[filt]
    probs_genes = probs_genes[filt]

    # construct the result table, sorted in ascending order by p value
    enriched_goterms = pd.DataFrame(data=fdr_q_probs, index=enriched_goterms, columns=['fdr_q_value'])
    enriched_goterms['p_value'] = p_values
    enriched_goterms['genes'] = probs_genes
    enriched_goterms = enriched_goterms.sort_values('p_value')
    return enriched_goterms
# Human-readable descriptions of the one-letter KOG (euKaryotic Orthologous
# Groups) functional categories.
_KOG_TABLE = dict(A = "RNA processing and modification",
                  B = "Chromatin structure and dynamics",
                  C = "Energy production and conversion",
                  D = "Cell cycle control, cell division, chromosome partitioning",
                  E = "Amino acid transport and metabolism",
                  F = "Nucleotide transport and metabolism",
                  G = "Carbohydrate transport and metabolism",
                  H = "Coenzyme transport and metabolism",
                  I = "Lipid transport and metabolism",
                  J = "Translation, ribosomal structure and biogenesis",
                  K = "Transcription",
                  L = "Replication, recombination, and repair",
                  M = "Cell wall membrane/envelope biogenesis",
                  N = "Cell motility",
                  O = "Post-translational modification, protein turnover, chaperones",
                  P = "Inorganic ion transport and metabolism",
                  Q = "Secondary metabolites biosynthesis, transport and catabolism",
                  R = "General function prediction only",
                  S = "Function unknown",
                  T = "Signal transduction mechanisms",
                  U = "Intracellular trafficking, secretion, and vesicular transport",
                  V = "Defense mechanisms",
                  W = "Extracellular structures",
                  Y = "Nuclear structure",
                  Z = "Cytoskeleton")
import gc
from collections.abc import Iterable
def sankey_plot(M,species_order=None,align_thr=0.1,**params):
    """Generate a sankey plot

    Parameters
    ----------
    M: pandas.DataFrame
        Mapping table output from `get_mapping_scores` (second output).

    align_thr: float, optional, default 0.1
        The alignment score threshold below which to remove cell type mappings.

    species_order: list, optional, default None
        Specify the order of species (left-to-right) in the sankey plot.
        For example, `species_order=['hu','le','ms']`.

    Keyword arguments
    -----------------
    Keyword arguments will be passed to `sankey.opts`.
    """
    # species IDs are the prefix before '_' in each cell type label
    if species_order is not None:
        ids = np.array(species_order)
    else:
        ids = np.unique([x.split('_')[0] for x in M.index])
    if len(ids)>2:
        # three or more species: keep only links between consecutive species
        # in `ids` so the diagram reads left-to-right without skips
        d = M.values.copy()
        d[d<align_thr]=0
        x,y = d.nonzero()
        x,y = np.unique(np.sort(np.vstack((x,y)).T,axis=1),axis=0).T
        values = d[x,y]
        nodes = q(M.index)
        node_pairs = nodes[np.vstack((x,y)).T]
        sn1 = q([xi.split('_')[0] for xi in node_pairs[:,0]])
        sn2 = q([xi.split('_')[0] for xi in node_pairs[:,1]])
        filt = np.logical_or(
            np.logical_or(np.logical_and(sn1==ids[0],sn2==ids[1]),np.logical_and(sn1==ids[1],sn2==ids[0])),
            np.logical_or(np.logical_and(sn1==ids[1],sn2==ids[2]),np.logical_and(sn1==ids[2],sn2==ids[1]))
        )
        x,y,values=x[filt],y[filt],values[filt]
        # depth_map assigns each node the column of its species in the plot
        d=dict(zip(ids,list(np.arange(len(ids)))))
        depth_map = dict(zip(nodes,[d[xi.split('_')[0]] for xi in nodes]))
        data = nodes[np.vstack((x,y))].T
        # orient every edge from the lower-depth species to the higher one
        for i in range(data.shape[0]):
            if d[data[i,0].split('_')[0]] > d[data[i,1].split('_')[0]]:
                data[i,:]=data[i,::-1]
        R = pd.DataFrame(data = data,columns=['source','target'])
        R['Value'] = values
    else:
        # two species: keep all above-threshold links, no depth ordering needed
        d = M.values.copy()
        d[d<align_thr]=0
        x,y = d.nonzero()
        x,y = np.unique(np.sort(np.vstack((x,y)).T,axis=1),axis=0).T
        values = d[x,y]
        nodes = q(M.index)
        R = pd.DataFrame(data = nodes[np.vstack((x,y))].T,columns=['source','target'])
        R['Value'] = values
        depth_map=None
    try:
        from holoviews import dim
        #from bokeh.models import Label
        import holoviews as hv
        hv.extension('bokeh',logo=False)
        hv.output(size=100)
    except:
        raise ImportError('Please install holoviews-samap with `!pip install holoviews-samap`.')
    sankey1 = hv.Sankey(R, kdims=["source", "target"])#, vdims=["Value"])
    # styling options, all overridable through **params
    cmap = params.get('cmap','Colorblind')
    label_position = params.get('label_position','outer')
    edge_line_width = params.get('edge_line_width',0)
    show_values = params.get('show_values',False)
    node_padding = params.get('node_padding',4)
    node_alpha = params.get('node_alpha',1.0)
    node_width = params.get('node_width',40)
    node_sort = params.get('node_sort',True)
    frame_height = params.get('frame_height',1000)
    frame_width = params.get('frame_width',800)
    bgcolor = params.get('bgcolor','snow')
    apply_ranges = params.get('apply_ranges',True)
    # NOTE(review): `f` is not defined in this function; it is presumably a
    # module-level bokeh hook defined elsewhere in this file -- confirm.
    sankey1.opts(cmap=cmap,label_position=label_position, edge_line_width=edge_line_width, show_values=show_values,
                 node_padding=node_padding,depth_map=depth_map, node_alpha=node_alpha, node_width=node_width,
                 node_sort=node_sort,frame_height=frame_height,frame_width=frame_width,bgcolor=bgcolor,
                 apply_ranges=apply_ranges,hooks=[f])
    return sankey1
def chord_plot(A,align_thr=0.1):
    """Generate a chord plot

    Parameters
    ----------
    A: pandas.DataFrame
        Mapping table output from `get_mapping_scores` (second output).

    align_thr: float, optional, default 0.1
        The alignment score threshold below which to remove cell type mappings.
    """
    try:
        from holoviews import dim, opts
        import holoviews as hv
        hv.extension('bokeh',logo=False)
        hv.output(size=300)
    except:
        raise ImportError('Please install holoviews-samap with `!pip install holoviews-samap`.')
    # zero out sub-threshold alignment scores and build the edge list
    xx=A.values.copy()
    xx[xx<align_thr]=0
    x,y = xx.nonzero()
    z=xx[x,y]
    x,y = A.index[x],A.columns[y]
    links=pd.DataFrame(data=np.array([x,y,z]).T,columns=['source','target','value'])
    # edge group = concatenated species prefixes of the two endpoints
    links['edge_grp'] = [x.split('_')[0]+y.split('_')[0] for x,y in zip(links['source'],links['target'])]
    links['value']*=100
    # rescale edge values to integers in (1, 100] for chord widths
    f = links['value'].values
    z=((f-f.min())/(f.max()-f.min())*0.99+0.01)*100
    links['value']=z
    links['value']=np.round([x for x in links['value'].values]).astype('int')
    # keep only cell types that participate in at least one retained edge
    clu=np.unique(A.index)
    clu = clu[np.in1d(clu,np.unique(np.array([x,y])))]
    links = hv.Dataset(links)
    nodes = hv.Dataset(pd.DataFrame(data=np.array([clu,clu,np.array([x.split('_')[0] for x in clu])]).T,columns=['index','name','group']),'index')
    chord = hv.Chord((links, nodes),kdims=["source", "target"], vdims=["value","edge_grp"])#.select(value=(5, None))
    chord.opts(
        opts.Chord(cmap='Category20', edge_cmap='Category20',edge_color=dim('edge_grp'),
                   labels='name', node_color=dim('group').str()))
    return chord
def find_cluster_markers(sam, key, inplace=True):
    """ Finds differentially expressed genes for provided cell type labels.

    Parameters
    ----------
    sam - SAM object

    key - str
        Column in `sam.adata.obs` for which to identify differentially expressed genes.

    inplace - bool, optional, default True
        If True, deposits enrichment scores in `sam.adata.varm[f'{key}_scores']`
        and p-values in `sam.adata.varm[f'{key}_pvals']`.

        Otherwise, returns three pandas.DataFrame objects (genes x clusters).
            NAMES - the gene names
            PVALS - the p-values
            SCORES - the enrichment scores
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # singleton clusters (one cell) cannot be ranked -- exclude them from
        # the test and remember them in `t` for later zero-filling
        a,c = np.unique(q(sam.adata.obs[key]),return_counts=True)
        t = a[c==1]
        adata = sam.adata[np.in1d(q(sam.adata.obs[key]),a[c==1],invert=True)].copy()
        sc.tl.rank_genes_groups(
            adata,
            key,
            method="wilcoxon",
            n_genes=sam.adata.shape[1],
            use_raw=False,
            layer=None,
        )
        sam.adata.uns['rank_genes_groups'] = adata.uns['rank_genes_groups']
    NAMES = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["names"])
    PVALS = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["pvals"])
    SCORES = pd.DataFrame(sam.adata.uns["rank_genes_groups"]["scores"])
    if not inplace:
        return NAMES, PVALS, SCORES
    dfs1 = []
    dfs2 = []
    for i in range(SCORES.shape[1]):
        names = NAMES.iloc[:, i]
        scores = SCORES.iloc[:, i]
        pvals = PVALS.iloc[:, i]
        # negative scores indicate depletion, not enrichment -- mask them out
        pvals[scores < 0] = 1.0
        scores[scores < 0] = 0
        pvals = q(pvals)
        scores = q(scores)
        # reindex each cluster's results to the SAM var_names order
        dfs1.append(pd.DataFrame(
            data=scores[None, :], index = [SCORES.columns[i]], columns=names
        )[sam.adata.var_names].T)
        dfs2.append(pd.DataFrame(
            data=pvals[None, :], index = [SCORES.columns[i]], columns=names
        )[sam.adata.var_names].T)
    df1 = pd.concat(dfs1,axis=1)
    df2 = pd.concat(dfs2,axis=1)
    try:
        sam.adata.varm[key+'_scores'] = df1
        sam.adata.varm[key+'_pvals'] = df2
    except:
        # older anndata requires dim_names to be set before varm assignment
        sam.adata.varm.dim_names = sam.adata.var_names
        sam.adata.varm.dim_names = sam.adata.var_names
        sam.adata.varm[key+'_scores'] = df1
        sam.adata.varm[key+'_pvals'] = df2
    # singleton clusters get neutral results (score 0, p value 1)
    for i in range(t.size):
        sam.adata.varm[key+'_scores'][t[i]]=0
        sam.adata.varm[key+'_pvals'][t[i]]=1
def ParalogSubstitutions(sm, ortholog_pairs, paralog_pairs=None, psub_thr = 0.3):
    """Identify paralog substitutions.

    For all genes in `ortholog_pairs` and `paralog_pairs`, this function expects the genes to
    be prepended with their corresponding species IDs.

    Parameters
    ----------
    sm - SAMAP object

    ortholog_pairs - n x 2 numpy array of ortholog pairs

    paralog_pairs - n x 2 numpy array of paralog pairs, optional, default None
        If None, assumes every pair in the homology graph that is not an ortholog is a paralog.
        Note that this would essentially result in the more generic 'homolog substitutions' rather
        than paralog substitutions.
        The paralogs can be either cross-species, within-species, or a mix of both.

    psub_thr - float, optional, default 0.3
        Threshold for correlation difference between paralog pairs and ortholog pairs.
        Paralog pairs that do not have greater than `psub_thr` correlation than their
        corresponding ortholog pairs are filtered out.

    Returns
    -------
    RES - pandas.DataFrame
        A table of paralog substitutions.
    """
    if paralog_pairs is not None:
        # split paralog pairs into within-species (ix) and cross-species (ixnot)
        ids1 = np.array([x.split('_')[0] for x in paralog_pairs[:,0]])
        ids2 = np.array([x.split('_')[0] for x in paralog_pairs[:,1]])
        ix = np.where(ids1==ids2)[0]
        ixnot = np.where(ids1!=ids2)[0]
        if ix.size > 0:
            # build a symmetric gene -> within-species-paralogs lookup (ZZ)
            pps = paralog_pairs[ix]
            ZZ1 = {}
            ZZ2 = {}
            for i in range(pps.shape[0]):
                L = ZZ1.get(pps[i,0],[])
                L.append(pps[i,1])
                ZZ1[pps[i,0]]=L
                L = ZZ2.get(pps[i,1],[])
                L.append(pps[i,0])
                ZZ2[pps[i,1]]=L
            keys = list(ZZ1.keys())
            for k in keys:
                L = ZZ2.get(k,[])
                L.extend(ZZ1[k])
                ZZ2[k] = list(np.unique(L))
            ZZ = ZZ2
            # project within-species paralogs across species through orthologs:
            # each ortholog partner is paired with its mate's paralogs
            L1=[]
            L2=[]
            for i in range(ortholog_pairs.shape[0]):
                try:
                    x = ZZ[ortholog_pairs[i,0]]
                except:
                    x = []
                L1.extend([ortholog_pairs[i,1]]*len(x))
                L2.extend(x)
                try:
                    x = ZZ[ortholog_pairs[i,1]]
                except:
                    x = []
                L1.extend([ortholog_pairs[i,0]]*len(x))
                L2.extend(x)
            L = np.vstack((L2,L1)).T
            pps = np.unique(np.sort(L,axis=1),axis=0)
            paralog_pairs = np.unique(np.sort(np.vstack((pps,paralog_pairs[ixnot])),axis=1),axis=0)
    smp = sm.samap
    gnnm = smp.adata.varp["homology_graph_reweighted"]
    gn = q(smp.adata.var_names)
    # keep only pairs whose genes exist in the homology graph
    ortholog_pairs = np.sort(ortholog_pairs,axis=1)
    ortholog_pairs = ortholog_pairs[np.logical_and(np.in1d(ortholog_pairs[:,0],gn),np.in1d(ortholog_pairs[:,1],gn))]
    if paralog_pairs is None:
        paralog_pairs = gn[np.vstack(smp.adata.varp["homology_graph"].nonzero()).T]
    else:
        paralog_pairs = paralog_pairs[np.logical_and(np.in1d(paralog_pairs[:,0],gn),np.in1d(paralog_pairs[:,1],gn))]
    paralog_pairs = np.sort(paralog_pairs,axis=1)
    # remove pairs that are already orthologs (in either orientation)
    paralog_pairs = paralog_pairs[
        np.in1d(to_vn(paralog_pairs), np.append(to_vn(ortholog_pairs),to_vn(ortholog_pairs[:,::-1])), invert=True)
    ]
    # map gene names to integer indices for sparse-matrix lookups
    A = pd.DataFrame(data=np.arange(gn.size)[None, :], columns=gn)
    xp, yp = (
        A[paralog_pairs[:, 0]].values.flatten(),
        A[paralog_pairs[:, 1]].values.flatten(),
    )
    xp, yp = np.unique(
        np.vstack((np.vstack((xp, yp)).T, np.vstack((yp, xp)).T)), axis=0
    ).T
    xo, yo = (
        A[ortholog_pairs[:, 0]].values.flatten(),
        A[ortholog_pairs[:, 1]].values.flatten(),
    )
    xo, yo = np.unique(
        np.vstack((np.vstack((xo, yo)).T, np.vstack((yo, xo)).T)), axis=0
    ).T
    A = pd.DataFrame(data=np.vstack((xp, yp)).T, columns=["x", "y"])
    pairdict = df_to_dict(A, key_key="x", val_key="y")
    # pair every ortholog edge with each paralog edge sharing its first gene
    Xp = []
    Yp = []
    Xo = []
    Yo = []
    for i in range(xo.size):
        try:
            y = pairdict[xo[i]]
        except KeyError:
            y = np.array([])
        Yp.extend(y)
        Xp.extend([xo[i]] * y.size)
        Xo.extend([xo[i]] * y.size)
        Yo.extend([yo[i]] * y.size)
    orths = to_vn(gn[np.vstack((np.array(Xo), np.array(Yo))).T])
    paras = to_vn(gn[np.vstack((np.array(Xp), np.array(Yp))).T])
    # substitution signal: paralog correlation minus ortholog correlation
    orth_corrs = gnnm[Xo, Yo].A.flatten()
    par_corrs = gnnm[Xp, Yp].A.flatten()
    diff_corrs = par_corrs - orth_corrs
    RES = pd.DataFrame(
        data=np.vstack((orths, paras)).T, columns=["ortholog pairs", "paralog pairs"]
    )
    RES["ortholog corrs"] = orth_corrs
    RES["paralog corrs"] = par_corrs
    RES["corr diff"] = diff_corrs
    RES = RES.sort_values("corr diff", ascending=False)
    RES = RES[RES["corr diff"] > psub_thr]
    # finally, require the ortholog and paralog pairs to span the same species
    orths = RES['ortholog pairs'].values.flatten()
    paras = RES['paralog pairs'].values.flatten()
    orthssp = np.vstack([np.array([x.split('_')[0] for x in xx]) for xx in to_vo(orths)])
    parassp = np.vstack([np.array([x.split('_')[0] for x in xx]) for xx in to_vo(paras)])
    filt=[]
    for i in range(orthssp.shape[0]):
        filt.append(np.in1d(orthssp[i],parassp[i]).mean()==1.0)
    filt=np.array(filt)
    return RES[filt]
def convert_eggnog_to_homologs(sm, EGGs, og_key = 'eggNOG_OGs', taxon=2759):
    """Gets an n x 2 array of homologs at some taxonomic level based on Eggnog results.

    Parameters
    ----------
    sm: SAMAP object

    EGGs: dict of pandas.DataFrame, Eggnog output tables keyed by species IDs

    og_key: str, optional, default 'eggNOG_OGs'
        The column name of the orthology group mapping results in the Eggnog output tables.

    taxon: int, optional, default 2759
        Taxonomic ID corresponding to the level at which genes with overlapping orthology groups
        will be considered homologs. Defaults to the Eukaryotic level.

    Returns
    -------
    homolog_pairs: n x 2 numpy array of homolog pairs.
    """
    smp = sm.samap
    taxon = str(taxon)
    EGGs = dict(zip(list(EGGs.keys()),list(EGGs.values()))) #copying
    for k in EGGs.keys():
        EGGs[k] = EGGs[k].copy()
    # prefix each table's gene index with its species ID and concatenate
    Es=[]
    for k in EGGs.keys():
        A=EGGs[k]
        A.index=k+"_"+A.index
        Es.append(A)
    A = pd.concat(Es, axis=0)
    gn = q(smp.adata.var_names)
    A = A[np.in1d(q(A.index), gn)]
    orthology_groups = A[og_key]
    og = q(orthology_groups)
    x = np.unique(",".join(og).split(","))
    D = pd.DataFrame(data=np.arange(x.size)[None, :], columns=x)
    # for each gene, keep only the OG annotation at the requested taxon level
    # (entries look like 'OG@taxon|name', comma-separated)
    for i in range(og.size):
        n = orthology_groups[i].split(",")
        taxa = substr(substr(n, "@", 1),'|',0)
        if (taxa == "2759").sum() > 1 and taxon == '2759':
            og[i] = ""
        else:
            og[i] = "".join(np.array(n)[taxa == taxon])
    A[og_key] = og
    og = q(A[og_key].reindex(gn))
    og[og == "nan"] = ""
    # build a sparse gene x OG membership matrix B
    X = []
    Y = []
    for i in range(og.size):
        x = og[i]
        if x != "":
            X.extend(D[x].values.flatten())
            Y.extend([i])
    X = np.array(X)
    Y = np.array(Y)
    B = sp.sparse.lil_matrix((og.size, D.size))
    B[Y, X] = 1
    B = B.tocsr()
    # B @ B.T connects genes sharing at least one orthology group
    B = B.dot(B.T)
    B.data[:] = 1
    pairs = gn[np.vstack((B.nonzero())).T]
    # keep only cross-species pairs, deduplicated and sorted within each pair
    pairssp = np.vstack([q([x.split('_')[0] for x in xx]) for xx in pairs])
    return np.unique(np.sort(pairs[pairssp[:,0]!=pairssp[:,1]],axis=1),axis=0)
def CellTypeTriangles(sm,keys, align_thr=0.1):
    """Outputs a table of cell type triangles.

    Parameters
    ----------
    sm: SAMAP object - assumed to contain at least three species.

    keys: dictionary of annotation keys (`.adata.obs[key]`) keyed by species.

    align_thr: float, optional, default, 0.1
        Only keep triangles with minimum `align_thr` alignment score.
    """
    D,A = get_mapping_scores(sm,keys=keys)
    # edge list of cell type pairs with above-threshold alignment
    x,y = A.values.nonzero()
    all_pairsf = np.array([A.index[x],A.columns[y]]).T.astype('str')
    alignmentf = A.values[x,y].flatten()
    alignment = alignmentf.copy()
    all_pairs = all_pairsf.copy()
    all_pairs = all_pairs[alignment > align_thr]
    alignment = alignment[alignment > align_thr]
    all_pairs = to_vn(np.sort(all_pairs, axis=1))
    # symmetric sparse alignment matrix over all involved cell types
    x, y = substr(all_pairs, ";")
    ctu = np.unique(np.concatenate((x, y)))
    Z = pd.DataFrame(data=np.arange(ctu.size)[None, :], columns=ctu)
    nnm = sp.sparse.lil_matrix((ctu.size,) * 2)
    nnm[Z[x].values.flatten(), Z[y].values.flatten()] = alignment
    nnm[Z[y].values.flatten(), Z[x].values.flatten()] = alignment
    nnm = nnm.tocsr()
    import networkx as nx
    # triangles in the alignment graph = mutually aligned cell type triples
    G = nx.Graph()
    gps=ctu[np.vstack(nnm.nonzero()).T]
    G.add_edges_from(gps)
    alignment = pd.Series(index=to_vn(gps),data=nnm.data)
    all_cliques = nx.enumerate_all_cliques(G)
    all_triangles = [x for x in all_cliques if len(x) == 3]
    Z = np.sort(np.vstack(all_triangles), axis=1)
    DF = pd.DataFrame(data=Z, columns=[x.split("_")[0] for x in Z[0]])
    # annotate each triangle with the pairwise alignment scores
    for i,sid1 in enumerate(sm.ids):
        for sid2 in sm.ids[i:]:
            if sid1!=sid2:
                DF[sid1+';'+sid2] = [alignment[x] for x in DF[sid1].values.astype('str').astype('object')+';'+DF[sid2].values.astype('str').astype('object')]
    DF = DF[sm.ids]
    return DF
def GeneTriangles(sm,orth,keys=None,compute_markers=True,corr_thr=0.3, psub_thr = 0.3, pval_thr=1e-10):
"""Outputs a table of gene triangles.
Parameters
----------
sm: SAMAP object which contains at least three species
orths: (n x 2) ortholog pairs
keys: dict of strings corresponding to each species annotation column keyed by species, optional, default None
If you'd like to include information about where each gene is differentially expressed, you can specify the
annotation column to compute differential expressivity from for each species.
compute_markers: bool, optional, default True
Set this to False if you already precomputed differential expression for the input keys.
corr_thr: float, optional, default, 0.3
Only keep triangles with minimum `corr_thr` correlation.
pval_thr: float, optional, defaul, 1e-10
Consider cell types as differentially expressed if their p-values are less than `pval_thr`.
"""
FINALS = []
orth = np.sort(orth,axis=1)
orthsp = np.vstack([q([x.split('_')[0] for x in xx]) for xx in orth])
RES = ParalogSubstitutions(sm, orth, psub_thr = psub_thr)
op = to_vo(q(RES['ortholog pairs']))
pp = to_vo(q(RES['paralog pairs']))
ops = np.vstack([q([x.split('_')[0] for x in xx]) for xx in op])
pps = np.vstack([q([x.split('_')[0] for x in xx]) for xx in pp])
gnnm = sm.samap.adata.varp["homology_graph_reweighted"]
gn = q(sm.samap.adata.var_names)
gnsp = q([x.split('_')[0] for x in gn])
import itertools
combs = list(itertools.combinations(sm.ids,3))
for comb in combs:
A,B,C = comb
smp1 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==A,sm.samap.adata.obs['species']==B)])
smp2 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==A,sm.samap.adata.obs['species']==C)])
smp3 = SAM(counts=sm.samap.adata[np.logical_or(sm.samap.adata.obs['species']==B,sm.samap.adata.obs['species']==C)])
sam1=sm.sams[A]
sam2=sm.sams[B]
sam3=sm.sams[C]
A1,A2=A,B
B1,B2=A,C
C1,C2=B,C
f1 = np.logical_and(((ops[:,0]==A1) * (ops[:,1]==A2) + (ops[:,0]==A2) * (ops[:,1]==A1)) > 0,
((pps[:,0]==A1) * (pps[:,1]==A2) + (pps[:,0]==A2) * (pps[:,1]==A1)) > 0)
f2 = np.logical_and(((ops[:,0]==B1) * (ops[:,1]==B2) + (ops[:,0]==B2) * (ops[:,1]==B1)) > 0,
((pps[:,0]==B1) * (pps[:,1]==B2) + (pps[:,0]==B2) * (pps[:,1]==B1)) > 0)
f3 = np.logical_and(((ops[:,0]==C1) * (ops[:,1]==C2) + (ops[:,0]==C2) * (ops[:,1]==C1)) > 0,
((pps[:,0]==C1) * (pps[:,1]==C2) + (pps[:,0]==C2) * (pps[:,1]==C1)) > 0)
RES1=RES[f1]
RES2=RES[f2]
RES3=RES[f3]
f1 = ((orthsp[:,0]==A1) * (orthsp[:,1]==A2) + (orthsp[:,0]==A2) * (orthsp[:,1]==A1)) > 0
f2 = ((orthsp[:,0]==B1) * (orthsp[:,1]==B2) + (orthsp[:,0]==B2) * (orthsp[:,1]==B1)) > 0
f3 = ((orthsp[:,0]==C1) * (orthsp[:,1]==C2) + (orthsp[:,0]==C2) * (orthsp[:,1]==C1)) > 0
orth1 = orth[f1]
orth2 = orth[f2]
orth3 = orth[f3]
op1 = to_vo(q(RES1["ortholog pairs"]))
op2 = to_vo(q(RES2["ortholog pairs"]))
op3 = to_vo(q(RES3["ortholog pairs"]))
pp1 = to_vo(q(RES1["paralog pairs"]))
pp2 = to_vo(q(RES2["paralog pairs"]))
pp3 = to_vo(q(RES3["paralog pairs"]))
gnnm1 = sp.sparse.vstack((
sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==A1).sum(),)*2),gnnm[gnsp==A1,:][:,gnsp==A2])),
sp.sparse.hstack((gnnm[gnsp==A2,:][:,gnsp==A1],sp.sparse.csr_matrix(((gnsp==A2).sum(),)*2)))
)).tocsr()
gnnm2 = sp.sparse.vstack((
sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==B1).sum(),)*2),gnnm[gnsp==B1,:][:,gnsp==B2])),
sp.sparse.hstack((gnnm[gnsp==B2,:][:,gnsp==B1],sp.sparse.csr_matrix(((gnsp==B2).sum(),)*2)))
)).tocsr()
gnnm3 = sp.sparse.vstack((
sp.sparse.hstack((sp.sparse.csr_matrix(((gnsp==C1).sum(),)*2),gnnm[gnsp==C1,:][:,gnsp==C2])),
sp.sparse.hstack((gnnm[gnsp==C2,:][:,gnsp==C1],sp.sparse.csr_matrix(((gnsp==C2).sum(),)*2)))
)).tocsr()
gn1 = np.append(gn[gnsp==A1],gn[gnsp==A2])
gn2 = np.append(gn[gnsp==B1],gn[gnsp==B2])
gn3 = np.append(gn[gnsp==C1],gn[gnsp==C2])
# suppress warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
T1 = pd.DataFrame(data=np.arange(gn1.size)[None, :], columns=gn1)
x, y = T1[op1[:, 0]].values.flatten(), T1[op1[:, 1]].values.flatten()
gnnm1[x, y] = gnnm1[x, y]
gnnm1[y, x] = gnnm1[y, x]
T1 = pd.DataFrame(data=np.arange(gn2.size)[None, :], columns=gn2)
x, y = T1[op2[:, 0]].values.flatten(), T1[op2[:, 1]].values.flatten()
gnnm2[x, y] = gnnm2[x, y]
gnnm2[y, x] = gnnm2[y, x]
T1 = pd.DataFrame(data=np.arange(gn3.size)[None, :], columns=gn3)
x, y = T1[op3[:, 0]].values.flatten(), T1[op3[:, 1]].values.flatten()
gnnm3[x, y] = gnnm3[x, y]
gnnm3[y, x] = gnnm3[y, x]
gnnm1.data[gnnm1.data==0]=1e-4
gnnm2.data[gnnm2.data==0]=1e-4
gnnm3.data[gnnm3.data==0]=1e-4
pairs1 = gn1[np.vstack(gnnm1.nonzero()).T]
pairs2 = gn2[np.vstack(gnnm2.nonzero()).T]
pairs3 = gn3[np.vstack(gnnm3.nonzero()).T]
data = np.concatenate((gnnm1.data, gnnm2.data, gnnm3.data))
CORR1 = pd.DataFrame(data=gnnm1.data[None, :], columns=to_vn(pairs1))
CORR2 = pd.DataFrame(data=gnnm2.data[None, :], columns=to_vn(pairs2))
CORR3 = pd.DataFrame(data=gnnm3.data[None, :], columns=to_vn(pairs3))
pairs = np.vstack((pairs1, pairs2, pairs3))
all_genes = np.unique(pairs.flatten())
Z = pd.DataFrame(data=np.arange(all_genes.size)[None, :], columns=all_genes)
x, y = Z[pairs[:, 0]].values.flatten(), Z[pairs[:, 1]].values.flatten()
GNNM = sp.sparse.lil_matrix((all_genes.size,) * 2)
GNNM[x, y] = data
import networkx as nx
G = nx.from_scipy_sparse_matrix(GNNM, create_using=nx.Graph)
all_cliques = nx.enumerate_all_cliques(G)
all_triangles = [x for x in all_cliques if len(x) == 3]
Z = all_genes[np.sort(np.vstack(all_triangles), axis=1)]
DF = pd.DataFrame(data=Z, columns=[x.split("_")[0] for x in Z[0]])
DF = DF[[A, B, C]]
orth1DF = pd.DataFrame(data=orth1, columns=[x.split("_")[0] for x in orth1[0]])[
[A, B]
]
orth2DF = pd.DataFrame(data=orth2, columns=[x.split("_")[0] for x in orth2[0]])[
[A, C]
]
orth3DF = pd.DataFrame(data=orth3, columns=[x.split("_")[0] for x in orth3[0]])[
[B, C]
]
ps1DF = pd.DataFrame(
data=np.sort(pp1, axis=1),
columns=[x.split("_")[0] for x in np.sort(pp1, axis=1)[0]],
)[[A, B]]
ps2DF = pd.DataFrame(
data=np.sort(pp2, axis=1),
columns=[x.split("_")[0] for x in np.sort(pp2, axis=1)[0]],
)[[A, C]]
ps3DF = pd.DataFrame(
data=np.sort(pp3, axis=1),
columns=[x.split("_")[0] for x in np.sort(pp3, axis=1)[0]],
)[[B, C]]
A_AB = pd.DataFrame(data=to_vn(op1)[None, :], columns=to_vn(ps1DF.values))
A_AC = pd.DataFrame(data=to_vn(op2)[None, :], columns=to_vn(ps2DF.values))
A_BC = pd.DataFrame(data=to_vn(op3)[None, :], columns=to_vn(ps3DF.values))
AB = to_vn(DF[[A, B]].values)
AC = to_vn(DF[[A, C]].values)
BC = to_vn(DF[[B, C]].values)
AVs = []
CATs = []
CORRs = []
for i, X, O, P, Z, R in zip(
[0, 1, 2],
[AB, AC, BC],
[orth1DF, orth2DF, orth3DF],
[ps1DF, ps2DF, ps3DF],
[A_AB, A_AC, A_BC],
[CORR1, CORR2, CORR3],
):
cat = q(["homolog"] * X.size).astype("object")
cat[np.in1d(X, to_vn(O.values))] = "ortholog"
ff = np.in1d(X, to_vn(P.values))
cat[ff] = "substitution"
z = Z[X[ff]] #problem line here
x = X[ff]
av = np.zeros(x.size, dtype="object")
for ai in range(x.size):
v=pd.DataFrame(z[x[ai]]) #get ortholog pairs - paralog pairs dataframe
vd=v.values.flatten() #get ortholog pairs
vc=q(';'.join(v.columns).split(';')) # get paralogous genes
temp = np.unique(q(';'.join(vd).split(';'))) #get orthologous genes
av[ai] = ';'.join(temp[np.in1d(temp,vc,invert=True)]) #get orthologous genes not present in paralogous genes
AV = np.zeros(X.size, dtype="object")
AV[ff] = av
corr = R[X].values.flatten()
AVs.append(AV)
CATs.append(cat)
CORRs.append(corr)
tri_pairs = np.vstack((AB, AC, BC)).T
cat_pairs = np.vstack(CATs).T
corr_pairs = np.vstack(CORRs).T
homology_triangles = DF.values
substituted_genes = np.vstack(AVs).T
substituted_genes[substituted_genes == 0] = "N.S."
data = np.hstack(
(
homology_triangles.astype("object"),
substituted_genes.astype("object"),
tri_pairs.astype("object"),
corr_pairs.astype("object"),
cat_pairs.astype("object"),
)
)
FINAL = pd.DataFrame(data = data, columns = [f'{A} gene',f'{B} gene',f'{C} gene',
f'{A}/{B} subbed',f'{A}/{C} subbed',f'{B}/{C} subbed',
f'{A}/{B}',f'{A}/{C}',f'{B}/{C}',
f'{A}/{B} corr',f'{A}/{C} corr',f'{B}/{C} corr',
f'{A}/{B} type',f'{A}/{C} type',f'{B}/{C} type'])
FINAL['#orthologs'] = (cat_pairs=='ortholog').sum(1)
FINAL['#substitutions'] = (cat_pairs=='substitution').sum(1)
FINAL = FINAL[(FINAL['#orthologs']+FINAL['#substitutions'])==3]
x = FINAL[[f'{A}/{B} corr',f'{A}/{C} corr',f'{B}/{C} corr']].min(1)
FINAL['min_corr'] = x
FINAL = FINAL[x>corr_thr]
if keys is not None:
keys = [keys[A],keys[B],keys[C]]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if keys is not None:
for i,sam,n in zip([0,1,2],[sam1,sam2,sam3],[A,B,C]):
if compute_markers:
find_cluster_markers(sam,keys[i])
a = sam.adata.varm[keys[i]+'_scores'].T[q(FINAL[n+' gene'])].T
p = sam.adata.varm[keys[i]+'_pvals'].T[q(FINAL[n+' gene'])].T.values
p[p>pval_thr]=1
p[p<1]=0
p=1-p
f = a.columns[a.values.argmax(1)]
res=[]
for i in range(p.shape[0]):
res.append(';'.join(np.unique(np.append(f[i],a.columns[p[i,:]==1]))))
FINAL[n+' cell type'] = res
FINAL = FINAL.sort_values('min_corr',ascending=False)
FINALS.append(FINAL)
FINAL = pd.concat(FINALS,axis=0)
return FINAL
def transfer_annotations(sm,reference_id=None, keys=[],num_iters=5, inplace = True):
    """Transfer annotations across species using label propagation along the combined manifold.

    Parameters
    ----------
    sm - SAMAP object
    reference_id - str, optional, default None
        The species ID of the reference species from which the annotations will be transferred.
    keys - str or list, optional, default []
        The `obs` key or list of keys corresponding to the labels to be propagated.
        If passed an empty list, all keys in the reference species' `obs` dataframe
        will be propagated. (The default list is reassigned, never mutated, so the
        mutable default is harmless here.)
    num_iters - int, optional, default 5
        The maximum number of diffusion propagation steps; the loop stops early
        once the propagation changes by less than 5e-3.
    inplace - bool, optional, default True
        If True, deposit propagated labels (`<key>_transfer`) and their
        uncertainties (`<key>_uncertainty`) in the combined `obs` DataFrame.
        Otherwise, just return the soft-membership DataFrame.

    Returns
    -------
    A Pandas DataFrame with soft membership scores for each cluster in each cell
    (for the LAST key processed), plus a 'labels' column with the hard assignment.
    """
    stitched = sm.samap
    # Row-normalize the combined-manifold connectivity graph so each row sums
    # to 1: this is the random-walk transition matrix used for the diffusion.
    NNM = stitched.adata.obsp['connectivities'].copy()
    NNM = NNM.multiply(1/NNM.sum(1).A).tocsr()
    if type(keys) is str:
        keys = [keys]
    elif len(keys) == 0:
        # No keys given: propagate every annotation column of the reference species.
        try:
            keys = list(sm.sams[reference_id].adata.obs.keys())
        except KeyError:
            raise ValueError(f'`reference` must be one of {sm.ids}.')
    for key in keys:
        samref = sm.sams[reference_id]
        ANN = stitched.adata.obs
        ANNr = samref.adata.obs
        cl = ANN[key].values.astype('object').astype('str')
        # Reference labels carry a species-ID prefix inside the stitched object.
        clr = reference_id+'_'+ANNr[key].values.astype('object')
        # Blank out any label that does not originate from the reference species.
        cl[np.invert(np.in1d(cl,clr))]=''
        clu,clui = np.unique(cl,return_inverse=True)
        # One-hot membership matrix P (cells x clusters); column 0 is the ''
        # (unlabeled) class. Pmask marks which cells may change during diffusion.
        P = np.zeros((NNM.shape[0],clu.size))
        Pmask = np.ones((NNM.shape[0],clu.size))
        P[np.arange(clui.size),clui]=1.0
        # Reference-species cells keep their labels fixed throughout propagation.
        Pmask[stitched.adata.obs['species']==reference_id]=0
        # Drop the unlabeled-class column before propagating.
        Pmask=Pmask[:,1:]
        P=P[:,1:]
        Pinit = P.copy()
        for j in range(num_iters):
            # One diffusion step, followed by row re-normalization.
            P_new = NNM.dot(P)
            if np.max(np.abs(P_new - P)) < 5e-3:
                # Converged: normalize once more and stop early.
                P = P_new
                s=P.sum(1)[:,None]
                s[s==0]=1
                P = P/s
                break
            else:
                P = P_new
                s=P.sum(1)[:,None]
                s[s==0]=1
                P = P/s
        # Re-clamp reference cells to their original one-hot labels.
        P = P * Pmask + Pinit
        uncertainty = 1-P.max(1)
        labels = clu[1:][np.argmax(P,axis=1)]
        # Cells the diffusion never reached receive no label.
        labels[uncertainty==1.0]='NAN'
        uncertainty[uncertainty>=uncertainty.max()*0.99] = 1
        if inplace:
            stitched.adata.obs[key+'_transfer'] = pd.Series(labels,index = stitched.adata.obs_names)
            stitched.adata.obs[key+'_uncertainty'] = pd.Series(uncertainty,index=stitched.adata.obs_names)
    # NOTE(review): only the LAST key's propagation result is returned.
    res = pd.DataFrame(data=P,index=stitched.adata.obs_names,columns=clu[1:])
    res['labels'] = labels
    return res
def get_mapping_scores(sm, keys, n_top = 0):
    """Calculate mapping scores between cell types across species.

    Parameters
    ----------
    sm: SAMAP object
    keys: dict, annotation vector keys for at least two species with species identifiers as the keys
        e.g. {'pl':'tissue','sc':'tissue'}
    n_top: int, optional, default 0
        If `n_top` is 0, average the alignment scores for all cells in a pair of clusters.
        Otherwise, average the alignment scores of the top `n_top` cells in a pair of clusters.
        Set this to non-zero if you suspect there to be subpopulations of your cell types mapping
        to distinct cell types in the other species.

    Returns
    -------
    D - table of highest mapping scores for cell types
    A - pairwise table of mapping scores between cell types across species
    """
    # If annotations were supplied for only a subset of the mapped species,
    # restrict the combined object to those species before scoring.
    if len(list(keys.keys()))<len(list(sm.sams.keys())):
        samap = SAM(counts = sm.samap.adata[np.in1d(sm.samap.adata.obs['species'],list(keys.keys()))])
    else:
        samap=sm.samap
    clusters = []
    # Species IDs in their order of first appearance in the combined obs.
    ix = np.unique(samap.adata.obs['species'],return_index=True)[1]
    skeys = q(samap.adata.obs['species'])[np.sort(ix)]
    for sid in skeys:
        # Prefix each cluster label with its species ID to keep labels unique.
        clusters.append(q([sid+'_'+str(x) for x in sm.sams[sid].adata.obs[keys[sid]]]))
    cl = np.concatenate(clusters)
    l = "{}_mapping_scores".format(';'.join([keys[sid] for sid in skeys]))
    samap.adata.obs[l] = pd.Categorical(cl)
    CSIMth, clu = _compute_csim(samap, l, n_top = n_top, prepend = False)
    A = pd.DataFrame(data=CSIMth, index=clu, columns=clu)
    # Order clusters by their best (highest) mapping score, descending.
    i = np.argsort(-A.values.max(0).flatten())
    H = []
    C = []
    for I in range(A.shape[1]):
        x = A.iloc[:, i[I]].sort_values(ascending=False)
        H.append(np.vstack((x.index, x.values)).T)
        # Appended twice on purpose: each cluster contributes TWO columns to D
        # ("Cluster" and "Alignment score"), so the top level of the resulting
        # MultiIndex must repeat the cluster name.
        C.append(A.columns[i[I]])
        C.append(A.columns[i[I]])
    H = np.hstack(H)
    D = pd.DataFrame(data=H, columns=[C, ["Cluster","Alignment score"]*(H.shape[1]//2)])
    return D, A
| [
11748,
1341,
35720,
13,
26791,
13,
82,
29572,
12543,
6359,
355,
264,
69,
198,
6738,
764,
1330,
10662,
11,
3384,
11,
279,
67,
11,
599,
11,
45941,
11,
14601,
11,
629,
198,
6738,
764,
26791,
1330,
284,
62,
13038,
11,
284,
62,
85,
77,... | 1.950161 | 21,148 |
# -*- coding:UTF-8 -*-
import sys
# Simple console menu loop for a game-records program (prompts are in
# Portuguese: insert game data / query games / clear the table / quit).
# NOTE(review): inserir(), consulta() and limpar() are not defined in this
# fragment -- presumably defined or imported elsewhere; confirm before running.
reqMax = []
reqMin = []
cont = 0  # number of records inserted so far
#print(limpar())
p = 0
# Keep showing the menu until the user picks option 4 (quit).
while p != 4:
    print('~'*30)
    print('Para Inserir dados do jogo aperte [1]: ')
    print('Para consultar dados dos jogos aperte [2]: ')
    print('para limpar a tabela de jogos aperte [3]')
    print('Para Sair do programa aperte [4]: ')
    p = int(input('Opção: '))
    print('~'*30)
    if p == 1:
        cont+=1
        inserir()
    elif p == 2:
        consulta()
    elif p ==3:
        limpar()
    elif p == 4:
        print('Opção {}'.format(p), 'Saindo do programa!!!')
    else:
        print('Opção Invalida')
    print('*'*30)
| [
2,
532,
9,
12,
19617,
25,
48504,
12,
23,
532,
9,
12,
220,
198,
11748,
25064,
198,
42180,
11518,
796,
17635,
198,
42180,
9452,
796,
17635,
198,
3642,
796,
657,
220,
628,
220,
220,
220,
220,
198,
2,
4798,
7,
2475,
1845,
28955,
220,
... | 1.911429 | 350 |
from django import template
from account.models import UserMessage
from account.models import Conversation
register = template.Library()
@register.assignment_tag
@register.assignment_tag
@register.assignment_tag
| [
6738,
42625,
14208,
1330,
11055,
198,
198,
6738,
1848,
13,
27530,
1330,
11787,
12837,
198,
6738,
1848,
13,
27530,
1330,
42427,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
628,
198,
31,
30238,
13,
562,
16747,
62,
12985,
628,
198,
31,
... | 3.859649 | 57 |
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os.path as osp
import joblib
# Plot "percent solved" (mean +/- std over runs) vs. context size for the
# Meta-AIRL and Meta-BC experiments, one figure per KL coefficient beta.
MAIN_PATH = '/scratch/gobi2/kamyar/oorl_rlkit/output'
WHAT_TO_PLOT = 'faster_all_eval_stats.pkl'

# Experiment output directories, keyed by method and KL coefficient.
data_dirs = {
    'np_airl': {
        0.2: 'correct-saving-np-airl-KL-0p2-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
        0.15: 'correct-saving-np-airl-KL-0p15-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
        0.1: 'correct-saving-np-airl-KL-0p1-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
        0.05: 'correct-saving-np-airl-KL-0p05-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs',
        0.0: 'correct-saving-np-airl-KL-0-disc-512-dim-rew-2-NO-TARGET-ANYTHING-over-10-epochs'
    },
    'np_bc': {
        0.2: 'np-bc-KL-0p2-FINAL-WITHOUT-TARGETS',
        0.15: 'np-bc-KL-0p15-FINAL-WITHOUT-TARGETS',
        0.1: 'np-bc-KL-0p1-FINAL-WITHOUT-TARGETS',
        0.05: 'np-bc-KL-0p05-FINAL-WITHOUT-TARGETS',
        0.0: 'np-bc-KL-0-FINAL-WITHOUT-TARGETS'
    }
}


def _collect_stats(all_stats, context_sizes=range(1, 7)):
    """Aggregate per-run eval stats into per-context-size means and stds.

    Returns (good_reach_means, good_reach_stds, solve_means, solve_stds),
    one entry per context size.
    """
    good_means, good_stds, solve_means, solve_stds = [], [], [], []
    for c_size in context_sizes:
        good_reaches = [d[c_size]['Percent_Good_Reach'] for d in all_stats]
        solves = [d[c_size]['Percent_Solved'] for d in all_stats]
        good_means.append(np.mean(good_reaches))
        good_stds.append(np.std(good_reaches))
        solve_means.append(np.mean(solves))
        solve_stds.append(np.std(solves))
    return good_means, good_stds, solve_means, solve_stds


for i, beta in enumerate([0.0, 0.05, 0.1, 0.15, 0.2]):
    fig, ax = plt.subplots(1)
    ax.set_xlabel('$\\beta = %.2f$' % beta)
    # Same collection + errorbar pass for both methods; the small x offsets
    # keep the two sets of error bars from overlapping.
    for method, offset, label in (('np_airl', 0.1, 'Meta-AIRL'),
                                  ('np_bc', -0.1, 'Meta-BC')):
        stats_path = osp.join(MAIN_PATH, data_dirs[method][beta], WHAT_TO_PLOT)
        all_stats = joblib.load(stats_path)['faster_all_eval_stats']
        _, _, solves_means, solves_stds = _collect_stats(all_stats)
        ax.errorbar(np.array(list(range(1, 7))) + offset, solves_means, solves_stds,
                    elinewidth=2.0, capsize=4.0, barsabove=True, linewidth=2.0, label=label)
    ax.set_ylim([0.3, 1.0])
    lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.725, 0.1), shadow=False, ncol=3)
    plt.savefig('plots/abc/faster_test_%d.png'%i, bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11... | 1.928981 | 1,746 |
from django.shortcuts import render, get_object_or_404, redirect
from .models import RepPost
from .forms import RepForm
from django.utils import timezone
from django.contrib.auth.decorators import login_required
@login_required | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
198,
6738,
764,
27530,
1330,
1432,
6307,
198,
6738,
764,
23914,
1330,
1432,
8479,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,... | 3.5625 | 64 |
#int_to_line.py
#This script takes intersection and road segment and determine the direction of the road segment in contrast to the intersection.
import arcpy
from arcpy import env
from arcpy.sa import *
# Check out the Spatial Analyst license and allow overwriting prior outputs.
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
#input configuration
env.workspace = "C:/Users/kml42638/Desktop/testDB.gdb"
print("The name of the workspace is " + env.workspace)
# Feature-class names inside the workspace geodatabase.
streetCL = "GGISC_streetCL"
intersections = "Intersections_all"
# NOTE(review): main() is not defined in this fragment -- presumably defined
# above in the full script; confirm its signature (intersections, streetCL).
main(intersections, streetCL)
| [
2,
600,
62,
1462,
62,
1370,
13,
9078,
198,
2,
1212,
4226,
2753,
16246,
290,
2975,
10618,
290,
5004,
262,
4571,
286,
262,
2975,
10618,
287,
6273,
284,
262,
16246,
13,
198,
198,
11748,
10389,
9078,
198,
6738,
10389,
9078,
1330,
17365,
... | 3.286624 | 157 |
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cactusco.settings')
app = Celery('cactusco')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
| [
11748,
28686,
201,
198,
6738,
18725,
1924,
1330,
15248,
1924,
201,
198,
201,
198,
418,
13,
268,
2268,
13,
2617,
12286,
10786,
35028,
1565,
11230,
62,
28480,
51,
20754,
62,
33365,
24212,
3256,
705,
66,
34144,
1073,
13,
33692,
11537,
201,... | 2.524752 | 101 |
from dtpattern import alignment
from dtpattern.alignment import needle, finalize, gap_penalty, match_award, mismatch_penalty, water
from dtpattern.utils import translate
from dtpattern.alignment import alignment as al
def align(s1,s2):
    """
    Align two symbol sequences under several scoring schemes and print each result.

    Runs global (Needleman-Wunsch, `needle`) and local (Smith-Waterman, `water`)
    alignments three ways: with default scores, with a custom score matrix, and
    on a translated copy of `s1`.

    NOTE(review): `print_alignment` and `_translate` are not defined or imported
    in this fragment (only `translate` is imported) -- confirm they exist in the
    full module, otherwise this function raises NameError.

    :param s1: list of characters or character-set symbols
    :param s2: list of characters or character-set symbols
    :return: tuple of (align1, align2, symbol2, identity, score) from the LAST
        alignment performed (the translated Smith-Waterman run).
    """
    identity, score, align1, symbol2, align2 = needle(s1, s2)
    print_alignment(align1, align2, symbol2, identity, score, altype="NEEDLE")
    identity, score, align1, symbol2, align2 = water(s1, s2)
    print_alignment(align1, align2, symbol2, identity, score, altype="WATER")
    # Custom scoring, keyed by the constants imported from dtpattern.alignment.
    score_matrix = {
        gap_penalty: -15,
        match_award: 5,
        mismatch_penalty: -4
    }
    identity, score, align1, symbol2, align2 = needle(s1, s2,score_matrix=score_matrix)
    print_alignment(align1, align2, symbol2, identity, score, altype="VALUE")
    identity, score, align1, symbol2, align2 = water(s1, s2,score_matrix=score_matrix)
    print_alignment(align1, align2, symbol2, identity, score, altype="WATER")
    # Same two alignments again, but on a translated copy of s1.
    identity, score, align1, symbol2, align2 = needle(_translate(s1), s2)
    print_alignment(align1, align2, symbol2, identity, score, altype="TRANS")
    identity, score, align1, symbol2, align2 = water(_translate(s1), s2)
    print_alignment(align1, align2, symbol2, identity, score, altype="TRANS_WATER")
    #for a in al.align.globalms("".join(s1), "".join(s2), 5, -4, -50, -.1):
    #    print(al.format_alignment(*a))
    return align1, align2, symbol2, identity, score
# Demo driver: align the first string of each group against every other
# string in the group and merge the resulting alignments.
# NOTE(review): to_list, is_valid_alignment and merge_alignment are not
# defined or imported in this fragment -- confirm they exist in the full module.
data=[
    ['111',"1222","1113"]
]
for values in data:
    s1 = values[0]
    for s2 in values[1:]:
        print("MERGE:\n\t{}\n\t{}".format(s1,s2))
        # The alignment API expects lists of symbols, not plain strings.
        if isinstance(s1,str):
            s1= to_list(s1)
        if isinstance(s2,str):
            s2= to_list(s2)
        align1, align2, symbol2, identity, score = align(s1,s2)
        #print_alignment(align1, align2, symbol2, identity, score)
        _s1,_s2=s1,s2
        # NOTE(review): this loop body is an unconditional break, so it is a
        # no-op -- the validity check never repeats; looks like leftover
        # scaffolding for an intended retry loop.
        while not is_valid_alignment(align1, align2, symbol2):
            break
        # Carry the merged alignment forward as the base for the next pairing.
        s1 = merge_alignment(symbol2)
| [
6738,
288,
83,
33279,
1330,
19114,
198,
6738,
288,
83,
33279,
13,
282,
16747,
1330,
17598,
11,
2457,
1096,
11,
7625,
62,
3617,
6017,
11,
2872,
62,
707,
446,
11,
46318,
62,
3617,
6017,
11,
1660,
198,
6738,
288,
83,
33279,
13,
26791,
... | 2.363441 | 930 |
import logging
# Module-level logger that writes timestamped records to stderr,
# e.g. "2021-03-05 11:40:58 INFO message".
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s %(levelname)s %(message)s',
    "%Y-%m-%d %H:%M:%S"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
import mimetypes
| [
11748,
18931,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
30281,
796,
18931,
13,
12124,
25060,
3419,
198,
687,
1436,
796,
18931,
13,
8479,
1436,
7,
198,
220,
220,
220,
705,
4,
7,
292,
310,
5... | 2.487395 | 119 |
import logging
import numpy as np
import scipy.stats as stats
from .eigd import eigenDecompose
| [
11748,
18931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
34242,
355,
9756,
198,
198,
6738,
764,
68,
328,
67,
1330,
304,
9324,
10707,
3361,
577,
628,
628,
628
] | 3 | 34 |
import numpy as np
import cv2
# Apply a vignette (center-bright radial mask) to an image using separable
# Gaussian kernels.
# Fixes vs. original: the image was read into `input` but used as
# `input_image` (NameError), `output` was never allocated, and the kernel
# outer product produced a (width, height) array -- transposed relative to
# the image.
input_image = cv2.imread('input/strawberry.jpg')
height, width = input_image.shape[:2]
# cv2.getGaussianKernel(ksize, sigma) returns a ksize-by-1 column vector.
x_gauss = cv2.getGaussianKernel(width,250)
y_gauss = cv2.getGaussianKernel(height,200)
# Outer product: (height, 1) @ (1, width) -> a (height, width) bump that
# peaks at the image centre.
kernel = y_gauss * x_gauss.T
mask = kernel * 255 / np.linalg.norm(kernel)
# Scale every colour channel by the mask (uint8 assignment truncates).
output = np.zeros_like(input_image)
output[:,:,0] = input_image[:,:,0] * mask
output[:,:,1] = input_image[:,:,1] * mask
output[:,:,2] = input_image[:,:,2] * mask
cv2.imshow('vignette', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.destroyAllWindows() | [
11748,
299,
32152,
355,
45941,
220,
201,
198,
11748,
269,
85,
17,
201,
198,
201,
198,
201,
198,
15414,
796,
269,
85,
17,
13,
320,
961,
10786,
15414,
14,
301,
1831,
8396,
13,
9479,
11537,
220,
201,
198,
220,
220,
220,
220,
201,
198... | 2.019763 | 253 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 11:40:58 2021
@author: Christopher Corbell
Things we can use here:
- construct Digraph from underlying Graph (default direction for edges)
- DigraphFactory to construct some interesting digraphs
"""
from graphoire.graph import Graph
class Digraph(Graph):
    """
    A Graph subclass that treats its edge list as directed.

    Unlike the base class -- which resolves every edge to a canonical (u, v)
    form and whose degree helpers consider outgoing edges only -- a Digraph
    distinguishes the (u, v) and (v, u) orientations and can report both the
    in-neighbors and out-neighbors of a vertex.
    """

    def getOutNeighbors(self, vertex):
        """
        Get a list of vertices that this vertex connects-outward to.

        Parameters
        ----------
        vertex : int
            The vertex index

        Returns list of adjacent head-vertex integer indices.
        """
        return [edge[1] for edge in self.edges if edge[0] == vertex]

    def getInNeighbors(self, vertex):
        """
        Get a list of vertices that connect inward to this vertex.

        Parameters
        ----------
        vertex : int
            The vertex index

        Returns list of adjacent tail-vertex integer indices.
        """
        return [edge[0] for edge in self.edges if edge[1] == vertex]

    def edgeDirection(self, tail, head):
        """
        Get the direction of the edge between tail and head.

        Parameters
        ----------
        tail : integer (vertex index)
            The vertex to interpret as tail
        head : integer (vertex index)
            The vertex to interpret as head

        Returns
        -------
        1 if a directed edge runs tail -> head, -1 if it runs head -> tail,
        and 0 if the vertices are not adjacent.
        """
        if self.hasEdge(tail, head):
            return 1
        if self.hasEdge(head, tail):
            return -1
        return 0
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
1526,
220,
642,
1367,
25,
1821,
25,
3365,
33448,
198,
198,
31,
9800,
25,
12803,
27... | 2.446137 | 919 |
from collections import OrderedDict
import numpy as np
from gym.envs.mujoco import mujoco_env
from gym.spaces import Box
from bgp.rlkit.core import logger as default_logger
from bgp.rlkit.core.eval_util import create_stats_ordered_dict
from bgp.rlkit.core.serializable import Serializable
from bgp.rlkit.envs.mujoco_env import get_asset_xml
from bgp.rlkit.samplers.util import get_stat_in_paths
from bgp.rlkit.torch.tdm.envs.multitask_env import MultitaskEnv
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11550,
13,
268,
14259,
13,
76,
23577,
25634,
1330,
285,
23577,
25634,
62,
24330,
198,
6738,
11550,
13,
2777,
2114,
1330,
8315,
198,
198,
6738,
... | 2.789157 | 166 |
import sublime
import sublime_plugin
class SurroundCommand(sublime_plugin.TextCommand):
    """
    Base class to surround the selection with text.
    """
    # Text to wrap around the selection; subclasses override this.
    surround = ''
| [
11748,
41674,
198,
11748,
41674,
62,
33803,
628,
198,
4871,
4198,
744,
21575,
7,
7266,
27299,
62,
33803,
13,
8206,
21575,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7308,
1398,
284,
4573,
262,
6356,
351,
2420,
13,
198,
220,
... | 3.272727 | 55 |
import requests
from faker import Faker
from faker.providers import date_time
import json
# Seed the local users service with generated fake accounts.
fake = Faker()
fake.add_provider(date_time)
for i in range(40000000):
    user = {
        'name': fake.name(),
        'email': fake.email(),
        'birthdate': fake.date()
    }
    # Pass the dict itself to `json=`: requests serializes it and sets the
    # Content-Type header. The original passed json.dumps(user), which
    # double-encodes the payload into a JSON *string* instead of an object.
    response = requests.post('http://localhost:8000/users', json=user)
    if response.ok:
        # Log progress only once per 100k inserts to keep stdout usable.
        if i % 100000 == 0:
            user_id = response.json()['id']
            print("User {0} added".format(user_id))
    else:
        print("Error")
11748,
7007,
198,
6738,
277,
3110,
1330,
376,
3110,
198,
6738,
277,
3110,
13,
15234,
4157,
1330,
3128,
62,
2435,
198,
11748,
33918,
198,
198,
30706,
796,
376,
3110,
3419,
198,
30706,
13,
2860,
62,
15234,
1304,
7,
4475,
62,
2435,
8,
... | 2.336245 | 229 |
import sys
from hangul_utils import *
# for word segmentation and pos tagging of Korean text
# Note: You need to install "hangul-utils" in advanced
# Ref link: https://github.com/kaniblu/hangul-utils
# written by Ye Kyaw Thu, Visiting Professor, LST, NECTEC, Thailand
#
# How to run: python ./korean-breaks.py <input-filename> <word|morph|pos>
# eg 1: python ./korean-breaks.py ./tst.ko -pos
# eg 2: python ./korean-breaks.py ./tst.ko -morph
# e.g 3: python ./korean-breaks.py ./tst.ko -word
# Validate the command-line arguments before touching the input file.
if len(sys.argv) < 3:
    print ("You must set two arguments!")
    print ("How to run:")
    print ("python ./korean-breaks.py <raw-korean-text-filename> <-word|-morph|-pos>")
    sys.exit()
else:
    f1 = sys.argv[1]
    arg = sys.argv[2]
# Hoist the case-normalization out of the per-line loop.
mode = arg.lower()
# 'with' guarantees the file is closed even if tokenization raises; utf-8 is
# spelled out because Korean text breaks under legacy locale encodings.
with open(f1, "r", encoding="utf-8") as fp1:
    for line1 in fp1:
        if mode == '-word':
            # Word tokenization (mainly using space):
            print (" ".join(list(word_tokenize(line1.strip()))))
        elif mode == '-morph':
            # Morpheme tokenization
            print (" ".join(list(morph_tokenize(line1.strip()))))
        elif mode == '-pos':
            # Morpheme tokenization with POS
            print (list(morph_tokenize(line1.strip(), pos=True)))
| [
11748,
25064,
198,
6738,
8181,
377,
62,
26791,
1330,
1635,
198,
198,
2,
329,
1573,
10618,
341,
290,
1426,
49620,
286,
6983,
2420,
198,
2,
5740,
25,
921,
761,
284,
2721,
366,
33255,
377,
12,
26791,
1,
287,
6190,
198,
2,
6524,
2792,
... | 2.433735 | 498 |
# Uses python3
if __name__ == "__main__":
    # Read two strings from stdin and print their edit distance.
    # NOTE(review): edit_distance is not defined in this fragment --
    # presumably defined above in the full file.
    print(edit_distance(input(), input()))
| [
2,
36965,
21015,
18,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
19312,
62,
30246,
7,
15414,
22784,
5128,
3419,
4008,
198
] | 2.6875 | 32 |
#!/usr/bin/env python
# encoding: utf-8
import rospy
import tf
from std_msgs.msg import Float64, Int32, Int8
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist, Vector3
from PID import PID
from math import sin, cos, pi, atan2, sqrt
# NOTE(review): AUTO_MOVE is not defined or imported in this fragment --
# confirm where it comes from before running.
autoMove = AUTO_MOVE()
"""LinearPub = rospy.Publisher("/command/linear", self.twist, queue_size=5)
AngularPub = rospy.Publisher("/command/angular", self.twist, queue_size=5)"""
# pub = rospy.Publisher('cmd_vel', self.twist, queue_size=10)
if __name__ == '__main__':
    # Start the ROS node, wire the velocity publisher and the odometry /
    # position-command subscribers, then hand control to the ROS event loop.
    rospy.init_node('robot_teleop')
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    # Set subscribers
    rospy.Subscriber("/odom", Odometry, autoMove.getState)
    rospy.Subscriber("/command/pos", Vector3, autoMove.moveCommand)
    # Server(AlignmentControllerConfig, dynamicReconfigureCb)
    rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
11748,
686,
2777,
88,
198,
11748,
48700,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
48436,
2414,
11,
2558,
2624,
11,
2558,
23,
198,
6738,... | 2.603659 | 328 |
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework.response import Response
from rest_framework.views import APIView
User = get_user_model()
class RetrieveCurrentUserView(APIView):
    """Returns information about the current (authenticated) user."""
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
... | 2.341463 | 123 |
#!/usr/bin/env python
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# Create a Hammer-projection world map (low resolution coastline data).
# NOTE(review): `map` shadows the builtin of the same name for the rest of
# the script.
map = Basemap(projection='hammer', lon_0=20, resolution='l')
# Plot coastlines
map.drawcoastlines(linewidth=0.)
map.fillcontinents(alpha=0.85)
# Parse telescopes.txt and plot the points on the map.
# The [:-1] strips a trailing character after the latitude field --
# presumably a comma separator; confirm against the data file format.
for line in open('telescopes.txt', 'r').readlines():
    if line[0] == '#': continue
    lat = float( line.split()[1][:-1] )
    lon = float( line.split()[2] )
    # Project (lon, lat) into map coordinates and draw a small red dot.
    xpt, ypt = map(lon, lat)
    map.plot([xpt],[ypt],'ro', markersize=0.75)
#
plt.savefig('radiotelescopes.png', dpi=500, bbox_inches='tight')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
12093,
368,
499,
1330,
6455,
368,
499,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
198,
198,
2,
13610,
257,
7920,
1628,... | 2.496 | 250 |
from .KeyCodes import *
from .MouseButtonCodes import *
| [
6738,
764,
9218,
34,
4147,
1330,
1635,
198,
6738,
764,
39643,
21864,
34,
4147,
1330,
1635,
198
] | 3.294118 | 17 |
"""Evaluation
This script consists of evaluation functions needed
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime
import tensorflow as tf
from tensorflow.python.tools import inspect_checkpoint as chkp
import load_data
from geometry_parameters import TEST_INDEX, RECONSTRUCT_PARA
def show_reconstruction(model, phantom_index):
    """
    Show a reconstructed CT as a slice-by-slice grayscale animation.

    Parameters
    ----------
    model : str
        which model's results to use (directory containing `eval_recon/`)
    phantom_index : int
        which CT to display
    """
    recon_dir = model + '/eval_recon/recon_' + str(phantom_index) + '.npy'
    recon = np.load(recon_dir)
    fig = plt.figure()
    imgs = []
    # One artist per slice along axis 0.
    for i in range(recon.shape[0]):
        img = plt.imshow(recon[i, :, :], animated=True, cmap=plt.get_cmap('gist_gray'))
        imgs.append([img])
    # Keep a reference to the animation object: matplotlib only holds a weak
    # reference to it, so without this assignment the animation is garbage
    # collected before plt.show() and never plays.
    anim = animation.ArtistAnimation(fig, imgs, interval=50, blit=True, repeat_delay=1000)
    plt.show()
def compare_reconstruction(model_one, model_two, phantom_index, slice_index):
    """
    Compare reconstructed CT results from two different models side by side.

    Parameters
    ----------
    model_one : str
        the first model's result to use
    model_two : str
        the second model's result to use
    phantom_index : int
        which CT to display
    slice_index : int
        which slice in the CT to display (1-based)
    """
    # Load the selected slice from each model's saved reconstruction.
    recon_one = model_one + '/eval_recon/recon_' + str(phantom_index) + '.npy'
    recon_one = np.load(recon_one)
    recon_one = recon_one[slice_index-1,:,:]
    recon_two = model_two + '/eval_recon/recon_' + str(phantom_index) + '.npy'
    recon_two = np.load(recon_two)
    recon_two = recon_two[slice_index-1,:,:]
    # Show both slices side by side in grayscale.
    fig = plt.figure(figsize=plt.figaspect(0.5))
    ax = fig.add_subplot(1, 2, 1)
    ax.imshow(recon_one, cmap=plt.get_cmap('gist_gray'))
    ax.set_title('model: ' + model_one)
    ax = fig.add_subplot(1, 2, 2)
    ax.imshow(recon_two, cmap=plt.get_cmap('gist_gray'))
    ax.set_title('model: ' + model_two)
    plt.show()
def single_ct_normalize(input):
    """
    Min-max normalize one CT volume to the range [0, 1].

    Parameters
    ----------
    input : ndarray
        The input CT to normalize (not modified).

    Returns
    -------
    ndarray
        The normalized CT; all zeros if the input is constant-valued
        (avoids the division-by-zero / NaN result of the naive formula).
    """
    # Locals renamed so the builtins max/min are not shadowed.
    max_val = np.max(input)
    min_val = np.min(input)
    value_range = max_val - min_val
    if value_range == 0:
        # Constant input: the min-max formula would divide by zero.
        return np.zeros_like(input, dtype=float)
    return (input - min_val) / value_range
def compare_reconstruction_with_fdk(model, phantom_index, slice_index):
    """
    Compare a model's reconstruction with the conventional FDK baseline and
    the ground truth, side by side.

    Parameters
    ----------
    model : str
        which model's results to use
    phantom_index : int
        which CT to display
    slice_index : int
        which slice in the CT to display (1-based)
    """
    # Sparse-view (145 projections) FDK reconstruction = the baseline;
    # normalized here because it is stored un-normalized.
    recon_one = '../data_preprocessing/recon_145/recon_' + str(phantom_index) + '.npy'
    recon_one = single_ct_normalize(np.load(recon_one))
    recon_one = recon_one[slice_index - 1, :, :]
    # The model's stored reconstruction (already normalized when saved).
    recon_two = model + '/eval_recon/recon_' + str(phantom_index) + '.npy'
    recon_two = np.load(recon_two)
    recon_two = recon_two[slice_index - 1, :, :]
    # Full-view (360 projections) reconstruction = the ground truth.
    recon_three = '../data_preprocessing/recon_360/recon_' + str(phantom_index) + '.npy'
    recon_three = single_ct_normalize(np.load(recon_three))
    recon_three = recon_three[slice_index - 1, :, :]
    # Three-panel grayscale comparison: FDK | model | ground truth.
    fig = plt.figure(figsize=plt.figaspect(0.3))
    ax = fig.add_subplot(1, 3, 1)
    ax.imshow(recon_one, cmap=plt.get_cmap('gist_gray'))
    ax.set_title('pure_fdk')
    ax = fig.add_subplot(1, 3, 2)
    ax.imshow(recon_two, cmap=plt.get_cmap('gist_gray'))
    ax.set_title('model: ' + model)
    ax = fig.add_subplot(1, 3, 3)
    ax.imshow(recon_three, cmap=plt.get_cmap('gist_gray'))
    ax.set_title('ground truth')
    plt.show()
def calculate_ssim(predictions, gt_labels, max_val):
    """
    Mean SSIM between reconstructions and ground truth (TF1 graph mode).

    Parameters
    ----------
    predictions : ndarray
        the reconstructed results
    gt_labels : ndarray
        the ground truth
    max_val : float
        the value range (dynamic range) of the inputs

    Returns
    -------
    The mean SSIM over all computed values.
    """
    # TF1-style placeholders, fed below via feed_dict.
    tf_predictions = tf.placeholder(tf.float32, shape=predictions.shape)
    tf_gt_labels = tf.placeholder(tf.float32, shape=gt_labels.shape)
    # expand_dims appends a trailing axis at position 4 -- presumably a
    # single-channel axis so tf.image.ssim sees image-shaped input; confirm
    # against the array layout produced upstream.
    tf_ssim_value = tf.image.ssim(tf.expand_dims(tf_predictions, 4),
                                  tf.expand_dims(tf_gt_labels, 4), max_val)
    # Cap GPU memory use and allow growth so the evaluation can share the device.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        ssim = sess.run(tf_ssim_value, feed_dict={tf_predictions: predictions,
                                                  tf_gt_labels: gt_labels})
    return np.mean(ssim)
def calculate_ms_ssim(predictions, gt_labels, max_val):
    """
    Mean multi-scale SSIM between reconstructions and ground truth (TF1 graph mode).

    Parameters
    ----------
    predictions : ndarray
        the reconstructed results
    gt_labels : ndarray
        the ground truth
    max_val : float
        the value range (dynamic range) of the inputs

    Returns
    -------
    The mean MS-SSIM over all computed values.
    """
    # TF1-style placeholders, fed below via feed_dict.
    tf_predictions = tf.placeholder(tf.float32, shape=predictions.shape)
    tf_gt_labels = tf.placeholder(tf.float32, shape=gt_labels.shape)
    # Trailing axis at position 4 -- presumably a single-channel axis; see
    # calculate_ssim for the same construction.
    tf_ms_ssim_value = tf.image.ssim_multiscale(tf.expand_dims(tf_predictions, 4),
                                                tf.expand_dims(tf_gt_labels, 4), max_val)
    # Cap GPU memory use and allow growth so the evaluation can share the device.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        ms_ssim = sess.run(tf_ms_ssim_value, feed_dict={tf_predictions: predictions,
                                                        tf_gt_labels: gt_labels})
    return np.mean(ms_ssim)
def calculate_psnr(predictions, gt_labels, max_val):
    """
    Mean peak signal-to-noise ratio between reconstructions and ground truth
    (TF1 graph mode).

    Parameters
    ----------
    predictions : ndarray
        the reconstructed results
    gt_labels : ndarray
        the ground truth
    max_val : float
        the value range (dynamic range) of the inputs

    Returns
    -------
    The mean PSNR over all computed values.
    """
    # TF1-style placeholders, fed below via feed_dict.
    tf_predictions = tf.placeholder(tf.float32, shape=predictions.shape)
    tf_gt_labels = tf.placeholder(tf.float32, shape=gt_labels.shape)
    # Trailing axis at position 4 -- presumably a single-channel axis; see
    # calculate_ssim for the same construction.
    tf_psnr_value = tf.image.psnr(tf.expand_dims(tf_predictions, 4),
                                  tf.expand_dims(tf_gt_labels, 4), max_val)
    # Cap GPU memory use and allow growth so the evaluation can share the device.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        psnr = sess.run(tf_psnr_value, feed_dict={tf_predictions: predictions,
                                                  tf_gt_labels: gt_labels})
    return np.mean(psnr)
def normalize(input):
"""
normalize more than one CT sample to [0, 1]
Parameters
----------
input : ndarray
The input CT samples to normalize
Returns
-------
ndarray
the normalized CT results
"""
for i in range(input.shape[0]):
min_bound = np.min(input[i,::])
max_bound = np.max(input[i,::])
input[i,::] = (input[i,::] - min_bound) / (max_bound - min_bound)
return input
# ms-ssim, psnr, mse
def evaluate_on_metrics(model):
"""
do evaluation on mse, ssim, ms-ssim and psnr
Parameters
----------
model : str
The model for evaluation
"""
# get the labels
_, labels = load_data.load_test_data()
labels = normalize(labels)
# load the recons on the model
recon_phantoms = np.empty(labels.shape)
for i in range(recon_phantoms.shape[0]):
recon_file = model + '/eval_recon/recon_' + str(TEST_INDEX[i]) + '.npy'
recon_phantoms[i,:,:,:] = np.load(recon_file)
# MSE
mse = np.mean(np.square(recon_phantoms - labels))
#
max_val = 1.0
# SSIM
ssim = calculate_ssim(recon_phantoms, labels, max_val)
# MS-SSIM
ms_ssim = calculate_ms_ssim(recon_phantoms, labels, max_val)
# Peak Signal-to-Noise Ratio
psnr = calculate_psnr(recon_phantoms, labels, max_val)
# print the results
print('mse value: ', str(mse))
print('ssim value: ', str(ssim))
print('ms-ssim value: ', str(ms_ssim))
print('psnr value: ', str(psnr))
# save the metrics results
f = open(model + '/eval_result/metrics_result.txt', 'a+')
f.write("Model: {0}, Date: {1:%Y-%m-%d_%H:%M:%S} \nMSE: {2:3.8f} \nSSIM: {3:3.8f} \nMS-SSIM: {4:3.8f} \nPSNR: {5:3.8f}\n\n".format(
model, datetime.datetime.now(), mse, ssim, ms_ssim, psnr))
f.close()
def check_stored_sess_var(sess_file, var_name):
"""
display variable results for trained models in the stored session
Parameters
----------
sess_file : str
the stored session file
var_name : str
the variable to see
"""
if var_name == '':
# print all tensors in checkpoint file (.ckpt)
chkp.print_tensors_in_checkpoint_file(sess_file, tensor_name='', all_tensors=True)
else:
chkp.print_tensors_in_checkpoint_file(sess_file, tensor_name=var_name, all_tensors=False)
def eval_pure_fdk():
"""
do evaluation on mse, ssim, ms-ssim and psnr for the conventional FDK algorithm
"""
# get the labels
_, labels = load_data.load_test_data()
labels = normalize(labels)
# load the recons
recon_phantoms = np.empty(labels.shape)
for i in range(recon_phantoms.shape[0]):
recon_file = '../data_preprocessing/recon_145/recon_' + str(TEST_INDEX[i]) + '.npy'
recon_phantoms[i, :, :, :] = np.load(recon_file)
recon_phantoms = normalize(recon_phantoms)
# MSE
mse = np.mean(np.square(recon_phantoms - labels))
#
max_val = 1.0
# SSIM
ssim = calculate_ssim(recon_phantoms, labels, max_val)
# MS-SSIM
ms_ssim = calculate_ms_ssim(recon_phantoms, labels, max_val)
# Peak Signal-to-Noise Ratio
psnr = calculate_psnr(recon_phantoms, labels, max_val)
# print the results
print('mse value: ', str(mse))
print('ssim value: ', str(ssim))
print('ms-ssim value: ', str(ms_ssim))
print('psnr value: ', str(psnr))
# save the metrics results
f = open('pure_fdk_model/eval_result/metrics_result.txt', 'a+')
f.write(
"Model: {0}, Date: {1:%Y-%m-%d_%H:%M:%S} \nMSE: {2:3.8f} \nSSIM: {3:3.8f} \nMS-SSIM: {4:3.8f} \nPSNR: {5:3.8f}\n\n".format(
'pure_fdk_model', datetime.datetime.now(), mse, ssim, ms_ssim, psnr))
f.close()
def convert_to_raw_bin(model):
"""
convert the reconstructed results of the model to raw data file
Parameters
----------
model : str
The model for which results to convert
"""
dir = model + '/eval_recon/'
for i in range(len(TEST_INDEX)):
recon_file = dir + 'recon_' + str(TEST_INDEX[i]) + '.npy'
recon = np.load(recon_file)
recon.astype('float32').tofile(dir + 'recon_' + str(TEST_INDEX[i]) + '_float32_' +
str(RECONSTRUCT_PARA['volume_shape'][1]) + 'x' +
str(RECONSTRUCT_PARA['volume_shape'][2]) + 'x' +
str(RECONSTRUCT_PARA['volume_shape'][0]) + '_bin')
if __name__ == "__main__":
###########################################
# show reconstructed result CT
show_reconstruction('fdk_nn_model', TEST_INDEX[1])
# show_reconstruction('cnn_projection_model', TEST_INDEX[1])
# show_reconstruction('cnn_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('dense_cnn_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('unet_projection_model', TEST_INDEX[1])
# show_reconstruction('unet_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('unet_proposed_reconstruction_model', TEST_INDEX[1])
# show_reconstruction('combined_projection_reconstruction_model', TEST_INDEX[1])
###########################################
# Evaluation on each model
# evaluate_on_metrics('fdk_nn_model')
# evaluate_on_metrics('cnn_projection_model')
# evaluate_on_metrics('cnn_reconstruction_model')
# evaluate_on_metrics('dense_cnn_reconstruction_model')
# evaluate_on_metrics('unet_projection_model')
# evaluate_on_metrics('unet_reconstruction_model')
# evaluate_on_metrics('unet_proposed_reconstruction_model')
# evaluate_on_metrics('combined_projection_reconstruction_model')
# eval_pure_fdk()
###########################################
# compare_reconstruction results
# compare_reconstruction('cnn_projection_model', 'unet_projection_model', TEST_INDEX[1], 75)
# compare_reconstruction_with_fdk('combined_projection_reconstruction_model', TEST_INDEX[1], 75)
###########################################
# generate raw binary reconstruction files
# convert_to_raw_bin('combined_projection_reconstruction_model') | [
37811,
36,
2100,
2288,
198,
198,
1212,
4226,
10874,
286,
12660,
5499,
2622,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11227,
341... | 2.280542 | 5,607 |
from user import User
brianna = User(1, 'Brianna')
mary = User(2, 'Mary')
keyboard = brianna.sell_product('Keyboard', 'A nice mechanical keyboard', 100)
print(keyboard.availability) # => True
mary.buy_product(keyboard)
print(keyboard.availability) # => False
review = mary.write_review('This is the best keyboard ever!', keyboard)
review in mary.reviews # => True
review in keyboard.reviews # => True | [
6738,
2836,
1330,
11787,
628,
198,
65,
380,
7697,
796,
11787,
7,
16,
11,
705,
33,
380,
7697,
11537,
198,
6874,
796,
11787,
7,
17,
11,
705,
24119,
11537,
198,
198,
2539,
3526,
796,
31013,
7697,
13,
7255,
62,
11167,
10786,
9218,
3526,... | 3.155039 | 129 |
import datetime
import factory
from factory.fuzzy import FuzzyChoice
from wins.models import (
Advisor,
Breakdown,
CustomerResponse,
HVC,
Notification,
Win,
)
from wins.constants import BUSINESS_POTENTIAL, SECTORS, WIN_TYPES
from users.factories import UserFactory
WIN_TYPES_DICT = {y: x for x, y in WIN_TYPES}
| [
11748,
4818,
8079,
198,
198,
11748,
8860,
198,
6738,
8860,
13,
69,
4715,
88,
1330,
376,
4715,
88,
46770,
198,
198,
6738,
7864,
13,
27530,
1330,
357,
198,
220,
220,
220,
35399,
11,
198,
220,
220,
220,
12243,
2902,
11,
198,
220,
220,
... | 2.633588 | 131 |
from . import BinarySearchTree
from . import BinaryTree
from . import Tree
| [
6738,
764,
1330,
45755,
18243,
27660,
198,
6738,
764,
1330,
45755,
27660,
198,
6738,
764,
1330,
12200,
198
] | 4.166667 | 18 |
import pytest
from .common import TESTDATA
from flyingpigeon.utils import local_path
from cdo import Cdo
cdo = Cdo()
| [
11748,
12972,
9288,
198,
198,
6738,
764,
11321,
1330,
43001,
26947,
198,
6738,
7348,
79,
10045,
261,
13,
26791,
1330,
1957,
62,
6978,
198,
198,
6738,
269,
4598,
1330,
327,
4598,
198,
66,
4598,
796,
327,
4598,
3419,
628
] | 3.076923 | 39 |
from datetime import datetime
import os
import nose
import nose.tools
from TransactionBook.model.TransactionBook import *
def save_load(tb):
"""
Helper function wich does save and load the data.
:param tb: Transaction Book
:return tb2: Transaction Book after save load operation
"""
filename = "dummy_database.csv"
tb.save_as(filename)
tb2 = TransactionBook()
tb2.load_from(filename)
os.remove(filename)
return tb2
if __name__ == '__main__':
test_populate_list_from_data()
test_filter_date()
test_account_balance()
test_save_load()
test_pivot_category_pie()
test_years()
test_total_balance()
test_pivot_monthly_trend()
test_delete_transaction()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
28686,
198,
11748,
9686,
198,
11748,
9686,
13,
31391,
198,
6738,
45389,
10482,
13,
19849,
13,
48720,
10482,
1330,
1635,
628,
628,
198,
4299,
3613,
62,
2220,
7,
83,
65,
2599,
198,
220,
22... | 2.624113 | 282 |
from nip import nip, dumps
@nip
@nip("myfunc")
@nip
| [
6738,
299,
541,
1330,
299,
541,
11,
45514,
628,
198,
31,
77,
541,
628,
198,
31,
77,
541,
7203,
1820,
20786,
4943,
628,
198,
31,
77,
541,
628,
628
] | 2.103448 | 29 |
"""
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
"""
__author__ = 'Danyang'
# Definition for singly-linked list.
| [
37811,
198,
15056,
257,
6692,
1351,
11,
5004,
611,
340,
468,
257,
6772,
287,
340,
13,
198,
198,
7155,
510,
25,
198,
6090,
345,
8494,
340,
1231,
1262,
3131,
2272,
30,
198,
37811,
198,
834,
9800,
834,
796,
705,
35,
1092,
648,
6,
198... | 3.333333 | 54 |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecordRuleReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'obs_addr': 'RecordObsFileAddr',
'record_formats': 'list[str]',
'hls_config': 'HLSRecordConfig',
'mp4_config': 'MP4RecordConfig'
}
attribute_map = {
'obs_addr': 'obs_addr',
'record_formats': 'record_formats',
'hls_config': 'hls_config',
'mp4_config': 'mp4_config'
}
def __init__(self, obs_addr=None, record_formats=None, hls_config=None, mp4_config=None):
"""RecordRuleReq - a model defined in huaweicloud sdk"""
self._obs_addr = None
self._record_formats = None
self._hls_config = None
self._mp4_config = None
self.discriminator = None
self.obs_addr = obs_addr
self.record_formats = record_formats
if hls_config is not None:
self.hls_config = hls_config
if mp4_config is not None:
self.mp4_config = mp4_config
@property
def obs_addr(self):
"""Gets the obs_addr of this RecordRuleReq.
:return: The obs_addr of this RecordRuleReq.
:rtype: RecordObsFileAddr
"""
return self._obs_addr
@obs_addr.setter
def obs_addr(self, obs_addr):
"""Sets the obs_addr of this RecordRuleReq.
:param obs_addr: The obs_addr of this RecordRuleReq.
:type: RecordObsFileAddr
"""
self._obs_addr = obs_addr
@property
def record_formats(self):
"""Gets the record_formats of this RecordRuleReq.
录制格式:支持HLS格式和MP4格式(HLS和MP4为大写)。 - 若配置HLS则必须携带HLSRecordConfig参数 - 若配置MP4则需要携带MP4RecordConfig
:return: The record_formats of this RecordRuleReq.
:rtype: list[str]
"""
return self._record_formats
@record_formats.setter
def record_formats(self, record_formats):
"""Sets the record_formats of this RecordRuleReq.
录制格式:支持HLS格式和MP4格式(HLS和MP4为大写)。 - 若配置HLS则必须携带HLSRecordConfig参数 - 若配置MP4则需要携带MP4RecordConfig
:param record_formats: The record_formats of this RecordRuleReq.
:type: list[str]
"""
self._record_formats = record_formats
@property
def hls_config(self):
"""Gets the hls_config of this RecordRuleReq.
:return: The hls_config of this RecordRuleReq.
:rtype: HLSRecordConfig
"""
return self._hls_config
@hls_config.setter
def hls_config(self, hls_config):
"""Sets the hls_config of this RecordRuleReq.
:param hls_config: The hls_config of this RecordRuleReq.
:type: HLSRecordConfig
"""
self._hls_config = hls_config
@property
def mp4_config(self):
"""Gets the mp4_config of this RecordRuleReq.
:return: The mp4_config of this RecordRuleReq.
:rtype: MP4RecordConfig
"""
return self._mp4_config
@mp4_config.setter
def mp4_config(self, mp4_config):
"""Sets the mp4_config of this RecordRuleReq.
:param mp4_config: The mp4_config of this RecordRuleReq.
:type: MP4RecordConfig
"""
self._mp4_config = mp4_config
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecordRuleReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
302,
198,
11748,
2237,
628,
198,
198,
6738,
289,
84,
707,
68,
291,
75,
2778,
21282,
74,
7295,
13,
26791,
13,
4023,
62,
26791,
1330,
5336,
270,
1096,
62,
1640,
62,
46911,
1634,
628,
... | 1.96688 | 2,657 |
import argparse
import markdown
_EXTENSIONS = (
'markdown.extensions.fenced_code',
'markdown.extensions.tables',
)
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
198,
11748,
1317,
2902,
198,
198,
62,
13918,
16938,
11053,
796,
357,
198,
220,
220,
220,
705,
4102,
2902,
13,
2302,
5736,
13,
69,
5864,
62,
8189,
3256,
198,
220,
220,
220,
705,
4102,
2902,
13,
2302,
5736,
13... | 2.441176 | 68 |
from bs4 import BeautifulSoup
import re
import urllib.parse
import requests
if __name__ == '__main__':
info = u"""<div id="info" class="">\
<span>\
<span class="pl"> 作者</span>\
<a class="" href="/search/%E5%8D%A1%E5%8B%92%E5%BE%B7%C2%B7%E8%83%A1%E8%B5%9B%E5%B0%BC">[美] 卡勒德·胡赛尼</a>\
</span><br>\
<span class="pl">出版社:</span> 上海人民出版社<br>\
<span class="pl">原作名:</span> The Kite Runner<br>\
<span>\
<span class="pl"> 译者</span>:\
<a class="" href="/search/%E6%9D%8E%E7%BB%A7%E5%AE%8F">李继宏</a>
</span><br>\
<span class="pl">出版年:</span> 2006-5<br>\
<span class="pl">页数:</span> 362<br>\
<span class="pl">定价:</span> 29.00元<br>\
<span class="pl">装帧:</span> 平装<br>\
<span class="pl">丛书:</span> <a href="https://book.douban.com/series/19760">卡勒德·胡赛尼作品</a><br>\
<span class="pl">ISBN:</span> 9787208061644<br>\
</div>"""
info = "clearfix"
HtmlParser().parse("https://book.douban.com/subject/1082154/",requests.get("https://book.douban.com/subject/1082154/").content)
| [
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
302,
198,
11748,
2956,
297,
571,
13,
29572,
198,
11748,
7007,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
7508,
796,
334,
37811,
27,
... | 1.754181 | 598 |
""" AUTHTAB.DIR file parser. """
from pybycus.file import File
class AuthTab(File):
""" The Author List (with the filename AUTHTAB.DIR) contains
descriptive information for each text file on the disc. The
purpose of the Author Table is to allow the user to ask for
the author Plato, for example, without having to know that
the actual file name is TLG0059. Each entry contains the
author name, the corresponding file name, synonyms, remarks,
and language. The entries are arranged by category. """
def content(path):
""" Return the content of an AUTHTAB.DIR file. """
return AuthTab(path).content()
if __name__ == "__main__":
import sys
import pprint
pprint.pprint(content(sys.argv[1]))
| [
37811,
37195,
5603,
33,
13,
34720,
2393,
30751,
13,
37227,
198,
198,
6738,
12972,
1525,
9042,
13,
7753,
1330,
9220,
198,
198,
4871,
26828,
33349,
7,
8979,
2599,
198,
220,
220,
220,
37227,
383,
6434,
7343,
357,
4480,
262,
29472,
37195,
... | 3.241228 | 228 |
from .param_value import ParamValue
| [
6738,
764,
17143,
62,
8367,
1330,
25139,
11395,
628
] | 4.111111 | 9 |
import unittest
import os
import pandas as pd
from causal_testing.data_collection.data_collector import ObservationalDataCollector
from causal_testing.specification.causal_specification import Scenario
from causal_testing.specification.variable import Input, Output, Meta
from scipy.stats import uniform, rv_discrete
from tests.test_helpers import create_temp_dir_if_non_existent, remove_temp_dir_if_existent
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
26558,
62,
33407,
13,
7890,
62,
43681,
13,
7890,
62,
33327,
273,
1330,
19243,
864,
6601,
31337,
273,
198,
6738,
26558,
62,
33407,
13,
16684,
264... | 3.375 | 136 |
import copy
# @property
# def F(self):
# attr = "F"
# if attr in self.__dict__:
# return self.__dict__[attr]
# else:
# return None
# Gets called when the item is not found via __getattribute__
# def __getattr__(self, item):
# return super(Individual, self).__setattr__(item, 'orphan')
# def __setitem__(self, key, value):
# self.__dict__[key] = value
#
# def __getitem__(self, key):
# return self.__dict__.get(key)
# def __getattr__(self, attr):
#
# if attr == "F":
# if attr in self.__dict__:
# return self.__dict__[attr]
# else:
# return None
#
# if attr in self.__dict__:
# return self.__dict__[attr]
#
#
#
| [
11748,
4866,
628,
628,
220,
220,
220,
1303,
2488,
26745,
198,
220,
220,
220,
1303,
825,
376,
7,
944,
2599,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
708,
81,
796,
366,
37,
1,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
611... | 1.961905 | 420 |
import unittest
from bgmi.lib.constants import BANGUMI_UPDATE_TIME
from bgmi.lib.controllers import (
add,
cal,
delete,
mark,
recreate_source_relatively_table,
search,
)
from bgmi.main import setup
| [
11748,
555,
715,
395,
198,
198,
6738,
275,
70,
11632,
13,
8019,
13,
9979,
1187,
1330,
347,
15567,
5883,
40,
62,
16977,
62,
34694,
198,
6738,
275,
70,
11632,
13,
8019,
13,
3642,
36667,
1330,
357,
198,
220,
220,
220,
751,
11,
198,
2... | 2.461538 | 91 |
from PIL import Image, ImageFilter
import random
# This library only words with the assumption that the dataset has been formatted as 0.jpg, 1.jpg ... or 0.png, 1.png ... accordingly | [
6738,
350,
4146,
1330,
7412,
11,
7412,
22417,
198,
11748,
4738,
198,
198,
2,
770,
5888,
691,
2456,
351,
262,
13196,
326,
262,
27039,
468,
587,
39559,
355,
657,
13,
9479,
11,
352,
13,
9479,
2644,
393,
657,
13,
11134,
11,
352,
13,
1... | 3.914894 | 47 |
import os
import time
import pandas as pd
FETCH_URL = "https://poloniex.com/public?command=returnChartData¤cyPair=%s&start=%d&end=%d&period=300"
#PAIR_LIST = ["BTC_ETH"]
DATA_DIR = "data"
COLUMNS = ["date","high","low","open","close","volume","quoteVolume","weightedAverage"]
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
640,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
37,
2767,
3398,
62,
21886,
796,
366,
5450,
1378,
16104,
261,
494,
87,
13,
785,
14,
11377,
30,
21812,
28,
7783,
45488,
6601,
5,
34415,
47,
958,
28,
4,
8... | 2.626016 | 123 |
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
from hearthbreaker.game_objects import WeaponCard, Weapon
| [
6738,
3285,
400,
25766,
13,
9979,
1187,
1330,
28521,
2246,
5781,
62,
31631,
11,
48731,
62,
49,
1503,
9050,
198,
6738,
3285,
400,
25766,
13,
6057,
62,
48205,
1330,
13072,
16962,
11,
13072,
628,
198
] | 3.571429 | 35 |
# Copyright 2022 Touca, Inc. Subject to Apache-2.0 License.
from sys import stderr, stdout
from pathlib import Path
from argparse import ArgumentParser
from loguru import logger
from touca.cli._common import Operation
| [
2,
15069,
33160,
23359,
6888,
11,
3457,
13,
15540,
284,
24843,
12,
17,
13,
15,
13789,
13,
198,
198,
6738,
25064,
1330,
336,
1082,
81,
11,
14367,
448,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,... | 3.745763 | 59 |
import pygame
import moves
from typing import List
from pieces.king import King
import copy
SIZE = (1000, 800)
SQUARE_WIDTH = int(0.8 * SIZE[0] // 8)
SQUARE_HEIGHT = SIZE[1] // 8
IMAGES = {}
pygame.init()
screen = pygame.display.set_mode(SIZE)
move_feed = []
running = True
board_array = [
['Br', 'Bn', 'Bb', 'Bq', 'Bk', 'Bb', 'Bn', 'Br'],
['Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp', 'Bp'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['--', '--', '--', '--', '--', '--', '--', '--'],
['Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp', 'Wp'],
['Wr', 'Wn', 'Wb', 'Wq', 'Wk', 'Wb', 'Wn', 'Wr']
]
count = 0
load_images()
draw_board()
draw_pieces()
draw_sidebar()
pygame.display.update()
last_color_moved = 'B'
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if count == 0:
initial_pos = event.pos
if (last_color_moved == 'B' and get_piece_color(initial_pos) == 'W') or (
last_color_moved == 'W' and get_piece_color(initial_pos) == 'B'):
count += 1
draw_board()
highlight_square(initial_pos)
draw_pieces()
elif count == 1:
ending_pos = event.pos
count = 0
if color := handle_move(initial_pos, ending_pos):
last_color_moved = color
draw_board()
draw_pieces()
pygame.display.update()
pygame.quit()
| [
11748,
12972,
6057,
198,
11748,
6100,
198,
6738,
19720,
1330,
7343,
198,
6738,
5207,
13,
3364,
1330,
2677,
198,
11748,
4866,
198,
198,
33489,
796,
357,
12825,
11,
10460,
8,
198,
50,
10917,
12203,
62,
54,
2389,
4221,
796,
493,
7,
15,
... | 1.907104 | 915 |
# ПРИМЕР ПОЛУЧЕНИЯ И УКАЗАНИЯ ТИПА ЛИНИИ ТРАССЫ:
# Тип линии, равно как и калибровочные значения,
# хранятся в энергонезависимой памяти модуля.
from time import sleep
# Подключаем библиотеку для работы с бампером I2C-flash.
from pyiArduinoI2Cbumper import *
# Объявляем объект bum для работы с функциями и методами
# библиотеки pyiArduinoI2Cbumper, указывая адрес модуля на шине I2C.
# Если объявить объект без указания адреса bum = pyiArduinoI2Cbumper(),
# то адрес будет найден автоматически.
bum = pyiArduinoI2Cbumper(0x09)
while True:
# ОПРЕДЕЛЯЕМ ИСПОЛЬЗУЕМЫЙ ТИП ЛИНИИ:
if bum.getLineType() == BUM_LINE_BLACK:
first = "тёмной"
second = "светлой"
elif bum.getLineType() == BUM_LINE_WHITE:
first = "светлой"
second = "тёмной"
t = "Модуль использовал трассу с {} линией"\
", а теперь использует трассу"\
"с {} линией".format(first, second)
print(t)
# УКАЗЫВАЕМ НОВЫЙ ТИП ЛИНИИ:
# Тип линии задаётся как BUM_LINE_BLACK - тёмная
# BUM_LINE_WHITE - светлая
# BUM_LINE_CHANGE - сменить тип линии.
bum.setLineType(BUM_LINE_CHANGE)
sleep(2)
| [
2,
12466,
253,
140,
254,
140,
246,
140,
250,
140,
243,
140,
254,
12466,
253,
140,
252,
140,
249,
140,
96,
140,
100,
140,
243,
140,
251,
140,
246,
140,
107,
12466,
246,
12466,
96,
140,
248,
140,
238,
140,
245,
140,
238,
140,
251,... | 1.144153 | 992 |
# Generated by Django 2.2.6 on 2019-11-12 17:18
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
1157,
12,
1065,
1596,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# -*- coding:utf-8 -*-
"""
This script is used to build a qa data for usage.
Typically, each enty contains three elements: a question, an answer, a url
"""
import sys
import re
import os
import jieba
import gensim
try:
import cPickle as pickle
except:
import pickle
reload(sys)
sys.setdefaultencoding('utf-8')
def filtering_line(line_content, stopwords_list):
'''
this function spams the noisy symbols, then cut the line to words and remove the stopwords in each line
:param line_content:
:return:
'''
multi_version = re.compile(ur'-\{.*?(zh-hans|zh-cn):([^;]*?)(;.*?)?\}-')
# punctuation = re.compile(u"[-~!@#$%^&*()_+`=\[\]\\\{\}\\t\\r\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
punctuation = re.compile(u"[\[\]\\\{\}\\t\\r\"|;',<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』]")
line_content = multi_version.sub(ur'\2', line_content)
line_content = punctuation.sub('', line_content.decode('utf8'))
# cut the line content to words
line_content_cut = [w for w in jieba.cut(line_content)]
if stopwords_list is not None:
new_line = []
for word in line_content_cut:
if word not in stopwords_list:
new_line.append(word)
return new_line
else:
return line_content_cut
def load_qa_education(data_dir, education_file):
'''
load the eudcation file, return a list, with each element is a string in each line
'''
education_content = []
idx = 0
with open(os.path.join(data_dir, education_file)) as fid:
for item in fid:
education_content.append(item.strip('\n'))
idx = idx + 1
# if idx % 1000 == 0:
# print 'loading %d-th questions done!' % idx
return education_content
def load_qa_education_with_answer(data_dir, education_file):
'''
load the eudcation file, return a list, with each element is a string in each line
'''
education_content = []
answer_content = []
idx = 0
with open(os.path.join(data_dir, education_file)) as fid:
for item in fid:
if idx % 2 == 0: # questions
education_content.append(item.strip('\n'))
elif idx % 2 == 1: # answer
answer_content.append(item.strip('\n'))
idx = idx + 1
# if idx % 1000 == 0:
# print 'loading %d-th questions done!' % idx
print 'loading %d questions done!' % int(idx/2)
return education_content, answer_content
def load_stopwords_file(data_dir, stopwords_file):
'''
load the stopwords file, return a list, with each element is a string in each line
'''
stopwords_list = []
idx = 0
with open(os.path.join(data_dir, stopwords_file)) as fid:
for item in fid:
stopwords_list.append(item.strip('\n'))
idx = idx + 1
print 'loading %d stopwords done!' % idx
return stopwords_list
def calculate_education_data(data_dir, education_content, stopwords_list):
'''
this file is to calculate the dictionary, similarity matrix given a data.txt file
:param data_dir: the root dir that save the returned data
:param eudcation_content: a list that each element is a eudcation question
:param stopwords_list: stopwords list for eudcation corpus
:return: a dictionary, a simialrity matrix
'''
corpora_documents_name = 'qa_education_corpora.pickle'
if not os.path.exists(os.path.join(data_dir, corpora_documents_name)):
corpora_documents = []
idx = 0
for item_text in education_content:
item_str = filtering_line(item_text, stopwords_list)
corpora_documents.append(item_str)
idx = idx + 1
if idx % 1000 == 0:
print 'jieba cutting for %d-th sentence' % idx
# dump pickfile
fid_corpora = open(os.path.join(data_dir, corpora_documents_name), 'wb')
pickle.dump(corpora_documents, fid_corpora)
fid_corpora.close()
print 'save %s finished' % corpora_documents_name
else:
# load pickfile
fid_corpora = open(os.path.join(data_dir, corpora_documents_name), 'rb')
corpora_documents = pickle.load(fid_corpora)
fid_corpora.close()
print 'load %s finished' % corpora_documents_name
dict_name = 'dict_education'
# 生成字典和向量语料
if not os.path.exists(os.path.join(data_dir, dict_name)):
print 'calculating dictionary education !'
dictionary = gensim.corpora.Dictionary(corpora_documents)
dictionary.save(os.path.join(data_dir, dict_name))
else:
print 'dictionary for education already exists, load it!'
dictionary = gensim.corpora.Dictionary.load(os.path.join(data_dir, dict_name))
corpus = [dictionary.doc2bow(text) for text in corpora_documents]
numSen = len(corpus)
# calculate the similarity for pairwise training samples
num_features = len(dictionary.keys())
print '%d words in dictionary' % num_features
# # save object
sim_name = 'sim_education'
if not os.path.exists(os.path.join(data_dir, sim_name)):
print 'calculating sim_education !'
similarity = gensim.similarities.Similarity(os.path.join(data_dir, sim_name), corpus, num_features)
similarity.save(os.path.join(data_dir, sim_name))
else:
print 'sim_eudcation already exists, load it!'
similarity = gensim.similarities.Similarity.load(os.path.join(data_dir, sim_name))
return dictionary, similarity
def calculate_education_data_w2v(data_dir, education_content, w2v_model, stopwords_list):
'''
this file is to calculate the dictionary, similarity matrix given a data.txt file
:param data_dir: the root dir that save the returned data
:param eudcation_content: a list that each element is a eudcation question
:param stopwords_list: stopwords list for eudcation corpus
:return: a dictionary, a simialrity matrix
'''
corpora_documents = []
idx = 0
for item_text in education_content:
item_str = filtering_line(item_text, stopwords_list)
corpora_documents.append(item_str)
idx = idx + 1
if idx % 1000 == 10:
print 'jieba cutting for %d-th sentence' % idx
# corpus = [text for text in corpora_documents]
corpus = corpora_documents
numSen = len(corpus)
# calculate the similarity for pairwise training samples
# # save object
sim_name = 'sim_education_w2v'
if not os.path.exists(os.path.join(data_dir, sim_name)):
print 'calculating sim_education !'
similarity = gensim.similarities.WmdSimilarity(corpus, w2v_model, num_best=3)
similarity.save(os.path.join(data_dir, sim_name))
else:
print 'sim_eudcation already exists, load it!'
similarity = gensim.similarities.WmdSimilarity.load(os.path.join(data_dir, sim_name))
return similarity
'''
测试的问题:
北京小升初的政策?
成都比较好的小学推荐
小孩子谈恋爱怎么办?
怎么提高小孩子英语学习?
北京好的幼儿园推荐
中考前饮食应该注意什么?
我家小孩上课注意力不集中,贪玩,怎么办?
小孩子在学校打架,怎么办?
成都龙江路小学划片么?
小孩子厌学怎么办?
孩子上课注意力不集中,贪玩怎么办?
武汉比较好的中学有哪些?
幼儿园学前教育有必要吗?
'''
if __name__ == '__main__':
# load the eudcation data
data_dir = './qa_dataset'
qa_education_file = 'qa_education.txt'
# education_content = load_qa_education(data_dir, qa_education_file)
education_content, answer_content = load_qa_education_with_answer(data_dir, qa_education_file)
# use jieba to cut the sentence in each line with stopwords
stopwords_file = 'stopwords_gaokao.txt'
stopwords_dir = './stopwords_cn'
stopwords_list = load_stopwords_file(stopwords_dir, stopwords_file)
# caluclate the dictionary and the similarity of the given corpus
dictionary, similarity = calculate_education_data(data_dir, education_content, stopwords_list)
print 'obtained the dictionary and similarity of the %s corpus!' % qa_education_file
similarity.num_best = 3
while(True):
print '欢迎来到小题博士-教育问答 @_@'
print '你可以咨询与中小学教育相关的问题,比如:'
print ' 北京好的幼儿园推荐? \n 中考前饮食应该注意什么?\n 我家小孩上课注意力不集中,贪玩,怎么办? \n 小孩子在学校打架,怎么办?'
print '################################'
print ''
input_query = raw_input(u'请输入你要问的问题:')
input_query_cut = filtering_line(input_query, stopwords_list)
# parse the input query, get its doc vector
doc_input_query = dictionary.doc2bow(input_query_cut)
res = similarity[doc_input_query]
print '这是你要问的问题吗?'
for idx, content in res:
print '%d, %s' % (idx, education_content[idx])
print '%s' % answer_content[idx]
print '################################'
print '请问下一个问题 @_@'
'''
# caluclate the dictionary and the similarity using walking-earth similarity measure of the given corpus
# load wiki model
wiki_model_file = './tempfile/out_w2v_qa_incremental.model'
wiki_model = gensim.models.Word2Vec.load(wiki_model_file)
similarity = calculate_education_data_w2v(data_dir, education_content, wiki_model, stopwords_list)
print 'obtained the dictionary and similarity of the %s corpus!' % qa_education_file
num_best = 3
while (True):
print '欢迎来到小题博士-教育问答 @_@'
input_query = raw_input(u'请输入你要问的问题:')
input_query_cut = filtering_line(input_query, stopwords_list)
res = similarity[input_query_cut]
print '这是你要问的问题吗?'
for idx, content in res:
print '%d, %s' % (idx, education_content[idx])
print '################################'
print '请问下一个问题 @_@'
'''
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
1212,
4226,
318,
973,
284,
1382,
257,
10662,
64,
1366,
329,
8748,
13,
198,
49321,
11,
1123,
920,
88,
4909,
1115,
4847,
25,
257,
1808,
11,
281,
3280,
11,
... | 2.103486 | 4,561 |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
6738,
5366,
13,
9945,
1330,
20613,
201,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
201,
198,
201,
198
] | 2.432432 | 37 |
from .bilibili.biliAudio import *
from .bilibili.biliVideo import *
from .bilibili.biliLive import *
from .wenku8.Wenku8TXT import *
from .video.imomoe import *
from .video.kakadm import *
from .audio.netease import * | [
6738,
764,
65,
22282,
2403,
13,
65,
2403,
21206,
1330,
1635,
198,
6738,
764,
65,
22282,
2403,
13,
65,
2403,
10798,
1330,
1635,
198,
6738,
764,
65,
22282,
2403,
13,
65,
2403,
18947,
1330,
1635,
198,
6738,
764,
21006,
23063,
23,
13,
5... | 2.679012 | 81 |
import ast
import csv
import json
from absl import flags
import numpy as np
import pandas as pd
FLAGS = flags.FLAGS
def get_timestamps_with_obstacles(filename, obstacle_distance_threshold=10):
"""Finds timestamps when we detected obstacles."""
print(filename)
df = pd.read_csv(
filename,
names=["timestamp", "ms", "log_label", "label_info", "label_value"])
df = df.dropna()
df['label_value'] = df['label_value'].str.replace(" ", ", ")
df['label_value'] = df['label_value'].apply(converter)
obstacles = df[df['log_label'] == 'obstacle']
obstacles = obstacles.set_index('ms')
pose = df[df['log_label'] == 'pose']
timestamps = []
first_timestamp = df["ms"].min()
for t, p in pose[["ms", "label_value"]].values:
if t not in obstacles.index:
continue
obs = obstacles.loc[t]['label_value']
if isinstance(obs, list):
obs = [obs]
else:
obs = obs.values
for o in obs:
dist = np.linalg.norm(np.array(p) - np.array(o))
if 0 < dist <= obstacle_distance_threshold:
timestamps.append(t - first_timestamp)
print("Selected {} timestamps".format(len(timestamps)))
return timestamps
| [
11748,
6468,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
198,
6738,
2352,
75,
1330,
9701,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
38948,
50,
796,
9701,
13,
38948,
50,
628,
628,
628,... | 2.306715 | 551 |
# encoding: UTF-8
from leetcode import *
from typing import Generator, Tuple
@Problem(3, 'Longest Substring Without Repeating Characters', Difficulty.Medium, Tags.HashTable, Tags.String, Tags.TwoPointers)
@Solution.test.lengthOfLongestSubstring
@Solution.test.lengthOfLongestSubstring
@Solution.test.lengthOfLongestSubstring
| [
2,
21004,
25,
41002,
12,
23,
198,
198,
6738,
443,
316,
8189,
1330,
1635,
198,
6738,
19720,
1330,
35986,
11,
309,
29291,
628,
198,
31,
40781,
7,
18,
11,
705,
14617,
395,
3834,
8841,
9170,
30558,
803,
26813,
3256,
27419,
13,
31205,
11... | 3.408163 | 98 |
#!/usr/bin/env python3
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628
] | 2.4 | 10 |
EMOJI_LIST = [
':1st_place_medal:',
':2nd_place_medal:',
':3rd_place_medal:',
':AB_button_(blood_type):',
':ATM_sign:',
':A_button_(blood_type):',
':Afghanistan:',
':Albania:',
':Algeria:',
':American_Samoa:',
':Andorra:',
':Angola:',
':Anguilla:',
':Antarctica:',
':Antigua_&_Barbuda:',
':Aquarius:',
':Argentina:',
':Aries:',
':Armenia:',
':Aruba:',
':Ascension_Island:',
':Australia:',
':Austria:',
':Azerbaijan:',
':BACK_arrow:',
':B_button_(blood_type):',
':Bahamas:',
':Bahrain:',
':Bangladesh:',
':Barbados:',
':Belarus:',
':Belgium:',
':Belize:',
':Benin:',
':Bermuda:',
':Bhutan:',
':Bolivia:',
':Bosnia_&_Herzegovina:',
':Botswana:',
':Bouvet_Island:',
':Brazil:',
':British_Indian_Ocean_Territory:',
':British_Virgin_Islands:',
':Brunei:',
':Bulgaria:',
':Burkina_Faso:',
':Burundi:',
':CL_button:',
':COOL_button:',
':Cambodia:',
':Cameroon:',
':Canada:',
':Canary_Islands:',
':Cancer:',
':Cape_Verde:',
':Capricorn:',
':Caribbean_Netherlands:',
':Cayman_Islands:',
':Central_African_Republic:',
':Ceuta_&_Melilla:',
':Chad:',
':Chile:',
':China:',
':Christmas_Island:',
':Christmas_tree:',
':Clipperton_Island:',
':Cocos_(Keeling)_Islands:',
':Colombia:',
':Comoros:',
':Congo_-_Brazzaville:',
':Congo_-_Kinshasa:',
':Cook_Islands:',
':Costa_Rica:',
':Croatia:',
':Cuba:',
':Curaçao:',
':Cyprus:',
':Czech_Republic:',
':Côte_d’Ivoire:',
':Denmark:',
':Diego_Garcia:',
':Djibouti:',
':Dominica:',
':Dominican_Republic:',
':END_arrow:',
':Ecuador:',
':Egypt:',
':El_Salvador:',
':Equatorial_Guinea:',
':Eritrea:',
':Estonia:',
':Ethiopia:',
':European_Union:',
':FREE_button:',
':Falkland_Islands:',
':Faroe_Islands:',
':Fiji:',
':Finland:',
':France:',
':French_Guiana:',
':French_Polynesia:',
':French_Southern_Territories:',
':Gabon:',
':Gambia:',
':Gemini:',
':Georgia:',
':Germany:',
':Ghana:',
':Gibraltar:',
':Greece:',
':Greenland:',
':Grenada:',
':Guadeloupe:',
':Guam:',
':Guatemala:',
':Guernsey:',
':Guinea:',
':Guinea-Bissau:',
':Guyana:',
':Haiti:',
':Heard_&_McDonald_Islands:',
':Honduras:',
':Hong_Kong_SAR_China:',
':Hungary:',
':ID_button:',
':Iceland:',
':India:',
':Indonesia:',
':Iran:',
':Iraq:',
':Ireland:',
':Isle_of_Man:',
':Israel:',
':Italy:',
':Jamaica:',
':Japan:',
':Japanese_acceptable_button:',
':Japanese_application_button:',
':Japanese_bargain_button:',
':Japanese_castle:',
':Japanese_congratulations_button:',
':Japanese_discount_button:',
':Japanese_dolls:',
':Japanese_free_of_charge_button:',
':Japanese_here_button:',
':Japanese_monthly_amount_button:',
':Japanese_no_vacancy_button:',
':Japanese_not_free_of_charge_button:',
':Japanese_open_for_business_button:',
':Japanese_passing_grade_button:',
':Japanese_post_office:',
':Japanese_prohibited_button:',
':Japanese_reserved_button:',
':Japanese_secret_button:',
':Japanese_service_charge_button:',
':Japanese_symbol_for_beginner:',
':Japanese_vacancy_button:',
':Jersey:',
':Jordan:',
':Kazakhstan:',
':Kenya:',
':Kiribati:',
':Kosovo:',
':Kuwait:',
':Kyrgyzstan:',
':Laos:',
':Latvia:',
':Lebanon:',
':Leo:',
':Lesotho:',
':Liberia:',
':Libra:',
':Libya:',
':Liechtenstein:',
':Lithuania:',
':Luxembourg:',
':Macau_SAR_China:',
':Macedonia:',
':Madagascar:',
':Malawi:',
':Malaysia:',
':Maldives:',
':Mali:',
':Malta:',
':Marshall_Islands:',
':Martinique:',
':Mauritania:',
':Mauritius:',
':Mayotte:',
':Mexico:',
':Micronesia:',
':Moldova:',
':Monaco:',
':Mongolia:',
':Montenegro:',
':Montserrat:',
':Morocco:',
':Mozambique:',
':Mrs._Claus:',
':Mrs._Claus_dark_skin_tone:',
':Mrs._Claus_light_skin_tone:',
':Mrs._Claus_medium-dark_skin_tone:',
':Mrs._Claus_medium-light_skin_tone:',
':Mrs._Claus_medium_skin_tone:',
':Myanmar_(Burma):',
':NEW_button:',
':NG_button:',
':Namibia:',
':Nauru:',
':Nepal:',
':Netherlands:',
':New_Caledonia:',
':New_Zealand:',
':Nicaragua:',
':Niger:',
':Nigeria:',
':Niue:',
':Norfolk_Island:',
':North_Korea:',
':Northern_Mariana_Islands:',
':Norway:',
':OK_button:',
':OK_hand:',
':OK_hand_dark_skin_tone:',
':OK_hand_light_skin_tone:',
':OK_hand_medium-dark_skin_tone:',
':OK_hand_medium-light_skin_tone:',
':OK_hand_medium_skin_tone:',
':ON!_arrow:',
':O_button_(blood_type):',
':Oman:',
':Ophiuchus:',
':P_button:',
':Pakistan:',
':Palau:',
':Palestinian_Territories:',
':Panama:',
':Papua_New_Guinea:',
':Paraguay:',
':Peru:',
':Philippines:',
':Pisces:',
':Pitcairn_Islands:',
':Poland:',
':Portugal:',
':Puerto_Rico:',
':Qatar:',
':Romania:',
':Russia:',
':Rwanda:',
':Réunion:',
':SOON_arrow:',
':SOS_button:',
':Sagittarius:',
':Samoa:',
':San_Marino:',
':Santa_Claus:',
':Santa_Claus_dark_skin_tone:',
':Santa_Claus_light_skin_tone:',
':Santa_Claus_medium-dark_skin_tone:',
':Santa_Claus_medium-light_skin_tone:',
':Santa_Claus_medium_skin_tone:',
':Saudi_Arabia:',
':Scorpius:',
':Senegal:',
':Serbia:',
':Seychelles:',
':Sierra_Leone:',
':Singapore:',
':Sint_Maarten:',
':Slovakia:',
':Slovenia:',
':Solomon_Islands:',
':Somalia:',
':South_Africa:',
':South_Georgia_&_South_Sandwich_Islands:',
':South_Korea:',
':South_Sudan:',
':Spain:',
':Sri_Lanka:',
':St._Barthélemy:',
':St._Helena:',
':St._Kitts_&_Nevis:',
':St._Lucia:',
':St._Martin:',
':St._Pierre_&_Miquelon:',
':St._Vincent_&_Grenadines:',
':Statue_of_Liberty:',
':Sudan:',
':Suriname:',
':Svalbard_&_Jan_Mayen:',
':Swaziland:',
':Sweden:',
':Switzerland:',
':Syria:',
':São_Tomé_&_Príncipe:',
':TOP_arrow:',
':Taiwan:',
':Tajikistan:',
':Tanzania:',
':Taurus:',
':Thailand:',
':Timor-Leste:',
':Togo:',
':Tokelau:',
':Tokyo_tower:',
':Tonga:',
':Trinidad_&_Tobago:',
':Tristan_da_Cunha:',
':Tunisia:',
':Turkey:',
':Turkmenistan:',
':Turks_&_Caicos_Islands:',
':Tuvalu:',
':U.S._Outlying_Islands:',
':U.S._Virgin_Islands:',
':UP!_button:',
':Uganda:',
':Ukraine:',
':United_Arab_Emirates:',
':United_Kingdom:',
':United_Nations:',
':United_States:',
':Uruguay:',
':Uzbekistan:',
':VS_button:',
':Vanuatu:',
':Vatican_City:',
':Venezuela:',
':Vietnam:',
':Virgo:',
':Wallis_&_Futuna:',
':Western_Sahara:',
':Yemen:',
':Zambia:',
':Zimbabwe:',
':admission_tickets:',
':aerial_tramway:',
':airplane:',
':airplane_arrival:',
':airplane_departure:',
':alarm_clock:',
':alembic:',
':alien:',
':alien_monster:',
':ambulance:',
':american_football:',
':amphora:',
':anchor:',
':anger_symbol:',
':angry_face:',
':angry_face_with_horns:',
':anguished_face:',
':ant:',
':antenna_bars:',
':anticlockwise_arrows_button:',
':articulated_lorry:',
':artist_palette:',
':astonished_face:',
':atom_symbol:',
':automobile:',
':avocado:',
':baby:',
':baby_angel:',
':baby_angel_dark_skin_tone:',
':baby_angel_light_skin_tone:',
':baby_angel_medium-dark_skin_tone:',
':baby_angel_medium-light_skin_tone:',
':baby_angel_medium_skin_tone:',
':baby_bottle:',
':baby_chick:',
':baby_dark_skin_tone:',
':baby_light_skin_tone:',
':baby_medium-dark_skin_tone:',
':baby_medium-light_skin_tone:',
':baby_medium_skin_tone:',
':baby_symbol:',
':backhand_index_pointing_down:',
':backhand_index_pointing_down_dark_skin_tone:',
':backhand_index_pointing_down_light_skin_tone:',
':backhand_index_pointing_down_medium-dark_skin_tone:',
':backhand_index_pointing_down_medium-light_skin_tone:',
':backhand_index_pointing_down_medium_skin_tone:',
':backhand_index_pointing_left:',
':backhand_index_pointing_left_dark_skin_tone:',
':backhand_index_pointing_left_light_skin_tone:',
':backhand_index_pointing_left_medium-dark_skin_tone:',
':backhand_index_pointing_left_medium-light_skin_tone:',
':backhand_index_pointing_left_medium_skin_tone:',
':backhand_index_pointing_right:',
':backhand_index_pointing_right_dark_skin_tone:',
':backhand_index_pointing_right_light_skin_tone:',
':backhand_index_pointing_right_medium-dark_skin_tone:',
':backhand_index_pointing_right_medium-light_skin_tone:',
':backhand_index_pointing_right_medium_skin_tone:',
':backhand_index_pointing_up:',
':backhand_index_pointing_up_dark_skin_tone:',
':backhand_index_pointing_up_light_skin_tone:',
':backhand_index_pointing_up_medium-dark_skin_tone:',
':backhand_index_pointing_up_medium-light_skin_tone:',
':backhand_index_pointing_up_medium_skin_tone:',
':bacon:',
':badminton:',
':baggage_claim:',
':baguette_bread:',
':balance_scale:',
':balloon:',
':ballot_box_with_ballot:',
':ballot_box_with_check:',
':banana:',
':bank:',
':bar_chart:',
':barber_pole:',
':baseball:',
':basketball:',
':bat:',
':bathtub:',
':battery:',
':beach_with_umbrella:',
':bear_face:',
':beating_heart:',
':bed:',
':beer_mug:',
':bell:',
':bell_with_slash:',
':bellhop_bell:',
':bento_box:',
':bicycle:',
':bikini:',
':biohazard:',
':bird:',
':birthday_cake:',
':black_circle:',
':black_flag:',
':black_heart:',
':black_large_square:',
':black_medium-small_square:',
':black_medium_square:',
':black_nib:',
':black_small_square:',
':black_square_button:',
':blond-haired_man:',
':blond-haired_man_dark_skin_tone:',
':blond-haired_man_light_skin_tone:',
':blond-haired_man_medium-dark_skin_tone:',
':blond-haired_man_medium-light_skin_tone:',
':blond-haired_man_medium_skin_tone:',
':blond-haired_person:',
':blond-haired_person_dark_skin_tone:',
':blond-haired_person_light_skin_tone:',
':blond-haired_person_medium-dark_skin_tone:',
':blond-haired_person_medium-light_skin_tone:',
':blond-haired_person_medium_skin_tone:',
':blond-haired_woman:',
':blond-haired_woman_dark_skin_tone:',
':blond-haired_woman_light_skin_tone:',
':blond-haired_woman_medium-dark_skin_tone:',
':blond-haired_woman_medium-light_skin_tone:',
':blond-haired_woman_medium_skin_tone:',
':blossom:',
':blowfish:',
':blue_book:',
':blue_circle:',
':blue_heart:',
':boar:',
':bomb:',
':bookmark:',
':bookmark_tabs:',
':books:',
':bottle_with_popping_cork:',
':bouquet:',
':bow_and_arrow:',
':bowling:',
':boxing_glove:',
':boy:',
':boy_dark_skin_tone:',
':boy_light_skin_tone:',
':boy_medium-dark_skin_tone:',
':boy_medium-light_skin_tone:',
':boy_medium_skin_tone:',
':bread:',
':bride_with_veil:',
':bride_with_veil_dark_skin_tone:',
':bride_with_veil_light_skin_tone:',
':bride_with_veil_medium-dark_skin_tone:',
':bride_with_veil_medium-light_skin_tone:',
':bride_with_veil_medium_skin_tone:',
':bridge_at_night:',
':briefcase:',
':bright_button:',
':broken_heart:',
':bug:',
':building_construction:',
':burrito:',
':bus:',
':bus_stop:',
':bust_in_silhouette:',
':busts_in_silhouette:',
':butterfly:',
':cactus:',
':calendar:',
':call_me_hand:',
':call_me_hand_dark_skin_tone:',
':call_me_hand_light_skin_tone:',
':call_me_hand_medium-dark_skin_tone:',
':call_me_hand_medium-light_skin_tone:',
':call_me_hand_medium_skin_tone:',
':camel:',
':camera:',
':camera_with_flash:',
':camping:',
':candle:',
':candy:',
':canoe:',
':card_file_box:',
':card_index:',
':card_index_dividers:',
':carousel_horse:',
':carp_streamer:',
':carrot:',
':castle:',
':cat:',
':cat_face:',
':cat_face_with_tears_of_joy:',
':cat_face_with_wry_smile:',
':chains:',
':chart_decreasing:',
':chart_increasing:',
':chart_increasing_with_yen:',
':cheese_wedge:',
':chequered_flag:',
':cherries:',
':cherry_blossom:',
':chestnut:',
':chicken:',
':children_crossing:',
':chipmunk:',
':chocolate_bar:',
':church:',
':cigarette:',
':cinema:',
':circled_M:',
':circus_tent:',
':cityscape:',
':cityscape_at_dusk:',
':clamp:',
':clapper_board:',
':clapping_hands:',
':clapping_hands_dark_skin_tone:',
':clapping_hands_light_skin_tone:',
':clapping_hands_medium-dark_skin_tone:',
':clapping_hands_medium-light_skin_tone:',
':clapping_hands_medium_skin_tone:',
':classical_building:',
':clinking_beer_mugs:',
':clinking_glasses:',
':clipboard:',
':clockwise_vertical_arrows:',
':closed_book:',
':closed_mailbox_with_lowered_flag:',
':closed_mailbox_with_raised_flag:',
':closed_umbrella:',
':cloud:',
':cloud_with_lightning:',
':cloud_with_lightning_and_rain:',
':cloud_with_rain:',
':cloud_with_snow:',
':clown_face:',
':club_suit:',
':clutch_bag:',
':cocktail_glass:',
':coffin:',
':collision:',
':comet:',
':computer_disk:',
':computer_mouse:',
':confetti_ball:',
':confounded_face:',
':confused_face:',
':construction:',
':construction_worker:',
':construction_worker_dark_skin_tone:',
':construction_worker_light_skin_tone:',
':construction_worker_medium-dark_skin_tone:',
':construction_worker_medium-light_skin_tone:',
':construction_worker_medium_skin_tone:',
':control_knobs:',
':convenience_store:',
':cooked_rice:',
':cookie:',
':cooking:',
':copyright:',
':couch_and_lamp:',
':couple_with_heart:',
':couple_with_heart_man_man:',
':couple_with_heart_woman_man:',
':couple_with_heart_woman_woman:',
':cow:',
':cow_face:',
':cowboy_hat_face:',
':crab:',
':crayon:',
':credit_card:',
':crescent_moon:',
':cricket:',
':crocodile:',
':croissant:',
':cross_mark:',
':cross_mark_button:',
':crossed_fingers:',
':crossed_fingers_dark_skin_tone:',
':crossed_fingers_light_skin_tone:',
':crossed_fingers_medium-dark_skin_tone:',
':crossed_fingers_medium-light_skin_tone:',
':crossed_fingers_medium_skin_tone:',
':crossed_flags:',
':crossed_swords:',
':crown:',
':crying_cat_face:',
':crying_face:',
':crystal_ball:',
':cucumber:',
':curly_loop:',
':currency_exchange:',
':curry_rice:',
':custard:',
':customs:',
':cyclone:',
':dagger:',
':dango:',
':dark_skin_tone:',
':dashing_away:',
':deciduous_tree:',
':deer:',
':delivery_truck:',
':department_store:',
':derelict_house:',
':desert:',
':desert_island:',
':desktop_computer:',
':detective:',
':detective_dark_skin_tone:',
':detective_light_skin_tone:',
':detective_medium-dark_skin_tone:',
':detective_medium-light_skin_tone:',
':detective_medium_skin_tone:',
':diamond_suit:',
':diamond_with_a_dot:',
':dim_button:',
':direct_hit:',
':disappointed_but_relieved_face:',
':disappointed_face:',
':dizzy:',
':dizzy_face:',
':dog:',
':dog_face:',
':dollar_banknote:',
':dolphin:',
':door:',
':dotted_six-pointed_star:',
':double_curly_loop:',
':double_exclamation_mark:',
':doughnut:',
':dove:',
':down-left_arrow:',
':down-right_arrow:',
':down_arrow:',
':down_button:',
':dragon:',
':dragon_face:',
':dress:',
':drooling_face:',
':droplet:',
':drum:',
':duck:',
':dvd:',
':e-mail:',
':eagle:',
':ear:',
':ear_dark_skin_tone:',
':ear_light_skin_tone:',
':ear_medium-dark_skin_tone:',
':ear_medium-light_skin_tone:',
':ear_medium_skin_tone:',
':ear_of_corn:',
':egg:',
':eggplant:',
':eight-pointed_star:',
':eight-spoked_asterisk:',
':eight-thirty:',
':eight_o’clock:',
':eject_button:',
':electric_plug:',
':elephant:',
':eleven-thirty:',
':eleven_o’clock:',
':envelope:',
':envelope_with_arrow:',
':euro_banknote:',
':evergreen_tree:',
':exclamation_mark:',
':exclamation_question_mark:',
':expressionless_face:',
':eye:',
':eye_in_speech_bubble:',
':eyes:',
':face_blowing_a_kiss:',
':face_savouring_delicious_food:',
':face_screaming_in_fear:',
':face_with_cold_sweat:',
':face_with_head-bandage:',
':face_with_medical_mask:',
':face_with_open_mouth:',
':face_with_open_mouth_&_cold_sweat:',
':face_with_rolling_eyes:',
':face_with_steam_from_nose:',
':face_with_stuck-out_tongue:',
':face_with_stuck-out_tongue_&_closed_eyes:',
':face_with_stuck-out_tongue_&_winking_eye:',
':face_with_tears_of_joy:',
':face_with_thermometer:',
':face_without_mouth:',
':factory:',
':fallen_leaf:',
':family:',
':family_man_boy:',
':family_man_boy_boy:',
':family_man_girl:',
':family_man_girl_boy:',
':family_man_girl_girl:',
':family_man_man_boy:',
':family_man_man_boy_boy:',
':family_man_man_girl:',
':family_man_man_girl_boy:',
':family_man_man_girl_girl:',
':family_man_woman_boy:',
':family_man_woman_boy_boy:',
':family_man_woman_girl:',
':family_man_woman_girl_boy:',
':family_man_woman_girl_girl:',
':family_woman_boy:',
':family_woman_boy_boy:',
':family_woman_girl:',
':family_woman_girl_boy:',
':family_woman_girl_girl:',
':family_woman_woman_boy:',
':family_woman_woman_boy_boy:',
':family_woman_woman_girl:',
':family_woman_woman_girl_boy:',
':family_woman_woman_girl_girl:',
':fast-forward_button:',
':fast_down_button:',
':fast_reverse_button:',
':fast_up_button:',
':fax_machine:',
':fearful_face:',
':female_sign:',
':ferris_wheel:',
':ferry:',
':field_hockey:',
':file_cabinet:',
':file_folder:',
':film_frames:',
':film_projector:',
':fire:',
':fire_engine:',
':fireworks:',
':first_quarter_moon:',
':first_quarter_moon_with_face:',
':fish:',
':fish_cake_with_swirl:',
':fishing_pole:',
':five-thirty:',
':five_o’clock:',
':flag_in_hole:',
':flashlight:',
':fleur-de-lis:',
':flexed_biceps:',
':flexed_biceps_dark_skin_tone:',
':flexed_biceps_light_skin_tone:',
':flexed_biceps_medium-dark_skin_tone:',
':flexed_biceps_medium-light_skin_tone:',
':flexed_biceps_medium_skin_tone:',
':floppy_disk:',
':flower_playing_cards:',
':flushed_face:',
':fog:',
':foggy:',
':folded_hands:',
':folded_hands_dark_skin_tone:',
':folded_hands_light_skin_tone:',
':folded_hands_medium-dark_skin_tone:',
':folded_hands_medium-light_skin_tone:',
':folded_hands_medium_skin_tone:',
':footprints:',
':fork_and_knife:',
':fork_and_knife_with_plate:',
':fountain:',
':fountain_pen:',
':four-thirty:',
':four_leaf_clover:',
':four_o’clock:',
':fox_face:',
':framed_picture:',
':french_fries:',
':fried_shrimp:',
':frog_face:',
':front-facing_baby_chick:',
':frowning_face:',
':frowning_face_with_open_mouth:',
':fuel_pump:',
':full_moon:',
':full_moon_with_face:',
':funeral_urn:',
':game_die:',
':gear:',
':gem_stone:',
':ghost:',
':girl:',
':girl_dark_skin_tone:',
':girl_light_skin_tone:',
':girl_medium-dark_skin_tone:',
':girl_medium-light_skin_tone:',
':girl_medium_skin_tone:',
':glass_of_milk:',
':glasses:',
':globe_showing_Americas:',
':globe_showing_Asia-Australia:',
':globe_showing_Europe-Africa:',
':globe_with_meridians:',
':glowing_star:',
':goal_net:',
':goat:',
':goblin:',
':gorilla:',
':graduation_cap:',
':grapes:',
':green_apple:',
':green_book:',
':green_heart:',
':green_salad:',
':grimacing_face:',
':grinning_cat_face_with_smiling_eyes:',
':grinning_face:',
':grinning_face_with_smiling_eyes:',
':growing_heart:',
':guard:',
':guard_dark_skin_tone:',
':guard_light_skin_tone:',
':guard_medium-dark_skin_tone:',
':guard_medium-light_skin_tone:',
':guard_medium_skin_tone:',
':guitar:',
':hamburger:',
':hammer:',
':hammer_and_pick:',
':hammer_and_wrench:',
':hamster_face:',
':handbag:',
':handshake:',
':hatching_chick:',
':headphone:',
':hear-no-evil_monkey:',
':heart_decoration:',
':heart_suit:',
':heart_with_arrow:',
':heart_with_ribbon:',
':heavy_check_mark:',
':heavy_division_sign:',
':heavy_dollar_sign:',
':heavy_heart_exclamation:',
':heavy_large_circle:',
':heavy_minus_sign:',
':heavy_multiplication_x:',
':heavy_plus_sign:',
':helicopter:',
':herb:',
':hibiscus:',
':high-heeled_shoe:',
':high-speed_train:',
':high-speed_train_with_bullet_nose:',
':high_voltage:',
':hole:',
':honey_pot:',
':honeybee:',
':horizontal_traffic_light:',
':horse:',
':horse_face:',
':horse_racing:',
':horse_racing_dark_skin_tone:',
':horse_racing_light_skin_tone:',
':horse_racing_medium-dark_skin_tone:',
':horse_racing_medium-light_skin_tone:',
':horse_racing_medium_skin_tone:',
':hospital:',
':hot_beverage:',
':hot_dog:',
':hot_pepper:',
':hot_springs:',
':hotel:',
':hourglass:',
':hourglass_with_flowing_sand:',
':house:',
':house_with_garden:',
':hugging_face:',
':hundred_points:',
':hushed_face:',
':ice_cream:',
':ice_hockey:',
':ice_skate:',
':inbox_tray:',
':incoming_envelope:',
':index_pointing_up:',
':index_pointing_up_dark_skin_tone:',
':index_pointing_up_light_skin_tone:',
':index_pointing_up_medium-dark_skin_tone:',
':index_pointing_up_medium-light_skin_tone:',
':index_pointing_up_medium_skin_tone:',
':information:',
':input_latin_letters:',
':input_latin_lowercase:',
':input_latin_uppercase:',
':input_numbers:',
':input_symbols:',
':jack-o-lantern:',
':jeans:',
':joker:',
':joystick:',
':kaaba:',
':key:',
':keyboard:',
':keycap_#:',
':keycap_*:',
':keycap_0:',
':keycap_1:',
':keycap_10:',
':keycap_2:',
':keycap_3:',
':keycap_4:',
':keycap_5:',
':keycap_6:',
':keycap_7:',
':keycap_8:',
':keycap_9:',
':kick_scooter:',
':kimono:',
':kiss:',
':kiss_man_man:',
':kiss_mark:',
':kiss_woman_man:',
':kiss_woman_woman:',
':kissing_cat_face_with_closed_eyes:',
':kissing_face:',
':kissing_face_with_closed_eyes:',
':kissing_face_with_smiling_eyes:',
':kitchen_knife:',
':kiwi_fruit:',
':koala:',
':label:',
':lady_beetle:',
':laptop_computer:',
':large_blue_diamond:',
':large_orange_diamond:',
':last_quarter_moon:',
':last_quarter_moon_with_face:',
':last_track_button:',
':latin_cross:',
':leaf_fluttering_in_wind:',
':ledger:',
':left-facing_fist:',
':left-facing_fist_dark_skin_tone:',
':left-facing_fist_light_skin_tone:',
':left-facing_fist_medium-dark_skin_tone:',
':left-facing_fist_medium-light_skin_tone:',
':left-facing_fist_medium_skin_tone:',
':left-pointing_magnifying_glass:',
':left-right_arrow:',
':left_arrow:',
':left_arrow_curving_right:',
':left_luggage:',
':left_speech_bubble:',
':lemon:',
':leopard:',
':level_slider:',
':light_bulb:',
':light_rail:',
':light_skin_tone:',
':link:',
':linked_paperclips:',
':lion_face:',
':lipstick:',
':litter_in_bin_sign:',
':lizard:',
':locked:',
':locked_with_key:',
':locked_with_pen:',
':locomotive:',
':lollipop:',
':loudly_crying_face:',
':loudspeaker:',
':love_hotel:',
':love_letter:',
':lying_face:',
':mahjong_red_dragon:',
':male_sign:',
':man:',
':man_and_woman_holding_hands:',
':man_artist:',
':man_artist_dark_skin_tone:',
':man_artist_light_skin_tone:',
':man_artist_medium-dark_skin_tone:',
':man_artist_medium-light_skin_tone:',
':man_artist_medium_skin_tone:',
':man_astronaut:',
':man_astronaut_dark_skin_tone:',
':man_astronaut_light_skin_tone:',
':man_astronaut_medium-dark_skin_tone:',
':man_astronaut_medium-light_skin_tone:',
':man_astronaut_medium_skin_tone:',
':man_biking:',
':man_biking_dark_skin_tone:',
':man_biking_light_skin_tone:',
':man_biking_medium-dark_skin_tone:',
':man_biking_medium-light_skin_tone:',
':man_biking_medium_skin_tone:',
':man_bouncing_ball:',
':man_bouncing_ball_dark_skin_tone:',
':man_bouncing_ball_light_skin_tone:',
':man_bouncing_ball_medium-dark_skin_tone:',
':man_bouncing_ball_medium-light_skin_tone:',
':man_bouncing_ball_medium_skin_tone:',
':man_bowing:',
':man_bowing_dark_skin_tone:',
':man_bowing_light_skin_tone:',
':man_bowing_medium-dark_skin_tone:',
':man_bowing_medium-light_skin_tone:',
':man_bowing_medium_skin_tone:',
':man_cartwheeling:',
':man_cartwheeling_dark_skin_tone:',
':man_cartwheeling_light_skin_tone:',
':man_cartwheeling_medium-dark_skin_tone:',
':man_cartwheeling_medium-light_skin_tone:',
':man_cartwheeling_medium_skin_tone:',
':man_construction_worker:',
':man_construction_worker_dark_skin_tone:',
':man_construction_worker_light_skin_tone:',
':man_construction_worker_medium-dark_skin_tone:',
':man_construction_worker_medium-light_skin_tone:',
':man_construction_worker_medium_skin_tone:',
':man_cook:',
':man_cook_dark_skin_tone:',
':man_cook_light_skin_tone:',
':man_cook_medium-dark_skin_tone:',
':man_cook_medium-light_skin_tone:',
':man_cook_medium_skin_tone:',
':man_dancing:',
':man_dancing_dark_skin_tone:',
':man_dancing_light_skin_tone:',
':man_dancing_medium-dark_skin_tone:',
':man_dancing_medium-light_skin_tone:',
':man_dancing_medium_skin_tone:',
':man_dark_skin_tone:',
':man_detective:',
':man_detective_dark_skin_tone:',
':man_detective_light_skin_tone:',
':man_detective_medium-dark_skin_tone:',
':man_detective_medium-light_skin_tone:',
':man_detective_medium_skin_tone:',
':man_facepalming:',
':man_facepalming_dark_skin_tone:',
':man_facepalming_light_skin_tone:',
':man_facepalming_medium-dark_skin_tone:',
':man_facepalming_medium-light_skin_tone:',
':man_facepalming_medium_skin_tone:',
':man_factory_worker:',
':man_factory_worker_dark_skin_tone:',
':man_factory_worker_light_skin_tone:',
':man_factory_worker_medium-dark_skin_tone:',
':man_factory_worker_medium-light_skin_tone:',
':man_factory_worker_medium_skin_tone:',
':man_farmer:',
':man_farmer_dark_skin_tone:',
':man_farmer_light_skin_tone:',
':man_farmer_medium-dark_skin_tone:',
':man_farmer_medium-light_skin_tone:',
':man_farmer_medium_skin_tone:',
':man_firefighter:',
':man_firefighter_dark_skin_tone:',
':man_firefighter_light_skin_tone:',
':man_firefighter_medium-dark_skin_tone:',
':man_firefighter_medium-light_skin_tone:',
':man_firefighter_medium_skin_tone:',
':man_frowning:',
':man_frowning_dark_skin_tone:',
':man_frowning_light_skin_tone:',
':man_frowning_medium-dark_skin_tone:',
':man_frowning_medium-light_skin_tone:',
':man_frowning_medium_skin_tone:',
':man_gesturing_NO:',
':man_gesturing_NO_dark_skin_tone:',
':man_gesturing_NO_light_skin_tone:',
':man_gesturing_NO_medium-dark_skin_tone:',
':man_gesturing_NO_medium-light_skin_tone:',
':man_gesturing_NO_medium_skin_tone:',
':man_gesturing_OK:',
':man_gesturing_OK_dark_skin_tone:',
':man_gesturing_OK_light_skin_tone:',
':man_gesturing_OK_medium-dark_skin_tone:',
':man_gesturing_OK_medium-light_skin_tone:',
':man_gesturing_OK_medium_skin_tone:',
':man_getting_haircut:',
':man_getting_haircut_dark_skin_tone:',
':man_getting_haircut_light_skin_tone:',
':man_getting_haircut_medium-dark_skin_tone:',
':man_getting_haircut_medium-light_skin_tone:',
':man_getting_haircut_medium_skin_tone:',
':man_getting_massage:',
':man_getting_massage_dark_skin_tone:',
':man_getting_massage_light_skin_tone:',
':man_getting_massage_medium-dark_skin_tone:',
':man_getting_massage_medium-light_skin_tone:',
':man_getting_massage_medium_skin_tone:',
':man_golfing:',
':man_golfing_dark_skin_tone:',
':man_golfing_light_skin_tone:',
':man_golfing_medium-dark_skin_tone:',
':man_golfing_medium-light_skin_tone:',
':man_golfing_medium_skin_tone:',
':man_guard:',
':man_guard_dark_skin_tone:',
':man_guard_light_skin_tone:',
':man_guard_medium-dark_skin_tone:',
':man_guard_medium-light_skin_tone:',
':man_guard_medium_skin_tone:',
':man_health_worker:',
':man_health_worker_dark_skin_tone:',
':man_health_worker_light_skin_tone:',
':man_health_worker_medium-dark_skin_tone:',
':man_health_worker_medium-light_skin_tone:',
':man_health_worker_medium_skin_tone:',
':man_in_business_suit_levitating:',
':man_in_business_suit_levitating_dark_skin_tone:',
':man_in_business_suit_levitating_light_skin_tone:',
':man_in_business_suit_levitating_medium-dark_skin_tone:',
':man_in_business_suit_levitating_medium-light_skin_tone:',
':man_in_business_suit_levitating_medium_skin_tone:',
':man_in_tuxedo:',
':man_in_tuxedo_dark_skin_tone:',
':man_in_tuxedo_light_skin_tone:',
':man_in_tuxedo_medium-dark_skin_tone:',
':man_in_tuxedo_medium-light_skin_tone:',
':man_in_tuxedo_medium_skin_tone:',
':man_judge:',
':man_judge_dark_skin_tone:',
':man_judge_light_skin_tone:',
':man_judge_medium-dark_skin_tone:',
':man_judge_medium-light_skin_tone:',
':man_judge_medium_skin_tone:',
':man_juggling:',
':man_juggling_dark_skin_tone:',
':man_juggling_light_skin_tone:',
':man_juggling_medium-dark_skin_tone:',
':man_juggling_medium-light_skin_tone:',
':man_juggling_medium_skin_tone:',
':man_lifting_weights:',
':man_lifting_weights_dark_skin_tone:',
':man_lifting_weights_light_skin_tone:',
':man_lifting_weights_medium-dark_skin_tone:',
':man_lifting_weights_medium-light_skin_tone:',
':man_lifting_weights_medium_skin_tone:',
':man_light_skin_tone:',
':man_mechanic:',
':man_mechanic_dark_skin_tone:',
':man_mechanic_light_skin_tone:',
':man_mechanic_medium-dark_skin_tone:',
':man_mechanic_medium-light_skin_tone:',
':man_mechanic_medium_skin_tone:',
':man_medium-dark_skin_tone:',
':man_medium-light_skin_tone:',
':man_medium_skin_tone:',
':man_mountain_biking:',
':man_mountain_biking_dark_skin_tone:',
':man_mountain_biking_light_skin_tone:',
':man_mountain_biking_medium-dark_skin_tone:',
':man_mountain_biking_medium-light_skin_tone:',
':man_mountain_biking_medium_skin_tone:',
':man_office_worker:',
':man_office_worker_dark_skin_tone:',
':man_office_worker_light_skin_tone:',
':man_office_worker_medium-dark_skin_tone:',
':man_office_worker_medium-light_skin_tone:',
':man_office_worker_medium_skin_tone:',
':man_pilot:',
':man_pilot_dark_skin_tone:',
':man_pilot_light_skin_tone:',
':man_pilot_medium-dark_skin_tone:',
':man_pilot_medium-light_skin_tone:',
':man_pilot_medium_skin_tone:',
':man_playing_handball:',
':man_playing_handball_dark_skin_tone:',
':man_playing_handball_light_skin_tone:',
':man_playing_handball_medium-dark_skin_tone:',
':man_playing_handball_medium-light_skin_tone:',
':man_playing_handball_medium_skin_tone:',
':man_playing_water_polo:',
':man_playing_water_polo_dark_skin_tone:',
':man_playing_water_polo_light_skin_tone:',
':man_playing_water_polo_medium-dark_skin_tone:',
':man_playing_water_polo_medium-light_skin_tone:',
':man_playing_water_polo_medium_skin_tone:',
':man_police_officer:',
':man_police_officer_dark_skin_tone:',
':man_police_officer_light_skin_tone:',
':man_police_officer_medium-dark_skin_tone:',
':man_police_officer_medium-light_skin_tone:',
':man_police_officer_medium_skin_tone:',
':man_pouting:',
':man_pouting_dark_skin_tone:',
':man_pouting_light_skin_tone:',
':man_pouting_medium-dark_skin_tone:',
':man_pouting_medium-light_skin_tone:',
':man_pouting_medium_skin_tone:',
':man_raising_hand:',
':man_raising_hand_dark_skin_tone:',
':man_raising_hand_light_skin_tone:',
':man_raising_hand_medium-dark_skin_tone:',
':man_raising_hand_medium-light_skin_tone:',
':man_raising_hand_medium_skin_tone:',
':man_rowing_boat:',
':man_rowing_boat_dark_skin_tone:',
':man_rowing_boat_light_skin_tone:',
':man_rowing_boat_medium-dark_skin_tone:',
':man_rowing_boat_medium-light_skin_tone:',
':man_rowing_boat_medium_skin_tone:',
':man_running:',
':man_running_dark_skin_tone:',
':man_running_light_skin_tone:',
':man_running_medium-dark_skin_tone:',
':man_running_medium-light_skin_tone:',
':man_running_medium_skin_tone:',
':man_scientist:',
':man_scientist_dark_skin_tone:',
':man_scientist_light_skin_tone:',
':man_scientist_medium-dark_skin_tone:',
':man_scientist_medium-light_skin_tone:',
':man_scientist_medium_skin_tone:',
':man_shrugging:',
':man_shrugging_dark_skin_tone:',
':man_shrugging_light_skin_tone:',
':man_shrugging_medium-dark_skin_tone:',
':man_shrugging_medium-light_skin_tone:',
':man_shrugging_medium_skin_tone:',
':man_singer:',
':man_singer_dark_skin_tone:',
':man_singer_light_skin_tone:',
':man_singer_medium-dark_skin_tone:',
':man_singer_medium-light_skin_tone:',
':man_singer_medium_skin_tone:',
':man_student:',
':man_student_dark_skin_tone:',
':man_student_light_skin_tone:',
':man_student_medium-dark_skin_tone:',
':man_student_medium-light_skin_tone:',
':man_student_medium_skin_tone:',
':man_surfing:',
':man_surfing_dark_skin_tone:',
':man_surfing_light_skin_tone:',
':man_surfing_medium-dark_skin_tone:',
':man_surfing_medium-light_skin_tone:',
':man_surfing_medium_skin_tone:',
':man_swimming:',
':man_swimming_dark_skin_tone:',
':man_swimming_light_skin_tone:',
':man_swimming_medium-dark_skin_tone:',
':man_swimming_medium-light_skin_tone:',
':man_swimming_medium_skin_tone:',
':man_teacher:',
':man_teacher_dark_skin_tone:',
':man_teacher_light_skin_tone:',
':man_teacher_medium-dark_skin_tone:',
':man_teacher_medium-light_skin_tone:',
':man_teacher_medium_skin_tone:',
':man_technologist:',
':man_technologist_dark_skin_tone:',
':man_technologist_light_skin_tone:',
':man_technologist_medium-dark_skin_tone:',
':man_technologist_medium-light_skin_tone:',
':man_technologist_medium_skin_tone:',
':man_tipping_hand:',
':man_tipping_hand_dark_skin_tone:',
':man_tipping_hand_light_skin_tone:',
':man_tipping_hand_medium-dark_skin_tone:',
':man_tipping_hand_medium-light_skin_tone:',
':man_tipping_hand_medium_skin_tone:',
':man_walking:',
':man_walking_dark_skin_tone:',
':man_walking_light_skin_tone:',
':man_walking_medium-dark_skin_tone:',
':man_walking_medium-light_skin_tone:',
':man_walking_medium_skin_tone:',
':man_wearing_turban:',
':man_wearing_turban_dark_skin_tone:',
':man_wearing_turban_light_skin_tone:',
':man_wearing_turban_medium-dark_skin_tone:',
':man_wearing_turban_medium-light_skin_tone:',
':man_wearing_turban_medium_skin_tone:',
':man_with_Chinese_cap:',
':man_with_Chinese_cap_dark_skin_tone:',
':man_with_Chinese_cap_light_skin_tone:',
':man_with_Chinese_cap_medium-dark_skin_tone:',
':man_with_Chinese_cap_medium-light_skin_tone:',
':man_with_Chinese_cap_medium_skin_tone:',
':mantelpiece_clock:',
':man’s_shoe:',
':map_of_Japan:',
':maple_leaf:',
':martial_arts_uniform:',
':meat_on_bone:',
':medical_symbol:',
':medium-dark_skin_tone:',
':medium-light_skin_tone:',
':medium_skin_tone:',
':megaphone:',
':melon:',
':memo:',
':men_with_bunny_ears_partying:',
':men_wrestling:',
':menorah:',
':men’s_room:',
':metro:',
':microphone:',
':microscope:',
':middle_finger:',
':middle_finger_dark_skin_tone:',
':middle_finger_light_skin_tone:',
':middle_finger_medium-dark_skin_tone:',
':middle_finger_medium-light_skin_tone:',
':middle_finger_medium_skin_tone:',
':military_medal:',
':milky_way:',
':minibus:',
':moai:',
':mobile_phone:',
':mobile_phone_off:',
':mobile_phone_with_arrow:',
':money-mouth_face:',
':money_bag:',
':money_with_wings:',
':monkey:',
':monkey_face:',
':monorail:',
':moon_viewing_ceremony:',
':mosque:',
':motor_boat:',
':motor_scooter:',
':motorcycle:',
':motorway:',
':mount_fuji:',
':mountain:',
':mountain_cableway:',
':mountain_railway:',
':mouse:',
':mouse_face:',
':mouth:',
':movie_camera:',
':mushroom:',
':musical_keyboard:',
':musical_note:',
':musical_notes:',
':musical_score:',
':muted_speaker:',
':nail_polish:',
':nail_polish_dark_skin_tone:',
':nail_polish_light_skin_tone:',
':nail_polish_medium-dark_skin_tone:',
':nail_polish_medium-light_skin_tone:',
':nail_polish_medium_skin_tone:',
':name_badge:',
':national_park:',
':nauseated_face:',
':necktie:',
':nerd_face:',
':neutral_face:',
':new_moon:',
':new_moon_face:',
':newspaper:',
':next_track_button:',
':night_with_stars:',
':nine-thirty:',
':nine_o’clock:',
':no_bicycles:',
':no_entry:',
':no_littering:',
':no_mobile_phones:',
':no_one_under_eighteen:',
':no_pedestrians:',
':no_smoking:',
':non-potable_water:',
':nose:',
':nose_dark_skin_tone:',
':nose_light_skin_tone:',
':nose_medium-dark_skin_tone:',
':nose_medium-light_skin_tone:',
':nose_medium_skin_tone:',
':notebook:',
':notebook_with_decorative_cover:',
':nut_and_bolt:',
':octopus:',
':oden:',
':office_building:',
':ogre:',
':oil_drum:',
':old_key:',
':old_man:',
':old_man_dark_skin_tone:',
':old_man_light_skin_tone:',
':old_man_medium-dark_skin_tone:',
':old_man_medium-light_skin_tone:',
':old_man_medium_skin_tone:',
':old_woman:',
':old_woman_dark_skin_tone:',
':old_woman_light_skin_tone:',
':old_woman_medium-dark_skin_tone:',
':old_woman_medium-light_skin_tone:',
':old_woman_medium_skin_tone:',
':om:',
':oncoming_automobile:',
':oncoming_bus:',
':oncoming_fist:',
':oncoming_fist_dark_skin_tone:',
':oncoming_fist_light_skin_tone:',
':oncoming_fist_medium-dark_skin_tone:',
':oncoming_fist_medium-light_skin_tone:',
':oncoming_fist_medium_skin_tone:',
':oncoming_police_car:',
':oncoming_taxi:',
':one-thirty:',
':one_o’clock:',
':open_book:',
':open_file_folder:',
':open_hands:',
':open_hands_dark_skin_tone:',
':open_hands_light_skin_tone:',
':open_hands_medium-dark_skin_tone:',
':open_hands_medium-light_skin_tone:',
':open_hands_medium_skin_tone:',
':open_mailbox_with_lowered_flag:',
':open_mailbox_with_raised_flag:',
':optical_disk:',
':orange_book:',
':orthodox_cross:',
':outbox_tray:',
':owl:',
':ox:',
':package:',
':page_facing_up:',
':page_with_curl:',
':pager:',
':paintbrush:',
':palm_tree:',
':pancakes:',
':panda_face:',
':paperclip:',
':part_alternation_mark:',
':party_popper:',
':passenger_ship:',
':passport_control:',
':pause_button:',
':paw_prints:',
':peace_symbol:',
':peach:',
':peanuts:',
':pear:',
':pen:',
':pencil:',
':penguin:',
':pensive_face:',
':people_with_bunny_ears_partying:',
':people_wrestling:',
':performing_arts:',
':persevering_face:',
':person_biking:',
':person_biking_dark_skin_tone:',
':person_biking_light_skin_tone:',
':person_biking_medium-dark_skin_tone:',
':person_biking_medium-light_skin_tone:',
':person_biking_medium_skin_tone:',
':person_bouncing_ball:',
':person_bouncing_ball_dark_skin_tone:',
':person_bouncing_ball_light_skin_tone:',
':person_bouncing_ball_medium-dark_skin_tone:',
':person_bouncing_ball_medium-light_skin_tone:',
':person_bouncing_ball_medium_skin_tone:',
':person_bowing:',
':person_bowing_dark_skin_tone:',
':person_bowing_light_skin_tone:',
':person_bowing_medium-dark_skin_tone:',
':person_bowing_medium-light_skin_tone:',
':person_bowing_medium_skin_tone:',
':person_cartwheeling:',
':person_cartwheeling_dark_skin_tone:',
':person_cartwheeling_light_skin_tone:',
':person_cartwheeling_medium-dark_skin_tone:',
':person_cartwheeling_medium-light_skin_tone:',
':person_cartwheeling_medium_skin_tone:',
':person_facepalming:',
':person_facepalming_dark_skin_tone:',
':person_facepalming_light_skin_tone:',
':person_facepalming_medium-dark_skin_tone:',
':person_facepalming_medium-light_skin_tone:',
':person_facepalming_medium_skin_tone:',
':person_fencing:',
':person_frowning:',
':person_frowning_dark_skin_tone:',
':person_frowning_light_skin_tone:',
':person_frowning_medium-dark_skin_tone:',
':person_frowning_medium-light_skin_tone:',
':person_frowning_medium_skin_tone:',
':person_gesturing_NO:',
':person_gesturing_NO_dark_skin_tone:',
':person_gesturing_NO_light_skin_tone:',
':person_gesturing_NO_medium-dark_skin_tone:',
':person_gesturing_NO_medium-light_skin_tone:',
':person_gesturing_NO_medium_skin_tone:',
':person_gesturing_OK:',
':person_gesturing_OK_dark_skin_tone:',
':person_gesturing_OK_light_skin_tone:',
':person_gesturing_OK_medium-dark_skin_tone:',
':person_gesturing_OK_medium-light_skin_tone:',
':person_gesturing_OK_medium_skin_tone:',
':person_getting_haircut:',
':person_getting_haircut_dark_skin_tone:',
':person_getting_haircut_light_skin_tone:',
':person_getting_haircut_medium-dark_skin_tone:',
':person_getting_haircut_medium-light_skin_tone:',
':person_getting_haircut_medium_skin_tone:',
':person_getting_massage:',
':person_getting_massage_dark_skin_tone:',
':person_getting_massage_light_skin_tone:',
':person_getting_massage_medium-dark_skin_tone:',
':person_getting_massage_medium-light_skin_tone:',
':person_getting_massage_medium_skin_tone:',
':person_golfing:',
':person_golfing_dark_skin_tone:',
':person_golfing_light_skin_tone:',
':person_golfing_medium-dark_skin_tone:',
':person_golfing_medium-light_skin_tone:',
':person_golfing_medium_skin_tone:',
':person_in_bed:',
':person_in_bed_dark_skin_tone:',
':person_in_bed_light_skin_tone:',
':person_in_bed_medium-dark_skin_tone:',
':person_in_bed_medium-light_skin_tone:',
':person_in_bed_medium_skin_tone:',
':person_juggling:',
':person_juggling_dark_skin_tone:',
':person_juggling_light_skin_tone:',
':person_juggling_medium-dark_skin_tone:',
':person_juggling_medium-light_skin_tone:',
':person_juggling_medium_skin_tone:',
':person_lifting_weights:',
':person_lifting_weights_dark_skin_tone:',
':person_lifting_weights_light_skin_tone:',
':person_lifting_weights_medium-dark_skin_tone:',
':person_lifting_weights_medium-light_skin_tone:',
':person_lifting_weights_medium_skin_tone:',
':person_mountain_biking:',
':person_mountain_biking_dark_skin_tone:',
':person_mountain_biking_light_skin_tone:',
':person_mountain_biking_medium-dark_skin_tone:',
':person_mountain_biking_medium-light_skin_tone:',
':person_mountain_biking_medium_skin_tone:',
':person_playing_handball:',
':person_playing_handball_dark_skin_tone:',
':person_playing_handball_light_skin_tone:',
':person_playing_handball_medium-dark_skin_tone:',
':person_playing_handball_medium-light_skin_tone:',
':person_playing_handball_medium_skin_tone:',
':person_playing_water_polo:',
':person_playing_water_polo_dark_skin_tone:',
':person_playing_water_polo_light_skin_tone:',
':person_playing_water_polo_medium-dark_skin_tone:',
':person_playing_water_polo_medium-light_skin_tone:',
':person_playing_water_polo_medium_skin_tone:',
':person_pouting:',
':person_pouting_dark_skin_tone:',
':person_pouting_light_skin_tone:',
':person_pouting_medium-dark_skin_tone:',
':person_pouting_medium-light_skin_tone:',
':person_pouting_medium_skin_tone:',
':person_raising_hand:',
':person_raising_hand_dark_skin_tone:',
':person_raising_hand_light_skin_tone:',
':person_raising_hand_medium-dark_skin_tone:',
':person_raising_hand_medium-light_skin_tone:',
':person_raising_hand_medium_skin_tone:',
':person_rowing_boat:',
':person_rowing_boat_dark_skin_tone:',
':person_rowing_boat_light_skin_tone:',
':person_rowing_boat_medium-dark_skin_tone:',
':person_rowing_boat_medium-light_skin_tone:',
':person_rowing_boat_medium_skin_tone:',
':person_running:',
':person_running_dark_skin_tone:',
':person_running_light_skin_tone:',
':person_running_medium-dark_skin_tone:',
':person_running_medium-light_skin_tone:',
':person_running_medium_skin_tone:',
':person_shrugging:',
':person_shrugging_dark_skin_tone:',
':person_shrugging_light_skin_tone:',
':person_shrugging_medium-dark_skin_tone:',
':person_shrugging_medium-light_skin_tone:',
':person_shrugging_medium_skin_tone:',
':person_surfing:',
':person_surfing_dark_skin_tone:',
':person_surfing_light_skin_tone:',
':person_surfing_medium-dark_skin_tone:',
':person_surfing_medium-light_skin_tone:',
':person_surfing_medium_skin_tone:',
':person_swimming:',
':person_swimming_dark_skin_tone:',
':person_swimming_light_skin_tone:',
':person_swimming_medium-dark_skin_tone:',
':person_swimming_medium-light_skin_tone:',
':person_swimming_medium_skin_tone:',
':person_taking_bath:',
':person_taking_bath_dark_skin_tone:',
':person_taking_bath_light_skin_tone:',
':person_taking_bath_medium-dark_skin_tone:',
':person_taking_bath_medium-light_skin_tone:',
':person_taking_bath_medium_skin_tone:',
':person_tipping_hand:',
':person_tipping_hand_dark_skin_tone:',
':person_tipping_hand_light_skin_tone:',
':person_tipping_hand_medium-dark_skin_tone:',
':person_tipping_hand_medium-light_skin_tone:',
':person_tipping_hand_medium_skin_tone:',
':person_walking:',
':person_walking_dark_skin_tone:',
':person_walking_light_skin_tone:',
':person_walking_medium-dark_skin_tone:',
':person_walking_medium-light_skin_tone:',
':person_walking_medium_skin_tone:',
':person_wearing_turban:',
':person_wearing_turban_dark_skin_tone:',
':person_wearing_turban_light_skin_tone:',
':person_wearing_turban_medium-dark_skin_tone:',
':person_wearing_turban_medium-light_skin_tone:',
':person_wearing_turban_medium_skin_tone:',
':pick:',
':pig:',
':pig_face:',
':pig_nose:',
':pile_of_poo:',
':pill:',
':pine_decoration:',
':pineapple:',
':ping_pong:',
':pistol:',
':pizza:',
':place_of_worship:',
':play_button:',
':play_or_pause_button:',
':police_car:',
':police_car_light:',
':police_officer:',
':police_officer_dark_skin_tone:',
':police_officer_light_skin_tone:',
':police_officer_medium-dark_skin_tone:',
':police_officer_medium-light_skin_tone:',
':police_officer_medium_skin_tone:',
':poodle:',
':pool_8_ball:',
':popcorn:',
':post_office:',
':postal_horn:',
':postbox:',
':pot_of_food:',
':potable_water:',
':potato:',
':poultry_leg:',
':pound_banknote:',
':pouting_cat_face:',
':pouting_face:',
':prayer_beads:',
':pregnant_woman:',
':pregnant_woman_dark_skin_tone:',
':pregnant_woman_light_skin_tone:',
':pregnant_woman_medium-dark_skin_tone:',
':pregnant_woman_medium-light_skin_tone:',
':pregnant_woman_medium_skin_tone:',
':prince:',
':prince_dark_skin_tone:',
':prince_light_skin_tone:',
':prince_medium-dark_skin_tone:',
':prince_medium-light_skin_tone:',
':prince_medium_skin_tone:',
':princess:',
':princess_dark_skin_tone:',
':princess_light_skin_tone:',
':princess_medium-dark_skin_tone:',
':princess_medium-light_skin_tone:',
':princess_medium_skin_tone:',
':printer:',
':prohibited:',
':purple_heart:',
':purse:',
':pushpin:',
':question_mark:',
':rabbit:',
':rabbit_face:',
':racing_car:',
':radio:',
':radio_button:',
':radioactive:',
':railway_car:',
':railway_track:',
':rainbow:',
':rainbow_flag:',
':raised_back_of_hand:',
':raised_back_of_hand_dark_skin_tone:',
':raised_back_of_hand_light_skin_tone:',
':raised_back_of_hand_medium-dark_skin_tone:',
':raised_back_of_hand_medium-light_skin_tone:',
':raised_back_of_hand_medium_skin_tone:',
':raised_fist:',
':raised_fist_dark_skin_tone:',
':raised_fist_light_skin_tone:',
':raised_fist_medium-dark_skin_tone:',
':raised_fist_medium-light_skin_tone:',
':raised_fist_medium_skin_tone:',
':raised_hand:',
':raised_hand_dark_skin_tone:',
':raised_hand_light_skin_tone:',
':raised_hand_medium-dark_skin_tone:',
':raised_hand_medium-light_skin_tone:',
':raised_hand_medium_skin_tone:',
':raised_hand_with_fingers_splayed:',
':raised_hand_with_fingers_splayed_dark_skin_tone:',
':raised_hand_with_fingers_splayed_light_skin_tone:',
':raised_hand_with_fingers_splayed_medium-dark_skin_tone:',
':raised_hand_with_fingers_splayed_medium_skin_tone:',
':raising_hands:',
':raising_hands_dark_skin_tone:',
':raising_hands_light_skin_tone:',
':raising_hands_medium-dark_skin_tone:',
':raising_hands_medium-light_skin_tone:',
':raising_hands_medium_skin_tone:',
':ram:',
':rat:',
':record_button:',
':recycling_symbol:',
':red_apple:',
':red_circle:',
':red_heart:',
':red_paper_lantern:',
':red_triangle_pointed_down:',
':red_triangle_pointed_up:',
':registered:',
':relieved_face:',
':reminder_ribbon:',
':repeat_button:',
':repeat_single_button:',
':rescue_worker’s_helmet:',
':restroom:',
':reverse_button:',
':revolving_hearts:',
':rhinoceros:',
':ribbon:',
':rice_ball:',
':rice_cracker:',
':right-facing_fist:',
':right-facing_fist_dark_skin_tone:',
':right-facing_fist_light_skin_tone:',
':right-facing_fist_medium-dark_skin_tone:',
':right-facing_fist_medium-light_skin_tone:',
':right-facing_fist_medium_skin_tone:',
':right-pointing_magnifying_glass:',
':right_anger_bubble:',
':right_arrow:',
':right_arrow_curving_down:',
':right_arrow_curving_left:',
':right_arrow_curving_up:',
':ring:',
':roasted_sweet_potato:',
':robot_face:',
':rocket:',
':rolled-up_newspaper:',
':roller_coaster:',
':rolling_on_the_floor_laughing:',
':rooster:',
':rose:',
':rosette:',
':round_pushpin:',
':rugby_football:',
':running_shirt:',
':running_shoe:',
':sailboat:',
':sake:',
':satellite:',
':satellite_antenna:',
':saxophone:',
':school:',
':school_backpack:',
':scissors:',
':scorpion:',
':scroll:',
':seat:',
':see-no-evil_monkey:',
':seedling:',
':selfie:',
':selfie_dark_skin_tone:',
':selfie_light_skin_tone:',
':selfie_medium-dark_skin_tone:',
':selfie_medium-light_skin_tone:',
':selfie_medium_skin_tone:',
':seven-thirty:',
':seven_o’clock:',
':shallow_pan_of_food:',
':shamrock:',
':shark:',
':shaved_ice:',
':sheaf_of_rice:',
':sheep:',
':shield:',
':shinto_shrine:',
':ship:',
':shooting_star:',
':shopping_bags:',
':shopping_cart:',
':shortcake:',
':shower:',
':shrimp:',
':shuffle_tracks_button:',
':sign_of_the_horns:',
':sign_of_the_horns_dark_skin_tone:',
':sign_of_the_horns_light_skin_tone:',
':sign_of_the_horns_medium-dark_skin_tone:',
':sign_of_the_horns_medium-light_skin_tone:',
':sign_of_the_horns_medium_skin_tone:',
':six-thirty:',
':six_o’clock:',
':skier:',
':skis:',
':skull:',
':skull_and_crossbones:',
':sleeping_face:',
':sleepy_face:',
':slightly_frowning_face:',
':slightly_smiling_face:',
':slot_machine:',
':small_airplane:',
':small_blue_diamond:',
':small_orange_diamond:',
':smiling_cat_face_with_heart-eyes:',
':smiling_cat_face_with_open_mouth:',
':smiling_face:',
':smiling_face_with_halo:',
':smiling_face_with_heart-eyes:',
':smiling_face_with_horns:',
':smiling_face_with_open_mouth:',
':smiling_face_with_open_mouth_&_closed_eyes:',
':smiling_face_with_open_mouth_&_cold_sweat:',
':smiling_face_with_open_mouth_&_smiling_eyes:',
':smiling_face_with_smiling_eyes:',
':smiling_face_with_sunglasses:',
':smirking_face:',
':snail:',
':snake:',
':sneezing_face:',
':snow-capped_mountain:',
':snowboarder:',
':snowboarder_dark_skin_tone:',
':snowboarder_light_skin_tone:',
':snowboarder_medium-dark_skin_tone:',
':snowboarder_medium-light_skin_tone:',
':snowboarder_medium_skin_tone:',
':snowflake:',
':snowman:',
':snowman_without_snow:',
':soccer_ball:',
':soft_ice_cream:',
':spade_suit:',
':spaghetti:',
':sparkle:',
':sparkler:',
':sparkles:',
':sparkling_heart:',
':speak-no-evil_monkey:',
':speaker_high_volume:',
':speaker_low_volume:',
':speaker_medium_volume:',
':speaking_head:',
':speech_balloon:',
':speedboat:',
':spider:',
':spider_web:',
':spiral_calendar:',
':spiral_notepad:',
':spiral_shell:',
':spoon:',
':sport_utility_vehicle:',
':sports_medal:',
':spouting_whale:',
':squid:',
':stadium:',
':star_and_crescent:',
':star_of_David:',
':station:',
':steaming_bowl:',
':stop_button:',
':stop_sign:',
':stopwatch:',
':straight_ruler:',
':strawberry:',
':studio_microphone:',
':stuffed_flatbread:',
':sun:',
':sun_behind_cloud:',
':sun_behind_large_cloud:',
':sun_behind_rain_cloud:',
':sun_behind_small_cloud:',
':sun_with_face:',
':sunflower:',
':sunglasses:',
':sunrise:',
':sunrise_over_mountains:',
':sunset:',
':sushi:',
':suspension_railway:',
':sweat_droplets:',
':synagogue:',
':syringe:',
':t-shirt:',
':taco:',
':tanabata_tree:',
':tangerine:',
':taxi:',
':teacup_without_handle:',
':tear-off_calendar:',
':telephone:',
':telephone_receiver:',
':telescope:',
':television:',
':ten-thirty:',
':ten_o’clock:',
':tennis:',
':tent:',
':thermometer:',
':thinking_face:',
':thought_balloon:',
':three-thirty:',
':three_o’clock:',
':thumbs_down:',
':thumbs_down_dark_skin_tone:',
':thumbs_down_light_skin_tone:',
':thumbs_down_medium-dark_skin_tone:',
':thumbs_down_medium-light_skin_tone:',
':thumbs_down_medium_skin_tone:',
':thumbs_up:',
':thumbs_up_dark_skin_tone:',
':thumbs_up_light_skin_tone:',
':thumbs_up_medium-dark_skin_tone:',
':thumbs_up_medium-light_skin_tone:',
':thumbs_up_medium_skin_tone:',
':ticket:',
':tiger:',
':tiger_face:',
':timer_clock:',
':tired_face:',
':toilet:',
':tomato:',
':tongue:',
':top_hat:',
':tornado:',
':trackball:',
':tractor:',
':trade_mark:',
':train:',
':tram:',
':tram_car:',
':triangular_flag:',
':triangular_ruler:',
':trident_emblem:',
':trolleybus:',
':trophy:',
':tropical_drink:',
':tropical_fish:',
':trumpet:',
':tulip:',
':tumbler_glass:',
':turkey:',
':turtle:',
':twelve-thirty:',
':twelve_o’clock:',
':two-hump_camel:',
':two-thirty:',
':two_hearts:',
':two_men_holding_hands:',
':two_o’clock:',
':two_women_holding_hands:',
':umbrella:',
':umbrella_on_ground:',
':umbrella_with_rain_drops:',
':unamused_face:',
':unicorn_face:',
':unlocked:',
':up-down_arrow:',
':up-left_arrow:',
':up-right_arrow:',
':up_arrow:',
':up_button:',
':upside-down_face:',
':vertical_traffic_light:',
':vibration_mode:',
':victory_hand:',
':victory_hand_dark_skin_tone:',
':victory_hand_light_skin_tone:',
':victory_hand_medium-dark_skin_tone:',
':victory_hand_medium-light_skin_tone:',
':victory_hand_medium_skin_tone:',
':video_camera:',
':video_game:',
':videocassette:',
':violin:',
':volcano:',
':volleyball:',
':vulcan_salute:',
':vulcan_salute_dark_skin_tone:',
':vulcan_salute_light_skin_tone:',
':vulcan_salute_medium-dark_skin_tone:',
':vulcan_salute_medium-light_skin_tone:',
':vulcan_salute_medium_skin_tone:',
':waning_crescent_moon:',
':waning_gibbous_moon:',
':warning:',
':wastebasket:',
':watch:',
':water_buffalo:',
':water_closet:',
':water_wave:',
':watermelon:',
':waving_hand:',
':waving_hand_dark_skin_tone:',
':waving_hand_light_skin_tone:',
':waving_hand_medium-dark_skin_tone:',
':waving_hand_medium-light_skin_tone:',
':waving_hand_medium_skin_tone:',
':wavy_dash:',
':waxing_crescent_moon:',
':waxing_gibbous_moon:',
':weary_cat_face:',
':weary_face:',
':wedding:',
':whale:',
':wheel_of_dharma:',
':wheelchair_symbol:',
':white_circle:',
':white_exclamation_mark:',
':white_flag:',
':white_flower:',
':white_heavy_check_mark:',
':white_large_square:',
':white_medium-small_square:',
':white_medium_square:',
':white_medium_star:',
':white_question_mark:',
':white_small_square:',
':white_square_button:',
':wilted_flower:',
':wind_chime:',
':wind_face:',
':wine_glass:',
':winking_face:',
':wolf_face:',
':woman:',
':woman_artist:',
':woman_artist_dark_skin_tone:',
':woman_artist_light_skin_tone:',
':woman_artist_medium-dark_skin_tone:',
':woman_artist_medium-light_skin_tone:',
':woman_artist_medium_skin_tone:',
':woman_astronaut:',
':woman_astronaut_dark_skin_tone:',
':woman_astronaut_light_skin_tone:',
':woman_astronaut_medium-dark_skin_tone:',
':woman_astronaut_medium-light_skin_tone:',
':woman_astronaut_medium_skin_tone:',
':woman_biking:',
':woman_biking_dark_skin_tone:',
':woman_biking_light_skin_tone:',
':woman_biking_medium-dark_skin_tone:',
':woman_biking_medium-light_skin_tone:',
':woman_biking_medium_skin_tone:',
':woman_bouncing_ball:',
':woman_bouncing_ball_dark_skin_tone:',
':woman_bouncing_ball_light_skin_tone:',
':woman_bouncing_ball_medium-dark_skin_tone:',
':woman_bouncing_ball_medium-light_skin_tone:',
':woman_bouncing_ball_medium_skin_tone:',
':woman_bowing:',
':woman_bowing_dark_skin_tone:',
':woman_bowing_light_skin_tone:',
':woman_bowing_medium-dark_skin_tone:',
':woman_bowing_medium-light_skin_tone:',
':woman_bowing_medium_skin_tone:',
':woman_cartwheeling:',
':woman_cartwheeling_dark_skin_tone:',
':woman_cartwheeling_light_skin_tone:',
':woman_cartwheeling_medium-dark_skin_tone:',
':woman_cartwheeling_medium-light_skin_tone:',
':woman_cartwheeling_medium_skin_tone:',
':woman_construction_worker:',
':woman_construction_worker_dark_skin_tone:',
':woman_construction_worker_light_skin_tone:',
':woman_construction_worker_medium-dark_skin_tone:',
':woman_construction_worker_medium-light_skin_tone:',
':woman_construction_worker_medium_skin_tone:',
':woman_cook:',
':woman_cook_dark_skin_tone:',
':woman_cook_light_skin_tone:',
':woman_cook_medium-dark_skin_tone:',
':woman_cook_medium-light_skin_tone:',
':woman_cook_medium_skin_tone:',
':woman_dancing:',
':woman_dancing_dark_skin_tone:',
':woman_dancing_light_skin_tone:',
':woman_dancing_medium-dark_skin_tone:',
':woman_dancing_medium-light_skin_tone:',
':woman_dancing_medium_skin_tone:',
':woman_dark_skin_tone:',
':woman_detective:',
':woman_detective_dark_skin_tone:',
':woman_detective_light_skin_tone:',
':woman_detective_medium-dark_skin_tone:',
':woman_detective_medium-light_skin_tone:',
':woman_detective_medium_skin_tone:',
':woman_facepalming:',
':woman_facepalming_dark_skin_tone:',
':woman_facepalming_light_skin_tone:',
':woman_facepalming_medium-dark_skin_tone:',
':woman_facepalming_medium-light_skin_tone:',
':woman_facepalming_medium_skin_tone:',
':woman_factory_worker:',
':woman_factory_worker_dark_skin_tone:',
':woman_factory_worker_light_skin_tone:',
':woman_factory_worker_medium-dark_skin_tone:',
':woman_factory_worker_medium-light_skin_tone:',
':woman_factory_worker_medium_skin_tone:',
':woman_farmer:',
':woman_farmer_dark_skin_tone:',
':woman_farmer_light_skin_tone:',
':woman_farmer_medium-dark_skin_tone:',
':woman_farmer_medium-light_skin_tone:',
':woman_farmer_medium_skin_tone:',
':woman_firefighter:',
':woman_firefighter_dark_skin_tone:',
':woman_firefighter_light_skin_tone:',
':woman_firefighter_medium-dark_skin_tone:',
':woman_firefighter_medium-light_skin_tone:',
':woman_firefighter_medium_skin_tone:',
':woman_frowning:',
':woman_frowning_dark_skin_tone:',
':woman_frowning_light_skin_tone:',
':woman_frowning_medium-dark_skin_tone:',
':woman_frowning_medium-light_skin_tone:',
':woman_frowning_medium_skin_tone:',
':woman_gesturing_NO:',
':woman_gesturing_NO_dark_skin_tone:',
':woman_gesturing_NO_light_skin_tone:',
':woman_gesturing_NO_medium-dark_skin_tone:',
':woman_gesturing_NO_medium-light_skin_tone:',
':woman_gesturing_NO_medium_skin_tone:',
':woman_gesturing_OK:',
':woman_gesturing_OK_dark_skin_tone:',
':woman_gesturing_OK_light_skin_tone:',
':woman_gesturing_OK_medium-dark_skin_tone:',
':woman_gesturing_OK_medium-light_skin_tone:',
':woman_gesturing_OK_medium_skin_tone:',
':woman_getting_haircut:',
':woman_getting_haircut_dark_skin_tone:',
':woman_getting_haircut_light_skin_tone:',
':woman_getting_haircut_medium-dark_skin_tone:',
':woman_getting_haircut_medium-light_skin_tone:',
':woman_getting_haircut_medium_skin_tone:',
':woman_getting_massage:',
':woman_getting_massage_dark_skin_tone:',
':woman_getting_massage_light_skin_tone:',
':woman_getting_massage_medium-dark_skin_tone:',
':woman_getting_massage_medium-light_skin_tone:',
':woman_getting_massage_medium_skin_tone:',
':woman_golfing:',
':woman_golfing_dark_skin_tone:',
':woman_golfing_light_skin_tone:',
':woman_golfing_medium-dark_skin_tone:',
':woman_golfing_medium-light_skin_tone:',
':woman_golfing_medium_skin_tone:',
':woman_guard:',
':woman_guard_dark_skin_tone:',
':woman_guard_light_skin_tone:',
':woman_guard_medium-dark_skin_tone:',
':woman_guard_medium-light_skin_tone:',
':woman_guard_medium_skin_tone:',
':woman_health_worker:',
':woman_health_worker_dark_skin_tone:',
':woman_health_worker_light_skin_tone:',
':woman_health_worker_medium-dark_skin_tone:',
':woman_health_worker_medium-light_skin_tone:',
':woman_health_worker_medium_skin_tone:',
':woman_judge:',
':woman_judge_dark_skin_tone:',
':woman_judge_light_skin_tone:',
':woman_judge_medium-dark_skin_tone:',
':woman_judge_medium-light_skin_tone:',
':woman_judge_medium_skin_tone:',
':woman_juggling:',
':woman_juggling_dark_skin_tone:',
':woman_juggling_light_skin_tone:',
':woman_juggling_medium-dark_skin_tone:',
':woman_juggling_medium-light_skin_tone:',
':woman_juggling_medium_skin_tone:',
':woman_lifting_weights:',
':woman_lifting_weights_dark_skin_tone:',
':woman_lifting_weights_light_skin_tone:',
':woman_lifting_weights_medium-dark_skin_tone:',
':woman_lifting_weights_medium-light_skin_tone:',
':woman_lifting_weights_medium_skin_tone:',
':woman_light_skin_tone:',
':woman_mechanic:',
':woman_mechanic_dark_skin_tone:',
':woman_mechanic_light_skin_tone:',
':woman_mechanic_medium-dark_skin_tone:',
':woman_mechanic_medium-light_skin_tone:',
':woman_mechanic_medium_skin_tone:',
':woman_medium-dark_skin_tone:',
':woman_medium-light_skin_tone:',
':woman_medium_skin_tone:',
':woman_mountain_biking:',
':woman_mountain_biking_dark_skin_tone:',
':woman_mountain_biking_light_skin_tone:',
':woman_mountain_biking_medium-dark_skin_tone:',
':woman_mountain_biking_medium-light_skin_tone:',
':woman_mountain_biking_medium_skin_tone:',
':woman_office_worker:',
':woman_office_worker_dark_skin_tone:',
':woman_office_worker_light_skin_tone:',
':woman_office_worker_medium-dark_skin_tone:',
':woman_office_worker_medium-light_skin_tone:',
':woman_office_worker_medium_skin_tone:',
':woman_pilot:',
':woman_pilot_dark_skin_tone:',
':woman_pilot_light_skin_tone:',
':woman_pilot_medium-dark_skin_tone:',
':woman_pilot_medium-light_skin_tone:',
':woman_pilot_medium_skin_tone:',
':woman_playing_handball:',
':woman_playing_handball_dark_skin_tone:',
':woman_playing_handball_light_skin_tone:',
':woman_playing_handball_medium-dark_skin_tone:',
':woman_playing_handball_medium-light_skin_tone:',
':woman_playing_handball_medium_skin_tone:',
':woman_playing_water_polo:',
':woman_playing_water_polo_dark_skin_tone:',
':woman_playing_water_polo_light_skin_tone:',
':woman_playing_water_polo_medium-dark_skin_tone:',
':woman_playing_water_polo_medium-light_skin_tone:',
':woman_playing_water_polo_medium_skin_tone:',
':woman_police_officer:',
':woman_police_officer_dark_skin_tone:',
':woman_police_officer_light_skin_tone:',
':woman_police_officer_medium-dark_skin_tone:',
':woman_police_officer_medium-light_skin_tone:',
':woman_police_officer_medium_skin_tone:',
':woman_pouting:',
':woman_pouting_dark_skin_tone:',
':woman_pouting_light_skin_tone:',
':woman_pouting_medium-dark_skin_tone:',
':woman_pouting_medium-light_skin_tone:',
':woman_pouting_medium_skin_tone:',
':woman_raising_hand:',
':woman_raising_hand_dark_skin_tone:',
':woman_raising_hand_light_skin_tone:',
':woman_raising_hand_medium-dark_skin_tone:',
':woman_raising_hand_medium-light_skin_tone:',
':woman_raising_hand_medium_skin_tone:',
':woman_rowing_boat:',
':woman_rowing_boat_dark_skin_tone:',
':woman_rowing_boat_light_skin_tone:',
':woman_rowing_boat_medium-dark_skin_tone:',
':woman_rowing_boat_medium-light_skin_tone:',
':woman_rowing_boat_medium_skin_tone:',
':woman_running:',
':woman_running_dark_skin_tone:',
':woman_running_light_skin_tone:',
':woman_running_medium-dark_skin_tone:',
':woman_running_medium-light_skin_tone:',
':woman_running_medium_skin_tone:',
':woman_scientist:',
':woman_scientist_dark_skin_tone:',
':woman_scientist_light_skin_tone:',
':woman_scientist_medium-dark_skin_tone:',
':woman_scientist_medium-light_skin_tone:',
':woman_scientist_medium_skin_tone:',
':woman_shrugging:',
':woman_shrugging_dark_skin_tone:',
':woman_shrugging_light_skin_tone:',
':woman_shrugging_medium-dark_skin_tone:',
':woman_shrugging_medium-light_skin_tone:',
':woman_shrugging_medium_skin_tone:',
':woman_singer:',
':woman_singer_dark_skin_tone:',
':woman_singer_light_skin_tone:',
':woman_singer_medium-dark_skin_tone:',
':woman_singer_medium-light_skin_tone:',
':woman_singer_medium_skin_tone:',
':woman_student:',
':woman_student_dark_skin_tone:',
':woman_student_light_skin_tone:',
':woman_student_medium-dark_skin_tone:',
':woman_student_medium-light_skin_tone:',
':woman_student_medium_skin_tone:',
':woman_surfing:',
':woman_surfing_dark_skin_tone:',
':woman_surfing_light_skin_tone:',
':woman_surfing_medium-dark_skin_tone:',
':woman_surfing_medium-light_skin_tone:',
':woman_surfing_medium_skin_tone:',
':woman_swimming:',
':woman_swimming_dark_skin_tone:',
':woman_swimming_light_skin_tone:',
':woman_swimming_medium-dark_skin_tone:',
':woman_swimming_medium-light_skin_tone:',
':woman_swimming_medium_skin_tone:',
':woman_teacher:',
':woman_teacher_dark_skin_tone:',
':woman_teacher_light_skin_tone:',
':woman_teacher_medium-dark_skin_tone:',
':woman_teacher_medium-light_skin_tone:',
':woman_teacher_medium_skin_tone:',
':woman_technologist:',
':woman_technologist_dark_skin_tone:',
':woman_technologist_light_skin_tone:',
':woman_technologist_medium-dark_skin_tone:',
':woman_technologist_medium-light_skin_tone:',
':woman_technologist_medium_skin_tone:',
':woman_tipping_hand:',
':woman_tipping_hand_dark_skin_tone:',
':woman_tipping_hand_light_skin_tone:',
':woman_tipping_hand_medium-dark_skin_tone:',
':woman_tipping_hand_medium-light_skin_tone:',
':woman_tipping_hand_medium_skin_tone:',
':woman_walking:',
':woman_walking_dark_skin_tone:',
':woman_walking_light_skin_tone:',
':woman_walking_medium-dark_skin_tone:',
':woman_walking_medium-light_skin_tone:',
':woman_walking_medium_skin_tone:',
':woman_wearing_turban:',
':woman_wearing_turban_dark_skin_tone:',
':woman_wearing_turban_light_skin_tone:',
':woman_wearing_turban_medium-dark_skin_tone:',
':woman_wearing_turban_medium-light_skin_tone:',
':woman_wearing_turban_medium_skin_tone:',
':woman’s_boot:',
':woman’s_clothes:',
':woman’s_hat:',
':woman’s_sandal:',
':women_with_bunny_ears_partying:',
':women_wrestling:',
':women’s_room:',
':world_map:',
':worried_face:',
':wrapped_gift:',
':wrench:',
':writing_hand:',
':writing_hand_dark_skin_tone:',
':writing_hand_light_skin_tone:',
':writing_hand_medium-dark_skin_tone:',
':writing_hand_medium-light_skin_tone:',
':writing_hand_medium_skin_tone:',
':yellow_heart:',
':yen_banknote:',
':yin_yang:',
':zipper-mouth_face:',
':zzz:',
':admission_tickets:',
':aerial_tramway:',
':airplane:',
':airplane_arriving:',
':airplane_departure:',
':alarm_clock:',
':alembic:',
':space_invader:',
':ambulance:',
':football:',
':amphora:',
':anchor:',
':anger:',
':angry:',
':anguished:',
':ant:',
':signal_strength:',
':arrows_counterclockwise:',
':aquarius:',
':aries:',
':arrow_heading_down:',
':arrow_heading_up:',
':articulated_lorry:',
':art:',
':astonished:',
':athletic_shoe:',
':atom_symbol:',
':eggplant:',
':atm:',
':car:',
':red_car:',
':baby:',
':angel:',
':baby_bottle:',
':baby_chick:',
':baby_symbol:',
':back:',
':camel:',
':badminton_racquet_and_shuttlecock:',
':baggage_claim:',
':balloon:',
':ballot_box_with_ballot:',
':ballot_box_with_check:',
':banana:',
':bank:',
':dollar:',
':euro:',
':pound:',
':yen:',
':bar_chart:',
':barber:',
':baseball:',
':basketball:',
':bath:',
':bathtub:',
':battery:',
':beach_with_umbrella:',
':bear:',
':heartbeat:',
':bed:',
':beer:',
':bell:',
':no_bell:',
':bellhop_bell:',
':bento:',
':bike:',
':bicyclist:',
':bikini:',
':8ball:',
':biohazard_sign:',
':bird:',
':birthday:',
':black_circle_for_record:',
':clubs:',
':diamonds:',
':arrow_double_down:',
':hearts:',
':black_large_square:',
':rewind:',
':black_left__pointing_double_triangle_with_vertical_bar:',
':arrow_backward:',
':black_medium_small_square:',
':black_medium_square:',
':black_nib:',
':question:',
':fast_forward:',
':black_right__pointing_double_triangle_with_vertical_bar:',
':arrow_forward:',
':black_right__pointing_triangle_with_double_vertical_bar:',
':arrow_right:',
':scissors:',
':black_small_square:',
':spades:',
':black_square_button:',
':black_square_for_stop:',
':sunny:',
':phone:',
':telephone:',
':recycle:',
':arrow_double_up:',
':blossom:',
':blowfish:',
':blue_book:',
':blue_heart:',
':boar:',
':bomb:',
':bookmark:',
':bookmark_tabs:',
':books:',
':bottle_with_popping_cork:',
':bouquet:',
':bow_and_arrow:',
':bowling:',
':boy:',
':bread:',
':bride_with_veil:',
':bridge_at_night:',
':briefcase:',
':broken_heart:',
':bug:',
':building_construction:',
':burrito:',
':bus:',
':busstop:',
':bust_in_silhouette:',
':busts_in_silhouette:',
':cactus:',
':date:',
':camera:',
':camera_with_flash:',
':camping:',
':cancer:',
':candle:',
':candy:',
':capricorn:',
':card_file_box:',
':card_index:',
':card_index_dividers:',
':carousel_horse:',
':flags:',
':cat2:',
':cat:',
':joy_cat:',
':smirk_cat:',
':chains:',
':chart_with_downwards_trend:',
':chart_with_upwards_trend:',
':chart:',
':mega:',
':cheese_wedge:',
':checkered_flag:',
':cherries:',
':cherry_blossom:',
':chestnut:',
':chicken:',
':children_crossing:',
':chipmunk:',
':chocolate_bar:',
':christmas_tree:',
':church:',
':cinema:',
':accept:',
':ideograph_advantage:',
':congratulations:',
':secret:',
':m:',
':circus_tent:',
':cityscape:',
':city_sunset:',
':clapper:',
':clap:',
':classical_building:',
':beers:',
':clipboard:',
':clock830:',
':clock8:',
':clock1130:',
':clock11:',
':clock530:',
':clock5:',
':clock430:',
':clock4:',
':clock930:',
':clock9:',
':clock130:',
':clock1:',
':clock730:',
':clock7:',
':clock630:',
':clock6:',
':clock1030:',
':clock10:',
':clock330:',
':clock3:',
':clock1230:',
':clock12:',
':clock230:',
':clock2:',
':arrows_clockwise:',
':repeat:',
':repeat_one:',
':closed_book:',
':closed_lock_with_key:',
':mailbox_closed:',
':mailbox:',
':closed_umbrella:',
':cloud:',
':cloud_with_lightning:',
':cloud_with_rain:',
':cloud_with_snow:',
':cloud_with_tornado:',
':cocktail:',
':coffin:',
':boom:',
':collision:',
':comet:',
':compression:',
':confetti_ball:',
':confounded:',
':confused:',
':construction:',
':construction_worker:',
':control_knobs:',
':convenience_store:',
':rice:',
':cookie:',
':egg:',
':copyright:',
':couch_and_lamp:',
':couple_with_heart:',
':cow2:',
':cow:',
':crab:',
':credit_card:',
':crescent_moon:',
':cricket_bat_and_ball:',
':crocodile:',
':x:',
':crossed_flags:',
':crossed_swords:',
':crown:',
':crying_cat_face:',
':cry:',
':crystal_ball:',
':curly_loop:',
':currency_exchange:',
':curry:',
':custard:',
':customs:',
':cyclone:',
':dagger_knife:',
':dancer:',
':dango:',
':dark_sunglasses:',
':dash:',
':deciduous_tree:',
':truck:',
':department_store:',
':derelict_house_building:',
':desert:',
':desert_island:',
':desktop_computer:',
':diamond_shape_with_a_dot_inside:',
':dart:',
':disappointed_relieved:',
':disappointed:',
':dizzy_face:',
':dizzy:',
':do_not_litter:',
':dog2:',
':dog:',
':dolphin:',
':flipper:',
':door:',
':loop:',
':bangbang:',
':double_vertical_bar:',
':doughnut:',
':dove_of_peace:',
':small_red_triangle_down:',
':arrow_down_small:',
':arrow_down:',
':dragon:',
':dragon_face:',
':dress:',
':dromedary_camel:',
':droplet:',
':dvd:',
':e__mail:',
':ear:',
':corn:',
':ear_of_rice:',
':earth_americas:',
':earth_asia:',
':earth_africa:',
':eight_pointed_black_star:',
':eight_spoked_asterisk:',
':eject_symbol:',
':bulb:',
':electric_plug:',
':flashlight:',
':elephant:',
':emoji_modifier_fitzpatrick_type__1__2:',
':emoji_modifier_fitzpatrick_type__3:',
':emoji_modifier_fitzpatrick_type__4:',
':emoji_modifier_fitzpatrick_type__5:',
':emoji_modifier_fitzpatrick_type__6:',
':end:',
':email:',
':envelope:',
':envelope_with_arrow:',
':european_castle:',
':european_post_office:',
':evergreen_tree:',
':interrobang:',
':expressionless:',
':alien:',
':eye:',
':eyeglasses:',
':eyes:',
':massage:',
':yum:',
':scream:',
':kissing_heart:',
':sweat:',
':face_with_head__bandage:',
':triumph:',
':mask:',
':no_good:',
':ok_woman:',
':open_mouth:',
':cold_sweat:',
':face_with_rolling_eyes:',
':stuck_out_tongue:',
':stuck_out_tongue_closed_eyes:',
':stuck_out_tongue_winking_eye:',
':joy:',
':face_with_thermometer:',
':no_mouth:',
':factory:',
':fallen_leaf:',
':family:',
':santa:',
':fax:',
':fearful:',
':ferris_wheel:',
':ferry:',
':field_hockey_stick_and_ball:',
':file_cabinet:',
':file_folder:',
':film_frames:',
':film_projector:',
':fire:',
':fire_engine:',
':sparkler:',
':fireworks:',
':first_quarter_moon:',
':first_quarter_moon_with_face:',
':fish:',
':fish_cake:',
':fishing_pole_and_fish:',
':facepunch:',
':punch:',
':flag_for_Afghanistan:',
':flag_for_Albania:',
':flag_for_Algeria:',
':flag_for_American_Samoa:',
':flag_for_Andorra:',
':flag_for_Angola:',
':flag_for_Anguilla:',
':flag_for_Antarctica:',
':flag_for_Antigua_&_Barbuda:',
':flag_for_Argentina:',
':flag_for_Armenia:',
':flag_for_Aruba:',
':flag_for_Ascension_Island:',
':flag_for_Australia:',
':flag_for_Austria:',
':flag_for_Azerbaijan:',
':flag_for_Bahamas:',
':flag_for_Bahrain:',
':flag_for_Bangladesh:',
':flag_for_Barbados:',
':flag_for_Belarus:',
':flag_for_Belgium:',
':flag_for_Belize:',
':flag_for_Benin:',
':flag_for_Bermuda:',
':flag_for_Bhutan:',
':flag_for_Bolivia:',
':flag_for_Bosnia_&_Herzegovina:',
':flag_for_Botswana:',
':flag_for_Bouvet_Island:',
':flag_for_Brazil:',
':flag_for_British_Indian_Ocean_Territory:',
':flag_for_British_Virgin_Islands:',
':flag_for_Brunei:',
':flag_for_Bulgaria:',
':flag_for_Burkina_Faso:',
':flag_for_Burundi:',
':flag_for_Cambodia:',
':flag_for_Cameroon:',
':flag_for_Canada:',
':flag_for_Canary_Islands:',
':flag_for_Cape_Verde:',
':flag_for_Caribbean_Netherlands:',
':flag_for_Cayman_Islands:',
':flag_for_Central_African_Republic:',
':flag_for_Ceuta_&_Melilla:',
':flag_for_Chad:',
':flag_for_Chile:',
':flag_for_China:',
':flag_for_Christmas_Island:',
':flag_for_Clipperton_Island:',
':flag_for_Cocos__Islands:',
':flag_for_Colombia:',
':flag_for_Comoros:',
':flag_for_Congo____Brazzaville:',
':flag_for_Congo____Kinshasa:',
':flag_for_Cook_Islands:',
':flag_for_Costa_Rica:',
':flag_for_Croatia:',
':flag_for_Cuba:',
':flag_for_Curaçao:',
':flag_for_Cyprus:',
':flag_for_Czech_Republic:',
':flag_for_Côte_d’Ivoire:',
':flag_for_Denmark:',
':flag_for_Diego_Garcia:',
':flag_for_Djibouti:',
':flag_for_Dominica:',
':flag_for_Dominican_Republic:',
':flag_for_Ecuador:',
':flag_for_Egypt:',
':flag_for_El_Salvador:',
':flag_for_Equatorial_Guinea:',
':flag_for_Eritrea:',
':flag_for_Estonia:',
':flag_for_Ethiopia:',
':flag_for_European_Union:',
':flag_for_Falkland_Islands:',
':flag_for_Faroe_Islands:',
':flag_for_Fiji:',
':flag_for_Finland:',
':flag_for_France:',
':flag_for_French_Guiana:',
':flag_for_French_Polynesia:',
':flag_for_French_Southern_Territories:',
':flag_for_Gabon:',
':flag_for_Gambia:',
':flag_for_Georgia:',
':flag_for_Germany:',
':flag_for_Ghana:',
':flag_for_Gibraltar:',
':flag_for_Greece:',
':flag_for_Greenland:',
':flag_for_Grenada:',
':flag_for_Guadeloupe:',
':flag_for_Guam:',
':flag_for_Guatemala:',
':flag_for_Guernsey:',
':flag_for_Guinea:',
':flag_for_Guinea__Bissau:',
':flag_for_Guyana:',
':flag_for_Haiti:',
':flag_for_Heard_&_McDonald_Islands:',
':flag_for_Honduras:',
':flag_for_Hong_Kong:',
':flag_for_Hungary:',
':flag_for_Iceland:',
':flag_for_India:',
':flag_for_Indonesia:',
':flag_for_Iran:',
':flag_for_Iraq:',
':flag_for_Ireland:',
':flag_for_Isle_of_Man:',
':flag_for_Israel:',
':flag_for_Italy:',
':flag_for_Jamaica:',
':flag_for_Japan:',
':flag_for_Jersey:',
':flag_for_Jordan:',
':flag_for_Kazakhstan:',
':flag_for_Kenya:',
':flag_for_Kiribati:',
':flag_for_Kosovo:',
':flag_for_Kuwait:',
':flag_for_Kyrgyzstan:',
':flag_for_Laos:',
':flag_for_Latvia:',
':flag_for_Lebanon:',
':flag_for_Lesotho:',
':flag_for_Liberia:',
':flag_for_Libya:',
':flag_for_Liechtenstein:',
':flag_for_Lithuania:',
':flag_for_Luxembourg:',
':flag_for_Macau:',
':flag_for_Macedonia:',
':flag_for_Madagascar:',
':flag_for_Malawi:',
':flag_for_Malaysia:',
':flag_for_Maldives:',
':flag_for_Mali:',
':flag_for_Malta:',
':flag_for_Marshall_Islands:',
':flag_for_Martinique:',
':flag_for_Mauritania:',
':flag_for_Mauritius:',
':flag_for_Mayotte:',
':flag_for_Mexico:',
':flag_for_Micronesia:',
':flag_for_Moldova:',
':flag_for_Monaco:',
':flag_for_Mongolia:',
':flag_for_Montenegro:',
':flag_for_Montserrat:',
':flag_for_Morocco:',
':flag_for_Mozambique:',
':flag_for_Myanmar:',
':flag_for_Namibia:',
':flag_for_Nauru:',
':flag_for_Nepal:',
':flag_for_Netherlands:',
':flag_for_New_Caledonia:',
':flag_for_New_Zealand:',
':flag_for_Nicaragua:',
':flag_for_Niger:',
':flag_for_Nigeria:',
':flag_for_Niue:',
':flag_for_Norfolk_Island:',
':flag_for_North_Korea:',
':flag_for_Northern_Mariana_Islands:',
':flag_for_Norway:',
':flag_for_Oman:',
':flag_for_Pakistan:',
':flag_for_Palau:',
':flag_for_Palestinian_Territories:',
':flag_for_Panama:',
':flag_for_Papua_New_Guinea:',
':flag_for_Paraguay:',
':flag_for_Peru:',
':flag_for_Philippines:',
':flag_for_Pitcairn_Islands:',
':flag_for_Poland:',
':flag_for_Portugal:',
':flag_for_Puerto_Rico:',
':flag_for_Qatar:',
':flag_for_Romania:',
':flag_for_Russia:',
':flag_for_Rwanda:',
':flag_for_Réunion:',
':flag_for_Samoa:',
':flag_for_San_Marino:',
':flag_for_Saudi_Arabia:',
':flag_for_Senegal:',
':flag_for_Serbia:',
':flag_for_Seychelles:',
':flag_for_Sierra_Leone:',
':flag_for_Singapore:',
':flag_for_Sint_Maarten:',
':flag_for_Slovakia:',
':flag_for_Slovenia:',
':flag_for_Solomon_Islands:',
':flag_for_Somalia:',
':flag_for_South_Africa:',
':flag_for_South_Georgia_&_South_Sandwich_Islands:',
':flag_for_South_Korea:',
':flag_for_South_Sudan:',
':flag_for_Spain:',
':flag_for_Sri_Lanka:',
':flag_for_St._Barthélemy:',
':flag_for_St._Helena:',
':flag_for_St._Kitts_&_Nevis:',
':flag_for_St._Lucia:',
':flag_for_St._Martin:',
':flag_for_St._Pierre_&_Miquelon:',
':flag_for_St._Vincent_&_Grenadines:',
':flag_for_Sudan:',
':flag_for_Suriname:',
':flag_for_Svalbard_&_Jan_Mayen:',
':flag_for_Swaziland:',
':flag_for_Sweden:',
':flag_for_Switzerland:',
':flag_for_Syria:',
':flag_for_São_Tomé_&_Príncipe:',
':flag_for_Taiwan:',
':flag_for_Tajikistan:',
':flag_for_Tanzania:',
':flag_for_Thailand:',
':flag_for_Timor__Leste:',
':flag_for_Togo:',
':flag_for_Tokelau:',
':flag_for_Tonga:',
':flag_for_Trinidad_&_Tobago:',
':flag_for_Tristan_da_Cunha:',
':flag_for_Tunisia:',
':flag_for_Turkey:',
':flag_for_Turkmenistan:',
':flag_for_Turks_&_Caicos_Islands:',
':flag_for_Tuvalu:',
':flag_for_U.S._Outlying_Islands:',
':flag_for_U.S._Virgin_Islands:',
':flag_for_Uganda:',
':flag_for_Ukraine:',
':flag_for_United_Arab_Emirates:',
':flag_for_United_Kingdom:',
':flag_for_United_States:',
':flag_for_Uruguay:',
':flag_for_Uzbekistan:',
':flag_for_Vanuatu:',
':flag_for_Vatican_City:',
':flag_for_Venezuela:',
':flag_for_Vietnam:',
':flag_for_Wallis_&_Futuna:',
':flag_for_Western_Sahara:',
':flag_for_Yemen:',
':flag_for_Zambia:',
':flag_for_Zimbabwe:',
':flag_for_Åland_Islands:',
':golf:',
':fleur__de__lis:',
':muscle:',
':floppy_disk:',
':flower_playing_cards:',
':flushed:',
':fog:',
':foggy:',
':footprints:',
':fork_and_knife:',
':fork_and_knife_with_plate:',
':fountain:',
':four_leaf_clover:',
':frame_with_picture:',
':fries:',
':fried_shrimp:',
':frog:',
':hatched_chick:',
':frowning:',
':fuelpump:',
':full_moon:',
':full_moon_with_face:',
':funeral_urn:',
':game_die:',
':gear:',
':gem:',
':gemini:',
':ghost:',
':girl:',
':globe_with_meridians:',
':star2:',
':goat:',
':golfer:',
':mortar_board:',
':grapes:',
':green_apple:',
':green_book:',
':green_heart:',
':grimacing:',
':smile_cat:',
':grinning:',
':grin:',
':heartpulse:',
':guardsman:',
':guitar:',
':haircut:',
':hamburger:',
':hammer:',
':hammer_and_pick:',
':hammer_and_wrench:',
':hamster:',
':handbag:',
':raising_hand:',
':hatching_chick:',
':headphones:',
':hear_no_evil:',
':heart_decoration:',
':cupid:',
':gift_heart:',
':heart:',
':heavy_check_mark:',
':heavy_division_sign:',
':heavy_dollar_sign:',
':exclamation:',
':heavy_exclamation_mark:',
':heavy_heart_exclamation_mark_ornament:',
':o:',
':heavy_minus_sign:',
':heavy_multiplication_x:',
':heavy_plus_sign:',
':helicopter:',
':helm_symbol:',
':helmet_with_white_cross:',
':herb:',
':hibiscus:',
':high_heel:',
':bullettrain_side:',
':bullettrain_front:',
':high_brightness:',
':zap:',
':hocho:',
':knife:',
':hole:',
':honey_pot:',
':bee:',
':traffic_light:',
':racehorse:',
':horse:',
':horse_racing:',
':hospital:',
':coffee:',
':hot_dog:',
':hot_pepper:',
':hotsprings:',
':hotel:',
':hourglass:',
':hourglass_flowing_sand:',
':house:',
':house_buildings:',
':house_with_garden:',
':hugging_face:',
':100:',
':hushed:',
':ice_cream:',
':ice_hockey_stick_and_puck:',
':ice_skate:',
':imp:',
':inbox_tray:',
':incoming_envelope:',
':information_desk_person:',
':information_source:',
':capital_abcd:',
':abc:',
':abcd:',
':1234:',
':symbols:',
':izakaya_lantern:',
':lantern:',
':jack_o_lantern:',
':japanese_castle:',
':dolls:',
':japanese_goblin:',
':japanese_ogre:',
':post_office:',
':beginner:',
':jeans:',
':joystick:',
':kaaba:',
':key:',
':keyboard:',
':keycap_asterisk:',
':keycap_digit_eight:',
':keycap_digit_five:',
':keycap_digit_four:',
':keycap_digit_nine:',
':keycap_digit_one:',
':keycap_digit_seven:',
':keycap_digit_six:',
':keycap_digit_three:',
':keycap_digit_two:',
':keycap_digit_zero:',
':keycap_number_sign:',
':keycap_ten:',
':kimono:',
':couplekiss:',
':kiss:',
':kissing_cat:',
':kissing:',
':kissing_closed_eyes:',
':kissing_smiling_eyes:',
':koala:',
':label:',
':beetle:',
':large_blue_circle:',
':large_blue_diamond:',
':large_orange_diamond:',
':red_circle:',
':last_quarter_moon:',
':last_quarter_moon_with_face:',
':latin_cross:',
':leaves:',
':ledger:',
':mag:',
':left_luggage:',
':left_right_arrow:',
':leftwards_arrow_with_hook:',
':arrow_left:',
':lemon:',
':leo:',
':leopard:',
':level_slider:',
':libra:',
':light_rail:',
':link:',
':linked_paperclips:',
':lion_face:',
':lipstick:',
':lock:',
':lock_with_ink_pen:',
':lollipop:',
':sob:',
':love_hotel:',
':love_letter:',
':low_brightness:',
':lower_left_ballpoint_pen:',
':lower_left_crayon:',
':lower_left_fountain_pen:',
':lower_left_paintbrush:',
':mahjong:',
':man:',
':couple:',
':man_in_business_suit_levitating:',
':man_with_gua_pi_mao:',
':man_with_turban:',
':mans_shoe:',
':shoe:',
':mantelpiece_clock:',
':maple_leaf:',
':meat_on_bone:',
':black_circle:',
':white_circle:',
':melon:',
':memo:',
':pencil:',
':menorah_with_nine_branches:',
':mens:',
':metro:',
':microphone:',
':microscope:',
':military_medal:',
':milky_way:',
':minibus:',
':minidisc:',
':iphone:',
':mobile_phone_off:',
':calling:',
':money__mouth_face:',
':moneybag:',
':money_with_wings:',
':monkey:',
':monkey_face:',
':monorail:',
':rice_scene:',
':mosque:',
':motor_boat:',
':motorway:',
':mount_fuji:',
':mountain:',
':mountain_bicyclist:',
':mountain_cableway:',
':mountain_railway:',
':mouse2:',
':mouse:',
':lips:',
':movie_camera:',
':moyai:',
':notes:',
':mushroom:',
':musical_keyboard:',
':musical_note:',
':musical_score:',
':nail_care:',
':name_badge:',
':national_park:',
':necktie:',
':ab:',
':negative_squared_cross_mark:',
':a:',
':b:',
':o2:',
':parking:',
':nerd_face:',
':neutral_face:',
':new_moon:',
':honeybee:',
':new_moon_with_face:',
':newspaper:',
':night_with_stars:',
':no_bicycles:',
':no_entry:',
':no_entry_sign:',
':no_mobile_phones:',
':underage:',
':no_pedestrians:',
':no_smoking:',
':non__potable_water:',
':arrow_upper_right:',
':arrow_upper_left:',
':nose:',
':notebook:',
':notebook_with_decorative_cover:',
':nut_and_bolt:',
':octopus:',
':oden:',
':office:',
':oil_drum:',
':ok_hand:',
':old_key:',
':older_man:',
':older_woman:',
':om_symbol:',
':on:',
':oncoming_automobile:',
':oncoming_bus:',
':oncoming_police_car:',
':oncoming_taxi:',
':book:',
':open_book:',
':open_file_folder:',
':open_hands:',
':unlock:',
':mailbox_with_no_mail:',
':mailbox_with_mail:',
':ophiuchus:',
':cd:',
':orange_book:',
':orthodox_cross:',
':outbox_tray:',
':ox:',
':package:',
':page_facing_up:',
':page_with_curl:',
':pager:',
':palm_tree:',
':panda_face:',
':paperclip:',
':part_alternation_mark:',
':tada:',
':passenger_ship:',
':passport_control:',
':feet:',
':paw_prints:',
':peace_symbol:',
':peach:',
':pear:',
':walking:',
':pencil2:',
':penguin:',
':pensive:',
':performing_arts:',
':persevere:',
':bow:',
':person_frowning:',
':raised_hands:',
':person_with_ball:',
':person_with_blond_hair:',
':pray:',
':person_with_pouting_face:',
':computer:',
':pick:',
':pig2:',
':pig:',
':pig_nose:',
':hankey:',
':poop:',
':shit:',
':pill:',
':bamboo:',
':pineapple:',
':pisces:',
':gun:',
':place_of_worship:',
':black_joker:',
':police_car:',
':rotating_light:',
':cop:',
':poodle:',
':popcorn:',
':postal_horn:',
':postbox:',
':stew:',
':potable_water:',
':pouch:',
':poultry_leg:',
':pouting_cat:',
':rage:',
':prayer_beads:',
':princess:',
':printer:',
':loudspeaker:',
':purple_heart:',
':purse:',
':pushpin:',
':put_litter_in_its_place:',
':rabbit2:',
':rabbit:',
':racing_car:',
':racing_motorcycle:',
':radio:',
':radio_button:',
':radioactive_sign:',
':railway_car:',
':railway_track:',
':rainbow:',
':fist:',
':hand:',
':raised_hand:',
':raised_hand_with_fingers_splayed:',
':raised_hand_with_part_between_middle_and_ring_fingers:',
':ram:',
':rat:',
':blue_car:',
':apple:',
':registered:',
':relieved:',
':reminder_ribbon:',
':restroom:',
':reversed_hand_with_middle_finger_extended:',
':revolving_hearts:',
':ribbon:',
':rice_ball:',
':rice_cracker:',
':mag_right:',
':right_anger_bubble:',
':arrow_right_hook:',
':ring:',
':sweet_potato:',
':robot_face:',
':rocket:',
':rolled__up_newspaper:',
':roller_coaster:',
':rooster:',
':rose:',
':rosette:',
':round_pushpin:',
':rowboat:',
':rugby_football:',
':runner:',
':running:',
':running_shirt_with_sash:',
':sagittarius:',
':boat:',
':sailboat:',
':sake:',
':satellite:',
':saxophone:',
':scales:',
':school:',
':school_satchel:',
':scorpion:',
':scorpius:',
':scroll:',
':seat:',
':see_no_evil:',
':seedling:',
':shamrock:',
':shaved_ice:',
':sheep:',
':shield:',
':shinto_shrine:',
':ship:',
':stars:',
':shopping_bags:',
':cake:',
':shower:',
':sign_of_the_horns:',
':japan:',
':six_pointed_star:',
':ski:',
':skier:',
':skull:',
':skull_and_crossbones:',
':sleeping_accommodation:',
':sleeping:',
':zzz:',
':sleepy:',
':sleuth_or_spy:',
':pizza:',
':slightly_frowning_face:',
':slightly_smiling_face:',
':slot_machine:',
':small_airplane:',
':small_blue_diamond:',
':small_orange_diamond:',
':heart_eyes_cat:',
':smiley_cat:',
':innocent:',
':heart_eyes:',
':smiling_imp:',
':smiley:',
':sweat_smile:',
':smile:',
':laughing:',
':satisfied:',
':blush:',
':sunglasses:',
':smirk:',
':smoking:',
':snail:',
':snake:',
':snow_capped_mountain:',
':snowboarder:',
':snowflake:',
':snowman:',
':soccer:',
':icecream:',
':soon:',
':arrow_lower_right:',
':arrow_lower_left:',
':spaghetti:',
':sparkle:',
':sparkles:',
':sparkling_heart:',
':speak_no_evil:',
':speaker:',
':mute:',
':sound:',
':loud_sound:',
':speaking_head_in_silhouette:',
':speech_balloon:',
':speedboat:',
':spider:',
':spider_web:',
':spiral_calendar_pad:',
':spiral_note_pad:',
':shell:',
':sweat_drops:',
':sports_medal:',
':whale:',
':u5272:',
':u5408:',
':u55b6:',
':u6307:',
':u6708:',
':u6709:',
':u6e80:',
':u7121:',
':u7533:',
':u7981:',
':u7a7a:',
':cl:',
':cool:',
':free:',
':id:',
':koko:',
':sa:',
':new:',
':ng:',
':ok:',
':sos:',
':up:',
':vs:',
':stadium:',
':star_and_crescent:',
':star_of_david:',
':station:',
':statue_of_liberty:',
':steam_locomotive:',
':ramen:',
':stopwatch:',
':straight_ruler:',
':strawberry:',
':studio_microphone:',
':partly_sunny:',
':sun_with_face:',
':sunflower:',
':sunrise:',
':sunrise_over_mountains:',
':city_sunrise:',
':surfer:',
':sushi:',
':suspension_railway:',
':swimmer:',
':synagogue:',
':syringe:',
':shirt:',
':tshirt:',
':table_tennis_paddle_and_ball:',
':taco:',
':tanabata_tree:',
':tangerine:',
':taurus:',
':taxi:',
':tea:',
':calendar:',
':telephone_receiver:',
':telescope:',
':tv:',
':tennis:',
':tent:',
':thermometer:',
':thinking_face:',
':thought_balloon:',
':three_button_mouse:',
':+1:',
':thumbsup:',
':__1:',
':thumbsdown:',
':thunder_cloud_and_rain:',
':ticket:',
':tiger2:',
':tiger:',
':timer_clock:',
':tired_face:',
':toilet:',
':tokyo_tower:',
':tomato:',
':tongue:',
':tophat:',
':top:',
':trackball:',
':tractor:',
':tm:',
':train2:',
':tram:',
':train:',
':triangular_flag_on_post:',
':triangular_ruler:',
':trident:',
':trolleybus:',
':trophy:',
':tropical_drink:',
':tropical_fish:',
':trumpet:',
':tulip:',
':turkey:',
':turtle:',
':twisted_rightwards_arrows:',
':two_hearts:',
':two_men_holding_hands:',
':two_women_holding_hands:',
':umbrella:',
':umbrella_on_ground:',
':unamused:',
':unicorn_face:',
':small_red_triangle:',
':arrow_up_small:',
':arrow_up_down:',
':upside__down_face:',
':arrow_up:',
':vertical_traffic_light:',
':vibration_mode:',
':v:',
':video_camera:',
':video_game:',
':vhs:',
':violin:',
':virgo:',
':volcano:',
':volleyball:',
':waning_crescent_moon:',
':waning_gibbous_moon:',
':warning:',
':wastebasket:',
':watch:',
':water_buffalo:',
':wc:',
':ocean:',
':watermelon:',
':waving_black_flag:',
':wave:',
':waving_white_flag:',
':wavy_dash:',
':waxing_crescent_moon:',
':moon:',
':waxing_gibbous_moon:',
':scream_cat:',
':weary:',
':wedding:',
':weight_lifter:',
':whale2:',
':wheel_of_dharma:',
':wheelchair:',
':point_down:',
':grey_exclamation:',
':white_flower:',
':white_frowning_face:',
':white_check_mark:',
':white_large_square:',
':point_left:',
':white_medium_small_square:',
':white_medium_square:',
':star:',
':grey_question:',
':point_right:',
':white_small_square:',
':relaxed:',
':white_square_button:',
':white_sun_behind_cloud:',
':white_sun_behind_cloud_with_rain:',
':white_sun_with_small_cloud:',
':point_up_2:',
':point_up:',
':wind_blowing_face:',
':wind_chime:',
':wine_glass:',
':wink:',
':wolf:',
':woman:',
':dancers:',
':boot:',
':womans_clothes:',
':womans_hat:',
':sandal:',
':womens:',
':world_map:',
':worried:',
':gift:',
':wrench:',
':writing_hand:',
':yellow_heart:',
':yin_yang:',
':zipper__mouth_face:',
]
| [
3620,
46,
41,
40,
62,
45849,
796,
685,
198,
197,
10354,
16,
301,
62,
5372,
62,
1150,
282,
25,
3256,
198,
220,
220,
220,
705,
25,
17,
358,
62,
5372,
62,
1150,
282,
25,
3256,
198,
220,
220,
220,
705,
25,
18,
4372,
62,
5372,
62,
... | 1.95234 | 51,930 |
from flask import Flask,render_template,request
import requests
app = Flask(__name__)
API_KEY = 'RQM7GIDWT0ZU2WLU'
@app.route('/',methods=['GET','POST'])
if __name__ == "__main__":
app.run(debug= False) | [
6738,
42903,
1330,
46947,
11,
13287,
62,
28243,
11,
25927,
201,
198,
11748,
7007,
201,
198,
201,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
201,
198,
17614,
62,
20373,
796,
705,
49,
48,
44,
22,
38,
2389,
39386,
15,
57,
52,
17,
... | 2.265306 | 98 |
import base64
import os
import random
from io import BytesIO
import matplotlib.font_manager as fm
from PIL import Image, ImageDraw, ImageFont
| [
11748,
2779,
2414,
198,
11748,
28686,
198,
11748,
4738,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
11748,
2603,
29487,
8019,
13,
10331,
62,
37153,
355,
277,
76,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
11,
7412,
23252... | 3.428571 | 42 |
from json import dumps
from pathlib import Path
from sqlite3 import connect
from pycargr.model import Car
DB_PATH = Path.home().joinpath('pycargr.db')
SEARCH_BASE_URL = 'https://www.car.gr/classifieds/cars/'
| [
6738,
33918,
1330,
45514,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
44161,
578,
18,
1330,
2018,
198,
198,
6738,
12972,
66,
853,
81,
13,
19849,
1330,
1879,
198,
198,
11012,
62,
34219,
796,
10644,
13,
11195,
22446,
22179,
6978,
10786... | 2.864865 | 74 |
from pathlib import Path
import ast
import pytest
import astor
import warnings
import os
from json_codegen import load_schema
from json_codegen.generators.python3_marshmallow import Python3MarshmallowGenerator
SCHEMAS_DIR = Path(__file__).parent / "fixtures" / "schemas"
FIXTURES_DIR = Path(__file__).parent / "fixtures" / "python3_marshmallow"
expected_init_py = astor.dump_tree(ast.Module(body=[]))
test_params = sorted(pytest.param(f, id=f.name) for f in SCHEMAS_DIR.glob("*.schema.json"))
@pytest.mark.parametrize("schema_filename", (test_params))
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
6468,
198,
198,
11748,
12972,
9288,
198,
11748,
6468,
273,
198,
11748,
14601,
198,
11748,
28686,
198,
198,
6738,
33918,
62,
8189,
5235,
1330,
3440,
62,
15952,
2611,
198,
6738,
33918,
62,
8189,
... | 2.805 | 200 |
# -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 1.775 | 40 |
# robofab manual
# Glyphmath howto
# Fun examples
#FLM: Fun with GlyphMath
# this example is meant to run with the RoboFab Demo Font
# as the Current Font. So, if you're doing this in FontLab
# import the Demo Font UFO first.
from robofab.world import CurrentFont
from random import random
f = CurrentFont()
condensedLight = f["a#condensed_light"]
wideLight = f["a#wide_light"]
wideBold = f["a#wide_bold"]
diff = wideLight - condensedLight
destination = f.newGlyph("a#deltaexperiment")
destination.clear()
x = wideBold + (condensedLight-wideLight)*random()
destination.appendGlyph( x)
destination.width = x.width
f.update() | [
2,
3857,
1659,
397,
10107,
198,
2,
27949,
746,
11018,
703,
1462,
198,
2,
11138,
6096,
198,
198,
2,
3697,
44,
25,
11138,
351,
27949,
746,
37372,
198,
220,
198,
2,
428,
1672,
318,
4001,
284,
1057,
351,
262,
39702,
43957,
34588,
24060,... | 2.967442 | 215 |
""" Simple module for loading documentation of various
pypy-cs from doc directory
"""
import py
| [
198,
37811,
17427,
8265,
329,
11046,
10314,
286,
2972,
198,
79,
4464,
88,
12,
6359,
422,
2205,
8619,
198,
37811,
198,
198,
11748,
12972,
628
] | 3.96 | 25 |
#!/usr/bin/env python
'''
SST scheduler simulation input file generator
Input parameters are given below
Setting a parameter to "default" or "" will select the default option
'''
import os
# Input workload trace path:
traceName = 'jobtrace_files/bisection_N1.sim'
# Output file name:
outFile = 'simple_libtopomap_bisection_N1.py'
# Machine (cluster) configuration:
# mesh[xdim, ydim, zdim], torus[xdim, ydim, zdim], simple,
# dragonfly[routersPerGroup, portsPerRouter, opticalsPerRouter,
# nodesPerRouter, localTopology, globalTopology]
# localTopology:[all_to_all]
# globalTopology:[absolute,circulant,relative]
# (default: simple)
machine = 'dragonfly[8,11,2,2,all_to_all,absolute]'
# Number of machine nodes
# The script calculates the number of nodes if mesh or torus machine is provided.
# any integer. (default: 1)
numberNodes = ''
# Number of cores in each machine node
# any integer. (default: 1)
coresPerNode = '2'
# Scheduler algorithm:
# cons, delayed, easy, elc, pqueue, prioritize. (default: pqueue)
scheduler = 'easy'
# Fair start time algorithm:
# none, relaxed, strict. (default: none)
FST = ''
# Allocation algorithm:
# bestfit, constraint, energy, firstfit, genalg, granularmbs, hybrid, mbs,
# mc1x1, mm, nearest, octetmbs, oldmc1x1,random, simple, sortedfreelist,
# nearestamap, spectralamap. (default: simple)
allocator = 'simple'
# Task mapping algorithm:
# simple, rcb, random, topo, rcm, nearestamap, spectralamap. (default: simple)
taskMapper = 'topo'
# Communication overhead parameters
# a[b,c] (default: none)
timeperdistance = '.001865[.1569,0.0129]'
# Heat distribution matrix (D_matrix) input file
# file path, none. (default: none)
dMatrixFile = 'none'
# Randomization seed for communication time overhead
# none, any integer. (default: none)
randomSeed = ''
# Detailed network simulation mode
# ON, OFF (default: OFF)
detailedNetworkSim = 'ON'
# Completed jobs trace (in ember) for detailed network sim mode
# file path, none (default: none)
completedJobsTrace = 'emberCompleted.txt'
# Running jobs (in ember) for detailed network sim mode
# file path, none (default: none)
runningJobsTrace = 'emberRunning.txt'
'''
Do not modify the script after this point.
'''
import os
import sys

if __name__ == '__main__':
    # The output path is mandatory -- refuse to run without it.
    if outFile == "" or outFile == "default":
        print("Error: There is no default value for outFile")
        sys.exit()
    f = open(outFile, 'w')
    f.write('# scheduler simulation input file\n')
    f.write('import sst\n')
    f.write('\n')
    f.write('# Define SST core options\n')
    f.write('sst.setProgramOption("run-mode", "both")\n')
    f.write('\n')
    f.write('# Define the simulation components\n')
    # Emitted on one line (the original used a backslash continuation inside
    # the string literal, which leaked the continuation indentation into the
    # generated file).
    f.write('scheduler = sst.Component("myScheduler", "scheduler.schedComponent")\n')
    if traceName == "" or traceName == "default":
        print("Error: There is no default value for traceName")
        # Close before removing: an open handle blocks deletion on Windows.
        f.close()
        os.remove(outFile)
        sys.exit()
    # Collect the scheduler parameters and join them with ",\n" so no trailing
    # comma is ever written.  (The original stripped the last comma with
    # f.seek(-2, os.SEEK_END), which text-mode files reject under Python 3,
    # and `os` was never imported at all.)
    params = [' "traceName" : "' + traceName + '"']
    if machine != "" and machine != "default":
        params.append(' "machine" : "' + machine + '"')
    if coresPerNode != "":
        params.append(' "coresPerNode" : "' + coresPerNode + '"')
    if scheduler != "" and scheduler != "default":
        params.append(' "scheduler" : "' + scheduler + '"')
    if FST != "" and FST != "default":
        params.append(' "FST" : "' + FST + '"')
    if allocator != "" and allocator != "default":
        params.append(' "allocator" : "' + allocator + '"')
    if taskMapper != "" and taskMapper != "default":
        params.append(' "taskMapper" : "' + taskMapper + '"')
    if timeperdistance != "" and timeperdistance != "default":
        params.append(' "timeperdistance" : "' + timeperdistance + '"')
    if dMatrixFile != "" and dMatrixFile != "default":
        params.append(' "dMatrixFile" : "' + dMatrixFile + '"')
    if randomSeed != "" and randomSeed != "default":
        params.append(' "runningTimeSeed" : "' + randomSeed + '"')
    if detailedNetworkSim != "" and detailedNetworkSim != "default":
        params.append(' "detailedNetworkSim" : "' + detailedNetworkSim + '"')
    if completedJobsTrace != "" and completedJobsTrace != "default":
        params.append(' "completedJobsTrace" : "' + completedJobsTrace + '"')
    if runningJobsTrace != "" and runningJobsTrace != "default":
        params.append(' "runningJobsTrace" : "' + runningJobsTrace + '"')
    f.write('scheduler.addParams({\n')
    f.write(',\n'.join(params))
    f.write('\n})\n')
    f.write('\n')
    f.write('# nodes\n')
    # Derive the node count from the machine description when it encodes one;
    # otherwise fall back to the numberNodes setting (documented default: 1).
    machineType = machine.split('[')[0]
    if machineType == 'mesh' or machineType == 'torus':
        nums = machine.split('[')[1].split(']')[0].split(',')
        # mesh/torus: nodes = xdim * ydim * zdim
        numberNodes = int(nums[0]) * int(nums[1]) * int(nums[2])
    elif machineType == 'dragonfly':
        nums = machine.split('[')[1].split(']')[0].split(',')
        # dragonfly: groups = routersPerGroup * opticalsPerRouter + 1,
        # nodes = groups * routersPerGroup * nodesPerRouter
        numberNodes = (int(nums[0]) * int(nums[2]) + 1) * int(nums[0]) * int(nums[3])
    if numberNodes == '':
        # Previously int('') raised ValueError for e.g. machine = 'simple'.
        numberNodes = 1
    numberNodes = int(numberNodes)
    for i in range(0, numberNodes):
        f.write('n' + str(i) + ' = sst.Component("n' + str(i) +
                '", "scheduler.nodeComponent")\n')
        f.write('n' + str(i) + '.addParams({\n')
        f.write(' "nodeNum" : "' + str(i) + '",\n')
        f.write('})\n')
    f.write('\n')
    f.write('# define links\n')
    # One link per node, connecting the scheduler's numbered port to the node.
    for i in range(0, numberNodes):
        f.write('l' + str(i) + ' = sst.Link("l' + str(i) + '")\n')
        f.write('l' + str(i) + '.connect( (scheduler, "nodeLink' + str(i) +
                '", "0 ns"), (n' + str(i) + ', "Scheduler", "0 ns") )\n')
    f.write('\n')
    f.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
50,
2257,
6038,
18173,
18640,
5128,
2393,
17301,
198,
20560,
10007,
389,
1813,
2174,
198,
34149,
257,
11507,
284,
366,
12286,
1,
393,
13538,
481,
2922,
262,
4277,
3038,
1... | 2.41812 | 2,351 |
from jax import lax
from jax.experimental import host_callback
from tqdm.auto import tqdm
def progress_bar_scan(num_samples, message=None):
    """
    Progress bar for a JAX scan.

    Returns a decorator factory that attaches a tqdm progress bar to the body
    function of ``lax.scan``: the bar is created on the first iteration and
    advanced every ``print_rate`` iterations via host callbacks.

    NOTE(review): ``jax.experimental.host_callback`` is deprecated in recent
    JAX releases -- confirm the pinned JAX version still provides ``id_tap``.
    """
    if message is None:
        # Default label, e.g. "Running for 1,000 iterations".
        message = f"Running for {num_samples:,} iterations"
    tqdm_bars = {}

    # Refresh the bar roughly 20 times over the whole scan, but at least once
    # per iteration for very short scans.
    if num_samples > 20:
        print_rate = int(num_samples / 20)
    else:
        print_rate = 1
    remainder = num_samples % print_rate

    def _update_progress_bar(iter_num):
        """
        Updates tqdm progress bar of a JAX scan or loop.
        """
        # NOTE(review): _define_tqdm and _update_tqdm are not defined in this
        # excerpt -- presumably host-side helpers that create/advance the bar
        # stored in tqdm_bars; verify against the full source.
        _ = lax.cond(
            iter_num == 0,
            # First iteration: create the tqdm bar on the host.
            lambda _: host_callback.id_tap(_define_tqdm, None, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )

        _ = lax.cond(
            # update tqdm every multiple of `print_rate` except at the end
            (iter_num % print_rate == 0) & (iter_num != num_samples - remainder),
            lambda _: host_callback.id_tap(_update_tqdm, print_rate, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )

        _ = lax.cond(
            # update tqdm by `remainder`
            iter_num == num_samples - remainder,
            lambda _: host_callback.id_tap(_update_tqdm, remainder, result=iter_num),
            lambda _: iter_num,
            operand=None,
        )

    def _progress_bar_scan(func):
        """
        Decorator that adds a progress bar to `body_fun` used in `lax.scan`.
        Note that `body_fun` must either be looping over
        `np.arange(num_samples)`, or be looping over a tuple whose first
        element is `np.arange(num_samples)`. This means that `iter_num`
        is the current iteration number.
        """
        # NOTE(review): wrapper_progress_bar is not defined in this excerpt;
        # the wrapping logic appears to have been truncated -- verify against
        # the full source.
        return wrapper_progress_bar

    return _progress_bar_scan
| [
6738,
474,
897,
1330,
36919,
198,
6738,
474,
897,
13,
23100,
9134,
1330,
2583,
62,
47423,
198,
6738,
256,
80,
36020,
13,
23736,
1330,
256,
80,
36020,
628,
198,
4299,
4371,
62,
5657,
62,
35836,
7,
22510,
62,
82,
12629,
11,
3275,
28,
... | 2.166867 | 833 |
import logging
from pathlib import Path | [
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644
] | 4.875 | 8 |
import numpy as np
from skimage.future import graph
from skimage._shared.version_requirements import is_installed
from skimage import segmentation
import pytest
# The original stacked six identical skipif decorators; one is sufficient.
@pytest.mark.skipif(not is_installed('networkx'),
                    reason="networkx not installed")
def test_ncut_stable_subgraph():
    """ Test to catch an error thrown when subgraph has all equal edges.

    The image is uniform, so the two labelled regions are perfectly similar;
    the normalized cut must merge everything into a single label (0).
    """
    # Uniform black image: every region has an identical mean colour.
    img = np.zeros((100, 100, 3), dtype='uint8')
    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 1
    labels[:50, 50:] = 2

    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)

    # All regions should have been merged into one.
    assert new_labels.max() == 0
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
9060,
13,
37443,
1330,
4823,
198,
6738,
1341,
9060,
13557,
28710,
13,
9641,
62,
8897,
18883,
1330,
318,
62,
37050,
198,
6738,
1341,
9060,
1330,
10618,
341,
198,
11748,
12972,
9288,
628,
19... | 2.509728 | 514 |
from django.shortcuts import redirect, render, reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.db.models import Case, CharField, Value, When
from django.views.generic.base import TemplateView
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from unidecode import unidecode # normalize strings Csii
from alunos.models import Aluno
from alunos.forms import AlunoForm
from turmas.models import Turma
from accounts.models import CustomUser
# Classes to control admin acess and success messages
from base.base_admin_permissions import BaseAdminUsersAdSe
# Constants Vars
from base.constants import CURRENT_YEAR
def create_user_after_registration(
        username, password, first_name, last_name, department):
    """
    Create the CustomUser account that backs a freshly registered aluno.
    """
    new_user_fields = {
        'username': username,
        'password': password,
        'first_name': first_name,
        'last_name': last_name,
        'department': department,
    }
    CustomUser.objects.create_user(**new_user_fields)
def data_processing_user_creation(cpf, name_form, department):
    """
    Derive username/password from the registration form data and create the
    backing user account when the CPF is not registered yet.
    """
    # Strip the CPF punctuation: "123.456.789-00" -> "12345678900".
    cpf_join = cpf.replace('.', '').replace('-', '')

    name_parts = name_form.split()
    first_name = name_parts[0]
    last_name = name_parts[-1]
    # Initial password: accent-free lowercase first name + first 6 CPF digits.
    password = unidecode(first_name).lower() + cpf_join[0:6]

    # Only create the account when no user with this CPF exists yet.
    matching_users = CustomUser.objects.filter(username=cpf_join)
    if not matching_users:
        create_user_after_registration(
            cpf_join, password, first_name, last_name, department)
# --- General views --- #
# --- Admin views --- #
# --- Lists views --- #
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
11,
8543,
11,
9575,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
62,
75,
12582,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
... | 3.014467 | 553 |
# *****************************************************************************
# * Copyright 2019 Amazon.com, Inc. and its affiliates. All Rights Reserved. *
# *
# Licensed under the Amazon Software License (the "License"). *
# You may not use this file except in compliance with the License. *
# A copy of the License is located at *
# *
# http://aws.amazon.com/asl/ *
# *
# or in the "license" file accompanying this file. This file is distributed *
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either *
# express or implied. See the License for the specific language governing *
# permissions and limitations under the License. *
# *****************************************************************************
import tempfile
import torch
from torchvision import transforms
from model_factory_service_locator import ModelFactoryServiceLocator
class Predict:
    """
    Runs predictions on a given model.

    NOTE(review): no methods are visible in this excerpt -- the class body
    appears to have been truncated; verify against the full source.
    """
| [
2,
41906,
17174,
4557,
35625,
198,
2,
1635,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
13,
220,
1635,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.199005 | 603 |
import secrets
import string
from random import *  # kept: later code in this file may rely on these names

# Candidate alphabet: ASCII letters, punctuation and digits.
characters = string.ascii_letters + string.punctuation + string.digits
# Use the secrets module (not random) because a password is security
# sensitive; 6 + randbelow(9) gives a length uniform in [6, 14], matching the
# original randint(6, 14).
pswd = "".join(secrets.choice(characters) for _ in range(6 + secrets.randbelow(9)))
# print as a function so the script also runs on Python 3 (the original
# `print pswd` statement is a syntax error there).
print(pswd)
| [
11748,
4731,
198,
6738,
4738,
1330,
1635,
198,
10641,
19858,
796,
4731,
13,
292,
979,
72,
62,
15653,
1343,
4731,
13,
79,
16260,
2288,
220,
1343,
4731,
13,
12894,
896,
198,
862,
16993,
796,
220,
366,
1911,
22179,
7,
25541,
7,
10641,
... | 2.983871 | 62 |
from typing import Any, Dict, Set
from django.apps import AppConfig
def required_users(element: Dict[str, Any]) -> Set[int]:
    """
    Collects the ids of every user displayed as a speaker in the element.
    """
    return {speaker["user_id"] for speaker in element["speakers"]}
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
5345,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628,
198,
198,
4299,
2672,
62,
18417,
7,
30854,
25,
360,
713,
58,
2536,
11,
4377,
12962,
4613,
5345,
58,
600,
5974,
198,
22... | 3.106383 | 94 |
"""
In this step the destination address is no longer node 2 -- we draw a
random destination, and we'll add the destination address to the message.
The best way is to subclass cMessage and add destination as a data member.
To make the model execute longer, after a message arrives to its destination
the destination node will generate another message with a random destination
address, and so forth.
"""
from pyopp import cSimpleModule, cMessage, EV
| [
37811,
198,
818,
428,
2239,
262,
10965,
2209,
318,
645,
2392,
10139,
362,
1377,
356,
3197,
257,
198,
25120,
10965,
11,
290,
356,
1183,
751,
262,
10965,
2209,
284,
262,
3275,
13,
198,
198,
464,
1266,
835,
318,
284,
47611,
269,
12837,
... | 4.375 | 104 |
#!/usr/bin/env python3
"""Set/get/remove client/port metadata."""
from pprint import pprint

import jack

# Connect to the JACK server and register one input port to hang metadata on.
client = jack.Client('Metadata-Client')
port = client.inports.register('input')

# Set and read back a "pretty name" on the client itself.
client.set_property(client, jack.METADATA_PRETTY_NAME, 'Best Client Ever')
print('Client "pretty" name:',
      jack.get_property(client, jack.METADATA_PRETTY_NAME))

# Bytes values need an explicit MIME type ('text/plain' here).
client.set_property(
    port, jack.METADATA_PRETTY_NAME, b'a good port', 'text/plain')
print('Port "pretty" name:',
      jack.get_property(port, jack.METADATA_PRETTY_NAME))

# Dump everything currently stored in the JACK metadata registry.
print('All client properties:')
pprint(jack.get_properties(client))
print('All port properties:')
pprint(jack.get_properties(port))
print('All properties:')
pprint(jack.get_all_properties())

# Clean up: one property, then all of the client's, then everything.
client.remove_property(port, jack.METADATA_PRETTY_NAME)
client.remove_properties(client)
client.remove_all_properties()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
7248,
14,
1136,
14,
28956,
5456,
14,
634,
20150,
526,
15931,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
11748,
14509,
198,
198,
16366,
796,
14509,
13,
11792,
10786,
91... | 2.841751 | 297 |
import csv
from argparse import ArgumentParser, ArgumentTypeError
from os import path
from string import Template
from subprocess import Popen
from tempfile import NamedTemporaryFile
import numpy as np
import util
# This import only works if the directory where "generate_trench.so" is located is present in
# the PYTHONPATH environment variable
#import generate_trench
# Path to the ViennaTS simulator binary, relative to this script.
VIENNATS_EXE = "../../ViennaTools/ViennaTS/build/viennats-2.3.2"
PROJECT_DIRECTORY = path.dirname(__file__)
# Simulated process (deposition) time -- units as used by ViennaTS;
# TODO confirm against the ViennaTS parameter file.
PROCESS_TIME = 10
# Bit depth used for distance values -- presumably 2**8 quantisation levels;
# verify where this constant is consumed.
DISTANCE_BITS = 8
OUTPUT_DIR = path.join(PROJECT_DIRECTORY, "output")

parser = ArgumentParser(
    description="Run physical deposition simulations with different sticking probabilities.")
# Positional (optional) output path for the aggregated results.
parser.add_argument(
    "output",
    type=str,
    default="results.csv",
    nargs="?",
    help="output CSV file for saving the results")
def check_list_input(x):
    """Parse a comma-separated string into a list of sticking probabilities.

    Every element must be a float in the half-open interval (0, 1]; invalid
    values are rejected (not skipped -- the original docstring claimed the
    latter, contradicting the code).

    Raises:
        ArgumentTypeError: if an element is not a number, if no values are
            given, or if any value falls outside (0, 1].
    """
    # Strip an optional "[...]" wrapper so both "0.5,1" and "[0.5,1]" parse.
    parts = x.replace("[", "").replace("]", "").split(",")
    try:
        values = [float(i) for i in parts]
    except ValueError as e:
        raise ArgumentTypeError(e)
    if len(values) == 0:
        raise ArgumentTypeError("No sticking probability values provided")
    # Plain all() suffices here; no need for np.all on a Python list.
    if not all(0 < i <= 1 for i in values):
        raise ArgumentTypeError(
            "The sticking probability has to have a value between 0 and 1.")
    return values
parser.add_argument(
    "--sticking-probabilities",
    dest="sticking_probabilities",
    type=check_list_input,
    default=[1/2**i for i in range(5)],  # 1, 1/2, 1/4, 1/8, 1/16
    help="list of sticking probabilities to be used during the simulation"
)
parser.add_argument(
    "--repetitions",
    dest="repetitions",
    type=int,
    default=10,
    help="how often the simulation should be repeated for one set of parameters")

if __name__ == "__main__":
    # NOTE(review): main() is not defined in this excerpt -- presumably it is
    # defined elsewhere in the file; verify against the full source.
    main()
| [
11748,
269,
21370,
198,
6738,
1822,
29572,
1330,
45751,
46677,
11,
45751,
6030,
12331,
198,
6738,
28686,
1330,
3108,
198,
6738,
4731,
1330,
37350,
198,
6738,
850,
14681,
1330,
8099,
268,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8... | 2.866564 | 652 |
# File: N (Python 2.4)
import random
import types
import string
from direct.fsm import StateData
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from direct.task import Task
from pandac.PandaModules import *
from pandac.PandaModules import TextEncoder
from otp.namepanel import NameCheck
from otp.otpbase import OTPLocalizer as OL
from pirates.piratesbase import PLocalizer as PL
from pirates.pirate import HumanDNA
from pirates.piratesbase import PiratesGlobals
from pirates.piratesgui import GuiButton
from pirates.piratesgui import PiratesGuiGlobals
from pirates.leveleditor import NPCList
from pirates.makeapirate.PCPickANamePattern import PCPickANamePattern
from direct.distributed.MsgTypes import *
from direct.distributed import PyDatagram
MAX_NAME_WIDTH = 9
| [
2,
9220,
25,
399,
357,
37906,
362,
13,
19,
8,
198,
198,
11748,
4738,
198,
11748,
3858,
198,
11748,
4731,
198,
6738,
1277,
13,
69,
5796,
1330,
1812,
6601,
198,
6738,
1277,
13,
69,
5796,
1330,
13449,
10652,
44,
198,
6738,
1277,
13,
... | 3.46371 | 248 |
import argparse
import config
import utils
from chat import ChatSession
from utils import Color
if __name__ == '__main__':
    # Script entry point.  NOTE(review): main() is not defined in this
    # excerpt -- verify it exists elsewhere in the file.
    main()
| [
11748,
1822,
29572,
198,
198,
11748,
4566,
198,
11748,
3384,
4487,
198,
6738,
8537,
1330,
24101,
36044,
198,
6738,
3384,
4487,
1330,
5315,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388... | 3.232558 | 43 |
from checkio.home.most_wanted_letter import checkio
| [
6738,
2198,
952,
13,
11195,
13,
1712,
62,
86,
4126,
62,
9291,
1330,
2198,
952,
628
] | 3.3125 | 16 |
import copy
from Reversi import Reversi
from dqn_agent import DQNAgent
if __name__ == "__main__":
    # parameters
    #n_epochs = 1000
    # NOTE(review): n_epochs is 5 with the 1000 value commented out --
    # presumably reduced for debugging; confirm before a real training run.
    n_epochs = 5

    # environment, agent
    env = Reversi()

    # playerID
    # Three entries so playerID[i+1] below never indexes out of range
    # (for i == 1 it wraps back to Black).
    playerID = [env.Black, env.White, env.Black]

    # player agent
    players = []
    # player[0]= env.Black
    players.append(DQNAgent(env.enable_actions, env.name, env.screen_n_rows, env.screen_n_cols))
    # player[1]= env.White
    players.append(DQNAgent(env.enable_actions, env.name, env.screen_n_rows, env.screen_n_cols))

    for e in range(n_epochs):
        # reset
        env.reset()
        terminal = False
        while terminal == False:  # loop until the episode ends
            for i in range(0, len(players)):
                state = env.screen
                #print(state)
                targets = env.get_enables(playerID[i])
                # Exploration rate decays over epochs towards 20/(n_epochs+20).
                exploration = (n_epochs - e + 20)/(n_epochs + 20)
                #exploration = 0.1
                if len(targets) > 0:
                    # there is at least one legal move
                    # train on every legal move
                    for tr in targets:
                        tmp = copy.deepcopy(env)
                        tmp.update(tr, playerID[i])
                        # check whether the game has ended
                        win = tmp.winner()
                        end = tmp.isEnd()
                        # next state
                        state_X = tmp.screen
                        target_X = tmp.get_enables(playerID[i+1])
                        if len(target_X) == 0:
                            target_X = tmp.get_enables(playerID[i])
                        # train both players
                        for j in range(0, len(players)):
                            # NOTE(review): "reword" is presumably a typo for
                            # "reward"; kept unchanged here.
                            reword = 0
                            if end == True:
                                if win == playerID[j]:
                                    # reward of 1 for winning
                                    reword = 1
                            players[j].store_experience(state, targets, tr, reword, state_X, target_X, end)
                            #print(state)
                            #print(state_X)
                            #if e > n_epochs*0.2:
                            #    players[j].experience_replay()

                # choose an action
                action = players[i].select_action(state, targets, exploration)
                # execute the action
                env.update(action, playerID[i])
                # for log
                loss = players[i].current_loss
                Q_max, Q_action = players[i].select_enable_action(state, targets)
                print("player:{:1d} | pos:{:2d} | LOSS: {:.4f} | Q_MAX: {:.4f} | Q_ACTION: {:.4f}".format(
                    playerID[i], action, loss, Q_max, Q_action))
                # result of executing the action
                terminal = env.isEnd()

        # Replay more aggressively in later epochs.
        for j in range(0, len(players)):
            if e > n_epochs*0.3:
                for k in range(25):
                    players[j].experience_replay()
            elif e > n_epochs*0.1:
                for k in range(5):
                    players[j].experience_replay()

        w = env.winner()
        print("EPOCH: {:03d}/{:03d} | WIN: player{:1d}".format(
            e, n_epochs, w))
        # Only the second-moving player's model (players[1]) is saved.
        if e%50 == 0:
            players[1].save_model(e)
| [
11748,
4866,
198,
198,
6738,
797,
690,
72,
1330,
797,
690,
72,
198,
6738,
288,
80,
77,
62,
25781,
1330,
360,
48,
4535,
6783,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
220,
220,
1303,
10007,
198,
... | 1.554111 | 2,153 |