hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b0fd1d21a57ae1f72f842d8b057cfcf5fa68da79 | 1,970 | py | Python | playground.py | BogyMitutoyoCTL/Riesen-Tetris-2 | af308d900ca7386c42d9debf85c53029588c9430 | [
"MIT"
] | 2 | 2019-09-15T18:22:22.000Z | 2019-12-30T15:30:19.000Z | playground.py | BogyMitutoyoCTL/Riesen-Tetris-2 | af308d900ca7386c42d9debf85c53029588c9430 | [
"MIT"
] | 12 | 2019-07-19T15:00:35.000Z | 2020-01-03T10:53:13.000Z | playground.py | BogyMitutoyoCTL/Riesen-Tetris-2 | af308d900ca7386c42d9debf85c53029588c9430 | [
"MIT"
] | null | null | null | import tetris_blocks
class Playground:
    """A 2D grid of RGB pixels onto which tetris blocks are drawn and erased."""

    def __init__(self, hight, width):
        """Create a ``hight`` x ``width`` grid initialised to black.

        Note: the parameter is spelled ``hight`` (sic); it is kept so
        existing keyword callers keep working.
        """
        self.height = hight
        self.width = width
        # list_pixel[y][x] holds one (r, g, b) tuple per cell.
        self.list_pixel = [[(0, 0, 0) for _ in range(self.width)]
                           for _ in range(self.height)]
        self.clear()

    def clear(self):
        """Reset every pixel to black (0, 0, 0)."""
        for y in range(self.height):
            for x in range(self.width):
                self.set_pixel(x, y, (0, 0, 0))

    def add_block(self, block: "tetris_blocks.Block", columns_right=0, lines_down=0):
        """Paint ``block`` onto the grid, offset by ``columns_right``/``lines_down``.

        Out-of-bounds pixels are skipped individually.  (The previous code
        ``break``-ed out of the row, silently dropping in-bounds pixels that
        happened to follow an out-of-bounds one, e.g. with a negative offset.)
        """
        # get_color() is evaluated once per call; the block's color is
        # presumably constant for the duration of a stamp.
        self._stamp(block, columns_right, lines_down, block.color.get_color())

    def block_clear(self, block: "tetris_blocks.Block", columns_right, lines_down):
        """Erase ``block`` from the grid by painting its pixels black."""
        self._stamp(block, columns_right, lines_down, (0, 0, 0))

    def _stamp(self, block, columns_right, lines_down, color):
        # Shared helper: write `color` at every occupied cell of `block`.
        field = block.get_field()
        for y_of_block in range(block.height):
            for x_of_block in range(block.width):
                if field[y_of_block][x_of_block] <= 0:
                    continue  # empty cell of the block's bounding box
                x = x_of_block + columns_right
                y = y_of_block + lines_down
                if not self.is_inside_field(x, y):
                    continue  # skip only this pixel, not the rest of the row
                self.set_pixel(x, y, color)

    def draw(self):
        """Print the grid to stdout for debugging.

        NOTE(review): iterates x in the outer loop, so the output is the
        transposed grid — kept as-is to preserve existing behaviour.
        """
        for x in range(self.width):
            for y in range(self.height):
                print(self.get_pixel(x, y), end=' ')
            print("")

    def is_inside_field(self, x, y):
        """Return True when (x, y) lies within the grid bounds."""
        return (0 <= x < self.width) and (0 <= y < self.height)

    def get_pixel(self, x, y):
        """Return the (r, g, b) tuple stored at (x, y)."""
        return self.list_pixel[y][x]

    def set_pixel(self, x, y, color):
        """Store ``color`` (an (r, g, b) tuple) at (x, y)."""
        self.list_pixel[y][x] = color
| 36.481481 | 112 | 0.565482 |
c520b5239e6c60457852c9a0cada3127f8fc9437 | 51 | py | Python | gaoqilai.py | 1378056564/dream | d6e9318b8d025bfd61d9a57a81687a999ecc19b9 | [
"Apache-2.0"
] | null | null | null | gaoqilai.py | 1378056564/dream | d6e9318b8d025bfd61d9a57a81687a999ecc19b9 | [
"Apache-2.0"
] | null | null | null | gaoqilai.py | 1378056564/dream | d6e9318b8d025bfd61d9a57a81687a999ecc19b9 | [
"Apache-2.0"
] | null | null | null | girls and boys
我们应该
开始搞点项目了
一起搞起来吧
如果不搞
我们一辈子都是人下人
| 7.285714 | 14 | 0.843137 |
2f4dcb6c02e1352ee04904504865dc2e684ce9b7 | 4,153 | py | Python | test/collection_client_test.py | agsimmons/bynder-python-sdk | 4ed23bbd159f1cb03dc9c94205a11ad137ca769a | [
"MIT"
] | 4 | 2019-03-29T12:14:38.000Z | 2020-11-14T03:32:28.000Z | test/collection_client_test.py | agsimmons/bynder-python-sdk | 4ed23bbd159f1cb03dc9c94205a11ad137ca769a | [
"MIT"
] | 12 | 2017-12-01T15:13:09.000Z | 2022-01-05T21:27:10.000Z | test/collection_client_test.py | agsimmons/bynder-python-sdk | 4ed23bbd159f1cb03dc9c94205a11ad137ca769a | [
"MIT"
] | 8 | 2017-11-02T19:16:54.000Z | 2022-02-08T10:35:58.000Z | import json
from unittest import mock, TestCase
from test import create_bynder_client
class CollectionClientTest(TestCase):
    """Unit tests for the collection client.

    The session's HTTP verbs are replaced with MagicMocks, so each test
    only verifies the endpoint and payload the client builds.
    """

    def setUp(self):
        self.bynder_client = create_bynder_client()
        self.collection_client = self.bynder_client.collection_client
        session = self.collection_client.session
        session.get = mock.MagicMock()
        session.post = mock.MagicMock()
        session.delete = mock.MagicMock()

    def tearDown(self):
        self.bynder_client = None
        self.collection_client = None

    def test_collections(self):
        """collections() issues a GET on the list endpoint with no params."""
        self.collection_client.collections()
        self.collection_client.session.get.assert_called_with(
            '/v4/collections/', params={})

    def test_collection_info(self):
        """collection_info() issues a GET on the single-collection endpoint."""
        self.collection_client.collection_info(collection_id=1111)
        self.collection_client.session.get.assert_called_with(
            '/v4/collections/1111/')

    def test_create_collection(self):
        """create_collection() POSTs the new collection's name."""
        self.collection_client.create_collection(name='Unit Test')
        self.collection_client.session.post.assert_called_with(
            '/v4/collections/', data={'name': 'Unit Test'})

    def test_delete_collection(self):
        """delete_collection() issues a DELETE on the collection endpoint."""
        self.collection_client.delete_collection(collection_id=1111)
        self.collection_client.session.delete.assert_called_with(
            '/v4/collections/1111/')

    def test_collection_media_ids(self):
        """collection_media_ids() GETs the collection's media listing."""
        self.collection_client.collection_media_ids(collection_id=1111)
        self.collection_client.session.get.assert_called_with(
            '/v4/collections/1111/media/')

    def test_add_media_to_collection(self):
        """add_media_to_collection() POSTs the media ids as a JSON payload."""
        ids = ['2222', '3333']
        self.collection_client.add_media_to_collection(
            collection_id=1111, media_ids=ids)
        self.collection_client.session.post.assert_called_with(
            '/v4/collections/1111/media/', data={'data': json.dumps(ids)})

    def test_remove_media_from_collection(self):
        """remove_media_from_collection() DELETEs with comma-joined ids."""
        ids = ['2222', '3333']
        self.collection_client.remove_media_from_collection(
            collection_id=1111, media_ids=ids)
        self.collection_client.session.delete.assert_called_with(
            '/v4/collections/1111/media/',
            params={'deleteIds': ','.join(str(i) for i in ids)})

    def test_share_collection(self):
        """share_collection() POSTs the sharing options and recipients."""
        self.collection_client.share_collection(
            collection_id=1111,
            collection_option='view',
            recipients=[]
        )
        self.collection_client.session.post.assert_called_with(
            '/v4/collections/1111/share/',
            data={'collectionOptions': 'view', 'recipients': ''})
| 37.080357 | 87 | 0.640982 |
ee4325866cfba8146619fa5601e11f135d4c3cbb | 1,371 | py | Python | src/csharp/CSharpTemplate.py | suryadesu/codenn | 4d2e1b75ac3b9debb37671b1d7d7216b7520fee2 | [
"MIT"
] | null | null | null | src/csharp/CSharpTemplate.py | suryadesu/codenn | 4d2e1b75ac3b9debb37671b1d7d7216b7520fee2 | [
"MIT"
] | null | null | null | src/csharp/CSharpTemplate.py | suryadesu/codenn | 4d2e1b75ac3b9debb37671b1d7d7216b7520fee2 | [
"MIT"
] | null | null | null |
import antlr4
from csharp.CSharp4Lexer import CSharp4Lexer
import re
def parseCSharp(code):
    """Tokenise a C# snippet into a list of token strings.

    Numeric/char/string literals are abstracted into placeholder tokens
    (CODE_INTEGER, CODE_REAL, CODE_CHAR, CODE_STRING); whitespace,
    newlines and comments are dropped; every other token is kept verbatim.
    Escaped ``\\n`` sequences in the input are turned into real newlines
    before lexing.
    """
    code = code.replace('\\n', '\n')

    # Token-type codes come from the generated CSharp4Lexer.
    literal_placeholders = {
        109: "CODE_INTEGER",
        111: "CODE_REAL",
        112: "CODE_CHAR",
        113: "CODE_STRING",
    }
    skipped_types = {6, 7, 9}  # newline, comments and whitespace

    stream = antlr4.InputStream(code)
    lexer = CSharp4Lexer(stream)
    toks = antlr4.CommonTokenStream(lexer)
    # NOTE(review): caps lexing at 500 tokens — confirm longer snippets
    # are not expected.
    toks.fetch(500)

    parsedVersion = []
    for token in toks.tokens:
        if token.type in literal_placeholders:
            parsedVersion.append(literal_placeholders[token.type])
        elif token.type in skipped_types:
            pass
        else:
            parsedVersion.append(str(token.text))

    return parsedVersion
if __name__ == '__main__':
    # Smoke-test the tokenizer on a few representative snippets.
    print(parseCSharp("public Boolean SomeValue { get { return someValue; } set { someValue = value; } }"))
    print(parseCSharp("Console.WriteLine('cat'); int mouse = 5; int cat = 0.4; int cow = 'c'; int moo = \"mouse\"; "))
    print(parseCSharp("int i = 4; // i is assigned the literal value of '4' \n int j = i // j is assigned the value of i. Since i is a variable, //it can change and is not a 'literal'"))
    try:
        # Input with a stray backtick; expected to fail lexing.
        print(parseCSharp('string `fixed = Regex.Replace(input, "\s*()","$1");'))
    except Exception:  # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
        print("Error")
| 32.642857 | 202 | 0.633115 |
300f201e02c3f8249bc3a51638a26ed250b6c543 | 2,820 | py | Python | plumbum/machines/base.py | octarinesec/plumbum | 9645cd3b2ebd92ab3b5f657c5068384d60f8fb71 | [
"MIT"
] | 1 | 2018-09-10T10:01:25.000Z | 2018-09-10T10:01:25.000Z | src/plumbum/machines/base.py | ownport/playbook | 6d3196ddf68f2c3c3efc4a52e26719c3e5596dca | [
"MIT"
] | null | null | null | src/plumbum/machines/base.py | ownport/playbook | 6d3196ddf68f2c3c3efc4a52e26719c3e5596dca | [
"MIT"
] | null | null | null | from plumbum.commands.processes import CommandNotFound
from plumbum.commands.processes import ProcessExecutionError
from plumbum.commands.processes import ProcessTimedOut
class PopenAddons(object):
    """Mixin adding a ``verify`` method to popen objects so that the
    correct command is attributed when an error is thrown."""

    def verify(self, retcode, timeout, stdout, stderr):
        """Raise if the process timed out or exited with an unexpected code.

        ``retcode`` may be None (accept anything), a single expected exit
        code, or a container of acceptable codes.
        """
        argv = getattr(self, "argv", None)
        if getattr(self, "_timed_out", False):
            raise ProcessTimedOut(
                "Process did not terminate within %s seconds" % (timeout,), argv)
        if retcode is None:
            return
        if hasattr(retcode, "__contains__"):
            acceptable = self.returncode in retcode
        else:
            acceptable = self.returncode == retcode
        if not acceptable:
            raise ProcessExecutionError(argv, self.returncode, stdout, stderr)
class BaseMachine(object):
    """Base class for other machines; holds behaviour common to every
    Plumbum machine flavour."""

    def get(self, cmd, *othercommands):
        """Like ``dict.get`` but with any number of fallbacks: each
        argument is tried as a command in turn and the first one that
        resolves to an existing executable is returned.

        Unlike ``[]`` access, a path argument whose file does not exist
        is rejected and the next candidate is tried.

        Usage::

            best_zip = local.get('pigz','gzip')
        """
        try:
            command = self[cmd]
            if not command.executable.exists():
                # Raised inside the try on purpose: a dangling path is
                # treated exactly like an unknown command so the fallback
                # chain below kicks in.
                raise CommandNotFound(cmd, command.executable)
            return command
        except CommandNotFound:
            if othercommands:
                return self.get(*othercommands)
            raise

    def __contains__(self, cmd):
        """Support ``"ls" in machine`` membership tests.

        ``cmd`` can be anything acceptable by ``__getitem__``.
        """
        try:
            self[cmd]
        except CommandNotFound:
            return False
        return True

    @property
    def encoding(self):
        """Wrapper around :attr:`custom_encoding`."""
        return self.custom_encoding

    @encoding.setter
    def encoding(self, value):
        self.custom_encoding = value

    def daemonic_popen(self, command, cwd="/", stdout=None, stderr=None, append=True):
        raise NotImplementedError("This is not implemented on this machine!")
| 36.623377 | 93 | 0.611348 |
d8ac3c19e595d869c0a8c36ff7b69a431a0b5e98 | 511 | py | Python | Python Programming/09. Collections Module/01-Counter.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | 1 | 2021-07-15T18:40:26.000Z | 2021-07-15T18:40:26.000Z | Python Programming/09. Collections Module/01-Counter.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | Python Programming/09. Collections Module/01-Counter.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | # A counter is a sub-class of the dictionary. It is used to keep
# the count of the elements in an iterable in the form of an unordered
# dictionary where the key represents the element in the iterable and
# value represents the count of that element in the iterable.
from collections import Counter

# Count occurrences in a plain list of items.
letters = ["B", "B", "A", "B", "C", "A", "B", "B", "A", "C"]
print(Counter(letters))

# A Counter can also be built straight from a mapping of counts...
print(Counter({"A": 3, "B": 5, "C": 2}))

# ...or from keyword arguments.
print(Counter(A=3, B=5, C=2))
| 30.058824 | 70 | 0.677104 |
9f47367c69dc5066280fb5ab8617693114889904 | 1,590 | py | Python | Python Files/barchart.py | r-jain8/Job-Cred-Meter | 2580bbdcd8cd2b3c53772dc4df588f111f2b29d8 | [
"MIT"
] | null | null | null | Python Files/barchart.py | r-jain8/Job-Cred-Meter | 2580bbdcd8cd2b3c53772dc4df588f111f2b29d8 | [
"MIT"
] | null | null | null | Python Files/barchart.py | r-jain8/Job-Cred-Meter | 2580bbdcd8cd2b3c53772dc4df588f111f2b29d8 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import requests
import json
import matplotlib.pyplot as plt
#data=pandas.read_csv('ham.csv',sep=';',na_values='.')
f=open("hamfeed.csv","r")
g=open("list.csv","r") #file with names and ids of pages and groups
list = {} #dictionary to store names and count of posts of any group or page in hamfeed file.
array={} #dictionary to store id and names of groups and pages
for line in f:
cells=line.split(";")
val=cells[0].split("_")
#print val[0]
if not val[0] in array:
'''base_url="https://graph.facebook.com/v2.7/186737671408393?fields=fan_count&access_token="+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
''' #ignore this commented part
for line in g:
name=line.split(";")
if val[0]==name[0]:
a=name[1]
array[val[0]]=a
list[a]=1
break
else:
list[array[val[0]]]+=1
#print list
d={}
count=0
for w in sorted(list, key=list.get, reverse=True):
if list[w]==25:
print w, list[w]
d[w]=list[w]
count+=1
else:
break
print count #displays the total no of such groups or pages
width=1/1.5
plt.bar(range(len(d)), d.values(), align='center', color="pink")
plt.xticks(range(len(d)), d.keys())
plt.xlabel('Groups or Pages')
plt.ylabel('Number of hams posted')
plt.title('Top 10 Most Efficient Facebook Groups or Pages')
plt.savefig("bar",dpi=150)
plt.show()
f.close()
g.close()
| 28.909091 | 113 | 0.618239 |
f3e6755d28064d0e98613def73e7b084c65f10cf | 3,965 | py | Python | tests/test_serializers.py | mikoblog/splitio-requests | c09b069e49f5224cbe3f892a5731c62885fd6151 | [
"MIT"
] | null | null | null | tests/test_serializers.py | mikoblog/splitio-requests | c09b069e49f5224cbe3f892a5731c62885fd6151 | [
"MIT"
] | 4 | 2020-11-22T12:07:45.000Z | 2021-03-26T23:44:12.000Z | tests/test_serializers.py | mikoblog/splitio-requests | c09b069e49f5224cbe3f892a5731c62885fd6151 | [
"MIT"
] | null | null | null | import pytest
import marshmallow
from splitiorequests.serializers import (
load_split, load_splits, load_split_definition, load_split_definitions,
load_environment, load_environments, load_workspaces, load_traffic_types,
load_tags, load_segment, load_segment_keys
)
class TestSerializers:
    """Checks that every serializer loader rejects unknown fields by
    default and tolerates them when ``unknown_handler='EXCLUDE'`` is used."""

    @staticmethod
    def _expect_rejection(loader, *args):
        # Default unknown-field handling must raise a validation error.
        with pytest.raises(marshmallow.exceptions.ValidationError):
            loader(*args)

    def test_load_split_exclude_unknown(self, splits):
        load_split(splits['objects'][0], unknown_handler='EXCLUDE')

    def test_load_split_raise_unknown(self, splits):
        self._expect_rejection(load_split, splits['objects'][0])

    def test_load_splits_exclude_unknown(self, splits):
        load_splits(splits, unknown_handler='EXCLUDE')

    def test_load_splits_raise_unknown(self, splits):
        self._expect_rejection(load_splits, splits)

    def test_load_split_definition_exclude_unknown(self, split_definitions):
        load_split_definition(split_definitions['objects'][0], unknown_handler='EXCLUDE')

    def test_load_split_definition_raise_unknown(self, split_definitions):
        self._expect_rejection(load_split_definition, split_definitions['objects'][0])

    def test_load_split_definitions_exclude_unknown(self, split_definitions):
        load_split_definitions(split_definitions, unknown_handler='EXCLUDE')

    def test_load_split_definitions_raise_unknown(self, split_definitions):
        self._expect_rejection(load_split_definitions, split_definitions)

    def test_load_environment_raise_unknown(self, environments):
        self._expect_rejection(load_environment, environments['environment']['with_unknown'])

    def test_load_environment_exclude_unknown(self, environments):
        load_environment(environments['environment']['with_unknown'], unknown_handler='EXCLUDE')

    def test_load_environments_raise_unknown(self, environments):
        self._expect_rejection(load_environments, environments['environments']['with_unknown'])

    def test_load_environments_exclude_unknown(self, environments):
        load_environments(environments['environments']['with_unknown'], unknown_handler='EXCLUDE')

    def test_load_workspaces_raise_unknown(self, workspaces):
        self._expect_rejection(load_workspaces, workspaces)

    def test_workspaces_exclude_unknown(self, workspaces):
        load_workspaces(workspaces, unknown_handler='EXCLUDE')

    def test_load_traffic_types_raise_unknown(self, traffic_types):
        self._expect_rejection(load_traffic_types, traffic_types['with_unknown'])

    def test_load_traffic_types_exclude_unknown(self, traffic_types):
        load_traffic_types(traffic_types['with_unknown'], unknown_handler='EXCLUDE')

    def test_load_tags_raise_unknown(self, tags):
        self._expect_rejection(load_tags, tags)

    def test_load_tags_exclude_unknown(self, tags):
        load_tags(tags, unknown_handler='EXCLUDE')

    def test_load_segment_raise_unknown(self, segments):
        self._expect_rejection(load_segment, segments['segment_create']['with_unknown'])

    def test_load_segment_exclude_unknown(self, segments):
        load_segment(segments['segment_create']['with_unknown'], unknown_handler='EXCLUDE')

    def test_load_segment_keys_raise_unknown(self, segments):
        self._expect_rejection(load_segment_keys, segments['create_segment_keys_with_unknown'])

    def test_load_segment_keys_exclude_unknown(self, segments):
        load_segment_keys(segments['create_segment_keys_with_unknown'], unknown_handler='EXCLUDE')
| 45.056818 | 98 | 0.768222 |
2db626ac8a2945d6b1a1709b8e87c14048e72085 | 1,057 | py | Python | app/core/migrations/0004_recipe.py | cdcasey/recipe-app-api | 3b9c54b01de82710a754741bcc74e0843149e6b1 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | cdcasey/recipe-app-api | 3b9c54b01de82710a754741bcc74e0843149e6b1 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | cdcasey/recipe-app-api | 3b9c54b01de82710a754741bcc74e0843149e6b1 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.8 on 2019-08-27 13:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Recipe model, linking the
    # previously created Tag and Ingredient models to an owning user.
    # Structure and field order are left exactly as generated.

    dependencies = [
        ('core', '0003_ingredient'),
    ]

    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                # Standard auto-incrementing surrogate primary key.
                ('id', models.AutoField(auto_created=True,
                                        primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                # Preparation time in whole minutes.
                ('time_minutes', models.IntegerField()),
                # Two decimal places, max_digits=5 → values up to 999.99.
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                # Optional external link to the recipe source.
                ('link', models.CharField(blank=True, max_length=255)),
                ('ingredients', models.ManyToManyField(to='core.Ingredient')),
                ('tags', models.ManyToManyField(to='core.Tag')),
                # Recipes are deleted together with their owning user.
                ('user', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE, to='core.User')),
            ],
        ),
    ]
| 35.233333 | 95 | 0.554399 |
9d1ad3f306d36e915ec0edf0fe4d8964ebbbce2c | 6,489 | py | Python | nats/aio/js/models/consumers.py | databasedav/nats.py | 75aa768f4e2aae87f7a2820e2a76aa7eeff71f46 | [
"Apache-2.0"
] | 1 | 2022-01-18T05:26:13.000Z | 2022-01-18T05:26:13.000Z | nats/aio/js/models/consumers.py | databasedav/nats.py | 75aa768f4e2aae87f7a2820e2a76aa7eeff71f46 | [
"Apache-2.0"
] | 1 | 2021-11-05T23:16:21.000Z | 2021-11-06T21:58:31.000Z | nats/aio/js/models/consumers.py | databasedav/nats.py | 75aa768f4e2aae87f7a2820e2a76aa7eeff71f46 | [
"Apache-2.0"
] | 1 | 2021-12-09T19:49:57.000Z | 2021-12-09T19:49:57.000Z | # Copyright 2021 - Guillaume Charbonnier
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import Enum
from typing import List, Optional
from .base import JetStreamResponse, parse_datetime
from .clusters import Cluster
class AckPolicy(str, Enum):
    """Policies defining how messages should be acknowledged.

    If an ack is required but is not received within the AckWait window,
    the message will be redelivered.

    References:
        * Consumers, AckPolicy - [NATS Docs](https://docs.nats.io/jetstream/concepts/consumers#ackpolicy)
    """

    # No acknowledgement is required for delivered messages.
    none = "none"
    # Acknowledging a message implicitly acks all messages before it.
    all = "all"
    # Every delivered message must be acknowledged individually.
    explicit = "explicit"
class DeliverPolicy(str, Enum):
    """When a consumer is first created, it can specify where in the stream
    it wants to start receiving messages.

    This is the DeliverPolicy, and this enumeration defines allowed values.

    References:
        * Consumers, DeliverPolicy/OptStartSeq/OptStartTime - [NATS Docs](https://docs.nats.io/jetstream/concepts/consumers#deliverpolicy-optstartseq-optstarttime)
    """

    # Start from the earliest available message in the stream.
    all = "all"
    # Start with the most recent message.
    last = "last"
    # Deliver only messages published after consumer creation.
    new = "new"
    # Start with the latest message for each filtered subject.
    last_per_subject = "last_per_subject"
    # Start at an explicit stream sequence (OptStartSeq).
    by_start_sequence = "by_start_sequence"
    # Start at an explicit timestamp (OptStartTime).
    by_start_time = "by_start_time"
class ReplayPolicy(str, Enum):
    """The replay policy applies when the DeliverPolicy is one of:

    * all
    * by_start_sequence
    * by_start_time

    since those deliver policies begin reading the stream at a position
    other than the end.

    References:
        * Consumers, ReplayPolicy - [NATS Docs](https://docs.nats.io/jetstream/concepts/consumers#replaypolicy)
    """

    # Replay stored messages as fast as possible.
    instant = "instant"
    # Replay messages at the rate they were originally received.
    original = "original"
@dataclass
class ConsumerConfig:
    """Consumer configuration.

    Defaults mirror the values in this file: deliver the last message,
    require an explicit ack per message, and replay instantly.

    References:
        * Consumers - [NATS Docs](https://docs.nats.io/jetstream/concepts/consumers)
    """

    # -- delivery start & grouping --
    deliver_policy: DeliverPolicy = DeliverPolicy.last
    deliver_group: Optional[str] = None
    # -- acknowledgement behaviour --
    ack_policy: AckPolicy = AckPolicy.explicit
    # Naming the consumer makes it durable (survives client restarts).
    durable_name: Optional[str] = None
    description: Optional[str] = None
    # Subject to push messages to; when unset the consumer is pull-based.
    deliver_subject: Optional[str] = None
    # Time to wait for an ack before redelivery — presumably nanoseconds,
    # matching `expires` in ConsumerGetNextRequest; TODO confirm units.
    ack_wait: Optional[int] = None
    max_deliver: Optional[int] = None
    # Only deliver messages whose subject matches this filter.
    filter_subject: Optional[str] = None
    replay_policy: ReplayPolicy = ReplayPolicy.instant
    sample_freq: Optional[str] = None
    rate_limit_bps: Optional[int] = None
    max_ack_pending: Optional[int] = None
    idle_heartbeat: Optional[int] = None
    flow_control: Optional[bool] = None
    max_waiting: Optional[int] = None
    # NOTE(review): NATS calls these opt_start_seq / opt_start_time — the
    # "ops_" spelling here may be a typo; confirm against the wire format
    # before renaming (field names are part of the public interface).
    ops_start_seq: Optional[int] = None
    ops_start_time: Optional[int] = None
    headers_only: Optional[bool] = None
@dataclass
class Delivered:
    """Position of the last message delivered by this consumer, expressed
    as both a consumer sequence and a stream sequence."""

    consumer_seq: int
    stream_seq: int
    last: Optional[datetime]

    def __post_init__(self):
        # The API may hand the timestamp over as an ISO string; normalise
        # it into a datetime.
        raw = self.last
        if isinstance(raw, str):
            self.last = parse_datetime(raw)
@dataclass
class AckFloor:
    """Highest contiguously acknowledged position, expressed as both a
    consumer sequence and a stream sequence."""

    consumer_seq: int
    stream_seq: int
    last: Optional[datetime]

    def __post_init__(self):
        # Timestamps may arrive as ISO strings from the wire; normalise.
        raw = self.last
        if isinstance(raw, str):
            self.last = parse_datetime(raw)
@dataclass
class Consumer:
    """Snapshot of a consumer: its configuration plus delivery state."""

    stream_name: str
    config: ConsumerConfig
    created: datetime
    delivered: Delivered
    ack_floor: AckFloor
    num_ack_pending: int
    num_redelivered: int
    num_waiting: int
    num_pending: int
    name: Optional[str] = None
    cluster: Optional[Cluster] = None
    push_bound: Optional[bool] = None

    def __post_init__(self):
        # API responses arrive as plain JSON; coerce nested payloads into
        # their typed counterparts.
        created, config, cluster = self.created, self.config, self.cluster
        if isinstance(created, str):
            self.created = parse_datetime(created)
        if isinstance(config, dict):
            self.config = ConsumerConfig(**config)
        if isinstance(cluster, dict):
            self.cluster = Cluster(**cluster)
@dataclass
class ConsumerCreateResponse(Consumer, JetStreamResponse):
    """Reply from `$JS.API.CONSUMER.CREATE.*.*` — a full consumer
    snapshot; no fields beyond the bases."""
    pass
@dataclass
class ConsumerInfoResponse(Consumer, JetStreamResponse):
    """Reply from `$JS.API.CONSUMER.INFO.*.*` — a full consumer
    snapshot; no fields beyond the bases."""
    pass
@dataclass
class ConsumerDeleteResponse(JetStreamResponse):
    """Reply from `$JS.API.CONSUMER.DELETE.*.*`"""
    # True when the consumer was removed.
    success: bool
@dataclass
class ConsumerListResponse(JetStreamResponse):
    """Reply from `$JS.API.CONSUMER.LIST.*`"""

    total: int
    offset: int
    limit: int
    consumers: List[Consumer] = field(default_factory=list)

    def __post_init__(self):
        # Normalise: a missing list becomes empty, and raw dict entries
        # are promoted to Consumer instances.
        raw = self.consumers or []
        self.consumers = [
            Consumer(**entry) if isinstance(entry, dict) else entry
            for entry in raw
        ]
@dataclass
class ConsumerNamesResponse(JetStreamResponse):
    """Reply from `$JS.API.CONSUMER.NAMES.*`"""
    # Pagination metadata for the full result set.
    total: int
    offset: int
    limit: int
    # Consumer names only (no configuration details).
    consumers: List[str] = field(default_factory=list)
@dataclass
class ConsumerCreateRequest:
    """Request options for `$JS.API.CONSUMER.CREATE.*.*`"""

    stream_name: str
    config: ConsumerConfig

    def __post_init__(self):
        # Accept a plain dict for `config` and promote it.
        cfg = self.config
        if isinstance(cfg, dict):
            self.config = ConsumerConfig(**cfg)
@dataclass
class ConsumerListRequest:
    """Request options for `$JS.API.CONSUMER.LIST.*`"""
    # Pagination offset into the consumer list.
    offset: Optional[int] = None
@dataclass
class ConsumerNamesRequest:
    """Request options for `$JS.API.CONSUMER.NAMES.*`"""
    # Pagination offset into the name list.
    offset: Optional[int] = None
    # Optional subject filter narrowing the returned names.
    subject: Optional[str] = None
@dataclass
class ConsumerGetNextRequest:
    """Request options for `$JS.API.CONSUMER.MSG.NEXT.*.*` (pull fetch)."""
    # How long the pull request stays open — presumably nanoseconds
    # (5000000000 ≈ 5s, matching NATS conventions); TODO confirm.
    expires: Optional[int] = 5000000000
    # Maximum number of messages to deliver for this request.
    batch: Optional[int] = 1
    # When True, return immediately even if no messages are available.
    no_wait: Optional[bool] = False
| 26.703704 | 163 | 0.693019 |
ce1ac0c8d74aa4b8e7d88c938845f7296a65bfb4 | 5,496 | py | Python | netapp/santricity/models/symbol/drive_temperature_data.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | netapp/santricity/models/symbol/drive_temperature_data.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | netapp/santricity/models/symbol/drive_temperature_data.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | # coding: utf-8
"""
DriveTemperatureData.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class DriveTemperatureData(object):
    """
    DriveTemperatureData - a model defined in Swagger.

    Pairs a drive reference with its temperature reading.

    NOTE: This class was auto generated by the swagger code generator
    program; the generated equality helpers returned None (an invalid
    result for __eq__/__ne__) and carried dead `self is None` checks —
    both fixed below. The `six.iteritems` call is replaced with the
    built-in dict iteration.
    """

    def __init__(self):
        """
        DriveTemperatureData - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> declared type; drives to_dict() iteration.
        self.swagger_types = {
            'drive_ref': 'str',  # (required parameter)
            'drive_temperature': 'DriveTemperature'
        }

        # Attribute name -> JSON key in the wire format.
        self.attribute_map = {
            'drive_ref': 'driveRef',  # (required parameter)
            'drive_temperature': 'driveTemperature'
        }

        self._drive_ref = None
        self._drive_temperature = None

    @property
    def drive_ref(self):
        """
        Gets the drive_ref of this DriveTemperatureData.
        A reference to the drive object.

        :return: The drive_ref of this DriveTemperatureData.
        :rtype: str
        :required/optional: required
        """
        return self._drive_ref

    @drive_ref.setter
    def drive_ref(self, drive_ref):
        """
        Sets the drive_ref of this DriveTemperatureData.
        A reference to the drive object.

        :param drive_ref: The drive_ref of this DriveTemperatureData.
        :type: str
        """
        self._drive_ref = drive_ref

    @property
    def drive_temperature(self):
        """
        Gets the drive_temperature of this DriveTemperatureData.
        Drive temperature data.

        :return: The drive_temperature of this DriveTemperatureData.
        :rtype: DriveTemperature
        :required/optional: required
        """
        return self._drive_temperature

    @drive_temperature.setter
    def drive_temperature(self, drive_temperature):
        """
        Sets the drive_temperature of this DriveTemperatureData.
        Drive temperature data.

        :param drive_temperature: The drive_temperature of this DriveTemperatureData.
        :type: DriveTemperature
        """
        self._drive_temperature = drive_temperature

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting
        nested models (anything exposing to_dict) and containers.
        """
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        # The generated `if self is None` guard was dead code: `self` can
        # never be None inside a bound method.
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # The generated code returned None when either side was None,
        # which is not a valid __eq__ result; return a real bool and
        # restrict comparison to the same model type.
        if not isinstance(other, DriveTemperatureData):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 36.157895 | 844 | 0.636645 |
1c90379c9d3a16a11c3c15d38029fcbcd803a14c | 8,219 | py | Python | backend/config_app/config_orgues/config_app_styles.py | co-demos/apiviz-backend | 8a86b92dce728e81c1c935427b890da590edd720 | [
"MIT"
] | 1 | 2019-12-17T22:31:00.000Z | 2019-12-17T22:31:00.000Z | backend/config_app/config_orgues/config_app_styles.py | co-demos/apiviz-backend | 8a86b92dce728e81c1c935427b890da590edd720 | [
"MIT"
] | 10 | 2019-05-28T19:57:28.000Z | 2021-06-01T23:46:00.000Z | backend/config_app/config_orgues/config_app_styles.py | co-demos/apiviz-backend | 8a86b92dce728e81c1c935427b890da590edd720 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from . import version, uuid_models
# Default style configuration for the "orgues" ApiViz front instance.
# Each entry describes one configurable style "field" (colors, typography,
# banners, default card images) plus the app version and the front UUID
# it belongs to. Values are consumed as-is by the config API.
default_app_styles_config = [

    ### TIERS LIEUX STYLES

    ### GLOBAL STYLES — application color set (hexadecimal values)
    {
        "field": "app_colors",
        "content": {
            # defaults
            "navbar-border-color": '#3D3A39',  # previously '#004494' / '#592d7b'
            "default_background_app": "#fafafa",
            "default_background_navbar": "#ffffff",
            # simili-Bulma palette
            'light': '#40529d',
            'dark': '#1b1b1b',
            'link': '#4b4d58',
            'link-hover': '#cd8500',  # previously '#004494' / '#592d7b'
            'primary': '#a0522d',     # previously '#004494' / '#592d7b'
            'info': '#8b5a2b',
            'success': '#80C2BD',
            'warning': '#f3bd80',
            'danger': '#d24745',
            'text-color': '#3D3A39',
            # extra colors (currently disabled)
            # "dark_blue": "#40529d",
            # "light_pink": "#e89db1",
            # "light_blue": "#a3b1d7",
            # "deep_blue": "#21295e",
        },
        "app_version": version,
        "help": u"Choose a set of colors (an hexa for example) for your ApiViz instance",
        "apiviz_front_uuid": uuid_models["uuid_orgues"],
        "is_default": True,
    },

    # Separate typography color set — kept for reference, currently disabled:
    # {
    #     "field": "app_typo_colors",
    #     "content": {
    #         "default_dark": "#000000",
    #         "default_light_dark": "#3D3A39",
    #         "default_invert": "#ffffff",
    #         ### SIMILI - BULMA
    #         'light': '#40529d',
    #         'dark': '#1b1b1b',
    #         'link': '#21295e',
    #         'link-hover': '#592d7b',
    #         'primary': '#592d7b',
    #         'info': '#40529d',
    #         'success': '#80C2BD',
    #         'warning': '#f3bd80',
    #         'danger': '#d24745',
    #         'text-color': '#3D3A39',
    #         ### EXTRA COLORS
    #         "dark_blue": "#40529d",
    #         "light_pink": "#e89db1",
    #         "light_blue": "#a3b1d7",
    #         "deep_blue": "#21295e",
    #     },
    #     "app_version": version,
    #     "help": u"Choose a set of colors for your typo for your ApiViz instance",
    #     "apiviz_front_uuid": uuid_models["uuid_orgues"],
    #     "is_default": True
    # },

    ### TYPOGRAPHY
    {
        "field": "app_typo",
        "content": {
            "titles": u"BonvenoCF-Light",  # TODO: replace with Barow
            "textes": u"NEXA SANS",
        },
        "url": "",
        "app_version": version,
        "help": u"Choose a typo for your ApiViz instance",
        "apiviz_front_uuid": uuid_models["uuid_orgues"],
        "is_default": True,
    },

    ### BANNERS SET (shown between navbar and filters, one per dataset)
    {
        "field": "app_banners",
        "locale": "fr",
        "app_version": version,
        "help": u"The dataset banners for your ApiViz instance (between navbar and filter)",
        "banners_set": [
            {
                "banner_uri": "apiviz_default",
                "dataset_uri": None,
                "template_url": "",
                "is_dynamic": False,
                "dynamic_template": 'DynamicBanner',
                "is_visible": False,
                "is_dismisible": True,
                "is_disapearing": False,
                "disapearing_timeout": 5,  ## in seconds
            },
            {
                "banner_uri": "banner-cis-carto",
                "dataset_uri": "cis-carto",
                "template_url": "",
                "is_dynamic": False,
                "dynamic_template": 'DynamicBanner',
                "is_visible": False,
                "is_dismisible": True,
                "is_disapearing": False,
                "disapearing_timeout": 5,  ## in seconds
            },
            {
                "banner_uri": "banner-cis-xp",
                "dataset_uri": "cis-xp",
                "template_url": "",
                "is_dynamic": False,
                "dynamic_template": 'DynamicBanner',
                "is_visible": False,
                "is_dismisible": True,
                "is_disapearing": False,
                "disapearing_timeout": 5,  ## in seconds
            },
        ],
        "apiviz_front_uuid": uuid_models["uuid_orgues"],
        "is_default": True,
    },

    ### DEFAULT IMAGES FOR DATASETS SEARCH
    # 16 placeholder card illustrations (medium_fiche_1 .. medium_fiche_16),
    # generated here instead of being written out longhand.
    {
        "field": "app_search_default_images_sets",
        "app_version": version,
        "help": u"The default images sets for the cards for each dataset",
        "images_sets": [
            {
                "dataset_uri": "recherche",
                "images_set": [
                    {
                        "dft_text": "img_%s" % i,
                        "src_image": "https://raw.githubusercontent.com/co-demos/cis-data/master/illustrations/textures/medium_fiche_%s.png?raw=true" % i,
                        "credits": "Élise Lalique",
                        "licence": "",
                    }
                    for i in range(1, 17)
                ],
            }
        ],
        "apiviz_front_uuid": uuid_models["uuid_orgues"],
        "is_default": True,
    },
]
| 49.215569 | 211 | 0.536318 |
523140c2701c94abce66f35bef2fd1657501ef4e | 3,020 | py | Python | fiddles/python/fiddle-0008-WikipediaScrapy/wikiSpider/wikiSpider/settings.py | oneorthomedical/house | 03bc23075f4d7a18362f596f96fabddcb237af30 | [
"MIT"
] | 48 | 2016-01-06T14:34:26.000Z | 2021-07-14T16:10:36.000Z | fiddles/python/fiddle-0008-WikipediaScrapy/wikiSpider/wikiSpider/settings.py | oneorthomedical/house | 03bc23075f4d7a18362f596f96fabddcb237af30 | [
"MIT"
] | 311 | 2016-05-28T12:35:22.000Z | 2022-03-25T14:57:06.000Z | fiddles/python/fiddle-0008-WikipediaScrapy/wikiSpider/wikiSpider/settings.py | oneorthomedical/house | 03bc23075f4d7a18362f596f96fabddcb237af30 | [
"MIT"
] | 33 | 2015-11-07T06:39:17.000Z | 2020-12-22T18:59:14.000Z | # -*- coding: utf-8 -*-
# Scrapy settings for wikiSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Scrapy bot identity (used in logs and the default User-Agent).
BOT_NAME = 'wikiSpider'
# Where Scrapy discovers existing spiders / where `genspider` creates new ones.
SPIDER_MODULES = ['wikiSpider.spiders']
NEWSPIDER_MODULE = 'wikiSpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wikiSpider (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'wikiSpider.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'wikiSpider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'wikiSpider.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.116279 | 109 | 0.786093 |
1b11266edf5ee39349fc6669c41a7b32cf726f28 | 14,802 | py | Python | contrib/python/generate-manifest-2.6.py | andreimironenko/arago-oe-dev | 4e1175f61be9d0dd68873e4847fe531e5f93b52a | [
"MIT"
] | 70 | 2015-02-23T04:18:51.000Z | 2022-03-15T02:01:27.000Z | contrib/python/generate-manifest-2.6.py | buglabs/oe-buglabs | b8a4c4b1358214cd3ac1cf6f85154e9c62b16ce7 | [
"MIT"
] | 1 | 2020-09-07T15:33:56.000Z | 2020-09-07T15:33:56.000Z | contrib/python/generate-manifest-2.6.py | buglabs/oe-buglabs | b8a4c4b1358214cd3ac1cf6f85154e9c62b16ce7 | [
"MIT"
] | 88 | 2015-02-11T12:03:16.000Z | 2022-03-30T11:33:42.000Z | #!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
import os
import sys
import time
# Python version this manifest is generated for; only the major.minor part
# (VERSION[:3]) is used, to build the ${libdir}/pythonX.Y/ target prefix.
VERSION = "2.6.6"
__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20110222"
class MakefileMaker:
    """Accumulates Python sub-package definitions and emits the manifest
    (bitbake-style variable assignments: PROVIDES, PACKAGES, DESCRIPTION_*,
    RDEPENDS_*, FILES_*) to the given output stream."""
    def __init__( self, outfile ):
        """initialize"""
        self.packages = {}  # name -> (description, dependencies, [full file paths])
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )
    #
    # helper functions
    #
    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )
    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix
    def doProlog( self ):
        """emit the (currently empty) manifest prolog"""
        self.out( """ """ )
        self.out( "" )
    def addPackage( self, name, description, dependencies, filenames ):
        """add a package to the Makefile

        `filenames` may be a whitespace-separated string or a list; entries
        not starting with "$" (a bitbake variable such as ${bindir}) are
        rooted under the python libdir prefix."""
        if type( filenames ) == type( "" ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames
    def doBody( self ):
        """generate body of Makefile

        Cleanups vs. the previous version (output is byte-identical):
        - removed the dead `dirset` computation (built, never read)
        - removed the unused `global VERSION` declaration
        - dict.iteritems() -> dict.items() (same result, Python-3 safe)
        """
        #
        # generate provides line
        #
        provideLine = 'PROVIDES+="'
        for name in sorted(self.packages):
            provideLine += "%s " % name
        provideLine += '"'
        self.out( provideLine )
        self.out( "" )
        #
        # generate package line (core-dbg always first, modules always last)
        #
        packageLine = 'PACKAGES="${PN}-core-dbg '
        for name in sorted(self.packages):
            if name != '${PN}-core-dbg':
                packageLine += "%s " % name
        packageLine += '${PN}-modules"'
        self.out( packageLine )
        self.out( "" )
        #
        # generate per-package description, dependency and file variables
        #
        for name, data in sorted(self.packages.items()):
            desc, deps, files = data
            self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
            line = 'FILES_%s="' % name
            for target in files:
                line += "%s " % target
            line += '"'
            self.out( line )
            self.out( "" )
        #
        # the meta package pulling in everything except core-dbg and dev
        #
        self.out( 'DESCRIPTION_${PN}-modules="All Python modules"' )
        line = 'RDEPENDS_${PN}-modules="'
        for name, data in sorted(self.packages.items()):
            if name not in ['${PN}-core-dbg', '${PN}-dev']:
                line += "%s " % name
        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
    def doEpilog( self ):
        """emit the (currently empty) manifest epilog"""
        self.out( """""" )
        self.out( "" )
    def make( self ):
        """write the complete manifest: prolog, body, epilog"""
        self.doProlog()
        self.doBody()
        self.doEpilog()
if __name__ == "__main__":
    if len( sys.argv ) > 1:
        # Remove a stale manifest before writing. This replaces the old
        # `os.popen("rm -f ./%s" % sys.argv[1])`, which shelled out
        # asynchronously (racing the open() below) and broke on paths
        # containing shell metacharacters.
        if os.path.exists( sys.argv[1] ):
            os.remove( sys.argv[1] )
        # open() instead of the Python-2-only file() builtin.
        outfile = open( sys.argv[1], "w" )
    else:
        outfile = sys.stdout
    m = MakefileMaker( outfile )
    # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
    # Parameters: revision, name, description, dependencies, filenames
    #
    m.addPackage( "${PN}-core", "Python Interpreter and core modules (needed!)", "",
                  "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
                  "genericpath.* getopt.* linecache.* new.* " +
                  "os.* posixpath.* struct.* " +
                  "warnings.* site.* stat.* " +
                  "UserDict.* UserList.* UserString.* " +
                  "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
                  "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python*" )
    m.addPackage( "${PN}-core-dbg", "Python core module debug information", "${PN}-core",
                  "config/.debug lib-dynload/.debug ${bindir}/.debug ${libdir}/.debug" )
    m.addPackage( "${PN}-dev", "Python Development Package", "${PN}-core",
                  "${includedir} ${libdir}/libpython2.6.so" ) # package
    m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
                  "${bindir}/idle idlelib" ) # package
    m.addPackage( "${PN}-pydoc", "Python Interactive Help Support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
                  "${bindir}/pydoc pydoc.*" )
    m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
                  "${bindir}/smtpd.*" )
    m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
                  "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )
    m.addPackage( "${PN}-bsddb", "Python Berkeley Database Bindings", "${PN}-core",
                  "bsddb lib-dynload/_bsddb.so" ) # package
    m.addPackage( "${PN}-codecs", "Python Codecs, Encodings & i18n Support", "${PN}-core ${PN}-lang",
                  "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
    m.addPackage( "${PN}-compile", "Python Bytecode Compilation Support", "${PN}-core",
                  "py_compile.* compileall.*" )
    m.addPackage( "${PN}-compiler", "Python Compiler Support", "${PN}-core",
                  "compiler" ) # package
    m.addPackage( "${PN}-compression", "Python High Level Compression Support", "${PN}-core ${PN}-zlib",
                  "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
    m.addPackage( "${PN}-crypt", "Python Basic Cryptographic and Hashing Support", "${PN}-core",
                  "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
    m.addPackage( "${PN}-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
                  "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
    m.addPackage( "${PN}-curses", "Python Curses Support", "${PN}-core",
                  "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
    m.addPackage( "${PN}-ctypes", "Python C Types Support", "${PN}-core",
                  "ctypes lib-dynload/_ctypes.so" ) # directory + low level module
    m.addPackage( "${PN}-datetime", "Python Calendar and Time support", "${PN}-core ${PN}-codecs",
                  "_strptime.* calendar.* lib-dynload/datetime.so" )
    m.addPackage( "${PN}-db", "Python File-Based Database Support", "${PN}-core",
                  "anydbm.* dumbdbm.* whichdb.* " )
    m.addPackage( "${PN}-debugger", "Python Debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
                  "bdb.* pdb.*" )
    m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects.", "${PN}-lang ${PN}-re",
                  "difflib.*" )
    m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
                  "config distutils" ) # package
    m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings.", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
                  "doctest.*" )
    # FIXME consider adding to some higher level package
    m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
                  "lib-dynload/_elementtree.so" )
    m.addPackage( "${PN}-email", "Python Email Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
                  "imaplib.* email" ) # package
    m.addPackage( "${PN}-fcntl", "Python's fcntl Interface", "${PN}-core",
                  "lib-dynload/fcntl.so" )
    m.addPackage( "${PN}-hotshot", "Python Hotshot Profiler", "${PN}-core",
                  "hotshot lib-dynload/_hotshot.so" )
    m.addPackage( "${PN}-html", "Python HTML Processing", "${PN}-core",
                  "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )
    m.addPackage( "${PN}-gdbm", "Python GNU Database Support", "${PN}-core",
                  "lib-dynload/gdbm.so" )
    m.addPackage( "${PN}-image", "Python Graphical Image Handling", "${PN}-core",
                  "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
    m.addPackage( "${PN}-io", "Python Low-Level I/O", "${PN}-core ${PN}-math",
                  "lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
                  "pipes.* socket.* ssl.* tempfile.* StringIO.* " )
    m.addPackage( "${PN}-json", "Python JSON Support", "${PN}-core ${PN}-math ${PN}-re",
                  "json" ) # package
    m.addPackage( "${PN}-lang", "Python Low-Level Language Support", "${PN}-core",
                  "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
                  "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
                  "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
                  "tokenize.* traceback.* weakref.*" )
    m.addPackage( "${PN}-logging", "Python Logging Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
                  "logging" ) # package
    # NOTE: ${PN}-mailbox is registered exactly once here; a second,
    # identical addPackage() call that used to sit just before m.make()
    # was redundant (it silently overwrote this entry) and was removed.
    m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
                  "mailbox.*" )
    m.addPackage( "${PN}-math", "Python Math Support", "${PN}-core",
                  "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
    m.addPackage( "${PN}-mime", "Python MIME Handling APIs", "${PN}-core ${PN}-io",
                  "mimetools.* uu.* quopri.* rfc822.*" )
    m.addPackage( "${PN}-mmap", "Python Memory-Mapped-File Support", "${PN}-core ${PN}-io",
                  "lib-dynload/mmap.so " )
    m.addPackage( "${PN}-multiprocessing", "Python Multiprocessing Support", "${PN}-core ${PN}-io ${PN}-lang",
                  "lib-dynload/_multiprocessing.so multiprocessing" ) # package
    m.addPackage( "${PN}-netclient", "Python Internet Protocol Clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
                  "*Cookie*.* " +
                  "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
    m.addPackage( "${PN}-netserver", "Python Internet Protocol Servers", "${PN}-core ${PN}-netclient",
                  "cgi.* *HTTPServer.* SocketServer.*" )
    m.addPackage( "${PN}-numbers", "Python Number APIs", "${PN}-core ${PN}-lang ${PN}-re",
                  "decimal.* numbers.*" )
    m.addPackage( "${PN}-pickle", "Python Persistence Support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
                  "pickle.* shelve.* lib-dynload/cPickle.so" )
    m.addPackage( "${PN}-pkgutil", "Python Package Extension Utility Support", "${PN}-core",
                  "pkgutil.*")
    m.addPackage( "${PN}-pprint", "Python Pretty-Print Support", "${PN}-core",
                  "pprint.*" )
    m.addPackage( "${PN}-profile", "Python Basic Profiling Support", "${PN}-core ${PN}-textutils",
                  "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
    m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
                  "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
    m.addPackage( "${PN}-readline", "Python Readline Support", "${PN}-core",
                  "lib-dynload/readline.so rlcompleter.*" )
    m.addPackage( "${PN}-resource", "Python Resource Control Interface", "${PN}-core",
                  "lib-dynload/resource.so" )
    m.addPackage( "${PN}-shell", "Python Shell-Like Functionality", "${PN}-core ${PN}-re",
                  "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
    m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
                  "robotparser.*")
    m.addPackage( "${PN}-subprocess", "Python Subprocess Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
                  "subprocess.*" )
    m.addPackage( "${PN}-sqlite3", "Python Sqlite3 Database Support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
                  "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
    m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 Database Support Tests", "${PN}-core ${PN}-sqlite3",
                  "sqlite3/test" )
    m.addPackage( "${PN}-stringold", "Python String APIs [deprecated]", "${PN}-core ${PN}-re",
                  "lib-dynload/strop.so string.*" )
    m.addPackage( "${PN}-syslog", "Python Syslog Interface", "${PN}-core",
                  "lib-dynload/syslog.so" )
    m.addPackage( "${PN}-terminal", "Python Terminal Controlling Support", "${PN}-core ${PN}-io",
                  "pty.* tty.*" )
    m.addPackage( "${PN}-tests", "Python Tests", "${PN}-core",
                  "test" ) # package
    m.addPackage( "${PN}-threading", "Python Threading & Synchronization Support", "${PN}-core ${PN}-lang",
                  "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
    m.addPackage( "${PN}-tkinter", "Python Tcl/Tk Bindings", "${PN}-core",
                  "lib-dynload/_tkinter.so lib-tk" ) # package
    m.addPackage( "${PN}-unittest", "Python Unit Testing Framework", "${PN}-core ${PN}-stringold ${PN}-lang",
                  "unittest.*" )
    m.addPackage( "${PN}-unixadmin", "Python Unix Administration Support", "${PN}-core",
                  "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
    m.addPackage( "${PN}-xml", "Python basic XML support.", "${PN}-core ${PN}-elementtree ${PN}-re",
                  "lib-dynload/pyexpat.so xml xmllib.*" ) # package
    m.addPackage( "${PN}-xmlrpc", "Python XMLRPC Support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
                  "xmlrpclib.* SimpleXMLRPCServer.*" )
    m.addPackage( "${PN}-zlib", "Python zlib Support.", "${PN}-core",
                  "lib-dynload/zlib.so" )
    m.make()
| 40.77686 | 177 | 0.601878 |
81864df9ab09674f67a9e3928f090e5ca1c4adfc | 776 | py | Python | src/rozbieznosci_if/migrations/0002_auto_20210323_0106.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/rozbieznosci_if/migrations/0002_auto_20210323_0106.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/rozbieznosci_if/migrations/0002_auto_20210323_0106.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.0.11 on 2021-03-23 00:06
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Applies on top of the app's initial schema migration.
    dependencies = [
        ("rozbieznosci_if", "0001_initial"),
    ]
    operations = [
        # Set the human-readable (Polish) verbose name shown in the admin.
        migrations.AlterModelOptions(
            name="ignorujrozbieznoscif",
            options={"verbose_name": "ignorowanie rozbieżności IF"},
        ),
        # Add a creation timestamp. The fixed datetime only back-fills rows
        # existing at migration time; preserve_default=False drops it from
        # the final field definition afterwards (standard Django pattern).
        migrations.AddField(
            model_name="ignorujrozbieznoscif",
            name="created_on",
            field=models.DateTimeField(
                auto_created=True,
                default=datetime.datetime(2021, 3, 23, 0, 6, 43, 174113, tzinfo=utc),
            ),
            preserve_default=False,
        ),
    ]
| 25.032258 | 85 | 0.597938 |
8f3823c806fcd786f07fc0cbf6643ffb5e8cf4b7 | 11,565 | py | Python | src/freedreno/vulkan/vk_format_parse.py | thermasol/mesa3d | 6f1bc4e7edfface197ef281cffdf399b5389e24a | [
"MIT"
] | null | null | null | src/freedreno/vulkan/vk_format_parse.py | thermasol/mesa3d | 6f1bc4e7edfface197ef281cffdf399b5389e24a | [
"MIT"
] | null | null | null | src/freedreno/vulkan/vk_format_parse.py | thermasol/mesa3d | 6f1bc4e7edfface197ef281cffdf399b5389e24a | [
"MIT"
] | null | null | null |
'''
/**************************************************************************
*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
'''
# Channel data types (small-int enumeration).
VOID, UNSIGNED, SIGNED, FIXED, FLOAT = range(5)
# Swizzle sources: the four channels, constant 0, constant 1, or "no channel".
SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_0, SWIZZLE_1, SWIZZLE_NONE, = range(7)
# Layout / colorspace tags.
PLAIN = 'plain'
SCALED = 'scaled'
RGB = 'rgb'
SRGB = 'srgb'
YUV = 'yuv'
ZS = 'zs'
def is_pot(x):
    '''Return True if x is a positive power of two.

    Fix: the previous version classified 0 as a power of two, because
    0 & (0 - 1) == 0; the x > 0 guard closes that edge case.'''
    return x > 0 and (x & (x - 1)) == 0
# Sentinel magnitude used as the "maximum" of float channels.
VERY_LARGE = 99999999999999999999999
class Channel:
    '''Describe the channel of a color channel.'''
    def __init__(self, type, norm, pure, scaled, size, name = ''):
        self.type = type        # one of VOID/UNSIGNED/SIGNED/FIXED/FLOAT
        self.norm = norm        # normalized to [0, 1] (or [-1, 1] if signed)
        self.pure = pure        # pure integer channel
        self.size = size        # width in bits
        self.scaled = scaled    # integer scaled to float range
        self.sign = type in (SIGNED, FIXED, FLOAT)
        self.name = name
    def __str__(self):
        # Compact encoding: type index + optional n/p/s flags + bit width.
        s = str(self.type)
        if self.norm:
            s += 'n'
        if self.pure:
            s += 'p'
        if self.scaled:
            s += 's'
        s += str(self.size)
        return s
    def __eq__(self, other):
        # name is deliberately ignored: only the value encoding matters.
        return (other is not None and
                self.type == other.type and
                self.norm == other.norm and
                self.pure == other.pure and
                self.size == other.size and
                self.scaled == other.scaled)
    def max(self):
        '''Maximum representable number.'''
        if self.type == FLOAT:
            return VERY_LARGE
        if self.type == FIXED:
            # Half the bits hold the integer part. `//` keeps the shift
            # amount an int on Python 3 (plain `/` was Python-2-only here).
            return (1 << (self.size // 2)) - 1
        if self.norm:
            return 1
        if self.type == UNSIGNED:
            return (1 << self.size) - 1
        if self.type == SIGNED:
            return (1 << (self.size - 1)) - 1
        assert False
    def min(self):
        '''Minimum representable number.'''
        if self.type == FLOAT:
            return -VERY_LARGE
        if self.type == FIXED:
            return -(1 << (self.size // 2))
        if self.type == UNSIGNED:
            return 0
        if self.norm:
            return -1
        if self.type == SIGNED:
            return -(1 << (self.size - 1))
        assert False
class Format:
    '''Describe a pixel format.'''
    def __init__(self, name, layout, block_width, block_height, le_channels, le_swizzles, be_channels, be_swizzles, colorspace):
        # (the original assigned self.name twice; the duplicate was removed)
        self.name = name
        self.layout = layout
        self.block_width = block_width
        self.block_height = block_height
        self.le_channels = le_channels      # little-endian Channel list
        self.le_swizzles = le_swizzles
        self.be_channels = be_channels      # big-endian Channel list
        self.be_swizzles = be_swizzles
        self.colorspace = colorspace
    def __str__(self):
        return self.name
    def short_name(self):
        '''Make up a short norm for a format, suitable to be used as suffix in
        function names.'''
        name = self.name
        if name.startswith('VK_FORMAT_'):
            name = name[len('VK_FORMAT_'):]
        name = name.lower()
        return name
    def block_size(self):
        '''Bits per block: sum of the little-endian channel widths.'''
        size = 0
        for channel in self.le_channels:
            size += channel.size
        return size
    def nr_channels(self):
        '''Number of channels with a non-zero bit width.'''
        nr_channels = 0
        for channel in self.le_channels:
            if channel.size:
                nr_channels += 1
        return nr_channels
    def array_element(self):
        '''Return the representative Channel if this is a plain array format
        (all non-void channels share type/norm/pure/scaled and a whole-byte
        size); otherwise return None.'''
        if self.layout != PLAIN:
            return None
        ref_channel = self.le_channels[0]
        if ref_channel.type == VOID:
            ref_channel = self.le_channels[1]
        for channel in self.le_channels:
            if channel.size and (channel.size != ref_channel.size or channel.size % 8):
                return None
            if channel.type != VOID:
                if channel.type != ref_channel.type:
                    return None
                if channel.norm != ref_channel.norm:
                    return None
                if channel.pure != ref_channel.pure:
                    return None
                if channel.scaled != ref_channel.scaled:
                    return None
        return ref_channel
    def is_array(self):
        # `is not None` instead of the original `!= None` identity-unsafe test.
        return self.array_element() is not None
    def is_mixed(self):
        '''True if the non-void channels disagree on type/norm/pure/scaled.'''
        if self.layout != PLAIN:
            return False
        ref_channel = self.le_channels[0]
        if ref_channel.type == VOID:
            ref_channel = self.le_channels[1]
        for channel in self.le_channels[1:]:
            if channel.type != VOID:
                if channel.type != ref_channel.type:
                    return True
                if channel.norm != ref_channel.norm:
                    return True
                if channel.pure != ref_channel.pure:
                    return True
                if channel.scaled != ref_channel.scaled:
                    return True
        return False
    def is_pot(self):
        # resolves to the module-level is_pot() helper
        return is_pot(self.block_size())
    def is_int(self):
        if self.layout != PLAIN:
            return False
        for channel in self.le_channels:
            if channel.type not in (VOID, UNSIGNED, SIGNED):
                return False
        return True
    def is_float(self):
        if self.layout != PLAIN:
            return False
        for channel in self.le_channels:
            if channel.type not in (VOID, FLOAT):
                return False
        return True
    def is_bitmask(self):
        '''True for integer formats that fit a single 8/16/32-bit word.'''
        if self.layout != PLAIN:
            return False
        if self.block_size() not in (8, 16, 32):
            return False
        for channel in self.le_channels:
            if channel.type not in (VOID, UNSIGNED, SIGNED):
                return False
        return True
    def is_pure_color(self):
        if self.layout != PLAIN or self.colorspace == ZS:
            return False
        pures = [channel.pure
                 for channel in self.le_channels
                 if channel.type != VOID]
        for x in pures:
            # all non-void channels must agree on pureness
            assert x == pures[0]
        return pures[0]
    def channel_type(self):
        '''Common type of the non-void channels (asserts they all agree).'''
        types = [channel.type
                 for channel in self.le_channels
                 if channel.type != VOID]
        for x in types:
            assert x == types[0]
        return types[0]
    def is_pure_signed(self):
        return self.is_pure_color() and self.channel_type() == SIGNED
    def is_pure_unsigned(self):
        return self.is_pure_color() and self.channel_type() == UNSIGNED
    def has_channel(self, id):
        return self.le_swizzles[id] != SWIZZLE_NONE
    def has_depth(self):
        return self.colorspace == ZS and self.has_channel(0)
    def has_stencil(self):
        return self.colorspace == ZS and self.has_channel(1)
    def stride(self):
        # bytes per block; `//` keeps this an int on Python 3
        return self.block_size() // 8
# Maps the leading character of a channel field in the CSV (e.g. the 'u'
# in 'un8') to its channel-type constant. '' and 'x' both denote void
# padding.
_type_parse_map = {
    '': VOID,
    'x': VOID,
    'u': UNSIGNED,
    's': SIGNED,
    'h': FIXED,
    'f': FLOAT,
}
# Maps one swizzle character from the CSV swizzle column to its swizzle
# constant: x/y/z/w select a channel, 0/1 are constants, '_' means the
# slot is unused.
_swizzle_parse_map = {
    'x': SWIZZLE_X,
    'y': SWIZZLE_Y,
    'z': SWIZZLE_Z,
    'w': SWIZZLE_W,
    '0': SWIZZLE_0,
    '1': SWIZZLE_1,
    '_': SWIZZLE_NONE,
}
def _parse_channels(fields, layout, colorspace, swizzles):
    '''Build the four Channel objects described by the CSV channel fields.

    *fields* are the four raw channel strings (e.g. 'un8'); *swizzles* are
    the already-parsed swizzle constants, used to derive channel names for
    plain layouts.
    '''
    if layout == PLAIN:
        # Derive per-channel names (r/g/b/a or z/s) from the swizzles.
        names = [''] * 4
        if colorspace in (RGB, SRGB):
            letters = 'rgba'
        elif colorspace == ZS:
            letters = 'zs'
        else:
            assert False
        for i in range(4):
            swizzle = swizzles[i]
            if swizzle < 4:
                names[swizzle] += letters[i]
        # Unreferenced channels get the generic 'x' name.
        for i in range(4):
            if not names[i]:
                names[i] = 'x'
    else:
        # Non-plain (e.g. compressed) layouts use generic component names.
        names = ['x', 'y', 'z', 'w']
    channels = []
    for i in range(4):
        field = fields[i]
        if not field:
            # Empty field: a zero-sized void channel.
            channels.append(Channel(VOID, False, False, False, 0, names[i]))
            continue
        kind = _type_parse_map[field[0]]
        # Optional modifier after the type letter: 'n' normalized,
        # 'p' pure integer, 's' scaled; otherwise the size starts here.
        modifier = field[1]
        norm = modifier == 'n'
        pure = modifier == 'p'
        scaled = modifier == 's'
        if norm or pure or scaled:
            size = int(field[2:])
        else:
            size = int(field[1:])
        channels.append(Channel(kind, norm, pure, scaled, size, names[i]))
    return channels
def parse(filename):
    '''Parse the format description in CSV format in terms of the
    Channel and Format classes above.

    Lines may carry '#' comments; blank lines and lines with fewer than
    10 fields are skipped. Returns a list of Format objects.
    '''
    formats = []
    # Context manager guarantees the file is closed even on a parse
    # error (the previous version leaked the open stream).
    with open(filename) as stream:
        for line in stream:
            # Strip trailing comments.
            try:
                comment = line.index('#')
            except ValueError:
                pass
            else:
                line = line[:comment]
            line = line.strip()
            if not line:
                continue
            fields = [field.strip() for field in line.split(',')]
            if len(fields) < 10:
                continue
            if len(fields) == 10:
                # Big-endian description omitted: mirror the little-endian
                # channel fields and swizzles.
                fields += fields[4:9]
            assert len(fields) == 15
            name = fields[0]
            layout = fields[1]
            block_width, block_height = map(int, fields[2:4])
            colorspace = fields[9]
            le_swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[8]]
            le_channels = _parse_channels(fields[4:8], layout, colorspace, le_swizzles)
            be_swizzles = [_swizzle_parse_map[swizzle] for swizzle in fields[14]]
            be_channels = _parse_channels(fields[10:14], layout, colorspace, be_swizzles)
            # Assign bit offsets: little-endian packs from the first
            # channel upward, big-endian from the last channel upward.
            le_shift = 0
            for channel in le_channels:
                channel.shift = le_shift
                le_shift += channel.size
            be_shift = 0
            for channel in be_channels[3::-1]:
                channel.shift = be_shift
                be_shift += channel.size
            assert le_shift == be_shift
            # Both endiannesses must populate the same channel slots.
            for i in range(4):
                assert (le_swizzles[i] != SWIZZLE_NONE) == (be_swizzles[i] != SWIZZLE_NONE)
            # Renamed from `format` to avoid shadowing the builtin.
            fmt = Format(name, layout, block_width, block_height, le_channels,
                         le_swizzles, be_channels, be_swizzles, colorspace)
            formats.append(fmt)
    return formats
| 29.730077 | 128 | 0.540164 |
01ac1d9a69ae76073b229968481418cfb5b669e9 | 2,717 | py | Python | qa/rpc-tests/mempool_spendcoinbase.py | itgoldcoins/itgoldcoins | 2373f5877d8294063ac89ff106d7fe2347b2f5cb | [
"MIT"
] | 1 | 2019-04-25T07:56:04.000Z | 2019-04-25T07:56:04.000Z | qa/rpc-tests/mempool_spendcoinbase.py | aron10/itgoldcoins | 2373f5877d8294063ac89ff106d7fe2347b2f5cb | [
"MIT"
] | null | null | null | qa/rpc-tests/mempool_spendcoinbase.py | aron10/itgoldcoins | 2373f5877d8294063ac89ff106d7fe2347b2f5cb | [
"MIT"
] | 1 | 2019-04-25T07:56:08.000Z | 2019-04-25T07:56:08.000Z | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Itgoldcoins Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import ItgoldcoinsTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(ItgoldcoinsTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
# is too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| 40.552239 | 91 | 0.688995 |
b2ed13bb4ce1b590cffe5cf1f8023cf1a551359f | 4,565 | py | Python | setup.py | fair-data-austria/invenio-rdm-records | e4bb79e4d2bdaf3d57041db0e43aafb342d56179 | [
"MIT"
] | null | null | null | setup.py | fair-data-austria/invenio-rdm-records | e4bb79e4d2bdaf3d57041db0e43aafb342d56179 | [
"MIT"
] | null | null | null | setup.py | fair-data-austria/invenio-rdm-records | e4bb79e4d2bdaf3d57041db0e43aafb342d56179 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
# Copyright (C) 2019-2020 Northwestern University.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""DataCite-based data model for Invenio."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
tests_require = [
'pytest-invenio>=1.4.0,<2.0.0',
'invenio-app>=1.3.0,<2.0.0'
]
# Should follow inveniosoftware/invenio versions
invenio_db_version = '>=1.0.4,<2.0.0'
invenio_search_version = '>=1.4.0,<2.0.0'
extras_require = {
'docs': [
'Sphinx>=3',
],
# Elasticsearch version
'elasticsearch6': [
'invenio-search[elasticsearch6]{}'.format(invenio_search_version),
],
'elasticsearch7': [
'invenio-search[elasticsearch7]{}'.format(invenio_search_version),
],
# Databases
'mysql': [
'invenio-db[mysql,versioning]{}'.format(invenio_db_version),
],
'postgresql': [
'invenio-db[postgresql,versioning]{}'.format(invenio_db_version),
],
'sqlite': [
'invenio-db[versioning]{}'.format(invenio_db_version),
],
'tests': tests_require,
}
extras_require['all'] = []
for name, reqs in extras_require.items():
if name[0] == ':' or name in ('elasticsearch6', 'elasticsearch7',
'mysql', 'postgresql', 'sqlite'):
continue
extras_require['all'].extend(reqs)
setup_requires = [
'Babel>=1.3',
'pytest-runner>=3.0.0,<5',
]
install_requires = [
'arrow>=0.17.0',
'CairoSVG>=1.0.20',
'Faker>=2.0.3',
'ftfy>=4.4.3,<5.0.0',
'idutils>=1.1.7',
'invenio-assets>=1.2.2,<1.3.0',
'invenio-communities>=2.1.1,<3.0.0',
'invenio-drafts-resources>=0.5.0,<0.6.0',
'invenio-formatter[badges]>=1.1.0a1,<2.0.0',
'invenio-i18n>=1.2.0',
'invenio-records>=1.4.0a4,<2.0.0',
'invenio-records-files>=1.2.1,<2.0.0',
'invenio-records-ui>=1.2.0a1,<2.0.0',
'invenio-previewer>=1.2.1,<2.0.0',
# until fix in invenio-previewer is released
'nbconvert[execute]>=4.1.0,<6.0.0',
'pytz>=2020.4',
# TODO: Get from invenio-base
'six>=1.12.0' # Needed to pass CI tests
]
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_rdm_records', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-rdm-records',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio data model',
license='MIT',
author='CERN',
author_email='info@inveniosoftware.org',
url='https://github.com/inveniosoftware/invenio-rdm-records',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'flask.commands': [
'rdm-records = invenio_rdm_records.cli:rdm_records',
],
'invenio_assets.webpack': [
'invenio_rdm_records_theme = invenio_rdm_records.theme.webpack:theme',
],
'invenio_base.apps': [
'invenio_rdm_records = invenio_rdm_records:InvenioRDMRecords',
],
'invenio_base.api_apps': [
'invenio_rdm_records = invenio_rdm_records:InvenioRDMRecords',
],
'invenio_base.blueprints': [
'invenio_rdm_records = invenio_rdm_records.theme.views:blueprint',
],
'invenio_i18n.translations': [
'messages = invenio_rdm_records',
],
'invenio_jsonschemas.schemas': [
'invenio_rdm_records = invenio_rdm_records.records.jsonschemas',
],
'invenio_search.mappings': [
'rdmrecords = invenio_rdm_records.records.mappings',
],
},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Development Status :: 3 - Alpha',
],
)
| 30.231788 | 82 | 0.617963 |
bcedf09913b157fc3455a2e389b241cd2edb8bfa | 1,227 | py | Python | source/assets/projectile.py | HugoPFe/Project-Asteroids | 7a58ba00283216e83f02b2f58cf1944e9e217433 | [
"MIT"
] | null | null | null | source/assets/projectile.py | HugoPFe/Project-Asteroids | 7a58ba00283216e83f02b2f58cf1944e9e217433 | [
"MIT"
] | 4 | 2021-06-20T21:32:53.000Z | 2021-08-12T11:12:17.000Z | source/assets/projectile.py | HugoPFe/Project-Asteroids | 7a58ba00283216e83f02b2f58cf1944e9e217433 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import *
from math import cos, sin, radians
class Projectile(pygame.sprite.Sprite):
def __init__(self, x_pos, y_pos, angle, screen, level_rules):
pygame.sprite.Sprite.__init__(self)
self.rules = level_rules
self.speed = self.rules['speed']
self.damage = self.rules['damage_single']
self.screen = screen
self.angle = radians(angle)
self.x_pos = x_pos + cos(angle) * 2
self.y_pos = y_pos - sin(angle) * 2
self.image = pygame.Surface((7, 3), SRCALPHA)
if self.damage == self.rules['damage_single']:
self.image.fill('white')
else:
self.image.fill('red')
self.copy_img = self.image.copy()
self.image = pygame.transform.rotate(self.copy_img, angle)
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect(center=(self.x_pos, self.y_pos))
def update(self):
self.rect.centerx += cos(self.angle) * self.speed
self.rect.centery -= sin(self.angle) * self.speed
self.speed += 1
if not self.screen.get_rect().colliderect(self.rect):
self.kill()
__all__ = ['Projectile']
| 26.673913 | 72 | 0.615322 |
47d0a46d230375d40c2fc2dbd498bc6f2297ecf6 | 88 | py | Python | utils/nn/modules/__init__.py | roshanr11/Research-DCST | 225461e6ffd7ca5a48b9688946eb36b2d98f358e | [
"MIT"
] | 5 | 2020-04-29T08:48:53.000Z | 2020-12-23T10:11:39.000Z | utils/nn/modules/__init__.py | roshanr11/Research-DCST | 225461e6ffd7ca5a48b9688946eb36b2d98f358e | [
"MIT"
] | 2 | 2020-01-11T08:31:06.000Z | 2021-06-09T12:41:32.000Z | utils/nn/modules/__init__.py | roshanr11/Research-DCST | 225461e6ffd7ca5a48b9688946eb36b2d98f358e | [
"MIT"
] | 5 | 2019-11-20T02:49:03.000Z | 2020-09-17T15:27:34.000Z | from .crf import *
from .sparse import *
from .attention import *
from .linear import *
| 17.6 | 24 | 0.727273 |
a57aae50acccb08330c9107fe40a469c326622eb | 3,155 | py | Python | sdk/python/pulumi_aws/ec2/get_instances.py | lemonade-hq/pulumi-aws | 9ee22c65c7bad42d38b16879ccd56526d856a01a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/get_instances.py | lemonade-hq/pulumi-aws | 9ee22c65c7bad42d38b16879ccd56526d856a01a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/get_instances.py | lemonade-hq/pulumi-aws | 9ee22c65c7bad42d38b16879ccd56526d856a01a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-03-08T15:05:29.000Z | 2021-03-08T15:05:29.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class GetInstancesResult:
"""
A collection of values returned by getInstances.
"""
def __init__(__self__, ids=None, instance_tags=None, private_ips=None, public_ips=None, id=None):
if ids and not isinstance(ids, list):
raise TypeError('Expected argument ids to be a list')
__self__.ids = ids
"""
IDs of instances found through the filter
"""
if instance_tags and not isinstance(instance_tags, dict):
raise TypeError('Expected argument instance_tags to be a dict')
__self__.instance_tags = instance_tags
if private_ips and not isinstance(private_ips, list):
raise TypeError('Expected argument private_ips to be a list')
__self__.private_ips = private_ips
"""
Private IP addresses of instances found through the filter
"""
if public_ips and not isinstance(public_ips, list):
raise TypeError('Expected argument public_ips to be a list')
__self__.public_ips = public_ips
"""
Public IP addresses of instances found through the filter
"""
if id and not isinstance(id, str):
raise TypeError('Expected argument id to be a str')
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_instances(filters=None,instance_state_names=None,instance_tags=None,opts=None):
"""
Use this data source to get IDs or IPs of Amazon EC2 instances to be referenced elsewhere,
e.g. to allow easier migration from another management solution
or to make it easier for an operator to connect through bastion host(s).
> **Note:** It's a best practice to expose instance details via [outputs](https://www.terraform.io/docs/configuration/outputs.html)
and [remote state](https://www.terraform.io/docs/state/remote.html) and
**use [`terraform_remote_state`](https://www.terraform.io/docs/providers/terraform/d/remote_state.html)
data source instead** if you manage referenced instances via Terraform.
> **Note:** It's strongly discouraged to use this data source for querying ephemeral
instances (e.g. managed via autoscaling group), as the output may change at any time
and you'd need to re-run `apply` every time an instance comes up or dies.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['instanceStateNames'] = instance_state_names
__args__['instanceTags'] = instance_tags
__ret__ = await pulumi.runtime.invoke('aws:ec2/getInstances:getInstances', __args__, opts=opts)
return GetInstancesResult(
ids=__ret__.get('ids'),
instance_tags=__ret__.get('instanceTags'),
private_ips=__ret__.get('privateIps'),
public_ips=__ret__.get('publicIps'),
id=__ret__.get('id'))
| 43.819444 | 135 | 0.684628 |
721580c23f092c725d2c719314961754e4f035eb | 522 | py | Python | config.py | kangalah/News-Api | ded938d380f0291dc4bce84399d02fe7dc3c5ceb | [
"Unlicense"
] | null | null | null | config.py | kangalah/News-Api | ded938d380f0291dc4bce84399d02fe7dc3c5ceb | [
"Unlicense"
] | null | null | null | config.py | kangalah/News-Api | ded938d380f0291dc4bce84399d02fe7dc3c5ceb | [
"Unlicense"
] | null | null | null | import os
class Config:
NEWS_SOURCES_BASE_URL ='https://newsapi.org/v2/sources?language=en&apiKey=267d7fe5843144d9b8f75ea889ea51f7'
ARTICLES_BASE_URL = 'https://newsapi.org/v2/top-headlines?sources=bbc-news&apiKey=267d7fe5843144d9b8f75ea889ea51f7'
NEWS_API_KEY = os.environ.get('NEWS_API_KEY')
@staticmethod
def init_app(app):
pass
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig
}
| 21.75 | 119 | 0.735632 |
0679c43c7a3fe9d82ff5bcde67e3b6db2f97b570 | 375 | py | Python | pysyncgateway/__init__.py | constructpm/pysyncgateway | 653db702b2f872e18fa15ab41920276ffc07aa45 | [
"Apache-2.0"
] | 2 | 2018-04-04T17:13:25.000Z | 2018-07-21T13:30:42.000Z | pysyncgateway/__init__.py | constructpm/pysyncgateway | 653db702b2f872e18fa15ab41920276ffc07aa45 | [
"Apache-2.0"
] | 14 | 2018-03-22T11:35:28.000Z | 2021-11-12T17:46:54.000Z | pysyncgateway/__init__.py | constructpm/pysyncgateway | 653db702b2f872e18fa15ab41920276ffc07aa45 | [
"Apache-2.0"
] | 1 | 2018-06-15T13:37:00.000Z | 2018-06-15T13:37:00.000Z | from .admin_client import AdminClient
from .database import Database
from .document import Document
from .query import Query
from .session import Session
from .stats import Stats
from .user import User
from .user_client import UserClient
__all__ = [
"AdminClient",
"Database",
"Document",
"Query",
"Session",
"Stats",
"User",
"UserClient",
]
| 18.75 | 37 | 0.701333 |
5e539a9ce4d14ed465c6d31a78c535730590df36 | 2,607 | py | Python | examples/tdm/ant_position_and_velocity.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 56 | 2019-10-20T03:09:02.000Z | 2022-03-25T09:21:40.000Z | examples/tdm/ant_position_and_velocity.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 3 | 2020-10-01T07:33:51.000Z | 2021-05-12T03:40:57.000Z | examples/tdm/ant_position_and_velocity.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | [
"MIT"
] | 10 | 2019-11-04T16:56:09.000Z | 2022-03-25T09:21:41.000Z | import rlkit.torch.pytorch_util as ptu
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.torch.modules import HuberLoss
from rlkit.torch.tdm.envs.ant_env import GoalXYPosAndVelAnt
from rlkit.torch.tdm.her_replay_buffer import HerReplayBuffer
from rlkit.torch.tdm.networks import TdmNormalizer, TdmQf, TdmPolicy
from rlkit.torch.tdm.tdm import TemporalDifferenceModel
def experiment(variant):
env = NormalizedBoxEnv(GoalXYPosAndVelAnt(
goal_dim_weights=[0.1, 0.1, 0.9, 0.9],
speed_weight=None,
))
max_tau = variant['tdm_kwargs']['max_tau']
# Normalizer isn't used unless you set num_pretrain_paths > 0
tdm_normalizer = TdmNormalizer(
env,
vectorized=True,
max_tau=max_tau,
)
qf = TdmQf(
env=env,
vectorized=True,
norm_order=1,
tdm_normalizer=tdm_normalizer,
hidden_sizes=[300, 300],
)
policy = TdmPolicy(
env=env,
tdm_normalizer=tdm_normalizer,
hidden_sizes=[300, 300],
)
es = OUStrategy(
action_space=env.action_space,
theta=0.1,
max_sigma=0.1,
min_sigma=0.1,
)
exploration_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=es,
policy=policy,
)
replay_buffer = HerReplayBuffer(
env=env,
max_size=int(1E6),
)
algorithm = TemporalDifferenceModel(
env,
qf=qf,
replay_buffer=replay_buffer,
policy=policy,
exploration_policy=exploration_policy,
qf_criterion=HuberLoss(),
tdm_normalizer=tdm_normalizer,
**variant['tdm_kwargs']
)
if ptu.gpu_enabled():
algorithm.cuda()
algorithm.train()
if __name__ == "__main__":
variant = dict(
tdm_kwargs=dict(
# TDM parameters
max_tau=32,
num_pretrain_paths=0,
# General parameters
num_epochs=100,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
max_path_length=99,
num_updates_per_env_step=10,
batch_size=128,
discount=1,
reward_scale=1000,
# DDPG soft-target tau (not TDM tau)
tau=0.001,
),
algorithm="TDM",
)
setup_logger('name-of-tdm-ant-pos-and-vel-experiment', variant=variant)
experiment(variant)
| 28.648352 | 75 | 0.644802 |
bdf5b78a838a2c4211d3c518cedb903c75d02baa | 856 | py | Python | PyGitUp/tests/test_fast_forwarded.py | hugovk/PyGitUp | c3da6c8db8628c1e23da22fe2fcbf5d96c6a7a44 | [
"MIT"
] | 431 | 2015-01-01T15:33:40.000Z | 2022-03-02T22:41:06.000Z | PyGitUp/tests/test_fast_forwarded.py | hugovk/PyGitUp | c3da6c8db8628c1e23da22fe2fcbf5d96c6a7a44 | [
"MIT"
] | 86 | 2015-01-12T14:41:06.000Z | 2021-10-02T14:30:09.000Z | PyGitUp/tests/test_fast_forwarded.py | hugovk/PyGitUp | c3da6c8db8628c1e23da22fe2fcbf5d96c6a7a44 | [
"MIT"
] | 38 | 2015-01-20T09:46:07.000Z | 2021-10-12T02:06:31.000Z | # System imports
import os
from os.path import join
from git import *
from PyGitUp.tests import basepath, init_master, update_file
test_name = 'fast-forwarded'
repo_path = join(basepath, test_name + os.sep)
def setup():
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
assert repo.working_dir == path
# Modify file in master
update_file(master, test_name)
def test_fast_forwarded():
""" Run 'git up' with result: fast-forwarding """
os.chdir(repo_path)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
gitup.run()
assert len(gitup.states) == 1
assert gitup.states[0] == 'fast-forwarding'
| 21.4 | 60 | 0.695093 |
71249e2c4deaf856bcc1a8fe49bef144821f5a77 | 1,110 | py | Python | notesapp/notes/migrations/0001_initial.py | KrishangSaharia/Notes-App | f246f068904b1145e5ef0280db9e0c5cfe5627f5 | [
"MIT"
] | null | null | null | notesapp/notes/migrations/0001_initial.py | KrishangSaharia/Notes-App | f246f068904b1145e5ef0280db9e0c5cfe5627f5 | [
"MIT"
] | null | null | null | notesapp/notes/migrations/0001_initial.py | KrishangSaharia/Notes-App | f246f068904b1145e5ef0280db9e0c5cfe5627f5 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-10-21 07:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20)),
('password', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(verbose_name='date published')),
('note', models.CharField(max_length=500)),
('title', models.CharField(max_length=50)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='notes.user')),
],
),
]
| 32.647059 | 114 | 0.574775 |
0b13a18eb5e3225758fbe9505a741acc8e371d99 | 14,585 | py | Python | store/neo_wrapper.py | drorgarti/SatoriLab | 6e57bad2c01d6ee8baa97f915abc001bed974785 | [
"MIT"
] | 1 | 2019-06-12T09:02:34.000Z | 2019-06-12T09:02:34.000Z | store/neo_wrapper.py | drorgarti/SatoriLab | 6e57bad2c01d6ee8baa97f915abc001bed974785 | [
"MIT"
] | null | null | null | store/neo_wrapper.py | drorgarti/SatoriLab | 6e57bad2c01d6ee8baa97f915abc001bed974785 | [
"MIT"
] | null | null | null | import logging
from utils.acurerate_utils import AcureRateUtils
from enrichment.enrichment_service_config import EnrichmentServiceConfig
from entities.acurerate_attributes import P, C, G
from entities. acurerate_job import AcureRateJob
from neo4j.v1 import GraphDatabase, basic_auth, CypherError
class NeoWrapper(object):
DB_HOST = "bolt://localhost"
DB_PORT = 7687
USERNAME = "neo4j"
PASSWORD = "0internet1"
driver = None
logger = None
def __init__(self):
pass
# @staticmethod
# def set_logger(logger):
# NeoWrapper.logger = logger
@staticmethod
def connect():
if NeoWrapper.driver is not None:
return
NeoWrapper.logger = logging.getLogger(EnrichmentServiceConfig.LOGGER_NAME)
if not NeoWrapper.logger:
print('*** No logger set. Aborting connect action. ***')
return
url = '%s:%s' % (NeoWrapper.DB_HOST, NeoWrapper.DB_PORT)
NeoWrapper.driver = GraphDatabase.driver(url, auth=basic_auth(NeoWrapper.USERNAME, NeoWrapper.PASSWORD))
NeoWrapper.logger.info('Connected to Neo4J database!')
@staticmethod
def set_person(person):
NeoWrapper.set_person_properties(person)
NeoWrapper.set_person_relationships(person)
pass
@staticmethod
def set_company(company):
NeoWrapper.set_company_properties(company)
NeoWrapper.set_company_relationships(company)
pass
@staticmethod
def get_property_by_type(property_name, property_value, kv_separator):
if property_name is None or property_value is None:
return None
if isinstance(property_value, str):
the_string = property_value
if "'" in the_string:
the_string = the_string.replace("'", "")
s = "%s%s'%s'" % (property_name, kv_separator, the_string)
elif isinstance(property_value, bool):
s = "%s%s%s" % (property_name, kv_separator, str(property_value).lower())
elif isinstance(property_value, int):
s = "%s%s%s" % (property_name, kv_separator, int(property_value))
elif isinstance(property_value, list):
clean_list = [item for item in property_value if item is not None]
s = "%s%s%s" % (property_name, kv_separator, clean_list)
elif isinstance(property_value, set):
clean_list = [item for item in property_value if item is not None]
s = "%s%s%s" % (property_name, kv_separator, clean_list)
else:
s = "something else"
return s
@staticmethod
def _calc_strength(x):
strength = 0
for i in range(1, 5):
k = 'r%d.strength' % i
if k in x and x[k]:
strength += x[k]
return strength
@staticmethod
def get_paths_to_person(source_id, target_id):
paths = []
# Get session for query
session = NeoWrapper.driver.session()
# Run query to locate possible direct connection to target
# Source:Person -[CONTACT]-> Target:Person
cql_r_str = "MATCH (s:Person)-[r1]->(t:Person) " + \
"WHERE s.aid = '%s' AND t.aid = '%s' " % (source_id, target_id) + \
"RETURN s.name, type(r1), r1.strength, t.name"
statement_res = session.run(cql_r_str)
# Iterate over results and construct paths from segments
for x in statement_res:
segments = [
[x['s.name'], x['type(r1)'], x['t.name']]
]
paths.append((NeoWrapper._calc_strength(x), segments))
# Run query to locate possible connection to target via same company
# Source:Person -[]-> Company <-[]- Target:Person
cql_r_str = "MATCH (s:Person)-[r1]->(c:Company)<-[r2]-(t:Person) " + \
"WHERE s.aid = '%s' AND t.aid = '%s' " % (source_id, target_id) + \
"RETURN s.name, type(r1), r1.strength, c.name, type(r2), r2.strength, t.name"
statement_res = session.run(cql_r_str)
# Iterate over results and construct paths from segments
for x in statement_res:
segments = [
[x['s.name'], x['type(r1)'], x['c.name']],
[x['t.name'], x['type(r2)'], x['c.name']]
]
paths.append((NeoWrapper._calc_strength(x), segments))
# Run query to locate the referrals through direct contacts
# Source:Person -[CONTACT]-> Referral:Person -[CONTACT]-> Target:Person -[R]-> Company
cql_r_str = "MATCH (s:Person)-[r1]->(r:Person), " + \
" (r:Person)-[r2]->(t:Person) " + \
"WHERE s.aid = '%s' AND t.aid = '%s' " % (source_id, target_id) + \
"RETURN s.name, type(r1), r1.strength, r.name, type(r2), r2.strength, t.name"
statement_res = session.run(cql_r_str)
# Iterate over results and construct paths from segments
for x in statement_res:
segments = [
[x['s.name'], x['type(r1)'], x['r.name']],
[x['r.name'], x['type(r2)'], x['t.name']]
]
paths.append((NeoWrapper._calc_strength(x), segments))
session.close()
# (a) Sort by score and (b) Strip the score
sorted_paths = sorted(paths, key=lambda tup: tup[0], reverse=True)
final_paths = [path for score, path in sorted_paths]
return final_paths
@staticmethod
def get_paths_to_company(source_id, target_id, seniority=None, area=None):
paths = []
# Get session for query
session = NeoWrapper.driver.session()
# Run query to locate the referrals through direct contacts
# Source:Person -[CONTACT]-> Referral:Person -[CONTACT]-> Target:Person -[R]-> Company
condition_line_1 = ' AND "%s" in r3.jobs_seniorites' % seniority if seniority else ''
condition_line_2 = ' AND "%s" in r3.jobs_areas' % area if area else ''
cql_r_str = "MATCH (s:Person)-[r1]->(r:Person), " + \
" (r:Person)-[r2]->(t:Person), " + \
" (t:Person)-[r3]->(c:Company) " + \
"WHERE s.aid = '%s' AND c.aid = '%s' " % (source_id, target_id) + \
"%s%s" % (condition_line_1, condition_line_2) + \
"RETURN s.name, type(r1), r1.strength, r.name, type(r2), r2.strength, t.name, type(r3), r3.strength, c.name"
statement_res = session.run(cql_r_str)
# Iterate over results and construct paths from segments
for x in statement_res:
segments = [
[x['s.name'], x['type(r1)'], x['r.name']],
[x['r.name'], x['type(r2)'], x['t.name']],
[x['t.name'], x['type(r3)'], x['c.name']]
]
paths.append((NeoWrapper._calc_strength(x), segments))
# Run query to locate paths with referrals that worked with another person in the same company
# Source:Person -[CONTACT]-> Referral:Person -[R1]-> Company-1 <- Target:Person <-[R2] Company
cql_r_str = "MATCH (fp:Person)-[r1:CONTACT]-(rp:Person), " + \
" (rp:Person)-[r2:EMPLOYEE_OF]->(md:Company), " + \
" (tp:Person)-[r3]-(md:Company), " + \
" (tp:Person)-[r4]-(tc:Company) " + \
"WHERE fp.aid = '%s' AND tc.aid = '%s' " % (source_id, target_id) + \
"RETURN fp.name, type(r1), r1.strength, rp.name, type(r2), r2.strength, md.name, tp.name, type(r3), r3.strength, type(r4), r4.strength, tc.name"
statement_res = session.run(cql_r_str)
# Iterate over results and construct paths from segments
for x in statement_res:
segments = [
[x['fp.name'], x['type(r1)'], x['rp.name']],
[x['rp.name'], x['type(r2)'], x['md.name']],
[x['tp.name'], x['type(r3)'], x['md.name']],
[x['tp.name'], x['type(r4)'], x['tc.name']]
]
scored_segment = (NeoWrapper._calc_strength(x), segments)
paths.append(scored_segment)
# Source:Person -[R1]-> Company <-[R2]- Referral:Person -[R1]-> Company-1 <- Target:Person <-[R2] Company
session.close()
# (a) Sort by score and (b) Strip the score
sorted_paths = sorted(paths, key=lambda tup: tup[0], reverse=True)
final_paths = [path for score, path in sorted_paths]
return final_paths
@staticmethod
def set_person_properties(person):
# Sanity check to make sure there's aid
if not person.aid:
NeoWrapper.logger.warning('No aid for person %s. Not migrated.' % person)
return
session = NeoWrapper.driver.session()
try:
properties = person.get_properties()
labels = person.get_labels()
# Add 'name' for the Neo4J Browser :-)
if P.FULL_NAME in person.deduced:
properties['name'] = person.deduced[P.FULL_NAME].replace("'", "")
# Add 'aid' for the Neo4J to be able to relate back to Mongo
properties['aid'] = person.aid
# Set the node with properties and labels
# TODO: if node exists, we may want to remove all previous labels, because query will not remove them
cql_str = 'MERGE (n:Person {aid: "%s"}) ' % person.aid + \
'SET n = {props}, n:%s' % ':'.join(labels)
statement_res = session.run(cql_str, {'props': properties})
NeoWrapper.logger.info('Migrated person %s successfully! Details: %s' %
(person.deduced[P.FULL_NAME],
AcureRateUtils.obj2string(statement_res.consume().counters)))
except Exception as e:
NeoWrapper.logger.error('Migration of person node %s failed. Exception raised: %s' %
(person.deduced[P.FULL_NAME] if person else 'none', e))
finally:
session.close()
pass
@staticmethod
def set_person_relationships(pivot_person):
from store.store import Store
session = NeoWrapper.driver.session()
relations = pivot_person.get_relations()
try:
for source_aid, relation_type, target_aid, relation_properties in relations:
cql_r_str = "MATCH (source),(target) " + \
"WHERE source.aid = '%s' AND target.aid = '%s' " % (source_aid, target_aid) + \
"MERGE (source)-[r:%s{%s}]->(target)" % (relation_type, relation_properties)
statement_res = session.run(cql_r_str)
# TODO: inspect statement_res and add it to log
NeoWrapper.logger.info('Created person %s relation %s-[%s]-%s succesfully! Details: %s',
pivot_person.deduced[P.FULL_NAME], source_aid, relation_type, target_aid,
AcureRateUtils.obj2string(statement_res.consume().counters))
pass
except Exception as e:
NeoWrapper.logger.error('Migrated relations of person %s failed. Exception raised: %s' %
(pivot_person.deduced[P.FULL_NAME] if pivot_person else 'none', e))
finally:
session.close()
pass
@staticmethod
def set_company_properties(company):
# Sanity check to make sure there's aid
if not company.aid:
NeoWrapper.logger.warning('No aid for company %s. Not migrated.' % company)
return
# TODO: TEMP TEMP!! We ignore all those who were initially inserted only by CB Excel
if len(company.data_sources) == 1 and 'CrunchBase2014' in company.data_sources:
return
session = NeoWrapper.driver.session()
try:
properties = company.get_properties()
labels = company.get_labels()
# Add 'aid' for the Neo4J to be able to relate back to Mongo
properties['aid'] = company.aid
# Set the node with properties and labels
# TODO: if node exists, we may want to remove all previous labels, because query will not remove them
cql_str = 'MERGE (n:Company {aid: "%s"}) ' % company.aid + \
'SET n = {props}, n:%s' % ':'.join(labels)
statement_res = session.run(cql_str, {'props': properties})
NeoWrapper.logger.info('Migrated company %s successfully! Details: %s' %
(company.deduced[C.NAME],
AcureRateUtils.obj2string(statement_res.consume().counters)))
except Exception as e:
NeoWrapper.logger.error('Migration of company node %s failed. Exception raised: %s' %
(company.deduced[C.NAME] if company else 'none', e))
finally:
session.close()
pass
@staticmethod
def set_company_relationships(company):
from store.store import Store
session = NeoWrapper.driver.session()
try:
# TODO: TEMP TEMP!! We ignore all those who were initially inserted only by CB Excel
if len(company.data_sources) == 1 and 'CrunchBase2014' in company.data_sources:
return
relations = company.get_relations()
NeoWrapper.logger.info('Attempting to create relations for company %s', company.deduced[C.NAME])
# Go over relations and create them in Neo4J
for source_aid, relation_label, target_id, relations_str in relations:
cql_r_str = "MATCH (source),(target) " + \
"WHERE source.aid = '%s' AND target.aid = '%s' " % (source_aid, target_id) + \
"MERGE (source)-[r:%s{%s}]->(target)" % (relation_label, relations_str)
statement_res = session.run(cql_r_str)
NeoWrapper.logger.info('Create p2p %s-[%s]-%s relation successfully! Details: %s' %
(source_aid, relation_label, target_id, AcureRateUtils.obj2string(statement_res.consume().counters)))
pass
except CypherError as ce:
NeoWrapper.logger.error('CypherError raised: %s', ce)
except Exception as e:
NeoWrapper.logger.error('Exception raised: %s', e)
finally:
session.close()
pass
| 44.063444 | 164 | 0.569421 |
8a7c7c6eef229e4ec5b0c4a12331322bba5afab0 | 1,405 | py | Python | src/darkslide/utils.py | santosh653/python-darkslide | 248bf9dd5da9b0f26adecf46cd1f409d45b4a4db | [
"Apache-2.0"
] | 82 | 2015-06-01T01:17:29.000Z | 2022-01-28T18:33:03.000Z | src/darkslide/utils.py | santosh653/python-darkslide | 248bf9dd5da9b0f26adecf46cd1f409d45b4a4db | [
"Apache-2.0"
] | 25 | 2015-11-04T01:23:54.000Z | 2022-01-30T14:53:09.000Z | src/darkslide/utils.py | santosh653/python-darkslide | 248bf9dd5da9b0f26adecf46cd1f409d45b4a4db | [
"Apache-2.0"
] | 28 | 2015-11-03T23:08:00.000Z | 2022-01-20T17:21:56.000Z | # -*- coding: utf-8 -*-
import base64
import mimetypes
import os
# add woff2 font type: not here by default...
mimetypes.add_type('font/woff2', '.woff2')
def get_path_url(path, relative=False):
    """Return a URL for *path*.

    When *relative* is False (the default) the result is an absolute
    ``file://`` URL; otherwise *relative* is taken as a base directory and
    the path relative to it is returned.
    """
    if relative is not False:
        return os.path.relpath(path, relative)
    return 'file://' + os.path.abspath(path)
def encode_data_from_url(url, source_path):
    """Inline a local resource referenced by *url* as a ``data:`` URI.

    Relative URLs are resolved against *source_path*. Returns the data URI
    string on success, or False when the URL is empty, already inlined or
    remote (``data:``, ``file://``, ``http(s)://``), missing on disk, has no
    guessable MIME type, or cannot be read.
    """
    if not url:
        return False
    if url.startswith(('data:', 'file://', 'http://', 'https://')):
        return False
    if os.path.isabs(url):
        real_path = url
    else:
        real_path = os.path.join(source_path, url)
    if not os.path.exists(real_path):
        return False
    mime_type = mimetypes.guess_type(real_path)[0]
    if not mime_type:
        return False
    try:
        with open(real_path, 'rb') as resource:
            encoded = base64.b64encode(resource.read())
    except IOError:
        return False
    return u"data:%s;base64,%s" % (mime_type, encoded.decode())
class cached_property(object):
    """Non-data descriptor that computes a method once per instance.

    The first attribute access calls the wrapped function and stores the
    result in the instance ``__dict__`` under the function's name; because
    instance attributes shadow non-data descriptors, later accesses hit the
    cached value directly.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        # Accessed on the class itself: hand back the descriptor.
        if obj is None:
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
| 25.545455 | 77 | 0.639146 |
5be89e0d0c05bc599e3ae62d9c8cd64c9e54ba8e | 3,809 | py | Python | ds4se/vis.py | LeyliG/ds4se | ed8191cfb62c53a0ea9626a54f54f23bd31c3e59 | [
"Apache-2.0"
] | 1 | 2021-01-21T04:24:03.000Z | 2021-01-21T04:24:03.000Z | ds4se/vis.py | LeyliG/ds4se | ed8191cfb62c53a0ea9626a54f54f23bd31c3e59 | [
"Apache-2.0"
] | null | null | null | ds4se/vis.py | LeyliG/ds4se | ed8191cfb62c53a0ea9626a54f54f23bd31c3e59 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/5.0_vis.ipynb (unless otherwise specified).
__all__ = ['get_contours', 'visualize_gt_ngt', 'visualize_events', 'plot_counts', 'vis_3d', 'reduce_dims',
'clusterize_w_entropy']
# Cell
# Imports
import matplotlib.pyplot as plt
import numpy as np
#from ds4se.desc.stats import *
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# Cell
def get_contours(x_range, y_range, delta):
    """Build a meshgrid plus the plane Z = X + Y for contour backgrounds.

    x spans [x_range[0], x_range[1]) and y spans [y_range[0], y_range[1]),
    both with spacing *delta*. Returns (X, Y, Z) arrays of shape
    (len(y), len(x)).
    """
    x = np.arange(x_range[0], x_range[1], delta)
    # Fixed: y previously reused x_range, silently ignoring the y_range argument.
    y = np.arange(y_range[0], y_range[1], delta)
    X, Y = np.meshgrid(x, y)
    Z = X + Y
    return X, Y, Z
# Cell
def visualize_gt_ngt(gt, ngt, title, y_label):
    """Scatter-plot groundtruth vs non-groundtruth entropy pairs on a shared square grid.

    gt / ngt are indexable pairs of sequences: index 0 holds the x values
    (req entropy) and index 1 the y values (*y_label* entropy). Renders via
    pyplot's global figure (matplotlib backend is set at module import).
    """
    plt.title(title)
    plt.xlim(2, 15)
    plt.xlabel('req entropy')
    plt.ylim(2, 15)
    plt.ylabel(f'{y_label} entropy')
    plt.gca().set_aspect('equal', adjustable='box')
    # xi = np.linspace(2, 10, 10)
    # yi = np.linspace(2, 10, 10)
    # zi = griddata((gt[0], gt[1]), gt[1], (xi[None,:], yi[:,None]), method='linear')
    # Background shading from the Z = X + Y plane over a slightly larger grid
    # than the axis limits, so the contour fill covers the whole view.
    X, Y, Z = get_contours([1, 16], [1, 16], 1)
    plt.contourf(X, Y, Z, levels = 20, cmap = 'gray')
    # plt.tricontour(gt[0], gt[1])
    # Non-groundtruth drawn first so groundtruth points end up on top.
    plt.scatter(ngt[0], ngt[1], c='r', label='non-groundtruth', alpha = 0.5)
    plt.scatter(gt[0], gt[1], c='b', label='groundtruth', alpha = 0.5)
    plt.legend()
    plt.show()
# Cell
def visualize_events(events, color, title, label):
    """Draw a horizontal event (raster) plot of *events* with a stats text box.

    NOTE(review): get_desc_stats is undefined here — its import
    (``from ds4se.desc.stats import *``) is commented out at the top of this
    module, so calling this function raises NameError until it is restored.
    """
    plt.title(title)
    # Expected to return (max, min, mean, median, stdev, median abs deviation).
    maxi, mini, μ, med, σ, med_σ = get_desc_stats(events)
    text = f'Max: {maxi:.3f}\nMin: {mini:.3f}\nMean: {μ:.3f}\nMed: {med:.3f}\nStDev: {σ:.3f}\nMAD: {med_σ:.3f}'
    # Place the stats box in the left margin created by subplots_adjust below.
    plt.gcf().text(0.02, 0.35, text, fontsize=14)
    plt.hlines(1,0,1)
    # Hide y-axis ticks: the vertical position of the raster is meaningless.
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.eventplot(events, orientation='horizontal', colors=color, alpha = 0.5, label=label)
    plt.subplots_adjust(left=0.25)
    plt.legend()
    plt.show()
# Cell
def plot_counts(counts, x_label, y_label, top_k = 30):
    """Bar-plot the *top_k* most common entries of *counts*.

    *counts* must expose ``most_common()`` (e.g. a collections.Counter);
    labels go on the x axis, frequencies on the y axis.
    """
    labels, values = zip(*counts.most_common()[:top_k])
    indexes = np.arange(len(labels))
    width = 0.5
    # Wide, low figure so many tick labels stay readable.
    plt.figure(num=None, figsize=(22, 4), dpi=60, facecolor='w', edgecolor='k')
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.bar(indexes, values, width, align = 'center')
    plt.xticks(indexes, labels)
    plt.show()
# Cell
def vis_3d(gt, ngt, src_dtype, trgt_dtype):
    """3D scatter of (source entropy, target entropy, word-mover distance).

    gt / ngt are indexable triples of coordinate sequences: [0]=x, [1]=y,
    [2]=z. Groundtruth points are blue circles, non-groundtruth red triangles.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plt.xlim(1, 15)
    plt.ylim(1, 15)
    ax.scatter(gt[0], gt[1], gt[2], c='b', marker='o')
    ax.scatter(ngt[0], ngt[1], ngt[2], c='r', marker='^')
    ax.set_xlabel(src_dtype)
    ax.set_ylabel(trgt_dtype)
    ax.set_zlabel('Word Mover Distance')
    ax.invert_yaxis()
    # WMD values are normalized distances, hence the fixed [0, 1] z range.
    ax.set_zlim(0, 1)
    plt.show()
# Cell
# Uses PCA first and then t-SNE
def reduce_dims(doc_vecs, dims = 2):
# hyperparameters from https://towardsdatascience.com/visualising-high-dimensional-datasets-using-pca-and-t-sne-in-python-8ef87e7915b
pca = PCA(n_components=30)
pca_features = pca.fit_transform(doc_vecs)
tsne = TSNE(n_components=dims, verbose=1, perplexity=40, n_iter=300)
tsne_features = tsne.fit_transform(doc_vecs)
return tsne_features
# Cell
def clusterize_w_entropy(gt_doc_vecs, ngt_doc_vecs, gt_entropies, ngt_entropies):
    """3D scatter of 2D-reduced document vectors (x, y) against entropy (z).

    Groundtruth and non-groundtruth vectors are reduced independently via
    reduce_dims and plotted as blue circles / red triangles respectively.
    """
    gt_reduced_vecs = reduce_dims(gt_doc_vecs)
    ngt_reduced_vecs = reduce_dims(ngt_doc_vecs)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(gt_reduced_vecs[:, 0], gt_reduced_vecs[:, 1], gt_entropies, c='b', marker='o')
    ax.scatter(ngt_reduced_vecs[:, 0], ngt_reduced_vecs[:, 1], ngt_entropies, c='r', marker='^')
    # ax.set_xlabel(src_dtype)
    # ax.set_ylabel(trgt_dtype)
    ax.set_zlabel('Entropy')
    ax.invert_yaxis()
    plt.show()
947c6e0de5460b8092ea60bfbf1152c4e065eaab | 4,832 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/sensu_subscription.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/sensu_subscription.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/general/plugins/modules/sensu_subscription.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: sensu_subscription
short_description: Manage Sensu subscriptions
description:
- Manage which I(sensu channels) a machine should subscribe to
options:
name:
type: str
description:
- The name of the channel
required: true
state:
type: str
description:
- Whether the machine should subscribe or unsubscribe from the channel
choices: [ 'present', 'absent' ]
required: false
default: present
path:
type: str
description:
- Path to the subscriptions json file
required: false
default: /etc/sensu/conf.d/subscriptions.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
type: bool
required: false
default: no
requirements: [ ]
author: Anders Ingemann (@andsens)
'''
RETURN = '''
reasons:
description: the reasons why the module changed or did not change something
returned: success
type: list
sample: ["channel subscription was absent and state is `present'"]
'''
EXAMPLES = '''
# Subscribe to the nginx channel
- name: Subscribe to nginx checks
community.general.sensu_subscription: name=nginx
# Unsubscribe from the common checks channel
- name: Unsubscribe from common checks
community.general.sensu_subscription: name=common state=absent
'''
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
def sensu_subscription(module, path, name, state='present', backup=False):
    """Ensure channel *name* is present in / absent from the Sensu subscriptions file.

    Reads the JSON config at *path*, adds or removes the subscription according
    to *state*, and writes the file back unless running in check mode.

    Returns a ``(changed, reasons)`` tuple, where *reasons* records every
    decision taken. Calls ``module.fail_json()`` on unreadable or invalid files.
    """
    changed = False
    reasons = []

    try:
        # Fixed: previously json.load(open(path)) leaked the file handle.
        with open(path) as config_file:
            config = json.load(config_file)
    except IOError as e:
        if e.errno == 2:  # File not found, non-fatal
            if state == 'absent':
                reasons.append('file did not exist and state is `absent\'')
                return changed, reasons
            config = {}
        else:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except ValueError:
        msg = '{path} contains invalid JSON'.format(path=path)
        module.fail_json(msg=msg)

    if 'client' not in config:
        if state == 'absent':
            reasons.append('`client\' did not exist and state is `absent\'')
            return changed, reasons
        config['client'] = {}
        changed = True
        reasons.append('`client\' did not exist')

    if 'subscriptions' not in config['client']:
        if state == 'absent':
            reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
            return changed, reasons
        config['client']['subscriptions'] = []
        changed = True
        reasons.append('`client.subscriptions\' did not exist')

    if name not in config['client']['subscriptions']:
        if state == 'absent':
            reasons.append('channel subscription was absent')
            return changed, reasons
        config['client']['subscriptions'].append(name)
        changed = True
        reasons.append('channel subscription was absent and state is `present\'')
    else:
        if state == 'absent':
            config['client']['subscriptions'].remove(name)
            changed = True
            reasons.append('channel subscription was present and state is `absent\'')

    if changed and not module.check_mode:
        if backup:
            module.backup_local(path)
        try:
            # Fixed: previously open(path, 'w').write(...) leaked the file handle.
            with open(path, 'w') as config_file:
                config_file.write(json.dumps(config, indent=2) + '\n')
        except IOError as e:
            module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
                             exception=traceback.format_exc())

    return changed, reasons
def main():
    """Ansible entry point: parse module arguments, apply the subscription
    change via sensu_subscription() and report the result."""
    arg_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default='/etc/sensu/conf.d/subscriptions.json'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        backup=dict(type='bool', default='no'),
    )
    module = AnsibleModule(argument_spec=arg_spec,
                           supports_check_mode=True)

    params = module.params
    changed, reasons = sensu_subscription(module, params['path'], params['name'],
                                          params['state'], params['backup'])

    module.exit_json(path=params['path'], name=params['name'], changed=changed,
                     msg='OK', reasons=reasons)


if __name__ == '__main__':
    main()
| 31.581699 | 97 | 0.632243 |
de3670b9425028ef4b1824ceda3be8e3e0f1eec0 | 21 | py | Python | seglearn/_version.py | tylerwmarrs/seglearn | 01fa65b3e326091671043f30c0310eead0e78bc2 | [
"BSD-3-Clause"
] | 9 | 2019-02-28T04:38:30.000Z | 2021-07-31T03:04:56.000Z | seglearn/_version.py | tylerwmarrs/seglearn | 01fa65b3e326091671043f30c0310eead0e78bc2 | [
"BSD-3-Clause"
] | 1 | 2019-08-01T09:14:41.000Z | 2019-08-01T09:14:41.000Z | seglearn/_version.py | tylerwmarrs/seglearn | 01fa65b3e326091671043f30c0310eead0e78bc2 | [
"BSD-3-Clause"
] | 3 | 2020-08-02T04:24:34.000Z | 2022-03-07T13:41:36.000Z | __version__ = "1.0.9" | 21 | 21 | 0.666667 |
826fce7312568cd240100637d3e5a86a1eeb25ac | 6,789 | py | Python | extract_heartrate_data.py | ddboline/garmin_scripts | 08545d3a5de22014f4d57d8ae596af9d57ae9f0f | [
"MIT"
] | null | null | null | extract_heartrate_data.py | ddboline/garmin_scripts | 08545d3a5de22014f4d57d8ae596af9d57ae9f0f | [
"MIT"
] | null | null | null | extract_heartrate_data.py | ddboline/garmin_scripts | 08545d3a5de22014f4d57d8ae596af9d57ae9f0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import socket
import traceback
import webbrowser
import datetime
import requests
import pandas as pd
from dateutil.parser import parse
from urllib.parse import urlencode
from pytz import timezone
from time import strftime, sleep
from base64 import b64encode
import fitbit
from fitbit.api import Fitbit
import matplotlib
matplotlib.use('Agg')
import pylab as pl
#from garmin_app import garmin_utils, garmin_parse, garmin_report
#from garmin_app.util import utc, est
os.set_blocking(0, True)
utc = timezone('UTC')
est = timezone(
strftime("%Z").replace('CST', 'CST6CDT').replace('EDT', 'EST5EDT'))
hostname = socket.gethostname()
HOME = os.environ['HOME']
DEFAULT_HOST = 'www.ddboline.net' if hostname == 'dilepton-tower' else 'cloud.ddboline.net'
def read_config_env():
    """Load KEY=VALUE pairs from ./config.env into os.environ.

    Lines without an '=' (including blank lines) are skipped instead of
    crashing on unpack, and values may themselves contain '=' characters
    (e.g. base64 secrets) — only the first '=' separates key from value.
    """
    with open('config.env', 'r') as f:
        for line in f:
            line = line.strip()
            if '=' not in line:
                continue
            # Fixed: split('=')[:2] truncated values containing '='.
            key, val = line.split('=', 1)
            os.environ[key] = val
read_config_env()
client_id = os.environ['FITBIT_CLIENTID']
client_secret = os.environ['FITBIT_CLIENTSECRET']
garmin_username = os.environ['GARMIN_USERNAME']
garmin_password = os.environ['GARMIN_PASSWORD']
def get_client(session, refresh=False, tokens_last_mod=None,
               host=DEFAULT_HOST):
    """Return an authenticated fitbit.Fitbit client, refreshing tokens if needed.

    With refresh=True, opens the browser on the auth URL returned by the
    garmin service (presumably an OAuth consent page — TODO confirm) and waits
    for ~/.fitbit_tokens to be (re)written. When tokens_last_mod is given, the
    function spins (sleep + recurse) until the token file's mtime advances past
    it. Tokens are then read from the file and validated with a profile call;
    on HTTP 401 the function recurses with the refresh flag toggled.

    NOTE(review): there is no recursion depth guard — if tokens never become
    valid this alternation recurses until Python's recursion limit.
    """
    if refresh:
        url = f'https://{host}/garmin/fitbit/auth'
        # The endpoint's response body is treated as the URL to open.
        webbrowser.open(session.get(url).text)
        sleep(5)
    token_fname = f'{HOME}/.fitbit_tokens'
    current_last_mod = None
    if os.path.exists(token_fname):
        current_last_mod = os.stat(token_fname).st_mtime
    else:
        # No token file at all: force the browser-based refresh flow.
        return get_client(session, refresh=True)
    if tokens_last_mod is not None and current_last_mod <= tokens_last_mod:
        # Token file has not been rewritten yet; wait and poll again.
        sleep(5)
        return get_client(session,
                          refresh=False,
                          tokens_last_mod=tokens_last_mod)
    # Parse access/refresh tokens out of the KEY=VALUE token file.
    access_token, refresh_token = '', ''
    with open(f'{HOME}/.fitbit_tokens', 'r') as fd:
        for line in fd:
            tmp = line.strip().split('=')
            if len(tmp) < 2:
                continue
            key, val = tmp[:2]
            if key == 'access_token':
                access_token = val
            elif key == 'refresh_token':
                refresh_token = val
    client = fitbit.Fitbit(client_id,
                           client_secret,
                           access_token=access_token,
                           refresh_token=refresh_token)
    try:
        # Cheap API call used purely to validate the tokens.
        client.user_profile_get()
        return client
    except fitbit.exceptions.HTTPUnauthorized:
        if refresh is True:
            return get_client(session,
                              refresh=False,
                              tokens_last_mod=current_last_mod)
        else:
            return get_client(session,
                              refresh=True,
                              tokens_last_mod=current_last_mod)
def get_session(host=DEFAULT_HOST):
    """Log in to the garmin service at *host* and return the authenticated
    requests session (auth state lives in the session's cookies)."""
    session = requests.Session()
    credentials = {
        'email': garmin_username,
        'password': garmin_password,
    }
    session.post(f'https://{host}/api/auth', json=credentials)
    return session
def get_heartrate_data(begin_date='2017-03-10',
                       end_date=datetime.date.today().isoformat(),
                       host=DEFAULT_HOST):
    """Fetch heart-rate data from the garmin service for a date range and plot it.

    Pulls per-day fitbit heart-rate entries, re-syncs days with no data (plus
    the last day with data), merges in per-activity heart-rate samples, then
    writes heartrate_data.png and hrt_vs_pace.png into ~/public_html/.
    Returns the last pandas DataFrame built (the hr/pace one when non-empty).

    NOTE: the end_date default is evaluated once at import time, so it goes
    stale in a long-running process.
    """
    begin_date = parse(begin_date).date()
    end_date = parse(end_date).date()
    # NOTE(review): assert is stripped under -O; input validation by assert only.
    assert end_date >= begin_date
    days = (end_date - begin_date).days
    dates = [begin_date + datetime.timedelta(days=x) for x in range(days + 1)]
    dates = list(map(lambda x: x.isoformat(), dates))
    heart_rate_pace_data = []
    files = []
    session = get_session()
    last_date = dates[0]
    zero_dates = []
    entries = []
    # First pass: collect each day's entries, remembering days with no data.
    for date in dates:
        url = f'https://{host}/garmin/fitbit/heartrate_db?date={date}'
        tmp = session.get(url).json()
        if len(tmp) > 0:
            last_date = date
            entries.append(tmp)
            print(date, len(tmp))
        else:
            zero_dates.append(date)
    # Drop the most recent non-empty day so it gets re-fetched after a sync
    # (it may have been only partially uploaded).
    if entries:
        entries.pop()
    # Second pass: trigger a sync for the last non-empty day and every empty
    # day, then re-read their entries.
    for date in [last_date] + zero_dates:
        url = f'https://{host}/garmin/fitbit/sync?date={date}'
        session.get(url).raise_for_status()
        print(f'sync {date}')
        url = f'https://{host}/garmin/fitbit/heartrate_db?date={date}'
        tmp = session.get(url).json()
        print(date, len(tmp))
        entries.append(tmp)
    # Flatten the per-day entry lists into (local-time, value) records.
    data = []
    for tmp in entries:
        tmp = [{
            'time': parse(x['datetime']).astimezone(est).isoformat()[:19],
            'value': x['value']
        } for x in tmp]
        data.extend(tmp)
    # Gather GPS activity files for the date range ...
    for date in dates:
        js = session.get(
            f'https://{host}/garmin/list_gps_tracks?filter={date}').json()
        files.extend(js['gps_list'])
    # ... and merge each activity's heart-rate samples and hr/pace points.
    for fname in files:
        print(fname)
        js = session.get(
            f'https://{host}/garmin/get_hr_data?filter={fname}').json()
        tmp = [{
            'time': parse(x['time']).astimezone(est).isoformat()[:19],
            'value': x['value']
        } for x in js['hr_data']]
        data.extend(tmp)
        js = session.get(
            f'https://{host}/garmin/get_hr_pace?filter={fname}').json()
        tmp = [{'hrt': int(x['hr']), 'pace': x['pace']} for x in js['hr_pace']]
        heart_rate_pace_data.extend(tmp)
    # Plot the 5-minute-resampled heart-rate time series.
    df = pd.DataFrame(data)
    if df.shape[0] > 0:
        df.index = pd.to_datetime(df.time)
        ts = df.sort_index().value
        pl.clf()
        ts.resample('5Min').mean().dropna().plot()
        pl.savefig('heartrate_data.png')
        os.system(f'mv heartrate_data.png {HOME}/public_html/')
    # Plot heart rate vs pace as a hexbin density.
    df = pd.DataFrame(heart_rate_pace_data)
    if df.shape[0] > 0:
        pl.clf()
        #df.plot.scatter('hrt', 'pace')
        df.plot.hexbin('hrt', 'pace', gridsize=30)
        pl.savefig('hrt_vs_pace.png')
        os.system(f'mv hrt_vs_pace.png {HOME}/public_html/')
    if DEFAULT_HOST != 'www.ddboline.net':
        return df
    # On the primary host, also push the plots to the cloud mirror.
    for f in ('hrt_vs_pace.png', 'heartrate_data.png'):
        cmd = f'scp {HOME}/public_html/{f} ubuntu@cloud.ddboline.net:~/public_html/'
        os.system(cmd)
    return df
if __name__ == '__main__':
    begin_date, end_date = None, None
    # Take the first two parseable dates found on the command line as the range.
    for arg in os.sys.argv:
        if begin_date is None:
            try:
                begin_date = parse(arg).date()
            except (ValueError, OverflowError):
                # Not a date (e.g. the script name) — keep scanning. Previously a
                # bare 'except:' here also swallowed KeyboardInterrupt/SystemExit.
                pass
        elif end_date is None:
            try:
                end_date = parse(arg).date()
            except (ValueError, OverflowError):
                pass
    # Defaults: the last three days up to today.
    if begin_date is None:
        begin_date = datetime.date.today() - datetime.timedelta(days=3)
    if end_date is None:
        end_date = datetime.date.today()
    x = get_heartrate_data(begin_date.isoformat(), end_date.isoformat())
| 29.517391 | 91 | 0.580498 |
844c5afd226e1626cee6807a1eb512b4711e3282 | 1,906 | py | Python | tests/unit/utils/test_get_synapse_team_ids.py | zaro0508/cfn-cr-synapse-tagger | c9f6f71aa2e9111f6e184c046b7851e7883feba5 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_get_synapse_team_ids.py | zaro0508/cfn-cr-synapse-tagger | c9f6f71aa2e9111f6e184c046b7851e7883feba5 | [
"Apache-2.0"
] | 10 | 2020-09-03T16:45:31.000Z | 2021-01-29T18:10:44.000Z | tests/unit/utils/test_get_synapse_team_ids.py | zaro0508/cfn-cr-synapse-tagger | c9f6f71aa2e9111f6e184c046b7851e7883feba5 | [
"Apache-2.0"
] | 3 | 2020-09-02T16:21:05.000Z | 2021-06-07T16:53:00.000Z | import unittest
import boto3
from unittest.mock import patch
from set_tags import utils
from botocore.stub import Stubber
MOCK_GET_PARAMETER_RESPONSE = {
"Parameter": {
"Name": "/service-catalog/TeamToRoleArnMap",
"Type": "String",
"Value": "[ {\"teamId\":\"1111111\",\"roleArn\":\"arn:aws:iam::999999999999:role/ServiceCatalogEndusers\"},"
"{\"teamId\":\"2222222\",\"roleArn\":\"arn:aws:iam::999999999999:role/ServiceCatalogExternalEndusers\"} ]",
"Version": 1,
"LastModifiedDate": 1600127530.776,
"ARN": "arn:aws:ssm:us-east-1:999999999999:parameter/service-catalog/TeamToRoleArnMap",
"DataType": "text"
}
}
class TestGetSynapseTeamIds(unittest.TestCase):
  """Unit tests for utils.get_synapse_team_ids (SSM lookups fully mocked)."""
  # NOTE(review): the boto3 client and Stubber created in each test are never
  # used — get_synapse_team_ids's SSM access is patched out below.
  def test_happy_path(self):
    # Env var and SSM parameter both present: team ids extracted from the
    # parameter's JSON Value.
    ssm = boto3.client('ssm')
    with Stubber(ssm) as stubber, \
      patch('set_tags.utils.get_env_var_value') as env_var_mock, \
      patch('set_tags.utils.get_ssm_parameter') as param_mock:
      env_var_mock.return_value = "some-value"
      param_mock.return_value = MOCK_GET_PARAMETER_RESPONSE
      result = utils.get_synapse_team_ids()
      expected = ["1111111","2222222"]
      self.assertListEqual(result, expected)
  def test_no_env_var_team_to_role_arn_map_param_name(self):
    # Missing TeamToRoleArnMap env var: expect an empty list, not an error.
    ssm = boto3.client('ssm')
    with Stubber(ssm) as stubber, \
      patch('set_tags.utils.get_env_var_value') as env_var_mock:
      env_var_mock.return_value = None
      result = utils.get_synapse_team_ids()
      expected = []
      self.assertListEqual(result, expected)
  def test_no_ssm_param_role_arn_map(self):
    # SSM parameter lookup returns nothing: expect an empty list.
    ssm = boto3.client('ssm')
    with Stubber(ssm) as stubber, \
      patch('set_tags.utils.get_ssm_parameter') as get_ssm_param_mock:
      get_ssm_param_mock.return_value = None
      result = utils.get_synapse_team_ids()
      expected = []
      self.assertListEqual(result, expected)
66589f6de5572dabf5fb21bc82269f2568ebd283 | 2,247 | py | Python | tests/parsers/winreg_plugins/usb.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 1 | 2020-12-04T10:26:34.000Z | 2020-12-04T10:26:34.000Z | tests/parsers/winreg_plugins/usb.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | tests/parsers/winreg_plugins/usb.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the USB Windows Registry plugin."""
from __future__ import unicode_literals
import unittest
from plaso.parsers.winreg_plugins import usb
from tests.parsers.winreg_plugins import test_lib
class USBPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the USB Windows Registry plugin."""
  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = usb.USBPlugin()
    key_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Enum\\USB'
    # The plugin must match the Enum\USB key and nothing else.
    self._AssertFiltersOnKeyPath(plugin, key_path)
    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
  def testProcess(self):
    """Tests the Process function."""
    test_file_entry = self._GetTestFileEntry(['SYSTEM'])
    key_path = 'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Enum\\USB'
    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)
    plugin = usb.USBPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 7)
    events = list(storage_writer.GetEvents())
    # Spot-check the fourth event produced from the test SYSTEM hive.
    event = events[3]
    self.CheckTimestamp(event.timestamp, '2012-04-07 10:31:37.625247')
    event_data = self._GetEventDataOfEvent(storage_writer, event)
    # This should just be the plugin name, as we're invoking it directly,
    # and not through the parser.
    self.assertEqual(event_data.parser, plugin.plugin_name)
    self.assertEqual(event_data.data_type, 'windows:registry:usb')
    self.assertEqual(event_data.subkey_name, 'VID_0E0F&PID_0002')
    self.assertEqual(event_data.vendor, 'VID_0E0F')
    self.assertEqual(event_data.product, 'PID_0002')
    expected_message = (
        '[{0:s}] '
        'Product: PID_0002 '
        'Serial: 6&2ab01149&0&2 '
        'Subkey name: VID_0E0F&PID_0002 '
        'Vendor: VID_0E0F').format(key_path)
    # Short messages are truncated to 77 characters plus an ellipsis.
    expected_short_message = '{0:s}...'.format(expected_message[:77])
    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| 31.647887 | 73 | 0.728082 |
5a3ec07980e93e0bf85600800d15dbbad4ed9477 | 355 | py | Python | Day 14/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
] | 9 | 2021-03-02T12:16:24.000Z | 2021-03-26T11:06:08.000Z | Day 14/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
] | 65 | 2021-03-02T04:57:47.000Z | 2021-04-02T19:31:30.000Z | Day 14/Aayushi-Mittal.py | ChetasShree/MarchCode | 80ee6206c0e4481b4421a83c7b7b7fc977450009 | [
"MIT"
] | 94 | 2021-03-02T04:42:28.000Z | 2021-06-28T10:38:20.000Z | # To find whether a number is Perfect or not.
# Perfect number is a positive integer which is equal to the sum of its proper positive divisors.
num=0
sum=0;
num = int(input("enter a number : "))
for i in range(0, num):
if num%i==0:
sum+=i
if sum==num:
print(sum," is a perfect number.")
else:
print(num, " is not a perfect number.")
| 25.357143 | 97 | 0.653521 |
89b979e1de000a5acf3e5900ffc0ad635ab8a376 | 62,228 | py | Python | conf/city_code.py | zhoumh1988/12306 | 8ce07ea28d2eda88ca8af7bc2ccc5a2cadabe4c0 | [
"MIT"
] | 2 | 2021-03-18T11:19:49.000Z | 2022-02-07T01:36:26.000Z | conf/city_code.py | zhoumh1988/12306 | 8ce07ea28d2eda88ca8af7bc2ccc5a2cadabe4c0 | [
"MIT"
] | null | null | null | conf/city_code.py | zhoumh1988/12306 | 8ce07ea28d2eda88ca8af7bc2ccc5a2cadabe4c0 | [
"MIT"
] | 1 | 2022-02-07T01:35:59.000Z | 2022-02-07T01:35:59.000Z | CITY_NAME = [
"北京北",
"北京东",
"北京",
"北京南",
"北京西",
"广州南",
"重庆北",
"重庆",
"重庆南",
"重庆西",
"广州东",
"上海",
"上海南",
"上海虹桥",
"上海西",
"天津北",
"天津",
"天津南",
"天津西",
"香港西九龙",
"长春",
"长春南",
"长春西",
"成都东",
"成都南",
"成都",
"长沙",
"长沙南",
"大明湖",
"福州",
"福州南",
"贵阳",
"广州",
"广州西",
"哈尔滨",
"哈尔滨东",
"哈尔滨西",
"合肥",
"合肥西",
"呼和浩特东",
"呼和浩特",
"口东",
"海口东",
"海口",
"杭州东",
"杭州",
"杭州南",
"济南",
"济南西",
"昆明",
"昆明西",
"拉萨",
"兰州东",
"兰州",
"兰州西",
"南昌",
"南京",
"南京南",
"南宁",
"石家庄北",
"石家庄",
"沈阳",
"沈阳北",
"沈阳东",
"沈阳南",
"太原北",
"太原东",
"太原",
"武汉",
"王家营西",
"乌鲁木齐",
"西安北",
"西安",
"西安南",
"西宁",
"银川",
"郑州",
"阿尔山",
"安康",
"阿克苏",
"阿里河",
"阿拉山口",
"安平",
"安庆",
"安顺",
"鞍山",
"安阳",
"北安",
"蚌埠",
"白城",
"北海",
"白河",
"白涧",
"宝鸡",
"滨江",
"博克图",
"百色",
"白山市",
"北台",
"包头东",
"包头",
"北屯市",
"本溪",
"白云鄂博",
"白银西",
"亳州",
"赤壁",
"常德",
"承德",
"长甸",
"赤峰",
"茶陵",
"苍南",
"昌平",
"崇仁",
"昌图",
"长汀镇",
"曹县",
"楚雄南",
"陈相屯",
"长治北",
"池州",
"长征",
"常州",
"郴州",
"长治",
"沧州",
"崇左",
"大安北",
"大成",
"丹东",
"东方红",
"东莞东",
"大虎山",
"敦化",
"敦煌",
"德惠",
"东京城",
"大涧",
"都江堰",
"大连北",
"大理",
"大连",
"定南",
"大庆",
"东胜",
"大石桥",
"大同",
"东营",
"大杨树",
"都匀",
"邓州",
"达州",
"德州",
"额济纳",
"二连",
"恩施",
"福鼎",
"凤凰机场",
"风陵渡",
"涪陵",
"富拉尔基",
"抚顺北",
"佛山",
"阜新南",
"阜阳",
"格尔木",
"广汉",
"古交",
"桂林北",
"古莲",
"桂林",
"固始",
"广水",
"干塘",
"广元",
"广州北",
"赣州",
"公主岭",
"公主岭南",
"淮安",
"淮北",
"鹤北",
"淮滨",
"河边",
"潢川",
"韩城",
"邯郸",
"横道河子",
"鹤岗",
"皇姑屯",
"红果",
"黑河",
"怀化",
"汉口",
"葫芦岛",
"海拉尔",
"霍林郭勒",
"海伦",
"侯马",
"哈密",
"淮南",
"桦南",
"海宁西",
"鹤庆",
"怀柔北",
"怀柔",
"黄石东",
"华山",
"黄山",
"黄石",
"衡水",
"衡阳",
"菏泽",
"贺州",
"汉中",
"惠州",
"吉安",
"集安",
"江边村",
"晋城",
"金城江",
"景德镇",
"嘉峰",
"加格达奇",
"井冈山",
"蛟河",
"金华南",
"金华",
"九江",
"吉林",
"荆门",
"佳木斯",
"济宁",
"集宁南",
"酒泉",
"江山",
"吉首",
"九台",
"镜铁山",
"鸡西",
"绩溪县",
"嘉峪关",
"江油",
"蓟州北",
"金州",
"锦州",
"库尔勒",
"开封",
"岢岚",
"凯里",
"喀什",
"昆山南",
"奎屯",
"开原",
"六安",
"灵宝",
"芦潮港",
"隆昌",
"陆川",
"利川",
"临川",
"潞城",
"鹿道",
"娄底",
"临汾",
"良各庄",
"临河",
"漯河",
"绿化",
"隆化",
"丽江",
"临江",
"龙井",
"吕梁",
"醴陵",
"柳林南",
"滦平",
"六盘水",
"灵丘",
"旅顺",
"兰溪",
"陇西",
"澧县",
"临西",
"龙岩",
"耒阳",
"洛阳",
"连云港东",
"洛阳东",
"临沂",
"洛阳龙门",
"柳园",
"凌源",
"辽源",
"立志",
"柳州",
"辽中",
"麻城",
"免渡河",
"牡丹江",
"莫尔道嘎",
"明光",
"满归",
"漠河",
"茂名",
"茂名西",
"密山",
"马三家",
"麻尾",
"绵阳",
"梅州",
"满洲里",
"宁波东",
"宁波",
"南岔",
"南充",
"南丹",
"南大庙",
"南芬",
"讷河",
"嫩江",
"内江",
"南平",
"南通",
"南阳",
"碾子山",
"平顶山",
"盘锦",
"平凉",
"平凉南",
"平泉",
"坪石",
"萍乡",
"凭祥",
"郫县西",
"攀枝花",
"蕲春",
"青城山",
"青岛",
"清河城",
"曲靖",
"黔江",
"前进镇",
"齐齐哈尔",
"七台河",
"沁县",
"泉州东",
"泉州",
"衢州",
"融安",
"汝箕沟",
"瑞金",
"日照",
"双城堡",
"绥芬河",
"韶关东",
"山海关",
"绥化",
"三间房",
"苏家屯",
"舒兰",
"神木南",
"三门峡",
"商南",
"遂宁",
"四平",
"商丘",
"上饶",
"韶山",
"宿松",
"汕头",
"邵武",
"涉县",
"三亚",
"亚",
"邵阳",
"十堰",
"三元区",
"双鸭山",
"松原",
"苏州",
"深圳",
"宿州",
"随州",
"朔州",
"深圳西",
"塘豹",
"塔尔气",
"潼关",
"塘沽",
"塔河",
"通化",
"泰来",
"吐鲁番",
"通辽",
"铁岭",
"陶赖昭",
"图们",
"铜仁",
"唐山北",
"田师府",
"泰山",
"唐山",
"天水",
"通远堡",
"太阳升",
"泰州",
"桐梓",
"通州西",
"五常",
"武昌",
"瓦房店",
"威海",
"芜湖",
"乌海西",
"吴家屯",
"乌鲁木齐南",
"武隆",
"乌兰浩特",
"渭南",
"威舍",
"歪头山",
"武威",
"武威南",
"无锡",
"乌西",
"乌伊岭",
"武夷山",
"万源",
"万州",
"梧州",
"温州",
"温州南",
"西昌",
"许昌",
"西昌南",
"锡林浩特",
"厦门北",
"厦门",
"厦门高崎",
"宣威",
"新乡",
"信阳",
"咸阳",
"襄阳",
"熊岳城",
"新余",
"徐州",
"延安",
"宜宾",
"亚布力南",
"叶柏寿",
"宜昌东",
"永川",
"盐城",
"宜昌",
"运城",
"伊春",
"榆次",
"杨村",
"宜春西",
"伊尔施",
"燕岗",
"永济",
"延吉",
"营口",
"牙克石",
"玉林",
"阎良",
"榆林",
"亚龙湾",
"一面坡",
"伊宁",
"阳平关",
"玉屏",
"原平",
"延庆",
"阳泉曲",
"玉泉",
"阳泉",
"营山",
"玉山",
"燕山",
"榆树",
"鹰潭",
"烟台",
"伊图里河",
"玉田县",
"义乌",
"阳新",
"义县",
"益阳",
"岳阳",
"崖州",
"永州",
"扬州",
"淄博",
"镇城底",
"自贡",
"珠海",
"珠海北",
"湛江",
"镇江",
"张家界",
"张家口",
"张家口南",
"周口",
"哲里木",
"扎兰屯",
"驻马店",
"肇庆",
"周水子",
"昭通",
"中卫",
"资阳",
"遵义西",
"枣庄",
"资中",
"株洲",
"枣庄西",
"昂昂溪",
"阿城",
"安达",
"安德",
"安定",
"安多",
"安广",
"敖汉",
"艾河",
"安化",
"艾家村",
"鳌江",
"安家",
"阿金",
"安靖",
"阿克陶",
"安口窑",
"敖力布告",
"安龙",
"阿龙山",
"安陆",
"阿木尔",
"阿南庄",
"安庆西",
"鞍山西",
"安塘",
"安亭北",
"阿图什",
"安图",
"安溪",
"博鳌",
"北碚",
"白壁关",
"蚌埠南",
"巴楚",
"板城",
"北戴河",
"保定",
"宝坻",
"八达岭",
"巴东",
"柏果",
"布海",
"白河东",
"贲红",
"宝华山",
"白河县",
"白芨沟",
"碧鸡关",
"北滘",
"碧江",
"白鸡坡",
"笔架山",
"八角台",
"保康",
"白奎堡",
"白狼",
"百浪",
"博乐",
"宝拉格",
"巴林",
"宝林",
"北流",
"勃利",
"布列开",
"宝龙山",
"百里峡",
"八面城",
"班猫箐",
"八面通",
"北马圈子",
"北票南",
"白旗",
"宝泉岭",
"白泉",
"巴山",
"白水江",
"白沙坡",
"白石山",
"白水镇",
"东",
"坂田",
"泊头",
"北屯",
"本溪湖",
"博兴",
"八仙筒",
"白音察干",
"背荫河",
"北营",
"巴彦高勒",
"白音他拉",
"鲅鱼圈",
"白银市",
"白音胡硕",
"巴中",
"霸州",
"北宅",
"赤壁北",
"查布嘎",
"长城",
"长冲",
"承德东",
"赤峰西",
"嵯岗",
"柴岗",
"长葛",
"柴沟堡",
"城固",
"陈官营",
"成高子",
"草海",
"柴河",
"册亨",
"草河口",
"崔黄口",
"巢湖",
"蔡家沟",
"成吉思汗",
"岔江",
"蔡家坡",
"昌乐",
"超梁沟",
"慈利",
"昌黎",
"长岭子",
"晨明",
"长农",
"昌平北",
"常平",
"长坡岭",
"辰清",
"蔡山",
"楚山",
"长寿",
"磁山",
"苍石",
"草市",
"察素齐",
"长山屯",
"长汀",
"朝天南",
"昌图西",
"春湾",
"磁县",
"岑溪",
"辰溪",
"磁西",
"长兴南",
"磁窑",
"春阳",
"城阳",
"创业村",
"朝阳川",
"朝阳地",
"朝阳南",
"长垣",
"朝阳镇",
"滁州北",
"常州北",
"滁州",
"潮州",
"常庄",
"曹子里",
"车转湾",
"郴州西",
"沧州西",
"德安",
"大安",
"大坝",
"大板",
"大巴",
"电白",
"到保",
"达坂城",
"定边",
"东边井",
"德伯斯",
"打柴沟",
"德昌",
"滴道",
"大磴沟",
"刀尔登",
"得耳布尔",
"杜尔伯特",
"东方",
"丹凤",
"东丰",
"都格",
"大官屯",
"大关",
"东光",
"东海",
"大灰厂",
"大红旗",
"大禾塘",
"德惠西",
"东海县",
"达家沟",
"东津",
"杜家",
"大口屯",
"东来",
"德令哈",
"大陆号",
"带岭",
"大林",
"达拉特旗",
"独立屯",
"豆罗",
"达拉特西",
"大连西",
"东明村",
"洞庙河",
"东明县",
"大拟",
"大平房",
"大盘石",
"大埔",
"大堡",
"大庆东",
"大其拉哈",
"道清",
"对青山",
"德清西",
"大庆西",
"东升",
"砀山",
"独山",
"登沙河",
"读书铺",
"大石头",
"东胜西",
"大石寨",
"东台",
"定陶",
"灯塔",
"大田边",
"东通化",
"丹徒",
"大屯",
"东湾",
"大武口",
"低窝铺",
"大王滩",
"大湾子",
"大兴沟",
"大兴",
"定西",
"甸心",
"东乡",
"代县",
"定襄",
"东戌",
"东辛庄",
"丹阳",
"德阳",
"大雁",
"当阳",
"丹阳北",
"大英东",
"东淤地",
"大营",
"定远",
"岱岳",
"大元",
"大营镇",
"大营子",
"大战场",
"德州东",
"东至",
"低庄",
"东镇",
"道州",
"东庄",
"兑镇",
"豆庄",
"定州",
"大竹园",
"大杖子",
"豆张庄",
"峨边",
"二道沟门",
"二道湾",
"鄂尔多斯",
"二龙",
"二龙山屯",
"峨眉",
"二密河",
"恩平",
"二营",
"鄂州",
"福安",
"丰城",
"丰城南",
"肥东",
"发耳",
"富海",
"福海",
"凤凰城",
"汾河",
"奉化",
"富锦",
"范家屯",
"福利区",
"福利屯",
"丰乐镇",
"阜南",
"阜宁",
"抚宁",
"福清",
"福泉",
"丰水村",
"丰顺",
"繁峙",
"抚顺",
"福山口",
"扶绥",
"冯屯",
"浮图峪",
"富县东",
"凤县",
"富县",
"费县",
"凤阳",
"汾阳",
"扶余北",
"分宜",
"富源",
"扶余",
"富裕",
"抚州北",
"凤州",
"丰镇",
"范镇",
"固安",
"广安",
"高碑店",
"沟帮子",
"甘草店",
"谷城",
"藁城",
"高村",
"古城镇",
"广德",
"贵定",
"贵定南",
"古东",
"贵港",
"官高",
"葛根庙",
"干沟",
"甘谷",
"高各庄",
"甘河",
"根河",
"郭家店",
"孤家子",
"古浪",
"皋兰",
"高楼房",
"归流河",
"关林",
"甘洛",
"郭磊庄",
"高密",
"公庙子",
"工农湖",
"广宁寺南",
"广南卫",
"高平",
"甘泉北",
"共青城",
"甘旗卡",
"甘泉",
"高桥镇",
"灌水",
"赶水",
"孤山口",
"果松",
"高山子",
"嘎什甸子",
"高台",
"高滩",
"古田",
"官厅",
"官厅西",
"贵溪",
"涡阳",
"巩义",
"高邑",
"巩义南",
"广元南",
"固原",
"菇园",
"公营子",
"光泽",
"古镇",
"固镇",
"虢镇",
"瓜州",
"高州",
"盖州",
"官字井",
"冠豸山",
"盖州西",
"海安",
"淮安南",
"红安",
"红安西",
"黄柏",
"海北",
"鹤壁",
"会昌北",
"华城",
"河唇",
"汉川",
"海城",
"合川",
"黑冲滩",
"黄村",
"海城西",
"化德",
"洪洞",
"霍尔果斯",
"横峰",
"韩府湾",
"汉沽",
"黄瓜园",
"红光镇",
"浑河",
"红花沟",
"黄花筒",
"贺家店",
"和静",
"红江",
"黑井",
"获嘉",
"河津",
"涵江",
"华家",
"杭锦后旗",
"河间西",
"花家庄",
"河口南",
"湖口",
"黄口",
"呼兰",
"葫芦岛北",
"浩良河",
"哈拉海",
"鹤立",
"桦林",
"黄陵",
"海林",
"虎林",
"寒岭",
"和龙",
"海龙",
"哈拉苏",
"呼鲁斯太",
"火连寨",
"黄梅",
"韩麻营",
"黄泥河",
"海宁",
"惠农",
"和平",
"花棚子",
"花桥",
"宏庆",
"怀仁",
"华容",
"华山北",
"黄松甸",
"和什托洛盖",
"红山",
"汉寿",
"衡山",
"黑水",
"惠山",
"虎什哈",
"红寺堡",
"虎石台",
"海石湾",
"衡山西",
"红砂岘",
"黑台",
"桓台",
"和田",
"会同",
"海坨子",
"黑旺",
"海湾",
"红星",
"徽县",
"红兴隆",
"换新天",
"红岘台",
"红彦",
"海晏",
"合阳",
"衡阳东",
"华蓥",
"汉阴",
"黄羊滩",
"汉源",
"河源",
"花园",
"湟源",
"黄羊镇",
"湖州",
"化州",
"黄州",
"霍州",
"惠州西",
"巨宝",
"靖边",
"金宝屯",
"晋城北",
"金昌",
"鄄城",
"交城",
"建昌",
"峻德",
"井店",
"鸡东",
"江都",
"鸡冠山",
"金沟屯",
"静海",
"金河",
"锦河",
"精河",
"精河南",
"江华",
"建湖",
"纪家沟",
"晋江",
"锦界",
"姜家",
"江津",
"金坑",
"芨岭",
"金马村",
"江门东",
"角美",
"莒南",
"井南",
"建瓯",
"经棚",
"江桥",
"九三",
"金山北",
"嘉善",
"京山",
"建始",
"稷山",
"吉舒",
"建设",
"甲山",
"建三江",
"嘉善南",
"金山屯",
"江所田",
"景泰",
"九台南",
"吉文",
"进贤",
"莒县",
"嘉祥",
"介休",
"嘉兴",
"井陉",
"嘉兴南",
"夹心子",
"姜堰",
"简阳",
"揭阳",
"建阳",
"巨野",
"江永",
"缙云",
"靖远",
"江源",
"济源",
"靖远西",
"胶州北",
"焦作东",
"金寨",
"靖州",
"荆州",
"胶州",
"晋州",
"蓟州",
"锦州南",
"焦作",
"旧庄窝",
"金杖子",
"开安",
"库车",
"康城",
"库都尔",
"宽甸",
"克东",
"昆都仑召",
"开江",
"康金井",
"喀喇其",
"开鲁",
"克拉玛依",
"开平南",
"口前",
"昆山",
"奎山",
"克山",
"康熙岭",
"昆阳",
"克一河",
"开原西",
"康庄",
"来宾",
"老边",
"灵宝西",
"龙川",
"乐昌",
"黎城",
"聊城",
"蓝村",
"两当",
"林东",
"乐都",
"梁底下",
"六道河子",
"鲁番",
"廊坊",
"落垡",
"廊坊北",
"老府",
"兰岗",
"龙骨甸",
"芦沟",
"龙沟",
"拉古",
"临海",
"林海",
"拉哈",
"凌海",
"柳河",
"六合",
"龙华",
"滦河沿",
"六合镇",
"亮甲店",
"刘家店",
"刘家河",
"连江",
"庐江",
"李家",
"罗江",
"廉江",
"两家",
"龙江",
"龙嘉",
"莲江口",
"蔺家楼",
"李家坪",
"兰考",
"林口",
"路口铺",
"老莱",
"拉林",
"陆良",
"龙里",
"临澧",
"兰棱",
"零陵",
"卢龙",
"喇嘛甸",
"里木店",
"洛门",
"龙南",
"梁平",
"罗平",
"落坡岭",
"六盘山",
"乐平市",
"临清",
"龙泉寺",
"乐山北",
"乐善村",
"冷水江东",
"连山关",
"流水沟",
"丽水",
"陵水",
"罗山",
"鲁山",
"梁山",
"灵石",
"露水河",
"庐山",
"林盛堡",
"柳树屯",
"龙山镇",
"梨树镇",
"李石寨",
"黎塘",
"轮台",
"芦台",
"龙塘坝",
"濑湍",
"骆驼巷",
"李旺",
"莱芜东",
"狼尾山",
"灵武",
"莱芜西",
"朗乡",
"陇县",
"临湘",
"芦溪",
"莱西",
"林西",
"滦县",
"莱阳",
"略阳",
"辽阳",
"凌源东",
"临沂东",
"连云港",
"临颍",
"老营",
"龙游",
"罗源",
"林源",
"涟源",
"涞源",
"耒阳西",
"临泽",
"龙爪沟",
"雷州",
"六枝",
"鹿寨",
"来舟",
"龙镇",
"拉鲊",
"兰州新区",
"马鞍山",
"毛坝",
"毛坝关",
"麻城北",
"渑池",
"明城",
"庙城",
"渑池南",
"茅草坪",
"猛洞河",
"磨刀石",
"弥渡",
"帽儿山",
"明港",
"梅河口",
"马皇",
"孟家岗",
"美兰",
"汨罗东",
"马莲河",
"茅岭",
"庙岭",
"茂林",
"穆棱",
"马林",
"马龙",
"木里图",
"汨罗",
"玛纳斯湖",
"冕宁",
"沐滂",
"马桥河",
"闽清",
"民权",
"明水河",
"麻山",
"眉山",
"漫水湾",
"茂舍祖",
"米沙子",
"马踏",
"美溪",
"勉县",
"麻阳",
"密云北",
"米易",
"麦园",
"墨玉",
"庙庄",
"米脂",
"明珠",
"宁安",
"农安",
"南博山",
"南仇",
"南城司",
"宁村",
"宁德",
"南观村",
"南宫东",
"南关岭",
"宁国",
"宁海",
"南华北",
"南河川",
"泥河子",
"宁家",
"南靖",
"牛家",
"能家",
"南口",
"南口前",
"南朗",
"乃林",
"尼勒克",
"那罗",
"宁陵县",
"奈曼",
"宁明",
"南木",
"南平南",
"那铺",
"南桥",
"那曲",
"暖泉",
"南台",
"南头",
"宁武",
"南湾子",
"南翔北",
"宁乡",
"内乡",
"牛心台",
"南峪",
"娘子关",
"南召",
"南杂木",
"蓬安",
"平安",
"平安驿",
"磐安镇",
"平安镇",
"蒲城东",
"蒲城",
"裴德",
"偏店",
"平顶山西",
"坡底下",
"瓢儿屯",
"平房",
"平岗",
"平果",
"平关",
"盘关",
"徘徊北",
"平河口",
"平湖",
"盘锦北",
"潘家店",
"皮口南",
"普兰店",
"偏岭",
"平山",
"彭山",
"皮山",
"磐石",
"平社",
"彭水",
"平台",
"平田",
"莆田",
"葡萄菁",
"普湾",
"平旺",
"平型关",
"普雄",
"蓬溪",
"郫县",
"平洋",
"彭阳",
"平遥",
"平邑",
"平原堡",
"平原",
"平峪",
"彭泽",
"邳州",
"平庄",
"泡子",
"平庄南",
"乾安",
"庆安",
"迁安",
"祁东北",
"七甸",
"曲阜东",
"庆丰",
"奇峰塔",
"曲阜",
"琼海",
"秦皇岛",
"千河",
"清河",
"清河门",
"清华园",
"全椒",
"渠旧",
"潜江",
"秦家",
"綦江",
"祁家堡",
"清涧县",
"秦家庄",
"七里河",
"秦岭",
"渠黎",
"青龙",
"青龙山",
"祁门",
"前磨头",
"青山",
"确山",
"前山",
"清水",
"戚墅堰",
"青田",
"桥头",
"青铜峡",
"前卫",
"前苇塘",
"渠县",
"祁县",
"青县",
"桥西",
"清徐",
"旗下营",
"千阳",
"沁阳",
"泉阳",
"祁阳北",
"七营",
"庆阳山",
"清远",
"清原",
"钦州东",
"钦州",
"青州市",
"瑞安",
"荣昌",
"瑞昌",
"如皋",
"容桂",
"任丘",
"乳山",
"融水",
"热水",
"容县",
"饶阳",
"汝阳",
"绕阳河",
"汝州",
"石坝",
"上板城",
"施秉",
"上板城南",
"世博园",
"双城北",
"舒城",
"商城",
"莎车",
"顺昌",
"神池",
"沙城",
"石城",
"山城镇",
"山丹",
"顺德",
"绥德",
"水洞",
"商都",
"十渡",
"四道湾",
"顺德学院",
"绅坊",
"双丰",
"四方台",
"水富",
"三关口",
"桑根达来",
"韶关",
"上高镇",
"上杭",
"沙海",
"蜀河",
"松河",
"沙河",
"沙河口",
"赛汗塔拉",
"沙河市",
"沙后所",
"山河屯",
"三河县",
"四合永",
"三汇镇",
"双河镇",
"石河子",
"三合庄",
"三家店",
"水家湖",
"沈家河",
"松江河",
"尚家",
"孙家",
"沈家",
"双吉",
"松江",
"三江口",
"司家岭",
"松江南",
"石景山南",
"邵家堂",
"三江县",
"三家寨",
"十家子",
"松江镇",
"施家嘴",
"深井子",
"什里店",
"疏勒",
"疏勒河",
"舍力虎",
"石磷",
"石林",
"双辽",
"绥棱",
"石岭",
"石林南",
"石龙",
"萨拉齐",
"索伦",
"商洛",
"沙岭子",
"石门县北",
"三门峡南",
"三门县",
"石门县",
"三门峡西",
"肃宁",
"宋",
"双牌",
"沙坪坝",
"四平东",
"遂平",
"沙坡头",
"沙桥",
"商丘南",
"水泉",
"石泉县",
"石桥子",
"石人城",
"石人",
"山市",
"神树",
"鄯善",
"三水",
"泗水",
"石山",
"松树",
"首山",
"三十家",
"三十里堡",
"双水镇",
"松树镇",
"松桃",
"索图罕",
"三堂集",
"石头",
"神头",
"沙沱",
"上万",
"孙吴",
"沙湾县",
"歙县",
"遂溪",
"沙县",
"绍兴",
"石岘",
"上西铺",
"石峡子",
"沭阳",
"绥阳",
"寿阳",
"水洋",
"三阳川",
"上腰墩",
"三营",
"顺义",
"三义井",
"三源浦",
"上虞",
"三原",
"上园",
"水源",
"桑园子",
"绥中北",
"苏州北",
"宿州东",
"深圳东",
"深州",
"孙镇",
"绥中",
"尚志",
"师庄",
"松滋",
"师宗",
"苏州园区",
"苏州新区",
"泰安",
"台安",
"通安驿",
"桐柏",
"通北",
"桐城",
"汤池",
"郯城",
"铁厂",
"桃村",
"通道",
"田东",
"天岗",
"土贵乌拉",
"通沟",
"太谷",
"塔哈",
"棠海",
"唐河",
"泰和",
"太湖",
"团结",
"谭家井",
"陶家屯",
"唐家湾",
"统军庄",
"吐列毛杜",
"图里河",
"铜陵",
"田林",
"亭亮",
"铁力",
"铁岭西",
"图们北",
"天门",
"天门南",
"太姥山",
"土牧尔台",
"土门子",
"洮南",
"潼南",
"太平川",
"太平镇",
"图强",
"台前",
"天桥岭",
"土桥子",
"汤山城",
"桃山",
"台山",
"塔石嘴",
"通途",
"汤旺河",
"同心",
"土溪",
"桐乡",
"田阳",
"天义",
"汤阴",
"驼腰岭",
"太阳山",
"通榆",
"汤原",
"塔崖驿",
"滕州东",
"台州",
"天祝",
"滕州",
"天镇",
"桐子林",
"天柱山",
"文安",
"武安",
"王安镇",
"吴堡",
"旺苍",
"五叉沟",
"文昌",
"温春",
"五大连池",
"文登",
"五道沟",
"五道河",
"文地",
"卫东",
"武当山",
"望都",
"乌尔旗汗",
"潍坊",
"万发屯",
"王府",
"瓦房店西",
"王岗",
"武功",
"湾沟",
"吴官田",
"乌海",
"苇河",
"卫辉",
"吴家川",
"五家",
"威箐",
"午汲",
"渭津",
"王家湾",
"倭肯",
"五棵树",
"五龙背",
"乌兰哈达",
"万乐",
"瓦拉干",
"温岭",
"五莲",
"乌拉特前旗",
"乌拉山",
"卧里屯",
"渭南北",
"乌奴耳",
"万宁",
"万年",
"渭南南",
"渭南镇",
"沃皮",
"吴桥",
"汪清",
"武清",
"武山",
"文水",
"魏善庄",
"王瞳",
"五台山",
"王团庄",
"五五",
"无锡东",
"卫星",
"闻喜",
"武乡",
"无锡新区",
"武穴",
"吴圩",
"王杨",
"武义",
"五营",
"瓦窑田",
"五原",
"苇子沟",
"韦庄",
"五寨",
"王兆屯",
"微子镇",
"魏杖子",
"新安",
"兴安",
"新安县",
"新保安",
"下板城",
"西八里",
"宣城",
"兴城",
"小村",
"新绰源",
"下城子",
"新城子",
"喜德",
"小得江",
"西大庙",
"小董",
"小东",
"香坊",
"信丰",
"襄汾",
"息烽",
"新干",
"轩岗",
"孝感",
"西固城",
"兴国",
"西固",
"夏官营",
"西岗子",
"宣汉",
"襄河",
"新和",
"宣和",
"斜河涧",
"新华屯",
"新会",
"新华",
"新晃",
"新化",
"宣化",
"兴和西",
"小河沿",
"下花园",
"小河镇",
"徐家店",
"徐家",
"峡江",
"新绛",
"辛集",
"新江",
"西街口",
"许家屯",
"许家台",
"谢家镇",
"兴凯",
"小榄",
"香兰",
"兴隆店",
"新乐",
"新林",
"小岭",
"新李",
"西林",
"西柳",
"仙林",
"新立屯",
"兴隆县",
"兴隆镇",
"新立镇",
"新民",
"西麻山",
"下马塘",
"孝南",
"咸宁北",
"兴宁",
"咸宁",
"犀浦东",
"西平",
"兴平",
"新坪田",
"霞浦",
"溆浦",
"犀浦",
"新青",
"新邱",
"兴泉堡",
"仙人桥",
"小寺沟",
"杏树",
"浠水",
"下社",
"小市",
"徐水",
"夏石",
"小哨",
"秀山",
"新松浦",
"杏树屯",
"许三湾",
"湘潭",
"邢台",
"向塘",
"仙桃西",
"下台子",
"徐闻",
"新窝铺",
"修武",
"新县",
"息县",
"西乡",
"湘乡",
"西峡",
"孝西",
"小新街",
"新兴县",
"西小召",
"小西庄",
"向阳",
"旬阳",
"旬阳北",
"襄阳东",
"兴业",
"小雨谷",
"新沂",
"兴义",
"信宜",
"小月旧",
"小扬气",
"襄垣",
"夏邑县",
"祥云西",
"新友谊",
"新阳镇",
"徐州东",
"新帐房",
"悬钟",
"新肇",
"忻州",
"汐子",
"西哲里木",
"新杖子",
"姚安",
"依安",
"永安",
"永安乡",
"亚布力",
"元宝山",
"羊草",
"秧草地",
"阳澄湖",
"迎春",
"叶城",
"盐池",
"砚川",
"阳春",
"宜城",
"应城",
"禹城",
"晏城",
"阳城",
"阳岔",
"郓城",
"雁翅",
"云彩岭",
"虞城县",
"营城子",
"英德",
"永登",
"尹地",
"永定",
"阳东",
"雁荡山",
"于都",
"园墩",
"英德西",
"永丰营",
"杨岗",
"阳高",
"阳谷",
"友好",
"余杭",
"沿河城",
"岩会",
"羊臼河",
"永嘉",
"营街",
"盐津",
"阳江",
"余江",
"燕郊",
"姚家",
"岳家井",
"一间堡",
"英吉沙",
"云居寺",
"燕家庄",
"永康",
"营口东",
"银浪",
"永郎",
"宜良北",
"永乐店",
"伊拉哈",
"伊林",
"杨陵",
"彝良",
"杨林",
"余粮堡",
"杨柳青",
"月亮田",
"义马",
"阳明堡",
"玉门",
"云梦",
"元谋",
"一面山",
"沂南",
"宜耐",
"伊宁东",
"营盘水",
"羊堡",
"阳泉北",
"乐清",
"焉耆",
"源迁",
"姚千户屯",
"阳曲",
"榆树沟",
"月山",
"玉石",
"玉舍",
"偃师",
"沂水",
"榆社",
"颍上",
"窑上",
"元氏",
"杨树岭",
"野三坡",
"榆树屯",
"榆树台",
"鹰手营子",
"源潭",
"牙屯堡",
"烟筒山",
"烟筒屯",
"羊尾哨",
"越西",
"攸县",
"阳西",
"永修",
"玉溪西",
"弋阳",
"余姚",
"酉阳",
"岳阳东",
"阳邑",
"鸭园",
"鸳鸯镇",
"燕子砭",
"仪征",
"宜州",
"兖州",
"迤资",
"羊者窝",
"杨杖子",
"镇安",
"治安",
"招柏",
"张百湾",
"中川机场",
"枝城",
"子长",
"诸城",
"邹城",
"赵城",
"章党",
"正定",
"肇东",
"照福铺",
"章古台",
"赵光",
"中和",
"中华门",
"枝江北",
"钟家村",
"朱家沟",
"紫荆关",
"周家",
"诸暨",
"镇江南",
"周家屯",
"褚家湾",
"湛江西",
"朱家窑",
"曾家坪子",
"张兰",
"镇赉",
"枣林",
"扎鲁特",
"扎赉诺尔西",
"樟木头",
"中牟",
"中宁东",
"中宁",
"中宁南",
"镇平",
"漳平",
"泽普",
"枣强",
"张桥",
"章丘",
"朱日和",
"泽润里",
"中山北",
"樟树东",
"珠斯花",
"中山",
"柞水",
"钟山",
"樟树",
"珠窝",
"张维屯",
"彰武",
"棕溪",
"钟祥",
"资溪",
"镇西",
"张辛",
"正镶白旗",
"紫阳",
"枣阳",
"竹园坝",
"张掖",
"镇远",
"漳州东",
"漳州",
"壮志",
"子洲",
"中寨",
"涿州",
"咋子",
"卓资山",
"株洲西",
"郑州西",
"阿巴嘎旗",
"阿城北",
"阿尔山北",
"安江东",
"阿勒泰",
"安仁",
"安顺西",
"安图西",
"安阳东",
"博白",
"八步",
"栟茶",
"保定东",
"八方山",
"白沟",
"滨海",
"滨海北",
"滨海港",
"滨海西",
"宝鸡南",
"北井子",
"白马井",
"北票",
"宝清",
"璧山",
"白沙铺",
"白水县",
"板塘",
"白文东",
"宾西北",
"本溪新城",
"宾阳",
"白洋淀",
"百宜",
"白音华南",
"巴中东",
"彬州",
"滨州",
"宾州",
"霸州西",
"澄城",
"承德县北",
"承德南",
"成都西",
"曹妃甸东",
"曹妃甸港",
"城固北",
"查干湖",
"巢湖东",
"从江",
"蔡家崖",
"茶卡",
"长临河",
"茶陵南",
"常平东",
"常平南",
"长庆桥",
"长寿北",
"长寿湖",
"常山",
"潮汕",
"长沙西",
"朝天",
"长汀南",
"长武",
"长兴",
"苍溪",
"楚雄",
"朝阳",
"长阳",
"潮阳",
"朝阳湖",
"崇州",
"城子坦",
"东安东",
"德保",
"都昌",
"东岔",
"东城南",
"东戴河",
"丹东西",
"东二道河",
"大丰",
"大方南",
"东港北",
"大孤山",
"东莞",
"鼎湖东",
"鼎湖山",
"道滘",
"垫江",
"洞井",
"董家口",
"大苴",
"洞口",
"达连河",
"大荔",
"大朗镇",
"得莫利",
"大青沟",
"德清",
"东胜东",
"砀山南",
"大石头南",
"当涂东",
"大通西",
"大旺",
"定西北",
"德兴东",
"德兴",
"丹霞山",
"大冶北",
"都匀东",
"大邑",
"东营南",
"大余",
"定州东",
"端州",
"大足南",
"峨眉山",
"阿房宫",
"鄂州东",
"防城港北",
"凤城东",
"富川",
"繁昌西",
"丰都",
"涪陵北",
"枫林",
"阜宁东",
"富宁",
"佛坪",
"法启",
"芙蓉南",
"复盛",
"抚松",
"佛山西",
"福山镇",
"福田",
"阜新",
"富阳",
"富源北",
"抚远",
"抚州东",
"抚州",
"方正",
"南",
"高安",
"广安南",
"贵安",
"高碑店东",
"恭城",
"藁城南",
"贵定北",
"葛店南",
"贵定县",
"广汉北",
"高花",
"革居",
"高楞",
"关岭",
"桂林西",
"高密北",
"光明城",
"广宁",
"广宁寺",
"广南县",
"桂平",
"弓棚子",
"赶水东",
"光山",
"谷山",
"观沙岭",
"古田北",
"广通北",
"高台南",
"古田会址",
"贵阳北",
"贵阳东",
"赣榆",
"高邑西",
"惠安",
"淮北北",
"鹤壁东",
"寒葱沟",
"霍城",
"珲春",
"横道河子东",
"邯郸东",
"惠东",
"哈达铺",
"洪洞西",
"海东西",
"哈尔滨北",
"合肥北城",
"合肥南",
"黄冈",
"黄冈东",
"横沟桥东",
"黄冈西",
"洪河",
"怀化南",
"黄河景区",
"惠环",
"花湖",
"后湖",
"怀集",
"河口北",
"宏克力",
"海林北",
"黄流",
"黄陵南",
"鲘门",
"海门",
"虎门",
"侯马西",
"衡南",
"淮南东",
"合浦",
"霍邱",
"怀仁东",
"华容东",
"华容南",
"黑山北",
"衡水北",
"黄石北",
"黄山北",
"贺胜桥东",
"和硕",
"花山南",
"荷塘",
"黄土店",
"海阳北",
"合阳北",
"槐荫",
"鄠邑",
"花园口",
"霍州东",
"惠州南",
"建安",
"泾川",
"景德镇北",
"旌德",
"建德",
"尖峰",
"近海",
"蛟河西",
"军粮城北",
"将乐",
"贾鲁河",
"九郎山",
"即墨北",
"剑门关",
"佳木斯西",
"建宁县北",
"济南东",
"江宁",
"江宁西",
"建瓯西",
"酒泉南",
"句容西",
"建水",
"尖山",
"界首市",
"绩溪北",
"介休东",
"泾县",
"靖西",
"进贤南",
"江油北",
"简阳南",
"嘉峪关南",
"金银潭",
"靖宇",
"金月湾",
"缙云西",
"景州",
"晋中",
"开封北",
"开福寺",
"开化",
"凯里南",
"库伦",
"昆明南",
"葵潭",
"开阳",
"喀左",
"隆安东",
"来宾北",
"灵璧",
"寮步",
"绿博园",
"隆昌北",
"乐昌东",
"临城",
"罗城",
"陵城",
"老城镇",
"龙洞堡",
"乐都南",
"娄底南",
"乐东",
"离堆公园",
"娄烦",
"陆丰",
"龙丰",
"禄丰南",
"临汾西",
"临高南",
"麓谷",
"滦河",
"珞璜南",
"隆回",
"漯河西",
"罗江东",
"柳江",
"利津南",
"兰考南",
"龙口市",
"龙里北",
"兰陵北",
"沥林北",
"醴陵东",
"陇南",
"梁平南",
"礼泉",
"灵石东",
"乐山",
"龙市",
"溧水",
"娄山关南",
"岚山西",
"洛湾三江",
"莱西北",
"岚县",
"溧阳",
"临邑",
"柳园南",
"鹿寨北",
"临淄北",
"阆中",
"临泽南",
"马鞍山东",
"毛陈",
"帽儿山西",
"明港东",
"民和南",
"闵集",
"马兰",
"民乐",
"弥勒",
"玛纳斯",
"牟平",
"闽清北",
"民权北",
"眉山东",
"名山",
"庙山",
"岷县",
"门源",
"暮云",
"蒙自北",
"孟庄",
"蒙自",
"南部",
"南曹",
"南充北",
"南城",
"昌",
"南昌西",
"宁东南",
"宁东",
"南芬北",
"南丰",
"南湖东",
"牛河梁",
"南华",
"内江北",
"南江",
"南江口",
"奈林皋",
"南陵",
"尼木",
"南宁东",
"南宁西",
"南平北",
"南堡北",
"宁强南",
"南雄",
"纳雍",
"南阳寨",
"普安",
"普安县",
"屏边",
"平坝南",
"平昌",
"普定",
"平度",
"蒲江",
"皮口",
"盘龙城",
"蓬莱市",
"普宁",
"平南南",
"平泉北",
"彭山北",
"盘山",
"坪上",
"萍乡北",
"鄱阳",
"濮阳",
"平遥古城",
"平原东",
"盘州",
"普者黑",
"彭州",
"秦安",
"青白江东",
"青川",
"青岛北",
"千岛湖",
"祁东",
"启东",
"青堆",
"青岛西",
"前锋",
"清河门北",
"齐河",
"曲靖北",
"綦江东",
"曲江",
"邛崃",
"青莲",
"齐齐哈尔南",
"清水北",
"青神",
"岐山",
"庆盛",
"清水县",
"曲水县",
"祁县东",
"乾县",
"旗下营南",
"祁阳",
"青州市北",
"全州南",
"棋子湾",
"仁布",
"荣昌北",
"荣成",
"瑞昌西",
"如东",
"榕江",
"日喀则",
"饶平",
"日照西",
"宋城路",
"三道湖",
"邵东",
"三都县",
"胜芳",
"双峰北",
"商河",
"泗洪",
"四会",
"石家庄东",
"三江南",
"三井子",
"双流机场",
"双龙湖",
"石林西",
"沙岭子西",
"双流西",
"胜利镇",
"三明北",
"三明",
"嵩明",
"树木岭",
"神木",
"苏尼特左旗",
"山坡东",
"石桥",
"沈丘",
"鄯善北",
"狮山北",
"三水北",
"松山湖北",
"狮山",
"三水南",
"韶山南",
"三穗",
"石梯",
"汕尾",
"歙县北",
"绍兴北",
"绍兴东",
"泗县",
"始兴",
"双洋",
"泗阳",
"三阳",
"射阳",
"双阳",
"邵阳北",
"松原北",
"山阴",
"邵阳西",
"沈阳西",
"深圳北",
"神州",
"尚志南",
"深圳坪山",
"石嘴山",
"石柱县",
"台安南",
"桃村北",
"田东北",
"土地堂东",
"太谷西",
"吐哈",
"通海",
"太和北",
"天河机场",
"天河街",
"唐海南",
"通化县",
"同江",
"托克托东",
"吐鲁番北",
"铜陵北",
"桐庐",
"泰宁",
"铜仁南",
"天水南",
"通渭",
"田心东",
"汤逊湖",
"藤县",
"太原南",
"通远堡西",
"桐梓北",
"桐梓东",
"通州",
"吴川",
"文登东",
"潍坊北",
"五府山",
"威虎岭北",
"威海北",
"苇河西",
"温江",
"乌兰察布",
"五龙背东",
"乌龙泉南",
"乌兰木图",
"五女山",
"武胜",
"五通",
"无为",
"瓦屋山",
"闻喜西",
"武义北",
"武夷山北",
"武夷山东",
"婺源",
"渭源",
"万州北",
"武陟",
"梧州南",
"兴安北",
"许昌东",
"项城",
"新都东",
"西渡",
"西丰",
"先锋",
"湘府路",
"襄汾西",
"孝感北",
"孝感东",
"西湖东",
"新化南",
"新晃西",
"新津",
"小金口",
"辛集南",
"新津南",
"西来",
"新民北",
"门",
"咸宁东",
"咸宁南",
"溆浦南",
"西平西",
"响水县",
"湘潭北",
"邢台东",
"西乌旗",
"修武西",
"修文县",
"萧县北",
"新香坊北",
"新乡东",
"新余北",
"西阳村",
"信阳东",
"咸阳秦都",
"仙游",
"祥云",
"新郑机场",
"香樟路",
"忻州西",
"雅安",
"永安南",
"迎宾路",
"亚布力西",
"永城北",
"盐城北",
"运城北",
"永川东",
"禹城东",
"宜春",
"岳池",
"云东海",
"姚渡",
"云浮东",
"永福南",
"雨格",
"洋河",
"永济北",
"弋江",
"延吉西",
"永康南",
"依兰",
"运粮河",
"炎陵",
"杨陵南",
"羊马",
"一面坡北",
"伊敏",
"郁南",
"云南驿",
"银瓶",
"延平西",
"原平西",
"杨桥",
"阳曲西",
"阳朔",
"永寿",
"云山",
"玉山南",
"雁石南",
"永泰",
"银滩",
"鹰潭北",
"烟台南",
"伊通",
"烟台西",
"尤溪",
"云霄",
"宜兴",
"玉溪",
"阳信",
"应县",
"攸县南",
"洋县西",
"义县西",
"余姚北",
"榆中",
"诏安",
"淄博北",
"正定机场",
"纸坊东",
"准格尔",
"庄河北",
"昭化",
"织金北",
"张家川",
"芷江",
"织金",
"仲恺",
"曾口",
"珠琳",
"左岭",
"樟木头东",
"驻马店西",
"邹平",
"漳浦",
"漳平西",
"章丘北",
"肇庆东",
"庄桥",
"昭山",
"钟山西",
"朱砂古镇",
"漳县",
"资阳北",
"遵义",
"遵义南",
"张掖西",
"资中北",
"涿州东",
"枣庄东",
"卓资东",
"郑州东",
"株洲南",
"香港红磡",
"宜宾西"
]
CITY_CODE = [
"VAP",
"BOP",
"BJP",
"VNP",
"BXP",
"IZQ",
"CUW",
"CQW",
"CRW",
"CXW",
"GGQ",
"SHH",
"SNH",
"AOH",
"SXH",
"TBP",
"TJP",
"TIP",
"TXP",
"XJA",
"CCT",
"CET",
"CRT",
"ICW",
"CNW",
"CDW",
"CSQ",
"CWQ",
"JAK",
"FZS",
"FYS",
"GIW",
"GZQ",
"GXQ",
"HBB",
"VBB",
"VAB",
"HFH",
"HTH",
"NDC",
"HHC",
"KEQ",
"HMQ",
"VUQ",
"HGH",
"HZH",
"XHH",
"JNK",
"JGK",
"KMM",
"KXM",
"LSO",
"LVJ",
"LZJ",
"LAJ",
"NCG",
"NJH",
"NKH",
"NNZ",
"VVP",
"SJP",
"SYT",
"SBT",
"SDT",
"SOT",
"TBV",
"TDV",
"TYV",
"WHN",
"KNM",
"WAR",
"EAY",
"XAY",
"CAY",
"XNO",
"YIJ",
"ZZF",
"ART",
"AKY",
"ASR",
"AHX",
"AKR",
"APT",
"AQH",
"ASW",
"AST",
"AYF",
"BAB",
"BBH",
"BCT",
"BHZ",
"BEL",
"BAP",
"BJY",
"BJB",
"BKX",
"BIZ",
"HJL",
"BTT",
"BDC",
"BTC",
"BXR",
"BXT",
"BEC",
"BXJ",
"BZH",
"CBN",
"VGQ",
"CDP",
"CDT",
"CFD",
"CDG",
"CEH",
"CPP",
"CRG",
"CTT",
"CDB",
"CXK",
"COM",
"CXT",
"CBF",
"IYH",
"CZJ",
"CZH",
"CZQ",
"CZF",
"COP",
"CZZ",
"RNT",
"DCT",
"DUT",
"DFB",
"DMQ",
"DHD",
"DHL",
"DHJ",
"DHT",
"DJB",
"DFP",
"DDW",
"DFT",
"DKM",
"DLT",
"DNG",
"DZX",
"DOC",
"DQT",
"DTV",
"DPK",
"DUX",
"RYW",
"DOF",
"RXW",
"DZP",
"EJC",
"RLC",
"ESN",
"FES",
"FJQ",
"FLV",
"FLW",
"FRX",
"FET",
"FSQ",
"FXD",
"FYH",
"GRO",
"GHW",
"GJV",
"GBZ",
"GRX",
"GLZ",
"GXN",
"GSN",
"GNJ",
"GYW",
"GBQ",
"GZG",
"GLT",
"GBT",
"AUH",
"HRH",
"HMB",
"HVN",
"HBV",
"KCN",
"HCY",
"HDP",
"HDB",
"HGB",
"HTT",
"HEM",
"HJB",
"HHQ",
"HKN",
"HLD",
"HRX",
"HWD",
"HLB",
"HMV",
"HMR",
"HAH",
"HNB",
"EUH",
"HQM",
"HBP",
"HRP",
"OSN",
"HSY",
"HKH",
"HSN",
"HSP",
"HYQ",
"HIK",
"HXZ",
"HOY",
"HCQ",
"VAG",
"JAL",
"JBG",
"JCF",
"JJZ",
"JCG",
"JFF",
"JGX",
"JGG",
"JHL",
"RNH",
"JBH",
"JJG",
"JLL",
"JMN",
"JMB",
"JIK",
"JAC",
"JQJ",
"JUH",
"JIQ",
"JTL",
"JVJ",
"JXB",
"JRH",
"JGJ",
"JFW",
"JKP",
"JZT",
"JZD",
"KLR",
"KFF",
"KLV",
"KLW",
"KSR",
"KNH",
"KTR",
"KYT",
"UAH",
"LBF",
"UCH",
"LCW",
"LKZ",
"LCN",
"LCG",
"UTP",
"LDL",
"LDQ",
"LFV",
"LGP",
"LHC",
"LON",
"LWJ",
"UHP",
"LHM",
"LQL",
"LJL",
"LHV",
"LLG",
"LKV",
"UPP",
"UMW",
"LVV",
"LST",
"LWH",
"LXJ",
"LEQ",
"UEP",
"LYS",
"LYQ",
"LYF",
"UKH",
"LDF",
"LVK",
"LLF",
"DHR",
"LYD",
"LYL",
"LZX",
"LZZ",
"LZD",
"MCN",
"MDX",
"MDB",
"MRX",
"MGH",
"MHX",
"MVX",
"MDQ",
"MMZ",
"MSB",
"MJT",
"VAW",
"MYW",
"MOQ",
"MLX",
"NVH",
"NGH",
"NCB",
"NCW",
"NDZ",
"NMP",
"NFT",
"NHX",
"NGX",
"NJW",
"NPS",
"NUH",
"NFF",
"NZX",
"PEN",
"PVD",
"PIJ",
"POJ",
"PQP",
"PSQ",
"PXG",
"PXZ",
"PCW",
"PRW",
"QRN",
"QSW",
"QDK",
"QYP",
"QJM",
"QNW",
"QEB",
"QHX",
"QTB",
"QVV",
"QRS",
"QYS",
"QEH",
"RAZ",
"RQJ",
"RJG",
"RZK",
"SCB",
"SFB",
"SGQ",
"SHD",
"SHB",
"SFX",
"SXT",
"SLL",
"OMY",
"SMF",
"ONY",
"NIW",
"SPT",
"SQF",
"SRG",
"SSQ",
"OAH",
"OTQ",
"SWS",
"OEP",
"SEQ",
"JUQ",
"SYQ",
"SNN",
"SMS",
"SSB",
"VYT",
"SZH",
"SZQ",
"OXH",
"SZN",
"SUV",
"OSQ",
"TBQ",
"TVX",
"TGY",
"TGP",
"TXX",
"THL",
"TLX",
"TFR",
"TLD",
"TLT",
"TPT",
"TML",
"RDQ",
"FUP",
"TFT",
"TAK",
"TSP",
"TSJ",
"TYT",
"TQT",
"UTH",
"TZW",
"TAP",
"WCB",
"WCN",
"WDT",
"WKK",
"WHH",
"WXC",
"WJT",
"WMR",
"WLW",
"WWT",
"WNY",
"WSM",
"WIT",
"WUJ",
"WWJ",
"WXH",
"WXR",
"WPB",
"WAS",
"WYY",
"WYW",
"WZZ",
"RZH",
"VRH",
"ECW",
"XCF",
"ENW",
"XTC",
"XKS",
"XMS",
"XBS",
"XWM",
"XXF",
"XUN",
"XYY",
"XFN",
"XYT",
"XUG",
"XCH",
"YWY",
"YBW",
"YWB",
"YBD",
"HAN",
"YCW",
"AFH",
"YCN",
"YNV",
"YCB",
"YCV",
"YBP",
"YCG",
"YET",
"YGW",
"YIV",
"YJL",
"YKT",
"YKX",
"YLZ",
"YNY",
"ALY",
"TWQ",
"YPB",
"YMR",
"YAY",
"YZW",
"YPV",
"YNP",
"YYV",
"YQB",
"AQP",
"NUW",
"YNG",
"AOP",
"YRT",
"YTG",
"YAK",
"YEX",
"ATP",
"YWH",
"YON",
"YXD",
"AEQ",
"YYQ",
"YUQ",
"AOQ",
"YLH",
"ZBK",
"ZDV",
"ZGW",
"ZHQ",
"ZIQ",
"ZJZ",
"ZJH",
"DIQ",
"ZKP",
"ZMP",
"ZKN",
"ZLC",
"ZTX",
"ZDN",
"ZVQ",
"ZIT",
"ZDW",
"ZWJ",
"ZYW",
"ZIW",
"ZEK",
"ZZW",
"ZZQ",
"ZFK",
"AAX",
"ACB",
"ADX",
"ARW",
"ADP",
"ADO",
"AGT",
"YED",
"AHP",
"PKQ",
"AJJ",
"ARH",
"AJB",
"AJD",
"PYW",
"AER",
"AYY",
"ALD",
"AUZ",
"ASX",
"ALN",
"JTX",
"AZM",
"APH",
"AXT",
"ATV",
"ASH",
"ATR",
"ATL",
"AXS",
"BWQ",
"BPW",
"BGV",
"BMH",
"BCR",
"BUP",
"BEP",
"BDP",
"BPP",
"ILP",
"BNN",
"BGM",
"BUT",
"BIY",
"BVC",
"BWH",
"BEY",
"BJJ",
"BJM",
"IBQ",
"BLQ",
"BBM",
"BSB",
"BTD",
"BKD",
"BKB",
"BAT",
"BRZ",
"BOR",
"BQC",
"BLX",
"BNB",
"BOZ",
"BLB",
"BLR",
"BND",
"AAP",
"BMD",
"BNM",
"BMB",
"BRP",
"RPD",
"BQP",
"BQB",
"BQL",
"BAY",
"BSY",
"BPM",
"BAL",
"BUM",
"FDC",
"BTQ",
"BZP",
"BYP",
"BHT",
"BXK",
"VXD",
"BYC",
"BYB",
"BIV",
"BAC",
"BID",
"BYT",
"BNJ",
"BCD",
"IEW",
"RMP",
"BVP",
"CIN",
"CBC",
"CEJ",
"CCM",
"CCP",
"CID",
"CAX",
"CGT",
"CEF",
"CGV",
"CGY",
"CAJ",
"CZB",
"WBW",
"CHB",
"CHZ",
"CKT",
"CHP",
"CIH",
"CJT",
"CJX",
"CAM",
"CJY",
"CLK",
"CYP",
"CUQ",
"CLP",
"CLT",
"CMB",
"CNJ",
"VBP",
"DAQ",
"CPM",
"CQB",
"CON",
"CSB",
"EFW",
"CSP",
"CST",
"CSL",
"CSC",
"CVT",
"CES",
"CTY",
"CPT",
"CQQ",
"CIP",
"CNZ",
"CXQ",
"CRP",
"CFH",
"CYK",
"CAL",
"CEK",
"CEX",
"CYL",
"CDD",
"CYD",
"CYF",
"CZL",
"CUH",
"ESH",
"CXH",
"CKQ",
"CVK",
"CFP",
"CWM",
"ICQ",
"CBP",
"DAG",
"RAT",
"DBJ",
"DBC",
"DBD",
"NWQ",
"RBT",
"DCR",
"DYJ",
"DBB",
"RDT",
"DGJ",
"DVW",
"DDB",
"DKJ",
"DRD",
"DRX",
"TKX",
"UFQ",
"DGY",
"DIL",
"DMM",
"DTT",
"RGW",
"DGP",
"DHB",
"DHP",
"DQD",
"SOQ",
"DXT",
"DQH",
"DJT",
"DKB",
"DJL",
"DKP",
"RVD",
"DHO",
"DLC",
"DLB",
"DLD",
"DIC",
"DTX",
"DLV",
"DNC",
"GZT",
"DMD",
"DEP",
"DNF",
"DNZ",
"DPD",
"RPP",
"DPI",
"DVT",
"LFX",
"DQX",
"DML",
"DQB",
"MOH",
"RHX",
"DRQ",
"DKH",
"RWW",
"DWT",
"DPM",
"DSL",
"DYC",
"RZT",
"DBH",
"DQK",
"DGT",
"DBM",
"DTL",
"RUH",
"DNT",
"DRJ",
"DFJ",
"DWJ",
"DZZ",
"DFM",
"DXL",
"DXX",
"DSJ",
"DXM",
"DXG",
"DKV",
"DXV",
"RXP",
"DXD",
"DYH",
"DYW",
"DYX",
"DYN",
"EXH",
"IAW",
"DBV",
"DYV",
"EWH",
"RYV",
"DYZ",
"DJP",
"DZD",
"DTJ",
"DIP",
"DCH",
"DVQ",
"DNV",
"DFZ",
"DZV",
"DWV",
"ROP",
"DXP",
"DZY",
"DAP",
"RZP",
"EBW",
"RDP",
"RDX",
"EEC",
"RLD",
"ELA",
"EMW",
"RML",
"PXQ",
"RYJ",
"ECN",
"FAS",
"FCG",
"FNG",
"FIH",
"FEM",
"FHX",
"FHR",
"FHT",
"FEV",
"FHH",
"FIB",
"FTT",
"FLJ",
"FTB",
"FZB",
"FNH",
"AKH",
"FNP",
"FQS",
"VMW",
"FSJ",
"FUQ",
"FSV",
"FST",
"FKP",
"FSZ",
"FTX",
"FYP",
"FDY",
"FXY",
"FEY",
"FXK",
"FUH",
"FAV",
"FBT",
"FYG",
"FYM",
"FYT",
"FYX",
"FBG",
"FZY",
"FZC",
"VZK",
"GFP",
"VJW",
"GBP",
"GBD",
"GDJ",
"GCN",
"GEP",
"GCV",
"GZB",
"GRH",
"GTW",
"IDW",
"GDV",
"GGZ",
"GVP",
"GGT",
"GGL",
"GGJ",
"GGP",
"GAX",
"GEX",
"GDT",
"GKT",
"GLJ",
"GEJ",
"GFM",
"GHT",
"GLF",
"VOW",
"GLP",
"GMK",
"GMC",
"GRT",
"GNT",
"GNM",
"GPF",
"GEY",
"GAG",
"GQD",
"GQY",
"GZD",
"GST",
"GSW",
"GSP",
"GSL",
"GSD",
"GXD",
"GTJ",
"GAY",
"GTS",
"GTP",
"KEP",
"GXG",
"GYH",
"GXF",
"GIP",
"GYF",
"GAW",
"GUJ",
"GYL",
"GYD",
"GZS",
"GNQ",
"GEH",
"GZY",
"GZJ",
"GSQ",
"GXT",
"GOT",
"GSS",
"GAT",
"HIH",
"AMH",
"HWN",
"VXN",
"HBL",
"HEB",
"HAF",
"XEG",
"VCQ",
"HCZ",
"HCN",
"HCT",
"WKW",
"HCJ",
"HCP",
"HXT",
"HGC",
"HDV",
"HFR",
"HFG",
"HXJ",
"HGP",
"HYM",
"IGW",
"HHT",
"VHD",
"HUD",
"HJJ",
"HJR",
"HFM",
"HIM",
"HJF",
"HJV",
"HJS",
"HJT",
"HDC",
"HXP",
"HJM",
"HKJ",
"HKG",
"KOH",
"HUB",
"HPD",
"HHB",
"HIT",
"HOB",
"HIB",
"ULY",
"HRB",
"VLB",
"HAT",
"HLL",
"HIL",
"HAX",
"VTJ",
"HLT",
"VEH",
"HYP",
"HHL",
"HNH",
"HMJ",
"VAQ",
"HZM",
"VQH",
"HEY",
"HRV",
"HRN",
"HDY",
"HDL",
"VSR",
"VSB",
"VSQ",
"HSQ",
"HOT",
"VCH",
"HHP",
"HSJ",
"HUT",
"HSO",
"HEQ",
"VSJ",
"HQB",
"VTK",
"VTR",
"VTQ",
"HZT",
"HWK",
"RWH",
"VXB",
"HYY",
"VHB",
"VTB",
"HTJ",
"VIX",
"HFO",
"HAY",
"HVQ",
"HUW",
"HQY",
"HGJ",
"WHW",
"VIQ",
"HUN",
"HNO",
"HYJ",
"VZH",
"HZZ",
"VON",
"HZV",
"VXQ",
"JRT",
"JIY",
"JBD",
"JEF",
"JCJ",
"JCK",
"JNV",
"JFD",
"JDB",
"JFP",
"JOB",
"UDH",
"JST",
"VGP",
"JHP",
"JHX",
"JHB",
"JHR",
"JIR",
"JHZ",
"AJH",
"VJD",
"JJS",
"JEY",
"JJB",
"JJW",
"JKT",
"JLJ",
"JMM",
"JWQ",
"JES",
"JOK",
"JNP",
"JVS",
"JPC",
"JQX",
"SSX",
"EGH",
"JSH",
"JCN",
"JRN",
"JVV",
"JSL",
"JET",
"JOP",
"JIB",
"EAH",
"JTB",
"JOM",
"JTJ",
"JNL",
"JWX",
"JUG",
"JKK",
"JUK",
"JXV",
"JXH",
"JJP",
"EPH",
"JXT",
"UEH",
"JYW",
"JRQ",
"JYS",
"JYK",
"JYZ",
"JYH",
"JYJ",
"SZL",
"JYF",
"JXJ",
"JZK",
"WEF",
"JZH",
"JEQ",
"JBN",
"JXK",
"JXP",
"JIP",
"JOD",
"JOF",
"JVP",
"JYD",
"KAT",
"KCR",
"KCP",
"KDX",
"KDT",
"KOB",
"KDC",
"KAW",
"KJB",
"KQX",
"KLC",
"KHR",
"PVQ",
"KQL",
"KSH",
"KAB",
"KSB",
"KXZ",
"KAM",
"KHX",
"KXT",
"KZP",
"UBZ",
"LLT",
"LPF",
"LUQ",
"LCQ",
"UCP",
"UCK",
"LCK",
"LDY",
"LRC",
"LDO",
"LDP",
"LVP",
"LVM",
"LJP",
"LOP",
"LFP",
"UFD",
"LNB",
"LGM",
"LOM",
"LGJ",
"LGB",
"UFH",
"LXX",
"LHX",
"JID",
"LNL",
"KLH",
"LHP",
"UNP",
"LEX",
"LRT",
"UDT",
"LVT",
"LKS",
"UJH",
"LJB",
"LJW",
"LJZ",
"UJT",
"LJX",
"UJL",
"LHB",
"ULK",
"LIJ",
"LKF",
"LKB",
"LKQ",
"LAX",
"LAB",
"LRM",
"LLW",
"LWQ",
"LLB",
"UWZ",
"UAP",
"LMX",
"LMB",
"LMJ",
"UNG",
"UQW",
"LPM",
"LPP",
"UPJ",
"LPG",
"UQK",
"UQJ",
"UTW",
"LUM",
"UDQ",
"LGT",
"USP",
"USH",
"LIQ",
"LRN",
"LAF",
"LMK",
"LSV",
"LUL",
"LSG",
"LBT",
"LSD",
"LAS",
"LSB",
"LET",
"LTZ",
"LAR",
"LTP",
"LBM",
"LVZ",
"LTJ",
"VLJ",
"LWK",
"LRJ",
"LNJ",
"UXK",
"LXB",
"LXY",
"LXQ",
"LUG",
"LXK",
"LXC",
"UXP",
"LYK",
"LYY",
"LYT",
"LDD",
"UYK",
"UIH",
"LNF",
"LXL",
"LMH",
"LVS",
"LYX",
"LAQ",
"LYP",
"LPQ",
"LEJ",
"LZT",
"UAQ",
"LIW",
"LIZ",
"LZS",
"LZA",
"LEM",
"LQJ",
"MAH",
"MBY",
"MGY",
"MBN",
"MCF",
"MCL",
"MAP",
"MNF",
"KPM",
"MUQ",
"MOB",
"MDF",
"MRB",
"MGN",
"MHL",
"MHZ",
"MGB",
"MHQ",
"MQQ",
"MHB",
"MLZ",
"MLL",
"MLD",
"MLB",
"MID",
"MGM",
"MUD",
"MLQ",
"MNR",
"UGW",
"MPQ",
"MQB",
"MQS",
"MQF",
"MUT",
"MAB",
"MSW",
"MKW",
"MOM",
"MST",
"PWQ",
"MEB",
"MVY",
"MVQ",
"MUP",
"MMW",
"MYS",
"MUR",
"MZJ",
"MEY",
"MFQ",
"NAB",
"NAT",
"NBK",
"NCK",
"NSP",
"NCZ",
"NES",
"NGP",
"NFP",
"NLT",
"NNH",
"NHH",
"NHS",
"NHJ",
"NHD",
"NVT",
"NJS",
"NJB",
"NJD",
"NKP",
"NKT",
"NNQ",
"NLD",
"NIR",
"ULZ",
"NLF",
"NMD",
"NMZ",
"NMX",
"NNS",
"NPZ",
"NQD",
"NQO",
"NQJ",
"NTT",
"NOQ",
"NWV",
"NWP",
"NEH",
"NXQ",
"NXF",
"NXT",
"NUP",
"NIP",
"NAF",
"NZT",
"PAW",
"PAL",
"PNO",
"PAJ",
"PZT",
"PEY",
"PCY",
"PDB",
"PRP",
"BFF",
"PXJ",
"PRT",
"PFB",
"PGL",
"PGZ",
"PGM",
"PAM",
"PHP",
"PHM",
"PHQ",
"PBD",
"PDP",
"PKT",
"PLT",
"PNT",
"PSB",
"PSW",
"PSR",
"PSL",
"PSV",
"PHW",
"PVT",
"PTM",
"PTS",
"PTW",
"PWT",
"PWV",
"PGV",
"POW",
"KZW",
"PWW",
"PYX",
"PYJ",
"PYV",
"PIK",
"PPJ",
"PYK",
"PYP",
"PZG",
"PJH",
"PZD",
"POD",
"PND",
"QOT",
"QAB",
"QQP",
"QRQ",
"QDM",
"QAK",
"QFT",
"QVP",
"QFK",
"QYQ",
"QTP",
"QUY",
"QIP",
"QHD",
"QHP",
"INH",
"QJZ",
"QJN",
"QJB",
"QJW",
"QBT",
"QNY",
"QZV",
"QLD",
"QLY",
"QLZ",
"QIB",
"QGH",
"QIH",
"QMP",
"QSB",
"QSN",
"QXQ",
"QUJ",
"QYH",
"QVH",
"QAT",
"QTJ",
"QWD",
"QWP",
"QRW",
"QXV",
"QXP",
"QXJ",
"QUV",
"QXC",
"QOY",
"QYF",
"QYL",
"QVQ",
"QYJ",
"QSJ",
"QBQ",
"QYT",
"QDZ",
"QRZ",
"QZK",
"RAH",
"RCW",
"RCG",
"RBH",
"RUQ",
"RQP",
"ROK",
"RSZ",
"RSD",
"RXZ",
"RVP",
"RYF",
"RHD",
"ROF",
"OBJ",
"SBP",
"AQW",
"OBP",
"ZWT",
"SBB",
"OCH",
"SWN",
"SCR",
"SCS",
"SMV",
"SCP",
"SCT",
"SCL",
"SDJ",
"ORQ",
"ODY",
"SIL",
"SXC",
"SEP",
"OUD",
"OJQ",
"OLH",
"OFB",
"STB",
"OTW",
"OKJ",
"OGC",
"SNQ",
"SVK",
"JBS",
"SED",
"SHY",
"SBM",
"SHP",
"SKT",
"SHC",
"VOP",
"SSD",
"SHL",
"OXP",
"OHD",
"OZW",
"SEL",
"SZR",
"SVP",
"ODP",
"SQH",
"OJJ",
"SJL",
"SJB",
"SUB",
"OJB",
"SML",
"SAH",
"SKD",
"OLK",
"IMH",
"SRP",
"SJJ",
"SOZ",
"SMM",
"SJD",
"OZL",
"SHM",
"SWT",
"OMP",
"SUR",
"SHJ",
"VLD",
"SPB",
"SLM",
"ZJD",
"SIB",
"SOL",
"LNM",
"SLQ",
"SLC",
"SNT",
"OLY",
"SLP",
"VFQ",
"SCF",
"OQH",
"OMQ",
"SXF",
"SYP",
"SOB",
"SBZ",
"CYW",
"PPT",
"SON",
"SFJ",
"SQM",
"SPF",
"SID",
"SXY",
"SQT",
"SRB",
"SRL",
"SQB",
"SWB",
"SSR",
"SJQ",
"OSK",
"SAD",
"SFT",
"SAT",
"SRD",
"SST",
"PQQ",
"SSL",
"MZQ",
"SHX",
"SDH",
"OTB",
"SEV",
"SFM",
"SWP",
"SKB",
"SXR",
"OVH",
"SXZ",
"SAS",
"SOH",
"SXL",
"SXM",
"SXJ",
"FMH",
"SYB",
"SYV",
"OYP",
"SYJ",
"SPJ",
"OEJ",
"SOP",
"OYD",
"SYL",
"BDH",
"SAY",
"SUD",
"OYJ",
"SAJ",
"SND",
"OHH",
"SRH",
"BJQ",
"OZP",
"OZY",
"SZD",
"SZB",
"SNM",
"SIN",
"SEM",
"KAH",
"ITH",
"TMK",
"TID",
"TAJ",
"TBF",
"TBB",
"TTH",
"TCX",
"TZK",
"TCL",
"TCK",
"TRQ",
"TDZ",
"TGL",
"TGC",
"TOL",
"TGV",
"THX",
"THM",
"THF",
"THG",
"TKH",
"TIX",
"TNJ",
"TOT",
"PDQ",
"TZP",
"TMD",
"TEX",
"TJH",
"TFZ",
"TIZ",
"TLB",
"PXT",
"QSL",
"TMN",
"TNN",
"TLS",
"TRC",
"TCJ",
"TVT",
"TVW",
"TIT",
"TEB",
"TQX",
"TTK",
"TQL",
"TQJ",
"TCT",
"TAB",
"PUQ",
"TIM",
"TUT",
"THB",
"TXJ",
"TSW",
"TCH",
"TRZ",
"TND",
"TYF",
"TIL",
"TYJ",
"KTT",
"TYB",
"TYP",
"TEK",
"TZH",
"TZJ",
"TXK",
"TZV",
"TEW",
"QWH",
"WBP",
"WAP",
"WVP",
"WUY",
"WEW",
"WCT",
"WEQ",
"WDB",
"WRB",
"WBK",
"WDL",
"WHP",
"WNZ",
"WVT",
"WRN",
"WDP",
"WHX",
"WFK",
"WFB",
"WUT",
"WXT",
"WGB",
"WGY",
"WGL",
"WGM",
"WVC",
"WHB",
"WHF",
"WCJ",
"WUB",
"WAM",
"WJP",
"WJL",
"WJJ",
"WQB",
"WKT",
"WBT",
"WLC",
"WEB",
"WVX",
"VHH",
"WLK",
"WQC",
"WSC",
"WLX",
"WBY",
"WRX",
"WNQ",
"WWG",
"WVY",
"WNJ",
"WPT",
"WUP",
"WQL",
"WWP",
"WSJ",
"WEV",
"WSP",
"WTP",
"WSV",
"WZJ",
"WVR",
"WGH",
"WVB",
"WXV",
"WVV",
"IFH",
"WXN",
"WYZ",
"WYB",
"RYH",
"WWB",
"WIM",
"WYC",
"WZL",
"WZY",
"WZV",
"WZB",
"WQP",
"WKD",
"EAM",
"XAZ",
"XAF",
"XAP",
"EBP",
"XLP",
"ECH",
"XCD",
"XEM",
"XRX",
"XCB",
"XCT",
"EDW",
"EJM",
"XMP",
"XEZ",
"XOD",
"XFB",
"EFG",
"XFV",
"XFW",
"EGG",
"XGV",
"XGN",
"XUJ",
"EUG",
"XIJ",
"XGJ",
"NBB",
"XHY",
"XXB",
"XIR",
"XWJ",
"EEP",
"XAX",
"EFQ",
"XHB",
"XLQ",
"EHQ",
"XHP",
"XEC",
"XYD",
"XYP",
"EKY",
"HYK",
"XJB",
"EJG",
"XJV",
"ENP",
"XJM",
"EKM",
"XJT",
"XTJ",
"XMT",
"EKB",
"EAQ",
"XNB",
"XDD",
"ELP",
"XPX",
"XLB",
"XLJ",
"XYB",
"GCT",
"XPH",
"XLD",
"EXP",
"XZB",
"XGT",
"XMD",
"XMB",
"XAT",
"XNV",
"XRN",
"ENQ",
"XNN",
"XAW",
"XPN",
"XPY",
"XPM",
"XOS",
"EPQ",
"XIW",
"XQB",
"XQD",
"XQJ",
"XRL",
"ESP",
"XSB",
"XZN",
"XSV",
"XST",
"XSP",
"XIZ",
"XAM",
"ETW",
"XOB",
"XDT",
"XSJ",
"XTQ",
"XTP",
"XTG",
"XAN",
"EIP",
"XJQ",
"EPD",
"XWF",
"XSN",
"ENN",
"XQY",
"XXQ",
"XIF",
"XOV",
"XXM",
"XGQ",
"XZC",
"XXP",
"XDB",
"XUY",
"XBY",
"XWN",
"SNZ",
"XHM",
"VIH",
"XRZ",
"EEQ",
"XFM",
"XYX",
"EIF",
"EJH",
"EXM",
"EYB",
"XZJ",
"UUH",
"XZX",
"XRP",
"XZT",
"XXV",
"XZD",
"XRD",
"ERP",
"YAC",
"YAX",
"YAS",
"YNB",
"YBB",
"YUD",
"YAB",
"YKM",
"AIH",
"YYB",
"YER",
"YKJ",
"YYY",
"YQQ",
"YIN",
"YHN",
"YCK",
"YEK",
"YNF",
"YAL",
"YPK",
"YAP",
"ACP",
"IXH",
"YCT",
"YDQ",
"YDJ",
"YDM",
"YGS",
"WLQ",
"YGH",
"YDG",
"YAJ",
"IIQ",
"YYM",
"YRB",
"YOV",
"YIK",
"YOB",
"EVH",
"YHP",
"AEP",
"YHM",
"URH",
"YAM",
"AEW",
"WRQ",
"YHG",
"AJP",
"YAT",
"YGJ",
"YJT",
"YIR",
"AFP",
"AZK",
"RFH",
"YGT",
"YJX",
"YLW",
"YSM",
"YDY",
"YLX",
"YLB",
"YSY",
"ALW",
"YLM",
"YLD",
"YQP",
"YUM",
"YMF",
"YVV",
"YXJ",
"YMN",
"YMM",
"YST",
"YNK",
"YVM",
"YNR",
"YZJ",
"ABM",
"YPP",
"UPH",
"YSR",
"AQK",
"YQT",
"YQV",
"YGP",
"YBF",
"YSJ",
"AUM",
"YSF",
"YUK",
"YSV",
"YVH",
"ASP",
"YSP",
"YAD",
"AIP",
"YSX",
"YUT",
"YIP",
"YTQ",
"YTZ",
"YSL",
"YUX",
"YWM",
"YHW",
"YOG",
"WMQ",
"ACG",
"YXM",
"YIG",
"YYH",
"AFW",
"YIQ",
"ARP",
"YYL",
"YYJ",
"YZY",
"UZH",
"YSZ",
"YZK",
"YQM",
"AEM",
"YZD",
"ZEY",
"ZAD",
"ZBP",
"ZUP",
"ZJJ",
"ZCN",
"ZHY",
"ZQK",
"ZIK",
"ZCV",
"ZHT",
"ZDP",
"ZDB",
"ZFM",
"ZGD",
"ZGB",
"ZHX",
"VNH",
"ZIN",
"ZJY",
"ZUB",
"ZYP",
"ZOB",
"ZDH",
"ZEH",
"ZOD",
"CWJ",
"ZWQ",
"ZUJ",
"ZBW",
"ZLV",
"ZLT",
"ZIV",
"ZLD",
"ZXX",
"ZOQ",
"ZGF",
"ZDJ",
"VNJ",
"ZNJ",
"ZPF",
"ZPS",
"ZPR",
"ZVP",
"ZQY",
"ZTK",
"ZRC",
"ZLM",
"ZGQ",
"ZOG",
"ZHD",
"ZSQ",
"ZSY",
"ZSZ",
"ZSG",
"ZOP",
"ZWB",
"ZWD",
"ZOY",
"ZTN",
"ZXS",
"ZVT",
"ZIP",
"ZXC",
"ZVY",
"ZYN",
"ZAW",
"ZYJ",
"ZUW",
"GOS",
"ZUS",
"ZUX",
"ZZY",
"ZZM",
"ZXP",
"ZAL",
"ZZC",
"ZAQ",
"XPF",
"AQC",
"ABB",
"ARX",
"ADA",
"AUR",
"ARG",
"ASE",
"AXL",
"ADF",
"BBZ",
"BBE",
"FWH",
"BMP",
"FGQ",
"FEP",
"YKP",
"FCP",
"BGU",
"FHP",
"BBY",
"BRT",
"BFQ",
"BPT",
"BUB",
"FZW",
"BSN",
"BGY",
"NGQ",
"BCV",
"BBB",
"BVT",
"UKZ",
"FWP",
"FHW",
"FNC",
"BDE",
"BXY",
"BIK",
"BZB",
"FOP",
"CUY",
"IYP",
"IVP",
"CMW",
"POP",
"PGP",
"CBY",
"VAT",
"GUH",
"KNW",
"EBV",
"CVO",
"FVH",
"CNG",
"FQQ",
"FPQ",
"CQJ",
"COW",
"CSE",
"CSU",
"CBQ",
"RXQ",
"CTE",
"CNS",
"CWY",
"CBH",
"CXE",
"CUM",
"VBT",
"CYN",
"CNQ",
"CYE",
"CZE",
"CWT",
"DCZ",
"RBZ",
"DCG",
"DCJ",
"IYQ",
"RDD",
"RWT",
"DRB",
"KRQ",
"DNE",
"RGT",
"RMT",
"RTQ",
"UWQ",
"NVQ",
"RRQ",
"DJE",
"FWQ",
"DTK",
"DIM",
"DKA",
"DCB",
"DNY",
"KOQ",
"DTB",
"DSD",
"DRH",
"RSC",
"PRH",
"DAL",
"OWH",
"DTO",
"WWQ",
"DNJ",
"DDG",
"DWG",
"IRQ",
"DBN",
"KJW",
"DEE",
"DOK",
"DYG",
"DOP",
"WZQ",
"FQW",
"IXW",
"EGY",
"EFN",
"FBZ",
"FDT",
"FDZ",
"PUH",
"FUW",
"FEW",
"FLN",
"FDU",
"FNM",
"FUY",
"FQE",
"KCQ",
"FAW",
"FSL",
"FOQ",
"FZQ",
"NZQ",
"FOT",
"FYU",
"FBM",
"FYB",
"FDG",
"FZG",
"FNB",
"FXS",
"GCG",
"VUW",
"GAE",
"GMP",
"GCZ",
"GUP",
"FMW",
"GNN",
"KIW",
"GVW",
"HGD",
"GEM",
"GLB",
"GLE",
"GEZ",
"GVK",
"IMQ",
"FBQ",
"GQT",
"GXM",
"GAZ",
"GPT",
"GDE",
"GUN",
"FFQ",
"FKQ",
"GBS",
"GPM",
"GAJ",
"STS",
"KQW",
"KEW",
"GYU",
"GNP",
"HNS",
"PLH",
"HFF",
"HKB",
"SER",
"HUL",
"KUX",
"HPP",
"KDQ",
"HDJ",
"HTV",
"HDO",
"HTB",
"COH",
"ENH",
"KGN",
"KAN",
"HNN",
"KXN",
"HPB",
"KAQ",
"HCF",
"KHQ",
"KHN",
"IHN",
"FAQ",
"HBM",
"OKB",
"KBX",
"KLQ",
"VLY",
"KMQ",
"HMU",
"IUQ",
"HPV",
"HNG",
"HOH",
"HVZ",
"FBH",
"HFV",
"HPN",
"KRN",
"HQT",
"IHP",
"KSN",
"NYH",
"HLN",
"VUR",
"KNN",
"KXQ",
"HKP",
"HEK",
"HTY",
"IYN",
"KXY",
"HYT",
"HWV",
"KNQ",
"JUL",
"JAJ",
"JDG",
"NSH",
"JDU",
"PFQ",
"JHD",
"JOL",
"JMP",
"JLS",
"JLF",
"KJQ",
"JVK",
"JME",
"JUB",
"JCS",
"MDK",
"JJH",
"OKH",
"JUS",
"JNJ",
"JWH",
"JSM",
"JPQ",
"JUN",
"NRH",
"JDV",
"LOH",
"JMZ",
"JXG",
"JBE",
"JOW",
"JBJ",
"JTN",
"JYL",
"PYQ",
"PYH",
"JEP",
"JZV",
"KBF",
"FLQ",
"KHU",
"QKW",
"KLD",
"KOM",
"KTQ",
"KVW",
"KZT",
"IDZ",
"UCZ",
"GMH",
"LTQ",
"LCF",
"NWW",
"ILQ",
"UUP",
"VCZ",
"LGK",
"ACQ",
"FVW",
"LVO",
"UOQ",
"UQQ",
"INW",
"USV",
"LLQ",
"KFQ",
"LQM",
"LXV",
"KGQ",
"BNQ",
"UDP",
"LNE",
"LHA",
"LBN",
"IKW",
"UQZ",
"LNK",
"LUF",
"UKK",
"KFW",
"COK",
"KBQ",
"UKQ",
"INJ",
"LPE",
"LGY",
"UDV",
"IVW",
"LAG",
"LDH",
"LSE",
"UWK",
"KRW",
"LBK",
"UXV",
"LEH",
"LUK",
"LNR",
"LSZ",
"UEK",
"LZE",
"LDJ",
"OMH",
"MHN",
"MUB",
"MDN",
"MNO",
"MJN",
"MLR",
"MBJ",
"MLM",
"MSR",
"MBK",
"MBS",
"MIF",
"IUW",
"MSE",
"MSN",
"MXJ",
"MYO",
"KIQ",
"MBM",
"MZF",
"MZM",
"NBE",
"NEF",
"NCE",
"NDG",
"NOG",
"NXG",
"NDJ",
"NOJ",
"NUT",
"NFG",
"NDN",
"LKT",
"NAM",
"NKW",
"FIW",
"NDQ",
"NGT",
"LLH",
"NMO",
"NFZ",
"NXZ",
"NBS",
"TLP",
"NOY",
"NCQ",
"NYE",
"NYF",
"PAN",
"PUE",
"PBM",
"PBE",
"PCE",
"PGW",
"PAK",
"PJE",
"PUT",
"PNN",
"POK",
"PEQ",
"PAZ",
"PBP",
"PPW",
"PUD",
"PSK",
"PBG",
"PYG",
"PYF",
"PDV",
"PUK",
"PAE",
"PZM",
"PMW",
"QGJ",
"QFW",
"QCE",
"QHK",
"QDU",
"QMQ",
"QOU",
"QET",
"QUK",
"QFB",
"QBD",
"QIK",
"QBM",
"QDE",
"QIM",
"QLE",
"QEW",
"QNB",
"QEJ",
"QVW",
"QAY",
"QSQ",
"QIJ",
"QSO",
"QGV",
"QBY",
"QNC",
"QWQ",
"QOK",
"QNZ",
"QZQ",
"RUO",
"RQW",
"RCK",
"RXG",
"RIH",
"RVW",
"RKO",
"RVQ",
"KZK",
"SFF",
"SDL",
"FIQ",
"KKW",
"SUP",
"NFQ",
"SOK",
"GQH",
"AHQ",
"SXP",
"SWZ",
"OJT",
"IPW",
"OHB",
"SYM",
"IXP",
"IQW",
"OLB",
"SHS",
"SVS",
"SVM",
"FMQ",
"HMY",
"ONC",
"SBN",
"SQE",
"SQN",
"SMR",
"NSQ",
"ARQ",
"KUQ",
"KSQ",
"RNQ",
"INQ",
"QHW",
"STE",
"OGQ",
"NPH",
"SLH",
"SSH",
"GPH",
"IPQ",
"SQS",
"MPH",
"SYU",
"SAU",
"OYT",
"OVQ",
"OCT",
"SNV",
"SXA",
"OOT",
"IOQ",
"SRQ",
"OZB",
"IFQ",
"QQJ",
"OSW",
"TAD",
"TOK",
"TBZ",
"TTN",
"TIV",
"THR",
"TAM",
"JYN",
"TJN",
"TEN",
"IEP",
"TXL",
"TJB",
"TVC",
"TAR",
"KXH",
"TLU",
"TNS",
"TNW",
"TIJ",
"TWJ",
"KQQ",
"THN",
"TAZ",
"TNV",
"TST",
"TBE",
"TDE",
"TOP",
"WAQ",
"WGK",
"WJK",
"WFG",
"WBL",
"WHK",
"WIB",
"WJE",
"WPC",
"WMT",
"WFN",
"VLT",
"WET",
"WSE",
"WTZ",
"IIH",
"WAH",
"WOV",
"WDH",
"WBS",
"WCS",
"WYG",
"WEJ",
"WZE",
"WIF",
"WBZ",
"XDZ",
"XVF",
"ERN",
"EWW",
"XDA",
"XFT",
"NQQ",
"FVQ",
"XTV",
"XJN",
"GDN",
"WDQ",
"EJQ",
"EWQ",
"IRW",
"NKQ",
"IJP",
"ITW",
"XLE",
"XOT",
"EMS",
"XKN",
"UNN",
"EMQ",
"EGQ",
"XSU",
"EDQ",
"EDP",
"XWC",
"EXF",
"XWE",
"QSH",
"RHB",
"EGF",
"XBG",
"XQF",
"OYN",
"XOY",
"XWS",
"XQM",
"EZF",
"FNQ",
"IXV",
"YAE",
"YQS",
"YFW",
"YSB",
"RGH",
"AEH",
"ABV",
"WMW",
"YSK",
"YEG",
"AWW",
"NAQ",
"AOJ",
"IXQ",
"YBZ",
"VTM",
"GTH",
"AJV",
"RVH",
"YXL",
"QUH",
"YEB",
"YEF",
"YAG",
"YEY",
"YME",
"YXB",
"YMX",
"YKQ",
"ANM",
"KPQ",
"YWS",
"IPV",
"YQA",
"IQV",
"YCZ",
"ASY",
"KZQ",
"YGG",
"YMS",
"YTS",
"CTQ",
"YKG",
"YLK",
"YTL",
"YTK",
"YXS",
"YBS",
"YUH",
"AXM",
"YVK",
"YZV",
"YXG",
"YXY",
"YSD",
"CTH",
"IZJ",
"ZDS",
"ZRK",
"ZHP",
"ZMN",
"ZEC",
"ZUT",
"ZHW",
"ZJE",
"ZIJ",
"ZPQ",
"IZW",
"KKQ",
"ZKE",
"ZOM",
"ZSN",
"ZRQ",
"ZLN",
"ZLK",
"ZCS",
"ZXG",
"ZVK",
"FCQ",
"ZQH",
"KWQ",
"ZAZ",
"ZSE",
"ZXJ",
"FYW",
"ZYE",
"ZNE",
"ZEJ",
"WZW",
"ZAP",
"ZNK",
"ZDC",
"ZAF",
"KVQ",
"JQO",
"YXE"
]
def city2code(cityName):
    """Look up the 12306 telecode for a Chinese city name.

    Raises ValueError (from list.index) when the name is unknown.
    """
    position = CITY_NAME.index(cityName)
    return CITY_CODE[position]
def code2city(cityCode):
    """Reverse lookup: map a 12306 telecode back to its Chinese city name.

    Raises ValueError (from list.index) when the code is unknown.
    """
    position = CITY_CODE.index(cityCode)
    return CITY_NAME[position]
from net import NetUtils
import re
class CityCode(object):
    """Scrapes the 12306 station-name script for (city name, telecode) pairs."""

    # JavaScript resource on kyfw.12306.cn that embeds every station name/code.
    __url = r'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9110'

    def parse(self):
        """Download the station script and return [(chinese_name, telecode), ...]."""
        raw = NetUtils.EasyHttp.get(CityCode.__url, 10)
        pairs = re.findall(r'([\u4e00-\u9fa5]+)\|([A-Z]+)', raw)
        return pairs

    def save(fileName, data):
        """Append *data* to *fileName*; invoked unbound, e.g. CityCode.save(path, s)."""
        with open(fileName, 'a') as out:
            out.write(data)
if __name__ == '__main__':
    # Fetch the full station table from 12306 and print a handful of codes.
    cityCode = CityCode()
    cityCodeContent = cityCode.parse()
    # NOTE(review): the loop variable below shadows the CityCode instance
    # `cityCode`; harmless here because the instance is not used afterwards.
    for cityName, cityCode in cityCodeContent:
        # print('"' + cityName + '",')
        # CityCode.save('CityName.txt', cityName)
        # Only report these four specific telecodes.
        if cityCode == 'YXE' or cityCode == 'BXP' or cityCode == 'GGQ' or cityCode == 'CSQ':
            print(cityName,'"' + cityCode + '",')
| 10.843004 | 102 | 0.261522 |
148f4e6a449fc65d325a9f306eb64f74fa4f17c2 | 943 | py | Python | setup.py | NeoDrags/yash | 189adf1dcb8baf06d183914dca9e928a93d08db3 | [
"MIT"
] | 4 | 2021-07-25T13:58:21.000Z | 2021-10-19T08:57:49.000Z | setup.py | NeoDrags/yash | 189adf1dcb8baf06d183914dca9e928a93d08db3 | [
"MIT"
] | 1 | 2021-05-24T12:03:35.000Z | 2021-05-24T12:03:35.000Z | setup.py | NeoDrags/yash | 189adf1dcb8baf06d183914dca9e928a93d08db3 | [
"MIT"
] | null | null | null | from os import name
from setuptools import setup, find_packages
import pathlib
HERE = pathlib.Path(__file__).parent

# Long description shown on PyPI; decode explicitly as UTF-8 so installation
# does not depend on the platform's locale encoding (e.g. Windows cp1252
# would raise UnicodeDecodeError on a README with non-ASCII characters).
README = (HERE / "README.md").read_text(encoding="utf-8")

setup(
    name = "neosh",
    packages = ["neosh", "neosh.neosh", "neosh.themes"],
    version = "0.0.2",
    description = "Yet Another SHell written in python",
    long_description = README,
    long_description_content_type = "text/markdown",
    url="https://github.com/neodrags/yash-shell",
    author="Prateek Kesavarapu",
    author_email="kesavarapu.prateek@gmail.com",
    license="MIT",
    include_package_data = True,
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.9",
    ],
    install_requires=["prompt-toolkit", "termcolor", "pygments"],
    # Installs the `neosh` console command pointing at the shell entry point.
    entry_points = {
        "console_scripts": [
            "neosh = neosh.neosh.shell:shell"
        ]
    }
)
| 29.46875 | 65 | 0.639449 |
21aa0fdb8a35aafb73f4db1b6683022853976a5e | 1,443 | py | Python | T2/src/main.py | pedromsfernandes/VCOM1920 | c50874c32e1e470bd30bed5b732737ac55ef40a5 | [
"MIT"
] | null | null | null | T2/src/main.py | pedromsfernandes/VCOM1920 | c50874c32e1e470bd30bed5b732737ac55ef40a5 | [
"MIT"
] | null | null | null | T2/src/main.py | pedromsfernandes/VCOM1920 | c50874c32e1e470bd30bed5b732737ac55ef40a5 | [
"MIT"
] | 1 | 2021-03-04T01:29:57.000Z | 2021-03-04T01:29:57.000Z | import keras.backend.tensorflow_backend as tfback
import tensorflow as tf
import sys
import os
from task1 import task1_CNN, task1_BOVW
from task2 import task2
from task3 import task3
def _get_available_gpus():
    """Return the names of the GPU devices visible to TensorFlow.

    Populates and reuses the tfback._LOCAL_DEVICES cache, mirroring the
    Keras backend helper that this function replaces.
    """
    if tfback._LOCAL_DEVICES is None:
        logical = tf.config.list_logical_devices()
        tfback._LOCAL_DEVICES = [device.name for device in logical]
    return [name for name in tfback._LOCAL_DEVICES if 'device:gpu' in name.lower()]
# Device selection: the monkey-patch below replaces Keras' broken GPU
# discovery; the CUDA_VISIBLE_DEVICES line forces CPU-only execution
# (comment it out to run on the GPU instead).
tfback._get_available_gpus = _get_available_gpus
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
def main():
    """CLI entry point: dispatch to task 1, 2 or 3 based on sys.argv.

    Usage: python main.py <TASK> [strategy], TASK in {1, 2, 3}; for task 1
    an optional strategy 'bovw' selects the bag-of-visual-words pipeline.
    """
    if (len(sys.argv) != 2 and len(sys.argv) != 3) or (sys.argv[1] != '1' and sys.argv[1] != '2' and sys.argv[1] != '3'):
        print("Usage: python " + sys.argv[0] + " <TASK>")
        print("Where TASK is one of 1, 2 or 3.")
        return

    task = sys.argv[1]

    if task == '1':
        # Bug fix: the original read sys.argv[2] unconditionally, raising
        # IndexError when task 1 was invoked without a strategy argument
        # (len(sys.argv) == 2 passes the check above). Default to CNN.
        strategy = sys.argv[2] if len(sys.argv) == 3 else 'cnn'
        if strategy == 'bovw':
            task1_BOVW()
        else:
            task1_CNN()
    elif task == '2':
        task2()
    else:
        task3()
if __name__ == "__main__":
    # Run the CLI dispatcher only when executed as a script.
    main()
| 28.294118 | 121 | 0.644491 |
c293adbe6ba0a7121b5c62747c0b5538b898700a | 2,549 | py | Python | lib/sqlalchemy/dialects/mssql/zxjdbc.py | Slashbunny/maraschino | 941a0f82a352e9c178e701d5156711b613f7f6db | [
"MIT"
] | 137 | 2015-01-12T19:29:04.000Z | 2022-02-25T04:51:02.000Z | lib/sqlalchemy/dialects/mssql/zxjdbc.py | Slashbunny/maraschino | 941a0f82a352e9c178e701d5156711b613f7f6db | [
"MIT"
] | 24 | 2015-01-06T08:36:13.000Z | 2019-04-08T13:59:05.000Z | lib/sqlalchemy/dialects/mssql/zxjdbc.py | Slashbunny/maraschino | 941a0f82a352e9c178e701d5156711b613f7f6db | [
"MIT"
] | 57 | 2015-01-01T00:42:44.000Z | 2022-03-10T20:54:41.000Z | # mssql/zxjdbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Microsoft SQL Server database via the zxjdbc JDBC
connector.
JDBC Driver
-----------
Requires the jTDS driver, available from: http://jtds.sourceforge.net/
Connecting
----------
URLs are of the standard form of
``mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]``.
Additional arguments which may be specified either as query string
arguments on the URL, or as keyword arguments to
:func:`~sqlalchemy.create_engine()` will be passed as Connection
properties to the underlying JDBC driver.
"""
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext
from sqlalchemy.engine import base
class MSExecutionContext_zxjdbc(MSExecutionContext):
    # True when "; SELECT scope_identity()" has been appended to the INSERT
    # statement so the generated key can be read back from the same batch.
    _embedded_scope_identity = False

    def pre_exec(self):
        super(MSExecutionContext_zxjdbc, self).pre_exec()

        # scope_identity after the fact returns null in jTDS so we must
        # embed it
        if self._select_lastrowid and self.dialect.use_scope_identity:
            self._embedded_scope_identity = True
            self.statement += "; SELECT scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            # Advance through result sets until the embedded scope_identity()
            # row can be fetched. (Python 2 `except ..., e` syntax retained;
            # this module predates Python 3.)
            while True:
                try:
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error, e:
                    self.cursor.nextset()
            self._lastrowid = int(row[0])

        # Buffer the full result when RETURNING was compiled into a DML statement.
        if (self.isinsert or self.isupdate or self.isdelete) and \
                self.compiled.returning:
            self._result_proxy = base.FullyBufferedResultProxy(self)

        # Undo the IDENTITY_INSERT toggle switched on earlier for this table.
        if self._enable_identity_insert:
            table = self.dialect.identifier_preparer.format_table(
                self.compiled.statement.table)
            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
    # jTDS JDBC URL scheme and driver class used to reach SQL Server.
    jdbc_db_name = 'jtds:sqlserver'
    jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'

    execution_ctx_cls = MSExecutionContext_zxjdbc

    def _get_server_version_info(self, connection):
        # Parse the dotted dbversion string into an int tuple, e.g. "10.2.5" -> (10, 2, 5).
        return tuple(
            int(x)
            for x in connection.connection.dbversion.split('.')
        )

# Module-level hook looked up by SQLAlchemy's dialect registry.
dialect = MSDialect_zxjdbc
979ea6202598a555cee892771c4e0b5dc79574e2 | 22,676 | py | Python | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_server_azure_ad_administrators_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_server_azure_ad_administrators_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_server_azure_ad_administrators_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServerAzureADAdministratorsOperations(object):
"""ServerAzureADAdministratorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # msrest serializer/deserializer pair for model <-> wire mapping.
    self._serialize = serializer
    self._deserialize = deserializer
    # Service client configuration (subscription id, polling interval, ...).
    self._config = config
def get(
self,
resource_group_name, # type: str
server_name, # type: str
administrator_name, # type: Union[str, "_models.AdministratorName"]
**kwargs # type: Any
):
# type: (...) -> "_models.ServerAzureADAdministrator"
"""Gets a Azure Active Directory administrator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param administrator_name: The name of server active directory administrator.
:type administrator_name: str or ~azure.mgmt.sql.models.AdministratorName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServerAzureADAdministrator, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ServerAzureADAdministrator
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerAzureADAdministrator"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'administratorName': self._serialize.url("administrator_name", administrator_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServerAzureADAdministrator', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/administrators/{administratorName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
server_name, # type: str
administrator_name, # type: Union[str, "_models.AdministratorName"]
parameters, # type: "_models.ServerAzureADAdministrator"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ServerAzureADAdministrator"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ServerAzureADAdministrator"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'administratorName': self._serialize.url("administrator_name", administrator_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServerAzureADAdministrator')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ServerAzureADAdministrator', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServerAzureADAdministrator', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/administrators/{administratorName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
server_name, # type: str
administrator_name, # type: Union[str, "_models.AdministratorName"]
parameters, # type: "_models.ServerAzureADAdministrator"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServerAzureADAdministrator"]
"""Creates or updates an existing Azure Active Directory administrator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param administrator_name: The name of server active directory administrator.
:type administrator_name: str or ~azure.mgmt.sql.models.AdministratorName
:param parameters: The requested Azure Active Directory administrator Resource state.
:type parameters: ~azure.mgmt.sql.models.ServerAzureADAdministrator
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServerAzureADAdministrator or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.sql.models.ServerAzureADAdministrator]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServerAzureADAdministrator"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
administrator_name=administrator_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServerAzureADAdministrator', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'administratorName': self._serialize.url("administrator_name", administrator_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/administrators/{administratorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
server_name, # type: str
administrator_name, # type: Union[str, "_models.AdministratorName"]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'administratorName': self._serialize.url("administrator_name", administrator_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/administrators/{administratorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
server_name, # type: str
administrator_name, # type: Union[str, "_models.AdministratorName"]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the Azure Active Directory administrator with the given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param administrator_name: The name of server active directory administrator.
:type administrator_name: str or ~azure.mgmt.sql.models.AdministratorName
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
administrator_name=administrator_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'administratorName': self._serialize.url("administrator_name", administrator_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/administrators/{administratorName}'} # type: ignore
def list_by_server(
self,
resource_group_name, # type: str
server_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AdministratorListResult"]
"""Gets a list of Azure Active Directory administrators in a server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AdministratorListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.AdministratorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AdministratorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AdministratorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/administrators'} # type: ignore
| 51.072072 | 214 | 0.671062 |
bdcda2b61c398ec291118a658b3a6c1d2dcc8891 | 2,064 | py | Python | src/watchpoints/watch_print.py | KikeM/watchpoints | d101035efef45bab7670e1a05a141c74c34c1f9e | [
"Apache-2.0"
] | null | null | null | src/watchpoints/watch_print.py | KikeM/watchpoints | d101035efef45bab7670e1a05a141c74c34c1f9e | [
"Apache-2.0"
] | null | null | null | src/watchpoints/watch_print.py | KikeM/watchpoints | d101035efef45bab7670e1a05a141c74c34c1f9e | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/watchpoints/blob/master/NOTICE.txt
import sys
import pprint
import threading
class WatchPrint:
    """Default callback that pretty-prints a triggered watchpoint to a stream."""

    def __init__(self, file=sys.stderr, stack_limit=None):
        # Output stream (defaults to stderr) and optional max call-stack depth.
        self.file = file
        self.stack_limit = stack_limit

    def __call__(self, frame, elem, exec_info):
        """Print the trigger banner, call stack and old -> new value of *elem*.

        frame: the frame in which the watched variable changed.
        elem: the watch element; provides alias, prev_obj and obj.
        exec_info: (func_name, filename, lineno) of the change site.
        """
        p = self.printer
        p("====== Watchpoints Triggered ======")
        if threading.active_count() > 1:
            # Only label the output with a thread name when multi-threaded.
            curr_thread = threading.current_thread()
            p(f"---- {curr_thread.name} ----")
        p("Call Stack (most recent call last):")
        curr_frame = frame.f_back
        frame_counter = 0
        trace_back_data = []
        # Walk outward through caller frames, honoring stack_limit when set.
        while curr_frame and (self.stack_limit is None or frame_counter < self.stack_limit - 1):
            trace_back_data.append(self._frame_string(curr_frame))
            curr_frame = curr_frame.f_back
            frame_counter += 1
        # Frames were collected innermost-first; reverse so the most recent
        # call prints last (traceback convention).
        for s in trace_back_data[::-1]:
            p(s)
        p(self._file_string(exec_info))
        if elem.alias:
            p(f"{elem.alias}:")
        elif elem.default_alias:
            p(f"{elem.default_alias}:")
        p(elem.prev_obj)
        p("->")
        p(elem.obj)
        p("")

    def _file_string(self, exec_info):
        # One "func (file:line):" header line plus the offending source line.
        return f"  {exec_info[0]} ({exec_info[1]}:{exec_info[2]}):\n" + \
            self.getsourceline(exec_info)

    def _frame_string(self, frame):
        return self._file_string((frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno))

    def getsourceline(self, exec_info):
        """Return the stripped source line at exec_info's file/lineno, if readable."""
        try:
            with open(exec_info[1], encoding="utf-8") as f:
                lines = f.readlines()
                return f"> {lines[exec_info[2] - 1].strip()}"
        except (FileNotFoundError, PermissionError):
            return "unable to locate the source"

    def printer(self, obj):
        # Plain print for strings; pprint for arbitrary objects.
        if type(obj) is str:
            print(obj, file=self.file)
        else:
            pprint.pprint(obj, stream=self.file)
| 32.25 | 98 | 0.593508 |
d115e139038683711ae498195b5cbddebf51d4d8 | 1,840 | py | Python | tests/command_line/test_stereographic_projections.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | tests/command_line/test_stereographic_projections.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | tests/command_line/test_stereographic_projections.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import json
import os
from pathlib import Path
import procrunner
from dials.command_line import stereographic_projection
def test_stereographic_projection(dials_data, tmp_path):
    """Smoke-test the dials.stereographic_projection CLI end to end."""
    result = procrunner.run(
        (
            "dials.stereographic_projection",
            dials_data("centroid_test_data", pathlib=True) / "experiments.json",
            "hkl_limit=4",
            "plot.filename=proj.png",
            "json.filename=proj.json",
        ),
        working_directory=tmp_path,
    )
    # The command must succeed without writing anything to stderr.
    assert not result.returncode and not result.stderr
    # All three requested output files are produced.
    assert tmp_path.joinpath("projections.txt").is_file()
    assert tmp_path.joinpath("proj.png").is_file()
    assert tmp_path.joinpath("proj.json").is_file()
    d = json.loads(tmp_path.joinpath("proj.json").read_text())
    # Plotly figure JSON: top-level keys plus trace metadata.
    assert set(d) == {"data", "layout"}
    assert d["data"][0]["name"] == "stereographic_projections"
    # hkl_limit=4 on this dataset projects exactly 289 points.
    assert len(d["data"][0]["x"]) == 289
def test_labels(dials_data, tmp_path):
    """Check that plot.labels are propagated into the plotly hover text."""
    output_file = tmp_path / "proj.json"
    experiments = sorted(
        dials_data("multi_crystal_proteinase_k", pathlib=True).glob("experiments*.json")
    )
    # Label experiment i with the string "i"; write the figure JSON to tmp_path.
    args = [str(e) for e in experiments] + [
        f"plot.labels={' '.join(str(i) for i in range(len(experiments)))}",
        f"json.filename={output_file}",
        "hkl=1,0,0",
    ]
    cwd = Path.cwd()
    try:
        # run() writes relative outputs, so execute from the tmp directory.
        os.chdir(tmp_path)
        stereographic_projection.run(args)
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(cwd)
    d = json.loads(output_file.read_bytes())
    assert d["data"][0]["hoverinfo"] == "text"
    # Each experiment's label appears twice in the hover text.
    assert d["data"][0]["hovertext"] == [
        "0",
        "0",
        "1",
        "1",
        "2",
        "2",
        "3",
        "3",
        "4",
        "4",
        "5",
        "5",
        "6",
        "6",
        "7",
        "7",
    ]
f83458d1cc6ce0ec3cf05f6b66cc3b6ce0071af6 | 1,946 | py | Python | awx/ui/conf.py | alexander-bauer/awx | d1319b739406dad988f97c41cb92093f180ba822 | [
"Apache-2.0"
] | 1 | 2018-02-25T17:56:18.000Z | 2018-02-25T17:56:18.000Z | awx/ui/conf.py | alexander-bauer/awx | d1319b739406dad988f97c41cb92093f180ba822 | [
"Apache-2.0"
] | null | null | null | awx/ui/conf.py | alexander-bauer/awx | d1319b739406dad988f97c41cb92093f180ba822 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Django
from django.utils.translation import ugettext_lazy as _
# Tower
from awx.conf import register, fields
from awx.ui.fields import * # noqa
# Register each UI-related Tower setting with the configuration registry.
# All entries share category "UI" so they are grouped together in the
# settings API/console.

# Controls how much usage analytics is reported (off / anonymous / detailed).
register(
    'PENDO_TRACKING_STATE',
    field_class=PendoTrackingStateField,
    choices=[
        ('off', _('Off')),
        ('anonymous', _('Anonymous')),
        ('detailed', _('Detailed')),
    ],
    label=_('Analytics Tracking State'),
    help_text=_('Enable or Disable Analytics Tracking.'),
    category=_('UI'),
    category_slug='ui',
)

# Plain-text notice shown in the login modal (e.g. a legal disclaimer).
# Requires the "rebranding" feature to be licensed.
register(
    'CUSTOM_LOGIN_INFO',
    field_class=fields.CharField,
    allow_blank=True,
    default='',
    label=_('Custom Login Info'),
    help_text=_('If needed, you can add specific information (such as a legal '
                'notice or a disclaimer) to a text box in the login modal using '
                'this setting. Any content added must be in plain text, as '
                'custom HTML or other markup languages are not supported.'),
    category=_('UI'),
    category_slug='ui',
    feature_required='rebranding',
)

# Custom login-page logo, stored as a base64 data URI (see placeholder).
# Requires the "rebranding" feature to be licensed.
register(
    'CUSTOM_LOGO',
    field_class=CustomLogoField,
    allow_blank=True,
    default='',
    label=_('Custom Logo'),
    help_text=_('To set up a custom logo, provide a file that you create. For '
                'the custom logo to look its best, use a .png file with a '
                'transparent background. GIF, PNG and JPEG formats are supported.'),
    placeholder='data:image/gif;base64,R0lGODlhAQABAIABAP///wAAACwAAAAAAQABAAACAkQBADs=',
    category=_('UI'),
    category_slug='ui',
    feature_required='rebranding',
)

# Upper bound on job events returned to the UI per request (minimum 100).
register(
    'MAX_UI_JOB_EVENTS',
    field_class=fields.IntegerField,
    min_value=100,
    label=_('Max Job Events Retrieved by UI'),
    help_text=_('Maximum number of job events for the UI to retrieve within a '
                'single request.'),
    category=_('UI'),
    category_slug='ui',
)
| 29.484848 | 89 | 0.649024 |
89956d6d83688671daf07fc4654ccb8c7253e830 | 1,459 | py | Python | notebooks/boston_housing_hmc.py | janosh/thermo | 0202a47ec8abacfd49b065ddd13ad060b0b9a1a3 | [
"MIT"
] | 9 | 2019-10-08T20:47:30.000Z | 2021-11-20T07:51:25.000Z | notebooks/boston_housing_hmc.py | janosh/thermo | 0202a47ec8abacfd49b065ddd13ad060b0b9a1a3 | [
"MIT"
] | 4 | 2021-12-10T12:42:20.000Z | 2022-03-01T21:24:18.000Z | notebooks/boston_housing_hmc.py | janosh/thermo | 0202a47ec8abacfd49b065ddd13ad060b0b9a1a3 | [
"MIT"
] | 3 | 2019-10-28T21:50:24.000Z | 2021-11-10T18:41:20.000Z | """
This notebook essentially runs an end-to-end test comparing RF vs MAP NN vs HMC
NN performance on the simple Boston housing dataset.
"""
# %%
import tensorflow as tf
import tensorflow_probability as tfp
from thermo.bnn.hmc import hmc_predict
from thermo.bnn.map import map_predict
from thermo.plots import plot_output
from thermo.rf import rf_predict
# %%
# About the data: https://kaggle.com/c/boston-housing
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.boston_housing.load_data()
X_train, y_train = X_train.astype("float32"), y_train.astype("float32")
X_test, y_test = X_test.astype("float32"), y_test.astype("float32")
# %%
rf_y_pred, rf_y_std, rf_model = rf_predict(X_train, y_train, X_test)
# %%
plot_output(y_test, rf_y_pred, rf_y_std, title="RF")
# %%
print(f"RF MAE: {abs(rf_y_pred - y_test).mean():.3f}")
# %%
weight_prior = tfp.distributions.Normal(0, 0.2)
bias_prior = tfp.distributions.Normal(0, 0.2)
map_y_pred, map_y_var, map_log_probs, map_final_state = map_predict(
weight_prior, bias_prior, X_train, y_train, X_test, y_test
)
# %%
print(f"RF MAE: {abs(map_y_pred - y_test).mean():.3f}")
# %%
plot_output(y_test, map_y_pred, map_y_var ** 0.5)
# %%
hmc_y_pred, hmc_y_var, _ = hmc_predict(
weight_prior, bias_prior, map_final_state, X_train, y_train, X_test, y_test
)
# %%
print(f"RF MAE: {abs(hmc_y_pred - y_test).mean():.3f}")
# %%
plot_output(y_test, hmc_y_pred, hmc_y_var ** 0.5, title="HMC")
| 22.446154 | 83 | 0.722413 |
6707c043dbf346da6bf6fc253bf44c0a9c57ec70 | 5,209 | py | Python | backend/tests/unit/client/schemas/test_transaction.py | sdediego/blockchain | 786246c3fc4d80a827d5a6f54e2257b836cc1cf3 | [
"MIT"
] | null | null | null | backend/tests/unit/client/schemas/test_transaction.py | sdediego/blockchain | 786246c3fc4d80a827d5a6f54e2257b836cc1cf3 | [
"MIT"
] | null | null | null | backend/tests/unit/client/schemas/test_transaction.py | sdediego/blockchain | 786246c3fc4d80a827d5a6f54e2257b836cc1cf3 | [
"MIT"
] | null | null | null | # encoding: utf-8
import json
import random
import uuid
from pydantic import ValidationError
from src.client.models.utils import get_utcnow_timestamp
from src.client.models.wallet import Wallet
from src.client.schemas.transaction import TransactionSchema
from tests.unit.client.utilities import ClientMixin
class TransactionSchemaTest(ClientMixin):
    # Validation tests for TransactionSchema. The schema accepts exactly one of
    # two argument sets: (uuid, output, input) for an already-built transaction,
    # or (sender, recipient, amount) for a new one.
    # NOTE(review): self.wallet, self.recipient, self.amount and
    # _generate_float() come from ClientMixin (not visible in this file).

    def setUp(self):
        super(TransactionSchemaTest, self).setUp()
        self.output = self._generate_output()
        self.input = self._generate_input()

    def _generate_address(self):
        # Fresh wallet address to act as a transaction recipient.
        return Wallet.generate_address()

    def _generate_uuid(self):
        # 128-bit random transaction identifier as an int.
        return uuid.uuid4().int

    def _generate_output(self):
        # Output maps each address to the amount it receives: the recipient
        # gets `amount`, the sender keeps the remaining balance.
        output = {}
        output[self.recipient] = self.amount
        output[self.wallet.address] = self.wallet.balance - self.amount
        return output

    def _generate_input(self):
        # Input carries the sender's metadata plus a signature over the output.
        input = {}
        input['timestamp'] = get_utcnow_timestamp()
        input['amount'] = self.wallet.balance
        input['address'] = self.wallet.address
        input['public_key'] = self.wallet.public_key
        input['signature'] = self.wallet.sign(self.output)
        return input

    def test_transactionschema_valid_uuid_output_input(self):
        # The (uuid, output, input) argument set validates successfully.
        valid_arguments = {
            'uuid': self._generate_uuid(),
            'output': self.output,
            'input': self.input
        }
        transactionschema = TransactionSchema(**valid_arguments)
        self.assertIsInstance(transactionschema, TransactionSchema)
        self.assertIsInstance(transactionschema.uuid, int)
        self.assertIsInstance(transactionschema.output, dict)
        self.assertIsInstance(transactionschema.input, dict)

    def test_transactionschema_valid_sender_recipient_amount(self):
        # The (sender, recipient, amount) argument set validates successfully.
        valid_arguments = {
            'sender': self.wallet,
            'recipient': self._generate_address(),
            'amount': self.amount
        }
        transactionschema = TransactionSchema(**valid_arguments)
        self.assertIsInstance(transactionschema, TransactionSchema)
        self.assertIsInstance(transactionschema.sender, Wallet)
        self.assertIsInstance(transactionschema.recipient, str)
        self.assertIsInstance(transactionschema.amount, float)

    def test_transactionschema_invalid_arguments_set(self):
        # Dropping any one key from either valid argument set must raise a
        # single value_error (the schema's root validator rejects the set).
        valid_arguments = [{
            'uuid': self._generate_uuid(),
            'output': self.output,
            'input': self.input
        },{
            'sender': self.wallet,
            'recipient': self._generate_address(),
            'amount': self.amount
        }]
        arguments = random.choice(valid_arguments)
        key = random.choice(list(arguments.keys()))
        arguments.pop(key)
        with self.assertRaises(ValidationError) as err:
            TransactionSchema(**arguments)
        errors = json.loads(err.json())
        self.assertEqual(len(errors), 1)
        self.assertTrue(errors[0].get('type') == 'value_error')

    def test_transactionschema_invalid_uuid_output_input_types(self):
        # Wrong types for all three fields: one error per field.
        invalid_arguments_types = {
            'uuid': 'uuid',
            'output': 'output',
            'input': 'input'
        }
        with self.assertRaises(ValidationError) as err:
            TransactionSchema(**invalid_arguments_types)
        errors = json.loads(err.json())
        self.assertEqual(len(errors), len(invalid_arguments_types.keys()))
        self.assertTrue(all([error.get('type') in ['type_error', 'value_error'] for error in errors]))

    def test_transactionschema_invalid_sender_recipient_amount_types(self):
        # Wrong types for sender/recipient/amount: one error per field.
        invalid_arguments_types = {
            'sender': 's3nd3r',
            'recipient': 100,
            'amount': 'am0un7'
        }
        with self.assertRaises(ValidationError) as err:
            TransactionSchema(**invalid_arguments_types)
        errors = json.loads(err.json())
        self.assertEqual(len(errors), len(invalid_arguments_types.keys()))
        self.assertTrue(all([error.get('type') in ['type_error', 'value_error'] for error in errors]))

    def test_transactionschema_invalid_uuid_output_input_values(self):
        # Right types but invalid values (negative uuid, empty dicts).
        invalid_arguments_values = {
            'uuid': -10,
            'output': {},
            'input': {}
        }
        with self.assertRaises(ValidationError) as err:
            TransactionSchema(**invalid_arguments_values)
        errors = json.loads(err.json())
        self.assertEqual(len(errors), len(invalid_arguments_values.keys()))
        self.assertTrue(all([error.get('type') == 'value_error' for error in errors]))

    def test_transactionschema_invalid_sender_recipient_amount_values(self):
        # Malformed recipient and an amount derived from the full balance.
        # NOTE(review): a valid wallet is passed as sender yet three errors
        # are expected — presumably the schema's validators flag the whole
        # set; confirm against TransactionSchema's validation rules.
        invalid_arguments_values = {
            'sender': self.wallet,
            'recipient': 'r3c1p13n7',
            'amount': self.wallet.balance - self._generate_float()
        }
        with self.assertRaises(ValidationError) as err:
            TransactionSchema(**invalid_arguments_values)
        errors = json.loads(err.json())
        self.assertEqual(len(errors), len(invalid_arguments_values.keys()))
        self.assertTrue(all([error.get('type') == 'value_error' for error in errors]))
| 39.165414 | 106 | 0.645997 |
a8473e56e01831113f3cc5bd2604adaa6442e259 | 2,450 | py | Python | alipay/aop/api/domain/AlipayEcoEduCampusJobPublishModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 32 | 2018-05-24T08:40:15.000Z | 2019-04-04T20:54:55.000Z | alipay/aop/api/domain/AlipayEcoEduCampusJobPublishModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 7 | 2018-05-24T08:42:59.000Z | 2020-09-06T23:18:46.000Z | alipay/aop/api/domain/AlipayEcoEduCampusJobPublishModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 13 | 2018-04-25T11:27:58.000Z | 2021-03-15T12:22:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoEduCampusJobPublishModel(object):
    """Request model for the Alipay eco/edu campus job publish API.

    Holds the four payload fields and converts to/from the dict form used
    by the Alipay SDK. Only truthy fields are serialized; nested model
    values are serialized recursively via their own ``to_alipay_dict``.
    """

    # Payload field names; each is exposed through a property below.
    _FIELD_NAMES = ('gmt_expired', 'gmt_refresh', 'source_code', 'source_id')

    def __init__(self):
        self._gmt_expired = None
        self._gmt_refresh = None
        self._source_code = None
        self._source_id = None

    @property
    def gmt_expired(self):
        return self._gmt_expired

    @gmt_expired.setter
    def gmt_expired(self, value):
        self._gmt_expired = value

    @property
    def gmt_refresh(self):
        return self._gmt_refresh

    @gmt_refresh.setter
    def gmt_refresh(self, value):
        self._gmt_refresh = value

    @property
    def source_code(self):
        return self._source_code

    @source_code.setter
    def source_code(self, value):
        self._source_code = value

    @property
    def source_id(self):
        return self._source_id

    @source_id.setter
    def source_id(self, value):
        self._source_id = value

    def to_alipay_dict(self):
        """Serialize the truthy fields to a plain dict for the Alipay API."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; return ``None`` for falsy input."""
        if not d:
            return None
        o = AlipayEcoEduCampusJobPublishModel()
        for name in AlipayEcoEduCampusJobPublishModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 28.488372 | 73 | 0.597551 |
0878b1422bcfaa568440194feb90c7363c24bd1b | 312 | py | Python | scratch/picnic.py | kaiiam/biosys-analytics | dd2d176c7006274b5abc5ba1c070720c328d4fce | [
"MIT"
] | null | null | null | scratch/picnic.py | kaiiam/biosys-analytics | dd2d176c7006274b5abc5ba1c070720c328d4fce | [
"MIT"
] | null | null | null | scratch/picnic.py | kaiiam/biosys-analytics | dd2d176c7006274b5abc5ba1c070720c328d4fce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""docstring"""
import os
import sys
items = sys.argv[1:]
def main():
items = []
while(True):
item = input('What are you bringing? ["quit" to quit] ')
items.append(item)
if item == 'quit':
break
print('You are bringing {}'.format(items))
main()
| 15.6 | 62 | 0.567308 |
59d3c3e2363556500052529ae5f75040caa51a5c | 27,496 | py | Python | google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py | sararob/python-aiplatform | e64cd5588848a4dcd9117ff905e9569576541b69 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py | sararob/python-aiplatform | e64cd5588848a4dcd9117ff905e9569576541b69 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py | sararob/python-aiplatform | e64cd5588848a4dcd9117ff905e9569576541b69 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import specialist_pool
from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
    """gRPC backend transport for SpecialistPoolService.

    A service for creating and managing Customer SpecialistPools.
    When customers start Data Labeling jobs, they can reuse/create
    Specialist Pools to bring their own Specialists to label the
    data. Customers can add/remove Managers for the Specialist Pool
    on Cloud console, then Managers will get email notifications to
    manage Specialists and tasks on CrowdCompute console.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Cache of per-RPC stub callables, keyed by method name; populated
    # lazily by the stub properties below.
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            # NOTE: ``False`` (not ``None``) signals the base class to skip
            # credential resolution entirely.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            # No explicit channel was supplied: build one with unlimited
            # message sizes using the credentials resolved by the base class.
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        # Delegate to api_core's helper, which resolves default scopes and
        # credentials for this service.
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service."""
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
        # Return the client from cache.
        return self._operations_client
    # Each property below lazily creates and caches a unary-unary stub for one
    # SpecialistPoolService RPC; the returned callable performs the request.

    @property
    def create_specialist_pool(
        self,
    ) -> Callable[
        [specialist_pool_service.CreateSpecialistPoolRequest], operations_pb2.Operation
    ]:
        r"""Return a callable for the create specialist pool method over gRPC.

        Creates a SpecialistPool.

        Returns:
            Callable[[~.CreateSpecialistPoolRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_specialist_pool" not in self._stubs:
            self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool",
                request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["create_specialist_pool"]

    @property
    def get_specialist_pool(
        self,
    ) -> Callable[
        [specialist_pool_service.GetSpecialistPoolRequest],
        specialist_pool.SpecialistPool,
    ]:
        r"""Return a callable for the get specialist pool method over gRPC.

        Gets a SpecialistPool.

        Returns:
            Callable[[~.GetSpecialistPoolRequest],
                    ~.SpecialistPool]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_specialist_pool" not in self._stubs:
            self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool",
                request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
                response_deserializer=specialist_pool.SpecialistPool.deserialize,
            )
        return self._stubs["get_specialist_pool"]

    @property
    def list_specialist_pools(
        self,
    ) -> Callable[
        [specialist_pool_service.ListSpecialistPoolsRequest],
        specialist_pool_service.ListSpecialistPoolsResponse,
    ]:
        r"""Return a callable for the list specialist pools method over gRPC.

        Lists SpecialistPools in a Location.

        Returns:
            Callable[[~.ListSpecialistPoolsRequest],
                    ~.ListSpecialistPoolsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_specialist_pools" not in self._stubs:
            self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools",
                request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
                response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
            )
        return self._stubs["list_specialist_pools"]

    @property
    def delete_specialist_pool(
        self,
    ) -> Callable[
        [specialist_pool_service.DeleteSpecialistPoolRequest], operations_pb2.Operation
    ]:
        r"""Return a callable for the delete specialist pool method over gRPC.

        Deletes a SpecialistPool as well as all Specialists
        in the pool.

        Returns:
            Callable[[~.DeleteSpecialistPoolRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_specialist_pool" not in self._stubs:
            self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool",
                request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["delete_specialist_pool"]

    @property
    def update_specialist_pool(
        self,
    ) -> Callable[
        [specialist_pool_service.UpdateSpecialistPoolRequest], operations_pb2.Operation
    ]:
        r"""Return a callable for the update specialist pool method over gRPC.

        Updates a SpecialistPool.

        Returns:
            Callable[[~.UpdateSpecialistPoolRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_specialist_pool" not in self._stubs:
            self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool",
                request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["update_specialist_pool"]

    def close(self):
        """Close the underlying gRPC channel, releasing its resources."""
        self.grpc_channel.close()
    @property
    def delete_operation(
        self,
    ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
        r"""Return a callable for the delete_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        # NOTE(review): no response deserializer is installed, so the raw
        # serialized response is returned — presumably google.protobuf.Empty;
        # confirm against the Operations API.
        if "delete_operation" not in self._stubs:
            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/DeleteOperation",
                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
                response_deserializer=None,
            )
        return self._stubs["delete_operation"]

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
        r"""Return a callable for the cancel_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "cancel_operation" not in self._stubs:
            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/CancelOperation",
                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
                response_deserializer=None,
            )
        return self._stubs["cancel_operation"]
@property
def wait_operation(
self,
) -> Callable[[operations_pb2.WaitOperationRequest], None]:
r"""Return a callable for the wait_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/WaitOperation",
request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["wait_operation"]
    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
    ]:
        r"""Return a callable for the list_operations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]

    @property
    def list_locations(
        self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
    ]:
        r"""Return a callable for the list locations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_locations" not in self._stubs:
            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/ListLocations",
                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
            )
        return self._stubs["list_locations"]

    @property
    def get_location(
        self,
    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
        r"""Return a callable for the get location method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_location" not in self._stubs:
            self._stubs["get_location"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/GetLocation",
                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
                response_deserializer=locations_pb2.Location.FromString,
            )
        return self._stubs["get_location"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM access control policy on the specified
function. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does
not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        iam_policy_pb2.TestIamPermissionsResponse,
    ]:
        r"""Return a callable for the test iam permissions method over gRPC.

        Tests the specified permissions against the IAM access control
        policy for a function. If the function does not exist, this will
        return an empty set of permissions, not a NOT_FOUND error.

        Returns:
            Callable[[~.TestIamPermissionsRequest],
                    ~.TestIamPermissionsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request; it is memoized in self._stubs so the channel method
        # is only created once.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "test_iam_permissions" not in self._stubs:
            self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/TestIamPermissions",
                request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
                response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
            )
        return self._stubs["test_iam_permissions"]
@property
def kind(self) -> str:
return "grpc"
__all__ = ("SpecialistPoolServiceGrpcTransport",)
| 44.928105 | 102 | 0.650822 |
061ffe83487304ef52d05637d4aaffaa35b94777 | 2,883 | py | Python | neodroid/messaging/networking_utils.py | cnHeider/neo | 30c03bb142bbe25f6d7b61f22f66747076f08aa6 | [
"Apache-2.0"
] | null | null | null | neodroid/messaging/networking_utils.py | cnHeider/neo | 30c03bb142bbe25f6d7b61f22f66747076f08aa6 | [
"Apache-2.0"
] | null | null | null | neodroid/messaging/networking_utils.py | cnHeider/neo | 30c03bb142bbe25f6d7b61f22f66747076f08aa6 | [
"Apache-2.0"
] | 1 | 2018-09-27T14:31:20.000Z | 2018-09-27T14:31:20.000Z | from threading import Thread
import zmq
from .FlatBufferModels import FlatBufferState as FlatBufferState
from .FlatBufferUtilities import build_flat_reaction, create_state
# Module-level connection state shared by the functions below.
_connected = False  # set True by setup_connection(), False by close_connection()
_waiting_for_response = False  # True between send_reaction() and receive_state()
_ctx = zmq.Context.instance()
_req_socket = _ctx.socket(zmq.REQ)  # replaced on every setup_connection()
_use_inter_process_communication = False  # when True, connect over IPC instead of TCP
_time_out = 2000  # poll timeout for replies, in milliseconds
def send_reaction(reaction):
  """Serialize *reaction* and send it on the shared REQ socket.

  Does nothing unless the module is connected and no reply is pending
  (REQ sockets must strictly alternate send/recv).
  """
  global _waiting_for_response, _connected
  if not _connected or _waiting_for_response:
    return
  _req_socket.send(build_flat_reaction(reaction))
  _waiting_for_response = True
def receive_state(timeout_callback,
                  on_step_done_callback=None):
  """Wait for the reply to the last reaction sent and decode it.

  If no reply arrives within ``_time_out`` milliseconds, ``timeout_callback``
  is invoked and nothing else happens. Otherwise the reply is decoded into a
  state; it is either handed to ``on_step_done_callback`` (when given) or
  returned. Returns None when no reply is pending or after a timeout.
  """
  global _waiting_for_response
  if not _waiting_for_response:
    return
  # poll() returns 0 when nothing became readable before the timeout.
  # Fixed: the original compared with `is 0`, an identity check that only
  # works because CPython caches small ints; value equality is the correct
  # (and portable) comparison.
  if _req_socket.poll(timeout=_time_out) == 0:
    timeout_callback()
    return
  by = _req_socket.recv()
  _waiting_for_response = False
  flat_buffer_state = FlatBufferState.GetRootAsFlatBufferState(by, 0)
  state = create_state(flat_buffer_state)
  if on_step_done_callback:
    on_step_done_callback(state)
  else:
    return state
def setup_connection(tcp_address, tcp_port, on_connected_callback=None):
  """Open a fresh REQ socket (IPC or TCP) and mark the module connected."""
  global _connected, _req_socket
  _req_socket = _ctx.socket(zmq.REQ)
  # Pick the endpoint according to the module-level transport switch.
  if _use_inter_process_communication:
    endpoint = "ipc:///tmp/neodroid/messages0"
    announcement = 'using inter-process communication protocol'
  else:
    endpoint = "tcp://%s:%s" % (tcp_address, tcp_port)
    announcement = 'using tcp communication protocol'
  _req_socket.connect(endpoint)
  print(announcement)
  _connected = True
  if on_connected_callback:
    on_connected_callback()
def close_connection(on_disconnect_callback=None):
  """Close the REQ socket immediately and mark the module disconnected."""
  global _connected, _req_socket
  # LINGER=0: drop any unsent messages instead of blocking on close.
  _req_socket.setsockopt(zmq.LINGER, 0)
  _req_socket.close()
  _connected = False
  if on_disconnect_callback:
    on_disconnect_callback()
def start_setup_connection_thread(on_connected_callback,
                                  tcp_ip_address='127.0.0.1',
                                  tcp_port=5555):
  """Run setup_connection() on a background daemon thread."""
  worker = Thread(target=setup_connection,
                  args=(tcp_ip_address, tcp_port, on_connected_callback))
  # Daemon thread: terminates with the rest of the program.
  worker.daemon = True
  worker.start()
def start_send_reaction_thread(reaction, on_reaction_sent_callback):
  """Send *reaction* on a background daemon thread, then notify.

  Fixes two defects in the original: it referenced the undefined name
  ``action`` instead of the ``reaction`` parameter (a NameError when the
  thread ran), and it passed the callback as a second positional argument
  to send_reaction(), which accepts only one (a TypeError). The callback
  is now invoked after send_reaction() returns, which appears to be the
  intent of the original signature.
  """
  def _send_then_notify():
    send_reaction(reaction)
    if on_reaction_sent_callback:
      on_reaction_sent_callback()
  worker = Thread(target=_send_then_notify)
  # Daemon thread: terminates with the rest of the program.
  worker.daemon = True
  worker.start()
def start_receive_state_thread(on_step_done_callback, timeout_callback):
  """Run receive_state() on a background daemon thread."""
  worker = Thread(target=receive_state,
                  args=(timeout_callback, on_step_done_callback))
  # Daemon thread: terminates with the rest of the program.
  worker.daemon = True
  worker.start()
d25e843c9c76c09d8c307489e5a5968cc1eea021 | 3,442 | py | Python | iDict/iDict/iDict.py | gaufung/CodeBase | 0292b06cfe002b3ad0299e43bb51192816a02c74 | [
"MIT"
] | 1 | 2018-10-06T23:50:53.000Z | 2018-10-06T23:50:53.000Z | iDict/iDict.py | gaufung/iDict | 9ff1d1dcb473d8a28bbea61a4fad207f76ab7408 | [
"MIT"
] | null | null | null | iDict/iDict.py | gaufung/iDict | 9ff1d1dcb473d8a28bbea61a4fad207f76ab7408 | [
"MIT"
] | 1 | 2018-10-06T23:50:50.000Z | 2018-10-06T23:50:50.000Z | """
iDict is a command line tool for look up word
by local database or from bing dict
"""
import sys
import argparse
import os
import logging
from termcolor import colored
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from iDict.parser import BingParser, DbParser
from iDict.word import Base, Word
from iDict.config import config
def _display_word(word):
    """Pretty-print one word entry: name, definitions, example sentences."""
    print(colored(word.name, color='blue'))
    print(colored('Definition: ', color='yellow'))
    for definition in word.explains:
        print(colored("*", color='red'), colored(definition.content, color='green'))
    print(colored('Sentences:', color='yellow'))
    for example in word.sentences:
        print(colored("*", color='red'), colored(example.content, color='green'))
def display_words(words):
    """List numbered words, colour-coded by priority (>=3 blue, (1,3) green, else yellow)."""
    for position, entry in enumerate(words, start=1):
        if entry.priority >= 3:
            colour = 'blue'
        elif 1 < entry.priority < 3:
            colour = 'green'
        else:
            colour = 'yellow'
        print(colored(str(position), color='red'), colored(entry.name, color=colour))
# Log everything to a local file; bare-message format keeps the log readable.
logging.basicConfig(level=logging.DEBUG,
                    filename='dict.log',
                    format='%(message)s')
def main():
    """CLI entry point: look up (-w), list (-s), or delete (--delete) words."""
    parser = argparse.ArgumentParser(description="iDict")
    parser.add_argument('-w', dest='word',
                        help='the word which you want to look up')
    parser.add_argument('-p', '--priority', dest='priority',
                        action='store', help='set word priority')
    parser.add_argument('-s', action='store_true', default=False,
                        help='show words by priority')
    parser.add_argument('--delete', action='store', dest='delete_word')
    args = parser.parse_args(sys.argv[1:])
    # Bind a SQLAlchemy session to the production database.
    con = config['production']
    engine = create_engine(con.DATABASE_URL)
    session = sessionmaker()
    session.configure(bind=engine)
    session = session()
    # First run: create the schema when the database file does not exist yet.
    if not os.path.exists(os.path.join(con.DEFAULT_PATH, con.URL)):
        logging.info('Create the database')
        Base.metadata.create_all(engine)
    priority = int(args.priority) if args.priority else 1
    if args.word:
        try:
            # Chain of responsibility: local DB first, then Bing, then DB again
            # (the Bing parser stores its result so the second DB lookup hits).
            parser = DbParser(session, successor=BingParser(session,
                              DbParser(session, priority=priority), priority),
                              priority=priority)
            word = parser.parse(args.word)
            _display_word(word)
        except Exception as err:
            logging.error(err)
            print(colored('Cannot search this word', color='red'))
    elif args.s:
        display_words(session.query(Word).order_by(Word.priority.desc()))
    elif args.delete_word:
        word = session.query(Word).filter(Word.name == args.delete_word).first()
        if word:
            try:
                session.delete(word)
                session.commit()
                print(colored('!', color="yellow"), colored('Word: %s has been deleted' % word.name, color='blue'))
            except Exception as err:
                print(colored('Delete fail', color='yellow'))
                logging.error(err)
                session.rollback()
            finally:
                # NOTE(review): the session is closed only on the delete path;
                # other branches leave it open — confirm whether intentional.
                session.close()
        else:
            print(colored('No such word in database', color='yellow'))
    else:
        pass
| 37.010753 | 115 | 0.604881 |
94fe841efd8d577e06a83f7475866ec1676bfe0c | 1,615 | py | Python | setup.py | leandro-ro/aea-ledger-ethereum-tud | 55ff0405da1f6ee47b9b6f527099ff31e71de330 | [
"Apache-2.0"
] | null | null | null | setup.py | leandro-ro/aea-ledger-ethereum-tud | 55ff0405da1f6ee47b9b6f527099ff31e71de330 | [
"Apache-2.0"
] | null | null | null | setup.py | leandro-ro/aea-ledger-ethereum-tud | 55ff0405da1f6ee47b9b6f527099ff31e71de330 | [
"Apache-2.0"
] | 1 | 2022-01-31T07:45:26.000Z | 2022-01-31T07:45:26.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Setup script for "aea_ledger_ethereum" package."""
import aea_ledger_ethereum_tud
from setuptools import find_packages, setup
# Package metadata; the entry point registers the tudwallet with the AEA
# framework's crypto plugin system.
setup(
    name="aea-ledger-ethereum-tud",
    version="0.0.1",
    author="Leandro Rometsch (Wallet Code)",
    license="Apache-2.0",
    description="Python package wrapping a custom hot/cold wallet (tudwallet).",
    packages=find_packages(include=["aea_ledger_ethereum_tud*"]),
    install_requires=[
        "aea>=1.0.0, <2.0.0",
        "eth-account==0.5.2",
        "eth-utils==1.10.0",
        "jpype1==1.3.0",
    ],
    tests_require=["pytest"],
    entry_points={
        "aea.cryptos": ["tudwallet = aea_ledger_ethereum_tud:EthereumTudWallet"],
    },
    # Ship the bundled native/Java libraries alongside the Python sources.
    package_data={'': ['aea_ledger_ethereum_tud/tudwallet/libs/*']},
    include_package_data=True,
)
| 35.888889 | 81 | 0.617957 |
18d4b9a6965764c2118379c4871f5d34a847a259 | 178 | py | Python | tests/migrations/test_migrations_squashed_extra/0001_squashed_0002.py | iMerica/dj-models | fbe4a55ac362f9355a2298f58aa0deb0b6082e19 | [
"BSD-3-Clause"
] | 5 | 2019-02-15T16:47:50.000Z | 2021-12-26T18:52:23.000Z | tests/migrations/test_migrations_squashed_extra/0001_squashed_0002.py | iMerica/dj-models | fbe4a55ac362f9355a2298f58aa0deb0b6082e19 | [
"BSD-3-Clause"
] | null | null | null | tests/migrations/test_migrations_squashed_extra/0001_squashed_0002.py | iMerica/dj-models | fbe4a55ac362f9355a2298f58aa0deb0b6082e19 | [
"BSD-3-Clause"
] | 2 | 2021-08-09T02:29:09.000Z | 2021-08-20T03:30:11.000Z | from djmodels.db import migrations
class Migration(migrations.Migration):
replaces = [
("migrations", "0001_initial"),
("migrations", "0002_second"),
]
| 19.777778 | 39 | 0.640449 |
8b7a2ffd3f6df47cfad55d1ed4f3684d6597de80 | 9,652 | py | Python | src/game.py | dphennessy/BrassLands | 085c2ea1732f669eb7198c33aa6776a37a729749 | [
"MIT"
] | 2 | 2019-02-11T18:11:11.000Z | 2019-02-26T11:25:00.000Z | src/game.py | dphennessy/BrassLands | 085c2ea1732f669eb7198c33aa6776a37a729749 | [
"MIT"
] | 9 | 2019-02-11T07:39:55.000Z | 2019-03-28T19:32:45.000Z | src/game.py | dphennessy/BrassLands | 085c2ea1732f669eb7198c33aa6776a37a729749 | [
"MIT"
] | 7 | 2019-02-15T13:17:48.000Z | 2019-02-26T13:16:27.000Z | # libraries
import pygame as pg
import json
from config import Config
# Class meant to be inherited by anything that should display a tooltip when hovered over
class Hover:
    """Mixin giving an object a pre-rendered tooltip shown on mouse-over."""

    def __init__(self, name, description, icon=Config['resources']['ui']['icons']['default']):
        tooltip_font = pg.font.Font(Config['resources']['ui']['fonts']['tooltip'], 30)
        self.icon = pg.image.load(icon)
        self.surf = pg.image.load(Config['resources']['ui']['hover'])  # tooltip background
        self.rect = self.surf.get_rect()
        # Fall back to placeholder text when either field is missing.
        if name is None:
            name = "<Item Name>"
        if description is None:
            description = "<Item Description>"
        text_colour = (235, 235, 235)
        self.name_surf = tooltip_font.render(name, True, text_colour)
        self.desc_surf = tooltip_font.render(description, True, text_colour)
        # Compose icon, name and description onto the tooltip background.
        for piece, offset in ((self.icon, (13, 13)),
                              (self.name_surf, (65, 15)),
                              (self.desc_surf, (13, 55))):
            self.surf.blit(piece, offset)

    def show_tooltip(self, surface, pos):
        """Draw the tooltip with its bottom-right corner at *pos*."""
        self.rect.bottomright = pos
        surface.blit(self.surf, self.rect)
# Class meant to be inherited by items
class Item(Hover):
    """Base class for inventory items, populated from a JSON string."""

    def __init__(self, j_string):
        self.name = None
        self.description = None
        self.parse_json(j_string)
        super().__init__(self.name, self.description)  # build the tooltip

    def parse_json(self, j_string):
        """Read 'name' and 'description' from *j_string*; warn when a key is absent."""
        fields = json.loads(j_string)
        try:
            self.name = fields['name']
            self.description = fields['description']
        except KeyError as e:
            print(f"Incorrect json format, unknown attribute: \"{e.args[0]}\"")
class Inventory:
    """Toggleable on-screen inventory panel with tooltip support."""

    def __init__(self):
        w, h = 7, 3  # Number of rows and columns in the visible inventory
        self.capacity = w * h  # Max capacity of inventory
        self.visible = False  # toggled by the 'i' key (see event_handler)
        self.surface = pg.image.load(Config['resources']['ui']['inventory'])  # main surface
        self.rect = self.surface.get_rect()
        self.rect.bottomright = (Config['game']['width'], Config['game']['height'])  # Positions the inventory
        x, y = 20, 0
        # Creates a Rect for each slot in the visible inventory.
        # Slot pitch 42px, slot size 40px; the 290px offset presumably skips
        # the panel artwork's header — TODO confirm against the image asset.
        self.inv_rects = [pg.Rect((x + i * 42, y + 290 + j * 42), (40, 40)) for j in range(h) for i in range(w)]
        self.items = []  # List to hold the item objects currently in inventory

    def get_items(self):
        """Return the list of items currently held."""
        return self.items

    # Adds an item to the inventory as long as it doesn't exceed the maximum capacity of the inventory
    def add(self, item):
        """Add *item* if it is an Item and there is room; return success."""
        if isinstance(item, Item) and len(self.items) < self.capacity:
            self.items.append(item)
            self.update()
            return True
        return False

    # Removes the specified item from the inventory
    def remove(self, item):
        """Remove *item* and redraw the panel from a clean background."""
        self.surface = pg.image.load(
            Config['resources']['ui']['inventory'])  # reloads the image to remove previously draw icons
        self.items.remove(item)
        self.update()

    # Method called to redraw the inventory onto the main surface
    def update(self):
        """Blit each item's icon into its slot on the panel surface."""
        for i, item in enumerate(self.items):
            self.surface.blit(item.icon, self.inv_rects[i])

    def event_handler(self, event):
        """Toggle visibility when the 'i' key is pressed."""
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_i:
                self.visible = not self.visible

    def draw(self, surface):
        """Draw the panel (when visible) and the tooltip of a hovered item."""
        if self.visible:
            surface.blit(self.surface, self.rect)
            mouse_pos = pg.mouse.get_pos()
            rel_pos = tuple(  # relative position to 0,0 coordinate of main surface
                map(lambda m, r: m - r, mouse_pos, self.rect.topleft))  # Subtracts mouse_pos from rect.topleft
            # Draws tooltip of item on mouseover
            for i, item in enumerate(self.items):
                if self.inv_rects[i].collidepoint(rel_pos):
                    item.show_tooltip(surface, mouse_pos)
class Ground(pg.sprite.Sprite):
    """Walkable floor tile centred on a pixel coordinate."""

    def __init__(self, pos):
        super().__init__()
        self.image = pg.image.load(Config['resources']['sprites']['ground'])
        self.rect = self.image.get_rect(center=pos)
class Wall(Ground):
    """Impassable tile; Player.move refuses to step onto these."""

    def __init__(self, pos):
        super().__init__(pos)  # positions the rect via Ground
        self.image = pg.image.load(Config['resources']['sprites']['wall'])
class Player(pg.sprite.Sprite):
    """Grid-locked player sprite driven by WASD key events."""

    def __init__(self, sprite_sheet, grid):
        super().__init__()
        self.sheet = pg.image.load(sprite_sheet).convert_alpha()
        self.sheet_cells = []  # Divisions of sprite sheet, filled lazily by get_image()
        self.image = self.get_image(1)  # cell 1 = facing down (see move())
        self.rect = self.image.get_rect()
        self.curr_pos = (0, 0)  # Stores current position of player (grid coords)
        self.grid = grid  # List of coordinates player can move on screen

    def set_pos(self, x, y):
        """Move to grid cell (x, y); silently ignores out-of-bounds targets."""
        tile_width = Config['game']['tile_width']
        max_w = Config['game']['width'] // tile_width
        max_h = Config['game']['height'] // tile_width
        if x >= max_w or y >= max_h or x < 0 or y < 0:  # Does nothing if pos outside bounds
            return
        self.curr_pos = (x, y)
        self.rect.center = self.grid[y][x]

    def get_pos(self):
        """Return the current (x, y) grid position."""
        return self.curr_pos

    def update(self):
        # Nothing per-frame yet; required by pygame's sprite Group API.
        pass

    def move(self, direction, scenery):
        """Step one tile for a WASD key, updating the facing sprite.

        NOTE(review): when stepping off the top/left edge, x or y becomes -1
        and scenery[y][x] wraps to the last row/column (Python negative
        indexing), so the wall check consults the wrong tile; set_pos()
        later rejects the move anyway — confirm intended.
        """
        d = (0, 0)
        if direction == pg.K_w:
            self.image = self.get_image(0)
            d = (0, -1)
        if direction == pg.K_s:
            self.image = self.get_image(1)
            d = (0, 1)
        if direction == pg.K_d:
            self.image = self.get_image(2)
            d = (1, 0)
        if direction == pg.K_a:
            d = (-1, 0)
            # No dedicated left-facing cell: mirror the right-facing one.
            self.image = pg.transform.flip(self.get_image(2), True, False)
        x, y = self.curr_pos[0] + d[0], self.curr_pos[1] + d[1]
        if not scenery or not isinstance(scenery[y][x], Wall):  # Will only move if destination isn't a wall
            self.set_pos(x, y)

    def get_image(self, cell_index):  # Divides the sprite sheet and stores the divisions in self.cells
        """Return the sub-surface for one sheet cell (3 columns x 1 row)."""
        if not self.sheet_cells:
            cols = 3
            rows = 1
            total_cells = cols * rows
            rect = self.sheet.get_rect()
            w = int(rect.width / cols)
            h = int(rect.height / rows)
            self.sheet_cells = list([(index % cols * w, int(index / cols) * h, w, h) for index in range(total_cells)])
        return self.sheet.subsurface(self.sheet_cells[cell_index])
# Makes list of coordinates
def make_grid():
    """Return a rows x cols table of tile-centre pixel coordinates."""
    tile = Config['game']['tile_width']
    cols = Config['game']['width'] // tile
    rows = Config['game']['height'] // tile
    half = tile // 2  # offset from a tile's corner to its centre
    grid = []
    for row in range(rows):
        grid.append([(half + tile * col, half + tile * row) for col in range(cols)])
    return grid
# Draws sprites from a file into a sprite group
# Returns list with the location of all scenery objects
def make_level(sprite_grp, grid, level):
    """Load the level file *level* into *sprite_grp*.

    Returns a 2-D list mirroring the file: Ground/Wall objects per character,
    or [] when the file does not exist.

    Fixes two defects in the original: the file handle was never closed
    (now a ``with`` block), and an unreachable duplicate
    ``sprite_grp.add(lst); return lst`` after the return has been removed.
    """
    sprite_grp.empty()  # Empties the grp to remove previous level's sprites
    try:
        with open(level, "r") as level_file:
            lines = level_file.readlines()
    except FileNotFoundError:
        print("File not found")
        return []
    lst = []
    for i, line in enumerate(lines):
        lst.append([])
        for j, char in enumerate(line):
            try:
                if char == Config['game']['ground_char']:
                    lst[i].append(Ground(grid[i][j]))
                elif char == Config['game']['wall_char']:
                    lst[i].append(Wall(grid[i][j]))
            except IndexError:
                # Level file larger than the coordinate grid.
                print("Map is not 25x20")
    sprite_grp.add(lst)
    return lst
class Game:
    """Top-level application: owns the window, sprite groups and main loop."""

    def __init__(self):
        self.__running = True
        self.__size = Config['game']['width'], Config['game']['height']
        self.__display_surf = pg.display.set_mode(self.__size, pg.HWSURFACE | pg.DOUBLEBUF)
        self.player_grp = pg.sprite.Group()
        self.scenery_grp = pg.sprite.Group()  # Group for walls, ground, etc.
        self.coord_grid = make_grid()
        self.scenery_grid = make_level(self.scenery_grp, self.coord_grid, Config['resources']['levels']['level1'])
        self.char = Player(Config['resources']['sprites']['player'], self.coord_grid)  # Initializes player at (0, 0)
        self.player_grp.add(self.char)
        self.inventory = Inventory()

    def start(self):
        """Run the blocking main loop until the window is closed."""
        self.__game_loop()

    def __game_loop(self):
        # Classic pygame loop: handle events, then redraw everything.
        while self.__running:
            self.__display_surf.fill((255, 255, 255))
            for event in pg.event.get():
                self.__on_event(event)
            self.scenery_grp.draw(self.__display_surf)
            self.player_grp.update()  # Call the update() method on all the sprites in the group
            self.player_grp.draw(self.__display_surf)  # Draw the sprites in the group
            self.inventory.draw(self.__display_surf)
            pg.display.update()

    def __on_event(self, event):
        """Dispatch one pygame event to quit handling, inventory and player."""
        if event.type == pg.QUIT:
            self.__running = False
        self.inventory.event_handler(event)
        if event.type == pg.KEYDOWN:
            self.char.move(event.key, self.scenery_grid)
| 38.150198 | 118 | 0.604952 |
75f1de2902bcd7196f2a06a98f403cb757baef98 | 18,945 | py | Python | rpython/jit/backend/llsupport/test/test_regalloc.py | Qointum/pypy | c0ed88efbc135a75a535f4534ca1f3baf0bf39d8 | [
"Apache-2.0",
"OpenSSL"
] | 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | idea2/pypyjs-3/deps/pypy/rpython/jit/backend/llsupport/test/test_regalloc.py | igormcoelho/neo-boa | c141b503183cab287744cd19be5dfd86d9bc8daf | [
"MIT"
] | 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | idea2/pypyjs-3/deps/pypy/rpython/jit/backend/llsupport/test/test_regalloc.py | igormcoelho/neo-boa | c141b503183cab287744cd19be5dfd86d9bc8daf | [
"MIT"
] | 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z | import py
from rpython.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT,\
BoxPtr
from rpython.jit.backend.llsupport.regalloc import FrameManager, LinkedList
from rpython.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan
def newboxes(*values):
    """Wrap each given value in a fresh BoxInt."""
    boxes = []
    for value in values:
        boxes.append(BoxInt(value))
    return boxes
def newrefboxes(count):
    """Return *count* fresh, independent BoxPtr instances."""
    boxes = []
    for _ in range(count):
        boxes.append(BoxPtr())
    return boxes
def boxes_and_longevity(num):
    """Make *num* BoxInt(0) boxes plus a longevity of (0, 1) for each."""
    boxes = [BoxInt(0) for _ in range(num)]
    longevity = {}
    for box in boxes:
        longevity[box] = (0, 1)
    return boxes, longevity
class FakeReg(object):
    """Stand-in CPU register; prints as r<n>."""

    def __init__(self, i):
        self.n = i

    def __repr__(self):
        return 'r%d' % self.n
# Four fake registers (r0..r3) forming the register file used by the tests.
r0, r1, r2, r3 = [FakeReg(i) for i in range(4)]
regs = [r0, r1, r2, r3]
class RegisterManager(BaseRegMan):
    """Concrete manager over the four fake registers above."""

    all_regs = regs

    def convert_to_imm(self, v):
        # Tests treat any value as its own immediate encoding.
        return v
class FakeFramePos(object):
    """Fake stack slot identified by (pos, box_type); compared by value."""

    def __init__(self, pos, box_type):
        self.pos = pos
        self.box_type = box_type

    def __repr__(self):
        return 'FramePos<%d,%s>' % (self.pos, self.box_type)

    def __eq__(self, other):
        return (self.pos, self.box_type) == (other.pos, other.box_type)

    def __ne__(self, other):
        return not self.__eq__(other)
class TFrameManagerEqual(FrameManager):
    """Frame manager where every box type occupies exactly one slot."""

    def frame_pos(self, i, box_type):
        return FakeFramePos(i, box_type)

    def frame_size(self, box_type):
        # Uniform slot size, regardless of type.
        return 1

    def get_loc_index(self, loc):
        assert isinstance(loc, FakeFramePos)
        return loc.pos
class TFrameManager(FrameManager):
    """Frame manager where floats take two slots and everything else one."""

    def frame_pos(self, i, box_type):
        return FakeFramePos(i, box_type)

    def frame_size(self, box_type):
        return 2 if box_type == FLOAT else 1

    def get_loc_index(self, loc):
        assert isinstance(loc, FakeFramePos)
        return loc.pos
class MockAsm(object):
    """Records every requested register move instead of emitting code."""

    def __init__(self):
        self.moves = []

    def regalloc_mov(self, from_loc, to_loc):
        self.moves.append((from_loc, to_loc))
class TestRegalloc(object):
def test_freeing_vars(self):
b0, b1, b2 = newboxes(0, 0, 0)
longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)}
rm = RegisterManager(longevity)
rm.next_instruction()
for b in b0, b1, b2:
rm.try_allocate_reg(b)
rm._check_invariants()
assert len(rm.free_regs) == 1
assert len(rm.reg_bindings) == 3
rm.possibly_free_vars([b0, b1, b2])
assert len(rm.free_regs) == 1
assert len(rm.reg_bindings) == 3
rm._check_invariants()
rm.next_instruction()
rm.possibly_free_vars([b0, b1, b2])
rm._check_invariants()
assert len(rm.free_regs) == 2
assert len(rm.reg_bindings) == 2
rm._check_invariants()
rm.next_instruction()
rm.possibly_free_vars([b0, b1, b2])
rm._check_invariants()
assert len(rm.free_regs) == 4
assert len(rm.reg_bindings) == 0
def test_register_exhaustion(self):
boxes, longevity = boxes_and_longevity(5)
rm = RegisterManager(longevity)
rm.next_instruction()
for b in boxes[:len(regs)]:
assert rm.try_allocate_reg(b)
assert rm.try_allocate_reg(boxes[-1]) is None
rm._check_invariants()
def test_need_lower_byte(self):
boxes, longevity = boxes_and_longevity(5)
b0, b1, b2, b3, b4 = boxes
class XRegisterManager(RegisterManager):
no_lower_byte_regs = [r2, r3]
rm = XRegisterManager(longevity)
rm.next_instruction()
loc0 = rm.try_allocate_reg(b0, need_lower_byte=True)
assert loc0 not in XRegisterManager.no_lower_byte_regs
loc = rm.try_allocate_reg(b1, need_lower_byte=True)
assert loc not in XRegisterManager.no_lower_byte_regs
loc = rm.try_allocate_reg(b2, need_lower_byte=True)
assert loc is None
loc = rm.try_allocate_reg(b0, need_lower_byte=True)
assert loc is loc0
rm._check_invariants()
def test_specific_register(self):
boxes, longevity = boxes_and_longevity(5)
rm = RegisterManager(longevity)
rm.next_instruction()
loc = rm.try_allocate_reg(boxes[0], selected_reg=r1)
assert loc is r1
loc = rm.try_allocate_reg(boxes[1], selected_reg=r1)
assert loc is None
rm._check_invariants()
loc = rm.try_allocate_reg(boxes[0], selected_reg=r1)
assert loc is r1
loc = rm.try_allocate_reg(boxes[0], selected_reg=r2)
assert loc is r2
rm._check_invariants()
def test_force_allocate_reg(self):
boxes, longevity = boxes_and_longevity(5)
b0, b1, b2, b3, b4 = boxes
fm = TFrameManager()
class XRegisterManager(RegisterManager):
no_lower_byte_regs = [r2, r3]
rm = XRegisterManager(longevity,
frame_manager=fm,
assembler=MockAsm())
rm.next_instruction()
loc = rm.force_allocate_reg(b0)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b1)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b2)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b3)
assert isinstance(loc, FakeReg)
loc = rm.force_allocate_reg(b4)
assert isinstance(loc, FakeReg)
# one of those should be now somewhere else
locs = [rm.loc(b) for b in boxes]
used_regs = [loc for loc in locs if isinstance(loc, FakeReg)]
assert len(used_regs) == len(regs)
loc = rm.force_allocate_reg(b0, need_lower_byte=True)
assert isinstance(loc, FakeReg)
assert loc not in [r2, r3]
rm._check_invariants()
def test_make_sure_var_in_reg(self):
boxes, longevity = boxes_and_longevity(5)
fm = TFrameManager()
rm = RegisterManager(longevity, frame_manager=fm,
assembler=MockAsm())
rm.next_instruction()
# allocate a stack position
b0, b1, b2, b3, b4 = boxes
sp = fm.loc(b0)
assert sp.pos == 0
loc = rm.make_sure_var_in_reg(b0)
assert isinstance(loc, FakeReg)
rm._check_invariants()
def test_force_result_in_reg_1(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: (0, 1), b1: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
# first path, var is already in reg and dies
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is loc0
assert len(asm.moves) == 0
rm._check_invariants()
def test_force_result_in_reg_2(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: (0, 2), b1: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
loc0 = rm.force_allocate_reg(b0)
rm._check_invariants()
rm.next_instruction()
loc = rm.force_result_in_reg(b1, b0)
assert loc is loc0
assert rm.loc(b0) is not loc0
assert len(asm.moves) == 1
rm._check_invariants()
def test_force_result_in_reg_3(self):
b0, b1, b2, b3, b4 = newboxes(0, 0, 0, 0, 0)
longevity = {b0: (0, 2), b1: (0, 2), b3: (0, 2), b2: (0, 2), b4: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert not len(rm.free_regs)
rm._check_invariants()
rm.next_instruction()
rm.force_result_in_reg(b4, b0)
rm._check_invariants()
assert len(asm.moves) == 1
def test_force_result_in_reg_4(self):
b0, b1 = newboxes(0, 0)
longevity = {b0: (0, 1), b1: (0, 1)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
fm.loc(b0)
rm.force_result_in_reg(b1, b0)
rm._check_invariants()
loc = rm.loc(b1)
assert isinstance(loc, FakeReg)
loc = rm.loc(b0)
assert isinstance(loc, FakeFramePos)
assert len(asm.moves) == 1
def test_bogus_make_sure_var_in_reg(self):
b0, = newboxes(0)
longevity = {b0: (0, 1)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
# invalid call to make_sure_var_in_reg(): box unknown so far
py.test.raises(KeyError, rm.make_sure_var_in_reg, b0)
def test_return_constant(self):
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
fm = TFrameManager()
rm = RegisterManager(longevity, assembler=asm,
frame_manager=fm)
rm.next_instruction()
loc = rm.return_constant(ConstInt(1), selected_reg=r1)
assert loc is r1
loc = rm.return_constant(ConstInt(1), selected_reg=r1)
assert loc is r1
loc = rm.return_constant(ConstInt(1))
assert isinstance(loc, ConstInt)
for box in boxes[:-1]:
rm.force_allocate_reg(box)
assert len(asm.moves) == 2 # Const(1) -> r1, twice
assert len(rm.reg_bindings) == 4
rm._check_invariants()
def test_force_result_in_reg_const(self):
boxes, longevity = boxes_and_longevity(2)
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm,
assembler=asm)
rm.next_instruction()
c = ConstInt(0)
rm.force_result_in_reg(boxes[0], c)
rm._check_invariants()
def test_loc_of_const(self):
rm = RegisterManager({})
rm.next_instruction()
assert isinstance(rm.loc(ConstInt(1)), ConstInt)
def test_call_support(self):
class XRegisterManager(RegisterManager):
save_around_call_regs = [r1, r2]
def call_result_location(self, v):
return r1
fm = TFrameManager()
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
rm = XRegisterManager(longevity, frame_manager=fm,
assembler=asm)
for b in boxes[:-1]:
rm.force_allocate_reg(b)
rm.before_call()
assert len(rm.reg_bindings) == 2
assert fm.get_frame_depth() == 2
assert len(asm.moves) == 2
rm._check_invariants()
rm.after_call(boxes[-1])
assert len(rm.reg_bindings) == 3
rm._check_invariants()
def test_call_support_save_all_regs(self):
class XRegisterManager(RegisterManager):
save_around_call_regs = [r1, r2]
def call_result_location(self, v):
return r1
fm = TFrameManager()
asm = MockAsm()
boxes, longevity = boxes_and_longevity(5)
rm = XRegisterManager(longevity, frame_manager=fm,
assembler=asm)
for b in boxes[:-1]:
rm.force_allocate_reg(b)
rm.before_call(save_all_regs=True)
assert len(rm.reg_bindings) == 0
assert fm.get_frame_depth() == 4
assert len(asm.moves) == 4
rm._check_invariants()
rm.after_call(boxes[-1])
assert len(rm.reg_bindings) == 1
rm._check_invariants()
def test_different_frame_width(self):
class XRegisterManager(RegisterManager):
pass
fm = TFrameManager()
b0 = BoxInt()
longevity = {b0: (0, 1)}
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
f0 = BoxFloat()
longevity = {f0: (0, 1)}
xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm)
xrm.loc(f0)
rm.loc(b0)
assert fm.get_frame_depth() == 3
def test_spilling(self):
b0, b1, b2, b3, b4, b5 = newboxes(0, 1, 2, 3, 4, 5)
longevity = {b0: (0, 3), b1: (0, 3), b3: (0, 5), b2: (0, 2), b4: (1, 4), b5: (1, 3)}
fm = TFrameManager()
asm = MockAsm()
rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
rm.next_instruction()
for b in b0, b1, b2, b3:
rm.force_allocate_reg(b)
assert len(rm.free_regs) == 0
rm.next_instruction()
loc = rm.loc(b3)
spilled = rm.force_allocate_reg(b4)
assert spilled is loc
spilled2 = rm.force_allocate_reg(b5)
assert spilled2 is loc
rm._check_invariants()
def test_hint_frame_locations_1(self):
for hint_value in range(11):
b0, = newboxes(0)
fm = TFrameManager()
fm.hint_frame_pos[b0] = hint_value
blist = newboxes(*range(10))
for b1 in blist:
fm.loc(b1)
for b1 in blist:
fm.mark_as_free(b1)
assert fm.get_frame_depth() == 10
loc = fm.loc(b0)
if hint_value < 10:
expected = hint_value
else:
expected = 0
assert fm.get_loc_index(loc) == expected
assert fm.get_frame_depth() == 10
    def test_linkedlist(self):
        """Exercise LinkedList, the sorted free-slot list: pops return the
        lowest available position first, and double-width (tp == 13)
        requests need two adjacent slots starting at an even position.
        """
        class Loc(object):
            # Minimal stand-in for a frame location.
            def __init__(self, pos, size, tp):
                self.pos = pos
                self.size = size
                self.tp = tp
        class FrameManager(object):
            @staticmethod
            def get_loc_index(item):
                return item.pos
            @staticmethod
            def frame_pos(pos, tp):
                # tp == 13 stands in for a double-width (float-like) slot.
                if tp == 13:
                    size = 2
                else:
                    size = 1
                return Loc(pos, size, tp)
        fm = FrameManager()
        l = LinkedList(fm)
        # Appends keep the list sorted by position regardless of insert order.
        l.append(1, Loc(1, 1, 0))
        l.append(1, Loc(4, 1, 0))
        l.append(1, Loc(2, 1, 0))
        l.append(1, Loc(0, 1, 0))
        assert l.master_node.val == 0
        assert l.master_node.next.val == 1
        assert l.master_node.next.next.val == 2
        assert l.master_node.next.next.next.val == 4
        assert l.master_node.next.next.next.next is None
        # Single-unit pops come back in ascending position order.
        item = l.pop(1, 0)
        assert item.pos == 0
        item = l.pop(1, 0)
        assert item.pos == 1
        item = l.pop(1, 0)
        assert item.pos == 2
        item = l.pop(1, 0)
        assert item.pos == 4
        assert l.pop(1, 0) is None
        l.append(1, Loc(1, 1, 0))
        l.append(1, Loc(5, 1, 0))
        l.append(1, Loc(2, 1, 0))
        l.append(1, Loc(0, 1, 0))
        # Free slots are now {0, 1, 2, 5}; a size-2 pop takes the pair (0, 1).
        item = l.pop(2, 13)
        assert item.tp == 13
        assert item.pos == 0
        assert item.size == 2
        assert l.pop(2, 0) is None # 2 and 4
        l.append(1, Loc(4, 1, 0))
        # {2, 4, 5}: the (4, 5) pair is adjacent and even-aligned.
        item = l.pop(2, 13)
        assert item.pos == 4
        assert item.size == 2
        assert l.pop(1, 0).pos == 2
        assert l.pop(1, 0) is None
        l.append(2, Loc(1, 2, 0))
        # this will not work because the result will be odd
        assert l.pop(2, 13) is None
        l.append(1, Loc(3, 1, 0))
        # {1, 2, 3}: the even-aligned pair (2, 3) satisfies the request.
        item = l.pop(2, 13)
        assert item.pos == 2
        assert item.tp == 13
        assert item.size == 2
    def test_frame_manager_basic_equal(self):
        """Same walkthrough as test_frame_manager_basic, but with
        TFrameManagerEqual where every type (int/float/ptr) takes exactly
        one slot — so float locations need no alignment or extra width.
        """
        b0, b1 = newboxes(0, 1)
        fm = TFrameManagerEqual()
        loc0 = fm.loc(b0)
        assert fm.get_loc_index(loc0) == 0
        #
        assert fm.get(b1) is None
        loc1 = fm.loc(b1)
        assert fm.get_loc_index(loc1) == 1
        assert fm.get(b1) == loc1
        #
        # Asking again for an already-bound box returns the same location.
        loc0b = fm.loc(b0)
        assert loc0b == loc0
        #
        fm.loc(BoxInt())
        assert fm.get_frame_depth() == 3
        #
        # Floats are single-width here: consecutive indexes 3 and 4.
        f0 = BoxFloat()
        locf0 = fm.loc(f0)
        assert fm.get_loc_index(locf0) == 3
        assert fm.get_frame_depth() == 4
        #
        f1 = BoxFloat()
        locf1 = fm.loc(f1)
        assert fm.get_loc_index(locf1) == 4
        assert fm.get_frame_depth() == 5
        fm.mark_as_free(b1)
        assert fm.freelist
        b2 = BoxInt()
        fm.loc(b2) # should be in the same spot as b1 before
        assert fm.get(b1) is None
        assert fm.get(b2) == loc1
        fm.mark_as_free(b0)
        p0 = BoxPtr()
        ploc = fm.loc(p0)
        assert fm.get_loc_index(ploc) == 0
        assert fm.get_frame_depth() == 5
        assert ploc != loc1
        p1 = BoxPtr()
        p1loc = fm.loc(p1)
        assert fm.get_loc_index(p1loc) == 5
        assert fm.get_frame_depth() == 6
        fm.mark_as_free(p0)
        p2 = BoxPtr()
        p2loc = fm.loc(p2)
        assert p2loc == ploc
        assert len(fm.freelist) == 0
        # Freeing every binding, then binding one box to an external
        # location, leaves all 6 slots on the freelist.
        for box in fm.bindings.keys():
            fm.mark_as_free(box)
        fm.bind(BoxPtr(), FakeFramePos(3, 'r'))
        assert len(fm.freelist) == 6
    def test_frame_manager_basic(self):
        """Basic frame-manager walkthrough with TFrameManager, where floats
        occupy two slots and must start at an even index: allocation,
        re-lookup, freeing, and reuse of freed slots.
        """
        b0, b1 = newboxes(0, 1)
        fm = TFrameManager()
        loc0 = fm.loc(b0)
        assert fm.get_loc_index(loc0) == 0
        #
        assert fm.get(b1) is None
        loc1 = fm.loc(b1)
        assert fm.get_loc_index(loc1) == 1
        assert fm.get(b1) == loc1
        #
        # Asking again for an already-bound box returns the same location.
        loc0b = fm.loc(b0)
        assert loc0b == loc0
        #
        fm.loc(BoxInt())
        assert fm.get_frame_depth() == 3
        #
        f0 = BoxFloat()
        locf0 = fm.loc(f0)
        # can't be odd
        assert fm.get_loc_index(locf0) == 4
        assert fm.get_frame_depth() == 6
        #
        f1 = BoxFloat()
        locf1 = fm.loc(f1)
        assert fm.get_loc_index(locf1) == 6
        assert fm.get_frame_depth() == 8
        fm.mark_as_free(b1)
        assert fm.freelist
        b2 = BoxInt()
        fm.loc(b2) # should be in the same spot as b1 before
        assert fm.get(b1) is None
        assert fm.get(b2) == loc1
        fm.mark_as_free(b0)
        p0 = BoxPtr()
        ploc = fm.loc(p0)
        assert fm.get_loc_index(ploc) == 0
        assert fm.get_frame_depth() == 8
        assert ploc != loc1
        p1 = BoxPtr()
        p1loc = fm.loc(p1)
        # Slot 3 was left free by the float alignment gap above.
        assert fm.get_loc_index(p1loc) == 3
        assert fm.get_frame_depth() == 8
        fm.mark_as_free(p0)
        p2 = BoxPtr()
        p2loc = fm.loc(p2)
        assert p2loc == ploc
        assert len(fm.freelist) == 0
        fm.mark_as_free(b2)
        f3 = BoxFloat()
        fm.mark_as_free(p2)
        # Slots 0 and 1 are free and even-aligned, so the float lands at 0.
        floc = fm.loc(f3)
        assert fm.get_loc_index(floc) == 0
        for box in fm.bindings.keys():
            fm.mark_as_free(box)
| 33.236842 | 92 | 0.566376 |
90a992b5a21d6304a3d60244d0770775e08a1d37 | 436 | py | Python | Python3/1081-Smallest-Subsequence-of-Distinct-Characters/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/1081-Smallest-Subsequence-of-Distinct-Characters/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/1081-Smallest-Subsequence-of-Distinct-Characters/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def smallestSubsequence(self, text: str) -> str:
last = {ch : i for i, ch in enumerate(text)}
stack = []
ins = set()
for i, ch in enumerate(text):
if ch not in ins:
while stack and ch < stack[-1] and last[stack[-1]] > i:
ins.remove(stack.pop())
stack.append(ch)
ins.add(ch)
return ''.join(stack)
| 33.538462 | 71 | 0.479358 |
0b5cfac3ace322443006c40333851b3d3c92f890 | 2,076 | py | Python | Threading In Python/Locks/locks_doesnot_lock_any_thing.py | abdelrhman-adel-ahmed/concurrency-parallelism-Programming | 6267d5502189bb11ceade7244bcd1c7222388c51 | [
"MIT"
] | null | null | null | Threading In Python/Locks/locks_doesnot_lock_any_thing.py | abdelrhman-adel-ahmed/concurrency-parallelism-Programming | 6267d5502189bb11ceade7244bcd1c7222388c51 | [
"MIT"
] | null | null | null | Threading In Python/Locks/locks_doesnot_lock_any_thing.py | abdelrhman-adel-ahmed/concurrency-parallelism-Programming | 6267d5502189bb11ceade7244bcd1c7222388c51 | [
"MIT"
] | null | null | null |
"""
A primitive lock is in one of two states, "locked" or "unlocked". It is created in the
unlocked state. It has two basic methods, acquire() and release(). When the state is unlocked,
acquire() changes the state to locked and returns immediately. When the state is locked, acquire()
blocks until a call to release() in another thread changes it to unlocked, then the acquire()
call resets it to locked and returns. The release() method should only be called in the locked
state; it changes the state to unlocked and returns immediately. If an attempt is made to release
an unlocked lock, a RuntimeError will be raised.
note 1: if a thread calls acquire() and obtains the lock, calling acquire() again
before release() will block, because the second acquire() waits for the state to
become unlocked.
note 2: acquire() takes an optional blocking flag and a timeout controlling how long
to wait for the lock; it returns True if the lock was acquired and False otherwise
when non-blocking mode is requested.
"""
from threading import Lock ,Thread ,enumerate,main_thread
import time
lock = Lock()  # shared primitive lock demonstrated below
num = 1  # shared resource both threads mutate
def sumOne():
    """Acquire the lock, sleep so the other thread gets scheduled, then
    increment the shared counter.

    The release is wrapped in try/except on purpose: by the time this
    thread wakes up, sumTwo() may already have released the lock from the
    other thread, in which case release() raises a RuntimeError.
    """
    global num
    s=lock.acquire()
    print("sum one acquire the lock",s)
    time.sleep(1) # sleep so the other thread runs and bypasses the lock
    num = num + 1
    try:
        lock.release()
        print("not realsed 1")  # only printed if release() did not raise
    except:
        pass
def sumTwo():
    """Try a non-blocking acquire (returns False — sumOne holds the lock),
    mutate the shared counter anyway, then release the lock even though
    this thread never acquired it. That is legal for Lock (unlike RLock).
    """
    global num
    s=lock.acquire(0)  # 0 => non-blocking; returns False, does not wait
    print("sum two bypass acquire the lock",s)
    num = num / 2
    lock.release()
    print('sum two relased the lock')
# A Lock can be released by a thread that never acquired it (unlike RLock,
# which may only be released by the owning thread). So when the sumOne
# thread resumes, its own release() call raises — hence its try/except.
# calling the functions
Thread(target=sumOne).start()
Thread(target=sumTwo).start()
# NOTE: this rebinds the name `main_thread` from the imported function to
# the main-thread object, shadowing threading.main_thread.
main_thread=main_thread()
for thread in enumerate():
    if thread !=main_thread:
        thread.join()
# displaying the value of shared resource
print(num)
| 32.952381 | 136 | 0.716281 |
60fc6ab10401878451f19075d5472b1cf8b7b6e2 | 1,358 | py | Python | test_get_types.py | mconlon17/vivo-foundation | 202f458bc72fb76c7d89240091c4fb00522cfe3f | [
"BSD-3-Clause"
] | null | null | null | test_get_types.py | mconlon17/vivo-foundation | 202f458bc72fb76c7d89240091c4fb00522cfe3f | [
"BSD-3-Clause"
] | 1 | 2015-04-04T01:38:51.000Z | 2015-04-04T01:38:51.000Z | tools/test_get_types.py | mconlon17/vivo-1.5-improvement | 44d8335eb7bbe518374a53c0e1f9f39014023ee7 | [
"BSD-3-Clause"
] | null | null | null | """
test_get_types.py -- Given a URI, get the types
Version 0.1 MC 2014-06-21
-- Initial version.
Version 0.2 MC 2014-07-25
-- Updated for Tools 2.0
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
from vivofoundation import get_types
from datetime import datetime
# Test cases for access and display functions
# NOTE: Python 2 print statements. Each case calls get_types() on a known
# VIVO URI to spot-check type retrieval across different entity kinds.
print datetime.now(), "Start"
print "\nPosition"
print get_types("http://vivo.ufl.edu/individual/n7320")
print "\nDateTime"
print get_types("http://vivo.ufl.edu/individual/n4872")
print "\nDateTimeInterval"
print get_types("http://vivo.ufl.edu/individual/n5807")
print "\nOrganization"
print get_types("http://vivo.ufl.edu/individual/n2003")
print "\nAuthorship"
print get_types("http://vivo.ufl.edu/individual/n148010391")
print "\nRole"
print get_types("http://vivo.ufl.edu/individual/n2926")
print "\nPerson"
print get_types("http://vivo.ufl.edu/individual/n3715")
# Deliberately missing URI to exercise the not-found path.
print "\nNot Found"
print get_types("http://vivo.ufl.edu/notfound")
print "\nPublication Venue"
print get_types("http://vivo.ufl.edu/individual/n378789540")
print "\nPaper"
print get_types("http://vivo.ufl.edu/individual/n4703866415")
print "\nGrant"
print get_types("http://vivo.ufl.edu/individual/n5432")
print datetime.now(), "Finish"
| 23.016949 | 61 | 0.735641 |
15d7b2d27338bdade9c2a746833380bab2f07093 | 4,059 | py | Python | revoice/pyin.py | tuxzz/roca_prototype | cd2ba182b7b81c56314766f2f95ed2d9dd844dd6 | [
"Apache-2.0"
] | null | null | null | revoice/pyin.py | tuxzz/roca_prototype | cd2ba182b7b81c56314766f2f95ed2d9dd844dd6 | [
"Apache-2.0"
] | null | null | null | revoice/pyin.py | tuxzz/roca_prototype | cd2ba182b7b81c56314766f2f95ed2d9dd844dd6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from .common import *
from . import yin
def normalized_pdf(a, b, begin, end, number):
    """Sample a Beta(a, b)-shaped curve on `number` points with step
    (end - begin) / number, force it to be non-increasing left-to-right,
    and normalize it so it sums to 1.

    NOTE(review): `begin` only affects the step size; the grid itself is
    not offset by `begin` — confirm this is intended for begin != 0.
    """
    step = (end - begin) / number
    grid = np.arange(number, dtype=np.float64) * step
    curve = np.power(grid, a - 1.0) * np.power(1.0 - grid, b - 1.0)
    # Right-to-left running maximum: identical to the original pairwise
    # back-propagation (v[i] = max(v[i], v[i + 1]) scanning from the end).
    curve = np.maximum.accumulate(curve[::-1])[::-1]
    return curve / np.sum(curve)
class Processor:
def __init__(self, sr, **kwargs):
self.samprate = float(sr)
self.hopSize = kwargs.get("hopSize", roundUpToPowerOf2(self.samprate * 0.0025))
self.minFreq = kwargs.get("minFreq", 80.0)
self.maxFreq = kwargs.get("maxFreq", 1000.0)
self.maxIter = 4
self.prefilter = kwargs.get("prefilter", True)
self.valleyThreshold = kwargs.get("valleyThreshold", 1.0)
self.valleyStep = kwargs.get("valleyStep", 0.01)
self.probThreshold = kwargs.get("probThreshold", 0.02)
self.weightPrior = kwargs.get("weightPrior", 5.0)
self.bias = kwargs.get("bias", 1.0)
self.pdf = kwargs.get("pdf", normalized_pdf(1.7, 6.8, 0.0, 1.0, 128))
def extractF0(self, obsProbList):
nHop = len(obsProbList)
out = np.zeros(nHop, dtype = np.float64)
for iHop, (freqProb) in enumerate(obsProbList):
if(len(freqProb) > 0):
out[iHop] = freqProb.T[0][np.argmax(freqProb.T[1])]
return out
def __call__(self, x, removeDC = True):
nX = len(x)
nHop = getNFrame(nX, self.hopSize)
pdfSize = len(self.pdf)
if(removeDC):
x = simpleDCRemove(x)
if(self.prefilter):
x = yin.doPrefilter(x, self.maxFreq, self.samprate)
out = []
for iHop in range(nHop):
windowSize = 0
minFreq = self.minFreq
newWindowSize = max(roundUpToPowerOf2(self.samprate / minFreq * 2), self.hopSize * 2)
iIter = 0
while(newWindowSize != windowSize and iIter < self.maxIter):
windowSize = newWindowSize
frame = getFrame(x, iHop * self.hopSize, windowSize)
if(removeDC):
frame = simpleDCRemove(frame)
buff = yin.difference(frame)
buff = yin.cumulativeDifference(buff)
valleyIndexList = yin.findValleys(buff, minFreq, self.maxFreq, self.samprate, threshold = self.valleyThreshold, step = self.valleyStep)
nValley = len(valleyIndexList)
if(valleyIndexList):
possibleFreq = max(self.samprate / valleyIndexList[-1] - 20.0, self.minFreq)
newWindowSize = max(int(np.ceil(self.samprate / possibleFreq * 2)), self.hopSize * 4)
if(newWindowSize % 2 == 1):
newWindowSize += 1
iIter += 1
freqProb = np.zeros((nValley, 2), dtype = np.float64)
probTotal = 0.0
weightedProbTotal = 0.0
for iValley, valley in enumerate(valleyIndexList):
ipledIdx, ipledVal = parabolicInterpolation(buff, valley)
freq = self.samprate / ipledIdx
v0 = 1 if(iValley == 0) else min(1.0, buff[valleyIndexList[iValley - 1]] + 1e-10)
v1 = 0 if(iValley == nValley - 1) else max(0.0, buff[valleyIndexList[iValley + 1]]) + 1e-10
prob = 0.0
for i in range(int(v1 * pdfSize), int(v0 * pdfSize)):
prob += self.pdf[i] * (1.0 if(ipledVal < i / pdfSize) else 0.01)
prob = min(prob, 0.99)
prob *= self.bias
probTotal += prob
if(ipledVal < self.probThreshold):
prob *= self.weightPrior
weightedProbTotal += prob
freqProb[iValley] = freq, prob
# renormalize
if(nValley > 0 and weightedProbTotal != 0.0):
freqProb.T[1] *= probTotal / weightedProbTotal
out.append(freqProb)
return out
| 39.407767 | 151 | 0.542991 |
65d7989c3fcb68dda20a0ffb237110c07cf31248 | 20,654 | py | Python | toontown/pets/PetBrain.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | null | null | null | toontown/pets/PetBrain.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/pets/PetBrain.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from panda3d.core import *
from libtoontown import *
from direct.showbase.PythonUtil import weightedChoice, randFloat, Functor
from direct.showbase.PythonUtil import list2dict
from direct.showbase import DirectObject
from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from direct.fsm import FSM
from toontown.toon import DistributedToonAI
from toontown.pets import PetConstants, PetObserve, PetGoal, PetGoalMgr
from toontown.pets import PetTricks, PetLookerAI
import random, types
class PetBrain(DirectObject.DirectObject, CPetBrain):
    """Server-side AI 'brain' for a Doodle pet.

    Reacts to PetObserve events (avatars arriving/leaving, looking, chat
    phrases, feeding/scratching), maintains awareness of a bounded set of
    nearby avatars, feeds goals to a PetGoalMgr, and periodically thinks
    (reprioritizes goals, updates moods) on a scheduled task.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PetBrain')

    def __init__(self, pet):
        CPetBrain.__init__(self)
        self.pet = pet
        self.focus = None          # current object of attention (or None)
        self.started = 0
        self.inMovie = 0
        # scratch node used to remember where a departed focus object was
        self.chaseNode = self.pet.getRender().attachNewNode('PetChaseNode')
        self.goalMgr = PetGoalMgr.PetGoalMgr(self.pet)
        self.doId2goals = {}       # avId -> [chase goal, flee goal]
        self.nearbyAvs = {}        # avId -> None (set of avs in our zone)
        self.avAwareness = {}      # avId -> None (bounded awareness set)
        self.lastInteractTime = {}
        self.nextAwarenessIndex = 0
        if __dev__:
            # dev-only PStats collectors for profiling _think
            self.pscPrior = PStatCollector('App:Show code:petThink:UpdatePriorities')
            self.pscAware = PStatCollector('App:Show code:petThink:ShuffleAwareness')
            self.pscResc = PStatCollector('App:Show code:petThink:Reschedule')
        return

    def destroy(self):
        """Tear down tasks, goals and references; safe to call once."""
        taskMgr.remove(self.getTeleportTaskName())
        if __dev__:
            del self.pscPrior
            del self.pscAware
            del self.pscResc
        self.stop()
        self.goalMgr.destroy()
        self.chaseNode.removeNode()
        del self.chaseNode
        del self.focus
        del self.pet
        if self.doId2goals:
            self.notify.warning('destroy(): self.doId2goals is not empty: %s' % self.doId2goals.keys())
        for goalList in self.doId2goals.values():
            for goal in goalList:
                goal.destroy()
        del self.doId2goals
        del self.avAwareness

    # -- per-pet task and event name helpers ------------------------------
    def getThinkTaskName(self):
        return 'petThink-%s' % self.pet.doId

    def getTeleportTaskName(self):
        return 'petTeleport-%s' % self.pet.doId

    def getObserveEventAttendedByAvStart(self, otherDoId):
        return 'petObserveAttendedByAvStart-%s-%s' % (self.pet.doId, otherDoId)

    def getObserveEventAttendedByAvStop(self, otherDoId):
        return 'petObserveAttendedByAvStop-%s-%s' % (self.pet.doId, otherDoId)

    def getObserveEventAttendingAvStart(self, otherDoId):
        return 'petObserveAttendingAvStart-%s-%s' % (self.pet.doId, otherDoId)

    def getObserveEventAttendingAvStop(self, otherDoId):
        return 'petObserveAttendingAvStop-%s-%s' % (self.pet.doId, otherDoId)

    def start(self):
        """Hook up looker events, install global goals, notice avatars
        already nearby, and schedule the periodic _think task."""
        PetBrain.notify.debug('start: %s' % self.pet.doId)
        self.lookers = {}
        self.lookees = {}
        self.accept(PetLookerAI.getStartLookedAtByOtherEvent(self.pet.doId), self._handleLookedAtByOtherStart)
        self.accept(PetLookerAI.getStopLookedAtByOtherEvent(self.pet.doId), self._handleLookedAtByOtherStop)
        self.accept(PetLookerAI.getStartLookingAtOtherEvent(self.pet.doId), self._handleLookingAtOtherStart)
        self.accept(PetLookerAI.getStopLookingAtOtherEvent(self.pet.doId), self._handleLookingAtOtherStop)
        self.globalGoals = [PetGoal.Wander()]
        for goal in self.globalGoals:
            self.goalMgr.addGoal(goal)
        for doId in self.pet._getNearbyAvatarDict():
            self._handleAvatarArrive(doId)
        self.tLastLonelinessUpdate = globalClock.getFrameTime()
        # random initial delay spreads many pets' think ticks apart
        taskMgr.doMethodLater(simbase.petThinkPeriod * random.random(), self._think, self.getThinkTaskName())
        self.started = 1

    def stop(self):
        """Inverse of start(); idempotent when not started."""
        PetBrain.notify.debug('stop: %s' % self.pet.doId)
        if not self.started:
            return
        self.started = 0
        del self.lookers
        del self.lookees
        for doId in self.pet._getNearbyAvatarDict():
            self._handleAvatarLeave(doId)
        for goal in self.globalGoals:
            self.goalMgr.removeGoal(goal)
            goal.destroy()
        del self.globalGoals
        self.clearFocus()
        taskMgr.remove(self.getThinkTaskName())
        self.ignore(PetLookerAI.getStartLookedAtByOtherEvent(self.pet.doId))
        self.ignore(PetLookerAI.getStopLookedAtByOtherEvent(self.pet.doId))
        self.ignore(PetLookerAI.getStartLookingAtOtherEvent(self.pet.doId))
        self.ignore(PetLookerAI.getStopLookingAtOtherEvent(self.pet.doId))

    def observe(self, petObserve):
        """Entry point for all PetObserve events; forgetful pets randomly
        drop forgettable observations before they take effect."""
        if petObserve.isForgettable():
            if random.random() < 0.05 * self.pet.traits.forgetfulness:
                return
        petObserve._influence(self)

    def updateLastInteractTime(self, avId):
        if avId in self.lastInteractTime:
            self.lastInteractTime[avId] = globalClock.getFrameTime()

    def _think(self, task = None):
        """Periodic task: reprioritize goals, rotate avatar awareness, and
        apply loneliness/excitement mood drift; then reschedule itself."""
        if not self.inMovie:
            if __dev__:
                self.pscPrior.start()
            self._updatePriorities()
            if __dev__:
                self.pscPrior.stop()
            if __dev__:
                self.pscAware.start()
            # Round-robin one awareness candidate per tick when there are
            # more nearby avatars than awareness slots.
            if len(self.nearbyAvs) > PetConstants.MaxAvatarAwareness:
                self.nextAwarenessIndex %= len(self.nearbyAvs)
                # NOTE: Python 2 — dict.keys() indexing requires a list
                self._considerBecomeAwareOf(self.nearbyAvs.keys()[self.nextAwarenessIndex])
                self.nextAwarenessIndex += 1
            if __dev__:
                self.pscAware.stop()
            curT = globalClock.getFrameTime()
            tSinceLastLonelinessUpdate = curT - self.tLastLonelinessUpdate
            if tSinceLastLonelinessUpdate >= PetConstants.LonelinessUpdatePeriod:
                self.tLastLonelinessUpdate = curT
                numLookers = len(self.lookers)
                if numLookers:
                    dt = tSinceLastLonelinessUpdate
                    self.pet.lerpMood('loneliness', max(-1.0, dt * -.003 * numLookers))
                    if numLookers > 5:
                        self.pet.lerpMood('excitement', min(1.0, dt * 0.001 * numLookers))
        if __dev__:
            self.pscResc.start()
        taskMgr.doMethodLater(simbase.petThinkPeriod, self._think, self.getThinkTaskName())
        if __dev__:
            self.pscResc.stop()
        return Task.done

    def _updatePriorities(self):
        self.goalMgr.updatePriorities()

    # -- looker bookkeeping: who is looking at whom -----------------------
    def _handleLookingAtOtherStart(self, avId):
        if avId in self.lookees:
            PetBrain.notify.warning('%s: already looking at av %s' % (self.pet.doId, avId))
            return
        self.lookees[avId] = avId
        self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDING_START, avId))

    def _handleLookingAtOtherStop(self, avId):
        if avId not in self.lookees:
            PetBrain.notify.warning('%s: not looking at av %s' % (self.pet.doId, avId))
            return
        del self.lookees[avId]
        self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDING_STOP, avId))

    def _handleLookedAtByOtherStart(self, avId):
        if avId in self.lookers:
            PetBrain.notify.warning('%s: av %s already looking at me' % (self.pet.doId, avId))
            return
        self.lookers[avId] = avId
        self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDED_START, avId))

    def _handleLookedAtByOtherStop(self, avId):
        if avId not in self.lookers:
            PetBrain.notify.warning('%s: av %s not looking at me' % (self.pet.doId, avId))
            return
        del self.lookers[avId]
        self.observe(PetObserve.PetActionObserve(PetObserve.Actions.ATTENDED_STOP, avId))

    def lookedAtBy(self, avId):
        return avId in self.lookers

    def lookingAt(self, avId):
        return avId in self.lookees

    def getAvIdsLookingAtUs(self):
        return self.lookers

    def getAvIdsWeAreLookingAt(self):
        return self.lookees

    def setFocus(self, object):
        """Switch attention to `object`; tracks delete events when the new
        or old focus is a distributed object so focus can't go stale."""
        if isinstance(self.focus, DistributedObjectAI.DistributedObjectAI):
            self.ignore(self.focus.getDeleteEvent())
            self.lastInteractTime.setdefault(self.focus.doId, 0)
        PetBrain.notify.debug('setFocus: %s' % object)
        self.focus = object
        if isinstance(self.focus, DistributedObjectAI.DistributedObjectAI):
            self.accept(self.focus.getDeleteEvent(), self._handleFocusHasLeft)

    def getFocus(self):
        return self.focus

    def clearFocus(self):
        self.setFocus(None)
        return

    def _handleFocusHasLeft(self):
        # Focus object was deleted: remember where it (or we) stood and go
        # sniff that spot instead.
        if self.focus.isEmpty():
            self.chaseNode.setPos(self.pet, 0, 0, 0)
        else:
            self.chaseNode.setPos(self.focus, 0, 0, 0)
        self._inspectSpot(self.chaseNode)

    # -- primitive behaviors; each returns 1 on success, 0 otherwise ------
    def _chase(self, target):
        # target may be a callable returning the real target (lazy lookup)
        if callable(target):
            target = target()
            if target is None:
                return 0
        self.setFocus(target)
        self.pet.actionFSM.request('Chase', target)
        return 1

    def _wander(self):
        self.clearFocus()
        self.pet.actionFSM.request('Wander')
        return 1

    def _unstick(self):
        self.clearFocus()
        self.pet.actionFSM.request('Unstick')
        return 1

    def _flee(self, chaser):
        if callable(chaser):
            chaser = chaser()
            if chaser is None:
                return 0
        self.setFocus(chaser)
        self.pet.actionFSM.request('Flee', chaser)
        return 1

    def _inspectSpot(self, spot = None):
        # With no spot given, pick a random point within +/-20 units.
        if spot is None:
            spot = NodePath('randomSpot')
            spot.setPos(randFloat(-20, 20), randFloat(-20, 20), 0)
        self.setFocus(spot)
        self.pet.actionFSM.request('InspectSpot', spot)
        return 1

    def _stay(self, avatar):
        self.setFocus(avatar)
        self.pet.actionFSM.request('Stay', avatar)
        return 1

    def _doTrick(self, trickId, avatar):
        self.setFocus(avatar)
        self.pet.actionFSM.request('Trick', avatar, trickId)
        return 1

    def _heal(self, avatar):
        if callable(avatar):
            avatar = avatar()
            if avatar is None:
                return 0
        self.setFocus(avatar)
        self.pet.actionFSM.request('Heal', avatar)
        return 1

    def _startMovie(self):
        # While a movie plays, _think skips goal/mood updates (see inMovie).
        self.setFocus(None)
        self.pet.actionFSM.request('Movie')
        self.inMovie = 1
        return

    def _endMovie(self):
        self.inMovie = 0

    def _handleGenericObserve(self, observe):
        pass

    def _handleActionObserve(self, observe):
        """React to a PetActionObserve: attention changes, zone changes,
        logouts, feeding, scratching, gardening."""
        action = observe.getAction()
        avId = observe.getAvId()
        OA = PetObserve.Actions
        dbg = PetBrain.notify.debug
        if action == OA.ATTENDED_START:
            dbg('avatar %s is looking at me' % avId)
            self.pet.lerpMoods({'boredom': -.1,
                                'excitement': 0.05,
                                'loneliness': -.05})
            messenger.send(self.getObserveEventAttendedByAvStart(avId))
        elif action == OA.ATTENDED_STOP:
            dbg('avatar %s is no longer looking at me' % avId)
            messenger.send(self.getObserveEventAttendedByAvStop(avId))
        elif action == OA.ATTENDING_START:
            dbg('I am looking at avatar %s' % avId)
            messenger.send(self.getObserveEventAttendingAvStart(avId))
        elif action == OA.ATTENDING_STOP:
            dbg('I am no longer looking at avatar %s' % avId)
            messenger.send(self.getObserveEventAttendingAvStop(avId))
        elif action == OA.CHANGE_ZONE:
            if avId != self.pet.doId:
                oldZoneId, newZoneId = observe.getData()
                PetBrain.notify.debug('%s.CHANGE_ZONE: %s, %s->%s' % (self.pet.doId,
                                                                      avId,
                                                                      oldZoneId,
                                                                      newZoneId))
                myZoneId = self.pet.zoneId
                if newZoneId != oldZoneId:
                    if newZoneId == myZoneId:
                        self._handleAvatarArrive(avId)
                    elif oldZoneId == myZoneId:
                        self._handleAvatarLeave(avId)
                # Owner (or estate owner) walking off the estate entirely
                # makes the pet teleport away.
                if self.pet.inEstate:
                    if avId in (self.pet.ownerId, self.pet.estateOwnerId):
                        if oldZoneId in self.pet.estateZones and newZoneId not in self.pet.estateZones:
                            if avId == self.pet.ownerId:
                                self._handleOwnerLeave()
                            else:
                                self._handleEstateOwnerLeave()
        elif action == OA.LOGOUT:
            if avId == self.pet.ownerId:
                self._handleOwnerLeave()
            elif avId == self.pet.estateOwnerId:
                self._handleEstateOwnerLeave()
        elif action == OA.FEED:
            dbg('avatar %s is feeding me' % avId)
            self.pet.lerpMoods({'affection': 0.35,
                                'anger': -.07,
                                'boredom': -.5,
                                'excitement': 0.5,
                                'fatigue': -.2,
                                'hunger': -.5,
                                'loneliness': -.08,
                                'playfulness': 0.1,
                                'restlessness': -.05,
                                'sadness': -.2})
            self.updateLastInteractTime(avId)
            avatar = simbase.air.doId2do.get(avId)
            if avatar is not None:
                avatar.setHatePets(0)
        elif action == OA.SCRATCH:
            dbg('avatar %s is scratching me' % avId)
            self.pet.lerpMoods({'affection': 0.45,
                                'anger': -.1,
                                'boredom': -.8,
                                'excitement': 0.5,
                                'fatigue': -.25,
                                'loneliness': -.2,
                                'playfulness': 0.1,
                                'restlessness': -.2,
                                'sadness': -.2})
            self.updateLastInteractTime(avId)
            avatar = simbase.air.doId2do.get(avId)
            if avatar is not None:
                avatar.setHatePets(0)
        elif action == OA.GARDEN:
            dbg('avatar %s is gardening' % avId)
            # Don't pester an avatar who started gardening.
            avatar = simbase.air.doId2do.get(avId)
            if avatar is not None:
                if self.getFocus() == avatar:
                    self._wander()
        return

    def _handlePhraseObserve(self, observe):
        """React to a chat phrase aimed at the pet; friendly phrases lift
        moods, commands map to primitive behaviors or trick goals."""
        def _handleGettingFriendlyAttention(avId, self = self):
            self.pet.lerpMoods({'boredom': -.85,
                                'restlessness': -.1,
                                'playfulness': 0.2,
                                'loneliness': -.4,
                                'sadness': -.1,
                                'fatigue': -.05,
                                'excitement': 0.05,
                                'anger': -.05})
            self.updateLastInteractTime(avId)
        def _handleComeHere(avId, self = self):
            avatar = simbase.air.doId2do.get(avId)
            if avatar:
                self._chase(avatar)
                avatar.setHatePets(0)
        def _handleFollowMe(avId, self = self):
            avatar = simbase.air.doId2do.get(avId)
            if avatar:
                self._chase(avatar)
                avatar.setHatePets(0)
        def _handleStay(avId, self = self):
            avatar = simbase.air.doId2do.get(avId)
            if avatar:
                self._stay(avatar)
        def _handleCriticism(avId, self = self):
            # Criticism from the owner hits twice as hard on sadness.
            ownerFactor = 0.5
            if avId == self.pet.ownerId:
                ownerFactor = 1.0
            self.pet.lerpMoods({'affection': -.4,
                                'anger': 0.4,
                                'boredom': -.3,
                                'confusion': 0.05,
                                'fatigue': 0.2,
                                'playfulness': -.1,
                                'sadness': 0.5 * ownerFactor})
        def _handleGoAway(avId, self = self):
            avatar = simbase.air.doId2do.get(avId)
            if avatar is not None:
                if self.getFocus() == avatar:
                    self._wander()
            return
        def _handleDoTrick(trickId, avId, self = self):
            avatar = simbase.air.doId2do.get(avId)
            if avatar:
                # Only perform when the asker is watching and no trick goal
                # is already queued; unwilling pets balk instead.
                if self.lookedAtBy(avatar.doId):
                    if not self.goalMgr.hasTrickGoal():
                        if not self.pet._willDoTrick(trickId):
                            self.pet.trickFailLogger.addEvent(trickId)
                            trickId = PetTricks.Tricks.BALK
                        trickGoal = PetGoal.DoTrick(avatar, trickId)
                        self.goalMgr.addGoal(trickGoal)
        phrase = observe.getPetPhrase()
        avId = observe.getAvId()
        OP = PetObserve.Phrases
        if phrase in list2dict([OP.COME,
                                OP.FOLLOW_ME,
                                OP.STAY,
                                OP.NEED_LAFF,
                                OP.NEED_GAGS,
                                OP.NEED_JB,
                                OP.HI,
                                OP.SOOTHE,
                                OP.PRAISE,
                                OP.HAPPY,
                                OP.QUESTION,
                                OP.FRIENDLY,
                                OP.LETS_PLAY,
                                OP.DO_TRICK]):
            _handleGettingFriendlyAttention(avId)
        if phrase == OP.COME:
            _handleComeHere(avId)
        if phrase == OP.FOLLOW_ME:
            _handleFollowMe(avId)
        if phrase == OP.STAY:
            _handleStay(avId)
        if phrase == OP.CRITICISM:
            _handleCriticism(avId)
        if phrase == OP.GO_AWAY:
            _handleGoAway(avId)
        if phrase == OP.DO_TRICK:
            _handleDoTrick(observe.getTrickId(), avId)

    def _addGoalsReAvatar(self, avId):
        """Install chase/flee goal pair for an avatar we just became aware of."""
        av = self.pet.air.doId2do.get(avId)
        if av is None:
            PetBrain.notify.warning('%s._addGoalsReAvatar: %s not in doId2do' % (self.pet.doId, avId))
            return
        if avId not in self.doId2goals:
            goals = [PetGoal.ChaseAvatar(av), PetGoal.FleeFromAvatar(av)]
            self.doId2goals[avId] = goals
            self.lastInteractTime.setdefault(avId, 0)
        for goal in self.doId2goals[avId]:
            self.goalMgr.addGoal(goal)
        return

    def _removeGoalsReAvatar(self, avId):
        if avId not in self.doId2goals:
            PetBrain.notify.warning('no goals re av %s to remove' % avId)
            return
        for goal in self.doId2goals[avId]:
            self.goalMgr.removeGoal(goal)
            goal.destroy()
        del self.doId2goals[avId]

    def _considerBecomeAwareOf(self, avId):
        """Add avId to the bounded awareness set, possibly evicting the
        least interesting avatar currently tracked (owner scores 100)."""
        av = simbase.air.doId2do.get(avId)
        if av is None:
            PetBrain.notify.warning('_considerBecomeAwareOf: av %s does not exist' % avId)
            return
        if avId in self.avAwareness:
            return
        def becomeAwareOf(avId, self = self):
            self.avAwareness[avId] = None
            self._addGoalsReAvatar(avId)
            return
        if len(self.avAwareness) < PetConstants.MaxAvatarAwareness:
            becomeAwareOf(avId)
            return
        def calcInterest(avId, self = self):
            # The owner is always maximally interesting; others random.
            if avId == self.pet.ownerId:
                return 100.0
            return random.random()
        avInterest = calcInterest(avId)
        minInterest = avInterest
        minInterestAvId = avId
        # Evict the first tracked avatar scoring lower than the newcomer.
        for awAvId in self.avAwareness:
            i = calcInterest(awAvId)
            if i < minInterest:
                minInterest = i
                minInterestAvId = awAvId
                break
        if minInterestAvId != avId:
            self._removeAwarenessOf(minInterestAvId)
            becomeAwareOf(avId)
        return

    def _removeAwarenessOf(self, avId):
        if avId in self.avAwareness:
            self._removeGoalsReAvatar(avId)
            del self.avAwareness[avId]

    def _handleAvatarArrive(self, avId):
        PetBrain.notify.debug('%s._handleAvatarArrive: %s' % (self.pet.doId, avId))
        if avId in self.nearbyAvs:
            PetBrain.notify.warning('%s already in self.nearbyAvs' % avId)
            return
        self.nearbyAvs[avId] = None
        excitement = 0.3
        if avId == self.pet.ownerId:
            excitement = 0.7
        # NOTE(review): the local `excitement` above is never used — the
        # lerp below hard-codes 0.7 for everyone; likely meant to pass it.
        self.pet.lerpMoods({'excitement': 0.7,
                            'loneliness': -.4})
        self._considerBecomeAwareOf(avId)
        return

    def _handleAvatarLeave(self, avId):
        PetBrain.notify.debug('%s._handleAvatarLeave: %s' % (self.pet.doId, avId))
        if avId not in self.nearbyAvs:
            PetBrain.notify.warning('av %s not in self.nearbyAvs' % avId)
            return
        del self.nearbyAvs[avId]
        self.pet.lerpMoods({'loneliness': 0.1})
        self._removeAwarenessOf(avId)

    def _handleOwnerLeave(self):
        # Teleport out, then delete once the teleport animation finishes.
        self.pet.teleportOut()
        taskMgr.doMethodLater(PetConstants.TELEPORT_OUT_DURATION, self.pet.requestDelete, self.getTeleportTaskName())

    def _handleEstateOwnerLeave(self):
        self.pet.teleportOut()
        taskMgr.doMethodLater(PetConstants.TELEPORT_OUT_DURATION, self.pet.requestDelete, self.getTeleportTaskName())
| 36.685613 | 117 | 0.589135 |
6b0384d1d81262d417397561fc846315d54b1594 | 33 | py | Python | Week 3: Real numbers/3 (22).py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | Week 3: Real numbers/3 (22).py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | Week 3: Real numbers/3 (22).py | MLunov/Python-programming-basics-HSE | 7df8bba105db84d6b932c454fdc39193a648254e | [
"MIT"
] | null | null | null | print(input().replace('@', ''))
| 16.5 | 32 | 0.515152 |
6af412a24710e80b8d6d8e61ed9d54e47e2312cf | 86 | py | Python | rest_access_policy/__init__.py | tanonl/drf-access-policy | acd2ef74bd30981ffae671360c1f5edea5cd1982 | [
"MIT"
] | null | null | null | rest_access_policy/__init__.py | tanonl/drf-access-policy | acd2ef74bd30981ffae671360c1f5edea5cd1982 | [
"MIT"
] | null | null | null | rest_access_policy/__init__.py | tanonl/drf-access-policy | acd2ef74bd30981ffae671360c1f5edea5cd1982 | [
"MIT"
] | null | null | null | from .exceptions import AccessPolicyException
from .access_policy import AccessPolicy
| 28.666667 | 45 | 0.883721 |
f5afd9d590a4201c1a511d3fb66f55918b2b34fd | 847 | py | Python | generate_gt.py | Zhonghao2016/SNIPER | 33f721a36f568b7a60b93562d87c30853e4aa06b | [
"Apache-2.0"
] | 1 | 2021-02-18T16:55:54.000Z | 2021-02-18T16:55:54.000Z | generate_gt.py | Zhonghao2016/SNIPER | 33f721a36f568b7a60b93562d87c30853e4aa06b | [
"Apache-2.0"
] | null | null | null | generate_gt.py | Zhonghao2016/SNIPER | 33f721a36f568b7a60b93562d87c30853e4aa06b | [
"Apache-2.0"
] | null | null | null | import csv
import os
import pdb
'''
words = lines.split(',')
imid = words[0]
classid = self._class_to_ind_image[words[1]]
x1 = float(words[2])
y1 = float(words[3])
x2 = float(words[4])
y2 = float(words[5])
crowd = int(words[6])
height = float(words[7])
width = float(words[8])
'''
classid = '/m/01g317'
x1 = float(0.1)
y1 = float(0.1)
x2 = float(0.2)
y2 = float(0.2)
crowd = int(0)
height = float(0.1)
width = float(0.1)
f = open('./data/openimage/annotations/test_challenge_2018.csv', 'w')
csvwriter = csv.writer(f, delimiter=',')
for im_name in os.listdir('./data/openimage/images/test_challenge_2018'):
imid = im_name.split('.')[0]
csvwriter.writerow([imid, classid, x1, y1, x2, y2, crowd, height, width])
f.close()
| 28.233333 | 77 | 0.571429 |
04be678c56f7838e6a542019c6eaa0b017a49f70 | 4,922 | py | Python | setup.py | jab/pytest-flask | 41f985ffdb469a487bf1fba9f0386b7a63cddaa5 | [
"MIT"
] | null | null | null | setup.py | jab/pytest-flask | 41f985ffdb469a487bf1fba9f0386b7a63cddaa5 | [
"MIT"
] | null | null | null | setup.py | jab/pytest-flask | 41f985ffdb469a487bf1fba9f0386b7a63cddaa5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
pytest-flask
============
A set of `pytest <https://docs.pytest.org>`_ fixtures to test Flask
extensions and applications.
Features
--------
Plugin provides some fixtures to simplify app testing:
- ``client`` - an instance of ``app.test_client``,
- ``client_class`` - ``client`` fixture for class-based tests,
- ``config`` - the application config,
- ``live_server`` - runs an application in the background (useful for tests
with `Selenium <http://www.seleniumhq.org>`_ and other headless browsers),
- ``request_ctx`` - the request context,
- ``accept_json``, ``accept_jsonp``, ``accept_any`` - accept headers
suitable to use as parameters in ``client``.
To pass options to your application use the ``pytest.mark.options`` marker:
.. code:: python
@pytest.mark.options(debug=False)
def test_app(app):
assert not app.debug, 'Ensure the app not in debug mode'
During tests execution the request context has been pushed, e.g. ``url_for``,
``session`` and other context bound objects are available without context
managers:
.. code:: python
def test_app(client):
assert client.get(url_for('myview')).status_code == 200
Response object has a ``json`` property to test a view that returns
a JSON response:
.. code:: python
@api.route('/ping')
def ping():
return jsonify(ping='pong')
def test_api_ping(client):
res = client.get(url_for('api.ping'))
assert res.json == {'ping': 'pong'}
If you want your tests done via Selenium or other headless browser use
the ``live_server`` fixture. The server’s URL can be retrieved using
the ``url_for`` function:
.. code:: python
from flask import url_for
@pytest.mark.usefixtures('live_server')
class TestLiveServer:
def test_server_is_up_and_running(self):
res = urllib2.urlopen(url_for('index', _external=True))
assert b'OK' in res.read()
assert res.code == 200
Quick Start
-----------
To start using a plugin define your application fixture in ``conftest.py``:
.. code:: python
from myapp import create_app
@pytest.fixture
def app():
app = create_app()
return app
Install the extension with dependencies and run your test suite:
.. code:: bash
$ pip install pytest-flask
$ py.test
Documentation
-------------
The latest documentation is available at
http://pytest-flask.readthedocs.org/en/latest/.
Contributing
------------
Don’t hesitate to create a `GitHub issue
<https://github.com/vitalk/pytest-flask/issues>`_ for any **bug** or
**suggestion**.
"""
import os
from setuptools import find_packages
from setuptools import setup
def read(*parts):
    """Return the content of the file at the path joined from *parts*.

    Returns an empty string when the file cannot be opened, so optional
    files (e.g. requirements lists) degrade gracefully at build time.
    """
    try:
        # Use a context manager so the handle is closed deterministically;
        # the original leaked the open file until garbage collection.
        with open(os.path.join(*parts), "r", encoding="utf-8") as f:
            return f.read()
    except OSError:
        return ""
# Runtime dependencies are maintained in requirements/main.txt; if the file
# is missing, read() returns "" and this degrades to an empty list.
requirements = read("requirements", "main.txt").splitlines()
tests_require = []
# Optional dependency groups, installable as pytest-flask[docs] / [tests].
extras_require = {
    "docs": read("requirements", "docs.txt").splitlines(),
    "tests": tests_require,
}
setup(
    name="pytest-flask",
    # Versions should comply with PEP440, and automatically obtained from tags
    # thanks to setuptools_scm
    use_scm_version={"write_to": "pytest_flask/_version.py"},
    setup_requires=["setuptools-scm"],
    author="Vital Kudzelka",
    author_email="vital.kudzelka@gmail.com",
    url="https://github.com/vitalk/pytest-flask",
    project_urls={
        "Source": "https://github.com/pytest-dev/pytest-flask",
        "Tracker": "https://github.com/pytest-dev/pytest-flask/issues",
    },
    description="A set of py.test fixtures to test Flask applications.",
    # The module docstring at the top of this file doubles as the PyPI page.
    long_description=__doc__,
    license="MIT",
    packages=find_packages(exclude=["docs", "tests"]),
    zip_safe=False,
    platforms="any",
    install_requires=requirements,
    tests_require=tests_require,
    extras_require=extras_require,
    keywords="pytest flask testing",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Plugins",
        "Environment :: Web Environment",
        "Framework :: Pytest",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Testing",
    ],
    python_requires=">=3.5",
    # The following makes the plugin available to pytest
    entry_points={
        "pytest11": [
            "flask = pytest_flask.plugin",
        ]
    },
)
| 28.125714 | 78 | 0.658879 |
61e37e9ce2d63fe590b3ef4b6bc0cba88228a180 | 51,828 | py | Python | grblas/matrix.py | vishalbelsare/grblas | d181ac4cb495d1bc806253137f53a42d65693b5e | [
"Apache-2.0"
] | null | null | null | grblas/matrix.py | vishalbelsare/grblas | d181ac4cb495d1bc806253137f53a42d65693b5e | [
"Apache-2.0"
] | null | null | null | grblas/matrix.py | vishalbelsare/grblas | d181ac4cb495d1bc806253137f53a42d65693b5e | [
"Apache-2.0"
] | null | null | null | import itertools
import warnings
import numpy as np
from . import _automethods, backend, binary, ffi, lib, monoid, semiring, utils
from ._ss.matrix import ss
from .base import BaseExpression, BaseType, call
from .dtypes import _INDEX, lookup_dtype, unify
from .exceptions import NoValue, check_status
from .expr import AmbiguousAssignOrExtract, IndexerResolver, Updater
from .mask import StructuralMask, ValueMask
from .operator import get_typed_op
from .scalar import Scalar, ScalarExpression, _CScalar
from .utils import (
_CArray,
_Pointer,
class_property,
ints_to_numpy_buffer,
output_type,
values_to_numpy_buffer,
wrapdoc,
)
from .vector import Vector, VectorExpression, _VectorAsMatrix
ffi_new = ffi.new
class Matrix(BaseType):
"""
GraphBLAS Sparse Matrix
High-level wrapper around GrB_Matrix type
"""
__slots__ = "_nrows", "_ncols", "ss"
_is_transposed = False
_name_counter = itertools.count()
def __init__(self, gb_obj, dtype, *, name=None):
if name is None:
name = f"M_{next(Matrix._name_counter)}"
self._nrows = None
self._ncols = None
super().__init__(gb_obj, dtype, name)
# Add ss extension methods
self.ss = ss(self)
def __del__(self):
gb_obj = getattr(self, "gb_obj", None)
if gb_obj is not None:
# it's difficult/dangerous to record the call, b/c `self.name` may not exist
check_status(lib.GrB_Matrix_free(gb_obj), self)
def __repr__(self, mask=None):
from .formatting import format_matrix
from .recorder import skip_record
with skip_record:
return format_matrix(self, mask=mask)
def _repr_html_(self, mask=None, collapse=False):
from .formatting import format_matrix_html
from .recorder import skip_record
with skip_record:
return format_matrix_html(self, mask=mask, collapse=collapse)
def __reduce__(self):
# SS, SuiteSparse-specific: export
pieces = self.ss.export(raw=True)
return self._deserialize, (pieces, self.name)
@staticmethod
def _deserialize(pieces, name):
# SS, SuiteSparse-specific: import
return Matrix.ss.import_any(name=name, **pieces)
@property
def S(self):
return StructuralMask(self)
@property
def V(self):
return ValueMask(self)
def __delitem__(self, keys):
del Updater(self)[keys]
def __getitem__(self, keys):
resolved_indexes = IndexerResolver(self, keys)
return AmbiguousAssignOrExtract(self, resolved_indexes)
def __setitem__(self, keys, delayed):
Updater(self)[keys] = delayed
def __contains__(self, index):
extractor = self[index]
if not extractor.resolved_indexes.is_single_element:
raise TypeError(
f"Invalid index to Matrix contains: {index!r}. A 2-tuple of ints is expected. "
"Doing `(i, j) in my_matrix` checks whether a value is present at that index."
)
scalar = extractor.new(name="s_contains")
return not scalar.is_empty
def __iter__(self):
rows, columns, values = self.to_values()
return zip(rows.flat, columns.flat)
def isequal(self, other, *, check_dtype=False):
"""
Check for exact equality (same size, same empty values)
If `check_dtype` is True, also checks that dtypes match
For equality of floating point Vectors, consider using `isclose`
"""
other = self._expect_type(
other, (Matrix, TransposedMatrix), within="isequal", argname="other"
)
if check_dtype and self.dtype != other.dtype:
return False
if self._nrows != other._nrows:
return False
if self._ncols != other._ncols:
return False
if self._nvals != other._nvals:
return False
if check_dtype:
common_dtype = self.dtype
else:
common_dtype = unify(self.dtype, other.dtype)
matches = Matrix.new(bool, self._nrows, self._ncols, name="M_isequal")
matches << self.ewise_mult(other, binary.eq[common_dtype])
# ewise_mult performs intersection, so nvals will indicate mismatched empty values
if matches._nvals != self._nvals:
return False
# Check if all results are True
return matches.reduce_scalar(monoid.land).new().value
def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False):
"""
Check for approximate equality (including same size and empty values)
If `check_dtype` is True, also checks that dtypes match
Closeness check is equivalent to `abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)`
"""
other = self._expect_type(
other, (Matrix, TransposedMatrix), within="isclose", argname="other"
)
if check_dtype and self.dtype != other.dtype:
return False
if self._nrows != other._nrows:
return False
if self._ncols != other._ncols:
return False
if self._nvals != other._nvals:
return False
matches = self.ewise_mult(other, binary.isclose(rel_tol, abs_tol)).new(
bool, name="M_isclose"
)
# ewise_mult performs intersection, so nvals will indicate mismatched empty values
if matches._nvals != self._nvals:
return False
# Check if all results are True
return matches.reduce_scalar(monoid.land).new().value
@property
def nrows(self):
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_nrows", empty=True)
call("GrB_Matrix_nrows", [_Pointer(scalar), self])
return n[0]
@property
def ncols(self):
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_ncols", empty=True)
call("GrB_Matrix_ncols", [_Pointer(scalar), self])
return n[0]
@property
def shape(self):
return (self._nrows, self._ncols)
@property
def nvals(self):
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_nvals", empty=True)
call("GrB_Matrix_nvals", [_Pointer(scalar), self])
return n[0]
@property
def _nvals(self):
"""Like nvals, but doesn't record calls"""
n = ffi_new("GrB_Index*")
check_status(lib.GrB_Matrix_nvals(n, self.gb_obj[0]), self)
return n[0]
@property
def T(self):
return TransposedMatrix(self)
def clear(self):
call("GrB_Matrix_clear", [self])
def resize(self, nrows, ncols):
nrows = _CScalar(nrows)
ncols = _CScalar(ncols)
call("GrB_Matrix_resize", [self, nrows, ncols])
self._nrows = nrows.scalar.value
self._ncols = ncols.scalar.value
def to_values(self, dtype=None):
"""
GrB_Matrix_extractTuples
Extract the rows, columns and values as a 3-tuple of numpy arrays
"""
nvals = self._nvals
rows = _CArray(size=nvals, name="&rows_array")
columns = _CArray(size=nvals, name="&columns_array")
values = _CArray(size=nvals, dtype=self.dtype, name="&values_array")
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_nvals", empty=True)
scalar.value = nvals
call(
f"GrB_Matrix_extractTuples_{self.dtype.name}",
[rows, columns, values, _Pointer(scalar), self],
)
values = values.array
if dtype is not None:
dtype = lookup_dtype(dtype)
if dtype != self.dtype:
values = values.astype(dtype.np_type) # copies
return (
rows.array,
columns.array,
values,
)
def build(self, rows, columns, values, *, dup_op=None, clear=False, nrows=None, ncols=None):
# TODO: accept `dtype` keyword to match the dtype of `values`?
rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
values, dtype = values_to_numpy_buffer(values, self.dtype)
n = values.size
if rows.size != n or columns.size != n:
raise ValueError(
f"`rows` and `columns` and `values` lengths must match: "
f"{rows.size}, {columns.size}, {values.size}"
)
if clear:
self.clear()
if nrows is not None or ncols is not None:
if nrows is None:
nrows = self._nrows
if ncols is None:
ncols = self._ncols
self.resize(nrows, ncols)
if n == 0:
return
dup_op_given = dup_op is not None
if not dup_op_given:
dup_op = binary.plus
dup_op = get_typed_op(dup_op, self.dtype, kind="binary")
if dup_op.opclass == "Monoid":
dup_op = dup_op.binaryop
else:
self._expect_op(dup_op, "BinaryOp", within="build", argname="dup_op")
rows = _CArray(rows)
columns = _CArray(columns)
values = _CArray(values, self.dtype)
call(
f"GrB_Matrix_build_{self.dtype.name}",
[self, rows, columns, values, _CScalar(n), dup_op],
)
# Check for duplicates when dup_op was not provided
if not dup_op_given and self._nvals < n:
raise ValueError("Duplicate indices found, must provide `dup_op` BinaryOp")
def dup(self, dtype=None, *, mask=None, name=None):
"""
GrB_Matrix_dup
Create a new Matrix by duplicating this one
"""
if dtype is not None or mask is not None:
if dtype is None:
dtype = self.dtype
rv = Matrix.new(dtype, nrows=self._nrows, ncols=self._ncols, name=name)
rv(mask=mask)[...] = self
else:
new_mat = ffi_new("GrB_Matrix*")
rv = Matrix(new_mat, self.dtype, name=name)
call("GrB_Matrix_dup", [_Pointer(rv), self])
rv._nrows = self._nrows
rv._ncols = self._ncols
return rv
def wait(self):
"""
GrB_Matrix_wait
In non-blocking mode, the computations may be delayed and not yet safe
to use by multiple threads. Use wait to force completion of a Matrix
and make it safe to use as input parameters on multiple threads.
"""
call("GrB_Matrix_wait", [_Pointer(self)])
@classmethod
def new(cls, dtype, nrows=0, ncols=0, *, name=None):
"""
GrB_Matrix_new
Create a new empty Matrix from the given type, number of rows, and number of columns
"""
new_matrix = ffi_new("GrB_Matrix*")
dtype = lookup_dtype(dtype)
rv = cls(new_matrix, dtype, name=name)
nrows = _CScalar(nrows)
ncols = _CScalar(ncols)
call("GrB_Matrix_new", [_Pointer(rv), dtype, nrows, ncols])
rv._nrows = nrows.scalar.value
rv._ncols = ncols.scalar.value
return rv
@classmethod
def from_values(
cls,
rows,
columns,
values,
dtype=None,
*,
nrows=None,
ncols=None,
dup_op=None,
name=None,
):
"""Create a new Matrix from the given lists of row indices, column
indices, and values. If nrows or ncols are not provided, they
are computed from the max row and column index found.
values may be a scalar, in which case duplicate indices are ignored.
"""
rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
values, dtype = values_to_numpy_buffer(values, dtype)
# Compute nrows and ncols if not provided
if nrows is None:
if rows.size == 0:
raise ValueError("No row indices provided. Unable to infer nrows.")
nrows = int(rows.max()) + 1
if ncols is None:
if columns.size == 0:
raise ValueError("No column indices provided. Unable to infer ncols.")
ncols = int(columns.max()) + 1
# Create the new matrix
C = cls.new(dtype, nrows, ncols, name=name)
if values.ndim == 0:
if dup_op is not None:
raise ValueError(
"dup_op must be None if values is a scalar so that all "
"values can be identical. Duplicate indices will be ignored."
)
# SS, SuiteSparse-specific: build_Scalar
C.ss.build_scalar(rows, columns, values.tolist())
else:
# Add the data
# This needs to be the original data to get proper error messages
C.build(rows, columns, values, dup_op=dup_op)
return C
@property
def _carg(self):
return self.gb_obj[0]
#########################################################
# Delayed methods
#
# These return a delayed expression object which must be passed
# to __setitem__ to trigger a call to GraphBLAS
#########################################################
def ewise_add(self, other, op=monoid.plus, *, require_monoid=True):
"""
GrB_Matrix_eWiseAdd
Result will contain the union of indices from both Matrices
Default op is monoid.plus.
Unless explicitly disabled, this method requires a monoid (directly or from a semiring).
The reason for this is that binary operators can create very confusing behavior when
only one of the two elements is present.
Examples:
- binary.minus where left=N/A and right=4 yields 4 rather than -4 as might be expected
- binary.gt where left=N/A and right=4 yields True
- binary.gt where left=N/A and right=0 yields False
The behavior is caused by grabbing the non-empty value and using it directly without
performing any operation. In the case of `gt`, the non-empty value is cast to a boolean.
For these reasons, users are required to be explicit when choosing this surprising behavior.
"""
method_name = "ewise_add"
other = self._expect_type(
other,
(Matrix, TransposedMatrix),
within=method_name,
argname="other",
op=op,
)
op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
# Per the spec, op may be a semiring, but this is weird, so don't.
if require_monoid:
if op.opclass != "BinaryOp" or op.monoid is None:
self._expect_op(
op,
"Monoid",
within=method_name,
argname="op",
extra_message="A BinaryOp may be given if require_monoid keyword is False.",
)
else:
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
expr = MatrixExpression(
method_name,
f"GrB_Matrix_eWiseAdd_{op.opclass}",
[self, other],
op=op,
at=self._is_transposed,
bt=other._is_transposed,
)
if self.shape != other.shape:
expr.new(name="") # incompatible shape; raise now
return expr
def ewise_mult(self, other, op=binary.times):
"""
GrB_Matrix_eWiseMult
Result will contain the intersection of indices from both Matrices
Default op is binary.times
"""
method_name = "ewise_mult"
other = self._expect_type(
other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
)
op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
# Per the spec, op may be a semiring, but this is weird, so don't.
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
expr = MatrixExpression(
method_name,
f"GrB_Matrix_eWiseMult_{op.opclass}",
[self, other],
op=op,
at=self._is_transposed,
bt=other._is_transposed,
)
if self.shape != other.shape:
expr.new(name="") # incompatible shape; raise now
return expr
def mxv(self, other, op=semiring.plus_times):
"""
GrB_mxv
Matrix-Vector multiplication. Result is a Vector.
Default op is semiring.plus_times
"""
method_name = "mxv"
other = self._expect_type(other, Vector, within=method_name, argname="other", op=op)
op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
self._expect_op(op, "Semiring", within=method_name, argname="op")
expr = VectorExpression(
method_name,
"GrB_mxv",
[self, other],
op=op,
size=self._nrows,
at=self._is_transposed,
)
if self._ncols != other._size:
expr.new(name="") # incompatible shape; raise now
return expr
def mxm(self, other, op=semiring.plus_times):
"""
GrB_mxm
Matrix-Matrix multiplication. Result is a Matrix.
Default op is semiring.plus_times
"""
method_name = "mxm"
other = self._expect_type(
other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
)
op = get_typed_op(op, self.dtype, other.dtype, kind="semiring")
self._expect_op(op, "Semiring", within=method_name, argname="op")
expr = MatrixExpression(
method_name,
"GrB_mxm",
[self, other],
op=op,
nrows=self._nrows,
ncols=other._ncols,
at=self._is_transposed,
bt=other._is_transposed,
)
if self._ncols != other._nrows:
expr.new(name="") # incompatible shape; raise now
return expr
def kronecker(self, other, op=binary.times):
"""
GrB_kronecker
Kronecker product or sum (depending on op used)
Default op is binary.times
"""
method_name = "kronecker"
other = self._expect_type(
other, (Matrix, TransposedMatrix), within=method_name, argname="other", op=op
)
op = get_typed_op(op, self.dtype, other.dtype, kind="binary")
# Per the spec, op may be a semiring, but this is weird, so don't.
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
return MatrixExpression(
method_name,
f"GrB_Matrix_kronecker_{op.opclass}",
[self, other],
op=op,
nrows=self._nrows * other._nrows,
ncols=self._ncols * other._ncols,
at=self._is_transposed,
bt=other._is_transposed,
)
def apply(self, op, right=None, *, left=None):
"""
GrB_Matrix_apply
Apply UnaryOp to each element of the calling Matrix
A BinaryOp can also be applied if a scalar is passed in as `left` or `right`,
effectively converting a BinaryOp into a UnaryOp
"""
method_name = "apply"
extra_message = (
"apply only accepts UnaryOp with no scalars or BinaryOp with `left` or `right` scalar."
)
if left is None and right is None:
op = get_typed_op(op, self.dtype, kind="unary")
self._expect_op(
op,
"UnaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = "GrB_Matrix_apply"
args = [self]
expr_repr = None
elif right is None:
if type(left) is not Scalar:
try:
left = Scalar.from_value(left, name="")
except TypeError:
left = self._expect_type(
left,
Scalar,
within=method_name,
keyword_name="left",
extra_message="Literal scalars also accepted.",
op=op,
)
op = get_typed_op(op, left.dtype, self.dtype, is_left_scalar=True, kind="binary")
if op.opclass == "Monoid":
op = op.binaryop
else:
self._expect_op(
op,
"BinaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = f"GrB_Matrix_apply_BinaryOp1st_{left.dtype}"
args = [_CScalar(left), self]
expr_repr = "{1.name}.apply({op}, left={0})"
elif left is None:
if type(right) is not Scalar:
try:
right = Scalar.from_value(right, name="")
except TypeError:
right = self._expect_type(
right,
Scalar,
within=method_name,
keyword_name="right",
extra_message="Literal scalars also accepted.",
op=op,
)
op = get_typed_op(op, self.dtype, right.dtype, is_right_scalar=True, kind="binary")
if op.opclass == "Monoid":
op = op.binaryop
else:
self._expect_op(
op,
"BinaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = f"GrB_Matrix_apply_BinaryOp2nd_{right.dtype}"
args = [self, _CScalar(right)]
expr_repr = "{0.name}.apply({op}, right={1})"
else:
raise TypeError("Cannot provide both `left` and `right` to apply")
return MatrixExpression(
method_name,
cfunc_name,
args,
op=op,
nrows=self._nrows,
ncols=self._ncols,
expr_repr=expr_repr,
at=self._is_transposed,
bt=self._is_transposed,
)
def reduce_rowwise(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each row, converting the matrix to a vector
Default op is monoid.lor for boolean and monoid.plus otherwise
"""
method_name = "reduce_rowwise"
op = get_typed_op(op, self.dtype, kind="binary")
self._expect_op(op, ("BinaryOp", "Monoid", "Aggregator"), within=method_name, argname="op")
# Using a monoid may be more efficient, so change to one if possible.
# Also, SuiteSparse doesn't like user-defined binarops here.
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
return VectorExpression(
method_name,
f"GrB_Matrix_reduce_{op.opclass}",
[self],
op=op,
size=self._nrows,
at=self._is_transposed,
)
def reduce_rows(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each row, converting the matrix to a vector
Default op is monoid.lor for boolean and monoid.plus otherwise
**This function is deprecated. Please use ``Matrix.reduce_rowwise`` instead.
"""
warnings.warn(
"`Matrix.reduce_rows` is deprecated; please use `Matrix.reduce_rowwise` instead",
DeprecationWarning,
)
return self.reduce_rowwise(op)
def reduce_columnwise(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each column, converting the matrix to a vector
Default op is monoid.lor for boolean and monoid.plus otherwise
"""
method_name = "reduce_columnwise"
op = get_typed_op(op, self.dtype, kind="binary")
self._expect_op(op, ("BinaryOp", "Monoid", "Aggregator"), within=method_name, argname="op")
# Using a monoid may be more efficient, so change to one if possible.
# Also, SuiteSparse doesn't like user-defined binarops here.
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
return VectorExpression(
method_name,
f"GrB_Matrix_reduce_{op.opclass}",
[self],
op=op,
size=self._ncols,
at=not self._is_transposed,
)
def reduce_columns(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each column, converting the matrix to a vector
Default op is monoid.lor for boolean and monoid.plus otherwise
**This function is deprecated. Please use ``Matrix.reduce_columnwise`` instead.
"""
warnings.warn(
"`Matrix.reduce_columns` is deprecated; please use `Matrix.reduce_columnwise` instead",
DeprecationWarning,
)
return self.reduce_columnwise(op)
def reduce_scalar(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values into a scalar
Default op is monoid.lor for boolean and monoid.plus otherwise
"""
method_name = "reduce_scalar"
op = get_typed_op(op, self.dtype, kind="binary")
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
else:
self._expect_op(op, ("Monoid", "Aggregator"), within=method_name, argname="op")
if op.opclass == "Aggregator" and op.name in {
"argmin",
"argmax",
"first_index",
"last_index",
}:
raise ValueError(f"Aggregator {op.name} may not be used with Matrix.reduce_scalar.")
return ScalarExpression(
method_name,
"GrB_Matrix_reduce_{output_dtype}",
[self],
op=op, # to be determined later
)
##################################
# Extract and Assign index methods
##################################
def _extract_element(self, resolved_indexes, dtype=None, name="s_extract"):
if dtype is None:
dtype = self.dtype
else:
dtype = lookup_dtype(dtype)
rowidx, colidx = resolved_indexes.indices
if self._is_transposed:
rowidx, colidx = colidx, rowidx
result = Scalar.new(dtype, name=name)
if (
call(
f"GrB_Matrix_extractElement_{dtype}",
[_Pointer(result), self, rowidx.index, colidx.index],
)
is not NoValue
):
result._is_empty = False
return result
def _prep_for_extract(self, resolved_indexes):
method_name = "__getitem__"
(rowsize, rows, rowscalar), (colsize, cols, colscalar) = resolved_indexes.indices
if rowsize is None:
# Row-only selection; GraphBLAS doesn't have this method, so we hack it using transpose
return VectorExpression(
method_name,
"GrB_Col_extract",
[self, cols, colscalar, rows],
expr_repr="{0.name}[{3}, [{2} cols]]",
size=colsize,
dtype=self.dtype,
at=not self._is_transposed,
)
elif colsize is None:
# Column-only selection
return VectorExpression(
method_name,
"GrB_Col_extract",
[self, rows, rowscalar, cols],
expr_repr="{0.name}[[{2} rows], {3}]",
size=rowsize,
dtype=self.dtype,
at=self._is_transposed,
)
else:
return MatrixExpression(
method_name,
"GrB_Matrix_extract",
[self, rows, rowscalar, cols, colscalar],
expr_repr="{0.name}[[{2} rows], [{4} cols]]",
nrows=rowsize,
ncols=colsize,
dtype=self.dtype,
at=self._is_transposed,
)
def _assign_element(self, resolved_indexes, value):
rowidx, colidx = resolved_indexes.indices
if type(value) is not Scalar:
try:
value = Scalar.from_value(value, name="")
except TypeError:
value = self._expect_type(
value,
Scalar,
within="__setitem__",
argname="value",
extra_message="Literal scalars also accepted.",
)
# should we cast?
call(
f"GrB_Matrix_setElement_{value.dtype}",
[self, _CScalar(value), rowidx.index, colidx.index],
)
def _prep_for_assign(self, resolved_indexes, value, mask=None, is_submask=False):
method_name = "__setitem__"
(rowsize, rows, rowscalar), (colsize, cols, colscalar) = resolved_indexes.indices
extra_message = "Literal scalars also accepted."
value_type = output_type(value)
if value_type is Vector:
if type(value) is not Vector:
value = self._expect_type(
value,
Vector,
within=method_name,
)
if rowsize is None and colsize is not None:
# Row-only selection
if mask is not None and type(mask.mask) is Matrix:
if is_submask:
# C[i, J](M) << v
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
else:
# C(M)[i, J] << v
# Upcast v to a Matrix and use Matrix_assign
rows = _CArray([rows.scalar.value])
rowscalar = _CScalar(1)
delayed = MatrixExpression(
method_name,
"GrB_Matrix_assign",
[_VectorAsMatrix(value), rows, rowscalar, cols, colscalar],
expr_repr="[[{2} rows], [{4} cols]] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
at=True,
)
else:
if is_submask:
# C[i, J](m) << v
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Row_subassign"
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[i, J] << v
# C[i, J] << v
cfunc_name = "GrB_Row_assign"
expr_repr = "[{1}, [{3} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, rows, cols, colscalar],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is not None:
# Column-only selection
if mask is not None and type(mask.mask) is Matrix:
if is_submask:
# C[I, j](M) << v
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
else:
# C(M)[I, j] << v
# Upcast v to a Matrix and use Matrix_assign
cols = _CArray([cols.scalar.value])
colscalar = _CScalar(1)
delayed = MatrixExpression(
method_name,
"GrB_Matrix_assign",
[_VectorAsMatrix(value), rows, rowscalar, cols, colscalar],
expr_repr="[[{2} rows], [{4} cols]] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
else:
if is_submask:
# C[I, j](m) << v
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Col_subassign"
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[I, j] << v
# C[I, j] << v
cfunc_name = "GrB_Col_assign"
expr_repr = "[{1}, [{3} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, rows, rowscalar, cols],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is None:
# C[i, j] << v (mask doesn't matter)
value = self._expect_type(
value,
Scalar,
within=method_name,
extra_message=extra_message,
)
else:
# C[I, J] << v (mask doesn't matter)
value = self._expect_type(
value,
(Scalar, Matrix, TransposedMatrix),
within=method_name,
extra_message=extra_message,
)
elif value_type in {Matrix, TransposedMatrix}:
if type(value) not in {Matrix, TransposedMatrix}:
value = self._expect_type(
value,
(Matrix, TransposedMatrix),
within=method_name,
)
if rowsize is None or colsize is None:
if rowsize is None and colsize is None:
# C[i, j] << A (mask doesn't matter)
value = self._expect_type(
value,
Scalar,
within=method_name,
extra_message=extra_message,
)
else:
# C[I, j] << A
# C[i, J] << A (mask doesn't matter)
value = self._expect_type(
value,
(Scalar, Vector),
within=method_name,
extra_message=extra_message,
)
if is_submask:
# C[I, J](M) << A
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Matrix_subassign"
expr_repr = "[[{2} rows], [{4} cols]](%s) << {0.name}" % mask.name
else:
# C[I, J] << A
# C(M)[I, J] << A
cfunc_name = "GrB_Matrix_assign"
expr_repr = "[[{2} rows], [{4} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, rows, rowscalar, cols, colscalar],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
at=value._is_transposed,
)
else:
if type(value) is not Scalar:
try:
value = Scalar.from_value(value, name="")
except TypeError:
if rowsize is None or colsize is None:
types = (Scalar, Vector)
else:
types = (Scalar, Matrix, TransposedMatrix)
value = self._expect_type(
value,
types,
within=method_name,
argname="value",
extra_message=extra_message,
)
if mask is not None and type(mask.mask) is Vector:
if rowsize is None and colsize is not None:
if is_submask:
# C[i, J](m) << c
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Row_subassign"
value_vector = Vector.new(value.dtype, size=mask.mask._size, name="v_temp")
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[i, J] << c
# C[i, J] << c
cfunc_name = "GrB_Row_assign"
value_vector = Vector.new(value.dtype, size=colsize, name="v_temp")
expr_repr = "[{1}, [{3} cols]] = {0.name}"
# SS, SuiteSparse-specific: assume efficient vector with single scalar
value_vector << value
# Row-only selection
delayed = MatrixExpression(
method_name,
cfunc_name,
[value_vector, rows, cols, colscalar],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is not None:
if is_submask:
# C[I, j](m) << c
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Col_subassign"
value_vector = Vector.new(value.dtype, size=mask.mask._size, name="v_temp")
else:
# C(m)[I, j] << c
# C[I, j] << c
cfunc_name = "GrB_Col_assign"
value_vector = Vector.new(value.dtype, size=rowsize, name="v_temp")
# SS, SuiteSparse-specific: assume efficient vector with single scalar
value_vector << value
# Column-only selection
delayed = MatrixExpression(
method_name,
cfunc_name,
[value_vector, rows, rowscalar, cols],
expr_repr="[[{2} rows], {3}] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is None:
# Matrix object, Vector mask, scalar index
# C(m)[i, j] << c
# C[i, j](m) << c
raise TypeError(
"Unable to use Vector mask on single element assignment to a Matrix"
)
else:
# Matrix object, Vector mask, Matrix index
# C(m)[I, J] << c
# C[I, J](m) << c
raise TypeError("Unable to use Vector mask on Matrix assignment to a Matrix")
else:
if is_submask:
if rowsize is None or colsize is None:
if rowsize is None and colsize is None:
# C[i, j](M) << c
raise TypeError("Single element assign does not accept a submask")
else:
# C[i, J](M) << c
# C[I, j](M) << c
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
# C[I, J](M) << c
# SS, SuiteSparse-specific: subassign
cfunc_name = f"GrB_Matrix_subassign_{value.dtype}"
expr_repr = "[[{2} rows], [{4} cols]](%s) = {0}" % mask.name
else:
# C(M)[I, J] << c
# C(M)[i, J] << c
# C(M)[I, j] << c
# C(M)[i, j] << c
if rowsize is None:
rows = _CArray([rows.scalar.value])
rowscalar = _CScalar(1)
if colsize is None:
cols = _CArray([cols.scalar.value])
colscalar = _CScalar(1)
cfunc_name = f"GrB_Matrix_assign_{value.dtype}"
expr_repr = "[[{2} rows], [{4} cols]] = {0}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[_CScalar(value), rows, rowscalar, cols, colscalar],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
return delayed
def _delete_element(self, resolved_indexes):
    """Drop the stored value at the (row, col) position described by ``resolved_indexes``."""
    row_index, col_index = resolved_indexes.indices
    call("GrB_Matrix_removeElement", [self, row_index.index, col_index.index])
def to_pygraphblas(self):  # pragma: no cover
    """Convert to a new `pygraphblas.Matrix`

    This does not copy data.

    This gives control of the underlying GraphBLAS object to `pygraphblas`.
    This means operations on the current `grblas` object will fail!
    """
    if backend != "suitesparse":
        raise RuntimeError(
            f"to_pygraphblas only works with 'suitesparse' backend, not {backend}"
        )
    import pygraphblas as pg

    # Hand the raw GraphBLAS handle over, then neutralize our own reference so
    # this object no longer owns (or could double-free) the underlying matrix.
    converted = pg.Matrix(self.gb_obj, pg.types._gb_type_to_type(self.dtype.gb_obj))
    self.gb_obj = ffi.NULL
    return converted
@classmethod
def from_pygraphblas(cls, matrix):  # pragma: no cover
    """Convert a `pygraphblas.Matrix` to a new `grblas.Matrix`

    This does not copy data.

    This gives control of the underlying GraphBLAS object to `grblas`.
    This means operations on the original `pygraphblas` object will fail!
    """
    if backend != "suitesparse":
        raise RuntimeError(
            f"from_pygraphblas only works with 'suitesparse' backend, not {backend!r}"
        )
    import pygraphblas as pg

    if not isinstance(matrix, pg.Matrix):
        raise TypeError(f"Expected pygraphblas.Matrix object. Got type: {type(matrix)}")
    # Adopt the raw handle, copy the cached dimensions, and null out the
    # source's pointer so ownership transfers cleanly to the new object.
    new_matrix = cls(matrix._matrix, lookup_dtype(matrix.gb_type))
    new_matrix._nrows = matrix.nrows
    new_matrix._ncols = matrix.ncols
    matrix._matrix = ffi.NULL
    return new_matrix
Matrix.ss = class_property(Matrix.ss, ss)
class MatrixExpression(BaseExpression):
    """Delayed Matrix result.

    Records the GraphBLAS call (``cfunc_name`` plus ``args``) that will
    produce a Matrix without executing it yet; the value is realized on
    demand via ``construct_output``/``_get_value``.
    """

    __slots__ = "_ncols", "_nrows"
    output_type = Matrix
    _is_transposed = False

    def __init__(
        self,
        method_name,
        cfunc_name,
        args,
        *,
        at=False,
        bt=False,
        op=None,
        dtype=None,
        expr_repr=None,
        ncols=None,
        nrows=None,
    ):
        super().__init__(
            method_name,
            cfunc_name,
            args,
            at=at,
            bt=bt,
            op=op,
            dtype=dtype,
            expr_repr=expr_repr,
        )
        # Default the result shape to the shape of the first argument.
        if ncols is None:
            ncols = args[0]._ncols
        if nrows is None:
            nrows = args[0]._nrows
        self._ncols = ncols
        self._nrows = nrows

    def construct_output(self, dtype=None, *, name=None):
        """Create an empty Matrix of this expression's shape (dtype defaults to the expression's)."""
        if dtype is None:
            dtype = self.dtype
        return Matrix.new(dtype, self._nrows, self._ncols, name=name)

    def __repr__(self):
        from .formatting import format_matrix_expression

        return format_matrix_expression(self)

    def _repr_html_(self):
        from .formatting import format_matrix_expression_html

        return format_matrix_expression_html(self)

    @property
    def ncols(self):
        return self._ncols

    @property
    def nrows(self):
        return self._nrows

    @property
    def shape(self):
        return (self._nrows, self._ncols)

    # Paste here from _automethods.py
    # NOTE(review): the block below appears to forward each attribute to the
    # auto-computed Matrix value (via _automethods) so an expression can be
    # used read-only wherever a Matrix can -- confirm in _automethods.py.
    _get_value = _automethods._get_value
    S = wrapdoc(Matrix.S)(property(_automethods.S))
    T = wrapdoc(Matrix.T)(property(_automethods.T))
    V = wrapdoc(Matrix.V)(property(_automethods.V))
    __and__ = wrapdoc(Matrix.__and__)(property(_automethods.__and__))
    __contains__ = wrapdoc(Matrix.__contains__)(property(_automethods.__contains__))
    __getitem__ = wrapdoc(Matrix.__getitem__)(property(_automethods.__getitem__))
    __iter__ = wrapdoc(Matrix.__iter__)(property(_automethods.__iter__))
    __matmul__ = wrapdoc(Matrix.__matmul__)(property(_automethods.__matmul__))
    __or__ = wrapdoc(Matrix.__or__)(property(_automethods.__or__))
    __rand__ = wrapdoc(Matrix.__rand__)(property(_automethods.__rand__))
    __rmatmul__ = wrapdoc(Matrix.__rmatmul__)(property(_automethods.__rmatmul__))
    __ror__ = wrapdoc(Matrix.__ror__)(property(_automethods.__ror__))
    _carg = wrapdoc(Matrix._carg)(property(_automethods._carg))
    _name_html = wrapdoc(Matrix._name_html)(property(_automethods._name_html))
    _nvals = wrapdoc(Matrix._nvals)(property(_automethods._nvals))
    apply = wrapdoc(Matrix.apply)(property(_automethods.apply))
    ewise_add = wrapdoc(Matrix.ewise_add)(property(_automethods.ewise_add))
    ewise_mult = wrapdoc(Matrix.ewise_mult)(property(_automethods.ewise_mult))
    gb_obj = wrapdoc(Matrix.gb_obj)(property(_automethods.gb_obj))
    isclose = wrapdoc(Matrix.isclose)(property(_automethods.isclose))
    isequal = wrapdoc(Matrix.isequal)(property(_automethods.isequal))
    kronecker = wrapdoc(Matrix.kronecker)(property(_automethods.kronecker))
    mxm = wrapdoc(Matrix.mxm)(property(_automethods.mxm))
    mxv = wrapdoc(Matrix.mxv)(property(_automethods.mxv))
    name = wrapdoc(Matrix.name)(property(_automethods.name))
    name = name.setter(_automethods._set_name)
    nvals = wrapdoc(Matrix.nvals)(property(_automethods.nvals))
    reduce_columns = wrapdoc(Matrix.reduce_columns)(property(_automethods.reduce_columns))
    reduce_columnwise = wrapdoc(Matrix.reduce_columnwise)(property(_automethods.reduce_columnwise))
    reduce_rows = wrapdoc(Matrix.reduce_rows)(property(_automethods.reduce_rows))
    reduce_rowwise = wrapdoc(Matrix.reduce_rowwise)(property(_automethods.reduce_rowwise))
    reduce_scalar = wrapdoc(Matrix.reduce_scalar)(property(_automethods.reduce_scalar))
    ss = wrapdoc(Matrix.ss)(property(_automethods.ss))
    to_pygraphblas = wrapdoc(Matrix.to_pygraphblas)(property(_automethods.to_pygraphblas))
    to_values = wrapdoc(Matrix.to_values)(property(_automethods.to_values))
    wait = wrapdoc(Matrix.wait)(property(_automethods.wait))
    # These raise exceptions
    __array__ = wrapdoc(Matrix.__array__)(Matrix.__array__)
    __bool__ = wrapdoc(Matrix.__bool__)(Matrix.__bool__)
    __iadd__ = _automethods.__iadd__
    __iand__ = _automethods.__iand__
    __ifloordiv__ = _automethods.__ifloordiv__
    __imatmul__ = _automethods.__imatmul__
    __imod__ = _automethods.__imod__
    __imul__ = _automethods.__imul__
    __ior__ = _automethods.__ior__
    __ipow__ = _automethods.__ipow__
    __isub__ = _automethods.__isub__
    __itruediv__ = _automethods.__itruediv__
    __ixor__ = _automethods.__ixor__
class TransposedMatrix:
    """Lazy transpose view of a Matrix.

    No data is moved: the wrapped Matrix stays in ``_matrix`` and the
    row/column dimensions are reported swapped.  Most behavior is borrowed
    directly from ``Matrix`` via the attribute assignments below.
    """

    __slots__ = "_matrix", "_ncols", "_nrows", "__weakref__"
    _is_scalar = False
    _is_transposed = True

    def __init__(self, matrix):
        self._matrix = matrix
        # Cached dimensions are swapped relative to the wrapped matrix.
        self._nrows = matrix._ncols
        self._ncols = matrix._nrows

    def __repr__(self):
        from .formatting import format_matrix

        return format_matrix(self)

    def _repr_html_(self, collapse=False):
        from .formatting import format_matrix_html

        return format_matrix_html(self, collapse=collapse)

    def new(self, dtype=None, *, mask=None, name=None):
        """Materialize the transpose into a new Matrix (optionally masked)."""
        if dtype is None:
            dtype = self.dtype
        output = Matrix.new(dtype, self._nrows, self._ncols, name=name)
        if mask is None:
            output.update(self)
        else:
            output(mask=mask).update(self)
        return output

    dup = new  # ``dup`` is an alias: duplicating a view materializes it.

    @property
    def T(self):
        # Transposing a transpose gives back the original matrix.
        return self._matrix

    @property
    def gb_obj(self):
        return self._matrix.gb_obj

    @property
    def dtype(self):
        return self._matrix.dtype

    @wrapdoc(Matrix.to_values)
    def to_values(self, dtype=None):
        # Transpose by swapping the coordinate arrays of the wrapped matrix.
        rows, cols, vals = self._matrix.to_values(dtype)
        return cols, rows, vals

    @property
    def _carg(self):
        return self._matrix.gb_obj[0]

    @property
    def name(self):
        return f"{self._matrix.name}.T"

    @property
    def _name_html(self):
        return f"{self._matrix._name_html}.T"

    # Properties
    # NOTE(review): the cross-wiring (nrows -> Matrix.ncols) looks intentional:
    # these borrowed properties presumably query the *underlying* untransposed
    # GraphBLAS object through gb_obj/_carg, so the transposed nrows equals the
    # original ncols -- confirm against Matrix's nrows/ncols definitions.
    nrows = Matrix.ncols
    ncols = Matrix.nrows
    shape = Matrix.shape
    nvals = Matrix.nvals
    _nvals = Matrix._nvals

    # Delayed methods
    ewise_add = Matrix.ewise_add
    ewise_mult = Matrix.ewise_mult
    mxv = Matrix.mxv
    mxm = Matrix.mxm
    kronecker = Matrix.kronecker
    apply = Matrix.apply
    reduce_rowwise = Matrix.reduce_rowwise
    reduce_columnwise = Matrix.reduce_columnwise
    reduce_rows = Matrix.reduce_rows
    reduce_columns = Matrix.reduce_columns
    reduce_scalar = Matrix.reduce_scalar

    # Operator sugar
    __or__ = Matrix.__or__
    __ror__ = Matrix.__ror__
    __and__ = Matrix.__and__
    __rand__ = Matrix.__rand__
    __matmul__ = Matrix.__matmul__
    __rmatmul__ = Matrix.__rmatmul__

    # Bad sugar
    # In-place operators on a view are rejected (presumably raise via
    # _automethods -- confirm there).
    __iadd__ = _automethods.__iadd__
    __iand__ = _automethods.__iand__
    __ifloordiv__ = _automethods.__ifloordiv__
    __imatmul__ = _automethods.__imatmul__
    __imod__ = _automethods.__imod__
    __imul__ = _automethods.__imul__
    __ior__ = _automethods.__ior__
    __ipow__ = _automethods.__ipow__
    __isub__ = _automethods.__isub__
    __itruediv__ = _automethods.__itruediv__
    __ixor__ = _automethods.__ixor__

    # Misc.
    isequal = Matrix.isequal
    isclose = Matrix.isclose
    _extract_element = Matrix._extract_element
    _prep_for_extract = Matrix._prep_for_extract
    __eq__ = Matrix.__eq__
    __bool__ = Matrix.__bool__
    __getitem__ = Matrix.__getitem__
    __contains__ = Matrix.__contains__
    __iter__ = Matrix.__iter__
    _expect_type = Matrix._expect_type
    _expect_op = Matrix._expect_op
    __array__ = Matrix.__array__
# Register the concrete output type produced by each of these classes for the
# dispatch machinery in ``utils``.
utils._output_types[Matrix] = Matrix
utils._output_types[MatrixExpression] = Matrix
utils._output_types[TransposedMatrix] = TransposedMatrix

# Import infix to import _infixmethods, which has side effects
from . import infix  # noqa isort:skip
| 37.665698 | 100 | 0.544474 |
30a2c524f55c57dc6b52b295ecabf12fe060fcd4 | 52 | py | Python | src/LacosDeRepeticao/for/exemplo-for 2-0.py | santa-python/python-workshop | 00a17b96218625933681df85f73268326adbeb22 | [
"MIT"
] | 1 | 2019-03-16T14:49:27.000Z | 2019-03-16T14:49:27.000Z | src/LacosDeRepeticao/for/exemplo-for 2-0.py | santa-python/python-workshop | 00a17b96218625933681df85f73268326adbeb22 | [
"MIT"
] | null | null | null | src/LacosDeRepeticao/for/exemplo-for 2-0.py | santa-python/python-workshop | 00a17b96218625933681df85f73268326adbeb22 | [
"MIT"
# Workshop example: a simple counted loop printing 0 through 4.
for number in range(5):
    print(number)
# 0
# 1
# 2
# 3
# 4
7be0b5761a0b33dcb957230b6c9a376315639944 | 479 | py | Python | data/text/metadata2raw.py | egirgin/cmpe493-term-project | 8af20fe33bf3b18d1b8bd66159da7559fe3387a3 | [
"MIT"
] | null | null | null | data/text/metadata2raw.py | egirgin/cmpe493-term-project | 8af20fe33bf3b18d1b8bd66159da7559fe3387a3 | [
"MIT"
] | null | null | null | data/text/metadata2raw.py | egirgin/cmpe493-term-project | 8af20fe33bf3b18d1b8bd66159da7559fe3387a3 | [
"MIT"
] | null | null | null | import pandas as pd
# Reduce the CORD-19 metadata file to (cord_uid, title, abstract) columns for
# the documents listed in doc_list.txt, and write the result to raw_data.csv.
metadata = pd.read_csv("./data/text/metadata.csv", low_memory=False)

raw_data = metadata[["cord_uid", "title", "abstract"]]
print("Original shape: " + str(raw_data.shape))

with open("./data/doc_list.txt", "r") as docFile:
    # splitlines() drops the line terminator of every line; the previous
    # ``x[:-1]`` slicing chopped the last character of the final id whenever
    # the file did not end with a newline.
    doc_list = docFile.read().splitlines()

raw_data = raw_data[raw_data["cord_uid"].isin(doc_list)]
print("New shape: " + str(raw_data.shape))

raw_data.to_csv("./data/text/raw_data.csv")
| 23.95 | 68 | 0.697286 |
bafc38d6fc5b9561e728cfa6adcb1cb2a5f145f1 | 5,808 | py | Python | core/preprocess_test.py | yekeren/Cap2Det | 727b3025f666e2053b3bbf94cf18f9ab56fb1599 | [
"Apache-2.0"
] | 32 | 2019-07-30T11:46:31.000Z | 2022-03-30T07:38:03.000Z | core/preprocess_test.py | yekeren/Cap2Det | 727b3025f666e2053b3bbf94cf18f9ab56fb1599 | [
"Apache-2.0"
] | 33 | 2019-07-28T21:58:28.000Z | 2022-03-11T23:54:44.000Z | core/preprocess_test.py | yekeren/Cap2Det | 727b3025f666e2053b3bbf94cf18f9ab56fb1599 | [
"Apache-2.0"
] | 11 | 2019-07-30T11:46:16.000Z | 2021-09-08T20:58:14.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from google.protobuf import text_format

import os
import cv2

from core import preprocess
from protos import preprocess_pb2

# Input fixture image and the scratch directory that receives the processed
# images for visual inspection.
_TESTDATA = "testdata"
_TESTFILE = "114144.jpg"
_TMPDIR = "tmp"

tf.logging.set_verbosity(tf.logging.INFO)
def test_random_crop(self):
tf.reset_default_graph()
scale = tf.placeholder(tf.float32, [])
image = tf.placeholder(tf.uint8, [None, None, 3])
cropped = preprocess.random_crop(image, random_crop_min_scale=scale)
filename = os.path.join(_TESTDATA, _TESTFILE)
image_data = cv2.imread(filename)[:, :, ::-1].copy()
with self.test_session() as sess:
for scale_data in [1.0, 0.8, 0.6, 0.4, 0.2]:
roi = sess.run(
cropped, feed_dict={
image: image_data,
scale: scale_data
})
filename = os.path.join(_TMPDIR, "%.1lf-%s" % (scale_data, _TESTFILE))
cv2.imwrite(filename, roi[:, :, ::-1])
tf.logging.info("ROI image at scale %.1lf is written to %s.",
scale_data, filename)
def _preprocess(self, options, prefix):
g = tf.Graph()
with g.as_default():
image = tf.placeholder(tf.uint8, [None, None, 3])
preprocessed = preprocess.preprocess_image(image, options)
filename = os.path.join(_TESTDATA, _TESTFILE)
image_data = cv2.imread(filename)[:, :, ::-1].copy()
with self.test_session(graph=g) as sess:
result = sess.run(preprocessed, feed_dict={image: image_data})
filename = os.path.join(_TMPDIR, "%s-%s" % (prefix, _TESTFILE))
cv2.imwrite(filename, result[:, :, ::-1])
tf.logging.info("Preprocessed image is written to %s.", filename)
def test_flip(self):
options = preprocess_pb2.Preprocess()
text_format.Merge(r"""
random_flip_left_right_prob: 0.5
""", options)
self._preprocess(options, "flip_no")
options.random_flip_left_right_prob = 1.0
self._preprocess(options, "flip_yes")
def test_brightness(self):
for max_delta in [0.4, 0.8]:
options = preprocess_pb2.Preprocess()
text_format.Merge(
r"""
random_brightness_prob: 1.0
random_brightness_max_delta: %.2lf
""" % (max_delta), options)
self._preprocess(options, "brightness_%.2lf" % (max_delta))
def test_contrast(self):
for contrast in [0.4, 0.6, 0.8]:
options = preprocess_pb2.Preprocess()
text_format.Merge(
r"""
random_contrast_prob: 1.0
random_contrast_lower: %.2lf
random_contrast_upper: %.2lf
""" % (contrast, contrast + 0.01), options)
self._preprocess(options, "contrast_%.2lf" % (contrast))
def test_hue(self):
for max_delta in [0.05, 0.10, 0.15]:
options = preprocess_pb2.Preprocess()
text_format.Merge(
r"""
random_hue_prob: 1.0
random_hue_max_delta: %.2lf
""" % (max_delta), options)
self._preprocess(options, "hue_%.2lf" % (max_delta))
def test_saturation(self):
for saturation in [0.4, 1.6]:
options = preprocess_pb2.Preprocess()
text_format.Merge(
r"""
random_saturation_prob: 1.0
random_saturation_lower: %.2lf
random_saturation_upper: %.2lf
""" % (saturation, saturation + 0.01), options)
self._preprocess(options, "saturation_%.2lf" % (saturation))
def test_preprocess(self):
for index in range(20):
options = preprocess_pb2.Preprocess()
text_format.Merge(
r"""
random_flip_left_right_prob: 0.5
random_crop_prob: 1.0
random_crop_min_scale: 0.6
random_brightness_prob: 0.8
random_brightness_max_delta: 0.2
random_contrast_prob: 0.8
random_contrast_lower: 0.7
random_contrast_upper: 1.0
random_hue_prob: 0.2
random_hue_max_delta: 0.10
random_saturation_prob: 0.8
random_saturation_lower: 0.6
random_saturation_upper: 1.4
""", options)
self._preprocess(options, "preprocess_%i" % (index))
def test_parse_texts(self):
tf.reset_default_graph()
tokens = tf.placeholder(dtype=tf.string, shape=[None])
offsets = tf.placeholder(dtype=tf.int64, shape=[None])
lengths = tf.placeholder(dtype=tf.int64, shape=[None])
# Lengths of offsets and lengths are not matched.
(num_texts, text_strings, text_lengths) = preprocess.parse_texts(
tokens, offsets, lengths)
with self.test_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
(num_caps, cap_strings, cap_lengths) = sess.run(
[num_texts, text_strings, text_lengths],
feed_dict={
tokens: ["first", "second", "text", "the", "third", "text"],
offsets: [0, 1],
lengths: [1, 2, 3]
})
# Basic tests.
(num_texts, text_strings, text_lengths) = preprocess.parse_texts(
tokens, offsets, lengths)
with self.test_session() as sess:
(num_caps, cap_strings, cap_lengths) = sess.run(
[num_texts, text_strings, text_lengths],
feed_dict={
tokens: ["first", "second", "text", "the", "third", "text"],
offsets: [0, 1, 3],
lengths: [1, 2, 3]
})
self.assertEqual(num_caps, 3)
self.assertAllEqual(cap_strings,
[[b"first", b"", b""], [b"second", b"text", b""],
[b"the", b"third", b"text"]])
self.assertAllEqual(cap_lengths, [1, 2, 3])
if __name__ == '__main__':
    # Discover and run all PreprocessTest cases via TensorFlow's test runner.
    tf.test.main()
| 33 | 78 | 0.614325 |
7168fdbc7d87b6c2af0b4dcc9974ba06fefc57dd | 963 | py | Python | steemexchange/deep_eq.py | cryptomental/python-goloslib | 0766f9b48e478bc3cdd18b22a6b5867b82a9f81e | [
"MIT"
] | 1 | 2017-04-08T05:08:35.000Z | 2017-04-08T05:08:35.000Z | steemexchange/deep_eq.py | cryptomental/python-goloslib | 0766f9b48e478bc3cdd18b22a6b5867b82a9f81e | [
"MIT"
] | null | null | null | steemexchange/deep_eq.py | cryptomental/python-goloslib | 0766f9b48e478bc3cdd18b22a6b5867b82a9f81e | [
"MIT"
def deep_eq(_v1, _v2):
    """Recursively compare two values for structural equality.

    Dictionaries are compared key-by-key (key sets must match exactly),
    other iterables element-by-element in order, and everything else with
    ``==``.  Strings are compared directly even though they are iterable,
    to avoid unbounded recursion.  Returns a truthy value when the two
    structures are deeply equal.
    """
    # Imported lazily so the module has no import-time dependencies.
    # (The original also imported ``types``, which was never used.)
    import operator

    def _deep_dict_eq(d1, d2):
        k1 = sorted(d1.keys())
        k2 = sorted(d2.keys())
        if k1 != k2:  # keys should be exactly equal
            return False
        return sum(deep_eq(d1[k], d2[k]) for k in k1) == len(k1)

    def _deep_iter_eq(l1, l2):
        if len(l1) != len(l2):
            return False
        return sum(deep_eq(v1, v2) for v1, v2 in zip(l1, l2)) == len(l1)

    op = operator.eq
    c1, c2 = (_v1, _v2)

    # guard against strings because they are also iterable
    # and will consistently cause a RuntimeError (maximum recursion limit reached)
    if isinstance(_v1, str):
        return op(c1, c2)

    if isinstance(_v1, dict):
        op = _deep_dict_eq
    else:
        try:
            c1, c2 = (list(iter(_v1)), list(iter(_v2)))
        except TypeError:
            c1, c2 = _v1, _v2
        else:
            op = _deep_iter_eq

    return op(c1, c2)
| 26.75 | 82 | 0.554517 |
6ae63ca292b1b18856424f4364423b93970d6624 | 1,515 | py | Python | samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateArtifact
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_async]
from google.cloud import aiplatform_v1
async def sample_create_artifact():
    """Illustrative (generated) snippet: create a Metadata Artifact via the async client."""
    # Create a client
    client = aiplatform_v1.MetadataServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1.CreateArtifactRequest(
        parent="parent_value",
    )

    # Make the request
    response = await client.create_artifact(request=request)

    # Handle the response
    print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_async]
| 32.934783 | 85 | 0.768317 |
675719b1765f6a976e411657fb798de94180f6c5 | 3,930 | py | Python | examples/asr/speech_to_text_infer.py | ttyio/NeMo | ad0edf2e6910b7b6de3ae93fba9b728f6479c50e | [
"Apache-2.0"
] | 1 | 2021-01-26T21:54:36.000Z | 2021-01-26T21:54:36.000Z | examples/asr/speech_to_text_infer.py | aiskumo/NeMo | b51a39f9834ad50db77c4246aeb6e2349695add5 | [
"Apache-2.0"
] | null | null | null | examples/asr/speech_to_text_infer.py | aiskumo/NeMo | b51a39f9834ad50db77c4246aeb6e2349695add5 | [
"Apache-2.0"
] | 2 | 2021-02-04T14:45:50.000Z | 2021-02-04T14:56:05.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script serves three goals:
(1) Demonstrate how to use NeMo Models outside of PytorchLightning
(2) Shows example of batch ASR inference
(3) Serves as CI test for pre-trained checkpoint
"""
from argparse import ArgumentParser
import torch
from nemo.collections.asr.metrics.wer import WER, word_error_rate
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
    from torch.cuda.amp import autocast
except ImportError:
    # Older torch without AMP support: fall back to a no-op context manager so
    # the ``with autocast():`` usage below still works (mixed precision off).
    from contextlib import contextmanager

    @contextmanager
    def autocast(enabled=None):
        yield


# Whether a CUDA device is available; decides .cuda() placement in main().
can_gpu = torch.cuda.is_available()
def main():
    """Batch-transcribe a manifest with a CTC ASR model and verify WER tolerance.

    Loads a local ``.nemo`` checkpoint or a pretrained NGC model, runs greedy
    CTC decoding over the test dataloader, and raises ValueError when the
    word error rate exceeds ``--wer_tolerance``.
    """

    def _str2bool(value):
        # ``type=bool`` in argparse is a trap: bool("False") is True, so
        # ``--normalize_text False`` would silently stay enabled.  Parse the
        # common textual spellings explicitly instead.
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ("yes", "true", "t", "1")

    parser = ArgumentParser()
    parser.add_argument(
        "--asr_model", type=str, default="QuartzNet15x5Base-En", required=True, help="Pass: 'QuartzNet15x5Base-En'",
    )
    parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--wer_tolerance", type=float, default=1.0, help="used by test")
    parser.add_argument(
        "--normalize_text", default=True, type=_str2bool, help="Normalize transcripts or not. Set to False for non-English."
    )
    args = parser.parse_args()
    torch.set_grad_enabled(False)

    # Resolve the model: local checkpoint vs. NGC cloud download.
    if args.asr_model.endswith('.nemo'):
        logging.info(f"Using local ASR model from {args.asr_model}")
        asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model)
    else:
        logging.info(f"Using NGC cloud ASR model {args.asr_model}")
        asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)

    asr_model.setup_test_data(
        test_data_config={
            'sample_rate': 16000,
            'manifest_filepath': args.dataset,
            'labels': asr_model.decoder.vocabulary,
            'batch_size': args.batch_size,
            'normalize_transcripts': args.normalize_text,
        }
    )
    if can_gpu:
        asr_model = asr_model.cuda()
    asr_model.eval()

    # Map label indices back to characters for rebuilding reference strings.
    labels_map = {i: asr_model.decoder.vocabulary[i] for i in range(len(asr_model.decoder.vocabulary))}
    wer = WER(vocabulary=asr_model.decoder.vocabulary)
    hypotheses = []
    references = []
    for test_batch in asr_model.test_dataloader():
        if can_gpu:
            test_batch = [x.cuda() for x in test_batch]
        with autocast():
            log_probs, encoded_len, greedy_predictions = asr_model(
                input_signal=test_batch[0], input_signal_length=test_batch[1]
            )
        hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions)
        for batch_ind in range(greedy_predictions.shape[0]):
            seq_len = test_batch[3][batch_ind].cpu().detach().numpy()
            seq_ids = test_batch[2][batch_ind].cpu().detach().numpy()
            reference = ''.join([labels_map[c] for c in seq_ids[0:seq_len]])
            references.append(reference)
        del test_batch  # free GPU memory between batches

    wer_value = word_error_rate(hypotheses=hypotheses, references=references)
    if wer_value > args.wer_tolerance:
        raise ValueError(f"Got WER of {wer_value}. It was higher than {args.wer_tolerance}")
    logging.info(f'Got WER of {wer_value}. Tolerance was {args.wer_tolerance}')
main() # noqa pylint: disable=no-value-for-parameter
| 38.910891 | 119 | 0.695929 |
0ce74d8e5fe02fb630c722b208a5b443f2ba942c | 4,684 | py | Python | sauce.py | brunogenaro/webex-js-sdk | c32ff65d1b8b2e45e682fea5335ff4df5d758fd1 | [
"MIT"
] | null | null | null | sauce.py | brunogenaro/webex-js-sdk | c32ff65d1b8b2e45e682fea5335ff4df5d758fd1 | [
"MIT"
] | null | null | null | sauce.py | brunogenaro/webex-js-sdk | c32ff65d1b8b2e45e682fea5335ff4df5d758fd1 | [
"MIT"
] | null | null | null | import csv
import os
import subprocess
import threading

# Gather the packages to test.
PREFIX = './packages/node_modules/'
CISCOSPARK = os.path.join(PREFIX, '@ciscospark')
WEBEX = os.path.join(PREFIX, '@webex')

# Environment variables for running the suite against production services.
PROD_ENV_VARS = {
    'CONVERSATION_SERVICE': 'https://conv-a.wbx2.com/conversation/api/v1',
    'ENCRYPTION_SERVICE_URL': 'https://encryption-a.wbx2.com',
    'IDBROKER_BASE_URL': 'https://idbroker.webex.com',
    'IDENTITY_BASE_URL': 'https://identity.webex.com',
    'U2C_SERVICE_URL': 'https://u2c.wbx2.com/u2c/api/v1',
    'WDM_SERVICE_URL': 'https://wdm-a.wbx2.com/wdm/api/v1',
    # Logging
    'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
    # Enable CI for Sauce Labs
    'CI': 'true'
}

# Environment variables for running the suite against integration services.
INT_ENV_VARS = {
    # Environments
    'ATLAS_SERVICE_URL': 'https://atlas-intb.ciscospark.com/admin/api/v1',
    'CONVERSATION_SERVICE': 'https://conversation-intb.ciscospark.com/conversation/api/v1',
    'ENCRYPTION_SERVICE_URL': 'https://encryption-intb.ciscospark.com/encryption/api/v1',
    # Do not use 'https://hydra-intb.ciscospark.com/v1' for Hydra. CI expects 'apialpha'.
    'HYDRA_SERVICE_URL': 'https://apialpha.ciscospark.com/v1/',
    'IDBROKER_BASE_URL': 'https://idbrokerbts.webex.com',
    'IDENTITY_BASE_URL': 'https://identitybts.webex.com',
    'U2C_SERVICE_URL': 'https://u2c-intb.ciscospark.com/u2c/api/v1',
    'WDM_SERVICE_URL': 'https://wdm-intb.ciscospark.com/wdm/api/v1',
    'WHISTLER_API_SERVICE_URL': 'https://whistler.onint.ciscospark.com/api/v1',
    # Logging
    'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
    # Enable CI for Sauce Labs
    'CI': 'true'
}

OUTPUT_DIR = 'output'
OUTPUT_FILE_PATH = os.path.join(OUTPUT_DIR, 'test-comparison.csv')
TEST_COMMAND = 'npm run sauce:run -- npm test -- --packages %s'

# Packages excluded from the run.  NOTE: the original list was missing commas,
# so implicit string concatenation silently merged the first two entries into
# one bogus name ('@webex/test-helper-server@webex/internal-plugin-calendar').
SKIP_PACKAGES = [
    '@webex/test-helper-server',  # no tests
    '@webex/internal-plugin-calendar',  # no tests
    '@webex/plugin-webhooks',  # no tests
]
def should_include_package(path_name, name):
    """Return True when ``path_name/name`` is a directory and its scoped name is not skipped."""
    scoped_name = os.path.join(os.path.basename(path_name), name)
    if scoped_name in SKIP_PACKAGES:
        return False
    return os.path.isdir(os.path.join(path_name, name))
def get_package_names(path_name):
    """List scoped package names (namespace/name) found under ``path_name``."""
    namespace = path_name.replace(PREFIX, '')
    names = []
    for entry in os.listdir(path_name):
        if should_include_package(path_name, entry):
            names.append(os.path.join(namespace, entry))
    return names
def run_subprocess(bash_command, env_vars):
    """Run ``bash_command`` (split on whitespace) with ``env_vars`` layered over
    the current environment, wait for it, and return its exit code."""
    child_env = os.environ.copy()
    child_env.update(env_vars)
    proc = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE, env=child_env)
    # Drain stdout and wait for termination; the output itself is discarded.
    proc.communicate()
    return proc.returncode
class bcolors:
    """ANSI escape sequences used to colorize console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_result(return_code, prefix='Tests are a...'):
    """Print a colored success/failure line for ``return_code`` (0 means success)."""
    if return_code == 0:
        print(bcolors.OKGREEN + prefix + 'success.' + bcolors.ENDC)
        return
    print(bcolors.FAIL + prefix + 'failure.' + bcolors.ENDC)
def run_test(package, environment):
    """Run the Sauce test suite for ``package`` against ``environment``.

    ``environment`` is 'integration' or 'production'; returns the subprocess
    exit code (0 on success).
    """
    # ``==`` instead of ``is``: identity comparison against a string literal
    # only worked by accident of CPython string interning and triggers a
    # SyntaxWarning on modern Pythons.
    env_vars = INT_ENV_VARS if environment == 'integration' else PROD_ENV_VARS
    print(bcolors.OKBLUE + 'Testing `%s` on %s...' % (package, environment) + bcolors.ENDC)
    bash_command = TEST_COMMAND % package
    return_code = run_subprocess(bash_command, env_vars)
    print_result(return_code, prefix='Testing `%s` on %s...' % (package, environment))
    return return_code
def run_env_tests(package, writer, csv_file):
    """Test ``package`` on both environments and append a CSV row with both exit codes."""
    production_code = run_test(package, 'production')
    integration_code = run_test(package, 'integration')
    writer.writerow([package, production_code, integration_code])
    # Flush so partial results survive an interrupted run.
    csv_file.flush()
def run_tests_in_sequence(packages, writer, csv_file):
    """Run the per-package environment tests one package at a time."""
    for pkg in packages:
        run_env_tests(pkg, writer, csv_file)
def run_tests_in_parallel(packages, writer, csv_file):
    """Run the per-package environment tests concurrently, one thread per package."""
    workers = [
        threading.Thread(target=run_env_tests, args=(pkg, writer, csv_file))
        for pkg in packages
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def main():
    """Discover all packages, test each on both environments, and write a CSV summary."""
    ciscospark_packages = get_package_names(CISCOSPARK)
    webex_packages = get_package_names(WEBEX)
    packages = ciscospark_packages + webex_packages
    print ('Skipping %d packages: %s' % (len(SKIP_PACKAGES), ', '.join(SKIP_PACKAGES)))
    print('Testing %d packages...' % len(packages))
    try:
        os.mkdir(OUTPUT_DIR)
    except OSError:
        # Directory already exists (or cannot be created); proceed either way.
        pass
    # (Removed a leftover unused ``threads = []`` from an earlier parallel
    # version; this path runs sequentially.)
    # NOTE(review): opening the CSV in 'wb' is Python 2 csv style; on Python 3
    # this would need open(..., 'w', newline='') -- confirm target interpreter.
    with open(OUTPUT_FILE_PATH, 'wb') as csv_file:
        writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Package', 'Production exit code', 'Integration exit code'])
        run_tests_in_sequence(packages, writer, csv_file)
    print('Wrote output to: %s' % OUTPUT_FILE_PATH)
    print('Done.')
# Standard script entry point.
if __name__ == "__main__":
    main()
| 34.441176 | 117 | 0.720111 |
e5397fc5e247d992922943daff3096634e31cc47 | 2,985 | py | Python | paasta_tools/mesos/task.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,711 | 2015-11-10T18:04:56.000Z | 2022-03-23T08:53:16.000Z | paasta_tools/mesos/task.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,689 | 2015-11-10T17:59:04.000Z | 2022-03-31T20:46:46.000Z | paasta_tools/mesos/task.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 267 | 2015-11-10T19:17:16.000Z | 2022-02-08T20:59:52.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import a_sync
from . import exceptions
from . import framework
from . import mesos_file
from paasta_tools.async_utils import async_ttl_cache
class Task:
    """Read-only wrapper around a Mesos task dict fetched from the master.

    Holds the raw task JSON in ``__items`` and resolves related objects
    (slave, framework, executor) lazily through the master's async API.
    """

    # Matches "(Command: ...)" inside an executor name.
    cmd_re = re.compile(r"\(Command: (.+)\)")

    def __init__(self, master, items):
        self.master = master
        self.__items = items

    def __str__(self):
        # "<slave>:<task id>"; blocks on the async slave lookup.
        return "{}:{}".format(a_sync.block(self.slave), self["id"])

    def __getitem__(self, name):
        # Direct access into the raw task dict.
        return self.__items[name]

    async def executor(self):
        return await (await self.slave()).task_executor(self["id"])

    async def framework(self):
        return framework.Framework(await self.master.framework(self["framework_id"]))

    @async_ttl_cache(cleanup_self=True)
    async def directory(self):
        # Sandbox directory of this task's executor; empty string when the
        # executor is gone.
        try:
            return (await self.executor())["directory"]
        except exceptions.MissingExecutor:
            return ""

    @async_ttl_cache(cleanup_self=True)
    async def slave(self):
        return await self.master.slave(self["slave_id"])

    async def file(self, path):
        return mesos_file.File(await self.slave(), self, path)

    async def file_list(self, path):
        # NOTE(review): ``self.directory`` is referenced here without being
        # called/awaited even though it is an async method above -- confirm
        # whether ``await self.directory()`` was intended for os.path.join.
        return await (await self.slave()).file_list(os.path.join(self.directory, path))

    async def stats(self):
        # Executor resource statistics; {} when the executor is gone.
        try:
            return await (await self.slave()).task_stats(self["id"])
        except exceptions.MissingExecutor:
            return {}

    async def cpu_time(self):
        # User + system CPU seconds consumed so far.
        st = await self.stats()
        secs = st.get("cpus_user_time_secs", 0) + st.get("cpus_system_time_secs", 0)
        return secs

    async def cpu_limit(self):
        return (await self.stats()).get("cpus_limit", 0)

    async def mem_limit(self):
        return (await self.stats()).get("mem_limit_bytes", 0)

    async def rss(self):
        return (await self.stats()).get("mem_rss_bytes", 0)

    async def command(self):
        # Extract the "(Command: ...)" portion of the executor name; "none"
        # when there is no executor or the pattern does not match.
        try:
            result = self.cmd_re.search((await self.executor())["name"])
        except exceptions.MissingExecutor:
            result = None
        if not result:
            return "none"
        return result.group(1)

    async def user(self):
        return (await self.framework()).user
0a0ef0d4018e1eb8e64caf05cc7062d1ba94d7bb | 2,374 | py | Python | testcases/technical_ratio_test.py | daxlab/pyalgotrade | 5517c2644da97e7ef143d344d813232d6845a29f | [
"Apache-2.0"
] | 1,000 | 2016-01-26T12:10:11.000Z | 2022-03-01T23:59:50.000Z | testcases/technical_ratio_test.py | daxlab/pyalgotrade | 5517c2644da97e7ef143d344d813232d6845a29f | [
"Apache-2.0"
] | 22 | 2016-01-26T15:14:09.000Z | 2019-01-30T02:36:38.000Z | testcases/technical_ratio_test.py | daxlab/pyalgotrade | 5517c2644da97e7ef143d344d813232d6845a29f | [
"Apache-2.0"
] | 613 | 2016-01-27T01:02:30.000Z | 2022-03-21T01:38:58.000Z | # PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import common
from pyalgotrade.technical import ratio
from pyalgotrade import dataseries
class TestCase(common.TestCase):
    """Unit tests for the technical ratio indicator."""

    def __buildRatio(self, values, ratioMaxLen=dataseries.DEFAULT_MAX_LEN):
        """Feed *values* into a fresh SequenceDataSeries and return its Ratio."""
        seqDS = dataseries.SequenceDataSeries()
        # Note: the local name is deliberately not `ratio` so the module
        # reference stays visible.
        ratioDS = ratio.Ratio(seqDS, ratioMaxLen)
        for value in values:
            seqDS.append(value)
        return ratioDS

    def testSimple(self):
        ratioDS = self.__buildRatio([1, 2, 1])
        # First value has no predecessor, so the ratio is None.
        for i, expected in enumerate([None, 1, -0.5]):
            self.assertEqual(ratioDS[i], expected)
        self.assertEqual(ratioDS[-1], -0.5)
        with self.assertRaises(IndexError):
            ratioDS[3]
        self.assertEqual(ratioDS[-2], ratioDS[1])
        self.assertEqual(ratioDS[-1], ratioDS[2])
        self.assertEqual(len(ratioDS.getDateTimes()), 3)
        for dateTime in ratioDS.getDateTimes():
            self.assertEqual(dateTime, None)

    def testNegativeValues(self):
        ratioDS = self.__buildRatio([-1, -2, -1])
        for i, expected in enumerate([None, -1, 0.5]):
            self.assertEqual(ratioDS[i], expected)
        self.assertEqual(ratioDS[-1], 0.5)
        with self.assertRaises(IndexError):
            ratioDS[3]
        self.assertEqual(ratioDS[-2], ratioDS[1])
        self.assertEqual(ratioDS[-1], ratioDS[2])
        self.assertEqual(len(ratioDS.getDateTimes()), 3)
        for dateTime in ratioDS.getDateTimes():
            self.assertEqual(dateTime, None)

    def testBounded(self):
        # With maxLen == 2 the leading None is evicted from the series.
        ratioDS = self.__buildRatio([-1, -2, -1], 2)
        self.assertEqual(ratioDS[0], -1)
        self.assertEqual(ratioDS[1], 0.5)
        self.assertEqual(len(ratioDS), 2)
| 32.972222 | 79 | 0.655434 |
67809467ca214294ee5948431422cf8f56a2d356 | 3,861 | py | Python | hsdecomp/infer.py | popjy0312/hsdecomp | f89a8d6c98c864fa45ee80b92221a973d81bac31 | [
"MIT"
] | 99 | 2016-01-05T00:43:33.000Z | 2021-08-06T15:23:34.000Z | hsdecomp/infer.py | popjy0312/hsdecomp | f89a8d6c98c864fa45ee80b92221a973d81bac31 | [
"MIT"
] | 5 | 2017-12-29T09:02:56.000Z | 2020-12-07T01:54:19.000Z | hsdecomp/infer.py | popjy0312/hsdecomp | f89a8d6c98c864fa45ee80b92221a973d81bac31 | [
"MIT"
] | 21 | 2016-01-27T21:24:41.000Z | 2020-11-28T09:11:18.000Z | from hsdecomp import show, optimize
from hsdecomp.types import *
# Type of GHC's Bool as laid out in the info tables: tag 1 is False, tag 2 is True.
bool_type = EnumType(constructor_names = {1: 'False', 2: 'True'}, complete = True)
# Hand-written type signatures for well-known GHC.Classes comparison closures,
# keyed by symbol name. Each is a curried three-argument function whose final
# result is Bool.
known_types = {
    'ghczmprim_GHCziClasses_zeze_info': FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = bool_type))),
    'ghczmprim_GHCziClasses_znze_info': FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = bool_type))),
    'ghczmprim_GHCziClasses_zgze_info': FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = bool_type))),
    'ghczmprim_GHCziClasses_zlze_info': FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = bool_type))),
    'ghczmprim_GHCziClasses_zg_info': FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = bool_type))),
    'ghczmprim_GHCziClasses_zl_info': FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = FunctionType(arg = UnknownType(), result = bool_type))),
}
def infer_type_for(settings, interps, types, pointer):
    """Populate types[pointer] unless a type has already been inferred for it."""
    if pointer in types:
        return
    # Seed with a known signature for recognised static closures, else unknown.
    name = None
    if isinstance(pointer, StaticValue):
        name = show.get_name_for_address(settings, pointer.value)
    types[pointer] = known_types[name] if name in known_types else UnknownType()
    # Refine from the pointer's interpretation when one is available.
    if pointer in interps:
        types[pointer] = infer_type(settings, interps, types, interps[pointer])
def infer_type(settings, interps, types, interp):
    """Infer the type of an interpretation node, recursing through the tree."""
    if isinstance(interp, Apply):
        # Applying n arguments peels n result layers off the function type.
        result_ty = infer_type(settings, interps, types, interp.func)
        for _ in interp.pattern:
            if not isinstance(result_ty, FunctionType):
                assert isinstance(result_ty, UnknownType)
                break
            result_ty = result_ty.result
        return result_ty
    if isinstance(interp, Lambda):
        # Wrap the body's type in one FunctionType per bound argument;
        # 'v' marks a State#-like (void) argument.
        body_ty = infer_type(settings, interps, types, interp.body)
        for pat in interp.arg_pattern:
            arg_ty = StateType() if pat == 'v' else UnknownType()
            body_ty = FunctionType(arg = arg_ty, result = body_ty)
        return body_ty
    if isinstance(interp, Pointer):
        # Delegate to the memoising helper and read back the cached result.
        infer_type_for(settings, interps, types, interp.pointer)
        return types[interp.pointer]
    return UnknownType()
def run_rename_tags(settings, interps, types):
    """Apply constructor-tag renaming to every interpretation via the rewrite pass."""
    def visit(interp):
        rename_tags(settings, interps, types, interp)
    optimize.run_rewrite_pass(interps, visit)
def rename_tags(settings, interps, types, interp):
    """Replace numeric case tags with constructor names, in place.

    Only acts on Case nodes whose scrutinee has an inferred EnumType. If the
    enum is complete and exactly one constructor is unaccounted for, the
    DefaultTag is resolved to that missing constructor as well.
    """
    if isinstance(interp, Case):
        scrut_ty = infer_type(settings, interps, types, interp.scrutinee)
        if isinstance(scrut_ty, EnumType):
            # Record which constructor tags appear explicitly (dict used as an
            # ordered set; values are irrelevant).
            seen_tags = {}
            for i in range(len(interp.tags)):
                tag = interp.tags[i]
                if isinstance(tag, NumericTag):
                    seen_tags[tag.value] = None
                    interp.tags[i] = NamedTag(name = scrut_ty.constructor_names[tag.value], value = tag.value)
            if scrut_ty.complete and len(interp.tags) == len(scrut_ty.constructor_names):
                # All alternatives present: exactly one must be the default.
                assert len(seen_tags) == len(scrut_ty.constructor_names) - 1
                # Constructor tags are 1-based; find the one not seen explicitly.
                for i in range(len(interp.tags)):
                    if not i+1 in seen_tags:
                        missing_tag = i+1
                        break
                for i in range(len(interp.tags)):
                    if isinstance(interp.tags[i], DefaultTag):
                        interp.tags[i] = NamedTag(name = scrut_ty.constructor_names[missing_tag], value = missing_tag)
15774919e869b1cc197dd28853c9be59d63d903d | 954 | py | Python | axcell/pipeline_logger.py | Kabongosalomon/axcell | f9c74910561f6064a04a10118824c99e871f8a38 | [
"Apache-2.0"
] | 335 | 2020-05-07T19:57:36.000Z | 2022-03-16T07:05:51.000Z | axcell/pipeline_logger.py | doc22940/axcell | b41c1623377d89c3c45a61907f0a47ea029269de | [
"Apache-2.0"
] | 16 | 2020-06-12T16:43:29.000Z | 2021-11-24T11:19:09.000Z | axcell/pipeline_logger.py | doc22940/axcell | b41c1623377d89c3c45a61907f0a47ea029269de | [
"Apache-2.0"
] | 50 | 2020-05-07T20:35:18.000Z | 2022-02-16T06:37:31.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import re
class PipelineLogger:
    """Dispatches pipeline step events to observers registered by step-name pattern.

    Observers are stored as (compiled_regex, callable) pairs; a call to the
    logger invokes every observer whose pattern matches the step name.
    """

    def __init__(self):
        # List of (compiled pattern, observer) pairs, in registration order.
        self.observers = []

    def reset(self):
        """Remove all registered observers."""
        self.observers = []

    @staticmethod
    def _compile(pattern):
        # Accept either a regex string or an already-compiled pattern.
        return re.compile(pattern) if isinstance(pattern, str) else pattern

    def register(self, pattern, observer):
        """Register *observer* for steps matching *pattern* (str or compiled regex)."""
        self.observers.append((self._compile(pattern), observer))

    def unregister(self, pattern, observer):
        """Unregister *observer*.

        If *pattern* is None, the observer is removed for every pattern it was
        registered under; otherwise only the (pattern, observer) pairing is removed.
        """
        if pattern is None:
            self.observers = [(p, o) for p, o in self.observers if o != observer]
        else:
            pattern = self._compile(pattern)
            self.observers = [(p, o) for p, o in self.observers
                              if o != observer or p.pattern != pattern.pattern]

    def __call__(self, step, **args):
        """Emit an event: invoke every observer whose pattern matches *step*."""
        for pattern, observer in self.observers:
            if pattern.match(step):
                observer(step, **args)
pipeline_logger = PipelineLogger()
| 28.909091 | 113 | 0.601677 |
17fd0827c1c6c66041d22c32854bdaf76a00be79 | 19,288 | py | Python | AutomatedTesting/Gem/PythonTests/physics/C4976201_RigidBody_MassIsAssigned.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-08-08T19:54:51.000Z | 2021-08-08T19:54:51.000Z | AutomatedTesting/Gem/PythonTests/physics/C4976201_RigidBody_MassIsAssigned.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | 2 | 2022-01-13T04:29:38.000Z | 2022-03-12T01:05:31.000Z | AutomatedTesting/Gem/PythonTests/physics/C4976201_RigidBody_MassIsAssigned.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | null | null | null | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test case ID : C4976201
# Test Case Title : Verify that the value assigned to the Mass of the object, gets actually assigned to the object
# fmt: off
class Tests:
    """(pass_message, fail_message) tuples for each check, one set per test iteration."""
    # test iteration 1
    enter_game_mode_1 = ("Entered game mode first time", "Failed to enter game mode first time")
    ProjectileSphere_exists_1 = ("ProjectileSphere entity found first time", "ProjectileSphere entity not found first time")
    TargetSphere_exists_1 = ("TargetSphere entity found first time", "TargetSphere entity not found first time")
    Trigger1_exists_1 = ("Trigger1 entity found first time", "Trigger1 entity not found first time")
    Trigger2_exists_1 = ("Trigger2 entity found first time", "Trigger2 entity not found first time")
    Trigger3_exists_1 = ("Trigger3 entity found first time", "Trigger3 entity not found first time")
    TargetSphere_mass_1 = ("Mass of TargetSphere was set to 1.0", "Mass of TargetSphere was not set to 1.0")
    spheres_collided_1 = ("ProjectileSphere and TargetSphere collided first time", "Timed out before ProjectileSphere & 2 collided first time")
    stopped_correctly_1 = ("TargetSphere hit Trigger1 & Trigger2 but not Trigger_3", "TargetSphere did not stop correctly")
    check_y_1 = ("sphere did not move far from expected in Y direction _1", "TargetSphere moved an unexpected distance in Y direction _1")
    check_z_1 = ("sphere did not move far from expected in Z direction _1", "TargetSphere moved an unexpected distance in Z direction _1")
    exit_game_mode_1 = ("Exited game mode first time", "Couldn't exit game mode first time")
    # test iteration 2
    enter_game_mode_2 = ("Entered game mode second time", "Failed to enter game mode second time")
    ProjectileSphere_exists_2 = ("ProjectileSphere entity found second time", "ProjectileSphere entity not found second time")
    TargetSphere_exists_2 = ("TargetSphere entity found second time", "TargetSphere entity not found second time")
    Trigger1_exists_2 = ("Trigger1 entity found second time", "Trigger1 entity not found second time")
    Trigger2_exists_2 = ("Trigger2 entity found second time", "Trigger2 entity not found second time")
    Trigger3_exists_2 = ("Trigger3 entity found second time", "Trigger3 entity not found second time")
    TargetSphere_mass_2 = ("Mass of TargetSphere was set to 10.0", "Mass of TargetSphere was not set to 10.0")
    spheres_collided_2 = ("ProjectileSphere and TargetSphere collided second time", "Timed out before ProjectileSphere & 2 collided second time")
    stopped_correctly_2 = ("TargetSphere hit Trigger1 but not Trigger2 or Trigger3", "TargetSphere did not stop correctly")
    check_y_2 = ("sphere did not move far from expected in Y direction _2", "TargetSphere moved an unexpected distance in Y direction _2")
    check_z_2 = ("sphere did not move far from expected in Z direction _2", "TargetSphere moved an unexpected distance in Z direction _2")
    exit_game_mode_2 = ("Exited game mode second time", "Couldn't exit game mode second time")
    # test iteration 3
    enter_game_mode_3 = ("Entered game mode third time", "Failed to enter game mode third time")
    ProjectileSphere_exists_3 = ("ProjectileSphere entity found third time", "ProjectileSphere entity not found third time")
    TargetSphere_exists_3 = ("TargetSphere entity found third time", "TargetSphere entity not found third time")
    Trigger1_exists_3 = ("Trigger1 entity found third time", "Trigger1 entity not found third time")
    Trigger2_exists_3 = ("Trigger2 entity found third time", "Trigger2 entity not found third time")
    Trigger3_exists_3 = ("Trigger3 entity found third time", "Trigger3 entity not found third time")
    TargetSphere_mass_3 = ("Mass of TargetSphere was set to 100.0", "Mass of TargetSphere was not set to 100.0")
    spheres_collided_3 = ("ProjectileSphere and TargetSphere collided third time", "Timed out before ProjectileSphere & 2 collided third time")
    stopped_correctly_3 = ("TargetSphere did not hit Trigger1, Trigger2, or Trigger3", "TargetSphere hit one or more triggers before stopping")
    check_y_3 = ("sphere did not move far from expected in Y direction _3", "TargetSphere moved an unexpected distance in Y direction _3")
    check_z_3 = ("sphere did not move far from expected in Z direction _3", "TargetSphere moved an unexpected distance in Z direction _3")
    exit_game_mode_3 = ("Exited game mode third time", "Couldn't exit game mode third time")
    # general
    velocity_sizing = ("The velocities are in the correct order of magnitude", "The velocities are not correctly ordered in magnitude")
# fmt: on
def C4976201_RigidBody_MassIsAssigned():
    """
    Summary:
    Checking that the mass set to the object is actually applied via colliding entities
    Level Description:
    ProjectileSphere (entity) - Sphere shaped Mesh; Sphere shaped PhysX Collider;
        PhysX Rigid Body: initial linear velocity in X direction is 5m/s, initial mass 1kg,
        gravity disabled, linear damping default (0.05)
    TargetSphere (entity) - Sphere shaped Mesh; Sphere shaped PhysX Collider;
        PhysX Rigid Body: no initial velocity, initial mass 1kg, gravity disabled, linear damping 1.0
    Expected Behavior:
    The ProjectileSphere entity will float towards TargetSphere entity and then collide with it.
    Because they are the same mass initially, the second sphere will move after collision.
    TargetSphere's mass will be increased and scenario will run again,
    but TargetSphere will have a smaller velocity after collision.
    TargetSphere will then increase mass again and should barely move after the final collision.
    Test Steps:
    1) Open level
    2) Repeat steps 3-9
        3) Enter game mode
        4) Find and setup entities
        5) Set mass of the TargetSphere
        6) Check for collision
        7) Wait for TargetSphere x velocity = 0
        8) Check the triggers
        9) Exit game mode
    10) Verify the velocity of TargetSphere decreased after collision as mass increased
    11) Close the editor
    Note:
    - This test file must be called from the Open 3D Engine Editor command terminal
    - Any passed and failed tests are written to the Editor.log file.
            Parsing the file or running a log_monitor are required to observe the test results.
    :return: None
    """
    import os
    import sys
    import ImportPathHelper as imports
    imports.init()
    import azlmbr.legacy.general as general
    import azlmbr.bus
    import azlmbr
    from editor_python_test_tools.utils import Report
    from editor_python_test_tools.utils import TestHelper as helper
    # Timeouts (seconds) and tolerances used by the checks below.
    MOVEMENT_TIMEOUT = 7.0
    COLLISION_TIMEOUT = 2.0
    VELOCITY_ZERO = 0.01
    Y_Z_BUFFER = 0.01
    TARGET_SPHERE_NAME = "TargetSphere"
    PROJECTILE_SPHERE_NAME = "ProjectileSphere"
    TRIGGER_1_NAME = "Trigger1"
    TRIGGER_2_NAME = "Trigger2"
    TRIGGER_3_NAME = "Trigger3"
    class ProjectileSphere:
        """Wrapper for the moving sphere entity that strikes the target."""
        def __init__(self, test_iteration):
            self.name = PROJECTILE_SPHERE_NAME
            self.test_iteration = test_iteration
            self.timeout_reached = True
            self.id = general.find_game_entity(self.name)
            Report.critical_result(
                Tests.__dict__["ProjectileSphere_exists_" + str(self.test_iteration)], self.id.IsValid()
            )
        def destroy_me(self):
            """Remove this entity from the level so it cannot collide again."""
            azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DestroyGameEntity", self.id)
    class TargetSphere:
        """Wrapper for the struck sphere; tracks mass, collisions and expected trigger hits."""
        def __init__(self, mass_to_assign, stop_before_trigger_name, expected_trigger_pattern, test_iteration):
            self.id = None
            self.name = TARGET_SPHERE_NAME
            self.start_mass = None
            self.mass_to_assign = mass_to_assign
            self.collision_begin = False
            self.after_collision_velocity = None
            self.x_movement_timeout = True
            self.stop_before_trigger_name = stop_before_trigger_name
            self.expected_trigger_pattern = expected_trigger_pattern
            self.test_iteration = test_iteration
            self.collision_ended = False
            self.test_set_mass = self.get_test("TargetSphere_mass_")
            self.test_enter_game_mode = self.get_test("enter_game_mode_")
            self.test_ProjectileSphere_exist = self.get_test("ProjectileSphere_exists_")
            self.test_TargetSphere_exist = self.get_test("TargetSphere_exists_")
            self.test_spheres_collided = self.get_test("spheres_collided_")
            self.test_stop_properly = self.get_test("stopped_correctly_")
            self.test_check_y = self.get_test("check_y_")
            self.test_check_z = self.get_test("check_z_")
            self.test_exit_game_mode = self.get_test("exit_game_mode_")
        def get_test(self, test_prefix):
            # Resolve the Tests tuple for this iteration by name.
            return Tests.__dict__[test_prefix + str(self.test_iteration)]
        def find(self):
            """Locate this entity in the level and report its existence."""
            self.id = general.find_game_entity(self.name)
            Report.critical_result(Tests.__dict__["TargetSphere_exists_" + str(self.test_iteration)], self.id.IsValid())
        def setup_mass(self):
            """Assign the iteration's mass to the rigid body and verify it took effect."""
            self.start_mass = azlmbr.physics.RigidBodyRequestBus(azlmbr.bus.Event, "GetMass", self.id)
            Report.info("{} starting mass: {}".format(self.name, self.start_mass))
            azlmbr.physics.RigidBodyRequestBus(azlmbr.bus.Event, "SetMass", self.id, self.mass_to_assign)
            general.idle_wait_frames(1)  # wait for mass to apply
            mass_after_set = azlmbr.physics.RigidBodyRequestBus(azlmbr.bus.Event, "GetMass", self.id)
            Report.info("{} mass after setting: {}".format(self.name, mass_after_set))
            Report.result(self.test_set_mass, self.mass_to_assign == mass_after_set)
        def current_velocity(self):
            return azlmbr.physics.RigidBodyRequestBus(azlmbr.bus.Event, "GetLinearVelocity", self.id)
        def on_collision_begin(self, args):
            # Handler is connected to the projectile, so args[0] is the other
            # entity in the collision; react only when it is this sphere.
            other_id = args[0]
            if other_id.Equal(self.id):
                Report.info("spheres collision begin")
                self.collision_begin = True
        def on_collision_end(self, args):
            other_id = args[0]
            if other_id.Equal(self.id):
                Report.info("spheres collision end")
                # Capture the post-impact velocity for later comparisons.
                self.after_collision_velocity = self.current_velocity()
                self.collision_ended = True
        def add_collision_handlers(self, projectile_sphere_id):
            """Listen for collision begin/end events on the projectile entity."""
            self.handler = azlmbr.physics.CollisionNotificationBusHandler()
            self.handler.connect(projectile_sphere_id)
            self.handler.add_callback("OnCollisionBegin", self.on_collision_begin)
            self.handler.add_callback("OnCollisionEnd", self.on_collision_end)
        def x_velocity_zero(self):
            """Return True (and clear the timeout flag) once X velocity is ~0."""
            if abs(self.current_velocity().x) < VELOCITY_ZERO:
                Report.info("TargetSphere has stopped moving.")
                self.x_movement_timeout = False
                return True
            return False
        def collision_complete(self):
            return self.collision_begin and self.collision_ended
        def check_y_z_movement_from_collision(self):
            """
            Used to check that the entity has not moved too far in either the Y or Z direction
            """
            def is_within_tolerance(velocity_one_direction):
                return abs(velocity_one_direction) < Y_Z_BUFFER
            Report.info_vector3(self.after_collision_velocity, "Initial Velocity: ")
            Report.result(self.test_check_y, is_within_tolerance(self.after_collision_velocity.y))
            Report.result(self.test_check_z, is_within_tolerance(self.after_collision_velocity.z))
    class Trigger:
        """
        Used in the level to tell if the TargetSphere entity has moved a certain distance.
        There are three triggers set up in the level.
        """
        def __init__(self, name, test_iteration):
            self.name = name
            self.handler = None
            self.triggered = False
            self.test_iteration = test_iteration
            self.id = general.find_game_entity(self.name)
            Report.critical_result(Tests.__dict__[self.name + "_exists_" + str(self.test_iteration)], self.id.IsValid())
            self.setup_handler()
        def on_trigger_enter(self, args):
            """
            This is passed into this object's handler.add_callback().
            """
            other_id = args[0]
            self.triggered = True
            triggered_by_name = azlmbr.entity.GameEntityContextRequestBus(
                azlmbr.bus.Broadcast, "GetEntityName", other_id
            )
            Report.info("{} was triggered by {}.".format(self.name, triggered_by_name))
        def setup_handler(self):
            """
            This is called to setup the handler for this trigger object
            """
            self.handler = azlmbr.physics.TriggerNotificationBusHandler()
            self.handler.connect(self.id)
            self.handler.add_callback("OnTriggerEnter", self.on_trigger_enter)
    class TriggerResultPattern:
        """
        Used to store and determine which triggers were activated and compare to expected
        """
        def __init__(self, trigger1_activated, trigger2_activated, trigger3_activated):
            self.trigger1_activated = trigger1_activated
            self.trigger2_activated = trigger2_activated
            self.trigger3_activated = trigger3_activated
        def __eq__(self, other_pattern):
            """
            Used to determine if two patterns equal/match each other (i.e. Expected VS Actual)
            """
            if isinstance(other_pattern, self.__class__):
                return (
                    self.trigger1_activated == other_pattern.trigger1_activated
                    and self.trigger2_activated == other_pattern.trigger2_activated
                    and self.trigger3_activated == other_pattern.trigger3_activated
                )
            else:
                return False
        def report(self, expect_actual):
            """Log this pattern, labelled with "Expected" or "Actual"."""
            Report.info(
                """TargetSphere {} Triggers:
                Trigger_1: {}
                Trigger_2: {}
                Trigger_3: {}
                """.format(
                    expect_actual, self.trigger1_activated, self.trigger2_activated, self.trigger3_activated
                )
            )
    # One TargetSphere configuration per iteration: heavier spheres are
    # expected to travel shorter distances and hit fewer triggers.
    target_sphere_1kg = TargetSphere(
        mass_to_assign=1.0,
        stop_before_trigger_name=TRIGGER_3_NAME,
        expected_trigger_pattern=TriggerResultPattern(True, True, False),
        test_iteration=1,
    )
    target_sphere_10kg = TargetSphere(
        mass_to_assign=10.0,
        stop_before_trigger_name=TRIGGER_2_NAME,
        expected_trigger_pattern=TriggerResultPattern(True, False, False),
        test_iteration=2,
    )
    target_sphere_100kg = TargetSphere(
        mass_to_assign=100.0,
        stop_before_trigger_name=TRIGGER_1_NAME,
        expected_trigger_pattern=TriggerResultPattern(False, False, False),
        test_iteration=3,
    )
    target_spheres = [target_sphere_1kg, target_sphere_10kg, target_sphere_100kg]
    target_sphere_velocities = {}
    helper.init_idle()
    # 1) Open level
    helper.open_level("Physics", "C4976201_RigidBody_MassIsAssigned")
    # 2) Repeat steps 3-9
    for target_sphere in target_spheres:
        Report.info("***************** Begin Test Iteration {} ******************".format(target_sphere.test_iteration))
        # 3) Enter game mode
        helper.enter_game_mode(target_sphere.test_enter_game_mode)
        # 4) Find and setup entities
        projectile_sphere = ProjectileSphere(target_sphere.test_iteration)
        target_sphere.find()
        target_sphere.add_collision_handlers(projectile_sphere.id)
        trigger_1 = Trigger(TRIGGER_1_NAME, target_sphere.test_iteration)
        trigger_2 = Trigger(TRIGGER_2_NAME, target_sphere.test_iteration)
        trigger_3 = Trigger(TRIGGER_3_NAME, target_sphere.test_iteration)
        # 5) Set mass of the TargetSphere
        target_sphere.setup_mass()
        # 6) Check for collision
        helper.wait_for_condition(target_sphere.collision_complete, COLLISION_TIMEOUT)
        Report.critical_result(target_sphere.test_spheres_collided, target_sphere.collision_complete())
        projectile_sphere.destroy_me()
        Report.info_vector3(
            target_sphere.after_collision_velocity, "Velocity of {} after the collision: ".format(target_sphere.name)
        )
        Report.info("The sphere should stop before touching {}".format(target_sphere.stop_before_trigger_name))
        # 7) Wait for TargetSphere x velocity = 0
        helper.wait_for_condition(target_sphere.x_velocity_zero, MOVEMENT_TIMEOUT)
        if target_sphere.x_movement_timeout is True:
            Report.info("TargetSphere failed to stop moving in the x direction before timeout was reached.")
        # 8) Check the triggers
        actual_trigger_pattern = TriggerResultPattern(trigger_1.triggered, trigger_2.triggered, trigger_3.triggered)
        patterns_match = actual_trigger_pattern == target_sphere.expected_trigger_pattern
        target_sphere.expected_trigger_pattern.report("Expected")
        actual_trigger_pattern.report("Actual")
        Report.result(target_sphere.test_stop_properly, patterns_match)
        target_sphere.check_y_z_movement_from_collision()
        target_sphere_velocities.update({target_sphere.test_iteration: target_sphere.after_collision_velocity.x})
        # 9) Exit game mode
        helper.exit_game_mode(target_sphere.test_exit_game_mode)
        Report.info("~~~~~~~~~~~~~~ Test Iteration {} End ~~~~~~~~~~~~~~~~~~".format(target_sphere.test_iteration))
    # 10) Verify the velocity of TargetSphere decreased after collision as mass increased
    outcome = target_sphere_velocities[1] > target_sphere_velocities[2] > target_sphere_velocities[3]
    Report.result(Tests.velocity_sizing, outcome)
if __name__ == "__main__":
    # Entry point when launched from the O3DE Editor's Python terminal.
    import ImportPathHelper as imports
    imports.init()
    from editor_python_test_tools.utils import Report
    Report.start_test(C4976201_RigidBody_MassIsAssigned)
| 50.624672 | 155 | 0.663262 |
1a445ca6f37cd4d17a568d56c7337c882a76710d | 237 | py | Python | deepdanbooru/model/__init__.py | fredgido/DeepDanbooru | 044a92472bd153e227287be29c6f32ce7feb28a6 | [
"MIT"
] | 4 | 2020-04-01T09:55:31.000Z | 2022-01-07T08:40:52.000Z | deepdanbooru/model/__init__.py | Amaotomiyabi/DeepDanbooru | bf98806390ced6e78e8d4a05e006c1216bf48508 | [
"MIT"
] | null | null | null | deepdanbooru/model/__init__.py | Amaotomiyabi/DeepDanbooru | bf98806390ced6e78e8d4a05e006c1216bf48508 | [
"MIT"
] | 1 | 2020-11-25T06:54:52.000Z | 2020-11-25T06:54:52.000Z | import deepdanbooru.model.layers
import deepdanbooru.model.losses
from .resnet import create_resnet_152
from .resnet import create_resnet_custom_v1
from .resnet import create_resnet_custom_v2
from .resnet import create_resnet_custom_v3
| 29.625 | 43 | 0.877637 |
f8a096bb6e9d7cfb1beb7447fbd542d8288d0406 | 8,766 | py | Python | tests/test_md_utils.py | deklanw/obsidiantools | 8c1903021438f3a421592ab3444bbb25ab47cc36 | [
"CNRI-Python",
"RSA-MD"
] | 59 | 2021-09-14T01:19:56.000Z | 2022-03-30T18:49:30.000Z | tests/test_md_utils.py | deklanw/obsidiantools | 8c1903021438f3a421592ab3444bbb25ab47cc36 | [
"CNRI-Python",
"RSA-MD"
] | 9 | 2021-10-30T11:58:05.000Z | 2022-03-01T21:02:53.000Z | tests/test_md_utils.py | deklanw/obsidiantools | 8c1903021438f3a421592ab3444bbb25ab47cc36 | [
"CNRI-Python",
"RSA-MD"
] | 8 | 2021-09-18T15:00:28.000Z | 2022-02-22T14:53:57.000Z | import pytest
from glob import glob
from obsidiantools.md_utils import (_get_all_wikilinks_from_html_content,
_get_all_embedded_files_from_html_content,
_get_unique_wikilinks,
_get_all_md_link_info_from_ascii_plaintext,
_get_unique_md_links_from_ascii_plaintext,
_get_html_from_md_file,
_get_ascii_plaintext_from_md_file, get_embedded_files,
get_front_matter, get_tags, get_wikilinks)
@pytest.fixture
def html_wikilinks_stub():
    """HTML-ish text with wikilinks, aliases and embedded files for link-parsing tests."""
    html = r"""
    <pre># Intro</pre>
    This is a very basic string representation.
    ## Shopping list
    Here is a **[[Shopping list | shopping list]]**:
    - [[Bananas]]: also have these for [[Banana splits]]
    - [[Apples]]
    - [[Flour]]: not a [[Flower | flower]]
    Oh and did I say [[Bananas | BANANAS]]??
    There's no link for [Cherries]. Though there is for [[Durians]].
    ![[Egg.jpg]]
    ## Drinks
    - [[Apples|Freshly squeezed apple juice]]
    - [[Bananas|Banana smoothie]]
    - [[Protein shakes#Protein powder|Vanilla whey protein]]
    ![[Easter egg.png]]
    ![[Egg.jpg | 125]]
    """
    return html
@pytest.fixture
def txt_md_links_stub():
    """Plaintext with valid and deliberately-malformed markdown links."""
    text = r"""
    * [The Times 03/Jan/2009 Chancellor on brink of second bailout for banks](<https://www.thetimes.co.uk/article/chancellor-alistair-darling-on-brink-of-second-bailout-for-banks-n9l382mn62h>)
    * [Chancellor Alistair Darling on brink of second bailout for banks](<https://www.thetimes.co.uk/article/chancellor-alistair-darling-on-brink-of-second-bailout-for-banks-n9l382mn62h>)
    * [This is a statement inside square brackets]
    * (This is a statement inside parentheses)
    * (<https://www.markdownguide.org/basic-syntax/>)[Getting the bracket types in wrong order]
    * [Markdown basic syntax - <> not in the link](https://www.markdownguide.org/basic-syntax/)
    * []()
    * [()]
    * ([])
    * [([)
    * ([)]
    [ADA](<https://cardano.org/>)
    """
    return text
@pytest.fixture
def txt_sussudio_stub():
    """HTML rendered from the Sussudio note in the vault stub."""
    text = _get_html_from_md_file('tests/vault-stub/Sussudio.md')
    return text
def test_get_all_wikilinks_from_html_content(html_wikilinks_stub):
    """Links come back in order of appearance, aliases stripped, duplicates kept."""
    observed = _get_all_wikilinks_from_html_content(html_wikilinks_stub)
    expected = [
        'Shopping list', 'Bananas', 'Banana splits',
        'Apples',
        'Flour', 'Flower',
        'Bananas',
        'Durians',
        'Apples', 'Bananas', 'Protein shakes',
    ]
    assert observed == expected
def test_get_all_wikilinks_from_html_content_keep_aliases(html_wikilinks_stub):
    """With remove_aliases=False the raw link text (including aliases) is returned."""
    observed = _get_all_wikilinks_from_html_content(html_wikilinks_stub,
                                                    remove_aliases=False)
    expected = [
        'Shopping list | shopping list',
        'Bananas', 'Banana splits',
        'Apples',
        'Flour', 'Flower | flower',
        'Bananas | BANANAS',
        'Durians',
        'Apples|Freshly squeezed apple juice',
        'Bananas|Banana smoothie',
        'Protein shakes#Protein powder|Vanilla whey protein',
    ]
    assert observed == expected
def test_get_all_embedded_files_from_html_content(html_wikilinks_stub):
    """Embedded files are listed in order with display aliases removed."""
    observed = _get_all_embedded_files_from_html_content(html_wikilinks_stub)
    assert observed == ['Egg.jpg', 'Easter egg.png', 'Egg.jpg']
def test_get_all_embedded_files_from_html_content_keep_aliases(
        html_wikilinks_stub):
    """With remove_aliases=False the sizing alias (e.g. '| 125') is retained."""
    observed = _get_all_embedded_files_from_html_content(html_wikilinks_stub,
                                                         remove_aliases=False)
    assert observed == ['Egg.jpg', 'Easter egg.png', 'Egg.jpg | 125']
def test_get_unique_wikilinks_from_html_content(html_wikilinks_stub):
    """Unique wikilinks keep first-seen order with aliases removed.

    Also checks the function returns a list. The original test asserted
    ``isinstance(expected_results, list)``, which is vacuous because
    ``expected_results`` is the literal defined two lines above; the check
    is now made against the function's actual return value.
    """
    actual_results = _get_unique_wikilinks(
        html_wikilinks_stub, remove_aliases=True)
    expected_results = ['Shopping list',
                        'Bananas', 'Banana splits',
                        'Apples',
                        'Flour', 'Flower',
                        'Durians',
                        'Protein shakes']
    assert actual_results == expected_results
    assert isinstance(actual_results, list)
def test_get_unique_wikilinks_from_html_content_has_unique_links(html_wikilinks_stub):
    """The returned links contain no duplicates."""
    links = _get_unique_wikilinks(html_wikilinks_stub)
    assert len(links) == len(set(links))
def test_get_all_md_link_info(txt_md_links_stub):
    """Only well-formed [text](<url>) links are extracted, as (text, url) tuples."""
    times_url = 'https://www.thetimes.co.uk/article/chancellor-alistair-darling-on-brink-of-second-bailout-for-banks-n9l382mn62h'
    expected_links = [
        ('The Times 03/Jan/2009 Chancellor on brink of second bailout for banks',
         times_url),
        ('Chancellor Alistair Darling on brink of second bailout for banks',
         times_url),
        ('ADA', 'https://cardano.org/'),
    ]
    observed_links = _get_all_md_link_info_from_ascii_plaintext(txt_md_links_stub)
    assert observed_links == expected_links
def test_get_unique_md_links_has_order_preserved(txt_md_links_stub):
    """Unique markdown links are returned in first-seen order."""
    observed = _get_unique_md_links_from_ascii_plaintext(txt_md_links_stub)
    assert observed == [
        'https://www.thetimes.co.uk/article/chancellor-alistair-darling-on-brink-of-second-bailout-for-banks-n9l382mn62h',
        'https://cardano.org/',
    ]
def test_get_unique_md_links_has_unique_links(txt_md_links_stub):
    """The returned markdown links contain no duplicates."""
    links = _get_unique_md_links_from_ascii_plaintext(txt_md_links_stub)
    assert len(links) == len(set(links))
def test_pretend_wikilink_not_extracted_from_front_matter(txt_sussudio_stub):
    """'Polka Party!' appears only in front matter, so it must not surface as a wikilink."""
    actual_links = _get_unique_wikilinks(txt_sussudio_stub)
    assert 'Polka Party!' not in actual_links
def test_sussudio_front_matter():
    """YAML front matter of the Sussudio note is parsed into a dict."""
    expected_metadata = {'title': 'Sussudio',
                         'artist': 'Phil Collins',
                         'category': 'music',
                         'year': 1985,
                         'url': 'https://www.discogs.com/Phil-Collins-Sussudio/master/106239',
                         'references': [[['American Psycho (film)']], 'Polka Party!'],
                         'chart_peaks': [{'US': 1}, {'UK': 12}]}
    actual_metadata = get_front_matter(
        'tests/vault-stub/Sussudio.md')
    assert actual_metadata == expected_metadata
def test_ne_fuit_front_matter():
    """A note without front matter yields an empty dict."""
    observed = get_front_matter('tests/vault-stub/lipsum/Ne fuit.md')
    assert observed == {}
def test_front_matter_only_parsing():
    """Files containing nothing but front matter reduce to an empty body."""
    for fpath in glob('tests/general/frontmatter-only*.md', recursive=True):
        assert _get_ascii_plaintext_from_md_file(fpath) == '\n'
def test_separators_not_front_matter_parsing():
    """'---' separators that are not front matter must not be parsed as such."""
    for fpath in glob('tests/general/not-frontmatter*.md', recursive=True):
        assert get_front_matter(fpath) == {}
def test_handle_invalid_front_matter():
    """Malformed front matter is handled gracefully by returning an empty dict."""
    for fpath in glob('tests/general/invalid-frontmatter*.md', recursive=True):
        assert get_front_matter(fpath) == {}
def test_sussudio_tags():
    """Tags are extracted in order of appearance, duplicates included."""
    observed = get_tags('tests/vault-stub/Sussudio.md')
    assert observed == ['y1982', 'y_1982', 'y-1982', 'y1982', 'y2000']
def test_embedded_files_alias_scaling():
    """Embedded images inside a table are found; sizing aliases are stripped."""
    observed = get_embedded_files('tests/general/embedded-images_in-table.md')
    assert observed == ['test-image_1_before.png',
                        'test-image_1_after.png',
                        'test-image_2_before.png',
                        'test-image_2_after.png']
def test_wikilinks_code_block():
    """Wikilinks inside fenced code blocks must be ignored."""
    observed = get_wikilinks('tests/general/wikilinks_exclude-code.md')
    assert observed == []
| 37.948052 | 192 | 0.645562 |
3b1de371bb2a003671dcb9b06c6888c73c65fcfc | 399 | py | Python | backend_crm/wsgi.py | mobius-labs/app | bdf8226d8b16cea609a7af01be51c9bd4b867ab3 | [
"MIT"
] | 1 | 2021-11-13T10:52:08.000Z | 2021-11-13T10:52:08.000Z | backend_crm/wsgi.py | mobius-labs/app | bdf8226d8b16cea609a7af01be51c9bd4b867ab3 | [
"MIT"
] | 1 | 2021-11-13T04:25:00.000Z | 2021-11-13T04:25:00.000Z | backend_crm/wsgi.py | mobius-labs/app | bdf8226d8b16cea609a7af01be51c9bd4b867ab3 | [
"MIT"
] | null | null | null | """
WSGI config for backend_crm project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module; an already-set env var takes precedence.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend_crm.settings')
# Module-level WSGI callable that WSGI servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
ceccf9b5b0611ccbab6c623abe05486b99fb93a2 | 26,013 | py | Python | PcmPy/model.py | DiedrichsenLab/PcmPy | 4d9e476793f628c8529906103c3cbee50e7915a8 | [
"MIT"
] | 4 | 2019-09-20T15:16:33.000Z | 2022-02-07T20:46:38.000Z | PcmPy/model.py | DiedrichsenLab/PcmPy | 4d9e476793f628c8529906103c3cbee50e7915a8 | [
"MIT"
] | 1 | 2022-03-11T20:49:47.000Z | 2022-03-11T20:49:47.000Z | PcmPy/model.py | DiedrichsenLab/PcmPy | 4d9e476793f628c8529906103c3cbee50e7915a8 | [
"MIT"
] | 1 | 2021-11-17T18:40:48.000Z | 2021-11-17T18:40:48.000Z | import numpy as np
from numpy import exp, eye, log, sqrt
from numpy.linalg import solve, eigh, cholesky, pinv
import PcmPy as pcm
import pandas as pd
class Model:
    """
    Abstract PCM Model Class.

    Concrete subclasses implement ``predict(theta)``, returning the
    predicted second-moment matrix G and its partial derivatives with
    respect to the parameter vector theta.
    """
    def __init__(self,name):
        """
        Args:
            name ([str]): Name of the model
        """
        self.name = name
        self.n_param = 0  # Number of free model parameters
        self.algorithm = 'newton' # Default optimization algorithm
        self.theta0 = np.zeros((0,)) # Empty theta0
    def predict(self,theta):
        """
        Prediction function: Needs to be implemented by subclasses.
        """
        # FIX: corrected the typo "caluclate" in the error message.
        raise(NameError("calculate G needs to be implemented"))
    def set_theta0(self,G_hat):
        """Set starting values theta0 from an estimated G. No-op by default."""
        pass
class FeatureModel(Model):
    """Feature model.

    The feature matrix is a weighted sum of components,
    A = sum_i theta_i * Ac_i, and the predicted second moment is G = A A'.
    """
    def __init__(self,name,Ac):
        """
        Args:
            name (string):
                name of the particular model for identification
            Ac (numpy.ndarray):
                3-dimensional array stacking the components of A
        """
        Model.__init__(self,name)
        if Ac.ndim < 3:
            # Promote a single 2-d component to a one-element stack
            Ac = Ac.reshape((1,) + Ac.shape)
        self.Ac = Ac
        self.n_param = Ac.shape[0]
    def predict(self,theta):
        """
        Calculate G and its derivatives for a parameter vector.

        Args:
            theta (np.ndarray): Vector of model parameters

        Returns:
            G (np.ndarray):
                2-dimensional (K,K) predicted second moment
            dG_dTheta (np.ndarray):
                (n_param,K,K) partial derivatives of G with respect to theta
        """
        weights = np.reshape(theta, (theta.size, 1, 1))  # broadcast over components
        A = np.sum(self.Ac * weights, axis=0)
        G = A @ A.T
        dG_dTheta = np.zeros((self.n_param,) + G.shape)
        for p in range(self.n_param):
            # d(A A')/d theta_p = Ac_p A' + (Ac_p A')'
            dA = self.Ac[p, :, :] @ A.T
            dG_dTheta[p, :, :] = dA + dA.T
        return (G, dG_dTheta)
class ComponentModel(Model):
    """Component model: G = sum_i exp(theta_i) * Gc_i."""
    def __init__(self,name,Gc):
        """
        Parameters:
            name (string):
                name of the particular model for identification
            Gc (numpy.ndarray or list):
                components of G, stacked along the first axis
        """
        Model.__init__(self,name)
        if type(Gc) is list:
            Gc = np.stack(Gc, axis=0)
        if Gc.ndim < 3:
            # A single 2-d component becomes a stack of one
            Gc = Gc.reshape((1,) + Gc.shape)
        self.Gc = Gc
        self.n_param = Gc.shape[0]
    def predict(self,theta):
        """
        Calculate G and its derivatives.

        Parameters:
            theta (numpy.ndarray): Vector of model parameters

        Returns:
            G (np.ndarray):
                2-dimensional (K,K) predicted second moment
            dG_dTheta (np.ndarray):
                (n_param,K,K) partial derivatives; since d exp(t)/dt = exp(t),
                each derivative equals the exp-scaled component itself.
        """
        scale = np.exp(theta).reshape((theta.size, 1, 1))
        dG_dTheta = scale * self.Gc
        G = np.sum(dG_dTheta, axis=0)
        return (G, dG_dTheta)
    def set_theta0(self,G_hat):
        """Set theta0 by regressing the crossvalidated G onto the components.

        Parameters:
            G_hat (numpy.ndarray):
                Crossvalidated estimate of G
        """
        if self.n_param == 0:
            self.theta0 = np.zeros((0,))
            return
        # Each flattened component forms one regressor column
        X = self.Gc.reshape(self.n_param, -1).T
        h0 = pinv(X) @ G_hat.reshape((-1, 1))
        h0 = np.where(h0 < 10e-4, 10e-4, h0)  # keep weights strictly positive
        self.theta0 = log(h0.reshape(-1,))
class CorrelationModel(Model):
    """
    Correlation model class for a fixed or flexible correlation model
    it models the correlation between different items across 2 experimental conditions.
    In this paramaterization:
        var(x) = exp(theta_x)
        var(y) = exp(theta_y)
        cov(x,y) = sqrt(var(x)*var(y))* r
        r = (exp(2*theta_z)-1)/(exp(2*theta_z)+1); % Fisher inverse
    """
    def __init__(self,name,within_cov = None,num_items=1,
                corr=None,cond_effect = False):
        """
        Parameters:
            name (string):
                name of the particular model for indentification
            within_cov (numpy.ndarray or None):
                how to model within condition cov-ariance between items
            num_items (int):
                Number of items within each condition
        """
        Model.__init__(self,name)
        self.num_items = num_items
        self.num_cond = 2 # Current default
        self.cond_effect = cond_effect
        # Condition label (0/1) and item label for each of the K = 2*num_items rows
        self.cond_vec = np.kron(np.arange(self.num_cond), np.ones((self.num_items,)))
        self.item_vec = np.kron(np.ones((self.num_cond,)),np.arange(self.num_items))
        K = self.num_cond * self.num_items
        if within_cov is None:
            # Default: independent items within each condition
            self.within_cov = np.eye(num_items).reshape(1,num_items,num_items)
        else:
            self.within_cov = within_cov
        # Initialize the Gc structure
        self.n_cparam = self.num_cond * self.cond_effect # Number of condition effect parameters
        self.n_wparam = self.within_cov.shape[0] # Number of within condition parameters
        self.n_param = self.num_cond * self.n_wparam + self.n_cparam
        self.Gc = np.zeros((self.n_param,K,K))
        # Now add the condition effect and within condition covariance structure
        for i in range(self.num_cond):
            ind = np.where(self.cond_vec==i)[0]
            if self.cond_effect:
                self.Gc[np.ix_([i],ind,ind)] = 1
            c = np.arange(self.n_wparam) + i * self.n_wparam + self.n_cparam
            self.Gc[np.ix_(c,ind,ind)] = self.within_cov
        # Check if fixed or flexible correlation model
        self.corr = corr
        if self.corr is None:
            # Flexible model: one extra Fisher-z parameter for the correlation
            self.n_param = self.n_param + 1

    def predict(self,theta):
        """
        Calculation of G for a correlation model

        Parameters:
            theta (numpy.ndarray): Vector of model parameters

        Returns:
            G (np.ndarray)
                2-dimensional (K,K) array of predicted second moment
            dG_dTheta (np.ndarray)
                3-d (n_param,K,K) array of partial matrix derivatives of G in respect to theta
        """
        # Determine the correlation to model
        if self.corr is None:
            z = theta[-1] # Last item
            if np.abs(z)>150:
                # Guard against overflow in exp(2*z) for extreme z values
                r=np.nan
            else:
                r = (exp(2*z)-1)/(exp(2*z)+1) # Fisher inverse transformation
        else:
            r = self.corr
        # Get the basic variances within conditons
        n = self.n_wparam * self.num_cond + self.num_cond * self.cond_effect # Number of basic parameters
        o = self.num_cond * self.cond_effect # Number of condition
        dG_dTheta = np.zeros((self.n_param,self.Gc.shape[1],self.Gc.shape[1]))
        exp_theta=np.reshape(np.exp(theta[0:n]),(n,1,1)) # Bring into the right shape for broadcasting
        dG_dTheta[0:n,:,:] = self.Gc * exp_theta # This is also the derivative dexp(x)/dx = exp(x)
        # Sum current G-matrix without the condition effects
        G = dG_dTheta[o:n,:,:].sum(axis=0)
        # Now determine the cross_condition block (currently only for 2 conditions)
        i1 = np.where(self.cond_vec==0)[0]
        i2 = np.where(self.cond_vec==1)[0]
        p1 = np.arange(self.n_wparam) + self.n_cparam
        p2 = p1 + self.n_wparam
        C = sqrt(G[np.ix_(i1,i1)] * G[np.ix_(i2,i2)]) # Maximal covariance
        G[np.ix_(i1,i2)] = C * r
        G[np.ix_(i2,i1)] = C.T * r
        # Now add the across-conditions blocks to the derivatives:
        # chain rule through C = sqrt(G1 * G2): dC/dtheta = 0.5/C * r * (other block) * dG
        for j in range(self.n_wparam):
            dG1 = dG_dTheta[np.ix_([p1[j]],i1,i1)]
            dG1 = dG1[0,:,:]
            G1 = G[np.ix_(i1,i1)]
            dG2 = dG_dTheta[np.ix_([p2[j]],i2,i2)]
            dG2 = dG2[0,:,:]
            G2 = G[np.ix_(i2,i2)]
            dC1 = np.zeros(dG1.shape)
            dC2 = np.zeros(dG2.shape)
            ind = C!=0
            dC1[ind] = 0.5 * 1/C[ind] * r * G2[ind] * dG1[ind]
            dC2[ind] = 0.5 * 1/C[ind] * r * G1[ind] * dG2[ind]
            dG_dTheta[np.ix_([p1[j]],i1,i2)] = dC1
            dG_dTheta[np.ix_([p1[j]],i2,i1)]= dC1.T
            dG_dTheta[np.ix_([p2[j]],i1,i2)] = dC2
            dG_dTheta[np.ix_([p2[j]],i2,i1)] = dC2.T
        # Now add the main Condition effect co-variance
        # (added after C is computed, so the condition effect does not scale C)
        G = G+dG_dTheta[0:o,:,:].sum(axis=0)
        # Add the derivative for the correlation parameter for flexible models
        if self.corr is None:
            with np.errstate(all='ignore'):
                # dr/dz of the Fisher inverse: 4*exp(2z)/(exp(2z)+1)^2
                dC = C*4*exp(2*z)/(exp(2*z)+1)**2
            dG_dTheta[np.ix_([n],i1,i2)] = dC
            dG_dTheta[np.ix_([n],i2,i1)] = dC.T
        return (G,dG_dTheta)

    def set_theta0(self,G_hat):
        """
        Sets theta0 based on the crossvalidated second-moment

        Parameters:
            G_hat (numpy.ndarray)
                Crossvalidated estimate of G
        """
        # Exclude the flexible correlation parameter from the regression
        n_p = self.n_param - (self.corr is None)
        G_hat = pcm.util.make_pd(G_hat)
        X = np.zeros((G_hat.shape[0]**2, n_p))
        for i in range(n_p):
            X[:,i] = self.Gc[i,:,:].reshape((-1,))
        h0 = pinv(X) @ G_hat.reshape((-1,1))
        h0[h0<10e-4] = 10e-4
        self.theta0 = log(h0.reshape(-1,))
        if self.corr is None:
            # Start the Fisher-z correlation parameter at 0 (r = 0)
            self.theta0 = np.concatenate([self.theta0,np.zeros((1,))])

    def get_correlation(self,theta):
        """
        Returns the correlations from a set of fitted parameters

        Parameters:
            theta (numpy.ndarray):
                n_param vector or n_param x n_subj matrix of model parameters

        Returns:
            correlations (numpy.ndarray)
                Correlation value
        """
        N , n_param = theta.shape
        if self.corr is None:
            # NOTE(review): this indexes the first axis of a (N, n_param)
            # array; it appears to expect parameters along axis 0
            # (n_param x n_subj) - confirm the expected orientation with callers.
            z = theta[self.n_param-1]
            r = (exp(2*z)-1)/(exp(2*z)+1)
        else:
            r = self.corr # Fixed correlations
        return r
class FixedModel(Model):
    """Fixed PCM model: a rigid predicted G matrix and no free parameters."""
    def __init__(self,name,G):
        """
        Parameters:
            name (string):
                name of the particular model for identification
            G (numpy.ndarray):
                2-dimensional predicted second-moment matrix
        """
        Model.__init__(self,name)
        if G.ndim > 2:
            raise(NameError("G-matrix needs to be 2-d array"))
        self.G = G
        self.n_param = 0
    def predict(self,theta=None):
        """Return the fixed G; there are no parameters, so no derivatives.

        Returns:
            G (np.ndarray): 2-dimensional (K,K) predicted second moment
            dG_dTheta (None)
        """
        return (self.G, None)
class FreeModel(Model):
    """Free model: G = A A', with A a flexible triangular matrix
    whose non-zero entries are the model parameters."""
    def __init__(self,name,n_cond):
        """
        Parameters:
            name (string):
                name of the particular model for identification
            n_cond (int):
                number of conditions for the free model
        """
        Model.__init__(self,name)
        self.n_cond = n_cond
        self.index = np.tri(n_cond)
        # Row/column indices of the free (triangular) entries of A
        self.row, self.col = np.where(self.index)
        self.n_param = len(self.row)
    def predict(self,theta):
        """
        Calculate G and its derivatives.

        Args:
            theta (numpy.ndarray): Vector of model parameters

        Returns:
            G (np.ndarray)
                2-dimensional (K,K) predicted second moment
            dGdtheta (np.ndarray)
                (n_param,K,K) partial derivatives of G with respect to theta
        """
        A = np.zeros((self.n_cond, self.n_cond))
        A[self.row, self.col] = theta
        G = A @ A.T
        dGdtheta = np.zeros((self.n_param, self.n_cond, self.n_cond))
        for p, (r, c) in enumerate(zip(self.row, self.col)):
            # += correctly doubles the diagonal contribution when r indexes
            # both the affected row and column
            dGdtheta[p, r, :] += A[:, c]
            dGdtheta[p, :, r] += A[:, c]
        return (G, dGdtheta)
    def set_theta0(self,G_hat):
        """
        Set theta0 from the Cholesky factor of a positive-definite
        version of the crossvalidated second moment.

        Parameters:
            G_hat (numpy.ndarray)
                Crossvalidated estimate of G
        """
        A = cholesky(pcm.util.make_pd(G_hat))
        self.theta0 = A[self.row, self.col]
class NoiseModel:
    """
    Abstract PCM Noise model class

    Subclasses implement predict/inverse/derivative/set_theta0.
    """
    def __init__(self):
        pass

    def predict(self, theta):
        """Return the predicted noise covariance S for parameters theta."""
        raise(NameError("predict needs to be implemented"))

    def inverse(self, theta):
        """Return S^{-1} for parameters theta."""
        raise(NameError("inverse needs to be implemented"))

    def derivative(self, theta):
        """Return the derivative of S with respect to its parameters."""
        raise(NameError("derivative needs to be implemented"))

    def set_theta0(self, Y, Z, X=None):
        """Set starting values for the noise parameters from data."""
        raise(NameError("get_theta0 needs to be implemented"))
class IndependentNoise(NoiseModel):
    """
    Simple independent noise model (i.i.d. observations).
    The only parameter is the log of the noise variance.
    """
    def __init__(self):
        NoiseModel.__init__(self)
        self.n_param = 1
        # BUGFIX: the original `theta0 = 0` only bound a throwaway local and
        # never set the instance attribute. Store the default starting value
        # on self, in the np.ndarray form that set_theta0 also produces
        # (theta is a log-variance, so 0 corresponds to variance 1).
        self.theta0 = np.zeros((1,))

    def predict(self, theta):
        """
        Prediction function returns S - predicted noise covariance matrix

        Args:
            theta ([np.array]): Array like of noiseparamters

        Returns:
            s (double)
                Noise variance (for simplicity as a scalar)
        """
        return np.exp(theta[0])

    def inverse(self, theta):
        """
        Returns S^{-1}

        Args:
            theta ([np.array]): Array like of noiseparamters

        Returns:
            s (double)
                Inverse of noise variance (scalar)
        """
        return 1./np.exp(theta[0])

    def derivative(self, theta, n=0):
        """
        Returns the derivative of S in respect to it's own parameters.
        Since S = exp(theta[0]) * I, the derivative is exp(theta[0]).

        Args:
            theta ([np.array])
                Array like of noiseparamters
            n (int, optional)
                Number of parameter to get derivate for. Defaults to 0
                (the only parameter of this model).

        Returns:
            d (np-array)
                derivative of S in respective to theta
        """
        return np.exp(theta[0])

    def set_theta0(self, Y, Z, X=None):
        """Makes an initial guess on noise parameters from the residual
        variance of the data after removing fixed and random effects.

        Args:
            Y ([np.array])
                Data (N x P)
            Z ([np.array])
                Random Effects matrix
            X ([np.array], optional)
                Fixed effects matrix.
        """
        N, P = Y.shape
        if X is not None:
            Z = np.c_[Z, X]
        # Residuals after projecting out all modeled effects
        RY = Y - Z @ pinv(Z) @ Y
        noise0 = np.sum(RY*RY)/(P * (N - Z.shape[1]))
        if noise0 <= 0:
            raise(NameError("Too many model factors to estimate noise variance. Consider removing terms or setting runEffect to 'none'"))
        self.theta0 = np.array([log(noise0)])
class BlockPlusIndepNoise(NoiseModel):
    """
    Noise model with shared (correlated) noise per partition (block)
    plus independent noise per observation.
    For beta-values from an fMRI analysis, this is an adequate model.
    """
    def __init__(self,part_vec):
        """
        Args:
            part_vec ([np.array]): vector indicating the block membership for each observation
        """
        NoiseModel.__init__(self)
        self.n_param = 2
        self.part_vec = part_vec
        self.B = pcm.matrix.indicator(part_vec)  # observations x blocks
        self.N, self.M = self.B.shape
        self.BTB = np.sum(self.B, axis=0)
        self.BBT = self.B @ self.B.T

    def predict(self, theta):
        """Predicted noise covariance S = exp(theta[0]) * B B' + exp(theta[1]) * I.

        Args:
            theta ([np.array]): Array like of noiseparamters

        Returns:
            s (np.array): Noise covariance matrix
        """
        return exp(theta[0]) * self.BBT + exp(theta[1]) * eye(self.N)

    def inverse(self, theta):
        """Return S^{-1}, solving an M x M system instead of inverting N x N.

        Args:
            theta (np.array): Array like of noiseparamters

        Returns:
            iS (np.array): Inverse of noise covariance
        """
        block_var = exp(theta[0])
        indep_var = exp(theta[1])
        A = eye(self.M) * indep_var / block_var + self.B.T @ self.B
        return (eye(self.N) - self.B @ np.linalg.solve(A, self.B.T)) / indep_var

    def derivative(self, theta,n=0):
        """Return dS/dtheta_n (n=0: block term, n=1: independent term).

        Args:
            theta (np.array): Array like of noiseparamters
            n (int, optional): Number of parameter to get derivate for. Defaults to 0.

        Returns:
            d (np.array): derivative of S in respective to theta
        """
        if n == 0:
            return np.exp(theta[0]) * self.BBT
        if n == 1:
            return np.exp(theta[1]) * eye(self.N)
class ModelFamily:
    """
    ModelFamily class is basically a list (iterable) of models,
    which is constructed from a combining a set of components in
    every possible way. Every components can be either switched in or out.
    You can always specify a list of 'base components', which are always present.
    A Model family can be either constructed from a component model, or
    a list of (usually fixed) models.
    """
    def __init__(self,components, basecomponents=None,comp_names=None):
        """
        Args:
            components (list)
                A list of model components, which are used to create the model family
                Can be a list of fixed models, a component model, or a num_comp x N xN array
            basecomponents (list)
                This specifies the components that are present everywhere
        """
        # NOTE(review): `basecomponents` is accepted but never used in this
        # constructor - confirm whether base components are supported yet.
        if type(components) is ComponentModel:
            self.num_comp = components.Gc.shape[0]
            self.Gc = components.Gc
            self.comp_names = comp_names
        elif type(components) is np.ndarray:
            if components.ndim != 3:
                raise(NameError('ndarray input needs to have 3 dimensions (num_comp x N x N'))
            self.num_comp = components.shape[0]
            self.Gc = components
            self.comp_names = comp_names
        elif type(components) is list:
            self.num_comp = len(components)
            for i,m in enumerate(components):
                if type(m) is not FixedModel:
                    raise(NameError('Can only construct a model class from fixed models'))
                if i==0:
                    self.Gc=np.empty((len(components),m.G.shape[0],m.G.shape[1]))
                    self.comp_names=np.empty((len(components),),dtype=object)
                self.Gc[i,:,:]=m.G
                self.comp_names[i]=m.name
        else:
            raise(NameError('Input needs to be Component model, ndarray, or a list of fixed models'))
        # Check if component names are given:
        if self.comp_names is None:
            self.comp_names = [f'{d}' for d in np.arange(self.num_comp)+1]
        self.comp_names = np.array(self.comp_names)
        # Build all combination of 0,1,2... components
        # (2^num_comp models; capped to keep the family tractable)
        if self.num_comp > 12:
            raise(NameError('More than 12 components is probably not recommended '))
        self.num_models = 2 ** self.num_comp
        self.combinations = np.empty((self.num_models,0),dtype=int)
        ind = np.arange(self.num_models)
        for i in range(self.num_comp):
            # Bit i of the model index decides whether component i is present
            self.combinations = np.c_[self.combinations,np.floor(ind/(2**i))%2]
        # Order the combinations by the number of components that they contain
        self.num_comp_per_m = self.combinations.sum(axis=1).astype(int)
        ind = np.argsort(self.num_comp_per_m)
        self.num_comp_per_m = self.num_comp_per_m[ind]
        self.combinations = self.combinations[ind,:]
        # Now build all model combinations as individual models
        self.models = []
        self.model_names = []
        for m in range(self.num_models):
            ind = self.combinations[m]>0
            if ind.sum()==0:
                # Empty combination: a null (all-zero G) baseline model
                name = 'base'
                mod = FixedModel(name,np.zeros(self.Gc[0].shape))
            else:
                name = '+'.join(self.comp_names[ind])
                mod = ComponentModel(name,self.Gc[ind,:,:])
            self.model_names.append(name)
            self.models.append(mod)

    def __getitem__(self,key):
        return self.models[key]

    def __len__(self):
        return self.num_models

    def get_layout(self):
        """generate 2d layout of the model tree
        root model will be at (0,0)
        """
        x = np.zeros((self.num_models,))
        y = np.zeros((self.num_models,))
        max_comp=np.max(self.num_comp_per_m)
        for i in range(max_comp+1):
            # Models with i components share row i, centered horizontally
            ind = self.num_comp_per_m==i
            y[ind]=i
            x_coord = np.arange(np.sum(ind))
            x[ind]= x_coord - x_coord.mean()
        return x,y

    def get_connectivity(self):
        """ return a connectivty
        matrix that determines whether
        2 models only differ by a single component
        """
        connect = np.zeros((self.num_models,self.num_models),dtype=int)
        connect_sgn = np.zeros((self.num_models,self.num_models),dtype=int)
        for i in range(self.num_comp):
            diff = self.combinations[:,i].reshape((-1,1)) - self.combinations[:,i].reshape((1,-1))
            connect = connect + np.abs(diff).astype(int)
            connect_sgn = connect_sgn + diff.astype(int)
        # Keep the sign only for model pairs that differ in exactly one component
        return (connect==1)*connect_sgn

    def model_posterior(self,likelihood,method='AIC',format='ndarray'):
        """ Determine posterior of the model across model family

        Args:
            likelihood ([np.array or DataFrame]):
                N x num_models log-likelihoods
            method (string):
                Method by which to correct for number of parameters(k)
                'AIC' (default): LL-k
                None: No correction - use if crossvalidated likelihood is used
            format (string):
                Return format for posterior
                'ndarray': Simple N x num_models np.array
                'DataFrame': pandas Data frame
        Returns:
            posterior (DataFrame or ndarray):
                Model posterior - rows are data set, columns are models
        """
        if type(likelihood) in [pd.Series,pd.DataFrame]:
            LL = likelihood.to_numpy()
        else:
            LL = likelihood
        if LL.ndim==1:
            LL=LL.reshape((1,-1))
        if method=='AIC':
            crit = LL - self.num_comp_per_m
        elif method is None:
            crit = LL
        else:
            # NOTE(review): the message mentions BIC, but no BIC branch
            # exists above - confirm intended methods.
            raise(NameError('Method needs be either BIC, AIC, or None'))
        # Safe transform into probability
        # (subtract the row max before exponentiating to avoid overflow)
        crit = crit - crit.max(axis=1).reshape(-1,1)
        crit = np.exp(crit)
        p = crit / crit.sum(axis=1).reshape(-1,1)
        if format == 'DataFrame':
            return pd.DataFrame(data=p,
                        index=np.arange(p.shape[0]),
                        columns = self.model_names)
        else:
            return p

    def component_posterior(self,likelihood,method='AIC',format='ndarray'):
        """ Determine the posterior of the component (absence / presence)

        Args:
            likelihood ([np.array or DataFrame]):
                N x num_models log-likelihoods
            method (string):
                Method by which to correct for number of parameters(k)
                'AIC' (default): LL-k
                None: No correction - use if crossvalidated likelihood is used
            format (string):
                Return format for posterior
                'ndarray': Simple N x num_models np.array
                'DataFrame': pandas Data frame
        Returns:
            posterior (DataFrame):
                Component posterior - rows are data set, columns are components
        """
        mposterior = self.model_posterior(likelihood,method)
        cposterior = np.empty((mposterior.shape[0],self.num_comp))
        for i in range(self.num_comp):
            # P(component i present) = sum of posteriors of models containing it
            cposterior[:,i] = mposterior[:,self.combinations[:,i]==1].sum(axis=1)
        if format == 'DataFrame':
            return pd.DataFrame(data=cposterior,
                        index=np.arange(cposterior.shape[0]),
                        columns = self.comp_names)
        return cposterior

    def component_bayesfactor(self,likelihood,method='AIC',format='ndarray'):
        """ Returns a log-bayes factor for each component

        Args:
            likelihood ([np.array or DataFrame]):
                N x num_models log-likelihoods
            method (string):
                Method by which to correct for number of parameters(k)
                'AIC' (default): LL-k
                None: No correction - use if crossvalidated likelihood is used
            format (string):
                Return format for posterior
                'ndarray': Simple N x num_models np.array
                'DataFrame': pandas Data frame
        Returns:
            posterior (DataFrame):
                Component posterior - rows are data set, columns are components
        """
        mposterior = self.model_posterior(likelihood,method)
        c_bf = np.empty((mposterior.shape[0],self.num_comp))
        for i in range(self.num_comp):
            # log BF = log P(models with component) - log P(models without)
            c_bf[:,i] = np.log(mposterior[:,self.combinations[:,i]==1].sum(axis=1))-np.log(mposterior[:,self.combinations[:,i]==0].sum(axis=1))
        if format == 'DataFrame':
            return pd.DataFrame(data=c_bf,
                        index=np.arange(c_bf.shape[0]),
                        columns = self.comp_names)
        return c_bf
| 34.182654 | 143 | 0.557414 |
49e71f1e2ad18751b402845c07470d98f7f082a4 | 1,087 | py | Python | ICA .py | tusharmishra288/Dimensionality-reduction | af6f89f6c52a050fe1f8c7e16c27b91c4b88ee7e | [
"Apache-2.0"
] | 2 | 2020-03-26T10:02:00.000Z | 2020-04-24T17:14:42.000Z | ICA .py | tusharmishra288/Dimensionality-reduction | af6f89f6c52a050fe1f8c7e16c27b91c4b88ee7e | [
"Apache-2.0"
] | null | null | null | ICA .py | tusharmishra288/Dimensionality-reduction | af6f89f6c52a050fe1f8c7e16c27b91c4b88ee7e | [
"Apache-2.0"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import FastICA as ICA
import sklearn.model_selection as k
# Load the data; 'credit_application' is the target column.
r=pd.read_csv('bank_contacts.csv')
x=r.drop('credit_application',axis=1)
y=r['credit_application']
# 80/20 train/test split with a fixed seed for reproducibility.
train_x,test_x,train_y,test_y=k.train_test_split(x,y,test_size=0.2,random_state=42)
# Standardize features: fit on training data only, then apply to test data.
sc=StandardScaler()
train_x=sc.fit_transform(train_x)
test_x=sc.transform(test_x)
# Reduce dimensionality to 4 independent components via FastICA.
ica=ICA(n_components=4,random_state=42)
train_x=ica.fit_transform(train_x,train_y)
test_x=ica.transform(test_x)
from sklearn.ensemble import RandomForestClassifier
# Shallow random forest trained on the ICA-transformed features.
classifier = RandomForestClassifier(max_depth=2, random_state=0)
classifier.fit(train_x,train_y)
pred = classifier.predict(test_x)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# Report test-set performance.
print(confusion_matrix(test_y,pred))
print('Accuracy:',accuracy_score(test_y,pred))
# Scatter the first three independent components against the predictions.
plt.scatter(pred,test_x[:,0],marker='o')
plt.scatter(pred,test_x[:,1],marker='o')
plt.scatter(pred,test_x[:,2],marker='o')
plt.show()
| 33.96875 | 84 | 0.792088 |
c2aa804ea0832731f9951100edd6e124171be484 | 1,113 | py | Python | config/includes.chroot/usr/local/share/S0lar0S/src/ranger/ranger/__init__.py | ddarksmith/S0lar0S | b91971000c089f77d1ff76a00262252a65680e5b | [
"WTFPL"
] | null | null | null | config/includes.chroot/usr/local/share/S0lar0S/src/ranger/ranger/__init__.py | ddarksmith/S0lar0S | b91971000c089f77d1ff76a00262252a65680e5b | [
"WTFPL"
] | null | null | null | config/includes.chroot/usr/local/share/S0lar0S/src/ranger/ranger/__init__.py | ddarksmith/S0lar0S | b91971000c089f77d1ff76a00262252a65680e5b | [
"WTFPL"
] | null | null | null | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""A console file manager with VI key bindings.
It provides a minimalistic and nice curses interface with a view on the
directory hierarchy. The secondary task of ranger is to figure out which
program you want to use to open your files with.
"""
import sys
import os
import tempfile  # NOTE(review): imported but not referenced here - confirm before removing
# Information
__license__ = 'GPL3'
__version__ = '1.8.1'
__author__ = __maintainer__ = 'Roman Zimbelmann'
__email__ = 'hut@hut.pm'
# Constants
RANGERDIR = os.path.dirname(__file__)
TICKS_BEFORE_COLLECTING_GARBAGE = 100
TIME_BEFORE_FILE_BECOMES_GARBAGE = 1200
MAX_RESTORABLE_TABS = 3
MACRO_DELIMITER = '%'
DEFAULT_PAGER = 'less'
CACHEDIR = os.path.expanduser("~/.cache/ranger")
USAGE = '%prog [options] [path]'
VERSION = 'ranger-master %s\n\nPython %s' % (__version__, sys.version)
# If the environment variable XDG_CONFIG_HOME is non-empty, CONFDIR is ignored
# and the configuration directory will be $XDG_CONFIG_HOME/ranger instead.
CONFDIR = '~/.config/ranger'
from ranger.core.main import main  # re-export the entry point as ranger.main
| 29.289474 | 78 | 0.7646 |
c9a9ce1e275a310d7727a9187f45dd120a47dd0a | 236 | py | Python | python/users/urls.py | Sult/dnd_social | ecbc5b3e847e74b29e9b05478f68566f03ef34e8 | [
"MIT"
] | 4 | 2018-09-10T14:02:23.000Z | 2021-07-17T20:58:50.000Z | python/users/urls.py | SocialBuddies/dnd_social | 993950ac59d394c1a0268cf6e87a63193f7bd3a7 | [
"MIT"
] | null | null | null | python/users/urls.py | SocialBuddies/dnd_social | 993950ac59d394c1a0268cf6e87a63193f7bd3a7 | [
"MIT"
] | null | null | null | from django.urls import path
from users import views
app_name = 'users'
urlpatterns = [
path('', views.index, name='index'),
path('guest/', views.guest, name='guest'),
path('logout/', views.user_logout, name='logout'),
]
| 19.666667 | 54 | 0.65678 |
307627959bcdb07749f768da4348e882915bf13d | 1,129 | py | Python | Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalHBHEMuon_Output_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalHBHEMuon_Output_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalHBHEMuon_Output_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# output block for alcastream HCAL HBHEMuon
# output module
# module alcastreamHcalHBHEMuonOutput = PoolOutputModule
OutALCARECOHcalCalHBHEMuon_noDrop = cms.PSet(
# use this in case of filter available
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOHcalHBHEMuon')
),
outputCommands = cms.untracked.vstring(
'keep edmTriggerResults_*_*_*',
'keep HcalNoiseSummary_hcalnoise_*_*',
'keep recoTracks_globalMuons_*_*',
'keep recoTrackExtras_globalMuons_*_*',
'keep recoTracks_standAloneMuons_*_*',
'keep recoTrackExtras_standAloneMuons_*_*',
'keep recoTracks_generalTracks_*_*',
'keep recoTrackExtras_generalTracks_*_*',
'keep recoTracks_tevMuons_*_*',
'keep recoTrackExtras_tevMuons_*_*',
'keep *_hbhereco_*_*',
'keep *_hbheprereco_*_*',
'keep *_HBHEMuonProd_*_*',
)
)
import copy
OutALCARECOHcalCalHBHEMuon=copy.deepcopy(OutALCARECOHcalCalHBHEMuon_noDrop)
OutALCARECOHcalCalHBHEMuon.outputCommands.insert(0,"drop *")
| 34.212121 | 75 | 0.708592 |
964588b3ece992bfd2ef0d20d03dafbef605a52e | 530 | py | Python | nmt/collections.py | luozhouyang/tf-nmt-keras | bcceeec0a477eb09c4a8915e638a27dae6c95562 | [
"Apache-2.0"
] | 7 | 2018-09-10T03:49:06.000Z | 2020-06-15T06:10:28.000Z | nmt/collections.py | luozhouyang/tf-nmt-keras | bcceeec0a477eb09c4a8915e638a27dae6c95562 | [
"Apache-2.0"
] | 1 | 2019-02-18T10:01:44.000Z | 2019-02-18T10:01:44.000Z | nmt/collections.py | luozhouyang/tf-nmt-keras | bcceeec0a477eb09c4a8915e638a27dae6c95562 | [
"Apache-2.0"
] | 1 | 2018-09-15T05:49:31.000Z | 2018-09-15T05:49:31.000Z | import tensorflow as tf
ITERATOR = "dataset_iterator"
def add_to_collection(key, value):
    """Add `value` to the TensorFlow graph collection named `key`."""
    tf.add_to_collection(key, value)
def get_from_collection(key):
    """Return the list of values stored in the graph collection `key`."""
    return tf.get_collection(key)
def add_dict_to_collection(name, _dict):
    """Persist a dict into two parallel graph collections:
    `name`_key holds the keys and `name`_value the values, in matching order."""
    key_collection = name + "_key"
    value_collection = name + "_value"
    for key, value in _dict.items():
        tf.add_to_collection(key_collection, key)
        tf.add_to_collection(value_collection, value)
def get_dict_from_collection(name):
    """Rebuild a dict previously stored via add_dict_to_collection."""
    keys = tf.get_collection(name + "_key")
    values = tf.get_collection(name + "_value")
    return {key: value for key, value in zip(keys, values)}
| 22.083333 | 48 | 0.701887 |
4a2294c64152cf6107175b8ad3ae9351af9474d5 | 3,667 | py | Python | edabit/hard/first_repeating_character/first_repeating_character.py | ticotheps/practice_problems | 943c5ab9eebeac4e5cf162adbdc681119603dc36 | [
"MIT"
] | null | null | null | edabit/hard/first_repeating_character/first_repeating_character.py | ticotheps/practice_problems | 943c5ab9eebeac4e5cf162adbdc681119603dc36 | [
"MIT"
] | null | null | null | edabit/hard/first_repeating_character/first_repeating_character.py | ticotheps/practice_problems | 943c5ab9eebeac4e5cf162adbdc681119603dc36 | [
"MIT"
] | null | null | null | """
FIND FIRST CHARACTER THAT REPEATS
Create a function that takes a string and returns the first character that
repeats. If there is no repeat of a character, then return '-1' (string).
Examples:
- first_repeat('legolas') -> 'l'
- first_repeat('Gandalf') -> 'a'
- first_repeat('Balrog') -> '-1'
- first_repeat('Isildur') -> '-1' // Case-sensitive 'I' != 'i'
Notes:
- Tests are case-sensitive.
"""
"""
U.P.E.R. Problem-Solving Framework
PHASE I: UNDERSTAND
Objective:
- Write an algorithm that takes in a single input (of string data type) and
returns a single output (of string or integer data type).
Expected Input(s):
- Number Of: 1
- Data Type: string
- Var Name: 'input_str'
Expected Output(s):
- Number Of: 1
- Data Type: string
- Var Name: 'repeating_char'
Constraints:
- Tests are case-sensitive.
- i.e. - "I" != "i"
PHASE II: PLAN
Brute Force Solution (nested 'for' loops):
(1) Define a function that takes in a single input string argument and
returns a single output string or integer depending on the existence of
repeating characters.
(2) Declare a var, 'repeating_char', that will hold the first character that is found to repeat itself in the input string. Initialize this var with a value of None.
(3) Use an outer 'for' loop to iterate through each character in the input string. This will provide access to each letter of the input string for comparison purposes.
(4) Nest an inner 'for' loop inside of the outer 'for' loop to iterate through each of the same characters in the input string for a second time. This second iteration will enable each character to be compared to itself and to each of the other characters.
(5) Inside of the inner 'for' loop, evaluate whether or not the iterated element, 'j'(of the inner 'for' loop), is a repeating character of the iterated element, 'i' (of the outer 'for' loop).
(a) If it is a repeating character, set the value of 'repeating_char' equal to 'i'.
(b) If it is NOT a repeating character, do nothing.
(6) If no repeating characters were found, set the value of 'repeating_char' to '-1'.
(7) Return the value of 'repeating_char'.
PHASE III: EXECUTE (Please see below)
PHASE IV: REFLECT/REFACTOR
Brute Force Solution:
- Asymptotic Analysis:
- Time Complexity: O(n^2) -> 'quadratic'
- Space Complexity: O(1) -> 'constant'
- Could we improve the time or space complexity of this solution?
- Yes. We could cache characters in a python dictionary by ensuring that a key:value pair exists in the dictionary for each character. The lookup for dictionaries is O(1) time complexity.
- Please see the first_repeate_optimized() solution below.
Optimized Solution:
- Asymptotic Analysis:
- Time Complexity: O(n) -> 'linear'
- Space Complexity: O(n) -> 'linear'
"""
def first_repeat(chars):
    """Return the first character (in scan order) that occurs more than once
    in `chars`, or the string '-1' if every character is unique.

    Comparison is case-sensitive. This replaces the original O(n^2) nested
    scan with two O(n) passes - one to collect the duplicated characters,
    one to find the earliest character that is duplicated - which yields
    exactly the same result as the original implementation.
    """
    seen = set()
    duplicated = set()
    for ch in chars:
        if ch in seen:
            duplicated.add(ch)
        else:
            seen.add(ch)
    for ch in chars:
        if ch in duplicated:
            return ch
    return '-1'
def first_repeat_optimized(chars):
    """Return the character whose second occurrence comes first when scanning
    `chars` left to right, or '-1' when no character repeats.

    Case-sensitive; runs in O(n) time using a set of characters seen so far.
    """
    seen = set()
    for ch in chars:
        if ch in seen:
            return ch
        seen.add(ch)
    return '-1'
1484dec531733e5fada9e44b36612d5004cd17bb | 510 | py | Python | regularexp20.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | regularexp20.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | regularexp20.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | # Python Program To Create A Regular Expression To Search At The Ending Of A String By Ignoring The Case
'''
Function Name : Regular Expression To Search String Using re.IGNORECASE
Function Date : 29 Sep 2020
Function Author : Prasad Dangare
Input : String
Output : String
'''
import re

# Subject string to search. Renamed from ``str`` to ``text``: the original
# assignment shadowed the builtin ``str`` type for the rest of the module.
text = "Hello World"

# Anchor with ``$`` so the match must end the string; re.IGNORECASE makes the
# comparison case-insensitive.
res = re.search(r"World$", text, re.IGNORECASE)

if res:
    print("String Ends With 'World' ")
else:
    print("String Does Not End With 'World' ")
| 28.333333 | 105 | 0.64902 |
42d091a2337ba12830b807ad12dec1cb54053c58 | 1,149 | py | Python | pyrep/robots/arms/manipulator.py | taitoushiyun/PyRep | ed9c2ca82f54ab3b88f6736031c43ce8afcd26b9 | [
"MIT"
] | null | null | null | pyrep/robots/arms/manipulator.py | taitoushiyun/PyRep | ed9c2ca82f54ab3b88f6736031c43ce8afcd26b9 | [
"MIT"
] | null | null | null | pyrep/robots/arms/manipulator.py | taitoushiyun/PyRep | ed9c2ca82f54ab3b88f6736031c43ce8afcd26b9 | [
"MIT"
] | null | null | null | from pyrep.robots.arms.arm import Arm
from pyrep.objects.collision import Collision
from pyrep.objects.dummy import Dummy
from pyrep.objects.shape import Shape
from typing import List, Tuple
class Manipulator(Arm):
    """A PyRep arm with a base shape and a fixed set of collision objects.

    Joint accessors expose only the odd-indexed joints of ``self.joints``;
    the even-indexed entries are skipped (presumably auxiliary joints --
    TODO confirm against the scene file).
    """

    def __init__(self, count=0, name='manipulator', num_joints=12, collision_cnt=15):
        super().__init__(count, name, num_joints)
        self.agent_base = Shape('manipulator_base_visual')
        # Scene objects named 'Collision0' .. 'Collision<n-1>' plus a plain
        # 'Collision' object.
        collision_names = ['Collision{}'.format(index) for index in range(collision_cnt)]
        collision_names.append('Collision')
        self.collisions = [Collision(collision_name) for collision_name in collision_names]

    def get_collision_result(self) -> bool:
        """Return True when any tracked collision object reports a collision."""
        # Read every collision object (no short-circuit) before aggregating.
        readings = [collision.read_collision() for collision in self.collisions]
        return any(readings)

    def get_base(self) -> Shape:
        """Return the visual base shape of the manipulator."""
        return self.agent_base

    def get_joint_initial_positions(self) -> List[float]:
        """Return all joint positions as reported by the parent Arm class."""
        return super().get_joint_positions()

    def get_joint_positions(self) -> List[float]:
        """Return positions of the odd-indexed joints only."""
        return [joint.get_joint_position()
                for index, joint in enumerate(self.joints) if index % 2 != 0]

    def get_joint_velocities(self) -> List[float]:
        """Return velocities of the odd-indexed joints only."""
        return [joint.get_joint_velocity()
                for index, joint in enumerate(self.joints) if index % 2 != 0]
0c4066277fdfa71f9946906f6640db47a8253b1d | 14,483 | py | Python | lemur/plugins/lemur_acme/tests/test_acme.py | x-lhan/lemur | 1d8aaa4a242af610363e961167bc31691b358773 | [
"Apache-2.0"
] | null | null | null | lemur/plugins/lemur_acme/tests/test_acme.py | x-lhan/lemur | 1d8aaa4a242af610363e961167bc31691b358773 | [
"Apache-2.0"
] | null | null | null | lemur/plugins/lemur_acme/tests/test_acme.py | x-lhan/lemur | 1d8aaa4a242af610363e961167bc31691b358773 | [
"Apache-2.0"
] | null | null | null | import unittest
from mock import MagicMock, Mock, patch
from lemur.plugins.lemur_acme import plugin
class TestAcme(unittest.TestCase):
@patch('lemur.plugins.lemur_acme.plugin.dns_provider_service')
def setUp(self, mock_dns_provider_service):
self.ACMEIssuerPlugin = plugin.ACMEIssuerPlugin()
self.acme = plugin.AcmeHandler()
mock_dns_provider = Mock()
mock_dns_provider.name = "cloudflare"
mock_dns_provider.credentials = "{}"
mock_dns_provider.provider_type = "cloudflare"
self.acme.dns_providers_for_domain = {"www.test.com": [mock_dns_provider],
"test.fakedomain.net": [mock_dns_provider]}
@patch('lemur.plugins.lemur_acme.plugin.len', return_value=1)
def test_find_dns_challenge(self, mock_len):
assert mock_len
from acme import challenges
c = challenges.DNS01()
mock_authz = Mock()
mock_authz.body.resolved_combinations = []
mock_entry = Mock()
mock_entry.chall = c
mock_authz.body.resolved_combinations.append(mock_entry)
result = yield self.acme.find_dns_challenge(mock_authz)
self.assertEqual(result, mock_entry)
def test_authz_record(self):
a = plugin.AuthorizationRecord("host", "authz", "challenge", "id")
self.assertEqual(type(a), plugin.AuthorizationRecord)
@patch('acme.client.Client')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.plugin.len', return_value=1)
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.find_dns_challenge')
def test_start_dns_challenge(self, mock_find_dns_challenge, mock_len, mock_app, mock_acme):
assert mock_len
mock_order = Mock()
mock_app.logger.debug = Mock()
mock_authz = Mock()
mock_authz.body.resolved_combinations = []
mock_entry = MagicMock()
from acme import challenges
c = challenges.DNS01()
mock_entry.chall = TestAcme.test_complete_dns_challenge_fail
mock_authz.body.resolved_combinations.append(mock_entry)
mock_acme.request_domain_challenges = Mock(return_value=mock_authz)
mock_dns_provider = Mock()
mock_dns_provider.create_txt_record = Mock(return_value=1)
values = [mock_entry]
iterable = mock_find_dns_challenge.return_value
iterator = iter(values)
iterable.__iter__.return_value = iterator
result = self.acme.start_dns_challenge(mock_acme, "accountid", "host", mock_dns_provider, mock_order, {})
self.assertEqual(type(result), plugin.AuthorizationRecord)
@patch('acme.client.Client')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.cloudflare.wait_for_dns_change')
def test_complete_dns_challenge_success(self, mock_wait_for_dns_change, mock_current_app, mock_acme):
mock_dns_provider = Mock()
mock_dns_provider.wait_for_dns_change = Mock(return_value=True)
mock_authz = Mock()
mock_authz.dns_challenge.response = Mock()
mock_authz.dns_challenge.response.simple_verify = Mock(return_value=True)
mock_authz.authz = []
mock_authz.host = "www.test.com"
mock_authz_record = Mock()
mock_authz_record.body.identifier.value = "test"
mock_authz.authz.append(mock_authz_record)
mock_authz.change_id = []
mock_authz.change_id.append("123")
mock_authz.dns_challenge = []
dns_challenge = Mock()
mock_authz.dns_challenge.append(dns_challenge)
self.acme.complete_dns_challenge(mock_acme, mock_authz)
@patch('acme.client.Client')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.cloudflare.wait_for_dns_change')
def test_complete_dns_challenge_fail(self, mock_wait_for_dns_change, mock_current_app, mock_acme):
mock_dns_provider = Mock()
mock_dns_provider.wait_for_dns_change = Mock(return_value=True)
mock_authz = Mock()
mock_authz.dns_challenge.response = Mock()
mock_authz.dns_challenge.response.simple_verify = Mock(return_value=False)
mock_authz.authz = []
mock_authz.host = "www.test.com"
mock_authz_record = Mock()
mock_authz_record.body.identifier.value = "test"
mock_authz.authz.append(mock_authz_record)
mock_authz.change_id = []
mock_authz.change_id.append("123")
mock_authz.dns_challenge = []
dns_challenge = Mock()
mock_authz.dns_challenge.append(dns_challenge)
self.assertRaises(
ValueError,
self.acme.complete_dns_challenge(mock_acme, mock_authz)
)
@patch('acme.client.Client')
@patch('OpenSSL.crypto', return_value="mock_cert")
@patch('josepy.util.ComparableX509')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.find_dns_challenge')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
def test_request_certificate(self, mock_current_app, mock_find_dns_challenge, mock_jose, mock_crypto, mock_acme):
mock_cert_response = Mock()
mock_cert_response.body = "123"
mock_cert_response_full = [mock_cert_response, True]
mock_acme.poll_and_request_issuance = Mock(return_value=mock_cert_response_full)
mock_authz = []
mock_authz_record = MagicMock()
mock_authz_record.authz = Mock()
mock_authz.append(mock_authz_record)
mock_acme.fetch_chain = Mock(return_value="mock_chain")
mock_crypto.dump_certificate = Mock(return_value=b'chain')
mock_order = Mock()
self.acme.request_certificate(mock_acme, [], mock_order)
def test_setup_acme_client_fail(self):
mock_authority = Mock()
mock_authority.options = []
with self.assertRaises(Exception):
self.acme.setup_acme_client(mock_authority)
@patch('lemur.plugins.lemur_acme.plugin.BackwardsCompatibleClientV2')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
def test_setup_acme_client_success(self, mock_current_app, mock_acme):
mock_authority = Mock()
mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}]'
mock_client = Mock()
mock_registration = Mock()
mock_registration.uri = "http://test.com"
mock_client.register = mock_registration
mock_client.agree_to_tos = Mock(return_value=True)
mock_acme.return_value = mock_client
mock_current_app.config = {}
result_client, result_registration = self.acme.setup_acme_client(mock_authority)
assert result_client
assert result_registration
@patch('lemur.plugins.lemur_acme.plugin.current_app')
def test_get_domains_single(self, mock_current_app):
options = {
"common_name": "test.netflix.net"
}
result = self.acme.get_domains(options)
self.assertEqual(result, [options["common_name"]])
@patch('lemur.plugins.lemur_acme.plugin.current_app')
def test_get_domains_multiple(self, mock_current_app):
options = {
"common_name": "test.netflix.net",
"extensions": {
"sub_alt_names": {
"names": [
"test2.netflix.net",
"test3.netflix.net"
]
}
}
}
result = self.acme.get_domains(options)
self.assertEqual(result, [options["common_name"], "test2.netflix.net", "test3.netflix.net"])
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.start_dns_challenge', return_value="test")
def test_get_authorizations(self, mock_start_dns_challenge):
mock_order = Mock()
mock_order.body.identifiers = []
mock_domain = Mock()
mock_order.body.identifiers.append(mock_domain)
mock_order_info = Mock()
mock_order_info.account_number = 1
mock_order_info.domains = ["test.fakedomain.net"]
result = self.acme.get_authorizations("acme_client", mock_order, mock_order_info)
self.assertEqual(result, ["test"])
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.complete_dns_challenge', return_value="test")
def test_finalize_authorizations(self, mock_complete_dns_challenge):
mock_authz = []
mock_authz_record = MagicMock()
mock_authz_record.authz = Mock()
mock_authz_record.change_id = 1
mock_authz_record.dns_challenge.validation_domain_name = Mock()
mock_authz_record.dns_challenge.validation = Mock()
mock_authz.append(mock_authz_record)
mock_dns_provider = Mock()
mock_dns_provider.delete_txt_record = Mock()
mock_acme_client = Mock()
result = self.acme.finalize_authorizations(mock_acme_client, mock_authz)
self.assertEqual(result, mock_authz)
@patch('lemur.plugins.lemur_acme.plugin.current_app')
def test_create_authority(self, mock_current_app):
mock_current_app.config = Mock()
options = {
"plugin": {
"plugin_options": [{
"name": "certificate",
"value": "123"
}]
}
}
acme_root, b, role = self.ACMEIssuerPlugin.create_authority(options)
self.assertEqual(acme_root, "123")
self.assertEqual(b, "")
self.assertEqual(role, [{'username': '', 'password': '', 'name': 'acme'}])
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.dyn.current_app')
@patch('lemur.plugins.lemur_acme.cloudflare.current_app')
@patch('lemur.plugins.lemur_acme.plugin.dns_provider_service')
def test_get_dns_provider(self, mock_dns_provider_service, mock_current_app_cloudflare, mock_current_app_dyn,
mock_current_app):
provider = plugin.ACMEIssuerPlugin()
route53 = provider.get_dns_provider("route53")
assert route53
cloudflare = provider.get_dns_provider("cloudflare")
assert cloudflare
dyn = provider.get_dns_provider("dyn")
assert dyn
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.plugin.authorization_service')
@patch('lemur.plugins.lemur_acme.plugin.dns_provider_service')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.get_authorizations')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.finalize_authorizations')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate')
def test_get_ordered_certificate(
self, mock_request_certificate, mock_finalize_authorizations, mock_get_authorizations,
mock_dns_provider_service, mock_authorization_service, mock_current_app, mock_acme):
mock_client = Mock()
mock_acme.return_value = (mock_client, "")
mock_request_certificate.return_value = ("pem_certificate", "chain")
mock_cert = Mock()
mock_cert.external_id = 1
provider = plugin.ACMEIssuerPlugin()
provider.get_dns_provider = Mock()
result = provider.get_ordered_certificate(mock_cert)
self.assertEqual(
result,
{
'body': "pem_certificate",
'chain': "chain",
'external_id': "1"
}
)
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.plugin.authorization_service')
@patch('lemur.plugins.lemur_acme.plugin.dns_provider_service')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.get_authorizations')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.finalize_authorizations')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate')
def test_get_ordered_certificates(
self, mock_request_certificate, mock_finalize_authorizations, mock_get_authorizations,
mock_dns_provider_service, mock_authorization_service, mock_current_app, mock_acme):
mock_client = Mock()
mock_acme.return_value = (mock_client, "")
mock_request_certificate.return_value = ("pem_certificate", "chain")
mock_cert = Mock()
mock_cert.external_id = 1
mock_cert2 = Mock()
mock_cert2.external_id = 2
provider = plugin.ACMEIssuerPlugin()
provider.get_dns_provider = Mock()
result = provider.get_ordered_certificates([mock_cert, mock_cert2])
self.assertEqual(len(result), 2)
self.assertEqual(result[0]['cert'], {'body': 'pem_certificate', 'chain': 'chain', 'external_id': '1'})
self.assertEqual(result[1]['cert'], {'body': 'pem_certificate', 'chain': 'chain', 'external_id': '2'})
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client')
@patch('lemur.plugins.lemur_acme.plugin.dns_provider_service')
@patch('lemur.plugins.lemur_acme.plugin.current_app')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.get_authorizations')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.finalize_authorizations')
@patch('lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate')
@patch('lemur.plugins.lemur_acme.plugin.authorization_service')
def test_create_certificate(self, mock_authorization_service, mock_request_certificate,
mock_finalize_authorizations, mock_get_authorizations,
mock_current_app, mock_dns_provider_service, mock_acme):
provider = plugin.ACMEIssuerPlugin()
mock_authority = Mock()
mock_client = Mock()
mock_acme.return_value = (mock_client, "")
mock_dns_provider = Mock()
mock_dns_provider.credentials = '{"account_id": 1}'
mock_dns_provider.provider_type = "route53"
mock_dns_provider_service.get.return_value = mock_dns_provider
issuer_options = {
'authority': mock_authority,
'dns_provider': mock_dns_provider,
"common_name": "test.netflix.net"
}
csr = "123"
mock_request_certificate.return_value = ("pem_certificate", "chain")
result = provider.create_certificate(csr, issuer_options)
assert result
| 45.11838 | 117 | 0.683215 |
c84e4e6f094e0e62ac0113a9453a044e151f4a7e | 1,115 | py | Python | tests/lowlevel/mount.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | tests/lowlevel/mount.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | tests/lowlevel/mount.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, transformer
import os
# Ensure the mount target directory exists before any cell is mounted there.
os.makedirs("/tmp/mount-test", exist_ok=True)

# Build the graph (cells, transformer, connections, mounts) under macro mode.
# NOTE(review): indentation was lost in this copy; the extent of the `with`
# block below is reconstructed -- confirm that the compute/print section is
# meant to run outside macro mode.
with macro_mode_on():
    ctx = context(toplevel=True)
    ctx.cell1 = cell().set(1)
    ctx.cell2 = cell().set(2)
    ctx.result = cell()
    # Transformer with two inputs (a, b) and one output (c).
    ctx.tf = transformer({
        "a": "input",
        "b": "input",
        "c": "output"
    })
    ctx.cell1.connect(ctx.tf.a)
    ctx.cell2.connect(ctx.tf.b)
    ctx.code = cell("transformer").set("c = a + b")
    ctx.code.connect(ctx.tf.code)
    ctx.tf.c.connect(ctx.result)
    # Mount cells to the filesystem; `persistent=True` keeps files on exit.
    ctx.result.mount("/tmp/mount-test/myresult", persistent=True, mode="w")
    ctx.cell1.mount("/tmp/mount-test/cell1", persistent=True)
    ctx.cell2.mount("/tmp/mount-test/cell2", persistent=True)
    ctx.sub = context(toplevel=False)
    ctx.sub.mycell = cell("text").set("This is my cell\nend")

# Run the graph and observe the result after each change.
ctx.compute()
print(ctx.result.value)
ctx.cell1.set(10)
ctx.compute()
print(ctx.result.value)
print(ctx.result.value)
# Changing the transformer code triggers recomputation with the new formula.
ctx.code.set("c = float(a) + float(b) + 1000")
ctx.compute()
print(ctx.result.value)
print(ctx.status)
5985bae2dd1f53f43440f3070377fdedb954c3d1 | 1,462 | py | Python | motion.py | EmbSys-WWU/SLAMtesting | 8b92531b32d57a915b00027d985d030707b90086 | [
"MIT"
] | 1 | 2022-03-23T07:13:58.000Z | 2022-03-23T07:13:58.000Z | motion.py | EmbSys-WWU/SLAMtesting | 8b92531b32d57a915b00027d985d030707b90086 | [
"MIT"
] | null | null | null | motion.py | EmbSys-WWU/SLAMtesting | 8b92531b32d57a915b00027d985d030707b90086 | [
"MIT"
] | null | null | null | import numpy as np
# ---------------------------------------------------------------------------------------------- #
# --------------------------------- ODOMETRY MOTION MODEL -------------------------------------- #
# ---------------------------------------------------------------------------------------------- #
class Odometry:
    """Odometry motion command: an initial turn, a straight drive, a final turn."""

    def __init__(self, t1: float, d: float, t2: float):
        self.turn1 = Turn(t1)
        self.drive = Drive(d)
        self.turn2 = Turn(t2)

    def convert_to_array(self) -> np.ndarray:
        """Return the command as a float array ``[turn1, drive, turn2]``."""
        # ``np.float`` (a plain alias of builtin float) was removed in
        # NumPy 1.24 -- use the builtin directly.
        return np.array([self.turn1.amount, self.drive.amount, self.turn2.amount],
                        dtype=float)


class Motion:
    """Base class for a single scalar motion primitive."""

    def __init__(self, amount: float):
        self.amount = amount


class Turn(Motion):
    """Rotation primitive by ``amount``."""

    def __init__(self, amount: float):
        super(Turn, self).__init__(amount)


class Drive(Motion):
    """Straight-line translation primitive by ``amount``."""

    def __init__(self, amount: float):
        super(Drive, self).__init__(amount)


# ---------------------------------------------------------------------------------------------- #
# --------------------------------- VELOCITY MOTION MODEL -------------------------------------- #
# ---------------------------------------------------------------------------------------------- #

class Velocity:
    """Velocity motion command: linear speed ``v`` and ``yaw_rate``."""

    def __init__(self, v: float, yaw_rate: float):
        self.v = v
        self.yaw_rate = yaw_rate

    def convert_to_array(self) -> np.ndarray:
        """Return the command as a float array ``[v, yaw_rate]``."""
        # ``np.float`` was removed in NumPy 1.24 -- use the builtin float.
        return np.array([self.v, self.yaw_rate], dtype=float)
74a485bae55fbd91e6b68d25035ddc0cc55e7089 | 13,287 | py | Python | scripts/s3/migrate_to_external_accounts.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/s3/migrate_to_external_accounts.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/s3/migrate_to_external_accounts.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import sys
import urlparse
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from website import settings
from website.app import init_app
from website.models import User, Node
from website.oauth.models import ExternalAccount
from website.addons.s3 import settings as s3_settings
from website.addons.s3 import utils
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
# Short identifier stored on new ExternalAccount records.
PROVIDER = 's3'
# Human-readable provider name stored on new ExternalAccount records.
PROVIDER_NAME = 'Amazon S3'
# Default used when backfilling node-settings documents that predate the
# 'encrypt_uploads' field (see verify_node_settings_document).
ENCRYPT_UPLOADS = s3_settings.ENCRYPT_UPLOADS_DEFAULT
def verify_user_settings_documents(user_document):
    """
    Check that a legacy ``addons3usersettings`` mongo document is well-formed.

    A document is valid when it contains all expected fields and its
    ``owner`` reference is truthy.

    The original implementation relied on ``assert`` statements inside a
    ``try``/``except AssertionError`` block; asserts are stripped under
    ``python -O``, which would have silently marked every document valid.
    Explicit checks preserve the same True/False result without that hazard.

    :param dict user_document: raw mongo document to validate
    :return: True when the document can be migrated, False otherwise
    """
    required_fields = ('_id', 'deleted', 'owner', 'access_key', 'secret_key')
    if not all(field in user_document for field in required_fields):
        return False
    return bool(user_document.get('owner', None))
def verify_node_settings_document(document):
    """
    Check that a legacy ``addons3nodesettings`` mongo document is well-formed.

    When the document predates the ``encrypt_uploads`` field, the default
    value is written back to mongo so later migration steps can rely on the
    field being present; a failed write marks the document invalid.

    The original field checks used ``assert`` statements, which are stripped
    under ``python -O``; explicit checks preserve the same True/False result
    without that hazard.

    :param dict document: raw mongo document to validate
    :return: True when the document can be migrated, False otherwise
    """
    required_fields = ('_id', 'deleted', 'bucket', 'owner', 'user_settings')
    if not all(field in document for field in required_fields):
        return False
    if not document.get('owner', None):
        return False
    if 'encrypt_uploads' not in document:
        # Backfill the missing flag with the configured default.
        try:
            database['addons3nodesettings'].find_and_modify(
                {'_id': document['_id']},
                {
                    '$set': {
                        'encrypt_uploads': ENCRYPT_UPLOADS,
                    }
                }
            )
        except Exception:
            return False
    return True
def migrate_to_external_account(user_settings_document):
    """
    Convert one legacy ``addons3usersettings`` document into an
    ``ExternalAccount`` linked to its owning user.

    Reuses an existing ExternalAccount when one already exists for the same
    provider id (logging the duplicate use), otherwise creates and saves a
    new one from the document's S3 credentials.

    :return: ``(external_account, user, created)``; ``(None, None, None)``
        when the credentials cannot be resolved to an S3 user.
    """
    user_info = utils.get_user_info(
        access_key=user_settings_document['access_key'],
        secret_key=user_settings_document['secret_key'],
    )
    user = User.load(user_settings_document['owner'])
    if not user_info:
        # Credentials did not resolve to an AWS identity.
        return (None, None, None)

    created = False
    try:
        external_account = ExternalAccount.find_one(Q('provider_id', 'eq', user_info.id))
        logger.info('Duplicate account use found: s3usersettings {0} with id {1}'.format(user_settings_document['_id'], user._id))
    except NoResultsFound:
        created = True
        external_account = ExternalAccount(
            provider=PROVIDER,
            provider_name=PROVIDER_NAME,
            provider_id=user_info.id,
            oauth_key=user_settings_document['access_key'],
            oauth_secret=user_settings_document['secret_key'],
            display_name=user_info.display_name,
        )
        external_account.save()

    user.external_accounts.append(external_account)
    user.save()
    return external_account, user, created
def make_new_user_settings(user):
    """
    Attach a fresh S3 user-settings addon to *user*.

    Stale backrefs to the legacy AddonS3UserSettings model are removed from
    the raw user document first so the new addon starts clean.
    """
    # Kill backrefs to the old model directly in mongo, then reload so the
    # in-memory object matches the stored document.
    database['user'].find_and_modify(
        {'_id': user._id},
        {'$unset': {'__backrefs.addons.addons3usersettings': ''}},
    )
    user.reload()
    return user.get_or_add_addon('s3', override=True)
def make_new_node_settings(node, node_settings_document, external_account=None, user_settings_instance=None):
    """
    Attach a fresh S3 node-settings addon to *node*, copying the bucket from
    the legacy document and (optionally) wiring up authentication.

    :param node: Node to receive the addon
    :param dict node_settings_document: legacy addons3nodesettings document
    :param external_account: ExternalAccount to authorize with, if any
    :param user_settings_instance: new-style user settings owning the account
    """
    # Kill backrefs to the old model directly in mongo, then reload so the
    # in-memory object matches the stored document.
    database['node'].find_and_modify(
        {'_id': node._id},
        {'$unset': {'__backrefs.addons.addons3nodesettings': ''}},
    )
    node.reload()

    new_node_settings = node.get_or_add_addon('s3', auth=None, override=True, log=False)
    new_node_settings.bucket = node_settings_document['bucket']
    new_node_settings.save()

    # Only authorize when both halves of the auth pair were supplied.
    if external_account and user_settings_instance:
        new_node_settings.set_auth(
            external_account,
            user_settings_instance.owner,
            log=False,
        )
    return new_node_settings
def migrate(dry_run=True):
    """
    Migrate all legacy S3 addon documents to the ExternalAccount model.

    Walks every ``addons3usersettings`` document, creating (or reusing) an
    ExternalAccount per credential pair, then re-creates the user settings
    and every linked node settings document on the new models. Documents
    that are deleted, malformed, credential-less, or orphaned are skipped
    and tallied for the summary log at the end.

    :param bool dry_run: when True, raise at the end so the enclosing
        transaction rolls everything back.
    :raises RuntimeError: on a dry run, or when an ownerless user-settings
        document is still referenced by node settings (should never happen).
    """
    user_settings_list = list(database['addons3usersettings'].find())

    # get in-memory versions of collections and collection sizes
    old_user_settings_collection = database['addons3usersettings']
    old_user_settings_count = old_user_settings_collection.count()
    old_node_settings_collection = database['addons3nodesettings']
    old_node_settings_count = old_node_settings_collection.count()

    # Lists of IDs for logging purposes
    external_accounts_created = []
    migrated_user_settings = []
    migrated_node_settings = []

    # Skip/error tallies, reported in the summary section below.
    deleted_user_settings = []
    broken_user_settings = []
    user_no_oauth_creds = []
    invalid_oauth_creds = []
    inactive_user_or_no_owner = []
    unverifiable_node_settings = []
    deleted_node_settings = []
    nodeless_node_settings = []
    # external_account._id -> list of extra user-settings ids sharing it.
    duped_accounts = {}
    dupe_count = 0

    for user_settings_document in user_settings_list:
        # --- filter out documents that cannot or should not be migrated ---
        if user_settings_document['deleted']:
            logger.info(
                "Found addons3usersettings document (id:{0}) that is marked as deleted. It will not be migrated".format(user_settings_document['_id'])
            )
            deleted_user_settings.append(user_settings_document['_id'])
            continue
        if not verify_user_settings_documents(user_settings_document):
            logger.info(
                "Found broken addons3usersettings document (id:{0}) that could not be fixed.".format(user_settings_document['_id'])
            )
            broken_user_settings.append(user_settings_document['_id'])
            continue
        if not user_settings_document['access_key'] or not user_settings_document['secret_key']:
            logger.info(
                "Found addons3usersettings document (id:{0}) with incomplete or no oauth credentials. It will not be migrated.".format(user_settings_document['_id'])
            )
            user_no_oauth_creds.append(user_settings_document['_id'])
            continue

        # --- create or reuse the ExternalAccount for these credentials ---
        external_account, user, new = migrate_to_external_account(user_settings_document)
        if not external_account:
            # Credentials did not validate against S3.
            invalid_oauth_creds.append(user_settings_document['_id'])
            logger.warn('AddonS3UserSettings<{}> has invalid credentials. It will not be migrated'.format(
                user_settings_document['_id']
            ))
            continue
        if new:
            external_accounts_created.append(external_account._id)
        else:
            # Same credentials used by multiple user settings: record the dupe.
            try:
                duped_accounts[external_account._id].append(user_settings_document['_id'])
            except KeyError:
                duped_accounts[external_account._id] = [user_settings_document['_id']]
            finally:
                dupe_count += 1

        linked_node_settings_documents = old_node_settings_collection.find({
            'user_settings': user_settings_document['_id']
        })
        if not user or not user.is_active:
            if linked_node_settings_documents.count() and not user.is_merged:
                # Node settings pointing at an ownerless/inactive account is
                # an invariant violation -- abort the whole migration.
                logger.warn("AddonS3UserSettings<_id:{0}> has no owner, but is used by AddonS3NodeSettings: {1}.".format(
                    user_settings_document['_id'],
                    ', '.join([each['_id'] for each in linked_node_settings_documents])
                ))
                raise RuntimeError("This should never happen.")
            else:
                logger.info("AddonS3UserSettings<_id:{0}> either has no owner or the owner's account is not active, and will not be migrated.".format(
                    user_settings_document['_id']
                ))
                inactive_user_or_no_owner.append(user_settings_document['_id'])
                continue
        else:
            user_settings_instance = make_new_user_settings(user)

        # --- migrate every node-settings document linked to this user ---
        for node_settings_document in linked_node_settings_documents:
            if not verify_node_settings_document(node_settings_document):
                logger.info(
                    "Found addons3nodesettings document (id:{0}) that could not be verified. It will not be migrated.".format(
                        node_settings_document['_id'],
                    )
                )
                unverifiable_node_settings.append((node_settings_document['_id']))
                continue
            if node_settings_document['deleted']:
                logger.info(
                    "Found addons3nodesettings document (id:{0}) that is marked as deleted.".format(
                        node_settings_document['_id'],
                    )
                )
                deleted_node_settings.append(node_settings_document['_id'])
                continue
            node = Node.load(node_settings_document['owner'])
            if not node:
                logger.info("AddonS3NodeSettings<_id:{0}> has no associated Node, and will not be migrated.".format(
                    node_settings_document['_id']
                ))
                nodeless_node_settings.append(node_settings_document['_id'])
                continue
            else:
                # Re-fetch the document: verify_node_settings_document may
                # have backfilled 'encrypt_uploads' in mongo.
                node_settings_document = database['addons3nodesettings'].find_one({'_id': node_settings_document['_id']})
                make_new_node_settings(
                    node,
                    node_settings_document,
                    external_account,
                    user_settings_instance
                )
                migrated_node_settings.append(node_settings_document['_id'])
        migrated_user_settings.append(user_settings_document['_id'])

    # --- summary logging -------------------------------------------------
    logger.info(
        "Created {0} new external accounts from {1} old user settings documents:\n{2}".format(
            len(external_accounts_created), old_user_settings_count, [e for e in external_accounts_created]
        )
    )
    logger.info(
        "Successfully migrated {0} user settings from {1} old user settings documents:\n{2}".format(
            len(migrated_user_settings), old_user_settings_count, [e for e in migrated_user_settings]
        )
    )
    logger.info(
        "Successfully migrated {0} node settings from {1} old node settings documents:\n{2}".format(
            len(migrated_node_settings), old_node_settings_count, [e for e in migrated_node_settings]
        )
    )
    if duped_accounts:
        logger.info(
            "Found {0} cases of duplicate account use across {1} addons3usersettings, causing ExternalAccounts to not be created for {2} user settings.\n\
            Note that original linked user settings are excluded from this list:\n{3}".format(
                len(duped_accounts),
                len(duped_accounts) + dupe_count,
                dupe_count,
                ['{}: {}'.format(e, duped_accounts[e]) for e in duped_accounts.keys()]
            )
        )
    if user_no_oauth_creds:
        logger.warn(
            "Skipped migration of {0} invalid user settings with a lack of oauth credentials:\n{1}".format(
                len(user_no_oauth_creds), [e for e in user_no_oauth_creds]
            )
        )
    if invalid_oauth_creds:
        logger.warn(
            "Skipped migration of {0} user settings due to invalid oauth credentials:\n{1}".format(
                len(invalid_oauth_creds), [e for e in invalid_oauth_creds]
            )
        )
    if deleted_user_settings:
        logger.warn(
            "Skipped migration of {0} deleted user settings: {1}".format(
                len(deleted_user_settings), [e for e in deleted_user_settings]
            )
        )
    if broken_user_settings:
        logger.warn(
            "Skipped migration of {0} addons3usersettings because they could not be verified:\n{1}".format(
                len(broken_user_settings), [e for e in broken_user_settings]
            )
        )
    if inactive_user_or_no_owner:
        logger.warn(
            "Skipped migration of {0} user settings due to an inactive or null owner:\n{1}".format(
                len(inactive_user_or_no_owner), [e for e in inactive_user_or_no_owner]
            )
        )
    if unverifiable_node_settings:
        logger.warn(
            "Skipped migration of {0} addons3nodesettings documents because they could not be verified:\n{1}".format(
                len(unverifiable_node_settings), [e for e in unverifiable_node_settings]
            )
        )
    if deleted_node_settings:
        logger.warn(
            "Skipped migration of {0} deleted node settings:\n{1}".format(
                len(deleted_node_settings), [e for e in deleted_node_settings]
            )
        )
    if nodeless_node_settings:
        logger.warn(
            "Skipped migration of {0} node settings without an associated node:\n{1}".format(
                len(nodeless_node_settings), [e for e in nodeless_node_settings]
            )
        )
    if dry_run:
        # Raising aborts the surrounding TokuTransaction, undoing all writes.
        raise RuntimeError('Dry run, transaction rolled back.')
def main():
    """Script entry point: parse the ``--dry`` flag and run the migration."""
    dry_run = '--dry' in sys.argv
    if not dry_run:
        # Only attach a file logger for real (non-dry) runs.
        script_utils.add_file_logger(logger, __file__)
    init_app(set_backends=True, routes=False)
    # Run inside a transaction so a dry run can roll everything back.
    with TokuTransaction():
        migrate(dry_run=dry_run)


if __name__ == "__main__":
    main()
2c4cee124a79aa82a21b5f9b51b2fa2837461fbf | 3,433 | py | Python | nlpir/__init__.py | lihea/nlpir-python | 62bae60ff93c5f3447d26c2817bbbdce40445cc5 | [
"MIT"
] | null | null | null | nlpir/__init__.py | lihea/nlpir-python | 62bae60ff93c5f3447d26c2817bbbdce40445cc5 | [
"MIT"
] | null | null | null | nlpir/__init__.py | lihea/nlpir-python | 62bae60ff93c5f3447d26c2817bbbdce40445cc5 | [
"MIT"
] | null | null | null | # coding : utf-8
import os
import typing
import re
import logging
import sys
import functools
__version__ = "0.0.1"

# Absolute path of this package; used to locate the bundled ``Data`` folder.
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))

logger = logging.getLogger("nlpir")


class NLPIRException(Exception):
    """Raised for NLPIR-specific errors."""


def clean_logs(data_path: typing.Optional[str] = None, include_current: bool = False):
    """
    Delete NLPIR-generated log files (names matching ``\\d{8}\\.log|err``).

    :param data_path: directory to scan; defaults to the package ``Data`` dir
    :param include_current: also scan the current working directory
    """
    if data_path is None:
        data_path = os.path.join(PACKAGE_DIR, "Data")

    scan_dirs = [data_path]
    if include_current:
        scan_dirs.append(os.path.abspath("./"))

    # Collect absolute paths of every matching file across all scanned dirs.
    targets = [
        os.path.abspath(os.path.join(directory, entry))
        for directory in scan_dirs
        for entry in os.listdir(directory)
        if re.match(r'\d{8}\.log|err', entry)
    ]

    logger.info("The following file will be deleted: \n\t{}".format("\n\t".join(targets)))
    for target in targets:
        try:
            os.remove(target)
        except OSError as exc:
            # Log and keep going; one undeletable file should not abort cleanup.
            logger.error(exc)
def get_instance(func: callable) -> callable:
    """
    A wrapper to init instance when call the function.

    With a plain single-layer decorator, the decorator body would run as soon
    as the module is imported, generating the wrapped function immediately
    and initializing the backing class instance too early. To make the
    instance initialization happen only when the function is actually called,
    :func:`functools.wraps` is used below: at import time only the function
    itself (without arguments) is processed, so nothing is instantiated yet.

    The reason for deferring instantiation to call time is so that
    :func:`init_setting` can still be used after importing the module --
    similar to :func:`logging.basicConfig`, there remains one opportunity to
    change the initialization parameters before first use.
    """
    @functools.wraps(func)
    def wraps(*args, **kwargs):
        """Lazily initialize the owning module's NLPIR instance, then delegate."""
        # The module that defines the wrapped function holds its configuration
        # (__cls__, __lib__, __data__, ...) as module-level attributes.
        module = sys.modules[func.__module__]
        # Apply default settings only when no instance has been created yet.
        module = init_setting(module) if module.__instance__ is None else module
        module.__instance__ = module.__cls__(
            encode=module.__nlpir_encode__,
            lib_path=module.__lib__,
            data_path=module.__data__,
            license_code=module.__license_code__
        ) if module.__instance__ is None else module.__instance__
        return func(*args, **kwargs)
    return wraps
def init_setting(
        init_module,
        encode: typing.Optional[int] = None,
        lib_path: typing.Optional[str] = None,
        data_path: typing.Optional[str] = None,
        license_code: str = ''
):
    """
    Init the NLPIR module for custom usage.

    **Only can be used before calling any process function in that module**
    (once an instance exists the settings are frozen).

    Note: ``lib_path`` was previously annotated ``Optional[int]``; per this
    function's own docs it is a filesystem path, so the annotation is
    corrected to ``Optional[str]`` (no behavior change).

    :param ModuleType init_module: The high-level module want to use
    :param int encode: same as in :class:`nlpir.native.nlpir_base.NLPIRBase()`
    :param str lib_path: same as in :class:`nlpir.native.nlpir_base.NLPIRBase()`
    :param str data_path: same as in :class:`nlpir.native.nlpir_base.NLPIRBase()`
    :param str license_code: same as in :class:`nlpir.native.nlpir_base.NLPIRBase()`
    :raise: NLPIRException
    :return: init module
    """
    if init_module.__instance__ is not None:
        raise NLPIRException("Already have a instance can not change the setting")
    # Only overwrite a stored setting when the caller supplied a value;
    # ``None`` means "keep the module's current default".
    if encode is not None:
        init_module.__nlpir_encode__ = encode
    if lib_path is not None:
        init_module.__lib__ = lib_path
    if data_path is not None:
        init_module.__data__ = data_path
    if license_code is not None:
        init_module.__license_code__ = license_code
    return init_module
| 32.695238 | 109 | 0.687154 |
415e8dc55ec17899315a208fa9eba2ce10e1e5dc | 144 | py | Python | HanderCode/aidaiwangApp/aidaiwangApp/ChangePassword_from_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | HanderCode/aidaiwangApp/aidaiwangApp/ChangePassword_from_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | HanderCode/aidaiwangApp/aidaiwangApp/ChangePassword_from_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'aidai_TEC_QA'
# -*- date:'2017/8/1 0001' -*-
def start_to_changepassword():
    """Entry point for the change-password flow (placeholder: logs a notice)."""
    message = u'change password'
    print(message)
10eb83f297ee075de917f16728d8d265ecb4f2db | 5,965 | py | Python | src/cli.py | TheBossProSniper/electric-windows | 0c41bb4c7da614ef193b314a92ed98f9cd893da2 | [
"Apache-2.0"
] | 210 | 2021-03-03T05:26:46.000Z | 2022-02-21T17:25:06.000Z | src/cli.py | TheBossProSniper/electric-windows | 0c41bb4c7da614ef193b314a92ed98f9cd893da2 | [
"Apache-2.0"
] | 7 | 2021-03-22T14:25:48.000Z | 2022-03-18T04:29:22.000Z | src/cli.py | TheBossProSniper/electric-windows | 0c41bb4c7da614ef193b314a92ed98f9cd893da2 | [
"Apache-2.0"
] | 9 | 2021-03-11T15:46:36.000Z | 2021-09-21T09:13:58.000Z | ######################################################################
# SUPERCHARGECLI (EXTENSIONS) #
######################################################################
# -*- coding: utf-8 -*-
"""
Extension for the python ``click`` module to provide
a group with a git-like *did-you-mean* feature.
"""
import click
import difflib
from colorama import Fore
# Version of this did-you-mean extension, not of electric itself.
__version__ = "0.0.3"
# click 7.x changed several helper APIs (e.g. ``get_short_help_str``);
# this flag lets code below branch on the installed major version.
_click7 = click.__version__[0] >= "7"
class DYMMixin(object): # pylint: disable=too-few-public-methods
    """
    Mixin class for click MultiCommand inherited classes
    to provide git-like *did-you-mean* functionality when
    a certain command is not registered.

    It also accepts an ``aliases`` keyword on ``command``/``group`` and
    shows those aliases in the generated help output.
    """
    def __init__(self, *args, **kwargs):
        # Tunables forwarded to difflib.get_close_matches: at most
        # ``max_suggestions`` candidates with similarity >= ``cutoff`` (0..1).
        self.max_suggestions = kwargs.pop("max_suggestions", 3)
        self.cutoff = kwargs.pop("cutoff", 0.5)
        super(DYMMixin, self).__init__(*args, **kwargs)
        # canonical command name -> list of aliases
        self._commands = {}
        # alias -> canonical command name
        self._aliases = {}
    def resolve_command(self, ctx, args):
        """
        Overrides clicks ``resolve_command`` method
        and appends *Did you mean ...* suggestions
        to the raised exception message
        """
        original_cmd_name = click.utils.make_str(args[0])
        try:
            return super(DYMMixin, self).resolve_command(ctx, args)
        except click.exceptions.UsageError as error:
            # Unknown command: enrich the error with close matches and re-raise.
            error_msg = str(error)
            matches = difflib.get_close_matches(
                original_cmd_name,
                self.list_commands(ctx),
                self.max_suggestions,
                self.cutoff,
            )
            if matches:
                error_msg += "\n\nDid you mean one of these?\n %s" % "\n ".join(
                    matches
                ) # pylint: disable=line-too-long
            raise click.exceptions.UsageError(error_msg, error.ctx)
    def command(self, *args, **kwargs):
        """Like ``click``'s ``command`` decorator, plus an optional
        ``aliases`` keyword (list of alternative command names)."""
        aliases = kwargs.pop("aliases", [])
        decorator = super(DYMMixin, self).command(*args, **kwargs)
        if not aliases:
            return decorator
        # Wrap click's decorator so alias bookkeeping runs once the command
        # object exists (and therefore has its final name).
        def _decorator(f):
            cmd = decorator(f)
            if aliases:
                self._commands[cmd.name] = aliases
                for alias in aliases:
                    self._aliases[alias] = cmd.name
            return cmd
        return _decorator
    def group(self, *args, **kwargs):
        """Like ``click``'s ``group`` decorator, plus an optional
        ``aliases`` keyword (list of alternative group names)."""
        aliases = kwargs.pop("aliases", [])
        decorator = super(DYMMixin, self).group(*args, **kwargs)
        if not aliases:
            return decorator
        # Same alias bookkeeping as in ``command`` above.
        def _decorator(f):
            cmd = decorator(f)
            if aliases:
                self._commands[cmd.name] = aliases
                for alias in aliases:
                    self._aliases[alias] = cmd.name
            return cmd
        return _decorator
    def resolve_alias(self, cmd_name):
        """Return the canonical name for ``cmd_name`` (which may be an
        alias); unknown names pass through unchanged."""
        if cmd_name in self._aliases:
            return self._aliases[cmd_name]
        return cmd_name
    def get_command(self, ctx, cmd_name):
        """Alias-aware command lookup; implicitly returns None when the
        resolved name is not registered."""
        cmd_name = self.resolve_alias(cmd_name)
        command = super(DYMMixin, self).get_command(ctx, cmd_name)
        if command:
            return command
    def format_commands(self, ctx, formatter):
        """Write the "Commands" help section, appending each command's
        aliases in parentheses after its name."""
        rows = []
        sub_commands = self.list_commands(ctx)
        # Width budget for the short-help column.
        # NOTE(review): ``max`` raises ValueError when there are no
        # sub-commands at all - presumably never the case here; confirm.
        max_len = max(len(cmd) for cmd in sub_commands)
        limit = formatter.width - 6 - max_len
        for sub_command in sub_commands:
            cmd = self.get_command(ctx, sub_command)
            if cmd is None:
                continue
            # Skip commands flagged as hidden (attribute exists on click >= 7).
            if hasattr(cmd, "hidden") and cmd.hidden:
                continue
            if sub_command in self._commands:
                aliases = ",".join(sorted(self._commands[sub_command]))
                sub_command = "{0} ({1})".format(sub_command, aliases)
            # click >= 7 renamed the short-help accessor.
            cmd_help = (
                cmd.get_short_help_str(limit) if _click7 else cmd.short_help or ""
            )
            rows.append((sub_command, cmd_help))
        if rows:
            with formatter.section("Commands"):
                formatter.write_dl(rows)
    def format_help(self, ctx, formatter):
        """
        Overrides default click cli help command
        """
        # Fully replaces click's generated help with a hand-written,
        # colorama-colourised overview; ``formatter`` is intentionally unused.
        message = f"""Electric Package Manager v1.0.0 Stable Build
Usage: electric <command> [<options>]
{Fore.LIGHTGREEN_EX}Commands:{Fore.RESET}
{Fore.LIGHTCYAN_EX}Software Management{Fore.RESET}
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} install
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} uninstall
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} update
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} bundle
{Fore.LIGHTCYAN_EX}Explore Packages{Fore.RESET}
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} list
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} search
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} show
{Fore.LIGHTCYAN_EX}Configuration Development And Management{Fore.RESET}
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} new
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} config
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} generate
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} sign
{Fore.LIGHTCYAN_EX}Customization And Cleanup{Fore.RESET}
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} feature
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} settings
{Fore.LIGHTMAGENTA_EX}*{Fore.RESET} cleanup"""
        click.echo(message)
class SuperChargeCLI(DYMMixin, click.Group): # pylint: disable=too-many-public-methods
    """
    click Group to provide git-like
    *did-you-mean* functionality when a certain
    command is not found in the group.
    """
    # DYMMixin is listed first so its overrides (resolve_command,
    # format_help, alias handling) win over click.Group's in the MRO.
    # def format_help(self, ctx, formatter):
    #     # Custom Help Message =>
    #     click.echo(click.style('Commands :', fg='bright_green'))
    #     click.echo('Next Line')
class DYMCommandCollection(
    DYMMixin, click.CommandCollection
): # pylint: disable=too-many-public-methods
    """
    click CommandCollection to provide git-like
    *did-you-mean* functionality when a certain
    command is not found in the group.

    DYMMixin is listed first so its overrides take precedence in the MRO.
    """
| 32.955801 | 87 | 0.591618 |
93f5f6192d3c9986958c7769c0cf82aab9f413aa | 26 | py | Python | imgtda/locally_striped/__init__.py | rachellevanger/tda-image-analysis | 5560e6ec9e2b7b74f91cad25a9cdefa1df172711 | [
"MIT"
] | 6 | 2017-04-08T19:46:14.000Z | 2019-11-12T04:43:10.000Z | imgtda/locally_striped/__init__.py | rachellevanger/tda-image-analysis | 5560e6ec9e2b7b74f91cad25a9cdefa1df172711 | [
"MIT"
] | 1 | 2019-08-23T21:20:03.000Z | 2019-08-23T21:20:03.000Z | imgtda/locally_striped/__init__.py | rachellevanger/tda-image-analysis | 5560e6ec9e2b7b74f91cad25a9cdefa1df172711 | [
"MIT"
] | null | null | null |
from image import Image
| 6.5 | 23 | 0.769231 |
0c54caf6d281e4add6a83c8fdf437a92dfcda757 | 6,247 | py | Python | singlepath/plot_singlepath.py | LARG/regression-importance-sampling | 8cf2acf9313ab270c192fda29e1d5c1db68c2acc | [
"MIT"
] | 8 | 2019-06-06T17:56:08.000Z | 2021-11-27T05:42:40.000Z | singlepath/plot_singlepath.py | LARG/regression-importance-sampling | 8cf2acf9313ab270c192fda29e1d5c1db68c2acc | [
"MIT"
] | 1 | 2020-12-21T16:08:01.000Z | 2021-01-04T13:53:48.000Z | singlepath/plot_singlepath.py | LARG/regression-importance-sampling | 8cf2acf9313ab270c192fda29e1d5c1db68c2acc | [
"MIT"
] | 6 | 2019-06-14T00:18:31.000Z | 2022-02-05T21:50:27.000Z | """Plot singlepath results."""
from __future__ import print_function
from __future__ import division
import os
import argparse
import numpy as np
from matplotlib import pyplot as plt
import results_pb2
parser = argparse.ArgumentParser()
parser.add_argument('result_directory', help='Result directory with results to load.')
parser.add_argument('--plot_file', help='File to save plot to.')
FLAGS = parser.parse_args()
# The confidence level. 1.96 is for 95% confidence intervals
Z_SCORE = 1.96
# Plot params
plot_params = {'bfont': 30,
'lfont': 30,
'tfont': 25,
'legend': True,
'legend_loc': 1,
'legend_cols': 2,
'y_range': [10e-8, 1],
'x_range': [100, 1e4],
'y_range': None,
'x_range': None,
'log_scale': True,
'x_label': 'Number of Trajectories',
'y_label': 'Mean Squared Error',
'plot_error': True,
'shade_error': True,
'x_mult': 100}
methods_to_plot = ['RIS(1)', 'RIS(4)', 'RIS(5)', 'IS', 'REG', 'OIS']
class LineStyle(object): # noqa
def __init__(self, style, color, width, marker=None, markersize=None, dashes=None, alpha=0.5): # noqa
self.color = color
self.style = style
self.width = width
self.marker = marker
self.markersize = markersize
self.dashes = dashes
self.alpha = alpha
def get_line_style(label): # noqa
if label == 'RIS(0)':
return LineStyle('-', 'b', 3, alpha=0.25)
elif label == 'OIS':
return LineStyle('--', 'r', 3, alpha=0.25)
elif label == 'REG':
return LineStyle('-', 'g', 6, alpha=0.25)
elif label == 'RIS(2)':
return LineStyle('-', 'k', 3, alpha=0.25)
elif label == 'RIS(4)':
return LineStyle('-.', 'k', 3, alpha=0.25)
elif label == 'RIS(1)':
return LineStyle('--', 'g', 3, alpha=0.25)
elif label == 'RIS(3)':
return LineStyle(':', 'c', 4, alpha=0.25)
else:
return None
def set_line_style(label, line): # noqa
style = get_line_style(label)
if style is None:
return
line.set_linestyle(style.style)
if style.marker is not None:
line.set_marker(style.marker)
if style.markersize is not None:
line.set_markersize(style.markersize)
line.set_color(style.color)
line.set_linewidth(style.width)
if style.dashes is not None:
line.set_dashes(style.dashes)
def get_label(label): # noqa
strip_len = len('mse') + 1
label = label[:-strip_len]
if label == 'IS':
return 'OIS'
return label
def load_results(result_directory): # noqa
data = {}
results = results_pb2.Results()
for filename in os.listdir(result_directory):
with open(os.path.join(result_directory, filename), 'rb') as f:
try:
results.ParseFromString(f.read())
except Exception as e:
# Checking if is cluster stderr file.
if filename.endswith('.err'):
continue
raise e
for method in results.methods:
mse_key = '%s_mse' % method.method_name
if mse_key not in data:
data[mse_key] = []
data[mse_key].append(np.array(method.mse))
return data
def compute_stats(data): # noqa
means = {}
confidences = {}
zero_conf = False
for key in data:
data[key] = np.array(data[key])
means[key] = np.mean(data[key], axis=0)
confidences[key] = (Z_SCORE * np.std(data[key], axis=0) /
np.sqrt(np.size(data[key], axis=0)))
print(key, np.size(data[key], axis=0))
if zero_conf:
confidences[key] = np.zeros(np.size(means[key]))
return means, confidences
def main(): # noqa
if not FLAGS.result_directory:
print('Must provide results directory.')
return
data = load_results(FLAGS.result_directory)
means, confidences = compute_stats(data)
fig, ax = plt.subplots()
fig.set_size_inches(13.5, 12.0, forward=True)
ktp = ('%s_mse' % key for key in methods_to_plot if '%s_mse' % key in means)
for key in ktp:
y_data = means[key]
err = confidences[key]
label = get_label(key)
print('Checking', label)
if methods_to_plot is not None and label not in methods_to_plot:
continue
if 'inf' in label:
label = 'RIS(5)'
if label.startswith('RIS'):
print (label)
end = label.find(')')
num = int(label[4:end]) - 1
label = '%s%d%s' % (label[:4], num, label[end:])
style = get_line_style(label)
xs = np.arange(len(y_data)) + 1
# To account for the fact that the first 100 points are sampled more frequently.
# See singlepath.py and how eval_freq is used.
xs *= plot_params['x_mult']
line, = plt.plot(xs, y_data, label=label)
alpha = 0.25 if style is None else style.alpha
color = line.get_color() if style is None else style.color
plt.fill_between(xs, y_data - err, y_data + err, alpha=alpha, facecolor=color)
if style is not None:
set_line_style(label, line)
x_title = plot_params['x_label']
y_title = plot_params['y_label']
if plot_params['log_scale']:
ax.set_xscale('log')
ax.set_yscale('log')
if plot_params['y_range'] is not None:
plt.ylim(plot_params['y_range'])
if plot_params['x_range'] is not None:
plt.xlim(plot_params['x_range'])
ax.set_xlabel(x_title, fontsize=plot_params['bfont'])
ax.set_ylabel(y_title, fontsize=plot_params['bfont'])
ax.xaxis.set_tick_params(labelsize=plot_params['tfont'])
ax.yaxis.set_tick_params(labelsize=plot_params['tfont'])
if plot_params['legend']:
plt.legend(fontsize=plot_params['lfont'], loc=plot_params['legend_loc'],
ncol=plot_params['legend_cols'])
if FLAGS.plot_file is None:
plt.show()
else:
fig.savefig(FLAGS.plot_file)
if __name__ == '__main__':
main()
| 30.473171 | 106 | 0.582199 |
371fc9d09ea3eda96514da7afe64c2d3d161f4d5 | 1,167 | py | Python | layers/activation.py | colinrgodsey/sm-depth | 01fef37fe35c88964b358f2c4203e6490aa818ee | [
"MIT"
] | null | null | null | layers/activation.py | colinrgodsey/sm-depth | 01fef37fe35c88964b358f2c4203e6490aa818ee | [
"MIT"
] | null | null | null | layers/activation.py | colinrgodsey/sm-depth | 01fef37fe35c88964b358f2c4203e6490aa818ee | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.utils import get_custom_objects
class Clampazzo(tf.keras.layers.Layer):
def __init__(self, beta=1., **kwargs):
super(Clampazzo, self).__init__(**kwargs)
self.beta = beta
def get_config(self):
config = {
'beta': self.beta
}
config.update(super(Clampazzo, self).get_config())
return config
def build(self, input_shapes):
self.built = True
def call(self, inputs):
# TEST: could also be softsign instead of tanh?
#return tf.nn.tanh(inputs) * tf.nn.sigmoid(self.beta * inputs)
return tf.nn.softsign(inputs) * tf.nn.sigmoid(self.beta * inputs)
class Swish(tf.keras.layers.Layer):
def __init__(self, beta=1., **kwargs):
super(Swish, self).__init__(**kwargs)
self.beta = beta
def get_config(self):
config = {
'beta': self.beta
}
config.update(super(Swish, self).get_config())
return config
def build(self, input_shapes):
self.built = True
def call(self, inputs):
return inputs * tf.nn.sigmoid(self.beta * inputs)
get_custom_objects().update({
'clampz': Clampazzo(),
'clampazzo': Clampazzo(),
'swish': Swish()
})
| 23.816327 | 69 | 0.664953 |
b0d65d8fe8a120207bca38eb4b807e47d2c4f590 | 1,307 | py | Python | tests/classification/logistic_regression.py | MartinBCN/BasicML | 7ad7bd075c62d883143dd10b54c80287d06a99b0 | [
"MIT"
] | null | null | null | tests/classification/logistic_regression.py | MartinBCN/BasicML | 7ad7bd075c62d883143dd10b54c80287d06a99b0 | [
"MIT"
] | null | null | null | tests/classification/logistic_regression.py | MartinBCN/BasicML | 7ad7bd075c62d883143dd10b54c80287d06a99b0 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from src.classification.logistic_regression import LogReg
class TestStringMethods(unittest.TestCase):
    """Smoke tests for LogReg: weight initialisation, forward and backward."""

    def setUp(self):
        # Fresh random data and a freshly initialised model for every test.
        self.X = np.random.randn(100, 4)
        self.Y = np.random.randn(100, 2)
        self.lr = LogReg()
        self.lr.initialize_weights(self.X, self.Y)

    def test_initialize(self):
        # After initialisation the parameters must be real arrays,
        # not scalar placeholders.
        scalar_shape = np.array(1).shape
        self.assertNotEqual(self.lr.W.shape, scalar_shape)
        self.assertNotEqual(self.lr.b.shape, scalar_shape)

    def test_forward(self):
        self.Y_hat = self.lr.forward(self.X)
        # Predictions mirror the target shape, and each row sums to one
        # (i.e. forms a probability distribution over the classes).
        self.assertEqual(self.Y_hat.shape, self.Y.shape)
        self.assertAlmostEqual(self.Y_hat[0].sum(), 1)

    def test_backward(self):
        # The backward pass must run without raising when fed a
        # forward-pass output together with random targets.
        targets = self.Y
        predictions = self.lr.forward(self.X)
        self.lr.backward(self.X, targets, predictions)
        assert True

    # TODO(review): derivative checks (derivative_W / derivative_b shapes
    # matching W and b) remain disabled pending a settled LogReg API.
# Allow running this test module directly: ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
d66c62cfa935a0919db959b1e85bdf7a70bb7156 | 2,093 | py | Python | tests/functional/test_59_delete.py | tomberek/liberaforms | 5830acea9d456208ae612c6c548eb35c2d3c4106 | [
"MIT"
] | null | null | null | tests/functional/test_59_delete.py | tomberek/liberaforms | 5830acea9d456208ae612c6c548eb35c2d3c4106 | [
"MIT"
] | null | null | null | tests/functional/test_59_delete.py | tomberek/liberaforms | 5830acea9d456208ae612c6c548eb35c2d3c4106 | [
"MIT"
] | null | null | null | """
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2021 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os
import pytest
from liberaforms.models.form import Form
from liberaforms.models.answer import Answer, AnswerAttachment
from liberaforms.models.log import FormLog
from .utils import login
class TestDeleteForm():
    def test_delete_form(self, client, users, forms):
        """Deleting a form requires typing its exact slug and cascades
        the deletion to answers, attachments and form logs."""
        login(client, users['editor'])
        form = forms['test_form']
        form_id = form.id
        initial_answers_count = form.answers.count()
        # The confirmation page must show how many answers will be lost.
        response = client.get(
                        f"/forms/delete/{form_id}",
                        follow_redirects=False,
                    )
        assert response.status_code == 200
        html = response.data.decode()
        assert '<!-- delete_form_page -->' in html
        assert f'<span class="highlightedText">{initial_answers_count}' in html
        # test incorrect slug: the form must survive and the user stays
        # on the confirmation page.
        response = client.post(
                        f"/forms/delete/{form_id}",
                        data={
                            "slug": f"{form.slug}-wrong"
                        },
                        follow_redirects=False,
                    )
        assert response.status_code == 200
        html = response.data.decode()
        assert '<!-- delete_form_page -->' in html
        # test correct slug: deletion succeeds and redirects to the
        # forms listing.
        response = client.post(
                        f"/forms/delete/{form_id}",
                        data={
                            "slug": form.slug
                        },
                        follow_redirects=True,
                    )
        assert response.status_code == 200
        html = response.data.decode()
        assert '<!-- my_forms_page -->' in html
        # The form and everything that hangs off it must be gone.
        # (``is None`` instead of ``== None``: identity test, flake8 E711.)
        assert Form.find(id=form_id) is None
        assert Answer.find(form_id=form_id) is None
        assert AnswerAttachment.find(form_id=form_id) is None
        assert FormLog.find(form_id=form_id) is None
| 38.054545 | 79 | 0.555184 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.