index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,500 | b158ae2372122c7dc082a81779c3fe938ead4b4f | # Dakota Bourne (db2nb) Nick Manalac (ntm4kd)
import robot
def square():
    """Walk the robot south along one wall, counting cells, then announce
    the square of the side length via robot.say()."""
    bot = robot.Robot(1)
    side = 1
    # Keep stepping south until a wall blocks the way.
    while bot.check_south():
        bot.south()
        side += 1
    bot.say(side ** 2)
def rect():
    """Measure a rectangular room by walking to its south wall and then its
    east wall, and announce the area (height * width)."""
    bot = robot.Robot(2)
    height = 1
    # Count rows by walking south until blocked.
    while bot.check_south():
        bot.south()
        height += 1
    width = 1
    # Count columns by walking east until blocked.
    while bot.check_east():
        bot.east()
        width += 1
    bot.say(height * width)
def middle1():
    """From an arbitrary start inside a rectangular room, walk to the
    north-west corner, then measure the room and announce its area."""
    bot = robot.Robot(3)
    # First reach the north-west corner (prefer west, then north).
    while True:
        if bot.check_west():
            bot.west()
        elif bot.check_north():
            bot.north()
        else:
            break
    height = 1
    while bot.check_south():
        bot.south()
        height += 1
    width = 1
    while bot.check_east():
        bot.east()
        width += 1
    bot.say(height * width)
def middle():
    """Greedily explore connected rooms, tracking visited coordinates, and
    announce a lower bound on the number of rooms.

    BUGFIX: the original appended the *same* ``new_cord`` list object
    (forever ``[0, 0]``) to ``room_lst`` on every move, so visited rooms were
    never actually recorded and the membership tests could never succeed.
    It also moved east in the final ``else`` branch without checking
    ``check_east()``, and stalled forever when a direction was open but
    already visited.  Each move now appends a fresh ``[x, y]`` and every
    direction is tried with both a wall check and a visited check.
    """
    r = robot.Robot(4)
    visited = [[0, 0]]  # coordinates of rooms seen so far
    x = 0
    y = 0
    while True:
        # Try each direction in the original priority order, but only move
        # into rooms that are both reachable and not yet visited.
        if r.check_north() and [x, y + 1] not in visited:
            r.north()
            y += 1
        elif r.check_west() and [x - 1, y] not in visited:
            r.west()
            x -= 1
        elif r.check_south() and [x, y - 1] not in visited:
            r.south()
            y -= 1
        elif r.check_east() and [x + 1, y] not in visited:
            r.east()
            x += 1
        else:
            # No unvisited neighbor reachable from here: stop exploring.
            break
        visited.append([x, y])
    # Greedy walk without backtracking may not see every room, hence the
    # "at least" phrasing (unchanged from the original).
    a = 'at least ' + str(len(visited)) + ' rooms'
    r.say(a)
middle() |
994,501 | b6e6e6bbb3aebb7b233ab7c4f8a636e602cc49bf | # coding=utf-8
# 这个是获取芝麻代理ip的小程序,通过requests.get 访API 接口,获取json数据,数据选型的时候勾选过期时间
# 。此api设定为每次获取任意个ip,ip存活时间为5-25分钟不等。放到redis中
# 启动blog爬虫之前,先启动此proxies.py 让redis的代理ip实时更新着
import requests
from time import sleep
import json
from redis import *
import time
import datetime
from other_process.python_send_emil import let_send
pool = ConnectionPool(host='127.0.0.1', port=6379, db=14)
r = StrictRedis(connection_pool=pool)
ip_key_name = []
def get_ip():
    """Request a batch of 5 proxy ips from the Zhima proxy API.

    Returns the list of ip dicts (used downstream with keys 'ip', 'port',
    'expire_time') on success, or None when the API reports failure.
    """
    # Fetch 5 ips per call (num=5 in the query string).
    # NOTE(review): the trailing '®ions=' looks like a mangled '&regions='
    # (HTML entity '&reg') — confirm against the API documentation.
    html_get = requests.get(
        'http://webapi.http.zhimacangku.com/getip?num=5&type=2&pro=&city=0&yys=0&port=1&pack=21479&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1®ions=')
    get_json = json.loads(html_get.content.decode())
    # print(type(get_json['success']))
    if get_json['success']:
        ip_list = get_json['data']
        # print(ip_list)
        return ip_list
    else:
        # API refused the request; caller must handle None.
        return
def get_ip1():
    """Request a single proxy ip from the Zhima proxy API (num=1).

    Same contract as get_ip(): returns the list of ip dicts on success,
    or None when the API reports failure.
    """
    # Fetch 1 ip per call.
    # NOTE(review): '®ions=' looks like a mangled '&regions=' — confirm.
    html_get = requests.get(
        'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=1&pack=21479&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1®ions=')
    get_json = json.loads(html_get.content.decode())
    # print(type(get_json['success']))
    if get_json['success']:
        ip_list = get_json['data']
        # print(ip_list)
        return ip_list
    else:
        return
def get_full_ip():
    """Fetch a fresh batch of proxies and store each in redis (db 14) under
    keys 'ip0'..'ipN', each with a TTL equal to its remaining lifetime.

    NOTE(review): appends to the module-global ip_key_name on every call, so
    repeated calls grow the key list with duplicate names — confirm intended.
    NOTE(review): get_ip() may return None on API failure; len(None) below
    would raise TypeError — confirm handling.
    """
    ip_list = get_ip()
    # Using redis db 14.
    # 1. Build the redis keys: proxies are stored under ip0, ip1, ...
    nums = len(ip_list)
    # Generate the key list.
    for i in range(nums):
        ip_key_name.append('ip'+str(i))
    for i in range(nums):
        # Join host and port into "ip:port".
        ip = ip_list[i]['ip'] + ':' + str(ip_list[i]['port'])
        print(ip)
        # Convert the proxy's expire_time into a unix timestamp and compute
        # its remaining lifetime in seconds.
        ip_time = time.strptime(ip_list[i]['expire_time'], "%Y-%m-%d %H:%M:%S")
        ip_time = int(time.mktime(ip_time))
        now = int(time.time())
        ip_left = int(ip_time) - now
        # 2. Store the proxy with the remaining lifetime as the redis TTL.
        a = r.setex(ip_key_name[i], ip_left, ip)
        print(a)
    # 3. Read back and print the currently stored proxies.
    PROXIES = []
    for key in ip_key_name:
        # NOTE(review): r.get(key) is None once a key expires; .decode()
        # would then raise AttributeError — confirm this cannot happen here.
        ip_str = r.get(key).decode()
        PROXIES.append(ip_str)
    print(PROXIES)
get_full_ip()
# 3. Poll forever: whenever a stored key has expired (value is None), fetch
# one replacement ip and store it under the vacated key with its TTL.
while True:
    for key in ip_key_name:
        if r.get(key) is None:
            print(key,'为空,即将补充ip地址')
            ip_get = get_ip1()
            print(ip_get)
            if ip_get is not None:
                ip = ip_get[0]['ip'] + ':' + str(ip_get[0]['port'])
                # Remaining lifetime = expire_time - now, used as redis TTL.
                ip_time = time.strptime(ip_get[0]['expire_time'], "%Y-%m-%d %H:%M:%S")
                ip_time = int(time.mktime(ip_time))
                now = int(time.time())
                ip_left = int(ip_time) - now
                r.setex(key, ip_left, ip)
            else:
                # API gave nothing back; back off briefly before retrying.
                sleep(5)
    get_time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
    print(get_time)
    # Rebuild the live proxy list from whatever keys still hold values.
    PROXIES = []
    for ip_key in ip_key_name:
        ip_str = r.get(ip_key)
        if ip_str is not None:
            ip_str = ip_str.decode()
            PROXIES.append(ip_str)
    print(PROXIES)
    # Alert by email when the pool runs low; refill fully when nearly empty.
    if len(PROXIES) < 4:
        let_send()
    if len(PROXIES) <= 2:
        get_full_ip()
        print("get full ip")
|
994,502 | 168d33db167e86d116570ac85d171b44c8ca6198 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2017-07-31 22:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial migration for the country app.

    Creates AuditorCountry (field-level audit log), Country, Section
    (self-referencing tree, linked to Country and SectionType) and
    SectionType, then adds index_together composite indexes.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Audit-log rows recording before/after values of field changes.
        migrations.CreateModel(
            name='AuditorCountry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(db_index=True, max_length=30)),
                ('table', models.CharField(db_index=True, max_length=20)),
                ('field', models.CharField(db_index=True, max_length=20)),
                ('before_value', models.CharField(db_index=True, max_length=30)),
                ('after_value', models.CharField(blank=True, db_index=True, max_length=30, null=True)),
                ('date', models.DateField(db_index=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auditor_country_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # country_code defaults to uuid4 so new rows get a unique code.
                ('country_code', models.CharField(blank=True, db_index=True, default=uuid.uuid4, max_length=64)),
                ('country_name', models.CharField(max_length=64)),
                ('active', models.BooleanField(db_index=True, default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('section_code', models.CharField(db_index=True, default=uuid.uuid4, max_length=64, null=True)),
                ('section_name', models.CharField(max_length=30)),
                ('active', models.BooleanField(db_index=True, default=True)),
                ('fk_country', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='section_country', to='country.Country')),
                # Self-referencing FK: sections form a tree via fk_section.
                ('fk_section', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='section_section', to='country.Section')),
            ],
        ),
        migrations.CreateModel(
            name='SectionType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('section_type_code', models.CharField(db_index=True, default=uuid.uuid4, max_length=64, null=True)),
                ('section_type_name', models.CharField(max_length=40, null=True)),
                ('active', models.BooleanField(db_index=True, default=True)),
            ],
        ),
        migrations.AlterIndexTogether(
            name='sectiontype',
            index_together=set([('section_type_code', 'active')]),
        ),
        # fk_section_type is added after SectionType has been created.
        migrations.AddField(
            model_name='section',
            name='fk_section_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='section_section_type', to='country.SectionType'),
        ),
        migrations.AlterIndexTogether(
            name='country',
            index_together=set([('country_code', 'active')]),
        ),
        migrations.AlterIndexTogether(
            name='section',
            index_together=set([('section_code', 'active')]),
        ),
        migrations.AlterIndexTogether(
            name='auditorcountry',
            index_together=set([('action', 'table', 'field', 'before_value', 'after_value', 'date')]),
        ),
    ]
|
994,503 | a3699625819bbdb41d7f1622cf99f1c996ee406d | import numpy as np
import matplotlib.pyplot as plt
from scipy.special import comb
# Geometric distribution demo: success probability p, support k = 1..n.
p = 0.4
n = 10
# x = 1, 2, ..., n
x = np.linspace(1,n,n)
# Geometric pmf: P(X = k) = p * (1 - p)**(k - 1)
y = p*(1-p)**(x-1)
print(x)
print(y)
# Sum the first n pmf values (approaches 1 as n grows).
z = 0
for i in y:
    z = z +i
print(z)
# Scatter plot of the pmf.
plt.scatter(x,y)
plt.grid(True)
plt.show() |
994,504 | 7e3f77afc93e9c5915f5487953c4508e5e3487bb | # -*- coding: UTF-8 -*-
import os
import numpy as np
import pandas as pd
from stock.globalvar import FINANCE_DIR, BASIC_DIR, LRB_CH2EN, XJLLB_CH2EN, ZCFZB_CH2EN
from stock.utils.symbol_util import symbol_to_exsymbol
def _set_quarter(df):
for i in range(len(df)):
dt = df.index[i]
if dt.month == 3:
df.loc[df.index[i], "quarter"] = "Q1"
elif dt.month == 6:
df.loc[df.index[i], "quarter"] = "Q2"
elif dt.month == 9:
df.loc[df.index[i], "quarter"] = "Q3"
elif dt.month == 12:
df.loc[df.index[i], "quarter"] = "Q4"
def _parse_cell(string, parser=None):
string = string.strip()
if string == "":
return np.nan
if string == "--":
return np.nan
if parser == None:
return string
return parser(string)
def get_lrb_data(exsymbol):
    """Load the income-statement (lrb) CSV for *exsymbol* into a DataFrame.

    The file is comma separated with one metric per line; the line whose
    first cell is the header "报告日期" supplies the report dates that become
    the (ascending) DatetimeIndex.  Missing cells become 0.0 and a "quarter"
    column is added via _set_quarter.

    Raises:
        Exception: if no lrb file exists for the symbol, or a Chinese metric
            name has no English translation in LRB_CH2EN.
    """
    filename = "%s_lrb" % exsymbol
    path = os.path.join(FINANCE_DIR["stock"], filename)
    if not os.path.isfile(path):
        msg = "%s has no lrb data" % exsymbol
        raise Exception(msg)
    with open(path, "r") as f:
        content = f.read()
    data = {}
    index = []
    for line in content.splitlines():
        if line.strip() == "":
            continue
        cells = line.split(",")
        col = cells[0].strip()
        if col == "报告日期":
            index = [_parse_cell(x, str) for x in cells[1:]]
        else:
            en = LRB_CH2EN.get(col)
            # BUGFIX: dict.get() returns None (not "") for unknown columns,
            # so the old `en == ""` check never fired and let a None key
            # slip into `data`.  Use the same `if not en` as the sibling
            # get_zcfzb_data/get_xjllb_data functions.
            if not en:
                raise Exception("en for %s not defined" % cells[0])
            array = data.setdefault(en, [])
            array.extend(_parse_cell(x, float) for x in cells[1:])
    df = pd.DataFrame(data=data, index=index).fillna(0.0)
    df = df[pd.notnull(df.index)]
    df.set_index(pd.to_datetime(df.index, format="%Y-%m-%d"), inplace=True)
    _set_quarter(df)
    # Rows are stored newest-first in the file; reverse to ascending order.
    return df.iloc[::-1]
def get_zcfzb_data(exsymbol):
    """Load the balance-sheet (zcfzb) CSV for *exsymbol* into a DataFrame
    indexed by report date (ascending), with missing values filled with 0.0
    and a "quarter" column added."""
    filename = "%s_zcfzb" % exsymbol
    path = os.path.join(FINANCE_DIR["stock"], filename)
    if not os.path.isfile(path):
        raise Exception("%s has no zcfzb data" % exsymbol)
    with open(path, "r") as f:
        lines = f.read().splitlines()
    columns = {}
    dates = []
    for line in lines:
        if not line.strip():
            continue
        cells = line.split(",")
        header = cells[0].strip()
        if header == "报告日期":
            # This line carries the report dates used as the index.
            dates = [_parse_cell(cell, str) for cell in cells[1:]]
        else:
            en_name = ZCFZB_CH2EN.get(header)
            if not en_name:
                raise Exception("en for %s not defined" % cells[0])
            columns.setdefault(en_name, []).extend(
                _parse_cell(cell, float) for cell in cells[1:])
    df = pd.DataFrame(data=columns, index=dates).fillna(0.0)
    df = df[pd.notnull(df.index)]
    df.set_index(pd.to_datetime(df.index, format="%Y-%m-%d"), inplace=True)
    _set_quarter(df)
    return df.iloc[::-1]
def get_xjllb_data(exsymbol):
    """Load the cash-flow statement (xjllb) CSV for *exsymbol* into a
    DataFrame indexed by report date (ascending); missing values become 0.0
    and a "quarter" column is added."""
    path = os.path.join(FINANCE_DIR["stock"], "%s_xjllb" % exsymbol)
    if not os.path.isfile(path):
        raise Exception("%s has no xjllb data" % exsymbol)
    with open(path, "r") as f:
        raw = f.read()
    data = {}
    index = []
    for row in raw.splitlines():
        if not row.strip():
            continue
        cells = row.split(",")
        name = cells[0].strip()
        values = cells[1:]
        if name == "报告日期":
            # Report-date line: becomes the DataFrame index.
            index = [_parse_cell(v, str) for v in values]
            continue
        en = XJLLB_CH2EN.get(name)
        if not en:
            raise Exception("en for %s not defined" % cells[0])
        data.setdefault(en, []).extend(_parse_cell(v, float) for v in values)
    df = pd.DataFrame(data=data, index=index).fillna(0.0)
    df = df[pd.notnull(df.index)]
    df.set_index(pd.to_datetime(df.index, format="%Y-%m-%d"), inplace=True)
    _set_quarter(df)
    return df.iloc[::-1]
def load_stock_basics():
    """Read the stock basics CSV into a DataFrame indexed by the
    exchange-prefixed symbol (exsymbol)."""
    filepath = os.path.join(BASIC_DIR, "basics")
    dtypes = {
        "symbol": str,
        "name": str,
        "close": np.float64,
        "mcap": np.float64,
        "liquid_mcap": np.float64,
        "pe": np.float64,
        "total_share": np.float64,
        "liquid_share": np.float64,
    }
    df = pd.read_csv(filepath, encoding="utf-8", dtype=dtypes)
    # Derive the index column from the plain symbol column.
    df["exsymbol"] = [symbol_to_exsymbol(symbol) for symbol in df["symbol"]]
    df.set_index("exsymbol", inplace=True)
    return df
|
994,505 | 70bad2bf7bd0e86680908541ff992f922e150d15 | import os
import sys
import pytest
from layer_linter.contract import get_contracts, Layer
from layer_linter.dependencies import ImportPath
from layer_linter.module import Module
class TestGetContracts:
    """Tests for layer_linter.contract.get_contracts."""
    def test_happy_path(self):
        """Parse layers.yml and verify every field of both contracts."""
        self._initialize_test()
        contracts = get_contracts(self.filename_and_path, package_name='singlecontractfile')
        assert len(contracts) == 2
        # Expected contract data, ordered by contract name (A before B).
        expected_contracts = [
            {
                'name': 'Contract A',
                'packages': ['singlecontractfile.foo', 'singlecontractfile.bar'],
                'layers': ['one', 'two'],
            },
            {
                'name': 'Contract B',
                'packages': ['singlecontractfile'],
                'layers': ['one', 'two', 'three'],
                'whitelisted_paths': [
                    ('baz.utils', 'baz.three.green'),
                    ('baz.three.blue', 'baz.two'),
                ],
            },
        ]
        # Sort by name so positions line up with expected_contracts.
        sorted_contracts = sorted(contracts, key=lambda i: i.name)
        for contract_index, contract in enumerate(sorted_contracts):
            expected_data = expected_contracts[contract_index]
            assert contract.name == expected_data['name']
            for package_index, package in enumerate(contract.containers):
                expected_package_name = expected_data['packages'][package_index]
                assert package == Module(expected_package_name)
            for layer_index, layer in enumerate(contract.layers):
                expected_layer_data = expected_data['layers'][layer_index]
                assert isinstance(layer, Layer)
                assert layer.name == expected_layer_data
            for whitelisted_index, whitelisted_path in enumerate(contract.whitelisted_paths):
                expected_importer, expected_imported = expected_data['whitelisted_paths'][
                    whitelisted_index]
                assert isinstance(whitelisted_path, ImportPath)
                assert whitelisted_path.importer == Module(expected_importer)
                assert whitelisted_path.imported == Module(expected_imported)
    def test_container_does_not_exist(self):
        """A contract naming a missing package raises a helpful ValueError."""
        self._initialize_test('layers_with_missing_container.yml')
        with pytest.raises(ValueError) as e:
            get_contracts(self.filename_and_path, package_name='singlecontractfile')
        assert str(e.value) == "Invalid container 'singlecontractfile.missing': no such package."
    def _initialize_test(self, config_filename='layers.yml'):
        """Make the singlecontractfile asset package importable and store the
        requested config file's full path on the instance."""
        # Append the package directory to the path.
        dirname = os.path.dirname(__file__)
        package_dirname = os.path.join(dirname, '..', 'assets', 'singlecontractfile')
        sys.path.append(package_dirname)
        # Set the full config filename and path as an instance attribute.
        self.filename_and_path = os.path.join(package_dirname, config_filename)
|
994,506 | 0f89e31d9b3acee567be23fe481d9a6794ccd0b9 | """
Load a CSV raster file to RasterAggregatedLayer object and related NumericRasterAggregateData.
NOTE: Input CSV expected to be AGGREGATED.
"""
import os
import csv
import gzip
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from django.utils import timezone
from ...models import RasterAggregatedLayer, NumericRasterAggregateData
WGS84_SRID = 4326
SPHERICAL_MERCATOR_SRID = 3857 # google maps projection
COMMIT_COUNT = 50000
def load_raster_csv(filepath, layer_name, csv_encoding, pixel_size, csv_srid, indexes, lon_idx, lat_idx, datetime_idx, datetime_format_str, opacity, no_datetime=False, no_headers=False, aggregation_method="mean"):
    """Load an aggregated CSV raster into RasterAggregatedLayer objects.

    One RasterAggregatedLayer is created per value column listed in
    *indexes*; each CSV row becomes a NumericRasterAggregateData pixel,
    bulk-created in batches of COMMIT_COUNT.  Returns (layers, pixel_count).

    NOTE(review): once *layer_name* is set (given, or derived for the first
    index), every later layer reuses the same name — confirm intended.
    NOTE(review): expected_indexes always includes datetime_idx, so rows
    with an empty datetime cell are skipped even when no_datetime=True —
    confirm intended.
    """
    # Transparently support gzip-compressed CSVs by filename extension.
    open_func = open
    if filepath.lower().endswith(".gz"):
        open_func = gzip.open
    with open_func(filepath, "rt", encoding=csv_encoding) as in_f:
        reader = csv.reader(in_f)
        headers = None
        if not no_headers:
            headers = next(reader)  # remove headers
        # prepare KPI raster layers (one per requested value column)
        index_layers = {}
        for data_idx in indexes:
            if not headers:
                kpi_name = "Unknown (no-headers)"
            else:
                kpi_name = headers[data_idx]
            if not layer_name:
                layer_name = "{} ({})".format(os.path.split(filepath)[-1], kpi_name)
            layer = RasterAggregatedLayer(name=layer_name,
                                          filepath=filepath,
                                          data_model="NumericRasterAggregateData",
                                          opacity=opacity,
                                          aggregation_method=aggregation_method,
                                          pixel_size_meters=pixel_size,
                                          minimum_samples=1,  # sample number is not known for pre-aggregated items.
                                          )
            layer.save()
            index_layers[data_idx] = layer
        count = 0
        pixels = []
        # A row is only usable if its lon, lat and datetime cells are non-empty.
        expected_indexes = [lon_idx, lat_idx, datetime_idx]
        for row in reader:
            if row and all(row[idx] for idx in expected_indexes):
                if no_datetime:
                    datetime_value = timezone.now()
                else:
                    # Parse naive, then localize to the project's timezone.
                    naive_datetime_value = datetime.datetime.strptime(row[datetime_idx], datetime_format_str)
                    current_timezone = timezone.get_default_timezone()
                    datetime_value = timezone.make_aware(naive_datetime_value, current_timezone)
                lon = float(row[lon_idx])
                lat = float(row[lat_idx])
                p = Point(lon, lat, srid=csv_srid)
                for value_idx in indexes:
                    if row[value_idx]:
                        # currently only supporting numeric values!
                        value = float(row[value_idx])
                        data = NumericRasterAggregateData(layer=index_layers[value_idx],
                                                          location=p,
                                                          dt = datetime_value,
                                                          mean=value,
                                                          samples=1)
                        pixels.append(data)
                # Flush in batches to bound memory use.
                if len(pixels) >= COMMIT_COUNT:
                    NumericRasterAggregateData.objects.bulk_create(pixels)
                    count += len(pixels)
                    pixels = []
        # Flush the final partial batch.
        if pixels:
            NumericRasterAggregateData.objects.bulk_create(pixels)
            count += len(pixels)
    return index_layers.values(), count
class Command(BaseCommand):
    """Management command wrapping load_raster_csv.

    BUGFIX: the --lon-idx/--lat-idx help texts claimed DEFAULT=1/DEFAULT=2,
    the reverse of the actual defaults (lon_idx=2, lat_idx=1); the help now
    matches the code.
    """
    help = __doc__
    def add_arguments(self, parser):
        """Register the CLI options for loading an aggregated CSV raster."""
        parser.add_argument("-f", "--filepath",
                            required=True,
                            default=None,
                            help="CSV Raster File to load")
        parser.add_argument("-e", "--encoding",
                            default="utf8",
                            help="Encoding of the CSV file [DEFAULT='utf8']")
        parser.add_argument("-p", "--pixel-size",
                            type=int,
                            default=5,
                            help="CSV Raster Pixel Size (meters)")
        parser.add_argument("-c", "--csv-srid",
                            dest="csv_srid",
                            type=int,
                            default=WGS84_SRID,
                            help="Input CSV Lon/Lat SRID. (DEFAULT=4326 [WGS84])")
        parser.add_argument("-i", "--indexes",
                            type=int,
                            default=[3,],
                            nargs="+",
                            help="Column indexes for the 'value(s)' to be loaded [DEFAULT=(3,)]")
        parser.add_argument("--lon-idx",
                            dest="lon_idx",
                            default=2,
                            type=int,
                            help="Column Index (0 start) of 'longitude' in decimal degrees [DEFAULT=2]")
        parser.add_argument("--lat-idx",
                            dest="lat_idx",
                            default=1,
                            type=int,
                            help="Column Index (0 start) of 'latitude' in decimal degrees [DEFAULT=1]")
        parser.add_argument("-n", "--name",
                            default=None,
                            type=str,
                            help="If given this name will be applied to resulting RasterAggregatedLayer [DEFAULT=None]")
        parser.add_argument("-o", "--opacity",
                            default=0.75,
                            type=float,
                            help="Layer Suggested Opacity [DEFAULT={}]".format(0.75))
        parser.add_argument("-d", "--datetime-idx",
                            default=0,
                            type=int,
                            help="Column index of datetime [DEFAULT=0]")
        parser.add_argument("--datetime-format-str",
                            default="%H:%M:%S.%f %d-%m-%Y",
                            help="Datetime format string to use [DEFAULT='%%H:%%M:%%S.%%f %%d-%%m-%%Y']")
        parser.add_argument("--no-datetime",
                            default=False,
                            action="store_true",
                            help="If given datetime column will not be necessary, and load time will be used.")
        parser.add_argument("--no-headers",
                            default=False,
                            action="store_true",
                            help="If given the first line will be *included* as data")
    def handle(self, *args, **options):
        """Load the CSV, then create a legend and map layer for each
        resulting RasterAggregatedLayer and report what was created.

        NOTE(review): aggregation_method is not exposed as a CLI option and
        always uses load_raster_csv's "mean" default — confirm intended.
        """
        result_layers, count = load_raster_csv(options["filepath"],
                                               options["name"],
                                               options["encoding"],
                                               options["pixel_size"],
                                               options["csv_srid"],
                                               options["indexes"],
                                               options["lon_idx"],
                                               options["lat_idx"],
                                               options["datetime_idx"],
                                               options["datetime_format_str"],
                                               options["opacity"],
                                               options["no_datetime"],
                                               options["no_headers"])
        self.stdout.write("Created ({}) pixels in the following RasterAggregatedLayer(s): ".format(count))
        for raster_layer in result_layers:
            # auto create legend
            legend = raster_layer.auto_create_legend(more_is_better=True)
            raster_layer.legend = legend
            raster_layer.save()
            # create map layer (for viewing)
            raster_layer.create_map_layer()
            self.stdout.write("[{}] {}".format(raster_layer.id, raster_layer.name))
|
994,507 | f25f3e17c4f5140befa9b3060f0528698efdf937 | from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import cv2
import copy
import math
rescale_size = 300
n_vertices = 128
pylab.rcParams['figure.figsize'] = (8.0, 10.0)
dataDir = '/media/keyi/Data/Research/course_project/AdvancedCV_2020/data/COCO17'
dataType = 'val2017'
annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataType)
coco = COCO(annFile)
out_npy_file = '{}/shape_{}_{}.npy'.format(dataDir, dataType, n_vertices)
cats = coco.loadCats(coco.getCatIds())
nms = [cat['name'] for cat in cats]
catIds = coco.getCatIds(catNms=nms)
imgIds = coco.getImgIds(catIds=catIds)
annIds = coco.getAnnIds(catIds=catIds)
all_anns = coco.loadAnns(ids=annIds)
def calculateCurvatureThreshold(min_angle=5.):
    """Return the curvature threshold corresponding to a minimum vertex
    angle of *min_angle* degrees (vertices flatter than this are treated as
    removable)."""
    angle_rad = math.pi / 180. * min_angle
    # Geometric construction: horizontal and vertical offsets of the arc
    # spanning the minimum angle, from which the circumradius follows.
    dx = math.cos(math.pi / 2. - angle_rad)
    chord = math.sin(math.pi / 2. - angle_rad)
    dy = chord / math.tan(2 * angle_rad)
    # Curvature is the reciprocal of that radius.
    return 1. / (dx + dy)
def computeCurvatureThreePoints(point1, point2, point3):
    """Return (curvature at point2, a + c) where a and c are the side
    lengths adjacent to point2.  Curvature is 4*area/(a*b*c), i.e. the
    reciprocal of the circumradius; degenerate triangles yield 0 curvature.
    Note the curvature is evaluated at point2, so argument order matters."""
    def side_len(p, q):
        return np.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)

    a = side_len(point1, point2)
    b = side_len(point1, point3)
    c = side_len(point2, point3)
    # Coincident points: no well-defined circumcircle.
    if a < 1e-6 or b < 1e-6 or c < 1e-6:
        return 0., a + c
    s = (a + b + c) / 2.
    heron = (s - a) * (s - b) * (s - c)
    # Negative product means numerically impossible triangle (collinear-ish).
    if heron < 0.:
        return 0., a + c
    area = np.sqrt(s * heron)
    return 4 * area / (a * b * c), a + c
def normalizeShapeRepresentation(polygons_input, n_vertices, threshold=calculateCurvatureThreshold()):
    """Resample a flat polygon list [x0, y0, x1, y1, ...] to exactly
    n_vertices vertices.

    - Exactly n_vertices already: return a deep copy unchanged.
    - Between n_vertices/4 and n_vertices: repeatedly split the longest edge
      at its midpoint until the target count is reached.
    - Between n_vertices and 2*n_vertices: repeatedly remove the vertex with
      the shortest adjacent-edge sum whose curvature is below *threshold*;
      sharp (high-curvature) vertices are marked visited and protected.
      Returns None if every remaining vertex becomes protected.
    - Otherwise: return None (too few or too many vertices to resample).
    """
    polygons = copy.deepcopy(polygons_input)
    total_vertices = len(polygons) // 2
    curvature_thres = threshold
    if total_vertices == n_vertices:
        # print('direct return')
        return polygons
    elif n_vertices * 0.25 <= total_vertices < n_vertices:
        # Upsample: insert midpoints on the longest edge until large enough.
        while(len(polygons) < n_vertices * 2):
            max_idx = -1
            max_dist = 0.
            insert_coord = [-1, -1]
            for i in range(len(polygons) // 2):
                x1 = polygons[2 * i]
                y1 = polygons[2 * i + 1]
                # Wrap-around indexing closes the polygon.
                x2 = polygons[(2 * i + 2) % len(polygons)]
                y2 = polygons[(2 * i + 3) % len(polygons)]
                dist = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                if dist > max_dist:
                    max_idx = (2 * i + 2) % len(polygons)
                    max_dist = dist
                    insert_coord[0] = (x1 + x2) / 2
                    insert_coord[1] = (y1 + y2) / 2
            # Insert y then x so the pair ends up in (x, y) order.
            polygons.insert(max_idx, insert_coord[1])
            polygons.insert(max_idx, insert_coord[0])
        # print('less than: ', n_vertices)
        return polygons
    elif n_vertices < total_vertices <= n_vertices * 2:
        # Downsample: drop low-curvature vertices on the shortest edge pairs.
        visited = [0 for i in range(len(polygons))]
        while(len(polygons) > n_vertices * 2):
            min_idx_curv = -1
            min_curv = 0.
            min_idx_side = -1
            min_side = math.inf
            min_side_curv = 100.
            for i in range(len(polygons) // 2):
                # Skip vertices already protected as sharp corners.
                if visited[(2 * i + 2) % len(polygons)] == 1:
                    continue
                point1 = (polygons[2 * i], polygons[2 * i + 1])
                point2 = (polygons[(2 * i + 2) % len(polygons)], polygons[(2 * i + 3) % len(polygons)])
                point3 = (polygons[(2 * i + 4) % len(polygons)], polygons[(2 * i + 5) % len(polygons)])
                curvature, side = computeCurvatureThreePoints(point1, point2, point3)
                if side < min_side and curvature < curvature_thres:
                    # Flat vertex on a short edge pair: removal candidate.
                    min_idx_side = (2 * i + 2) % len(polygons)
                    min_side = side
                elif side < min_side and curvature >= curvature_thres:
                    # Sharp vertex: protect it from removal.
                    min_idx_side = (2 * i + 2) % len(polygons)
                    visited[min_idx_side] = 1
                    visited[(2 * i + 3) % len(polygons)] = 1
                # if curvature < min_curv:
                #     min_idx_curv = (2 * i + 2) % len(polygons)
                #     min_curv = curvature
            # Delete the chosen vertex's x then y (same index twice).
            del polygons[min_idx_side]
            del polygons[min_idx_side]
            # Every remaining vertex protected: give up on this shape.
            if np.prod(visited) == 1:
                return None
            # if min_side_curv < curvature_thres:
            #     del polygons[min_idx_side]
            #     del polygons[min_idx_side]
            #     del visited[min_idx_side]
            #     del visited[min_idx_side]
            # else:
            #     visited[min_idx_side] = 1
            #     visited[min_idx_side + 1] = 1
            # del polygons[min_idx]
            # del polygons[min_idx]
        # print('more than: ', n_vertices)
        return polygons
    else:
        # print('return none.')
        return None
# Counters for the summary printed at the end.
counter_iscrowd = 0
counter_total = 0
counter_poor = 0
length_polygons = []
curvature_thres = calculateCurvatureThreshold(min_angle=2.5)
# Each valid annotation contributes one column of 2*n_vertices coordinates.
COCO_shape_matrix = np.zeros(shape=(n_vertices * 2, 0))
for annotation in all_anns:
    # Crowd annotations do not carry usable polygon lists here — skip.
    if annotation['iscrowd'] == 1:
        counter_iscrowd += 1
        continue
    img = coco.loadImgs(annotation['image_id'])[0]
    image_name = '%s/images/%s/%s' % (dataDir, dataType, img['file_name'])
    w_img = img['width']
    h_img = img['height']
    # Only the first polygon of the segmentation is used.
    polygons = annotation['segmentation'][0]
    bbox = annotation['bbox'] #top-left corner coordinates, width and height convention
    shape_list = normalizeShapeRepresentation(polygons, n_vertices, threshold=curvature_thres)
    # None means the shape could not be resampled to n_vertices.
    if shape_list is None:
        counter_poor += 1
        continue
    # print('original list size: ', len(polygons))
    # print('returned list size: ', len(shape_list))
    assert len(shape_list) == n_vertices * 2
    counter_total += 1
    # image = cv2.imread(image_name)
    # bound_image = image[int(bbox[1]):int(bbox[1] + bbox[3]), int(bbox[0]):int(bbox[0] + bbox[2])]
    # bound_image = cv2.resize(bound_image, dsize=(rescale_size, rescale_size))
    # bound_ref = cv2.resize(bound_image, dsize=(rescale_size, rescale_size))
    # Normalize vertex coordinates into [0, 1] relative to the bounding box
    # (note: norm_shape aliases shape_list, which is mutated in place).
    norm_shape = shape_list
    for j in range(n_vertices):
        # norm_shape[2 * j] = max(shape_list[2 * j] - bbox[0], 0.) / bbox[2] * rescale_size * 1.
        # norm_shape[2 * j + 1] = max(shape_list[2 * j + 1] - bbox[1], 0.) / bbox[3] * rescale_size * 1.
        norm_shape[2 * j] = max(shape_list[2 * j] - bbox[0], 0.) / bbox[2] * 1.
        norm_shape[2 * j + 1] = max(shape_list[2 * j + 1] - bbox[1], 0.) / bbox[3] * 1.
        # x = int(norm_shape[2 * j])
        # y = int(norm_shape[2 * j + 1])
        # cv2.circle(bound_image, center=(x, y), radius=2, color=(0, 0, 255), thickness=2)
        # cv2.putText(bound_image, text=str(j + 1), org=(x, y), color=(0, 255, 255),
        #             fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=0.8, thickness=1)
    # norm_polygon = polygons
    # for j in range(len(polygons) // 2):
    #     norm_polygon[2 * j] = max(polygons[2 * j] - bbox[0], 0.) / bbox[2] * rescale_size * 1.
    #     norm_polygon[2 * j + 1] = max(polygons[2 * j + 1] - bbox[1], 0.) / bbox[3] * rescale_size * 1.
    #
    #     x = int(norm_polygon[2 * j])
    #     y = int(norm_polygon[2 * j + 1])
    #     cv2.circle(bound_ref, center=(x, y), radius=2, color=(0, 0, 255), thickness=2)
    #     cv2.putText(bound_ref, text=str(j + 1), org=(x, y), color=(0, 255, 255),
    #                 fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=0.8, thickness=1)
    # concat_image = np.zeros((rescale_size, 2 * rescale_size, 3), dtype=np.uint8)
    # concat_image[:, 0:rescale_size, :] = bound_ref
    # concat_image[:, rescale_size:, :] = bound_image
    # cv2.imshow('Compare Image', concat_image)
    # cv2.waitKey()
    # construct data matrix: append this shape as a new column
    # print(norm_shape)
    atom = np.expand_dims(np.array(norm_shape), axis=1)
    # print(atom.shape)
    # print(COCO_shape_matrix.shape)
    COCO_shape_matrix = np.concatenate((COCO_shape_matrix, atom), axis=1)
# Summary of how many annotations were kept / skipped.
print('Total valid shape: ', counter_total)
print('Poor shape: ', counter_poor)
print('Iscrowd: ', counter_iscrowd)
print('Total number: ', counter_poor + counter_iscrowd + counter_total)
print('Size of shape matrix: ', COCO_shape_matrix.shape)
np.save(out_npy_file, COCO_shape_matrix) |
994,508 | a13d429c9ac55f991c1f9699dd5d27e5108c7517 | """
===========================================
DL8.5 used to perform predictive clustering
===========================================
This example illustrates how to use a user-specified error function to perform predictive
clustering. The PyDL8.5 library also provides an implementation of predictive clustering
that does not require the use of user-specified error function.
Check the DL85Cluster class for this implementation.
The main purpose of this example is to show how users of the library can implement their
own decision tree learning task using PyDL8.5's interface for writing error functions.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import DistanceMetric
import time
from dl85 import DL85Predictor
dataset = np.genfromtxt("../datasets/anneal.txt", delimiter=' ')
X = dataset[:, 1:]
X_train, X_test = train_test_split(X, test_size=0.2, random_state=0)
print("############################################################################################\n"
"# DL8.5 clustering : user specific error function and leaves' values assignment #\n"
"############################################################################################")
# The quality of every cluster is determined using the Euclidean distance.
eucl_dist = DistanceMetric.get_metric('euclidean')
# user error function
def error(tids):
    """Predictive-clustering error for the transactions in *tids*: the summed
    Euclidean distance of the selected training rows to their centroid."""
    # collect the complete examples identified using the tids.
    rows = X_train[list(tids), :]
    # centroid of this candidate cluster
    centroid = rows.mean(axis=0)
    # distance of every row to the centroid
    distances = eucl_dist.pairwise(rows, [centroid])
    # the error is the total distance
    return float(sum(distances))
# user leaf assignment
def leaf_value(tids):
    # The prediction for every leaf is the centroid of the cluster
    # NOTE(review): X.take(list(tids)) indexes the *flattened* array; taking
    # whole rows would be X.take(list(tids), axis=0) with a per-column mean.
    # Confirm the intended behavior before relying on these predictions.
    return np.mean(X.take(list(tids)))
# Change the parameters of the algorithm as desired.
clf = DL85Predictor(max_depth=2, min_sup=5, error_function=error, leaf_value_function=leaf_value, time_limit=600)
start = time.perf_counter()
print("Model building...")
clf.fit(X_train)
duration = time.perf_counter() - start
print("Model built. Duration of the search =", round(duration, 4))
predicted = clf.predict(X_test)
|
994,509 | 37de8547c68f4d36561f2f36de10014409003a96 | import tensorflow as tf
import argparse
import configparser
import os
import time
import numpy as np
from model import Prototypical
from load_data import load
def preprocess_config(c):
    """Convert raw (string) config values to int/float where the key is a
    known numeric parameter; all other values pass through unchanged."""
    int_params = {'data.train_way', 'data.test_way', 'data.train_support',
                  'data.test_support', 'data.train_query', 'data.test_query',
                  'data.query', 'data.support', 'data.way', 'data.episodes',
                  'model.z_dim', 'train.epochs',
                  'train.patience'}
    float_params = {'train.lr'}

    def _convert(key):
        # Cast by parameter category; unknown keys keep their raw value.
        if key in int_params:
            return int(c[key])
        if key in float_params:
            return float(c[key])
        return c[key]

    return {key: _convert(key) for key in c}
def train(config):
    """Train a Prototypical Network with the way/shot/episode settings in
    *config* (a dict produced by preprocess_config)."""
    # Create folder for model
    model_dir = config['model.save_path'][:config['model.save_path'].rfind('/')]
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    # load data
    data_dir = f"data/{config['data.dataset']}"
    ret = load(data_dir, config, ['train', 'val'])
    train_loader = ret['train']
    val_loader = ret['val']  # NOTE(review): loaded but never used below — confirm.
    # Setup training operations
    n_support = config['data.train_support']
    n_query = config['data.train_query']
    # Input image dimensions, e.g. "28,28,1" -> width, height, channels.
    w, h, c = list(map(int, config['model.x_dim'].split(',')))
    model = Prototypical(n_support, n_query, w, h, c)
    optimizer = tf.keras.optimizers.Adam(config['train.lr'])
    def run_optimization(support, query): # train_step
        # Forward & update gradients
        with tf.GradientTape() as tape:
            loss, acc = model(support, query)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    for epoch in range(config['train.epochs']):
        for i_episode in range(config['data.episodes']):
            support, query = train_loader.get_next_episode()
            run_optimization(support, query)
            if i_episode % 5 == 0:
                # Extra forward pass, purely to report current loss/accuracy.
                loss, acc = model(support, query)
                print("epoch: %i, episode: %i, loss: %f, acc: %f" %(epoch, i_episode, loss, acc * 100))
    print("Training succeed!")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run training')
    parser.add_argument("--config", type=str, default="config_omniglot.conf",
                        help="Path to the config file.")
    time_start = time.time()
    # Run training
    args = vars(parser.parse_args())
    config = configparser.ConfigParser()
    config.read(args['config'])
    config = preprocess_config(config['TRAIN'])
    train(config)
    time_end = time.time()
    elapsed = time_end - time_start
    # BUGFIX: the seconds component was previously `elapsed - min*60`,
    # which ignores the hours part (wrong whenever elapsed >= 1 hour) and
    # shadowed the builtin `min`.  divmod gives a correct h/m/s breakdown.
    hours, rem = divmod(elapsed, 3600)
    minutes, sec = divmod(rem, 60)
    print(f"Training took: {hours} h {minutes} min {sec} sec")
|
994,510 | 508b7eac7ec92e4d422804cda1460ed95500aaa4 | from django.shortcuts import redirect, render
from django.utils import timezone
from .models import Post
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
def home(request):
    # Render the static landing page.
    return render (request,'home.html')
def post_list(request):
    """List posts updated up to now, oldest first.

    The original built a created_at queryset and immediately overwrote it
    with the updated_at queryset; querysets are lazy, so only the latter was
    ever evaluated.  The dead query has been removed.
    """
    posts = Post.objects.filter(updated_at__lte=timezone.now()).order_by('updated_at')
    return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
    """Show a single post; returns 404 if no post with *pk* exists."""
    post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
    """Create a new post.  GET renders an empty form; an invalid POST
    re-renders the bound form with errors; a valid POST saves and redirects
    to the post's detail page."""
    if request.method != "POST":
        return render(request, 'blog/post_edit.html', {'form': PostForm()})
    form = PostForm(request.POST)
    if not form.is_valid():
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    post.author = request.user
    post.created_at = timezone.now()
    post.updated_at = timezone.now()
    post.save()
    return redirect('post_detail', pk=post.pk)
def post_edit(request, pk):
    """Edit an existing post.  GET renders the pre-filled form; an invalid
    POST re-renders with errors; a valid POST saves (bumping updated_at) and
    redirects to the detail page."""
    post = get_object_or_404(Post, pk=pk)
    if request.method != "POST":
        return render(request, 'blog/post_edit.html', {'form': PostForm(instance=post)})
    form = PostForm(request.POST, instance=post)
    if not form.is_valid():
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    post.author = request.user
    post.updated_at = timezone.now()
    post.save()
    return redirect('post_detail', pk=post.pk)
from django.shortcuts import redirect, render
from django.http.response import HttpResponse
from .models import User,UserForm
from django.contrib.auth import authenticate,login,logout
# Create your views here.
def home (request):
    """Render the static landing page."""
    return render(request,'home.html')
def add_user(request):
    """Handle user signup.

    GET renders an empty ``UserForm``; a valid POST saves the new user and
    redirects to the site root.  An invalid POST re-renders the bound form
    so errors are shown instead of raising.
    """
    if request.method == "POST":
        form = UserForm(request.POST)
        # BUG FIX: the original called f.save() unconditionally, which
        # raises on invalid input; validate first.
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        # BUG FIX: the original passed the UserForm *class* (missing
        # parentheses) to the template instead of an instance.
        form = UserForm()
    return render(request, 'form.html', {'form': form})
def login_view(request):
    """Authenticate a user from the posted ``uname``/``passw`` fields.

    On success the user id is stashed in the session and ``user.html`` is
    rendered; on failure a plain error response is returned.  GET renders
    the login form.
    """
    if request.method=="POST":
        uname=request.POST.get('uname')
        passw=request.POST.get('passw')
        user=authenticate(request,username=uname,password=passw)
        if user is not None:
            # Keep the id in the session for later lookups, then log in.
            request.session['userid']=user.id
            login(request,user)
            return render(request,'user.html')
        else:
            return HttpResponse("Invalid Username and PAssword")
    else:
        return render(request,'login.html')
def logout_view(request):
    """Log the current user out and redirect to the site root."""
    logout(request)
    return redirect('/')
994,511 | 6a041af537c4c9bdcffb847c1407f4d36a89def0 |
from urllib import request as url_request
import requests
from requests.exceptions import SSLError, HTTPError as ReqHttpError
# from requests.adapters import HTTPAdapter
# from requests.packages.urllib3.util.ssl_ import create_urllib3_context
from io import open
import os
import sys
cwd = os.getcwd()
splits = cwd.split(os.sep)
splits.pop()
parent_path = '/'.join(splits)
sys.path.append(parent_path)
from base import base_bs, base_requests, base_log
requests_tool = base_requests.BaseRequests()
Host_Wallpaper_Site = 'https://wallpapersite.com'
def get_img_url_from_href(href: str):
    """Resolve a detail-page href to the 'original' image href.

    Returns the href of the first ``<a class="original">`` anchor on the
    page, or None when the page fails to load or has no such anchor.
    """
    page_url = 'https://wallpapersite.com'+href
    html = base_requests.base_request(page_url)
    if html is None:
        return None
    bs = base_bs.get_bs_parse_result(html)
    a_list = bs.findAll('a', {'class': 'original'})
    # BUG FIX: findAll returns a list (never None), so the old
    # ``is not None`` check could not guard the indexing below, and the
    # resolved href was computed but never returned (return was
    # commented out, so the function always yielded None).
    if not a_list:
        return None
    return a_list[0].get('href')
def if_a_has_img(tag):
    """Return True when *tag* has a direct child whose name is 'img'."""
    if tag is None or tag.contents is None:
        return False
    return any(child.name == 'img' for child in tag.contents)
def get_img_original_url(page_url):
    """Fetch *page_url* and return the absolute URL of its full-size image.

    NOTE(review): assumes the page always contains an
    ``<a class="original">`` anchor — ``bs.find`` returning None would
    raise AttributeError on ``tag.get``.  Returns None implicitly when the
    page itself fails to load.
    """
    result = base_requests.base_request(page_url)
    if result is not None:
        bs = base_bs.get_bs_parse_result(result)
        tag = bs.find('a', {'class': 'original'})
        # print(tag)
        href = tag.get('href')
        original_url = Host_Wallpaper_Site+href
        print('get original url of {}'.format(page_url))
        return original_url
def download_img(img_url):
    """Download *img_url* into ./resource, keeping files that already exist."""
    print('start download img {} '.format(img_url))
    requests_tool.download_img_by_requests(img_url, overwrite=False, download_dir=cwd+'/resource')
def get_img_page_href_by_index(page):
    """Return absolute detail-page URLs listed on anime index page *page*.

    Anchors with no class attribute that directly wrap an <img> are taken
    to be thumbnails.  Returns [] when nothing matched and — NOTE(review) —
    None (implicitly) when the index page fails to load, which callers
    iterating the result must tolerate.
    """
    fetch_url = 'https://wallpapersite.com/anime/?page={}'.format(page)
    result = base_requests.base_request(fetch_url)
    if result is not None:
        # print(result)
        bs = base_bs.get_bs_parse_result(result)
        img_list = bs.findAll('a', {'class': None})
        # print('length = ', len(img_list))
        result = []
        if img_list is not None:
            for node in img_list:
                node_has_img = if_a_has_img(node)
                if node_has_img:
                    # print(node)
                    href = node.get('href')
                    result.append(Host_Wallpaper_Site+href)
        # node = img_list[0]
        # href = node.get('href')
        # # img_url = get_img_url_from_href(href)
        # print('href = ', node)
        print('find {} img href'.format(len(result)))
        return result
def download_images_by_page_index(index):
    """Download every wallpaper linked from index page *index*.

    NOTE(review): get_img_page_href_by_index returns None when the index
    page fails to load, which would make this loop raise TypeError.
    """
    href_list = get_img_page_href_by_index(index)
    # print('href list = ', href_list)
    for node in href_list:
        img_url = get_img_original_url(node)
        download_img(img_url)
def run():
    """Scrape index pages 1 through 9 using proxy profile 3."""
    requests_tool.use_proxy(3)
    index = 1
    while index < 10:
        download_images_by_page_index(index)
        index += 1
run()
|
994,512 | e45b8d380f38d769575250bc6851e9618a726b95 | import os
import sys
from sandvet.wsgi import application |
994,513 | 187607c4e04af8ec87ad7c8a795192867833fa4a | from channels.generic.websocket import AsyncWebsocketConsumer
from channels.layers import get_channel_layer
import redis
import json
# https://ssungkang.tistory.com/entry/Django-Channels-%EB%B9%84%EB%8F%99%EA%B8%B0%EC%A0%81-%EC%B1%84%ED%8C%85-%EA%B5%AC%ED%98%84%ED%95%98%EA%B8%B0-WebSocket-3?category=320582
class ChatConsumer(AsyncWebsocketConsumer) :
    """Channels websocket consumer for per-company chat rooms.

    Room "0" is the global room; other rooms track their participant set
    in Redis keyed by user id.  Chat history is stored as a Redis list per
    room.
    """
    async def connect(self) :
        # Take `company` from the chat/<company>/ URL route.
        self.company = self.scope['url_route']['kwargs']['company']
        self.company_chat = 'chat_%s' % self.company
        # Join the channel-layer group for this room.
        await self.channel_layer.group_add(
            self.company_chat,
            self.channel_name
        )
        await self.accept()
        # TODO: needs optimization.
        # Send previous chat history plus the participant count.
        r = redis.Redis(charset="utf-8", decode_responses=True)
        chat_list = r.lrange(self.company_chat,0,-1)
        # Count current chat participants by user_id; the global room ("0")
        # is handled separately below.
        user_cnt = 0
        if self.company != "0":
            self.key = self.company_chat+':user'
            self.user_id = self.scope['url_route']['kwargs']['user']
            self.user_key = self.key+"_"+self.user_id
            # Record this user.
            user = r.get(self.user_key) or 0
            r.set(self.user_key, int(user)+1) # number of sockets this user has open
            r.sadd(self.key, self.user_id)
            user_cnt = r.scard(self.key)
        else :
            # Global room: count the open sockets in the channel-layer group.
            user_cnt = len(r.zrange('asgi::group:chat_0',0,-1))
        await self.send(text_data = json.dumps({
            'type' : 1,
            'chat_list' : chat_list,
            'user_cnt' : user_cnt,
        }))
    async def disconnect(self, close_code) :
        # Remove this user's socket from the room bookkeeping.
        r = redis.Redis()
        if self.company != "0":
            user = r.get(self.user_key)
            user = int(user)-1
            r.set(self.user_key, user)
            # If the user has no remaining open sockets, drop them from
            # the participant set.
            if user == 0 :
                r.srem(self.key, self.user_id)
        await self.channel_layer.group_discard(
            self.company_chat,
            self.channel_name
        )
    async def receive(self, text_data) :
        # A socket sent a message: persist it and broadcast to the room.
        text_data_json = json.loads(text_data)
        nickname = text_data_json['nickname']
        message = text_data_json['message']
        date = text_data_json['date']
        time = text_data_json['time']
        r = redis.Redis()
        r.rpush(self.company_chat, text_data)
        await self.channel_layer.group_send(
            self.company_chat,
            {
                'type' : 'chat_message',
                'message' : message,
                'nickname' : nickname,
                'date' : date,
                'time' : time
            }
        )
    # Receive a message broadcast to the room.
    async def chat_message(self, event) :
        message = event['message']
        nickname = event['nickname']
        date = event['date']
        time = event['time']
        # Forward the message to this socket.
        await self.send(text_data = json.dumps({
            'type' : 2,
            'chat_list' : [{
                'message': message,
                'nickname' : nickname,
                'date' : date,
                'time' : time
            }]
        }))
|
994,514 | 8dd87d68191bbde02ce2f00e699807078f6acfa9 | import sys
# Read N matrices of identical order from stdin and print their sum.
num = int(input("Enter number of matrices:"))
matrices = []
for i in range(1, num+1):
    # SECURITY NOTE(review): eval() on raw input executes arbitrary code;
    # acceptable only as a trusted classroom exercise.
    a = (eval(input("enter number "+str(i)+" matrice:")))
    rows = len(a)
    col = len(a[0])
    # Reject ragged matrices (rows of differing length).
    for j in a:
        if len(j) != col:
            print("INVALID")
            sys.exit()
    matrices.append((a, (rows, col)))
# All matrices must share the order (rows, cols) of the first one.
order = matrices[0][1]
print(order)
for k in matrices:
    if k[1] != order:
        # NOTE(review): exits silently on order mismatch, unlike the
        # ragged-row case which prints "INVALID" first.
        sys.exit()
# Element-wise accumulation into a zero matrix of the common order.
res = [[0 for _ in range(order[1])] for p in range(order[0])]
for i in matrices:
    m = i[0]
    for p in range(order[0]): # No. of rows
        for q in range(order[1]):
            res[p][q] += m[p][q]
for i in res:
    print(i)
|
994,515 | b01462fe60a92db4c3f7052aa43e135447bb91f1 | from livelineentities import Odds
class Participant(object):
    """A competitor in a live-line event.

    Exposes ``participant_name``, ``rot_num`` (rotation number),
    ``visiting_home_draw`` and ``odds`` as plain read/write properties
    backed by underscore-prefixed attributes.
    """

    def _setParticipantName(self, participant_name=None):
        self._participant_name = participant_name

    def _getParticipantName(self):
        return self._participant_name

    def _setRotNum(self, rot_num=None):
        self._rot_num = rot_num

    def _getRotNum(self):
        return self._rot_num

    def _setVisitingHomeDraw(self, visiting_home_draw=None):
        self._visiting_home_draw = visiting_home_draw

    def _getVisitingHomeDraw(self):
        return self._visiting_home_draw

    def _setOdds(self, odds=None):
        self._odds = odds

    # BUG FIX: the getter previously accepted an unused ``odds`` parameter,
    # copy-pasted from the setter; a property fget only receives self.
    def _getOdds(self):
        return self._odds

    participant_name = property(_getParticipantName, _setParticipantName)
    rot_num = property(_getRotNum, _setRotNum)
    visiting_home_draw = property(_getVisitingHomeDraw, _setVisitingHomeDraw)
    odds = property(_getOdds, _setOdds)
|
994,516 | 70c69ab8c1036c7a96de7540d7ea7248f5a1f244 | # Brute Force method
# 2 loops - first loop on input_string_1 and second loop on input_string_2
# Time Complexity - O(n^2)
# Space Complexity - O(n)
def check_permutations_bfm(input_string_1, input_string_2):
    """Return True when the two strings are permutations of each other.

    Brute force: for each character of string 1, consume one unmatched
    occurrence in string 2.  O(n^2) time, O(n) space.
    """
    # Different lengths can never be permutations.
    if len(input_string_1) != len(input_string_2):
        print("Strings are not equal")
        return False
    # BUG FIX: the original counted *every* pairwise equality, so repeated
    # characters were over-counted ("aa" vs "aa" gave count 4 != 2 and
    # wrongly returned False).  Track which positions of string 2 have
    # already been matched instead.
    used = [False] * len(input_string_2)
    for ch in input_string_1:
        for j in range(len(input_string_2)):
            if not used[j] and input_string_2[j] == ch:
                used[j] = True
                break
        else:
            # No unmatched occurrence of ch remains in string 2.
            return False
    return True
# Driver Call
print("BFM {}".format(check_permutations_bfm("abc", "cba")))
|
994,517 | 72d16b134b4d38b1f4554a205823d07bccd6045d | import csv
# Scan the pipe-delimited u.item file and report rows with missing fields.
with open('u.item') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter='|')
    line_count = 0
    for row in csv_reader:
        line_count += 1
        # Flag rows where field 1, 2 or 4 is empty (presumably title,
        # release date and URL — TODO confirm against the u.item schema).
        if ((row[1] == "") or (row[2] == "") or (row[4] == "")):
            # BUG FIX: Python 2 print statement -> parenthesized print()
            # call, so the script runs under Python 3 as well (and is
            # unchanged under Python 2 for a single argument).
            print('Skipping movie ID=' + str(line_count))
|
994,518 | de88caad8181101487dd6c2ec98f4fe20678cd33 | #! /usr/bin/python3
from pymongo import MongoClient
client = MongoClient('mongodb://147.2.212.204:27017/')
prods = client.bz.prods

# Products to report on.
prod_all = ['SUSE Linux Enterprise Desktop 12',
            'SUSE Linux Enterprise Desktop 11 SP3',
            'SUSE Linux Enterprise Desktop 11 SP4 (SLED 11 SP4)']
# All known resolution states (kept for reference).
bug_sts = ['---','FIXED','UPSTREAM','NORESPONSE','MOVED','INVALID','WONTFIX','DUPLICATE','FEATURE','WORKSFORME']

# QA APAC-I team members, hoisted so the two team queries stay in sync.
team_members = ['xdzhang@suse.com', 'xjin@suse.com', 'yfjiang@suse.com',
                'ychen@suse.com', 'ysun@suse.com', 'wjiang@suse.com',
                'whdu@suse.com', 'sywang@suse.com', 'yosun@suse.com',
                'nwang@suse.com', 'bchou@suse.com']
# Resolutions that count as "valid" (paired with severity != Enhancement).
# NOTE(review): 'UPSTEAM' looks like a typo of 'UPSTREAM' (cf. bug_sts);
# kept as-is in case the database really stores the misspelling — verify.
valid_resolutions = ['FIXED', 'UPSTEAM', 'NORESPONSE', '---', 'MOVED']

teamBugA = {}   # total bugs filed by the team, per product
totalBugA = {}  # total bugs with cf_foundby '---', per product
teamBugV = {}   # valid bugs filed by the team, per product
totalBugV = {}  # valid bugs with cf_foundby '---', per product
for prod in prod_all:
    teamBugA[prod] = prods.find(
        {'product': prod, 'creator': {'$in': team_members}}).count()
    totalBugA[prod] = prods.find(
        {'product': prod, 'cf_foundby': '---'}).count()
    teamBugV[prod] = prods.find(
        {'product': prod, 'creator': {'$in': team_members},
         'resolution': {'$in': valid_resolutions},
         'severity': {'$ne': 'Enhancement'}}).count()
    totalBugV[prod] = prods.find(
        {'product': prod, 'cf_foundby': '---',
         'resolution': {'$in': valid_resolutions},
         'severity': {'$ne': 'Enhancement'}}).count()

# BUG FIX: all four summary lines previously printed teamBugA (copy-paste);
# each line now prints its matching counter.
print("num of total bugs reported by QA APACI is:" + str(teamBugA))
print("num of total bugs reported by all QA colleague is:" + str(totalBugA))
print("num of valid bugs reported by QA APACI is:" + str(teamBugV))
print("num of valid bugs reported by all QA colleague is:" + str(totalBugV))
|
994,519 | a811aee8c4cd42eda287c4c0804b87777c3198c8 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Variable basics: assignment, dynamic typing, printing, concatenation.
a = 10  # 'a' is a variable name, also called an identifier
# '=' is the assignment operator
print(a)

a = 10.6  # overriding (rebinding) the variable
print(a)

# Check the type of a variable
a = 10.6
type(a)

a = 'HUZAIFA'
type(a)

a = 10
type(a)

name = 'Huzaifa'
city = 'Karachi'
print(name, "lives in", city)

# Concatenation
a = 'Pakistan'
b = 'zindabad'
print(a + b)

num1 = 3
num2 = 5
# FIX: the original bound the result to `sum`, shadowing the builtin;
# renamed to `total`.  Printed output is unchanged.
total = num1 + num2
print(total)
|
994,520 | 0d87c9544c24336bc1577737a627deb6e848055b | from panther_base_helpers import gsuite_details_lookup as details_lookup
def rule(event):
    """Alert when a groups_enterprise moderator banned a user."""
    is_groups_app = event['id'].get('applicationName') == 'groups_enterprise'
    if not is_groups_app:
        return False
    details = details_lookup('moderator_action', ['ban_user_with_moderation'],
                             event)
    return bool(details)
def title(event):
    """Build the alert title from the acting user's email."""
    actor_email = event.get('actor', {}).get('email')
    return 'User [{}] banned another user from a group.'.format(actor_email)
|
994,521 | 56d047b5737d5313b3d3588dc700b3806b193934 | from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
from base64 import b64encode
import binascii
import json
def formatHex(temp_key):
    """Return *temp_key* as a hex literal wrapped in double quotes."""
    return '"{}"'.format(hex(temp_key))
# Fill a transaction JSON template with freshly generated DSA parameters,
# a signature over a fixed message, and a truncated public-key hash.
file_in = open("Transaction_Format.JSON","r+")
content = file_in.read()
#DSAParam & pubkey
key = DSA.generate(2048) # Generates a 2048 Bit public key
key_chain = [key.y, key.g, key.p, key.q] # Key y is the Public Key | Key G P Q are the DSA Param
# SECURITY NOTE(review): key.x is the private signing key — printing it to
# stdout leaks it (logs, terminals); confirm this is deliberate for a demo.
print("SigningKey: "+ str(key.x))
#Write to JSON file here <pubKey> <g> p q #Must be hexadecimal
content = content.replace('<g>', formatHex(key.g)) # Int to String formatHex
content = content.replace('<p>', formatHex(key.p))
content = content.replace('<q>', formatHex(key.q))
content = content.replace('<pubKey>', formatHex(key.y))
#Sig
# Sign a fixed demo message with FIPS 186-3 DSS.
message = b"Cybersecurity is cool!"
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
#Write to JSON file here <sig> #Must be hexadecimal
signature_hexed ="\"0x" #Formating JSON
signature = binascii.hexlify(signature) # Byte to String Hex
signature = signature.decode('utf-8')
signature_hexed += signature
signature_hexed +="\"" #Formating JSON
content = content.replace('<sig>', str(signature_hexed) )
#pubKeyHash
pub_key = bytes(str(key.y), 'utf-8') # Converts int public key to str and then Str to Byte
hash_pub_key = SHA256.new(pub_key) # Hashes byte public key
hash_pub_key = hash_pub_key.hexdigest() # Turns Bytes to hexadecimal
hash_pub_key_hexed ="\"0x" #Formating JSON
hash_pub_key_hexed += hash_pub_key[-40:]
hash_pub_key_hexed +="\"" #Formating JSON
#Write to JSON file here <pubKeyHash> #Must be hexadecimal 160 bits = 20 Bytes = 40Hex
content = content.replace('<pubKeyHash>', hash_pub_key_hexed) # Only adds the 160 least significant bits of hash value
#Update Transaction_Format.JSON
# NOTE(review): rewriting an "r+" file with seek(0) but no truncate()
# leaves stale trailing bytes whenever the new content is shorter than
# the template — verify the replacements always grow the file.
file_in.seek(0) # Go back to the start of the file
file_in.write(content) # Update JSON file
file_in.close() # Close JSON file
|
994,522 | 74ccc5ec5cb926784f673e0e45c03c2d0737db80 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
# BUG FIX: `pip install xlrd` is IPython/notebook shell syntax and a
# SyntaxError in a plain .py file; install the dependency from a shell
# instead:  pip install xlrd
# In[3]:
excelfile=pd.ExcelFile("TakenMind-Python-Analytics-Problem-case-study-1-1.xlsx")
df_resign=pd.read_excel(excelfile,'Employees who have left') # Sheet 2
df_exist=pd.read_excel(excelfile,'Existing employees') # Sheet 3
# In[4]:
#Data Understanding
df_resign.head()
# In[65]:
df_resign.describe()
# In[5]:
df_exist.head()
# In[66]:
df_exist.describe()
# In[6]:
df_resign['dept'].value_counts()
# In[7]:
#Data Visualization
plt.figure(figsize=(15,10))
sns_plot99 = sns.catplot('dept',data=df_resign,kind='count',aspect=2)
# fig = sns_plot99.get_figure()
# In[67]:
df = pd.DataFrame({'Employee Status':['Existing', 'Left'], 'Number':[11429, 3572]})
ax = df.plot.bar(x='Employee Status', y='Number', rot=0)
# In[8]:
plt.figure(figsize=(16,10))
plt.title("Satisfication level vs last evaluation")
sns_plot = sns.scatterplot(x=df_resign['satisfaction_level'],y=df_resign['last_evaluation'],hue='number_project',data=df_resign)
fig = sns_plot.get_figure()
fig.savefig("figure1.png")
# In[68]:
heatmap1_data = pd.pivot_table(df_resign, values='satisfaction_level',
index=['time_spend_company'],
columns='salary')
sns.heatmap(heatmap1_data, cmap="YlGnBu", annot=True)
# In[9]:
plt.figure(figsize=(15,8))
plt.title('Salary vs Satisfaction level')
sns_plot1 = sns.boxplot(x=df_resign['salary'],y=df_resign['satisfaction_level'],hue='time_spend_company',data=df_resign,palette='Blues')
fig = sns_plot1.get_figure()
fig.savefig("figure2.png")
# In[10]:
plt.figure(figsize=(15,8))
plt.title("Salary vs Monthly hours spent")
sns.boxplot(x=df_resign['salary'],y=df_resign['average_montly_hours'],hue='number_project',palette='Blues',data=df_resign)
plt.show()
# In[11]:
plt.figure(figsize=(15,8))
plt.title("Average monthly hours vs promotions in last 5 years")
sns.boxplot(x=df_resign['promotion_last_5years'],y=df_resign['average_montly_hours'],hue='time_spend_company',data=df_resign,palette='Set3')
plt.show()
# In[69]:
plt.figure(figsize=(15,8))
plt.title('Average monthly hours vs number of projects')
sns.boxplot(x=df_resign['number_project'],y=df_resign['average_montly_hours'],data=df_resign,palette='Set3')
plt.show()
# In[70]:
plt.figure(figsize=(18,10))
plt.title("Department vs Satisfcation level")
sns.boxplot(x=df_resign['dept'],y=df_resign['satisfaction_level'],hue='time_spend_company',data=df_resign,palette='Set3')
plt.show()
# In[71]:
#Combining both the datasets into a single dataset
df_resign['Left'] = 1
df_exist['Left'] = 0
combined_df=pd.concat([df_resign,df_exist],axis=0)
# print(combined_df)
combined_df.head()
# In[15]:
combined_df.info()
# In[16]:
# Creating dummies
# columns=['dept','salary']
# dummies=pd.get_dummies(combined_df[columns],drop_first=True)
# combined_df=pd.concat([combined_df,dummies],axis=1)
# In[17]:
# from sklearn.preprocessing import OneHotEncoder
# from sklearn.pipeline import Pipeline
# from sklearn.compose import ColumnTransformer
# cat_col = ['dept', 'salary']
# categorical_transformer = Pipeline(steps=[
# ('onehotencoder',OneHotEncoder(handle_unknown='ignore'))
# ])
# ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
# X = np.array(ct.fit_transform(X))
# preprocessor = ColumnTransformer(transformers /
# In[18]:
# X_train.head()
# In[19]:
# combined_df=combined_df.drop(columns,axis=1) # Dropping uncessary columns
# In[73]:
combined_df.head()
# In[74]:
combined_df.tail()
# In[72]:
combined_df.info()
# In[75]:
print("{0:.1f}% of people that have resigned from company X".format(100-(len(combined_df[combined_df['Left'] == 0])/len(combined_df))*100))
# In[22]:
# # Dividing the dataset into X and Y
# combined_df.drop('Emp ID',inplace = True, axis = 1)
# X = combined_df.iloc[:, :-1]
# y = combined_df.iloc[:, -1]
# X.head()
# In[23]:
# from sklearn.compose import ColumnTransformer
# from sklearn.preprocessing import OneHotEncoder
# ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [-2])], remainder='passthrough')
# ct2 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [-1])], remainder='passthrough')
# X = np.array(ct1.fit_transform(X))
# X = np.array(ct2.fit_transform(X))
# # X = ct1.fit_transform(X)
# # X = ct2.fit_transform(X)
# In[24]:
# X = pd.DataFrame(data = X, index = combined_df.index, columns = combined_df.columns)
# In[25]:
# adjusting categorical columns
columns=['dept','salary']
dummies=pd.get_dummies(combined_df[columns],drop_first=True)
combined_df=pd.concat([combined_df,dummies],axis=1)
combined_df=combined_df.drop(columns,axis=1)
# In[26]:
# Dividing the dataset into X and Y
X=combined_df.drop('Left',axis=1)
y=combined_df['Left']
# In[27]:
X.head()
# In[77]:
# Splitting of the X and y datasets into train and test set
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)
# In[78]:
X=pd.concat([X_train,y_train],axis=1)
emp_resign = X[X.Left==0]
emp_exist = X[X.Left==1]
# In[79]:
# X_train.drop('Emp ID', inplace = True, axis = 1)
# X_train.head()
# In[80]:
# Logisstic regression
from sklearn.linear_model import LogisticRegression
# from sklearn.feature_selection import RFE
classifier1 = LogisticRegression()
# pipeline1 = Pipeline(steps = [
# ('preprocessor',preprocessor),
# ('classifier',classifier1)
# ])
# model=LogisticRegression()
# logreg=RFE(model,15)
# pipeline1.fit(X_train,y_train)
classifier1.fit(X_train.drop('Emp ID', axis = 1),y_train)
from sklearn.metrics import accuracy_score
predictions = classifier1.predict(X_test.drop('Emp ID', axis = 1))
predictions
# print("The Accuracy score using logistic regression is:{:.3f}".format(accuracy_score(y_test,classifier1.predict(X_test))))
# In[81]:
# Model evaluation
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print("The Accuracy score using logistic regression is:{:.3f}".format(accuracy_score(y_test,classifier1.predict(X_test.drop('Emp ID', axis = 1)))))
print("The Precison score using logistic regression is:{:.3f}".format(precision_score(y_test,classifier1.predict(X_test.drop('Emp ID', axis = 1)))))
print("The Recall score using logistic regression is:{:.3f}".format(recall_score(y_test,classifier1.predict(X_test.drop('Emp ID', axis = 1)))))
print("The F1 score using logistic regression is:{:.3f}".format(f1_score(y_test,classifier1.predict(X_test.drop('Emp ID', axis = 1)))))
# In[82]:
# Random forest classifier
from sklearn.ensemble import RandomForestClassifier
classifier2 = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
# pipeline2 = Pipeline(steps = [
# ('preprocessor',preprocessor),
# ('classifier',classifier2)
# ])
# # model=LogisticRegression()
# # logreg=RFE(model,15)
# pipeline2.fit(X_train,y_train)
# print("The Accuracy score using logistic regression is:{:.3f}".format(accuracy_score(y_test,pipeline2.predict(X_test))))
classifier2.fit(X_train.drop('Emp ID', axis = 1),y_train)
predictions2 = classifier2.predict(X_test.drop('Emp ID', axis = 1))
predictions2
# print("The Accuracy score using random forest classifer is:{:.3f}".format(accuracy_score(y_test,classifier2.predict(X_test))))
# In[83]:
# Model evaluation
# from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print("The Accuracy score using Random Forest Classifier is:{:.3f}".format(accuracy_score(y_test,classifier2.predict(X_test.drop('Emp ID', axis = 1)))))
print("The Precison score using Random Forest Classifier is:{:.3f}".format(precision_score(y_test,classifier2.predict(X_test.drop('Emp ID', axis = 1)))))
print("The Recall score using Random Forest Classifier is:{:.3f}".format(recall_score(y_test,classifier2.predict(X_test.drop('Emp ID', axis = 1)))))
print("The Recall score using Random Forest Classifier is:{:.3f}".format(f1_score(y_test,classifier2.predict(X_test.drop('Emp ID', axis = 1)))))
# In[84]:
# Support vector classifier
from sklearn.svm import SVC
classifier3 = SVC(kernel = 'rbf', C = 1)
# pipeline3 = Pipeline(steps = [
# ('preprocessor', preprocessor),
# ('classifier', classifier3)
# ])
# model=LogisticRegression()
# logreg=RFE(model,15)
classifier3.fit(X_train,y_train)
predictions3 = classifier3.predict(X_test)
predictions3
# In[85]:
# Model evaluation
# from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
print("The Accuracy score using SVC is:{:.3f}".format(accuracy_score(y_test,classifier3.predict(X_test))))
print("The Precison score using SVC is:{:.3f}".format(precision_score(y_test,classifier3.predict(X_test))))
print("The Recall score using SVC is:{:.3f}".format(recall_score(y_test,classifier3.predict(X_test))))
# In[86]:
# building with random forest classification as it is best suited
pred_h = np.concatenate((predictions2.reshape(len(predictions2),1),y_test.values.reshape(len(y_test),1)),1)
print(pred_h)
# In[87]:
# employees_prone_to_leave = []
# for emp_id, i in enumerate(pred_h):
# if (i[0]!=i[-1] and i[0]==1):
# employees_prone_to_leave.append(emp_id+1)
# employees_prone_to_leave#.extend([x for x in y_train if ])
# # print(len(pred_h))
# In[88]:
# Oversamlpling
# from sklearn.utils import resample
# y = combined_df['Left']
# X= combined_df.drop(['Left'],axis=1)
# X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=0.75,random_state=50)
# X=pd.concat([X_train,y_train],axis=1)
# emp_not_left=X[X.Left==0]
# emp_left=X[X.Left==1]
# In[89]:
# unsampling the minority by adding dummy rows to the left equal to 1
# left_upsampled= resample(emp_left,replace=True,n_samples=len(emp_not_left),random_state=50)
# left_upsampled=pd.concat([emp_not_left,left_upsampled])
# In[90]:
# left_upsampled.Left.value_counts() # Both classes now having equal samples
# In[91]:
# # Preparing for X train and Y train dataset
# y_train=left_upsampled.Left
# X_train=left_upsampled.drop('Left',axis=1)
# In[92]:
# Model building
# new_logreg=LogisticRegression()
# logreg_rfe=RFE(new_logreg,15)
# logreg_rfe.fit(X_train.drop('Emp ID',axis=1),y_train)
# upsampled_pred=logreg_rfe.predict(X_test.drop('Emp ID',axis=1))
# In[93]:
# # Model evaluation
# from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# print("The Accuracy score using logistic regression is:{:.3f}".format(accuracy_score(y_test,upsampled_pred)))
# print("The Precison score using logistic regression is:{:.3f}".format(precision_score(y_test,upsampled_pred)))
# print("The Recall score using logistic regression is:{:.3f}".format(recall_score(y_test,upsampled_pred)))
# print("The F1 score using logistic regression is:{:.3f}".format(f1_score(y_test,upsampled_pred)))
# In[94]:
# # Model building
# rfc_upsampled=RandomForestClassifier()
# rfc_upsampled.fit(X_train.drop('Emp ID',axis=1),y_train)
# upsampled_rfc_pred=rfc_upsampled.predict(X_test.drop('Emp ID',axis=1))
# In[95]:
# Model evaluation
# from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# print("The Accuracy score using Random Forest Classifier is:{:.3f}".format(accuracy_score(y_test,upsampled_rfc_pred)))
# print("The Precison score using Random Forest Classifier is:{:.3f}".format(precision_score(y_test,upsampled_rfc_pred)))
# print("The Recall score using Random Forest Classifier is:{:.3f}".format(recall_score(y_test,upsampled_rfc_pred)))
# print("The F1 score using Random Forest Classifier is:{:.3f}".format(f1_score(y_test,upsampled_rfc_pred)))
# In[96]:
# # Model Building
# upsampled_svc=SVC(C=1)
# upsampled_svc.fit(X_train.drop('Emp ID',axis=1),y_train)
# svc_upsampled_pred=upsampled_svc.predict(X_test.drop('Emp ID',axis=1))
# In[97]:
# # Model evaluation
# from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
# print("The Accuracy score using SVC is:{:.3f}".format(accuracy_score(y_test,svc_upsampled_pred)))
# print("The Precison score using SVC is:{:.3f}".format(precision_score(y_test,svc_upsampled_pred)))
# print("The Recall score using SVC is:{:.3f}".format(recall_score(y_test,svc_upsampled_pred)))
# print("The F1 score using SVC is:{:.3f}".format(f1_score(y_test,svc_upsampled_pred)))
# In[98]:
# Random Forest
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
rfc=RandomForestClassifier(random_state=50)
n_folds=KFold(n_splits=5,shuffle=True, random_state=50)
parameters={'criterion':['gini','entropy'],'max_depth': range(5,30,5),'max_features': range(10,18,2),
'min_samples_split': range(2,10,2)}
model_cv = GridSearchCV(estimator=classifier2,param_grid=parameters,cv=n_folds,verbose=1,
return_train_score=True,scoring='recall')
# In[99]:
model_cv.fit(X_train,y_train)
# In[100]:
model_cv.best_params_
# In[101]:
model_cv.best_score_
# In[102]:
final_classifier=RandomForestClassifier(criterion='entropy', max_depth=5, max_features=14, min_samples_split=2, random_state=0)
final_classifier.fit(X_train.drop('Emp ID',axis=1),y_train)
y_pred=final_classifier.predict(X_test.drop('Emp ID',axis=1))
# In[103]:
# #model evaluation
# from sklearn.metrics import classification_report
# print(classification_report(y_test,y_pred))
# In[104]:
# final_classifier.feature_importances_
# In[105]:
# X_train.columns
# In[106]:
# features=np.array(X_train.drop('Emp ID',axis=1).columns)
# important=final_rfc.feature_importances_
# indexes_features=important.argsort()
# for i in indexes_features:
# print("{} : {:.2f}%".format(features[i],important[i]*100))
# In[107]:
# Finding employees who are prone to leave
y_test1=pd.concat([y_test,X_test['Emp ID']],axis=1)
y_test3=pd.DataFrame(y_pred)
y_test3.reset_index(inplace=True, drop=True)
gf=pd.concat([y_test1.reset_index(),y_test3],1)
new_df=gf[gf.Left==0]
new_df=new_df.drop('index',axis=1)
new_df.columns=['Left','Emp ID','Predicted_left']
Employees_prone_to_leave=new_df[new_df['Predicted_left']==1]
Employees_prone_to_leave=Employees_prone_to_leave.reset_index()
Employees_prone_to_leave=Employees_prone_to_leave.drop(['Left','Predicted_left','index'],axis=1)
# In[108]:
Employees_prone_to_leave
# In[109]:
result = []
for i in Employees_prone_to_leave.values:
for j in i:
result.append(j)
result
# In[110]:
output = pd.DataFrame({'Emp ID': result})
output.to_csv('submission.csv', index=False)
# In[111]:
output
# In[112]:
#Accuracy Check
print("The Accuracy score using final classifier is:{:.3f}".format(accuracy_score(y_test,y_pred)))
print("The Precison score using final classifier is:{:.3f}".format(precision_score(y_test,y_pred)))
print("The Recall score using final classifier is:{:.3f}".format(recall_score(y_test,y_pred)))
# In[ ]:
|
994,523 | f75afff7cc1c224ecdd005afebaca2de3019402d | import numpy as np
import cv2
import cv
import freenect
import numpy as np
import time
range_dic=((400,677,50),(677,724,100),(724,834,150),(834,890,200))
def getDepthMat(lower,higher,color):
    """Grab one Kinect depth frame and return a banded, cropped mask.

    Pixels whose depth lies strictly in (lower, higher) become 255, the
    frame is cropped, and the mask is multiplied by *color* (the band's
    grey level from range_dic).

    NOTE(review): the crop indexes rows with c1 and columns with r1
    (depth[c1:c1+640, r1:r1+480]) — for a 480x640 (rows x cols) frame
    these bounds look transposed; confirm against the sensor layout.
    NOTE(review): the mask is uint8, so 255*color wraps modulo 256 —
    verify the intended band values.
    """
    depth,timestamp = freenect.sync_get_depth()
    # Band-pass the raw depth values into a 0/255 mask.
    depth = 255 * np.logical_and(depth > lower, depth < higher)
    depth = depth.astype(np.uint8)
    c1=200
    r1=0
    depth = depth[c1:c1+640,r1:r1+480]
    depth=depth*color
    return depth
'''
while True:
depth = getDepthMat()
print(depth)
cv2.imshow('Depth', depth)
cv2.waitKey(10)
'''
# Capture one banded frame per depth range, preview each for 1s, then save
# the stack of masks to disk as a single .npy file.
data_=[]
for rang in range_dic:
    print('%d < depth < %d' % (rang[0], rang[1]))
    image=getDepthMat(rang[0], rang[1],rang[2])
    cv2.imshow('Depth',image)
    data_.append(image)
    cv2.waitKey(1000)
    time.sleep(.1)
np.save('/home/pawan/PycharmProjects/Knet/live.npy',data_)
|
994,524 | c275bd8e08ed28ab94b5545f443df5e7292598bd | #!/usr/bin/env python3
'''This program takes an image that in TIFF format, which is rotated counter-clockwise with a size of 192 * 192
and chaning it to .jpeg file type, correcting the rotation, resizing it to 128 * 128 and saving the resultant
image in new folder /opt/icons/'''
import os
from PIL import Image
directory = './images/'
new_path = './opt/icons/'
# Iterate over each image: normalize mode, correct rotation, resize to
# 128x128 and save as JPEG under ./opt/icons/.
for filename in os.listdir(directory):
    with Image.open(directory + filename) as im:
        # JPEG cannot store modes like RGBA/P; normalize to RGB first.
        if im.mode != 'RGB':
            im = im.convert('RGB')
        # BUG FIX: replace the original extension instead of appending
        # ".jpeg" to it (previously produced names like "icon.tiff.jpeg").
        base = os.path.splitext(filename)[0]
        im.rotate(90).resize((128, 128)).save(new_path + base + '.jpeg')
|
994,525 | ec82d11a3afe0b57fa440617d843946a9b9140d0 | lista= []
# Read words until the user types 'fim'; collect those starting with 'a'.
palavra = ''
while palavra != 'fim':
    palavra = input('qual a palavra? ')
    # BUG FIX: guard against empty input, which previously crashed on
    # palavra[0] with IndexError.
    if palavra and palavra[0] == 'a':
        lista.append(palavra)
print(lista)
994,526 | 9838303d1f090e8302d2eed3f0a8c1cb9bf4d180 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental Resolver for evaluating the condition."""
from typing import Dict, List, Optional
from tfx import types
from tfx.dsl.compiler import placeholder_utils
from tfx.dsl.components.common import resolver
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types as portable_data_types
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.proto.orchestration import placeholder_pb2
class ConditionalStrategy(resolver.ResolverStrategy):
  """Strategy that resolves artifacts if predicates are met.

  This resolver strategy is used by TFX internally to support conditional.
  Not intended to be directly used by users.
  """

  def __init__(self, predicates: List[placeholder_pb2.PlaceholderExpression]):
    # Placeholder expressions that must ALL evaluate to True for the
    # inputs to resolve.
    self._predicates = predicates

  def resolve_artifacts(
      self, metadata_handler: metadata.Metadata,
      input_dict: Dict[str, List[types.Artifact]]
  ) -> Optional[Dict[str, List[types.Artifact]]]:
    """Evaluates every predicate against the candidate inputs.

    Args:
      metadata_handler: Metadata handler (part of the ResolverStrategy
        interface; not used by this strategy).
      input_dict: Input artifacts keyed by input name.

    Returns:
      input_dict unchanged when all predicates evaluate to True.

    Raises:
      ValueError: If a predicate evaluates to a non-boolean value.
      exceptions.SkipSignal: If any predicate evaluates to False.
    """
    for placeholder_pb in self._predicates:
      context = placeholder_utils.ResolutionContext(
          exec_info=portable_data_types.ExecutionInfo(input_dict=input_dict))
      predicate_result = placeholder_utils.resolve_placeholder_expression(
          placeholder_pb, context)
      if not isinstance(predicate_result, bool):
        raise ValueError("Predicate evaluates to a non-boolean result.")
      if not predicate_result:
        raise exceptions.SkipSignal("Predicate evaluates to False.")
    return input_dict
|
994,527 | cfc9d48afcd6241f2bb241f34f64b73bd7024b1a | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Disassembler engine for disassemble and instrumentation base on Capstone
disassemble engine.
"""
import struct
import os
from Log import LoggerFactory
class CodeManager(object):
    """Owns a mutable code buffer plus its base RVA and applies
    instrumentation (insertions / in-place patches) to it, logging every
    modification to "Instrument.log".

    `code` must support mutable slice assignment (a bytearray is expected).
    A dirty flag (`_code_need_handled`) tells downstream consumers that the
    buffer changed and offsets must be re-processed.
    """

    def __init__(self, code, rva):
        self.code = code  # mutable code buffer
        self.rva = rva    # RVA corresponding to offset 0 of `code`
        self.log = LoggerFactory().get_new_logger("Instrument.log")
        self._code_need_handled = True  # dirty flag, see class docstring

    def __del__(self):
        # Flush/close the instrumentation log when the manager goes away.
        self.log.fin()

    def get_base_rva(self):
        """Return the RVA of the first byte of the managed buffer."""
        return self.rva

    def get_dword_from_offset(self, offset, offset_end):
        """Read an integer of (offset_end - offset) bytes at `offset`.

        NOTE(review): despite the name this is not fixed to 4 bytes — it
        delegates to get_data_from_offset_with_format for any 1/2/4/8 size.
        """
        return self.get_data_from_offset_with_format(offset, offset_end)

    def get_data_from_offset_with_format(self, offset, offset_end):
        """Unpack the bytes in [offset, offset_end) as a signed integer
        (native byte order, width chosen from the span size)."""
        size = offset_end - offset
        return struct.unpack(self.get_format_from_size(size),
                             self.code[offset:offset_end])[0]

    def get_data_at_offset(self, offset, offset_end):
        """Return the raw bytes in [offset, offset_end)."""
        return self.code[offset:offset_end]

    def instrument(self, offset, instrument_instruction):
        """Insert `instrument_instruction` at `offset` (grows the buffer)."""
        self.log.log(
            '[0] [0x{:05x}]\t{}\n'.format(offset, instrument_instruction))
        # Zero-length slice assignment == insertion before `offset`.
        self.code[offset:offset] = instrument_instruction
        self.need_code_handle()

    def instrument_with_replace(self, offset, origin_instruction_size,
                                instrument_instruction):
        """Overwrite `origin_instruction_size` bytes at `offset` with
        `instrument_instruction`.

        BUG FIX: the original sliced code[offset:origin_instruction_size],
        treating the size as an absolute end offset; the slice end must be
        offset + origin_instruction_size.
        """
        self.log.log(
            '[0] [0x{:05x}]\t{}\n'.format(offset, instrument_instruction))
        self.code[offset:offset + origin_instruction_size] = \
            instrument_instruction
        self.need_code_handle()

    def instrument_at_last(self, instrument_instruction):
        """Insert an instruction near the end of the buffer; returns the
        insertion offset.

        NOTE(review): inserts at len(code) - 1, i.e. *before* the final
        byte, not after it — confirm this off-by-one is intentional.
        """
        offset = len(self.code) - 1
        self.log.log("[LAST]")
        self.instrument(offset, instrument_instruction)
        return offset

    def set_instruction_at_offset(self, offset, offset_end, instruction):
        """Replace the bytes in [offset, offset_end) with `instruction`,
        logging old and new bytes."""
        self.log.log(
            '[1] [0x{:05x}]\t{} \t{} \n'.format(offset,
                                                self.code[offset:offset_end],
                                                instruction))
        self.code[offset:offset_end] = instruction
        self.need_code_handle()

    def set_data_at_offset_with_format(self, offset, offset_end, data):
        """Pack integer `data` into [offset, offset_end) (native order,
        width chosen from the span size), logging old and new values."""
        size = offset_end - offset
        fmt = self.get_format_from_size(size)
        unpack_data = struct.unpack(fmt, self.code[offset:offset_end])
        self.log.log('[2] [0x{:05x}]\t{} \t{} \n'.format(offset,
                                                         unpack_data,
                                                         data))
        self.code[offset:offset_end] = struct.pack(fmt, data)
        self.need_code_handle()

    def get_code(self):
        """Return the (possibly instrumented) code buffer."""
        return self.code

    def is_need_code_handle(self):
        """True when the buffer changed since the last code_handled()."""
        return self._code_need_handled

    def code_handled(self):
        """Mark the buffer as processed (clears the dirty flag)."""
        self._code_need_handled = False

    def need_code_handle(self):
        """Mark the buffer as modified (sets the dirty flag)."""
        self._code_need_handled = True

    @staticmethod
    def get_format_from_size(size):
        """Map a byte width (1/2/4/8) to a signed struct format character;
        returns None for any other width."""
        if size == 8:
            fmt = 'q'
        elif size == 4:
            fmt = 'i'
        elif size == 2:
            fmt = 'h'
        elif size == 1:
            fmt = 'b'
        else:
            fmt = None
        return fmt

    @staticmethod
    def get_format_from_size_little_endian(size):
        """Like get_format_from_size but explicitly little-endian ('<');
        terminates the process on an unsupported width (kept from the
        original implementation)."""
        if size == 8:
            fmt = '<q'
        elif size == 4:
            fmt = '<i'
        elif size == 2:
            fmt = '<h'
        elif size == 1:
            fmt = '<b'
        else:
            fmt = None
            print("ERROR")
            exit()
        return fmt

    def get_data_from_rva(self, rva, length):
        """Return `length` raw bytes starting at absolute RVA `rva`."""
        zero_relative_rva = rva - self.rva
        data = self.get_data_at_offset(zero_relative_rva,
                                       zero_relative_rva + length)
        return data
|
994,528 | 6e0b8580a8f858f9dbe279e4c1f214c561b46dbb | # Exploratory data analysis for auto-mpg dataset
# https://www.kaggle.com/devanshbesain/exploration-and-analysis-auto-mpg
#https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
# Use fully-qualified option names: the bare 'precision' abbreviation was
# deprecated in pandas 1.x and removed in pandas 2.0.
pd.set_option('display.precision', 2)  # 2 decimal places
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 30)
pd.set_option('display.width', 150)  # wide windows
import os
figdir = "../figures"
def save_fig(fname):
    """Save the current matplotlib figure as `fname` inside the module-level
    `figdir`; silently does nothing when `figdir` is falsy."""
    if not figdir:
        return
    plt.savefig(os.path.join(figdir, fname))
#from sklearn.datasets import fetch_openml
#auto = fetch_openml('autoMpg', cache=True)
# The OpenML version converts the original categorical data
# to integers starting at 0.
# We want the 'raw' data.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
# We made a cached copy since UCI repository is often down
#url = 'https://raw.githubusercontent.com/probml/pyprobml/master/data/mpg.csv'
#column_names = ['mpg','cylinders','displacement','horsepower','weight',
#                'acceleration', 'model_year', 'origin', 'name']
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
                'Acceleration', 'Year', 'Origin', 'Name']
# Whitespace-separated file; the UCI data marks missing entries with "?"
df = pd.read_csv(url, names=column_names, sep='\s+', na_values="?")
# The last column (name) is a unique id for the car, so we drop it
df = df.drop(columns=['Name'])
df.info()
# We notice that there are only 392 horsepower rows, but 398 of the others.
# This is because the HP column has 6 missing values (also called NA, or
# not available).
# There are 3 main ways to deal with this:
# Drop the rows with any missing values using dropna()
# Drop any columns with any missing values using drop()
# Replace the missing values with some other value (eg the median) using fillna.
# (This latter is called missing value imputation.)
df = df.dropna()
# Origin is categorical (1=USA, 2=Europe, 3=Japan)
df['Origin'] = df.Origin.replace([1,2,3],['USA','Europe','Japan'])
df['Origin'] = df['Origin'].astype('category')
# Cylinders is an integer in [3,4,5,6,8]
#df['Cylinders'] = df['Cylinders'].astype('category')
# Year is an integer year (between 70 and 82)
#df['Year'] = df['Year'].astype('category')
# Keep a pristine copy before any further mutation
df0 = df.copy()
# Let us check the datatypes
print(df.dtypes)
# Let us check the categories
df['Origin'].cat.categories
# Let us inspect the data
df.tail()
#https://www.kaggle.com/devanshbesain/exploration-and-analysis-auto-mpg
# Plot mpg distribution for cars from different countries of origin
data = pd.concat( [df['MPG'], df['Origin']], axis=1)
fig, ax = plt.subplots()
ax = sns.boxplot(x='Origin', y='MPG', data=data)
# Dashed red line marks the overall mean MPG for reference
ax.axhline(data.MPG.mean(), color='r', linestyle='dashed', linewidth=2)
plt.savefig(os.path.join(figdir, 'auto-mpg-origin-boxplot.pdf'))
plt.show()
# Plot mpg distribution for cars from different years
data = pd.concat( [df['MPG'], df['Year']], axis=1)
fig, ax = plt.subplots()
ax = sns.boxplot(x='Year', y='MPG', data=data)
ax.axhline(data.MPG.mean(), color='r', linestyle='dashed', linewidth=2)
plt.savefig(os.path.join(figdir, 'auto-mpg-year-boxplot.pdf'))
plt.show()
|
994,529 | cde1b524cd60c9d9b1995ce42828b663845a2edf | import networkx as nx
import torch_geometric
from torch_geometric.datasets import Planetoid
import markov_clustering as mc
import random
import matplotlib.pyplot as plt
# Load the Cora citation graph, run Markov clustering, and record the
# modularity obtained for a range of inflation values.
dataset = Planetoid(root='/local/scratch', name='Cora')
G = torch_geometric.utils.to_networkx(dataset[0])
G = G.to_undirected()
matrix = nx.to_scipy_sparse_matrix(G)
result = mc.run_mcl(matrix, inflation=1.3)
clusters = mc.get_clusters(result)
print(clusters)
# perform clustering using different inflation values from 1.1 to 2.5
# for each clustering run, calculate the modularity.
# FIX: use a context manager so the results file is closed (and flushed)
# even if one of the clustering runs raises.
with open("Markov_Modularities.txt", "w") as out_file:
    for inflation in [i / 10 for i in range(11, 26)]:
        result = mc.run_mcl(matrix, inflation=inflation)
        clusters = mc.get_clusters(result)
        Q = mc.modularity(matrix=result, clusters=clusters)
        print("inflation:", inflation, "modularity:", Q)
        out_file.write(str(inflation) + "," + str(Q))
        out_file.write('\n')
|
994,530 | 0086688ec97851d3f62e998fe4f586b9d088561d | from function_stock_esg_scraper import(
get_stock_index,
download_yahoo_stock_htmlfile,
get_stock_data,
write_to_csv,
get_esg_from_html,
join_stock_esg,
download_msci_esg_ratings_htmlfile
)
def scrap_stock():
    """Scrape Dow Jones component stock data and MSCI ESG ratings, join
    them, and persist the result as CSV."""
    index_table = get_stock_index(url='https://finance.yahoo.com/quote/%5EDJI/components?p=%5EDJI')
    # Cache raw HTML for both sources before parsing.
    download_yahoo_stock_htmlfile(stock_index=index_table)
    download_msci_esg_ratings_htmlfile(stock_index=index_table)
    stock_df = get_stock_data(stock_index=index_table)
    esg_df = get_esg_from_html(stock_index=index_table)
    merged = join_stock_esg(df_stock=stock_df, df_esg=esg_df)
    write_to_csv(df_stock_esg=merged)


if __name__ == "__main__":
    scrap_stock()
|
994,531 | 947b78f50fb1596a33d162652f426b88143a733b | import numpy as np
def series_to_supervised(data, window, forcast_horizon):
    """Slide a window over `data` (a DataFrame) to build supervised pairs.

    X: stacked blocks of `window` consecutive rows (all columns).
    y: for each block, the next `forcast_horizon` values of column 0.
    Returns (X, y) as numpy arrays of shape (n, window, n_cols) and
    (n, forcast_horizon).
    """
    n_samples = data.shape[0] - window - forcast_horizon + 1
    X = np.stack(
        [data.iloc[start:start + window] for start in range(n_samples)],
        axis=0)
    y = np.stack(
        [data.iloc[start + window:start + window + forcast_horizon, 0]
         for start in range(n_samples)],
        axis=0)
    return X, y
def _calculate_mape(Y_real, Y_pred):
return np.sum(np.abs(Y_real - Y_pred)) / np.sum(Y_pred)
|
994,532 | 766c7689b8ed6ae19cb31ff4e6c8a8b08fdaeb44 | """
INPUT:
4
Sasikumar:50:60:70
Arun:60:40:90
Manoj:50:50:60
Rekha:60:35:45
OUTPUT:
Arun
"""
# Read n lines of "name:maths:physics:chemistry" records and print the name
# with the highest total marks.
n = int(input())
best_total = 0
# FIX: initialize the name so the script cannot crash with NameError when
# n == 0 (the original's `maxstud` was unbound in that case). Also renamed
# the locals that shadowed the builtins `max` and `sum`.
best_name = None
for _ in range(n):
    # A line may hold several whitespace-separated records.
    for record in input().split():
        name, m, p, c = record.split(':')
        total = int(m) + int(p) + int(c)
        if total > best_total:  # strict '>' keeps the first of tied totals
            best_total = total
            best_name = name
print(best_name)
|
994,533 | 240e28359913bc8949ce85c6f1489d3d113d69d7 | #!/usr/bin/python3
# Powered by FJW!
import sys
import os
import argparse
import random
import threading
import backTCP
from utils import *
# Actions: What to do for a stream of incoming packets
# 0: Do nothing and forward
# 1: Drop unless retransmitted
# 2: Swap two packets
# 3: Randomly order 3 packets and maybe drop one and maybe duplicate one
#
# You can configure the following list to change the possibility of each action
# (weights out of 20 draws: 7 forward, 5 drop, 5 swap, 3 shuffle).
ACTIONS = [0] * 7 + [1] * 5 + [2] * 5 + [3] * 3
def pass_through(from_socket, to_socket):
    """Spawn a daemon thread that blindly relays packets from `from_socket`
    to `to_socket` until `from_socket.sock` becomes None (closed).

    Returns the started thread so callers may join it.
    """
    def relay(src, dst):
        # Stop as soon as the source connection is closed - don't waste CPU.
        while src.sock is not None:
            try:
                dst.send(src.recv())
            except Exception:
                # Best-effort forwarding: swallow transient errors.
                pass

    # Run in background and don't worry anymore.
    worker = threading.Thread(target=relay, args=(from_socket, to_socket),
                              daemon=True)
    worker.start()
    return worker
def btMITM(out_addr, out_port, in_addr, in_port):
    """Man-in-the-middle test channel between a backTCP sender and receiver.

    Listens on (in_addr, in_port) for the sender, connects to
    (out_addr, out_port) as the receiver, and forwards the sender's packets
    while randomly applying one of ACTIONS (forward / drop / swap / shuffle)
    to simulate an unreliable link. Server responses are forwarded untouched
    by a background pass_through thread. Returns when the sender sends None.
    """
    # This is going to be challenging: listen and send at the same time while manipulating packets
    in_sock = backTCP.BTcpConnection('recv', in_addr, in_port)
    out_sock = backTCP.BTcpConnection('send', out_addr, out_port)
    # We're not going to manipulate server responses
    pass_through(out_sock, in_sock)
    packets = []
    while True:
        action = random.choice(ACTIONS)
        log('debug', f"Action: {action}")
        # Actions 2 and 3 need 2 resp. 3 buffered packets before acting;
        # actions 0 and 1 operate on a single packet.
        packet_needed = max(1, action)
        packet_count = 0
        while packet_count < packet_needed:
            p = in_sock.recv()
            if p is None:
                # The last ones aren't manipulated
                for p in packets:
                    out_sock.send(p)
                out_sock.send(None) # Tell the receiver to close
                in_sock.close()
                out_sock.close()
                return
            packet_count += 1
            packets.append(p)
        if action == 0:
            pass # through
        elif action == 1:
            # NOTE(review): flag bit 0 presumably marks a retransmitted
            # packet (see backTCP) — only first transmissions are dropped.
            if not packets[0].flag & 1:
                # Packet loss
                packets.pop()
        elif action == 2:
            # Swap packets
            packets = packets[::-1]
        else:
            # Shuffle three packets ...
            random.shuffle(packets)
            for i in range(len(packets)):
                if random.random() >= 0.8:
                    # ... and maybe duplicate one ...
                    packets.append(random.choice(packets))
                    break
                if not packets[i].flag & 1 and random.random() >= 0.5:
                    # ... or drop up to 1 at random
                    packets.pop(i)
                    break
        for p in packets:
            out_sock.send(p)
        packets = []
def parse_args():
    """Build and parse the command-line options for the MITM test channel."""
    parser = argparse.ArgumentParser(
        description="starts a backTCP test channel",
        epilog="This program is created by iBug")
    # Downstream (towards the real receiver).
    parser.add_argument('-a', '--out-addr', '--address', metavar="addr",
                        help="address of receiver", default="127.0.0.1")
    parser.add_argument('-p', '--out-port', '--port', metavar="port",
                        type=int, help="port of receiver", default=6666)
    # Upstream (where the sender connects to us).
    parser.add_argument('-A', '--in-addr', metavar="addr",
                        help="address to listen for sender", default="0.0.0.0")
    parser.add_argument('-P', '--in-port', metavar="port", type=int,
                        help="port to listen for sender", default=6667)
    parser.add_argument('-l', '--log-level', metavar="level",
                        help="logging level", default=LOG_WARNING)
    return parser.parse_args()
def main():
    """Entry point: parse options, set log verbosity, run the MITM loop."""
    options = parse_args()
    set_log_level(options.log_level)
    btMITM(options.out_addr, options.out_port,
           options.in_addr, options.in_port)


if __name__ == '__main__':
    main()
994,534 | 838b547d74999d4c1ccf4e393d7d8d9295db5fdb | from .encodeClass import encoderClass
from .decodeClass import decoderClass
|
994,535 | 3c13ac301182ddfb9971996d40f46eec493b300b | # you are given a an array of words and ask to check if that list exist or not
class Solution:
    def findRansom(self, arr, word):
        """Return True iff `word` can be assembled from the letters in
        `arr`, using each listed letter at most once.

        FIX: removed the leftover debug `print(res)` that polluted stdout
        on every successful call.
        """
        available = {}
        for letter in arr:
            available[letter] = available.get(letter, 0) + 1
        for letter in word:
            if letter not in available:
                return False
            available[letter] -= 1
            # Drop exhausted letters so the membership test above suffices.
            if available[letter] == 0:
                del available[letter]
        return True

res = Solution().findRansom(['a','b','b','b','c'], 'abbbbc')
print(res)
|
994,536 | 5b5574468d3716c96c76ea5aef1b125352497fe5 | #! /usr/bin/env python
import os, sys, glob, re, shutil, time, threading, json
def doCmd(cmd, dryRun=False, inDir=None):
    """Echo and run a shell command, optionally from directory `inDir`.

    Prints start timestamp, output and wall-clock duration around the call
    and returns the shell exit status (0 for dry runs). Python 2 only
    (print statements, the removed `commands` module).
    """
    if not inDir:
        print "--> "+time.asctime()+ " in ", os.getcwd() ," executing ", cmd
    else:
        print "--> "+time.asctime()+ " in " + inDir + " executing ", cmd
        cmd = "cd " + inDir + "; "+cmd
    sys.stdout.flush()
    sys.stderr.flush()
    start = time.time()
    ret = 0
    # Strip trailing semicolons so the shell gets a single clean command.
    while cmd.endswith(";"): cmd=cmd[:-1]
    if dryRun:
        print "DryRun for: "+cmd
    else:
        from commands import getstatusoutput
        ret, outX = getstatusoutput(cmd)
        if outX: print outX
    stop = time.time()
    print "--> "+time.asctime()+" cmd took", stop-start, "sec. ("+time.strftime("%H:%M:%S",time.gmtime(stop-start))+")"
    sys.stdout.flush()
    sys.stderr.flush()
    return ret
def runThreadMatrix(basedir, logger, workflow, args=''):
    """Run one RelVal workflow in its own scratch folder under `basedir`,
    then gather its logs into <basedir>/<workflow>_* and register them with
    `logger`. Intended as a threading.Thread target. Python 2 only.
    """
    workdir = os.path.join(basedir, workflow)
    matrixCmd = 'runTheMatrix.py -l ' + workflow +' '+args
    try:
        if not os.path.isdir(workdir):
            os.makedirs(workdir)
    except Exception, e:
        print "runPyRelVal> ERROR during test PyReleaseValidation, workflow "+str(workflow)+" : can't create thread folder: " + str(e)
    wftime = time.time()
    try:
        ret = doCmd(matrixCmd, False, workdir)
    except Exception, e:
        print "runPyRelVal> ERROR during test PyReleaseValidation, workflow "+str(workflow)+" : caught exception: " + str(e)
    wftime = time.time() - wftime
    # runTheMatrix creates a "<workflow>_..." output folder inside workdir.
    outfolders = [file for file in os.listdir(workdir) if re.match("^" + str(workflow) + "_", file)]
    if len(outfolders)==0: return
    outfolder = os.path.join(basedir,outfolders[0])
    wfdir = os.path.join(workdir,outfolders[0])
    # Move the interesting artifacts (xml/log/py/cmdLog) up into basedir,
    # record the wall-clock time, then drop the scratch folder.
    ret = doCmd("rm -rf " + outfolder + "; mkdir -p " + outfolder)
    ret = doCmd("find . -mindepth 1 -maxdepth 1 -name '*.xml' -o -name '*.log' -o -name '*.py' -o -name 'cmdLog' -type f | xargs -i mv '{}' "+outfolder+"/", False, wfdir)
    ret = doCmd("mv "+os.path.join(workdir,"runall-report-step*.log")+" "+os.path.join(outfolder,"workflow.log"))
    ret = doCmd("echo " + str(wftime) +" > " + os.path.join(outfolder,"time.log"))
    logger.updateRelValMatrixPartialLogs(basedir, outfolders[0])
    shutil.rmtree(workdir)
    return
class PyRelValsThread(object):
    """Runs RelVal (runTheMatrix.py) workflows in parallel threads and
    aggregates their per-workflow logs into summary reports.

    Python 2 only (print statements, `commands` module, dict.has_key).
    """
    def __init__(self, jobs, basedir, jobid="1of1", outdir=None):
        # jobs:    default number of workflows run in parallel
        # basedir: scratch area holding the per-workflow output folders
        # jobid:   suffix for the "done.<jobid>" completion-marker file
        # outdir:  destination for summary reports (defaults to basedir)
        if not outdir: outdir = basedir
        self.jobs = jobs
        self.basedir = basedir
        self.jobid=jobid
        self.outdir = outdir
    def startWorkflows(self, logger, add_args='', workflows=''):
        """Determine the workflow list (querying `runTheMatrix.py -n` unless
        one is supplied) and run each via runThreadMatrix, keeping at most
        `self.jobs` (or -j/--nproc from add_args) threads alive at a time.
        Touches done.<jobid> and notifies `logger` when all have finished.
        """
        from commands import getstatusoutput
        add_args = add_args.replace('\\"','"')
        print "Extra Args>>",add_args
        w_args = ""
        # Extract an optional "-w <what>" selector from add_args.
        m=re.search('\s*(-w\s+[^ ]+)',add_args)
        if m:
            w_args = m.group(1)
            add_args = add_args.replace(w_args,"")
        if workflows == '':
            # No explicit list: maybe one is embedded as "-l <numbers>".
            m=re.search('\s*(-l\s+\d+[^ ]+)',add_args)
            if m:
                workflows = m.group(1)
                add_args = add_args.replace(workflows,"")
        # Ask runTheMatrix to enumerate the workflow numbers, highest first.
        workflowsCmd = "runTheMatrix.py -n "+w_args+" "+workflows+" | grep -v ' workflows with ' | grep -E '^[0-9][0-9]*(\.[0-9][0-9]*|)\s\s*' | sort -nr | awk '{print $1}'"
        print "RunTheMatrix>>",workflowsCmd
        cmsstat, workflows = getstatusoutput(workflowsCmd)
        print workflows
        if not cmsstat:
            workflows = workflows.split("\n")
        else:
            print "runPyRelVal> ERROR during test PyReleaseValidation : could not get output of " + workflowsCmd
            return
        threads = []
        jobs = self.jobs
        # A "-j N" / "--nproc N" in add_args overrides the default job count.
        m=re.search(".* (-j|--nproc)(=| )(\d+) "," "+add_args)
        if m: jobs=int(m.group(3))
        print "Running ",jobs," in parallel"
        while(len(workflows) > 0):
            # Reap finished threads, then top up to the parallelism limit.
            threads = [t for t in threads if t.is_alive()]
            print "Active Threads:",len(threads)
            if(len(threads) < jobs):
                try:
                    t = threading.Thread(target=runThreadMatrix, args=(self.basedir, logger, workflows.pop(), w_args+" "+add_args))
                    t.start()
                    threads.append(t)
                except Exception, e:
                    print "runPyRelVal> ERROR threading matrix : caught exception: " + str(e)
            else:
                time.sleep(5)
        for t in threads: t.join()
        # Marker file tells the logger this job slice is complete.
        ret, out = getstatusoutput("touch "+self.basedir+"/done."+self.jobid)
        logger.updateRelValMatrixPartialLogs(self.basedir, "done."+self.jobid)
        return
    def update_runall(self):
        """Merge every per-workflow workflow.log into a single
        runall-report-step123-.log, summing the per-step pass/fail tallies
        into one combined tally line at the end."""
        outFile = open(os.path.join(self.outdir,"runall-report-step123-.log"),"w")
        status_ok = []
        status_err = []
        len_ok = 0
        len_err = 0
        for logFile in glob.glob(self.basedir+'/*/workflow.log'):
            inFile = open(logFile)
            for line in inFile:
                # Tally line looks like: "n1 n2 ... tests passed, m1 m2 ... failed"
                if re.match("^\s*(\d+\s+)+tests passed,\s+(\d+\s+)+failed\s*$",line):
                    res = line.strip().split(" tests passed, ")
                    res[0] = res[0].split()
                    res[1]=res[1].replace(" failed","").split()
                    # Grow the accumulators if this log reports more steps.
                    len_res = len(res[0])
                    if len_res>len_ok:
                        for i in range(len_ok,len_res): status_ok.append(0)
                        len_ok = len_res
                    for i in range(0,len_res):
                        status_ok[i]=status_ok[i]+int(res[0][i])
                    len_res = len(res[1])
                    if len_res>len_err:
                        for i in range(len_err,len_res): status_err.append(0)
                        len_err = len_res
                    for i in range(0,len_res):
                        status_err[i]=status_err[i]+int(res[1][i])
                else: outFile.write(line)
            inFile.close()
        outFile.write(" ".join(str(x) for x in status_ok)+" tests passed, "+" ".join(str(x) for x in status_err)+" failed\n")
        outFile.close()
    def update_wftime(self):
        """Collect each workflow's wall-clock time (from its time.log) into
        relval-times.json, keyed by workflow number (seconds, truncated)."""
        time_info = {}
        logRE = re.compile('^.*/([1-9][0-9]*\.[0-9]+)_[^/]+/time\.log$')
        for logFile in glob.glob(self.basedir+'/*/time.log'):
            m = logRE.match(logFile)
            if not m: continue
            wf = m.group(1)
            inFile = open(logFile)
            line = inFile.readline().strip()
            inFile.close()
            try:
                m = re.match("^(\d+)\.\d+$",line)
                if m: time_info[wf]=int(m.group(1))
            except:
                pass
        outFile = open(os.path.join(self.outdir,"relval-times.json"),"w")
        json.dump(time_info, outFile)
        outFile.close()
    def parseLog(self):
        """Scan every step<N> log for event/warning/error counts (cached per
        step in logcache_<N>.json next to the log) and pickle the aggregate
        {workflow: {events/failed/warning lists}} to runTheMatrixMsgs.pkl."""
        logData = {}
        logRE = re.compile('^.*/([1-9][0-9]*\.[0-9]+)_[^/]+/step([1-9])_.*\.log$')
        max_steps = 0
        for logFile in glob.glob(self.basedir+'/[1-9]*/step[0-9]*.log'):
            m = logRE.match(logFile)
            if not m: continue
            wf = m.group(1)
            step = int(m.group(2))
            if step>max_steps: max_steps=step
            if not logData.has_key(wf):
                logData[wf] = {'steps': {}, 'events' : [], 'failed' : [], 'warning' : []}
            if not logData[wf]['steps'].has_key(step):
                logData[wf]['steps'][step]=logFile
        cache_read=0
        log_processed=0
        for wf in logData:
            # Pre-fill every per-step slot with -1 (= step missing).
            for k in logData[wf]:
                if k == 'steps': continue
                for s in range(0, max_steps):
                    logData[wf][k].append(-1)
            index =0
            for step in sorted(logData[wf]['steps']):
                # data = [events, warnings, errors] for this step.
                data = [0, 0, 0]
                logFile = logData[wf]['steps'][step]
                json_cache = os.path.dirname(logFile)+"/logcache_"+str(step)+".json"
                # Re-parse only when the log is newer than its cache.
                if (not os.path.exists(json_cache)) or (os.path.getmtime(logFile)>os.path.getmtime(json_cache)):
                    inFile = open(logFile)
                    for line in inFile:
                        if '%MSG-w' in line: data[1]=data[1]+1
                        if '%MSG-e' in line: data[2]=data[2]+1
                        if 'Begin processing the ' in line: data[0]=data[0]+1
                    inFile.close()
                    jfile = open(json_cache,"w")
                    json.dump(data,jfile)
                    jfile.close()
                    log_processed+=1
                else:
                    jfile = open(json_cache,"r")
                    data = json.load(jfile)
                    jfile.close()
                    cache_read+=1
                logData[wf]['events'][index] = data[0]
                logData[wf]['failed'][index] = data[2]
                logData[wf]['warning'][index] = data[1]
                index+=1
            del logData[wf]['steps']
        print "Log processed: ",log_processed
        print "Caches read:",cache_read
        from pickle import Pickler
        outFile = open(os.path.join(self.outdir,'runTheMatrixMsgs.pkl'), 'w')
        pklFile = Pickler(outFile)
        pklFile.dump(logData)
        outFile.close()
        return
|
994,537 | acfc53e80ecdb62a70a57e9d02270aaf7a4310d7 | # 若為Mac電腦,請先貼上此段程式碼
########### For Mac user ###########
import os
import ssl
# used to fix Python SSL CERTIFICATE_VERIFY_FAILED
# (disables certificate verification globally unless PYTHONHTTPSVERIFY is set)
if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(
        ssl, '_create_unverified_context', None
):
    ssl._create_default_https_context = ssl._create_unverified_context
####################################
# Import urllib's request module
from urllib import request
url = 'https://www.ptt.cc/bbs/joke/index.html'
# res = request.urlopen(url)
# Send custom headers: spoof a desktop-browser User-Agent so the site
# serves the page instead of rejecting the default Python client.
useragent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
headers = {'User-Agent': useragent}
req = request.Request(url=url, headers=headers)
res = request.urlopen(req)
print(res.read().decode('utf-8'))
|
994,538 | 5d4b47237af0299b6bbfa67d119cc01c0708dab7 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ###Description and preliminary code for Continuous-Time Markov Chain Model
#
# This model will test the importance of including a spatial component in the system. We will use ODEs to describe the dynamics of each lineage and competition between lineages.
# The different states that each cell can move through are as follows
#
# * Healthy Hepatocytes
#
# * Latently Infected Hepatocytes
#
# * Infected Hepatocytes
#
# * Dead Infected Hepatocytes
#
# * Dead Healthy Hepatocytes
#
# Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating.
#
# Adapting the Perelson/Neumann model, we have
#
# $\begin{eqnarray*}
# \frac{dT}{dt}& =& \phi_{DT} D_T + \phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\
# \frac{dE}{dt}& =& (\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\
# \frac{dI}{dt}& =& \alpha E- \nu_I I\\
# \frac{dD_T}{dt}& =& \nu_T(T+E) - \phi_{DT} D_T\\
# \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\\
# \end{eqnarray*}$
#
#
#
#
# To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), I(t), D_T(t), D_I(t)]$, so the probability of state change is defined as Prob$\{\Delta \vec{X(t)} = (a, b, c, d, e)|\vec{X(t)}\}$, where $a$ represents the change in state $T$, $b$ in state $E$, etc. We assume that the time step is small enough that each change is only in one cell, so $a - e$ can only take the values 0 or $\pm 1$. The transition probabilities are as follows
#
#
# $$\begin{cases}
# (\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, b = 1\\
# \nu_T T \Delta t + o(\Delta t), & a = -1, d = 1\\
# \alpha E \Delta t + o(\Delta t), & b = -1, c = 1\\
# \nu_T E \Delta t + o(\Delta t), & b = -1, d = 1\\
# \nu_I I \Delta t + o(\Delta t), & c = -1, e = 1 \\
# \phi_{DT} D_T \Delta t + o(\Delta t), & d = -1, a = 1\\
# \phi_{DI} D_I \Delta t + o(\Delta t), & e = -1, a = 1\\
# \end{cases}$$
#
# The generator matrix $\mathbf{Q}$ derived from these transition probabilities is thus as follows
#
# <!--($$ \mathbf{Q} =
# \left[ \begin{array}{ccccc}
# - (\beta I + \lambda +d) T & (\beta I + \lambda) T & 0 & 0 & dT \\
# 0 & -(\eta + d) L & \eta L &0 & dL \\
# 0 & 0 & -\delta I & \delta I & 0 \\
# \alpha_I D_I &0 &0 & -\alpha_I D_I&0\\
# \alpha_T D_T & 0 & 0& 0& -\alpha_T D_T\\
# \end{array} \right] $$ -->
#
# $$ \mathbf{Q} =
# \left[ \begin{array}{ccccc}
# 0& (\lambda_{virions} + \lambda_{local}) T& 0 & 0 & \nu_T T \\
# 0 & 0 & \alpha E & \nu_T E &0 \\
# 0 & 0 & 0 & 0 & \nu_I I\\
# \phi_{DT} D_T &0 &0 & 0&0\\
# \phi_{DI} D_I & 0 & 0& 0& 0\\
# \end{array} \right] $$
# <codecell>
%matplotlib inline
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# <codecell>
# Parameter scratch cell. NOTE(review): these names are re-defined with
# different values/names in the simulation cell below, so nothing here is
# actually used by the simulation — presumably an earlier draft to confirm.
beta=.2  # infection rate (the beta*I*T term in the markdown model) — TODO confirm
nu = .01
d = 2e-2  # hepatocyte death rate — TODO confirm
eta = 1  # 1/eclipse (latent) period — TODO confirm
delta = 3*d  # infected-cell death rate — TODO confirm
alpha_I = .8e-1  # regeneration rate of dead infected cells — TODO confirm
alpha_T = 2e-1  # regeneration rate of dead healthy cells — TODO confirm
# <codecell>
from __future__ import division
import numpy as np
#Number of state transitions to observe
M = int(1e6)
# time vector
time = np.zeros(M)
#Define parameters
rho = 8.18 #viral export rate
c = 22.3 #viral clearance rate
gamma = 1500 #scaling factor
R = 4.1825 #average HCV RNA in infected hepatocyte
N_liver = int(8e10) #Number of cells in liver
alpha = 1 #1/latent period (days)
nu_T = 1.4e-2 #death rate of healthy cells
nu_I = 1/7 #death rate of infected cells
phi_T = 10*nu_T #regeneration rate of dead healthy cells
phi_I = .8*phi_T #regeneration rate of dead infected cells
beta_V = 1e-8 #viral transmision rate
beta_L = R*1e-5/(60*24) #cell-cell transmission rate
N=N_liver/1e6  # down-scaled liver size actually simulated
init=10  # initial number of infected hepatocytes
v_init = 1e6  # initial viral load
sim=3  # NOTE(review): appears unused in this cell
# Per-capita transition rates; scaled by the current state counts each step.
Q = np.zeros(7)
Q[0] = (beta_L*init + beta_V*v_init); #Infection of Target cell
Q[1] = nu_T; #Death of target cell
Q[2] = alpha; #latent cell becomes infected
Q[3] = nu_T; #latent cell dies
Q[4] = nu_I; #Infected cell dies
Q[5] = phi_T; #Healthy cell regenerates
Q[6] = phi_I; #Infected cell regenerates
#Construct matrix of state transition vectors
# Columns = the 7 transitions; rows = state change for [T, E, I, D_T, D_I].
trans_vecs = np.zeros([5,7])
#state 1: infection of healthy cell
trans_vecs[0,0] = -1;
trans_vecs[1,0] = 1;
#state 2: death of healthy cell
trans_vecs[0,1] = -1;
trans_vecs[3,1] = 1;
#state 3: movement of latent cell into infected
trans_vecs[1,2] = -1;
trans_vecs[2,2] = 1;
#state 4: death of latent cell
trans_vecs[1,3] = -1;
trans_vecs[3,3] = 1;
#state 5: death of infected cell
trans_vecs[2,4] = -1;
trans_vecs[4,4] = 1;
#state 6: regeneration of dead healthy cell
trans_vecs[3,5] = -1;
trans_vecs[0,5] = 1;
#state 6: regeneration of dead infected cell
trans_vecs[4,6] = -1;
trans_vecs[0,6] = 1;
#Initialize state variable vectors
T = np.zeros(M)
E = np.zeros(M)
I = np.zeros(M)
Dt = np.zeros(M)
Di = np.zeros(M)
VL = np.zeros(M)
#Input initial conditions
I[0] = init;
T[0] = N-init;
VL[0] = v_init
#Initialize state vector and index
#state_vec = np.vstack([S,E,I,Di,Dt])
j =0
# Stochastic simulation: per step, draw an exponential waiting time for
# every transition and fire the earliest one (first-reaction style).
# Stops when infection dies out (I == 0) or M-1 transitions are recorded.
while I[j] >0 and j<M-1:
    #print [T[j],E[j],I[j],Dt[j],Di[j]]
    #Update Q to reflect new number of infected cells and viruses
    Q[0] = (beta_L*I[j] +beta_V*VL[j]);
    #Calculate transition matrix
    Qij = Q*[T[j],T[j],E[j],E[j],I[j],Dt[j],Di[j]]
    #Draw from exponential distributions of waiting times
    # (zero rates yield infinite waiting times, so those never fire)
    time_vec = -np.log(np.random.random(7))/Qij
    #np.random.exponential([1/Qij])[0]
    #
    #find minimum waiting time and obtain index to ascertain next state jump
    newTime = min(time_vec)
    time_vecL = time_vec.tolist()
    state_idx = time_vecL.index(min(time_vecL))
    [T[j+1],E[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx]
    # Viral load: initial load plus an export/clearance steady-state term
    # proportional to current infected cells — TODO confirm intended model.
    VL[j+1] = VL[0]+rho*I[j]*R/(gamma*c)
    time[j+1] = time[j] + newTime
    j+=1
# <codecell>
[T[j],E[j],I[j],Dt[j],Di[j]]
rho*I[j]*R/(gamma*c)
# <codecell>
%%timeit
np.random.exponential(y)
# <codecell>
y= np.ones(11)
# <codecell>
plt.plot(time[0:M-1],VL[0:M-1])
# <codecell>
plt.plot(time,T, label = 'Susc')
plt.plot(time,I, label = 'Infected')
plt.plot(time,Dt, label = 'Dead (healthy)')
plt.plot(time,Di, label = 'Dead (infected)')
plt.legend(loc = 'upper right')
# <markdowncell>
# An updated version of the model includes a second latent class that keeps cells latently infected for longer before becoming infectious, and also allows for proliferation of infected cells by allowing cells to be reborn into the latent class
#
# * Healthy Hepatocytes
#
# * Latently Infected Hepatocytes
#
# * Long-lived Latently Infected Hepatocytes
#
# * Infected Hepatocytes
#
# * Dead Infected Hepatocytes
#
# * Dead Healthy Hepatocytes
#
# Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating. Some cells regenerate into infectious cells.
#
# Adapting the Perelson/Neumann model, we have
#
# $\begin{eqnarray*}
# \frac{dT}{dt}& =& \phi_{DT} D_T + (1-\kappa)\phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\
# \frac{dE}{dt}& =& (1-\eta)(\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\
# \frac{dEX}{dt}& =& \eta(\lambda_{virions} + \lambda_{local} )T - (\alpha_X +\nu_T)E\\
# \frac{dI}{dt}& =& \kappa\phi_{DI} D_I+ \alpha E- \nu_I I\\
# \frac{dD_T}{dt}& =& \nu_T(T+E+EX) - \phi_{DT} D_T\\
# \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\\
# \end{eqnarray*}$
#
# To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), EX(t) I(t), D_T(t), D_I(t)]$, so the probability of state change is defined as Prob$\{\Delta \vec{X(t)} = (a, b, c, d, e, f)|\vec{X(t)}\}$, where $a$ represents the change in state $T$, $b$ in state $E$, etc. We assume that the time step is small enough that each change is only in one cell, so $a - f$ can only take the values 0 or $\pm 1$. The transition probabilities are as follows
#
#
# $$\begin{cases}
# (1-\eta)(\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, b = 1\\
# \eta(\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, c = 1\\
# \nu_T T \Delta t + o(\Delta t), & a = -1, e = 1\\
# \alpha E \Delta t + o(\Delta t), & b = -1, d = 1\\
# \nu_T E \Delta t + o(\Delta t), & b = -1, e = 1\\
# \alpha_X EX \Delta t + o(\Delta t), & c = -1, d = 1\\
# \nu_T EX \Delta t + o(\Delta t), & c = -1, e = 1\\
# \nu_I I \Delta t + o(\Delta t), & d = -1, f = 1 \\
# \phi_{DT} D_T \Delta t + o(\Delta t), & d = -1, a = 1\\
# \kappa\phi_{DI} D_I \Delta t + o(\Delta t), & f = -1, d = 1\\
# (1-\kappa)\phi_{DI} D_I \Delta t + o(\Delta t), & f = -1, a = 1\\
# \end{cases}$$
#
# The generator matrix $\mathbf{Q}$ derived from these transition probabilities is thus as follows
#
#
# $$ \mathbf{Q} =
# \left[ \begin{array}{cccccc}
# 0& (1-\eta)(\lambda_{virions} + \lambda_{local}) T& \eta(\lambda_{virions} + \lambda_{local}) T& 0 & \nu_T T &0\\
# 0 & 0 & \alpha E &0 &\nu_T E & 0\\
# 0 & 0 & \alpha_X EX &0 &\nu_T E & 0\\
# 0 & 0 & 0 & 0 & 0&\nu_I I \\
# \phi_{DT} D_T &0 &0 & 0&0&0\\
# (1-\kappa)\phi_{DI} D_I & 0 & 0& \kappa \phi_{DI}& 0&0\\
# \end{array} \right] $$
#
# <codecell>
%load_ext cythonmagic
# <codecell>
%%cython
from __future__ import division
import numpy as np
import random
class HCVHepatocyte:
    """A hepatocyte in the agent-based HCV infection model.

    cellType is one of: Latent, LatentL (long-lived latent), Infected,
    InfectedL, Dead, DeadL. Infectious cells must carry tInf; dead cells
    must carry tDead (a warning is printed otherwise).
    """
    def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None):
        self.cellID = cellID #ID of cell
        self.parentID = parentID #ID of infector, whether it is virus or infected cell
        self.infType = infType #type of infection ('Virus' or 'Cell')
        self.tLat = tLat #time of infection of cell (time cell became latently infected)
        self.cellType = cellType #see class docstring for the allowed stages
        self.tInf = tInf #time the cell became infectious
        self.tDead = tDead #time of death
        if cellType in ('Infected', 'InfectedL'):
            if tInf is None:
                print("Error: Infectious cells must have time Infectious")
        elif cellType in ('Dead', 'DeadL'):
            # BUG FIX: the original validated tInf here, so a dead cell
            # constructed with a death time still triggered the warning.
            if tDead is None:
                print("Error: Dead cells must have time of death")

    #define method for infecting a susceptible cell
    def InfectCell(self, newID, simTime, newInfType):
        '''Create and return a new latently infected cell attributed to this
        cell; prints an error and returns None if this cell is not
        infectious.'''
        if self.cellType not in ['Infected', 'InfectedL']:
            print("Error: Latent Cell cannot infect")
        else:
            return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType)
class HCVVirion:
    """A free virion; can seed a new latently infected hepatocyte."""
    def __init__(self, virusID, parentID):
        self.virusID = virusID    # ID of this virion
        self.parentID = parentID  # ID of the cell that produced it

    def InfectCell(self, newID, simTime, newInfType):
        """Create a latently infected hepatocyte attributed to this virion."""
        return HCVHepatocyte(newID, self.virusID, 'Virus', simTime, newInfType)
# Smoke-test instance; not referenced by the functions below.
time = 0;
cell1 = HCVHepatocyte(1, None, 'Virus', time, 'Latent')
#Create function to randomly select one cell to infect
def CreateLatent(cellHandle, newID, state_idx, simTime):
    """Spawn a latent cell from an infecting transition.

    States 0/1 produce a short-term latent cell, states 2/3 a long-term
    latent one; any other state is reported as an error (returns None).
    """
    if state_idx in (0, 1):
        latent_kind = 'Latent'
    elif state_idx in (2, 3):
        latent_kind = 'LatentL'
    else:
        print("Error: State is not an infecting transition")
        return None
    return cellHandle.InfectCell(newID, simTime, latent_kind)
#Create function to Kill Infected cell
def KillInfected(cellHandle, time):
    """Mark an infectious cell as dead at `time` and return it.

    'Infected' becomes 'Dead', 'InfectedL' becomes 'DeadL'; any other
    cell type is reported as an error (the cell is still returned).
    """
    cellHandle.tDead = time
    transitions = {'Infected': 'Dead', 'InfectedL': 'DeadL'}
    if cellHandle.cellType in transitions:
        cellHandle.cellType = transitions[cellHandle.cellType]
    else:
        print("Error: Cannot kill uninfected cell")
    return cellHandle
#Create function to move latent to infectious
def LatentInfectious(cellHandle, time):
    """Promote a latent cell to infectious at `time` and return it.

    'Latent' becomes 'Infected', 'LatentL' becomes 'InfectedL'; any
    other cell type is reported as an error (the cell is still returned).
    """
    cellHandle.tInf = time
    promotions = {'Latent': 'Infected', 'LatentL': 'InfectedL'}
    if cellHandle.cellType in promotions:
        cellHandle.cellType = promotions[cellHandle.cellType]
    else:
        print("Error: Cell not Latent")
    return cellHandle
# ---------------------------------------------------------------------------
# Simulation setup: parameters, per-transition rates Q, state-change vectors,
# state-history arrays and the initial infected-cell dictionary.
# Compartments: T (target/healthy), E (latent), Ex (long-term latent),
# I (infected), Dt (dead healthy), Di (dead infected).
# ---------------------------------------------------------------------------
#Number of state transitions to observe
M = int(1e7)
# time vector (NOTE: shadows the scalar `time` used in earlier cells)
time = np.zeros(M)
#Define parameters
init=10 #10 #initial number of infected hepatocytes
v_init = 0#initial viral load
ALT_init = 100 #initial ALT level
rho = 8.18 #viral export rate
c = 22.3 #viral clearance rate
gamma = 1500 #scaling factor -
R = 4.1825 #average HCV RNA in infected hepatocyte
N_liver = int(1e11) #Number of cells in liver
alpha = 1 #1/latent period (days)
alpha_x = 1.3e-2 #1/long-term latent period
nu_T = 1.4e-2 #death rate of healthy cells
# NOTE(review): under Python 2 without a __future__ division import in this
# namespace, 1/7 evaluates to 0 — confirm true division is in effect.
nu_I = 1/7 #death rate of infected cells
phi_T = 10*nu_T #regeneration rate of dead healthy cells
phi_I = .8*phi_T #regeneration rate of dead infected cells
beta_V = .5e-8 #viral transmision rate
beta_L = R*1e-5/(60*24) #cell-cell transmission rate
eta = .01 #proportion of infected cells that go long-term latent
kappa = 0 #.1 #proportion of dead infected cells regenerated as infected cells
changes = 13;
delta = .33 #ALT degradation rate
N=N_liver/1e7 #initial number of hepatocytes (scaled-down liver)
eps = (delta*ALT_init)/(nu_T*N) #rate of ALT production
# Per-capita transition rates; entries 0-3 are refreshed every iteration of
# the main loop as I and VL change.
Q = np.zeros(changes)
Q[0] = (1-eta)*(beta_L*init) #Infection of Target cell by cell-> latent
Q[1] = (1-eta)*beta_V*v_init #Infection of Target cell by virus -> latent
Q[2] = eta*beta_L*init #Infection of Target cell by cell -> long-term latent
Q[3] = eta*beta_V*v_init #Infection of Target cell by virus -> long-term latent
Q[4] = nu_T; #Death of target cell
Q[5] = alpha; #latent cell becomes infected
Q[6] = nu_T; #latent cell dies
Q[7] = alpha_x #long-term latent cell becomes infected
Q[8] = nu_T #long-term latent cell dies
Q[9] = nu_I; #Infected cell dies
Q[10] = phi_T; #Healthy cell regenerates
Q[11] = (1-kappa)*phi_I; #Infected cell regenerates into healthy cell
Q[12] = kappa*phi_I
#Construct matrix of state transition vectors
# Column k holds the compartment increments applied when transition k fires;
# rows are ordered (T, E, Ex, I, Dt, Di).
trans_vecs = np.zeros([6, changes])
#state 1: infection of healthy cell by cell-> latent
trans_vecs[0,0] = -1;
trans_vecs[1,0] = 1;
#state 2: infection of healthy cell by virus -> latent
trans_vecs[0,1] = -1;
trans_vecs[1,1] = 1;
#state 3: infection of healthy cell by cell -> long-term latent
trans_vecs[0,2] = -1;
trans_vecs[2,2] = 1;
#state 4: infection of healthy cell by virus -> long-term latent
trans_vecs[0,3] = -1;
trans_vecs[2,3] = 1;
#state 5: death of healthy cell
trans_vecs[0,4] = -1;
trans_vecs[4,4] = 1;
#state 6: movement of latent cell into infected
trans_vecs[1,5] = -1;
trans_vecs[3,5] = 1;
#state 7: death of latent cell
trans_vecs[1,6] = -1;
trans_vecs[4,6] = 1;
#state 8: movement of long-term latent cell into infected
trans_vecs[2,7] = -1;
trans_vecs[3,7] = 1;
#state 9: death of long-term latent cell
trans_vecs[2,8] = -1;
trans_vecs[4,8] = 1;
#state 10: death of infected cell
trans_vecs[3,9] = -1;
trans_vecs[5,9] = 1;
#state 11: regeneration of dead healthy cell
trans_vecs[4,10] = -1;
trans_vecs[0,10] = 1;
#state 12: regeneration of dead infected cell into healthy cell
trans_vecs[5,11] = -1;
trans_vecs[0,11] = 1;
#state 13: regeneration of dead infected cell into infected cell
trans_vecs[5,12] = -1;
trans_vecs[3,12] = 1;
#Initialize state variable vectors (one entry per transition taken)
T = np.zeros(M)
E = np.zeros(M)
Ex = np.zeros(M)
I = np.zeros(M)
Dt = np.zeros(M)
Di = np.zeros(M)
VL = np.zeros(M)
ALT = np.zeros(M)
state_vec = np.zeros(M)
# Per-day records: InfectionChain[d] lists (infector, infectee) pairs,
# Infecteds[d] lists the IDs of all infected/latent cells alive on day d.
InfectionChain = []
Infecteds = []
#Initialize Infected Hepatocyte objects: half short-term, half long-term
InfectedDict = {}
for i in range(0,int(init/2)):
    x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0)
    InfectedDict[i] = x
for i in range(int(init/2),init):
    x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0)
    InfectedDict[i] = x
LatentDict = {}
LatentLDict = {}
DeadDict = {}
lastCellID = init-1 #get last cellID
#Input initial conditions
I[0] = init;
T[0] = N-init;
VL[0] = v_init
# ---------------------------------------------------------------------------
# Main Gillespie-style loop: draw competing exponential waiting times for the
# 13 transitions, apply the winner to the compartment counts and the cell
# dictionaries, then update viral load, ALT and the clock.
# ---------------------------------------------------------------------------
j =0
InfectionArray = []
while I[j] >= 0 and j<M-1:
    #Update Q to reflect new number of infected cells and viruses
    Q[0] = (1-eta)*beta_L*I[j]
    Q[1] = (1-eta)*beta_V*VL[j]
    Q[2] = eta*beta_L*I[j]
    Q[3] = eta*beta_V*VL[j]
    #Calculate total transition rates (per-capita rate x compartment size)
    Qij = Q*[T[j],T[j],T[j], T[j],T[j], E[j],E[j], Ex[j], Ex[j], I[j], Dt[j], Di[j], Di[j]]
    #Draw from exponential distributions of waiting times; empty compartments
    #have rate 0 -> waiting time inf, so they can never win the minimum.
    time_vec = -np.log(np.random.random(changes))/Qij
    #find minimum waiting time and obtain index to ascertain next state jump
    newTime = min(time_vec)
    time_vecL = time_vec.tolist()
    state_idx = time_vecL.index(min(time_vecL))
    state_vec[j] = state_idx
    [T[j+1],E[j+1],Ex[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],Ex[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx]
    #make adjustments to hepatocyte dictionaries according to state transition
    #Infection of healthy cell by cell or virus -> latent or longterm latent
    if state_idx in [0,1,2,3]:
        Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
        newCellID = lastCellID + 1
        lastCellID = newCellID
        newLatent = CreateLatent(Infector, newCellID, state_idx, time[j])
        if state_idx in [0,1]:
            LatentDict[newCellID] = newLatent
        elif state_idx in [2,3]:
            LatentLDict[newCellID] = newLatent
        else:
            print('Incorrect State')
    #Latent cell becomes infectious
    elif state_idx in [5,7]:
        if state_idx == 5:
            LatCell = LatentDict[random.choice(list(LatentDict.keys()))]
            del LatentDict[LatCell.cellID] #remove cell from Latent Dict
        elif state_idx == 7:
            LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))]
            del LatentLDict[LatCell.cellID]
        else:
            print('Incorrect State')
        InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict
    #Latent cell dies
    elif state_idx == 6:
        del LatentDict[random.choice(list(LatentDict.keys()))]
    #LatentL cell dies
    elif state_idx == 8:
        del LatentLDict[random.choice(list(LatentLDict.keys()))]
    #Infected cell dies: move it from InfectedDict to DeadDict
    elif state_idx == 9:
        KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))]
        del InfectedDict[KilledCell.cellID]
        KilledCell.cellType = 'Dead'
        KilledCell.tDead = time[j]
        DeadDict[KilledCell.cellID] = KilledCell
    #Dead infected cell regenerates into healthy cell -- just delete from dead dict
    elif state_idx == 11:
        del DeadDict[random.choice(list(DeadDict.keys()))]
    #Infected cell regenerated from Dead cell
    elif state_idx == 12:
        newCellID = lastCellID + 1
        lastCellID = newCellID
        DeadGen = DeadDict[random.choice(list(DeadDict.keys()))]
        del DeadDict[DeadGen.cellID]
        newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j])
        InfectedDict[newInfected.cellID] = newInfected
    #Output Infection chain and infecteds at each time step:
    #grow the per-day lists until they cover the current (integer) day
    if len(InfectionChain)< int(time[j])+1:
        InfectionChain.append([])
    if len(Infecteds) < int(time[j])+1:
        Infecteds.append([])
    #add to array of infections with timestep
    if state_idx in [0,1,2,3]:
        InfectionChain[int(time[j])].append([Infector.cellID, newCellID])
    elif state_idx == 12:
        InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID])
    # FIX: dict.keys() views cannot be concatenated with `+` on Python 3;
    # take the union of the day's ID list and the three key sets instead
    # (same de-duplicated result as the original Python 2 expression).
    Infecteds[int(time[j])] = list(set(Infecteds[int(time[j])]) | set(InfectedDict) | set(LatentDict) | set(LatentLDict))
    #update viral load (quasi-steady-state) and ALT (Euler step)
    VL[j+1] = np.floor(rho*N_liver*(I[j+1]/N)*R/(gamma*c))
    ALT[j+1] = ALT[j] + (eps*(nu_T*(T[j] + E[j] + Ex[j]) + nu_I*I[j])-delta*ALT[j])*newTime
    time[j+1] = time[j] + newTime
    j+=1
# <codecell>
#Sort Infecteds and Infection chain, and break up infection chain.
# BUG FIX: Infecteds and InfectionChain are built as *lists* indexed by day
# in the simulation loop above, so they have no .keys() (the original raised
# AttributeError); enumerate them instead, as the corrected cell below does.
InfectedsSort = dict()
for day, infected_ids in enumerate(Infecteds):
    InfectedsSort[day] = sorted(infected_ids)
InfectionChainSort = {}
for day, chain in enumerate(InfectionChain):
    # Sort the day's (infector, infectee) pairs by infector, then flatten.
    pairs = sorted(list(chain), key=lambda x: x[0])
    InfectionChainSort[day] = [b for c in pairs for b in c]
# <codecell>
#Sort Infecteds and Infection chain, and break up infection chain
# (enumerate-based version: Infecteds / InfectionChain are per-day lists)
InfectedsSort = dict()
for key, item in enumerate(Infecteds):
    InfectedsSort[key] = sorted(item)
InfectionChainSort = dict()
for key, item in enumerate(InfectionChain):
    # sort the day's (infector, infectee) pairs by infector, then flatten
    a = sorted(list(item), key=lambda x: x[0])
    InfectionChainSort[key] = [b for c in a for b in c]
# <codecell>
import csv
# Write the per-day infected-cell IDs and infection chains to disk, one
# space-delimited row per day.
# FIX: use items() (iteritems() is Python-2-only) and context managers so
# both handles are closed (the original leaked the first handle when
# rebinding f and never closed the second).
with open('Infecteds1e7.txt', 'w') as f:
    writer = csv.writer(f, delimiter = ' ')
    for key, value in InfectedsSort.items():
        writer.writerow([key] + value)
with open('InfectionChain1e7.txt', 'w') as f:
    writer = csv.writer(f, delimiter = ' ')
    for key, value in InfectionChainSort.items():
        writer.writerow([key] + value)
# <codecell>
# Tab-delimited dump of the raw per-day infected lists.
# FIX: Infecteds is a list, not a dict (and iteritems() is Python-2-only);
# enumerate it, and close the file via a context manager.
with open('Infecteds.txt', 'w') as f:
    writer = csv.writer(f, delimiter = '\t')
    for key, value in enumerate(Infecteds):
        writer.writerow([key] + [value])
# <codecell>
# Quick inspection of the sorted outputs.
len(InfectionChainSort)
# <codecell>
InfectionChainSort[10]
# <codecell>
InfectionChain[10]
# <codecell>
# NOTE(review): plt is never imported in this file -- presumably supplied by
# the notebook's pylab mode; confirm before running outside the notebook.
plt.plot(time,T, label = 'Susc')
plt.plot(time,I, label = 'Infected')
plt.plot(time,Dt, label = 'Dead (healthy)')
plt.plot(time,Di, label = 'Dead (infected)')
plt.legend(loc = 'upper right')
# <codecell>
plt.plot(time,VL)
# <codecell>
# Spot-check a random key and one specific cell's type.
random.choice(list(InfectedDict.keys()))
InfectedDict[8].cellType
# <codecell>
plt.plot(time,T, label = 'Susceptible')
plt.plot(time,I+Di, label = 'Ever Infected')
plt.legend(loc = 'upper right')
# <codecell>
# Scratch: build a dictionary of `init` initially infected hepatocytes.
HepatocyteDict = {}
for i in range(init):
    x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0)
    HepatocyteDict[i] = x
# <codecell>
# Scratch variant with off-by-two/off-by-one ranges (keys 0..init/2-3 and
# init/2-1..init-2).
InfectedDict = {}
for i in range(0,int(init/2)-2):
    x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0)
    InfectedDict[i] = x
for i in range(int(init/2)-1,init-1):
    x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0)
    InfectedDict[i] = x
# <codecell>
# NOTE(review): with init=10 key 53 does not exist here -- this lookup
# presumably refers to a dictionary left over from a larger run.
InfectedDict[53].cellType
# <codecell>
#Create Module for infection functions
#Build infected cell class
import random
class HCVHepatocyte:
    """A single hepatocyte in the HCV infection simulation.

    Tracks the cell's identity, its infector, how it was infected
    (cell-to-cell vs. virion), and the times of its state transitions.
    """
    def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None):
        self.cellID = cellID      #ID of cell
        self.parentID = parentID  #ID of infector, whether it is virus or infected cell
        self.infType = infType    #type of infection (from virus or from infected cell)
        self.tLat = tLat          #time of infection of cell (time cell became latently infected)
        self.cellType = cellType  #type of cell: latent, longterm, infectious, infectious from longterm,
                                  #dead, dead from long term
        self.tInf = tInf          #time cell became infectious
        self.tDead = tDead        #time of death
        # Sanity checks: infectious cells must carry tInf, dead cells tDead.
        if cellType in ('Infected', 'InfectedL'):
            if tInf is None:
                print("Error: Infectious cells must have time Infectious")
        elif cellType in ('Dead', 'DeadL'):
            # BUG FIX: the original tested tInf here; per the error message a
            # dead cell must carry its time of death, so test tDead instead.
            if tDead is None:
                print("Error: Dead cells must have time of death")
    #define method for infecting a susceptible cell
    def InfectCell(self, newID, simTime, newInfType):
        '''Return a new latently infected HCVHepatocyte seeded by this cell.

        Only infectious cells may infect; otherwise an error is printed and
        None is returned implicitly.
        '''
        if self.cellType not in ['Infected', 'InfectedL']:
            print("Error: Latent Cell cannot infect")
        else:
            return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType)
class HCVVirion:
    """A free virion; can seed a latent infection in a healthy hepatocyte."""

    def __init__(self, virusID, parentID):
        # Identity of this virion and of the infected cell that exported it.
        self.virusID = virusID
        self.parentID = parentID

    def InfectCell(self, newID, simTime, newInfType):
        """Create and return the hepatocyte this virion infects at simTime."""
        return HCVHepatocyte(newID, self.virusID, 'Virus', simTime, newInfType)
# Scratch check: seed the simulation clock and build one latently
# infected cell (infected by a virion at t = 0).
time = 0;
cell1 = HCVHepatocyte(1, None, 'Virus', time, 'Latent')
#Create function to randomly select one cell to infect
def CreateLatent(cellHandle, newID, state_idx, simTime):
    """Spawn a latent cell from an infecting transition.

    States 0/1 produce a short-term latent cell, states 2/3 a long-term
    latent one; any other state is reported as an error (returns None).
    """
    if state_idx in (0, 1):
        latent_kind = 'Latent'
    elif state_idx in (2, 3):
        latent_kind = 'LatentL'
    else:
        print("Error: State is not an infecting transition")
        return None
    return cellHandle.InfectCell(newID, simTime, latent_kind)
#Create function to Kill Infected cell
def KillInfected(cellHandle, time):
    """Mark an infectious cell as dead at `time` and return it.

    'Infected' becomes 'Dead', 'InfectedL' becomes 'DeadL'; any other
    cell type is reported as an error (the cell is still returned).
    """
    cellHandle.tDead = time
    transitions = {'Infected': 'Dead', 'InfectedL': 'DeadL'}
    if cellHandle.cellType in transitions:
        cellHandle.cellType = transitions[cellHandle.cellType]
    else:
        print("Error: Cannot kill uninfected cell")
    return cellHandle
#Create function to move latent to infectious
def LatentInfectious(cellHandle, time):
    """Promote a latent cell to infectious at `time` and return it.

    'Latent' becomes 'Infected', 'LatentL' becomes 'InfectedL'; any
    other cell type is reported as an error (the cell is still returned).
    """
    cellHandle.tInf = time
    promotions = {'Latent': 'Infected', 'LatentL': 'InfectedL'}
    if cellHandle.cellType in promotions:
        cellHandle.cellType = promotions[cellHandle.cellType]
    else:
        print("Error: Cell not Latent")
    return cellHandle
# <codecell>
# Scratch: manually fire one infection transition (state 0) to exercise
# CreateLatent and the latent dictionaries.
state_idx = 0
# NOTE(review): np.zeros(1e3) passes a float size, which errors on modern
# NumPy -- should be np.zeros(int(1e3)).
time = np.zeros(1e3)
j=1
time[j] = 1
Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
newCellID = lastCellID + 1
lastCellID = newCellID
newLatent = CreateLatent(Infector, newCellID, state_idx, time[j])
if state_idx ==0:
    LatentDict[newCellID] = newLatent
elif state_idx == 2:
    LatentLDict[newCellID] = newLatent
else:
    print('Incorrect State')
# <codecell>
#Try numba
from numba import double
from numba.decorators import jit, autojit
import timeit
from __future__ import division
import numpy as np
import random
# Benchmark fixture: 1000 random 3-D points, plus a preallocated distance
# buffer (pairwise_python below allocates its own and ignores this one).
X = np.random.random((1000, 3))
D = np.empty((1000, 1000))
def pairwise_python(X):
    """Return the M x M matrix of pairwise Euclidean distances.

    X is an (M, N) array of M points in N dimensions; the pure-Python
    triple loop is deliberately naive (it is the numba benchmark baseline).
    """
    M = X.shape[0]
    N = X.shape[1]
    # FIX: np.float was a deprecated alias of the builtin float and was
    # removed in NumPy 1.24; use float directly (same dtype).
    D = np.empty((M, M), dtype=float)
    for i in range(M):
        for j in range(M):
            d = 0.0
            for k in range(N):
                tmp = X[i, k] - X[j, k]
                d += tmp * tmp
            D[i, j] = np.sqrt(d)
    return D
%timeit pairwise_python(X)
# <codecell>
# <codecell>
@autojit
class HCVHepatocyte:
    """A single hepatocyte in the HCV infection simulation (numba-autojit
    variant of the class defined earlier).

    Tracks the cell's identity, its infector, how it was infected
    (cell-to-cell vs. virion), and the times of its state transitions.
    """
    def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None):
        self.cellID = cellID      #ID of cell
        self.parentID = parentID  #ID of infector, whether it is virus or infected cell
        self.infType = infType    #type of infection (from virus or from infected cell)
        self.tLat = tLat          #time of infection of cell (time cell became latently infected)
        self.cellType = cellType  #type of cell: latent, longterm, infectious, infectious from longterm,
                                  #dead, dead from long term
        self.tInf = tInf          #time cell became infectious
        self.tDead = tDead        #time of death
        # Sanity checks: infectious cells must carry tInf, dead cells tDead.
        if cellType in ('Infected', 'InfectedL'):
            if tInf is None:
                print("Error: Infectious cells must have time Infectious")
        elif cellType in ('Dead', 'DeadL'):
            # BUG FIX: the original tested tInf here; per the error message a
            # dead cell must carry its time of death, so test tDead instead.
            if tDead is None:
                print("Error: Dead cells must have time of death")
    #define method for infecting a susceptible cell
    def InfectCell(self, newID, simTime, newInfType):
        '''Return a new latently infected HCVHepatocyte seeded by this cell.

        Only infectious cells may infect; otherwise an error is printed and
        None is returned implicitly.
        '''
        if self.cellType not in ['Infected', 'InfectedL']:
            print("Error: Latent Cell cannot infect")
        else:
            return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType)
class HCVVirion:
    """A free virion; can seed a latent infection in a healthy hepatocyte."""

    def __init__(self, virusID, parentID):
        # Identity of this virion and of the infected cell that exported it.
        self.virusID = virusID
        self.parentID = parentID

    def InfectCell(self, newID, simTime, newInfType):
        """Create and return the hepatocyte this virion infects at simTime."""
        return HCVHepatocyte(newID, self.virusID, 'Virus', simTime, newInfType)
# Scratch check: seed the simulation clock and build one latently
# infected cell (infected by a virion at t = 0).
time = 0;
cell1 = HCVHepatocyte(1, None, 'Virus', time, 'Latent')
#Create function to randomly select one cell to infect
def CreateLatent(cellHandle, newID, state_idx, simTime):
    """Spawn a latent cell from an infecting transition.

    States 0/1 produce a short-term latent cell, states 2/3 a long-term
    latent one; any other state is reported as an error (returns None).
    """
    if state_idx in (0, 1):
        latent_kind = 'Latent'
    elif state_idx in (2, 3):
        latent_kind = 'LatentL'
    else:
        print("Error: State is not an infecting transition")
        return None
    return cellHandle.InfectCell(newID, simTime, latent_kind)
# Numba-accelerated wrapper around CreateLatent (autojit infers types).
CreateLatentNumba = autojit(CreateLatent)
#Create function to Kill Infected cell
def KillInfected(cellHandle, time):
    """Mark an infectious cell as dead at `time` and return it.

    'Infected' becomes 'Dead', 'InfectedL' becomes 'DeadL'; any other
    cell type is reported as an error (the cell is still returned).
    """
    cellHandle.tDead = time
    transitions = {'Infected': 'Dead', 'InfectedL': 'DeadL'}
    if cellHandle.cellType in transitions:
        cellHandle.cellType = transitions[cellHandle.cellType]
    else:
        print("Error: Cannot kill uninfected cell")
    return cellHandle
# Rebind KillInfected to its numba-autojit-compiled version.
KillInfected = autojit(KillInfected)
#Create function to move latent to infectious
def LatentInfectious(cellHandle, time):
    """Promote a latent cell to infectious at `time` and return it.

    'Latent' becomes 'Infected', 'LatentL' becomes 'InfectedL'; any
    other cell type is reported as an error (the cell is still returned).
    """
    cellHandle.tInf = time
    promotions = {'Latent': 'Infected', 'LatentL': 'InfectedL'}
    if cellHandle.cellType in promotions:
        cellHandle.cellType = promotions[cellHandle.cellType]
    else:
        print("Error: Cell not Latent")
    return cellHandle
# ---------------------------------------------------------------------------
# Second copy of the simulation setup (numba benchmark run): fewer steps
# (M = 1e5), a larger scaled liver (N = N_liver/1e6), and dict-based
# per-day records instead of lists.
# ---------------------------------------------------------------------------
#Number of state transitions to observe
M = int(1e5)
# time vector (NOTE: shadows the scalar `time` used in earlier cells)
time = np.zeros(M)
#Define parameters
init=10 #10 #initial number of infected hepatocytes
v_init = 0#initial viral load
ALT_init = 100 #initial ALT level
rho = 8.18 #viral export rate
c = 22.3 #viral clearance rate
gamma = 1500 #scaling factor -
R = 4.1825 #average HCV RNA in infected hepatocyte
N_liver = int(1e11) #Number of cells in liver
alpha = 1 #1/latent period (days)
alpha_x = 1.3e-2 #1/long-term latent period
nu_T = 1.4e-2 #death rate of healthy cells
# NOTE(review): under Python 2 without a __future__ division import in this
# namespace, 1/7 evaluates to 0 — confirm true division is in effect.
nu_I = 1/7 #death rate of infected cells
phi_T = 10*nu_T #regeneration rate of dead healthy cells
phi_I = .8*phi_T #regeneration rate of dead infected cells
beta_V = .5e-8 #viral transmision rate
beta_L = R*1e-5/(60*24) #cell-cell transmission rate
eta = .01 #proportion of infected cells that go long-term latent
kappa = 0 #.1 #proportion of dead infected cells regenerated as infected cells
changes = 13;
delta = .33 #ALT degradation rate
N=N_liver/1e6 #initial number of hepatocytes (scaled-down liver)
eps = (delta*ALT_init)/(nu_T*N) #rate of ALT production
# Per-capita transition rates; entries 0-3 are refreshed every iteration of
# the main loop as I and VL change.
Q = np.zeros(changes)
Q[0] = (1-eta)*(beta_L*init) #Infection of Target cell by cell-> latent
Q[1] = (1-eta)*beta_V*v_init #Infection of Target cell by virus -> latent
Q[2] = eta*beta_L*init #Infection of Target cell by cell -> long-term latent
Q[3] = eta*beta_V*v_init #Infection of Target cell by virus -> long-term latent
Q[4] = nu_T; #Death of target cell
Q[5] = alpha; #latent cell becomes infected
Q[6] = nu_T; #latent cell dies
Q[7] = alpha_x #long-term latent cell becomes infected
Q[8] = nu_T #long-term latent cell dies
Q[9] = nu_I; #Infected cell dies
Q[10] = phi_T; #Healthy cell regenerates
Q[11] = (1-kappa)*phi_I; #Infected cell regenerates into healthy cell
Q[12] = kappa*phi_I
#Construct matrix of state transition vectors
# Column k holds the compartment increments applied when transition k fires;
# rows are ordered (T, E, Ex, I, Dt, Di).
trans_vecs = np.zeros([6, changes])
#state 1: infection of healthy cell by cell-> latent
trans_vecs[0,0] = -1;
trans_vecs[1,0] = 1;
#state 2: infection of healthy cell by virus -> latent
trans_vecs[0,1] = -1;
trans_vecs[1,1] = 1;
#state 3: infection of healthy cell by cell -> long-term latent
trans_vecs[0,2] = -1;
trans_vecs[2,2] = 1;
#state 4: infection of healthy cell by virus -> long-term latent
trans_vecs[0,3] = -1;
trans_vecs[2,3] = 1;
#state 5: death of healthy cell
trans_vecs[0,4] = -1;
trans_vecs[4,4] = 1;
#state 6: movement of latent cell into infected
trans_vecs[1,5] = -1;
trans_vecs[3,5] = 1;
#state 7: death of latent cell
trans_vecs[1,6] = -1;
trans_vecs[4,6] = 1;
#state 8: movement of long-term latent cell into infected
trans_vecs[2,7] = -1;
trans_vecs[3,7] = 1;
#state 9: death of long-term latent cell
trans_vecs[2,8] = -1;
trans_vecs[4,8] = 1;
#state 10: death of infected cell
trans_vecs[3,9] = -1;
trans_vecs[5,9] = 1;
#state 11: regeneration of dead healthy cell
trans_vecs[4,10] = -1;
trans_vecs[0,10] = 1;
#state 12: regeneration of dead infected cell into healthy cell
trans_vecs[5,11] = -1;
trans_vecs[0,11] = 1;
#state 13: regeneration of dead infected cell into infected cell
trans_vecs[5,12] = -1;
trans_vecs[3,12] = 1;
#Initialize state variable vectors (one entry per transition taken)
T = np.zeros(M)
E = np.zeros(M)
Ex = np.zeros(M)
I = np.zeros(M)
Dt = np.zeros(M)
Di = np.zeros(M)
VL = np.zeros(M)
ALT = np.zeros(M)
state_vec = np.zeros(M)
# Per-day records keyed by integer day: InfectionChain[d] lists
# (infector, infectee) pairs, Infecteds[d] lists live infected/latent IDs.
InfectionChain = dict()
Infecteds = dict()
#Initialize Infected Hepatocyte objects: half short-term, half long-term
InfectedDict = {}
for i in range(0,int(init/2)):
    x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0)
    InfectedDict[i] = x
for i in range(int(init/2),init):
    x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0)
    InfectedDict[i] = x
LatentDict = {}
LatentLDict = {}
DeadDict = {}
lastCellID = init-1 #get last cellID
#Input initial conditions
I[0] = init;
T[0] = N-init;
VL[0] = v_init
# ---------------------------------------------------------------------------
# Main Gillespie-style loop (dict-based record variant): draw competing
# exponential waiting times for the 13 transitions, apply the winner, and
# record per-day infection chains / infected-ID sets keyed by integer day.
# ---------------------------------------------------------------------------
j =0
InfectionArray = []
while I[j] >= 0 and j<M-1:
    #Update Q to reflect new number of infected cells and viruses
    Q[0] = (1-eta)*beta_L*I[j]
    Q[1] = (1-eta)*beta_V*VL[j]
    Q[2] = eta*beta_L*I[j]
    Q[3] = eta*beta_V*VL[j]
    #Calculate total transition rates (per-capita rate x compartment size)
    Qij = Q*[T[j],T[j],T[j], T[j],T[j], E[j],E[j], Ex[j], Ex[j], I[j], Dt[j], Di[j], Di[j]]
    #Draw from exponential distributions of waiting times; empty compartments
    #have rate 0 -> waiting time inf, so they can never win the minimum.
    time_vec = -np.log(np.random.random(changes))/Qij
    #find minimum waiting time and obtain index to ascertain next state jump
    newTime = min(time_vec)
    time_vecL = time_vec.tolist()
    state_idx = time_vecL.index(min(time_vecL))
    state_vec[j] = state_idx
    [T[j+1],E[j+1],Ex[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],Ex[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx]
    #make adjustments to hepatocyte dictionaries according to state transition
    #Infection of healthy cell by cell or virus -> latent or longterm latent
    if state_idx in [0,1,2,3]:
        Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
        newCellID = lastCellID + 1
        lastCellID = newCellID
        newLatent = CreateLatentNumba(Infector, newCellID, state_idx, time[j])
        if state_idx in [0,1]:
            LatentDict[newCellID] = newLatent
        elif state_idx in [2,3]:
            LatentLDict[newCellID] = newLatent
        else:
            print('Incorrect State')
    #Latent cell becomes infectious
    elif state_idx in [5,7]:
        if state_idx == 5:
            LatCell = LatentDict[random.choice(list(LatentDict.keys()))]
            del LatentDict[LatCell.cellID] #remove cell from Latent Dict
        elif state_idx == 7:
            LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))]
            del LatentLDict[LatCell.cellID]
        else:
            print('Incorrect State')
        InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict
    #Latent cell dies
    elif state_idx == 6:
        del LatentDict[random.choice(list(LatentDict.keys()))]
    #LatentL cell dies
    elif state_idx == 8:
        del LatentLDict[random.choice(list(LatentLDict.keys()))]
    #Infected cell dies: move it from InfectedDict to DeadDict
    elif state_idx == 9:
        KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))]
        del InfectedDict[KilledCell.cellID]
        KilledCell.cellType = 'Dead'
        KilledCell.tDead = time[j]
        DeadDict[KilledCell.cellID] = KilledCell
    #Dead infected cell regenerates into healthy cell -- just delete from dead dict
    elif state_idx == 11:
        del DeadDict[random.choice(list(DeadDict.keys()))]
    #Infected cell regenerated from Dead cell
    elif state_idx == 12:
        newCellID = lastCellID + 1
        lastCellID = newCellID
        DeadGen = DeadDict[random.choice(list(DeadDict.keys()))]
        del DeadDict[DeadGen.cellID]
        newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j])
        InfectedDict[newInfected.cellID] = newInfected
    #Output Infection chain and infecteds at each time step
    #add to array of infections with timestep
    if state_idx in [0,1,2,3]:
        if int(time[j]) in InfectionChain:
            InfectionChain[int(time[j])].append([Infector.cellID, newCellID])
        else:
            InfectionChain[int(time[j])] = [[Infector.cellID, newCellID]]
    elif state_idx == 12:
        if int(time[j]) in InfectionChain:
            InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID])
        else:
            # FIX: keep the day's entry a list of pairs (the original stored a
            # bare pair here, which the later sort/flatten step cannot handle).
            InfectionChain[int(time[j])] = [[DeadGen.cellID, newInfected.cellID]]
    else:
        if int(time[j]) not in InfectionChain:
            InfectionChain[int(time[j])] = []
    # FIX: dict.keys() views cannot be concatenated with `+` on Python 3;
    # use set-union / list() (same contents as the Python 2 expressions).
    if int(time[j]) in Infecteds:
        Infecteds[int(time[j])] = list(set(Infecteds[int(time[j])]) | set(InfectedDict) | set(LatentDict) | set(LatentLDict))
    else:
        Infecteds[int(time[j])] = list(InfectedDict) + list(LatentDict) + list(LatentLDict)
    #update viral load (quasi-steady-state) and ALT (Euler step)
    VL[j+1] = np.floor(rho*N_liver*(I[j+1]/N)*R/(gamma*c))
    ALT[j+1] = ALT[j] + (eps*(nu_T*(T[j] + E[j] + Ex[j]) + nu_I*I[j])-delta*ALT[j])*newTime
    time[j+1] = time[j] + newTime
    j+=1
# <codecell>
#write out function patterns for each state transition
# NOTE(review): scratch cell -- the state indices here ([6,8], 7, 10, 13, ...)
# do not match the numbering used by the live simulation loop above
# ([5,7], 6, 9, 12); confirm the intended mapping before reusing this code.
if state_idx in [0,1,2,3]: #Infection of healthy cell by cell or virus -> latent or longterm latent
    Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
    newCellID = lastCellID + 1
    lastCellID = newCellID
    newLatent = CreateLatent(Infector, newCellID, state_idx, time[j])
    if state_idx ==0:
        LatentDict[newCellID] = newLatent
    elif state_idx == 2:
        LatentLDict[newCellID] = newLatent
    else:
        print('Incorrect State')
elif state_idx in [6,8]: #Latent cell becomes infectious
    if state_idx == 6:
        LatCell = LatentDict[random.choice(list(LatentDict.keys()))]
        del LatentDict[LatCell.cellID] #remove cell from Latent Dict
    elif state_idx == 8:
        LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))]
        # NOTE(review): the cell was drawn from LatentLDict but is deleted
        # from LatentDict here -- looks like it should be LatentLDict.
        del LatentDict[LatCell.cellID]
    else:
        print('Incorrect State')
    InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict
elif state_idx == 7: #Latent cell dies
    del LatentDict[random.choice(list(LatentDict.keys()))]
# NOTE(review): state 8 is already consumed by the [6,8] branch above, so
# this branch is unreachable.
elif state_idx == 8: #LatentL cell dies
    del LatentLDict[random.choice(list(LatentLDict.keys()))]
elif state_idx == 10:
    KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
    newDead = KillInfected(KilledCell,time[j])
    DeadDict[newDead.cellID] = newDead
elif state_idx == 13: #Infected cell regenerated from Dead cell
    newCellID = lastCellID + 1
    lastCellID = newCellID
    # NOTE(review): samples a key from InfectedDict to index DeadDict --
    # presumably DeadDict.keys() was intended; verify.
    DeadGen = DeadDict[random.choice(list(InfectedDict.keys()))]
    newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j])
    InfectedDict[newInfected.cellID] = newInfected
# <codecell>
##########
# NOTE(review): orphaned fragment -- this `elif` has no matching `if`, so the
# cell is not runnable as-is. `newID` is undefined here (probably newCellID),
# and `newCellID += lastCellID` likely meant `newCellID = lastCellID + 1`.
elif state_idx in [1,3]: #Infection of healthy cell by virus -> latent or longterm latent
    InfectorID = random.choice(virionList[j])
    Infector = InfectedDict[InfectorID]
    newCellID +=lastCellID
    newLatent = CreateLatent(Infector, newID, state_idx, newTime)
    if state_idx ==0:
        LatentDict[newCellID] = newLatent
    elif state_idx == 2:
        LatentLDict[newCellID] = newLatent
    else:
        print('Incorrect State')
#Create virion objects from infected cells
def GenerateVirions(cellDict, rho, R, gamma, c, N, N_liver):
    """Return a flat list of source-cell IDs, one entry per exported virion.

    Each infected cell in cellDict contributes
    nVirions = floor(rho * (N_liver/N) * R / (gamma*c)) copies of its own
    cellID (the virion "objects" are reduced to their parent-cell IDs).

    BUG FIX: ndarray.fill() mutates in place and returns None, so the
    original rebound newVirions to None, crashed on extend(None), and
    (had it got that far) returned the undefined name newVirusID. Build
    the ID list directly and return only the list.
    """
    virionList = []  # one parent-cell ID per virion
    nVirions = int(np.floor(rho * (N_liver / N) * R / (gamma * c)))
    for idx in cellDict.keys():
        virionList.extend([cellDict[idx].cellID] * nVirions)
    return virionList
# <markdowncell>
# Incorporating lineage dynamics:
#
# create class InfectedCell
#
# create class LatentCell
#
# create class LongTermCell
#
# if transition is infected cell, add new cell to latent class, pick one infected cell randomly and take its sequence, change it by one random step
#
# if transition is latent cell becomes infectious, randomly choose latent cell and move it to infectious list
#
# keep a snapshot of what sequences are around at each timestep
#
# keep an id for each cell
#
# Latent cell array
#
# add in latent cells at each time step
#
#
# Infected cell class attributes:
#
# id
#
# parent id
#
# virus or cell infection
#
# time infectious
#
# time infected
#
# longterm
#
# Latent cell class attributes
#
# id
#
# parent id
#
# time infected
#
# virus or cell infection
#
# longterm
#
#
# virion class attributes
#
# id
#
# parent id
#
# lists of infected cells and latent cells at each timestep
#
#
# pseudo code
#
# create an array of latent cells
#
# create array of infected cells
#
# create list of infected cell ids
# create list of latent cell ids
# create list of longterm cell ids
#
#
# export timestep and infections: which cell(s) infected which on each day
# <codecell>
# Scratch: build one infectious cell and exercise CreateLatent on it.
cell2 = HCVHepatocyte(2, None, 'Virus', time, 'Infected', time+1)
# <codecell>
newID = 3
newLatent = CreateLatent(cell2, newID, 0, time)
# <codecell>
xlist= []
xlist.append(1)
# <codecell>
# Virions exported per cell (cf. GenerateVirions above).
np.floor((rho*(N_liver/N)*R/(gamma*c)))
# <codecell>
del cell2
# <codecell>
# NOTE(review): cell2 was deleted in the previous cell, so the remaining
# cells raise NameError unless the cells above are re-run out of order.
KillInfected(cell2,time)
# <codecell>
cell2.tDead
# <codecell>
cell2
# <codecell>
|
994,539 | 457309a52de2137e72028c15cab4eea122b7aeb6 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 10:53:22 2021
@author: ant67
"""
# 1. 단어 횟수를 기록한 사전을 만든다.(띄어쓰기 기반)
# 2. 각 단어에 대해 연속된 2개의 글자의 숫자를 세고 가장 많이 나오는 글자 2개의 조합을 찾는다.(bi-gram)
# 3. 두 글자를 합쳐 기존 사전의 단어를 수정한다.
# 4. 미리 정해 놓은 횟수만큼 2~3번의 과정을 반복한다.
# Algorithm 1: Learn BPE operations
import re, collections
def get_stats(vocab):
    """Count adjacent-symbol (bigram) frequencies over a BPE vocabulary.

    vocab maps a space-separated symbol string to its corpus frequency;
    the result maps each (left, right) symbol pair to a weighted count.
    """
    pairs = collections.defaultdict(int)
    for word, freq in vocab.items():
        symbols = word.split()
        for left, right in zip(symbols, symbols[1:]):
            pairs[(left, right)] += freq
    return pairs
def merge_vocab(pair, v_in):
    """Merge one symbol pair everywhere in the vocabulary.

    pair is a (left, right) symbol tuple; every occurrence of "left right"
    that forms whole symbols (bounded by whitespace or the string ends) is
    replaced by the concatenated symbol "leftright".

    BUG FIX: the lookarounds were double-escaped ("\\S" inside a raw
    string), so they matched a literal backslash + S instead of
    "non-whitespace". The guard therefore never applied and the bigram
    could be merged inside larger symbols (e.g. 'al ow' -> 'alow'). Use
    \\S as in the reference BPE implementation.
    """
    v_out = {}
    bigram = re.escape(' '.join(pair))
    # (?<!\S) / (?!\S): the match must not touch adjacent non-whitespace.
    p = re.compile(r'(?<!\S)' + bigram + r'(?!\S)')
    for word in v_in:
        w_out = p.sub(''.join(pair), word)
        v_out[w_out] = v_in[word]
    return v_out
'''
vocab = {'l o w </w>': 5, 'l o w e r </w>':2,'n e w e s t </w>':6, 'w i d e s t </w>':3} # 1번 과정
num_merges = 10
for i in range(num_merges): # 4번 과정
pairs = get_stats(vocab) # 2번 과정
best = max(pairs, key=pairs.get) # 2번 과정
vocab = merge_vocab(best,vocab) # 3번 과정
print(f'Step {i+1}')
print(best)
print(vocab)
print('\\n')
'''
###################################
# Demo: learn 10 BPE merges over three example sentences.
S1 = "나는 책상 위에 사과를 먹었다"
S2 = "알고 보니 그 사과는 Jason 것이었다"
S3 = "그래서 Jason에게 사과를 했다."
# 1. Count whitespace-delimited tokens across the corpus.
#    (Idiom fix: dict.get with a default replaces the `== None` test, and
#    the unused `index` counter was dropped.)
token_counts = {}
for sentence in [S1, S2, S3]:
    for token in sentence.split():
        token_counts[token] = token_counts.get(token, 0) + 1
print(token_counts)
# 2. Split every token into space-separated characters so BPE can merge
#    adjacent character pairs back together.
token_counts = {" ".join(token) : counts for token, counts in token_counts.items()}
print(token_counts)
# 3./4. Repeatedly merge the most frequent adjacent pair.
num_merges=10
for i in range(num_merges):
    pairs = get_stats(token_counts)
    best=max(pairs,key=pairs.get)
    token_counts = merge_vocab(best, token_counts)
    print(f'Step {i+1}')
    print(best)
    print(token_counts)
    print('\\n')
|
994,540 | e53bba1f745551ea19b8d94b1b0b069426830f62 | """
This type stub file was generated by pyright.
"""
from .vtkDataObject import vtkDataObject
class vtkAbstractElectronicData(vtkDataObject):
    """
    vtkAbstractElectronicData - Provides access to and storage of
    chemical electronic data
    Superclass: vtkDataObject

    NOTE: pyright-generated type stub — every method body is intentionally
    elided (...) and the docstrings mirror the wrapped C++ VTK API.
    """
    def DeepCopy(self, vtkDataObject):
        """
        V.DeepCopy(vtkDataObject)
        C++: void DeepCopy(vtkDataObject *obj) override;
        Deep copies the data object into this.
        """
        ...
    def GetElectronDensity(self):
        """
        V.GetElectronDensity() -> vtkImageData
        C++: virtual vtkImageData *GetElectronDensity()
        Returns vtkImageData for the molecule's electron density. The
        data will be calculated when first requested, and cached for
        later requests.
        """
        ...
    def GetHOMO(self):
        """
        V.GetHOMO() -> vtkImageData
        C++: vtkImageData *GetHOMO()
        Returns vtkImageData for the Highest Occupied Molecular Orbital.
        """
        ...
    def GetHOMOOrbitalNumber(self):
        """
        V.GetHOMOOrbitalNumber() -> int
        C++: vtkIdType GetHOMOOrbitalNumber()
        Returns the orbital number of the Highest Occupied Molecular
        Orbital.
        """
        ...
    def GetLUMO(self):
        """
        V.GetLUMO() -> vtkImageData
        C++: vtkImageData *GetLUMO()
        Returns vtkImageData for the Lowest Unoccupied Molecular Orbital.
        """
        ...
    def GetLUMOOrbitalNumber(self):
        """
        V.GetLUMOOrbitalNumber() -> int
        C++: vtkIdType GetLUMOOrbitalNumber()
        Returns the orbital number of the Lowest Unoccupied Molecular
        Orbital.
        """
        ...
    def GetMO(self, p_int):
        """
        V.GetMO(int) -> vtkImageData
        C++: virtual vtkImageData *GetMO(vtkIdType orbitalNumber)
        Returns the vtkImageData for the requested molecular orbital.
        """
        ...
    def GetNumberOfElectrons(self):
        """
        V.GetNumberOfElectrons() -> int
        C++: virtual vtkIdType GetNumberOfElectrons()
        Returns the number of electrons in the molecule.
        """
        ...
    def GetNumberOfGenerationsFromBase(self, string):
        """
        V.GetNumberOfGenerationsFromBase(string) -> int
        C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
        override;
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def GetNumberOfGenerationsFromBaseType(self, string):
        """
        V.GetNumberOfGenerationsFromBaseType(string) -> int
        C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
            const char *type)
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def GetNumberOfMOs(self):
        """
        V.GetNumberOfMOs() -> int
        C++: virtual vtkIdType GetNumberOfMOs()
        Returns the number of molecular orbitals available.
        """
        ...
    def GetPadding(self):
        """
        V.GetPadding() -> float
        C++: virtual double GetPadding()
        Get the padding between the molecule and the cube boundaries.
        This is used to determine the dataset's bounds.
        """
        ...
    def IsA(self, string):
        """
        V.IsA(string) -> int
        C++: vtkTypeBool IsA(const char *type) override;
        Return 1 if this class is the same type of (or a subclass of) the
        named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def IsHOMO(self, p_int):
        """
        V.IsHOMO(int) -> bool
        C++: bool IsHOMO(vtkIdType orbitalNumber)
        Returns true if the given orbital number is the Highest Occupied
        Molecular Orbital, false otherwise.
        """
        ...
    def IsLUMO(self, p_int):
        """
        V.IsLUMO(int) -> bool
        C++: bool IsLUMO(vtkIdType orbitalNumber)
        Returns true if the given orbital number is the Lowest Unoccupied
        Molecular Orbital, false otherwise.
        """
        ...
    def IsTypeOf(self, string):
        """
        V.IsTypeOf(string) -> int
        C++: static vtkTypeBool IsTypeOf(const char *type)
        Return 1 if this class type is the same type of (or a subclass
        of) the named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...
    def NewInstance(self):
        """
        V.NewInstance() -> vtkAbstractElectronicData
        C++: vtkAbstractElectronicData *NewInstance()
        """
        ...
    def SafeDownCast(self, vtkObjectBase):
        """
        V.SafeDownCast(vtkObjectBase) -> vtkAbstractElectronicData
        C++: static vtkAbstractElectronicData *SafeDownCast(
            vtkObjectBase *o)
        """
        ...
    def __delattr__(self, *args, **kwargs):
        """ Implement delattr(self, name). """
        ...
    def __getattribute__(self, *args, **kwargs):
        """ Return getattr(self, name). """
        ...
    def __init__(self, *args, **kwargs) -> None:
        ...
    @staticmethod
    def __new__(*args, **kwargs):
        """ Create and return a new object.  See help(type) for accurate signature. """
        ...
    def __repr__(self, *args, **kwargs):
        """ Return repr(self). """
        ...
    def __setattr__(self, *args, **kwargs):
        """ Implement setattr(self, name, value). """
        ...
    def __str__(self, *args, **kwargs) -> str:
        """ Return str(self). """
        ...
    __this__ = ...
    __dict__ = ...
    __vtkname__ = ...
|
994,541 | 5ca4140c98c82cfcc4132e7e4e081ac2776f5ea2 | # all the computations for these problems are done using binary arithmetic
# only the user input and the final output will be in decimal.
# dec2bin and bin2dec convert between binary and decimal.
import random
import sys
import time
sys.setrecursionlimit(10000000)
from random import *
def Problem1Proj2(N, k):
    """Report on stdout whether the integer N is prime, tested with confidence k."""
    # primality3 works on the LSB-first bit-list representation of N.
    verdict = "N is a prime" if primality3(dec2bin(N), k) else "N is not a prime"
    print(verdict)
def Problem2Proj2(N, k):
    """Generate an N-bit probable prime (confidence k) and print its decimal value."""
    prime_bits = genPrime(N, k)
    print("Integer %s is a prime" % bin2dec(prime_bits))
def Problem3Proj2(n, k):
    """Interactive RSA demo: build keys from two n-bit primes, then encrypt/decrypt.

    n -- bit length of each generated prime; k -- Fermat-test confidence.
    All arithmetic is done on LSB-first bit lists (trailing -1 = negative).
    """
    #Calculate time to generate all required values
    start_time = time.time()
    #calculate E, D, N, then encrypt and decrypt message M.
    #create two empty bit vectors
    p = []
    q = []
    #keep finding a prime number until they are both different primes
    while compare(p, q) == 0:
        p = genPrime(n, k)
        q = genPrime(n, k)
    #calculate N and generate a random E
    N = mult(p, q)
    # NOTE(review): E is drawn with k bits (the confidence parameter); this
    # looks like it was meant to be n -- confirm before relying on key sizes.
    E = randomBitVec(k)
    #make E a new bitvector until gcd(E, (p-1)*(q*1)) = 1 (coprimes)
    while bin2dec(gcd(E, mult(sub(p, dec2bin(1)), sub(q, dec2bin(1))))) != 1:
        E = randomBitVec(k)
    #find D through the modinverse function
    D = modinv(E, mult(sub(p, dec2bin(1)), sub(q, dec2bin(1))))
    print("--- %s Seconds to generate cryptographic values ---" %(time.time() - start_time))
    print("N: %s" %bin2dec(N))
    print("E: %s" %bin2dec(E))
    print("D: %s" %bin2dec(D))
    M = int(input("Please enter a message as an integer: "))
    #calculate Cipher by raising binary message M to power E mod N
    C = modexp(dec2bin(M), E, N)
    print("Encrypted Message: %s" %bin2dec(C))
    #decryption statement to get CPrime (the decrypted Message)
    CPrime = modexp(C, D, N)
    print("Decrypted Message: %s" %bin2dec(CPrime))
def Problem1(A, B, C, D):
    """Print A**B - C**D computed with the bit-list arithmetic routines."""
    difference = sub(exp(dec2bin(A), B), exp(dec2bin(C), D))
    print(bin2dec(difference))
def Problem2(A, B, C, D):
    """Print quotient and remainder of A**B divided by C**D."""
    quotient, remainder = divide(exp(dec2bin(A), B), exp(dec2bin(C), D))
    print("quotient:")
    print(bin2dec(quotient))
    print("remainder:")
    print(bin2dec(remainder))
def Problem3(A):
    """Print the harmonic sum 1/1 + 1/2 + ... + 1/A as a fully reduced fraction."""
    #this problem calculates sum of 1/1 + 1/2 +... + 1/A
    (n, d) = problem3Help(dec2bin(1), dec2bin(A))
    # reduce by the gcd before printing numerator and denominator
    G = gcd(n, d)
    print ("Numerator:")
    (q, r) = divide(n, G)
    print (bin2dec(q))
    print ("Denominator:")
    (q, r) = divide(d, G)
    print (bin2dec(q))
def problem3Help(A, B):
    """Return (numerator, denominator) of 1/A + 1/(A+1) + ... over B terms.

    A is the current term counter and B the number of terms remaining;
    both are LSB-first bit lists.
    """
    #recursively goes through to calculate totals for numerator and denominator
    if compare(B, dec2bin(1)) == 0:
        return (B,A)
    #incrementing A by 1 for the depth of the series
    (n, d) = problem3Help(add(A, dec2bin(1)), sub(B, dec2bin(1)))
    #multiply A by the bottom of other factor i.e. 1/2 + 1/3 == 3/6 + 2/6
    return (add(mult(n, A), d), mult(d, A))
def primality(N):
    """One Fermat round: pick a random 1 < X < N and test X**(N-1) == 1 (mod N).

    N is an LSB-first bit list; returns True when the round passes.
    """
    n_minus_1 = sub(N, dec2bin(1))
    witness = randint(2, bin2dec(n_minus_1))
    residue = modexp(dec2bin(witness), sub(N, dec2bin(1)), N)
    return bin2dec(residue) == 1
def primality2(N, k):
    """Run k independent Fermat rounds on bit list N; True only if all pass.

    Bug fix: the original looped over ``range(0, k - 1)`` which performs only
    k-1 rounds, and for k == 1 performs NO round at all (so every candidate
    was declared prime at confidence 1).
    """
    for _ in range(k):
        if not primality(N):
            return False
    return True
def primality3(N, k):
    """Screen bit list N by small trial divisors, then run k Fermat rounds."""
    # Quick rejection: divisible by any of the first few primes.
    # NOTE(review): this also rejects N equal to 2, 3, 5 or 7 themselves,
    # matching the original behaviour.
    for small_prime in (2, 3, 5, 7):
        _, remainder = divide(N, dec2bin(small_prime))
        if bin2dec(remainder) == 0:
            return False
    return primality2(N, k)
def genPrime(N, k):
    """Draw random N-bit candidates until one passes primality3 at confidence k."""
    while True:
        candidate = randomBitVec(N)
        if primality3(candidate, k):
            return candidate
def randomBitVec(N):
    """Return a random N-bit number as an LSB-first bit list.

    The lowest and highest bits are forced to 1, so the value is odd and
    genuinely N bits wide; the N-2 middle bits are uniform random.
    """
    middle = [randint(0, 1) for _ in range(N - 2)]
    return [1] + middle + [1]
def modinv(A, B):
    """Return the modular inverse of A mod B (bit lists); assumes gcd(A, B) == 1.

    A trailing -1 element on a bit list marks a negative value.
    """
    #find the extendedEuclid
    (x, y) = extendedEuclid(A, B)
    #if the negative flag is set
    if x[len(x) - 1] == -1:
        #we delete the negative flag and increment x by B:
        #example -117 with N of 352 -> 352 - 117 = 235 (the correct D)
        del x[len(x) - 1]
        x = sub(B, x)
    return x
def extendedEuclid(A, B):
    """Extended Euclidean algorithm on LSB-first bit lists.

    Returns a pair of Bezout-style coefficients used by modinv(); a trailing
    -1 element marks a negative value. NOTE(review): the exact (x, y)
    convention is nonstandard here -- it is only exercised through modinv(),
    so confirm before reusing it directly.
    """
    if zero(B):
        #set D and return 1 and 0 binary
        return (dec2bin(1), dec2bin(0))
    #get the remainder and the q, ones used in recursion, the other in the return
    (q, r) = divide(A, B)
    (x, y) = extendedEuclid(B, r)
    #just reset x and y if they're 0, sometimes get get trimmed to no items yikes!
    if len(x) == 0:
        x = [0]
    if len(y) == 0:
        y = [0]
    if len(y) > 0:
        #if y is negative
        if y[len(y) - 1] == -1:
            #create a new non negative version of y that we can math with
            retY = y[0:len(y)-1]
            if len(x) > 0:
                #if x is negative
                if x[len(x) - 1] == -1:
                    #make x a non negative, since X - QY and x and y < 0, -QY > 0, so QY - X suffices
                    del x[len(x) - 1]
                    return (y, sub(mult(q, retY), x))
            #just y < 0 -> -QY > 0 -> X + QY
            return (y, add(mult(q, retY), x))
    if len(x) > 0:
        #check if x is negative
        if x[len(x) - 1] == -1:
            #This means X < 0, so make non negative, X - QY = -(|X|+QY)
            del x[len(x) - 1]
            a = add(x, mult(q, y))
            a.append(-1)
            return (y, a)
    #Simply use the standard case, neither are negative.
    return (y, sub(x, mult(q, y)))
def modexp(x, y, N):
    """Compute x**y mod N on LSB-first bit lists (square-and-multiply)."""
    if zero(y):
        return [1]
    half, _ = divide(y, dec2bin(2))
    partial = modexp(x, half, N)
    squared = exp(partial, 2)
    if even(y):
        _, remainder = divide(squared, N)
    else:
        _, remainder = divide(mult(x, squared), N)
    return remainder
def shift(A, n):
    """Prepend n zero bits, i.e. multiply the LSB-first value by 2**n."""
    return [0] * n + A
def mult(X, Y):
    """Multiply two LSB-first bit lists by recursive doubling.

    mult(X, Y) = 2 * mult(X, Y // 2), plus X when Y is odd.
    """
    if zero(Y):
        return [0]
    partial = mult(X, div2(Y))
    doubled = add(partial, partial)
    return doubled if even(Y) else add(X, doubled)
def Mult(X, Y):
    """Decimal front-end for mult()."""
    product = mult(dec2bin(X), dec2bin(Y))
    return bin2dec(product)
def zero(X):
    """True if the bit list represents 0 ([] and all-zero lists both count)."""
    return 1 not in X
def div2(Y):
    """Drop the LSB: floor-divide the bit list's value by 2."""
    return Y[1:] if Y else Y
def even(X):
    """True when the value is even (empty list, or a low-order bit of 0)."""
    return len(X) == 0 or X[0] == 0
#################################################
# Addition Functions #
#################################################
def add(A, B):
    """Add two LSB-first bit lists, returning the sum as a new list."""
    lhs = A[:]
    rhs = B[:]
    # Zero-pad the shorter operand so both have the same width.
    width = max(len(lhs), len(rhs))
    lhs += [0] * (width - len(lhs))
    rhs += [0] * (width - len(rhs))
    total = []
    carry = 0
    # Ripple-carry full adder, bit by bit.
    for bit_a, bit_b in zip(lhs, rhs):
        total.append(exc_or(int(bit_a), int(bit_b), carry))
        carry = nextcarry(carry, int(bit_a), int(bit_b))
    if carry == 1:
        total.append(carry)
    return total
def Add(A, B):
    """Decimal front-end for add()."""
    total = add(dec2bin(A), dec2bin(B))
    return bin2dec(total)
#################################################
# Subtraction Functions #
#################################################
def sub(X,Y):
    """Return X - Y on LSB-first bit lists via two's-complement addition.

    A trailing -1 element on the result marks a negative value.
    """
    A1 = X[:]
    B1 = Y[:]
    n = len(A1)
    m = len(B1)
    negative = False
    if zero(Y):
        return X
    if n < m:
        for j in range(len(B1) - len(A1)):
            A1.append(0) # This adds to the A List
    else:
        for j in range(len(A1) - len(B1)):
            B1.append(0) # This adds to the B1 List
    # extra sign bit so the two's-complement math below cannot overflow
    A1.append(0)
    B1.append(0)
    # one's complement of Y ...
    for j in range(len(B1)):
        if B1[j] == 1:
            B1[j] = 0
        else:
            B1[j] = 1
    # ... plus one -> two's complement; then X + (-Y)
    BC = add(dec2bin(1), B1)
    S = add(A1, BC)
    if len(S) > len(BC):
        S.pop()
    # a set sign bit means the true result is negative: re-complement it
    if S[len(S) - 1] == 1:
        negative = True
        for j in range(len(S)):
            if S[j] == 1:
                S[j] = 0
            else:
                S[j] = 1
        S = add(dec2bin(1), S)
        S.pop()
    if negative:
        S.append(-1)
    return S
def Sub(A,B):
    """Decimal front-end for sub()."""
    difference = sub(dec2bin(A), dec2bin(B))
    return bin2dec(difference)
def exp(A, B):
    """Raise bit-list A to the non-negative integer power B via repeated mult().

    Bug fix: the original fell through for B == 0 and returned a copy of A
    itself, but A**0 is 1.
    """
    if B == 0:
        return [1]
    total = A[:]
    for _ in range(B - 1):
        total = mult(A, total)
    return total
def Exp(A,B):
    """Decimal front-end for exp(): A ** B."""
    power = exp(dec2bin(A), B)
    return bin2dec(power)
def exc_or(a, b, c):
    """Three-way XOR: the sum bit of a full adder."""
    return a ^ b ^ c
def nextcarry(a, b, c):
    """Majority of three bits: the carry-out of a full adder."""
    return 1 if (a & b) | (b & c) | (c & a) else 0
def bin2dec(A):
    """Convert an LSB-first bit list to a decimal int.

    A trailing -1 element marks a negative value. Bug fix: the original
    ``del``-eted that sign flag from the caller's list as a side effect;
    the argument is now left untouched. (Also stops shadowing builtin
    ``pow`` with a local.)
    """
    if len(A) == 0:
        return 0
    sign = 1
    bits = A
    if A[-1] == -1:
        sign = -1
        bits = A[:-1]
    value = 0
    for position, bit in enumerate(bits):
        value += bit * (2 ** position)
    return value * sign
def reverse(A):
    """Return a reversed copy of the bit list."""
    return list(reversed(A))
def trim(A):
    """Strip high-order zero bits, i.e. trailing entries of the LSB-first list."""
    result = A[:]
    while result and result[-1] == 0:
        result.pop()
    return result
def compare(A, B):
    """Compare two bit lists: 1 if A > B, 2 if A < B, 0 if equal."""
    # Trim leading zeros, then compare MSB-first.
    a = reverse(trim(A))
    b = reverse(trim(B))
    if len(a) != len(b):
        return 1 if len(a) > len(b) else 2
    for bit_a, bit_b in zip(a, b):
        if bit_a != bit_b:
            return 1 if bit_a > bit_b else 2
    return 0
def Compare(A, B):
    """Decimal front-end for compare(): 1 if A > B, 2 if A < B, else 0.

    Bug fix: the original wrapped the result in bin2dec(), but compare()
    already returns a plain int -- bin2dec() calls len() on it and raised
    TypeError on every call.
    """
    return compare(dec2bin(A), dec2bin(B))
def dec2bin(n):
    """Convert a non-negative int to an LSB-first bit list ([] represents 0).

    Bug fix: the original used ``n / 2``, which is float division on
    Python 3 -- the recursion then produced fractional "bits" (e.g. 0.5)
    instead of terminating cleanly. Floor division restores the intended
    Python 2 behaviour.
    """
    if n == 0:
        return []
    return [n % 2] + dec2bin(n // 2)
def map(v):
    """Translate the bit list of a single decimal digit (0-9) to its character.

    Returns None for bit patterns outside 0-9. NOTE(review): this shadows
    the builtin ``map``; the name is kept because bin2dec1() calls it.
    """
    digit_table = {
        (): '0', (0,): '0', (1,): '1',
        (0, 1): '2', (1, 1): '3',
        (0, 0, 1): '4', (1, 0, 1): '5', (0, 1, 1): '6', (1, 1, 1): '7',
        (0, 0, 0, 1): '8', (1, 0, 0, 1): '9',
    }
    return digit_table.get(tuple(v))
def bin2dec1(n):
    """Render a bit list as a decimal string, one digit at a time."""
    if len(n) <= 3:
        return map(n)
    quotient, remainder = divide(n, [0, 1, 0, 1])  # [0, 1, 0, 1] == decimal 10
    return bin2dec1(trim(quotient)) + map(trim(remainder))
def divide(X, Y):
    """Return (quotient, remainder) of X divided by Y, as LSB-first bit lists.

    Classic recursive binary long division; either result may be [] (zero).
    """
    # finds quotient and remainder when X is divided by Y
    if zero(X):
        return ([], [])
    (q, r) = divide(div2(X), Y)
    q = add(q, q)
    r = add(r, r)
    if not even(X):
        r = add(r, [1])
    # compare != 2 means r >= Y: subtract once and set the low quotient bit
    if not compare(r, Y) == 2:
        r = sub(r, Y)
        q = add(q, [1])
    return (q, r)
def Divide(X, Y):
    """Decimal front-end for divide(): returns (quotient, remainder)."""
    quotient, remainder = divide(dec2bin(X), dec2bin(Y))
    return (bin2dec(quotient), bin2dec(remainder))
def gcd(A,B):
    """Euclid's algorithm on LSB-first bit lists."""
    if zero(B):
        return A
    _, remainder = divide(A, B)
    return gcd(B, remainder)
def GCD(A,B):
    """Decimal front-end for gcd()."""
    divisor = gcd(dec2bin(A), dec2bin(B))
    return bin2dec(divisor)
def _read_int(prompt):
    """Read one integer from stdin using *prompt*."""
    return int(input(prompt))

def main():
    """Interactive menu dispatching to the arithmetic/crypto demos; 7 exits.

    Improvement: the long menu text and the repeated A/B/C/D prompts were
    duplicated verbatim in the original; they are now built once. The
    printed strings are unchanged.
    """
    menu = ("1. A^B - C^D\n2. A^B / C^D\n3. 1/1 + ... + 1/n\n"
            "4. Primality Test\n5. Generate N bit prime\n"
            "6. Encrypt and Decrypt\nOr 7 to exit\n")
    I = _read_int("Select a function: \n" + menu)
    while I != 7:
        if I == 1:
            print("inside selection")
            print("Selection: A^B - C^D:")
            args = [_read_int("Enter an %s value:\n" % v) for v in "ABCD"]
            Problem1(*args)
        elif I == 2:
            print("Selection: A^B / C^D:")
            args = [_read_int("Enter an %s value:\n" % v) for v in "ABCD"]
            Problem2(*args)
        elif I == 3:
            print("Selection: 1/1 + ... + 1/n")
            Problem3(_read_int("Enter an A value: \n"))
        elif I == 4:
            print("Selection: Primality test")
            Problem1Proj2(_read_int("Enter a possible prime N: "),
                          _read_int("Enter a confidence k: "))
        elif I == 5:
            print("Selection: Generate N bit prime")
            Problem2Proj2(_read_int("Enter a bit length N: "),
                          _read_int("Enter a confidence k: "))
        elif I == 6:
            print("Selection: Encrypt and Decrypt")
            Problem3Proj2(_read_int("Enter a bit length N: "),
                          _read_int("Enter a confidence k: "))
        I = _read_int("Select another function: \n" + menu)
if __name__ == '__main__':
main()
|
994,542 | ed3c61b16d7b5341adb3085aae10fd5ca09f839d | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-04-26 13:06
# Updated in Django 2.0.5 on 2018-06-02 11:15
from __future__ import unicode_literals
import json
from django.db import migrations, models
PREVIOUS_NAME_MAX_LENGTH = 40
def fix_truncated_language_names(apps, schema_editor):
    """Repair language names truncated to 40 chars by migration 0097_auto_20160519_0739.

    See https://github.com/swcarpentry/amy/issues/1165 for more info.
    """
    Language = apps.get_model('workshops', 'Language')

    # read the IANA language-subtag registry shipped with the repository
    with open('amy/workshops/migrations/data/registry.json', encoding='utf-8') as f:
        languages_json = json.load(f)

    # 1. keep only proper languages (skip sublanguages/dialects: subtags
    #    longer than two characters),
    # 2. join each entry's description lines into one name,
    # 3. keep only names long enough to have been truncated.
    long_names = (
        name
        for entry in languages_json
        if entry['Type'] == 'language' and len(entry['Subtag']) <= 2
        for name in [' '.join(entry['Description'])]
        if len(name) >= PREVIOUS_NAME_MAX_LENGTH
    )

    for language_name in long_names:
        truncated = language_name[:PREVIOUS_NAME_MAX_LENGTH]
        try:
            lang = Language.objects.get(name=truncated)
        except Language.DoesNotExist:
            pass
        else:
            lang.name = language_name
            lang.save()
class Migration(migrations.Migration):
    # Widens Language.name from 40 to 100 characters, then restores the
    # names that were truncated by the earlier 40-char limit.
    dependencies = [
        ('workshops', '0138_auto_20180524_1400'),
    ]
    operations = [
        migrations.AlterField(
            model_name='language',
            name='name',
            field=models.CharField(help_text='Description of this language tag in English', max_length=100),
        ),
        # data fix must run after the column has been widened
        migrations.RunPython(fix_truncated_language_names),
    ]
|
994,543 | e0d1d0529a660aa73b76a1fa804ef1f2563a2e0d | #!/usr/bin/python
#copyright (c) 2010 Knowledge Quest Infotech Pvt. Ltd.
# Produced at Knowledge Quest Infotech Pvt. Ltd.
# Written by: Knowledge Quest Infotech Pvt. Ltd.
# zfs@kqinfotech.com
#
# This software is NOT free to use and you cannot redistribute it
# and/or modify it. You should be possesion of this software only with
# the explicit consent of the original copyright holder.
#
# This is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
###############################################################################
#
# __stc_assertion_start
#
# ID: grow_replicas_001_pos
#
# DESCRIPTION:
# A ZFS file system is limited by the amount of disk space
# available to the pool. Growing the pool by adding a disk
# increases the amount of space.
#
# STRATEGY:
# 1) Fill a ZFS filesystem mirror/raidz until ENOSPC by creating lots
# of files
# 2) Grow the mirror/raidz by adding a disk
# 3) Verify that more data can now be written to the file system
#
# TESTABILITY: explicit
#
# TEST_AUTOMATION_LEVEL: automated
#
# CODING_STATUS: COMPLETED (2005-10-04)
#
# __stc_assertion_end
#
################################################################################
import os
import sys
sys.path.append("../../../../lib")
from libtest import *
from common_variable import *
from grow_replicas_cfg import *
# --- test body -------------------------------------------------------------
log_assert("A zpool mirror/raidz may be increased in capacity by adding a disk.")
# Disable compression so the writes below genuinely consume pool space.
log_must([[ZFS,"set","compression=off",TESTPOOL+"/"+TESTFS]])
# Step 1: fill the filesystem until the write fails with ENOSPC.
(out, ret) = cmdExecute([[FILE_WRITE,"-o","create","-f",TESTDIR+"/"+TESTFILE,"-b",BLOCK_SIZE,"-c",WRITE_COUNT,"-d","0"]])
ENOSPC = 28  # errno 28: "No space left on device"
# Bug fix: compare against the named ENOSPC constant (the original defined
# it and then compared against a bare magic 28 anyway).
if ret != ENOSPC:
    log_fail("file_write completed w/o ENOSPC, aborting!!!")
if not os.path.exists(TESTDIR+"/"+TESTFILE):
    log_fail(TESTDIR+"/"+TESTFILE +" was not created..")
if not os.path.getsize(TESTDIR+"/"+TESTFILE) > 0 :
    log_fail(TESTDIR+"/"+TESTFILE +" was empty..")
DISK2 = "/dev/"+sys.argv[1]
DISK3 = "/dev/"+sys.argv[2]
# Step 2: grow the pool by force-adding two more disks.
log_must([[ZPOOL,"add","-f",TESTPOOL,POOLTYPE,DISK2,DISK3]])
# Step 3: verify more data can now be written.
log_must([[FILE_WRITE,"-o","append","-f",TESTDIR+"/"+TESTFILE,"-b",BLOCK_SIZE,"-c",SMALL_WRITE_COUNT,"-d","0"]])
log_must([[ZFS,"inherit","compression",TESTPOOL+"/"+TESTFS]])
log_pass("TESTPOOL mirror/raidz successfully grown")
|
994,544 | 5d55ee09d0ec867dc75a098f29e8b134d23f59a1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import csv
import cerberus
import codecs
import tool
"""
When create the csv file ,use this file.
The path is at result directory.
"""
file_name = "data/shanghai_china.osm"
NODES_PATH = "result/nodes.csv"
NODE_TAGS_PATH = "result/nodes_tags.csv"
WAYS_PATH = "result/ways.csv"
WAY_NODES_PATH = "result/ways_nodes.csv"
WAY_TAGS_PATH = "result/ways_tags.csv"
NODE_FIELDS = ['id', 'lat', 'lon', 'user',
'uid', 'version', 'changeset', 'timestamp']
NODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']
WAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_NODES_FIELDS = ['id', 'node_id', 'position']
class UnicodeDictWriter(csv.DictWriter, object):
    """csv.DictWriter that writes non-ASCII text correctly on Python 3.

    Bug fix: the previous implementation called ``v.encode('utf-8')`` on
    every string value. On Python 3 that produces ``bytes``, which the csv
    module then serialises via ``str()`` as ``b'...'``, corrupting the
    output files. Python 3 strings are already unicode, so values are now
    passed through unchanged; the subclass is kept so call sites and the
    row-by-row ``writerows`` behaviour stay the same.
    """

    def writerow(self, row):
        # The underlying text stream (opened via codecs.open) handles the
        # actual byte encoding.
        super(UnicodeDictWriter, self).writerow(row)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
def get_element(file_name, tags=('node', 'way', 'relation')):
    """Yield end-of-element events for the requested tags from an OSM file.

    The document root is cleared after each yielded element so memory stays
    bounded on multi-gigabyte extracts.
    """
    context = ET.iterparse(file_name, events=('start', 'end'))
    _, root = next(context)
    for event, element in context:
        if event == 'end' and element.tag in tags:
            yield element
            root.clear()
# Export nodes and ways (plus their tags and way-node refs) from the OSM
# extract into the five CSV files declared above.
with codecs.open(NODES_PATH, 'w') as nodes_file, \
        codecs.open(NODE_TAGS_PATH, 'w') as nodes_tags_file, \
        codecs.open(WAYS_PATH, 'w') as ways_file, \
        codecs.open(WAY_NODES_PATH, 'w') as way_nodes_file, \
        codecs.open(WAY_TAGS_PATH, 'w') as way_tags_file:
    nodes_writer = UnicodeDictWriter(nodes_file, NODE_FIELDS)
    node_tags_writer = UnicodeDictWriter(nodes_tags_file, NODE_TAGS_FIELDS)
    ways_writer = UnicodeDictWriter(ways_file, WAY_FIELDS)
    way_nodes_writer = UnicodeDictWriter(way_nodes_file, WAY_NODES_FIELDS)
    way_tags_writer = UnicodeDictWriter(way_tags_file, WAY_TAGS_FIELDS)
    nodes_writer.writeheader()
    node_tags_writer.writeheader()
    ways_writer.writeheader()
    way_nodes_writer.writeheader()
    way_tags_writer.writeheader()
    validator = cerberus.Validator()
    # tool.inital_csvs() shapes each element into per-table dicts; validate
    # against the cerberus schema before writing.
    for element in get_element(file_name, tags=("node", "way")):
        el = tool.inital_csvs(element)
        # NOTE(review): debug print of every element -- consider removing
        # for large extracts; it dominates the runtime.
        print(el)
        if el:
            tool.validate_element(el, validator)
            if element.tag == "node":
                nodes_writer.writerow(el['node'])
                node_tags_writer.writerows(el['node_tags'])
            elif element.tag == 'way':
                ways_writer.writerow(el['way'])
                way_nodes_writer.writerows(el['way_nodes'])
                way_tags_writer.writerows(el['way_tags'])
|
994,545 | e5539edd7ea27f6caaa47aa517c6fdb7192772f6 | #!/usr/bin/env python
import json
import sys
def main():
    """Convert a colon-separated trivia flat file (argv[1]) to JSON (argv[2]).

    Input lines look like ``Category: ...``, ``Question: ...``,
    ``Answer: ...``, ``Regexp: ...``; '#' lines are comments. A record is
    appended each time a (question, answer, category) triple is complete;
    ``regexp`` is optional and may be None.

    Bug fixes for Python 3: ``str.decode`` no longer exists (the file is
    decoded on open instead, with errors='ignore' to keep the old lenient
    behaviour), and ``json.dumps`` lost its ``encoding`` keyword (the
    output file is opened as UTF-8 text instead).
    """
    input_filename = sys.argv[1]
    output_filename = sys.argv[2]

    output = {'trivia': []}
    with open(input_filename, 'r', encoding='utf8', errors='ignore') as input_file:
        question = None
        category = None
        answer = None
        regexp = None
        for line in input_file:
            if line[0] == '#':
                continue
            parsed = line.split(': ')
            if len(parsed) < 2:
                continue
            var = parsed[0].strip()
            # re-join in case the value itself contained ': '
            val = ': '.join(parsed[1:]).strip()
            if var == 'Category':
                category = val
            elif var == 'Question':
                question = val
            elif var == 'Answer':
                answer = val
            elif var == 'Regexp':
                regexp = val
            if question and answer and category:
                output['trivia'].append({
                    'question': question,
                    'answer': answer,
                    'category': category,
                    'regexp': regexp
                })
                question = None
                answer = None
                category = None
                regexp = None

    with open(output_filename, 'w', encoding='utf8') as output_file:
        output_file.write(json.dumps(output, ensure_ascii=False, indent=2,
                                     separators=(',', ': ')))
if __name__ == '__main__':
main() |
994,546 | 0a5303f41d8ff628f79b14c8f35318a3cb2b959e | import string
# Advent of Code day 6 part 2: for each group, count the questions that
# EVERYONE in the group answered "yes" to (set intersection), and sum.
groups = []  # renamed from `input`, which shadowed the builtin
with open('day6-input') as infile:
    lines = infile.readlines()

answers = []
persons = 0
for line in lines:
    if line != "\n":
        persons += 1
        answers.append(set(line.strip('\n')))
    else:
        # blank line terminates a group
        groups.append({"persons": persons, "answers": answers})
        answers = []
        persons = 0
# Bug fix: if the file does not end with a blank line, the final group was
# silently dropped; flush it here.
if answers:
    groups.append({"persons": persons, "answers": answers})
print(groups)

count = 0
for group in groups:
    # questions answered by every member = intersection of all answer sets
    count += len(group['answers'][0].intersection(*group['answers']))
print(count)
|
994,547 | 4d2116bf9a8cbc748b05f27e7645449f109a3bf7 | #!/usr/bin/env python3
import cmath
def main():
    """Read a complex number from stdin; print its modulus, then its phase."""
    value = complex(input())
    print(abs(value))
    print(cmath.phase(value))
if __name__ == "__main__":
main()
|
994,548 | 87fb5976611b2eb235ac0a307bbe410f71210a62 | """
python 3.6 built-in functions
https://docs.python.org/3/library/functions.html#built-in-functions
"""
print("\n", 1)
print("abs(x)")
print(abs(-4)) # >>> 4
# absolute value
# argument: int and float
# return the
print("\n", 2)
print("all(iterable)")
print(all([0, 4])) # >>> False
print(all([])) # >>> True
# argument: iterable objects
# return True if all elements of the iterable are true
# if the iterable is empty, return True
print("\n", 3)
print("any(iterable)")
print(any([0, 4])) # >>> True
# argument: iterable objects
# return True if any elements of the iterable are true
# if the iterable is empty, return True
print("\n", 4)
print("ascii(object)")
print(ascii("ö")) # >>> xf6n
print("Pyth\xf6n") # >>>Pythön
# argument: an object
# return a string containing a printable representation of an object, but escape the non-ASCII characters in the string returned by repr() using \x, \u, \U escapes.
# For example, ö is changed to \xf6n, √ is changed to \u221a
print("\n", 5)
print("bin(x)")
print(bin(3)) # >>> 0b11
print(bin(-10)) # >>> -0b1010
print(format(10, "b")) # >>> 1010, this can remove the "0b"
# convert to binary number with a prefix "0b"
# argument: an integer number
# return the binary value
print("\n", 6)
print("class bool([x])")
print(bool(0)) # >>> False
print(bool("0")) # >>> True
print(bool(None)) # >>> False
print(bool([])) # >>> False
# argument can be any object
# return True or False
# None, False, 0, 0.0,空字符串"", 空元组(), 空列表[], 空字典{} 这些算作False
# 其他皆为True
print("\n", 7)
print("class bytearray([source[, encoding[, errors]]])")
print(bytearray([0, 100, 255])) # >>> bytearray(b"\x00d\xff")
print(bytearray(12)) # >>> bytearray(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
print(bytes([0, 100, 255])) # >>> b"\x00d\xff"
print(bytes(12)) # >>> b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
print("\n", 8)
print("class bytes([source[, encoding[, errors]]])")
# 返回一个新的字节对象, 是一个在 0<= x < 256之间的不可变的整数序列.
# bytes 是 bytearray 的不可变版本 – 它具有同样的非改变性的方法和同样的索引和切片操作
# 因此, 构造函数参数的解释与bytearray()相同.
print("\n", 9)
print("callable(object)")
print(callable(1)) # >>> False
print(callable(abs)) # >>> True, function is callable
print(callable([1, 2])) # >>> True, function is callable
print(callable(zip())) # >>> False, if with "()"
# argument: any object
# return True if it is callable, otherwise False
print("\n", 10)
print("chr(i)")
print(chr(97)) # >>> a, refer the ascii table
print(ord("a")) # >>> 97, the inverse function of chr
print(chr(127)) # >>>
# The valid range for the argument is from 0 through 1,114,111
# ascii table is from 0 to 127
# return the character accordingly
print("\n", 11)
print("classmethod(function)")
print(classmethod(abs))
# 将函数包装成类方法
# oop环境
print("\n", 12)
print("compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)")
# compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
# 暂时不理解
print("\n", 13)
print("class complex([real[, imag]])")
print(complex("1+2j"))
# 返回值形式为real + imag * 1j的复数, 或将字符串或数字转换为复数
# does not allow white space in the string ("1 + 2j") will raise error
# 暂时不理解
print("\n", 14)
print("delattr(object, name)")
# delattr(object, "s")
# 这个函数和setattr()有关. 参数是一个对象和一个字符串.
# 字符串必须是对象的某个属性的名字. 只要对象允许, 这个函数删除该名字对应的属性.
# delattr(x, "foobar")等同于del x.foobar.
# oop环境
print("\n", 15)
print("class dict(**kwarg)")
print("class dict(mapping, **kwarg)")
print("class dict(iterable, **kwarg)")
print(dict(zip([1, 2, 3], ["a", "b", "c"])))
# create a dictionary
print("\n", 16)
print("dir([object])")
print(dir())
# 如果没有参数, 返回当前本地作用域内的名字列表.
# 如果有参数, 尝试返回参数所指明对象的合法属性和方法的列表.
print("\n", 17)
print("divmod(a, b)")
print(divmod(7, 2)) # >>> (3, 1) return a tuple
for i in divmod(7, 2):
print(i)
# Take two (non complex) numbers as arguments
# return a pair of numbers consisting of their quotient and remainder
print("\n", 18)
print("enumerate(iterable, start=0)")
l = ["apple", "banana", "pear", "mango"]
print(list(enumerate(l, start=1)))
# >>> [(1, "apple"), (2, "banana"), (3, "pear"), (4, "mango")]
print(dict(enumerate(l, start=1)))
# >>> {1: "apple", 2: "banana", 3: "pear", 4: "mango"}
# argument: iterable object, default start with 0.
# return a paired value, but needs a container (list, dict, etc)
print("\n", 19)
print("eval(expression, globals=None, locals=None)")
# eval(expression, globals=None, locals=None)
print("see in ZSimpleLearnings/py_eval_exec.py")
print("AVOID USING!!!!")
print("\n", 20)
print("exec(object[, globals[, locals]])")
# exec(object[, globals[, locals]])
print("see in ZSimpleLearnings/py_eval_exec.py")
print("AVOID USING!!!!")
print("\n", 21)
print("filter(function, iterable)")
print("see in ZSimpleLearnings/py_high_order_functions.py")
print("\n", 22)
print("class float([x])")
print(float(25)) # >>> 25.0
print(float("-25")) # >>> -25.0
# convert an int or number in string to a float number
# Also take strings below:
# sign ::= "+" | "-"
# infinity ::= "Infinity" | "inf"
# nan ::= "nan"
# numeric_value ::= floatnumber | infinity | nan
# numeric_string ::= [sign] numeric_value
float("inf") # 正无穷大
float("-inf") # 负无穷大(无穷小)
float("nan") # not a value
print("\n", 23)
print("format(value[, format_spec])")
# "d" for integer
print(format(123, "d")) # must be integer
# "f" for float arguments
print(format(123, "f")) # 总是六位小数
# "b" for binary format
print(format(12, "b"))
# d, f and b are type
# integer
print(format(1234, "*>+7,d"))
# float number
print(format(123.4567, "^-09.3f")) # 暂时不理解
# 四舍五入与round类似
print(format(1.5, "0.0f")) # >>> 2
print(format(2.5, "0.0f")) # >>> 2
print(format(1.55, "0.1f")) # >>> 1.6
print(format(2.55, "0.1f")) # >>> 2.5
print("\n", 24)
print("class frozenset([iterable])")
# print(frozenset([1, 2, 3]))
print("see in ZStandardLibrary/learn_set_operation.py")
print("\n", 25)
print("getattr(object, name[, default])")
print("see in ZSimpleLearnings/py_getattr.py")
print("\n", 26)
print("globals()")
print(globals())
# returns the dict of the current module
print("\n", 27)
print("hasattr(object, name)")
# 参数是一个对象和一个字符串. 如果字符串是对象的一个属性, 则返回True, 否则返回False.
# 它的实现是通过调用getattr(object, name)并查看它是否引发一个AttributeError
# 常用于运行函数前做一个boolean判断,如果True即运行函数,False则不运行
lst = [1,2,3]
print(hasattr(lst, "append")) # >>> True
print(hasattr(lst, "insert")) # >>> True
strin = "abc"
print(hasattr(strin, "isalpha")) # >>> True
print(hasattr(strin, "ascii_lowercase")) # >>> False
import string
print(hasattr(string, "ascii_lowercase")) # >>> True
# 更多用于oop环境
print("\nOOP test")
class Cls():
    # Demo class for the hasattr()/callable() introspection examples below.
    attr1 = "attribute 1"  # class attribute: visible on the class and all instances
    def __init__(self, attr2):
        # instance attribute: exists only on instances, not on the class
        self.attr2 = attr2
    def meth1(self):
        # attr3 is a plain local variable, NOT an attribute of the instance
        attr3 = "attribute 3"
        return "method 1"
    def meth2(self, num):
        # squares its argument; shows that bound methods also pass hasattr()
        return num**2
obj = Cls("attribute 2")
print(obj.attr1) # >>> attr1
print(obj.attr2) # >>> at3
# print(obj.attr3) # AttributeError
# print(obj.meth1.attr3) # still AttributeError, a method is not an object therefore has no attributes
print(obj.meth1()) # >>> method 1
print(obj.meth2(6)) # >>> 36
print(type(obj.attr1)) # >>> <class "str">
print(type(obj.attr2)) # >>> <class "str">
print(type(obj.meth1)) # >>> <class "method"> # maybe this can tell whether it is a method or not?
print(type(obj.meth2)) # >>> <class "method">
print(hasattr(obj, "attr1")) # >>> True
print(hasattr(obj, "attr2")) # >>> True
print(hasattr(obj, "attr3")) # >>> False
print(hasattr(obj.meth1, "attr3")) # >>> False
print(hasattr(obj, "meth1")) # >>> True # hasattr() does not differenciate attributes and methods
print(hasattr(obj, "meth2")) # >>> True
print(set(dir(obj)) - set(dir(Cls))) # >>> {"attr2"} # only created in __init__() when an instance is made.
# Therefore Cls has no attribute as attr2 but obj has.
# for more information, check my question on STOF
# https://stackoverflow.com/q/48070833/8435726
# This problem can be solved by using callable()
def hasmethod(obj, method_name):
    """True when *obj* exposes *method_name* and it is callable (a method)."""
    try:
        return callable(getattr(obj, method_name))
    except AttributeError:
        return False
def hasattribute(obj, method_name):
    """Return True when *obj* has *method_name* as a plain (non-callable) attribute."""
    try:
        value = getattr(obj, method_name)
    except AttributeError:
        return False
    return not callable(value)
print(hasmethod(obj, "meth1")) # >>> True
print(hasmethod(obj, "attr1")) # >>> False
print(hasattribute(obj, "attr1")) # >>> True
print("\n", 28)
print("hash(object)")
# Hash values are just integers which are used to ~
# compare dictionary keys during a dictionary lookup quickly.
print(hash(181))
print(hash(181.23))
print(hash("Python"))
vowels = ("a", "e", "i", "o", "u")
print(hash(vowels))
print("\n", 29)
print("help([object])")
# help() returns the doc str
help(abs)
help(list)
# It's recommenced to try it in your interpreter when you need help to ~
# write Python program and use Python modules
print("\n", 30)
print("hex(x)")
# like bin, hex() returns an integer to hexadecimal number
# start with "0x"
print(hex(123456))
print(format(123456, "x"))
# also use format "x" to skip the "0x" prefix, use "X" to upper the letters
print("\n", 31)
print("id(object)")
# id is very similar to hash, an identity of an object
print(id(5))
print("\n", 32)
print("input([prompt])")
# input()
# to have user input with a hint as the argument
print("\n", 33)
print("class int(x)")
print("class int(x, base=10)")
# don't forget that base can be changed from 2-36.
# base为0意味着完全解释为代码字面值
a = "142AB34"
print(int(a, base=16)) # >>> 21146420
b = "10101"
print(int(b, base=2)) # >>> 21
print("\n", 34)
print("isinstance(object, classinfo)")
# 如果object是clsaainfo的一个实例(或者是classinfo的直接, 间接或虚拟子类的实例), 那么则返回true.
# 如果对象不是给定类型的对象, 则函数始终返回false
# 如果classinfo是对象类型的元组(或递归地, 其他这样的元组), 如果对象是任何类型的实例, 则返回true. 如果classinfo不是类型或类型组成的元祖和此类元组, 则会引发TypeError异常.
# oop环境
print(isinstance(123, int)) # >>> True
print(isinstance("joker", (int, list, str, tuple))) # >>> True # 只要是符合元祖中任一个都返回True
print("\n", 35)
print("issubclass(class, classinfo)")
# 如果 class 是classinfo的子类(直接, 间接或 虚拟) 则返回 true .
# 一个类被认为是它自己的子类. classinfo可以是类对象的元组, 这时classinfo中的每个类对象都会被检查.
print("\n", 36)
print("iter(object[, sentinel])")
# 返回一个迭代器对象
print("\n", 37)
print("len(s)")
# return length of a iterable
print("\n", 38)
print("class list([iterable])")
# turn iterable into a list
print("\n", 39)
print("locals()")
# 暂时不理解
print("\n", 40)
print("map(function, iterable, ...)")
print("see in ZSimpleLearnings/py_high_order_functions.py")
print("\n", 41, 43)
print("max(iterable, *[, key, default])")
print("max(arg1, arg2, *args[, key])")
print("min(iterable, *[, key, default])")
print("min(arg1, arg2, *args[, key])")
print("see in ZSimpleLearnings/py_max_min.py")
print("\n", 42)
print("memoryview(obj)")
# Return the object's memory address?
# memoryview: a bytes-like object is required, not "str"
print(memoryview(b"abcde")) # >>> <memory at 0x7f3271528048>
print(memoryview("abcde".encode("utf-8"))) # >>> <memory at 0x7f3271528048>
print("\n", 44)
print("next(iterator[, default])")
# consume the next item in an iterator
print("see in ZStandardLibrary/learn_itertools.py")
print("\n", 45)
print("class object()")
# oop 环境
print("\n", 46)
print("oct(x)")
# 将整数转换为八进制字符串. 结果是一个合法的Python表达式.
print(oct(120)) # >>> 0o170 "o" means 八进制
print(oct(1999)) # >>> 0o3717
print("\n", 47)
print("open(file, mode="r", buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)")
print("see in ZCodeSnippets/write_and_write_back.py")
print("\n", 48)
print("ord(c)")
# 给定一个表示一个Unicode字符的字符串, 返回一个表示该字符的Unicode代码点的整数.
print(ord("a")) # >>> 97
print(ord(" ")) # >>> 32
print(ord("#")) # >>> 35
# refer ascii table (0-127)
# but also support more than 0-127
print(chr(1223)) # >>> Ӈ
print(ord("Ӈ")) # >>> 1223
# The valid range for the argument is from 0 through 1,114,111
print("\n", 49)
print("pow(x, y[, z])")
# return x**y
# if z is supplied, return (x**y) % z, computed efficiently
# BUG FIX: the first example must omit z -- the original printed pow(2, 3, 7)
# twice, so the line annotated ">>> 8" actually printed 1.
print(pow(2, 3)) # >>> 8 (2**3 = 8)
print(pow(2, 3, 7)) # >>> 1 (8 % 7 == 1)
print("\n", 50)
print("print(*objects, sep=" ", end='\n', file=sys.stdout, flush=False)")
print("hello world")
print("\n", 51)
print("class property(fget=None, fset=None, fdel=None, doc=None)")
# oop 环境
print("\n", 52)
print("range(stop)")
print("range(start, stop[, step])")
print("well understood")
print("\n", 53)
print("repr(object)")
# 返回某个对象可打印形式的字符串.
# 主要作用是传送出一个可以给eval()运行的字符串
a = [1,2,3]
print(repr(a)) # >>> [1, 2, 3]
print(a)
b = range(5)
print(repr(b)) # >>> range(0, 5)
print(b)
c = "abcd"
print(repr(c)) # >>> "abcd" difference is that it will show ""
print(c) # >>> abcd
import datetime
today = datetime.datetime.now()
# Prints readable format for date-time object
print(today) # >>> 2017-12-21 20:12:24.180042
# prints the official format of date-time object
print(repr(today)) # >>> datetime.datetime(2017, 12, 21, 20, 12, 24, 180042)
print("\n", 54)
print("reversed(seq)")
# 返回一个反向iterator
a = [1,2,3]
print(list(reversed(a)))
print("see in ZSimpleLearnings/py_sort_sorted_reverse.py")
print("\n", 55)
print("round(number[, ndigits])")
# 当一个值刚好在两个边界的中间的时候, round 函数返回离它最近的偶数.
print(round(1.5, 0)) # >>> 2.0
print(round(2.5, 0)) # >>> 2.0
# 也就是说, 对1.5或者2.5的舍入运算都会得到2.
print("\n", 56)
print("class set([iterable])")
# create a set object
print("\n", 57)
print("setattr(object, name, value)")
# 它与getattr()相对应. 参数是一个对象, 一个字符串和一个任意值.
# 字符串可以是一个已存在属性的名字也可以是一个新属性的名字.
# 该函数将值赋值给属性, 只要对象允许.
# OOP环境
print("\n", 58)
print("class slice(stop)")
print("class slice(start, stop[, step])")
# 返回一个slice对象, 表示由索引range(start, stop, step)指出的集合. start和step参数默认为None
# slice a list (切片)
a = [1,2,3,4]
b = a[0:2]
print(b) # >>> [1, 2]
# 但是同样的a[0:2] = [8,9] 则不是在用切片, 而是批量修改a[n for n in range(0, 2)]
print("\n", 59)
print("sorted(iterable[, key][, reverse])")
# sort from small to large (num, alpha)
print("see in ZSimpleLearnings/py_sort_sorted_reverse.py")
print("\n", 60)
print("@ staticmethod(function)")
# 返回function的一个静态方法.
print("\n", 61)
print("class str(object="")")
print("class str(object=b'', encoding='utf-8', errors='strict'")
# turn object into a string version
print("\n", 62)
print("sum(iterable[, start])")
# return the sum of an iterable
# 对于某些使用情况, 有很好的替代sum()的方法.
# 连接字符串序列的首选快速方法是调用"".join(sequence).
# Learn from STOF:
# https://stackoverflow.com/q/52007283/8435726
# Actually sum(a, b) is equal to
# for i in a:
# b += i
# return b
# So that:
a = [1,2,3,4]
print(sum(a)) # >>> 10
print(sum(a, 2)) # >>>12 # equals to 2 + sum(a)
# But default of start is 0, which in an int.
# So if you want to sum up other types of objects, you must change start
# Example: use sum to merge list
a, b = [1], [2]
# print(sum(a, b))
# >>> TypeError: can only concatenate list (not "int") to list
# print(sum([a, b]))
# # >>> TypeError: unsupported operand type(s) for +: "int" and 'list
print(sum([a, b], []))
# >>> [1, 2]
# equals to [] + [1] + [2]
print("\n", 63)
print("super([type[, object-or-type]])")
# 返回一个代理对象, 它委托方法给父类或者type的同级类.
# 这对于访问类中被覆盖的继承方法很有用.
# oop环境
print("\n", 64)
print("tuple([iterable])")
# create a tuple from an iterable
print("\n", 65)
print("class type(object)")
print("class type(name, bases, dict)")
# return the type of the object
print("\n", 66)
print("vars([object])")
print("see in ZSimpleLearnings/py_vars.py")
print("\n", 67)
print("zip(*iterables)")
# very use full to link a group of arrays
print("see in ZSimpleLearnings/py_zip.py")
print("\n", 68)
print("__import__(name, globals=None, locals=None, fromlist=(), level=0)")
# 用于import任何文件名
# surpose a file named 05_if1_guess_number.py, we want to import this file
# mymodule = __import__("05_if1_guess_number")
|
994,549 | 8abb56393f80fa6578eaf5695ceee4632425cdbb | class Interval:
    def __init__(self,s=0,e=0):
        """Store the interval endpoints: *s* is the start, *e* is the end."""
        self.start=s
        self.end=e
class Solution:
    """LeetCode 56 'Merge Intervals': collapse all overlapping intervals."""
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]

        Sorts *intervals* in place by start, then sweeps left to right:
        each interval either extends the last merged interval or starts a
        new one.  The returned objects are the input objects, mutated in
        place when their right edge is extended.
        """
        intervals.sort(key=lambda iv: iv.start)
        merged = []
        for current in intervals:
            if merged and merged[-1].end >= current.start:
                # Overlaps the last merged interval: extend its right edge.
                merged[-1].end = max(merged[-1].end, current.end)
            else:
                # Disjoint from everything merged so far: start a new run.
                merged.append(current)
        return merged
if __name__=='__main__':
    # Read n intervals (assumed one whitespace-separated "start end" pair per
    # line -- TODO confirm input format), merge them, and print the result.
    n = int(input())
    intervals = []
    for _ in range(n):
        # BUG FIX: the original used tem[0]/tem[1], i.e. the first two
        # *characters* of the raw input line.  That breaks for multi-digit
        # numbers and feeds strings into the "%d" format below (TypeError).
        parts = input().split()
        intervals.append(Interval(int(parts[0]), int(parts[1])))
    result = Solution().merge(intervals)
    for iv in result:
        print("%d, %d" % (iv.start, iv.end))
|
994,550 | 97fe61f190289d3a51f5435d208d1fba673e6b83 | x=input("Enter text to encrypt: ")
key=int(input("Enter Caesar's key (0-25): "))
def rotate(string1,k):
    """Caesar-shift every lowercase letter of *string1* by *k* positions.

    FIX: characters outside a-z (spaces, digits, punctuation) are now passed
    through unchanged instead of being silently dropped, so
    unrotate(rotate(text, k), k) == text holds for real sentences.
    Negative k shifts backwards; k is taken modulo 26.
    """
    shifted = []
    for ch in string1:
        if "a" <= ch <= "z":
            shifted.append(chr((ord(ch) - ord("a") + k) % 26 + ord("a")))
        else:
            # Pass non-lowercase characters through untouched.
            shifted.append(ch)
    return "".join(shifted)
def unrotate(string1,k):
    """Invert rotate(): shift the letters of *string1* back by *k* positions."""
    return rotate(string1, -k)
print (rotate(x,key))
print (unrotate(x,key))
|
994,551 | 648c15a119dd91869e1b2f8804473802c8278e1c | # for loops
# Odd numbers from 1 to 19 (the stop value 20 is excluded).
for n in range(1,20,2): # range(5) start with 0 until and including 4 !!
    print(n)
    # do more stuff in here
# NOTE: n keeps the last loop value (19) after the loop ends.
print("Whew all done",n)
print("\n")
print("Printing my food")
food = "kartupelis"
# Iterating a string yields one character at a time.
for c in food:
    print(c, "::", end="") # with end="" I suppress the newline between characters
my_list = [1,2,3,6,7,2,19,645,5453,100, -50]
total = 0
# Manual running sum over the list...
for num in my_list:
    print(num)
    total += num
print(total)
# ...and the builtin that does the same thing.
print(sum(my_list))
# Manual maximum search (idiomatically "record is None", not "== None").
record = None
for num in my_list:
    if record == None:
        record = num
    if num > record:
        record = num
print("The record is held by", record)
print(max(*my_list)) # we can unroll the list and use max to find max
for n in (1,6,7,8,-5,10): # we loop through a tuple
    print(n)
|
994,552 | 410ad5c1c480a12b461d70a1b92671163355f73b | import unittest
import arrays
import conversion
import floats
import ifelse
import integers
import strings
ArrayTest = arrays.ArrayTest
ConversionTest = conversion.ConversionTest
FloatsTest = floats.FloatsTest
IfElseTest = ifelse.IfElseTest
IntegersTest = integers.IntegersTest
StringsTest = strings.StringsTest
class PavaTest(unittest.TestCase):
    """Aggregate test case; the real tests live in the imported modules."""
    def test_general(self):
        """Intentionally a no-op placeholder."""
if __name__ == "__main__":
unittest.main()
|
994,553 | 8e85b62c8b6ad2ac0bac23093ab3fe15c2ad9711 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """CNN regressing 136 keypoint values from a 224x224 grayscale image.

    Five conv+ReLU+max-pool stages followed by a three-layer fully connected
    head.  Spatial sizes follow output = (W - F)/S + 1, then halve at each
    2x2 pool.  BUG FIX (file level): forward() uses ``F`` but the module never
    imported ``torch.nn.functional as F`` -- the import is added at the top
    of the file.
    """
    def __init__(self):
        super(Net, self).__init__()
        # conv1: 1 -> 32 channels, 4x4 kernel: 224 -> 221, pooled -> 110
        self.conv1 = nn.Conv2d(1, 32, 4)
        # Shared 2x2, stride-2 max-pool applied after every conv stage.
        self.pool = nn.MaxPool2d(2, 2)
        # conv2: 32 -> 64, 3x3 kernel: 110 -> 108, pooled -> 54
        self.conv2 = nn.Conv2d(32, 64, 3)
        # conv3: 64 -> 128, 2x2 kernel: 54 -> 53, pooled -> 26
        self.conv3 = nn.Conv2d(64, 128, 2)
        # conv4: 128 -> 256, 1x1 kernel: 26 -> 26, pooled -> 13
        self.conv4 = nn.Conv2d(128, 256, 1)
        # conv5: 256 -> 512, 1x1 kernel: 13 -> 13, pooled -> 6
        self.conv5 = nn.Conv2d(256, 512, 1)
        # Head: flatten the 512 * 6 * 6 feature maps, dropout between layers.
        self.fc1 = nn.Linear(512*6*6, 1028)
        self.fc1_drop = nn.Dropout(p=0.2)
        self.fc2 = nn.Linear(1028, 256)
        self.fc2_drop = nn.Dropout(p=0.3)
        # finally, create 136 output channels (for the 136 keypoint values)
        self.fc3 = nn.Linear(256, 136)

    def forward(self, x):
        """Map a (N, 1, 224, 224) batch to (N, 136) keypoint coordinates."""
        # five conv/relu + pool layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        # Flatten every feature map into one vector per sample (Keras Flatten).
        x = x.view(x.size(0), -1)
        # three linear layers with dropout in between
        x = F.relu(self.fc1(x))
        x = self.fc1_drop(x)
        x = F.relu(self.fc2(x))
        x = self.fc2_drop(x)
        x = self.fc3(x)
        # No final activation: raw values suitable for a regression loss.
        return x
|
994,554 | 92e83a0658f296f830679db2641c499f06ffa911 | from argparse import ArgumentParser
from glob import glob
import os
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
def load_image(filename):
    """Load *filename* and return it as an RGB PIL image.

    On an unreadable/undecodable file, prints the path, pauses for the
    operator (interactive input), and falls through to an implicit None --
    callers must handle that.
    """
    try:
        with open(filename, "rb") as f:
            image = Image.open(f)
            return image.convert("RGB")
    # BUG FIX: PIL raises OSError (e.g. UnidentifiedImageError) for broken
    # files.  UserWarning is a warning *category* and is never raised, so the
    # original except clause could not trigger and errors crashed the run.
    except OSError as e:
        print(filename)
        # Deliberate interactive pause so the operator can inspect the file.
        input("Something wrong happens while loading image: {} {}".format(filename, str(e)))
# Example Model definition
class Model(object):
    """Thin wrapper around an animecv image-folder encoder running on CUDA."""
    def __init__(self, dirname):
        # Imported lazily so the module can load without animecv installed.
        import animecv
        self.encoder = animecv.general.create_OML_ImageFolder_Encoder(dirname)
        self.encoder.to("cuda")
    # img: PIL image
    def encode(self, img):
        """Encode a single PIL image into a 1-D numpy feature vector."""
        vecs = self.encoder.encode([img]).detach().cpu().numpy()
        return vecs[0]
if __name__=="__main__":
    parser = ArgumentParser()
    parser.add_argument("--test-pairs", help="CSV file which lists test image pairs.")
    parser.add_argument("--test-dataset-dir", help="Directory of test images.")
    parser.add_argument("--ignore-list", default=None, help="List of images which should be ignored during pair sampling.")
    parser.add_argument("--out-fn", default="adversarial.csv")
    parser.add_argument("--n-negative", type=int, default=3000)
    args = parser.parse_args()
    # Phase 1: build the pair CSV only if it does not exist yet.
    if not os.path.exists(args.ignore_list) if False else not os.path.exists(args.out_fn):
        if args.ignore_list is not None:
            df = pd.read_csv(args.ignore_list, header=None)
            ignore_list = set(df.values.flatten().tolist())
        else:
            ignore_list = set()
        # Generate adversarial negative pairs.
        model = Model("0206_resnet152")
        images = glob(os.path.join(args.test_dataset_dir, "**"), recursive=True)
        images = [fn for fn in images if os.path.isfile(fn)]
        # The directory directly above each file is treated as its label.
        labels = [fn.split(os.path.sep)[-2] for fn in images]
        vecs = []
        for fn in tqdm(images):
            img = load_image(fn)
            vecs.append(model.encode(img).reshape((1,-1)))
        vecs = np.concatenate(vecs, axis=0)
        # Pairwise dot products of the (apparently unnormalised) embeddings.
        scores = np.sum(vecs[:,np.newaxis,:] * vecs[np.newaxis,:,:], axis=2)
        negative_pairs = []
        n_img = scores.shape[0]
        # Flat indices of all pairs, most similar first.
        sorted_idx = np.argsort(-scores, axis=None).tolist()
        strip_len = len(args.test_dataset_dir + os.path.sep)
        # Take the hardest (most similar) cross-label pairs as negatives,
        # skipping the diagonal / duplicate orderings (i<=j) and ignored files.
        while len(negative_pairs) < args.n_negative:
            idx = sorted_idx.pop(0)
            i,j = idx // n_img, idx % n_img
            if i<=j:
                continue
            if labels[i] == labels[j]:
                continue
            if os.path.basename(images[i]) in ignore_list:
                continue
            if os.path.basename(images[j]) in ignore_list:
                continue
            # Row layout: (pathA, pathB, label, human_prediction, invalid).
            negative_pairs.append((images[i][strip_len:], images[j][strip_len:], 0, -1, 0))
        # Reuse positive pairs.
        positive_pairs = []
        df = pd.read_csv(args.test_pairs)
        for pathA, pathB in df[df["label"]==1][["pathA", "pathB"]].values:
            #print(pathA, pathB)
            positive_pairs.append((pathA, pathB, 1, -1, 0))
        pairs = shuffle(positive_pairs + negative_pairs)
        df = pd.DataFrame(pairs, columns=["pathA", "pathB", "label", "human_prediction", "invalid"])
        df.to_csv(args.out_fn, index=False)
    else:
        print("Reload")
        df = pd.read_csv(args.out_fn)
    # Phase 2: interactive labelling; human_prediction == -1 means unlabelled.
    for i_row in tqdm(list(range(df.values.shape[0]))):
        pathA, pathB, label, pred, invalid = df.loc[i_row].values
        #print(pathA, pathB)
        if pred >= 0:
            continue
        else:
            im1 = np.array(Image.open(os.path.join(args.test_dataset_dir, pathA)))
            im2 = np.array(Image.open(os.path.join(args.test_dataset_dir, pathB)))
            ax = plt.subplot(1,2,1)
            ax.imshow(im1)
            ax = plt.subplot(1,2,2)
            ax.imshow(im2)
            plt.draw()
            plt.pause(0.001)
            cmd = input("correct?[y/n]: ")
            if cmd=="y":
                pred = 1
            elif cmd=="n":
                pred = 0
            else:
                # Any other answer: record 0 but flag the row as invalid.
                pred = 0
                df.loc[i_row, "invalid"] = 1
            df.loc[i_row, "human_prediction"] = pred
            # Persist after every answer so progress survives interruption.
            df.to_csv(args.out_fn, index=False)
plt.close() |
994,555 | 060daac13ed163d889911f72faa1a574491baa97 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# the above line is to avoid 'SyntaxError: Non-UTF-8 code starting with' error
'''
Created on
Course work:
@author: raja
Source:
'''
# Import necessary modules
def get_country_details(name):
    """Return a record dict (name, capital_city, population_in_millions).

    *name* must be an exact lowercase key ('canada' or 'india'); any other
    value yields an "N/A" placeholder record.
    """
    known = {
        'canada': {
            "name" : "Canada",
            "capital_city" : "Ottawa",
            "population_in_millions" : 36.71
        },
        'india': {
            "name" : "India",
            "capital_city" : "New Delhi",
            "population_in_millions" : 1339
        },
    }
    fallback = {
        "name" : "N/A",
        "capital_city" : "N/A",
        "population_in_millions" : "N/A"
    }
    return known.get(name, fallback)
if __name__ == '__main__':
pass
|
994,556 | b2d4588a72bf7a91ce965a8a8415bcb735467d22 | #!/usr/bin/env python
import time
import pigpio
# Connect to the local pigpio daemon and drive GPIO 20 as an output.
pi = pigpio.pi()
pin=20
pi.set_mode(pin,pigpio.OUTPUT)
# Toggle the pin forever: 10 ms high, 10 ms low (~50 Hz square wave).
while True:
    pi.write(pin, 1)
    print('high')
    time.sleep(.01)
    pi.write(pin,0)
    print('low')
    time.sleep(.01)
|
994,557 | 38eb795432e44f6cc99a3677a1c23cd9f749bb12 | #内联回调函数
#问题
#当你编写使用回调函数的代码的时候,担心很多小函数的扩张可能会弄乱程序控制流。你希望找到某个方法来让代码看上去更像是一个普通的执行序列
#解决
#通过使用生成器和协程可以使得回调函数内联在某个函数中
#为了演示说明,假设你有如下所示的一个执行某种计算任务然后调用一个回调函数的函数
from queue import Queue
from functools import wraps
def apply_async(func, args, *, callback):
    """Run func(*args) and hand its result to *callback*.

    Stand-in for a real async executor: the computation and the callback
    both happen synchronously on the caller's thread.
    """
    callback(func(*args))
#接下来让我们看一下下面代码,他包含了一个Async类和一个inlined_async装饰器
class Async:
    """Bundle a deferred call: the function plus the positional args to apply."""
    def __init__(self,func,args):
        self.func=func
        self.args=args
def inlined_async(func):
    """Decorator that drives generator *func* so callbacks read inline.

    The wrapped generator yields Async(func, args) requests.  Each request
    is passed to apply_async with the queue's put() as the callback, so the
    callback result lands on the queue and is fed back into the generator
    via send() on the next loop iteration -- a simple trampoline.
    """
    @wraps(func)
    def wrapper(*args):
        f=func(*args)
        result_queue=Queue()
        # Prime the pump: the first send() into a fresh generator must be None.
        result_queue.put(None)
        while True:
            result=result_queue.get()
            try:
                # Resume the generator with the previous result; it yields the
                # next Async request, or raises StopIteration when finished.
                a=f.send(result)
                apply_async(a.func,a.args,callback=result_queue.put)
            except StopIteration:
                break
    return wrapper
#这两个代码片段允许你使用yield语句内联回调步骤。比如
def add(x,y):
    """Return x + y; works for numbers and for concatenatable sequences."""
    total = x + y
    return total
@inlined_async
def test():
    """Demo coroutine: each yield suspends until the Async call's result arrives."""
    r=yield Async(add,(2,3))
    print(r)
    r=yield Async(add,('hello','world'))
    print(r)
    # Ten more round-trips through the trampoline.
    for n in range(10):
        r=yield Async(add,(n,n))
        print(r)
    print('goodbye')
test()
#你会发现,除了那个特别的装饰器和yield语句外,其他地方并没有出现任何的回调函数(其实是在后台定义的)
#没懂... |
994,558 | c9b541b8e77aff46dd61023323ee9ef643f3ad76 | # coding=utf-8
"""
Logging package.
"""
import logging
# Module-wide "trader" logger: DEBUG level, timestamped lines to stderr.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
LOG = logging.getLogger("trader")
LOG.setLevel(logging.DEBUG)
LOG.addHandler(handler)
|
994,559 | 110d5ca2f70dd0dd58ccd5c529200599195dc646 |
import pygame
from pygame import *
import random as ra
pygame.init()
white = (255, 255, 255)
black = (0, 0, 0)
size = width, height = 800, 800
screen = pygame.display.set_mode(size)
points = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
x = 0
y = 0
flag = 1
lst = []
lst_mine = []
lst_android = []
count = 0
text = pygame.font.SysFont('tic', 50)
Play_score = 0
AI_score = 0
def draw_restart():
    """Draw the "AGAIN?" replay button (outline box + label) below the board."""
    steps = [(400, 450), (400, 500), (550, 500), (550, 450)]
    pygame.draw.polygon(screen, black, steps, 1)
    text_x = text.render("AGAIN?", 1, black)
    screen.blit(text_x, (410, 460))
def draw_img(player, x, y):
    """Draw one mark at pixel centre (x, y): circle for player 1, square otherwise."""
    if player == 1:
        pygame.draw.circle(screen, black, (x, y), 40, 1)
        # (original comment here was mojibake from a bad encoding)
    else:
        pygame.draw.rect(screen, black, ((x - 20, y - 20), (50, 50)), 1)
def draw_score():
    """Render the PLAYER / AI win counters at the top right of the window."""
    # NOTE(review): the font name below is mojibake from a bad encoding;
    # SysFont falls back to the default font for unknown names.
    text_1 = pygame.font.SysFont('����', 30)
    text_player_score = text_1.render('PLAYER ' + str(Play_score), 1, black)
    text_AI_score = text_1.render('AI ' + str(AI_score), 1, black)
    screen.blit(text_player_score, (410, 10))
    screen.blit(text_AI_score, (410, 40))
def draw_back():
    """Clear the window and draw the empty 3x3 grid (100px cells from (100,100))."""
    screen.fill(white)
    # Outer border of the board.
    steps = [(100, 100), (100, 400), (400, 400), (400, 100)]
    pygame.draw.polygon(screen, black, steps, 1)
    # Inner grid: two horizontal and two vertical lines.
    pygame.draw.lines(screen, black, False, [(100, 200), (400, 200)])
    pygame.draw.lines(screen, black, False, [(100, 300), (400, 300)])
    pygame.draw.lines(screen, black, False, [(200, 100), (200, 400)])
    pygame.draw.lines(screen, black, False, [(300, 100), (300, 400)])
def check_win(tab, board=None):
    """Return True if marker *tab* fills a complete row, column or diagonal.

    *board* defaults to the module-level 3x3 ``points`` grid, so existing
    ``check_win(tab)`` calls behave exactly as before; passing an explicit
    3x3 list makes the function reusable (and testable) on any board.
    """
    if board is None:
        board = points
    lines = []
    for i in range(3):
        lines.append([board[i][0], board[i][1], board[i][2]])  # line along index i
        lines.append([board[0][i], board[1][i], board[2][i]])  # perpendicular line i
    lines.append([board[0][0], board[1][1], board[2][2]])      # main diagonal
    lines.append([board[0][2], board[1][1], board[2][0]])      # anti-diagonal
    return any(all(cell == tab for cell in line) for line in lines)
def winner():
    """Return 100 if the AI has won, -100 if the player has won, else None."""
    # AI marker is 100, player marker is 1; AI is checked first.
    for marker, score in ((100, 100), (1, -100)):
        if check_win(marker):
            return score
    return None
def is_full():
    """Count occupied cells on the global 3x3 board (9 means full).

    Despite the name this returns the COUNT, not a boolean; callers compare
    the result against 9.
    """
    return sum(1 for row in points for cell in row if cell != 0)
def AI_move():
    """Pick the AI's next cell on the global 3x3 ``points`` board.

    Priority: (1) a cell that wins for the AI (marker 100) immediately,
    (2) a cell that blocks the player (marker 1) from winning, (3) the
    centre, (4) a random free corner, (5) a random free edge.  Returns an
    (i, j) tuple; the caller writes the 100 marker into that cell.
    """
    # 1) Can the AI win right now?  Try each empty cell tentatively.
    for i in range(3):
        for j in range(3):
            if points[i][j] == 0:
                points[i][j] = 100
                if check_win(100):
                    # NOTE(review): the trial marker is left in place here;
                    # harmless because the caller re-writes this cell with 100.
                    return (i, j)
                else:
                    points[i][j] = 0
    # 2) Must the AI block the player's immediate win?
    for i in range(3):
        for j in range(3):
            if points[i][j] == 0:
                points[i][j] = 1
                if check_win(1):
                    # NOTE(review): leaves a temporary player marker behind;
                    # also harmless -- the caller overwrites this cell with 100.
                    return (i, j)
                else:
                    points[i][j] = 0
    # 3) Take the centre if it is free.
    if points[1][1] == 0:
        return (1, 1)
    # 4) Otherwise pick a random free corner.
    temp = []
    for i in (0, 2):
        for j in (0, 2):
            if points[i][j] == 0:
                temp.append((i, j))
    if len(temp) != 0:
        return ra.choice(temp)
    # 5) Finally pick a random free edge (original comment was mojibake).
    for i in ((0, 1), (1, 0), (1, 2), (2, 1)):
        if points[i[0]][i[1]] == 0:
            temp.append((i[0], i[1]))
    if len(temp) != 0:
        return ra.choice(temp)
def draw_all():
    """Redraw the whole frame: grid, scores, recorded marks, result, replay box."""
    draw_back()
    draw_score()
    # Replay every recorded move; entries are (player, pixel_x, pixel_y).
    for i in lst:
        draw_img(i[0], i[1], i[2])
    # flag encodes game state: 100 = AI won, -100 = player won, 123 = tie.
    if flag == 100:
        text_conent = text.render("AI win", 1, black)
        screen.blit(text_conent, (220, 50))
    elif flag == -100:
        text_conent = text.render("You win", 1, black)
        screen.blit(text_conent, (220, 50))
    elif flag == 123:
        text_conent = text.render("TIE", 1, black)
        screen.blit(text_conent, (220, 50))
    # Any finished game offers the restart button.
    if flag == 123 or flag == 100 or flag == -100:
        draw_restart()
def play():
    """Main game loop: handle clicks, alternate player/AI moves, redraw.

    flag values: 1 = player's turn, -1 = AI's turn, 100/-100 = AI/player
    won, 123 = tie.
    """
    global flag, AI_score, Play_score
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
            if event.type == MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                # Click on the "AGAIN?" button: clear moves and reset state.
                if 400 < x < 550 and 450 < y < 500:
                    lst.clear()
                    for i in range(3):
                        for j in range(3):
                            points[i][j] = 0
                    flag = 1
                # Click inside the 300x300 board area.
                if 100 <= x <= 400 and 100 <= y <= 400:
                    # Convert pixels to 0-2 cell indices, then back to the
                    # pixel centre used when drawing the mark.
                    x = (x - 100) // 100
                    y = (y - 100) // 100
                    l_x = x * 100 + 150
                    l_y = y * 100 + 150
                    # player
                    if flag == 1:
                        if is_full() != 9:
                            if points[x][y] == 0:
                                points[x][y] = 1
                                lst.append((1, l_x, l_y))
                                if winner() == -100:
                                    flag = -100
                                    Play_score += 1
                                    print('player win')
                                else:
                                    flag = -1
                        else:
                            flag = 123
                    # AI answers immediately after a successful player move.
                    if flag == -1:
                        if is_full() != 9:
                            xx, yy = AI_move()
                            l_x = xx * 100 + 150
                            l_y = yy * 100 + 150
                            points[xx][yy] = 100
                            lst.append((2, l_x, l_y))
                            if winner() == 100:
                                flag = 100
                                AI_score += 1
                                print('AI win')
                            else:
                                flag = 1
                        else:
                            flag = 123
        draw_all()
        pygame.display.flip()
if __name__ == '__main__':
play() |
994,560 | 79dcb9cac76973b1bfbdfd0d68486c4cdcd246e9 | print("This program will compute the student average")
print(" ")
# Read the four term scores and average them with equal weight.
pre = float(input("Enter your Prelims Score : "))
mid = float(input("Enter your Midterms Score : "))
sem = float(input("Enter your Semis Score : "))
finals = float(input("Enter your Finals Score : "))
avg = (pre + mid + sem + finals) / 4
print("Your average is {}!".format(avg)) |
994,561 | 20626f29619db47758062a2d54177a1a944756f6 | fin = open('input.txt')
words = []
A = {}
# Flatten the input file into one list of whitespace-separated tokens.
for now in [i.split() for i in fin.readlines()]:
    words.extend(now)
# For each word, in order, print how many times it appeared BEFORE this
# occurrence: 0 for a first sighting, 1 for the second, and so on.
for i in words:
    A[i] = A.get(i, 0) + 1
    print(A[i] - 1, end=' ')
|
994,562 | 231a1404c0b624ea813e265050516d54457b5e30 | from setuptools import setup, find_packages
# Packaging metadata for the "tasktimer" CLI tool.
setup(
    name='tasktimer',
    version='0.0.1',
    packages=find_packages(),
    url='',
    license='',
    author='thedjaney',
    author_email='thedjaney@gmail.com',
    description='',
    # Installs a `tasktimer` executable that dispatches to tasktimer.cli:main.
    entry_points={
        'console_scripts': ['tasktimer=tasktimer.cli:main'],
    },
    install_requires=[
        'requests~=2.25.1',
        'click~=8.0.1',
    ]
)
|
994,563 | 99664a41a46e882804aeec1c68bd04d8f591d8f2 | #문자열 포맷팅
# String formatting with the % operator (comments translated from Korean)
print("I eat %d apples. " %3)
number = 10
day = "three"
# BUG FIX: the original line was missing print()'s closing parenthesis,
# which made the whole file a SyntaxError.
print("I ate %d apples. so I was sick fot %s days." % (number, day))
# Alignment and padding
print ( "%10s" % "hi" )
# BUG FIX: the original juxtaposed two string literals ("%-10sjane." 'hi'),
# which concatenates them instead of applying the % operator.
print ("%-10sjane." % 'hi')
# Decimal-point precision
print ("%0.4f" % 3.42134234)
print ("%10.4f" % 3.42134234) |
994,564 | ee373a37ff9ceb9a5cff3a4d53c64838eec40043 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 21 22:41:08 2018
@author: ubuntu
"""
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten
from keras.layers import Conv2D,MaxPooling2D
from keras.optimizers import SGD
# Synthetic data: 1000 train / 100 test RGB images (100x100x3) with 10
# random class labels, one-hot encoded for categorical crossentropy.
x_train = np.random.rand(1000,100,100,3)
y_train_label = np.random.randint(10,size=(1000,1))
y_train = keras.utils.to_categorical(y_train_label,num_classes=10)
x_test = np.random.rand(100,100,100,3)
y_test_label = np.random.randint(10,size=(100,1))
y_test = keras.utils.to_categorical(y_test_label,num_classes=10)
# VGG-style stack: two conv blocks (32 then 64 filters) with max-pooling and
# dropout, then a dense head with softmax over the 10 classes.
model = Sequential()
model.add(Conv2D(32,(3,3),activation='relu',input_shape=(100,100,3)))
model.add(Conv2D(32,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))
# SGD with Nesterov momentum; lr/decay are the legacy Keras argument names.
sgd = SGD(lr=0.01,momentum=0.9,decay=1e-6,nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
model.fit(x_train,y_train,batch_size=32,epochs=10)
score = model.evaluate(x_test,y_test,batch_size=32) |
994,565 | cb6c05dc820cd0ddf1fdd2d70188b3d53bf4fbf7 | import torch
import pandas
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
"""
Human Activity neural network implementation (Non-quantization aware training)
"""
class HARnn(torch.nn.Module):
    """Human Activity Recognition classifier (non-quantization-aware).

    Fully connected 560 -> 800 -> 400 -> 200 -> 100 -> 50 -> 6 network;
    forward() returns per-class log-probabilities.
    """
    def __init__(self):
        super(HARnn, self).__init__()
        self.linear1 = torch.nn.Linear(560, 800)
        self.linear2 = torch.nn.Linear(800, 400)
        self.linear3 = torch.nn.Linear(400, 200)
        self.linear4 = torch.nn.Linear(200, 100)
        self.linear5 = torch.nn.Linear(100, 50)
        self.linear6 = torch.nn.Linear(50, 6)

    def forward(self, x):
        """Map (N, 560) feature rows to (N, 6) log-probabilities."""
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        x = F.relu(self.linear4(x))
        x = F.relu(self.linear5(x))
        # FIX: pass dim explicitly -- log_softmax without dim is deprecated
        # and guesses the axis; dim=1 normalises over the 6 classes (same
        # result as the implicit choice for 2-D input, without the warning).
        # NOTE(review): the training loop feeds this output to
        # CrossEntropyLoss, which applies log-softmax again; consider
        # returning raw logits there.  Argmax predictions are unaffected.
        x = F.log_softmax(self.linear6(x), dim=1)
        return x
# Training data: columns 1..560 hold the accelerometer features, column 562
# the activity name.  The first row is skipped -- presumably a header
# artefact; TODO confirm against the CSV layout.
train_frame = pandas.read_csv('train.csv')
accelerometer_data = train_frame.iloc[1:, 1:561]
values = train_frame.iloc[1:, 562]
# Map activity names to class-index tensors (shape [1]) for CrossEntropyLoss.
labels = {
    "STANDING": torch.tensor([0]),
    "SITTING": torch.tensor([1]),
    "LAYING": torch.tensor([2]),
    "WALKING": torch.tensor([3]),
    "WALKING_DOWNSTAIRS": torch.tensor([4]),
    "WALKING_UPSTAIRS": torch.tensor([5])
}
def predict(model, prev_accuracy):
    """Evaluate *model* on test.csv and return its accuracy in percent.

    Saves a checkpoint to ./HARNN_MODEL whenever the accuracy beats
    *prev_accuracy* (the previous epoch's accuracy, as threaded by the
    training loop).
    """
    test_frame = pandas.read_csv('test.csv')
    test_accelerometer_data = test_frame.iloc[1:, 1:561]
    test_values = test_frame.iloc[1:, 562]
    correct_pred = 0;
    for x in range(len(test_values)):
        # One sample at a time, wrapped into a batch of size 1.
        data = torch.tensor([test_accelerometer_data.iloc[x]])
        data.requires_grad = True;
        result_tensor = model(data)
        # Predicted class = argmax over the 6 log-probabilities.
        pred = np.argmax(result_tensor.data.numpy())
        if (labels[test_values.iloc[x]][0] == pred):
            correct_pred += 1;
    accuracy = 100. * correct_pred / len(test_values)
    print('Accuracy: {}'.format(accuracy))
    # Checkpoint only when this run improves on the previous accuracy.
    if (accuracy > prev_accuracy):
        torch.save(model.state_dict(), './HARNN_MODEL')
    return accuracy
#Hard-coded parameters
epochs = 50
learning_rate = 0.01
accuracy = 0;
# Construct our model by instantiating the class defined above.
model = HARnn()
# NOTE(review): forward() already ends in log_softmax, and CrossEntropyLoss
# applies log-softmax internally -- training effectively double-softmaxes.
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate,)
for epoch in range(epochs):
    # Forward pass: Compute predicted y by passing x to the model
    # (pure per-sample SGD: one optimiser step per training row).
    for x in range(len(values)):
        data = torch.tensor([accelerometer_data.iloc[x]])
        data.requires_grad = True;
        target = labels[values.iloc[x]]
        pred = model(data)
        loss = loss_fn(pred, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if x % 2000 == 0 and x:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, x, len(values), x / len(values) * 100., loss.data))
    # Evaluate once per epoch; predict() checkpoints on improvement.
    accuracy = predict(model, accuracy)
|
994,566 | 1040981351bf495babb3970f655190fc8ce9f588 | '''
https://www.hackerrank.com/challenges/dynamic-array
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
|
994,567 | 4d60a6bbfa91e6b5269af254cadbe43076f4512a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from termcolor import *
banner = """ _____________________
| _________________ |
| | LA CHOFIS 7u7 . | |
| |_________________| |
| ___ ___ ___ ___ |
| | 7 | 8 | 9 | | + | |
| |___|___|___| |___| |
| | 4 | 5 | 6 | | - | |
| |___|___|___| |___| |
| | 1 | 2 | 3 | | x | |
| |___|___|___| |___| |
| | . | 0 | = | | / | |
| |___|___|___| |___| |
|_____________________|"""
def main():
    """Prompt for two card numbers and print a guessed BIN-style pattern.

    Takes digits at indices 9-10 of each input, halves and scales their digit
    sums, adds the two results, and appends that value plus 'xxxxxx' to the
    first 8 digits of the first input.  Prompts are Spanish (kept verbatim).
    """
    print(colored(banner, "blue"))
    cc1 = input(colored("\nIngresa una CC: ", "yellow"))
    cc2 = input(colored("\nIngresa otra CC: ", "yellow"))
    # Two digits starting at index 9 of each number.
    tres1 = cc1[9:11]
    tres2 = cc2[9:11]
    t1 = 0
    t2 = 0
    for n in tres1:
        t1 = t1 + int(n)
    for n in tres2:
        t2 = t2 + int(n)
    # Halve each digit sum, scale by 5, truncate to int.
    t1 = t1 / 2
    t2 = t2 / 2
    t1 = t1 * 5.
    t2 = t2 * 5.
    t1 = int(t1)
    t2 = int(t2)
    r = t1 + t2
    # First 8 digits of the first card form the prefix.
    b = cc1[0:8]
    print(colored("\n---------------------------", "green"))
    print(colored("Tu BIN es:", "green"), colored(b + "{}{}".format(r,"xxxxxx")))
    print(colored("---------------------------", "green"))
if __name__ == '__main__':
main()
|
994,568 | f381db855471196ce7babd45c40d10af7ecb3d97 | from setuptools import setup, find_packages
# Read runtime dependencies from requirements.txt, skipping blank lines and
# pip option lines (those starting with "--"), which are not requirements.
with open('requirements.txt') as f_in:
    lines = (l.strip() for l in f_in.readlines())
    install_requires = [l for l in lines if l and not l.startswith('--')]
# The README becomes the long description shown on package index pages.
with open('README.md') as f_in:
    long_description = f_in.read()
setup(
    name='fangorn',
    version='0.0.1',
    description='Slackbot for personal use',
    long_description=long_description,
    url='https://github.com/lwbrooke/slackbot',
    license='Apache',
    author='Logan Brooke',
    packages=find_packages(),
    # Ship the bundled default YAML configs inside the package.
    package_data={
        'fangorn': ['config_files/*.yaml']
    },
    # Installs a `fangorn` executable that dispatches to fangorn.__main__:main.
    entry_points={
        'console_scripts': [
            'fangorn = fangorn.__main__:main'
        ]
    },
    install_requires=install_requires,
    setup_requires=[
        'wheel'
    ]
)
994,569 | 078fd85df46231cd0a4b3d202aa254e4fc09b1e8 | import tensorflow as tf
import tensorflow.keras as keras
from dataframe_landmark import DataRepository
from sign_detector import SignDetector
if __name__=='__main__':
datadir = './csv' |
994,570 | 697c648190b7cf023265bce695adfdae38f74ff8 | from .types import Vector2
from . import settings as s
from . import sprites
import pygame as pg
class Menu():
    """Title-screen menu: W/S moves the selector, Enter confirms, Esc quits."""
    def __init__(self):
        # Parity of `selected` picks the menu entry (even = first entry).
        self.selected = 0
        # Why the loop ended: 'play' to start the game, 'exit' to quit.
        self.quit_state = None
        # Edge-detection latches so one key press moves exactly one step.
        self.pressed_up = False
        self.pressed_down = False
        self.selector_pos = Vector2(239, 404)
    def draw(self):
        """Blit the menu background and the selector sprite."""
        s.screen.fill((0, 0, 0))
        s.screen.blit(sprites.menu, (0, 0))
        s.screen.blit(sprites.tile_set, (self.selector_pos.x, self.selector_pos.y), sprites.SELECTOR)
    def input_actions(self):
        """Step `selected` on W/S key-down edges; releasing a key re-arms it."""
        if s.keys[pg.K_w] and not self.pressed_down and not self.pressed_up:
            self.selected += 1
            self.pressed_up = True
        if s.keys[pg.K_s] and not self.pressed_up and not self.pressed_down:
            self.selected -= 1
            self.pressed_down = True
        if not s.keys[pg.K_w]:
            self.pressed_up = False
        if not s.keys[pg.K_s]:
            self.pressed_down = False
    def check_for_quit(self):
        """Return False when the menu should close; records why in quit_state."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.quit_state = 'exit'
                return False
        if s.keys[pg.K_ESCAPE]:
            self.quit_state = 'exit'
            return False
        # Enter on an even (first) entry starts the game.
        if s.keys[pg.K_RETURN] and self.selected % 2 == 0:
            self.quit_state = 'play'
            return False
        return True
    def menu_loop(self):
        """Run the menu until the player starts the game or quits."""
        while True:
            s.keys = pg.key.get_pressed()
            s.clock.tick()
            self.input_actions()
            # Even selection -> first entry's selector slot, odd -> second.
            if self.selected % 2 == 0:
                self.selector_pos = Vector2(239, 404)
            else:
                self.selector_pos = Vector2(239, 448)
            self.draw()
            if not self.check_for_quit():
                break
            pg.display.flip()
|
994,571 | 8bc8437754703fcdae3f74f4e078d8fb61df424e | import png
import pyqrcode
from pyqrcode import QRCode
file = open ("arquivo.txt" )
for line in file.readlines():
s = line
Stringformat = s.replace("\n","")
url = pyqrcode.create(Stringformat)
url.png(Stringformat+'.png',scale=6)
file.close()
|
994,572 | 7d81c67d5e41d970b99325ed5d685b3b53dd4224 | from django.contrib import admin
from .models.comment import Comment
from .models.post import Post
from .models.nav import Nav
from .models.files import Files
from .models.meeting import Meeting
from .models.member import Member
from .models.signin import Signin
from .models.maintext import MainText
from django.db.models import F, Q, Sum
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
import nested_admin
from .models.users import Details
from simple_history.admin import SimpleHistoryAdmin
# Define an inline admin descriptor for Employee model
# which acts a bit like a singleton
class DetailsInline(admin.StackedInline):
    """Inline editor so a user's Details profile is edited on the User page."""
    model = Details
    can_delete = False  # profile rows live and die with the user record
    verbose_name_plural = 'details'
# Define a new User admin
class UserAdmin(BaseUserAdmin):
    """Stock Django user admin extended with the Details inline."""
    inlines = (DetailsInline,)
class NavInline(nested_admin.NestedStackedInline):
    """Nested inline for child navigation entries (those that have a parent)."""
    model = Nav
    list_display = (
        'title',
        'user',
        'link',
        'pub_date',
    )

    def get_queryset(self, request):
        """Restrict this inline to non-root entries only."""
        return super().get_queryset(request).filter(parent__isnull=False)
class NavAdmin(nested_admin.NestedModelAdmin):
    """Top-level navigation admin: lists root entries, edits children inline."""
    inlines = [
        NavInline,
    ]
    exclude = [
        'parent',
    ]
    list_display = (
        'title',
        'user',
        'link',
        'pub_date',
    )

    def get_queryset(self, request):
        """Show only root entries (parent is NULL); children live in the inline."""
        return super().get_queryset(request).filter(parent=None)
class SigninInline(nested_admin.NestedStackedInline):
    """Inline sign-in records; start_time is read-only in the form."""
    model = Signin
    # raw_id_fields = ('meeting',)
    # autocomplete_lookup_fields = {'fk': ('meeting',)}
    readonly_fields = (
        'start_time',
    )
    fields = (
        'meeting',
        'start_time',
        'end_time',
    )
class MemberAdmin(admin.ModelAdmin):
    """Member admin with inline sign-ins and a computed total-hours column."""
    inlines = [
        SigninInline,
    ]
    readonly_fields = ('hours', 'created', 'modified')
    fields = ('user', 'team', 'name', 'slack', 'created', 'modified', 'hours')
    list_display = (
        'team',
        'name',
        'user',
        'slack',
        'hours',
    )

    def hours(self, obj):
        """Sum of (end_time - start_time) over this member's sign-ins."""
        totals = (Signin.objects.filter(user=obj)
                  .annotate(signin_time=F('end_time') - F('start_time'))
                  .aggregate(Sum('signin_time')))
        # aggregate() returns a single-entry dict; pull out its only value.
        return next(iter(totals.values()))
class MeetingAdmin(SimpleHistoryAdmin, admin.ModelAdmin):
    """Meeting admin with simple-history change tracking and inline sign-ins;
    user and start_time are not editable in the form."""
    inlines = [
        SigninInline,
    ]
    readonly_fields = ('user', 'start_time',)
    fields = ('user', 'start_time', 'end_time')
    list_display = ('user', 'start_time', 'end_time')
# Re-register UserAdmin
# Swap Django's stock User admin for the Details-aware one defined above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Nav, NavAdmin)
# Models below use the default ModelAdmin.
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Files)
admin.site.register(MainText)
admin.site.register(Meeting, MeetingAdmin)
admin.site.register(Signin)
admin.site.register(Member, MemberAdmin)
|
994,573 | 610eb5f37a3520ef1aca9232196838166b347dc1 | import os, sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.crf import viterbi_decode
from tensorflow.contrib.crf import crf_log_likelihood
from modules import embedding, positional_encoding, \
multihead_attention, feedforward, \
label_smoothing, gelu_fast
from keras import backend as K
from keras.layers import Dense
from keras.objectives import categorical_crossentropy
from keras.metrics import categorical_accuracy as accuracy
class TransformerTagger:
    """Transformer-encoder sequence tagger with a CRF output layer (TF1 graph
    style), plus an auxiliary CNN classification head over the encoder output.

    NOTE(review): `build_dense_layer` is defined twice below; the second
    definition silently shadows the first (conv/pool) one -- confirm which is
    intended before relying on it.
    """

    def __init__(self,
                 data_preparer,
                 num_blocks=6,
                 num_heads=8,
                 hidden_units=128,
                 vocab_size=9000,
                 emb_pos_type='sin',
                 lr=1e-2):
        # data_preparer supplies vocab, tag2idx/idx2tag, pad_batch and length.
        self.data_preparer = data_preparer
        self.num_blocks = num_blocks
        self.num_heads = num_heads
        self.hidden_units = hidden_units
        self.maxlen = data_preparer.length
        self.vocab_size = vocab_size
        self.emb_pos_type = emb_pos_type  # 'sin' -> sinusoidal, else learned positions
        self.lr = lr
        # each attention head must receive an integral share of hidden_units
        assert hidden_units % num_heads == 0

    def build(self):
        """Assemble the tagging graph: embeddings -> transformer blocks ->
        projection -> CRF loss -> optimizer; returns (global_step, logits,
        train_op).

        NOTE(review): this wires only the CRF path; train_batch also fetches
        self.cly_acc / self.label_logits from the classification head, which
        is presumably built elsewhere -- verify before calling train_batch
        after a bare build().
        """
        x, y = self.create_placeholders()
        emb = self.build_embedding_layer(x)
        # masks: 1.0 where the token is not [PAD]
        outs = self.build_blocks(emb, tf.to_float(tf.not_equal(x, self.data_preparer.vocab.get('[PAD]', 0))))
        logits = self.build_linear_projection_layer(outs)
        loss = self.compute_loss(logits, y)
        g_step, train_op = self.set_optimizer(loss)
        # tf.summary.scalar('acc', self.acc)
        # tf.summary.scalar('mean_loss', self.mean_loss)
        # self.merged = tf.summary.merge_all()
        merged = self.summary({
            'acc': self.crf_acc,
            'mean_loss': self.mean_loss,
        })
        sess = self.new_session()
        self.train_writer = tf.summary.FileWriter('./logs/train', sess.graph)
        return g_step, logits, train_op

    def summary(self, var_dict={}):
        """Register each named scalar as a TF summary and merge them.

        NOTE(review): mutable default argument; harmless here because the
        dict is never mutated.
        """
        for name, var in var_dict.items():
            tf.summary.scalar(name, var)
        self.merged = tf.summary.merge_all()
        return self.merged

    def new_session(self, sess=None):
        """(Re)create self.sess and initialize all variables; an existing
        session is closed first. Pass `sess` to adopt an external session."""
        if hasattr(self, 'sess'):
            self.sess.close()
        if sess is None:
            config = tf.ConfigProto()
            config.allow_soft_placement = True
            config.gpu_options.allow_growth = True  # don't grab all GPU memory upfront
            self.sess = tf.Session(config=config)
        else:
            self.sess = sess
        self.sess.run(tf.global_variables_initializer())
        return self.sess

    def train_batch(self, x_batch, y_batch, z_batch, dropout=0.1):
        """Run one optimization step on a batch.

        x_batch: token ids, y_batch: tag ids, z_batch: one-hot class labels
        (4 classes, per create_placeholders). Returns
        (global_step, loss, crf_acc, cly_acc) and logs a summary.
        """
        feed_dict = {
            self.x : x_batch,
            self.y : y_batch,
            self.z : z_batch,
            self.dropout : dropout,
        }
        if not hasattr(self, 'sess'):
            self.new_session()
        g_step, loss, crf_acc, cly_acc, _, summary, label_loss = self.sess.run(
            [self.global_step, self.mean_loss, self.crf_acc, self.cly_acc, self.train_op, self.merged, self.label_logits],
            feed_dict=feed_dict,
        )
        #print(label_loss)
        self.train_writer.add_summary(summary, g_step)
        return g_step, loss, crf_acc, cly_acc

    def predict(self, x, pretoken=False):
        """Tag a single sentence.

        Accepts a raw string (tokenized via data_preparer) or pre-built ids;
        wraps it with [CLS]/[SEP], pads to maxlen, then Viterbi-decodes the
        CRF logits. Returns (tags, label_logits).
        """
        # learned CRF transition matrix, needed for Viterbi decoding
        matrix = self.sess.run(self.trans)
        if isinstance(x, str):
            x = self.data_preparer.sentence2idx(x, pretoken=pretoken)
        x = [[self.data_preparer.vocab['[CLS]']] + x + [self.data_preparer.vocab['[SEP]']]]
        x = self.data_preparer.pad_batch(x, self.maxlen)
        feed_dict = {
            self.x : x,
            self.dropout : 0.0,
        }
        if not hasattr(self, 'sess'):
            self.new_session()
        logits, lengths, label_logits = self.sess.run(
            [self.logits_no_cls_sep, self.lengths, self.label_logits],
            feed_dict=feed_dict,
        )
        lengths = lengths.astype(np.int32)
        paths = self.decode(logits, lengths, matrix)
        tags = [self.data_preparer.idx2tag[idx] for idx in paths[0]]
        return tags, label_logits
        #preds = np.argmax(logits, axis=-1)
        #return [self.data_preparer.idx2tag[i] for i in preds.flatten()]
        # x = [[self.data_preparer.vocab['[CLS]']] + self.data_preparer.sentence2idx(line, pretoken=pretoken) + [self.data_preparer.vocab['[SEP]']] for line in x]
        # x = self.data_preparer.pad_batch(x, self.maxlen)
        #
        # rets = []
        #
        # begin = 0
        # while begin < x.shape[0]:
        #
        #     feed_dict = {
        #         self.x : x[begin:begin+16],
        #         self.dropout : 0.0,
        #     }
        #
        #     if not hasattr(self, 'sess'):
        #         self.new_session()
        #
        #     logits, lengths = self.sess.run(
        #         [self.logits_no_cls_sep, self.lengths,],
        #         feed_dict=feed_dict,
        #     )
        #     lengths = lengths.astype(np.int32)
        #     #rets += [[self.data_preparer.idx2tag[i] for i in preds[j]] for j in range(preds.shape[0])]
        #     paths = self.decode(logits, lengths, matrix)
        #     tags = [[self.data_preparer.idx2tag[idx] for idx in path] for path in paths]
        #
        #     rets += tags
        #
        #     begin += 16
        #
        # print('PPPPPPPPPPPPPPPP')
        # print(rets)
        # print(label_logits)
        # return rets, label_logits

    def decode(self, logits, lengths, matrix):
        """
        :param logits: [batch_size, num_steps, num_tags]float32, logits
        :param lengths: [batch_size]int32, real length of each sequence
        :param matrix: transaction matrix for inference
        :return: list of best tag-id paths, one per sequence
        """
        # inference final labels usa viterbi Algorithm
        paths = []
        #small = -1000.0
        #start = np.asarray([[small]*self.num_tags +[0]])
        for score, length in zip(logits, lengths):
            # trim padding before decoding
            score = score[:length]
            logits = score
            #pad = small * np.ones([length, 1])
            #logits = np.concatenate([score, pad], axis=1)
            #logits = np.concatenate([start, logits], axis=0)
            path, _ = viterbi_decode(logits, matrix)
            #paths.append(path[1:])
            paths.append(path)
        return paths

    def save(self, path):
        """Checkpoint all variables of the current session to `path`."""
        if not hasattr(self, 'saver'):
            self.saver = tf.train.Saver()
        self.saver.save(self.sess, path)

    def load(self, path):
        """Restore variables from a checkpoint, creating a session if needed."""
        if not hasattr(self, 'saver'):
            self.saver = tf.train.Saver()
        if not hasattr(self, 'sess'):
            self.new_session()
        self.saver.restore(self.sess, path)

    def create_placeholders(self):
        """Create input placeholders: x = token ids, y = tag ids (both padded
        to maxlen), z = 4-way class labels, dropout = scalar rate."""
        # input and target
        self.x = tf.placeholder(tf.int32, shape=(None, self.maxlen))
        self.y = tf.placeholder(tf.int32, shape=(None, self.maxlen))
        self.z = tf.placeholder(tf.int32, shape=(None, 4))
        # dropout
        self.dropout = tf.placeholder(tf.float32,)
        return self.x, self.y, self.z

    def build_embedding_layer(self, inputs, reuse=None):
        """Token embedding + positional encoding (sinusoidal or learned),
        followed by dropout."""
        self.emb_char = embedding(inputs,
                                  vocab_size=self.vocab_size,
                                  num_units=self.hidden_units,
                                  scale=True,
                                  scope="emb_char",
                                  reuse=reuse)
        self.emb_char_pos = self.emb_char
        if self.emb_pos_type == 'sin':
            # fixed sinusoidal positional encoding
            self.emb_char_pos += positional_encoding(inputs,
                                                     num_units=self.hidden_units,
                                                     zero_pad=False,
                                                     scale=False,
                                                     scope="emb_pos",
                                                     reuse=reuse)
        else:
            # learned positional embedding over positions 0..maxlen-1
            self.emb_char_pos += embedding(tf.tile(tf.expand_dims(tf.range(tf.shape(inputs)[1]), 0), [tf.shape(inputs)[0], 1]),
                                           vocab_size=self.maxlen,
                                           num_units=self.hidden_units,
                                           zero_pad=False,
                                           scale=False,
                                           scope="emb_pos",
                                           reuse=reuse)
        self.emb = tf.layers.dropout(self.emb_char_pos, rate=self.dropout,)
        return self.emb

    def build_blocks(self, inputs, masks, reuse=None):
        """Stack num_blocks transformer encoder blocks
        (self-attention + feed-forward)."""
        self.blk = inputs
        for i in range(self.num_blocks):
            with tf.variable_scope("blocks_{}".format(i), reuse=reuse):
                ## Multihead Attention ( self-attention)
                self.blk = multihead_attention(queries=self.blk,
                                               keys=self.blk,
                                               qkv_masks=masks,
                                               num_units=self.hidden_units,
                                               num_heads=self.num_heads,
                                               dropout_rate=self.dropout,
                                               # is_training=is_training,
                                               causality=False,
                                               scope="self_attention",
                                               reuse=reuse)
                self.blk = feedforward(self.blk, num_units=[4*self.hidden_units, self.hidden_units], reuse=reuse)
        return self.blk

    def build_linear_projection_layer(self, inputs, reuse=None):
        """Project encoder outputs to per-token tag logits."""
        self.logits = tf.layers.dense(inputs, len(self.data_preparer.tag2idx), name='logits', reuse=reuse)
        return self.logits

    def attention(self, inputs, attention_size=768, time_major=False):
        """Additive attention pooling over the time axis; returns a (B, D)
        weighted sum of the inputs."""
        if isinstance(inputs, tuple):
            inputs = tf.concat(inputs, 2)
        if time_major:  # (T,B,D) => (B,T,D)
            inputs = tf.transpose(inputs, [1, 0, 2])
        hidden_size = inputs.shape[2].value
        # Trainable parameters
        w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
        b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
        u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
        v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)
        vu = tf.tensordot(v, u_omega, axes=1, name='vu')  # (B,T) shape
        alphas = tf.nn.softmax(vu, name='alphas')         # (B,T) shape
        # the result has (B,D) shape
        output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)
        return output

    def build_dense_layer(self, inputs, filter_sizes, num_filters, reuse=None):
        """CNN feature extractor: conv + relu + max-pool per filter size,
        concatenated, flattened, and dropped out (stores self.h_drop).

        NOTE(review): this definition is SHADOWED by the second
        build_dense_layer below -- it is unreachable by name.
        """
        # self.out1 = tf.layers.dense(inputs, 100, name='out1', reuse=reuse)
        # self.out1 = tf.nn.relu(self.out1)
        # self.label_logits = tf.layers.dense(self.out1, len(self.data_preparer.label2id), name='label_logits', reuse=reuse)
        # print('YYYYYYYYYYYYYYYYYYYYYYYYY')
        # print(np.shape(self.label_logits))
        # Create a convolution + maxpool layer for each filter size
        inputs = tf.expand_dims(inputs, -1)
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                # NOTE(review): assumes encoder width 768 and sequence
                # length 512 (hard-coded below) -- confirm against config.
                filter_shape = [filter_size, 768, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.02), name="W")
                b = tf.Variable(tf.constant(0.01, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    inputs,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, 512 - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, keep_prob=0.8)
        return self.h_drop

    def build_dense_layer(self, inputs, filter_sizes, num_filters, reuse=None):
        """Final classification projection over self.h_drop -> 4-way logits.

        NOTE(review): shadows the conv/pool definition above and reads
        self.h_drop (set only by that shadowed method) rather than `inputs` --
        callers must have produced h_drop some other way; verify the intended
        wiring.
        """
        num_filters_total = num_filters * len(filter_sizes)
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, 4],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.01, shape=[4]), name="b")
            self.label_logits = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
        return self.label_logits

    def compute_label_loss(self, logits, labels):
        """Softmax cross-entropy + accuracy for the classification head."""
        # skip [CLS] at the beginning and [SEP] at the end
        # logits = logits[:, :]
        # labels = labels[:, :]
        # accuracy: fraction of argmax matches (denominator is batch size,
        # expressed as an always-true comparison)
        self.cly_acc = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(logits, axis=1, output_type=tf.int32), tf.argmax(labels, axis=1, output_type=tf.int32)), "float")) / tf.reduce_sum(tf.cast(tf.equal(tf.argmax(labels, axis=1, output_type=tf.int32), tf.argmax(labels, axis=1, output_type=tf.int32)), "float"))
        # self.debug_var = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(logits, axis=1, output_type=tf.int32), tf.reshape(labels, [-1])), "float"))
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels)
        self.label_mean_loss = tf.reduce_mean(self.label_loss)
        # self.y_smoothed = tf.one_hot(labels, depth=len(self.data_preparer.tag2idx)) #label_smoothing(tf.one_hot(labels, depth=len(self.data_preparer.tag2idx)))
        # self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.y_smoothed)
        # self.mean_loss = tf.reduce_mean(self.loss*self.istarget)
        return self.label_mean_loss

    def compute_loss(self, logits, labels):
        """CRF negative log-likelihood over the tag sequence; also sets
        lengths, greedy preds, and masked token accuracy (crf_acc)."""
        # skip [CLS] at the beginning and [SEP] at the end
        logits = logits[:, 1:-1,:]
        # NOTE(review): labels are sliced [:, :-2] vs logits [:, 1:-1] -- this
        # assumes y is NOT wrapped with [CLS]/[SEP]; confirm against the data
        # preparation pipeline.
        labels = labels[:, :-2]
        self.logits_no_cls_sep = logits
        # 1.0 for real tokens, 0.0 for padding
        self.istarget = tf.to_float(tf.not_equal(self.x, self.data_preparer.vocab['[PAD]'])[:, 1:-1])
        self.lengths = tf.reduce_sum(self.istarget, axis=-1)
        self.preds = tf.to_int32(tf.argmax(logits, axis=-1))
        self.crf_acc = tf.reduce_sum(tf.to_float(tf.equal(self.preds, labels))*self.istarget) / tf.reduce_sum(self.istarget)
        self.trans = tf.get_variable(
            "transitions",
            shape=[len(self.data_preparer.tag2idx), len(self.data_preparer.tag2idx)],)
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=labels,
            transition_params=self.trans,
            sequence_lengths=self.lengths)
        self.loss = -log_likelihood
        self.mean_loss = tf.reduce_mean(self.loss)
        # self.y_smoothed = tf.one_hot(labels, depth=len(self.data_preparer.tag2idx)) #label_smoothing(tf.one_hot(labels, depth=len(self.data_preparer.tag2idx)))
        # self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=self.y_smoothed)
        # self.mean_loss = tf.reduce_mean(self.loss*self.istarget)
        return self.mean_loss

    def merge_loss(self, loss1, loss2):
        """Combine the CRF and classification losses by simple addition."""
        self.all_loss = loss1 + loss2
        return self.all_loss

    def set_optimizer(self, loss):
        """Create the global step and an Adam train op minimizing `loss`."""
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.9, beta2=0.98, epsilon=1e-8)
        # grads = self.optimizer.compute_gradients(loss)
        # for i, (g, v) in enumerate(grads):
        #     if g is not None:
        #         grads[i] = (tf.clip_by_norm(g, 5), v)  # gradient-clip threshold set to 5
        # self.train_op = self.optimizer.apply_gradients(grads)
        self.train_op = self.optimizer.minimize(loss, global_step=self.global_step)
        return self.global_step, self.train_op
|
994,574 | 23a399bb9f716f7ac01235e657c4e8850312874f | """Construct the link (foreign key or association table) between models."""
from open_alchemy import facades
from open_alchemy import types
from . import association_table as _association_table
from . import foreign_key as _foreign_key
def construct(
    *,
    artifacts: types.ObjectArtifacts,
    model_schema: types.Schema,
    schemas: types.Schemas,
) -> None:
    """
    Construct the link between the tables for a reference between models in an array.

    A many-to-many relationship (when a secondary table name is present) is
    realized as an association table; otherwise the one-to-many case is
    handled by adding a foreign key to the referenced table.

    Args:
        artifacts: The object reference artifacts.
        model_schema: The schema of the model in which the array reference is embedded.
        schemas: Used to retrieve the referenced schema and to resolve any $ref.

    """
    secondary = artifacts.relationship.secondary
    if secondary is not None:
        association = _association_table.construct(
            parent_schema=model_schema,
            child_schema=artifacts.spec,
            schemas=schemas,
            tablename=secondary,
        )
        facades.models.set_association(table=association, name=secondary)
    else:
        _foreign_key.set_(
            ref_model_name=artifacts.relationship.model_name,
            logical_name=artifacts.logical_name,
            model_schema=model_schema,
            schemas=schemas,
            fk_column=artifacts.fk_column,
        )
|
994,575 | c027389845a8e684618088f174cc75e76dd2c82e | import pandas as pd
import re
import nltk
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
stemmer = nltk.stem.PorterStemmer()
def _apply_df(args):
df, func = args
return df.apply(func)
def make_sentences(reviews):
    """Flatten a collection of reviews into one list of sentence word-lists."""
    collected = []
    for review in reviews:
        collected.extend(review_to_sentences(review))
    return collected
def review_to_words(raw_review):
    """Normalize one review to a single space-joined string of stemmed words.

    Pipeline: strip HTML, keep letters only, lowercase + split, drop English
    stopwords, Porter-stem the remainder, and re-join with spaces.
    """
    text = BeautifulSoup(raw_review, 'html.parser').get_text()
    letters = re.sub('[^a-zA-Z]', ' ', text)
    tokens = letters.lower().split()
    # A set gives O(1) membership tests (much faster than a list).
    stops = set(stopwords.words('english'))
    return ' '.join(stemmer.stem(tok) for tok in tokens if tok not in stops)
def review_to_wordlist(raw_review, remove_stopwords=False):
    """Convert one review to a list of lowercase word tokens.

    Args:
        raw_review: Raw (possibly HTML) review text.
        remove_stopwords: When True, drop English stopwords.

    Returns:
        list[str]: The tokenized words.
    """
    # 1. Strip HTML markup.
    review_text = BeautifulSoup(raw_review, 'html.parser').get_text()
    # 2. Replace every non-letter with a space.
    letters_only = re.sub('[^a-zA-Z]', ' ', review_text)
    # 3. Lowercase and split on whitespace.
    meaningful_words = letters_only.lower().split()
    if remove_stopwords:
        # 4. A set gives O(1) membership tests (much faster than a list).
        stops = set(stopwords.words('english'))
        # 5. BUG FIX: the original filtered an undefined name `words`, raising
        #    NameError whenever remove_stopwords=True; filter the tokens
        #    computed above instead.
        meaningful_words = [w for w in meaningful_words if w not in stops]
    return meaningful_words
# Define a function to split a review into parsed sentences
def review_to_sentences(review, remove_stopwords=False):
    """Split a review into sentences, each returned as a list of words."""
    # Punkt sentence tokenizer (note: loaded on every call).
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    raw_sentences = tokenizer.tokenize(review.strip())
    # Skip empty sentences; tokenize the rest into word lists.
    return [review_to_wordlist(raw, remove_stopwords)
            for raw in raw_sentences if len(raw) > 0]
994,576 | 05385360c132d95a7d17ecca81fbef22fbf5d5b3 | import gzip
import requests
import msgpack
from multiprocessing import Pool
import time
import xmltodict
WORKER_COUNT = 4 # Add CPUs & increase this value to supercharge processing downloaded
def extract_xml_step(xml_row):
    """
    Multiprocessing worker: reduce one parsed <user> XML row to its key fields.
    TODO: Handle varying XML format (different contents, validation, etc..)
    """
    wanted = ('id', 'total_credit', 'expavg_credit', 'cpid')
    return {field: xml_row[field] for field in wanted}
def download_extract_stats(project_url):
    """
    Download a (possibly gzipped) BOINC user-stats XML file, parse it, reduce
    each <user> entry to its key fields in a worker pool, and msgpack the list.

    Args:
        project_url: URL of the stats file; treated as gzipped when the URL
            contains '.gz'.

    Returns:
        bytes: msgpack-encoded list of per-user dicts, or None on HTTP failure.
    """
    downloaded_file = requests.get(project_url, stream=True)
    if downloaded_file.status_code != 200:
        # Guard clause: report and bail out on any non-200 response.
        print(downloaded_file.status_code)
        print("FAIL")
        return None

    if '.gz' in project_url:
        # Compressed: stream-decompress the raw response before parsing.
        with gzip.open(downloaded_file.raw, 'rb') as uncompressed_file:
            file_content = xmltodict.parse(uncompressed_file.read())
    else:
        # Not compressed (per the original: this branch is not confirmed).
        file_content = xmltodict.parse(downloaded_file.text)

    # Context manager terminates the workers; the original leaked the pool
    # (it was never closed or joined).
    with Pool(processes=WORKER_COUNT) as pool:
        pool_xml_data = pool.map(extract_xml_step, file_content['users']['user'])

    return msgpack.packb(pool_xml_data, use_bin_type=True)
if __name__ == "__main__":
    # Guard the module-level kickoff: multiprocessing's spawn start method
    # re-imports this module in every worker, and an unguarded call would
    # re-launch the download recursively in each pool process.
    download_extract_stats("http://csgrid.org/csg/stats/user.gz")
|
994,577 | 6be8461161516ed2e243dc166ea30337c90763fa | from textblob import TextBlob
import pandas as pd
testwords = ['good is bad, bad is good good', 'hello', 'fucking', 'best', 'beautiful', 'bad', 'wonderful', 'horrible',
'haha', 'ok', 'accaptable', 'jnfjanfjanfja']
testparagraph = """
Google was bound to make it in here somehow. Here are some intern perks at Google:
1. Google pays for flights from anywhere in the world to your office and from your office to anywhere in the world,
before and after your internship. (This is standard for most companies)
2. Google gives us free, and in my opinion, luxury housing. Although we share an apartment we three others, it's
equipped with a nice TV, a patio, a kitchen with a dishwasher, 2 baths, a washer and dryer, and biweekly cleaning. We
also have access to a 24-hour gym, a hot tub, a swimming pool, a games room, and a park.
3. Google buses pick us from corp housing and drop us back to corp housing many times during the day.
4. Google bikes are temporary bikes available in and around Google to use to cycle around campus. You can rent a bike
for free.
5. Google has over 20 gourmet cafeterias all over campus with almost all types of cuisine almost everyday. They serve 3
meals on all weekdays, with few exceptions.
6. Everyone is less than 100 feet away from a microkitchen, stuffed with all sorts of snacks, fruits and drinks. They
also come with a automatic coffee machine and an espresso machine. If there's something you want in your microkitchen,
it can be asked for.
7. Chess tables, board games, pool tables, table tennis tables and swimming pools can be found frequently around campus.
You're encouraged to use them, during work.
8. Interns get an hours worth of massage credits and get a professional massage. Massage chairs are scattered around
campus in case you want something automatic.
9. Weekly TGIF involves wine, beer, watching Larry and Sergey address the company, possibly asking them questions and
more. During work.
10. No work hours. Come and go when you want - just get work done and be present virtually at your meetings.
11. Request any electronic item you might need for use for your entire internship. Usually work related, but includes
laptops of choice, headphones, etc. You get to keep some of them. Interns can work on a chromebook, a chrome book pixel
or a 15" inch retina MacBook Pro, as of 2013.
12. Dogfood the newest stuff google makes.
13. Attend special internal hackathons to be the first to work on Google's coolest products.
14. Watch the first Loon launch.
15. Need to run errands at work? Borrow a google car and go anywhere you want for any amount of time.
16. The office never closes. Stay all night!
17. Nap pods. Sleep at work, in style.
18. Intern Boat Cruise in the bay. As crazy as they get.
19. Great pay on top of all the free stuff.
20. Heated toilet seats at work.
21. No clothing guidelines (this is the norm at most tech companies). Hair color, tattoos, piercings - it all runs as
long as you code.
22. The best internal tools. Can't say much more.
23. Volleyball courts, Soccer fields, and intra company sporting competitions. I'm sure they have more facilities I'm
not even aware of.
24. There are 5 or more full fledged high tech gyms at google including outdoor pull up bars and what not. When I say
high tech, I mean they count your reps for you. Free gym classes for everything you can imagine.
25. Free classes for random things - from python and C++ to salsa and more. You can take breaks from work to learn
something cool.
26. Free Google swag. Interns get a T shirt and a Patagonia backpack and a hoodie. Plus, you get to keep headphones and
if you're lucky, more valuable freebies.
27. You get to have a full fledge Hollywood movie featuring Owen Wilson and Vince Vaughn based on how cool your job is,
albeit more than slightly exaggerated. You also get free tickets to the red carpet premier a week before release. So
what if it's a crappy movie? Unlike Jobs or The Social Network, this is about the interns! It's about you.
28. Getting a full time job at google is very in demand and as a result, very hard. I won't reveal numbers but it is
orders if magnitude harder than the most selective college in America. Converting from an internship is much easier,
and that extra boost is great to have especially in a market where "Ex-Googler" is a status symbol.
29. Get to meet some legends. Just by being a little pushy and very lucky, you can easily set up meetings with Ken
Thompson and Jeff Dean. It's much easier to set up people with lesser known people all around google and just talk about
their projects and technology.
30. Last, but not least. The biggest perk of being at google are the people. The interns are extremely smart and
passionate but sociable and nice as well. The full timers are amazing too. All companies have great engineers,
but at Google you're surrounded by a city of so many of the smartest software engineers shaping the forefront of
technology today. The sheer quantity is mesmerizing. They are so well read (in code) and knowledgeable and very helpful.
If you make use of it, it's like infinite office hours with a professor who's always at your service!
Edit:
31. On-site haircuts two days a week, with professional stylists.
32. On-site laundry if you so please.
33. "Park at Google when you go to concerts at Shoreline. Also, pick up free drinks and snacks at Google at the same
time. Sometimes it's nice, after the concert, to play a game of pool or something with your friends while the
concertgoers are stuck in traffic." - Iain McClatchie
This summer they had artists ranging from John Mayer to Brad Paisley to Wiz Khalifa.
34. If you're lost or need any transport, you can call the GCar or the GShuttle to pick you up if you're anywhere around
campus.
187.9k Views · View Upvotes
Upvote2.7kDownvoteComments27+
Share
Bogdan Cristian Tătăroiu
Bogdan Cristian Tătăroiu, Intern at Dropbox, formerly at Twitter and Facebook
Updated Aug 15, 2013 · Featured in Forbes · Upvoted by Oliver Emberton, Founder of Silktide and Ryhan Hassan, Interned
at Apple, Google. Joining Dropbox.
Dropbox has by far the most perks I've seen in any Silicon Valley company.
The major event that stood out to me this summer was Parent's Weekend, where they flew out all intern parents down to
their San Francisco office, housed them for 2 nights, organised a bunch of talks explaining Dropbox to them, where we
stand now, our future products, our vision etc. and basically helped them understand why all of us working here are so
excited about what we're doing.
It was an awesome family-like experience all round and for me personally it was made even better by the fact that it was
my father's first trip to the United States and my mother's second and they finally got to understand why I chose to do
what I do and be where I am right now.
Other than that:
They completely cover your housing - either 2 people in a 2 bedroom apartment or, if you're lucky, 1 person in a 1
bedroom apartment.
They have shuttles which pick you up from corporate housing locations and take you back from the office to _anywhere_
in SF.
The Tuckshop (our in-house restaurant) literally makes better food than I find in most restaurants I eat in over the
weekend in SF.
They cover expenses: phone plan, caltrain gopass, muni & bart pass, flights.
Giant music room with everything from grand piano to electric guitars and drumset
Massages, haircuts, professional ping pong training, on-site gym.
No work hours - come and go as you please.
We host Hack Week, where the entire company stops what they are normally doing, brings in guests (expenses covered) and
works on anything.
The quality of the people you work with is incredible. Every once in a while there comes a tech company that becomes a
magnet for engineering talent - first it was Google, then it was Facebook, now Dropbox seems to be following in their
footsteps.
We have an internal joke that if the file syncing business goes bust, we can just turn into a restaurant and t-shirt
company and we'll be fine. That's reflected in the amount of swag you get here.
Request anything from IT department (we got StarCraft II licences for a hack week AI).
100$ monthly Exec credit
Saving the best for last, you can set your own Dropbox space quota for life.
The list goes on and on and while some of the perks I mentioned can be found at other companies, if you actually see
them first hand, they always have a slight twist which makes them even better.
"""
# POS-tag the corpus; blob.tags yields (word, tag) pairs.
blob = TextBlob(testparagraph)
# blob = blob.correct()
words = list(blob.tags)
# Tags of interest: adjectives (JJ), nouns (NN/NR/NT), pronouns (PN), adverbs (AD).
word_type_list = ['JJ', 'NN', 'NR', 'NT', 'PN', 'AD']
words2 = list()
pair_list = list()
# Keep only tokens with a tag of interest, preserving order.
for i in range(0, len(words)):
    if words[i][1] in word_type_list:
        # print(words[i])
        words2.append(words[i])
# Pair each adjective with the next noun that follows it (or, on hitting a
# pronoun, with the most recent noun), scanning left to right.
last_noun_position = 0
last_PN_position = 0
for i in range(0, len(words2)):
    # Resume scanning from whichever anchor (noun or pronoun) came last.
    if last_noun_position > last_PN_position:
        last_position = last_noun_position
    else:
        last_position = last_PN_position
    if words2[i][1] in ['NN', 'NR', 'NT']:
        for j in range(last_position, i):
            if words2[j][1] == 'JJ':
                pair_list.append((words2[j], words2[i]))
        last_noun_position = i
    elif words2[i][1] == 'PN':
        for j in range(last_position, i):
            if words2[j][1] == 'JJ':
                # NOTE(review): before any noun has been seen this pairs the
                # adjective with words2[0], which may not be a noun -- verify.
                pair_list.append((words2[j], words2[last_noun_position]))
        last_PN_position = i
# Accumulate the polarity of each noun's paired adjectives.
result = dict()
for pair in pair_list:
    if pair[1][0] not in result:
        result[pair[1][0]] = TextBlob(pair[0][0]).sentiment.polarity
    else:
        result[pair[1][0]] += TextBlob(pair[0][0]).sentiment.polarity
# Rank nouns by total polarity; report the five best and five worst.
result = pd.Series(result)
result.sort_values(ascending=False, inplace=True)
positive_reason = result[:5]
negative_reason = result[-5:].sort_values()
print('Top five positive reasons: ')
print(positive_reason)
print('Top five negative reasons: ')
print(negative_reason)
print('end')
|
994,578 | 5fffe560739a589eba94744a3bd58b2efaab3c9e | import struct
import socket
class Ip:
    """Minimal IPv4 header parser over the first 20 bytes of a raw packet.

    Options past the fixed header are ignored; accessors return the parsed
    fields.
    """

    _HEADER_FMT = '!BBHHHBBH4s4s'

    def __init__(self, raw=None):
        fields = struct.unpack(self._HEADER_FMT, raw[:20])
        ver_ihl = fields[0]
        self._version = ver_ihl >> 4      # high nibble: IP version
        self._ihl = ver_ihl & 0x0F        # low nibble: header length in 32-bit words
        self._ttl = fields[5]
        self._protocol = fields[6]
        self._srcip = socket.inet_ntoa(fields[8])
        self._dstip = socket.inet_ntoa(fields[9])

    def version(self):
        """IP version number (4 for IPv4)."""
        return self._version

    def iph_length(self):
        """Header length (IHL) in 32-bit words, NOT bytes."""
        return self._ihl

    def ttl(self):
        """Time-to-live field."""
        return self._ttl

    def protocol(self):
        """Encapsulated protocol number (e.g. 6 = TCP)."""
        return self._protocol

    def srcip(self):
        """Source address in dotted-quad notation."""
        return self._srcip

    def dstip(self):
        """Destination address in dotted-quad notation."""
        return self._dstip
class Tcp:
    """Minimal TCP header parser; expects `raw` to begin at the IP header."""

    def __init__(self, raw=None):
        # Skip past the IP header (IHL is expressed in 32-bit words).
        ip_header_len = Ip(raw).iph_length() * 4
        fields = struct.unpack('!HHLLBBHHH', raw[ip_header_len:ip_header_len + 20])
        self._srcport = fields[0]
        self._dstport = fields[1]
        self._seq = fields[2]
        self._ack = fields[3]
        self._reserved = fields[4]      # raw data-offset/reserved byte
        self._hlen = fields[4] >> 4     # TCP header length in 32-bit words
        # Payload starts after the full TCP header (options included).
        self._data = raw[ip_header_len + self._hlen * 4:]

    def srcport(self):
        """Source port."""
        return self._srcport

    def dstport(self):
        """Destination port."""
        return self._dstport

    def sequence(self):
        """Sequence number."""
        return self._seq

    def acknowledgment(self):
        """Acknowledgment number."""
        return self._ack

    def header_len(self):
        """TCP header length in 32-bit words."""
        return self._hlen

    def data(self):
        """Payload bytes following the TCP header."""
        return self._data
994,579 | 8bd47109436bf0cc2afb345d1485957d641ea66c | import socket
from logger.logger import Logger
SEND=b'PING!'
RECV=b'PONG!'
PORT = 9999
logger = Logger()
def ping(host):
    """TCP "ping": connect to host:PORT, send SEND, expect RECV echoed back.

    Args:
        host: Hostname or IP address to probe.

    Returns:
        bool: True only when the peer returns exactly RECV; False on any
        socket error, connection failure, or truncated reply.
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket.error:
        return False
    try:
        sock.connect((host, PORT))
        sock.sendall(SEND)
        # logger.debug(f"Client Sent Request Ping to {host}")
        data = b''
        while len(data) < len(RECV):
            chunk = sock.recv(len(RECV) - len(data))
            if not chunk:
                # BUG FIX: the peer closed before sending a full reply; the
                # original looped forever here because recv() keeps
                # returning b'' after EOF.
                return False
            data += chunk
        # logger.debug(f"Client Receive from {host}: data={data} len(data)={len(data)}")
    except socket.error:
        return False
    finally:
        # BUG FIX: the original leaked the socket on every path.
        sock.close()
    return data == RECV
|
994,580 | ef896745ad7279be1bcd8c24b110013e627705bf | from __future__ import absolute_import, division, print_function
import os
from subprocess import call
import yaml
import importlib
from collections import OrderedDict
import numpy as np
import xarray as xr
import pandas as pd
import cftime
import calc
# POP ocean-model grid description file
grid_file = '/glade/work/mclong/grids/pop-grid-g16.nc'
# climatology averaging window (years, inclusive start / exclusive end per slice semantics)
year_range_clim = slice(1964,2014)
# figure output directory (created on demand)
dirf = './fig'
if not os.path.exists(dirf):
    call(['mkdir','-p',dirf])
# scratch directory for cached computed collections
dirt = '/glade/scratch/mclong/calcs/o2-prediction'
if not os.path.exists(dirt):
    call(['mkdir','-p',dirt])
# default kwargs for xarray open: chunk along time, defer coord/time decoding
xr_open_ds = {'chunks' : {'time':1},
              'decode_coords' : False,
              'decode_times' : False}
xr.set_options(enable_cftimeindex=True)
# fraction of a 365-day (noleap) year contributed by each month
ypm = np.array([31,28,31,30,31,30,31,31,30,31,30,31])/365
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def make_time(year_range):
    """Return first-of-month noleap-calendar dates covering year_range
    (a (first_year, last_year) pair, inclusive), ordered year-major."""
    first, last = year_range[0], year_range[1]
    return [cftime.DatetimeNoLeap(yr, mo, 1)
            for yr in range(first, last + 1)
            for mo in range(1, 13)]
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def open_collection(base_dataset,
                    variables,
                    op,
                    isel_name='',
                    isel={},
                    clobber=False):
    """Open (or build and cache) a derived collection for `base_dataset`.

    Applies one of the registered operators ('ann', 'monclim', 'monanom')
    to each variable of the dataset named in collections.yml, caching each
    result as a zarr store under `dirt`. `clobber=True` forces recompute.
    Returns the merged xr.Dataset.

    Raises ValueError for an unknown operator/dataset, or when `isel` is
    given without `isel_name`.

    Fix: yaml.load() without an explicit Loader is deprecated and unsafe;
    the collections file is plain data, so safe_load is used. The dead
    `if operator:` branch (operator is always bound by the dispatch above)
    was removed.
    """
    if isel and not isel_name:
        raise ValueError('need isel_name with isel')

    operators = {'ann': calc.compute_ann_mean,
                 'monclim': calc.compute_mon_climatology,
                 'monanom': calc.compute_mon_anomaly}
    if isinstance(op, str) and op in operators:
        operator = operators[op]
    else:
        raise ValueError(f'{op} unknown')

    with open('collections.yml') as f:
        spec = yaml.safe_load(f)

    if base_dataset not in spec:
        raise ValueError(f'Unknown dataset: {base_dataset}')
    spec = spec[base_dataset]

    # Module that knows how to open the raw source data.
    data_mod = importlib.import_module(spec['source'])

    collection_file_base = f'{dirt}/{base_dataset}.{op}'
    if isel:
        collection_file_base = f'{collection_file_base}.{isel_name}'

    ds = xr.Dataset()
    for v in variables:
        collection_file = f'{collection_file_base}.{v}.zarr'
        if clobber:
            call(['rm','-frv',collection_file])
        if os.path.exists(collection_file):
            print(f'reading {collection_file}')
            dsi = xr.open_zarr(collection_file, decode_times=False, decode_coords=False)
        else:
            dsm = data_mod.open_dataset(variable_list=v, **spec['open_dataset'])
            if isel:
                dsm = dsm.isel(**isel)
            dsi = operator(dsm)
            print(f'writing {collection_file}')
            dsi.to_zarr(collection_file)
        ds = xr.merge((ds, dsi))
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def annmean_collection(base_dataset,
                       variables,
                       isel={},
                       isel_name='',
                       clobber=False):
    """Open (or build and cache) the annual-mean collection of
    `base_dataset`, one zarr store per variable under `dirt`.

    Raises ValueError for an unknown dataset, or when `isel` is given
    without `isel_name`.

    Fix: yaml.load() without an explicit Loader is deprecated and unsafe
    for plain config data; replaced with yaml.safe_load().
    """
    if isel and not isel_name:
        raise ValueError('need isel_name with isel')

    with open('collections.yml') as f:
        spec = yaml.safe_load(f)

    if base_dataset not in spec:
        raise ValueError(f'Unknown dataset: {base_dataset}')
    spec = spec[base_dataset]

    # Module that knows how to open the raw source data.
    data_mod = importlib.import_module(spec['source'])

    ds = xr.Dataset()
    for v in variables:
        if isel:
            collection_file = f'{dirt}/{base_dataset}.ann.{isel_name}.{v}.zarr'
        else:
            collection_file = f'{dirt}/{base_dataset}.ann.{v}.zarr'
        if clobber:
            call(['rm','-frv',collection_file])
        if os.path.exists(collection_file):
            print(f'reading {collection_file}')
            dsi = xr.open_zarr(collection_file, decode_times=False, decode_coords=False)
        else:
            dsm = data_mod.open_dataset(variable_list=v, **spec['open_dataset'])
            if isel:
                dsm = dsm.isel(**isel)
            dsi = calc.compute_ann_mean(dsm)
            print(f'writing {collection_file}')
            dsi.to_zarr(collection_file)
        ds = xr.merge((ds, dsi))
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def compute_ann_mean_old(dsm,wgt):
    """Weighted annual mean of all time-dependent variables in `dsm`.

    `wgt` is a per-timestep weight (e.g. month-length fraction) aligned
    with dsm.time. Superseded by calc.compute_ann_mean ("old" variant).
    """
    # Split static grid fields from time-varying fields.
    grid_vars = [v for v in dsm.variables if 'time' not in dsm[v].dims]
    variables = [v for v in dsm.variables if 'time' in dsm[v].dims and v not in ['time','time_bound']]
    # save attrs
    attrs = {v:dsm[v].attrs for v in dsm.variables}
    encoding = {v:dsm[v].encoding for v in dsm.variables}
    # groupby.sum() does not seem to handle missing values correctly: yields 0 not nan
    # the groupby.mean() does return nans, so create a mask of valid values for each variable
    valid = {v : dsm[v].groupby('time.year').mean(dim='time').notnull().rename({'year':'time'}) for v in variables}
    # 1 where data present, 0 where missing: used to renormalize the weights.
    ones = dsm.drop(grid_vars).where(dsm.isnull()).fillna(1.).where(dsm.notnull()).fillna(0.)
    # compute the annual means
    ds = (dsm.drop(grid_vars) * wgt).groupby('time.year').sum('time').rename({'year':'time'},inplace=True)
    ones_out = (ones * wgt).groupby('time.year').sum('time').rename({'year':'time'},inplace=True)
    ones_out = ones_out.where(ones_out>0.)
    # renormalize to appropriately account for missing values
    ds = ds / ones_out
    # put the grid variables back
    ds = xr.merge((ds,dsm.drop([v for v in dsm.variables if v not in grid_vars])))
    # apply the valid-values mask
    for v in variables:
        ds[v] = ds[v].where(valid[v])
    # put the attributes back
    for v in ds.variables:
        ds[v].attrs = attrs[v]
    # put the encoding back
    for v in ds.variables:
        ds[v].encoding = encoding[v]
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def region_box(ds=None):
    """Return (x, y) lon/lat vertex lists tracing the bounding box of the
    single region produced by region_mask(); raises when more than one
    region is defined."""
    m = region_mask(ds, masked_area=False)
    if len(m.region) != 1:
        raise ValueError('Region > 1 not yet implemented')
    masked = m.where(m > 0)
    lats = np.concatenate((np.array([(masked * m.TLAT).min().values]),
                           np.array([(masked * m.TLAT).max().values])))
    lons = np.concatenate((np.array([(masked * m.TLONG).min().values]),
                           np.array([(masked * m.TLONG).max().values])))
    # Closed box traced counter-clockwise, repeating the first corner.
    y = [lats[0], lats[0], lats[1], lats[1], lats[0]]
    x = [lons[0], lons[1], lons[1], lons[0], lons[0]]
    return x, y
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def region_mask(ds=None,masked_area=True):
    """Build a (region, [z_t,] nlat, nlon) mask for the defined regions.

    With masked_area=True the mask is normalized by TAREA so summing
    (mask * field) over nlat/nlon gives an area-weighted regional mean.
    `ds` defaults to the canonical POP grid file.
    """
    if ds is None:
        ds = xr.open_dataset(grid_file,decode_coords=False)
    TLAT = ds.TLAT
    TLONG = ds.TLONG
    KMT = ds.KMT      # active ocean levels per column (0 = land)
    TAREA = ds.TAREA  # tracer-cell area
    nj,ni = KMT.shape
    #-- define the mask logic
    M = xr.DataArray(np.ones(KMT.shape),dims=('nlat','nlon'))
    region_defs = OrderedDict([
        # CalCOFI box: 25-38N, 126W-115W (POP longitudes run 0-360).
        ( 'CalCOFI', M.where((25 <= TLAT) & (TLAT <= 38) &
                             (360-126<=TLONG) & (TLONG <= 360-115)) )
        ])
    #-- do things different if z_t is present
    if 'z_t' not in ds.variables:
        # 2-D case: one (nlat, nlon) layer per region, masked to ocean.
        mask3d = xr.DataArray(np.ones(((len(region_defs),)+KMT.shape)),
                              dims=('region','nlat','nlon'),
                              coords={'region':list(region_defs.keys()),
                                      'TLAT':TLAT,
                                      'TLONG':TLONG})
        for i,mask_logic in enumerate(region_defs.values()):
            mask3d.values[i,:,:] = mask_logic.fillna(0.)
        mask3d = mask3d.where(KMT>0)
    else:
        # 3-D case: extend the mask down each column, valid only above the
        # ocean bottom (level index k <= KMT-1).
        z_t = ds.z_t
        nk = len(z_t)
        ONES = xr.DataArray(np.ones((nk,nj,ni)),dims=('z_t','nlat','nlon'),coords={'z_t':z_t})
        K = xr.DataArray(np.arange(0,len(z_t)),dims=('z_t'))
        MASK = K * ONES
        MASK = MASK.where(MASK <= KMT-1)
        MASK.values = np.where(MASK.notnull(),1.,0.)
        mask3d = xr.DataArray(np.ones(((len(region_defs),)+z_t.shape+KMT.shape)),
                              dims=('region','z_t','nlat','nlon'),
                              coords={'region':list(region_defs.keys()),
                                      'TLAT':TLAT,
                                      'TLONG':TLONG})
        for i,mask_logic in enumerate(region_defs.values()):
            mask3d.values[i,:,:,:] = ONES * mask_logic.fillna(0.)
        mask3d = mask3d.where(MASK==1.)
    if masked_area:
        # Normalize by cell area so the mask integrates to 1 per region.
        area_total = (mask3d * TAREA).sum(['nlat','nlon'])
        mask3d = (mask3d * TAREA) / area_total.where(area_total > 0)
        for i in range(len(region_defs)):
            valid = mask3d.isel(region=i).sum(['nlat','nlon'])
            valid = valid.where(valid>0)
            #np.testing.assert_allclose(valid[~np.isnan(valid)],np.ones(len(z_t))[~np.isnan(valid)])
    return mask3d
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def regional_mean(ds,masked_weights=None,mask_z_level=0.):
    """Area-weighted regional mean of every (nlat, nlon) field in `ds`.

    masked_weights: normalized mask from region_mask(); computed on the
    fly when omitted. 2-D fields in a dataset that has z_t use the mask
    nearest depth `mask_z_level`. Non-spatial variables pass through.
    """
    if masked_weights is None:
        masked_weights = region_mask(ds,masked_area=True)
    save_attrs = {v:ds[v].attrs for v in ds.variables}
    dsr = xr.Dataset()
    valid = masked_weights.sum(['nlat','nlon'])
    if 'z_t' in ds.variables:
        validk = valid.sel(z_t=mask_z_level,method='nearest')
    for v in ds.variables:
        if ds[v].dims[-2:] == ('nlat','nlon'):
            if 'z_t' in ds[v].dims or 'z_t' not in ds.variables:
                dsr[v] = (ds[v] * masked_weights).sum(['nlat','nlon']).where(valid>0)
            else:
                # 2-D variable in a 3-D dataset: use the near-surface mask.
                dsr[v] = (ds[v] * masked_weights.sel(z_t=mask_z_level,method='nearest')).sum(['nlat','nlon']).where(validk>0)
            dsr[v].attrs = save_attrs[v]
        else:
            dsr[v] = ds[v]
    return dsr
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def xcorr(x, y, dim=None):
    """Pearson correlation of x and y along `dim`, computed over
    pairwise-complete (both non-null) positions only."""
    both = x.notnull() & y.notnull()
    n = both.sum(dim=dim)
    xm = x.where(both)
    ym = y.where(both)
    dx = xm - xm.mean(dim=dim)
    dy = ym - ym.mean(dim=dim)
    cov_xy = (dx * dy).sum(dim=dim) / n
    var_x = (dx ** 2).sum(dim=dim) / n
    var_y = (dy ** 2).sum(dim=dim) / n
    return cov_xy / np.sqrt(var_x * var_y)
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def rmsd(x, y, dim=None):
    """Root-mean-square difference between x and y along `dim`, averaged
    over pairwise-complete (both non-null) positions."""
    both = x.notnull() & y.notnull()
    n = both.sum(dim=dim)
    squared_err = (x - y) ** 2
    return np.sqrt(squared_err.sum(dim=dim) / n)
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def open_ann_fosi(anomaly=True):
    """Open the FOSI annual O2-budget file (hard-coded case/path), convert
    units, and optionally subtract the year_range_clim mean (anomalies)."""
    #-- open the dataset
    xr_open_ds = { #'chunks' : {'time':1}, # chunking breaks "rolling" method
        'decode_coords' : False,
        'decode_times' : False}
    case = 'g.e11_LENS.GECOIAF.T62_g16.009'
    file_in = f'/glade/work/yeager/{case}/budget_O2_npac_{case}.0249-0316.nc'
    ds = xr.open_dataset(file_in,**xr_open_ds)
    #-- convert units
    ds = conform_budget_dataset(ds)
    # NOTE(review): `grid` is computed but never used below.
    grid = ds.drop([v for v in ds.variables if 'time' in ds[v].dims])
    #-- interpret time: shift model years (0249-0316) by a 1699-01-01
    #   offset onto calendar years, then keep just the year as "time".
    offset = cftime.date2num(cftime.DatetimeGregorian(1699,1,1),
                             ds.time.attrs['units'],
                             ds.time.attrs['calendar'])
    ds['date'] = cftime.num2date(ds.time+offset,
                                 ds.time.attrs['units'],
                                 ds.time.attrs['calendar'])
    ds.time.values = [d.year*1. for d in ds.date.values]
    #-- make into an anomaly relative to the climatological window
    if anomaly:
        for v in ds.variables:
            if 'time' in ds[v].dims and v != 'time':
                attrs = ds[v].attrs
                ds[v] = ds[v] - ds[v].sel(time=year_range_clim).mean('time')
                ds[v].attrs = attrs
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def open_fosi_grid():
    """Open the FOSI O2-budget file and return only its time-invariant
    (grid) variables."""
    open_opts = { #'chunks' : {'time':1}, # chunking breaks "rolling" method
        'decode_coords': False,
        'decode_times': False}
    case = 'g.e11_LENS.GECOIAF.T62_g16.009'
    file_in = f'/glade/work/yeager/{case}/budget_O2_npac_{case}.0249-0316.nc'
    ds = xr.open_dataset(file_in, **open_opts)
    time_vars = [v for v in ds.variables if 'time' in ds[v].dims]
    return ds.drop(time_vars)
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def open_ann_dple():
    """Open the CESM-DPLE annual-mean O2-budget anomaly forecasts: merge
    all budget-term files, attach the FOSI grid, and convert units."""
    from glob import glob
    xr_open_ds = {'chunks' : {'S':1},
                  'decode_coords' : False,
                  'decode_times' : False}
    files = glob('/glade/p_old/decpred/CESM-DPLE/postproc/O2_budget_npac/CESM-DPLE.O2_*.annmean.anom.nc')
    # Budget-term name is embedded in each filename: ...O2_<term>.annmean...
    varnames = [f[f.find('.O2_')+1:f.find('.annmean')] for f in files]
    dp = xr.Dataset()
    for v,f in zip(varnames,files):
        dsi = xr.open_dataset(f,**xr_open_ds)
        # Forecast start date S becomes the time axis; 'anom' gets the
        # term's own name so all terms can be merged into one dataset.
        dsi.rename({'anom':v,'S':'time'},inplace=True)
        dp = xr.merge((dp,dsi))
    dp = xr.merge((dp,open_fosi_grid()))
    return conform_budget_dataset(dp)
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def conform_budget_dataset(ds):
    """Convert O2-budget variables to per-area units and attach metadata.

    Tendency terms are scaled to mol m-2 yr-1 and O2_zint to mol m-2;
    land points (KMT == 0) are masked. Also derives O2_sms (prod - cons)
    and O2_adv (lateral + vertical advection) and sets long_name attrs.
    """
    # TAREA is a per-cell area; the trailing 1e4 converts cm^-2 -> m^-2.
    nmols_to_molm2yr = 1e-9 * 365. * 86400. / ds.TAREA * 1e4
    mol_to_molm2 = 1 / ds.TAREA * 1e4
    long_name = {'O2_lat_adv_res' : 'Lateral advection',
                 'O2_vert_adv_res' : 'Vertical advection',
                 'O2_dia_vmix' : 'Vertical mixing (diabatic)',
                 'O2_adi_vmix' : 'Vertical mixing (adiabatic)',
                 'O2_lat_mix' : 'Lateral mixing',
                 'O2_rhs_tend' : 'Total tendency',
                 'O2_sms' : 'Source/sink',
                 'O2_adv' : 'Total advection',
                 'O2_zint' : 'O$_2$ inventory'}
    for v in ds.variables:
        if 'O2_' in v:
            attrs = ds[v].attrs
            if v == 'O2_zint':
                # Inventory: mol (per cell) -> mol m-2.
                ds[v] = (ds[v] * mol_to_molm2).where(ds.KMT > 0)
                new_units = 'mol m$^{-2}$'
            else:
                # Tendency terms: nmol s-1 (per cell) -> mol m-2 yr-1.
                ds[v] = (ds[v] * nmols_to_molm2yr).where(ds.KMT > 0)
                new_units = 'mol m$^{-2}$ yr$^{-1}$'
            ds[v].attrs = attrs
            ds[v].attrs['units'] = new_units
    #-- add some new fields
    ds['O2_sms'] = ds.O2_prod - ds.O2_cons
    ds['O2_sms'].attrs = ds.O2_cons.attrs
    ds['O2_adv'] = ds.O2_lat_adv_res + ds.O2_vert_adv_res
    ds['O2_adv'].attrs = ds.O2_lat_adv_res.attrs
    for v,l in long_name.items():
        ds[v].attrs['long_name'] = l
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def dataview(forecast_lead,apply_region_mask=False):
    """Pair FOSI (verification) and DPLE (forecast) anomalies at a lead.

    forecast_lead: scalar lead year, or an iterable (lo, hi) lead window;
    windows are averaged over leads and FOSI gets a matching running mean.
    Returns {'fosi': ds, 'dp': ds} on a common time axis, optionally
    reduced to regional means. Raises ValueError if times cannot be
    aligned.
    """
    ds = open_ann_fosi(anomaly=True)
    dp = open_ann_dple()
    if hasattr(forecast_lead, '__iter__'):
        # Lead window: average forecasts over the window of leads.
        dpi = dp.sel(L=slice(forecast_lead[0],forecast_lead[1])).mean(dim='L')
    else:
        dpi = dp.sel(L=forecast_lead)
    # Center verification time at the (mean) lead.
    dpi.time.values = dpi.time.values + np.mean(forecast_lead)
    # Restrict both datasets to the overlapping years.
    time_slice = slice(np.max((ds.time[0],dpi.time[0])),
                       np.min((ds.time[-1],dpi.time[-1])))
    dsi = ds.sel(time=time_slice)
    dpi = dpi.sel(time=time_slice)
    #-- if this is a forecast window, apply running mean
    if hasattr(forecast_lead, '__iter__'):
        save_attrs = {v:dsi[v].attrs for v in dsi.variables}
        N = np.diff(forecast_lead)[0] + 1
        dsi = dsi.rolling(time=N,center=True).mean()
        for v in dsi.variables:
            dsi[v].attrs = save_attrs[v]
        # chunk it
        dsi = dsi.chunk({'time':1})
    if apply_region_mask:
        masked_weights = region_mask(dsi,masked_area=True)
        dsi = regional_mean(dsi,masked_weights=masked_weights).compute()
        dpi = regional_mean(dpi,masked_weights=masked_weights).compute()
    if not np.array_equal(dsi.time, dpi.time):
        raise ValueError('Time coords do not match.')
    return {'fosi':dsi,'dp':dpi}
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def load_pdo(year_range=None, apply_ann_filter=False):
    '''Read the monthly PDO index from the local JSON file:
    https://www.ncdc.noaa.gov/teleconnections/pdo/data.json

    year_range: optional (first, last) inclusive year filter.
    apply_ann_filter: apply a centered 12-month running mean.
    Returns an xr.Dataset with PDO, year and mon variables on a cftime axis.

    Fix: save_attrs was computed but never restored after the rolling
    mean (which drops attrs); attributes are now put back.
    '''
    import json
    with open('data/pdo-data.json', 'r') as f:
        pdo_data = json.load(f)

    # Keys are 'YYYYMM' strings.
    year = xr.DataArray([float(d[0:4]) for d in pdo_data['data'].keys()], dims='time')
    mon = xr.DataArray([float(d[4:6]) for d in pdo_data['data'].keys()], dims='time')
    time = xr.DataArray([cftime.DatetimeNoLeap(y, m, 1)
                         for y, m in zip(year.values, mon.values)], dims='time')
    data = xr.DataArray([float(d) for d in pdo_data['data'].values()],
                        dims='time', coords={'time': time})
    ds = xr.Dataset({'PDO': data, 'year': year, 'mon': mon})

    if year_range is not None:
        nx = np.where((year_range[0] <= year) & (year <= year_range[1]))[0]
        ds = ds.isel(time=nx)

    if apply_ann_filter:
        save_attrs = {v: ds[v].attrs for v in ds.variables}
        N = 12
        ds = ds.rolling(time=N, center=True).mean()
        for v in ds.variables:
            ds[v].attrs = save_attrs[v]
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def load_npgo(year_range=None, apply_ann_filter=False):
    '''Read the monthly NPGO index from data/npgo.txt.

    year_range: optional (first, last) inclusive year filter.
    apply_ann_filter: apply a centered 12-month running mean.
    Returns an xr.Dataset with NPGO, year and mon on a cftime axis.

    Fixes: the regex delimiter is now a raw string (avoids the invalid
    '\\s' escape warning), and attrs dropped by the rolling mean are
    restored (save_attrs was previously computed but unused).
    '''
    df = pd.read_table('data/npgo.txt', names=['year', 'mon', 'NPGO'],
                       comment='#', delimiter=r'\s+')
    year = xr.DataArray(df.year.values, dims='time')
    mon = xr.DataArray(df.mon.values, dims='time')
    time = xr.DataArray([cftime.DatetimeNoLeap(y, m, 1)
                         for y, m in zip(year.values, mon.values)], dims='time')
    data = xr.DataArray(df.NPGO.values, dims='time', coords={'time': time})
    ds = xr.Dataset({'NPGO': data, 'year': year, 'mon': mon})

    if year_range is not None:
        nx = np.where((year_range[0] <= year) & (year <= year_range[1]))[0]
        ds = ds.isel(time=nx)

    if apply_ann_filter:
        save_attrs = {v: ds[v].attrs for v in ds.variables}
        N = 12
        ds = ds.rolling(time=N, center=True).mean()
        for v in ds.variables:
            ds[v].attrs = save_attrs[v]
    return ds
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def interp3d(coord_field,ds,new_levels,dim,**kwargs):
    '''kludged function for interpolation

    Interpolate every variable of `ds` that has dimension `dim` onto
    `new_levels` of `coord_field` using metpy's 1-D interpolators.

    coord_field: DataArray of the coordinate to interpolate along
        (same shape as the data variables).
    new_levels: 1-D DataArray of target levels; its dimension name
        replaces `dim` in the output.
    method (kwarg): 'linear' (default) or 'log'.
    '''
    method = kwargs.pop('method','linear')
    if method == 'linear':
        from metpy.interpolate import interpolate_1d
        interp_func = interpolate_1d
    elif method == 'log':
        from metpy.interpolate import log_interpolate_1d
        interp_func = log_interpolate_1d
    # NOTE(review): any other `method` leaves interp_func unset and
    # raises NameError at the first use below.
    newdim = new_levels.dims[0]
    dso = xr.Dataset()
    for v in ds.variables:
        if dim not in ds[v].dims:
            # Variables without `dim` pass through unchanged.
            dso[v] = ds[v]
        else:
            dims_in = ds[v].dims
            # Skip 1-D variables along `dim` (the coordinate itself).
            if len(dims_in) == 1: continue
            interp_axis = dims_in.index(dim)
            dims_out = list(dims_in)
            dims_out[interp_axis] = newdim
            dso[v] = xr.DataArray(
                interp_func(new_levels.values,
                            coord_field.values,ds[v].values,axis=interp_axis),
                dims=dims_out,attrs=ds[v].attrs)
    return dso
#------------------------------------------------------------------------------------
#-- function
#------------------------------------------------------------------------------------
def interp_to_pd(ds):
    '''interpolate onto sigma coordinates

    Interpolates each time level of `ds` onto one hard-coded potential
    density surface (1.026, via the PD variable), then merges the static
    grid variables back in. Returns a time-chunked dataset.
    '''
    sigma = xr.DataArray(np.array([1.026]),dims='sigma')
    # time_bound is treated as a grid var so it is excluded from interp.
    grid_vars = [v for v in ds.variables if 'time' not in ds[v].dims]+['time_bound']
    dso = xr.Dataset()
    for i in range(len(ds.time)):
        print(f'interpolating time level {i+1}')
        dsi = ds.isel(time=i).drop(grid_vars).expand_dims('time')
        dsoi = interp3d(dsi.PD,dsi,sigma,dim='z_t')
        if i > 0:
            dso = xr.concat((dso,dsoi),dim='time')
        else:
            dso = dsoi
    dso = dso.chunk({'time':1})
    #-- put grid variables back
    dso = xr.merge((dso,ds.drop([v for v in ds.variables if v not in grid_vars])))
    return dso
|
994,581 | b34ff480a08a9c64a7cce23a53d7d29e6b9c8f26 | import importlib
from config import settings
# Import each configured engine submodule for its side effects
# (registration); engine names come from project settings.
for engine_name in settings.ENGINES:
    importlib.import_module("." + engine_name, "translators")
# Expose the engine names as this package's public API.
__all__ = settings.ENGINES
|
994,582 | b9bab84ef609a956d16e8d9123a93e61857c10b3 | import asyncio
from Heliotrope.utils.hitomi.common import image_model_generator, image_url_from_image
from Heliotrope.utils.hitomi.hitomi_requester import (
fetch_index,
get_gallery,
get_galleryinfo,
)
from Heliotrope.utils.option import config
from Heliotrope.utils.shuffle import shuffle_image_url
async def info(index: int):
    """Fetch gallery metadata for *index* and shape it into a response
    dict; returns None when the gallery cannot be found."""
    result = await get_gallery(index)
    if not result:
        return None
    url, tags = result
    return {
        "status": 200,
        "title": {"value": tags.title, "url": url},
        "galleryid": index,
        "thumbnail": tags.thumbnail,
        "artist": tags.artist,
        "group": tags.group,
        "type": tags.type_,
        "language": tags.language,
        "series": tags.series,
        "characters": tags.characters,
        "tags": tags.tags,
    }
async def galleryinfo(index: int):
    """Fetch the raw gallery-info model for *index* and flatten it into a
    plain response dict; returns None when nothing is found."""
    model = await get_galleryinfo(index)
    if not model:
        return None
    return {
        "status": 200,
        "language_localname": model.language_localname,
        "language": model.language,
        "date": model.date,
        "files": model.files,
        "tags": model.tags,
        "japanese_title": model.japanese_title,
        "title": model.title,
        "id": model.galleryid,
        "type": model.type_,
    }
async def integrated_info(index: int):
    """Combine galleryinfo and tag data for *index* into one response
    dict; each part is None when its lookup fails.

    Fix: the original unconditionally unpacked the result of
    get_gallery(), which raises TypeError when it returns None (the other
    endpoints guard against this); the result is now checked first.
    """
    galleryinfomodel = await get_galleryinfo(index)
    gallery = await get_gallery(index)
    tags = gallery[1] if gallery else None

    if not galleryinfomodel:
        gi = None
    else:
        gi = {
            "language_localname": galleryinfomodel.language_localname,
            "language": galleryinfomodel.language,
            "date": galleryinfomodel.date,
            "files": galleryinfomodel.files,
            "tags": galleryinfomodel.tags,
            "japanese_title": galleryinfomodel.japanese_title,
            "title": galleryinfomodel.title,
            "id": galleryinfomodel.galleryid,
            "type": galleryinfomodel.type_,
        }
    if not tags:
        ts = None
    else:
        ts = {
            "title": tags.title,
            "artist": tags.artist,
            "group": tags.group,
            "type": tags.type_,
            "language": tags.language,
            "series": tags.series,
            "characters": tags.characters,
            "tags": tags.tags,
        }
    data = {
        "data": [
            {
                "status": 200,
                "galleryinfo": gi,
                "tags": ts,
            }
        ]
    }
    return data
async def list_(num: int):
    """Return page *num* (15 galleries per page) of the index as a list of
    info dicts; None when the page is out of range.

    Fixes: asyncio.wait() on bare coroutines is deprecated (removed in
    3.11) and its `done` set yields results in nondeterministic order;
    asyncio.gather() preserves the page's gallery order.
    """
    index_list = await fetch_index(config)
    # Split the flat index into pages of 15.
    pages = [
        index_list[i * 15 : (i + 1) * 15]
        for i in range((len(index_list) + 15 - 1) // 15)
    ]
    if len(pages) < num + 1:
        return None
    info_list = list(await asyncio.gather(*(info(index) for index in pages[num])))
    return {"status": 200, "list": info_list}
async def images(index: int):
    """Build proxied image URLs for every file of gallery *index*;
    returns None when the gallery info cannot be fetched."""
    model = await get_galleryinfo(index)
    if not model:
        return None
    proxied = []
    for img in image_model_generator(model.files):
        raw_url = image_url_from_image(index, img, True)
        proxied.append(
            {"url": f"https://doujinshiman.ga/v3/api/proxy/{shuffle_image_url(raw_url)}"}
        )
    return proxied
async def index():
    # Thin wrapper: return the raw gallery index list from the source.
    return await fetch_index(config)
|
994,583 | 623caa16a826eb4027b558c2a8767899fa75bf50 | """Third commit
Revision ID: 5fc9173a4088
Revises: de9e12edbc8b
Create Date: 2021-05-26 20:02:36.390851
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5fc9173a4088'
down_revision = 'de9e12edbc8b'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reviews become optional (nullable) going forward.
    op.alter_column('rating', 'teacher_review',
               existing_type=sa.VARCHAR(length=200),
               nullable=True)
    op.alter_column('rating', 'company_review',
               existing_type=sa.VARCHAR(length=200),
               nullable=True)
    # Each work may have at most one rating row.
    # NOTE(review): the constraint name is None (backend auto-named);
    # downgrade's drop_constraint(None, ...) will fail on most backends --
    # consider naming this constraint explicitly.
    op.create_unique_constraint(None, 'rating', ['work_id'])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint by name None will raise on most
    # backends; supply the auto-generated constraint name here.
    op.drop_constraint(None, 'rating', type_='unique')
    # Restore NOT NULL on the review columns.
    op.alter_column('rating', 'company_review',
               existing_type=sa.VARCHAR(length=200),
               nullable=False)
    op.alter_column('rating', 'teacher_review',
               existing_type=sa.VARCHAR(length=200),
               nullable=False)
    # ### end Alembic commands ###
|
994,584 | e4d62ed1c75f7c975b8ed2d5ba0c2c69dd886ec6 | import frappe
from frappe import _
def execute():
    """Patch: re-save every Customer so the demographic bins are
    (re)computed by the Customer save hooks.

    Fix: the caught exception was discarded, hiding the failure reason;
    it is now included in the failure message. The manual loop counter is
    replaced by enumerate.
    """
    print("Patch: Initial-Erstellung Demographie Bins")
    kunden = frappe.db.sql("""SELECT `name` FROM `tabCustomer`""", as_dict=True)
    m_max = len(kunden)
    print("found {0} Kunden".format(m_max))
    for loop, kunde in enumerate(kunden, start=1):
        print("Create {0} of {1}".format(loop, m_max))
        try:
            m = frappe.get_doc("Customer", kunde.name)
            m.save()
        except Exception as err:
            # Surface the actual error instead of swallowing it silently.
            print("{0} failed: {1}".format(kunde.name, err))
    return
|
994,585 | 2f7845ab0f0131b90504883006f3ef6a55b8a473 | from sys import stdout
from time import time
from os import urandom, remove
def write_data_timing(*, fh=None, pth=None, size=1024**3, blocksize=4*1024, pth_remove=True, timeout=60):
    """Write *size* bytes of random data in *blocksize* chunks to *fh* (or
    to a file opened at *pth*), recording cumulative elapsed times.

    Returns the list of elapsed times in seconds, starting with 0.0 and
    one entry per chunk written. Stops early once *timeout* seconds have
    passed. When *pth* was given, the file is closed and (if *pth_remove*)
    deleted afterwards.

    Fix: the file opened from *pth* was leaked when a write raised; it is
    now closed (and removed) in a finally block.
    """
    opened_here = False
    if not fh:
        assert pth, 'provide fh or pth'
        fh = open(pth, 'wb+')
        opened_here = True
    try:
        assert fh.writable(), 'fh not writable'
        stdout.write('writing {0:}MB to {1:}... '.format(size // 1024**2, pth or fh))
        # Write the same random block repeatedly; generation cost is paid once.
        data = urandom(blocksize)
        times = [0.]
        t_start = time()
        for k in range(0, size, blocksize):
            fh.write(data)
            times.append(time() - t_start)
            if time() - t_start > timeout:
                print('timed out after {0:.3f}, wrote {1:}MB'.format(time() - t_start, (k * blocksize) // 1024**2))
                break
        else:
            # Only flush when the full size was written (no timeout).
            fh.flush()
        print('took {0:.3f}s'.format(time() - t_start))
    finally:
        if opened_here:
            fh.close()
            if pth_remove:
                remove(pth)
    return times
|
994,586 | 0a3bb3c1078470a32f79384768abaebf9ccc68a4 | # -*- coding: utf-8 -*-
#!/usr/bin/env python2
"""
Example for you to import this function:
from csv_func import csv_deal
filename = "example.csv"
csv_do = csv_deal(filename)
csv_do.print_row()
data = [[1, 2 ,3], [4, 5, 6]]
csv_do.write_row(data)
"""
import csv
class csv_deal:
    # Minimal CSV helper bound to a single file path.
    # NOTE: Python 2 module (`print row` statement below).
    def __init__(self, filename):
        # Path of the CSV file this instance reads/writes.
        self.filename = filename
    def print_row(self):
        # Print every parsed row (as a list) to stdout.
        f = open(self.filename, 'r')
        for row in csv.reader(f):
            print row
        f.close()
    def write_row(self, data):
        # Overwrite the file with `data`, an iterable of row sequences.
        f = open(self.filename, 'w')
        w = csv.writer(f)
        w.writerows(data)
        f.close()
|
994,587 | 6e932b59ca7b2e52da6a912820f5d1bde3cb743e | from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
# URL routing table (Django 1.x `url()` style); first match wins.
urlpatterns = [
    url(r'^$', include('pages.urls')),                    # site root -> pages app
    url(r'^admin/', include(admin.site.urls)),
    url(r'^webcam/', include('camera.urls')),
    url(r'^summernote/', include('django_summernote.urls')),
    url(r'^dep(?P<id>[0-9]+)/$', include('orgs.urls')),   # department by numeric id
    url(r'^orgs/', include('orgs.urls')),
    url(r'^instagram/', include('instagram.urls')),
    url(r'^machines/', include('machines.urls')),
    url(r'^maps/', include('maps.urls')),
    url(r'^orders/', include('orders.urls')),
    url(r'^page/', include('pages.urls')),
    url(r'^psd/', include('psd.urls')),
    url(r'^reports/', include('reports.urls')),
    url(r'^roads/', include('roads.urls')),
    url(r'^user/', include('users.urls')),
    url(r'^workbook/', include('workbook.urls')),
]
# Serve uploaded media and collected static files (development setup).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
994,588 | 7c6144efb42fed8e77c7b3351bdd6102b56f33b3 |
#Welcome to vastauine
#This is a sample code on Raspberry PI (Client End)
#Feel free to edit this code to meet your requirenment
#READ THE LICENCE BEFORE YOU USE OR REPRODUCE THIS CONTENT
#this is a very basic program and not highly realible more realible versions to be roll out sooner
#Vastauine holds no WARRENTY on use of this Product
# This Program will check for updates if available will ask Shell to update
# Startup banner.
print('******************************************************')
print('---------------Welcome to Vastauine-------------------')
print('______________Download version 1.0____________________')
print('******************************************************')
import os,sys,time,urllib2
def version_check(MaxRetry):
    """Fetch the latest version string from the update server, retrying up
    to MaxRetry+1 times (5 s apart); exits the process when offline."""
    while(MaxRetry >= 0):
        try:
            version=urllib2.urlopen("http://vastauine.com/update/version").read()
            return version
        except Exception:
            print('Internet connectivity Error Retrying in 5 seconds :');print(MaxRetry)
            time.sleep(5)
            MaxRetry=MaxRetry - 1
    # All retries exhausted: give up and terminate the program.
    print('Connect to Internet and Retry Later ')
    print('######--- Closing Vastauine -- ######')
    exit()
def download_vastauine(MaxRetry):
    """Download vastauine.tar via wget, retrying up to MaxRetry+1 times
    (5 s apart). Returns True on success; exits the process after
    exhausting all retries.

    Fix: os.system() does not raise OSError when the command fails -- it
    returns a non-zero exit status -- so the original except branch was
    unreachable and failures were treated as success. The exit status is
    now checked.
    """
    while(MaxRetry >= 0):
        status = os.system("sudo wget http://vastauine.com/vastauine.tar")
        if status == 0:
            return True
        print('Error downloading file Check Internet connection. Retrying Download in 5 Seconds :')
        time.sleep(5)
        MaxRetry = MaxRetry - 1
    print('Connect to Internet and Retry Later ')
    print('######--- Closing Vastauine -- ######')
    exit()
def checkStatus():
    # Function Returns False if no new updates
    # True if Update Available or existing file corrupt
    # 404 if Internet Connectivity Problem
    # NOTE(review): no path below actually returns 404; connectivity
    # failures exit() inside version_check instead.
    print("Checking for system updates . . .")
    version=version_check(5)
    if os.path.exists("vastauine/health.py")== True:
        try:
            # health() reports the locally installed version string.
            from vastauine.health import health
        except ImportError:
            # Installed package is broken: force an update.
            print("Import Error")
            return (True)
        else:
            log=health()
            if log == version:
                print('System is uptodate')
                return (False)
                #os.system('python vastauine/')
            else:
                # We may want to solve problem of Recheck First
                print('Some Thing is not Good < Consider Update>')
                return (True)
    elif os.path.exists("vastauine/health.py")== False:
        print("No pakages currently Installed .. ")
        print("Updating .. ")
        return (True)
    else:
        # Unreachable: os.path.exists is always True or False.
        print("!!!!FATAL ERROR : Some components might have went missing")
        return(False)
def install():
    """Download and unpack a fresh vastauine.tar into ./vastauine.
    Returns True on success, False on any failure.

    Fix: the original `print (OSError + "...")` lines added the exception
    *class* to a str, raising TypeError whenever an error branch was hit;
    the caught exception instance is now formatted into the message.
    """
    # Remove any stale archive from a previous run.
    if os.path.exists("vastauine.tar") == True:
        try:
            os.system("sudo rm vastauine.tar")
        except OSError as err:
            print("{0}".format(err))
            return(False)
        else:
            print("Removed Dust")
    else:
        print("")
    if download_vastauine(5) == True:
        print("Sucessfully downloaded update")
    else:
        return(False)
    try:
        os.system("sudo tar -xf vastauine.tar")
    except OSError as err:
        print("{0} Error occured while extracting files".format(err))
        return(False)
    else:
        print("Device sucessfully updated")
    # Clean up the downloaded archive.
    try:
        os.system("sudo rm vastauine.tar")
    except OSError as err:
        print("{0} Error removing addational packages".format(err))
        return(False)
    else:
        print("Device sucessfully updated")
        return (True)
def update():
    """Remove any existing ./vastauine install, then run install().
    Returns install()'s True/False; exits the process if removal fails.

    Fix: `print (OSError + "...")` raised TypeError (exception class plus
    str) whenever the removal error branch was hit; the caught exception
    instance is now formatted into the message. The duplicated install
    logic of the original if/else branches is collapsed into one path
    (both branches ran the identical code).
    """
    if os.path.exists("vastauine/") == True:
        try:
            os.system("sudo rm -r vastauine")
        except OSError as err:
            print("{0} Error occured while removing vastauine".format(err))
            exit()
        else:
            print("Sucessfully removed existing version")
    install_status = install()
    if install_status == True:
        return (True)
    elif install_status == False:
        return (False)
    else:
        print("Unknown Error")
def initilization():
    # Entry point: check for updates, install them when needed, then hand
    # control to the installed package. (Name is a typo of
    # "initialization" but is called as-is below.)
    log_status=checkStatus()
    if log_status == True :
        log_update=update()
        if log_update == True:
            print('Sucessfull updated initilization')
        elif log_update == False:
            print('Update Failed')
        else:
            print('Unknown error Occured')
    elif log_status == False :
        print("No new update Available")
        from vastauine import communication
        # NOTE(review): start() is not defined in this file -- presumably
        # provided by the vastauine package import above; confirm before
        # relying on this path.
        start()
        #communication()
    else:
        print('Unknown error occured')
initilization()
|
994,589 | 0c3916938e709df901c940a6741e7e5262c4d856 | from Defs import Defs
class Board:
    """Tetris-style play field: 0 = empty, 1 = floor/filled, 9 = side wall."""
    def __init__(self):
        # Geometry (rows/cols) comes from the shared Defs config object.
        self.defs = Defs()
    def new_board(self):
        # Build a fresh board: empty interior, a solid bottom row of 1s,
        # and 9s on the left/right edge of every row.
        board = [ [ 0 for x in range(self.defs.cols) ]
                    for y in range(self.defs.rows - 1) ]
        board += [[ 1 for x in range(self.defs.cols)]]
        for i in range(len(board)):
            board[i][0] = 9
            board[i][len(board[i]) - 1] = 9
        return board
    def check_filled_rows(self, board):
        # Clear the first fully-filled row (excluding the bottom floor row),
        # shift the rows above it down, and return (board, score_delta).
        #
        # NOTE(review): `backb = list(board)` is a shallow copy, so after
        # `board[j+1] = backb[j]` rows 0 and 1 reference the SAME list
        # object -- likely why the workaround below is commented out.
        # Also, the cleared row's edges are set to 1 here, while new_board
        # uses 9 for walls -- confirm which is intended.
        yaaay = 0   # score increment: 30 per cleared row
        for y, row in enumerate(board):
            pos = 0
            for x, val in enumerate(row):
                if val:
                    pos += 1
            # A row counts as filled when every cell (walls included) is
            # non-zero; the bottom floor row (y == len(board)-1) is skipped.
            if pos == (len(row)) and y != len(board) - 1:
                backb = list(board)
                board[y] = [ 0 for x in range(self.defs.cols)]
                board[y][0] = 1
                board[y][len(row) - 1] = 1
                for j in range(0, y):
                    board[j+1] = backb[j]
                board[len(board) - 1] = [ 1 for x in range(self.defs.cols)]
                yaaay = 30
                break
        # board[0] = [ 0 for x in range(self.defs.cols)]
        # board[0][0] = 1
        # board[0][len(board)+1] = 1
        return board, yaaay
994,590 | 913dc7f8749aa913620d2fe5825a0ed50ec86366 | from session_directory import get_session
import data_preprocessing as d_pp
from microscoPy_load.ff_video_fixer import load_session
from helper_functions import find_closest, ismember
import numpy as np
import matplotlib.pyplot as plt
import microscoPy_load.cell_reg as cell_reg
from scipy.stats import pearsonr, spearmanr
import microscoPy_load.calcium_events as ca_events
from scipy.stats.mstats import zscore
def time_lapse_corr(mouse, session, ref_session='FC', bin_size=1,
                    slice_size=60, ref_mask_start=None, plot_flag=True,
                    ref_indices=None, ref_neurons=None, corr=pearsonr,
                    active_all_days=True, B=1000):
    """
    Takes the reference session and computes the average event rate for
    each cell during that session. Then correlate those rates to rates
    during a session of interest, taken from progressive slices.

    Parameters
    ---
    mouse: string, mouse name.
    session: string, session name.
    ref_session: string, session name for the reference, usually the
        fear conditioning session.
    bin_size: scalar, size of bin, in seconds.
    slice_size: scalar, size of slices of sessions, in seconds.
    ref_mask_start: 'pre_shock', 'post_shock', or None; epoch of the
        reference session used for the reference rate vector.
    plot_flag: boolean, whether to plot correlation vector.
    ref_indices: 'homecage1'/'homecage2' or None; alternative epoch
        selection (mutually exclusive with ref_mask_start).
    ref_neurons: optional array of reference-session neuron indices to
        restrict the comparison to.
    corr: correlation function returning (r, p), e.g. scipy pearsonr.
    active_all_days: passed to trim_match_map (cells active both days).
    B: number of shuffle iterations for the null distribution.

    Returns
    ---
    (correlations, ref_event_rates, event_rates, shuffles)
    """
    session_index = get_session(mouse, (ref_session, session))[0]

    # If ref_mask_start is a scalar, clip the time series starting from
    # the specified timestamp.
    ff_ref = load_session(session_index[0])
    data, t = ca_events.load_events(session_index[0])
    data[data > 0] = 1   # binarize calcium events

    if ref_mask_start == 'pre_shock':
        # NOTE(review): start_idx uses the LAST in-cage frame here, same
        # as 'post_shock' below, with a hard-coded end frame of 698 --
        # confirm this is the intended pre-shock window.
        ref_mask = np.zeros(ff_ref.mouse_in_cage.shape, dtype=bool)
        start_idx = np.where(ff_ref.mouse_in_cage)[0][-1]
        end_idx = 698
        ref_mask[start_idx:end_idx] = True
    elif ref_mask_start == 'post_shock':
        ref_mask = np.zeros(ff_ref.mouse_in_cage.shape, dtype=bool)
        start_idx = 698
        end_idx = np.where(ff_ref.mouse_in_cage)[0][-1]
        ref_mask[start_idx:end_idx] = True
    else:
        ref_mask = None

    if ref_indices is not None:
        assert ref_mask is None, "ref_mask_start must be None to use this feature"
        ref_mask = np.zeros(ff_ref.mouse_in_cage.shape, dtype=bool)
        if ref_indices == 'homecage1':
            # Everything before the mouse first enters the chamber.
            end_idx = np.where(ff_ref.mouse_in_cage)[0][0]
            ref_mask[:end_idx] = True
        elif ref_indices == 'homecage2':
            # Everything after the mouse last leaves the chamber.
            start_idx = np.where(ff_ref.mouse_in_cage)[0][-1]
            ref_mask[start_idx:] = True

    # Cross-session cell registration; keep only matched cells.
    map = cell_reg.load_cellreg_results(mouse)
    trimmed_map = cell_reg.trim_match_map(map, session_index,
                                          active_all_days=active_all_days)
    if ref_neurons is None:
        ref_neurons = trimmed_map[:,0]
        neurons = trimmed_map[:,1]
    else:
        # Restrict to the caller-supplied reference neurons.
        in_there, idx = ismember(trimmed_map[:, 0], ref_neurons)
        ref_neuron_rows = idx[in_there]
        neurons = trimmed_map[ref_neuron_rows, 1]
        ref_neurons = trimmed_map[ref_neuron_rows, 0]
    assert len(neurons) == len(np.unique(neurons)), 'Error.'

    # Get average event rates from the reference session.
    ref_event_rates = d_pp.get_avg_event_rate(mouse, ref_session,
                                              data=data, t=t,
                                              session=ff_ref,
                                              bin_size=bin_size,
                                              mask=ref_mask,
                                              neurons=ref_neurons)
    # if z:
    #     ref_event_rates = zscore(ref_event_rates)

    # Load other session.
    ff_session = load_session(session_index[1])
    data, t = ca_events.load_events(session_index[1])
    data[data > 0] = 1

    # Get indices for when the mouse is in the chamber, then slice them.
    # slice_size is in seconds; *20 assumes a 20 Hz frame rate -- confirm.
    in_cage = np.where(ff_session.mouse_in_cage)[0]
    bins = d_pp.make_bins(in_cage, slice_size*20)
    binned_in_cage = d_pp.bin_time_series(in_cage, bins)

    # Make slice masks.
    masks = np.zeros((len(binned_in_cage),
                      len(ff_session.mouse_in_cage)), dtype=bool)
    for i, indices in enumerate(binned_in_cage):
        masks[i,indices] = True

    # Per-slice average event rates for the matched cells.
    event_rates = np.zeros((masks.shape[0], len(neurons)))
    for i, mask in enumerate(masks):
        event_rates[i,:] = d_pp.get_avg_event_rate(mouse, session,
                                                   data=data, t=t,
                                                   session=ff_session,
                                                   bin_size=bin_size,
                                                   mask=mask,
                                                   neurons=neurons)
    # Unmatched cells (index -1) and non-finite rates contribute zero.
    event_rates[:, neurons==-1] = 0
    event_rates[~np.isfinite(event_rates)] = 0
    # if z:
    #     event_rates[i,:] = zscore(event_rates[i,:])

    # Null distribution: correlate cell-shuffled slice vectors B times.
    correlations = np.zeros((len(event_rates)))
    shuffles = []
    for iteration in range(B):
        placeholder = np.empty_like(correlations)
        for i, vector in enumerate(event_rates):
            placeholder[i] = corr(np.random.permutation(vector), ref_event_rates)[0]
        shuffles.append(placeholder)
    shuffles = np.vstack(shuffles)

    # Observed slice-by-slice correlations to the reference vector.
    for i, vector in enumerate(event_rates):
        correlations[i] = corr(vector, ref_event_rates)[0]
    # Drop the last slice when it covers less than half a full slice.
    if len(binned_in_cage[-1]) < len(binned_in_cage[0])/2:
        correlations[-1] = np.nan

    if plot_flag:
        plt.plot(correlations)
        plt.show()

    return correlations, ref_event_rates, event_rates, shuffles
def session_corr(mouse, session, ref_session='FC', corr=pearsonr):
    """
    Correlate whole-session average event rates of cells matched between
    `ref_session` and `session` for one mouse.

    Returns (correlation, pvalue) as produced by `corr`.
    """
    idx = get_session(mouse, (ref_session, session))[0]
    # Column 0 of the trimmed map indexes the reference session, column 1
    # the comparison session.
    match_map = cell_reg.trim_match_map(cell_reg.load_cellreg_results(mouse), idx)
    rates = [
        d_pp.get_avg_event_rate(mouse, s, neurons=match_map[:, col])
        for col, s in enumerate((ref_session, session))
    ]
    return corr(rates[0], rates[1])
def sort_PVs(mouse, session, ref_session='FC', bin_size=1,
             slice_size=60, ref_mask_start=None, plot_flag=True,
             corr=pearsonr):
    """
    Bar-plot population vectors: the reference session's average event
    rates on top, then one panel per time slice of `session`, with cells
    ordered by their reference-session activity.

    Bug fix: time_lapse_corr returns FOUR values (correlations,
    ref_event_rates, event_rates, shuffles); the previous 3-way unpack
    raised ValueError on every call.

    Note: `plot_flag` is accepted for signature compatibility but unused.
    """
    _, ref_event_rates, event_rates, _ = time_lapse_corr(
        mouse, session,
        ref_session=ref_session,
        bin_size=bin_size,
        slice_size=slice_size,
        ref_mask_start=ref_mask_start,
        plot_flag=False, corr=corr)
    # Sort cells by activity in the reference session, then reorder the
    # slice-wise event rates to match that ordering.
    neurons = np.arange(len(ref_event_rates))
    order = np.argsort(ref_event_rates)
    event_rates = event_rates[:, order]
    n_slices = event_rates.shape[0]
    f, axs = plt.subplots(n_slices, figsize=(3, 30), sharey=True)
    # Top panel: reference session, sorted.
    axs[0].bar(neurons, ref_event_rates[order])
    axs[0].tick_params(axis='x', which='both', bottom=False, top=False,
                       labelbottom=False)
    # One panel per slice (last slice dropped, as before); only the bottom
    # panel keeps its x axis.
    for i, vector in enumerate(event_rates[:-1]):
        ax = axs[i + 1]
        ax.bar(neurons, vector)
        if i + 1 != n_slices - 1:
            ax.tick_params(axis='x', which='both', bottom=False, top=False,
                           labelbottom=False)
        else:
            ax.set_xlabel('Cell #')
            ax.set_xticks([0, np.max(neurons)])
    f.show()
if __name__ == '__main__':
sort_PVs('Mundilfari','RE_1',slice_size=30) |
994,591 | 6d72981084b62dda71573afd0a4eafc0383b9bd9 | #Write a Python GUI program to create three push buttons using Tkinter. The
#background color of frame should be different when different buttons are clicked
from tkinter import *
from random import choice
top=Tk()
top.title("BG Color")
C=Canvas (top, height=250, width=400)
button1 = Button (top, text = "Red", anchor = W, command=lambda: C.configure(bg="red"))
button1.configure (width = 10, activebackground = "red", relief = FLAT)
button2 = Button (top, text = "Blue", anchor = W, command=lambda: C.configure(bg="blue"))
button2.configure (width = 10, activebackground = "blue", relief = FLAT)
button3 = Button (top, text = "Green", anchor = W, command=lambda: C.configure(bg="green"))
button3.configure(width = 10, activebackground = "green", relief = FLAT)
button1_window = C.create_window(10, 10, anchor=NW, window=button1)
button2_window = C.create_window(50, 10, anchor=NW, window=button2)
button3_window = C.create_window(100, 10, anchor=NW, window=button3)
C.pack()
top.mainloop() |
994,592 | 3cd0eaab3243358df8d0a889b17658662d67bba5 | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
#
# This code was in part derived from the python-magic library:
# The MIT License (MIT)
#
# Copyright (c) 2001-2014 Adam Hupp
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os.path
import ctypes
from commoncode import system
from commoncode import command
"""
magic2 is minimal and specialized wrapper around a vendored libmagic file
identification library. This is NOT thread-safe. It is based on python-magic
by Adam Hup and adapted to the specific needs of ScanCode.
"""
data_dir = os.path.join(os.path.dirname(__file__), 'data')
bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
# path to vendored magic DB, possibly OS-specific
basemag = os.path.join(data_dir, 'magic')
# keep the first which is the most specific directory
magdir = command.get_base_dirs(basemag)[0]
magic_db = os.path.join(magdir, 'magic.mgc')
#
# Cached detectors
#
detectors = {}
# libmagic flags
MAGIC_NONE = 0
MAGIC_MIME = 16
MAGIC_MIME_ENCODING = 1024
MAGIC_NO_CHECK_ELF = 65536
MAGIC_NO_CHECK_TEXT = 131072
MAGIC_NO_CHECK_CDF = 262144
DETECT_TYPE = MAGIC_NONE
DETECT_MIME = MAGIC_NONE | MAGIC_MIME
DETECT_ENC = MAGIC_NONE | MAGIC_MIME | MAGIC_MIME_ENCODING
def file_type(location):
    """
    Return the detected filetype for the file at `location` or an empty
    string if nothing was found or an error occurred.
    """
    try:
        return _detect(location, DETECT_TYPE)
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. (Also dropped the stray 4th quote that leaked
        # into the docstring.)
        # TODO: log errors
        return ''
def mime_type(location):
    """
    Return the detected mimetype for the file at `location` or an empty
    string if nothing was found or an error occurred.
    """
    try:
        return _detect(location, DETECT_MIME)
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        # TODO: log errors
        return ''
def encoding(location):
    """
    Return the detected encoding for the file at `location` or an empty
    string. Raise an exception on errors.
    """
    # Fixed: stray 4th quote that leaked a literal '"' into the docstring.
    return _detect(location, DETECT_ENC)
def _detect(location, flags):
    """
    Return the detected type using `flags` for the file at `location`, as a
    whitespace-normalized ASCII string (empty if nothing was detected).
    Raise an exception on errors. Detectors are cached per flag set.
    """
    if flags not in detectors:
        detectors[flags] = Detector(flags=flags)
    raw = detectors[flags].get(location) or ''
    # libmagic returns bytes; keep printable ASCII and collapse whitespace.
    text = raw.decode('ascii', 'ignore')
    return ' '.join(text.split())
class MagicException(Exception):
    """Raised when a libmagic call reports an error (see check_error)."""
    pass
class Detector(object):
    """
    Stateful wrapper around one libmagic "cookie" configured with a given
    flag set and magic database. Not thread-safe.
    """
    def __init__(self, flags, magic_file=magic_db):
        """
        Create a new libmagic detector.
        flags - the libmagic flags
        magic_file - use a magic database other than the vendored default
        """
        self.flags = flags
        self.cookie = _magic_open(self.flags)
        _magic_load(self.cookie, magic_file)
    def get(self, location):
        """
        Return the magic type info from a file at `location`. The value
        returned depends on the flags passed to the object. If this fails,
        attempt to get it using a UTF-8-encoded location or from loading the
        first 16K of the file. Raise a MagicException on error.
        """
        assert location
        try:
            # first use the path as is
            return _magic_file(self.cookie, location)
        except:
            # then try to get a utf-8 encoded path: Rationale:
            # https://docs.python.org/2/library/ctypes.html#ctypes.set_conversion_mode ctypes
            # encode strings to byte as ASCII or MBCS depending on the OS. The
            # location string may therefore be mangled and the file not accessible
            # anymore by libmagic in some cases.
            # NOTE(review): these bare excepts also swallow KeyboardInterrupt/
            # SystemExit; consider narrowing to `except Exception`.
            try:
                uloc = location.encode('utf-8')
                return _magic_file(self.cookie, uloc)
            except:
                # if all fails, read the start of the file instead
                with open(location) as fd:
                    buf = fd.read(16384)
                return _magic_buffer(self.cookie, buf, len(buf))
    def __del__(self):
        """
        During interpreter shutdown magic_close may have been cleared already,
        so make sure it exists before using it.
        """
        if self.cookie and _magic_close:
            _magic_close(self.cookie)
def load_lib():
    """
    Return the loaded libmagic shared library object from vendored paths.
    Raise ImportError if the shared object cannot be found or loaded.
    """
    root_dir = command.get_base_dirs(bin_dir)[0]
    _bin_dir, lib_dir = command.get_bin_lib_dirs(root_dir)
    magic_so = os.path.join(lib_dir, 'libmagic' + system.lib_ext)
    # add lib path to the front of the PATH env var so that dependent shared
    # objects/DLLs next to libmagic are found at load time
    new_path = os.pathsep.join([lib_dir, os.environ['PATH']])
    os.environ['PATH'] = new_path
    if os.path.exists(magic_so):
        lib = ctypes.CDLL(magic_so)
        # lib._name is the path the library was actually loaded from
        if lib and lib._name:
            return lib
    raise ImportError('Failed to load libmagic from %(magic_so)r' % locals())
# Main ctypes proxy
libmagic = load_lib()
def check_error(result, func, args):  # func unused: fixed ctypes errcheck signature
    """
    ctypes errcheck hook: raise MagicException with libmagic's error text
    when a call failed, otherwise pass `result` through unchanged.
    """
    failed = (result is None
              or result < 0
              or str(result).startswith('cannot open'))
    if failed:
        raise MagicException(_magic_error(args[0]))
    return result
# ctypes functions aliases.
_magic_open = libmagic.magic_open
_magic_open.restype = ctypes.c_void_p
_magic_open.argtypes = [ctypes.c_int]
_magic_close = libmagic.magic_close
_magic_close.restype = None
_magic_close.argtypes = [ctypes.c_void_p]
_magic_error = libmagic.magic_error
_magic_error.restype = ctypes.c_char_p
_magic_error.argtypes = [ctypes.c_void_p]
_magic_file = libmagic.magic_file
_magic_file.restype = ctypes.c_char_p
_magic_file.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_magic_file.errcheck = check_error
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = ctypes.c_char_p
_magic_buffer.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
_magic_buffer.errcheck = check_error
_magic_load = libmagic.magic_load
_magic_load.restype = ctypes.c_int
_magic_load.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_magic_load.errcheck = check_error
|
994,593 | 85836cbfcd508df10d27de76d57aed50565ca528 | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import base64
from datetime import datetime
import shutil
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
import utils
from keras.models import load_model
import argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
parser = argparse.ArgumentParser()
parser.add_argument('--ip', type=str,
help='Enter IP address for socket', default = '0.0.0.0')
parser.add_argument('--min_speed', type=int,
help='Enter Minimum Speed of Car', default = 10)
parser.add_argument('--max_speed', type=int,
help='Enter Maximum Speed of Car', default = 25)
parser.add_argument('--path', type=str,
help='Enter path to saved model file', default = './model.h5')
args = parser.parse_args()
path = args.path
ip = args.ip
MAX_SPEED = args.max_speed
MIN_SPEED = args.min_speed
speed_limit = MAX_SPEED
model = load_model(path)
sio = socketio.Server()
app = Flask(__name__)
def send_control(steering_angle, throttle):
    """Push a steering/throttle command to the simulator over socketio.

    Values are stringified because the simulator expects string fields.
    """
    payload = {
        'steering_angle': str(steering_angle),
        'throttle': str(throttle),
    }
    sio.emit("steer", data=payload, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
    # On a new simulator connection: log the session id and send a neutral
    # command (zero steering, zero throttle) so telemetry starts flowing.
    print("connect ", sid)
    send_control(0, 0)
@sio.on('telemetry')
def telemetry(sid, data):
    """Per-frame simulator callback: decode the camera image, run the Keras
    model, and send the predicted steering angle and throttle back.

    `data` carries 'steering_angle', 'throttle' and 'speed' as strings plus
    a base64-encoded 'image'. An empty payload switches to manual mode.
    """
    if data:
        steering_angle = float(data["steering_angle"])
        throttle = float(data["throttle"])
        speed = float(data["speed"])
        #print (steering_angle, throttle, speed)
        image = Image.open(BytesIO(base64.b64decode(data["image"])))
        try:
            image = np.asarray(image)
            image = utils.process(image)
            # Normalise to [0, 1] and add a batch dimension.
            image = image/255.0
            image = np.array([image])
            # The model predicts two outputs: steering angle and throttle.
            steering_angle,throttle = model.predict(image, batch_size=1)
            steering_angle=float(steering_angle)
            throttle =float(throttle)
            # global speed_limit
            # if speed > speed_limit:
            # speed_limit = MIN_SPEED # slow down
            # else:
            # speed_limit = MAX_SPEED
            # throttle = 1.0 - ( (steering_angle)**2 ) - ( (speed/speed_limit)**2 )
            #throttle = 1.0
            # Above the speed cap, override the predicted throttle to slow
            # down proportionally to steering and speed.
            # NOTE(review): speed_limit stays at MAX_SPEED since the code
            # above that adjusted it is commented out — confirm intended.
            if speed>=MAX_SPEED:
                throttle=1.0-( (steering_angle)**2 ) - ( (speed/speed_limit)**2 )
            print('{} {} {}'.format(steering_angle, throttle, speed))
            send_control(steering_angle, throttle)
        except Exception as e:
            print(e)
    else:
        sio.emit('manual', data={}, skip_sid=True)
app = socketio.Middleware(sio, app)
eventlet.wsgi.server(eventlet.listen((ip, 4567)), app) |
994,594 | 82929b0c9a35ea543d843826ad4bcf3bb1cb2f89 | from socket import *
serverIP = "127.0.0.1"
serverPort = 12345
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverIP, serverPort))
req = "GET /" + "hello.html" + " HTTP/1.1\r\n\r\n"
clientSocket.send(req.encode())
rec=""
rec2 = clientSocket.recv(1024).decode()
while True:
if len(rec2) != 0:
rec2 = clientSocket.recv(1024).decode()
rec += rec2
else:
break
print(rec)
print("socket's closing")
clientSocket.close() |
994,595 | 1806693428d861e136f3b734ec7b5c89608acf91 | import numpy as np
import random
import os
GAME_SIZE = 8
START_POS = [1,1]
END_POS = [GAME_SIZE - 1, GAME_SIZE - 1]
def clear():
    """Clear the terminal: ``cls`` on Windows, ``clear`` elsewhere."""
    _ = os.system("cls" if os.name == "nt" else "clear")
class Game():
    """Simple grid maze on a GAME_SIZE x GAME_SIZE board.

    Cell codes in map_array: 0 empty, 1 player, 8 wall, 9 goal,
    99 crash marker left behind after walking into a non-empty cell.
    """

    # (dx, dy) grid deltas for each action in action_space. Replaces the
    # four near-identical if/elif branches the original move() used.
    _DELTAS = {
        "UP": (0, -1),
        "DOWN": (0, 1),
        "LEFT": (-1, 0),
        "RIGHT": (1, 0),
    }

    def __init__(self):
        self.map_array = np.zeros((GAME_SIZE, GAME_SIZE))
        self.action_space = ["UP", "DOWN", "LEFT", "RIGHT"]
        self.player_pos = []
        # Pristine copy of the generated map, restored by reset().
        self.map_temp = []

    def buid_default_map(self):
        """Generate a walled border with random interior walls, place the
        player at START_POS and the goal near the opposite corner.

        (Name kept for backward compatibility; see the correctly spelled
        `build_default_map` alias below.)
        """
        self.player_pos = list(START_POS)
        player_x, player_y = self.player_pos
        for x in range(GAME_SIZE):
            for y in range(GAME_SIZE):
                if y == 0 or y == GAME_SIZE - 1 or x == 0 or x == GAME_SIZE - 1:
                    self.map_array[y][x] = 8
                else:
                    # 1-in-5 chance of an interior wall.
                    self.map_array[y][x] = random.choice([0, 0, 0, 0, 8])
        self.map_array[player_y][player_x] = 1
        self.map_array[GAME_SIZE - 2][GAME_SIZE - 2] = 9
        self.map_temp = np.array(self.map_array)

    # Correctly spelled alias; the original (typo'd) name stays for callers.
    build_default_map = buid_default_map

    def reset(self):
        """Restore the player to START_POS and the map to its pristine copy."""
        self.player_pos = list(START_POS)
        self.map_array = np.array(self.map_temp)

    def show(self):
        """Render the map to the terminal with one glyph per cell."""
        clear()
        symbols = {1: "◆", 8: "■", 9: "●", 99: "※"}
        for row in self.map_array:
            for cell in row:
                print(symbols.get(cell, "□"), end="")
            print("\r")

    def get_reward(self):
        """Return (status, reward) for the player's current cell:
        goal -> ("done", 1); wall -> ("done", -1); otherwise ("move", 0).
        """
        player_x, player_y = self.player_pos
        entity = self.map_array[player_y][player_x]
        if entity == 9:
            return "done", 1
        if entity == 8:
            return "done", -1
        return "move", 0

    def move(self, action):
        """Apply `action`. Moving into an empty cell relocates the player
        marker; moving into anything else leaves a crash marker (99) at the
        old cell. player_pos is updated either way, matching the original.
        Unknown actions are ignored (the original elif chain fell through).
        """
        delta = self._DELTAS.get(action)
        if delta is None:
            return
        x, y = self.player_pos
        nx, ny = x + delta[0], y + delta[1]
        self.player_pos = [nx, ny]
        if self.map_array[ny][nx] == 0:
            self.map_array[y][x] = 0
            self.map_array[ny][nx] = 1
        else:
            self.map_array[y][x] = 99
994,596 | a4013e5bdac2e4ff5b5b5020c19cf59322107c83 | print("hello view") |
994,597 | e25fa02261c22dd7e44223faf04931c6c8a95cec | from django.apps import AppConfig
class VisualadminConfig(AppConfig):
    """Django application configuration for the `visualAdmin` app."""
    name = 'visualAdmin'
994,598 | ff29168da30024b6dd5e3ca57de674412fb0a371 | from sympy import *
from abc import ABC, abstractmethod
from spec.time import timifyVar
from spec.contract import *
import spec.conf
# Some motion primitives have parameters, we represent that with a factory.
# Given some concrete value for the parameters we get a motion primitive.
# Currently the values should be concrete, formula not yet supported
class MotionPrimitiveFactory(ABC):
    """Factory for parameterised motion primitives.

    Instantiating a factory registers it with its component. Subclasses
    implement setParameters to turn concrete parameter values into a
    MotionPrimitive. (Formulas are not yet supported as parameter values.)
    """

    def __init__(self, component):
        self._component = component
        # Registration side effect: the component tracks its factories.
        component.addMotionPrimitive(self)

    def name(self):
        """Default name: the concrete subclass name."""
        return type(self).__name__

    def parameters(self):
        """Names of the factory's parameters; none by default."""
        return []

    @abstractmethod
    def setParameters(self, args):
        """Return a MotionPrimitive built from concrete parameter values."""
        pass
class MotionPrimitive(AssumeGuaranteeContract):
    """An assume-guarantee contract for one motion primitive of a component.

    Extends the base contract's well-formedness check with footprint (FP)
    verification conditions: the component's resources must lie inside the
    motion primitive's pre/inv/post footprints.
    """
    def __init__(self, name, component):
        super().__init__(name)
        self._component = component
    def name(self):
        # NOTE(review): returns self._name, which __init__ here never sets —
        # presumably AssumeGuaranteeContract.__init__ assigns it; confirm.
        return self._name
    def components(self):
        return {self._component}
    def timify(self, pred):
        # Substitute each component variable with its timed counterpart.
        time = { var: timifyVar(var) for var in self._component.variables() }
        return pred.subs(time)
    def modifies(self):
        '''some motion primitive (like idle) does not change all the variables'''
        return self._component.ownVariables()
    def wellFormed(self, extra = ExtraInfo()):
        # NOTE(review): mutable default argument `ExtraInfo()` is shared
        # across calls — safe only if ExtraInfo is never mutated; verify.
        vcs = super().wellFormed(extra)
        if spec.conf.enableFPCheck and spec.conf.enableMPincludeFPCheck:
            # checks that the component's FP is contained in the motion
            # primitive's FP, at pre, inv and post.
            # NOTE(review): `self.name` is used as a string here although a
            # name() method is defined above — works only if the base class
            # shadows it with a string attribute; confirm.
            prefix = self.name + " well-formed: contains component FP "
            # Symbolic query point ranged over the configured world box.
            px, py, pz = symbols('inFpX inFpY inFpZ')
            frame = self.frame()
            point = frame.origin.locate_new("inFp", px * frame.i + py * frame.j + pz * frame.k )
            pointDomain = And(px >= spec.conf.minX, px <= spec.conf.maxX,
                              py >= spec.conf.minY, py <= spec.conf.maxY,
                              pz >= spec.conf.minZ, pz <= spec.conf.maxZ)
            #pre: a point in the component's resources but outside preFP is
            # unsatisfiable.
            pre = And(pointDomain,
                      self._component.invariantG(),
                      self.preA(),
                      self.preG(),
                      Not(self.preFP(point)),
                      extra.pre,
                      extra.always)
            vcs.append( VC(prefix + "pre", [And(pre, self._component.abstractResources(point)),
                                            And(pre, self._component.ownResources(point))]) )
            #inv: same check on the (de-timified) invariant.
            # no quantification over time for the moment
            assert(self.isInvTimeInvariant())
            inv = And(pointDomain,
                      self._component.invariantG(),
                      self.deTimifyFormula(self.invA()),
                      self.deTimifyFormula(self.invG()),
                      Not(self.deTimifyFormula(self.invFP(point))),
                      self.deTimifyFormula(extra.inv),
                      extra.always)
            vcs.append( VC(prefix + "inv", [And(inv, self._component.abstractResources(point)),
                                            And(inv, self._component.ownResources(point))]) )
            # post: same check on the postcondition footprint.
            post = And(pointDomain,
                       self._component.invariantG(),
                       self.postA(),
                       self.postG(),
                       Not(self.postFP(point)),
                       extra.post,
                       extra.always)
            vcs.append( VC(prefix + "post", [And(post, self._component.abstractResources(point)),
                                             And(post, self._component.ownResources(point))]) )
        return vcs
|
994,599 | 2db44bcdb5d23331b3837dff79f874f937df504b | import signal
import asyncio
from pathlib import Path
from sys import stderr
from pika import BasicProperties
from pika.exceptions import UnroutableError, AMQPConnectionError, ChannelClosed, ChannelWrongStateError
import json
from collector.brdige import BLiveDMBridge
from mylib.mq import connect_message_queue, queue_name
from mylib.constants import BODY_ADDON_KEY_ROOM_ID
from collector.args import args
def danmaku_filter(*args):
    # Default pass-through filter: accept every message. Rebound at import
    # time when --filter names a module that defines a `filter` function.
    return True
if args.filter is not None:
from importlib.util import spec_from_file_location, module_from_spec
from pathlib import Path
from os.path import splitext
import sys
f = Path(args.filter).absolute()
if not f.is_file():
print("过滤器文件不存在: " + f.as_posix())
exit(1)
sys.path.insert(0, f.parent.as_posix())
spec = spec_from_file_location(splitext(f.name)[0], f.as_posix())
mdl = module_from_spec(spec)
sys.modules["danmaku_filter"] = mdl
spec.loader.exec_module(mdl)
danmaku_filter = getattr(mdl, 'filter')
if danmaku_filter is None:
print("过滤器文件错误,需要定义filter函数")
exit(1)
def serialize_class(instance):
    """Coerce `instance` into a plain dict.

    Plain dicts pass through unchanged; objects with a __dict__ are turned
    into an attribute->value dict. Anything else is printed for debugging
    and rejected with an Exception.
    """
    if type(instance) is dict:
        return instance
    if not hasattr(instance, '__dict__'):
        print(type(instance))
        print(instance)
        raise Exception("serialize_class:不知道是什么:" + str(type(instance)))
    return {attr: value for attr, value in vars(instance).items()}
lock = asyncio.Lock()
async def create_log(room_id, kind, body):
    """Serialize one danmaku event and publish it persistently to the
    RabbitMQ queue for `kind`, reconnecting forever on connection loss.

    room_id is stamped into the payload under BODY_ADDON_KEY_ROOM_ID.
    """
    global rmq
    body = serialize_class(body)
    body[BODY_ADDON_KEY_ROOM_ID] = room_id
    content = json.dumps(body, ensure_ascii=False, check_circular=False).encode('utf8')
    tryies = 0
    while True:
        try:
            # delivery_mode=2 marks the message persistent; mandatory=True
            # makes unroutable messages raise UnroutableError.
            rmq.basic_publish(exchange='',
                              routing_key=queue_name(kind),
                              body=content,
                              properties=BasicProperties(content_type='application/json',
                                                         content_encoding='utf-8',
                                                         delivery_mode=2),
                              mandatory=True)
        except UnroutableError:
            # Message dropped by the broker; log and give up on this one.
            print(f'message was rejected: {content}', file=stderr)
        except (AMQPConnectionError, ChannelClosed, ConnectionResetError, ChannelWrongStateError) as error:
            # Connection-level failure: back off, reconnect under the shared
            # lock (only one coroutine rebuilds rmq), then retry publishing.
            tryies += 1
            print(f'connection lost: {error}, try to reconnect ({tryies} times)...')
            await asyncio.sleep(1)
            async with lock:
                if rmq.is_closed:
                    rmq = connect_message_queue(args.server, args.cacert)
            continue
        except Exception as e:
            # Anything else is unrecoverable for this process.
            print("<FATAL> publish rabbitmq failed:", type(e), e)
            exit(1)
        break
rmq = connect_message_queue(args.server, args.cacert)
clients = []
async def run(room_id):
    """Open a danmaku bridge for one live room and run it until closed."""
    print(f'连接直播间:{room_id}')
    # Kept in the module-level list so stop_all() can close every client.
    client = BLiveDMBridge(room_id, callback=create_log, dm_filter=danmaku_filter)
    clients.append(client)
    await client.start()
async def stop_all():
    """Close every room client, then the RabbitMQ connection."""
    for client in clients:
        await client.close()
    rmq.close()
def signal_handler(*args):
    # SIGINT/SIGTERM hook: tear everything down.
    # NOTE(review): run_until_complete raises RuntimeError if the loop is
    # already running when the signal fires — confirm intended behavior.
    asyncio.get_event_loop().run_until_complete(stop_all())
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
tasks = []
for room_id in args.rooms:
tasks.append(asyncio.ensure_future(run(room_id)))
asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))
except KeyboardInterrupt:
pass
asyncio.get_event_loop().run_until_complete(stop_all())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.