index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,100 | 675f57ffc3770a9e2e99b7af2c179ba4b3d16ef0 | # Generated by Django 3.1 on 2020-09-01 16:27
import accounts.managers
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the accounts app.

    Creates a custom email-login User (no username field; managed by
    accounts.managers.UserManager), a BankAccountType catalogue, and the
    per-user UserBankAccount / UserAddress records.
    """

    initial = True

    dependencies = [
        # Needs auth's Group/Permission tables for the M2M fields below.
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Unique email replaces the stock username as the login field.
                ('email', models.EmailField(max_length=254, unique=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', accounts.managers.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='BankAccountType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('maximum_withdrawal_amount', models.DecimalField(decimal_places=2, max_digits=12)),
                ('annual_interest_rate', models.DecimalField(decimal_places=2, help_text='Interest rate from 0 - 100', max_digits=5, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
                ('interest_calculation_per_year', models.PositiveSmallIntegerField(help_text='The number of times interest will be calculated per year', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)])),
            ],
        ),
        migrations.CreateModel(
            name='UserBankAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('account_no', models.PositiveIntegerField(unique=True)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('balance', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
                ('interest_start_date', models.DateField(blank=True, help_text='The month number that interest calculation will start from', null=True)),
                ('initial_deposit_date', models.DateField(blank=True, null=True)),
                ('account_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accounts', to='accounts.bankaccounttype')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='account', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street_address', models.CharField(max_length=512)),
                ('city', models.CharField(max_length=256)),
                ('postal_code', models.PositiveIntegerField()),
                ('country', models.CharField(max_length=256)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='address', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
22,101 | 97f027c7c382cdf379b5c3fbe58dac4be208d884 | import numpy as np
import pandas as pd
import pickle
# Scikit Learn
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from datetime import datetime
RSEED = 42  # project-wide random seed

# Load the preprocessed Kickstarter dump produced by the preprocessing step.
df = pd.read_csv('data/Kickstarter_preprocessed.csv')
print('Data is read in')
# Deleting the first column (bears no information)
df.drop(['Unnamed: 0'],axis=1,inplace=True);
# Rename some columns to have more meaningful names
df.rename(columns={'name_category':'category_sub',
                   'slug_category':'category','blurb':'description'},inplace=True)
# Time conversion: the raw columns hold UNIX epoch seconds.
time_cols = ['created_at','deadline','state_changed_at','launched_at']
df[time_cols] = df[time_cols].applymap(lambda x: datetime.utcfromtimestamp(x))
# Word count of the project blurb (str() guards against NaN descriptions).
df['description_length'] = df['description'].apply(lambda x: len(str(x).split()))
# Funding goal converted to USD with the stored exchange rate.
df = df.eval('usd_goal = static_usd_rate * goal')
# Calculating the duration of project
df['duration'] = df['deadline'] - df['launched_at']
df['duration_days']=df['duration'].dt.days
# Start year and month of the projects
df['start_month']= df['launched_at'].dt.month
df['start_year']= df['launched_at'].dt.year
# Splitting the text in column category, keeping only the left part of the string --> main category
df.category = df.category.apply(lambda x: x.split('/')[0])
# change to lower case string
df.category_sub = df.category_sub.str.lower()
categorical_features = [
    'currency',
    'country',
    'staff_pick',
    'category',
    'category_sub',
    'start_month'
]
# Convert strings and numbers to categories
df[categorical_features] = df[categorical_features].apply(lambda x: x.astype('category'))
# Convert strings to numbers
df[categorical_features] = df[categorical_features].apply(lambda x: x.cat.codes)
# Convert numbers to categories
df[categorical_features] = df[categorical_features].apply(lambda x: x.astype('category'))
features = ['description_length','duration_days','usd_goal','country','staff_pick','category','category_sub','start_month']
target = ['state']
X = df[features]
y = df[target]
# NOTE(review): random_state is hard-coded to 42 although RSEED is defined
# above — they happen to agree; consider passing RSEED explicitly.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
y_train = y_train.values.ravel()
y_test = y_test.values.ravel()
print('Data cleaning and train-test split is done')
print ('Train set shape:', X_train.shape, y_train.shape)
print ('Test set shape: ', X_test.shape , y_test.shape)
# These are the parameters found via the grid search
# NOTE(review): no random_state on the forest, so refits are not reproducible
# despite RSEED — confirm whether that is intended.
rfc = RandomForestClassifier(max_depth=20,
                             max_features=None,
                             min_samples_split=5,
                             n_jobs=-1)
rfc.fit(X_train,y_train)
print('Model is fitted, saving model now')
filename = 'optimized_random_forest_model.pickle'
with open(filename, 'wb') as file:
    pickle.dump(rfc, file)
|
22,102 | 47dcba1a3c78b814499f6dc847b0da6822189c79 | import os
from icarousell import ICarousell, Picture
class SortedCarousell(ICarousell):
    """Carousel that cycles forever through a folder's files in sorted order."""

    def __init__(self, folder):
        # Snapshot the directory listing once, in deterministic (sorted) order.
        self.files = sorted(os.listdir(folder))
        self.position = 0

    def next(self) -> Picture:
        # Modulo wrap-around makes the carousel repeat from the start.
        current = self.files[self.position % len(self.files)]
        self.position += 1
        return Picture(current)
|
22,103 | 95d126b648fced7baea12575007b9fc536311abc | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-25 05:08
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: a single SessionStorage table.

    Captures a browser/session fingerprint (user agent, OS, device, locale,
    screen metrics, page context) keyed by a random UUID primary key.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='SessionStorage',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('user_agent_string', models.CharField(blank=True, max_length=2056, null=True)),
                ('os_family', models.CharField(blank=True, max_length=52, null=True)),
                ('os_version', models.CharField(blank=True, max_length=52, null=True)),
                ('browser_family', models.CharField(blank=True, max_length=52, null=True)),
                ('browser_version', models.CharField(blank=True, max_length=52, null=True)),
                ('device_family', models.CharField(blank=True, max_length=52, null=True)),
                ('device_brand', models.CharField(blank=True, max_length=52, null=True)),
                ('device_model', models.CharField(blank=True, max_length=52, null=True)),
                ('cookie_enabled', models.BooleanField(default=False)),
                ('current_language', models.CharField(blank=True, max_length=20, null=True)),
                ('languages', models.CharField(blank=True, max_length=100, null=True)),
                ('java_enabled', models.BooleanField(default=False)),
                ('online', models.BooleanField(default=False)),
                ('plugin_list', models.CharField(blank=True, max_length=3000, null=True)),
                ('timezone_offset', models.IntegerField(blank=True, null=True)),
                ('screen_height', models.IntegerField(blank=True, null=True)),
                ('screen_width', models.IntegerField(blank=True, null=True)),
                ('screen_color_depth', models.IntegerField(blank=True, null=True)),
                ('location', models.CharField(blank=True, max_length=512, null=True)),
                ('referrer', models.CharField(blank=True, max_length=512, null=True)),
                ('page_title', models.TextField(blank=True, null=True)),
                ('form_has_hidden_fields', models.BooleanField(default=False)),
            ],
        ),
    ]
|
22,104 | de7025658d821909cc9f8a1d02f44dbf1ca53e06 | from flask import request
from nec_lib import NECTables as NEC
from nec_lib import ConduitFill as CF
from nec_lib import WireDerate as WD
def get_voltage_drop(material, phase, size, length, current, voltage):
    """Return the circuit's voltage drop as a percentage (rounded to 3 places).

    Reads the submitted form values named by the arguments; returns 0 when
    the form has not been submitted.  Uses the classic K-factor formula:
    drop = (phase_mult * K * length * amps) / circular_mils.
    """
    form = VD_Form()
    if not form.is_submitted():
        return 0
    # Pull the raw form values.  `material` names the conductor K-factor field;
    # `phase` the phase multiplier (presumably 2 for single-phase — confirm).
    k_factor = float(request.form.get(material))
    phase_mult = float(request.form.get(phase))
    awg = str(request.form.get(size))
    run_length = int(request.form.get(length))
    amps = int(request.form.get(current))
    volts = int(request.form.get(voltage))
    drop = (phase_mult * k_factor * run_length * amps) / NEC.awg_to_circmils[awg]
    remaining = volts - drop
    percentage = remaining / volts
    return round(((1 - percentage) * 100), 3)
def get_conduit_size(size, number, insulation, ground, conduit):
    """Pick the smallest conduit whose 40% fill area holds the conductors.

    Reads the submitted form fields named by the arguments.  Returns a tuple
    (fill percentage, conduit trade size) for the first conduit whose
    cross-section keeps the bundle at or under 40% fill (NEC Chapter 9
    Table 1 limit for 3+ conductors), 0 if the form was not submitted, or
    None if no listed conduit is large enough.
    """
    form = CF_Form()
    if not form.is_submitted():
        return 0
    sz = str(request.form.get(size))
    nmbr = request.form.get(number)
    ins = str(request.form.get(insulation))
    grnd = str(request.form.get(ground))
    cndt = str(request.form.get(conduit))
    # The per-wire area table depends on the insulation type.
    if ins == 'PV':
        area_table = CF.pv_awg_to_area
    else:
        area_table = CF.thhn_awg_to_area
    total_area = (area_table[sz] * float(nmbr)) + area_table[grnd]
    # Conduit size list and trade-name lookup (was a local named `dict`,
    # which shadowed the builtin).
    if cndt == "EMT":
        conduit_sizes = CF.emt_lst
        trade_names = CF.emt_dict
    else:
        conduit_sizes = CF.pvc_lst
        trade_names = CF.pvc_dict
    for conduit_area in conduit_sizes:
        # Was a strict `<` comparison, which silently skipped a conduit at
        # exactly 40% fill (and fell off the loop); exact 40% is permitted.
        if (total_area / .4) <= conduit_area:
            cross_sect = (total_area / conduit_area) * 100
            return round(cross_sect, 1), trade_names[conduit_area]
    return None  # no listed conduit is large enough for this bundle
def get_wire_size(current, number, insulation, temperature, continuous):
    """Size copper and aluminum conductors from the 90°C ampacity tables.

    Derates the requested current for conductor count (fill) and ambient
    temperature, scales it by the continuous-load factor, then selects the
    first copper and aluminum sizes whose table ampacity exceeds the
    requirement.

    Returns [copper_awg, aluminum_awg, required_ampacity] on a submitted
    form, or 0 otherwise (consistent with the other calculators here).
    """
    form = WS_Form()
    if form.is_submitted():
        crnt = int(request.form.get(current))
        nmbr = int(request.form.get(number))
        # NOTE(review): `ins` is read but never used below — the 90°C tables
        # are applied unconditionally; confirm that is intended.
        ins = str(request.form.get(insulation))
        tmp = int(request.form.get(temperature))
        cntns = float(request.form.get(continuous))
        fill_factor = 1
        temp_factor = 1
        copper_tuple = (0, 0)
        al_tuple = (0, 0)
        # Conductor-count (fill) derate: first table bracket above nmbr.
        # NOTE(review): a value exactly equal to a bracket bound falls through
        # to the next bracket (or keeps factor 1 at the top) — confirm the
        # table's boundary convention before tightening these comparisons.
        for x in WD.fill_lst:
            if nmbr > x:
                continue
            elif nmbr < x:
                fill_factor = WD.fill_dict[x]
                break
        # Ambient-temperature derate from the 90°C column.
        for y in WD.temp_lst:
            if tmp > y:
                continue
            elif tmp < y:
                temp_factor = WD.temp_dict_90[y]
                break
        required_ampacity = (crnt / fill_factor / temp_factor) * cntns
        # Smallest copper size whose table ampacity exceeds the requirement.
        for z in WD.cu_ampacity_90:
            if required_ampacity > z:
                continue
            elif required_ampacity < z:
                copper_tuple = (WD.cu_ampacity_to_awg_90[z], z)
                break
        # Same lookup against the aluminum table.
        for z in WD.al_ampacity_90:
            if required_ampacity > z:
                continue
            elif required_ampacity < z:
                al_tuple = (WD.al_ampacity_to_awg_90[z], z)
                break
        return_list = [copper_tuple[0], al_tuple[0], round(required_ampacity,2)]
        return return_list
    else:
        # Previously fell through and returned None implicitly; return 0 like
        # every other form handler in this module for a consistent sentinel.
        return 0
def rotational_mass(masslbs, rpm, radius):
    """Compute rim speed and rotational loading for a spinning mass.

    Reads the submitted form fields named by the arguments and returns
    [velocity (in/s), velocity (m/s), centripetal force (N),
    centrifugal force (lbf), horsepower], or 0 if the form was not submitted.
    """
    form = RM_Form()
    if not form.is_submitted():
        return 0
    pounds = float(request.form.get(masslbs))
    revs_per_minute = float(request.form.get(rpm))
    radius_inches = float(request.form.get(radius))
    kilograms = pounds * .453592          # lb -> kg
    radius_m = radius_inches * .0254      # in -> m
    # NOTE(review): circumference here is pi*r rather than 2*pi*r — either the
    # input is really a diameter or this is half the rim speed; confirm.
    speed_inches = (radius_inches * 3.141592653) * (revs_per_minute / 60)
    speed_meters = (radius_m * 3.141592653) * (revs_per_minute / 60)
    newtons = (kilograms * (speed_meters ** 2)) / radius_m
    pounds_force = newtons * .224809      # N -> lbf
    # NOTE(review): .00134102209 is the watts->hp factor but it is applied to
    # a force in newtons, not a power — verify the intended units.
    hp = newtons * .00134102209
    return [speed_inches, speed_meters, newtons,
            pounds_force, hp]
from vd import VD_Form, CF_Form, WS_Form, RM_Form
|
22,105 | ae443eac9e893c60ee4b99beb4c6ba812a7150e9 | import numpy as np
import pandas as pd
data = pd.read_csv('./data/dacon/comp4/201901-202003.csv')
# submission = pd.read_csv('./data/dacon/comp4/submission.csv', index_col=0)
# print(data.head())
# print(data.tail())
# REG_YYMM CARD_SIDO_NM CARD_CCG_NM STD_CLSS_NM HOM_SIDO_NM HOM_CCG_NM AGE SEX_CTGO_CD FLC CSTMR_CNT AMT CNT
# 0 201901 ๊ฐ์ ๊ฐ๋ฆ์ ๊ฑด๊ฐ๋ณด์กฐ์ํ ์๋งค์
๊ฐ์ ๊ฐ๋ฆ์ 20s 1 1 4 311200 4
# 1 201901 ๊ฐ์ ๊ฐ๋ฆ์ ๊ฑด๊ฐ๋ณด์กฐ์ํ ์๋งค์
๊ฐ์ ๊ฐ๋ฆ์ 30s 1 2 7 1374500 8
# 2 201901 ๊ฐ์ ๊ฐ๋ฆ์ ๊ฑด๊ฐ๋ณด์กฐ์ํ ์๋งค์
๊ฐ์ ๊ฐ๋ฆ์ 30s 2 2 6 818700 6
# 3 201901 ๊ฐ์ ๊ฐ๋ฆ์ ๊ฑด๊ฐ๋ณด์กฐ์ํ ์๋งค์
๊ฐ์ ๊ฐ๋ฆ์ 40s 1 3 4 1717000 5
# 4 201901 ๊ฐ์ ๊ฐ๋ฆ์ ๊ฑด๊ฐ๋ณด์กฐ์ํ ์๋งค์
๊ฐ์ ๊ฐ๋ฆ์ 40s 1 4 3 1047300 3
# REG_YYMM CARD_SIDO_NM CARD_CCG_NM STD_CLSS_NM HOM_SIDO_NM HOM_CCG_NM AGE SEX_CTGO_CD FLC CSTMR_CNT AMT CNT
# 24697787 202003 ์ถฉ๋ถ ์ถฉ์ฃผ์ ํด์์ฝ๋ ์ด์์
์ถฉ๋ถ ์ถฉ์ฃผ์ 30s 1 2 3 43300 4
# 24697788 202003 ์ถฉ๋ถ ์ถฉ์ฃผ์ ํด์์ฝ๋ ์ด์์
์ถฉ๋ถ ์ถฉ์ฃผ์ 40s 1 3 3 35000 3
# 24697789 202003 ์ถฉ๋ถ ์ถฉ์ฃผ์ ํด์์ฝ๋ ์ด์์
์ถฉ๋ถ ์ถฉ์ฃผ์ 50s 1 4 4 188000 6
# 24697790 202003 ์ถฉ๋ถ ์ถฉ์ฃผ์ ํด์์ฝ๋ ์ด์์
์ถฉ๋ถ ์ถฉ์ฃผ์ 50s 2 4 4 99000 6
# 24697791 202003 ์ถฉ๋ถ ์ถฉ์ฃผ์ ํด์์ฝ๋ ์ด์์
์ถฉ๋ถ ์ถฉ์ฃผ์ 60s 1 5 3 194000 3
# print(submission.head())
# print(submission.tail())
# REG_YYMM CARD_SIDO_NM STD_CLSS_NM AMT
# id
# 0 202004 ๊ฐ์ ๊ฑด๊ฐ๋ณด์กฐ์ํ ์๋งค์
0
# 1 202004 ๊ฐ์ ๊ณจํ์ฅ ์ด์์
0
# 2 202004 ๊ฐ์ ๊ณผ์ค ๋ฐ ์ฑ์ ์๋งค์
0
# 3 202004 ๊ฐ์ ๊ด๊ด ๋ฏผ์ํ ๋ฐ ์ ๋ฌผ์ฉํ ์๋งค์
0
# 4 202004 ๊ฐ์ ๊ทธ์ธ ๊ธฐํ ๋ถ๋ฅ์๋ ์ค๋ฝ๊ด๋ จ ์๋น์ค์
0
# REG_YYMM CARD_SIDO_NM STD_CLSS_NM AMT
# id
# 1389 202007 ์ถฉ๋ถ ํผ์ ํ๋ฒ๊ฑฐ ์๋์์น ๋ฐ ์ ์ฌ ์์์ ์
0
# 1390 202007 ์ถฉ๋ถ ํ์ ์์์ ์
0
# 1391 202007 ์ถฉ๋ถ ํธํ
์
0
# 1392 202007 ์ถฉ๋ถ ํ์ฅํ ๋ฐ ๋ฐฉํฅ์ ์๋งค์
0
# 1393 202007 ์ถฉ๋ถ ํด์์ฝ๋ ์ด์์
0
# Empty strings instead of NaN so the groupby keys stay clean.
data = data.fillna('')
df = data.copy()
# Monthly card-spend totals per (region, business-class).
df = df[['REG_YYMM', 'CARD_SIDO_NM', 'STD_CLSS_NM', 'AMT']]
df = df.groupby(['REG_YYMM', 'CARD_SIDO_NM', 'STD_CLSS_NM']).sum().reset_index(drop=False)
# Naive forecast: reuse the latest observed month (2020-03) as-is.
df = df.loc[df['REG_YYMM']==202003]
df = df[['CARD_SIDO_NM', 'STD_CLSS_NM', 'AMT']]
print(df.shape)
# Left-join the 202003 totals onto the 202004 submission rows; combinations
# never seen in 202003 get AMT 0.
submission = pd.read_csv('./data/dacon/comp4/submission.csv', index_col=0)
submission = submission.loc[submission['REG_YYMM']==202004]
submission = submission[['CARD_SIDO_NM', 'STD_CLSS_NM']]
submission = submission.merge(df, left_on=['CARD_SIDO_NM', 'STD_CLSS_NM'], right_on=['CARD_SIDO_NM', 'STD_CLSS_NM'], how='left')
submission = submission.fillna(0)
# Tile the same predictions twice — presumably the full submission asks for
# two target months (202004 and 202007) with identical row layout; confirm
# that the row ordering of submission.csv matches this assumption.
AMT = list(submission['AMT'])*2
submission = pd.read_csv('./data/dacon/comp4/submission.csv', index_col=0)
submission['AMT'] = AMT
submission.to_csv('./data/dacon/comp4/submission2.csv', encoding='utf-8-sig')
print(submission.head())
print(submission.tail())
print(submission.shape)
22,106 | d4b5637b8b98bc2547ef0395bf5a5f2802d28f73 | import pandas as pd
import os
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-data_dir', type=str, required=True)  # root of the extracted feature folders
parser.add_argument('-period', type=str)                   # time-window tag used in the folder names
args = parser.parse_args()
periods = [args.period]
# One output column per note-category code.
columns = ['900001','900002','900003','900004','900005','900006','900007','900008','900010','900011','900012','900013','900014','900015', '900016']
for period in periods:
    stays = []
    is_notes = []
    feature_path = f'{args.data_dir}/timeseries_features_{period}/note_mask/'
    for i, stay in enumerate(os.listdir(feature_path)):
        if i % 5000 == 0:
            print(f"processed {i} admission")
        # note_mask: per-timestep mask per note category — assumed shape
        # (timesteps, 15) to match `columns`; TODO confirm upstream.
        note_mask = np.load(os.path.join(feature_path, stay), allow_pickle=True).astype(float)
        # Collapse time: 1.0 where the category has at least one note, else 0.0.
        note_mask = 1-np.equal(note_mask.sum(axis=0), 0).astype(float) # check existance of each
        # Strip the 4-char extension (presumably '.npy') and use '.csv' to
        # match the stay naming convention used downstream.
        stays.append(stay[:-4]+'.csv')
        is_notes.append(note_mask)
    df = pd.DataFrame(data=is_notes, columns=columns)
    df.insert(0, 'stay', stays)
    df.to_csv(f'{args.data_dir}/patient2notes_{period}.csv', index=False)
|
22,107 | a0af362bf02b8ee18a8dc359ef73a2fd5ad23a55 | from inspect import isclass
from itertools import chain
from typing import Type, Optional, Union, Any
from pydantic import BaseModel, create_model, validate_model, Field, ConfigError
from pydantic.main import ModelMetaclass
import django
from django.contrib.contenttypes.fields import GenericRelation
from django.utils.functional import Promise
from django.utils.encoding import force_str
from django.core.serializers.json import DjangoJSONEncoder
from .fields import ModelSchemaField
_is_base_model_class_defined = False
class ModelSchemaJSONEncoder(DjangoJSONEncoder):
    """DjangoJSONEncoder that also resolves Django lazy strings."""

    def default(self, obj):
        # Lazy translation/proxy objects (Promise) are forced to real strings;
        # anything else is delegated to DjangoJSONEncoder.
        if not isinstance(obj, Promise):
            return super().default(obj)  # pragma: nocover
        return force_str(obj)
class ModelSchemaMetaclass(ModelMetaclass):
    """Metaclass that turns a ModelSchema subclass into a pydantic model
    whose fields mirror the Django model named in its ``Config.model``.

    Field sources, in priority order: an annotated name with a value in the
    class body (explicit pydantic Field), a bare annotation, and finally the
    Django model field converted through ModelSchemaField.
    """

    def __new__(
        mcs: Type["ModelSchemaMetaclass"],
        name: str,
        bases: tuple,
        namespace: dict,
    ):
        cls = super().__new__(mcs, name, bases, namespace)
        # Only direct subclasses of ModelSchema itself get the field
        # machinery; ModelSchema's own creation is skipped via the flag.
        for base in reversed(bases):
            if _is_base_model_class_defined and issubclass(base, ModelSchema) and base == ModelSchema:
                config = namespace["Config"]
                include = getattr(config, "include", None)
                exclude = getattr(config, "exclude", None)
                if include and exclude:
                    raise ConfigError("Only one of 'include' or 'exclude' should be set in configuration.")
                annotations = namespace.get("__annotations__", {})
                try:
                    fields = config.model._meta.get_fields()
                except AttributeError as exc:
                    raise ConfigError(f"{exc} (Is `Config.model` a valid Django model class?)")
                field_values = {}
                _seen = set()
                # Walk Django fields first, then any extra annotated names.
                for field in chain(fields, annotations.copy()):
                    # Django fields have .name (or .related_name for reverse
                    # relations); plain annotation entries are the name itself.
                    field_name = getattr(field, "name", getattr(field, "related_name", field))
                    if (
                        field_name in _seen
                        or (include and field_name not in include)
                        or (exclude and field_name in exclude)
                    ):
                        continue
                    _seen.add(field_name)
                    python_type = None
                    pydantic_field = None
                    if field_name in annotations and field_name in namespace:
                        # Annotated AND assigned in the class body: user
                        # supplied an explicit type + pydantic Field/default.
                        python_type = annotations.pop(field_name)
                        pydantic_field = namespace[field_name]
                        if hasattr(pydantic_field, "default_factory") and pydantic_field.default_factory:
                            pydantic_field = pydantic_field.default_factory()
                    elif field_name in annotations:
                        # Annotation only: Optional types default to None,
                        # everything else becomes required (Ellipsis).
                        python_type = annotations.pop(field_name)
                        pydantic_field = None if Optional[python_type] == python_type else Ellipsis
                    else:
                        # Derive type + default from the Django model field.
                        python_type, pydantic_field = ModelSchemaField(field)
                    field_values[field_name] = (python_type, pydantic_field)
                cls.__doc__ = namespace.get("__doc__", config.model.__doc__)
                # Reset inherited fields; create_model builds the real ones.
                cls.__fields__ = {}
                p_model = create_model(name, __base__=cls, __module__=cls.__module__, **field_values)
                setattr(p_model, "instance", None)
                setattr(p_model, "objects", p_model._objects())
                setattr(p_model, "get", p_model._get)
                setattr(p_model, "create", p_model._create)
                return p_model
        return cls
class ModelSchema(BaseModel, metaclass=ModelSchemaMetaclass):
    """Pydantic schema bound to a Django model (set via ``Config.model``).

    Subclasses get ORM-ish helpers (``get``/``create``/``objects``) attached
    by the metaclass, plus instance helpers that keep the pydantic data in
    sync with the cached Django instance stored on the class.

    NOTE(review): ``_แนฃet_object`` spells its name with U+1E63 ('ṣ') — almost
    certainly a mojibake of ``_set_object``.  Definition and all call sites
    agree so it works; renaming would touch every use in this class.
    """

    def save(self) -> None:
        """Persist the cached Django instance and refresh this schema from it."""
        cls = self.__class__
        p_model = cls.from_django(self.instance, save=True)
        self._แนฃet_object(__dict__=p_model.__dict__, __fields_set__=p_model.__fields_set__)

    def refresh(self) -> None:
        """Re-fetch the row from the database and refresh this schema."""
        cls = self.__class__
        instance = cls.objects.get(pk=self.instance.pk)
        p_model = cls.from_django(instance)
        self._แนฃet_object(__dict__=p_model.__dict__, __fields_set__=p_model.__fields_set__)

    def delete(self) -> None:
        """Delete the cached Django instance and blank out this schema."""
        self.instance.delete()
        cls = self.__class__
        cls.instance = None
        p_model = cls.__new__(cls)
        self._แนฃet_object(__dict__=p_model.__dict__)

    @classmethod
    def schema_json(
        cls,
        *,
        by_alias: bool = True,
        encoder_cls=ModelSchemaJSONEncoder,
        **dumps_kwargs: Any,
    ) -> str:
        """JSON schema string, encoded so Django lazy strings serialize."""
        return cls.__config__.json_dumps(cls.schema(by_alias=by_alias), cls=encoder_cls, **dumps_kwargs)

    @classmethod
    def get_fields(cls):
        """Names of the Django model fields this schema exposes."""
        if hasattr(cls.__config__, "include"):
            fields = cls.__config__.include
        elif hasattr(cls.__config__, "exclude"):
            fields = [
                field.name for field in cls.__config__.model._meta.get_fields() if field not in cls.__config__.exclude
            ]
        else:
            fields = [field.name for field in cls.__config__.model._meta.get_fields()]
        return fields

    def _แนฃet_object(self, **kwargs) -> None:
        # Bypass pydantic's __setattr__ to swap in a new state wholesale.
        object.__setattr__(self, "__dict__", kwargs["__dict__"])
        object.__setattr__(self, "__fields_set__", kwargs.get("__fields_set__", {}))

    @classmethod
    def _get_object_model(cls, obj_data):
        """Validate raw field data and build a schema instance without __init__."""
        values, fields_set, validation_error = validate_model(cls, obj_data)
        if validation_error: # pragma: nocover
            raise validation_error
        p_model = cls.__new__(cls)
        object.__setattr__(p_model, "__dict__", values)
        object.__setattr__(p_model, "__fields_set__", fields_set)
        return p_model

    @classmethod
    def _objects(cls) -> django.db.models.manager.Manager:
        # Exposed as `.objects` on generated schemas by the metaclass.
        return cls.__config__.model.objects

    @classmethod
    def _create(cls, **kwargs) -> Type["ModelSchema"]:
        instance = cls.objects.create(**kwargs)
        return cls.from_django(instance)

    @classmethod
    def _get(cls, **kwargs) -> Type["ModelSchema"]:
        instance = cls.objects.get(**kwargs)
        return cls.from_django(instance)

    @classmethod
    def from_django(
        cls: Type["ModelSchema"],
        instance: Union[django.db.models.Model, django.db.models.QuerySet],
        many: bool = False,
        cache: bool = True,
        save: bool = False,
    ) -> Union[Type["ModelSchema"], Type["ModelSchemaQuerySet"]]:
        """Build a schema from a model instance (or, with many=True, a queryset).

        Relations resolve to nested schemas when the corresponding schema
        field is itself a ModelSchema, otherwise to pk values.  With save=True
        the instance is saved after extraction; with cache=True it is stored
        on the class as ``cls.instance``.
        """
        if not many:
            obj_data = {}
            annotations = cls.__annotations__
            for field in instance._meta.get_fields():
                # Nested schema class for this field, if the user declared one.
                model_cls = None
                if (
                    field.name in annotations
                    and isclass(cls.__fields__[field.name].type_)
                    and issubclass(cls.__fields__[field.name].type_, ModelSchema)
                ):
                    model_cls = cls.__fields__[field.name].type_
                if not field.concrete and field.auto_created:
                    # Reverse relation: read through the accessor attribute.
                    accessor_name = field.get_accessor_name()
                    related_obj = getattr(instance, accessor_name, None)
                    if not related_obj:
                        related_obj_data = None
                    elif field.one_to_many:
                        related_qs = related_obj.all()
                        if model_cls:
                            related_obj_data = [
                                model_cls.construct(**obj_vals)
                                for obj_vals in related_qs.values(*model_cls.get_fields())
                            ]
                        else:
                            related_obj_data = list(related_obj.all().values("id"))
                    elif field.one_to_one:
                        if model_cls:
                            related_obj_data = model_cls.construct(
                                **{_field: getattr(related_obj, _field) for _field in model_cls.get_fields()}
                            )
                        else:
                            related_obj_data = related_obj.pk
                    obj_data[accessor_name] = related_obj_data
                elif field.one_to_many or field.many_to_many:
                    if isinstance(field, GenericRelation):
                        related_qs = getattr(instance, field.name)
                        if model_cls:
                            # content_object cannot be fetched via .values().
                            related_fields = [field for field in model_cls.get_fields() if field != "content_object"]
                            related_obj_data = [
                                model_cls.construct(**obj_vals) for obj_vals in related_qs.values(*related_fields)
                            ]
                        else:
                            related_obj_data = list(related_qs.values())
                        obj_data[field.name] = related_obj_data
                    else:
                        obj_data[field.name] = [_obj.pk for _obj in field.value_from_object(instance)]
                else:
                    # Plain concrete field.
                    obj_data[field.name] = field.value_from_object(instance)
            p_model = cls._get_object_model(obj_data)
            if save:
                instance.save()
            if cache:
                cls.instance = instance
            return p_model
        # many=True: convert each row, then wrap in an ad-hoc pydantic model
        # keyed by the pluralized model name.
        p_model_list = []
        for obj in instance:
            p_model = cls.from_django(obj, cache=False, many=False, save=False)
            p_model_list.append(p_model)
        model_name = p_model.__config__.model._meta.model_name
        model_name_plural = f"{model_name}s"
        fields = {model_name_plural: (list, Field(None, title=f"{model_name_plural}"))}
        p_model_qs = create_model(
            "ModelSchemaQuerySet",
            __base__=BaseModel,
            __module__=cls.__module__,
            **fields,
        )
        return p_model_qs(**{model_name_plural: p_model_list})
_is_base_model_class_defined = True
|
22,108 | df07ff88e497bbe3b97f3656d743ef6dbf8e5c46 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
import qianGuaHua
import kunGuaHua
# Cast one line per position, bottom (chu) to top (shang).
chuYao = random.choice('้ด้ณ')
erYao = random.choice('้ด้ณ')
sanYao = random.choice('้ด้ณ')
siYao = random.choice('้ด้ณ')
wuYao = random.choice('้ด้ณ')
shangYao = random.choice('้ด้ณ')
huaGua = chuYao + erYao + sanYao + siYao + wuYao + shangYao
print(huaGua)
# Draw each line: the yang form comes from qianGuaHua, the yin form from
# kunGuaHua.  Function names follow draw_<pos>Yao(Yang|Yin)_star_flag.
for position, line in zip(('chu', 'er', 'san', 'si', 'wu', 'shang'),
                          (chuYao, erYao, sanYao, siYao, wuYao, shangYao)):
    if line == '้ณ':
        getattr(qianGuaHua, 'draw_{}YaoYang_star_flag'.format(position))()
    else:
        getattr(kunGuaHua, 'draw_{}YaoYin_star_flag'.format(position))()
|
22,109 | ca06665e4bf18687e5ce7395f6977579e5089ddf | import pandas as pd
data = pd.read_csv('data_with_embeddings_sg.csv')
# Label each review row by employment status: job titles of active employees
# contain the word "Current", everyone else is treated as "former".
former = [
    'current' if 'Current' in title else 'former'
    for title in data['job-title']
]
data['formerOrCurrent'] = former
data.to_csv('data_with_embeddings_sg.csv', index=False)
|
22,110 | 3d8b2b7b82c1bac30bf1ef2879b1db499a3913ab |
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
import os
from Classifier.message import create_msg
from Classifier.load_data import read_arduino, process_data, read_arduinbro
def three_pronged_smoothing_classifier(arr, samprate, downsample_rate=10, window_size_seconds=0.3, max_loops=10):
    """Classify a wave as 'L', 'R' or '_' from its three largest extrema.

    The signal is downsampled, Savitzky-Golay smoothed, and the three
    largest-magnitude turning points (positive maxima / negative minima) are
    read in time order: min-max-min => 'L', max-min-max => 'R', otherwise '_'.
    """
    downsampled = arr[0::downsample_rate]
    # First-order Savitzky-Golay smoothing over ~window_size_seconds.
    window_length = int(window_size_seconds * samprate / downsample_rate + 1)
    smoothed = signal.savgol_filter(downsampled, window_length, 1)

    # Positive local maxima and negative local minima of the smoothed wave.
    peak_idx = np.array(signal.argrelextrema(smoothed, np.greater)[0])
    peak_idx = peak_idx[smoothed[peak_idx] > 0]
    trough_idx = np.array(signal.argrelextrema(smoothed, np.less)[0])
    trough_idx = trough_idx[smoothed[trough_idx] < 0]

    # Candidate table, one column per extremum:
    # row 0 = |value|, row 1 = sample index, row 2 = +1 (peak) / -1 (trough).
    locs = np.append(peak_idx, trough_idx)
    polarity = np.append([1] * len(peak_idx), [-1] * len(trough_idx))
    table = np.vstack([np.abs(smoothed[locs]), locs, polarity])
    # Sort columns by magnitude, largest first.
    table = table[:, (-1 * table[0]).argsort()]

    # Loop until the top three extrema alternate in polarity (no two peaks or
    # two troughs adjacent in time); drop offenders and retry.
    resolved = False
    loops = 0
    while not resolved and loops < max_loops:
        candidates = table[:, 0:3]
        # Re-order the three biggest extrema by their time index.
        top_3_sorted = candidates[:, candidates[1].argsort()]
        if top_3_sorted.shape != (3, 3):
            return "_"  # ran out of turning points
        if top_3_sorted[2, 0] * top_3_sorted[2, 1] > 0:
            table = np.delete(table, 1, 1)
        elif top_3_sorted[2, 1] * top_3_sorted[2, 2] > 0:
            table = np.delete(table, 2, 1)
        else:
            resolved = True
        loops += 1
    polarity_sum = np.sum(top_3_sorted[2, :])
    if polarity_sum == -1:
        return 'L'
    elif polarity_sum == 1:
        return 'R'
    else:
        return "_"
def two_pronged_smoothing_classifier(arr, samprate, downsample_rate=10, window_size_seconds=0.3, max_loops=10):
    """Classify a wave as 'L', 'R' or '_' from its two largest extrema.

    Like three_pronged_smoothing_classifier but uses only the two biggest
    turning points: trough-first => 'L', peak-first => 'R', otherwise '_'.
    """
    downsampled = arr[0::downsample_rate]
    # First-order Savitzky-Golay smoothing over ~window_size_seconds.
    window_length = int(window_size_seconds * samprate / downsample_rate + 1)
    smoothed = signal.savgol_filter(downsampled, window_length, 1)

    # Positive local maxima and negative local minima of the smoothed wave.
    peak_idx = np.array(signal.argrelextrema(smoothed, np.greater)[0])
    peak_idx = peak_idx[smoothed[peak_idx] > 0]
    trough_idx = np.array(signal.argrelextrema(smoothed, np.less)[0])
    trough_idx = trough_idx[smoothed[trough_idx] < 0]

    # Candidate table, one column per extremum:
    # row 0 = |value|, row 1 = sample index, row 2 = +1 (peak) / -1 (trough).
    locs = np.append(peak_idx, trough_idx)
    polarity = np.append([1] * len(peak_idx), [-1] * len(trough_idx))
    table = np.vstack([np.abs(smoothed[locs]), locs, polarity])
    # Sort columns by magnitude, largest first.
    table = table[:, (-1 * table[0]).argsort()]

    # Loop until the top two extrema have opposite polarity; if both are
    # peaks (or both troughs), drop the later one and retry.
    resolved = False
    loops = 0
    while not resolved and loops < max_loops:
        candidates = table[:, 0:2]
        # Re-order the two biggest extrema by their time index.
        top_2_sorted = candidates[:, candidates[1].argsort()]
        if top_2_sorted.shape != (3, 2):
            return "_"  # ran out of turning points
        if top_2_sorted[2, 0] * top_2_sorted[2, 1] > 0:
            table = np.delete(table, 1, 1)
        else:
            resolved = True
        loops += 1
    first_polarity = top_2_sorted[2, 0]
    if first_polarity == -1:
        return 'L'
    elif first_polarity == 1:
        return 'R'
    else:
        return "_"
#ZEROES CLASSIFIER:
#Looks for x samples (after downsampling) that are consecutively positive / negative.
#Classifies only using the first hump of the wave.
from numba import njit
@njit
def consecutive(data, stepsize=0): #returns a list of sub-arrays, grouped by the same consecutive value (in this case they are groups of consecutive 1s or -1s)
    # Split points fall wherever np.diff(data) != stepsize; with the default
    # stepsize=0 that means "start a new group at every value change".
    return np.split(data, np.where(np.diff(data) != stepsize)[0]+1)
@njit #numba 'decorator' that performs just-in-time (jit) compilation
def zeroes_classifier(arr, samprate, downsample_rate=10, ave_height = 20, consec_seconds = 0.19):
    """Classify 'L'/'R'/'_' from the first long zero-crossing-free run.

    Scans the downsampled signal for a run of at least `consec_seconds`
    seconds with no sign change; if that run's mean exceeds +/- `ave_height`
    the wave is called 'R' (positive hump) or 'L' (negative hump).
    """
    arr_ds = arr[0::downsample_rate]
    arr_sign = np.sign(arr_ds) #returns array of -1s and 1s, depending on if the number was negative or positive respectively
    i = 0  # running start index of the current run within arr_ds
    split_arrays = consecutive(arr_sign) #returns a list of sub-arrays, grouped by the same consecutive value (in this case they are groups of consecutive 1s or -1s)
    for sub_arr in split_arrays:
        if len(sub_arr) > consec_seconds * samprate / downsample_rate: #RHS converts seconds to number of samples
            # #if there were 'consec_seconds' seconds of no zero-crossings, then check if the average height is bigger than 'ave_height'
            # NOTE(review): the slice ends at i+len(sub_arr)-1, dropping the
            # run's final sample from the mean — looks like an off-by-one;
            # confirm before changing.
            if np.mean(arr_ds[i:(i + len(sub_arr) - 1)]) > ave_height:
                return 'R'
            elif np.mean(arr_ds[i:(i + len(sub_arr) - 1)]) < -1 * ave_height:
                return 'L'
        i += len(sub_arr)
    return '_' #unable to classify because there were not 'consec_seconds' seconds of no zero-crossings
def max_min_classifier(arr, samprate, downsample_rate=10):
    """Classify by which global extremum occurs last in the downsampled window.

    If the maximum comes after the minimum the wave swings down-then-up -> 'R';
    the opposite order -> 'L'.  A flat wave (max == min at the same index)
    yields '_'.  `samprate` is accepted for interface parity with the other
    classifiers but is not used here.
    """
    downsampled = arr[0::downsample_rate]
    peak = np.amax(downsampled)
    trough = np.amin(downsampled)
    peak_idx = np.where(downsampled == peak)[0][0]
    trough_idx = np.where(downsampled == trough)[0][0]
    if peak_idx > trough_idx:
        return "R"
    if trough_idx > peak_idx:
        return "L"
    print("Flat wave!!!!")
    return "_"
@njit
def max_min_range_classifier(arr, samprate, downsample_rate=10, range = 30):
    """Classify by extrema order, requiring extrema to clear +/-`range`.

    When both extrema clear the threshold the ordering decides ('R' if the
    maximum comes last, 'L' if the minimum comes last, '_' if tied); when only
    one side clears it, that side decides.  (`range` shadows the builtin but is
    part of the public signature, so it is kept.)
    """
    ds = arr[0::downsample_rate]
    hi = np.amax(ds)            # largest sample in the window
    lo = np.amin(ds)            # smallest sample in the window
    hi_idx = np.where(ds == hi)[0][0]   # index of the maximum
    lo_idx = np.where(ds == lo)[0][0]   # index of the minimum
    if hi > range and lo < -1 * range:  # whole wave shape present in this window
        if hi_idx > lo_idx:             # minimum occurred before the maximum
            return "R"
        if lo_idx > hi_idx:             # maximum occurred before the minimum
            return "L"
        return "_"
    if hi > range:                      # only the maximum clears the threshold
        return "R"
    if lo < -1 * range:                 # only the minimum clears the threshold
        return "L"
    return "_"
def one_pronged_smoothing_classifier(arr, samprate, downsample_rate=10, window_size_seconds=0.3, max_loops=10, range = 2):
    # Smooth the downsampled window with a Savitzky-Golay filter, collect the
    # positive maxima and negative minima, sort them by magnitude, and classify
    # by whether the largest extremum is a minimum ('L') or a maximum ('R').
    # NOTE(review): `max_loops`, `range` and `t` are currently unused here —
    # confirm whether they are leftovers from the two-pronged variant above.
    arr_ds = arr[0::downsample_rate]
    fs = samprate/downsample_rate
    dt = 1/fs
    t = np.arange(0, (len(arr_ds)*dt), dt)
    # Smooth wave
    window_length = int(window_size_seconds*samprate/downsample_rate + 1)
    filtered_arr = signal.savgol_filter(arr_ds, window_length, 1)
    # Indices of positive maxima
    max_locs = np.array(signal.argrelextrema(filtered_arr, np.greater)[0])
    max_vals = filtered_arr[max_locs]
    max_locs = max_locs[max_vals > 0]
    # Indices of negative minima
    min_locs = np.array(signal.argrelextrema(filtered_arr, np.less)[0])
    min_vals = filtered_arr[min_locs]
    min_locs = min_locs[min_vals < 0]
    # Appended indices
    max_min_locs = np.append(max_locs, min_locs)
    # Values of above indices
    max_min_values = filtered_arr[max_min_locs]
    # Absolute value of those values
    abs_max_min_values = np.abs(max_min_values)
    # A vector with a length equal to the number of minimums: all '-1' to say minimum
    numMin = [-1]*len(min_locs)
    # A vector with a length equal to the number of maximums: all '1' to say maximum
    numMax = [1]*len(max_locs)
    # Vector same size as max_min_values with first half being maximums and second half being minimums
    isMin = np.append(numMax, numMin)
    # Stack the three vectors: row 0 = |value|, row 1 = index, row 2 = +/-1 type flag
    val_and_idx = np.vstack([abs_max_min_values, max_min_locs, isMin])
    # Sort the magnitudes of the extrema in descending order (-1 indicates descending)
    val_and_idx_sorted = val_and_idx[ :, (-1*val_and_idx[0]).argsort()]
    """ # idk why but this chunk broke something after adding code that clears the classifier_optimisation.csv file
    with open("print.txt", "w") as file:
        file.write(str(val_and_idx_sorted.shape))
    """
    # NOTE(review): shape (3, 0) means *no* extrema were found, yet this branch
    # goes on to index row 2 (an empty array) and compare it to -1/1 — that
    # comparison yields an empty array, so this very likely always falls through
    # to "_" (or warns/raises on newer numpy).  Looks like (3, 1) may have been
    # intended — confirm before changing.
    if val_and_idx_sorted.shape == (3, 0):
        if val_and_idx_sorted[2] == -1:
            return 'L'
        elif val_and_idx_sorted[2] == 1:
            return 'R'
        else:
            return "_"
    else:
        # Largest-magnitude extremum decides: minimum -> 'L', maximum -> 'R'.
        if val_and_idx_sorted[2, 0] == -1:
            return 'L'
        elif val_and_idx_sorted[2, 0] == 1:
            return 'R'
        else:
            return "_"
# catch22 kNN classifier (using stepwise selected features)
#arr: the array (the event) to be classified (a numpy array)
#Prep:
"""
from catch22 import catch22_all
import catch22
from sklearn.neighbors import KNeighborsClassifier
path = ""
step_csv = "catch22_step_selected_features.csv"
catch22_step_training_data = pd.read_csv(path+step_csv)
X_train = catch22_step_training_data.iloc[:,0:-1]
y_labels = catch22_step_training_data.iloc[:,-1]
neigh = KNeighborsClassifier(n_neighbors=5)
neigh.fit(X_train, y_labels)
"""
def catch22_knn_classifier(arr, samprate, downsample_rate=10):
    """Classify via five stepwise-selected catch22 features and a fitted kNN model.

    NOTE(review): this relies on module-level names `catch22` and `neigh`,
    which are only defined inside the commented-out "Prep" string above —
    confirm they are bound before calling, otherwise this raises NameError.
    `samprate` is unused; kept for interface parity with the other classifiers.
    """
    arr_ds = arr[0::downsample_rate]
    arr_list = arr_ds.tolist() # single catch22 feature won't take numpy arrays, only lists or tuples
    feature_one = catch22.DN_HistogramMode_5(arr_list)
    feature_two = catch22.SB_BinaryStats_mean_longstretch1(arr_list)
    feature_three = catch22.FC_LocalSimple_mean1_tauresrat(arr_list)
    feature_four = catch22.DN_OutlierInclude_p_001_mdrmd(arr_list)
    feature_five = catch22.SP_Summaries_welch_rect_area_5_1(arr_list)
    test_features = [[feature_one, feature_two, feature_three, feature_four, feature_five]]
    return neigh.predict(test_features)[0] # returns a single item list, so use index 0 to return the prediction itself
def streaming_classifier(
        wav_array, # Either the array from file (or ser if live = True)
        samprate,
        classifier = one_pronged_smoothing_classifier,
        window_size = 1.5, # Total detection window [s]
        N_loops_over_window = 15, # implicitly defines buffer to be 1/x of the window
        hyp_detection_buffer_end = 0.3, # seconds - how much time to shave off end of the window in order to define the middle portion
        hyp_detection_buffer_start = 0.7, # seconds - how much time to shave off start of the window in order to define the middle portion
        hyp_event_threshold = 20,
        hyp_event_smart_threshold_window = 5, # The length of the calibration period to define the threshold
        hyp_calibration_statistic_function = lambda x: np.max(x) - np.min(x), # Function that calculates the calibration statistic
        hyp_test_statistic_function = lambda x: np.max(x) - np.min(x), # Function that calculates the test statistic
        hyp_event_smart_threshold_factor = 0.5, # The scale factor of the calibration range that will become the threshold
        hyp_event_history = 5, # How many historical event detection results are kept in memory (whether the test criteria failed or passed)
        hyp_consecutive_triggers = 3, # How many threshold triggers need to occur in a row for an event to be called
        hyp_consecutive_reset = 1, # How many threshold failures need to occur in a row for the classifier to be primed for a new event
        hyp_timeout = 10,
        # Zeros Classifier Parameters
        using_zeroes_classifier = False,
        plot_zeroes_classifier = False, # Plot the waves that took an unusually long time for the zeroes classifier
        use_smart_height_threshold_mmr = 1,
        smart_height_factor = 1, # Factor to multiply the smart threshold by to receive a number that can be fed in as the `ave_height`
        zeroes_height_threshold = 10, # Only used if using_zeroes_classifier is true
        zeroes_consec_threshold = 0.19, # Only used if using_zeroes_classifier is true
        # Max-Min-Range Classifier Parameters
        using_max_min_range_classifier = False,
        max_min_range_threshold = 30, # Only used if using_max_min_range_classifier is true
        use_smart_range_threshold_mmr = False,
        smart_range_factor = 1, # Factor to multiply the smart threshold by to receive a number that can be fed in as the `range`
        dumb_threshold = False,
        total_time = None, # max time. If none, it goes forever!
        plot = False, # Whether to plot the livestream data
        store_events = False, # Whether to return the classification window array for debugging purposes
        store_times = False, # Store time taken for each classification
        nil_classifier = False, # Does not classify, just gives L as its prediction always. Used for time complexity purposes.
        verbose=False, # lol
        live = False, # Whether we're live
        timeout = False,
        flip_threshold = False):
    """Stream (or replay) data buffer-by-buffer, detect events, and classify them.

    Each loop acquires one buffer, maintains a sliding window `data_plot`, and
    after a calibration period tests the window's middle portion against a
    (possibly auto-calibrated) threshold.  When `hyp_consecutive_triggers`
    consecutive triggers occur while primed, the window is passed to
    `classifier` and the prediction ('L'/'R'/'_') is appended to `predictions`.

    Returns (predictions, predictions_timestamps) plus, depending on
    `store_events` / `store_times`, the stored windows and per-event
    classification times.

    Bug fixes relative to the previous revision:
      * the non-smart zeroes-threshold path referenced an undefined name
        (`smar`); it now uses the documented `zeroes_height_threshold`,
      * the smart zeroes path used `smart_range_factor` in one branch and
        `smart_height_factor` in the other; both now use `smart_height_factor`,
      * `timer` is initialised unconditionally (it is read in the plot branch
        even when `timeout` is False).
    """
    ### Initialisation ###
    if total_time is None:
        try:
            total_time = len(wav_array)/samprate
        except:
            total_time = 1000000 # Just a large number (live stream has no length)
    if store_events:
        predictions_storage = []
    predictions = ""
    predictions_timestamps = []
    # Initialise variables
    inputBufferSize = int(window_size/N_loops_over_window * samprate)
    N_loops = (total_time*samprate)//inputBufferSize # len(wav_array)//inputBufferSize
    T_acquire = inputBufferSize/samprate # length of time that data is acquired for
    N_loops_over_window = window_size/T_acquire # total number of loops to cover desire time window
    # Initialise the post-event timeout counter unconditionally: it is read in
    # the plotting branch even when `timeout` is False.
    timer = hyp_timeout
    # Initialise plot
    if plot:
        min_y = -200 #np.min(wav_array)
        max_y = 200 #np.max(wav_array)
        fig = plt.figure()
        ax1 = fig.add_subplot(1,1,1)
        plt.ion()
        fig.show()
        fig.canvas.draw()
    # Hyperparameter conversions (seconds -> sample indices)
    hyp_detection_buffer_start_ind = int(round(hyp_detection_buffer_start * samprate))
    hyp_detection_buffer_end_ind = int(round(hyp_detection_buffer_end * samprate))
    # Initialise Calibration
    calibrate = True
    N_loops_calibration = hyp_event_smart_threshold_window//(window_size/N_loops_over_window)
    # Initialise Event History
    event_history = np.array([False]*hyp_event_history)
    primed = True
    ### Start stream ###
    classification_times = []
    for k in range(0,int(N_loops)):
        if live:
            data = read_arduino(wav_array,inputBufferSize)
            data_temp = process_data(data)
        else:
            data_temp = read_arduinbro(wav_array, inputBufferSize, k)
        if k < N_loops_over_window:
            # Window not yet full: keep prepending buffers.
            if k==0:
                data_cal = data_temp
                data_plot = data_temp
            else:
                data_plot = np.append(data_temp, data_plot)
                if calibrate:
                    data_cal = np.append(data_temp, data_cal)
            continue
        else:
            # Window full: roll and overwrite the oldest buffer with the newest.
            data_plot = np.roll(data_plot,len(data_temp))
            data_plot[0:len(data_temp)] = data_temp
            if calibrate:
                data_cal = np.append(data_temp,data_cal)
                if (k > N_loops_calibration):
                    # Calibration period over: derive the smart threshold from
                    # the calibration statistic (defaults to signal range).
                    st_range = hyp_calibration_statistic_function(data_cal)
                    if not dumb_threshold:
                        hyp_event_threshold = st_range*hyp_event_smart_threshold_factor
                    calibrate = False
                continue
        ### CLASSIFIER ###
        # Event Detection
        interval = data_plot[hyp_detection_buffer_start_ind:-hyp_detection_buffer_end_ind] # Take middle part of window
        test_stat = hyp_test_statistic_function(interval) # Calculate test stat (defaults to range)
        if flip_threshold:
            is_event = (test_stat < hyp_event_threshold) # Test threshold
        else:
            is_event = (test_stat > hyp_event_threshold) # Test threshold
        # Record History (shift right, newest result in slot 0)
        event_history[1::] = event_history[0:-1]
        event_history[0] = is_event
        # Pass window to classifier
        if np.all(event_history[0:hyp_consecutive_triggers]) and primed:
            if store_times:
                if nil_classifier:
                    start = time.time_ns()
                    prediction = 'L'
                    end = time.time_ns()
                    time_taken = end - start
                    classification_times.append(time_taken)
                else:
                    if using_zeroes_classifier:
                        if use_smart_height_threshold_mmr:
                            start = time.time_ns()
                            # Smart height threshold scaled off the calibrated event threshold
                            # (previously used smart_range_factor, inconsistent with the
                            # non-timed path below).
                            prediction = classifier(data_plot, samprate, consec_seconds = zeroes_consec_threshold, ave_height = smart_height_factor * hyp_event_threshold)
                            end = time.time_ns()
                            time_taken = end - start
                            classification_times.append(time_taken)
                            if plot_zeroes_classifier and time_taken > 998800:
                                plt.figure()
                                plt.plot(data_plot)
                        else:
                            start = time.time_ns()
                            # Fixed ("dumb") height threshold.  BUG FIX: previously
                            # referenced the undefined name `smar` (NameError).
                            prediction = classifier(data_plot, samprate, consec_seconds = zeroes_consec_threshold, ave_height = zeroes_height_threshold)
                            end = time.time_ns()
                            time_taken = end - start
                            classification_times.append(time_taken)
                            if plot_zeroes_classifier and time_taken > 998800:
                                plt.figure()
                                plt.plot(data_plot)
                    elif using_max_min_range_classifier:
                        start = time.time_ns()
                        prediction = classifier(data_plot, samprate, range = smart_range_factor * max_min_range_threshold)
                        end = time.time_ns()
                        time_taken = end - start
                        classification_times.append(time_taken)
                    else:
                        start = time.time_ns()
                        prediction = classifier(data_plot, samprate)
                        end = time.time_ns()
                        time_taken = end - start
                        classification_times.append(time_taken)
            else:
                if nil_classifier:
                    prediction = 'L'
                else:
                    if using_zeroes_classifier:
                        if use_smart_height_threshold_mmr:
                            prediction = classifier(data_plot, samprate, consec_seconds = zeroes_consec_threshold, ave_height = smart_height_factor * hyp_event_threshold)
                        else:
                            # Mirror of the timed path: fixed height threshold.
                            prediction = classifier(data_plot, samprate, consec_seconds = zeroes_consec_threshold, ave_height = zeroes_height_threshold)
                    elif using_max_min_range_classifier:
                        prediction = classifier(data_plot, samprate, range = smart_range_factor * max_min_range_threshold)
                    else:
                        prediction = classifier(data_plot, samprate)
            predictions += prediction
            if verbose:
                print(f"CONGRATULATIONS, ITS AN {prediction}!")
            if store_events:
                predictions_storage.append(data_plot)
            # Record time interval of event
            end_time = round(k*inputBufferSize/samprate, 2)
            start_time = round(end_time - window_size, 2)
            predictions_timestamps.append((start_time, end_time))
            timer = hyp_timeout
            primed = False
        # Re-prime: either after enough consecutive non-events, or after a timeout.
        if not timeout:
            if np.all(~event_history[0:hyp_consecutive_reset]):
                primed = True
        else:
            timer -= 1
            if timer < 0:
                primed = True
        ## PLOT ##
        if plot:
            t = (min(k+1,N_loops_over_window))*inputBufferSize/samprate*np.linspace(0,1,(data_plot).size)
            ax1.clear()
            # Debugging Annotations
            if np.all(event_history[0:hyp_consecutive_triggers]) and timer >0:
                ax1.annotate(f"ITS AN {prediction}!!!", (window_size/2, max_y-50))
                ax1.annotate(f"{event_history}", (window_size/2, max_y-70))
            ax1.set_xlim(0, window_size)
            ax1.set_ylim(min_y, max_y)
            plt.xlabel('time [s]')
            ax1.plot(t,data_plot)
            fig.canvas.draw()
            plt.show()
    if store_events and store_times:
        return predictions, predictions_timestamps, predictions_storage, classification_times
    elif store_events:
        return predictions, predictions_timestamps, predictions_storage
    elif store_times:
        return predictions, predictions_timestamps, classification_times
    else:
        return predictions, predictions_timestamps
|
22,111 | 8d69d18eea099e572e7d30dba71d1983262cf100 | #!/usr/bin/env python
import sys
import os
import json
from pprint import pprint
import random
from uuid import getnode
try:
# see if we're running in a plex plug-in
HTTP
except:
import requests
HTTP = None
class Logger:
    # Minimal stand-in for the Plex plug-in Log object when running standalone
    # (the HTTP/Log framework objects only exist inside a plug-in sandbox).
    def Debug(self, *args):
        # Python 2 print statement; emits args as a tuple to stdout.
        print args
Log = Logger()  # module-level logger used in place of the plug-in's Log
# Standard X-Plex-* identification headers sent with every PMS request.
# The client identifier is derived from the machine's MAC address (uuid.getnode).
BASE_HEADERS = {
    'X-Plex-Platform': "PMS",
    'X-Plex-Platform-Version': "1",
    'X-Plex-Provides': 'controller',
    'X-Plex-Product': "shufflebyalbum",
    'X-Plex-Version': "1",
    'X-Plex-Device': "PMS-Plugin",
    'X-Plex-Device-Name': "pms",
    'X-Plex-Client-Identifier': str(hex(getnode()))
}
def http_comm(url, method, headers):
    """Perform an HTTP request via the plug-in HTTP object or the requests library.

    Inside a Plex plug-in the sandboxed HTTP object is used; otherwise the
    request is dispatched to requests.get/post/delete.  Raises ValueError for
    an unsupported method (previously this fell through to an
    UnboundLocalError on `request_func`).
    """
    if HTTP:
        return HTTP.Request(url, headers=headers, cacheTime=0, method=method)
    if method == "GET":
        request_func = requests.get
    elif method == "POST":
        request_func = requests.post
    elif method == "DELETE":
        request_func = requests.delete
    else:
        raise ValueError("Unsupported HTTP method: {}".format(method))
    return request_func(url, headers=headers, allow_redirects=True)
class PlexServer(object):
    """Thin wrapper around the Plex Media Server HTTP API.

    Query helpers deliberately swallow errors and return an empty/None
    fallback so callers can treat a failed request like an empty result.
    """
    def __init__(self, host='localhost',port=32400, token = ""):
        # token: X-Plex-Token; empty string means unauthenticated local access.
        self.base_url = "http://{}:{}".format(host,port)
        self.token = token
    def query(self, path, method):
        """Issue `method` against `path`; return parsed JSON or None."""
        url = self.base_url + path
        headers = dict(BASE_HEADERS)
        headers['Accept'] = 'application/json'
        if self.token:
            headers['X-Plex-Token'] = self.token
        r = http_comm(url, method, headers)
        try:
            response = json.loads( r.content )
            return response
        except:
            # Non-JSON or empty body (e.g. some DELETE responses) -> None.
            return None
    def get(self, path):
        """GET `path` and return the parsed JSON response (or None)."""
        return self.query(path, "GET")
    def post(self, path):
        """POST to `path` and return the parsed JSON response (or None)."""
        return self.query(path, "POST")
    def delete(self, path):
        """DELETE `path` and return the parsed JSON response (or None)."""
        return self.query(path, "DELETE")
    def getClients(self):
        """Return the list of client dicts known to the server, or []."""
        path = "/clients"
        response = self.get(path)
        try:
            return response['MediaContainer']['Server']
        except:
            return []
    def getSections(self):
        """Return the list of library-section dicts, or []."""
        path = "/library/sections"
        response = self.get(path)
        try:
            return response['MediaContainer']['Directory']
        except:
            return []
    def getAlbums(self, section):
        """Return all album metadata dicts for a section key, or []."""
        path = "/library/sections/{}/albums".format(section)
        response = self.get(path)
        try:
            albums = response['MediaContainer']['Metadata']
            return albums
        except:
            return []
    def getServerInfo(self):
        """Return the server's root MediaContainer (includes machineIdentifier), or {}."""
        path = ""
        response = self.get(path)
        try:
            return response['MediaContainer']
        except:
            return {}
    def getPlaylists(self):
        """Return the list of playlist metadata dicts, or []."""
        path = "/playlists"
        response = self.get(path)
        try:
            return response['MediaContainer']['Metadata']
        except:
            return []
    # takes a dict item as returned from getPlaylists
    def deletePlaylist(self, playlist):
        """Delete the given playlist (dict as returned by getPlaylists)."""
        playlist_key = playlist['key']
        # The playlist's key points at its /items; strip that to address the playlist itself.
        path = playlist_key.replace("/items", "")
        return self.delete(path)
    # takes a list of album dict items as returned from getAlbums
    def createPlaylistOfAlbums(self, title, album_list, guid):
        """Create an audio playlist containing `album_list`; return its metadata dict or []."""
        key_list = []
        for a in album_list:
            # Reduce each album's key to its bare numeric metadata id.
            key_num = a['key'].replace("/children","").replace("/library/metadata/", "")
            key_list.append(key_num)
        path = "/playlists"
        path += "?type=audio"
        path += "&title={}".format(title)
        path += "&smart=0"
        path += "&uri=library://{}/directory//library/metadata/".format(guid)
        path += ",".join(key_list)
        response = self.post(path)
        try:
            return response['MediaContainer']['Metadata'][0]
        except:
            return []
    def createPlayQueueForPlaylist(self, playlist_id):
        """Create an unshuffled audio play queue for a playlist; return its MediaContainer."""
        path = "/playQueues"
        path += "?playlistID={}".format(playlist_id)
        path += "&shuffle=0&type=audio&includeChapters=1&includeRelated=1"
        return self.post(path)['MediaContainer']
def get_music_sections(server_ip, server_port, token):
    """Return every library section of type 'artist' (i.e. music) on the server."""
    server = PlexServer(server_ip, server_port, token)
    return [section for section in server.getSections()
            if section['type'] == 'artist']
def generate_playlist(server_ip, server_port, token, section, playlist_name, list_size):
    """Build a playlist of up to `list_size` randomly chosen albums from `section`.

    Replaces any existing playlist with the same name.  Returns the new
    playlist's metadata dict, or None when no albums were available.
    """
    server = PlexServer(server_ip, server_port, token)
    max_num_of_random_albums = list_size
    section_key = section['key']
    section_uuid = section['uuid']
    # list all albums for section
    print "Getting full album list from music section..."
    all_albums = server.getAlbums(section_key)
    # TODO: filter out unwanted genres here...
    num_of_random_albums = min(max_num_of_random_albums, len(all_albums))
    # choose random set of albums (rejection-sample until we have enough distinct ones)
    print "Creating random list of {} albums".format(num_of_random_albums)
    random_album_list = []
    while len(random_album_list) < num_of_random_albums:
        idx = random.randrange(len(all_albums))
        a = all_albums[idx]
        if a not in random_album_list:
            print u"  {} - {}".format(a['title'], a['parentTitle'])
            random_album_list.append(a)
    if not random_album_list:
        print "No albums in random list. Done."
        return
    # Delete old playlist with the same name, if it exists
    print "Getting list of existing playlists..."
    playlists = server.getPlaylists()
    for p in playlists:
        if p['title'] == playlist_name:
            print u"Deleting playlist: [{}]...".format(playlist_name)
            server.deletePlaylist(p)
            break
    # create new playlist with the selected albums
    print u"Creating playlist: [{}]".format(playlist_name)
    playlist = server.createPlaylistOfAlbums(playlist_name, random_album_list, section_uuid)
    return playlist
def get_clients(server_ip, server_port, token):
    """Return the list of player clients currently known to the server."""
    return PlexServer(server_ip, server_port, token).getClients()
def play_on_client(server_ip, server_port, token, client, playlist):
    """Create a play queue for `playlist` and tell `client` to start playing it.

    `client` is a dict as returned by getClients; `playlist` a dict as returned
    by createPlaylistOfAlbums.
    """
    server = PlexServer(server_ip, server_port, token)
    CLIENT_IP = client['host']
    CLIENT_PORT = client['port']
    MEDIA_ID = playlist['ratingKey']
    CLIENT_ID = client['machineIdentifier']
    SERVER_ID = server.getServerInfo()['machineIdentifier']
    # Make a playqueue for the playlist
    playqueue = server.createPlayQueueForPlaylist(MEDIA_ID)
    playqueue_selected_metadata_item_id = playqueue[u'playQueueSelectedMetadataItemID']
    playqueue_id = playqueue[u'playQueueID']
    # Tell the client to play the playlist (companion-protocol playMedia call;
    # query-string values below are pre-URL-encoded).
    url = "http://{}:{}/player/playback/playMedia".format(CLIENT_IP,CLIENT_PORT)
    url += "?key=%2Flibrary%2Fmetadata%2F{}".format(playqueue_selected_metadata_item_id)
    url += "&offset=0"
    #url += "&X-Plex-Client-Identifier={}".format(CLIENT_ID)
    url += "&machineIdentifier={}".format(SERVER_ID)
    url += "&address={}".format(server_ip)
    url += "&port={}".format(server_port)
    url += "&protocol=http"
    url += "&containerKey=%2FplayQueues%2F{}%3Fown%3D1%26window%3D200".format(playqueue_id)
    url += "&commandID=2"
    headers = dict()
    # Addressed to a specific client rather than the server itself.
    headers['X-Plex-Target-Client-Identifier'] = CLIENT_ID
    r = http_comm(url, "GET", headers=headers)
    print r.content
def test():
    """End-to-end smoke test: build a random playlist and play it on the first
    non-web client found on a local server.

    NOTE(review): the token below is hard-coded — confirm it is a throwaway
    test credential before committing/sharing.
    """
    name = "ShuffleByAlbum"
    list_size = 15
    server_ip = "localhost"
    server_port = 32400
    token = "9494tdZFWpKRXsWV6Fjp"
    music_sections = get_music_sections(server_ip, server_port, token)
    if not music_sections:
        print "No music sections"
        return
    # choose the first section
    section = music_sections[0]
    playlist = generate_playlist(server_ip, server_port, token, section, name, list_size)
    clients = get_clients(server_ip, server_port, token)
    # Filter out web-app clients, which can't be remote-controlled this way.
    new_list = []
    for c in clients:
        if c['product'] != "Plex Web":
            new_list.append(c)
    clients = new_list
    if not clients:
        print "No clients"
        return
    # choose the first client
    client = clients[0]
    try:
        play_on_client(server_ip, server_port, token, client, playlist)
    except:
        print "Error talking to client"
#------------------------------------
if __name__ == "__main__":
test()
|
22,112 | 265683989a9e9bd56ef103482b729c2dc54dbfcd | #!/usr/bin/env python
import sys
import os.path as path
import shutil
def build(src, dst):
    """Copy rawdata/eicher/chronology.csv out of `src` into `dst` as eicher_chronology.csv."""
    outfile = "eicher_chronology.csv"
    print("writing %s" % outfile)
    source_file = path.join(src, "rawdata", "eicher", "chronology.csv")
    target_file = path.join(dst, outfile)
    shutil.copy(source_file, target_file)
def main():
    """CLI entry point: expects the source and destination dirs as argv[1:3]."""
    source, destination = sys.argv[1:3]
    build(source, destination)
if __name__ == "__main__":
main()
|
22,113 | 7796ee11d92633dcbc95ee92ee659281cdf295b0 | from fxdayu_sinta.IO.config import root
import json
import os
config_path = os.path.join(root, "adjust.json")
def read_config():
    """Load the adjust configuration from adjust.json.

    Falls back to the package default config when the file is missing or
    unreadable.  Uses a context manager so the file handle is always closed
    (the previous `json.load(open(...))` leaked the handle).
    """
    try:
        with open(config_path) as f:
            return json.load(f)
    except IOError:
        from fxdayu_sinta.adjust.config import adjust
        return adjust
def get_db():
    """Return the MongoDB database that holds adjust data, per the config."""
    from fxdayu_sinta.adjust import CLIENT, DB
    from fxdayu_sinta.utils.mongo import create_client
    config = read_config()
    # CLIENT section configures the Mongo client; DB names the database (default "adjust").
    return create_client(**config.get(CLIENT, {}))[config.get(DB, "adjust")]
def get_home():
    """Return the configured home directory, defaulting to /rqalpha."""
    from fxdayu_sinta.adjust import HOME
    config = read_config()
    return config.get(HOME, "/rqalpha")
def get_adj_dir():
    """Return the adjust-data directory under the configured home."""
    from fxdayu_sinta.adjust import ADJ
    home = get_home()
    return os.path.join(home, ADJ)
def create(path):
    """Write the adjust config file with HOME set to `path`.

    Creates the config directory when needed.  Uses a context manager so the
    file handle is closed and the JSON is flushed to disk (the previous
    `json.dump(..., open(...))` leaked the handle).
    """
    from fxdayu_sinta.adjust.config import adjust, HOME
    adjust[HOME] = path
    if not os.path.exists(root):
        os.makedirs(root)
    with open(config_path, 'w') as f:
        json.dump(adjust, f)
def generate():
    """Build the mapping of click sub-commands exposed by this module."""
    import click
    create_command = click.Command(
        "create",
        callback=create,
        help="Create adjust config file with rqalpha bundle adjust data path.",
        params=[click.Argument(["path"], nargs=1)],
    )
    return {'create': create_command}
|
22,114 | ef368ae8f3c31df4626d9414c1e229ab5e367ed0 | '''
Created on May 30, 2013
@author: Yubin Bai
'''
import time
infinity = (1 << 13)  # sentinel "unreachable" cost for the DP table
maxN = 4200           # maximum supported input length (DP rows)
maxL = 5              # change-spacing states: 0..4 positions since last change
# wrong solution, can't work with this type
# wrong solution, can't work with this type
def solve(S):
    """DP over (characters consumed, positions since last change allowed).

    memo[consumed][unchanged] = min letter-changes so far, where `unchanged`
    counts how many of the last positions were left unmodified (capped at 4,
    enforcing the "changes at least 5 apart" rule).  NOTE(review): the author
    flagged this as a wrong solution above — treat results as unverified.
    """
    size = len(S)
    memo = []
    # memo is (maxN x maxL), initialised to "unreachable".
    for i in range(maxN):
        memo.append(list([infinity] * maxL))
    # Start state: nothing consumed, last change effectively >= 4 positions ago.
    memo[0][4] = 0
    for consumed in range(size):
        for unchanged in range(maxL):
            if memo[consumed][unchanged] == infinity:
                continue
            # Try to match each dictionary word at position `consumed`.
            for w in wordDict:
                nextIndexAllowedToChange = 4 - unchanged
                newUnchanged = unchanged
                countChanges = 0
                good = (len(w) + consumed) <= size
                i = 0
                while (i < len(w)) and good:
                    newUnchanged += 1
                    if w[i] != S[consumed + i]:
                        # A mismatch requires changing this letter; only legal
                        # if we are past the minimum spacing from the last change.
                        newUnchanged = 0
                        if i < nextIndexAllowedToChange:
                            good = False
                            break
                        nextIndexAllowedToChange = i + 5
                        countChanges += 1
                    i += 1
                if good:
                    newUnchanged = min(newUnchanged, 4)
                    # Relax the transition to the state after consuming w.
                    v1 = memo[consumed + len(w)][newUnchanged]
                    v2 = memo[consumed][unchanged] + countChanges
                    memo[consumed + len(w)][newUnchanged] = min(v1, v2)
    return min(memo[size])
wordDict = []  # module-level dictionary of candidate words, filled in below
if __name__ == "__main__":
    # Load the word dictionary used by solve().
    fDict = open('garbled_email_dictionary.txt')
    wordDict = fDict.read().split()
    fIn = open('input.txt')
    fOut = open('output.txt', 'w')
    numOfTests = int(fIn.readline())
    millis1 = int(round(time.time() * 1000))
    # One test case per line; answers written in "Case #i: answer" format.
    for t in range(numOfTests):
        line = fIn.readline().strip()
        result = solve(line)
        fOut.write("Case #%d: %d \n" % (t + 1, result))
    millis2 = int(round(time.time() * 1000))
    print("Time in milliseconds: %d " % (millis2 - millis1))
    fIn.close()
    fOut.close()
|
22,115 | a06c67e3c83d51da3fbedcf59f3ec3929bdd0fd8 | from setuptools import setup
# Install-time dependencies are read straight from requirements.txt so the
# two lists cannot drift apart.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(
    name='example',
    version='1.0',
    packages=['src','tests'],
    url='https://thoughtworks.com',
    license='',
    author='onkarshedge',
    author_email='oshedge@thoughtworks.com',
    description='This is test package',
    install_requires = required,
    test_suite = 'tests'
)
22,116 | 8dcb729e3256d78821191c4476c95a59b847c809 | N = input()
d = input()  # Python 2 input(): evaluates the line; here the maximum exponent
# Tokenise the expression: '+' separates terms, 'd{'/'}' mark a derivative
# scope; filter(None, ...) drops empty tokens.
S = filter(None, raw_input().replace('+', ' ').replace('d{', ' { ').replace('}', ' } ').split())
coef = [0 for i in xrange(d+1)]  # coefficients by exponent after differentiation
dx = 0  # current derivative nesting depth
for s in S:
    if s == '{':
        dx += 1
    elif s == '}':
        dx -= 1
    else:
        # A term like "3*x*x": after sorting, a leading numeric token (if any)
        # is the coefficient and the remaining tokens are the x factors.
        t = sorted(s.split('*'))
        if t[0].isdigit():
            c = int(t[0])
            t.pop(0)
        else:
            c = 1
        # Differentiate dx times: multiply by exponent, exponent-1, ...
        for i in xrange(len(t), len(t)-dx, -1):
            c *= i
        e = len(t) - dx  # resulting exponent; negative means the term vanished
        if e >= 0:
            coef[e] += c
print ' '.join(map(str, coef))
|
22,117 | d4aeef7ff209d4d912fa0137c441c139ddcc3049 | class Solution:
def findMinArrowShots(self, points: List[List[int]]) -> int:
if not points:
return 0
points.sort(key=lambda balloon: balloon[1])
pos = points[0][1]
ans = 1
for balloon in points:
if balloon[0] > pos:
pos = balloon[1]
ans += 1
return ans |
22,118 | d4a4b630fac79634843832bd9f03e8c9daad75c4 | import os
import sys
def check_range(a, b):
    """Return the self-descriptive 10-digit numbers in [a, b) as a set of strings.

    A number is self-descriptive when, zero-padded to 10 digits, digit k equals
    the count of digit k in the whole string.  The rebuilt `solution` string in
    the original was always identical to the padded string, so it is reused
    directly; `all(...)` short-circuits on the first mismatch instead of always
    scanning all ten digits.
    """
    solutions = set()
    for i in range(a, b):
        string = '{:010}'.format(i)
        digits = [int(ch) for ch in string]
        if all(digits[k] == digits.count(k) for k in range(10)):
            solutions.add(string)
    return solutions
if __name__ == '__main__':
    # Defaults: scan all 10-digit candidates, print to stdout only.
    begin = 0
    end = 10000000000
    write = False
    # Argument forms: [max] | [begin end] | [begin end -w]
    if len(sys.argv) == 2:
        end = int(sys.argv[1])
    elif len(sys.argv) > 2:
        begin = int(sys.argv[1])
        end = int(sys.argv[2])
        if len(sys.argv) == 4:
            if sys.argv[3] == '-w':
                write = True
            else:
                print('Error: Unrecognized argument "{}"'.format(sys.argv[3]))
                quit()
    if len(sys.argv) == 1:
        print('Usage: python3 {} [[max] || [begin end]] [-w]'.format(sys.argv[0]))
        quit()
    solutions = check_range(begin, end)
    if write:
        # Persist results to a file named after the scanned range.
        with open('{}-{}.txt'.format(begin, end), 'w') as outfile:
            for solution in solutions:
                print(solution, file=outfile)
|
def test():
    # NOTE(review): `control` starts at 0, so the `while control > 0` body is
    # unreachable and this function does nothing.  If a countdown was intended,
    # confirm the intended starting value — left unchanged here.
    control = 0
    while control > 0:
        print(control)
        control -= 1
test()
# For each test case: skip a header line, then report the longest run of
# values that increase by exactly 1 in the sorted list.
# NOTE(review): eval(input()) executes arbitrary input — int(input()) would be
# safer; also, duplicate values (difference 0) reset the run — confirm that is
# the intended handling of duplicates.
for _ in range(eval(input())):
    input()  # discard the per-case count line; the list length is not needed
    nums = sorted(list(map(int, input().split(' '))))
    max_len, temp_len = 0, 1
    for i in range(1, len(nums)):
        if nums[i] - nums[i - 1] == 1:
            temp_len += 1
        else:
            max_len = max(max_len, temp_len)
            temp_len = 1
    # Account for a run that extends to the end of the list.
    max_len = max(temp_len, max_len)
    print(max_len)
22,121 | b779a8190c0ad6352f718435bc9ac7339505ef87 |
# A Binary Tree Node
class Node:
    """A binary tree node: a payload value plus left/right child links."""
    def __init__(self, data):
        self.data = data   # node value
        self.left = None   # left child, absent until assigned
        self.right = None  # right child, absent until assigned
def construct_tree(pre, size):
    """Reconstruct a BST from its preorder traversal using an explicit stack.

    `pre` must be non-empty; `size` is accepted for interface parity but the
    length of `pre` is used directly.  Returns the root Node.
    """
    # The first preorder entry is always the root.
    root = Node(pre[0])
    stack = [root]
    for value in pre[1:]:
        popped = None
        # Pop every node smaller than the new value; the last one popped is
        # the deepest ancestor whose right subtree this value belongs to.
        while stack and value > stack[-1].data:
            popped = stack.pop()
        if popped is not None:
            # Larger than something we popped: attach as that node's right child.
            popped.right = Node(value)
            stack.append(popped.right)
        else:
            # Smaller than everything on the stack: left child of the stack top.
            parent = stack[-1]
            parent.left = Node(value)
            stack.append(parent.left)
    return root
def printInorder(root):
    # In-order traversal (left, node, right).  For a BST this prints the values
    # in sorted order; the Python 2 trailing comma keeps them on one line.
    if root is None:
        return
    printInorder(root.left)
    print root.data,
    printInorder(root.right)
if __name__ == "__main__":
    # Demo: build a BST from a preorder sequence and print its sorted contents.
    pre = [10, 5, 1, 7, 40, 50]
    root = construct_tree(pre, len(pre))
    # Root's right child is the first preorder value greater than the root (40).
    print root.right.data
    printInorder(root)
|
22,122 | 198a15a3055796105a81a59bb40550eee17174f4 |
#!/usr/bin/env python3
#Description: Provide the covid worlwide statistics and country wise.
"""
Provide the current worldwide COVID-19 statistics.
This data is being scrapped from 'https://www.worldometers.info/coronavirus/countries'.
"""
import logging as logger
import requests
import texttable as tt
from bs4 import BeautifulSoup
def world_covid_status(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of current worldwide COVID-19 statistics.

    Scrapes the headline counters from worldometers by pairing each heading
    (h1 / panel title) with its counter value.  Returns an empty dict when
    scraping fails — previously the error path implicitly returned None,
    which crashed callers that iterate over the result's .items().
    """
    try:
        soup = BeautifulSoup(requests.get(url).text, "html.parser")
        keys = soup.findAll("h1")
        values = soup.findAll("div", {"class": "maincounter-number"})
        keys += soup.findAll("span", {"class": "panel-title"})
        values += soup.findAll("div", {"class": "number-table-main"})
        return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
    except Exception as error:
        logger.error(error)
        return {}
def create_table(data):
    """Render `data` (rows of (country, cases, deaths, continent)) as an ASCII table."""
    try:
        table = tt.Texttable()
        # Add an empty row at the beginning for the headers
        table.add_rows([(None, None, None, None)] + data)
        # 'l' denotes left, 'c' denotes center,
        # and 'r' denotes right
        table.set_cols_align(('c', 'c', 'c', 'c'))
        table.header((' Country ', ' Number of cases ', ' Deaths ', ' Continent '))
        print(table.draw())
    except Exception as error:
        logger.error(error)
def world_covid_table():
    """Scrape per-country COVID-19 figures and print them as a table,
    sorted by confirmed cases (descending).  Errors are logged, not raised."""
    try:
        # URL for scrapping data
        url = 'https://www.worldometers.info/coronavirus/countries-where-coronavirus-has-spread/'
        # get URL html
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        data = []
        # soup.find_all('td') will scrape every
        # element in the url's table
        data_iterator = iter(soup.find_all('td'))
        # data_iterator is the iterator of the table
        # This loop will keep repeating till there is
        # data available in the iterator
        while True:
            try:
                # Each table row is four consecutive <td> cells.
                country = next(data_iterator).text
                confirmed = next(data_iterator).text
                deaths = next(data_iterator).text
                continent = next(data_iterator).text
                # For 'confirmed' and 'deaths',
                # make sure to remove the commas
                # and convert to int
                data.append((
                    country,
                    int(confirmed.replace(',', '')),
                    int(deaths.replace(',', '')),
                    continent
                ))
                # StopIteration error is raised when
                # there are no more elements left to
                # iterate through
            except StopIteration:
                break
        # Sort the data by the number of confirmed cases
        data.sort(key = lambda row: row[1], reverse = True)
        create_table(data)
    except Exception as error:
        logger.error(error)
def main():
    """Print the worldwide headline summary, then the per-country table."""
    try:
        print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
        # Guard against a failed scrape: world_covid_status may yield a falsy
        # result, which previously crashed the .items() iteration.
        for key, value in (world_covid_status() or {}).items():
            print(f"{key}\n{value}\n")
        world_covid_table()
    except Exception as error:
        logger.error(error)
if __name__ == "__main__":
main()
|
22,123 | e556f131ec984ea29447b3c2ac9bf58184aa21f5 | import json
import unittest
from datetime import datetime
from unittest.mock import Mock
from dateutil.tz import tzutc
from messagebird import Client
from messagebird.conversation_webhook import \
CONVERSATION_WEBHOOK_EVENT_CONVERSATION_CREATED, \
CONVERSATION_WEBHOOK_EVENT_CONVERSATION_UPDATED
class TestConversationWebhook(unittest.TestCase):
    def test_conversation_webhook_create(self):
        """Creating a conversation webhook should POST the payload to 'webhooks'."""
        http_client = Mock()
        # Canned API response; its content is not asserted on, only the request is.
        http_client.request.return_value = '{"id":"20c308852190485bbb658e43baffc5fa","url":"https://example.com","channelId":"c0dae31e440145e094c4708b7d908842","events":["conversation.created","conversation.updated"],"status":"enabled","createdDatetime":"2019-04-03T07:46:37.984026573Z","updatedDatetime":null}'
        webhookRequestData = {
            'channelId': '20c308852190485bbb658e43baffc5fa',
            'events': [CONVERSATION_WEBHOOK_EVENT_CONVERSATION_CREATED,
                       CONVERSATION_WEBHOOK_EVENT_CONVERSATION_UPDATED],
            'url': 'https://example.com'
        }
        Client('', http_client).conversation_create_webhook(webhookRequestData)
        # The request payload must be forwarded verbatim.
        http_client.request.assert_called_once_with('webhooks', 'POST', webhookRequestData)
    def test_conversation_webhook_delete(self):
        """Deleting a webhook should issue DELETE against 'webhooks/<id>' with no body."""
        http_client = Mock()
        http_client.request.return_value = ''
        Client('', http_client).conversation_delete_webhook('webhook-id')
        http_client.request.assert_called_once_with('webhooks/webhook-id', 'DELETE', None)
def test_conversation_webhook_list(self):
http_client = Mock()
http_client.request.return_value = '{"offset":0,"limit":10,"count":2,"totalCount":2,"items":[{"id":"57b96dbe0fda40f0a814f5e3268c30a9","contactId":"8846d44229094c20813cf9eea596e680","contact":{"id":"8846d44229094c20813cf9eea596e680","href":"https://contacts.messagebird.com/v2/contacts/8846d44229094c20813cf9eea596e680","msisdn":31617110163,"displayName":"31617110163","firstName":"","lastName":"","customDetails":{},"attributes":{},"createdDatetime":"2019-04-02T08:54:39Z","updatedDatetime":"2019-04-02T08:54:40Z"},"channels":[{"id":"c0dae31e440145e094c4708b7d908842","name":"test","platformId":"sms","status":"active","createdDatetime":"2019-04-01T15:25:12Z","updatedDatetime":"0001-01-01T00:00:00Z"}],"status":"active","createdDatetime":"2019-04-02T08:54:38Z","updatedDatetime":"2019-04-02T14:24:09.192202886Z","lastReceivedDatetime":"2019-04-02T14:24:09.14826339Z","lastUsedChannelId":"c0dae31e440145e094c4708b7d908842","messages":{"totalCount":2,"href":"https://conversations.messagebird.com/v1/conversations/57b96dbe0fda40f0a814f5e3268c30a9/messages"}},{"id":"07e823fdb36a462fb5e187d6d7b96493","contactId":"459a35432b0c4195abbdae353eb19359","contact":{"id":"459a35432b0c4195abbdae353eb19359","href":"https://contacts.messagebird.com/v2/contacts/459a35432b0c4195abbdae353eb19359","msisdn":31615164888,"displayName":"31615164888","firstName":"","lastName":"","customDetails":{},"attributes":{},"createdDatetime":"2019-04-02T08:19:37Z","updatedDatetime":"2019-04-02T08:19:38Z"},"channels":[{"id":"c0dae31e440145e094c4708b7d908842","name":"test","platformId":"sms","status":"active","createdDatetime":"2019-04-01T15:25:12Z","updatedDatetime":"0001-01-01T00:00:00Z"}],"status":"active","createdDatetime":"2019-04-02T08:19:37Z","updatedDatetime":"2019-04-03T07:35:47.35395356Z","lastReceivedDatetime":"2019-04-02T12:02:22.707634424Z","lastUsedChannelId":"c0dae31e440145e094c4708b7d908842","messages":{"totalCount":16,"href":"https://conversations.messagebird.com/v1/conversations/07e823fdb36a462fb5e18
7d6d7b96493/messages"}}]}'
Client('', http_client).conversation_list_webhooks()
http_client.request.assert_called_once_with('webhooks?limit=10&offset=0', 'GET', None)
def test_conversation_webhook_list_pagination(self):
http_client = Mock()
http_client.request.return_value = '{"offset":0,"limit":10,"count":2,"totalCount":2,"items":[{"id":"57b96dbe0fda40f0a814f5e3268c30a9","contactId":"8846d44229094c20813cf9eea596e680","contact":{"id":"8846d44229094c20813cf9eea596e680","href":"https://contacts.messagebird.com/v2/contacts/8846d44229094c20813cf9eea596e680","msisdn":31617110163,"displayName":"31617110163","firstName":"","lastName":"","customDetails":{},"attributes":{},"createdDatetime":"2019-04-02T08:54:39Z","updatedDatetime":"2019-04-02T08:54:40Z"},"channels":[{"id":"c0dae31e440145e094c4708b7d908842","name":"test","platformId":"sms","status":"active","createdDatetime":"2019-04-01T15:25:12Z","updatedDatetime":"0001-01-01T00:00:00Z"}],"status":"active","createdDatetime":"2019-04-02T08:54:38Z","updatedDatetime":"2019-04-02T14:24:09.192202886Z","lastReceivedDatetime":"2019-04-02T14:24:09.14826339Z","lastUsedChannelId":"c0dae31e440145e094c4708b7d908842","messages":{"totalCount":2,"href":"https://conversations.messagebird.com/v1/conversations/57b96dbe0fda40f0a814f5e3268c30a9/messages"}},{"id":"07e823fdb36a462fb5e187d6d7b96493","contactId":"459a35432b0c4195abbdae353eb19359","contact":{"id":"459a35432b0c4195abbdae353eb19359","href":"https://contacts.messagebird.com/v2/contacts/459a35432b0c4195abbdae353eb19359","msisdn":31615164888,"displayName":"31615164888","firstName":"","lastName":"","customDetails":{},"attributes":{},"createdDatetime":"2019-04-02T08:19:37Z","updatedDatetime":"2019-04-02T08:19:38Z"},"channels":[{"id":"c0dae31e440145e094c4708b7d908842","name":"test","platformId":"sms","status":"active","createdDatetime":"2019-04-01T15:25:12Z","updatedDatetime":"0001-01-01T00:00:00Z"}],"status":"active","createdDatetime":"2019-04-02T08:19:37Z","updatedDatetime":"2019-04-03T07:35:47.35395356Z","lastReceivedDatetime":"2019-04-02T12:02:22.707634424Z","lastUsedChannelId":"c0dae31e440145e094c4708b7d908842","messages":{"totalCount":16,"href":"https://conversations.messagebird.com/v1/conversations/07e823fdb36a462fb5e18
7d6d7b96493/messages"}}]}'
Client('', http_client).conversation_list_webhooks(2, 1)
http_client.request.assert_called_once_with('webhooks?limit=2&offset=1', 'GET', None)
def test_conversation_webhook_read(self):
http_client = Mock()
http_client.request.return_value = '{"id":"5031e2da142d401c93fbc38518ebb604","url":"https://example.com","channelId":"c0dae31e440145e094c4708b7d908842","events":["conversation.created","conversation.updated"],"status":"enabled","createdDatetime":"2019-04-03T08:41:37Z","updatedDatetime":null}'
web_hook = Client('', http_client).conversation_read_webhook('webhook-id')
http_client.request.assert_called_once_with('webhooks/webhook-id', 'GET', None)
self.assertEqual(datetime(2019, 4, 3, 8, 41, 37, tzinfo=tzutc()), web_hook.createdDatetime)
self.assertEqual(None, web_hook.updatedDatetime)
self.assertEqual(['conversation.created', 'conversation.updated'], web_hook.events)
def test_conversation_webhook_update(self):
http_client = Mock()
http_client.request.return_value = json.dumps({"id": "985ae50937a94c64b392531ea87a0263",
"url": "https://example.com/webhook",
"channelId": "853eeb5348e541a595da93b48c61a1ae",
"events": [
"message.created",
"message.updated",
],
"status": "enabled",
"createdDatetime": "2018-08-29T10:04:23Z",
"updatedDatetime": "2018-08-29T10:10:23Z"
})
webhookRequestData = {
'events': [CONVERSATION_WEBHOOK_EVENT_CONVERSATION_CREATED,
CONVERSATION_WEBHOOK_EVENT_CONVERSATION_UPDATED],
'url': 'https://example.com/webhook',
'status': 'enabled'
}
web_hook = Client('', http_client).conversation_update_webhook('webhook-id', webhookRequestData)
http_client.request.assert_called_once_with('webhooks/webhook-id', 'PATCH', webhookRequestData)
|
22,124 | 5a9035bbe86139d4d1c65c7a295a8876bf740350 | """
MIT License
Copyright (c) 2021 kyawakyawa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import math
import torch
import torchvision
from utils import try_gpu, to_gray_scale
from box_filter import box_filter
from shift_filter import shift_filter
from first_order_derivative_operation import (
x_derivative_operator,
y_derivative_operator,
x_prewitt_filter,
y_prewitt_filter,
x_sobel_filter,
y_sobel_filter,
steerable_filter,
)
from second_order_derivative_operation import (
x_2nd_derivative_operator,
y_2nd_derivative_operator,
laplacian_filter,
)
from gaussian_filter import gaussian_filter
from laplacian_of_gaussian import laplacian_of_gaussian
from garbor_filter import garbor_filter
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--image",
action="store",
nargs=1,
const=None,
default=None,
type=str,
choices=None,
help="filepath to image",
metavar=None,
)
args = parser.parse_args()
im = Image.open(args.image[0])
torch.set_grad_enabled(False)
im = torchvision.transforms.functional.to_tensor(im)
H, W = im.shape[1:3]
im = try_gpu(im)
im = im.unsqueeze(0)
# box filter
im_box_filterd = box_filter(im, 5, 2)
im_box_filterd = im_box_filterd.squeeze()
im_box_filterd = torchvision.transforms.functional.to_pil_image(
im_box_filterd
)
im_box_filterd.save("output-box-filtered.jpg")
# shift filter
im_shift_filterd = shift_filter(im, 5, 2)
im_shift_filterd = im_shift_filterd.squeeze()
im_shift_filterd = torchvision.transforms.functional.to_pil_image(
im_shift_filterd
)
im_shift_filterd.save("output-shift-filtered.jpg")
# x derivative operator
im_x_derivative_operator = to_gray_scale(im)
im_x_derivative_operator = x_derivative_operator(im_x_derivative_operator)
im_x_derivative_operator = torch.abs(im_x_derivative_operator)
mx = torch.max(im_x_derivative_operator)
im_x_derivative_operator = im_x_derivative_operator / mx
im_x_derivative_operator = im_x_derivative_operator.squeeze()
im_x_derivative_operator = torchvision.transforms.functional.to_pil_image(
im_x_derivative_operator
)
im_x_derivative_operator.save("output-x-derivative-operator.jpg")
# y derivative operator
im_y_derivative_operator = to_gray_scale(im)
im_y_derivative_operator = y_derivative_operator(im_y_derivative_operator)
im_y_derivative_operator = torch.abs(im_y_derivative_operator)
mx = torch.max(im_y_derivative_operator)
im_y_derivative_operator = im_y_derivative_operator / mx
im_y_derivative_operator = im_y_derivative_operator.squeeze()
im_y_derivative_operator = torchvision.transforms.functional.to_pil_image(
im_y_derivative_operator
)
im_y_derivative_operator.save("output-y-derivative-operator.jpg")
# x prewitt filter
im_x_prewitt_filter = to_gray_scale(im)
im_x_prewitt_filter = x_prewitt_filter(im_x_prewitt_filter)
im_x_prewitt_filter = torch.abs(im_x_prewitt_filter)
mx = torch.max(im_x_prewitt_filter)
im_x_prewitt_filter = im_x_prewitt_filter / mx
im_x_prewitt_filter = im_x_prewitt_filter.squeeze()
im_x_prewitt_filter = torchvision.transforms.functional.to_pil_image(
im_x_prewitt_filter
)
im_x_prewitt_filter.save("output-x-prewitt-filter.jpg")
# y prewitt filter
im_y_prewitt_filter = to_gray_scale(im)
im_y_prewitt_filter = y_prewitt_filter(im_y_prewitt_filter)
im_y_prewitt_filter = torch.abs(im_y_prewitt_filter)
mx = torch.max(im_y_prewitt_filter)
im_y_prewitt_filter = im_y_prewitt_filter / mx
im_y_prewitt_filter = im_y_prewitt_filter.squeeze()
im_y_prewitt_filter = torchvision.transforms.functional.to_pil_image(
im_y_prewitt_filter
)
im_y_prewitt_filter.save("output-y-prewitt-filter.jpg")
# x sobel filter
im_x_sobel_filter = to_gray_scale(im)
im_x_sobel_filter = x_sobel_filter(im_x_sobel_filter)
im_x_sobel_filter = torch.abs(im_x_sobel_filter)
mx = torch.max(im_x_sobel_filter)
im_x_sobel_filter = im_x_sobel_filter / mx
im_x_sobel_filter = im_x_sobel_filter.squeeze()
im_x_sobel_filter = torchvision.transforms.functional.to_pil_image(
im_x_sobel_filter
)
im_x_sobel_filter.save("output-x-sobel-filter.jpg")
# y sobel filter
im_y_sobel_filter = to_gray_scale(im)
im_y_sobel_filter = y_sobel_filter(im_y_sobel_filter)
im_y_sobel_filter = torch.abs(im_y_sobel_filter)
mx = torch.max(im_y_sobel_filter)
im_y_sobel_filter = im_y_sobel_filter / mx
im_y_sobel_filter = im_y_sobel_filter.squeeze()
im_y_sobel_filter = torchvision.transforms.functional.to_pil_image(
im_y_sobel_filter
)
im_y_sobel_filter.save("output-y-sobel-filter.jpg")
# steerable filter
im_steerable_filter = to_gray_scale(im)
im_steerable_filter = steerable_filter(
im_steerable_filter, math.radians(45)
)
im_steerable_filter = torch.abs(im_steerable_filter)
mx = torch.max(im_steerable_filter)
im_steerable_filter = im_steerable_filter / mx
im_steerable_filter = im_steerable_filter.squeeze()
im_steerable_filter = torchvision.transforms.functional.to_pil_image(
im_steerable_filter
)
im_steerable_filter.save("output-steerable_filter.jpg")
# x 2nd derivative operator
im_x_2nd_derivative_operator = to_gray_scale(im)
im_x_2nd_derivative_operator = x_2nd_derivative_operator(
im_x_2nd_derivative_operator
)
im_x_2nd_derivative_operator = torch.abs(im_x_2nd_derivative_operator)
mx = torch.max(im_x_2nd_derivative_operator)
im_x_2nd_derivative_operator = im_x_2nd_derivative_operator / mx
im_x_2nd_derivative_operator = im_x_2nd_derivative_operator.squeeze()
im_x_2nd_derivative_operator = (
torchvision.transforms.functional.to_pil_image(
im_x_2nd_derivative_operator
)
)
im_x_2nd_derivative_operator.save("output-x-2nd-derivative-operator.jpg")
# y 2nd derivative operator
im_y_2nd_derivative_operator = to_gray_scale(im)
im_y_2nd_derivative_operator = y_2nd_derivative_operator(
im_y_2nd_derivative_operator
)
im_y_2nd_derivative_operator = torch.abs(im_y_2nd_derivative_operator)
mx = torch.max(im_y_2nd_derivative_operator)
im_y_2nd_derivative_operator = im_y_2nd_derivative_operator / mx
im_y_2nd_derivative_operator = im_y_2nd_derivative_operator.squeeze()
im_y_2nd_derivative_operator = (
torchvision.transforms.functional.to_pil_image(
im_y_2nd_derivative_operator
)
)
im_y_2nd_derivative_operator.save("output-y-2nd-derivative-operator.jpg")
# laplacian filter
im_laplacian_filter = to_gray_scale(im)
im_laplacian_filter = laplacian_filter(im_laplacian_filter)
im_laplacian_filter = torch.abs(im_laplacian_filter)
mx = torch.max(im_laplacian_filter)
im_laplacian_filter = im_laplacian_filter / mx
im_laplacian_filter = im_laplacian_filter.squeeze()
im_laplacian_filter = torchvision.transforms.functional.to_pil_image(
im_laplacian_filter
)
im_laplacian_filter.save("output-laplacian-filter.jpg")
# gaussian filter
im_gaussian_filter = gaussian_filter(im, 9, 4, 3)
im_gaussian_filter = torch.abs(im_gaussian_filter)
mx = torch.max(im_gaussian_filter)
im_gaussian_filter = im_gaussian_filter / mx
im_gaussian_filter = im_gaussian_filter.squeeze()
im_gaussian_filter = torchvision.transforms.functional.to_pil_image(
im_gaussian_filter
)
im_gaussian_filter.save("output-gaussian-filter.jpg")
# laplacian of gaussian filter
im_laplacian_of_gaussian = to_gray_scale(im)
im_laplacian_of_gaussian = laplacian_of_gaussian(
im_laplacian_of_gaussian, 9, 4, 1
)
im_laplacian_of_gaussian = torch.abs(im_laplacian_of_gaussian)
mx = torch.max(im_laplacian_of_gaussian)
im_laplacian_of_gaussian = im_laplacian_of_gaussian / mx
im_laplacian_of_gaussian = im_laplacian_of_gaussian.squeeze()
im_laplacian_of_gaussian = torchvision.transforms.functional.to_pil_image(
im_laplacian_of_gaussian
)
im_laplacian_of_gaussian.save("output-laplacian-of-gaussian.jpg")
# garbor filter
im_garbor_filter = to_gray_scale(im)
im_garbor_filter = garbor_filter(
im, 21, 10, math.pi / 4, 2.0, 2.0, 2 * math.pi, 0.0
)
im_garbor_filter = torch.abs(im_garbor_filter)
mx = torch.max(im_garbor_filter)
im_garbor_filter = im_garbor_filter / mx
im_garbor_filter = im_garbor_filter.squeeze()
im_garbor_filter = torchvision.transforms.functional.to_pil_image(
im_garbor_filter
)
im_garbor_filter.save("output-garbor-filter.jpg")
|
22,125 | 14be28442bf8ed5a3f45fdb5bdbae8b0df491091 | import os
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
import tools
app = Flask(__name__)
images_dir = app.config['IMAGE_UPLOADS'] = './static/images/uploads'
images = []
undos = []
def img_path():
return '{}/{}'.format(images_dir, images[-1])
@app.route('/load-img', methods=['GET', 'POST'])
def load_img():
if request.method == 'POST':
if request.files:
picture = request.files['picture']
picture.save(os.path.join(images_dir, picture.filename))
images.clear()
images.append(picture.filename)
images.append(tools.read_image(f'{images_dir}/{picture.filename}'))
return redirect('/')
else:
return
@app.route('/')
def home():
if len(images) == 2:
return render_template('home.html', picture=f'{images_dir}/{images[0]}')
elif len(images) > 2:
return render_template('home.html', picture=f'data:image/jpeg;base64,{tools.img_enc(images[-1])}')
else:
return render_template('home.html')
@app.route('/init')
def initialize():
if len(images) >= 2:
images.append(images[1])
return render_template('home.html', picture=f'{images_dir}/{images[0]}')
else:
pass
@app.route('/undo')
def undo():
if len(images) >= 2:
img = images.pop()
undos.append(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{tools.img_enc(images[-1])}')
else:
return render_template('home.html', picture=f'{images_dir}/{images[0]}')
@app.route('/restore')
def restore():
if len(undos) >= 1:
img = undos.pop()
images.append(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{tools.img_enc(images[-1])}')
else:
return render_template('home.html', picture=f'{images_dir}/{images[0]}')
@app.route('/save-img')
def save_img():
if len(images) > 1:
tools.save_image(images[-1])
images.clear()
return redirect('/')
else:
return "Vous n'avez encore applique une modification"
@app.route('/gray-scale')
def gray_scale():
try:
img = images[-1]
img = tools.to_gray_scale(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/binary')
def binary_inv():
try:
img = images[-1]
img = tools.to_binary_inv(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/truncate')
def truncate():
try:
img = images[-1]
img = tools.to_truncate(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/zero')
def zero():
try:
img = images[-1]
img = tools.to_zero(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/zero-inv')
def zero_inv():
try:
img = images[-1]
img = tools.to_zero_inv(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
# MORPHOLOGY
# ***********
@app.route('/erosion')
def erosion():
try:
img = images[-1]
img = tools.erosion(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/dilatation')
def dilatation():
try:
img = images[-1]
img = tools.dilatation(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/open')
def to_open():
try:
img = images[-1]
img = tools.open(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/close')
def to_close():
try:
img = images[-1]
img = tools.close(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/gradient')
def gradient():
try:
img = images[-1]
img = tools.gradient(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/gaussian')
def gaussian():
try:
img = images[-1]
img = tools.gaussian_blur(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/median')
def median():
try:
img = images[-1]
img = tools.median_blur(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
@app.route('/edges')
def edges():
try:
img = images[-1]
img = tools.edge_detect(img)
images.append(img)
img_enc = tools.img_enc(img)
return render_template('home.html', picture=f'data:image/jpeg;base64,{img_enc}')
except:
return render_template('home.html', picture=f'data:image/jpeg;base64,{images[-1]}')
if __name__ == '__main__':
app.run(debug=True)
|
22,126 | 3744873a927e9d00d91f52bd99a0864794f83d3d | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 2 16:31:24 2018
@author: MSIK
"""
import pandas as pd
import numpy as np
from datetime import date
import warnings
warnings.filterwarnings('ignore')
import lightgbm
from sklearn.metrics import f1_score, roc_auc_score, classification_report
from sklearn.model_selection import cross_val_predict
# wa_train.txt ็จๆท็ฝ็ซ่ฎฟ้ฎ่ฎฐๅฝๆฐๆฎ
waTrain = pd.read_table('../data/wa_train.txt', header = None)
waTrain.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow',
'down_flow', 'wa_type', 'date']
waTest = pd.read_table('../data/wa_test_a.txt', header = None)
waTest.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow',
'down_flow', 'wa_type', 'date']
waTestb = pd.read_table('../data/wa_test_b.txt', header = None)
waTestb.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow',
'down_flow', 'wa_type', 'date']
# ๅ่ฎพ็ฌฌไธๅคฉๆฏๅจ5
weekList = [5,6,7,1,2,3,4]*7
weekList = weekList[:45]
waData = pd.concat([waTrain, waTest])
waData = pd.concat([waData, waTestb])
#waData['weekday'] = waData['date'].astype('int').map(lambda x: weekList[x-1] if x >0 else 1)
# ๆ ็ญพuid_train.txt 0:4099 1:900
uidTrain = pd.read_table('../data/uid_train.txt', header = None)
uidTrain.columns = ['uid', 'label']
uidTest = pd.DataFrame()
uidTest['uid'] = range(5000,7000)
uidTest.uid = uidTest.uid.apply(lambda x: 'u'+str(x).zfill(4))
feature = pd.concat([uidTrain.drop('label', axis=1), uidTest])
def get_user_visit_num_at_day(waData, wa_type):
if wa_type not in [0,1]:
t = waData[['uid','date']]
else:
t = waData[waData.wa_type == wa_type][['uid','date']]
t['visit_num'] = 1
t = t.groupby(['uid','date'])['visit_num'].sum().reset_index()
for day in range(1, 46):
s = 'user_visit_%s_num_at_day_%s' % (str(wa_type), str(day))
t0 = t[t.date == day][['uid','visit_num']]
t0.rename(columns={'visit_num':s}, inplace=True)
t = t.merge(t0, on='uid', how='left')
t.drop(['date','visit_num'], axis=1, inplace=True)
t.fillna(0, inplace=True)
t.drop_duplicates(inplace=True)
df = t.drop('uid', axis=1)
for fun in [('mean', np.mean), ('max',np.max), ('std', np.std), ('var', np.var)]:
s = 'user_visit_%s_num_at_day_%s' % (str(wa_type), fun[0])
val = df.apply(fun[1], axis=1).values
t[s] = val
return t
def get_user_visit_time_at_day(waData, wa_type):
if wa_type not in [0,1]:
t = waData[['uid','date','visit_dura']]
else:
t = waData[waData.wa_type == wa_type][['uid','date','visit_dura']]
t = t.groupby(['uid','date'])['visit_dura'].sum().reset_index()
for day in range(1, 46):
s = 'user_visit_%s_time_at_day_%s' % (str(wa_type), str(day))
t0 = t[t.date == day][['uid','visit_dura']]
t0.rename(columns={'visit_dura':s}, inplace=True)
t = t.merge(t0, on='uid', how='left')
t.drop(['date', 'visit_dura'], axis=1, inplace=True)
t.fillna(0, inplace=True)
t.drop_duplicates(inplace=True)
df = t.drop('uid', axis=1)
for fun in [('mean', np.mean), ('max',np.max), ('std', np.std), ('var', np.var)]:
s = 'user_visit_%s_time_at_day_%s' % (str(wa_type), fun[0])
val = df.apply(fun[1], axis=1).values
t[s] = val
return t
def get_user_visit_flow_at_day(waData, wa_type):
if wa_type not in [0,1]:
t = waData[['uid','date','down_flow']]
else:
t = waData[waData.wa_type == wa_type][['uid','date','down_flow']]
t = t.groupby(['uid','date'])['down_flow'].sum().reset_index()
for day in range(1, 46):
s = 'user_visit_%s_flow_at_day_%s' % (str(wa_type), str(day))
t0 = t[t.date == day][['uid','down_flow']]
t0.rename(columns={'down_flow':s}, inplace=True)
t = t.merge(t0, on='uid', how='left')
t.drop(['date', 'down_flow'], axis=1, inplace=True)
t.fillna(0, inplace=True)
t.drop_duplicates(inplace=True)
df = t.drop('uid', axis=1)
for fun in [('mean', np.mean), ('max',np.max), ('std', np.std), ('var', np.var)]:
s = 'user_visit_%s_flow_at_day_%s' % (str(wa_type), fun[0])
val = df.apply(fun[1], axis=1).values
t[s] = val
return t
def get_user_visit_up_flow_at_day(waData, wa_type):
if wa_type not in [0,1]:
t = waData[['uid','date','up_flow']]
else:
t = waData[waData.wa_type == wa_type][['uid','date','up_flow']]
t = t.groupby(['uid','date'])['up_flow'].sum().reset_index()
for day in range(1, 46):
s = 'user_visit_%s_up_flow_at_day_%s' % (str(wa_type), str(day))
t0 = t[t.date == day][['uid','up_flow']]
t0.rename(columns={'up_flow':s}, inplace=True)
t = t.merge(t0, on='uid', how='left')
t.drop(['date', 'up_flow'], axis=1, inplace=True)
t.fillna(0, inplace=True)
t.drop_duplicates(inplace=True)
df = t.drop('uid', axis=1)
for fun in [('mean', np.mean), ('max',np.max), ('std', np.std), ('var', np.var)]:
s = 'user_visit_%s_up_flow_at_day_%s' % (str(wa_type), fun[0])
val = df.apply(fun[1], axis=1).values
t[s] = val
return t
def make_user_wa_feature(waData, feature):
# user_visit_web_cate_num
t0 = waData[waData.wa_type == 0][['uid','wa_name']]
t0 = t0.groupby('uid')['wa_name'].nunique().reset_index().\
rename(columns={'wa_name':'user_visit_web_cate_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_cate_num
t0 = waData[waData.wa_type == 1][['uid','wa_name']]
t0 = t0.groupby('uid')['wa_name'].nunique().reset_index().\
rename(columns={'wa_name':'user_visit_app_cate_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_wa_cate_num
t0 = waData[['uid','wa_name']]
t0 = t0.groupby('uid')['wa_name'].nunique().reset_index().\
rename(columns={'wa_name':'user_visit_wa_cate_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_num
t0 = waData.groupby('uid')['date'].count().reset_index().\
rename(columns={'date':'user_visit_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_num
t0 = waData[waData.wa_type == 1].groupby('uid')['date'].count().reset_index().\
rename(columns={'date':'user_visit_app_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_num
t0 = waData[waData.wa_type == 0].groupby('uid')['date'].count().reset_index().\
rename(columns={'date':'user_visit_web_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_num_ratio
feature['user_visit_web_num_ratio'] = (feature['user_visit_web_num'].fillna(0) + 0.001)/\
(feature['user_visit_num'].fillna(0) + 0.001)
# user_visit_app_hot_value
t0 = waData[waData.wa_type == 1][['wa_name']]
t0['app_hot_value'] = 1
t0 = t0.groupby('wa_name')['app_hot_value'].sum().reset_index()
t0['app_hot_value'] = t0['app_hot_value'].astype('float')/\
t0['app_hot_value'].sum()
waData = waData.merge(t0, on='wa_name', how='left')
t0 = waData.groupby('uid')['app_hot_value'].sum().reset_index().\
rename(columns={'app_hot_value':'user_visit_app_hot_value'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_hot_value
t0 = waData[waData.wa_type == 0][['wa_name']]
t0['web_hot_value'] = 1
t0 = t0.groupby('wa_name')['web_hot_value'].sum().reset_index()
t0['web_hot_value'] = t0['web_hot_value'].astype('float')/\
t0['web_hot_value'].sum()
waData = waData.merge(t0, on='wa_name', how='left')
t0 = waData.groupby('uid')['web_hot_value'].sum().reset_index().\
rename(columns={'web_hot_value':'user_visit_web_hot_value'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_day_num
t0 = waData[waData.wa_type == 1][['uid','date']]
t0 = t0.groupby('uid')['date'].nunique().reset_index().\
rename(columns={'date':'user_visit_app_day_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_day_num
t0 = waData[waData.wa_type == 0][['uid','date']]
t0 = t0.groupby('uid')['date'].nunique().reset_index().\
rename(columns={'date':'user_visit_web_day_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_day_num
t0 = waData.groupby('uid')['date'].nunique().reset_index().\
rename(columns={'date':'user_visit_day_num'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_total_cnt
t0 = waData[waData.wa_type == 1].groupby('uid')['visit_cnt'].sum().reset_index().\
rename(columns={'visit_cnt':'user_visit_app_total_cnt'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_total_cnt
t0 = waData[waData.wa_type == 0].groupby('uid')['visit_cnt'].sum().reset_index().\
rename(columns={'visit_cnt':'user_visit_web_total_cnt'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_total_time
t0 = waData[waData.wa_type == 1].groupby('uid')['visit_dura'].sum().reset_index().\
rename(columns={'visit_dura':'user_visit_app_total_time'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_total_time
t0 = waData[waData.wa_type == 0].groupby('uid')['visit_dura'].sum().reset_index().\
rename(columns={'visit_dura':'user_visit_web_total_time'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_mean_time
feature['user_visit_app_mean_time'] = (feature['user_visit_app_total_time'].fillna(0) + 0.001)/\
(feature['user_visit_app_total_cnt'].fillna(0) + 0.001)
# user_visit_web_mean_time
feature['user_visit_web_mean_time'] = (feature['user_visit_web_total_time'].fillna(0) + 0.001)/\
(feature['user_visit_web_total_cnt'].fillna(0) + 0.001)
# user_visit_app_down_flow
t0 = waData[waData.wa_type == 1].groupby('uid')['down_flow'].sum().reset_index().\
rename(columns={'down_flow':'user_visit_app_down_flow'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_down_flow
t0 = waData[waData.wa_type == 0].groupby('uid')['down_flow'].sum().reset_index().\
rename(columns={'down_flow':'user_visit_web_down_flow'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_up_flow
t0 = waData[waData.wa_type == 1].groupby('uid')['up_flow'].sum().reset_index().\
rename(columns={'up_flow':'user_visit_app_up_flow'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_up_flow
t0 = waData[waData.wa_type == 0].groupby('uid')['up_flow'].sum().reset_index().\
rename(columns={'up_flow':'user_visit_web_up_flow'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_max_cnt
t0 = waData[waData.wa_type == 1].groupby(['uid','wa_name'])['visit_cnt'].sum().reset_index()
t0 = t0.groupby('uid')['visit_cnt'].max().reset_index().\
rename(columns={'visit_cnt':'user_visit_app_max_cnt'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_max_cnt
t0 = waData[waData.wa_type == 0].groupby(['uid','wa_name'])['visit_cnt'].sum().reset_index()
t0 = t0.groupby('uid')['visit_cnt'].max().reset_index().\
rename(columns={'visit_cnt':'user_visit_web_max_cnt'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_max_time
t0 = waData[waData.wa_type == 1].groupby(['uid','wa_name'])['visit_dura'].sum().reset_index()
t0 = t0.groupby('uid')['visit_dura'].max().reset_index().\
rename(columns={'visit_dura':'user_visit_app_max_time'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_max_time
t0 = waData[waData.wa_type == 0].groupby(['uid','wa_name'])['visit_dura'].sum().reset_index()
t0 = t0.groupby('uid')['visit_dura'].max().reset_index().\
rename(columns={'visit_dura':'user_visit_web_max_time'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_app_max_down_flow
t0 = waData[waData.wa_type == 1].groupby(['uid','wa_name'])['down_flow'].sum().reset_index()
t0 = t0.groupby('uid')['down_flow'].max().reset_index().\
rename(columns={'down_flow':'user_visit_app_max_down_flow'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_web_max_down_flow
t0 = waData[waData.wa_type == 0].groupby(['uid','wa_name'])['down_flow'].sum().reset_index()
t0 = t0.groupby('uid')['down_flow'].max().reset_index().\
rename(columns={'down_flow':'user_visit_web_max_down_flow'})
feature = feature.merge(t0, on='uid', how='left')
# user_visit_0_num_at_day_y
t0 = get_user_visit_num_at_day(waData, 0)
feature = feature.merge(t0, on='uid', how='left')
# user_visit_1_num_at_day_y
t0 = get_user_visit_num_at_day(waData, 1)
feature = feature.merge(t0, on='uid', how='left')
# user_visit_0_time_at_day_y
t0 = get_user_visit_time_at_day(waData, 0)
feature = feature.merge(t0, on='uid', how='left')
# user_visit_1_time_at_day_y
t0 = get_user_visit_time_at_day(waData, 1)
feature = feature.merge(t0, on='uid', how='left')
# user_visit_0_flow_at_day_y
t0 = get_user_visit_flow_at_day(waData, 0)
feature = feature.merge(t0, on='uid', how='left')
# user_visit_1_flow_at_day_y
t0 = get_user_visit_flow_at_day(waData, 1)
feature = feature.merge(t0, on='uid', how='left')
t0 = get_user_visit_up_flow_at_day(waData, 2)
feature = feature.merge(t0, on='uid', how='left')
# ๆฏๅคฉๅไธๆถๅปๅๅบ็็ญไฟกๆฐ้ = ๆฏๅคฉๆป้ - ๆฏๅคฉๅ็ๆถ้ดๆฐ
return feature
# ๆๅ็นๅพ
feature = make_user_wa_feature(waData, feature)
# feature.to_csv('../data/feature_wa_03.csv', index=False)
# ่ฎญ็ป้
train = feature[:4999].copy()
train = train.merge(uidTrain, on='uid', how='left')
# ๆไนฑ้กบๅบ
np.random.seed(201805)
idx = np.random.permutation(len(train))
train = train.iloc[idx]
X_train = train.drop(['uid','label'], axis=1).values
y_train = train.label.values
"""
lgb = lightgbm.LGBMClassifier(boosting_type='gbdt',
objective= 'binary',
metric= 'auc',
min_child_weight= 1.5,
num_leaves = 2**5,
lambda_l2= 10,
subsample= 0.7,
colsample_bytree= 0.5,
colsample_bylevel= 0.5,
learning_rate= 0.1,
scale_pos_weight= 20,
seed= 201805,
nthread= 4,
silent= True)
"""
lgb = lightgbm.LGBMClassifier(random_state=201805)
def fitModel(model, feature1):
    """Cross-validate `model` on `feature1` (columns minus uid/label), report
    F1 / AUC / the 0.4*F1 + 0.6*AUC blended score plus a classification
    report, then refit on all rows and print the feature-importance table."""
    matrix = feature1.drop(['uid', 'label'], axis=1)
    X = matrix.values
    y = feature1.label.values
    # Out-of-fold hard predictions and positive-class probabilities.
    preds = cross_val_predict(model, X, y, cv=5, verbose=2, method='predict')
    probas = cross_val_predict(model, X, y, cv=5, verbose=2, method='predict_proba')[:, 1]
    f1score = f1_score(y, preds)
    aucscore = roc_auc_score(y, probas)
    print('F1:', f1score, 'AUC:', aucscore, 'Score:', f1score * 0.4 + aucscore * 0.6)
    print(classification_report(y, preds))
    # Refit on the full data so feature_importances_ is populated.
    model.fit(X, y)
    importance = pd.DataFrame({'feature': list(matrix), 'score': model.feature_importances_})
    importance.sort_values(by='score', ascending=False, inplace=True)
    importance.reset_index(drop=True, inplace=True)
    print(importance)
# ไบคๅ้ช่ฏๆจกๅ
fitModel(lgb, train) |
22,127 | d464d4d6ce5d97a323e3aee0307593ec031d364a | #!/usr/bin/env python3
# Card-picking puzzle: a cards worth 1, b worth 0, c worth -1; pick exactly
# k cards and print the maximum attainable sum.
a, b, c, k = map(int, input().split())
if k <= a:
    # Only ones are needed.
    print(k)
elif k <= a + b:
    # Take all the ones, pad with zeros.
    print(a)
else:
    # Forced to take (k - a - b) minus-ones on top of all ones and zeros.
    print(a - (k - a - b))
22,128 | e7cfb8b680b1f3414281006a3892ee66d597f866 | """
Turns the pressed button's light on for as long as it is pressed.
"""
import mido
from mido import Message

# MIDI port names for the Launchpad Mini (input = button events, output = LEDs).
inputPortName = "Launchpad Mini 0"
outputPortName = "Launchpad Mini 1"

try:
    with mido.open_output(outputPortName) as outputPort, mido.open_input(inputPortName) as inputPort:
        print(f'Using {inputPort} and {outputPort}')
        # Echo every incoming message straight back to the device: the note-on
        # lights the pressed pad, the note-off turns it back off.
        while True:
            for message in inputPort:
                outputPort.send(message)
except KeyboardInterrupt:
    # Ctrl-C exits quietly.
    pass
22,129 | e517f7a25ab754321515f4463d7631d9231b9c59 | from django.shortcuts import render
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from .forms import *
from .models import *
from django.views.generic.edit import FormView
from .filters import *
from .forms import FileFieldForm
import datetime
from datetime import datetime, timedelta
from django.http import HttpResponseRedirect
import re
from django.views import generic
from watson import search as watson
from .tables import *
from django_tables2 import RequestConfig
from django_tables2 import SingleTableView
from django.views.generic import ListView,TemplateView,UpdateView
def basic_search(request):
    """Render the (empty-context) basic search page."""
    context = {}
    return render(request, 'DAGR/basic_search.html', context)
class DAGRListView(TemplateView):
    """Filterable, sortable django-tables2 listing over all DAGR records."""
    template_name = 'DAGR/searchable.html'

    def get_queryset(self, **kwargs):
        # Unfiltered base queryset; GET-parameter filtering happens below.
        return DAGR.objects.all()

    def get_context_data(self, **kwargs):
        context = super(DAGRListView, self).get_context_data(**kwargs)
        # Bind the request's GET params to the filter, then render the
        # surviving queryset as a table with request-aware sort/pagination.
        filter = DAGRListFilter(self.request.GET, queryset=self.get_queryset(**kwargs))
        filter.form.helper = DAGRListFormHelper()
        table = DAGRTable(filter.qs)
        RequestConfig(self.request).configure(table)
        context['filter'] = filter
        context['table'] = table
        return context
def model_form_upload(request):
    """Handle the single-document upload form.

    On a valid POST: stamp CreationTime, derive Size/Type/Name/FileName from
    the uploaded file, then either wrap the document in a brand-new DAGR (no
    owner chosen) or grow the chosen owner's cumulative Size.
    """
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            obj = form.save(commit =False)
            obj.CreationTime = datetime.now()
            obj.Size = form.cleaned_data.get("Links").size
            # File extension becomes the document type.
            Type = form.cleaned_data.get('Links').name.split('.')[-1]
            obj.Type = Type
            # Fall back to the file's own basename when no display name was given.
            if not form.cleaned_data.get('Name'):
                name = form.cleaned_data.get('Links').name.split('/')[-1]
            else:
                name = form.cleaned_data.get('Name')
            obj.Name = name
            obj.FileName = form.cleaned_data.get('Links').name.split('/')[-1]
            if not obj.Owner:
                # No owner chosen: create a fresh leaf DAGR to own the document.
                New_DAGR = DAGR(Name = name, Author = form.cleaned_data.get('Author'), \
                    CreationTime = datetime.now(), HasKids = False, Size = obj.Size)
                New_DAGR.save()
                obj.Owner = New_DAGR
            else:
                # Existing owner: add this document's size to it.
                d = DAGR.objects.get(pk=obj.Owner.pk)
                d.Size = d.Size + obj.Size
                d.save()
            obj.save()
            return HttpResponseRedirect('/DAGR/')
    else:
        form = DocumentForm()
    # GET, or invalid POST (re-renders with bound errors).
    return render(request, 'DAGR/upload.html', {
        'form': form
    })
def url_upload(request):
    """Show and process the form for adding a new URL entry."""
    form = URLForm(request.POST or None)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect('/DAGR/')
    context = {
        'form': form,
        'name': "URL",
    }
    return render(request, 'DAGR/add_new.html', context)
def find_duplicates(request):
    """Delete duplicate MyModel rows, keeping the largest id per unique-field combo.

    NOTE(review): 'field_1'/'field_n' look like placeholder field names, and
    neither MyModel nor `models` appears in this module's explicit imports
    (they may arrive via `from .models import *`) — verify before use.
    """
    unique_fields = ['field_1', 'field_n']
    # Group rows by the unique fields, keep groups occurring more than once,
    # and remember each group's largest id.
    duplicates = (MyModel.objects.values(*unique_fields)
                  .order_by()
                  .annotate(max_id=models.Max('id'),
                            count_id=models.Count('id'))
                  .filter(count_id__gt=1))
    for duplicate in duplicates:
        # Delete every member of the group except the max-id survivor.
        (MyModel.objects.filter(**{x: duplicate[x] for x in unique_fields})
         .exclude(id=duplicate['max_id'])
         .delete())
class FileFieldView(FormView):
    """Multi-file upload: each uploaded file gets its own leaf DAGR + Document."""
    form_class = FileFieldForm
    template_name = 'DAGR/upload.html'  # Replace with your template.
    success_url = 'mult'  # Replace with your URL or reverse().

    def post(self, request, *args, **kwargs):
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        files = request.FILES.getlist('file_field')
        if form.is_valid():
            print(form.cleaned_data.get('Author'))
            for f in files:
                # One leaf DAGR per file, named after the file's stem.
                New_DAGR = DAGR(Name = f.name.split('.')[0], Author = form.cleaned_data.get('Author'), \
                    CreationTime = datetime.now(), HasKids = False, Size = f.size)
                New_DAGR.save()
                # Extension becomes the document type.
                Type = f.name.split('.')[-1]
                New_Doc = Document(Name = f, Author = form.cleaned_data.get('Author'),\
                    CreationTime = datetime.now(), Size = f.size, Owner = New_DAGR, \
                    Type = Type,Links = f, FileName = f.name)
                New_Doc.save()
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
def reach_help(pk, arr):
    """Depth-first collect every descendant of the DAGR with primary key pk
    into arr (mutated in place) and return arr."""
    node = DAGR.objects.get(pk=pk)
    for child in node.Kids.all():
        arr.append(child)
        reach_help(child.pk, arr)
    return arr
def DAGR_Reach(request, pk):
    """Render every DAGR transitively reachable from the node with pk."""
    descendants = reach_help(pk, [])
    return render(request, 'DAGR/reach.html', {'kids': descendants})
def DAGR_Sterile(request):
    """Render all 'sterile' DAGRs: nodes with no parent link and no child link."""
    arr =[]
    D = DAGR.objects.all()
    for da in D:
        if DAGRChildren.objects.filter(Parent=da.pk) or DAGRChildren.objects.filter(Children = da.pk):
            # Linked somewhere in the hierarchy: not sterile (debug print).
            print(da.pk)
        else:
            arr.append(da)
            # Author's leftover debug output.
            print(DAGRChildren.objects.filter(Parent=da.pk))
            print(da.pk)
            print("test")
    print(arr)
    context = {
        'kids':arr
    }
    return render(request,'DAGR/reach.html',context)
def DAGR_Delete(request, pk):
    """Deletion-confirmation page: shows the target DAGR and its whole subtree."""
    descendants = reach_help(pk, [])
    target = DAGR.objects.get(pk=pk)
    context = {
        'kids': descendants,
        'object': target,
    }
    return render(request, 'DAGR/delete.html', context)
def delete(request, pk):
    """Soft-delete the DAGR with pk and its whole subtree.

    Each affected node is stamped with DeletionTime, its parent/child and
    category links are removed, and its Document is orphaned (Owner=None).
    NOTE(review): `Document.objects.get(Owner=...)` raises DoesNotExist for a
    node with no document and MultipleObjectsReturned for several — confirm
    every node here owns exactly one Document.
    """
    arr =[]
    arr.append(DAGR.objects.get(pk=pk))
    reach_help(pk,arr)
    print(arr)
    for kids in arr:
        D = DAGR.objects.get(pk=kids.pk)
        D.DeletionTime = datetime.now()
        # Detach the node from the graph and its categories.
        DAGRChildren.objects.filter(Parent=kids.pk).delete()
        DAGRChildren.objects.filter(Children = kids.pk).delete()
        DAGRCategory.objects.filter(DAGRID=kids.pk).delete()
        # Orphan the backing document rather than deleting it.
        Doc = Document.objects.get(Owner = kids.pk)
        Doc.Owner = None
        Doc.save()
        D.save()
    return HttpResponseRedirect('/DAGR/')
def add_cat(request):
    """Show and process the form for creating a new Category."""
    form = CategoryForm(request.POST or None)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect('/DAGR/')
    context = {
        'form': form,
        'name': "Category",
    }
    return render(request, 'DAGR/add_new.html', context)
class DAGR_Detailview(generic.DetailView):
    """Read-only detail page for a single DAGR."""
    model = DAGR
    template_name='DAGR/detail.html'
class DAGR_Update(UpdateView):
    """Edit a DAGR; on save, restamp LastModified and accumulate child sizes."""
    model = DAGR
    fields = ['Name','Author','Kids','CategoryID']

    def form_valid(self,form):
        print(form.instance.Kids.all())
        form.instance.LastModified = datetime.now()
        kids = form.instance.Kids.all()
        if kids:
            for temp in kids:
                # Add each child's document size onto this node's Size.
                # NOTE(review): sizes are added on *every* save, so repeated
                # edits inflate Size — verify this is intended.
                docSize = Document.objects.get(Owner = temp.pk).Size
                print(docSize)
                form.instance.Size = form.instance.Size + docSize
            form.instance.HasKids = True
        redirect_url = super(DAGR_Update, self).form_valid(form)
        return redirect_url
|
22,130 | 657212474bb2f14b619b109f26abc7be10f999e7 |
class Config(object):
    """Wrapper class holding the model's (hyper)parameters."""

    def __init__(self):
        # Model architecture.
        self.image_size, self.label_length, self.num_classes = 64, 6, 11
        # Optimization.
        self.batch_size, self.num_step = 32, 300000
        # Checkpointing and summaries.
        self.save_period = 2000
        self.save_dir, self.summary_dir = './models/', './logs/'
|
22,131 | e50c688dbc2631a063673b2f028c8c876712b996 | #coding:utf-8
import torch.nn as nn
from model.modeling_bert import BertModel
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
import time
from datetime import timedelta
from model.optimization import BertAdam
from tensorboardX import SummaryWriter
import argparse
from data_process.build_data_bin import *
from config.Config import Config
from torch.utils.data import DataLoader
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since `start_time` (a time.time()
    stamp) as a timedelta rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
def to_devie(data, device):
    """Recursively move a tensor — or a list/tuple of tensors — onto `device`
    with non-blocking transfers. Lists and tuples both come back as lists.
    (Name keeps the original's 'devie' typo for caller compatibility.)"""
    if not isinstance(data, (list, tuple)):
        return data.to(device, non_blocking=True)
    return [to_devie(item, device) for item in data]
class Model(nn.Module):
    """BERT encoder plus a linear head for sentence classification."""

    def __init__(self,config):
        super(Model,self).__init__()
        self.bert=BertModel.from_pretrained(config.bert_pretrain_model)
        # Unfreeze every BERT parameter; `num` just counts them (debug print).
        num=0
        for param in self.bert.parameters():
            param.requires_grad=True
            num=num+1
        print(num)
        self.fc=nn.Linear(config.hidden_size,config.num_class)

    def forward(self, input_x,mask):
        # `pooled` is BERT's pooled output (second return value); project it
        # to per-class logits.
        _,pooled=self.bert(input_x,attention_mask=mask)
        out=self.fc(pooled)
        return out
def train(config,model,train_iter,dev_iter,test_iter):
    """Fine-tune `model` with BertAdam; evaluate on dev every 100 batches,
    checkpoint on dev-loss improvement, early-stop after
    config.require_improvement stale batches, then run the test set.

    Fixes vs. the previous version:
    - the checkpoint condition compared dev *accuracy* against the best dev
      *loss* (`dev_acc < dev_best_loss`); it now compares dev_loss.
    - 'LayerNorm.weigth' typo meant LayerNorm weights were never exempted
      from weight decay; corrected to 'LayerNorm.weight'.
    """
    model.train()
    if config.use_cuda:
        print("ๅ ่ฝฝๆจกๅๅฐGPU.....")
        model.cuda()
    start_time=time.time()
    param_optimizer=list(model.named_parameters())
    # Name fragments whose parameters get no weight decay.
    no_decay=['bias','LayerNorm.bias','LayerNorm.weight']
    # Debug dump of all named parameters.
    for n,p in param_optimizer:
        print(n,p)
    paramizer_grouped_parameters=[
        # Parameters matching none of the no_decay fragments: decayed.
        {'params':[ p for n,p in param_optimizer if not any(nd in n for nd in no_decay)],'weight_decay':0.01},
        # Parameters matching a no_decay fragment: no decay.
        {'params':[ p for n,p in param_optimizer if any(nd in n for nd in no_decay)],'weight_decay':0.0}
    ]
    optimizer=BertAdam(paramizer_grouped_parameters,
                       lr=config.learning_rate,
                       warmup=0.05,
                       t_total=len(train_iter)*config.num_epoch
                       )
    total_batch=0
    dev_best_loss=float('inf')
    last_improve=0  # last batch index with a dev improvement
    flag=False      # set when early stopping triggers
    writer=SummaryWriter(log_dir=config.log_path+'/'+time.strftime('%m-%d_%H.%M',time.localtime()))
    for epoch in range(config.num_epoch):
        print("Epoch [{}/{}]".format(epoch+1,config.num_epoch))
        for train_data in train_iter:
            train_data_X=train_data[0]
            labels=train_data[1]
            train_mask=train_data[2]
            if config.use_cuda:
                train_data_X=train_data_X.cuda()
                labels=labels.cuda()
                train_mask=train_mask.cuda()
            model.zero_grad()
            output=model(train_data_X,train_mask)
            loss=F.cross_entropy(output,labels)
            loss.backward()
            optimizer.step()
            if total_batch %100==0:
                # Periodic train/dev evaluation and checkpointing.
                true=labels.data.cpu()
                predict=torch.max(output.data,1)[1].cpu()
                train_acc=metrics.accuracy_score(true,predict)
                print("่ฎญ็ป้loss:", loss)
                print("่ฎญ็ป้็ฒพๅบฆ:", train_acc)
                dev_acc,dev_loss=evaluate(config,model,dev_iter)
                print("้ช่ฏ้็ฒพๅบฆ:",dev_acc)
                if dev_loss<dev_best_loss:  # fixed: was dev_acc<dev_best_loss
                    dev_best_loss=dev_loss
                    torch.save(model.state_dict(),config.save_path)
                    last_improve=total_batch
                    improve="*"
                else:
                    improve=""
                time_dif=get_time_dif(start_time)
                msg='่ฟญไปฃๆฌกๆฐ๏ผ{0:>6},่ฎญ็ป่ฏฏๅทฎ๏ผ{1:>5.2},่ฎญ็ป็ฒพๅบฆ๏ผ{2:>6.2%},้ช่ฏ่ฏฏๅทฎ๏ผ{3:>5.2},้ช่ฏ็ฒพๅบฆ๏ผ{4:>6.2%}๏ผ่ฑ่ดนๆถ้ด๏ผ{5} {6}'
                print(msg.format(total_batch,loss.item(),train_acc,dev_loss,dev_acc,time_dif,improve))
                writer.add_scalar("loss_train",loss,total_batch)
                writer.add_scalar("loss_dev", dev_loss, total_batch)
                writer.add_scalar("acc_train", train_acc, total_batch)
                writer.add_scalar("acc_dev", dev_acc, total_batch)
                model.train()
            total_batch=total_batch+1
            if total_batch-last_improve>config.require_improvement:
                # Early stopping: no dev improvement for too long.
                print("{}ๆฌก่ฏฏๅทฎ้ฝๆฒกๆไธ้๏ผๅๆญข่ฟญไปฃ...".format(config.require_improvement))
                flag=True
                break
        if flag:
            break
    writer.close()
    test(config,model,test_iter)
def evaluate(config,model,data_iter,test=False):
    """Run `model` over data_iter without gradients.

    Returns (accuracy, mean loss); with test=True also returns a
    classification report and confusion matrix.

    Fix: the CUDA switch now reads config.use_cuda — the attribute used by
    train() (and set from the --use_cuda CLI flag); the old config.cuda
    attribute does not appear anywhere else in this module.
    """
    model.eval()
    loss_total=0
    predict_all=np.array([],dtype=int)
    labels_all=np.array([],dtype=int)
    with torch.no_grad():
        for data in data_iter:
            data_X = data[0]
            labels = data[1]
            mask = data[2]
            if config.use_cuda:  # fixed: was config.cuda
                data_X = data_X.cuda()
                labels = labels.cuda()
                mask = mask.cuda()
            outputs=model(data_X,mask)
            loss=F.cross_entropy(outputs,labels)
            loss_total=loss_total+loss
            # Accumulate gold labels and argmax predictions on CPU.
            labels=labels.data.cpu().numpy()
            predict=torch.max(outputs.data,1)[1].cpu().numpy()
            labels_all=np.append(labels_all,labels)
            predict_all=np.append(predict_all,predict)
    acc=metrics.accuracy_score(labels_all,predict_all)
    loss_total_avg=loss_total/len(data_iter)
    if test:
        report=metrics.classification_report(labels_all,predict_all,target_names=config.mul_class_list,digits=4)
        confusion=metrics.confusion_matrix(labels_all,predict_all)
        return acc,loss_total_avg,report,confusion
    return acc,loss_total_avg
def test(config,model,test_iter):
    """Load the best checkpoint from config.save_path and report test-set
    loss, accuracy, classification report and confusion matrix."""
    print("ๆต่ฏ้้ขๆต".center(40,'_'))
    model.load_state_dict(torch.load(config.save_path))
    model.eval()
    start_time=time.time()
    test_acc,test_loss,test_report,test_confusion=evaluate(config,model,test_iter,test=True)
    msg="ๆต่ฏ้ๆๅคฑ๏ผ{0:>5.2},ๆต่ฏ้ๅ็กฎ็๏ผ{1:>6.2}"
    print(msg.format(test_loss,test_acc))
    print("ๅ็กฎ็๏ผๅฌๅ็๏ผF1...")
    print(test_report)
    print("ๆททๆท็ฉ้ต...")
    print(test_confusion)
    print("่ฑ่ดนๆถ้ด๏ผ",get_time_dif(start_time))
if __name__ == '__main__':
    # Define the command-line input arguments (repaired comment split by encoding damage)
parser = argparse.ArgumentParser(description="bertไธญๆๅ็ฑป")
parser.add_argument('--use_cuda', default='true', required=True, type=str, help='ๆฏๅฆไฝฟ็จGPU')
parser.add_argument('--train_path', default="", required=True, type=str, help='่ฎญ็ปๆไปถ')
parser.add_argument('--dev_path', default='', required=True, type=str, help='้ช่ฏๆไปถ')
parser.add_argument('--test_path', default='', required=True, type=str, help='ๆต่ฏๆไปถ')
parser.add_argument('--save_path', default='./model_save/', required=True, type=str, help='ๆจกๅไฟๅญ่ทฏๅพ')
parser.add_argument('--num_class', default=2, required=True, type=int, help='็ฑปๅซไธชๆฐ')
parser.add_argument('--num_epoch', default=3, required=True, type=int, help='epochๆฌกๆฐ')
parser.add_argument('--pad_size', default=128, required=True, type=int, help='ๆๅคงๅบๅ้ฟๅบฆ')
parser.add_argument('--lr', default=5e-5, required=True, type=float, help='ๅญฆไน ็')
parser.add_argument('--pretrain_model_path', default="", required=True, type=str, help='้ข่ฎญ็ปๆจกๅ่ทฏๅพ')
parser.add_argument('--batch_size', default=8, required=True, type=int, help='batchๅคงๅฐ')
args = parser.parse_args()
start_time = time.time()
config=Config.Config(args)
print("้ข่ฎญ็ปๆจกๅๅ ่ฝฝ".center(40, "_"))
model = Model(config)
print("ๅ ่ฝฝๆจกๅ่ฑ่ดนๆถ้ด".center(40, "_"))
print("{}็ง".format(get_time_dif(start_time)))
print("่ฎญ็ป้ๆฐๆฎๅ ่ฝฝ".center(40, "_"))
train_data_set = TrainData()
train_data_loader = DataLoader(dataset=train_data_set, batch_size=config.batch_size, shuffle=True, pin_memory=True)
print("้ช่ฏ้ๆฐๆฎๅ ่ฝฝ".center(40, "_"))
dev_data_set = DevData()
dev_data_loader = DataLoader(dataset=dev_data_set, batch_size=config.batch_size, shuffle=True, pin_memory=True)
print("ๆต่ฏ้ๆฐๆฎๅ ่ฝฝ".center(40, "_"))
test_data_set = TestData()
test_data_loader = DataLoader(dataset=test_data_set, batch_size=config.batch_size, shuffle=True, pin_memory=True)
train(config, model, train_data_loader, dev_data_loader, test_data_loader)
|
22,132 | be6f5d867039036e0cc2d54cce6dc1fe0cb18f34 | """
@file : model_simcse_GS_infoNCE.py
@author : xiaolu
@email : luxiaonlp@163.com
@time : 2021-10-25
"""
import torch
import random
from torch import nn
import torch.nn.functional as F
from transformers import BertConfig, BertModel
class Model(nn.Module):
    """SimCSE-style sentence encoder trained with an InfoNCE loss over
    in-batch positives plus Gaussian random-vector negatives."""

    def __init__(self, q_size=256, dup_rate=0.32, temperature=0.05, gamma=0.99):
        super(Model, self).__init__()
        self.config = BertConfig.from_pretrained('./roberta_pretrain/config.json')
        self.bert = BertModel.from_pretrained('./roberta_pretrain/pytorch_model.bin', config=self.config)
        # NOTE(review): gamma / q / q_size / dup_rate are stored but never
        # read in this class (momentum-queue leftovers?) — confirm.
        self.gamma = gamma
        self.q = []
        self.q_size = q_size
        self.dup_rate = dup_rate
        self.temperature = temperature
        self.loss_fct = nn.CrossEntropyLoss()

    def cal_cos_sim(self, embedding1, embedding2):
        # L2-normalize both sides, then matmul = pairwise cosine similarity.
        embedding1_norm = F.normalize(embedding1, p=2, dim=1)
        embedding2_norm = F.normalize(embedding2, p=2, dim=1)
        return torch.mm(embedding1_norm, embedding2_norm.transpose(0, 1))  # (batch_size, batch_size)

    def forward(self, input_ids1, attention_mask1):
        '''
        :param input_ids1: token id batch; it is encoded twice so dropout
            yields two different "views" of each sentence
        :param attention_mask1: attention mask for input_ids1
        :return: InfoNCE (cross-entropy over similarities) loss
        '''
        # [CLS] embedding of the first pass.
        s1_embedding = self.bert(input_ids1, attention_mask1, output_hidden_states=True).last_hidden_state[:, 0]
        # print(s1_embedding.size())
        # Second pass over identical tokens; dropout makes it a distinct view.
        input_ids2, attention_mask2 = torch.clone(input_ids1), torch.clone(attention_mask1)
        s2_embedding = self.bert(input_ids2, attention_mask2, output_hidden_states=True).last_hidden_state[:, 0]
        # print(s2_embedding.size())
        cos_sim = self.cal_cos_sim(s1_embedding, s2_embedding) / self.temperature  # (batch_size, batch_size)
        # Extra negatives: similarities against standard-normal random vectors.
        mean, std = 0, 1
        reg_size = 32  # number of random negatives appended per batch
        hidden_size = 768
        reg_random = torch.normal(mean, std, size=(reg_size, hidden_size)).cuda()
        # print(reg_random.size()) # torch.Size([32, 768])
        # print(s1_embedding.size()) # torch.Size([16, 768])
        reg_cos_sim = self.cal_cos_sim(s1_embedding, reg_random) / self.temperature
        # print(reg_cos_sim.size()) # torch.Size([16, 32])
        cos_sim = torch.cat((cos_sim, reg_cos_sim), dim=1)
        # Diagonal entries (index i for row i) are the positives.
        batch_size = cos_sim.size(0)
        labels = torch.arange(batch_size).cuda()
        loss = self.loss_fct(cos_sim, labels)
        return loss

    def encode(self, input_ids, attention_mask):
        """Return the [CLS] sentence embedding (inference path)."""
        s1_embedding = self.bert(input_ids, attention_mask, output_hidden_states=True).last_hidden_state[:, 0]
        return s1_embedding
|
22,133 | 3bcaa9860130d1cce71e146b378b35e9b14c9e33 | from prac_08.unreliable_car import UnreliableCar
def main():
    """Drive an UnreliableCar once and print its final state."""
    car = UnreliableCar('Trashbox', 100, 50)
    car.drive(50)
    print(car)


main()
|
22,134 | aff72c79e7f09162f570b71c73661fa28e549afd | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from derg import DERG, ThirdPartyLibRepo
import utils
def run(dergs_dir, derg_name, lib_repo_path, recover=False, output_suffix=""):
    """Tag third-party-library nodes in every derg under dergs_dir using the
    library repository at lib_repo_path, optionally recover their names, and
    export each derg back to its own path plus output_suffix."""
    print("Loading third party library repository %s ..." % lib_repo_path)
    repo = ThirdPartyLibRepo()
    repo.load(lib_repo_path)
    print("Loading dergs %s %s ..." % (dergs_dir, derg_name))
    dergs = utils.load_dergs(dergs_dir, derg_name)
    print("Identifying ...")
    for derg in dergs:
        print("- identifying %s" % derg.derg_path)
        repo.identify_3lib_packages(derg)
    if recover:
        # Second pass: graph-matching-based recovery of third-party nodes.
        print("Recovering")
        for derg in dergs:
            print("- recovering %s" % derg.derg_path)
            repo.recover_derg(derg)
    print("Outputting ...")
    for derg in dergs:
        # Empty suffix overwrites the original file.
        derg.export(derg.derg_path + output_suffix)
    print("Done")
def parse_args():
    """
    parse command line input

    Returns the argparse namespace. (Python 2 module — note the bare
    `print options` statement below.)
    """
    parser = argparse.ArgumentParser(
        description="identify 3rd-party lib nodes in dergs based on a library repository.")
    parser.add_argument("-dergs_dir", action="store", dest="dergs_dir",
                        required=True, help="path to the dir of the dergs")
    parser.add_argument("-derg_name", action="store", dest="derg_name",
                        required=True, help="the file name of the dergs")
    parser.add_argument("-lib_repo", action="store", dest="lib_repo",
                        required=True, help="path to the third party library repository")
    parser.add_argument("-recover", action="store_true", dest="recover", default=False,
                        required=False, help="whether to recover the third party nodes using graph matching.")
    parser.add_argument("-output_suffix", action="store", dest="output_suffix", default="",
                        required=False, help="the suffix added to the new derg file. "
                        "default will overwrite the original file.")
    options = parser.parse_args()
    print options
    return options
def main():
    """
    the main function: parse CLI options and run the identification pipeline.
    """
    opts = parse_args()
    run(opts.dergs_dir, opts.derg_name, opts.lib_repo, opts.recover, opts.output_suffix)
    return


if __name__ == "__main__":
    main()
|
22,135 | c6910ff258729ba20d1a4c5ef8df68ffd3c7c1f9 | #!/usr/bin/python
import traceback
import time
import datetime
import csv
from random import *
import math
from csv import DictReader
import pandas as pd
import numpy as np
import threading
import uuid
import cx_Oracle
import random
import json
import pandas as pd
import bil_metesim as bm
import sys
import os
# Python 2 script: simulate billing records per meter, insert them into
# Oracle, persist updated cumulative readings to CSV, and repeat forever.
cwd = os.getcwd()
# Interval between rounds (py2 `input` evaluates, so a number is expected).
time_period = input("Enter the time Period:")
file_name = raw_input("""Enter the csv file name
:""")
ip_file = cwd + '/'+file_name
data = pd.read_csv(ip_file)
# NOTE(review): `list` shadows the builtin here and again below.
list = data["METER_NO"].tolist()
# meter number -> cumulative energy reading (kWh)
dic = dict(zip(data.METER_NO, data.CUM_ENG_KWH))
x= bm.billing()
# NOTE(review): hard-coded DB credentials/host in source — move to config/env.
con = cx_Oracle.connect('ORACLE','oracle123',cx_Oracle.makedsn('orcl.cociefkf8fny.ap-southeast-1.rds.amazonaws.com',1521,'ORACLE'))
cur = con.cursor()
while True:
    for i in dic:
        x.mtr_no = i
        x.cum_kwh = dic[i]
        result = x.bill()
        fields = result.keys()
        values = result.values()
        bindvars = ":" + ",:".join(fields)
        # Column names are interpolated from the simulator's result dict keys;
        # the values themselves are passed as bind variables.
        sql = "insert into MPM.MPM_BILLING_PARAMETERS (BILLING_PARA_ID,REAL_TIME_CLOCK_DATE_TIME,BILLING_DATE,%s) values (MPM.MPM_BILLING_PARAMETERS_S.NEXTVAL,current_timestamp,current_timestamp,%s)" % (",".join(fields), bindvars)
        print sql
        cur.execute(sql, values)
        con.commit()
        # Carry the new cumulative reading into the next round.
        dic[i] = result["CUM_ENG_KWH"]
    print dic
    list = dic.items()
    print list
    index = range(len(list))
    print index
    cols = ['METER_NO', 'CUM_ENG_KWH']
    # Write the updated readings back over the input CSV.
    df_r = pd.DataFrame(list, columns = cols)
    df_r.to_csv(ip_file, index = False)
    print df_r
    time.sleep(time_period)
|
22,136 | b790bdd4c2ce9b353e02d929526e8fc8dde7f3de | from os.path import join
from pathlib import Path
SOURCE_PATH = Path(__file__).parent.parent
def get_default_file_path(file_name: str) -> str:
    """Build the absolute path of *file_name* inside this package's data directory.

    :param file_name: name of the text file
    :return: absolute path to the file under <SOURCE_PATH>/data
    """
    data_dir = join(SOURCE_PATH, 'data')
    return join(data_dir, file_name)
def read_file(file_path: str) -> str:
    """Return the UTF-8 text content of the file at *file_path*.

    :param file_path: path to the text file
    :return: the file's content as a string
    :raises FileNotFoundError: with a location-specific message when missing
    """
    try:
        with open(file=file_path, mode='r', encoding="utf8") as source:
            content = source.read()
    except FileNotFoundError:
        raise FileNotFoundError(f'No text file was found at location {file_path}')
    return content
def store_file(text: str, file_path: str) -> None:
    """Create (or overwrite) *file_path* and write *text* into it as UTF-8.

    :param text: the text to store
    :param file_path: absolute path of the destination file
    """
    with open(file=file_path, mode='w', encoding='utf8') as sink:
        sink.write(text)
|
22,137 | 0b63d0d76c2875ee303fc10f9f9a1e1b17ed2c7e | # Generated by Django 2.1.7 on 2020-05-27 16:40
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration for this app: creates the Contact and Profiledetail tables.
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('sno', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
                ('phone', models.CharField(max_length=13)),
                ('email', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('timeStamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Profiledetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(default='', max_length=255)),
                ('middlename', models.CharField(default='', max_length=255)),
                ('lastname', models.CharField(default='', max_length=255)),
                ('dob', models.CharField(default='', max_length=255)),
                ('gender', models.CharField(default='', max_length=255)),
                ('phone', models.CharField(default='', max_length=255)),
                ('address', models.CharField(default='', max_length=255)),
                ('address2', models.CharField(default='', max_length=255)),
                ('country', models.CharField(default='', max_length=50)),
                ('statename', models.CharField(default='', max_length=50)),
                ('city', models.CharField(default='', max_length=70)),
                ('zipf', models.CharField(default='', max_length=6)),
                ('image', models.ImageField(default='', upload_to='home/userprofiles')),
                # One-to-one-ish link to the auth user via username.
                # NOTE(review): `default=django.contrib.auth.models.User` passes
                # the User *class* as the column default — looks unintended; verify.
                ('email', models.ForeignKey(default=django.contrib.auth.models.User, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username', unique=True)),
            ],
        ),
    ]
|
22,138 | 6ff4d0b9ac7df602b41a105dca843215676f0ff4 | # -*- coding: utf-8 -*-
import unittest
import numpy as np
import pandas as pd
from os import path
from scipy import sparse
from ridge.models import FMClassifier
from sklearn.metrics import accuracy_score
BASEDIR = path.dirname(path.abspath(__file__))
class TestFMClassifier(unittest.TestCase):
    """Testing FMClassifier with the Titanic dataset.
    """
    def setUp(self):
        # Row-wise imputer: fill a missing Age with the per-sex mean (rounded).
        def impute_age(age_mean):
            def _impute_age(x):
                if x.Sex == 'male':
                    return round(age_mean['male'])
                elif x.Sex == 'female':
                    return round(age_mean['female'])
            return _impute_age
        train_df = pd.read_csv(path.join(BASEDIR, 'data', 'titanic-train.csv'))
        test_df = pd.read_csv(path.join(BASEDIR, 'data', 'titanic-test.csv'))
        # Drop columns not used as features.
        train_df = train_df.drop(['Name','SibSp','Parch','Ticket','Fare','Cabin','Embarked'],axis=1)
        test_df = test_df.drop(['Name','SibSp','Parch','Ticket','Fare','Cabin','Embarked'],axis=1)
        # [START Age Imputation]
        train_age_mean = train_df.groupby('Sex').Age.mean()
        test_age_mean = test_df.groupby('Sex').Age.mean()
        train_df.Age.fillna(train_df[train_df.Age.isnull()].apply(impute_age(train_age_mean), axis=1), inplace=True)
        test_df.Age.fillna(test_df[test_df.Age.isnull()].apply(impute_age(test_age_mean), axis=1), inplace=True)
        # [END Age Imputation]
        # [START One-hot vectorization]
        train_df['Female'] = train_df['Sex'].map({'male': 0, 'female': 1}).astype(int)
        test_df['Female'] = test_df['Sex'].map({'male': 0, 'female': 1}).astype(int)
        pclass_train_df = pd.get_dummies(train_df['Pclass'], prefix=('Class'))
        pclass_test_df = pd.get_dummies(test_df['Pclass'], prefix=('Class'))
        # Drop one dummy level to avoid perfect collinearity.
        pclass_train_df = pclass_train_df.drop(['Class_3'], axis=1)
        pclass_test_df = pclass_test_df.drop(['Class_3'], axis=1)
        train_df = train_df.join(pclass_train_df)
        test_df = test_df.join(pclass_test_df)
        # [END One-hot vectorization]
        self.train = train_df
        self.test = test_df

    def test_fitting_fmclassifier(self):
        X_train = self.train.drop(['PassengerId', 'Survived', 'Pclass', 'Sex'], axis=1).values
        y_train = self.train.Survived.values
        X_test = self.test.drop(['PassengerId', 'Pclass', 'Sex'], axis=1).values
        pids = self.test.PassengerId.values
        # X_train = sparse.lil_matrix(X_train)
        # X_train = sparse.csr_matrix(X_train)
        model = FMClassifier().fit(X_train, y_train, k=3, l2=1e-1, eta=1e-2, n_iter=200)
        y_train_pred = model.predict(X_train)
        # Print Loss Series
        # print(np.mean(model.loss_series))
        # print(model.loss_series)
        print(accuracy_score(y_train, y_train_pred))
        y_pred = model.predict(X_test, target='0-1')
        # Make Kaggle Submission Data
        pd.DataFrame({'PassengerId': pids, 'Survived': y_pred}).to_csv(path.join(BASEDIR, 'tmp', 'titanic-result.csv'), index=None)
if __name__ == '__main__':
unittest.main() |
22,139 | d7682e8fb1fdd32044fbe4dbd0ac0fe1d74de6ab | from .action_manager import ActionManager
from geometry_msgs.msg import Twist
import numpy as np
import time
from collections import deque
class ContinuousActionManager(ActionManager):
    """Converts ROS Twist messages into per-frame Unreal actions, scaling the
    commanded velocities by a running estimate of the simulator's FPS."""

    def __init__(self, unreal_settings):
        super().__init__()
        self.__METERS_TO_UNREAL_UNIT = unreal_settings['METERS_TO_UNREAL_UNIT']
        self.__MAX_FPS = unreal_settings['MAX_FPS']
        # Autodetect FPS: rolling mean over the last 100 frame intervals,
        # seeded with the configured maximum.
        self.__last_time = time.time()
        self.__fps_queue = deque(maxlen=100)
        self.__fps_queue.append(self.__MAX_FPS)

    def change_settings(self):
        # No runtime-tunable settings for this manager.
        pass

    def get_actions(self, data):
        current_time = time.time()
        current_fps = 1 / (current_time - self.__last_time)
        # Ignore implausible spikes above the configured maximum.
        if current_fps <= self.__MAX_FPS:
            self.__fps_queue.append(current_fps)
        fps_mean = sum(self.__fps_queue) / len(self.__fps_queue)
        # print("FPS: ", fps_mean)
        self.__last_time = current_time
        vel_x = data.linear.x
        angular_vel = -data.angular.z
        # Convert to unreal: per-frame displacement (units) and turn (degrees).
        forward = (vel_x / fps_mean) * self.__METERS_TO_UNREAL_UNIT
        turn = np.rad2deg(angular_vel / fps_mean)
        actions = [turn, forward]
        return actions
|
22,140 | 59a15068cb473e25145949c8141207506c68c558 | from category import WordCategoryChecker
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tag.stanford import NERTagger
from HTMLParser import HTMLParser
from multiprocessing import Pool
import ConfigParser, csv, urllib2, nltk, os
import multiprocessing, logging, sys
# Allow arbitrarily large CSV fields (py2: sys.maxsize).
csv.field_size_limit(sys.maxsize)
#Multiprocessing debugger
logger = multiprocessing.log_to_stderr()
logger.setLevel(multiprocessing.SUBDEBUG)
logger.warning('doomed')
parser = HTMLParser()
lemmatizer = WordNetLemmatizer()
# Stop list container
stoplist = stopwords.words("english")
stoplist.append("so")
stoplist.append("tell")
# Conceptnet-based word category checker
word_checker = WordCategoryChecker()
# Category containers (module-level shared state, mutated by DataManager.add_header)
conceptnet_categories = []
NER_categories = []
keyword_categories = {}
columnval_categories = {}
history_file = "history.cfg"
# Named entitory recognizer files
nermuc = NERTagger('stanford-ner/classifiers/english.muc.7class.distsim.crf.ser.gz','stanford-ner/stanford-ner.jar')
nereng = NERTagger('stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz','stanford-ner/stanford-ner.jar')
# Container of word splits in batches (filled by the async split callback)
batch_list = None
''' Adds category features to an existing CSV feature-file using ConceptNet, Stanford NER, keywords or column values '''
class DataManager:
def __init__(self, source_file, output_file, focus_column, read_mode="rU", write_mode="wb", batch = 100, pool_size = 4):
self.headers = []
self.source_file = source_file
self.output_file = output_file
self.focus_column = focus_column
self.read_mode = read_mode
self.write_mode = write_mode
self.batch = batch
self.pool_size = pool_size
self.headers = []
self.split_pool = None
self.temp_row = None
global batch_list
batch_list = [None] * self.batch
def process(self):
self.load_files()
self.read_data()
''' Add categories to header for CSV file and store specific category types '''
def add_header(self, header, header_type = "conceptnet"):
global conceptnet_categories
global NER_categories
global keyword_categories
if header_type != "keyword" and header_type!= "columnval":
if not isinstance(header,list):
header = [header]
self.headers += header
if header_type == "conceptnet":
conceptnet_categories+= header
elif header_type == "NER":
NER_categories += header
elif header_type == "keyword":
keyword_headers = []
for topic in header:
keyword_headers.append(topic[0])
keyword_categories[topic[0]] = topic[1]
self.headers += keyword_headers
elif header_type == "columnval":
columnval_headers = []
for columnval in header:
columnval_headers.append(columnval[0])
columnval_categories[columnval[0]] = (columnval[1],columnval[2])
self.headers += columnval_headers
''' Load source and target files '''
def load_files(self):
self.file_reader = csv.reader(open(self.source_file,self.read_mode), delimiter=",")
self.file_writer = csv.writer(open(self.output_file,self.write_mode), dialect='excel')
''' Read and process source file then store results to target file '''
def read_data(self):
self.split_pool = Pool(processes=self.pool_size)
self.temp_row = []
row_ct = 0
if self.write_mode == 'a' and os.path.isfile(history_file):
config = ConfigParser.RawConfigParser()
config.read(history_file)
hist_ct = config.getint('History','last_row')
row_ct = 0
for i in range(0, hist_ct + 1):
self.file_reader.next()
row_ct += 1
else:
complete_header = self.file_reader.next() + self.headers
self.file_writer.writerow(complete_header)
for row in self.file_reader:
# Clean, split and store words asynchrously and send results to callback function
self.split_pool.apply_async(clean_split_store, (row, self.focus_column, row_ct % self.batch), callback = add_result_to_list)
# Process entire batch
if row_ct != 0 and row_ct % self.batch == 0:
self.process_batch()
self.store_history(row_ct)
# Append row from source file to temporary containter
self.temp_row.append(row)
print "Row count: "+str(row_ct)
row_ct += 1
if (row_ct - 1) % self.batch !=0:
# Process rows exceeding batch value
self.process_batch()
self.store_history(row_ct)
print "Total read: ", row_ct
def process_batch(self):
    """Run the three categorizers over the current batch and write the rows.

    Waits for the splitting pool to drain, maps keyword / ConceptNet / NER
    categorizers over the batch in parallel, merges the per-word results,
    derives column-value categories, then writes one augmented CSV row per
    buffered source row and resets the batch containers.
    """
    global batch_list
    # Wait for splitting to finish and reinitialize new Pool
    self.split_pool.close()
    self.split_pool.join()
    self.split_pool = Pool(processes=self.pool_size)
    # Filter array for None values
    batch_list = [x for x in batch_list if x is not None]
    # Get category of each word based on keywords
    process_pool = Pool(processes=self.pool_size)
    keyword_result = process_pool.map_async(get_keyword_categories, batch_list)
    # Get category of each word using conceptnet
    #conceptnet_pool = Pool(processes=self.pool_size)
    conceptnet_result = process_pool.map_async(get_conceptnet_categories, batch_list)
    # Get NER categories
    #NER_pool = Pool(processes=self.pool_size)
    NER_result = process_pool.map_async(get_NER_categories, batch_list)
    # Wait for processes in the batch to finish
    print "Keyword"
    sys.stdout.flush()
    keyword_result = keyword_result.get()
    #while(not conceptnet_result.ready()):
    #    print conceptnet_result._number_left
    print "NER"
    sys.stdout.flush()
    NER_result = NER_result.get()
    print "Concept net"
    sys.stdout.flush()
    conceptnet_result = conceptnet_result.get()
    #conceptnet_result = process_pool.map(get_conceptnet_categories, batch_list)
    # Merge results from each type of category
    for i in range(0,len(keyword_result)):
        keyword_result[i].update(conceptnet_result[i])
        keyword_result[i].update(NER_result[i])
        # Build category values based on values of other columns
        keyword_result[i].update(get_columnval_categories(keyword_result[i]))
    # Build and write column values for CSV file
    # NOTE(review): assumes len(temp_row) == len(keyword_result), i.e. no row
    # produced a None split result — confirm.
    for i in range(0,len(self.temp_row)):
        val_row = []
        for column in self.headers:
            val_row.append(keyword_result[i][column])
        cur_row = self.temp_row[i] + val_row
        self.file_writer.writerow(cur_row)
    # Reset temporary containers
    self.temp_row = []
    batch_list = [None] * self.batch
def store_history(self, last_row):
    """Persist the index of the last processed row to the history file."""
    history = ConfigParser.RawConfigParser()
    history.add_section('History')
    history.set('History', 'last_row', last_row)
    with open(history_file, 'w') as handle:
        history.write(handle)
''' Clean split and return sentence '''
def clean_split_store(data, focus_column, index):
    """Worker: strip URL-encoding, HTML entities and tags from one cell,
    then split it into words.

    data: one CSV row; focus_column: index of the text cell; index: this
    row's slot within the current batch.  Returns (index, word_list) for
    the add_result_to_list callback.
    """
    # unquote -> unescape -> clean_html (old NLTK API), then whitespace split.
    result = nltk.clean_html(parser.unescape(urllib2.unquote(data[focus_column]).decode('utf-8', 'ignore'))).split()
    # Apply user defined strategies on cleaned and split data
    return (index, result)
''' Add results to list '''
def add_result_to_list(result):
    """Pool callback: place one split result into its batch slot.

    ``result`` is the (batch_index, word_list) tuple returned by
    clean_split_store.
    """
    slot, words = result
    batch_list[slot] = words
def get_conceptnet_categories(word_list):
    """Flag each ConceptNet category matched by any word in word_list.

    Words are reduced to ASCII alphanumerics, lower-cased and lemmatized;
    stopwords and single characters are skipped.  Returns {category: bool}.
    """
    result = {}
    for category in conceptnet_categories:
        result[category] = False
    for word in word_list:
        word = ''.join(e for e in word if e.isalnum() or e.isspace()).encode('ascii','ignore')
        word = lemmatizer.lemmatize(word.lower())
        if not word in stoplist and len(word)>1:
            for category in conceptnet_categories:
                # word_checker decides whether the word relates to the
                # category — presumably via ConceptNet lookups; confirm
                # caching/rate limits upstream.
                if word_checker.check(word, category):
                    result[category] = True
    return result
def get_keyword_categories(word_list):
    """Flag each keyword category whose keyword list contains a word from
    word_list.

    Words are reduced to ASCII alphanumerics; stopwords and single
    characters are skipped.  Returns {category: bool}.
    """
    result = {}
    for category in keyword_categories:
        result[category] = False
    for word in word_list:
        # Strip non-alphanumeric characters and force ASCII before matching.
        word = ''.join(e for e in word if e.isalnum() or e.isspace()).encode('ascii','ignore')
        if not word in stoplist and len(word)>1:
            for category in keyword_categories:
                # Membership test replaces the original linear scan that
                # compared against every keyword with no early break;
                # behavior is identical, just fewer comparisons.
                if word in keyword_categories[category]:
                    result[category] = True
    return result
def get_NER_categories(word_list):
    """Flag named-entity categories found in word_list by two taggers.

    Both ``nermuc`` and ``nereng`` are applied; a category is True if either
    tagger labels any token with it.  Returns {category_lowercase: bool}.
    """
    result = {}
    for categ in NER_categories:
        result[categ.lower()] = False
    # Keep basic punctuation so the taggers see near-natural text.
    word_list = [''.join(e for e in x if e.isalnum() or e.isspace() or e in ['.', ',', '?', '!', "'",':',';','$']).encode('ascii','ignore') for x in word_list]
    ner_result = nermuc.tag(word_list)
    for categ in NER_categories:
        if categ in [it[1].lower() for it in ner_result]:
            result[categ.lower()] = True
    ner_result = nereng.tag(word_list)
    for categ in NER_categories:
        if categ in [it[1].lower() for it in ner_result]:
            result[categ.lower()] = True
    return result
def get_columnval_categories(column_results):
    """Derive column-value categories from already-computed category columns.

    For each registered (function, column_list) pair in columnval_categories,
    collect the listed values from ``column_results`` and apply the function.
    Returns {columnval_name: computed_value}.
    """
    derived = {}
    for name in columnval_categories:
        func, column_list = columnval_categories[name]
        args = [column_results[column] for column in column_list]
        derived[name] = func(args)
    return derived
|
22,141 | 29f13891dddc8e89cd23d4a40911882625b8cd23 | from functools import lru_cache
import logging
from mogiminsk.models import Provider, ProviderContact
from mogiminsk.utils import threaded_session
logger = logging.getLogger(__name__)
@lru_cache(maxsize=10)
def get_provider(identifier):
    """Return the Provider with the given identifier, or None.

    Memoised (up to 10 distinct identifiers) since providers rarely change.
    """
    query = threaded_session().query(Provider)
    return query.filter(Provider.identifier == identifier).first()
def find_contact(provider, kind, value):
    """Return the first (ProviderContact, Provider) pair matching the given
    provider identifier, contact kind and contact value, or None.
    """
    # BUG FIX: threaded_session is a session factory and must be *called*
    # (as get_provider does); the original accessed .query on the function
    # object itself, which raises AttributeError at runtime.
    return threaded_session().query(ProviderContact, Provider).filter(
        Provider.identifier == provider,
        ProviderContact.kind == kind,
        ProviderContact.contact == value,
        ProviderContact.provider_id == Provider.id
    ).first()
def build_contact(provider, kind, value) -> ProviderContact:
    """Build an unsaved ProviderContact for the named provider.

    Returns None (after logging a warning) when the provider is unknown.
    """
    instance = get_provider(provider)
    if instance is None:
        logger.warning('Provider %s is not found.', provider)
        return
    return ProviderContact(provider=instance, kind=kind, contact=value)
def run():
    """Seed the ProviderContact table with known phone/web contacts.

    Contacts already present are skipped; contacts for unknown providers are
    ignored.  Commits on success, rolls back (and logs) on failure.
    """
    contacts = [
        {'provider': 'novaja_linija', 'kind': 'velcom', 'value': '+375293597597'},
        {'provider': 'novaja_linija', 'kind': 'mts', 'value': '+375333597597'},
        {'provider': 'novaja_linija', 'kind': 'web', 'value': 'https://7311.by'},
        {'provider': 'atlas', 'kind': 'velcom', 'value': '+375293597597'},
        {'provider': 'atlas', 'kind': 'mts', 'value': '+375333597597'},
        {'provider': 'atlas', 'kind': 'web', 'value': 'https://atlasbus.by'},
        {'provider': 'dve_stolicy', 'kind': 'velcom', 'value': '+375296024444'},
        {'provider': 'dve_stolicy', 'kind': 'mts', 'value': '+375336024444'},
        {'provider': 'dve_stolicy', 'kind': 'life', 'value': '+375256024444'},
        {'provider': 'dve_stolicy', 'kind': 'web', 'value': 'https://2stolict.com'},
        {'provider': 'minsk_express', 'kind': 'velcom', 'value': '+375447885533'},
        {'provider': 'minsk_express', 'kind': 'mts', 'value': '+375297885533'},
        {'provider': 'minsk_express', 'kind': 'life', 'value': '+375257885533'},
        {'provider': 'minsk_express', 'kind': 'velcom', 'value': '+375447886633'},
        {'provider': 'minsk_express', 'kind': 'mts', 'value': '+375297886633'},
        {'provider': 'minsk_express', 'kind': 'life', 'value': '+375257886633'},
        {'provider': 'minsk_express', 'kind': 'web', 'value': 'http://mogilevminsk.by'},
        {'provider': 'avtoslava', 'kind': 'velcom', 'value': '+375445555161'},
        {'provider': 'avtoslava', 'kind': 'mts', 'value': '+375295555161'},
        {'provider': 'avtoslava', 'kind': 'life', 'value': '+375256842235'},
        {'provider': 'avtoslava', 'kind': 'web', 'value': 'http://avto-slava.by'},
    ]
    db = threaded_session()
    try:
        for contact in contacts:
            # Skip rows that are already in the database.
            if find_contact(**contact):
                continue
            model_to_save = build_contact(**contact)
            if model_to_save is None:
                continue
            logger.info('New contact will be added: %s', contact)
            db.add(model_to_save)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; DB errors are still logged + rolled back.
        logger.exception('Failed to load.')
        db.rollback()
    else:
        db.commit()
        logger.info('Updated successfully.')


if __name__ == '__main__':
    run()
|
22,142 | 62ac804d7d0695924a01d604ff66e1561adbf36a | from flask import Flask, request, jsonify
import os
import math
import json
import paho.mqtt.subscribe as subscribe
app = Flask(__name__)
app.config["TEMPLATES_AUTO_RELOAD"] = True
CLIENT_ID = '0b568393-ce99-4d73-a0fc-4d6c4f8c1095'
NETPIE_TOKEN = '9F8Qm8LTB5u84hsrQLNRZD7YjzFoDEPw'
def get_intersections(x0, y0, r0, x1, y1, r1):
    """Return the intersection points of two circles, or None.

    Circle 1 is centred at (x0, y0) with radius r0; circle 2 at (x1, y1)
    with radius r1.  Returns (x3, y3, x4, y4), the two intersection points
    (identical for tangent circles), or None when the circles do not
    intersect, one lies strictly inside the other, or they are coincident.
    """
    d = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
    separate = d > r0 + r1            # circles too far apart
    contained = d < abs(r0 - r1)      # one circle inside the other
    coincident = d == 0 and r0 == r1  # same circle: infinitely many points
    if separate or contained or coincident:
        return None
    # a: distance from centre 0 to the chord through the intersection
    # points; h: half the chord length.
    a = (r0 ** 2 - r1 ** 2 + d ** 2) / (2 * d)
    h = math.sqrt(r0 ** 2 - a ** 2)
    # Midpoint of the chord, on the line between the centres.
    mx = x0 + a * (x1 - x0) / d
    my = y0 + a * (y1 - y0) / d
    # Offset perpendicular to the centre line in both directions.
    return (mx + h * (y1 - y0) / d,
            my - h * (x1 - x0) / d,
            mx - h * (y1 - y0) / d,
            my + h * (x1 - x0) / d)
def triangulate(a,b,c):
    """Estimate the (x, y) position of a tag from three RSSI readings.

    a, b, c are RSSI values for anchor nodes "1", "2", "3" whose coordinates
    are read from points.json.  Each RSSI is converted to a distance with
    d = 10**((-56.4 - rssi) / 31.8905) — presumably a calibrated log-distance
    model; TODO confirm the constants.  The three pairwise circle
    intersections are averaged into one point.

    Returns [x, y] on success, or -1 when any circle pair fails to intersect
    (get_intersections returns None, making the midpoint math raise TypeError).
    """
    with open('points.json') as json_file:
        try:
            data = json.load(json_file)
            data["1"]["d"] = math.pow(10,(-56.4-a)/31.8905)
            data["2"]["d"] = math.pow(10,(-56.4-b)/31.8905)
            data["3"]["d"] = math.pow(10,(-56.4-c)/31.8905)
            # Midpoint of each pair's two intersection points.
            temp = get_intersections(data["1"]["x"],data["1"]["y"],data["1"]["d"],data["2"]["x"],data["2"]["y"],data["2"]["d"])
            avg12 = [0.5*(temp[0]+temp[2]), 0.5*(temp[1]+temp[3])]
            temp = get_intersections(data["1"]["x"],data["1"]["y"],data["1"]["d"],data["3"]["x"],data["3"]["y"],data["3"]["d"])
            avg13 = [0.5*(temp[0]+temp[2]), 0.5*(temp[1]+temp[3])]
            temp = get_intersections(data["2"]["x"],data["2"]["y"],data["2"]["d"],data["3"]["x"],data["3"]["y"],data["3"]["d"])
            avg23 = [0.5*(temp[0]+temp[2]), 0.5*(temp[1]+temp[3])]
            avg = [(avg12[0]+avg13[0]+avg23[0])/3, (avg12[1]+avg13[1]+avg23[1])/3]
            #print(avg)
            return avg
        except TypeError:
            # Raised when a get_intersections call returned None above.
            print("Invalid Coordinate (No Possible Coordinate)")
            return -1
ROOM_SIZE_X = 15
ROOM_SIZE_Y = 10
@app.route('/summary', methods=["GET"])
def summary():
    """Collect one RSSI reading from each of the three anchor nodes over MQTT,
    triangulate the tag position and report whether it is inside the room.

    Blocks until every node has reported.  Returns JSON:
    {'status': 'location_not_found'} or
    {'status': 'in'|'out', 'x': ..., 'y': ...}.
    """
    node_1_rssi = None
    node_2_rssi = None
    node_3_rssi = None
    # Keep polling the broker until a reading exists for all three nodes.
    while node_1_rssi == None or node_2_rssi == None or node_3_rssi == None:
        msg = subscribe.simple('@msg/taist2020/button/#', hostname='mqtt.netpie.io', port=1883, client_id=CLIENT_ID, auth={'username':NETPIE_TOKEN, 'password':None}, keepalive=10)
        print("%s %s" % (msg.topic, msg.payload))
        # SECURITY NOTE(review): eval() executes arbitrary expressions taken
        # from the MQTT payload — this trusts the broker completely; consider
        # json.loads instead.
        data = eval(str(msg.payload).split("'")[1][0:-4])
        # Node number is the last topic segment.
        device_num = int(msg.topic.split("/")[-1])
        if(device_num == 1):
            print("get rssi: {} form node {}".format(data["rssi"], device_num))
            node_1_rssi = data["rssi"]
        elif(device_num == 2):
            print("get rssi: {} form node {}".format(data["rssi"], device_num))
            node_2_rssi = data["rssi"]
        elif(device_num == 3):
            print("get rssi: {} form node {}".format(data["rssi"], device_num))
            node_3_rssi = data["rssi"]
    x = triangulate(node_1_rssi,node_2_rssi,node_3_rssi)
    if x == -1 :
        answer = 'location_not_found'
        return jsonify({'status':answer})
    else :
        # Inside the room means within the [0, ROOM_SIZE_X] x [0, ROOM_SIZE_Y] box.
        if x[0]<0 or x[0] > ROOM_SIZE_X or x[1]<0 or x[1]> ROOM_SIZE_Y :
            answer = 'out'
        else :
            answer = 'in'
        resp = {'status':answer, 'x':x[0], 'y':x[1]}
        return jsonify(resp)
@app.route('/input', methods=["POST"])
def input():
    """Classify a posted (x, y) point as inside or outside the room.

    Expects JSON {"x": <number>, "y": <number>}; returns
    {"status": "in"|"out", "x": ..., "y": ...}.
    """
    payload = request.get_json(force=True)
    x = payload['x']
    y = payload['y']
    if x<0 or x > ROOM_SIZE_X or y<0 or y> ROOM_SIZE_Y :
        answer = 'out'
    else :
        answer = 'in'
    # BUG FIX: x and y are scalars here (unlike /summary where x is a list),
    # so the original `x[0]` / `x[1]` raised TypeError on every request.
    resp = {'status': answer, 'x': x, 'y': y}
    return jsonify(resp)
# Run the Flask development server (debug mode) on port 8000 when executed
# directly.
if __name__ == '__main__':
    app.run(debug=True, port=8000)
|
22,143 | 5719fe6965ff61f45c517e5d0c7dd45a6fd8c20a | # Generated by Django 3.1.6 on 2021-02-04 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the `instrument` model (second migration of the `apps` app)."""

    dependencies = [
        ('apps', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='instrument',
            fields=[
                # Application-supplied string primary key (no AutoField).
                ('id', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('price', models.IntegerField()),
                ('ins_type', models.CharField(max_length=50)),
                ('status', models.CharField(max_length=50)),
            ],
        ),
    ]
|
22,144 | 18be1fd822531bbe1fbe2b4bdbe7286c1c5d5fcb | import FWCore.ParameterSet.Config as cms
# - pedestal run based from laser sequence in collisions
from CondCore.ESSources.CondDBESSource_cfi import *
#CondDBConnection.connect = cms.string( 'frontier://FrontierProd/CMS_CONDITIONS' )
# Re-reco GlobalTag: clone the standard GlobalTag ES source but override the
# ECAL pedestal record with the Legacy2017 timing-based tag served from the
# production Frontier conditions database.
RerecoGlobalTag = GlobalTag.clone(
    globaltag = cms.string('92X_dataRun2_Prompt_v8'),
    toGet = cms.VPSet(
        cms.PSet(record = cms.string("EcalPedestalsRcd"),
                 tag = cms.string("EcalPedestals_Legacy2017_time_v1"),
                 connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
                 ),
        ),
    )
|
22,145 | 9a7e56a6e700e0f3bf20732a35d05bb234fb0e7e | # -*- coding: utf-8 -*-
import time
from openerp.report import report_sxw
from openerp import pooler
class Parser(report_sxw.rml_parse):
    """RML report parser exposing partner-formatting helpers to the template.

    Registers get_birthday, get_address and get_selection_of_field in the
    report's local context.
    """

    def __init__(self, cr, uid, name, context):
        super(Parser, self).__init__(cr, uid, name, context=context)
        pool = pooler.get_pool(self.cr.dbname)
        self.localcontext.update({
            'get_birthday': self.get_birthday,
            'get_selection_of_field': self.get_selection_of_field,
            'get_address': self.get_address,
            'context': context,
        })

    def get_birthday(self, obj):
        """Return the partner's birthday as 'dd/mm/yyyy', or '' if any
        component is missing."""
        year = obj.year_of_birth or False
        month = obj.month_of_birth or False
        day = obj.day_of_birth and obj.day_of_birth.name or False
        str_date = ''
        if day and month and year:
            # All three components are truthy here, so the original per-field
            # `x and ... or ' '` guards were redundant and have been dropped.
            str_date = '%s/%s/%s' % (str(day).zfill(2), str(month).zfill(2), str(year).zfill(4))
        return str_date

    def get_address(self, obj):
        """Return a comma-separated address built from the partner's
        non-empty address fields."""
        tmp_address = [obj.street, obj.street2, obj.city, obj.state_id and obj.state_id.name or False, obj.zip, obj.country_id and obj.country_id.name or False]
        temp_address = [u'%s'%x for x in tmp_address if x]
        return u', '.join(temp_address)

    def get_selection_of_field(self, type_key, field):
        """Return the display label of *type_key* within the selection list
        of *field* on res.partner, or '' when unknown / not a selection."""
        type_value = ''
        if type_key:
            dict_temp = {}
            try:
                dict_temp = dict(self.pool.get('res.partner').fields_get(self.cr, self.uid, field, context=self.localcontext['context'])[field]['selection'])
            except Exception:
                pass
            # Direct dict lookup replaces the original parallel
            # keys()/values() scan (equivalent in CPython 2, where keys()
            # and values() are in corresponding order).
            type_value = dict_temp.get(type_key, type_value)
        return type_value
|
22,146 | b1a46d97d60369a84f0b36c9503188c7b4c8282b | # Write a function get_summ(num_one, num_two) that takes two integers (int)
# and adds them together.
# Both arguments must be converted to an integer with int(), and the
# ValueError raised when the type conversion fails must be caught.
def get_sum(num_one, num_two):
    """Convert both arguments with int() and print their sum.

    Raises ValueError when either argument cannot be converted.
    """
    total = int(num_one) + int(num_two)
    print(total)
# Exercise the function: the second call raises ValueError because
# 'sdlksdlksdl' cannot be converted by int(), so the third call never runs.
try:
    get_sum(1234.9932, '3332')
    get_sum('sdlksdlksdl', '3332')
    get_sum(123+3.44, 12/3.99)
except ValueError:
    # NOTE(review): the message below is mojibake-encoded Cyrillic; it is a
    # runtime string and is therefore left byte-for-byte unchanged here.
    print('ะะต ะฒัะต ะฟะตัะตะผะตะฝะฝัะต - ัะธััั!1')
|
22,147 | 302cec8241f4bc1c53bae1df4b9a1be8af0062a4 | import sys
import re
import os
# Split the ':constraints' section out of a PDDL problem file:
# - the original file is backed up to problem-original.pddl,
# - constraint lines are copied verbatim to constraints.txt,
# - the problem file is rewritten with those lines commented out (';'-prefixed)
#   and a closing ')' restored, since the commented tail contained it.
source_path = sys.argv[1]
os.system("cp " + source_path + " problem-original.pddl")

# BUG FIXES vs. the original script:
# - open mode was 'rw' (invalid in Python 3; behaved as read-only in Python 2),
# - f.close / f2.close / f3.close lacked parentheses, so no file was ever
#   closed and temp.txt could still be unflushed when `mv` ran below.
found = False
with open(source_path, 'r') as src, \
        open("constraints.txt", 'w') as constraints_out, \
        open("temp.txt", 'w') as stripped_out:
    for line in src:
        if not found:
            # A non-comment line containing the bare token 'constraints'
            # (after splitting on ':' and spaces) starts the section.
            for part in line.split(':'):
                if "constraints" in part.split(' ') and line[0] != ";":
                    found = True
        if found:
            constraints_out.write(line)
            stripped_out.write(";%s" % (line))
        else:
            stripped_out.write(line)
    if found:
        # The commented tail swallowed the problem's closing paren.
        stripped_out.write("\n)")
os.system("mv temp.txt " + source_path)
|
22,148 | eb04210f9260279493f7e2febb8b4f2ee01708f4 | import time, urllib.parse
import requests, tldextract
from . import common
from ..lib import log_response
class RackspaceDns(common.BaseDns):
    """ACME dns-01 challenge hook backed by the Rackspace Cloud DNS API.

    On construction it authenticates against the Rackspace identity service,
    then creates/deletes the `_acme-challenge.<domain>` TXT records and polls
    the job callback URL until each change completes.
    """

    def __init__(self, RACKSPACE_USERNAME, RACKSPACE_API_KEY, **kwargs):
        self.RACKSPACE_DNS_ZONE_ID = None
        self.RACKSPACE_USERNAME = RACKSPACE_USERNAME
        self.RACKSPACE_API_KEY = RACKSPACE_API_KEY
        self.HTTP_TIMEOUT = 65  # seconds
        super().__init__(**kwargs)
        self.RACKSPACE_API_TOKEN, self.RACKSPACE_API_BASE_URL = self.get_rackspace_credentials()
        self.RACKSPACE_HEADERS = {
            "X-Auth-Token": self.RACKSPACE_API_TOKEN,
            "Content-Type": "application/json",
        }

    def get_rackspace_credentials(self):
        """Return (api_token, dns_api_base_url) from the identity service.

        Raises ValueError if authentication fails or no 'rax:dns' service is
        listed in the service catalog.
        """
        self.logger.debug("get_rackspace_credentials")
        RACKSPACE_IDENTITY_URL = "https://identity.api.rackspacecloud.com/v2.0/tokens"
        payload = {
            "auth": {
                "RAX-KSKEY:apiKeyCredentials": {
                    "username": self.RACKSPACE_USERNAME,
                    "apiKey": self.RACKSPACE_API_KEY,
                }
            }
        }
        find_rackspace_api_details_response = requests.post(RACKSPACE_IDENTITY_URL, json=payload)
        self.logger.debug(
            "find_rackspace_api_details_response. status_code={0}".format(
                find_rackspace_api_details_response.status_code
            )
        )
        if find_rackspace_api_details_response.status_code != 200:
            raise ValueError(
                "Error getting token and URL details from rackspace identity server: status_code={status_code} response={response}".format(
                    status_code=find_rackspace_api_details_response.status_code,
                    response=log_response(find_rackspace_api_details_response),
                )
            )
        data = find_rackspace_api_details_response.json()
        api_token = data["access"]["token"]["id"]
        # The service catalog lists one entry per service; "rax:dns" is the DNS API.
        url_data = next(
            (item for item in data["access"]["serviceCatalog"] if item["type"] == "rax:dns"), None
        )
        if url_data is None:
            raise ValueError(
                "Error finding url data for the rackspace dns api in the response from the identity server"
            )
        else:
            api_base_url = url_data["endpoints"][0]["publicURL"] + "/"
        return (api_token, api_base_url)

    def get_dns_zone(self, domain_name):
        """Store the registrable zone (domain + public suffix) on self."""
        self.logger.debug("get_dns_zone")
        extracted_domain = tldextract.extract(domain_name)
        self.RACKSPACE_DNS_ZONE = ".".join([extracted_domain.domain, extracted_domain.suffix])

    def find_dns_zone_id(self, domain_name):
        """Return the Rackspace zone id owning *domain_name*.

        Raises ValueError when the listing fails or the zone is not found.
        """
        self.logger.debug("find_dns_zone_id")
        self.get_dns_zone(domain_name)
        url = self.RACKSPACE_API_BASE_URL + "domains"
        find_dns_zone_id_response = requests.get(url, headers=self.RACKSPACE_HEADERS)
        self.logger.debug(
            "find_dns_zone_id_response. status_code={0}".format(
                find_dns_zone_id_response.status_code
            )
        )
        if find_dns_zone_id_response.status_code != 200:
            raise ValueError(
                "Error getting rackspace dns domain info: status_code={status_code} response={response}".format(
                    status_code=find_dns_zone_id_response.status_code,
                    response=log_response(find_dns_zone_id_response),
                )
            )
        result = find_dns_zone_id_response.json()
        domain_data = next(
            (item for item in result["domains"] if item["name"] == self.RACKSPACE_DNS_ZONE), None
        )
        if domain_data is None:
            raise ValueError(
                "Error finding information for {dns_zone} in dns response data:\n{response_data})".format(
                    dns_zone=self.RACKSPACE_DNS_ZONE,
                    response_data=log_response(find_dns_zone_id_response),
                )
            )
        dns_zone_id = domain_data["id"]
        self.logger.debug("find_dns_zone_id_success")
        return dns_zone_id

    def find_dns_record_id(self, domain_name, domain_dns_value):
        """Return the id of the record in the zone whose data equals
        *domain_dns_value*; raises ValueError when not found.
        """
        self.logger.debug("find_dns_record_id")
        self.RACKSPACE_DNS_ZONE_ID = self.find_dns_zone_id(domain_name)
        url = self.RACKSPACE_API_BASE_URL + "domains/{0}/records".format(self.RACKSPACE_DNS_ZONE_ID)
        find_dns_record_id_response = requests.get(url, headers=self.RACKSPACE_HEADERS)
        self.logger.debug(
            "find_dns_record_id_response. status_code={0}".format(
                find_dns_record_id_response.status_code
            )
        )
        self.logger.debug(url)
        if find_dns_record_id_response.status_code != 200:
            raise ValueError(
                "Error finding dns records for {dns_zone}: status_code={status_code} response={response}".format(
                    dns_zone=self.RACKSPACE_DNS_ZONE,
                    status_code=find_dns_record_id_response.status_code,
                    response=log_response(find_dns_record_id_response),
                )
            )
        records = find_dns_record_id_response.json()["records"]
        # Match on record *data* rather than name, since several challenge
        # records may share the same name.
        RACKSPACE_RECORD_DATA = next(
            (item for item in records if item["data"] == domain_dns_value), None
        )
        if RACKSPACE_RECORD_DATA is None:
            raise ValueError(
                "Couldn't find record with name {domain_name}\ncontaining data: {domain_dns_value}\nin the response data:{response_data}".format(
                    domain_name=domain_name,
                    domain_dns_value=domain_dns_value,
                    response_data=log_response(find_dns_record_id_response),
                )
            )
        record_id = RACKSPACE_RECORD_DATA["id"]
        self.logger.debug("find_dns_record_id success")
        return record_id

    def poll_callback_url(self, callback_url):
        """Poll a Rackspace async-job callback URL until COMPLETED.

        Raises on ERROR status, non-200 responses, or after HTTP_TIMEOUT
        seconds of polling.
        """
        start_time = time.time()
        while True:
            callback_url_response = requests.get(callback_url, headers=self.RACKSPACE_HEADERS)
            if time.time() > start_time + self.HTTP_TIMEOUT:
                raise ValueError(
                    "Timed out polling callbackurl for dns record status. Last status_code={status_code} last response={response}".format(
                        status_code=callback_url_response.status_code,
                        response=log_response(callback_url_response),
                    )
                )
            if callback_url_response.status_code != 200:
                raise Exception(
                    "Could not get dns record status from callback url. Status code ={status_code}. response={response}".format(
                        status_code=callback_url_response.status_code,
                        response=log_response(callback_url_response),
                    )
                )
            if callback_url_response.json()["status"] == "ERROR":
                raise Exception(
                    "Error in creating/deleting dns record: status_Code={status_code}. response={response}".format(
                        status_code=callback_url_response.status_code,
                        response=log_response(callback_url_response),
                    )
                )
            if callback_url_response.json()["status"] == "COMPLETED":
                break

    def create_dns_record(self, domain_name, domain_dns_value):
        """Create the `_acme-challenge.<domain_name>` TXT record and wait for
        the job to complete."""
        self.logger.info("create_dns_record")
        self.RACKSPACE_DNS_ZONE_ID = self.find_dns_zone_id(domain_name)
        record_name = "_acme-challenge." + domain_name
        url = urllib.parse.urljoin(
            self.RACKSPACE_API_BASE_URL, "domains/{0}/records".format(self.RACKSPACE_DNS_ZONE_ID)
        )
        body = {
            "records": [{"name": record_name, "type": "TXT", "data": domain_dns_value, "ttl": 3600}]
        }
        create_rackspace_dns_record_response = requests.post(
            url, headers=self.RACKSPACE_HEADERS, json=body, timeout=self.HTTP_TIMEOUT
        )
        self.logger.debug(
            "create_rackspace_dns_record_response. status_code={status_code}".format(
                status_code=create_rackspace_dns_record_response.status_code
            )
        )
        if create_rackspace_dns_record_response.status_code != 202:
            raise ValueError(
                "Error creating rackspace dns record: status_code={status_code} response={response}".format(
                    status_code=create_rackspace_dns_record_response.status_code,
                    response=create_rackspace_dns_record_response.text,
                )
            )
            # response=log_response(create_rackspace_dns_record_response)))
        # After posting the dns record we want created, the response gives us a url to check that will
        # update when the job is done
        callback_url = create_rackspace_dns_record_response.json()["callbackUrl"]
        self.poll_callback_url(callback_url)
        self.logger.info(
            "create_dns_record_success. Name: {record_name} Data: {data}".format(
                record_name=record_name, data=domain_dns_value
            )
        )

    def delete_dns_record(self, domain_name, domain_dns_value):
        """Delete the challenge TXT record carrying *domain_dns_value* and
        wait for the job to complete."""
        self.logger.info("delete_dns_record")
        record_name = "_acme-challenge." + domain_name
        self.RACKSPACE_DNS_ZONE_ID = self.find_dns_zone_id(domain_name)
        self.RACKSPACE_RECORD_ID = self.find_dns_record_id(domain_name, domain_dns_value)
        url = self.RACKSPACE_API_BASE_URL + "domains/{domain_id}/records/?id={record_id}".format(
            domain_id=self.RACKSPACE_DNS_ZONE_ID, record_id=self.RACKSPACE_RECORD_ID
        )
        delete_dns_record_response = requests.delete(url, headers=self.RACKSPACE_HEADERS)
        # After sending a delete request, if all goes well, we get a 202 from the server and a URL that we can poll
        # to see when the job is done
        self.logger.debug(
            "delete_dns_record_response={0}".format(delete_dns_record_response.status_code)
        )
        if delete_dns_record_response.status_code != 202:
            raise ValueError(
                "Error deleting rackspace dns record: status_code={status_code} response={response}".format(
                    status_code=delete_dns_record_response.status_code,
                    response=log_response(delete_dns_record_response),
                )
            )
        callback_url = delete_dns_record_response.json()["callbackUrl"]
        self.poll_callback_url(callback_url)
        self.logger.info(
            "delete_dns_record_success. Name: {record_name} Data: {data}".format(
                record_name=record_name, data=domain_dns_value
            )
        )
|
22,149 | 4bd2abd9bd9651d22e2ab24a34a789f12ba8da68 | """Test DBInterface."""
import os
import re
import sys
import time
import errno
import shutil
import logging
import pymongo
import unittest
import pdb
import tensorflow as tf
import mnist_data as data
sys.path.insert(0, "..")
import tfutils.base as base
import tfutils.model as model
import tfutils.optimizer as optimizer
from tfutils.db_interface import TFUTILS_HOME
from tfutils.db_interface import DBInterface
# def logPoint(context):
# """Utility function used for module functions and class methods."""
# callingFunction = inspect.stack()[1][3]
# print 'in %s - %s()' % (context, callingFunction)
def setUpModule():
    """Set up module once, before any TestCases are run."""
    # Configure the root logger so test loggers have a handler.
    logging.basicConfig()
    # logPoint('module %s' % __name__)
def tearDownModule():
    """Tear down module after all TestCases are run."""
    # Nothing to clean up at module scope.
    pass
    # logPoint('module %s' % __name__)
class TestDBInterface(unittest.TestCase):
    """Integration tests for tfutils DBInterface.

    Requires a MongoDB instance at HOST:PORT; checkpoints and caches are
    written under CACHE_DIR and removed in tearDownClass.
    """

    PORT = 29101
    HOST = 'localhost'
    EXP_ID = 'TEST_EXP_ID'
    DATABASE_NAME = 'TFUTILS_TESTDB'
    COLLECTION_NAME = 'TFUTILS_TESTCOL'
    CACHE_DIR = 'TFUTILS_TEST_CACHE_DIR'

    @classmethod
    def setUpClass(cls):
        """Set up class once before any test methods are run."""
        cls.setup_log()
        cls.setup_conn()
        cls.setup_cache()
        cls.setup_params()

    @classmethod
    def tearDownClass(cls):
        """Tear down class after all test methods have run."""
        cls.remove_directory(cls.CACHE_DIR)
        cls.remove_database(cls.DATABASE_NAME)
        # Close primary MongoDB connection.
        cls.conn.close()

    def setUp(self):
        """Set up class before _each_ test method is executed.

        Creates a tensorflow session and instantiates a dbinterface.
        """
        self.setup_model()
        self.sess = tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(allow_growth=True),
                log_device_placement=self.params['log_device_placement'],
            ))
        # TODO: Determine whether this should be called here or
        # in dbinterface.initialize()
        self.sess.run(tf.global_variables_initializer())
        self.dbinterface = DBInterface(
            sess=self.sess,
            params=self.params,
            cache_dir=self.CACHE_DIR,
            save_params=self.save_params,
            load_params=self.load_params)
        self.step = 0

    def tearDown(self):
        """Tear Down is called after _each_ test method is executed."""
        self.sess.close()

    @unittest.skip("skipping")
    def test_init(self):
        # TODO: Test all permutations of __init__ params.
        pass

    @unittest.skip("skipping")
    def test_load_rec(self):
        pass

    @unittest.skip("skipping")
    def test_initialize(self):
        pass

    def test_get_restore_vars(self):
        """Checkpointed vars restore into renamed vars via load_param_dict."""
        # First, train model and save a checkpoint
        self.train_model()  # weights_name='Weights'
        saved_path = self.save_test_checkpoint()
        # Create a new model with different variable names.
        self.setup_model(weights_name='Filters')
        # Reset var_list in DBInterface
        self.dbinterface.var_list = {
            var.op.name: var for var in tf.global_variables()}
        # Restore first checkpoint vars.
        mapping = {'Weights': 'Filters'}
        self.dbinterface.load_param_dict = mapping
        restore_vars = self.dbinterface.get_restore_vars(saved_path)
        self.log.info('restore_vars:')
        for name, var in restore_vars.items():
            if name in mapping.keys():
                self.log.info('(name, var.name): ({}, {})'.format(name, var.name))
                self.assertEqual(var.op.name, mapping[name])

    def test_filter_var_list(self):
        """filter_var_list honours None, string lists and compiled regexes."""
        var_list = {var.op.name: var for var in tf.global_variables()}
        # Test None
        self.dbinterface.to_restore = None
        filtered_var_list = self.dbinterface.filter_var_list(var_list)
        self.assertEqual(filtered_var_list, var_list)
        # Test list of strings
        self.dbinterface.to_restore = ['Weights']
        filtered_var_list = self.dbinterface.filter_var_list(var_list)
        for name, var in filtered_var_list.items():
            self.assertIn(name, ['Weights'])
            self.assertNotIn(name, ['Bias', 'global_step'])
        # Test regex
        self.dbinterface.to_restore = re.compile(r'Bias')
        filtered_var_list = self.dbinterface.filter_var_list(var_list)
        for name, var in filtered_var_list.items():
            self.assertIn(name, ['Bias'])
            self.assertNotIn(name, ['Weights', 'global_step'])
        # Test invalid type (should raise TypeError)
        self.dbinterface.to_restore = {'invalid_key': 'invalid_value'}
        with self.assertRaises(TypeError):
            filtered_var_list = self.dbinterface.filter_var_list(var_list)

    @unittest.skip("skipping")
    def test_tf_saver(self):
        pass

    @unittest.skip("skipping")
    def test_load_from_db(self):
        pass

    @unittest.skip("skipping")
    def test_save(self):
        self.dbinterface.initialize()
        self.dbinterface.start_time_step = time.time()
        train_res = self.train_model(num_steps=100)
        self.dbinterface.save(train_res=train_res, step=self.step)

    @unittest.skip("skipping")
    def test_sync_with_host(self):
        pass

    @unittest.skip("skipping")
    def test_save_thread(self):
        pass

    @unittest.skip("skipping")
    def test_initialize_from_ckpt(self):
        save_path = self.save_test_checkpoint()
        self.load_test_checkpoint(save_path)

    def train_model(self, num_steps=100):
        """Run num_steps optimizer steps on the toy linear model; returns the
        last train_targets result and asserts global_step advanced."""
        x_train = [1, 2, 3, 4]
        y_train = [0, -1, -2, -3]
        x = tf.get_default_graph().get_tensor_by_name('x:0')
        y = tf.get_default_graph().get_tensor_by_name('y:0')
        feed_dict = {x: x_train, y: y_train}
        pre_global_step = self.sess.run(self.global_step)
        for step in range(num_steps):
            train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
            self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))
        post_global_step = self.sess.run(self.global_step)
        self.assertEqual(pre_global_step + num_steps, post_global_step)
        self.step += num_steps
        return train_res

    def save_test_checkpoint(self):
        """Save the session via the dbinterface saver; return the ckpt path."""
        self.log.info('Saving checkpoint to {}'.format(self.save_path))
        saved_checkpoint_path = self.dbinterface.tf_saver.save(self.sess,
                                                               save_path=self.save_path,
                                                               global_step=self.global_step,
                                                               write_meta_graph=False)
        self.log.info('Checkpoint saved to {}'.format(saved_checkpoint_path))
        return saved_checkpoint_path

    def load_test_checkpoint(self, save_path):
        """Log every variable stored in the checkpoint at save_path."""
        reader = tf.train.NewCheckpointReader(save_path)
        saved_shapes = reader.get_variable_to_shape_map()
        self.log.info('Saved Vars:\n' + str(saved_shapes.keys()))
        for name in saved_shapes.keys():
            self.log.info(
                'Name: {}, Tensor: {}'.format(name, reader.get_tensor(name)))

    def setup_model(self, weights_name='Weights', bias_name='Bias'):
        """Set up simple tensorflow model."""
        tf.reset_default_graph()
        self.global_step = tf.get_variable(
            'global_step', [],
            dtype=tf.int64, trainable=False,
            initializer=tf.constant_initializer(0))
        # Model parameters and placeholders.
        x = tf.placeholder(tf.float32, name='x')
        y = tf.placeholder(tf.float32, name='y')
        W = tf.get_variable(weights_name, [1], dtype=tf.float32)
        b = tf.get_variable(bias_name, [1], dtype=tf.float32)
        # Model output, loss and optimizer.
        linear_model = W * x + b
        loss = tf.reduce_sum(tf.square(linear_model - y))
        optimizer_base = tf.train.GradientDescentOptimizer(0.01)
        # Model train op.
        optimizer = optimizer_base.minimize(
            loss, global_step=self.global_step)
        # Train targets.
        self.train_targets = {'loss': loss,
                              'optimizer': optimizer}

    @classmethod
    def setup_log(cls):
        """Create the class logger."""
        cls.log = logging.getLogger(':'.join([__name__, cls.__name__]))
        cls.log.setLevel('DEBUG')

    @classmethod
    def setup_conn(cls):
        """Open the primary MongoDB connection."""
        cls.conn = pymongo.MongoClient(host=cls.HOST, port=cls.PORT)

    @classmethod
    def setup_cache(cls):
        """Create the on-disk cache directory and checkpoint path."""
        cls.cache_dir = os.path.join(cls.CACHE_DIR,
                                     '%s:%d' % (cls.HOST, cls.PORT),
                                     cls.DATABASE_NAME,
                                     cls.COLLECTION_NAME,
                                     cls.EXP_ID)
        cls.makedirs(cls.cache_dir)
        cls.save_path = os.path.join(cls.cache_dir, 'checkpoint')

    @classmethod
    def setup_params(cls):
        """Build the full tfutils params dict used by every test."""
        cls.model_params = {'func': model.mnist_tfutils_new,
                            'devices': ['/gpu:0', '/gpu:1'],
                            'prefix': 'model_0'}
        cls.save_params = {
            'host': cls.HOST,
            'port': cls.PORT,
            'dbname': cls.DATABASE_NAME,
            'collname': cls.COLLECTION_NAME,
            'exp_id': cls.EXP_ID,
            'save_valid_freq': 20,
            'save_filters_freq': 200,
            'cache_filters_freq': 100}
        cls.train_params = {
            'data_params': {'func': data.build_data,
                            'batch_size': 100,
                            'group': 'train',
                            'directory': TFUTILS_HOME},
            'num_steps': 500}
        cls.loss_params = {
            'targets': ['labels'],
            'agg_func': tf.reduce_mean,
            'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}
        cls.load_params = {'do_restore': True}
        cls.optimizer_params = {'func': optimizer.ClipOptimizer,
                                'optimizer_class': tf.train.MomentumOptimizer,
                                'clip': True,
                                'optimizer_kwargs': {'momentum': 0.9}}
        cls.learning_rate_params = {'learning_rate': 0.05,
                                    'decay_steps': 10000 // 256,
                                    'decay_rate': 0.95,
                                    'staircase': True}
        cls.params = {
            'dont_run': False,
            'skip_check': True,
            'model_params': cls.model_params,
            'train_params': cls.train_params,
            'validation_params': {},
            'log_device_placement': False,
            'save_params': cls.save_params,
            'load_params': cls.load_params,
            'loss_params': cls.loss_params,
            'optimizer_params': cls.optimizer_params,
            'learning_rate_params': cls.learning_rate_params}

    @classmethod
    def remove_checkpoint(cls, checkpoint):
        """Remove a tf.train.Saver checkpoint."""
        cls.log.info('Removing checkpoint: {}'.format(checkpoint))
        # TODO: remove ckpt
        cls.log.info('Checkpoint successfully removed.')
        raise NotImplementedError

    @classmethod
    def remove_directory(cls, directory):
        """Remove a directory."""
        cls.log.info('Removing directory: {}'.format(directory))
        shutil.rmtree(directory)
        cls.log.info('Directory successfully removed.')

    @classmethod
    def remove_database(cls, database_name):
        """Remove a MonogoDB database."""
        cls.log.info('Removing database: {}'.format(database_name))
        cls.conn.drop_database(database_name)
        cls.log.info('Database successfully removed.')

    @classmethod
    def remove_collection(cls, collection_name):
        """Remove a MonogoDB collection."""
        cls.log.debug('Removing collection: {}'.format(collection_name))
        cls.conn[cls.DATABASE_NAME][collection_name].drop()
        cls.log.info('Collection successfully removed.')

    @classmethod
    def remove_document(cls, document):
        raise NotImplementedError

    @staticmethod
    def makedirs(dir):
        # Create the directory, tolerating only the already-exists error.
        try:
            os.makedirs(dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
22,150 | 3bef14dfa575cbc722d7ef6d3140d7cac310d514 | import sys
import datetime
from sqlalchemy import DateTime, Column, ForeignKey, String, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
    """An application user (name, email and optional profile picture URL)."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    email = Column(String(250), nullable=False)
    picture = Column(String(250))

    @property
    def serialize(self):
        """Return object data in easily serializeable format.

        Added for consistency with Category and CategoryItem, which both
        expose a ``serialize`` property.
        """
        return {
            "id": self.id,
            "name": self.name,
            "email": self.email,
            "picture": self.picture,
        }
class Category(Base):
    """A category owned by a user; CategoryItem rows reference it."""
    __tablename__ = 'category'
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'))  # owner of the category
    user = relationship(User)

    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            "id": self.id,
            "name": self.name,
        }
class CategoryItem(Base):
    """An item belonging to one category and one user."""
    __tablename__ = 'category_item'
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    detail = Column(String(1000))
    # Creation timestamp; defaults to the insert time (UTC).
    date_on = Column(DateTime, default=datetime.datetime.utcnow)
    cat_id = Column(Integer, ForeignKey('category.id'))
    category = relationship(Category)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)

    @property
    def serialize(self):
        """Return object data in easily serializeable format.

        NOTE(review): "date_on" is a datetime object, which json.dumps
        cannot serialize directly -- callers may need to convert it
        (e.g. with isoformat()). Confirm against the API layer.
        """
        return {
            "id": self.id,
            "name": self.name,
            "detail": self.detail,
            "date_on": self.date_on,
        }
# Create the SQLite database file and all tables declared above.
engine = create_engine('sqlite:///ItemCatalog.db')
Base.metadata.create_all(engine)
|
22,151 | 956e8ce0995c1a6f1f54a39144942098b4a24638 | from PyDSALib.linked_list import Node
from PyDSALib.linked_list import LinkedList
class Deque(LinkedList):
    """Double-ended queue backed by a doubly linked list. Extends LinkedList.

    Attributes:
        head: Node object. The head (left end) of the deque
        tail: Node object. The tail (right end) of the deque
        size: the length of the deque
    """

    def __init__(self):
        """Initialise an empty Deque."""
        super().__init__()
        self.tail = None

    def left_enqueue(self, item):
        """Add an item to the left of the deque."""
        item_to_add = Node(item)
        item_to_add.set_next(self.head)
        if not self.tail:
            # empty deque: the new node is also the tail
            self.tail = item_to_add
        else:
            # connect the old head to the new head
            self.head.set_prev(item_to_add)
        self.head = item_to_add
        self.size += 1

    def right_enqueue(self, item):
        """Add an item to the right of the deque."""
        item_to_add = Node(item)
        item_to_add.set_prev(self.tail)
        if not self.head:
            # empty deque: the new node is also the head
            self.head = item_to_add
        else:
            # connect the old tail to the new tail
            self.tail.set_next(item_to_add)
        self.tail = item_to_add
        self.size += 1

    def left_dequeue(self):
        """Remove the item at the left of the deque and return it."""
        old_head = self.head
        self.head = old_head.get_next()
        if self.head is None:
            # BUGFIX: the deque is now empty, so tail must not keep pointing
            # at the removed node (a later right_enqueue would link the new
            # node's prev to this ghost node).
            self.tail = None
        else:
            # BUGFIX: detach the new head from the removed node.
            self.head.set_prev(None)
        self.size -= 1
        return old_head.get_data()

    def right_dequeue(self):
        """Remove the item at the right of the deque and return it."""
        old_tail = self.tail
        self.tail = old_tail.get_prev()
        if self.tail is None:
            # BUGFIX: mirror of left_dequeue -- clear the stale head pointer.
            self.head = None
        else:
            # BUGFIX: detach the new tail from the removed node.
            self.tail.set_next(None)
        self.size -= 1
        return old_tail.get_data()
|
22,152 | ac7bb62fafebd659c68a153fb5b79f4f7eb701a3 | #!/usr/bin/env python
import google
from setuptools import setup, find_packages
setup(
    name='django_google',
    version=google.__version__,
    description='Utilities to use Google services (like Google Analytics) with Django.',
    long_description=open('README.rst').read(),
    author='Alexis Tabary',
    author_email='alexis.tabary@gmail.com',
    url='https://github.com/atabary/django_google',
    packages=find_packages(),
    license=open('LICENSE').read(),
    include_package_data=True,
    zip_safe=False,  # Django templates must stay readable from disk
    classifiers=[
        # BUGFIX: 'Development Status :: 1 - Beta' is not a valid trove
        # classifier (1 is 'Planning'); the beta stage is '4 - Beta'.
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
)
|
22,153 | c076451ba35b61502a4adbde02f216786a0e6f30 | # -*- coding:UTF-8 -*-
"""
@author: hzjianghongxiang
@date: 2015-01-19
@contact: jhx1008@gmail.com
version: 1.0
@todo:
@modify:
"""
class CommandFactory:
    """Builds command classes by importing them dynamically by name."""

    @staticmethod
    def getCommand(dbName, action):
        """Return the attribute ``action`` from module
        ``imagent.<dbName>.commands.<action>``.

        As before, an AttributeError propagates when the module exists but
        does not define ``action``, and import failures raise ImportError.
        """
        # Fix: use importlib instead of building an import statement as a
        # string and running it through exec/eval (fragile and unsafe if
        # dbName/action ever contain untrusted input). Local import keeps
        # the file's import-free layout intact.
        import importlib
        module = importlib.import_module(
            'imagent.' + dbName + '.commands.' + action)
        # getattr raises AttributeError when the name is missing, which is
        # exactly what the original caught and re-raised unchanged.
        return getattr(module, action)
22,154 | 9dd6e1e15c42ca77e509d724ecb2d5b6b13be873 | ๏ปฟ"""
* Copyright 2020, Departamento de sistemas y Computaciรณn, Universidad
* de Los Andes
*
*
* Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along withthis program. If not, see <http://www.gnu.org/licenses/>.
"""
import config as cf
import sys
import controller
import tracemalloc
import graph
from DISClib.ADT import list as lt
assert cf
# ===================================
# Funciones de inicializacion
# ===================================
def initCatalog():
    """Create and return an empty catalog (delegates to the controller)."""
    return controller.initCatalog()


def loadData(catalog):
    """Load the data files into *catalog* (delegates to the controller).

    Returns the controller's result; the menu below uses it as a
    (time, memory) measurement pair.
    """
    return controller.loadData(catalog)
# Helpers that ask the user which filters to apply.
def request_category(catalog):
    """Prompt repeatedly until the user enters a category that exists in *catalog*."""
    while True:
        # The leading space matches how category names are stored.
        requested_category = " " + input("Ingrese el nombre de la categoria con la que desea filtrar sus datos: ")
        if controller.validateCategory(requested_category, catalog):
            return requested_category
        print("La categoría ingresada no existe. Intente de nuevo.")
def request_country(catalog):
    """Prompt repeatedly until the user enters a country that exists in *catalog*."""
    while True:
        requested_country = input("Ingrese el nombre del país con el que desea filtrar sus datos: ")
        if controller.validateCountry(requested_country, catalog):
            return requested_country
        print("El país ingresado no existe. Intente de nuevo.")
def request_tag(catalog):
    """Ask the user for a tag name; no validation is performed.

    Note: *catalog* is unused here; the parameter is kept for signature
    consistency with the other request_* helpers.
    """
    requested_tag = input("Ingrese el nombre del tag con el que desea filtrar sus datos: ")
    return requested_tag
def request_nsample():
    """Prompt repeatedly until the user enters an integer sample size; return it."""
    while True:
        n_sample = input("Ingrese el tamaño de la muestra sobre la que desea indagar (recuerde que este no debe exceder la cantidad de videos en el catálogo): ")
        try:
            # Narrowed from a bare ``except Exception``: only the int()
            # conversion can fail here, and it raises ValueError.
            return int(n_sample)
        except ValueError:
            print("No ha ingresado un valor numérico. Intentelo de nuevo.")
#Funciรณn para imprimir grรกficas
def addTime(catalog,time,memory, label):
return controller.addTime(catalog,time,memory,label)
# Requirement runners: thin delegates to the controller layer.
def execute_req1(catalog, req_category, req_country, n_sample):
    """Run requirement 1 (trending videos with most views for a country and category)."""
    return controller.execute_req1(catalog, req_category, req_country, n_sample)


def execute_req2(catalog, req_country):
    """Run requirement 2 (longest-trending video for a country)."""
    return controller.execute_req2(catalog, req_country)


def execute_req3(catalog, req_category):
    """Run requirement 3 (longest-trending video for a category).

    (The original docstring said "requerimiento 2" -- a copy-paste slip.)
    """
    return controller.execute_req3(catalog, req_category)


def execute_req4(catalog, req_country, req_tag, n_sample):
    """Run requirement 4 (most-liked videos of a country for a specific tag)."""
    return controller.execute_req4(catalog, req_country, req_tag, n_sample)
# Printing helpers: format each requirement's result via the controller
# and write it to stdout.
def req1Print(catalog):
    print(controller.req1Format(catalog))


def req2Print(catalog, days):
    print(controller.req2Format(catalog, days))


def req3Print(catalog, days):
    print(controller.req3Format(catalog, days))


def req4Print(catalog, days):
    print(controller.req4Format(catalog, days))
"""
La vista se encarga de la interacciรณn con el usuario
Presenta el menu de opciones y por cada seleccion
se hace la solicitud al controlador para ejecutar la
operaciรณn solicitada
"""
def printMenu():
    """Print the main menu, one option per line."""
    menu_lines = (
        "Bienvenido",
        "1- Inicializar Catálogo",
        "2- Cargar información en el catálogo",
        "3- Videos tendencia con más views (país y categoría)",
        "4- Video que ha permanecido más dias en tendencia (país)",
        "5- Video que ha permanecido más dias en tendencia para una categoria",
        "6- Videos con más likes de un país para un Tag específico",
        "7-Gráficas de tiempo y memoria",
    )
    for line in menu_lines:
        print(line)
catalog = None

"""
Menu principal
"""
while True:
    printMenu()
    inputs = input('Seleccione una opción para continuar\n')
    # Only the first character of the reply is parsed as the option number;
    # a non-digit first character raises ValueError and aborts the program.
    if int(inputs[0]) == 1:
        print("Cargando información de los archivos ....")
        catalog = initCatalog()
    elif int(inputs[0]) == 2:
        print("Cargando información de los archivos ....")
        answer = loadData(catalog)
        print('Paises cargados: ' + str(controller.countriesSize(catalog)))
        print('Categorias cargadas: ' + str(controller.categoriesSize(catalog)))
        # loadData's result is used as a (time, memory) measurement pair.
        print("Tiempo [ms]: ", f"{answer[0]:.3f}", " || ",
              "Memoria [kB]: ", f"{answer[1]:.3f}")
    elif int(inputs[0]) == 3:
        req_category = request_category(catalog)
        req_country = request_country(catalog)
        n_sample = request_nsample()
        # Filter by country and category.
        req1_catalog = execute_req1(catalog, req_category, req_country, n_sample)
        req1Print(req1_catalog)
    elif int(inputs[0]) == 4:
        req_country = request_country(catalog)
        # Filter by country.
        req2_catalog = execute_req2(catalog, req_country)
        req2Print(req2_catalog[0], req2_catalog[1])
    elif int(inputs[0]) == 5:
        req_category = request_category(catalog)
        req3_catalog = execute_req3(catalog, req_category)
        req3Print(req3_catalog[0], req3_catalog[1])
    elif int(inputs[0]) == 6:
        req_country = request_country(catalog)
        req_tag = request_tag(catalog)
        n_sample = request_nsample()
        req4_catalog = execute_req4(catalog, req_country, req_tag, n_sample)
        req4Print(req4_catalog, n_sample)
    elif int(inputs[0]) == 7:
        graph.print_execution_time(catalog)
    else:
        # Any other option exits the program.
        sys.exit(0)
sys.exit(0)
|
22,155 | 07367a8ead889c2650993b2bc80e45435d3c4ce9 | ''' Q1.
Given two words (start and end), and a dictionary, find the length of shortest transformation sequence from start to end, such that:
Only one letter can be changed at a time
Each intermediate word must exist in the dictionary
E.g.
Input: start='hit', end='cog', dict = ['hot','dot','dog','lot','log']
Output: 5
Q2.
Instead of shortest length, find all shortest transformation sequence(s) from start to end
E.g.
Input: start='hit', end='cog', dict = ['hot','dot','dog','lot','log']
Output: [['hit','hot','dot','dog','cog'],['hit','hot','lot','log','cog']]
'''
from collections import deque
def word_ladder_len(start, end, dictionary):
    """BFS word ladder: return the shortest transformation sequence from
    *start* to *end* as a list of words, e.g. ['hit','hot','dot','dog','cog'].

    Despite the historical name, this returns the path, not its length
    (the caller prints the returned value). Returns [] when no ladder
    exists. When start == end it returns 0 -- a known type quirk kept for
    backward compatibility. *end* does not need to be in *dictionary*.
    """
    if start == end:
        return 0
    words = set(dictionary)
    q = deque([start])
    visited = {start}
    # word -> shortest path of words leading up to (not including) it
    path_map = {start: []}
    while q:
        current = q.popleft()
        for i in range(len(current)):
            # Try every lowercase letter at position i (same a..z order as
            # the original chr(97 + j) loop).
            for letter in 'abcdefghijklmnopqrstuvwxyz':
                candidate = current[:i] + letter + current[i + 1:]
                if candidate == end:
                    # Found the target: full ladder is path-to-current,
                    # then current, then end.
                    path_map[end] = path_map[current] + [current, end]
                    return path_map[end]
                if candidate in words and candidate not in visited:
                    q.append(candidate)
                    visited.add(candidate)
                    path_map[candidate] = path_map[current] + [current]
    # Dead level-counting code (n_curr_level_words / min_len) removed: it
    # never influenced the returned value.
    return []
if __name__ == '__main__':
    # NOTE: Python 2 print statement below; this file will not run on
    # Python 3 without converting it to print().
    test_cases = [('hit', 'log', ['hot', 'dot', 'dog', 'lot', 'log'])]
    for each_test_case in test_cases:
        start, end, dictionary = each_test_case
        print start, end, dictionary, word_ladder_len(start, end, dictionary)
22,156 | e933f577c4a09b673d11c706cf81e7e1703accac | num = int(input())
if num % 2 != 0:
print("Weird")
if num % 2 == 0:
if 2 <= num:
if num <= 5:
print("Not Weird!")
if num % 2 == 0:
if num >= 6:
if num < 20:
print("Weird!")
else:
print("Not Weird!")
"""
tab = [1,2,5,8,6,65,5,4,54,5,56,8,7,4,21,21,1,7,8,9,47,5,7,44,55,64,31,89,45,34,65,64,654]
print(tab.__len__())
num = 0
for x in tab:
num +=1
print("list lenght is :",num)
"""
|
22,157 | 3fa2511efb27cdb5db8c4244ce77651266154cb1 | # coding: utf-8
"""
"""
import datetime
import pytest
import sampledb
from sampledb.models import User, UserType, Action, ActionType, Object
from sampledb.logic import comments, objects, actions
@pytest.fixture
def user():
    """Create and persist a test user."""
    user = User(name='User', email="example@example.com", type=UserType.PERSON)
    sampledb.db.session.add(user)
    sampledb.db.session.commit()
    return user


@pytest.fixture
def action():
    """Create a sample-creation action with a minimal one-field schema."""
    action = actions.create_action(
        action_type_id=sampledb.models.ActionType.SAMPLE_CREATION,
        name='Example Action',
        schema={
            'title': 'Example Object',
            'type': 'object',
            'properties': {
                'name': {
                    'title': 'Sample Name',
                    'type': 'text'
                }
            },
            'required': ['name']
        },
        description='',
        instrument_id=None
    )
    return action


@pytest.fixture
def object(user: User, action: Action):
    """Create an object owned by *user* via *action*.

    Note: the fixture name shadows the ``object`` builtin inside this module.
    """
    data = {'name': {'_type': 'text', 'text': 'Object'}}
    return objects.create_object(user_id=user.id, action_id=action.id, data=data)
def test_comments(user: User, object: Object):
    """Comments can be created and retrieved with correct metadata and order."""
    start_datetime = datetime.datetime.utcnow()
    assert len(comments.get_comments_for_object(object_id=object.object_id)) == 0
    comment_id = comments.create_comment(object_id=object.object_id, user_id=user.id, content="Test 1")
    assert len(comments.get_comments_for_object(object_id=object.object_id)) == 1
    comment = comments.get_comments_for_object(object_id=object.object_id)[0]
    assert comment.id == comment_id
    assert comment.user_id == user.id
    assert comment.author == user
    assert comment.object_id == object.object_id
    assert comment.content == "Test 1"
    # The timestamp must fall inside the window spanned by this test run.
    assert comment.utc_datetime >= start_datetime
    assert comment.utc_datetime <= datetime.datetime.utcnow()
    comments.create_comment(object_id=object.object_id, user_id=user.id, content="Test 2")
    assert len(comments.get_comments_for_object(object_id=object.object_id)) == 2
    # Per these assertions the list comes back in creation order ("Test 1"
    # first); the unpacking names are deliberately checked both ways.
    comment2, comment1 = comments.get_comments_for_object(object_id=object.object_id)
    assert comment1.content == "Test 2"
    assert comment2.content == "Test 1"
    assert comment2.utc_datetime >= start_datetime
    assert comment2.utc_datetime <= datetime.datetime.utcnow()
|
22,158 | 1e74f9dcb4cbe95fca230b21e6c9782e29a4423c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 15:12:46 2021
@author: eaindraykhin
"""
# Print the integers 1 through 10, one per line.
print("\n".join(str(value) for value in range(1, 11)))
|
22,159 | 7b4977f937e246177c8f0e3eae319a0322090b7f | import re
import lxml.etree as etree
import bs4
import requests
from openpyxl import Workbook
def get_courses_list(limit=19):
    """Return up to *limit* course URLs from the Coursera sitemap.

    The sitemap is XML where each child element wraps a single <loc>
    entry (accessed as ``course[0]``). The original hard-coded the first
    19 entries; that is now a parameter with an unchanged default.
    """
    raw_xml = requests.get('https://www.coursera.org/sitemap~www~courses.xml').content
    parser_xml = etree.XMLParser(remove_blank_text=True)
    root_xml = etree.fromstring(raw_xml, parser_xml)
    return [course[0].text for course in root_xml[:limit]]
def get_course_info(url):
    """Scrape one course page and return a tuple
    (name, language, start date, week count, rating), with '' for any
    field that cannot be found.
    """
    coursera = requests.get(url)
    soup = bs4.BeautifulSoup(coursera.content, "lxml")
    content = str(soup.findAll('script', {'type': 'application/ld+json'}))
    name_course = soup.html.head.title.string
    count_week = len(soup.findAll('div', {'class': 'week-heading body-2-text'}))
    # Raw strings fix the invalid "\d"/"\w" escape sequences, which emit a
    # DeprecationWarning on modern Python and will eventually be errors.
    date_start = re.findall(r'"startDate":"(\d\d\d\d-\d\d-\d\d)', content)
    date_start = date_start[0] if date_start else ''
    lang_course = re.findall(r'"inLanguage":"(\w\w)', content)
    lang_course = lang_course[0] if lang_course else ''
    # BUGFIX: escape the decimal point -- a bare '.' matched any character,
    # so e.g. "4x5" would have been accepted as a rating.
    rating_course = re.findall(r'"ratingValue":(\d\.\d)', content)
    rating_course = rating_course[0] if rating_course else ''
    return name_course, lang_course, date_start, count_week, rating_course
def output_courses_info_to_xlsx(filepath, list_value):
    """Write a header row plus one row per course tuple to *filepath*."""
    wb = Workbook()
    ws = wb.active
    headers = ('Name course', 'language', 'Date start', 'Count of week', 'Rating of course')
    # Header row goes into A1..E1.
    for column, title in zip('ABCDE', headers):
        ws[column + '1'] = title
    # Data rows start at row 2, one course tuple per row.
    for row, course_value in enumerate(list_value, start=2):
        for column, value in zip('ABCDE', course_value):
            ws[column + str(row)] = value
    wb.save(filepath)
if __name__ == '__main__':
    # Fetch the sitemap's course URLs, scrape each course page, and dump
    # the collected tuples to a spreadsheet.
    list_value_course = []
    list_url_course = get_courses_list()
    for course in list_url_course:
        list_value_course.append(get_course_info(course))
    output_courses_info_to_xlsx('./sample.xlsx', list_value_course)
|
22,160 | 790bd94a48578469d46c5c0b561c1152ab68d64b | """
Select Any drop down from option
Find out how many options exist in drop down
count how many option present
capture option from drop down and print them
"""
from selenium import webdriver
import time
#select class need to import
from selenium.webdriver.support.ui import Select
# NOTE(review): assumes chromedriver.exe sits in the working directory
# and matches the installed Chrome version -- verify before running.
a = webdriver.Chrome("chromedriver.exe")
a.get("https://fs2.formsite.com/R1Tuim/form1/index.html")
element = a.find_element_by_id("RESULT_RadioButton-6")
drop = Select(element)
drop.select_by_visible_text("M")  # option selected by its visible text
# drop.select_by_value("Radio-1")  # option selected by value attribute
# drop.select_by_index("2")        # option selected by index
# Count how many options the drop-down contains.
print(len(drop.options))
# Capture all options and print each one's text.
all_option = drop.options
for option in all_option:
    print(option.text)
time.sleep(5)  # brief pause so the result is visible before quitting
a.quit()
22,161 | 4a6089c5e29a9715aa54619186f1892ff21cda89 | from others.mecab_rouge import Rouge
import os
import argparse
def format_rouge_scores(scores):
    """Render the ROUGE-1/2/L f/p/r values from *scores* as a report string."""
    # Flatten the nested dict in exactly the order the template consumes it:
    # rouge-1 f/p/r, rouge-2 f/p/r, rouge-l f/p/r.
    ordered_values = [
        scores[metric][field]
        for metric in ("rouge-1", "rouge-2", "rouge-l")
        for field in ("f", "p", "r")
    ]
    return """\n
****** ROUGE SCORES ******
** ROUGE 1
F1 >> {:.3f}
Precision >> {:.3f}
Recall >> {:.3f}
** ROUGE 2
F1 >> {:.3f}
Precision >> {:.3f}
Recall >> {:.3f}
** ROUGE L
F1 >> {:.3f}
Precision >> {:.3f}
Recall >> {:.3f}""".format(*ordered_values)
def save_rouge_scores(str_scores, path):
    """Write the formatted score report to *path*, overwriting any old file."""
    with open(path, mode="w") as sink:
        sink.write(str_scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--candidate_path", type=str, required=True, help="Path for dataset to inference"
)
parser.add_argument("--save_path", type=str, default="./results/", help="Path for rouge score")
args = parser.parse_args()
candidate_path = args.candidate_path
gold_path = os.path.splitext(candidate_path)[0] + ".gold"
candidate = []
with open(candidate_path, "r") as f:
for i in f.readlines():
candidate.append(i.strip())
gold = []
with open(gold_path, "r") as f:
for i in f.readlines():
gold.append(i.strip())
rouge_evaluator = Rouge(
metrics=["rouge-n", "rouge-l"],
max_n=2,
limit_length=True,
length_limit=1000,
length_limit_type="words",
apply_avg=True,
apply_best=False,
alpha=0.5, # Default F1_score
weight_factor=1.2,
)
scores = rouge_evaluator.get_scores(candidate, gold)
str_scores = format_rouge_scores(scores)
save_path = os.path.join(
args.save_path, os.path.splitext(os.path.split(candidate_path)[-1])[0] + ".rouge"
)
print(str_scores)
save_rouge_scores(str_scores, save_path)
|
22,162 | 69cf6bb4365b062a9eddfbe25923e96ef3309f57 | from numpy import atleast_1d
from .environments import Environment
from edge.dynamics import SlipDynamics
from edge.reward import ConstantReward, AffineReward
class Slip(Environment):
    """SLIP (spring-loaded inverted pendulum) hopping environment.

    The reward is a constant +1 per step ("hop as long as possible") plus
    an affine term on the first state dimension ("go as fast as possible").
    """

    def __init__(self, random_start=False, default_initial_state=None,
                 dynamics_parameters=None, reward_done_threshold=None,
                 steps_done_threshold=None):
        """Build the SLIP dynamics and composite reward.

        :param dynamics_parameters: optional dict of overrides for the
            physical defaults below (gravity, mass, stiffness, ...).
        """
        if dynamics_parameters is None:
            dynamics_parameters = {}
        default_dynamics_parameters = {
            'gravity': 9.81,
            'mass': 80.0,
            'stiffness': 8200.0,
            'resting_length': 1.0,
            'energy': 1877.08,
            'shape': (200, 10)
        }
        # User-supplied values take precedence over the defaults.
        default_dynamics_parameters.update(dynamics_parameters)
        dynamics = SlipDynamics(**default_dynamics_parameters)
        # just give it a reward every step, no matter what
        # this is equivalent to "hop as long as possible"
        reward = ConstantReward(dynamics.stateaction_space, constant=1)
        # now let's incentivize going as fast as possible.
        reward += AffineReward(dynamics.stateaction_space, [[1, 0], [0, 0]])
        if default_initial_state is None:
            # TODO change this so it uses stateaction wrappers
            # (a standing-energy derivation of the start state was sketched
            # here and left commented out; the current default is simply 1.0)
            default_initial_state = atleast_1d(1.0)
        super(Slip, self).__init__(
            dynamics=dynamics,
            reward=reward,
            default_initial_state=default_initial_state,
            random_start=random_start,
            reward_done_threshold=reward_done_threshold,
            steps_done_threshold=steps_done_threshold
        )

    def is_failure_state(self, state):
        """Failure is tracked by the dynamics, not derived from *state*."""
        # TODO change so it uses stateaction wrappers
        return self.dynamics.failed

    def reset(self, s=None):
        """ Overloading the default reset, to also reset internal variables.
        :param s: optional: the state where to initialize
        :return: the internal state of the environment (after reinitialization)
        """
        self.dynamics.failed = False
        return super(Slip, self).reset(s)
|
22,163 | ce8804b46a289bccbfd65aa35c207004fe29c759 | # -*- coding: utf-8 -*-
import sys
import numpy as np
import scipy
import random
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from scipy.sparse import coo_matrix, vstack
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    # Python 2 only: force the interpreter default encoding to UTF-8.
    reload(sys)
    sys.setdefaultencoding(default_encoding)
# print sys.getdefaultencoding()
# Redirect stdout so all printed progress/results go to a log file.
savedStdout = sys.stdout
f_handler = open('./predict_out/6000/predict_6000_active2.log', 'w')
sys.stdout = f_handler
inputfile1 = './uniform_hotel_30/train_u_out.txt'
inputfile2 = './uniform_hotel_30/test_u_out.txt'
vectorizer = CountVectorizer(token_pattern='(?u)\\b\\w+\\b')  # keep single-character tokens too
corpus = []
text_id = []
label = []
xuhao_id_dict = {}
# Input file format: id <TAB> label <TAB> text, one sample per line.
with open(inputfile1, 'r') as fr:
    for line in fr:
        text_id.append(line.strip().split('\t')[0])
        label.append(int(line.strip().split('\t')[1]))
        text = line.strip().split('\t')[2]
        corpus.append(text)
trian_size = len(text_id)  # NOTE: "trian" typo kept -- it is used throughout the script
# print trian_size
with open(inputfile2, 'r') as fr:
    for line in fr:
        text_id.append(line.strip().split('\t')[0])
        label.append(int(line.strip().split('\t')[1]))
        corpus.append(line.strip().split('\t')[2])
label = np.array(label)
text_id = np.array(text_id)
X = vectorizer.fit_transform(corpus)
word_dict = vectorizer.get_feature_names()
size = len(word_dict)
print '----------------count word frquency-----------------'
print 'length of word dict:', size
# X.toarray()
print 'shape of data vectors:', X.shape
print 'shape of labels:', label.shape
# print 'shape of text ids:',text_id.shape
# print 'length of id dict:',len(xuhao_id_dict)
transformer = TfidfTransformer()
X_tfidf = transformer.fit_transform(X)
print '----------------count word tfidf-----------------'
print X_tfidf.shape
# Split by position: the first trian_size rows came from the train file.
train_x = X_tfidf[:trian_size]
# print train_x_1
# train_x = scipy.sparse.vstack((train_x_1,train_x_2))
train_y = label[:trian_size]
# train_y = np.hstack((train_y_1,train_y_2))
test_x = X_tfidf[trian_size:]
test_y = label[trian_size:]
test_x_copy = test_x
test_y_copy = test_y
print 'train x shape:', train_x.shape
print 'train y shape:', train_y.shape
print 'test x shape:', test_x.shape
print 'test y shape:', test_y.shape
print '----------------training initial SVM model-----------------'
clf = SVC(kernel='linear')
clf.fit(train_x, train_y)
classes = clf.predict(test_x)
acc = accuracy_score(test_y, classes)
print 'acc:', acc
print classification_report(test_y, classes)
print '----------------save result-----------------'
# np.savetxt("./predict_out/predict_6000_no1.txt",classes)
# print 'predict no.3:'
# print np.array2string(classes)
# np.savetxt("./predict_out/predict_6000_no4.txt",classes)
def delete_rows_csr(mat, indices):
    """Return a copy of the CSR sparse matrix *mat* without the rows
    whose positions are listed in *indices*.
    """
    if not isinstance(mat, scipy.sparse.csr_matrix):
        raise ValueError("works only for CSR format -- use .tocsr() first")
    # Boolean row selection: keep every row except the listed ones.
    keep = np.ones(mat.shape[0], dtype=bool)
    keep[list(indices)] = False
    return mat[keep]
add_num = 10  # samples moved from the test pool to training per round
loop1 = 10    # number of active-learning rounds
num_list = np.arange(len(test_y))
xuhao_id_dict = dict(zip(num_list, text_id[trian_size:]))  # position -> text id
# print 'all items of id dict:',xuhao_id_dict.items()
test_left_xuhao = xuhao_id_dict.keys()
test_active_xuhao = test_left_xuhao
# Uncertainty-based active learning: each round, take the test samples the
# SVM is least confident about (smallest |decision_function| value), move
# them (with their true labels) into the training set, and retrain.
print '__________________start random active model__________________'
delete_all = []
for l in range(loop1):
    print '----------------select examples-----------------'
    delete_list = []
    distance_dict = {}
    distance_list = clf.decision_function(test_x)
    distance_abs = map(abs, distance_list)
    distance_dict = dict(zip(num_list, distance_abs))
    # Sort by distance to the separating hyperplane, closest first (Py2 cmp).
    distance_dict_sorted = sorted(distance_dict.items(), lambda x, y: cmp(x[1], y[1]))
    examples = distance_dict_sorted[:add_num]
    for item in examples:
        delete_list.append(item[0])
    # Positions of the samples removed from the test pool this round.
    print 'delete xuhao list:', delete_list
    delete_all[len(delete_all):len(delete_all)] = delete_list
    # Record the ids of the newly labelled samples.
    save_per_delete_id = []
    for item in delete_list:
        save_per_delete_id.append(xuhao_id_dict[item])
        temp_x = test_x_copy[item]
        temp_y = test_y_copy[item]
        train_x = scipy.sparse.vstack((train_x, temp_x))
        train_y = np.hstack((train_y, temp_y))
    print 'delete id list:', save_per_delete_id
    # Rebuild the test pool from the untouched copies minus all rounds so far.
    test_x = delete_rows_csr(test_x_copy, delete_all)
    test_y = np.delete(test_y_copy, delete_all)
    test_active_xuhao = list(set(test_active_xuhao) ^ set(delete_list))
    print 'update train x shape:', train_x.shape
    print 'update train y shape:', train_y.shape
    print 'update test x shape:', test_x.shape
    print 'update test y shape:', test_y.shape
    print 'test left xuhao:', len(test_active_xuhao)
    print '----------------training no.' + str(l + 1) + ' SVM model-----------------'
    clf.fit(train_x, train_y)
    classes = clf.predict(test_x)
    acc = accuracy_score(test_y, classes)
    print 'active acc:', acc
    label_num = len(train_y) - trian_size
    # Overall accuracy treats every manually labelled sample as correct.
    acc_all = (acc * len(test_y) + label_num) / (len(test_y) + label_num)
    print 'active acc_all:', acc_all
    print classification_report(test_y, classes)
    print '----------------save result-----------------'
    # np.savetxt("./predict_out/predict_6000_no4.txt",classes)
    # np.savetxt("predict_no5.txt",classes)
# ไปtest_out.txtไธญๆฝๅdelete id listไธญๅฏนๅบ็ๆๆฌ
# with open(inputfile2,'r') as fr:
# for line in fr:
# tt_id = line.strip().split('\t')[0]
# if tt_id in save_per_delete_id:
# print line.strip()
# # word_pos = [1030, 31375, 12048, 1298, 10261, 8982, 12061, 1438, 5155, 25253, 27942, 10158, 31049, 2110, 19903, 960, 10184, 19529, 16717, 23889, 1365, 1367, 1112, 13284, 26086, 30057, 13290, 1471, 1261, 28272, 23665, 12021, 7288, 11769, 16634, 1147, 1002, 1151]
# word_pos = [19082, 10741, 12433, 787, 15252, 5527, 924, 15263, 13608, 15273, 19373, 20786, 23220, 825, 13627, 9148, 1087, 19524, 838, 3271, 5705, 843, 10578, 992, 993, 21612, 22512, 14837, 24182, 13304, 3263, 9212]
# weight = 1.3
# size = len(word_dict)
# row = np.arange(size)
# col = np.arange(size)
# # data = np.ones(4)
# data_list = []
# for i in range(size):
# if i in word_pos:
# data_list.append(weight)
# else:
# data_list.append(1)
# data = np.array(data_list)
# # print data
# A = scipy.sparse.csr_matrix((data,(row,col)),shape = (size,size))
# # print A[1030,1030]
# X_tfidf_update = X_tfidf.dot(A)
# # print X_tfidf_update
# train_x = X_tfidf_update[:trian_size]
# # print train_x_1
# # train_x = scipy.sparse.vstack((train_x_1,train_x_2))
# train_y = label[:trian_size]
# # train_y = np.hstack((train_y_1,train_y_2))
# test_x = X_tfidf_update[trian_size:]
# test_y = label[trian_size:]
# test_x_copy = test_x
# test_y_copy = test_y
# print 'train x shape:',train_x.shape
# print 'train y shape:',train_y.shape
# print 'test x shape:',test_x.shape
# print 'test y shape:',test_y.shape
# # clf = SVC(kernel='linear')
# # clf.fit(train_x, train_y)
# # classes = clf.predict(test_x)
# # acc = accuracy_score(test_y,classes)
# print 'weight:',weight
# # print 'acc_add_weight:',acc
# for item in delete_list:
# # save_per_delete_id.append(xuhao_id_dict[item])
# temp_x = test_x_copy[item]
# temp_y = test_y_copy[item]
# train_x = scipy.sparse.vstack((train_x,temp_x))
# train_y = np.hstack((train_y,temp_y))
# # print 'delete id list:',save_per_delete_id
# test_x = delete_rows_csr(test_x_copy,delete_list)
# test_y = np.delete(test_y_copy,delete_list)
# # test_left_xuhao = list(set(test_left_xuhao)^set(delete_list))
# print 'update train x shape:',train_x.shape
# print 'update train y shape:',train_y.shape
# print 'update test x shape:',test_x.shape
# print 'update test y shape:',test_y.shape
# # print 'test left xuhao:',len(test_left_xuhao)
# clf.fit(train_x, train_y)
# classes = clf.predict(test_x)
# acc = accuracy_score(test_y,classes)
# print 'add_weight active acc:',acc
# acc_all = (acc*len(test_y)+add_num)/(len(test_y)+add_num)
# print 'add_weight active acc_all:',acc_all
# # np.savetxt("./predict_out/predict_6000_no5.txt",classes)
# # np.savetxt("predict_no6.txt",classes)
|
22,164 | 6320ddda507ccb8dfd44cf289815818d261a7d5c | from web3 import Web3
import json
infura_url = "https://rinkeby.infura.io/"  # Input your own API
web3 = Web3(Web3.HTTPProvider(infura_url))
contract_address = ""  # Input your own address
# Minimal ABI: the payable receive hook plus the two read-only views this
# script needs (allDonatorsList and the public `list` array getter).
contract_abi = [
    {
        "stateMutability": "payable",
        "type": "receive"
    },
    {
        "inputs": [],
        "name": "allDonatorsList",
        "outputs": [
            {
                "internalType": "string",
                "name": "",
                "type": "string"
            }
        ],
        "stateMutability": "view",
        "type": "function"
    },
    {
        "inputs": [
            {
                "internalType": "uint256",
                "name": "",
                "type": "uint256"
            }
        ],
        "name": "list",
        "outputs": [
            {
                "internalType": "address",
                "name": "",
                "type": "address"
            }
        ],
        "stateMutability": "view",
        "type": "function"
    }
]
contract = web3.eth.contract(address=contract_address, abi=contract_abi)
print("Donators list:")
# The contract returns one space-separated string of donator addresses.
raw_entries = contract.functions.allDonatorsList().call().split(" ")
# BUGFIX: the original removed items from the list while iterating over it,
# which skips the element right after each removal -- so consecutive empty
# entries survived and some addresses were never checksummed. Build a
# fresh list instead: drop empties, convert the rest to EIP-55 checksum form.
s = [Web3.toChecksumAddress(entry) for entry in raw_entries if entry]
print(s)
22,165 | 123d2bf203bd41b5bdb12e7f96ea53daffe52a14 | """ Use import_file(path, filter=True) to import xes or csv with pmy4py functions.
Returns error if not a csv or xes file.
activated filter = True (default) will remove traces without most frequent start and end activity
For CSV: Expected Column names are
case:concept:name -> case ID, concept:name -> activity, time:timestamp -> timestamp
"""
import os
import pandas as pd
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.algo.filtering.log.start_activities import start_activities_filter
from pm4py.objects.conversion.log import converter as log_converter
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.objects.log.util import dataframe_utils
from pm4py.objects.log.util import sorting
# Import an XES log with pm4py, sorted by timestamp; optionally filtered.
def import_xes(path, filter=True):
    """Read *path* as XES, sort events by timestamp and, when *filter* is
    True, drop traces lacking the dominant start/end activities."""
    event_log = sorting.sort_timestamp(xes_importer.apply(path))
    return remove_uncomplete_traces(event_log) if filter else event_log
# Import a CSV log, sorted by timestamp; optionally filtered. Expected
# columns: case:concept:name (case ID), concept:name (activity),
# time:timestamp (timestamp).
def import_csv(path, filter=True):
    """Read *path* as CSV, convert and sort timestamps, build an event log
    and, when *filter* is True, drop incomplete traces."""
    frame = pd.read_csv(path, sep=',')
    frame = dataframe_utils.convert_timestamp_columns_in_df(frame)
    frame = frame.sort_values('time:timestamp')
    event_log = log_converter.apply(frame)
    return remove_uncomplete_traces(event_log) if filter else event_log
# Remove incomplete traces / noise:
# keep only traces that begin with the single dominant start activity and
# end with one of the observed end activities (i.e. one allowed start).
# TODO adjust filter -> sprint 2
def remove_uncomplete_traces(event_log):
    """Filter *event_log* down to traces with the expected start/end activities."""
    # First key of the start-activity frequency dict (treated as the most
    # frequent start activity, per the message printed below).
    start_activity = list(start_activities_filter.get_start_activities(event_log).keys())[0]
    end_activities = list(end_activities_filter.get_end_activities(event_log).keys())
    filtered_log = end_activities_filter.apply(event_log, end_activities)
    filtered_log = start_activities_filter.apply(filtered_log, start_activity)
    cnt_removed_traces = len(event_log) - len(filtered_log)
    print('Most frequent start activity is: ' + start_activity + ' Removing all traces without that start activity..')
    print(
        'Most frequent end activity is: ' + str(end_activities) + ' Removing all traces without that end activities..')
    print('Number of removed traces: ' + str(cnt_removed_traces))
    return filtered_log
def import_file(path, filter=True):
    """Import an event log from *path*, dispatching on the file extension.

    :param path: path to a .csv or .xes log file
    :param filter: when True, drop traces lacking the most frequent
        start/end activities
    :raises ValueError: if the extension is neither .csv nor .xes
    """
    filename, file_extension = os.path.splitext(path)
    if file_extension == ".csv":
        print('Importing CSV File ' + filename + '...')
        print('Filter is set: ' + str(filter))
        return import_csv(path, filter)
    elif file_extension == ".xes":
        # BUG FIX: this branch previously announced "Importing CSV File".
        print('Importing XES File ' + filename + '...')
        print('Filter is set: ' + str(filter))
        return import_xes(path, filter)
    else:
        print('Error: Please choose XES or CSV file.')
        raise ValueError('File not valid')
|
22,166 | f954f0670090a529f0d1fc38c653d34439782918 | from __future__ import print_function
import argparse
import jinja2
import os
import io
import json
import sys
# Directory containing this script; used below to locate the template dirs.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
def get_args():
    """Parse command-line options.

    -i/--input-file: path to the lighthouse JSON report (default: stdin)
    -o/--output-file: where to write the rendered markdown
    -e: echo the rendered markdown to stdout even when -o is given
    """
    example_text = '''
    examples:
    python %(lighthouse)s -i /tmp/google.com.json -o /tmp/google.com.md -e
    python %(lighthouse)s < lighthouse-report.json
    lighthouse https://cats.com --output=json | python lighthouse2md.py -o out.md
    ''' % {'lighthouse': os.path.basename(__file__)}
    parser = argparse.ArgumentParser(epilog=example_text, formatter_class=argparse.RawTextHelpFormatter)
    # BUG FIX: help text read "Provide athe path".
    parser.add_argument("-i", "--input-file", help="Provide the path to an input file", default=sys.stdin)
    parser.add_argument("-o", "--output-file", help='Provide a filepath where the markdown result gets written')
    parser.add_argument("-e", action='store_true', default=False,
                        help='Echo the output to stdout, even when using the -o option')
    return parser.parse_args()
def preprocess_data(data):
    """Attach each audit to its category and flatten list-style displayValues.

    For every category, builds an 'audits' dict keyed by auditRef id; each
    audit gets an 'audit_template' name, and a displayValue of the form
    [fmt, arg1, ...] is collapsed to a formatted string.  Mutates and
    returns *data*.
    """
    for category_name in data['categories']:
        category = data['categories'][category_name]
        category['audits'] = dict()
        for ref in category['auditRefs']:
            audit = data['audits'][ref['id']]
            audit['audit_template'] = '%s.md' % ref['id']
            if 'displayValue' in audit and type(audit['displayValue']) is list:
                display = audit['displayValue']
                try:
                    audit['displayValue'] = display[0] % tuple(display[1:])
                except TypeError:
                    # Malformed format/args pair: dump the audit for debugging.
                    print(audit)
            category['audits'][ref['id']] = audit
    return data
def read_input(input_file):
    """Decode JSON from *input_file*: a path string or an open file object."""
    decoder = json.JSONDecoder()
    if type(input_file) is str:
        with io.open(input_file, encoding='utf-8') as stream:
            return decoder.decode(stream.read())
    return decoder.decode(input_file.read())
def write_output(output_file, rendered, force_stdout=False):
    """Write *rendered* to *output_file* (when given) and optionally echo it."""
    if output_file:
        with io.open(output_file, 'w', encoding='utf-8') as sink:
            sink.write(rendered)
    if force_stdout:
        print(rendered)
def main():
    """Render the lighthouse JSON report to markdown via jinja2 templates.

    User templates (SCRIPT_PATH/user/templates) take precedence over the
    built-in ones (SCRIPT_PATH/templates).
    """
    args = get_args()
    template_dirs = [
        os.path.join(SCRIPT_PATH, 'user', 'templates'),
        os.path.join(SCRIPT_PATH, 'templates')
    ]
    loader = jinja2.FileSystemLoader(template_dirs)
    env = jinja2.Environment(loader=loader)
    template = loader.load(env, 'index.md')
    report = preprocess_data(read_input(args.input_file))
    rendered = template.render({'data': report})
    # Echo when -e was given or when no output file is configured.
    echo = args.e or not args.output_file
    write_output(args.output_file, rendered, force_stdout=echo)
if __name__ == '__main__':
    main()
|
22,167 | 99ef41c67571f5046be0153a3720d67b4d77b309 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MetaItem(scrapy.Item):
    """Per-record scrape metadata.

    NOTE(review): field meanings inferred from names only (src = source,
    ts = timestamp, uid = unique id, cs = checksum?) — confirm against the
    pipeline that populates this item.
    """
    src = scrapy.Field()
    ts = scrapy.Field()
    name = scrapy.Field()
    url = scrapy.Field()
    uid = scrapy.Field()
    cs = scrapy.Field()
class AutodataItem(scrapy.Item):
    """One used-car listing scraped from a dealer site.

    Inline comments carry example values from the original author.  Fields
    prefixed autodata_* pair a resolved catalogue value with its id.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    Last_Code_Update_Date = scrapy.Field() #Monday, April 29, 2019
    Scrapping_Date = scrapy.Field() #Monday, April 29, 2019
    Source = scrapy.Field()
    Country = scrapy.Field() #UAE
    City = scrapy.Field() #Sharjah
    Seller_Type = scrapy.Field() #Large independent dealers
    Seller_Name = scrapy.Field() #automalluae
    Car_URL = scrapy.Field() #https://www.automalluae.com/used-car-shop/9538886-toyota-prius-iconic/
    Car_Name = scrapy.Field() #Toyota Prius ICONIC
    Year = scrapy.Field() #2017
    Make = scrapy.Field() #Toyota
    model = scrapy.Field() #Prius
    Spec = scrapy.Field() #ICONIC
    Doors = scrapy.Field() #5
    transmission = scrapy.Field() #Automatic
    trim = scrapy.Field() #Grey - Leather
    bodystyle = scrapy.Field() #Hatchback
    other_specs_gearbox = scrapy.Field() #
    other_specs_seats = scrapy.Field()
    other_specs_engine_size = scrapy.Field() #1.8l
    other_specs_horse_power = scrapy.Field()
    colour_exterior = scrapy.Field() #Silver
    colour_interior = scrapy.Field()
    fuel_type = scrapy.Field() #Diesel
    import_yes_no_also_referred_to_as_GCC_spec = scrapy.Field()
    mileage = scrapy.Field() #55924
    condition = scrapy.Field()
    warranty_untill_when = scrapy.Field()
    service_contract_untill_when = scrapy.Field()
    Price_Currency = scrapy.Field()
    asking_price_inc_VAT = scrapy.Field()
    asking_price_ex_VAT = scrapy.Field() #69956
    warranty = scrapy.Field() #Yes
    service_contract = scrapy.Field()
    vat = scrapy.Field() #Yes
    mileage_unit = scrapy.Field()
    engine_unit = scrapy.Field()
    meta = scrapy.Field()
    autodata_transmission = scrapy.Field()
    autodata_transmission_id = scrapy.Field()
    autodata_bodystyle = scrapy.Field()
    autodata_bodystyle_id = scrapy.Field()
    autodata_Make = scrapy.Field()
    autodata_Make_id = scrapy.Field()
    autodata_model = scrapy.Field()
    autodata_model_id = scrapy.Field()
    autodata_Spec = scrapy.Field()
    autodata_Spec_id = scrapy.Field()
    wheel_size = scrapy.Field()
    top_speed_kph = scrapy.Field()
    cylinders = scrapy.Field()
    torque_Nm = scrapy.Field()
    acceleration = scrapy.Field()
|
22,168 | bb5984ba681b1511e625000fa76bf3b6eac5f052 | from scapy.all import *
from scapy.layers.inet import IP, TCP
import time , sys ,os
# ANSI terminal colour codes used for the banner and packet output.
#COLORS============#
blue = '\033[94m' #
green = '\033[32m' #
red = '\033[91m' #
w = '\033[0m' #
#==================#
# BUG FIX: "cls" exists only on Windows; use the right clear command per OS.
os.system('cls' if os.name == 'nt' else 'clear')
print(green + "Starting.........." + "\n")
print(w+"\npress"+blue+" double Ctrl+c"+w+" To Stop....\n" + red)
time.sleep(3)
def print_summary(pkt):
    """Print src/dst addresses (and TCP ports) for each sniffed IP packet.

    Also prints a "[!]" marker whenever 192.168.0.1 is either endpoint.
    """
    if IP not in pkt:
        return
    src_addr = pkt[IP].src
    dst_addr = pkt[IP].dst
    if TCP in pkt:
        sport = pkt[TCP].sport
        dport = pkt[TCP].dport
        time.sleep(1)  # throttle the console output
        print(red + "IP src " + str(src_addr) + " TCP sport " + str(sport))
        print(green + "IP dst " + str(dst_addr) + " TCP dport " + str(dport))
        print("")
    if src_addr == "192.168.0.1" or dst_addr == "192.168.0.1":
        print("[!]")
# BUG FIX: the old code used `if KeyboardInterrupt:` (always truthy, and
# unreachable until sniff() returned) inside a bare `except: pass` that
# silently swallowed every error.  Catch Ctrl+C explicitly instead.
try:
    sniff(filter="ip", prn=print_summary)
    sniff(filter="ip and host 192.168.0.1", prn=print_summary)
except KeyboardInterrupt:
    print(blue+"exiting...!")
    print(w+"")
    sys.exit()
|
22,169 | bc68fdaf04314047be3d71b3fcd0f8911774b6b3 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
from chess_social.pgn_file import PgnFile
from chess_social.graph import ChessGraph
from chess_social.bayes_community_detection import CommunityDetector
def main(data_file_name, iterations, output_dir, min_elo, p_in, p_out, burnin):
    """Build the player graph from a PGN file, sample community labels,
    and render the estimated community partition.  Returns 0 on success.

    NOTE(review): output_dir is accepted but not used here — confirm
    whether render_community_graph should receive it.
    """
    with PgnFile(data_file_name) as pgnfile:
        graph = ChessGraph(pgnfile, min_elo=min_elo)
        detector = CommunityDetector(p_in=p_in, p_out=p_out)
        label_history = detector.run(graph, iterations=iterations)
        # One label assignment per iteration plus the initial state.
        assert len(label_history) == iterations + 1
        chosen_index, communities = CommunityDetector.estimate_partitions(label_history, burnin=burnin)
        graph.communities = communities
        graph.render_community_graph(show_single_nodes=False)
        return 0
if __name__ == '__main__':
    # CLI: positional PGN data file plus sampler/filter tuning options.
    cmdline_parser = argparse.ArgumentParser()
    cmdline_parser.add_argument('filename', metavar='datafile')
    cmdline_parser.add_argument('--iterations', action='store', dest='iterations',
                                type=int, default=100)
    cmdline_parser.add_argument('--output_dir', action='store', default='.')
    cmdline_parser.add_argument('--burnin', action='store', type=int, default=0)
    cmdline_parser.add_argument('--min_elo', action='store', type=int, default=2500)
    cmdline_parser.add_argument('--p_in', action='store', type=float, default=0.8)
    cmdline_parser.add_argument('--p_out', action='store', type=float, default=0.2)
    parsed_args = cmdline_parser.parse_args()
    # Edge probabilities must be < 1.0; reject bad values before running.
    if parsed_args.p_in >= 1.0 or parsed_args.p_out >= 1.0:
        print('Invalid edge probabilities: {0}, {1}'.format(parsed_args.p_in,
                                                            parsed_args.p_out))
        sys.exit(1)
    # NOTE(review): minimum ELO floor of 1000 appears to be a sanity bound.
    if parsed_args.min_elo < 1000:
        print('Invalid minimum ELO rating: {0}'.format(parsed_args.min_elo))
        sys.exit(1)
    sys.exit(main(parsed_args.filename,
                  parsed_args.iterations,
                  parsed_args.output_dir,
                  parsed_args.min_elo,
                  parsed_args.p_in,
                  parsed_args.p_out,
                  parsed_args.burnin))
|
22,170 | d0d6cf38537cb06143de71f1d01563be861547e7 | import numpy as np
import matplotlib.pyplot as plt
class ricker_fft2:
    # Builds a Ricker wavelet in the frequency domain, applies a time delay
    # as a phase shift, and inverse-FFTs it back to the time domain.
    def __init__(self, N=1024, fc=50., tmax=1., delay=None):
        """
        $\int e^(-iwt) dt$ as Fourier transform (normal convention);
        the numpy function is np.fft.fft and output is scaled by 1/N
        :param N: int, number of samples
        :param fc: float, unit = Hz, central frequency
        :param tmax: float, unit = s, total length of time
        :param delay: float, unit = s, time shift applied as a linear phase;
            defaults to 1.5/fc when None
        """
        fc = float(fc)
        tmax = float(tmax)
        dt = tmax / (N-1)
        df = 1. / tmax
        # Symmetric sample index range [-N//2+1, N//2]; nroll shifts it so
        # index 0 lands first after np.roll below.
        istart = -N//2 + 1
        nroll = -istart
        t = np.arange(istart, N // 2 + 1) * dt
        f = np.arange(istart, N // 2 + 1) * df
        # arrange f as positive, negative freqs (FFT ordering)
        f = np.roll(f, -nroll)
        # freq domain ricker
        ricker_f = 2 * f**2 / (np.sqrt(np.pi) * fc**3) * np.exp(-(f/fc)**2)
        if delay is None:
            delay = 1.5 / fc
        # Time delay applied as a linear phase ramp in frequency.
        ricker_f = ricker_f * np.exp(-1j * 2 * np.pi * f * delay)
        # time domain ricker (N undoes numpy's 1/N ifft scaling)
        ricker_t = N * np.real(np.fft.ifft(ricker_f))
        amp = np.absolute(ricker_f)
        phase = np.unwrap(np.angle(ricker_f, False))
        # ricker_f[0] contains the zero frequency term,
        # ricker_f[1:N//2] contains the positive-frequency terms,
        # ricker_f[N//2 + 1:] contains the negative-frequency terms,
        # in increasing order starting from the most negative frequency
        self.delay = delay
        self.fc = fc
        self.dt = dt
        # arange t and ricker_t in the order of increasing time; the zero phase case contains negative time
        self.ricker_t = np.roll(ricker_t, nroll)
        self.f = f
        self.t = t
        self.df = df
        self.ricker_f = ricker_f
        self.amp = amp
        self.phase = phase
        self.nroll = nroll
    def plot_ricker(self):
        """Plot the wavelet in time, its amplitude spectrum, and its phase."""
        # Index of the Nyquist bin; only non-negative frequencies are plotted.
        idxNq = len(self.t) // 2
        fig2, ax2 = plt.subplots(1, 3)
        fig2.set_size_inches(18, 6)
        tdelay = self.delay
        ax2[0].plot(self.t, self.ricker_t)
        ax2[0].set_title(r'$t_{{delay}}$ = {:.4f} s'.format(tdelay))
        ax2[0].set_xlabel('t (s)')
        ax2[1].plot(self.f[: idxNq + 1], self.amp[: idxNq + 1])
        ax2[1].set_xlabel('f (Hz)')
        ax2[1].set_ylabel('Amp')
        ax2[1].set_title(r'Ricker wavelet, $f_c$ = {:g} Hz'.format(self.fc))
        # Average phase slope over the positive band; d(phi)/d(omega) gives
        # the group delay after dividing by 2*pi.
        slope = (self.phase[idxNq] - self.phase[0]) / (self.f[idxNq] - self.f[0])
        ax2[2].plot(self.f[: idxNq + 1], self.phase[: idxNq + 1])
        ax2[2].set_xlabel('f (Hz)')
        ax2[2].set_ylabel('Phase (radians)')
        ax2[2].set_title(r'$d\phi/d\omega$ = {:.4f}'.format(slope / (2 * np.pi)))
        plt.show()
# Demo: build a 256-sample, 50 Hz Ricker wavelet and plot it.
fc = 50.
rk3 = ricker_fft2(N=256, fc=fc, tmax=1., delay=1.5/fc)
rk3.plot_ricker()
|
22,171 | 958b53dc0b6327fca5d64df44d77ee55c19b27a2 | import sys
import os
def add_file(afile, filename='files'):
    """
    Add file to the list of files we want to track
    for changes.  One entry is appended per line.
    Currently we are deciding not to add this feature. In our current
    security model, we want the monitoring system to be responsible for
    all aspects of the configuration as we assume that the machine it is
    monitoring could be compromised.
    There are a couple issues with supporting adding files with this model.
    First: The monitoring system has to be the one that initiates the
    add. To do this, it would need to upload a file to the FTP server. We
    do not want the monitoring server to have this permission as it increases
    the attack vector on the system we are trying to keep secure.
    Second: It would be difficult to coordinate between the two servers to
    give the monitoring server information about what files are contained on
    the monitored server.
    It would look something like this:
    1. Monitoring server uploads a file containing the name of the file it
    wants to upload.
    2. Upon receiving, the monitored server checks to make sure that file exists.
    The filename would either have to be the absolute path, or we would have to
    figure out a way to determine which file the monitoring server intended.
    3. After verifying its existence, the monitored server would add the file
    to the files file and then copy the file to the current directory.
    4. The monitoring folder would have to poll a state variable to check to
    see when the file has been copied over.
    5. After the copy is made and the state variable is set, the monitoring folder
    can then pull the data, make the checksum, and store in the local database.
    NOTE: State variables may still need to be set to prevent the monitoring server
    from trying to download until the file is copied.
    NOTE: To add this functionality, it may be better to use HTTPS, as back and
    forth handshake exchange is built into the protocol.
    Third: As noted above: How can we distinguish between two files with same filename
    unless absolute path is given?
    Fourth: When we generate initial checksums, we are assuming that the machine's
    integrity is intact. But in the future, we only know that the files we are
    monitoring have not been compromised. The newly added files could have been
    altered and the monitored server could just be sending the compromised file.
    This provides a false sense of security about the newly monitored files that
    we want to avoid.
    """
    with open(filename, 'a') as f:
        # BUG FIX: entries were appended with no separator, so successive
        # filenames ran together into one unparseable line.
        f.write(afile + '\n')
if __name__ == '__main__':
    if len(sys.argv) == 2:
        add_file(sys.argv[1])
    else:
        # BUG FIX: a wrong argument count previously did nothing, silently.
        sys.stderr.write('usage: %s <file-to-track>\n' % sys.argv[0])
        sys.exit(1)
|
22,172 | 81bbde99ff7eca3c09a59e4725fcfa8babfe026a | #Correct Inequality Signs
#Create a function that returns true if a given inequality expression is correct and false otherwise.
def correct_signs(string):
return eval(string)
# Demo calls (expected results; arrows restored from mojibake).
print(correct_signs("3 < 7 < 11"))
# -> True
print(correct_signs("13 > 44 > 33 > 1"))
# -> False
print(correct_signs("1 < 2 < 6 < 9 > 3"))
# -> True
|
22,173 | 4de030ab12419afe74c714d17a70a18b8472f1ae | #https://github.com/ottogroup/palladium/blob/f02d8e28889383ae5842225725e301c9182461bf/palladium/util.py
try:
from collections import UserDict
except:
from UserDict import UserDict
from datetime import datetime
class ProcessStore(UserDict,object):
    """Dict-like store that records a modification timestamp per key in
    self.mtime (key -> datetime of last __setitem__)."""
    def __init__(self, *args, **kwargs):
        # mtime must exist before UserDict.__init__ runs, because the base
        # constructor routes any initial items through our __setitem__.
        self.mtime = {}
        super(ProcessStore, self).__init__(*args, **kwargs)
    def __setitem__(self, key, item):
        # Store the value, then stamp the key with the current time.
        super(ProcessStore, self).__setitem__(key, item)
        self.mtime[key] = datetime.now()
    def __getitem__(self, key):
        # Plain delegation; reads do not update mtime.
        return super(ProcessStore, self).__getitem__(key)
    def __delitem__(self, key):
        # Remove both the value and its timestamp.
        super(ProcessStore, self).__delitem__(key)
        del self.mtime[key]
# Module-level singleton store, aliased as `cache`.
process_store = ProcessStore(process_metadata={})
cache = process_store
class data(object):
    """Toy payload used to exercise the ProcessStore cache below."""
    def __init__(self):
        # BUG FIX: `print 'sd'` is Python 2 statement syntax and a
        # SyntaxError on Python 3; the function form works on both.
        print('sd')
    def _find(self):
        return 5
# Demo: store a data() instance under a literal key and show the cache.
x=data()
cache['self.key'] = x
cache
|
22,174 | 61785c9bc9ae0b058d863820fc0932d603fc3632 | #-*- coding: utf-8 -*-
import logging
from abc import ABCMeta, abstractmethod
# Module-level logger for the financial-report data providers.
log = logging.getLogger("qi.data.compfrdat")
class CompanyFrData(metaclass=ABCMeta):
    """Abstract source of per-company financial-report data."""
    @abstractmethod
    def get_data(self, comp_code):
        # Return financial-report data for the company code.
        pass
class CompanyGuideFrData(CompanyFrData):
    """Fetches a company's financial report by crawling the Company Guide
    site, preferring locally cached JSON when a reader is configured."""
    def __init__(self, crawler):
        self.crawler = crawler
        # Optional; assign a reader externally to enable the JSON cache path.
        self.json_reader = None
        # In-memory cache: comp_code -> financial report data.
        self.data = {}
    def get_data(self, comp_code):
        """Return financial-report data for *comp_code*.

        Preference order: JSON reader (if configured), in-memory cache,
        then a live crawl.
        """
        fr_data = self._get_from_json(comp_code)
        if fr_data:
            return fr_data
        if self.data.get(comp_code, None) is None:
            fr_data = self.crawler.get_fr_data(comp_code)
            self.data[comp_code] = fr_data
        return self.data[comp_code]
    def _get_from_json(self, comp_code):
        # BUG FIX: json_reader defaults to None, so this previously raised
        # AttributeError before the crawler fallback could ever run.
        if self.json_reader is None:
            return None
        return self.json_reader.read_json(comp_code)
|
22,175 | f1e9785506ca065cf05cba658749794838b6f3a0 | import logging
import random
import subprocess as sp
from optparse import OptionParser
class MacChange():
    """Wraps `ip link` to show interfaces and change a NIC's MAC address."""
    def __init__(self, mac_addr=None, iface = None):
        self.mac = mac_addr
        self.iface = iface
    #return the current saved interface
    def get_iface(self):
        return self.iface
    #set an interface string
    def set_iface(self, iface):
        self.iface = iface
    #return the current saved mac-address
    def get_mac(self):
        return self.mac
    #set a mac-address
    def set_mac(self, mac):
        self.mac = mac
    #build a random mac-address, store it, and return it
    def set_random_mac(self):
        # BUG FIX: this previously called the bare name `set_mac(...)`,
        # which raised NameError — it must go through `self`.  Returning
        # the generated MAC also makes chained use by callers safe.
        rnd_mac = ':'.join(("%012x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2))
        self.set_mac(rnd_mac)
        return rnd_mac
    #shows the ip link interfaces
    def show_interface(self):
        if self.iface:
            command = "ip link show " +self.iface
        else:
            command = "ip link show"
        sp.call(command.split())
    #change the mac address in ip link: take the link down, set the
    #address, and bring it back up
    def change_mac(self):
        if self.iface:
            idown = "ip link set dev " + self.iface + " down"
            sp.call(idown.split())
            if self.mac:
                ch_mac = "ip link set dev " + self.iface + " address " + self.mac
                sp.call(ch_mac.split())
            else:
                logging.error("No MacAddress choosen")
                exit()
            fn_cmd = "ip link set dev " + self.iface + " up"
            sp.call(fn_cmd.split())
        else:
            logging.error("No Interface choosen")
            exit()
if __name__ == '__main__':
    opt = OptionParser()
    mac = MacChange()
    # Options: set a MAC manually, generate a random one, or show links.
    opt.add_option('-m', '--mac', help='Choose a Mac Address\n', dest='mac')
    opt.add_option('-i', '--interface', help='Choose a Interface\n', dest='iface')
    opt.add_option('-r', '--random', help='Build random Mac Address\n', dest='rndmac', action='store_true')
    opt.add_option('-s', '--show', help='Show the interface\n', dest='shw', action='store_true')
    arg, args = opt.parse_args()
    # -s alone: show every interface.
    if arg.iface is None and arg.shw:
        mac.show_interface()
        exit()
    # -s -i <interfacename>: show only that interface.
    if arg.iface and arg.shw:
        mac.set_iface(arg.iface)
        mac.show_interface()
        exit()
    if arg.iface is None and arg.shw is None:
        logging.error("Interface needed")
        exit()
    if arg.iface and arg.mac:
        mac.set_iface(arg.iface)
        mac.set_mac(arg.mac)
        mac.change_mac()
    elif arg.iface and arg.rndmac:
        mac.set_iface(arg.iface)
        # BUG FIX: set_random_mac() stores the address itself; the old code
        # passed its return value to set_mac(), overwriting the MAC with None.
        mac.set_random_mac()
        mac.change_mac()
    else:
        logging.error("Failure: Check parameter for interface and mac")
        exit()
|
22,176 | 0ba752879e037b0bbcc8336ed28e254d6baa4f79 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from PIL import Image, ImageFont, ImageDraw, ImageFilter
import random
def rndChar():
    """Return one random uppercase ASCII letter (A-Z)."""
    return chr(random.randint(ord('A'), ord('Z')))
def rndColor():
    """Random bright RGB tuple (each channel in 64..255) for noise dots."""
    return tuple(random.randint(64, 255) for _ in range(3))
def rndColor2():
    """Random dark RGB tuple (each channel in 32..127) for the glyphs."""
    return tuple(random.randint(32, 127) for _ in range(3))
# Render a 4-letter captcha: noisy background dots, random glyphs, then blur.
width = 60 * 4
height = 60
image = Image.new('RGB', (width, height), (255, 255, 255))
font = ImageFont.truetype('/usr/share/fonts/lyx/cmmi10.ttf', 36)
draw = ImageDraw.Draw(image)
# Speckle every other row with bright noise dots.
for x in range(width):
    # BUG FIX: height / 2 is a float on Python 3 and range() rejects it;
    # use integer division.
    for y in range(height // 2):
        draw.point((x, 2*y), fill = rndColor())
# Draw four random letters spaced 60px apart.
for t in range(4):
    draw.text((60 * t + 10, 10), rndChar(), font = font, fill = rndColor2())
image.save('./test/test3.jpg','jpeg')
image = image.filter(ImageFilter.BLUR)
image.save('./test/test4.jpg','jpeg')
|
22,177 | 7b4f950412e4d09c9e61378f7aba3f608ad7e399 | import csv
# Path to the iris dataset, relative to this script's working directory.
FILE = r'../data/iris.csv'
"""
sepal_length,sepal_width,petal_length,petal_width,species
5.4,3.9,1.3,0.4,setosa
5.9,3.0,5.1,1.8,virginica
6.0,3.4,4.5,1.6,versicolor
7.3,2.9,6.3,1.8,virginica
5.6,2.5,3.9,1.1,versicolor
5.4,3.9,1.3,0.4,setosa
"""
# Read the CSV with DictReader and print each row as a plain dict
# (all values come back as strings).
with open(FILE) as file:
    data = csv.DictReader(file, delimiter=',', quotechar='"')
    for line in data:
        print(dict(line))
# {'sepal_length': '5.4', 'sepal_width': '3.9', 'petal_length': '1.3', 'petal_width': '0.4', 'species': 'setosa'}
# {'sepal_length': '5.9', 'sepal_width': '3.0', 'petal_length': '5.1', 'petal_width': '1.8', 'species': 'virginica'}
# {'sepal_length': '6.0', 'sepal_width': '3.4', 'petal_length': '4.5', 'petal_width': '1.6', 'species': 'versicolor'}
# {'sepal_length': '7.3', 'sepal_width': '2.9', 'petal_length': '6.3', 'petal_width': '1.8', 'species': 'virginica'}
# ...
|
22,178 | 9c0db6872ce956edcce7b0df0c36cf65b72dea53 | import unittest
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
class LoginTest(unittest.TestCase):
    """Drives a Facebook login through Firefox and waits for the home logo."""
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.get("http://facebook.com")
    def test_Login(self):
        """Function verifies the login successful or not
        @self : Object
        """
        driver = self.driver
        # SECURITY/BUG FIX: credentials were read with eval(input(...)),
        # which executes whatever the user types as Python code (a plain
        # username raises NameError, and it is a code-injection hole).
        # Read them as plain strings instead.
        facebook_username = input("\nEnter Your FaceBook Username:")
        facebook_password = input("\nEnter Your FaceBook Password:")
        emailFieldId = "email"
        pwdFieldId = "pass"
        loginButtonXpath = "//input[@value='Log In']"
        fbLogoPath = "(//a[contains(@href,'logo')])[1]"
        # Wait (up to 10s each) for the login form elements to appear.
        emailFieldElement = WebDriverWait(driver, 10).until(
            lambda driver: driver.find_element_by_id(emailFieldId)
        )
        passwordFieldElement = WebDriverWait(driver, 10).until(
            lambda driver: driver.find_element_by_id(pwdFieldId)
        )
        loginButtonElement = WebDriverWait(driver, 10).until(
            lambda driver: driver.find_element_by_xpath(loginButtonXpath)
        )
        emailFieldElement.clear()
        emailFieldElement.send_keys(facebook_username)
        passwordFieldElement.clear()
        passwordFieldElement.send_keys(facebook_password)
        loginButtonElement.click()
        # Login is considered successful once the FB logo link shows up.
        WebDriverWait(driver, 10).until(
            lambda driver: driver.find_element_by_xpath(fbLogoPath)
        )
    def tearDown(self):
        self.driver.quit()
# Run the test case via the unittest runner when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
22,179 | e707b51dbf921ee227c2ca033b43dfa1ef1aa7ce | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("../")
import numpy as np
import unittest
import paddle
from paddleslim.nas import ofa
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.layers import *
from paddleslim.nas.ofa.layers_base import Block
class ModelCase1(paddle.nn.Layer):
    """Sequential stack covering Super* conv, grouped conv, transpose conv,
    sync-BN and separable-conv variants, used to smoke-test OFA handling."""
    def __init__(self):
        super(ModelCase1, self).__init__()
        models = [SuperConv2D(3, 4, 3, bias_attr=False)]
        models += [SuperConv2D(4, 4, 3, groups=4)]
        models += [SuperSyncBatchNorm(4)]
        models += [SuperConv2D(4, 4, 3, groups=2)]
        models += [SuperConv2DTranspose(4, 4, 3, bias_attr=False)]
        models += [SuperConv2DTranspose(4, 4, 3, groups=4)]
        # NOTE(review): plain paddle layer amid Super* layers — presumably
        # deliberate mixed-layer coverage; confirm.
        models += [paddle.nn.Conv2DTranspose(4, 4, 3, groups=2)]
        models += [SuperConv2DTranspose(4, 4, 3, groups=2)]
        models += [
            SuperSeparableConv2D(
                4,
                4,
                1,
                padding=1,
                bias_attr=False,
                candidate_config={'expand_ratio': (1.0, 2.0)}),
        ]
        self.models = paddle.nn.Sequential(*models)
    def forward(self, inputs):
        # Run the whole stack end to end.
        return self.models(inputs)
class ModelCase2(paddle.nn.Layer):
    """Minimal model: a single SuperSyncBatchNorm layer in a Sequential."""
    def __init__(self):
        super(ModelCase2, self).__init__()
        self.models = paddle.nn.Sequential(SuperSyncBatchNorm(4))
    def forward(self, inputs):
        return self.models(inputs)
class TestCase(unittest.TestCase):
    """Smoke test: ModelCase1 still runs forward after OFA construction.

    NOTE(review): `out` comes from the raw model, not from `ofa_model`;
    the OFA wrapper is only constructed — confirm construction-only is
    the intent.
    """
    def setUp(self):
        self.model = ModelCase1()
        data_np = np.random.random((1, 3, 64, 64)).astype(np.float32)
        self.data = paddle.to_tensor(data_np)
    def test_ofa(self):
        ofa_model = OFA(self.model)
        out = self.model(self.data)
class TestCase2(TestCase):
    """Smoke test: run ModelCase2 forward without an OFA wrapper.

    NOTE(review): ModelCase2 wraps SuperSyncBatchNorm(4) while the input
    has 3 channels — confirm the channel counts are intentional.
    """
    def setUp(self):
        self.model = ModelCase2()
        data_np = np.random.random((1, 3, 64, 64)).astype(np.float32)
        self.data = paddle.to_tensor(data_np)
    def test_ofa(self):
        out = self.model(self.data)
class TestSuperSyncBatchNormInStatic(TestCase):
    """Same smoke test with static graph mode enabled first.

    NOTE(review): after paddle.enable_static(), paddle.to_tensor and the
    direct model call are imperative-mode APIs — confirm this actually
    exercises SuperSyncBatchNorm under static mode.
    """
    def setUp(self):
        paddle.enable_static()
        self.model = ModelCase2()
        data_np = np.random.random((1, 3, 64, 64)).astype(np.float32)
        self.data = paddle.to_tensor(data_np)
    def test_ofa(self):
        out = self.model(self.data)
# Run all test cases above with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
22,180 | bde6d86d212238e37519e93b26935a8c903f83a5 | # Script for importing data into console.
def importdata(fpath=''):
    """Load a MATLAB .mat file into a dict, prompting with a file dialog
    when *fpath* is empty or missing, and record the chosen directory in
    the load history.

    NOTE(review): uses Python 2 module names (Tkinter, tkFileDialog) and
    the project-local `readhistory` module — histReader semantics assumed
    from usage here.
    """
    import scipy.io
    import os
    import readhistory;
    from Tkinter import Tk
    from tkFileDialog import askopenfilename
    # History file lives next to this module's parent directory.
    histdir = os.path.dirname(__file__)
    histname = 'load_hist.pkl'
    if not os.path.exists(histdir):
        histdir = os.getcwd()
    histpath = os.path.join(os.path.dirname(histdir), histname);
    # If it's not there, use CWD.
    hr = readhistory.histReader(histpath, 20)
    prevdir = os.getcwd()
    # Start the dialog in the most recent history path that still exists.
    for p in hr.histpaths:
        if os.path.exists(p):
            prevdir = p
            break
    if fpath != '' and os.path.exists(fpath):
        filename = fpath
    else:
        Tk().withdraw(); # No root window.
        filename = askopenfilename(defaultextension='.mat', initialdir=prevdir)
    fe = os.path.splitext(filename)[1]
    if fe.lower() != '.mat':
        raise Exception('Invalid file extension' + fe)
    else:
        # squeeze_me/struct_as_record give attribute-style MATLAB structs.
        out = scipy.io.loadmat(filename, struct_as_record=False, squeeze_me=True)
        hr.addpath(os.path.dirname(os.path.abspath(filename)))
        hr.update()
    return out
|
22,181 | 92004711c388400512c369ca995890432c8ca865 | #!/usr/bin/env python
import argparse
import getpass
import os
import sys
import nexpose as nx
# Python 2 script (print statements, raw_input): interactive login against
# a Nexpose server via the local `nexpose` client module.
# Default the CA bundle from the environment when present.
try:
    cert_store = os.environ['REQUESTS_CA_BUNDLE']
except KeyError:
    cert_store = None
p = argparse.ArgumentParser()
p.add_argument('--server')
p.add_argument('--cert-store', help='path to trusted CA certs', default=cert_store)
p.add_argument('--validate-certs', help='flag indicating server certs should be validated', action='store_true')
args = p.parse_args()
# The '\n'*10 scrolls previous terminal content away before prompting.
user = raw_input('\n'*10 + 'Username: ')
pwd = getpass.getpass()
nxclient = nx.Client(server=args.server, username=user, password=pwd, cert_store=args.cert_store)
# Token printed before and after login to show it being populated.
print 'test.py:nx token: ', nxclient.authtoken
nxclient.login()
print 'test.py:nx token: ', nxclient.authtoken
22,182 | bf9ed382c2e5bb38cb4c12baafea9c712849a6ce | from collections import OrderedDict
class LRUCache():
    """Least-recently-used cache built on an OrderedDict.

    The most recently touched key is kept at the front of the ordering;
    when capacity is exceeded the entry at the back (the LRU one) is
    popped.  Misses return '' rather than raising.
    """
    def __init__(self, capacity=10):
        self.capacity = capacity
        self.cache = OrderedDict()
    def get(self, key):
        """Return the cached value for *key*, refreshing it, or '' on a miss."""
        if key not in self.cache:
            return ''
        self.cache.move_to_end(key, last=False)
        return self.cache[key]
    def set(self, key, value):
        """Insert or overwrite *key*, evicting the LRU entry when over capacity."""
        self.cache[key] = value
        self.cache.move_to_end(key, last=False)
        if len(self.cache) > self.capacity:
            self.cache.popitem()  # pops from the back = least recently used
    def remove(self, key):
        """Drop *key* if present; unknown keys are ignored."""
        if key in self.cache:
            del self.cache[key]
class LFUCacheEntry():
    """Value wrapper that counts how often an entry has been read."""
    def __init__(self, data):
        self.data = data
        self.freq = 0
    def increment_freq(self):
        self.freq += 1
class LFUCache():
    """Least-frequently-used cache.  Misses return '' rather than raising."""
    def __init__(self, capacity=10):
        self.capacity = capacity
        self.cache = OrderedDict()
    def get(self, key):
        """Return the value for *key* (bumping its frequency), or '' on a miss."""
        if key in self.cache:
            self.cache[key].increment_freq()
            return self.cache[key].data
        else:
            return ''
    def set(self, key, value):
        """Store *key*, evicting the least frequently used entry when full.

        BUG FIX: the previous code sorted ascending and took [-1] — the
        MOST frequently used key — and then passed the whole (key, entry)
        tuple to remove(), whose membership test never matched, so nothing
        was ever evicted and the cache grew without bound.
        """
        if len(self.cache) >= self.capacity and key not in self.cache:
            key_to_remove = min(self.cache.items(), key=lambda item: item[1].freq)[0]
            self.remove(key_to_remove)
        self.cache[key] = LFUCacheEntry(value)
    def remove(self, key):
        """Drop *key* if present; unknown keys are ignored."""
        if key in self.cache:
            del self.cache[key]
|
22,183 | 00fa7bc878e32ab461faf0549854b7b9b998fec2 | # Generated by Django 3.1.3 on 2021-01-03 18:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds `time` and `timestep` to UserAction and re-points its `user`
    FK at SimulationPlayer (nullable, related_name='actions')."""
    dependencies = [
        ('simulations', '0005_useraction_simulation'),
    ]
    operations = [
        migrations.AddField(
            model_name='useraction',
            name='time',
            # Nullable so existing rows migrate without a default.
            field=models.DateTimeField(null=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='useraction',
            name='timestep',
            field=models.CharField(default='', max_length=100),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='useraction',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='actions', to='simulations.simulationplayer'),
        ),
    ]
|
22,184 | 48ee00724e1378bb9911c641797ac2519ea111ac | from __future__ import division
import os
import sys
import random
import glob
import string
import pygame
from pygame.locals import *
# Various setup stuff
# Python 2 script (raw_input, list-returning map).  Layout constants:
bgcol = (196, 196, 196)
textPos = (1,1)
imgPos = (0,20)
# Flicker timing in ms: image shown / blank shown.
timeImage = 300
timeBlank = 200
# Per-subject data file, kept open for the whole session.
subject = raw_input('Enter subject ID here: ')
FILE = open(os.path.join('data', subject + '.txt'), 'w')
FILE.write('Subject: %s\n' % subject)
pygame.init()
pygame.mixer.init()
pygame.event.set_grab(1)
SCREEN = pygame.display.set_mode((600,420), 16)
FONT = pygame.font.Font(None, 28)
# Pre-rendered prompt/feedback text surfaces.
textSpace = FONT.render('Press SPACE when you see the change.', 1, (0,0,190))
textClick = FONT.render('Click on the location of the change.', 1, (0,0,190))
textFBbad = FONT.render('Wrong location!', 1, (160,0,0))
textFBgood = FONT.render('Right!', 1, (0,160,0))
# Stimulus images: imgA/<name>.jpg originals; imageNames strips dir+extension.
images = glob.glob(os.path.join('imgA', '*.jpg'))
imageNames = map(lambda fn: string.split(string.split(fn, os.sep)[1], '.')[0], images)
def quit():
    # Close the subject's data file and exit the whole experiment.
    # NOTE: shadows the builtin quit(); relies on the module-level FILE.
    FILE.close()
    sys.exit(0)
def waitForKey():
    """Block until SPACE is pressed; ESC aborts the whole experiment."""
    waiting = True
    while waiting:
        for event in pygame.event.get():
            if event.type == KEYDOWN and event.key == K_SPACE:
                waiting = False
            if event.type == KEYDOWN and event.key == K_ESCAPE:
                quit()
def readBox(imfile):
    """Read the change-region box for *imfile* from boxes/<imfile>.txt.

    The file holds two comma-separated integer pairs (corner coordinates),
    one per line; returns them as [[x0, y0], [x1, y1]].
    """
    # FIX (portability): string.split() and a bare map() are Python 2 only;
    # str.split plus a list comprehension behaves identically there and
    # also works on Python 3.  The `with` block guarantees the file closes.
    box = [[],[]]
    with open(os.path.join('boxes', imfile+'.txt'), 'r') as boxfile:
        box[0] = [int(v) for v in boxfile.readline().strip().split(',')]
        box[1] = [int(v) for v in boxfile.readline().strip().split(',')]
    return(box)
def waitForClick():
    """Block until a mouse click; ESC aborts.  Returns the click position
    translated from screen to image coordinates."""
    awaiting = True
    while awaiting:
        for event in pygame.event.get():
            if event.type == MOUSEBUTTONDOWN:
                screen_pos = event.pos
                awaiting = False
            if event.type == KEYDOWN and event.key == K_ESCAPE:
                quit()
    # Shift by the image origin so the caller gets image-relative coords.
    return [screen_pos[0] - imgPos[0], screen_pos[1] - imgPos[1]]
def showText(SCREEN, text, pos=textPos):
    """Clear the screen, render *text* in black at *pos*, and flip."""
    SCREEN.fill(bgcol)
    rendered = FONT.render(text, 1, (0, 0, 0))
    SCREEN.blit(rendered, pos)
    pygame.display.flip()
def runTrial(SCREEN, imfile):
    """Run one change-blindness trial for image pair imgA/imgB <imfile>.jpg.

    Flickers image1 / blank / image2 / blank until the subject presses
    SPACE, then asks for a click on the change location; repeats until the
    click lands inside the box from readBox().  Returns a summary dict
    with timing and attempt counts.
    """
    image1 = pygame.image.load(os.path.join('imgA', imfile+'.jpg'))
    image2 = pygame.image.load(os.path.join('imgB', imfile+'.jpg'))
    clickFrame = readBox(imfile)
    iterations = 0
    attempts = 0
    detected = False
    phase = 0
    totalLoopTime = 0
    timeTrialStart = pygame.time.get_ticks()
    while(not detected):
        keypressed = False
        timeLoopStart = pygame.time.get_ticks()
        # Flicker loop: phase advances 0->1->2->3->0 as modtime crosses each
        # window of the (image, blank, image, blank) cycle.
        while(not keypressed):
            runtime = pygame.time.get_ticks() - timeLoopStart
            modtime = runtime % (timeImage*2 + timeBlank*2)
            if(phase == 0 and modtime>0 and modtime<timeImage):
                # Phase 0 window: show original image.
                SCREEN.fill(bgcol)
                SCREEN.blit(image1, imgPos)
                SCREEN.blit(textSpace, textPos)
                pygame.display.flip()
                phase = 1
            elif(phase == 1 and modtime>timeImage and modtime<(timeImage+timeBlank)):
                # Phase 1 window: blank screen.
                SCREEN.fill(bgcol)
                SCREEN.blit(textSpace, textPos)
                pygame.display.flip()
                phase = 2
            elif(phase == 2 and modtime>(timeImage+timeBlank) and modtime<(timeImage*2+timeBlank)):
                # Phase 2 window: show changed image.
                SCREEN.fill(bgcol)
                SCREEN.blit(image2, imgPos)
                SCREEN.blit(textSpace, textPos)
                pygame.display.flip()
                phase = 3
            elif(phase == 3 and modtime>(timeImage*2+timeBlank)):
                # Phase 3 window: blank again; one full cycle completed.
                SCREEN.fill(bgcol)
                SCREEN.blit(textSpace, textPos)
                pygame.display.flip()
                phase = 0
                iterations += 1
                attempts += 1
            for event in pygame.event.get():
                if (event.type == KEYDOWN and event.key == K_SPACE):
                    keypressed = True
                if (event.type == KEYDOWN and event.key == K_ESCAPE):
                    quit()
        totalLoopTime += runtime
        # Ask where the change was, with the original image on screen.
        SCREEN.fill(bgcol)
        SCREEN.blit(image1, imgPos)
        SCREEN.blit(textClick, textPos)
        pygame.display.flip()
        clickPos = waitForClick()
        SCREEN.fill(bgcol)
        SCREEN.blit(image1, imgPos)
        # Accept the click only if it falls strictly inside the change box.
        if(clickPos[0] > clickFrame[0][0] and clickPos[0] < clickFrame[1][0] and clickPos[1] > clickFrame[0][1] and clickPos[1] < clickFrame[1][1]):
            detected = True
            SCREEN.blit(textFBgood, textPos)
        else:
            SCREEN.blit(textFBbad, textPos)
        pygame.display.flip()
        pygame.time.wait(250)
    totalTrialTime = pygame.time.get_ticks() - timeTrialStart
    return({'image': imfile, 'trialTime': totalTrialTime, 'loopTime': totalLoopTime, 'iterations': iterations, 'attempts':attempts})
# Main experiment flow: instructions, shuffled trials, results, goodbye.
showText(SCREEN, 'Welcome to our experiment!', textPos)
waitForKey()
showText(SCREEN, 'Press the space bar when you see the change', textPos)
waitForKey()
# BUG FIX: the trial loop iterates imageNames, so the shuffle must act on
# that list; shuffling `images` after deriving the names had no effect.
random.shuffle(imageNames)
first = True
for imfile in imageNames:
    showText(SCREEN, 'Press space for the next trial!', textPos)
    waitForKey()
    trialOutput = runTrial(SCREEN, imfile)
    header = ''
    line = ''
    for k,v in trialOutput.iteritems():
        header += k + '\t'
        # BUG FIX: most trial values are ints; concatenating them directly
        # to '\t' raised TypeError.  Stringify explicitly.
        line += str(v) + '\t'
    if first:
        first = False
        FILE.write(header+'\n')
    FILE.write(line+'\n')
showText(SCREEN, 'Thank you for your participation!', textPos)
waitForKey()
quit()
|
22,185 | 01dc46b23c9605f0229f11342d57796a17bb4e62 |
# asyncio does not provide an HTTP-protocol interface; use aiohttp for that.
import socket
from urllib.parse import urlparse
import asyncio
async def get_html(url):
    """Fetch *url* over a raw TCP connection on port 80 and return the
    complete HTTP response (status line, headers and body) as one string."""
    parsed = urlparse(url)
    host = parsed.netloc
    path = parsed.path or '/'
    reader, writer = await asyncio.open_connection(host, 80)
    request = "GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(path, host)
    writer.write(request.encode("utf8"))
    chunks = []
    async for raw_line in reader:
        chunks.append(raw_line.decode("utf-8"))
    return ''.join(chunks)
async def main():
    """Launch 20 concurrent fetches of the same page and print each
    response as soon as it completes."""
    tasks = [asyncio.ensure_future(get_html("http://www.baidu.com"))
             for _ in range(20)]
    for pending in asyncio.as_completed(tasks):
        # as_completed yields awaitables; await each to get its result
        print(await pending)
if __name__ == '__main__':
    import time

    started = time.time()
    loop = asyncio.get_event_loop()
    # Two alternative patterns (kept from the original notes):
    # 1) schedule bare coroutines and loop.run_until_complete(asyncio.wait(...))
    # 2) wrap them in tasks via asyncio.ensure_future(...) first, then read
    #    task.result() after the loop finishes — wait() accepts both tasks
    #    and futures.
    # Here main() instead creates its own tasks and awaits them itself.
    loop.run_until_complete(main())
    print(time.time() - started)
|
22,186 | 8c5cc46bba96f17ff7d61af3f1b4476f7d936193 | # The purpose of the app is to connect with instance 1 and receive user name from it
# harvest tweets via user timeline, then put the raw data to raw_result database
# and processed data to final_result database
# Author: Group 10
import socket
import tweepy
import pycouchdb
import preprocess

# Read the deployment host list, one address per line, from the 'hosts' file.
host = []
with open('hosts', 'r') as config:
    for line in config:
        host.append(line.strip())
print(host[1])
print(host[4])

# Connect to the CouchDB database and to instance 1; retry until both succeed.
while True:
    try:
        couch = pycouchdb.Server('http://admin:coconut@' + host[1] + ':5984/')
        # Connect to instance 1 according to IP address and port number
        obj = socket.socket()
        obj.connect((host[4], 5985))
        if obj is not None and couch is not None:
            break
    # BUG FIX: a bare 'except:' also swallowed KeyboardInterrupt/SystemExit,
    # which made this retry loop impossible to stop with Ctrl-C.
    except Exception:
        couch = None
        obj = None

# Open the raw and processed result databases, creating them on first run.
try:
    raw_db = couch.database('ccc_ass_2')
except Exception:
    raw_db = couch.create('ccc_ass_2')
try:
    db = couch.database('final_result')
except Exception:
    db = couch.create('final_result')

# User2 App1 API token and keys
# SECURITY NOTE: credentials are hard-coded in the source; they should be
# moved to environment variables or an untracked config file.
consumer_key = "RaHCJytQzL28DoOg11vPm6OQt"
consumer_secret = "ByvDTH5y5tt7gcGb3wSBjlwjguwVPuBRlwa7xkJTYQdPcwwvhY"
access_token = "1127769015684718592-X8GZcq53QrPsDzak2uspMg0SevsK17"
access_token_secret = "fO7Sg7KBz22hL3zwJWNqbEajfFTU1YKN1SVqA01BPMa28"

# Access to the endpoint and input authentication information
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Keep logging the error messages for debug
log = 'log_client.log'
logfile = open(log, "a")

# Keep listening for user names from instance 1, collect their tweets via
# the user-timeline API, save raw documents to raw_db and the pre-processed
# form to the final_result database.
while True:
    ret = str(obj.recv(1024), encoding="utf-8")
    print(ret)
    stuff = []
    try:
        stuff = api.user_timeline(screen_name=ret, count=1000, include_rts=True)
    except Exception as e:
        logfile.write(str(e) + "\n")
        logfile.write(ret + "\n")
    for timeline in stuff:
        try:
            raw_db.save(timeline._json)
        except Exception as e:
            logfile.write(str(e) + "\n")
        sjson = preprocess.transfer(timeline._json)
        if sjson:
            try:
                db.save(sjson)
            except Exception as e:
                logfile.write(str(e) + "\n")
                logfile.write(str(sjson) + "\n")
|
22,187 | ec4fac187d979417f12097a4657a99f88372c7a5 | # -*-coding:utf-8-*-
class FinalList:
    """Container for hard-coded list constants."""
    # Placeholder "dirty" names list (original comment was mojibake-garbled
    # Chinese; roughly: names anonymous friends cannot enter).
    DIRTY_LIST = ['1', '2', '3']
class FinalMap:
    """Container for hard-coded map constants."""
    # Province name -> vector-map region code ("CN-NN", GB/T 2260-style
    # two-digit province codes). Hong Kong, Macao and Taiwan are absent.
    # NOTE(review): the Fujian entry maps to 'CN-' with the numeric part
    # missing — presumably it should be 'CN-35'; confirm against the
    # vector-map code table before fixing.
    VECTOR_MAP_ADDRESS_CODE_MAP = {'ๆฑ่็': 'CN-32', '่ดตๅท็': 'CN-52', 'ไบๅ็': 'CN-53', '้ๅบๅธ': 'CN-50', 'ๅๅท็': 'CN-51', 'ไธๆตทๅธ': 'CN-31', '่ฅฟ่': 'CN-54', 'ๆตๆฑ็': 'CN-33', 'ๅ
่ๅค': 'CN-15', 'ๅฑฑ่ฅฟ็': 'CN-14', '็ฆๅปบ็': 'CN-', 'ๅคฉๆดฅๅธ': 'CN-12', 'ๆฒณๅ็': 'CN-13', 'ๅไบฌๅธ': 'CN-11', 'ๅฎๅพฝ็': 'CN-34', 'ๆฑ่ฅฟ็': 'CN-36', 'ๅฑฑไธ็': 'CN-37', 'ๆฒณๅ็': 'CN-41', 'ๆนๅ็': 'CN-43', 'ๆนๅ็': 'CN-42', 'ๅนฟ่ฅฟ': 'CN-45', 'ๅนฟไธ็': 'CN-44', 'ๆตทๅ็': 'CN-46', 'ๆฐ็': 'CN-65', 'ๅฎๅค': 'CN-64', '้ๆตท็': 'CN-63', '็่็': 'CN-62', '้่ฅฟ็': 'CN-61', '้ป้พๆฑ็': 'CN-23', 'ๅๆ็': 'CN-22', '่พฝๅฎ็': 'CN-21'}
|
22,188 | c9e9c958c9b33fde3e3697a6ea8b0ca55e1ae30a | CROSSREF_URL = 'https://api.crossref.org/works'
ENA_URL = 'https://www.ebi.ac.uk/ena/browser/api/xml'
ENA_SEARCH_URL = 'https://www.ebi.ac.uk/ena/portal/api/search'
# NCBI entrez urls
NCBI_LINK_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'
NCBI_SEARCH_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
NCBI_SUMMARY_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi'
NCBI_FETCH_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
# TODO: replace all of the uses of these URLS to the general NCBI ones
GSE_SEARCH_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gds&term='
GSE_SEARCH_TERMS = '%5bGEO%20Accession&retmax=1&retmode=json'
GSE_SUMMARY_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gds&id='
GSE_SUMMARY_TERMS = '&retmode=json'
|
22,189 | e50a01b0320ca634d4ab36c038b4c389dafefee3 | import numpy as np
def print_wires(origin, wires):
    """Render the wire occupancy grids as ASCII art on stdout.

    Each wire is drawn with its 1-based index, the shared origin with 'o',
    empty cells with dots.  The grid is transposed and flipped so that y
    grows upward.
    """
    canvas = np.full(wires.shape[1:], fill_value='.')
    for index, layer in enumerate(wires, 1):
        canvas[layer] = str(index)
    canvas[origin[0], origin[1]] = 'o'
    rows = [''.join(ch for ch in column) for column in canvas.T[::-1]]
    print('\n'.join(rows))
def get_wires(inp):
    """Parse the puzzle input into boolean occupancy grids.

    Parameters
    ----------
    inp : str
        One wire per line; each line is a comma-separated list of moves
        like 'R8,U5,L5,D3'.

    Returns
    -------
    origin : ndarray of shape (2,)
        Grid index of the shared starting point.
    wires : bool ndarray of shape (nwire, nx, ny)
        True wherever the corresponding wire passes.

    Raises
    ------
    ValueError
        If a move uses a direction other than R/L/U/D.
    """
    rows = []
    for line in inp.splitlines():
        pos = np.zeros((1, 2), dtype=int)
        row = [pos]
        for word in line.split(','):
            direction, length = word[0], int(word[1:])
            piece = np.zeros((length, 2), dtype=int)
            steps = np.arange(1, length + 1)
            if direction == 'R':
                piece[:, 0] = steps
            elif direction == 'L':
                piece[:, 0] = -steps
            elif direction == 'U':
                piece[:, 1] = steps
            elif direction == 'D':
                piece[:, 1] = -steps
            else:
                # BUG FIX: was `assert False`, which is stripped under
                # `python -O`; raise a real exception instead.
                raise ValueError(f'Invalid direction {direction}!')
            row.append(pos + piece)
            pos += piece[-1, :]
        rows.append(np.concatenate(row))
    # find common bounding box, construct ndarray with shape (nwire, posx, posy)
    minvals = np.min([row.min(0) for row in rows], 0)
    maxvals = np.max([row.max(0) for row in rows], 0)
    size = (len(rows), maxvals[0] - minvals[0] + 1, maxvals[1] - minvals[1] + 1)
    origin = -minvals
    wires = np.zeros(size, dtype=bool)
    # True where there's a wire, False where there's none
    for i, row in enumerate(rows):
        shifted = row - minvals
        wires[i, shifted[:, 0], shifted[:, 1]] = True
    return origin, wires
def get_pathlengths(inp, origin, wires):
    """Brute-force per-cell path lengths (reference solver; memory heavy).

    Returns an int array of shape (nwire, nx, ny) holding, for each cell,
    the fewest steps the wire needs to first reach it, or -1 for cells the
    wire never visits.  Raises KeyError on an invalid direction letter.
    """
    # float array so np.inf can act as the "unvisited" sentinel for minimum
    lengths = np.full(wires.shape, fill_value=np.inf)
    lengths[:, origin[0], origin[1]] = 0
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    for i, line in enumerate(inp.splitlines()):
        pos = origin.copy()
        pathlen = 0
        for word in line.split(','):
            dx, dy = deltas[word[0]]
            length = int(word[1:])
            steps = np.arange(1, length + 1)
            xs = pos[0] + dx * steps
            ys = pos[1] + dy * steps
            # np.minimum so revisited cells keep their FIRST arrival length
            lengths[i, xs, ys] = np.minimum(pathlen + steps, lengths[i, xs, ys])
            pos += [dx * length, dy * length]
            pathlen += length
    # unmask the inf sentinel and return integer lengths
    lengths[np.isinf(lengths)] = -1
    # BUG FIX: was astype(np.uint8), which overflows for paths longer than
    # 255 steps and turns the -1 sentinel into 255.
    return lengths.astype(np.int64)
def walk_lines(inp, origin, crosses):
    """Walk each wire cell by cell and, for every crossing, sum the number
    of steps both wires need to first reach it.  Returns one combined
    length per crossing."""
    crossing_set = set(zip(*crosses))
    per_wire = []  # one {crossing: steps-to-first-visit} dict per wire
    for line in inp.splitlines():
        pos = origin.copy()
        steps_taken = 0
        reached = {}
        visited = set()
        for word in line.split(','):
            letter, count = word[0], int(word[1:])
            if letter == 'R':
                move = [1, 0]
            elif letter == 'L':
                move = [-1, 0]
            elif letter == 'U':
                move = [0, 1]
            elif letter == 'D':
                move = [0, -1]
            for _ in range(count):
                pos += move
                steps_taken += 1
                here = tuple(pos)
                if here in visited:
                    continue  # only the first visit counts
                visited.add(here)
                if here in crossing_set:
                    reached[here] = steps_taken
        per_wire.append(reached)
    keys = per_wire[0].keys()
    lengths = [[wire[k] for k in keys] for wire in per_wire]
    return [sum(pair) for pair in zip(*lengths)]
def day03(inp):
    """Solve both puzzle parts.

    Part 1: Manhattan distance of the crossing closest to the origin.
    Part 2: smallest combined wire length to any crossing.
    """
    origin, wires = get_wires(inp)
    #print_wires(origin, wires)
    # wires is ~ 700 MB for real input
    # the shared origin is not a real crossing; mask it before intersecting
    wires[:, origin[0], origin[1]] = False
    crosses = wires.all(0).nonzero()
    manhattan = abs(crosses[0] - origin[0]) + abs(crosses[1] - origin[1])
    part1 = manhattan[manhattan.argmin()]
    # the brute-force get_pathlengths() solver needs a float grid per wire
    # and runs out of memory on real input, so walk each wire linearly
    part2 = min(walk_lines(inp, origin, crosses))
    return part1, part2
if __name__ == "__main__":
    # BUG FIX: close the input files deterministically instead of leaking
    # the handles returned by bare open() calls.
    with open('day03.testinp') as f:
        print(day03(f.read()))
    with open('day03.inp') as f:
        print(day03(f.read()))
|
22,190 | 635fb694d2dfc83262dd6a6996ecd50e2dddab15 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def oddEvenList(self, head):
        """
        Regroup the list so nodes at odd positions come first, followed by
        nodes at even positions, preserving relative order in each group.
        Runs in O(n) time and O(1) extra space.

        :type head: ListNode
        :rtype: ListNode
        """
        if head is None or head.next is None or head.next.next is None:
            return head
        odd = head
        even_head = head.next
        even = even_head
        while even is not None and even.next is not None:
            # detach the next odd node and append it to the odd chain
            odd.next = even.next
            odd = odd.next
            # relink the even chain past the node we just moved
            even.next = odd.next
            even = even.next
        # splice the even chain after the last odd node
        odd.next = even_head
        return head
22,191 | 4653ba87244a22f9264f9d5b50ed62340e964d27 | #!/usr/bin/env python3
import os
import re
import time
from termcolor import cprint
import requests
import json
import hashlib
import yaml
from bs4 import BeautifulSoup
from ipdb import set_trace
settings = "settings.yaml"
base_url = "https://www.ebay-kleinanzeigen.de{}"
product_search_url = "https://www.ebay-kleinanzeigen.de/s-suchanfrage.html?keywords={}&categoryId=&locationStr={}&locationId=&radius=0&sortingField=SORTING_DATE&adType=&posterType=&pageNum=1&action=find&maxPrice=&minPrice="
DATA_DIR = "data"
SEARCH_PAGES = 2
class Offer(object):
    """A single classified-ad offer, parsed from its HTML detail page."""

    def __init__(self, offer_html):
        bs = BeautifulSoup(offer_html, 'html.parser')
        # the OpenGraph meta tags carry all the structured offer data
        self.title = bs.find("meta", {'property':"og:title"}).attrs['content']
        self.image_url = bs.find("meta", {'property':"og:image"}).attrs['content']
        self.description = bs.find("meta", {'property':"og:description"}).attrs['content']
        self.url = bs.find("meta", {'property':"og:url"}).attrs['content']
        self.locality = bs.find("meta", {'property':"og:locality"}).attrs['content']
        self.latitude = bs.find("meta", {'property':"og:latitude"}).attrs['content']
        self.longitude = bs.find("meta", {'property':"og:longitude"}).attrs['content']
        try:
            # BUG FIX: raw strings for the regexes (avoids invalid-escape
            # warnings) and a narrow except instead of a bare one, so only
            # the expected "pattern not found" case falls back to price 0.
            self.price = re.findall(r'\d+', re.findall(r'ExactPreis": "\d+"', offer_html)[0])[0]
        except IndexError:
            self.price = 0

    def __repr__(self):
        return self.title
class EbayKleinanzeigen(object):
    """Crawler that runs every search configured in settings.yaml and
    collects newly appeared offers into ``self.notifications``."""
    def __init__(self):
        self.create_requests_session()
        self.settings = yaml.safe_load(open(settings))
        self.notifications = []
        for search in self.settings.get('ebay-kleinanzeigen', []):
            cprint("Looking for '{}' in {}".format(search['product'], search['location']), 'magenta')
            self.search(search['product'], search['location'], search.get('max_price', -1))
    def create_requests_session(self):
        """(Re)create the HTTP session with a fixed User-Agent."""
        self.session = requests.Session()
        self.session.headers.update({'User-Agent': 'ebay-kleinanzeigen bod'})
    def search(self, product, location, price):
        """Crawl one product/location search, persist the offers to a JSON
        snapshot under DATA_DIR and diff against the previous snapshot."""
        search_url = self.session.get(product_search_url.format(product, location)).url # follows a redirect
        offers = []
        for offer_html in self.get_offers_as_html(search_url, price, product):
            offer = Offer(offer_html)
            cprint(" Found: '{}'".format(offer.title), 'green')
            offers.append(offer)
        filename = os.path.join(DATA_DIR, "{}-{}.json".format(product, location))
        # first run for this search: seed the snapshot so the diff below
        # does not report everything as new
        if not os.path.exists(filename):
            with open(filename, "w") as f:
                json.dump([o.__dict__ for o in offers], f)
        self.find_new_offers(offers, filename)
    def get_offers_as_html(self, base_search_url, price, product):
        """Yield the raw HTML of each offer's detail page, walking up to
        SEARCH_PAGES result pages and bailing out on bot detection.

        NOTE(review): the nationwide URL uses 'preis:{}' while the
        localized one uses 'preis::{}' — presumably the second colon leaves
        the minimum price empty; confirm against the site's URL scheme.
        """
        parts = base_search_url.split("/")
        if base_search_url.endswith("k0"):
            # deutschland-weite Suche (nationwide search)
            search_url = "https://www.ebay-kleinanzeigen.de/s-anzeige:angebote/seite:{}/preis:{}/%s/k0" % product
        else:
            search_url = "/".join(parts[:len(parts)-2]) + \
                "/anzeige:angebote/" + \
                "seite:{}/" + \
                "preis::{}/" + \
                "/".join(parts[len(parts)-2:])
        print(search_url)
        for i in range(1, SEARCH_PAGES+1):
            print(" Looking at result page {}".format(i))
            resp = self.session.get(search_url.format(i, price), allow_redirects=False)
            # empty result page or redirect means no (further) hits
            if "Es wurden keine Anzeigen fรผr" in resp.text or resp.status_code == 302:
                print(" Nothing found here")
                return
            if "You look like a robot" in resp.text:
                cprint("Bot detection. What helped for me: use ipv4 instead of ipv6 (don't know why). Just put the ipv4 addresses for www.ebay-kleinanzeigen.de in /etc/hosts", "red")
                exit()
            # everything after "Alternative Anzeigen" is unrelated filler
            relevant_html = resp.text.split("Alternative Anzeigen")[0]
            bs = BeautifulSoup(relevant_html, 'html.parser')
            offer_links = [x['data-href'] for x in bs.findAll('div', {'class': 'imagebox srpimagebox'})]
            for offer_url in offer_links:
                resp = self.session.get(base_url.format(offer_url))
                if resp.status_code == 429:
                    cprint("Bot error: Too many requests \n{}\n{}".format(resp.headers, resp.text), 'red')
                    return
                yield resp.text
            # fresh session per result page to look less bot-like
            self.create_requests_session()
    def find_new_offers(self, offers, filename):
        """Diff *offers* against the stored snapshot by URL, notify about
        unseen ones, then overwrite the snapshot."""
        print("Comparing the crawled offers with the last ones")
        offers_last_state = json.load(open(filename))
        offer_urls_last_time = [o['url'] for o in offers_last_state]
        for offer in offers:
            if offer.url not in offer_urls_last_time:
                self.notify_test(offer)
                self.notifications.append(offer)
        with open(filename, "w") as f:
            json.dump([o.__dict__ for o in offers], f)
    def notify_test(self, offer):
        """Console notification hook for a newly discovered offer."""
        cprint(" Found new offer: {}".format(offer.url), 'cyan')
if __name__ == '__main__':
    # Running the module directly performs a single crawl (side effects only).
    EbayKleinanzeigen()
def go():
    """Crawl once and return the newly found offers as display strings
    (entry point for an external scheduler/notifier)."""
    ebk = EbayKleinanzeigen()
    return ['Ebay Kleinanzeigen: {} fรผr {} EUR {}'.format(x.title, x.price, x.url) for x in ebk.notifications]
|
22,192 | 876bca86ea7324795dbd93857aedf09a6f88d4a7 | from app.utility.base_world import BaseWorld
from plugins.vue.app.vue_gui import VueGUI
from plugins.vue.app.vue_api import VueAPI
# Plugin metadata consumed by the host application's plugin loader.
name = 'Vue'
description = 'Testing out a plugin with vue'
address = '/plugin/vue/gui'
# access = BaseWorld.Access.RED
async def enable(services):
    """Register the plugin's static assets, GUI splash page and API routes
    on the shared aiohttp application."""
    application = services.get('app_svc').application
    gui = VueGUI(services, name=name, description=description)
    application.router.add_static('/vue', 'plugins/vue/templates/vue', append_version=True)
    application.router.add_route('GET', '/plugin/vue/gui', gui.splash)
    api = VueAPI(services)
    # Add API routes here
    application.router.add_route('POST', '/plugin/vue/mirror', api.mirror)
|
22,193 | 2c5204ab169340ca7a05ea72d6d7fd463e6e09e4 | #!/usr/bin/env python
import tf
from tf import TransformListener
import roslib, rospy
from control import acker
from mpl_toolkits.mplot3d import Axes3D
from itertools import product, combinations
import sys
import math
from math import pi as PI
from math import atan2, sin, cos, sqrt, fabs, floor
from scipy.misc import factorial as ft
from scipy.misc import comb as nCk
from numpy import linalg as LA
from numpy.linalg import inv
import matplotlib.patches
import matplotlib.pyplot as plt
import time
import numpy as np
from cvxopt import matrix, solvers
import pickle #save multiple objects
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import String
from crazyflie_demo.msg import Kalman_pred
from sensor_msgs.msg import Imu
def norm(p):
    """Return the Euclidean norm of the vector ``p``."""
    return sqrt(sum(component ** 2 for component in p))
class Kalman():
    """Per-drone Kalman filter node.

    Fuses motion-capture position (and optionally IMU acceleration) into a
    per-axis [position, velocity, acceleration] state estimate, republished
    on /crazyflie<i>/estimator as a Kalman_pred message.
    """
    def __init__(self, i):
        self.worldFrame = rospy.get_param("~worldFrame", "/world")
        self.frame = 'crazyflie%d' %i
        self.index = i
        self.imunew = 0 # see if new data comes
        self.posnew = 0
        self.time0 = rospy.Time.now()
        self.time1 = rospy.Time.now()
        sub_imu_data = '/crazyflie%d/imu' %i
        sub_pos_data = '/crazyflie%d/pos' %i
        #self.subImu = rospy.Subscriber(sub_imu_data, Imu, self.ImuCallback)
        #self.subpos = rospy.Subscriber(sub_pos_data, PoseStamped, self.posCallback)
        pub_name = '/crazyflie%d/estimator' %i
        self.pubEst = rospy.Publisher(pub_name, Kalman_pred, queue_size=1)
        # latest measurements; filled by the callbacks (or by the offline
        # replay loop in __main__)
        self.position = []
        self.orientation = []
        self.Imu = []
        # per-axis state vectors [pos, vel, acc]; empty until first update
        self.X = []
        self.Y = []
        self.Z = []
        self.Xvar = []
        self.Yvar = []
        self.Zvar = []
        # noise and covariance matrices, initialized lazily on first update
        self.Q = []
        self.R = []
        self.SX = []
        self.SY = []
        self.SZ = []
    def EstnoImu(self):
        """One Kalman predict/update cycle using the position measurement
        only (measurement matrix picks out the position component)."""
        dt = 0.02
        newX = [self.position[0]]
        newY = [self.position[1]]
        newZ = [self.position[2]]
        # lazy initialization on the first call: seed state with the first
        # measured position and zero velocity/acceleration
        if len(self.X) == 0:
            self.X = np.array([self.position[0], 0, 0])
            self.Y = np.array([self.position[1], 0, 0])
            self.Z = np.array([self.position[2], 0, 0])
            self.Q = np.array([[1e-4, 1e-6, 1e-6],[1e-6, 1e-4, 1e-6],[1e-6, 1e-6, 1e-1]]) # dynamic noise
            self.R = np.array([1e-8]) # measurement noise
            self.SX = self.Q # state estimate noise
            self.SY = self.Q # state estimate noise
            self.SZ = self.Q # state estimate noise
        Phi = np.array([[1, dt, 0],[0, 1, dt],[0,0,1]]) # state transition matrix
        M = np.array([1,0,0]) # measurement matrix
        # prediction step
        self.X = np.dot(Phi,self.X)
        self.Y = np.dot(Phi,self.Y)
        self.Z = np.dot(Phi,self.Z)
        self.SX = Phi.dot(self.SX).dot(Phi.transpose()) + self.Q
        self.SY = Phi.dot(self.SY).dot(Phi.transpose()) + self.Q
        self.SZ = Phi.dot(self.SZ).dot(Phi.transpose()) + self.Q
        # Kalman gain
        KX = self.SX.dot(M.transpose())*1/( M.dot(self.SX).dot(M.transpose())+self.R )
        KY = self.SY.dot(M.transpose())*1/( M.dot(self.SY).dot(M.transpose())+self.R )
        KZ = self.SZ.dot(M.transpose())*1/( M.dot(self.SZ).dot(M.transpose())+self.R )
        # update
        self.X = self.X + KX*(newX-M.dot(self.X))
        self.Y = self.Y + KY*(newY-M.dot(self.Y))
        self.Z = self.Z + KZ*(newZ-M.dot(self.Z))
        self.SX = self.SX - np.outer(KX,M.dot(self.SX))
        self.SY = self.SY - np.outer(KY,M.dot(self.SY))
        self.SZ = self.SZ - np.outer(KZ,M.dot(self.SZ))
        # publish state and flattened covariance per axis
        data = Kalman_pred()
        data.predX.x = self.X[0]
        data.predX.y = self.X[1]
        data.predX.z = self.X[2]
        data.varX = self.SX.flatten()
        data.predY.x = self.Y[0]
        data.predY.y = self.Y[1]
        data.predY.z = self.Y[2]
        data.varY = self.SY.flatten()
        data.predZ.x = self.Z[0]
        data.predZ.y = self.Z[1]
        data.predZ.z = self.Z[2]
        data.varZ = self.SZ.flatten()
        self.pubEst.publish(data)
    def EstImu(self):
        """One Kalman predict/update cycle fusing position AND IMU
        acceleration (2-row measurement matrix); no-op until both a new
        position and a new IMU sample have arrived."""
        dt = 0.02#(self.time1-self.time0).to_sec()
        if self.imunew == 0 or self.posnew == 0 or dt == 0:
            return 0
        newX = np.array([self.position[0], self.Imu[0]])
        newY = np.array([self.position[1], self.Imu[1]])
        newZ = np.array([self.position[2], self.Imu[2]])
        # lazy initialization on the first call (different noise tuning
        # than the position-only filter above)
        if len(self.X) == 0:
            dt = 0.02
            self.X = np.array([self.position[0], 0, 0])
            self.Y = np.array([self.position[1], 0, 0])
            self.Z = np.array([self.position[2], 0, 0])
            self.Q = np.array([[1e-8, 1e-12, 1e-12],[1e-12, 1e-8, 1e-12],[1e-12, 1e-12, 1e-2]]) # dynamic noise
            self.R = np.array([[1e-6, 1e-12],[1e-12,1e-1]]) # measurement noise
            self.SX = self.Q # state estimate noise
            self.SY = self.Q # state estimate noise
            self.SZ = self.Q # state estimate noise
        Phi = np.array([[1, dt, 0],[0, 1, dt],[0,0,1]]) # state transition matrix
        M = np.array([[1,0,0],[0,0,1]]) # measurement matrix
        # prediction step
        self.X = np.dot(Phi,self.X)
        self.Y = np.dot(Phi,self.Y)
        self.Z = np.dot(Phi,self.Z)
        self.SX = Phi.dot(self.SX).dot(Phi.transpose()) + self.Q
        self.SY = Phi.dot(self.SY).dot(Phi.transpose()) + self.Q
        self.SZ = Phi.dot(self.SZ).dot(Phi.transpose()) + self.Q
        # Kalman gain
        KX = self.SX.dot(M.transpose()).dot(inv( M.dot(self.SX).dot(M.transpose())+self.R ))
        KY = self.SY.dot(M.transpose()).dot(inv( M.dot(self.SY).dot(M.transpose())+self.R ))
        KZ = self.SZ.dot(M.transpose()).dot(inv( M.dot(self.SZ).dot(M.transpose())+self.R ))
        # update
        self.X = self.X + KX.dot(newX-M.dot(self.X))
        self.Y = self.Y + KY.dot(newY-M.dot(self.Y))
        self.Z = self.Z + KZ.dot(newZ-M.dot(self.Z))
        self.SX = self.SX - KX.dot(M.dot(self.SX))
        self.SY = self.SY - KY.dot(M.dot(self.SY))
        self.SZ = self.SZ - KZ.dot(M.dot(self.SZ))
        # publish state and flattened covariance per axis
        data = Kalman_pred()
        data.predX.x = self.X[0]
        data.predX.y = self.X[1]
        data.predX.z = self.X[2]
        data.varX = self.SX.flatten()
        data.predY.x = self.Y[0]
        data.predY.y = self.Y[1]
        data.predY.z = self.Y[2]
        data.varY = self.SY.flatten()
        data.predZ.x = self.Z[0]
        data.predZ.y = self.Z[1]
        data.predZ.z = self.Z[2]
        data.varZ = self.SZ.flatten()
        self.pubEst.publish(data)
        self.imunew = 0 # reset flags
        self.posnew = 0
        self.time0 = self.time1
    def ImuCallback(self, sdata):
        """Store a new IMU sample (y flipped, gravity removed from z) and
        trigger a fused update attempt."""
        acc = sdata.linear_acceleration
        self.Imu = np.array([acc.x, -acc.y, 9.8-acc.z])
        self.imunew = 1
        self.time1 = sdata.header.stamp
        self.EstImu()
    def posCallback(self, sdata):
        """Store a new mocap position and trigger a fused update attempt."""
        self.position = np.array([sdata.pose.position.x, sdata.pose.position.y, sdata.pose.position.z])
        self.posnew = 1
        self.time1 = sdata.header.stamp
        self.EstImu()
    def updatepos(self):
        """Stubbed-out TF-based position lookup (disabled; kept for
        reference)."""
        t = 0
        #t = self.listener.getLatestCommonTime(self.worldFrame, self.frame)
        #if self.listener.canTransform(self.worldFrame, self.frame, t):
        #    self.position, quaternion = self.listener.lookupTransform(self.worldFrame, self.frame, t)
        #    rpy = tf.transformations.euler_from_quaternion(quaternion)
        #    self.orientation = rpy
if __name__ == '__main__':
    # Offline replay: feed recorded mocap positions and IMU samples from a
    # pickled flight log through the filter instead of live ROS topics.
    # init all params, publisers, subscrivers
    rospy.init_node('cf_Estimator', anonymous = True)
    N = rospy.get_param("~cfnumber", 2) # total number of cfs
    dt = 0.02
    f = open('/home/li/quad_ws/src/crazyflie_ros-master/crazyflie_demo/scripts/cf1_Diff_Flat20170421fbimu10.pckl')
    t_hist, p_hist, phat_hist, ptrack_hist, u_hist, uhat_hist, cmd_hist, cmdreal_hist, rpytrack_hist, imu_hist = pickle.load(f)
    f.close()
    NN = len(t_hist) # number of data points, data rate 50Hz
    imu = np.zeros((NN,3))
    cfs = dict()
    print '----Kalman filter started!!!----'
    for i in range(N):
        cfs[i] = Kalman(i)
    # replay every logged sample through crazyflie 0's filter, marking both
    # measurement flags so EstImu() actually runs each step
    for i in range(NN):
        cfs[0].position = np.array([ptrack_hist[i][0], ptrack_hist[i][1], ptrack_hist[i][2]])
        imu[i] = imu_hist[i] #note x data needs to be flipped
        cfs[0].Imu = np.array([imu[i,0],-imu[i,1],9.8-imu[i,2]])
        cfs[0].imunew = 1
        cfs[0].posnew = 1
        cfs[0].EstImu()
        # debug Kalman: dump the state near two known reference positions
        if fabs(cfs[0].position[0] -0.626817286015)<1e-7 or fabs(cfs[0].position[0] +0.877806127071)<1e-7:
            print cfs[0].position[0], cfs[0].X[1], cfs[0].X[2] , cfs[0].SX
|
22,194 | 631c4c2ef83da1cf0fd07c55122e345390afd085 | def bits_to_int(bits):
out = 0
for bit in bits:
out = (out << 1) | bit
return out
def get_seat(bsp):
    """Decode a boarding pass like 'FBFBBFFRLR' into a (row, col) pair.

    The first seven characters (F/B) binary-encode the row, the last three
    (L/R) the column.
    """
    row_bits = ({"F": 0, "B": 1}[ch] for ch in bsp[:7])
    col_bits = ({"L": 0, "R": 1}[ch] for ch in bsp[7:])
    return bits_to_int(row_bits), bits_to_int(col_bits)
def get_seat_id(row, col):
    """Unique seat ID as defined by the puzzle: row * 8 + col."""
    return (row << 3) + col
def process_data(data):
    """Turn the raw multi-line input into a list of seat IDs."""
    ids = []
    for bsp in data.splitlines():
        ids.append(get_seat_id(*get_seat(bsp)))
    return ids
def solve_a(data):
    """Part A: the highest seat ID on any boarding pass."""
    return max(process_data(data))
def solve_b(data):
    """Part B: the single seat ID missing from inside the occupied range."""
    ids = process_data(data)
    missing = set(range(min(ids), max(ids) + 1)) - set(ids)
    return missing.pop()
|
22,195 | bf4226228202eefcf22e65c84664a3fd6068d457 | class AnalyzeGPA_US:
def __init__(self, cource_list):
self.cource_list = cource_list
def get_gpa(self):
sum_gp_by_credits = 0.0
sum_credits = 0.0
for cource_dict in self.cource_list:
gp_us = self.grade_points_to_gp(cource_dict["grade_points"])
print("{}:\t{}".format(cource_dict["course_title"], gp_us))
sum_gp_by_credits += gp_us * cource_dict["credits"]
sum_credits += cource_dict["credits"]
gpa_us = sum_gp_by_credits / sum_credits
return gpa_us
def grade_points_to_gp(self, grade_points):
if grade_points > 92.4: # A
return 4
elif grade_points > 89.9: # A-
return 3.7
elif grade_points > 87.4: # B+
return 3.33
elif grade_points > 82.4: # B
return 3
elif grade_points > 79.9: # B-
return 2.7
elif grade_points > 77.4: # C+
return 2.3
elif grade_points > 72.4: # C
return 2
elif grade_points > 69.9: # C-
return 1.7
elif grade_points > 67.4: # D+
return 1.3
elif grade_points > 62.4: # D
return 1
elif grade_points > 59.0: # D-
return 0.7
else: # F
return 0
if __name__ == "__main__":
    # Demo entry point: pull the course list from the companion module
    # and print the computed GPA.
    from main import main
    agpa_us = AnalyzeGPA_US(main())
    print(agpa_us.get_gpa())
|
22,196 | 29c5bfcca44db7fff056b423343843ec2bd28b64 | # Generated by Django 3.0.5 on 2020-07-24 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-adds ``passport_num`` to RegisterModel
    (removed in 0002) with a default of '0'. Do not hand-edit."""
    dependencies = [
        ('user', '0002_remove_registermodel_passport_num'),
    ]
    operations = [
        migrations.AddField(
            model_name='registermodel',
            name='passport_num',
            field=models.CharField(default='0', max_length=12),
        ),
    ]
|
22,197 | 71643c9c7780062cb2617f65492085d59e241fe4 | import requests
from requests import HTTPError
from pbincli.utils import PBinCLIError
def _config_requests(settings=None, shortener=False):
    """Build a requests.Session configured from *settings*.

    Applies TLS-verification overrides, optional HTTP authentication
    (skipped for shortener sessions so PrivateBin credentials never reach
    a third-party service) and proxy configuration.
    """
    if settings['no_insecure_warning']:
        from requests.packages.urllib3.exceptions import InsecureRequestWarning
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    session = requests.Session()
    session.verify = not settings['no_check_certificate']

    # do not leak PrivateBin authorization to shortener services
    if settings['auth'] and not shortener:
        if settings['auth'] == 'basic' and settings['auth_user'] and settings['auth_pass']:
            session.auth = (settings['auth_user'], settings['auth_pass'])
        elif settings['auth'] == 'custom' and settings['auth_custom']:
            from json import loads as json_loads
            session.headers.update(json_loads(settings['auth_custom']))
        else:
            PBinCLIError("Incorrect authorization configuration")

    proxy = settings['proxy']
    if proxy:
        scheme = proxy.split('://')[0]
        if scheme.startswith("socks"):
            # a SOCKS proxy tunnels both protocols
            session.proxies.update({"http": proxy, "https": proxy})
        else:
            session.proxies.update({scheme: proxy})
    return session
class PrivateBin:
    """Thin HTTP client for a PrivateBin instance: paste creation,
    retrieval, deletion and server version discovery."""
    def __init__(self, settings=None):
        self.server = settings['server']
        self.headers = {'X-Requested-With': 'JSONHttpRequest'}
        self.session = _config_requests(settings, False)
    def post(self, request):
        """Submit a new paste; return the server's decoded JSON response."""
        result = self.session.post(
            url = self.server,
            headers = self.headers,
            data = request)
        try:
            return result.json()
        except ValueError:
            PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text))
    def get(self, request):
        """Fetch a paste; *request* is a pre-built query string."""
        return self.session.get(
            url = self.server + "?" + request,
            headers = self.headers).json()
    def delete(self, request):
        """Delete a paste and report the outcome on stdout."""
        # using try as workaround for versions < 1.3 due to we cant detect
        # if server used version 1.2, where auto-deletion is added
        try:
            result = self.session.post(
                url = self.server,
                headers = self.headers,
                data = request).json()
        except ValueError:
            # unable parse response as json because it can be empty (1.2), so simulate correct answer
            print("NOTICE: Received empty response. We interpret that as our paste has already been deleted.")
            from json import loads as json_loads
            result = json_loads('{"status":0}')
        if not result['status']:
            print("Paste successfully deleted!")
        elif result['status']:
            PBinCLIError("Something went wrong...\nError:\t\t{}".format(result['message']))
        else:
            PBinCLIError("Something went wrong...\nError: Empty response.")
    def getVersion(self):
        """Return the server's API version read from its JSON-LD schema,
        defaulting to 1 when the version field is absent."""
        result = self.session.get(
            url = self.server + '?jsonld=paste',
            headers = self.headers)
        try:
            jsonldSchema = result.json()
            return jsonldSchema['@context']['v']['@value'] \
                if ('@context' in jsonldSchema and
                    'v' in jsonldSchema['@context'] and
                    '@value' in jsonldSchema['@context']['v']) \
                else 1
        except ValueError:
            PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text))
    def getServer(self):
        """Return the configured server URL."""
        return self.server
class Shortener:
    """URL shortener front-end supporting several services (YOURLS,
    clck.ru, TinyURL, is.gd/v.gd, cutt.ly, custom template URL).

    Some parts of this class was taken from
    python-yourls (https://github.com/tflink/python-yourls/) library
    """
    def __init__(self, settings=None):
        self.api = settings['short_api']
        if self.api is None:
            PBinCLIError("Unable to activate link shortener without short_api.")
        # we checking which service is used, because some services doesn't require
        # any authentication, or have only one domain on which it working
        if self.api == 'yourls':
            self._yourls_init(settings)
        elif self.api == 'isgd' or self.api == 'vgd':
            self._gd_init()
        elif self.api == 'custom':
            self.apiurl = settings['short_url']
        self.session = _config_requests(settings, True)
    def _yourls_init(self, settings):
        """Resolve the yourls-api.php endpoint and choose between
        username/password and signature-token authentication."""
        if not settings['short_url']:
            PBinCLIError("YOURLS: An API URL is required")
        # setting API URL
        apiurl = settings['short_url']
        if apiurl.endswith('/yourls-api.php'):
            self.apiurl = apiurl
        elif apiurl.endswith('/'):
            self.apiurl = apiurl + 'yourls-api.php'
        else:
            PBinCLIError("YOURLS: Incorrect URL is provided.\n" +
                "It must contain full address to 'yourls-api.php' script (like https://example.com/yourls-api.php)\n" +
                "or just contain instance URL with '/' at the end (like https://example.com/)")
        # validating for required credentials
        if settings['short_user'] and settings['short_pass'] and settings['short_token'] is None:
            self.auth_args = {'username': settings['short_user'], 'password': settings['short_pass']}
        elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token']:
            self.auth_args = {'signature': settings['short_token']}
        elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token'] is None:
            self.auth_args = {}
        else:
            PBinCLIError("YOURLS: either username and password or token are required. Otherwise set to default (None)")
    def _gd_init(self):
        """Pick the is.gd or v.gd endpoint and a browser-like User-Agent."""
        if self.api == 'isgd':
            self.apiurl = 'https://is.gd/'
        else:
            self.apiurl = 'https://v.gd/'
        self.useragent = 'Mozilla/5.0 (compatible; pbincli - https://github.com/r4sas/pbincli/)'
    def getlink(self, url):
        """Shorten *url* via the configured service and print the result.

        NOTE(review): "unexcepted" in the error messages below is a typo
        for "unexpected"; the runtime strings are left untouched here.
        """
        # that is api -> function mapper for running service-related function when getlink() used
        servicesList = {
            'yourls': self._yourls,
            'clckru': self._clckru,
            'tinyurl': self._tinyurl,
            'isgd': self._gd,
            'vgd': self._gd,
            'cuttly': self._cuttly,
            'custom': self._custom
        }
        # run function selected by choosen API
        servicesList[self.api](url)
    def _yourls(self,url):
        """Shorten via a YOURLS instance; validates the JSON response."""
        request = {'action': 'shorturl', 'format': 'json', 'url': url}
        request.update(self.auth_args)
        result = self.session.post(
            url = self.apiurl,
            data = request)
        try:
            result.raise_for_status()
        except HTTPError:
            try:
                response = result.json()
            except ValueError:
                PBinCLIError("YOURLS: Unable parse response. Received (size = {}):\n{}".format(len(result.text), result.text))
            else:
                PBinCLIError("YOURLS: Received error from API: {} with JSON {}".format(result, response))
        else:
            response = result.json()
            if {'status', 'statusCode', 'message'} <= set(response.keys()):
                if response['status'] == 'fail':
                    PBinCLIError("YOURLS: Received error from API: {}".format(response['message']))
                if not 'shorturl' in response:
                    PBinCLIError("YOURLS: Unknown error: {}".format(response['message']))
                else:
                    print("Short Link:\t{}".format(response['shorturl']))
            else:
                PBinCLIError("YOURLS: No status, statusCode or message fields in response! Received:\n{}".format(response))
    def _clckru(self, url):
        """Shorten via clck.ru (no authentication, plain-text response)."""
        request = {'url': url}
        try:
            result = self.session.post(
                url = "https://clck.ru/--",
                data = request)
            print("Short Link:\t{}".format(result.text))
        except Exception as ex:
            PBinCLIError("clck.ru: unexcepted behavior: {}".format(ex))
    def _tinyurl(self, url):
        """Shorten via TinyURL (no authentication, plain-text response)."""
        request = {'url': url}
        try:
            result = self.session.post(
                url = "https://tinyurl.com/api-create.php",
                data = request)
            print("Short Link:\t{}".format(result.text))
        except Exception as ex:
            PBinCLIError("TinyURL: unexcepted behavior: {}".format(ex))
    def _gd(self, url):
        """Shorten via is.gd / v.gd (JSON response, statistics disabled)."""
        request = {
            'format': 'json',
            'url': url,
            'logstats': 0 # we don't want use any statistics
        }
        headers = { 'User-Agent': self.useragent}
        try:
            result = self.session.post(
                url = self.apiurl + "create.php",
                headers = headers,
                data = request)
            response = result.json()
            if 'shorturl' in response:
                print("Short Link:\t{}".format(response['shorturl']))
            else:
                PBinCLIError("{}: got error {} from API: {}".format(
                    "is.gd" if self.api == 'isgd' else 'v.gd',
                    response['errorcode'],
                    response['errormessage']))
        except Exception as ex:
            PBinCLIError("{}: unexcepted behavior: {}".format(
                "is.gd" if self.api == 'isgd' else 'v.gd',
                ex))
    def _cuttly(self, url):
        """Shorten via cutt.ly (plain-text response)."""
        request = {
            'url': url,
            'domain': 0
        }
        try:
            result = self.session.post(
                url = "https://cutt.ly/scripts/shortenUrl.php",
                data = request)
            print("Short Link:\t{}".format(result.text))
        except Exception as ex:
            PBinCLIError("cutt.ly: unexcepted behavior: {}".format(ex))
    def _custom(self, url):
        """Shorten via a user-supplied template URL in which '{{url}}' is
        replaced by the URL-encoded paste link."""
        if self.apiurl is None:
            PBinCLIError("No short_url specified - link will not be shortened.")
        from urllib.parse import quote
        qUrl = quote(url, safe="") # urlencoded paste url
        rUrl = self.apiurl.replace("{{url}}", qUrl)
        try:
            result = self.session.get(
                url = rUrl)
            print("Short Link:\t{}".format(result.text))
        except Exception as ex:
            PBinCLIError("Shorter: unexcepted behavior: {}".format(ex))
|
22,198 | 4feb51248cbe6231301051481c8ef3782e2437e4 | from django.urls import path
from . import views
# Route table for this app: maps each URL path to its view callable.
# NOTE(review): paths lack trailing slashes; Django convention is e.g. 'home/'
# - confirm this matches the project's APPEND_SLASH setting.
urlpatterns = [
    path('home',views.homefn,name="home"),
    path('register',views.regFun,name="register"),
    path('dashboard',views.dashfun,name="dashboard")
]
|
22,199 | aa45e6fb10e7ab9484053fc7627ce934f4ad44fb | ###############################################################################
## BisQue ##
## Center for Bio-Image Informatics ##
## University of California at Santa Barbara ##
## ------------------------------------------------------------------------- ##
## ##
## Copyright (c) 2015 by the Regents of the University of California ##
## All rights reserved ##
## ##
## Redistribution and use in source and binary forms, with or without ##
## modification, are permitted provided that the following conditions are ##
## met: ##
## ##
## 1. Redistributions of source code must retain the above copyright ##
## notice, this list of conditions, and the following disclaimer. ##
## ##
## 2. Redistributions in binary form must reproduce the above copyright ##
## notice, this list of conditions, and the following disclaimer in ##
## the documentation and/or other materials provided with the ##
## distribution. ##
## ##
## 3. All advertising materials mentioning features or use of this ##
## software must display the following acknowledgement: This product ##
## includes software developed by the Center for Bio-Image Informatics##
## University of California at Santa Barbara, and its contributors. ##
## ##
## 4. Neither the name of the University nor the names of its ##
## contributors may be used to endorse or promote products derived ##
## from this software without specific prior written permission. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" AND ANY ##
## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ##
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE ##
## DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ##
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ##
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS ##
## OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, ##
## STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ##
## ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ##
## POSSIBILITY OF SUCH DAMAGE. ##
## ##
###############################################################################
"""
JSON table exporter
"""
__author__ = "Dmitry Fedorov <dima@dimin.net>"
__version__ = "1.0"
__copyright__ = "Center for Bio-Image Informatics, University of California at Santa Barbara"
# default imports
import os
import logging
import datetime as dt
import numbers
# Public API: only ExporterJSON is exported.
# NOTE(review): ExporterExtJS is defined below but not listed here - confirm
# whether its omission from __all__ is intentional (e.g. plugin discovery).
__all__ = [ 'ExporterJSON' ]
# module-level logger for the table export service
log = logging.getLogger("bq.table.export.json")
try:
import numpy as np
except ImportError:
log.info('Numpy was not found but required for table service!')
try:
import pandas as pd
except ImportError:
log.info('Pandas was not found but required for table service!')
try:
import json
except ImportError:
log.info('Json was not found but needed for JSON output...')
from bq.table.controllers.table_exporter import TableExporter
#---------------------------------------------------------------------------------------
# Json serializer
#---------------------------------------------------------------------------------------
class ExtEncoder(json.JSONEncoder):
    """JSON encoder extended to handle datetime and numpy values.

    Converts datetime/date/time to ISO-8601 strings, numpy integer and
    floating scalars to native Python numbers, and numpy arrays to nested
    lists.  Anything else is delegated to the base encoder, which raises
    TypeError for unserializable objects.
    """
    def default(self, o): # pylint: disable=method-hidden
        if isinstance(o, (dt.datetime, dt.date, dt.time)):
            return o.isoformat()
        elif isinstance(o, np.integer):
            return int(o)
        elif isinstance(o, np.floating):
            return float(o)
        elif isinstance(o, np.ndarray):
            return o.tolist()
        # fall through: let the base class raise TypeError
        return json.JSONEncoder.default(self, o)
def _replace_nans(o):
if isinstance(o,list):
return [_replace_nans(el) for el in o]
elif isinstance(o, numbers.Number) and np.isnan(o): # NaN not a JSON standard; replace with "null"
return None
else:
return o
#---------------------------------------------------------------------------------------
# exporters: ExtJS - JSON formatted for ExtJS store ingest
#---------------------------------------------------------------------------------------
class ExporterExtJS (TableExporter):
    '''Formats tables as ExtJS'''
    name = 'extjs'
    version = '1.0'
    ext = 'extjs'
    mime_type = 'application/json'

    def info(self, table):
        """Return table metadata (headers, types, sizes, groups) as JSON."""
        super(ExporterExtJS, self).info(table)
        meta = {}
        if table.headers:
            # headers present => this is a leaf object (table or matrix)
            meta["headers"] = table.headers
            meta["types"] = table.types
        if table.sizes is not None:
            meta["sizes"] = table.sizes
        if table.tables is not None:
            meta["group"] = table.tables
        if table.meta is not None and len(table.meta) > 0:
            meta["meta"] = table.meta
        return json.dumps(meta, cls=ExtEncoder)

    def format(self, table):
        """ converts table to JSON """
        payload = _replace_nans(table.as_array().tolist())
        # a string or a scalar is wrapped in a list so 'data' is always iterable
        is_scalar = not hasattr(payload, "__getitem__") and not hasattr(payload, "__iter__")
        if hasattr(payload, "strip") or is_scalar:
            payload = [payload]
        out = {
            'offset': table.offset,
            'data': payload,
            'headers': table.headers,
            'types': table.types,
        }
        if table.sizes is not None:
            out["sizes"] = table.sizes
        return json.dumps(out, cls=ExtEncoder)
#---------------------------------------------------------------------------------------
# exporters: Json
#---------------------------------------------------------------------------------------
class ExporterJSON (TableExporter):
    '''Formats tables as Json'''
    name = 'json'
    version = '1.0'
    ext = 'json'
    mime_type = 'application/json'

    def info(self, table):
        """Return table metadata (headers, types, sizes, groups) as JSON."""
        super(ExporterJSON, self).info(table)
        meta = {}
        if table.headers:
            # headers present => this is a leaf object (table or matrix)
            meta["headers"] = table.headers
            meta["types"] = table.types
        if table.sizes is not None:
            meta["sizes"] = table.sizes
        if table.tables is not None:
            meta["group"] = table.tables
        if table.meta is not None and len(table.meta) > 0:
            meta["meta"] = table.meta
        return json.dumps(meta, cls=ExtEncoder)

    def format(self, table):
        """ converts table to JSON """
        arr = table.as_array()
        rank = len(arr.shape)
        ncols = len(table.headers)
        # heuristic: leading headers '0','1','2' suggest an unnamed matrix,
        # anything else (or few columns) is treated as a per-column table
        is_table = ncols <= 5 or (ncols > 5 and (table.headers[0] != '0' or table.headers[1] != '1' or table.headers[2] != '2'))
        if rank < 3 and is_table:
            # emit one record per column
            out = []
            for idx, header in enumerate(table.headers):
                column = arr[:, idx] if rank > 1 else arr
                record = {
                    'offset': table.offset,
                    'header': header,
                    'type': table.types[idx],
                    'data': _replace_nans(column.tolist()),
                }
                if table.sizes is not None:
                    record["size"] = table.sizes[0]
                out.append(record)
        else:
            # matrix / high-rank data: emit a single record with the full array
            out = {
                'offset': table.offset,
                'type': table.types[0],
                'data': _replace_nans(arr.tolist()),
            }
            if table.sizes is not None:
                out["size"] = table.sizes
        return json.dumps(out, cls=ExtEncoder)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.