index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
997,400 | c46ed962ad51bb04d572ecd7b21d0ea41c9b40a4 | # Does it appear that our data might be coming iid from some distribution?
# Plot the appearance of new things vs. bootstrapped iid samples
import numpy as np
import re
import matplotlib.pyplot as plt
def iidPlots(fileName, numReps, fileDesc):
    """Plot the observed distinct-item accumulation curve against bootstrapped IID samples.

    Reads one item per line from *fileName*, plots the cumulative fraction of
    distinct items seen while scanning the file, and overlays a bootstrapped
    90% confidence band built from *numReps* IID resamples of the same data.
    If the observed curve leaves the band, the data is unlikely to be IID.

    Args:
        fileName: path to a text file with one item per line.
        numReps: number of bootstrap replicates to draw.
        fileDesc: label used in the plot title and output file name.

    Side effects: writes "testIId_<fileDesc>.png" and clears the figure.
    """
    # Read file. `with` closes the handle (the original leaked it), and
    # splitlines() avoids the trailing empty item that re.split("\n", ...)
    # produced when the file ends with a newline.
    with open(fileName, "r") as f:
        dataList = f.read().splitlines()
    # Get the plot for this data
    fracObs = fracSeen(dataList)
    # Get bootstrap replicates: resample with replacement, as IID data would be.
    replicates = np.zeros(shape=(len(fracObs), numReps))
    for idx in range(numReps):
        shuffledData = np.random.choice(a=dataList,
                                        size=len(dataList),
                                        replace=True)
        replicates[:, idx] = fracSeen(shuffledData)
    # Empirical 5th/95th percentile per position give a 90% band.
    # Sort each row once instead of twice (the original sorted every row
    # separately for each of the two band edges).
    CI95idx = int(numReps * 0.95)
    CI05idx = int(numReps * 0.05)
    sortedReps = np.sort(replicates, axis=1)
    plt.plot(np.arange(len(fracObs)), fracObs, label="Observed")
    plt.fill_between(np.arange(len(fracObs)),
                     sortedReps[:, CI05idx],
                     sortedReps[:, CI95idx],
                     alpha=0.2, label="Bootstrapped 90% CI for IID data")
    plt.ylabel("Cumulative fraction of items observed")
    plt.xlabel("Number of items observed")
    plt.title("Test for iid property of " + fileDesc + " data")
    plt.legend()
    plt.tight_layout(pad=2)
    plt.savefig("testIId_" + fileDesc + ".png")
    plt.clf()
def fracSeen(data):
    """As we read through the data, return the cumulative fraction of distinct items seen.

    Element i is the number of distinct items among data[:i+1], divided by
    the total number of distinct items in *data*.

    Args:
        data: iterable of hashable items.

    Returns:
        list of floats, same length as *data* (empty list for empty input —
        the original raised IndexError on ``numSeen[-1]`` in that case).
    """
    itemSet = set()
    numSeen = []
    for x in data:
        itemSet.add(x)
        numSeen.append(len(itemSet))
    if not numSeen:
        return []
    total = numSeen[-1]
    return [n / total for n in numSeen]
# Run the IID test on the Hamlet word list (input file must exist in the CWD).
iidPlots("hamletWords.txt", numReps=100, fileDesc="Hamlet")
iidPlots("institutions.txt", numReps=10, fileDesc="DBLP-institutions") |
997,401 | 2e86513e5a07a84f6851c7bbb305071074214e8e | """
The setting of Superb IC
Authors
* Wei-Cheng Tseng 2021
* Leo 2021
* Leo 2022
"""
import logging
import pickle
from pathlib import Path
import pandas as pd
import torch
from omegaconf import MISSING
from torch.utils.data import Dataset
from s3prl.dataio.corpus.fluent_speech_commands import FluentSpeechCommands
from s3prl.dataio.dataset import EncodeCategories, LoadAudio
from s3prl.dataio.encoder.category import CategoryEncoders
from s3prl.dataio.sampler import FixedBatchSizeBatchSampler
from s3prl.nn.linear import MeanPoolingLinear
from s3prl.task.utterance_classification_task import (
UtteranceMultiClassClassificationTask,
)
from .run import Common
logger = logging.getLogger(__name__)
__all__ = [
"fsc_for_multi_classification",
"SuperbIC",
]
def fsc_for_multi_classification(
    target_dir: str,
    cache_dir: str,
    dataset_root: str,
    n_jobs: int = 6,
    get_path_only: bool = False,
):
    """
    Prepare Fluent Speech Command for multi-class classification
    following :obj:`SuperbIC.prepare_data` format. The standard usage
    is to use three labels jointly: action, object, and location.

    Args:
        target_dir (str): directory the train/valid/test csv files are written into
        cache_dir (str): unused here; kept for interface compatibility
        dataset_root (str): The root path of Fluent Speech Command
        n_jobs (int): to speed up the corpus parsing procedure
        get_path_only (bool): return the csv paths without (re)creating them

    Returns:
        tuple: (train_path, valid_path, test_paths), test_paths being a list
    """
    target_dir = Path(target_dir)

    # Plain strings: the original used f-strings with no placeholders.
    train_path = target_dir / "train.csv"
    valid_path = target_dir / "valid.csv"
    test_paths = [target_dir / "test.csv"]

    if get_path_only:
        return train_path, valid_path, test_paths

    def format_fields(data_points: dict):
        # Keep only the wav path, and join the three labels with ' ; ' so the
        # encoder can split them back on ';' later.
        return {
            key: dict(
                wav_path=value["path"],
                labels=f"{value['action']} ; {value['object']} ; {value['location']}",
            )
            for key, value in data_points.items()
        }

    corpus = FluentSpeechCommands(dataset_root, n_jobs)
    train_data, valid_data, test_data = corpus.data_split
    train_data = format_fields(train_data)
    valid_data = format_fields(valid_data)
    test_data = format_fields(test_data)

    def dict_to_csv(data_dict, csv_path):
        # Deterministic row order (sorted ids) so re-runs produce identical csvs.
        keys = sorted(data_dict.keys())
        fields = sorted(data_dict[keys[0]].keys())
        data = {field: [data_dict[key][field] for key in keys] for field in fields}
        data["id"] = keys
        pd.DataFrame(data).to_csv(csv_path, index=False)

    dict_to_csv(train_data, train_path)
    dict_to_csv(valid_data, valid_path)
    dict_to_csv(test_data, test_paths[0])

    return train_path, valid_path, test_paths
class SuperbIC(Common):
    """SUPERB Intent Classification recipe (Fluent Speech Commands, joint
    action/object/location labels), implemented as a :obj:`Common` problem."""

    def default_config(self) -> dict:
        # Default hyper-parameters; MISSING entries must be supplied by the user.
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
            ),
            build_encoder=dict(),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(
                    batch_size=32,
                    shuffle=True,
                ),
                valid=dict(
                    batch_size=32,
                ),
                test=dict(
                    batch_size=32,
                ),
            ),
            build_upstream=dict(
                name=MISSING,
            ),
            build_featurizer=dict(
                layer_selections=None,
                normalize=False,
            ),
            build_downstream=dict(
                hidden_size=256,
            ),
            build_model=dict(
                upstream_trainable=False,
            ),
            build_task=dict(),
            build_optimizer=dict(
                name="Adam",
                conf=dict(
                    lr=1.0e-4,
                ),
            ),
            build_scheduler=dict(
                name="ExponentialLR",
                gamma=0.9,
            ),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=5000,
                save_step=250,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric="accuracy",
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None,
            ),
        )

    def prepare_data(
        self,
        prepare_data: dict,
        target_dir: str,
        cache_dir: str,
        get_path_only: bool = False,
    ):
        """
        Prepare the task-specific data metadata (path, labels...).
        By default call :obj:`fsc_for_multi_classification` with :code:`**prepare_data`

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                arguments for :obj:`fsc_for_multi_classification`
            target_dir (str): Parse your corpus and save the csv file into this directory
            cache_dir (str): If the parsing or preprocessing takes too long time, you can save
                the temporary files into this directory. This directory is expected to be shared
                across different training sessions (different hypers and :code:`target_dir`)
            get_path_only (bool): Directly return the filepaths no matter they exist or not.

        Returns:
            tuple

            1. train_path (str)
            2. valid_path (str)
            3. test_paths (List[str])

            Each path (str) should be a csv file containing the following columns:

            ==================== ====================
            column               description
            ==================== ====================
            id                   (str) - the unique id for this data point
            wav_path             (str) - the absolute path of the waveform file
            labels               (str) - the string labels of the waveform, separated by a ';'
            ==================== ====================

            The number of the label columns can be arbitrary.
        """
        return fsc_for_multi_classification(
            **self._get_current_arguments(flatten_dict="prepare_data")
        )

    def build_encoder(
        self,
        build_encoder: dict,
        target_dir: str,
        cache_dir: str,
        train_csv_path: str,
        valid_csv_path: str,
        test_csv_paths: list,
        get_path_only: bool = False,
    ):
        """
        Build the encoder (for the labels) given the data metadata, and return the encoder.
        By default generate and save a :obj:`s3prl.dataio.encoder.CategoryEncoders` from
        the ';'-separated values of the :code:`labels` column of all the csv files.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Save your encoder into this directory
            cache_dir (str): If the preprocessing takes too long time, you can save
                the temporary files into this directory. This directory is expected to be shared
                across different training sessions (different hypers and :code:`target_dir`)
            train_csv_path (str): the train path from :obj:`prepare_data`
            valid_csv_path (str): the valid path from :obj:`prepare_data`
            test_csv_paths (List[str]): the test paths from :obj:`prepare_data`
            get_path_only (bool): Directly return the filepaths no matter they exist or not.

        Returns:
            CategoryEncoders

            The encoder fit on all label columns; it is also pickled to
            ``<target_dir>/encoder.pkl``. NOTE: when :code:`get_path_only`
            is True, the pickle path is returned instead of the encoder.
        """
        encoder_path = Path(target_dir) / "encoder.pkl"
        if get_path_only:
            return encoder_path

        train_csv = pd.read_csv(train_csv_path)
        valid_csv = pd.read_csv(valid_csv_path)
        test_csvs = [pd.read_csv(path) for path in test_csv_paths]
        all_csv = pd.concat([train_csv, valid_csv, *test_csvs])

        # Each row's "labels" cell looks like "action ; object ; location".
        multilabels = [
            [label.strip() for label in multilabel.split(";")]
            for multilabel in all_csv["labels"].tolist()
        ]
        # zip(*multilabels) regroups per category: one encoder per label position.
        encoder = CategoryEncoders(
            [single_category_labels for single_category_labels in zip(*multilabels)]
        )
        with open(encoder_path, "wb") as f:
            pickle.dump(encoder, f)

        return encoder

    def build_dataset(
        self,
        build_dataset: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        encoder_path: str,
        frame_shift: int,
    ):
        """
        Build the dataset for train/valid/test.

        Args:
            build_dataset (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Current experiment directory
            cache_dir (str): If the preprocessing takes too long time, you can save
                the temporary files into this directory. This directory is expected to be shared
                across different training sessions (different hypers and :code:`target_dir`)
            mode (str): train/valid/test
            data_csv (str): The metadata csv file for the specific :code:`mode`
            encoder_path (str): The pickled encoder path for encoding the labels

        Returns:
            torch Dataset

            For all train/valid/test mode, the dataset should return each item as a dictionary
            containing the following keys:

            ==================== ====================
            key                  description
            ==================== ====================
            x                    (torch.FloatTensor) - the waveform in (seq_len, 1)
            x_len                (int) - the waveform length :code:`seq_len`
            class_ids            (torch.LongTensor) - the encoded class ids. shape: (num_class, )
            labels               (List[str]) - the class name. length: num_class
            unique_name          (str) - the unique id for this datapoint
            ==================== ====================
        """
        csv = pd.read_csv(data_csv)
        ids = csv["id"].tolist()

        audio_loader = LoadAudio(csv["wav_path"].tolist())

        with open(encoder_path, "rb") as f:
            encoder = pickle.load(f)

        label_encoder = EncodeCategories(
            [
                [label.strip() for label in multilabel.split(";")]
                for multilabel in csv["labels"].tolist()
            ],
            encoder,
        )

        # NOTE(review): this local class shadows the imported
        # torch.utils.data.Dataset; it is a plain map-style dataset closure
        # over audio_loader / label_encoder / ids.
        class Dataset:
            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                label = label_encoder[index]
                return {
                    "x": audio["wav"],
                    "x_len": audio["wav_len"],
                    "class_ids": label["class_ids"],
                    "labels": label["labels"],
                    "unique_name": ids[index],
                }

        dataset = Dataset()
        return dataset

    def build_batch_sampler(
        self,
        build_batch_sampler: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        dataset: Dataset,
    ):
        """
        Return the batch sampler for torch DataLoader.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`

                ==================== ====================
                key                  description
                ==================== ====================
                train                (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                valid                (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                test                 (dict) - arguments for :obj:`FixedBatchSizeBatchSampler`
                ==================== ====================

            target_dir (str): Current experiment directory
            cache_dir (str): If the preprocessing takes too long time, save
                the temporary files into this directory. This directory is expected to be shared
                across different training sessions (different hypers and :code:`target_dir`)
            mode (str): train/valid/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader
        """

        def _build_batch_sampler(
            train: dict = None, valid: dict = None, test: dict = None
        ):
            # Pick the sampler config matching the current mode; an unknown
            # mode implicitly returns None.
            if mode == "train":
                return FixedBatchSizeBatchSampler(dataset, **train)
            elif mode == "valid":
                return FixedBatchSizeBatchSampler(dataset, **valid)
            elif mode == "test":
                return FixedBatchSizeBatchSampler(dataset, **test)

        return _build_batch_sampler(**build_batch_sampler)

    def build_downstream(
        self,
        build_downstream: dict,
        downstream_input_size: int,
        downstream_output_size: int,
        downstream_input_stride: int,
    ):
        """
        Return the task-specific downstream model.
        By default build the :obj:`MeanPoolingLinear` model

        Args:
            build_downstream (dict): same in :obj:`default_config`,
                support arguments of :obj:`MeanPoolingLinear`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_input_stride (int): the input feature's stride (from 16 KHz)

        Returns:
            :obj:`AbsUtteranceModel`
        """
        model = MeanPoolingLinear(
            downstream_input_size, downstream_output_size, **build_downstream
        )
        return model

    def build_task(
        self,
        build_task: dict,
        model: torch.nn.Module,
        encoder,
        valid_df: pd.DataFrame = None,
        test_df: pd.DataFrame = None,
    ):
        """
        Build the task, which defines the logics for every train/valid/test forward step for the :code:`model`,
        and the logics for how to reduce all the batch results from multiple train/valid/test steps into metrics

        By default build :obj:`UtteranceMultiClassClassificationTask`

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`
            valid_df (pd.DataFrame): metadata of the valid set
            test_df (pd.DataFrame): metadata of the test set

        Returns:
            Task
        """
        return UtteranceMultiClassClassificationTask(model, encoder)
|
997,402 | ef266bdc14e1f210f042b9d9d4c8b937cca49c2a | import idom
@idom.component
def AndGate():
    """Render two checkboxes and show the logical AND of their boolean states."""
    input_1, toggle_1 = use_toggle()
    input_2, toggle_2 = use_toggle()
    # Each click flips the corresponding hook state; idom re-renders the
    # component whenever a state setter fires.
    return idom.html.div(
        idom.html.input({"type": "checkbox", "onClick": lambda event: toggle_1()}),
        idom.html.input({"type": "checkbox", "onClick": lambda event: toggle_2()}),
        idom.html.pre(f"{input_1} AND {input_2} = {input_1 and input_2}"),
    )
def use_toggle():
    """Boolean state hook: return (value, toggle) where toggle() flips the value."""
    value, set_value = idom.hooks.use_state(False)

    def flip():
        # Functional update: negate whatever the current value is.
        set_value(lambda current: not current)

    return value, flip
# Start the idom application, rendering the AndGate component at the root.
idom.run(AndGate)
|
997,403 | b4aa976c054cf33c1425980ff324ca2e155e8771 | from django.conf.urls import url
from . import views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the shop app. Named routes so templates can {% url %} them.
urlpatterns = [
    # Landing page.
    url(r'^$', views.index, name='index'),
    # Goods filtered by category; ids allow word characters and dashes.
    url(r'^category/(?P<category_id>[\w\-]+)', views.category, name='category'),
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^about/', views.about, name='about'),
    url(r'^logout/$', views.user_logout, name='logout'),
    # Single goods detail page.
    url(r'^goods/(?P<goods_id>[\w\-]+)', views.goods_page, name='goods'),
    url(r'^add_goods', views.add_goods, name='add_goods'),
    url(r'^add_comment/(?P<goods_id>[\w\-]+)', views.add_comment, name='add_comment'),
    url(r'^profile/(?P<user_id>[\w\-]+)', views.profile, name='profile'),
    url(r'^search', views.search, name='search'),
    url(r'^message/', views.display_message, name='message'),
    # NOTE(review): several patterns lack a trailing '$' (category, goods,
    # add_goods, add_comment, profile, search), so they also match longer
    # URLs by prefix — confirm this is intended.
]
|
997,404 | 516e7904a5330c90c8137dc88195fe54a6d91c78 | __version__ = "0.1.1f"
|
997,405 | cfd2f476c43918dcbe2320191e3fcb2cb0eb45f6 | from django.contrib import admin
from ennarroot.models import Admin_list,Products
# Register your models here.
# Expose the app's models in the Django admin interface.
admin.site.register(Admin_list)
admin.site.register(Products)
|
def vatcalculate(totalPrice, rate=7):
    """Return *totalPrice* with VAT added.

    Args:
        totalPrice: pre-tax price (int or float).
        rate: VAT percentage; defaults to 7 (Thai VAT), so existing
            callers keep the original behavior.

    Returns:
        float: price including VAT.
    """
    result = totalPrice + (totalPrice * rate / 100)
    return result
# Read a price from stdin (the prompt is Thai for "product price") and print
# the VAT-inclusive total.
totle = int(input("ราคาสินค้า : "))
print(vatcalculate(totle))
997,407 | ba3cde87b297e5cfacc6156579fd9516cca560ec | """ the omdb module """
from .exceptions import (
OMDBException,
OMDBLimitReached,
OMDBNoResults,
OMDBTooManyResults,
)
from .omdb import OMDB
__author__ = "Tyler Barrus"
__maintainer__ = "Tyler Barrus"
__email__ = "barrust@gmail.com"
__license__ = "MIT"
__version__ = "0.2.1"
__url__ = "https://github.com/barrust/pyomdbapi"
__bugtrack_url__ = f"{__url__}/issues"
__all__ = [
"OMDB",
"OMDBException",
"OMDBNoResults",
"OMDBLimitReached",
"OMDBTooManyResults",
]
|
997,408 | ff9b49bfcf0d98c7a8cfeab4decb837615589392 | from encryptor import Encryptor
if __name__ == '__main__':
    # SECURITY NOTE(review): a hard-coded 16-byte symmetric key in source is
    # acceptable for a demo only — never ship real secrets in code.
    key = b'\xa2\x8a\x84\xd4\xe6\xb4\x7f\x13\xbc\x01\x04\x83\xf5N\x0bg'
    enc = Encryptor(key)
    # Encrypt the sample file; the decrypt call is kept for manual testing.
    enc.encrypt_file('plain_video.mp4')
    # enc.decrypt_file('plain_video.mp4.enc')
997,409 | 1a1ca666b15d388180bcf8399f32632e81c797ff | #-*- coding: utf-8 -*-
from odoo import models, fields, api
class MeetingsTask(models.Model):
    """Odoo model layered onto the existing 'meetings' model, adding
    mail.thread chatter support plus scheduling fields."""
    _name = 'meetings'
    _inherit = ['meetings', 'mail.thread']
    # Field labels below are user-facing Spanish strings; they are runtime
    # data and must stay as-is.
    # Meeting organizer ("Organizador").
    user_id = fields.Many2one('res.users', 'Organizador')
    # Meeting date ("Fecha Reunion").
    date_deadline = fields.Date('Fecha Reunion')
    # Maximum number of participants ("Cupo de participantes").
    participantes_estimados = fields.Integer('Cupo de participantes')
    # Estimated duration in hours ("Estimacion en horas").
    horas_estimadas = fields.Integer('Estimacion en horas')
    # Free-text description ("Descripcion").
    informacion = fields.Char('Descripcion')
997,410 | 970fdd6f8141a5a9429009554637914247782310 |
# coding: utf-8
# In[2]:
#import packages
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
get_ipython().run_line_magic('matplotlib', 'inline')
matplotlib.style.use('ggplot')
import seaborn as sns
# In[3]:

# Load the Game of Thrones character-deaths dataset (file expected in the CWD).
data = pd.read_csv('character-deaths.csv')

# In[4]:

# check info
print(data.shape)
data.info()

# In[5]:

# check all columns
print(data.columns.values)
data.head(5)

# Column meanings (translated from the original notebook):
# * Allegiances: the house/faction the character belongs to
# * Death Year: year of death (may be missing)
# * Book of Death: which book the character died in
# * Death Chapter: which chapter the character died in
# * Book Intro Chapter: chapter in which the character is introduced
# * Gender: 1 = male, 0 = female
# * Nobility: 1 = noble, 0 = not noble
# * GoT: 1 if the character appears in book 1, else 0
# * CoK: 1 if the character appears in book 2, else 0
# * SoS: 1 if the character appears in book 3, else 0
# * FfC: 1 if the character appears in book 4, else 0
# * DwD: 1 if the character appears in book 5, else 0

# In[6]:

# drop Book of Death & Death Chapter
# NOTE(review): positional `axis` in DataFrame.drop is deprecated in modern
# pandas — prefer axis=1 / columns=... if this is ever updated.
data.drop('Book of Death', 1, inplace=True)
data.drop('Death Chapter', 1, inplace=True)

# In[7]:

# check all columns again
print(data.columns.values)
data.head(5)

# In[8]:

# have some null in data
print('Train columns with null values:\n', data.isnull().sum())
# Death Year & Book Intro Chapter are the columns with missing values.

# In[9]:

# complete missing values (0 means "not dead" / "no intro chapter recorded")
data['Death Year'] = data['Death Year'].fillna(0)
data['Book Intro Chapter'] = data['Book Intro Chapter'].fillna(0)

# In[10]:

data.head(5)

# In[11]:

# change into 1 or 0, 0 is survived, 1 is dead
data['Death'] = data['Death Year']
data.loc[data['Death Year'] != 0, 'Death'] = 1

# In[12]:

data.head(5)

# In[13]:

# we only keep one death column
data.drop('Death Year', 1, inplace=True)
data.head(5)

# In[14]:

# One-Hot Encoding of the categorical Allegiances column
one_hot_encoding = pd.get_dummies(data['Allegiances'], prefix='A')

# In[15]:

one_hot_encoding.head(5)

# In[16]:

# combine these features into data
data2 = pd.concat([one_hot_encoding, data], axis=1)

# In[17]:

# and drop Allegiances (now redundant with the one-hot columns)
data2.drop('Allegiances', 1, inplace=True)
data2.head(5)

# In[18]:

# Death is the prediction target
data_y = data['Death']

# In[19]:

data2.columns

# In[20]:

# Feature matrix: all one-hot allegiance columns plus the numeric features.
data_x = data2[['A_Arryn', 'A_Baratheon', 'A_Greyjoy', 'A_House Arryn',
                'A_House Baratheon', 'A_House Greyjoy', 'A_House Lannister',
                'A_House Martell', 'A_House Stark', 'A_House Targaryen',
                'A_House Tully', 'A_House Tyrell', 'A_Lannister', 'A_Martell',
                'A_None', 'A_Stark', 'A_Targaryen', 'A_Tully', 'A_Tyrell',
                'A_Wildling', 'Book Intro Chapter', 'Gender',
                'Nobility', 'GoT', 'CoK', 'SoS', 'FfC', 'DwD']]

# In[26]:

# split data into train and test
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn import tree
train_X, test_X, train_y, test_y = train_test_split(data_x, data_y, random_state=9487, test_size=0.25)

# In[27]:

# build Classifier
clf = tree.DecisionTreeClassifier()
got_clf = clf.fit(train_X, train_y)

# In[28]:

# predict
test_y_predicted = got_clf.predict(test_X)
print(test_y_predicted)

# In[29]:

# ground truth
print(test_y.values)

# In[35]:

# show tree (feature names must match the data_x column order above)
fn = ['A_Arryn', 'A_Baratheon', 'A_Greyjoy', 'A_House Arryn',
      'A_House Baratheon', 'A_House Greyjoy', 'A_House Lannister',
      'A_House Martell', 'A_House Stark', 'A_House Targaryen',
      'A_House Tully', 'A_House Tyrell', 'A_Lannister', 'A_Martell',
      'A_None', 'A_Stark', 'A_Targaryen', 'A_Tully', 'A_Tyrell',
      'A_Wildling', 'Book Intro Chapter', 'Gender',
      'Nobility', 'GoT', 'CoK', 'SoS', 'FfC', 'DwD']
cn = ['0', '1']
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(4, 4), dpi=300)
tree.plot_tree(got_clf, max_depth=3
               , feature_names=fn
               , class_names=cn
               , filled=True)
fig.savefig('got.jpg')

# In[59]:

from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

# In[60]:

y_true = test_y.values
y_pred = test_y_predicted

# In[61]:

# average=None reports per-class precision/recall instead of a single average.
print('accuracy:{}'.format(accuracy_score(y_true, y_pred)))
print('precision:{}'.format(precision_score(y_true, y_pred, average=None)))
print('recall:{}'.format(recall_score(y_true, y_pred, average=None)))

# In[45]:

# show Confusion Matrix
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_true, y_pred=y_pred)
fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)
for i in range(confmat.shape[0]):
    for j in range(confmat.shape[1]):
        ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')
plt.xlabel('predicted label')
plt.ylabel('true label')
plt.show()
|
997,411 | ec941cd3dcacbaceebc88b578550c912b327a338 | import unittest
from pybinson.binson_bytes import BinsonBytes
from pybinson.binson_integer import BinsonInteger
class TestBinsonBytes(unittest.TestCase):
    """Round-trip tests for BinsonBytes serialization and parsing."""

    def test_sanity(self):
        # Presumably 0x18 is the BYTES tag with a 1-byte length, followed by
        # length 0x01 and the single payload byte — confirm against the
        # Binson spec if this encoding ever changes.
        bytes_rep = bytearray(b'\x18\x01\x01')
        bytes_val, consumed = BinsonBytes.from_bytes(bytes_rep, 0)
        self.assertEqual(1, len(bytes_val.get_value()))
        self.assertEqual(bytes_val.get_value()[0], bytes_rep[2])
        self.assertEqual(consumed, 3)
        # Serializing the same payload must reproduce the original bytes.
        bytes_rep2 = BinsonBytes(bytearray(b'\x01')).serialize()
        self.assertEqual(bytes_rep, bytes_rep2)
        self.assertTrue(isinstance(bytes_val.get_value(), bytearray))

    def helper(self, length):
        # Serialize-then-parse a zero-filled payload of `length` bytes; checks
        # the value survives and the consumed count is tag + length-field +
        # payload bytes.
        original = bytearray(length)
        bytes_rep = BinsonBytes(original).serialize()
        parsed, consumed = BinsonBytes.from_bytes(bytes_rep, 0)
        self.assertTrue(isinstance(parsed.get_value(), bytearray))
        self.assertEqual(original, parsed.get_value())
        self.assertEqual(consumed, len(original) + 1 + BinsonInteger.int_size(length))

    def test_bytes(self):
        # Boundary lengths around the 1-byte / 2-byte length encodings.
        self.helper(0)
        self.helper(1)
        self.helper(127)
        self.helper(128)
        self.helper(2**15-1)
        # More than this takes very long to run :)
if __name__ == '__main__':
unittest.main() |
997,412 | 49879e99bf95c0344e84aa4885fc806fda64cfee | from twisted.cred import portal, checkers, credentials
from nevow import inevow, guard
from axiom import userbase
from axiom import dependency
from zope.interface import implements
class AdminRealm(object):
    """Returns the admin login page for anonymous users, or the admin main page for logged in users"""
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        """twisted.cred IRealm hook: map an avatarId to a web resource.

        Raises NotImplementedError when no requested interface is IResource.
        """
        # Imported lazily — presumably to avoid a circular import with pages.
        import pages
        for iface in interfaces:
            if iface is inevow.IResource:
                # do web stuff
                if avatarId is checkers.ANONYMOUS:
                    resc = pages.AdminLoginPage()
                    resc.realm = self
                    return (inevow.IResource, resc, self.noLogout)
                else:
                    resc = pages.AdminMainPage(avatarId)
                    resc.realm = self
                    return (inevow.IResource, resc, resc.logout)
        raise NotImplementedError("Can't support that interface.")

    def noLogout(self):
        # Logout callback for anonymous sessions: nothing to tear down.
        return None
def createLoginSystem(store):
    """Creates an axiom.userbase.LoginSystem on store and returns the LoginSystem"""
    loginSys = userbase.LoginSystem(store=store)
    # installOn wires the LoginSystem into the store as a powerup/dependency.
    dependency.installOn(loginSys, store)
    return loginSys
def createAdmin(store):
    """Creates the admin section of the site, guarded with Nevow guard."""
    realm = AdminRealm()
    # Adapt the store to a credentials checker registered elsewhere.
    cc = checkers.ICredentialsChecker(store)
    # Allow both anonymous access (login page) and checked credentials.
    p = portal.Portal(realm, [checkers.AllowAnonymousAccess(), cc])
    resource = guard.SessionWrapper(p)
    return resource
|
997,413 | 54093d1069cdf11b5417d26ecc5baeeef2da0aa4 | import numpy as np
import matplotlib.pyplot as plt
# Define the function f(x) = x^2
def f(x):
    """Return the square of x (works elementwise on numpy arrays too)."""
    return x * x
# Sample 50 points (linspace default) on [-1, 1] and draw the parabola.
x = np.linspace(-1, 1)
plt.plot(x, f(x))  # draw the curve
plt.show()
997,414 | 8a8e02f64853305594d9d76707d756cb201ac89b | import glob
import json
import os
import pprint
# Collect per-flagversion flagging statistics for every *.split.cal
# measurement set under the science_goal* trees and dump them to JSON.
# NOTE(review): `flagmanager` and `flagdata` are not defined in this file —
# presumably CASA tasks available because the script runs inside a CASA
# session; confirm before running standalone.
science_goals = glob.glob("science_goal*")

all_flag_info = {}

for sg in science_goals:
    # walk only the science goals: walking other directories can be extremely
    # inefficient
    for dirpath, dirnames, filenames in os.walk(sg):
        if dirpath.count(os.path.sep) >= 5:
            # skip over things below the sci/gro/mou/<blah>/* level
            continue
        for fn in filenames:
            if (fn.endswith('12Mlong') or fn.endswith('12Mshort') or
                    fn.endswith('7M') or fn.endswith('TP')):
                field_id = fn
        # NOTE(review): field_id keeps its value from a previous directory if
        # no filename matched here (and is undefined if none ever matched) —
        # verify every directory containing a .split.cal also has a marker file.
        for fn in sorted(dirnames):
            if fn[-10:] == ".split.cal":
                fullfn = os.path.join(dirpath, fn)
                flagversions = flagmanager(fullfn, mode='list')
                all_flag_info[field_id] = {}
                for ii, fv in flagversions.items():
                    if 'name' in fv:
                        print(fv, fullfn)
                        # Restore each saved flag version, then summarize it.
                        flagmanager(fullfn, mode='restore', versionname=fv['name'])
                        flag_info = flagdata(vis=fullfn, mode='summary')
                        flag_info['field']['fraction'] = flag_info['field'][field_id]['flagged'] / flag_info['field'][field_id]['total']
                        all_flag_info[field_id][fv['name']] = flag_info['field']
                        print(flag_info['field'], flag_info['field'][field_id]['flagged'] / flag_info['field'][field_id]['total'])

with open('all_flag_info.json', 'w') as fh:
    json.dump(all_flag_info, fh)

# Spot-check one field, then print flagged fractions for every field/version.
print({key: val['G353.41']['flagged'] / val['G353.41']['total'] for key, val in all_flag_info['G353.41_B3_12Mlong'].items()})

for key1 in all_flag_info:
    print(key1)
    print()
    field = key1.split("_")[0]
    pprint.pprint({key: val[field]['flagged'] / val[field]['total'] for key, val in all_flag_info[key1].items()
                   if field in val})
|
997,415 | e282175c37e6ca15da042b2671a5f38230f6fd13 | # -*- coding: utf-8 -*-
import os
from zipfile import ZipFile
from tempfile import TemporaryDirectory
from typing import Tuple, List, Callable, Sequence
import pickle
import dawg
import numpy as np
def _lazy_property(fn):
'''Decorator that makes a property lazy-evaluated.
'''
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
class Parse:
    """One morphological analysis of a word: lemma, grammar value and frequency.

    Lemma and grammar value are computed lazily, since callers often only
    inspect the frequency.
    """

    def __init__(self, grammemes_mappings: Tuple[Tuple[str]], grammar_value_mapping: Tuple[Tuple[int]],
                 lemmatize_rule_mapping: Tuple[Tuple[int, str]], word: str,
                 grammar_value_index: int, lemmatize_rule_index: int, freq: float):
        self._grammemes_mappings = grammemes_mappings
        self._grammar_value_mapping = grammar_value_mapping
        self._lemmatize_rule_mapping = lemmatize_rule_mapping
        self._grammar_value_index = grammar_value_index
        self.lemmatize_rule_index = lemmatize_rule_index
        self.word = word
        self.frequency = freq

    @_lazy_property
    def lemma(self):
        # A lemmatize rule is (cut, append): drop `cut` chars from the end of
        # the word, then add the `append` suffix.
        cut, append = self._lemmatize_rule_mapping[self.lemmatize_rule_index]
        return (self.word[:-cut] if cut != 0 else self.word) + append

    @_lazy_property
    def grammar_value(self):
        # Decoded lazily into a GrammarValue view over the shared mappings.
        return GrammarValue(self._grammemes_mappings, self._grammar_value_mapping,
                            self._grammar_value_index)

    def __str__(self):
        return 'Word - {}; GrammarValue - {}; Frequency - {}'.format(self.word, self.grammar_value, self.frequency)

    def __repr__(self):
        return 'Parse: {}'.format(self)
class GrammarValue:
    """A decoded grammar value: at most one grammeme per grammatical category.

    Grammeme index 0 in a category means "absent", so zero indices are
    skipped when rendering the value as text.
    """

    def __init__(self, grammemes_mappings: Tuple[Tuple[str]], grammar_value_mapping: Tuple[Tuple[int]],
                 grammar_value_index: int):
        self._grammemes_mappings = grammemes_mappings
        self._grammar_value_mapping = grammar_value_mapping
        self._grammar_value_index = grammar_value_index

    def __str__(self):
        grammeme_indices = self._grammar_value_mapping[self._grammar_value_index]
        names = []
        for category, grammeme_index in enumerate(grammeme_indices):
            if grammeme_index != 0:
                names.append(self._grammemes_mappings[category][grammeme_index])
        return ' '.join(names)

    def __repr__(self):
        return 'GrammarValue: {}'.format(self)
class MorphoAnalyser:
    """Dictionary-based morphological analyser backed by a record DAWG.

    The zipped dictionary contains 'dict.dawg' (word -> packed indices) and
    'dict.info' (pickled mappings decoding those indices into grammar values,
    lemmatization rules and quantized frequencies).
    """

    def __init__(self, dict_path: str):
        # Unpack the dictionary archive into a temp dir and load both parts;
        # the temp dir is removed once loading finishes.
        with TemporaryDirectory('dict') as temp_dir:
            with ZipFile(dict_path) as zip_file:
                zip_file.extractall(temp_dir)
            # Each DAWG record packs three uint16 values:
            # (freq_index, lemmatize_rule_index, grammar_value_index).
            self._dawg = dawg.RecordDAWG('>HHH')
            self._dawg.load(os.path.join(temp_dir, 'dict.dawg'))
            with open(os.path.join(temp_dir, 'dict.info'), 'rb') as f:
                self._categories, self._grammemes_mappings, self._grammar_value_mapping, self._lemmatize_rule_mapping, \
                    self._alphabet, self._similar_letters, self._quantized_freqs_mapping = pickle.load(f)
        self._similar_letters_replacements = self._compile_replacements()
        self._grammemes_matrix = self._build_grammemes_matrix()

    def _compile_replacements(self):
        # Build DAWG replaces for visually-similar letters (e.g. е/ё), in both
        # lower and upper case, so lookups tolerate those substitutions.
        similar_letters_replacements = {}
        for first_letter, second_letter in self._similar_letters:
            similar_letters_replacements[first_letter] = second_letter
            similar_letters_replacements[first_letter.upper()] = second_letter.upper()
        return self._dawg.compile_replaces(similar_letters_replacements)

    def _build_grammemes_matrix(self) -> np.ndarray:
        # Row per grammar value: concatenated one-hot encodings of its
        # grammeme in every grammatical category.
        grammemes_vector_len = sum(len(grammar_category) for grammar_category in self._grammemes_mappings)
        grammemes_matrix = np.zeros((len(self._grammar_value_mapping), grammemes_vector_len))
        for i, grammar_value in enumerate(self._grammar_value_mapping):
            shift = 0
            for grammar_category_index, grammeme in enumerate(grammar_value):
                grammemes_matrix[i, shift + grammeme] = 1.
                shift += len(self._grammemes_mappings[grammar_category_index])
        return grammemes_matrix

    def build_grammemes_vector(self, word: str, is_case_sensitive: bool = False) -> np.ndarray:
        """Return the frequency-weighted average grammeme one-hot vector of all parses of *word*.

        Returns the zero vector when the word has no parses.
        """
        def _build_grammemes_vector(word, grammar_val_index, lemmatize_rule_index, freq):
            # Floor the frequency so zero-frequency parses still contribute.
            freq = max(freq, 1e-9)
            cur_vector = freq * self._grammemes_matrix[grammar_val_index]
            _build_grammemes_vector.grammemes_vector += cur_vector
            _build_grammemes_vector.sum_freq += freq

        # Accumulators stored as function attributes (pre-`nonlocal` style).
        _build_grammemes_vector.grammemes_vector = np.zeros(self._grammemes_matrix.shape[1])
        _build_grammemes_vector.sum_freq = 0.
        self._analyse_word(word, is_case_sensitive, _build_grammemes_vector)
        if _build_grammemes_vector.sum_freq != 0.:
            return _build_grammemes_vector.grammemes_vector / _build_grammemes_vector.sum_freq
        else:
            assert np.all(_build_grammemes_vector.grammemes_vector == 0.)
            return _build_grammemes_vector.grammemes_vector

    def analyse_word(self, word: str, is_case_sensitive: bool = False) -> List[Parse]:
        """Return all dictionary parses of *word* as :obj:`Parse` objects."""
        def _collect_parses(word, grammar_val_index, lemmatize_rule_index, freq):
            res.append(self._get_parse(word, grammar_val_index, lemmatize_rule_index, freq))

        res = []
        self._analyse_word(word, is_case_sensitive, _collect_parses)
        return res

    def _analyse_word(self, word: str, is_case_sensitive: bool, callback: Callable[[str, int, int, float], None]):
        # Case-insensitive mode also tries the capitalized and upper-cased
        # spellings, since the dictionary stores cased word forms.
        if not is_case_sensitive:
            word = word.lower()
        self._analyse_single_word(word, callback)
        if not is_case_sensitive:
            self._analyse_single_word(word.capitalize(), callback)
            self._analyse_single_word(word.upper(), callback)

    def _analyse_single_word(self, word: str, callback: Callable[[str, int, int, float], None]):
        # similar_items also matches spellings that differ only by the
        # configured similar-letter replacements.
        for corrected_word, values in self._dawg.similar_items(word, self._similar_letters_replacements):
            for freq_index, lemmatize_rule_index, grammar_val_index in values:
                callback(corrected_word, grammar_val_index, lemmatize_rule_index,
                         self._quantized_freqs_mapping[freq_index])

    def _get_parse(self, word: str, grammar_val_index: int, lemmatize_rule_index: int, freq: float) -> Parse:
        return Parse(self._grammemes_mappings, self._grammar_value_mapping, self._lemmatize_rule_mapping,
                     word, grammar_val_index, lemmatize_rule_index, freq)

    @property
    def grammemes_count(self) -> int:
        # Width of the concatenated one-hot grammeme vector.
        return self._grammemes_matrix.shape[1]

    @property
    def lemmatize_rule_mapping(self) -> Sequence[Tuple[int, str]]:
        return self._lemmatize_rule_mapping
def main():
    """Demo: load the Russian dictionary and analyse one ambiguous word."""
    import time
    start_time = time.time()
    # Requires RussianDict.zip next to the script.
    morph = MorphoAnalyser('RussianDict.zip')
    print('Dictionary loaded in {:.2f} ms'.format((time.time() - start_time) * 1000))
    # 'берегу' is ambiguous (noun "shore" in dative/locative vs. verb "I guard").
    parses = morph.analyse_word('берегу')
    for parse in parses:
        print(parse, 'Lemma - ', parse.lemma)
    print(morph.build_grammemes_vector('берегу'))
|
997,416 | 21c18ccfedec6b27d9d23a1282fc14c3f265031b | # # Import numpy and set seed
# import numpy as np
# np.random.seed(123)
# # Use randint() to simulate a dice
# np.random.randint(1,7)
# # Use randint() again
# np.random.randint(1,7)
# #######################################################################################################
# # Numpy is imported, seed is set
# # Starting step
# step = 50
# # Roll the dice
# dice = np.random.randint(1,7)
# # Finish the control construct
# if dice <= 2 :
# step = step - 1
# elif dice >=3 and dice <=5 :
# step = step +1
# else :
# step = step + np.random.randint(1,7)
# # Print out dice and step
# print(dice)
# print(step)
# #########################################################################################
# # Numpy is imported, seed is set
# # Initialize random_walk
# random_walk = [0]
# # Complete the for loop
# for x in range(100) :
# # Set step: last element in random_walk
# step = random_walk[-1]
# # Roll the dice
# dice = np.random.randint(1,7)
# # Determine next step
# if dice <= 2:
# step = step - 1
# elif dice <= 5:
# step = step + 1
# else:
# step = step + np.random.randint(1,7)
# # append next_step to random_walk
# random_walk.append(step)
# # Print random_walk
# print(random_walk)
# #####################################################################################
# # Numpy is imported, seed is set
# # Initialization
# random_walk = [0]
# for x in range(100) :
# step = random_walk[-1]
# dice = np.random.randint(1,7)
# if dice <= 2:
# step = max(0, step - 1)
# elif dice <= 5:
# step = step + 1
# else:
# step = step + np.random.randint(1,7)
# random_walk.append(step)
# # Import matplotlib.pyplot as plt
# import matplotlib.pyplot as plt
# # Plot random_walk
# plt.plot(random_walk)
# # Show the plot
# plt.show()
# ##########################################################################
# # Numpy is imported; seed is set
# # Initialize all_walks (don't change this line)
# all_walks = []
# # Simulate random walk 10 times
# for i in range(10) :
# # Code from before
# random_walk = [0]
# for x in range(100) :
# step = random_walk[-1]
# dice = np.random.randint(1,7)
# if dice <= 2:
# step = max(0, step - 1)
# elif dice <= 5:
# step = step + 1
# else:
# step = step + np.random.randint(1,7)
# random_walk.append(step)
# # Append random_walk to all_walks
# all_walks.append(random_walk)
# # Print all_walks
# print(all_walks)
# ###########################################################################################
# # numpy and matplotlib imported, seed set.
# # initialize and populate all_walks
# all_walks = []
# for i in range(10) :
# random_walk = [0]
# for x in range(100) :
# step = random_walk[-1]
# dice = np.random.randint(1,7)
# if dice <= 2:
# step = max(0, step - 1)
# elif dice <= 5:
# step = step + 1
# else:
# step = step + np.random.randint(1,7)
# random_walk.append(step)
# all_walks.append(random_walk)
# # Convert all_walks to Numpy array: np_aw
# np_aw = np.array(all_walks)
# # Plot np_aw and show
# plt.plot(np_aw)
# plt.show()
# ##############################################################################################
# # numpy and matplotlib imported, seed set.
# # initialize and populate all_walks
# all_walks = []
# for i in range(10) :
# random_walk = [0]
# for x in range(100) :
# step = random_walk[-1]
# dice = np.random.randint(1,7)
# if dice <= 2:
# step = max(0, step - 1)
# elif dice <= 5:
# step = step + 1
# else:
# step = step + np.random.randint(1,7)
# random_walk.append(step)
# all_walks.append(random_walk)
# # Convert all_walks to Numpy array: np_aw
# np_aw = np.array(all_walks)
# # Plot np_aw and show
# plt.plot(np_aw)
# plt.show()
# # Clear the figure
# plt.clf()
# # Transpose np_aw: np_aw_t
# np_aw_t = np.transpose(np_aw)
# # Plot np_aw_t and show
# plt.plot(np_aw_t)
# plt.show()
# numpy and matplotlib imported, seed set
import numpy as np
import matplotlib.pyplot as plt

# Simulate the dice-driven random walk 500 times, 100 rolls per walk.
all_walks = []
for walk_idx in range(500):
    walk = [0]
    for _ in range(100):
        position = walk[-1]
        roll = np.random.randint(1, 7)
        if roll <= 2:
            # Step down, but never below the starting level.
            position = max(0, position - 1)
        elif roll <= 5:
            position = position + 1
        else:
            # A six grants a bonus roll forward.
            position = position + np.random.randint(1, 7)
        # 0.1% chance of falling all the way back to the start.
        if np.random.rand() <= 0.001:
            position = 0
        walk.append(position)
    all_walks.append(walk)

# Transpose so each column is one walk, take the final positions,
# and show their distribution as a histogram.
np_aw_t = np.transpose(np.array(all_walks))
ends = np_aw_t[-1]
plt.hist(ends)
plt.show()
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.probability import FreqDist

# Banner (user-facing, Russian): topic-detection system for unstructured text.
print ("********************************************************************")
print ("*                                                                  *")
print ("*                 СИСТЕМА ОПРЕДЕЛЕНИЯ ТЕМАТИКИ                     *")
print ("*                НЕСТРУКТУРИРОВАННОГО ТЕКСТА                       *")
print ("*                                                                  *")
print ("*                              (с) Руткевич Р. 2020                *")
print ("********************************************************************")
print ("База входных документов: dbIn")
print ("База выходных документов: dbOut")
print ("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

# Raw string avoids relying on invalid-escape leniency in the Windows path;
# the context manager guarantees the handle is closed even on read errors.
with open(r'D:\Text Analyse\infodocs.txt', 'r', encoding='utf-8') as dbIn:  # dbIn
    text = dbIn.read()
    doc_name = dbIn.name
inputtext = text
print ("Агент aIn: документ * " + doc_name + " * считан из dbIn")
print ("Начало парсинга текста")

# Tokenize on word characters only, then drop Russian stop words.
regexpression = RegexpTokenizer(r'\w+')
stop_words = set(stopwords.words("russian"))
text = text.lower()  # normalize case before tokenizing
text_tokenized = regexpression.tokenize(text)
text = [i for i in text_tokenized if i not in stop_words]  # stop-word removal

# The five most frequent remaining words approximate the document's topic.
top_words = FreqDist(text).most_common(5)
first_word, _first_count = top_words[0]    # most frequent word and its count
second_word, _second_count = top_words[1]  # second most frequent word

print ("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print('Название документа: ' + doc_name)
print('Тематика: ' + first_word + " " + second_word)
print('Ключевые слова: ' + str(top_words))
print ("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print('Исходный текст: ')
print(inputtext)
|
997,418 | 41071f6f9bb8c6d5a5dba810f85672e2e34d2861 | import os
from unittest.mock import patch
from pytest import fixture, mark
from ..bitbucket import BitbucketOAuthenticator
from .mocks import setup_oauth_mock
def user_model(username):
    """Return a minimal Bitbucket user payload for the mock API."""
    return dict(username=username)
@fixture
def bitbucket_client(client):
    # Configure the shared mock HTTP client to emulate Bitbucket's OAuth2
    # endpoints on both the main site and the API host.
    setup_oauth_mock(client,
                     host=['bitbucket.org', 'api.bitbucket.org'],
                     access_token_path='/site/oauth2/access_token',
                     user_path='/2.0/user',
                     )
    return client
async def test_bitbucket(bitbucket_client):
    """Plain login: authenticate() yields the username plus auth_state."""
    authenticator = BitbucketOAuthenticator()
    handler = bitbucket_client.handler_for_user(user_model('yorba'))
    user_info = await authenticator.authenticate(handler)
    assert sorted(user_info) == ['auth_state', 'name']
    assert user_info['name'] == 'yorba'
    state = user_info['auth_state']
    assert 'access_token' in state
    assert 'bitbucket_user' in state
async def test_team_whitelist(bitbucket_client):
    """Whitelisted-team members authenticate; everyone else is rejected."""
    client = bitbucket_client
    authenticator = BitbucketOAuthenticator()
    authenticator.bitbucket_team_whitelist = ['blue']
    # Fake team rosters served by the mocked /2.0/teams endpoint below.
    teams = {
        'red': ['grif', 'simmons', 'donut', 'sarge', 'lopez'],
        'blue': ['tucker', 'caboose', 'burns', 'sheila', 'texas'],
    }
    def list_teams(request):
        # Resolve the requesting user from the bearer token, then answer
        # with the teams that user belongs to, in Bitbucket's paged shape.
        token = request.headers['Authorization'].split(None, 1)[1]
        username = client.access_tokens[token]['username']
        values = []
        for team, members in teams.items():
            if username in members:
                values.append({'username': team})
        return {
            'values': values
        }
    client.hosts['api.bitbucket.org'].append(
        ('/2.0/teams', list_teams)
    )
    # 'caboose' is on team blue -> authenticated.
    handler = client.handler_for_user(user_model('caboose'))
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'caboose'
    # 'donut' is only on team red -> rejected.
    handler = client.handler_for_user(user_model('donut'))
    name = await authenticator.authenticate(handler)
    assert name is None
    # reverse it, just to be safe
    # NOTE(review): this assigns `team_whitelist`, not `bitbucket_team_whitelist`
    # as above — presumably an accepted alias; confirm both spellings work.
    authenticator.team_whitelist = ['red']
    handler = client.handler_for_user(user_model('caboose'))
    name = await authenticator.authenticate(handler)
    assert name is None
    handler = client.handler_for_user(user_model('donut'))
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'donut'
|
997,419 | b77c94ac6d5265fc0f6c7d8722a15682d39f20fc | # This script is copyright 2018 Jordan LeDoux and may not be used or distributed without permission
import praw
import settings
import calendar
import SlackWebHook
import sys
from datetime import datetime
from datetime import timedelta
from datetime import timezone
# Build an authenticated Reddit client from the configured bot account.
reddit = praw.Reddit(client_id=settings.REDDIT_CLIENT_ID,
                     client_secret=settings.REDDIT_CLIENT_SECRET,
                     password=settings.REDDIT_PASSWORD,
                     username=settings.REDDIT_USERNAME,
                     user_agent=settings.REDDIT_USER_AGENT)
subreddit = reddit.subreddit(settings.REDDIT_SUBREDDIT)
HookBot = SlackWebHook.WebHook(settings=settings)

# Optional CLI arguments: sys.argv[1] selects the run mode ('dryrun' or
# 'othersub'); sys.argv[2] names the target subreddit in 'othersub' mode.
if len(sys.argv) > 1:
    runCommand = sys.argv[1]
    if len(sys.argv) > 2:
        otherSub = sys.argv[2]
    else:
        otherSub = None
else:
    runCommand = None
    otherSub = None

# Announce the chosen mode to Slack before doing any heavy work.
if runCommand is not None and runCommand == 'dryrun':
    status_text = 'ActualBernieBot is about to compile the weekly transparency report in *dryrun* mode'
elif runCommand is not None and runCommand == 'othersub' and otherSub is not None:
    status_text = 'ActualBernieBot is about to compile an activity report for a different subreddit: r/'+otherSub
    subreddit = reddit.subreddit(otherSub)
    print('Report for another sub selected, some data not collected')
else:
    status_text = 'ActualBernieBot is about to compile the weekly transparency report and post it to the subreddit'
HookBot.post_status(
    'ActualBernieBot Status Message',
    status_text,
    settings.SLACK_STATUS_CHANNEL
)
# Per-user post/comment counters accumulated over the reporting week.
submitters = {}
commenters = {}

# Walk backwards one day at a time from today (UTC, midnight-aligned) and
# build weekly_data: index 0 is the current partial week, index 1 is the
# last complete week (the one the report covers). Each week tracks traffic,
# mod-action and activity totals, plus the same breakdown per weekday.
build_week = True
current_dt = datetime.utcnow()
process_dt = datetime(year=current_dt.year, month=current_dt.month, day=current_dt.day, hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc)
weekly_data = {}
week_num = 0
day_step = timedelta(days=-1)
print('Building Week Object')
while build_week:
    if process_dt.weekday() == 6:
        # Hitting a Sunday while walking backwards marks a week boundary.
        week_num += 1
        if week_num == 2:
            build_week = False
            break
    if week_num not in weekly_data:
        weekly_data[week_num] = {
            'uniques': 0,
            'views': 0,
            'subs': 0,
            'days': {
                0: {},
                1: {},
                2: {},
                3: {},
                4: {},
                5: {},
                6: {}
            },
            'actions': {
                'total': 0,
                'removals': 0,
                'bans': 0,
                'unbans': 0,
                'approvals': 0,
                'flair': 0,
                'sticky': 0,
                'other': 0
            },
            'activity': {
                'top_post': None,
                'top_comment': None,
                'most_discussed': None,
                'total_posts': 0,
                'total_comments': 0
            }
        }
    weekly_data[week_num]['days'][process_dt.weekday()] = {
        'uniques': 0,
        'views': 0,
        'subs': 0,
        'date': process_dt,
        'actions': {
            'total': 0,
            'removals': 0,
            'bans': 0,
            'unbans': 0,
            'approvals': 0,
            'flair': 0,
            'sticky': 0,
            'other': 0
        },
        'activity': {
            'top_post': None,
            'top_comment': None,
            'most_discussed': None,
            'total_posts': 0,
            'total_comments': 0
        }
    }
    process_dt = process_dt + day_step
print('Week Object Built')

# Timestamp window for the report: start_utc_ts is midnight Monday of the
# current week, end_utc_ts is midnight Monday of the reported (previous)
# week — i.e. the report covers (end_utc_ts, start_utc_ts].
end_utc_ts = calendar.timegm(weekly_data[1]['days'][0]['date'].timetuple())
start_utc_ts = calendar.timegm(weekly_data[0]['days'][0]['date'].timetuple())
week_num = 0
# Traffic stats and the moderation log both require mod access to the home
# subreddit, so both are skipped entirely in 'othersub' mode.
if runCommand is None or runCommand == 'dryrun':
    print('Processing Subreddit Traffic')
    traffic = subreddit.traffic()
    # Each traffic['day'] row is (epoch, uniques, pageviews, subscriptions);
    # iteration is assumed newest-first, mirroring the backwards week walk —
    # TODO confirm ordering against the praw/Reddit API.
    for day in traffic['day']:
        day_dt = datetime.utcfromtimestamp(day[0])
        day_uniques = day[1]
        day_views = day[2]
        day_subs = day[3]
        if day_dt.weekday() == 6:
            week_num += 1
            if week_num == 2:
                break
        weekly_data[week_num]['uniques'] += day_uniques
        weekly_data[week_num]['views'] += day_views
        weekly_data[week_num]['subs'] += day_subs
        weekly_data[week_num]['days'][day_dt.weekday()]['uniques'] += day_uniques
        weekly_data[week_num]['days'][day_dt.weekday()]['views'] += day_views
        weekly_data[week_num]['days'][day_dt.weekday()]['subs'] += day_subs
    print('Subreddit Traffic Processed')
    count = 0
    print('Processing Subreddit Modlog')
    # The mod log lists newest entries first: skip current-week entries,
    # tally the reported week per action type, and stop at the first entry
    # older than the reporting window.
    for log_entry in subreddit.mod.log(limit=5000):
        count += 1
        log_dt = datetime.utcfromtimestamp(log_entry.created_utc)
        if log_entry.created_utc > start_utc_ts:
            continue
        if log_entry.created_utc > end_utc_ts:
            if log_entry.action == 'banuser':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['bans'] += 1
                weekly_data[1]['actions']['bans'] += 1
            elif log_entry.action == 'unbanuser':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['unbans'] += 1
                weekly_data[1]['actions']['unbans'] += 1
            elif log_entry.action == 'removelink':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['removals'] += 1
                weekly_data[1]['actions']['removals'] += 1
            elif log_entry.action == 'removecomment':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['removals'] += 1
                weekly_data[1]['actions']['removals'] += 1
            elif log_entry.action == 'approvelink' or log_entry.action == 'approvecomment':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['approvals'] += 1
                weekly_data[1]['actions']['approvals'] += 1
            elif log_entry.action == 'editflair':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['flair'] += 1
                weekly_data[1]['actions']['flair'] += 1
            elif log_entry.action == 'sticky' or log_entry.action == 'unsticky':
                weekly_data[1]['days'][log_dt.weekday()]['actions']['sticky'] += 1
                weekly_data[1]['actions']['sticky'] += 1
            else:
                weekly_data[1]['days'][log_dt.weekday()]['actions']['other'] += 1
                weekly_data[1]['actions']['other'] += 1
            weekly_data[1]['days'][log_dt.weekday()]['actions']['total'] += 1
            weekly_data[1]['actions']['total'] += 1
        else:
            break
    print(str(count)+' Log Entries Processed')
# Scan recent submissions (newest first) and their full comment trees to
# collect the reported week's totals, top post/comment and most-discussed
# post, both overall and per weekday.
sub_count = 0
com_count = 0
print('Processing Subreddit Activity')
for submission in subreddit.new(limit=1000):
    sub_count += 1
    sub_dt = datetime.utcfromtimestamp(submission.created_utc)
    if sub_count % 10 == 0:
        print(str(sub_count)+' Submissions Processed So Far')
    if submission.created_utc > start_utc_ts:
        continue
    if submission.created_utc > end_utc_ts:
        if submission.author is not None:
            if submission.author.name not in submitters:
                submitters[submission.author.name] = 0
            submitters[submission.author.name] += 1
        weekly_data[1]['activity']['total_posts'] += 1
        weekly_data[1]['days'][sub_dt.weekday()]['activity']['total_posts'] += 1
        # Track best-scoring post overall and for the submission's weekday.
        if weekly_data[1]['activity']['top_post'] is None:
            weekly_data[1]['activity']['top_post'] = submission
        elif weekly_data[1]['activity']['top_post'].score < submission.score:
            weekly_data[1]['activity']['top_post'] = submission
        if weekly_data[1]['days'][sub_dt.weekday()]['activity']['top_post'] is None:
            weekly_data[1]['days'][sub_dt.weekday()]['activity']['top_post'] = submission
        elif weekly_data[1]['days'][sub_dt.weekday()]['activity']['top_post'].score < submission.score:
            weekly_data[1]['days'][sub_dt.weekday()]['activity']['top_post'] = submission
        # Track the most-commented post the same way.
        if weekly_data[1]['activity']['most_discussed'] is None:
            weekly_data[1]['activity']['most_discussed'] = submission
        elif weekly_data[1]['activity']['most_discussed'].num_comments < submission.num_comments:
            weekly_data[1]['activity']['most_discussed'] = submission
        if weekly_data[1]['days'][sub_dt.weekday()]['activity']['most_discussed'] is None:
            weekly_data[1]['days'][sub_dt.weekday()]['activity']['most_discussed'] = submission
        elif weekly_data[1]['days'][sub_dt.weekday()]['activity']['most_discussed'].num_comments < submission.num_comments:
            weekly_data[1]['days'][sub_dt.weekday()]['activity']['most_discussed'] = submission
        # Expand the full comment tree (can be slow for large threads).
        submission.comments.replace_more(limit=None)
        for comment in submission.comments.list():
            if comment.author is not None:
                if comment.author.name not in commenters:
                    commenters[comment.author.name] = 0
                commenters[comment.author.name] += 1
            com_count += 1
            com_dt = datetime.utcfromtimestamp(comment.created_utc)
            if com_count % 50 == 0:
                print(str(com_count)+' Comments Processed So Far')
            if comment.created_utc > start_utc_ts:
                continue
            if comment.created_utc > end_utc_ts:
                if weekly_data[1]['activity']['top_comment'] is None:
                    weekly_data[1]['activity']['top_comment'] = comment
                elif weekly_data[1]['activity']['top_comment'].score < comment.score:
                    weekly_data[1]['activity']['top_comment'] = comment
                if weekly_data[1]['days'][com_dt.weekday()]['activity']['top_comment'] is None:
                    weekly_data[1]['days'][com_dt.weekday()]['activity']['top_comment'] = comment
                elif weekly_data[1]['days'][com_dt.weekday()]['activity']['top_comment'].score < comment.score:
                    weekly_data[1]['days'][com_dt.weekday()]['activity']['top_comment'] = comment
                weekly_data[1]['activity']['total_comments'] += 1
                weekly_data[1]['days'][com_dt.weekday()]['activity']['total_comments'] += 1
            else:
                # Too-old comment: skip it but keep scanning the thread.
                continue
    else:
        # Submissions are newest-first; everything from here on is too old.
        break
print(str(sub_count)+' Submissions Processed')
print(str(com_count)+' Comments Processed')
# Order contributors by volume, descending (dicts keep insertion order).
sorted_commenters = {}
for commenter, comment_count in sorted(commenters.items(), key=lambda x:x[1], reverse=True):
    sorted_commenters[commenter] = comment_count
sorted_submitters = {}
for poster, post_count in sorted(submitters.items(), key=lambda x:x[1], reverse=True):
    sorted_submitters[poster] = post_count
# Print and collect the top five submitters and commenters as numbered
# lists for the Slack message.
print('Top 5 Submitters')
t5count = 1
t5post = ''
for submitter, submission_count in sorted_submitters.items():
    if t5count > 5:
        break
    print(str(submitter) + ': ' + str(submission_count))
    t5post = t5post + str(t5count) + '. ' + str(submitter) + ': ' + str(submission_count) + '\n'
    t5count += 1
print('Top 5 Commenters')
t5count = 1
t5com = ''
for commenter, comment_count in sorted_commenters.items():
    if t5count > 5:
        break
    print(str(commenter) + ': ' + str(comment_count))
    t5com = t5com + str(t5count) + '. ' + str(commenter) + ': ' + str(comment_count) + '\n'
    t5count += 1
print('Compiling Reports')
# Render the per-day activity table (markdown), appending a totals row
# after Sunday since days iterate in insertion order 0..6.
# NOTE(review): a day with no qualifying post/comment leaves top_post /
# top_comment / most_discussed as None, which would raise AttributeError
# below — confirm quiet days cannot occur, or guard for None.
activity_table = '| Day | # Posts | # Comments | Top Post | Top Comment | Most Discussed |\n'
activity_table = activity_table + '|---|---|---|---|---|---|\n'
for day, info in weekly_data[1]['days'].items():
    if day == 0:
        activity_table = activity_table + '| Monday | '
    elif day == 1:
        activity_table = activity_table + '| Tuesday | '
    elif day == 2:
        activity_table = activity_table + '| Wednesday | '
    elif day == 3:
        activity_table = activity_table + '| Thursday | '
    elif day == 4:
        activity_table = activity_table + '| Friday | '
    elif day == 5:
        activity_table = activity_table + '| Saturday | '
    elif day == 6:
        activity_table = activity_table + '| Sunday | '
    activity_table = activity_table + '{:,}'.format(info['activity']['total_posts']) + ' | '
    activity_table = activity_table + '{:,}'.format(info['activity']['total_comments']) + ' | '
    activity_table = activity_table + ' [Post](https://www.reddit.com' + info['activity']['top_post'].permalink + ') Score: ' + '{:,}'.format(info['activity']['top_post'].score) + ' | '
    activity_table = activity_table + ' [Comment](https://www.reddit.com' + info['activity']['top_comment'].permalink + ') Score: ' + '{:,}'.format(info['activity']['top_comment'].score) + ' | '
    activity_table = activity_table + ' [Most Discussed](https://www.reddit.com' + info['activity']['most_discussed'].permalink + ') Comments: ' + '{:,}'.format(info['activity']['most_discussed'].num_comments) + ' |\n'
    if day == 6:
        activity_table = activity_table + '| **Totals** | '
        activity_table = activity_table + '{:,}'.format(weekly_data[1]['activity']['total_posts']) + ' | '
        activity_table = activity_table + '{:,}'.format(weekly_data[1]['activity']['total_comments']) + ' | '
        activity_table = activity_table + ' [Post](https://www.reddit.com' + weekly_data[1]['activity']['top_post'].permalink + ') Score: ' + '{:,}'.format(weekly_data[1]['activity']['top_post'].score) + ' | '
        activity_table = activity_table + ' [Comment](https://www.reddit.com' + weekly_data[1]['activity']['top_comment'].permalink + ') Score: ' + '{:,}'.format(weekly_data[1]['activity']['top_comment'].score) + ' | '
        activity_table = activity_table + ' [Most Discussed](https://www.reddit.com' + weekly_data[1]['activity']['most_discussed'].permalink + ') Comments: ' + '{:,}'.format(weekly_data[1]['activity']['most_discussed'].num_comments) + ' |\n'
# Human-readable span of the reported week, e.g. "Mar 05 - Mar 11".
date_range = weekly_data[1]['days'][0]['date'].strftime('%b %d') + ' - ' + weekly_data[1]['days'][6]['date'].strftime('%b %d')
# Mod-action and traffic tables (and the final report body) are only built
# when running against the home subreddit.
if runCommand is None or runCommand == 'dryrun':
    actions_table = '| Day | Bans | Unbans | Removals | Approvals | Flair | Sticky | Other | **Total** |\n'
    actions_table = actions_table + '|---|---|---|---|---|---|---|---|---|\n'
    traffic_table = '| Day | Uniques | Views | Subs |\n'
    traffic_table = traffic_table + '|---|---|---|---|\n'
    # NOTE(review): these three totals are never updated or read below —
    # the totals row uses weekly_data[1] directly; likely dead code.
    total_uniques = 0
    total_views = 0
    total_subs = 0
    for day, info in weekly_data[1]['days'].items():
        if day == 0:
            actions_table = actions_table + '| Monday | '
            traffic_table = traffic_table + '| Monday | '
        elif day == 1:
            actions_table = actions_table + '| Tuesday | '
            traffic_table = traffic_table + '| Tuesday | '
        elif day == 2:
            actions_table = actions_table + '| Wednesday | '
            traffic_table = traffic_table + '| Wednesday | '
        elif day == 3:
            actions_table = actions_table + '| Thursday | '
            traffic_table = traffic_table + '| Thursday | '
        elif day == 4:
            actions_table = actions_table + '| Friday | '
            traffic_table = traffic_table + '| Friday | '
        elif day == 5:
            actions_table = actions_table + '| Saturday | '
            traffic_table = traffic_table + '| Saturday | '
        elif day == 6:
            actions_table = actions_table + '| Sunday | '
            traffic_table = traffic_table + '| Sunday | '
        actions_table = actions_table + '{:,}'.format(info['actions']['bans']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['unbans']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['removals']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['approvals']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['flair']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['sticky']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['other']) + ' | '
        actions_table = actions_table + '{:,}'.format(info['actions']['total']) + ' |\n'
        traffic_table = traffic_table + '{:,}'.format(info['uniques']) + ' | '
        traffic_table = traffic_table + '{:,}'.format(info['views']) + ' | '
        traffic_table = traffic_table + '{:,}'.format(info['subs']) + ' |\n'
        if day == 6:
            actions_table = actions_table + '| **Totals** | '+'{:,}'.format(weekly_data[1]['actions']['bans'])+' | '+'{:,}'.format(weekly_data[1]['actions']['unbans'])+' | '+'{:,}'.format(weekly_data[1]['actions']['removals'])+' | '+'{:,}'.format(weekly_data[1]['actions']['approvals'])+' | '+'{:,}'.format(weekly_data[1]['actions']['flair'])+' | '+'{:,}'.format(weekly_data[1]['actions']['sticky'])+' | '+'{:,}'.format(weekly_data[1]['actions']['other'])+' | '+'{:,}'.format(weekly_data[1]['actions']['total'])+' |\n'
            traffic_table = traffic_table + '| **Totals** | '+'{:,}'.format(weekly_data[1]['uniques'])+' | '+'{:,}'.format(weekly_data[1]['views'])+' | '+'{:,}'.format(weekly_data[1]['subs'])+' |\n'
    # Fill the markdown template with the three tables and the date range.
    log_report = open("PostTemplates/modLogReport.txt").read()
    log_report = log_report.replace('{{mod_activity}}', actions_table)
    log_report = log_report.replace('{{traffic_report}}', traffic_table)
    log_report = log_report.replace('{{activity_report}}', activity_table)
    log_report = log_report.replace('{{date_range}}', date_range)
else:
    log_report = None
# Dispatch on run mode: dryrun prints locally and reports to Slack;
# othersub posts a Slack activity summary; the default posts the report
# to the subreddit, flairs/stickies it, and announces it on Slack.
if runCommand is not None and runCommand == 'dryrun' and log_report is not None:
    print(log_report)
    HookBot.post_status(
        'ActualBernieBot Status Message',
        'ActualBernieBot compiled the transparency report in *dryrun* mode, so it was not posted to the subreddit',
        settings.SLACK_STATUS_CHANNEL
    )
elif runCommand is not None and runCommand == 'othersub' and otherSub is not None:
    print(activity_table)
    posts_per_day = weekly_data[1]['activity']['total_posts']/7
    comments_per_day = weekly_data[1]['activity']['total_comments']/7
    avg_com_per_post = weekly_data[1]['activity']['total_comments']/weekly_data[1]['activity']['total_posts']
    HookBot.post_complex_message(
        pretext='Activity report for r/'+str(otherSub)+' compiled',
        text='Activity report for '+str(date_range),
        fields=[
            {
                'title': 'Total Posts',
                'value': '{:,}'.format(weekly_data[1]['activity']['total_posts']),
                'short': True
            },
            {
                'title': 'Total Comments',
                'value': '{:,}'.format(weekly_data[1]['activity']['total_comments']),
                'short': True
            },
            {
                'title': 'Best Post Score',
                'value': '{:,}'.format(weekly_data[1]['activity']['top_post'].score),
                'short': True
            },
            {
                'title': 'Best Comment Score',
                'value': '{:,}'.format(weekly_data[1]['activity']['top_comment'].score),
                'short': True
            },
            {
                'title': 'Posts Per Day',
                'value': '{:.2f}'.format(posts_per_day),
                'short': True
            },
            {
                'title': 'Comments Per Day',
                'value': '{:.2f}'.format(comments_per_day),
                'short': True
            },
            {
                'title': 'Comments Per Post',
                'value': '{:.2f}'.format(avg_com_per_post),
                'short': True
            },
            {
                'title': 'Most Discussed Post',
                'value': '{:,}'.format(weekly_data[1]['activity']['most_discussed'].num_comments)+' Comments',
                'short': True
            },
            {
                'title': 'Top 5 Submitters',
                'value': t5post,
                'short': False
            },
            {
                'title': 'Top 5 Commenters',
                'value': t5com,
                'short': False
            }
        ],
        color='good',
        channel=settings.SLACK_STATUS_CHANNEL
    )
else:
    report_submission = subreddit.submit(title='Weekly Mod Transparency Report: '+date_range, selftext=log_report)
    report_submission.disable_inbox_replies()
    report_submission.flair.select(settings.SUBREDDIT_META_FLAIR_ID, 'Transparency')
    report_submission.mod.approve()
    report_submission.mod.distinguish()
    report_submission.mod.sticky(state=True, bottom=True)
    report_submission.mod.ignore_reports()
    HookBot.post_submission_link(
        username=report_submission.author.name, title=report_submission.title,
        permalink=report_submission.permalink, pretext='The transparency report has been compiled and posted',
        color='good', channel=settings.SLACK_STATUS_CHANNEL
    )
|
997,420 | c3a094945ab42ac3b7bb580a444c20feb14bc0ed | # from video_app.constants import VIDEOS
# from elasticsearch_dsl.connections import connections
# from elasticsearch_dsl import Document, Text, Date
# from elasticsearch_dsl.field import Integer
# from elasticsearch.helpers import bulk
# from elasticsearch import Elasticsearch
# from . import models
# connections.create_connection(hosts=['localhost'])
# class video_index(Document):
# id = Integer()
# videoId = Text()
# title = Text()
# date = Date()
# description = Text()
# photo = Text()
# url = Text()
# class Index:
# name = 'video_index'
# def bulk_indexing():
# video_index.init()
# es = Elasticsearch()
# bulk(client=es, actions=(b.indexing() for b in getattr(models,VIDEOS).objects.all().iterator())) |
997,421 | 7038e70b1f47de910b1939abaf275cddb4ecbcc9 | from collections import Counter
from datetime import datetime
from datetime import timedelta
from datetime import date
import pytz
import github3
from github_project_management import constants as GPMC
from github_project_management import get_server
from github_project_management import list_issues
from github_project_management import milestone_url
def weekly(
gh_user,
gh_password,
gh_api_url,
weekly_config,
configs,
group_name,
template,
# Defaults: only display the GH issue and format dates in ISO style.
test=True,
date_format=lambda x: x.strftime('%Y-%m-%d')):
# Make sure that a template can be parsed.
body_template = 'No file found for %s' % template
if template:
with open(template, 'r') as tf:
body_template = tf.read()
# Track all issues in the timeframe.
tz = pytz.timezone('US/Pacific')
today = tz.localize(datetime.today())
today -= timedelta(seconds=(today.hour * 60 * 60 + today.minute * 60 + today.second), microseconds=today.microsecond)
current_week_monday = today - timedelta(days=today.weekday())
current_week_sunday = current_week_monday + timedelta(days=6)
# make the weekly title. use it for a unique check.
def suffix(d):
    # 11th-13th are irregular in English; otherwise the last digit
    # decides between st/nd/rd/th.
    if 11 <= d <= 13:
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(d % 10, 'th')
def custom_strftime(format, t):
    # Expand the literal '{s}' placeholder into the ordinal day of month,
    # e.g. custom_strftime('%b {s}', t) -> 'Mar 3rd'.
    return t.strftime(format).replace('{s}', str(t.day) + suffix(t.day))
def make_title(name, start_date, end_date):
    # Weekly issue title with ordinal dates, e.g. "Team Weekly Jan 1st - Jan 7th".
    return "%s Weekly %s - %s" % (
        name,
        custom_strftime('%b {s}', start_date),
        custom_strftime('%b {s}', end_date))
current_week_title = make_title(group_name, current_week_monday, current_week_sunday)
# Iterate through all the issues that match the configs.
milestones = []
rows = []
for row in list_issues(gh_user, gh_password, gh_api_url, configs, current_week_monday, current_week_sunday):
issue = row[GPMC.ISSUE]
# Track all milestones with at least one open ticket.
if issue.state == 'open':
if issue.milestone:
milestones.append((issue.milestone.title,
milestone_url(gh_api_url, row[GPMC.REPO_USER], row[GPMC.REPO_NAME], issue.milestone.title)))
else:
milestones.append((None, None))
# Skip if the create or close date doesn't make sense given the time range.
if issue.closed_at and issue.closed_at < current_week_monday:
continue
if issue.created_at > current_week_sunday:
continue
# Only show issues in the weekly that have had recent comments.
if row[GPMC.RECENT_COMMENTS] or issue.created_at >= current_week_monday:
rows.append(row)
# Find this week's weekly and also auto-close all old weeklies.
weekly_labels = weekly_config['labels']
weekly_repo_user, weekly_repo_name = weekly_config['repositories'][0].split('/')
weekly_issue = None
for row in list_issues(gh_user, gh_password, gh_api_url, [weekly_config], current_week_monday, current_week_sunday):
issue = row[GPMC.ISSUE]
# Track if this week or last week's issue exists.
if issue.title == current_week_title:
weekly_issue = issue
else:
if not issue.is_closed():
if not test:
print 'Closed old weekly: (%s, %s) #%d' % (weekly_repo_user, weekly_repo_name, issue.number)
issue.close()
else:
print 'Test mode. Would have closed old weekly: (%s, %s) #%d' % (weekly_repo_user, weekly_repo_name, issue.number)
# Build up the body of the current Weekly GH issue.
# First show the executive summary.
executive_summary_comments = []
if weekly_issue:
for com in weekly_issue.iter_comments():
if com.body and com.body.startswith('Executive Summary:'):
executive_summary_comments.append(com.body[len('Executive Summary:'):])
executive_body = ''
if not executive_summary_comments:
executive_body += '- No executive summary comments.\n'
else:
for esc in executive_summary_comments:
executive_body += '- %s\n' % esc
rows = sorted(rows, key=lambda x: x[GPMC.RECENT_COMMENTS], reverse=True)
# Group all issues by the set of labels.
from collections import OrderedDict
config_tuples = [
(config.get('title', None),
(config.get('labels', None),
config.get('link', None),
config.get('description', None))) for config in configs]
title2meta = OrderedDict(config_tuples)
title2issues = {}
for title in title2meta.iterkeys():
title2issues[title] = []
for row in rows:
title2issues[row[GPMC.GROUPING_TITLE]].append(row)
projects_body = ''
for title, (lables, link, description) in title2meta.iteritems():
# Make the per-project header.
if not title:
title = 'All Issues'
if link:
projects_body += '\n<hr/>\n#### [%s](%s)\n' % (title, link)
else:
projects_body += '\n<hr/>\n#### %s\n' % title
if description:
projects_body += '%s\n' % description
# Show all active GH issues as a list sorted by most comments.
open_issues = [row for row in rows if row[GPMC.STATE] == 'open' and row[GPMC.GROUPING_TITLE] == title]
if open_issues:
projects_body += '\n##### Active this week\n'
for issue in open_issues:
num_comments = issue[GPMC.RECENT_COMMENTS]
if num_comments:
projects_body += '- %d comment%s: [%s](%s)\n' % (
num_comments,
's' if num_comments > 1 else '',
issue[GPMC.TITLE],
issue[GPMC.URL])
else:
projects_body += '- New issue: [%s](%s)\n' % (
issue[GPMC.TITLE],
issue[GPMC.URL])
# Show all closed GH issues as a list sorted by most comments.
closed_issues = [row for row in rows if row[GPMC.STATE] != 'open' and row[GPMC.GROUPING_TITLE] == title]
if closed_issues:
projects_body += '\n##### Closed this week\n'
for issue in closed_issues:
projects_body += '- %d comment%s: [%s](%s)\n' % (
issue[GPMC.RECENT_COMMENTS],
's' if issue[GPMC.RECENT_COMMENTS] > 1 else '',
issue[GPMC.TITLE],
issue[GPMC.URL])
if not open_issues and not closed_issues:
projects_body += "- No comment activity this week.\n"
milestone_body = ''
for (title, url), count in Counter(milestones).most_common():
if title:
milestone_body += '- %d open issue%s: [%s](%s)\n' % (count, 's' if count > 1 else '', title, url)
else:
milestone_body += '- %d open issue%s assigned to a milestone\n' % (count, "s aren't" if count > 1 else " isn't")
body = body_template.format(
executive=executive_body.encode('utf-8').strip(),
projects=projects_body.encode('utf-8').strip(),
milestones=milestone_body.encode('utf-8').strip())
# Always run in test mode by default.
if test:
print('### Test Mode. Not posting to GH ###')
print(current_week_title)
print(body)
return
if not weekly_issue:
# Login to the GH enterprise server.
gh = get_server(gh_api_url)
gh.login(gh_user, gh_password)
repo = gh.repository(weekly_repo_user, weekly_repo_name)
print 'Making new issue'
weekly_issue = repo.create_issue(
current_week_title,
body=body,
assignee=gh_user,
labels=weekly_labels)
if not weekly_issue:
raise ValueException("Can't find or create weekly issue.")
else:
print 'Lazy made GH issue #%d as weekly.' % weekly_issue.number
# If the issue exists, update it.
else:
if weekly_issue.is_closed():
weekly_issue.reopen()
weekly_issue.edit(
current_week_title,
body,
assignee=gh_user,
labels=weekly_labels)
print 'Updated: %s/%s#%d' % (weekly_repo_user, weekly_repo_name, weekly_issue.number)
def main():
    """Run the weekly update code.

    Optional parameters (prompted for unless set):
      -gh_user  GitHub login name. Can also be set as env variable of same name.
      -gh_pass  GitHub password. Can also be set as env variable of same name.
      --test    Display the final markdown instead of posting to GitHub and
                closing old weekly issues.

    Required parameters:
      -gh_api    GitHub URL for the enterprise instance being used.
      -template  Markdown template for the weekly (optional override; falls
                 back to the JSON config).
      -config    Path to the JSON formatted configuration.
    """
    import sys
    import os
    import json
    import argparse
    from getpass import getpass  # bug fix: getpass was used but never imported
    parser = argparse.ArgumentParser()
    parser.add_argument('-gh_user', action="store", dest='gh_user', help='GitHub login name. Can also set as env variable of same name.')
    parser.add_argument('-gh_pass', action="store", dest='gh_pass', help='GitHub password. Can also set as env variable of same name.')
    parser.add_argument('-gh_api', action="store", dest='gh_api', help='GitHub URL for the enterprise instance being used.')
    parser.add_argument('-template', action="store", dest='template', help='Markdown template for the weekly.')
    parser.add_argument('-config', action="store", dest='config', help='JSON formatted configuration.')
    parser.add_argument('--test', dest='test', action='store_true')
    args = parser.parse_args(sys.argv[1:])
    print('Running weekly code')
    # Resolve credentials: CLI flag first, then environment, then interactive
    # prompt. Bug fix: the original read `sys.env`, which does not exist in any
    # Python version; environment variables live in os.environ.
    if args.gh_user:
        gh_user = args.gh_user
    elif 'gh_user' in os.environ:
        gh_user = os.environ['gh_user']
    else:
        gh_user = raw_input('GitHub login:')  # Python 2 builtin (file uses py2 print syntax elsewhere)
    if args.gh_pass:
        gh_pass = args.gh_pass
    elif 'gh_pass' in os.environ:
        gh_pass = os.environ['gh_pass']
    else:
        gh_pass = getpass('GitHub password:')
    gh_api = args.gh_api
    # Parse all the other config from the JSON. Should have the template in there too.
    # Bug fix: close the config file deterministically with a context manager.
    with open(args.config, 'r') as jf:
        config_json = json.load(jf)
    weekly_config = config_json['weekly_config']
    configs = config_json['projects']
    group_name = config_json['group_name']
    # Allow overriding of the template. Fall back on assuming it is in the JSON.
    template = args.template if args.template else config_json['template']
    # Run the weekly update.
    weekly(
        gh_user,
        gh_pass,
        gh_api,
        weekly_config,
        configs,
        group_name,
        template=template,
        test=bool(args.test))
if __name__ == "__main__":
    main()
|
997,422 | c5f89ef75c670988498dbeb7002d24f2037855e5 | import matplotlib.pyplot as plt
# Fixed y-values for the horizontal guide lines. Their meaning is not
# documented here -- presumably measured counts; confirm with the author.
list1 = [4793,10776,15302,6373]
list2 = [0,1]  # NOTE(review): unused below -- candidate for removal
# Draw a grey horizontal guide line at each value in list1.
for i in list1:
    plt.plot([0,16],[i,i],linewidth=1,c='grey')
# Draw grey vertical grid lines at x = 0..15.
for i in range(0,16):
    plt.plot([i,i],[0,16000],linewidth=1,c='grey')
# Template segment [x-coords, y-coords] anchored at the origin.
li = [
    [0,1],
    [0,list1[0]]
]
# Draw a fan of blue segments from the origin to (8*i, 8*list1[0]).
# NOTE(review): list(li) makes a *shallow* copy, so lia's entries alias the
# inner lists of li; this is harmless here because both endpoints are
# overwritten before the in-place multiply on every iteration.
for i in range(1,8):
    lia = list(li)
    lia[0][1]=i
    lia[1][1]=list1[0]
    lia[0][1]*=8
    lia[1][1]*=8
    plt.plot(lia[0],lia[1],linewidth=1,c='blue')
plt.xlabel('X', fontsize=16)
plt.ylabel('Y', fontsize=16)
# Set the tick label font size.
plt.tick_params(axis='both', labelsize=10)
plt.show()
997,423 | 581a81c79abe9df51473f2b73ac1c145b997be47 | from sqlalchemy.engine import create_engine
from sqlalchemy.inspection import inspect
from sqlalchemy.sql.expression import select, desc, and_
from sqlalchemy.sql.functions import func
from sqlalchemy.sql.schema import MetaData, Table
from cybertrap.dbconst import *
import pandas as pd
from pandas import DataFrame
from pprint import pprint
# https://stackoverflow.com/questions/5225780/turn-off-a-warning-in-sqlalchemy
import warnings
from sqlalchemy import exc as sa_exc
class Database:
    """Convenience wrapper around a SQLAlchemy connection to a cybertrap
    event database.

    On construction it reflects the tables it needs, loads the event-type
    lookup table, and detects whether the database uses the 2016 schema
    (hostnames table) or the 2017 schema (ct_units table).
    """

    def mount_table_from_db(self, tablename):
        # Reflect an existing table from the live database into self.meta.
        return Table(tablename, self.meta, autoload=True, autoload_with=self.engine)

    def __init__(self, db_url):
        self.db_url = db_url
        self.engine = create_engine(self.db_url, echo=False)
        self.meta = MetaData()
        # connect
        self.conn = self.engine.connect()
        self.inspector = inspect(self.engine)
        self.db_all_table_names = self.inspector.get_table_names()
        # as a simple self-test of working SQLalchemy+Postgres,
        # read and parse event_types table
        self.event_types = {}
        self.db_table_event_types = self.mount_table_from_db('event_types')
        res = self.conn.execute(select([self.db_table_event_types]))
        for row in res:
            self.event_types[row.id] = row.type
        # https://stackoverflow.com/questions/5225780/turn-off-a-warning-in-sqlalchemy
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
            # we are always interested in the events table, so mount it immediately
            self.table_events = self.mount_table_from_db('events')
        # Cached statistics dict, filled lazily by get_db_stat().
        self.stat = None
        # Schema detection: the 2017 format has a 'machine_key' column in
        # ct_units and links events via events.ct_units_id.
        df_ctunits = pd.read_sql("SELECT * FROM ct_units", self.engine)
        if 'machine_key' in df_ctunits:
            self.is2016format = False
            self.is2017format = True
            self.hostname_in_events = 'ct_units_id'
        else:
            self.is2016format = True
            self.is2017format = False
            self.hostname_in_events = 'hostnames_id'

    def shutdown(self):
        """Dispose of the engine and drop references to all handles."""
        self.engine.dispose()
        # help garbage collector?
        self.engine = None
        self.meta = None
        self.conn = None
        self.inspector = None

    def _get_first_last_row_(self, table):
        # Return the (first, last) rows of *table* ordered by id.
        sel1 = table.select().order_by('id').limit(1)
        first = self.conn.execute(sel1).fetchone()
        sel2 = table.select().order_by(desc('id')).limit(1)
        last = self.conn.execute(sel2).fetchone()
        return (first,last)

    def _count_table_rows_(self, table):
        # SELECT COUNT(*) over *table*.
        sel = select([func.count()]).select_from(table)
        return self.conn.execute(sel).scalar()

    def get_db_stat(self) -> dict:
        """
        Get rough statistic overview of current database:
        rows, min_id, max_id, min_time, max_time

        Keys (ROWS, MIN_ID, ...) come from the wildcard import of
        cybertrap.dbconst; the result is also cached in self.stat.
        """
        stat = dict()
        stat[ROWS] = self._count_table_rows_(self.table_events)
        res = self._get_first_last_row_(self.table_events)
        stat[MIN_ID] = res[0].id
        stat[MIN_TIME] = res[0].time
        stat[MAX_ID] = res[1].id
        stat[MAX_TIME] = res[1].time
        self.stat = stat
        return stat

    def print_statistics(self):
        """Print (and return) the statistics dict, computing it on first use."""
        if self.stat is None:
            self.get_db_stat()
        # NOTE(review): assumes ROWS == 'rows' in cybertrap.dbconst -- verify.
        print('{} events in database'.format(self.stat['rows']))
        print(' first event: {0:>8} {1}'.format(self.stat[MIN_ID], self.stat[MIN_TIME]))
        print(' last event: {0:>8} {1}'.format(self.stat[MAX_ID], self.stat[MAX_TIME]))
        return self.stat

    def get_hostnames(self, count_events=True) -> (DataFrame, dict):
        """Return (DataFrame, dict) of hosts that actually have events.

        The DataFrame is indexed by host id with columns hostname/events;
        when count_events is False the 'events' column stays at -1 and no
        rows are dropped.
        """
        # get all distinct hostnames
        if self.is2017format:
            df = pd.read_sql("SELECT DISTINCT (ct_units.id), hostname FROM ct_units" +
                             " LEFT OUTER JOIN events ON events.ct_units_id = ct_units.id" +
                             " ORDER BY ct_units.id", self.engine)
        else: # 2016format
            df = pd.read_sql("SELECT DISTINCT (hostnames.id), hostname FROM hostnames" +
                             " LEFT OUTER JOIN events ON events.hostnames_id = hostnames.id" +
                             " ORDER BY hostnames.id", self.engine)
        df = df.set_index('id')
        # -1 marks "no events counted yet / host unused".
        df['events'] = -1
        # figure out which hostnames are actually used by events
        if self.is2017format:
            df2 = pd.read_sql("SELECT DISTINCT ct_units_id FROM events", self.engine)
            df2.columns=['hostnames_id']
        else: # 2016format
            df2 = pd.read_sql("SELECT DISTINCT hostnames_id FROM events", self.engine)
        if count_events:
            # query number of events for each hostname_id in database
            # NOTE(review): SQL is assembled by string concatenation. The ids
            # come from this database, so injection risk is low, but bind
            # parameters would be safer and faster.
            for name in df2['hostnames_id']:
                events = pd.read_sql(
                    "SELECT COUNT (*) FROM (SELECT * FROM events WHERE "+self.hostname_in_events+"="+str(name)+") AS x",
                    self.engine)
                df.loc[name,'events'] = events['count'][0]
            # drop lines(hosts) with no events in database
            df = df.drop( df[df.events == -1].index )
        # df[id,hostname,events], { id -> {'events'-> , 'hostname'-> } }
        return df, df.transpose().to_dict(orient='dict')

    def get_count_of_host_event(self, hostid:str) -> int:
        """Return the number of events recorded for the given host id."""
        # NOTE(review): string-concatenated SQL -- acceptable for internal
        # integer ids, but use bind parameters if hostid can ever be untrusted.
        events = pd.read_sql(
            "SELECT COUNT (*) FROM (SELECT * FROM events WHERE "+self.hostname_in_events+"="+str(hostid)+") AS x",
            self.engine)
        return int(events['count'][0])
|
class ApplicationSummary(models.AbstractModel):
    """Report model that collects external-office transactions for the
    'Application Summary' QWeb report."""
    _name = "report.housemaid.app_sum_rep"
    _description = "Application Summary"  # bug fix: typo "Supmmary"

    @api.model
    def _get_report_values(self, docids, data=None):
        """Build the rendering context for the report.

        data keys used:
          from_date / to_date        -- inclusive tran_date range filter
          external_office_trans      -- optional tran_name filter
        """
        # Bug fix: the original tested data['from_date'] twice (to_date was
        # never checked) and left `domain` undefined when no dates were
        # supplied, which raised NameError on the search below.
        domain = []
        if data and data.get('from_date') and data.get('to_date'):
            domain = [
                ('tran_date', '>=', data['from_date']),
                ('tran_date', '<=', data['to_date']),
            ]
            if data.get('external_office_trans'):
                domain.append(('tran_name', '=', data['external_office_trans']))
        docs = self.env['housemaid.configuration.externalofficetrans'].search(domain)
        return {
            'doc_ids': self.ids,
            'doc_model': 'housemaid.configuration.externalofficetrans',
            'docs': docs,
        }
|
997,425 | d52f0e917307fe84ead015b34dd428d4e06b73ea | import numpy as np
import cv2
def click_event(event, x, y, flags, param):
    """Mouse callback: on left click, sample the clicked pixel's BGR colour,
    mark the click point on the image, and show a colour swatch window.

    x, y are the click coordinates in window (column, row) order.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        # Bug fix: numpy images are indexed [row, col] = [y, x]; the original
        # read img[x, y, ...], which swaps the axes (and raises IndexError on
        # non-square images).
        blue = img[y, x, 0]
        green = img[y, x, 1]
        red = img[y, x, 2]
        cv2.circle(img, (x, y), 3, (0, 0, 255), -1)
        mycolorImage = np.zeros((512, 512, 3), np.uint8)
        mycolorImage[:] = [blue, green, red]
        cv2.imshow("color", mycolorImage)
#img=np.zeros((512,512,3), np.uint8)
# Load the test image and register the mouse callback on its window.
img=cv2.imread("lena.jpg")  # NOTE(review): returns None if the file is missing
cv2.imshow("image", img)
points=[]  # NOTE(review): unused -- candidate for removal
cv2.setMouseCallback("image", click_event)
cv2.waitKey(0)  # block until any key is pressed
cv2.destroyAllWindows()
997,426 | f6f870437c00db076765e60db97b0fa82b5fdc5b | # imports
from flask import Flask, redirect, url_for
app = Flask(__name__)
# user redirect
@app.route('/user/<name>')
def user_page(name):
    """Route /user/<name>: send 'nahum' to the admin page, everyone else
    to the guest page."""
    endpoint = 'admin_page' if name == "nahum" else 'guest_page'
    return redirect(url_for(endpoint, name=name))
# admin page
@app.route('/admin/<name>')
def admin_page(name):
    """Route /admin/<name>: greet the administrator by name."""
    greeting = "I am the admin. My name is %s" % name
    return greeting
# guest page
@app.route('/guest/<name>')
def guest_page(name):
    """Route /guest/<name>: greet a guest by name."""
    greeting = "I am on the guest page. My name is %s" % name
    return greeting
if __name__ == '__main__':
    # Debug mode enables the reloader and in-browser tracebacks;
    # never enable it in production.
    app.debug = True
    app.run()
|
997,427 | e68f083b2b65359e0ed3fd0794ef6bf7f5df9676 | # coding: utf-8
from openerp import api, fields, models, tools, _
from openerp.exceptions import Warning
from io import open
import base64
import os
import re
import time
try:
import pysftp
except ImportError:
raise ImportError(
'This module needs pysftp to write files through SFTP. Please install pysftp on your system. (sudo pip install pysftp)')
import logging
_logger = logging.getLogger(__name__)
class SftpExport(models.Model):
    """Odoo model holding SFTP export configuration per export type, with
    helpers to test the connection and to push a CSV export to the remote
    server. (Python 2 / old-API Odoo module -- note the py2 except syntax.)
    """
    _name = 'sftp.export'
    _rec_name = 'export_type'

    # Kind of export this configuration applies to (one config per type,
    # enforced by the SQL constraint below).
    export_type = fields.Selection(
        [('ups', 'UPS')], 'Export Type', required=True)
    export_extension = fields.Selection(
        [('csv', 'CSV')], 'Export Extension', required=True, default='csv')
    # NOTE(review): required='True' is a truthy *string*, not the boolean True;
    # it works, but should be the boolean for clarity.
    folder = fields.Char('Local SFTP File Directory', help='Absolute path for storing the SFTP files', required='True',
                         default='/odoo/sftp-files')
    # Columns for external server (SFTP)
    sftp_path = fields.Char(
        'Path external server',
        help='The location to the folder where the files should be written to. For example /sftpuser/files/.\nFiles will then be written to /sftpuser/files/ on your remote server.')
    sftp_host = fields.Char(
        'IP Address SFTP Server',
        help='The IP address from your remote server. For example 192.168.0.1')
    sftp_port = fields.Integer(
        'SFTP Port', help='The port on the FTP server that accepts SSH/SFTP calls.', default=22)
    sftp_user = fields.Char(
        'Username SFTP Server',
        help='The username where the SFTP connection should be made with. This is the user on the external server.')
    # NOTE(review): the password is stored in plain text on the model.
    sftp_password = fields.Char(
        'Password User SFTP Server',
        help='The password from the user where the SFTP connection should be made with. This is the password from the user on the external server.')

    _sql_constraints = [
        ('field_unique',
         'unique(export_type)',
         'Export Type has to be unique!')
    ]

    @api.multi
    def test_sftp_connection(self, context=None):
        """Open and immediately close an SFTP session to verify the stored
        credentials; always raises a Warning popup with the outcome."""
        self.ensure_one()
        # Check if there is a success or fail and write messages
        messageTitle = ""
        messageContent = ""
        for rec in self:
            try:
                pathToWriteTo = rec.sftp_path
                ipHost = rec.sftp_host
                portHost = rec.sftp_port
                usernameLogin = rec.sftp_user
                passwordLogin = rec.sftp_password
                # Connect with external server over SFTP, so we know sure that everything works.
                srv = pysftp.Connection(
                    host=ipHost, username=usernameLogin, password=passwordLogin, port=portHost)
                srv.close()
                # We have a success.
                messageTitle = _("Connection Test Succeeded!")
                messageContent = _(
                    "Everything seems properly set up for exporting files via SFTP!")
            except Exception, e:
                messageTitle = _("Connection Test Failed!")
                if len(rec.sftp_host) < 8:
                    messageContent += _("\nYour IP address seems to be too short.\n")
                messageContent += _("Here is what we got instead:\n")
        # Raising a Warning is the only way to show the result in this old API.
        if "Failed" in messageTitle:
            raise Warning(_(messageTitle + '\n\n' +
                            messageContent + "%s") % tools.ustr(e))
        else:
            raise Warning(_(messageTitle + '\n\n' + messageContent))

    @api.multi
    def sftp_export(self, filecontent, type):
        """Write base64 *filecontent* to a local file named import<type>.<ext>
        and upload it to the configured remote SFTP path, creating missing
        remote directories on the way."""
        conf_ids = self.search([('export_type', '=', type)])
        for rec in conf_ids:
            # Create Local Backup Directory if not exists
            try:
                if not os.path.isdir(rec.folder):
                    os.makedirs(rec.folder)
            except:
                raise
            # Create name for file
            file_name = 'import%s.%s' % (type.lower(), rec.export_extension)
            file_path = os.path.join(rec.folder, file_name)
            # Decode base64 to csv output and write to file
            csv_output = base64.decodestring(filecontent)
            csv_output = csv_output.decode("utf-8")
            with open(file_path, 'w+', encoding='utf-8') as fp:
                fp.write(csv_output)
            try:
                # store all values in variables
                pathToWriteTo = rec.sftp_path
                ipHost = rec.sftp_host
                portHost = rec.sftp_port
                usernameLogin = rec.sftp_user
                passwordLogin = rec.sftp_password
                # Connect with external server over SFTP
                srv = pysftp.Connection(
                    host=ipHost, username=usernameLogin, password=passwordLogin, port=portHost)
                # Set keepalive to prevent socket closed / connection dropped error
                srv._transport.set_keepalive(30)
                # Move to the correct directory on external server.
                # If the user made a typo in his path with multiple slashes (/sftpuser//files/) it will be fixed by this regex.
                pathToWriteTo = re.sub('([/]{2,5})+', '/', pathToWriteTo)
                _logger.debug('sftp remote path: %s' % pathToWriteTo)
                try:
                    srv.chdir(pathToWriteTo)
                except IOError:
                    # Create directory and subdirs if they do not exist.
                    currentDir = ''
                    for dirElement in pathToWriteTo.split('/'):
                        currentDir += dirElement + '/'
                        try:
                            srv.chdir(currentDir)
                        except:
                            _logger.info(
                                '(Part of the) path didn\'t exist. Creating it now at ' + currentDir)
                            # Make directory and then navigate into it
                            srv.mkdir(currentDir, mode=777)
                            srv.chdir(currentDir)
                            pass
                srv.put(file_path)
                # Close the SFTP session.
                srv.close()
            except Exception, e:
                # NOTE(review): the exception (and the upload failure) is
                # swallowed here with only a debug-level log line -- callers
                # never learn that the transfer failed.
                _logger.debug(
                    'Exception! We couldn\'t transfer to the SFTP server..')
|
997,428 | 28698afdaae82536271675d26d97263abadeffa8 | from tkinter import *
from Controller import GemController
class BoardView:
    """
    The class responsible for the graphical representation of the playing field.
    Visualizes the menu in the game.
    """
    # Background colour and fixed width (in characters) of the menu buttons.
    _BACKGROUND_MENU_COLOR = "#DADEEB"
    _MENU_BUTTON_SIZE = 12

    def __init__(self, root, width, height, canvas, mediator):
        # root     -- the Tk root window widgets are attached to
        # width    -- board width in cells
        # height   -- board height in cells
        # canvas   -- the Canvas the board is drawn on
        # mediator -- controller exposing show_menu/start_new_game/quit callbacks
        self._root = root
        self._width = width
        self._height = height
        self._canvas = canvas
        self._mediator = mediator
        self._score_point = 0
        self._create_board()
        self._create_menu()
        self._draw_menu()

    def draw_board(self):
        """Place the board canvas into the window grid."""
        self._canvas.grid(row=0, column=0, rowspan=self._height)

    def hide_board(self):
        """Remove the board canvas from the window grid."""
        self._canvas.grid_forget()

    def set_gem(self, row: int, column: int, gem_controller: GemController):
        """Store the gem controller occupying the given cell."""
        self.gems[row][column] = gem_controller

    def update_score(self, score_point: int):
        """updates and displays game points"""
        self._score_point = score_point
        self._update_score()  # change the visual display of points for the player

    def _create_board(self):
        # initializes a two-dimensional array of cells, 0 meaning "empty".
        # NOTE(review): allocated as width rows x height columns but indexed
        # [row][column] in set_gem -- fine for square boards, verify otherwise.
        self.gems = [[0] * self._height for _ in range(self._width)]

    def _create_menu(self):
        # creates elements for display on the canvas
        self._score_label = Label(self._root, text=f"Score: {self._score_point}", bg=self._BACKGROUND_MENU_COLOR,
                                  font='Helvetica 10 bold')
        self._main_menu = Button(self._root, text=f"Menu", bg=self._BACKGROUND_MENU_COLOR, font='Helvetica 10 bold',
                                 width=self._MENU_BUTTON_SIZE,
                                 command=self._mediator.show_menu)
        self._new_game = Button(self._root, text=f"New game", bg=self._BACKGROUND_MENU_COLOR, font='Helvetica 10 bold',
                                width=self._MENU_BUTTON_SIZE,
                                command=self._mediator.start_new_game)
        self._quit = Button(self._root, text=f"Quit", bg=self._BACKGROUND_MENU_COLOR, font='Helvetica 10 bold',
                            width=self._MENU_BUTTON_SIZE,
                            command=self._mediator.quit)

    def _draw_menu(self):
        # Lay the menu widgets out in the second grid column, next to the board.
        self._score_label.grid(row=0, column=2)
        self._main_menu.grid(row=1, column=2)
        self._new_game.grid(row=2, column=2)
        self._quit.grid(row=7, column=2)

    def _update_score(self):
        # Refresh the score label text in place.
        self._score_label["text"] = f"Score: {self._score_point}"
|
997,429 | cb9bd939fe334e215b0c3697427718d83c96b278 | #!/usr/bin/env python3
"""
Script language: Python3
Talks to:
- Vega wallet (REST)
- Vega node (gRPC)
Apps/Libraries:
- REST (wallet): Vega-API-client (https://pypi.org/project/Vega-API-client/)
- gRPC (node): Vega-API-client (https://pypi.org/project/Vega-API-client/)
"""
# Note: this file uses special tags in comments to enable snippets to be
# included in documentation.
# Example
# # __something:
# some code here
# # :something__
import sys
# __import_client:
import vegaapiclient as vac
# :import_client__
import helpers
# --- Edit these values below ---
node_url_grpc = ">> e.g. n06.testnet.vega.xyz:3002"
walletserver_url = ">> Vega-hosted wallet: https://wallet.testnet.vega.xyz"
# NOTE(review): this second assignment overwrites the previous one -- keep
# only the line that matches your deployment (hosted vs self-hosted).
walletserver_url = ">> self-hosted wallet: http://localhost:1789"
wallet_name = ">> your wallet name here"
wallet_passphrase = ">> your passphrase here"
# --- Edit these values above ---

# In CI mode, pull connection details from environment variables instead of
# the placeholders above.
if "--ci" in sys.argv:
    node_url_grpc = helpers.get_from_env("NODE_URL_GRPC")
    walletserver_url = helpers.get_from_env("WALLETSERVER_URL")
    wallet_name = helpers.get_from_env("WALLET_NAME")
    wallet_passphrase = helpers.get_from_env("WALLET_PASSPHRASE")

# Fail fast with a hint when any placeholder was left unedited.
if not helpers.check_var(node_url_grpc):
    print("Invalid Vega node URL (gRPC)")
    print('Edit this script and look for "Edit these values"')
    exit(1)

if not helpers.check_url(walletserver_url):
    print("Invalid wallet server URL")
    print('Edit this script and look for "Edit these values"')
    exit(1)

if not helpers.check_var(wallet_name):
    print("Invalid wallet name")
    print('Edit this script and look for "Edit these values"')
    exit(1)

if not helpers.check_var(wallet_passphrase):
    print("Invalid wallet passphrase")
    print('Edit this script and look for "Edit these values"')
    exit(1)

# Help guide users against including api version suffix on url
walletserver_url = helpers.fix_walletserver_url(walletserver_url)

# __create_wallet:
# Vega node: Create client for accessing public data
datacli = vac.VegaTradingDataClient(node_url_grpc)
# Vega node: Create client for trading (e.g. submitting orders)
tradingcli = vac.VegaTradingClient(node_url_grpc)
# Wallet server: Create a walletclient (see above for details)
walletclient = vac.WalletClient(walletserver_url)
login_response = walletclient.login(wallet_name, wallet_passphrase)
# :create_wallet__
helpers.check_response(login_response)

# __get_market:
# Get a list of markets
markets = datacli.Markets(vac.api.trading.MarketsRequest()).markets
# Choose the first.
marketID = markets[0].id
# :get_market__

# __generate_keypair:
# Flip to True to create a fresh keypair instead of reusing the first existing one.
GENERATE_NEW_KEYPAIR = False
if GENERATE_NEW_KEYPAIR:
    # If you don't already have a keypair, generate one.
    response = walletclient.generatekey(
        wallet_passphrase,
        [{"key": "alias", "value": "my_key_alias"}],
    )
    helpers.check_response(response)
    pub_key = response.json()["key"]["pub"]
else:
    # List keypairs
    response = walletclient.listkeys()
    helpers.check_response(response)
    keys = response.json()["keys"]
    assert len(keys) > 0
    pub_key = keys[0]["pub"]
# :generate_keypair__

# __prepare_order:
# Build a GTC limit buy order on the chosen market.
req = vac.wallet.v1.wallet.SubmitTransactionRequest(
    pub_key=pub_key,
    propagate=False,
    order_submission=vac.commands.v1.commands.OrderSubmission(
        market_id=marketID,
        # price is an integer. For example 123456 is a price of 1.23456,
        # assuming 5 decimal places.
        price=100000,
        size=1,
        side=vac.vega.Side.SIDE_BUY,
        time_in_force=vac.vega.Order.TimeInForce.TIME_IN_FORCE_GTC,
        expires_at=0,  # not needed for GTC orders
        type=vac.vega.Order.Type.TYPE_LIMIT,
        reference="repo:api;lang:python;sample:submit-order-grpc",
        # pegged_order=None,
    ),
)
# :prepare_order__

# __sign_tx:
# Use the helper function to sign and submit the tx
tradingcli.sign_submit_tx_v2(walletclient, req)
# :sign_tx__

print("All is well.")
|
997,430 | f2451700fe0caf954f22ef4006b77c1495eee381 | from importlib import import_module
from .nlp import print_tree
from .parser_en import ParserEN
from .text import edge_text
def create_parser(lang=None, parser_class=None, lemmas=False, corefs=False,
                  beta='repair', normalize=True, post_process=True):
    """Create and return a parser (an instance of a Parser subclass) for the
    given language, or instantiate the explicitly supplied parser class.

    Raises RuntimeError when neither argument is given, when the class and
    the language disagree, or when the language is not implemented.

    Available parsers:
        'en' -- English

    Keyword arguments:
    parser_class -- dotted path of an external parser class; a leading '.'
                    requests an explicit relative import.
    lemmas -- if True, lemma edges are generated by the parser.
    corefs -- if True, coreference resolution is performed (default: False).
    beta -- beta stage mode, current options are 'strict' and 'repair'
            (default: 'repair')
    normalize -- perform hyperedge normalization (default: True)
    post_process -- perform hyperedge post-processing (default: True)
    """
    if not lang and not parser_class:
        raise RuntimeError(
            'Either "lang" or "parser_class" must be specified.')
    if parser_class:
        pkg = None
        cls_path = parser_class
        if cls_path[0] == '.':
            cls_path = cls_path[1:]
            pkg = '.'
        # Split "pkg.module.Class" into module path and class name.
        module_path, _, cls_name = cls_path.rpartition('.')
        cls = getattr(import_module(module_path, package=pkg), cls_name)
        parser = cls(lemmas=lemmas, corefs=corefs, beta=beta,
                     normalize=normalize, post_process=post_process)
        # Guard against a class that does not implement the requested language.
        if lang and parser.lang != lang:
            raise RuntimeError(
                'Specified language and parser class do not match.')
        return parser
    if lang == 'en':
        return ParserEN(lemmas=lemmas, corefs=corefs, beta=beta,
                        normalize=normalize, post_process=post_process)
    raise RuntimeError('Unknown parser: {}'.format(lang))
def parser_lang(parser_class):
    """Return the LANG constant of the module containing *parser_class*.

    *parser_class* is a dotted class path; a leading '.' requests an
    explicit relative import.
    """
    pkg = None
    path = parser_class
    if path[0] == '.':
        path = path[1:]
        pkg = '.'
    # Everything before the final dot is the module path.
    module_path = path.rpartition('.')[0]
    return getattr(import_module(module_path, package=pkg), 'LANG')
|
997,431 | af0fbb26cb0742800630c7245cc306384ed74bb8 | import csv
import pandas as pd
# Load the faculty roster; expects faculty.csv in the working directory.
faculty = pd.read_csv('faculty.csv')
#print faculty
namesFL = faculty.name  # "First Last" name column
faculty1 = pd.read_csv('faculty.csv')  # NOTE(review): duplicate read, unused below
# degree/title/email columns -- note the leading spaces in the CSV headers.
dte = faculty[[' degree', ' title',' email']]
def get_col(filename, col_num):
    """Return column *col_num* of CSV *filename* as a list, skipping the
    header row."""
    # Bug fix: the original opened the file without ever closing it, and in
    # binary mode ("rb"), which breaks the csv module on Python 3; use a
    # context manager and text mode instead.
    with open(filename) as fh:
        reader = csv.reader(fh, delimiter=',')
        next(reader)  # skip the header row
        return [row[col_num] for row in reader]
def makeSplit(filename, split_on):
    """Split every entry of *filename* (an iterable of strings) on
    *split_on* and return the list of token lists."""
    return [entry.split(split_on) for entry in filename]
def get_column(array, column_number):
    """Return the *column_number*-th element of every row in *array*."""
    column = []
    for row in array:
        column.append(row[column_number])
    return column
#def makeDict(names, )
#names = get_col('faculty.csv',0)
# NOTE(review): this whole section is Python 2-only (print statements,
# list-returning zip(), and slicing of dict.keys()).
# Split each full name and pull out first/last components.
split_names = makeSplit(namesFL, ' ')
last_names = get_column(split_names,-1)
first_names = get_column(split_names,0)
# Name tuples in (last, first) and (first, last) order.
namesTlf = pd.Series(zip(last_names, first_names))
namesTfl = pd.Series(zip(first_names, last_names))
pd_last_names = pd.Series(last_names)
# Table keyed by last name with degree/title/email columns.
last_names_df = pd.concat([pd_last_names.reset_index(drop=True), dte], axis=1)
last_names_df.columns = ['name','degree','title','email']
#headers = last_names_df.dtypes.index
#print headers
#print last_names_df
#print type(last_names_df)
#dict_last_name = last_names_df.set_index('name').to_dict()
#print dict_last_name
from collections import defaultdict
# last name -> list of [degree, title, email] rows (handles duplicate names).
faculty_dict = {k: g[['degree', 'title', 'email']].values.tolist() for k,g in last_names_df.groupby("name")}
# (first, last) tuple -> [degree, title, email]; duplicate keys keep the last row.
fl_names_df = pd.concat([namesTfl.reset_index(drop=True), dte], axis=1)
fl_names_df.columns = ['name','degree','title','email']
dict_fl_name = fl_names_df.set_index('name').T.to_dict('list')
print dict_fl_name
# Same mapping keyed by (last, first) tuples.
lf_names_df = pd.concat([namesTlf.reset_index(drop=True), dte], axis=1)
lf_names_df.columns = ['name','degree','title','email']
dict_lf_name = lf_names_df.set_index('name').T.to_dict('list')
print " "
print dict_lf_name
#dictLN = {}
#for x in range(len(last_names_df)):
#    currentid = last_names_df.iloc[x,0]
#    currentvalue = last_names_df.iloc[x,1:]
#    dictLN.setdefault(currentid, [])
#    dictLN[currentid].append(currentvalue)
#print faculty_dict
print " "
# Show the first three entries of each dict (py2: keys() is a sliceable list;
# ordering is arbitrary).
first3_fac = {k: faculty_dict[k] for k in faculty_dict.keys()[:3]}
print first3_fac
print " "
first3_fl_name = {k: dict_fl_name[k] for k in dict_fl_name.keys()[:3]}
print first3_fl_name
print ' '
first3_lf_name = {k: dict_lf_name[k] for k in dict_lf_name.keys()[:3]}
print first3_lf_name
#
|
997,432 | 9404b787cc50bdfecb9a9b7c69a4d3d2c644fc2d | # NEED this to be sourced before
#voms-proxy-init -voms cms
#export X509_USER_PROXY=$(voms-proxy-info --path)
#export PYTHONPATH=/afs/cern.ch/cms/PPD/PdmV/tools/wmcontrol:${PYTHONPATH}
#export PATH=/afs/cern.ch/cms/PPD/PdmV/tools/wmcontrol:${PATH}
# source /afs/cern.ch/cms/PPD/PdmV/tools/wmclient/current/etc/wmclient.sh
import os
import sys
sys.path.append('/afs/cern.ch/cms/PPD/PdmV/tools/McM/')
from rest import McM

# Talk to the production McM instance.
mcm = McM(dev=False)

# Historical query variants, kept for reference.
#requests = mcm.get('requests', query='tags=Summer16MiniAODv3T2sub1&priority=90000')
#requests = mcm.get('requests', query='tags=Summer16MiniAODv3T2sub2&priority=90000')
#requests = mcm.get('requests', query='priority=85000&status=submitted&prepid=*Autumn18DR*')
requests = mcm.get('requests', query='status=submitted&tags=PAGLHCP2019&priority=85000')
#requests = mcm.get('requests', query='prepid=EXO-RunIIFall17GS-009*&dataset_name=Mustar*')
#requests = mcm.get('requests', query='status=submitted&tags=Summer16MiniAODv3T3')
#requests = mcm.get('requests', query='status=submitted&prepid=HIG-PhaseIIMTDTDRAutumn18wmLHEGS-0000*')
#requests = mcm.get('requests', query='status=submitted&dataset_name=VBF_BulkGravToWW_narrow_M-*')
#requests = mcm.get('requests', query='status=submitted&prepid=SMP-*LowPU*GS*')
#requests = mcm.get('requests', query='prepid=BPH-RunIIFall18GS-0006*')
print('Found %s requests' % (len(requests)))
for request in requests:
    if len(request['reqmgr_name']) > 0:
        # We change priority only if request has a registered workflow
        # (most recent workflow name, new priority 86000).
        result = os.system("wmpriority.py %s %s" % (request['reqmgr_name'][-1]['name'], 86000))
        if result != 0:
            print('Change of priority failed for: %s. Exit code: %s' % (request['prepid'], result))
        else:
            # Consistency fix: this was a Python 2 `print result` statement in
            # a file that otherwise uses the print() function form.
            print(result)
    else:
        print('Workflow is not registered for %s' % (request['prepid']))
|
997,433 | 905510db1d49c5f0e090aca87d91cd6af1eb012e | '''
Created on 16.04.2014
@author: martin
'''
from numpy import *
from uncertainties import ufloat
def make_LaTeX_table(data,header, flip= 'false', onedim = 'false'):
    """Render *data* as a LaTeX table (booktabs rules, siunitx 'S' columns).

    data   -- 2D array of numbers and/or uncertainties.ufloat values; may be
              a 1D sequence when onedim == 'true'.
    header -- list of column headings; its length must equal the number of
              data columns, otherwise the string 'ERROR' is returned.
    flip   -- 'true'/'false' string flag: lay a 1D sequence out as a single
              row instead of a single column.
    onedim -- 'true'/'false' string flag: treat *data* as one-dimensional.

    NOTE(review): the flags are compared as *strings*, not booleans, and
    failure is signalled by returning 'ERROR' instead of raising -- kept
    as-is to preserve existing call sites.
    """
    output = '\\begin{table}\n\\centering\n\\begin{tabular}{'
    #Get dimensions
    if(onedim == 'true'):
        if(flip == 'false'):
            # Column vector: wrap each scalar into its own one-element row.
            data = array([[i] for i in data])
        else:
            # Row vector: single row containing all values.
            data = array([data])
    row_cnt, col_cnt = data.shape
    header_cnt = len(header)
    if(header_cnt == col_cnt and flip== 'false'):
        #Make Format: one siunitx 'S' column per data column.
        for i in range(col_cnt):
            output += 'S'
        output += '}\n\\toprule\n{'+ header[0]
        for i in range (1,col_cnt):
            output += '} &{ ' + header[i]
        output += ' }\\\\\n\\midrule\n'
        for i in data:
            # Plain numbers are emitted verbatim; anything else (ufloats) as
            # inline math via the uncertainties LaTeX format spec {:L}.
            if(isinstance(i[0],(int,float,int32))):
                output += str( i[0] )
            else:
                output += ' ${:L}$ '.format(i[0])
            for j in range(1,col_cnt):
                if(isinstance(i[j],(int,float,int32))):
                    output += ' & ' + str( i[j])
                else:
                    # NOTE(review): later columns use str() with '/' stripped
                    # rather than the {:L} math format used for column 0.
                    output += ' & ' + str( i[j]).replace('/','')
            output += '\\\\\n'
        output += '\\bottomrule\n\\end{tabular}\n\\label{}\n\\caption{}\n\\end{table}\n'
        return output
    else:
        return 'ERROR'
def err(data):
    """Return the mean of *data* together with the standard error of the
    mean, packed as a ufloat(mean, error)."""
    N = len(data)
    mean = data.mean()
    # Sum of squared deviations from the mean, accumulated left to right.
    squared_dev = sum((value - mean) ** 2 for value in data)
    return ufloat(mean, sqrt(squared_dev / ((N - 1) * N)))
def lin_reg(x, y):
    """Least-squares fit of y = m*x + b.

    Returns (m, b) as ufloats carrying the standard errors of slope and
    intercept estimated from the fit residuals.
    """
    N = len(x)
    Sx, Sy = x.sum(), y.sum()
    Sxx, Sxy = (x * x).sum(), (x * y).sum()
    m = (Sxy - Sx * Sy / N) / (Sxx - Sx ** 2 / N)
    b = Sy / N - m * Sx / N
    # Residual standard deviation of the fit.
    sy = sqrt(((y - m * x - b) ** 2).sum() / (N - 1))
    m_err = sy * sqrt(N / (N * Sxx - Sx ** 2))
    b_err = m_err * sqrt(Sxx / N)
    return ufloat(m, m_err), ufloat(b, b_err)
997,434 | 9f515ef80e2a08949d119da87a4010a13f07ded3 | from django.db import models
class description(models.Model):
    """Book record: name, price and author.

    NOTE(review): class names are conventionally PascalCase ('Description');
    renaming would require a Django migration, so it is left as-is.
    """
    book_name=models.CharField(max_length=50)     # book title, up to 50 chars
    book_price=models.IntegerField(default=0)     # whole-unit price; defaults to 0
    book_author=models.CharField(max_length=60)   # author name, up to 60 chars

    def __str__(self):
        # Display the book title in the admin and in query reprs.
        return self.book_name
|
997,435 | e32021746ebdaf697d194ee515c6902354706ed3 | def reverse_bits(n):
res = 0
for i in range(32):
res <<= 1
res += n % 2
print res
n >>= 1
return res
if __name__ == '__main__':
    # Known example (LeetCode 190): expected output is 964176192.
    n = 43261596
    # Fix: use the print() function form instead of the Python 2 print statement.
    print(reverse_bits(n))
|
997,436 | edc6f289550a7b4efc8aab9e494480b779a13047 | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from rest_framework import status
from .serializers import RegisterSerializer
from .models import User
# Create your views here.
class RegistrationView(APIView):
    """Register a new user from multipart/form-encoded POST data."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request, *args, **kwargs):
        # Bug fix: DRF serializers must receive input through the `data=`
        # keyword; the original passed request.data positionally, which binds
        # it as `instance`, so is_valid() could never validate the payload.
        serializer = RegisterSerializer(data=request.data)
        if serializer.is_valid():
            validated = serializer.validated_data
            email = validated.get('email')
            # Bug fix: the original read `serializerClass.request` (serializers
            # have no such attribute) and filtered on a nonexistent 'Email'
            # field; the model field is lowercase `email`.
            if User.objects.filter(email=email).exists():
                return Response({'Bad Request': 'Email entered exists'}, status=status.HTTP_400_BAD_REQUEST)
            new_user = User(
                firstName=validated.get('firstName'),
                lastName=validated.get('lastName'),
                phoneNumber=validated.get('phoneNumber'),
                email=email,
                avatar=validated.get('avatar'),
                passtoken=validated.get('passtoken'),
            )
            new_user.save()
            return Response(RegisterSerializer(new_user).data, status=status.HTTP_201_CREATED)
        return Response({'Bad Request': 'Invalid entry....'}, status=status.HTTP_400_BAD_REQUEST)
997,437 | 87e0a54dcfe617cd608359f3f2d0bff2e85e9441 | def set_fv_geom(mlat,nlon):
# Sets the geometric factors used in FV core for the lat-lon grid
# This is based on fv_pack.f90 in the AM2 source code, initially
# edited by Andrew Shao (andrew.shao@noaa.gov)
import numpy as np
pi = 4.*np.arctan(1.)
# Set aliases for trignometric functions
lonb = np.zeros(nlon+1)
rlonb = np.zeros(nlon+1)
latb = np.zeros(mlat+1)
for i in range(1,nlon+2):
lonb[i-1] = (i-1)*360./(nlon)
for j in range( 2, mlat+1 ):
# latb[j-1] = -90. + (((j-1)-0.5) * 180.) / (mlat-1)
latb[j-1] = -90. + (((j-1)-0.5) * 180.) / (mlat-1)
latb[0] = -90
latb[-1] = 90
# To be consistent with the coupler and the atmospheric grid, we must first convert the lat/lon to radians
# and then back to degrees using very slightly different conversion factors
latb = latb*(pi/180.)
latb = latb/(np.arctan(1.)/45.)
# Refine latb and lonb so that they resemble a grid with twice the resolution
latb_super = np.zeros(2*len(latb)-1)
lonb_super = np.zeros(2*len(lonb)-1)
latb_super[::2] = latb
lonb_super[::2] = lonb
latb_super[1::2] = 0.5*(latb[0:-1]+latb[1:])
lonb_super[1::2] = 0.5*(lonb[0:-1]+lonb[1:])
longrid, latgrid = np.meshgrid(lonb_super, latb_super)
return latgrid, longrid
|
997,438 | 81daa486f04bc4764f268bfe0cc959535eee4bec | from download import download
import MeCab
import pickle
# Fetch the raw text of "Wagahai wa Neko de Aru" and read it in.
download("http://www.cl.ecei.tohoku.ac.jp/nlp100/data/neko.txt", "neko.txt")
with open("neko.txt", mode="r") as source:
    raw_text = source.read()
# Run the NEologd-dictionary MeCab tagger and keep a copy of its raw output.
tagger = MeCab.Tagger("-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd/")
tagged = tagger.parse(raw_text)
with open("neko.txt.mecab", mode="w") as sink:
    sink.write(tagged)
# Turn each "surface<TAB>feature1,feature2,..." line into a token dict;
# lines without a tab (e.g. EOS markers) are dropped.
tokens = []
for line in tagged.split("\n"):
    fields = line.split("\t")
    if len(fields) > 1:
        fields = [fields[0]] + fields[1].split(",")
        tokens.append({"表層形": fields[0], "基本形": fields[7],
                       "品詞": fields[1], "品詞細分類1": fields[2]})
with open("neko.pickle", mode="wb") as sink:
    pickle.dump(tokens, sink)
|
997,439 | da50d9582ac24397462e7a43c10f22243c51bd66 | import torch
import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import numpy as np
import ctypes
try:
    # Load the compiled plugin library; when it is missing, skip registering
    # the example plugin, its converter, and its module tests entirely.
    ctypes.CDLL('libtorch2trt_plugins.so')

    def create_example_plugin(scale):
        """Instantiate the 'ExamplePlugin' TensorRT plugin with the given scale."""
        registry = trt.get_plugin_registry()
        creator = registry.get_plugin_creator('ExamplePlugin', '1', '')
        fc = trt.PluginFieldCollection([
            trt.PluginField(
                'scale',
                scale * np.ones((1,)).astype(np.float32),
                trt.PluginFieldType.FLOAT32
            )
        ])
        return creator.create_plugin('', fc)

    class ExampleLayer(nn.Module):
        """Toy layer that multiplies its input by a constant scale."""
        def __init__(self, scale):
            super().__init__()
            self.scale = scale

        def forward(self, x):
            return self.scale * x

    @tensorrt_converter(ExampleLayer.forward)
    def convert_example_layer(ctx):
        """torch2trt converter mapping ExampleLayer.forward onto the plugin."""
        module = get_arg(ctx, 'self', pos=0, default=None)
        input = get_arg(ctx, 'x', pos=1, default=None)
        output = ctx.method_return
        input_trt = input._trt
        plugin = create_example_plugin(module.scale)
        layer = ctx.network.add_plugin_v2([input_trt], plugin)
        output._trt = layer.get_output(0)

    @add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6)])
    def test_example_layer_scale3():
        return ExampleLayer(3.0)

    @add_module_test(torch.float32, torch.device("cuda"), [(1, 3, 4, 6)])
    def test_example_layer_scale4():
        return ExampleLayer(4.0)
except Exception:
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception while keeping the best-effort
    # "plugin not available" behavior.
    pass  # TODO: log plugin not found
997,440 | dc32b9242679a36d19562121ca9a143a4a4ca083 | # Copyright 2021 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Protocol of C++ EnvPool."""
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Type, Union
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore
import dm_env
import gym
import numpy as np
from dm_env import TimeStep
class EnvSpec(Protocol):
  """Cpp EnvSpec class.

  Structural (Protocol) stub describing the pybind11-exported spec object;
  members mirror the C++ side and carry no Python implementation.
  """

  # Ordered configuration field names, parallel to _default_config_values.
  _config_keys: List[str]
  _default_config_values: Tuple
  # Presumably the per-spec NamedTuple type used to build `config` — confirm
  # against the C++ binding.
  gen_config: Type

  def __init__(self, config: Tuple):
    """Protocol for constructor of EnvSpec."""

  @property
  def _state_spec(self) -> Tuple:
    """Cpp private _state_spec."""

  @property
  def _action_spec(self) -> Tuple:
    """Cpp private _action_spec."""

  @property
  def _state_keys(self) -> List:
    """Cpp private _state_keys."""

  @property
  def _action_keys(self) -> List:
    """Cpp private _action_keys."""

  @property
  def _config_values(self) -> Tuple:
    """Cpp private _config_values."""

  @property
  def config(self) -> NamedTuple:
    """Configuration used to create the current EnvSpec."""

  @property
  def state_array_spec(self) -> Dict[str, Any]:
    """Specs of the states of the environment in ArraySpec format."""

  @property
  def action_array_spec(self) -> Dict[str, Any]:
    """Specs of the actions of the environment in ArraySpec format."""

  def observation_spec(self) -> Dict[str, Any]:
    """Specs of the observations of the environment in dm_env format."""

  def action_spec(self) -> Union[dm_env.specs.Array, Dict[str, Any]]:
    """Specs of the actions of the environment in dm_env format."""

  @property
  def observation_space(self) -> Dict[str, Any]:
    """Specs of the observations of the environment in gym.Env format."""

  @property
  def action_space(self) -> Union[gym.Space, Dict[str, Any]]:
    """Specs of the actions of the environment in gym.Env format."""

  @property
  def reward_threshold(self) -> Optional[float]:
    """Reward threshold, None for no threshold."""
class ArraySpec(object):
  """Spec of a numpy array: element dtype, shape, and (minimum, maximum) bounds."""

  def __init__(
    self, dtype: np.dtype, shape: List[int], bounds: Tuple[Any, Any]
  ):
    """Constructor of ArraySpec; *bounds* is unpacked into minimum/maximum."""
    low, high = bounds
    self.dtype = dtype
    self.shape = shape
    self.minimum = low
    self.maximum = high

  def __repr__(self) -> str:
    """Human-readable summary for debugging."""
    head = f"ArraySpec(shape={self.shape}, dtype={self.dtype}, "
    tail = f"minimum={self.minimum}, maximum={self.maximum})"
    return head + tail
class EnvPool(Protocol):
  """Cpp PyEnvpool class interface.

  Structural (Protocol) stub for the pybind11-exported environment pool.
  Exposes both the dm_env-style (TimeStep) and gym-style (tuple) APIs, plus
  the async send/recv pair that `step` composes.
  """

  # Flat key lists describing the state/action layout on the C++ side.
  _state_keys: List[str]
  _action_keys: List[str]
  spec: Any

  def __init__(self, spec: EnvSpec):
    """Constructor of EnvPool."""

  def __len__(self) -> int:
    """Return the number of environments."""

  @property
  def _spec(self) -> EnvSpec:
    """Cpp env spec."""

  @property
  def _action_spec(self) -> List:
    """Cpp action spec."""

  def _check_action(self, actions: List) -> None:
    """Check action shapes."""

  def _recv(self) -> List[np.ndarray]:
    """Cpp private _recv method."""

  def _send(self, action: List[np.ndarray]) -> None:
    """Cpp private _send method."""

  def _reset(self, env_id: np.ndarray) -> None:
    """Cpp private _reset method."""

  def _from(
    self,
    action: Union[Dict[str, Any], np.ndarray],
    env_id: Optional[np.ndarray] = None,
  ) -> List[np.ndarray]:
    """Conversion for input action."""

  def _to(
    self, state: List[np.ndarray], reset: bool
  ) -> Union[TimeStep, Tuple[Any, np.ndarray, np.ndarray, Any]]:
    """A switch of to_dm and to_gym for output state."""

  @property
  def all_env_ids(self) -> np.ndarray:
    """All env_id in numpy ndarray with dtype=np.int32."""

  @property
  def is_async(self) -> bool:
    """Return if this env is in sync mode or async mode."""

  @property
  def observation_space(self) -> Union[gym.Space, Dict[str, Any]]:
    """Gym observation space."""

  @property
  def action_space(self) -> Union[gym.Space, Dict[str, Any]]:
    """Gym action space."""

  def observation_spec(self) -> Tuple:
    """Dm observation spec."""

  def action_spec(self) -> Union[dm_env.specs.Array, Tuple]:
    """Dm action spec."""

  @property
  def config(self) -> Dict[str, Any]:
    """Envpool config."""

  def send(
    self,
    action: Union[Dict[str, Any], np.ndarray],
    env_id: Optional[np.ndarray] = None,
  ) -> None:
    """Envpool send wrapper."""

  def recv(self, reset: bool = False) -> Union[TimeStep, Tuple]:
    """Envpool recv wrapper."""

  def async_reset(self) -> None:
    """Envpool async reset interface."""

  def step(
    self,
    action: Union[Dict[str, Any], np.ndarray],
    env_id: Optional[np.ndarray] = None,
  ) -> Union[TimeStep, Tuple]:
    """Envpool step interface that performs send/recv."""

  def reset(self,
            env_id: Optional[np.ndarray] = None) -> Union[TimeStep, Tuple]:
    """Envpool reset interface."""
|
997,441 | 4611991674b5109db5b7b18a95cce0087f3f2304 | #!/usr/bin/env python
import rospy
import random
from hri_api.entities import Person, World, Saliency
from zoidstein_hri.zoidstein import Zoidstein, Expression, ZoidGestureData
from hri_api.query import Query
import time
# Initialize objects
# Demo script: the Zoidstein robot gazes at random people and periodically
# emotes/speaks. Requires a running ROS/HRI stack; ordering of the calls
# below is significant (hardware side effects).
world = World()
robot = Zoidstein()
people = [Person(1), Person(2), Person(3)]
bob = Person(4)
# Give the HRI stack time to come up before issuing commands.
time.sleep(5)
#robot.say("Hi, I'm Zoidstein! Hahahahahahaha")
i = 0
robot.expression(Expression.smile, 1.0)
time.sleep(1)
# robot.show_expression(ZoidExpression.smile, 1.0)
#
# time.sleep(1)
#
# robot.show_expression(ZoidExpression.frown, 0.0)
#
# time.sleep(1)
#
# robot.show_expression(ZoidExpression.smile, 1.0)
#
# time.sleep(1)
#
# robot.show_expression(ZoidExpression.frown, 0.0)
#
# time.sleep(1)
robot.gesture(ZoidGestureData.HeadUpDown)
robot.expression( Expression.smile, 1.0)
time.sleep(2)
# Main loop: look at a random person each iteration; roughly every tenth
# iteration (or on a lucky roll) say one of four canned phrases.
while i < 5000:
    person = random.choice(people)
    robot.gaze_and_wait(person.head, speed=0.5)
    wait_time = random.randrange(1, 5)
    if (wait_time == 4) or (i % 10 == 0):
        wait_time=4
        utterance = random.randrange(1, 5)
        if (utterance == 1):
            robot.expression(Expression.smile,1.0)
            robot.say("I sound like a woman. Give me a man's voice.") # Haha! Good going :D
        elif (utterance == 2):
            robot.expression(Expression.frown)
            robot.say("No more I love you")
        elif (utterance == 3):
            robot.expression(Expression.smile)
            robot.say("I'll only love you till the money comes")
        else:
            robot.expression(Expression.frown)
            robot.say("I don't think I like you better")
    # Pause between gaze targets (longer when the robot just spoke).
    time.sleep(wait_time)
    # robot.show_expression(ZoidExpression.frown, 1.0)
    #
    # time.sleep(1)
    #
    # # robot.show_expression(ZoidExpression.frown_mouth, 1.0)
    # #
    # # time.sleep(1)
    #
    # robot.show_expression(ZoidExpression.smile, 1.0)
    #
    # time.sleep(1)
    #
    # robot.show_expression(ZoidExpression.open_mouth, 1.0)
    #
    # time.sleep(1)
    i += 1
rospy.loginfo('hello')
robot.show_expression_and_wait(Expression.frown, 0.0) #TODO: show expression without waiting doesn't work
robot.show_expression_and_wait(Expression.smile, 0.0)
997,442 | 989eaa12ae4078a6faa49077b380981b10d2d13c | import os, json
def parse_opt(opt_path):
with open(opt_path, 'r') as f:
opt = json.load(f)
# export CUDA_VISIBLE_DEVICES
if opt['use_gpu']:
gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
print('===> Export CUDA_VISIBLE_DEVICES = [' + gpu_list + ']')
else:
print('===> CPU mode is set (NOTE: GPU is recommended)')
# path
exp_name = opt['name']
opt['path']['exp_root'] = os.path.join(opt['path']['root_path'],
'experiments', exp_name)
assert os.path.exists(opt['path']['exp_root']) == opt['resume'] # make sure no conflict
opt['path']['checkpoint_dir'] = os.path.join(opt['path']['exp_root'],
'checkpoint')
opt['path']['visual_dir'] = os.path.join(opt['path']['exp_root'],
'visualization')
opt['path']['tb_logger_root'] = opt['path']['exp_root'].replace(
'experiments', 'tb_logger')
if not opt['resume']:
for k, v in opt['path'].items():
if k == 'root_path':
continue
elif k == 'tb_logger_root':
if opt['use_tb_logger']:
os.makedirs(v)
else:
os.makedirs(v)
# dataset
for k, v in opt['datasets'].items():
opt['datasets'][k]['phase'] = k
opt['datasets'][k]['num_keypoints'] = opt['num_keypoints']
# network
opt['networks']['hourglass']['num_keypoints'] = opt['num_keypoints']
opt['networks']['ghcu']['output_dim'] = 2 * opt['num_keypoints']
opt['networks']['ghcu']['in_channel'] = opt['num_keypoints']
with open(os.path.join(opt['path']['exp_root'], 'opt.json'), 'w') as f:
json.dump(opt, f, indent=4)
return opt |
997,443 | 60e4b802281cdf66f43af2e02edd210f3dc39a5f | from django.shortcuts import render
# Create your views here.
from django.contrib.auth.decorators import login_required
from .models import Prescription
@login_required(login_url='/login/')
def eprescription(request, template_name='pages/eprescription.html'):
ctx = {}
hadm_id = request.user.hadm_id
prescription = Prescription.objects.filter(hadm_id=hadm_id).order_by('startdate')
ctx['prescription'] = prescription
return render(request, template_name, ctx)
|
997,444 | f7b61278acceab4e03121f66d4244d781d2a01c7 | # Deep Knowledge Tracing (Plus)
#
# Paper: Chun-Kit Yeung, Dit-Yan Yeung
# Addressing Two Problems in Deep Knowledge Tracing via Prediction-Consistent Regularization
# arXiv:1806.02180v1 [cs.AI] 6 Jun 2018
#
#
# For further reference:
#
# 1. Paper: Chris Piech, Jonathan Spencer, Jonathan Huang, et al.
# Deep Knowledge Tracing
# https://arxiv.org/abs/1506.05908
#
# 2. Paper: Chun-Kit Yeung, Zizheng Lin, Kai Yang, et al.
# Incorporating Features Learned by an Enhanced Deep Knowledge Tracing Model for STEM/Non-STEM Job Prediction
# arXiv:1806.03256v1 [cs.CY] 6 Jun 2018
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error
from math import sqrt
from time import time
# import seaborn as sns
import json
import os
import sys
import argparse
class SkillLSTM(nn.Module):
    """Single-layer LSTM over skill one-hot sequences with a sigmoid head.

    Input is (batch, time, 2 * num_skills); the output is
    (batch, time, num_skills) with entries past each sequence's true
    length zeroed out via a mask built from *lengths*.
    """

    def __init__(self, input_size, hidden_size, output_size, dropout=.1):
        super(SkillLSTM, self).__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, lengths, hidden=None):
        # Valid-timestep mask: ones over each sequence's prefix, zeros after.
        mask = torch.zeros(x.size(0), x.size(1), x.size(2) // 2, device=x.device)
        for row, n_steps in enumerate(lengths):
            mask[row][:n_steps] = 1
        padded_len = x.size(1)
        packed = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
        if hidden is None:
            packed, state = self.rnn(packed)
        else:
            packed, state = self.rnn(packed, hidden)
        out, _ = pad_packed_sequence(packed, batch_first=True, total_length=padded_len)
        out = torch.sigmoid(self.linear(self.dropout(out)))
        return out * mask, state
class SkillDataset(Dataset):
    """Dataset of per-student submission sequences encoded as skill one-hots.

    Each item is a (max_q, 2*max_skill) tensor: the first max_skill columns
    mark the skills (or the problem id, when skills=False) attempted at each
    step; the second half marks the same columns again when the verdict was
    a success (verdict == 1).
    """

    def __init__(self, problems_file, submissions_file, training, group, max_q=0, skills=True):
        """Load problems/submissions JSON and select users by cross-validation group.

        Args:
            problems_file: JSON list of problems with 'id' and 'tags'.
            submissions_file: JSON list of users with 'group' and 'submissions'.
            training: when True keep users NOT in *group*; when False keep
                only users in *group* (held-out fold).
            group: cross-validation group index.
            max_q: 0 = keep whole sequences and grow self.max_q to the longest
                one; otherwise split sequences into chunks of at most max_q,
                discarding chunks of length <= 1.
            skills: True = encode skill tags, False = encode problem ids.
        """
        super(SkillDataset, self).__init__()
        with open(problems_file, 'r') as file:
            problem_data = json.load(file)
        with open(submissions_file, 'r') as file:
            user_submissions = json.load(file)
        if skills:
            # Number of distinct skill tags across all problems.
            tags = set()
            for problem in problem_data:
                tags |= set(problem['tags'])
            self.max_skill = len(tags)
        else:
            # One "skill" slot per problem.
            self.max_skill = len(problem_data)
        self.skills = skills
        self.problem_id_2_tags = {problem['id']: problem['tags'] for problem in problem_data}
        self.max_q = max_q
        self.students_data = []
        for user_data in user_submissions:
            user_group = user_data['group']
            # Skip held-out users during training, non-held-out during eval.
            if training and user_group == group \
                    or not training and user_group != group:
                continue
            submissions = user_data['submissions']
            num_submissions = len(submissions)
            if max_q == 0:
                self.max_q = max(self.max_q, num_submissions)
                self.students_data.append(submissions)
            else:
                # Chunk long sequences; drop trivial (length <= 1) chunks.
                res = [submissions[k:min(k + max_q, num_submissions)]
                       for k in range(0, num_submissions, max_q)]
                self.students_data += filter(lambda x: len(x) > 1, res)

    def __len__(self):
        """Number of student sequences (or chunks)."""
        return len(self.students_data)

    def __getitem__(self, idx):
        """Return (encoded sequence, true length) for student/chunk *idx*."""
        submission = torch.zeros(self.max_q, 2 * self.max_skill)
        for i in range(len(self.students_data[idx])):
            sub = self.students_data[idx][i]
            if self.skills:
                for tag in self.problem_id_2_tags[sub['problem']]:
                    submission[i][tag] = 1
                    # Second half encodes a correct attempt on the same skill.
                    if sub['verdict'] == 1:
                        submission[i][self.max_skill + tag] = 1
            else:
                problem_id = sub['problem']
                submission[i][problem_id] = 1
                if sub['verdict'] == 1:
                    submission[i][self.max_skill + problem_id] = 1
        return submission, torch.tensor(len(self.students_data[idx]))
def output(auc, rmse, mae):
    """Print the three evaluation metrics, one per line."""
    print("ROC AUC: " + str(auc))
    print("RMSE: " + str(rmse))
    print("MAE: " + str(mae))
def train(problems, submissions, model_dir, num, group,
          lambda_o, lambda_w1, lambda_w2, hidden_size, dropout=.1,
          lr=.001, betas=(.9, .999), max_grad_norm=2., patience=10,
          num_epochs=30, batch_size=32, max_q=1000, skills=True, dump=False,
          shuffle=False, compact=False):
    """Train one DKT(+) model on a cross-validation fold; return test metrics.

    The best-AUC checkpoint is kept on disk and reloaded at the end; training
    stops early when AUC has not improved for *patience* epochs. Requires a
    CUDA device. Returns (auc, rmse, mae) of the best model on the held-out
    group. lambda_o weighs the current-step reconstruction term and
    lambda_w1/lambda_w2 the L1/L2 waviness terms of the DKT+ regularized
    loss (see the paper referenced in the file header).

    Args:
        problems/submissions: paths to the JSON data files.
        model_dir: directory where checkpoint/plot/profile files are written.
        num: repetition index (used only in the model file name).
        group: held-out cross-validation group.
        compact: use the vectorized (masked-select) loss instead of the
            per-student loop; dump: also pickle per-user skill profiles.
    """
    model_name = os.path.join(model_dir, ('dkt - %d %d %.1f %.1f %.1f' % (num, group, lambda_o, lambda_w1, lambda_w2)) + (' - skills' if skills else ''))
    dkt_model_path = model_name + '.pth'
    training_set = SkillDataset(problems_file=problems, submissions_file=submissions,
                                training=True, group=group, max_q=max_q, skills=skills)
    training_set_loader = DataLoader(training_set,
                                     batch_size=batch_size,
                                     shuffle=shuffle,
                                     num_workers=2)
    test_set = SkillDataset(problems_file=problems, submissions_file=submissions,
                            training=False, group=group, max_q=max_q, skills=skills)
    test_set_loader = DataLoader(test_set,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=2)
    print('max skills: %d' % training_set.max_skill)
    model = SkillLSTM(training_set.max_skill * 2, hidden_size, training_set.max_skill, dropout)
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=lr, betas=betas)
    loss_bce = nn.BCELoss()
    loss_list = []
    best_auc = 0
    best_auc_epoch = 0
    epoch = 0
    # num_epochs == -1 effectively means "until early stopping" (!= test).
    while epoch != num_epochs:
        epoch += 1
        epoch_loss = 0
        print("Entering #%d, group %d, epoch %d:" % (num, group, epoch))
        model.train()
        with torch.enable_grad(), tqdm(total=len(training_set), ascii=True) as progress_bar:
            for student, lengths in training_set_loader:
                student = student.cuda()
                optimizer.zero_grad()
                batch_out, _ = model(student, lengths)
                loss = torch.tensor(0, dtype=torch.float).cuda()
                if compact:
                    # Vectorized loss over the whole (padded) batch; padding
                    # steps are excluded by masking on attempted skills.
                    student_0 = student[:, :, :training_set.max_skill]
                    student_1 = student[:, :, training_set.max_skill:]
                    assert batch_out.size() == student_0.size()
                    assert batch_out.size() == student_1.size()
                    # Next-step prediction term (the core DKT objective).
                    mask_next = (student_0[:, 1:] != 0)
                    loss += loss_bce(batch_out[:, :-1].masked_select(mask_next),
                                     student_1[:, 1:].masked_select(mask_next))
                    # Current-step reconstruction term, weighted by lambda_o.
                    mask_curr = (student_0 != 0)
                    loss += lambda_o * loss_bce(batch_out.masked_select(mask_curr),
                                                student_1.masked_select(mask_curr))
                    # Waviness (smoothness) penalties on consecutive outputs.
                    diff = batch_out[:, 1:] - batch_out[:, :-1]
                    loss += lambda_w1 * torch.mean(torch.abs(diff))
                    loss += lambda_w2 * torch.mean(diff ** 2)
                else:
                    # Per-student loss over each sequence's true length.
                    for batch_idx in range(student.size(0)):
                        batch_out_part = batch_out[batch_idx][:lengths[batch_idx]]
                        student_part = student[batch_idx][:lengths[batch_idx]]
                        student_part_0 = student_part[:, :training_set.max_skill]
                        student_part_1 = student_part[:, training_set.max_skill:]
                        assert batch_out_part.size() == student_part_0.size()
                        assert batch_out_part.size() == student_part_1.size()
                        loss += loss_bce(batch_out_part[:-1] * student_part_0[1:], student_part_1[1:])
                        loss += lambda_o * loss_bce(batch_out_part * student_part_0, student_part_1)
                        diff = batch_out_part[1:] - batch_out_part[:-1]
                        loss += lambda_w1 * torch.mean(torch.abs(diff))
                        loss += lambda_w2 * torch.mean(diff ** 2)
                epoch_loss += loss.item()
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=max_grad_norm)
                optimizer.step()
                progress_bar.update(student.size(0))
                progress_bar.set_postfix(epoch=epoch, loss=loss.item())
        loss_list.append(epoch_loss)
        print("Epoch loss: {}".format(epoch_loss))
        print("Evaluating the trained model")
        auc, rmse, mae = evaluate(model, test_set, test_set_loader)
        output(auc, rmse, mae)
        print("Evaluation complete")
        # Checkpoint on AUC improvement; stop after `patience` flat epochs.
        if auc > best_auc:
            torch.save(model.state_dict(), dkt_model_path)
            best_auc = auc
            best_auc_epoch = epoch
        if epoch - best_auc_epoch >= patience:
            print('Early Stopping: No AUC improvement in the last %d epochs.' % patience)
            break
    # Save the loss curve and restore the best checkpoint for final metrics.
    plt.figure()
    plt.plot(loss_list)
    plt.savefig(model_name + '.svg')
    model.load_state_dict(torch.load(dkt_model_path))
    model.cuda()
    auc, rmse, mae = evaluate(model, test_set, test_set_loader)
    print('*' * 30)
    print('Best Model: %d' % best_auc_epoch)
    output(auc, rmse, mae)
    if skills and dump:
        # Dump per-user skill-mastery trajectories over the FULL data set
        # (group=-1 with training=True keeps every user).
        print('+' * 30)
        print('Dumping user profiles')
        dataset = SkillDataset(problems_file=problems,
                               submissions_file=submissions,
                               training=True, group=-1, max_q=0, skills=True)
        user_profiles = []
        model.eval()
        with torch.no_grad(), tqdm(total=len(dataset), ascii=True) as progress_bar:
            for student, length in dataset:
                student = student.cuda()
                batch_out, _ = model(student.unsqueeze(0), length.unsqueeze(0))
                batch_out = batch_out[0]
                user_profiles.append(batch_out[:length].cpu().numpy())
                progress_bar.update(1)
        print('Total:', len(user_profiles))
        with open(model_name + ' - profiles.bin', 'wb') as file:
            import pickle
            pickle.dump(user_profiles, file)
    return auc, rmse, mae
def evaluate(model, test_set, test_set_loader):
    """Score *model* on the test loader and return (ROC AUC, RMSE, MAE).

    Predictions at step t are compared with the outcomes at step t+1,
    restricted to the skills actually attempted there.
    """
    labels = torch.zeros(0).cuda()
    preds = torch.zeros(0).cuda()
    model.eval()
    with torch.no_grad(), tqdm(total=len(test_set), ascii=True) as progress_bar:
        for student, lengths in test_set_loader:
            student = student.cuda()
            batch_out, _ = model(student, lengths)
            attempted = student[:, 1:, :test_set.max_skill]
            correct = student[:, 1:, test_set.max_skill:]
            batch_out = batch_out[:, :-1]
            assert batch_out.size() == attempted.size()
            assert batch_out.size() == correct.size()
            # Only score skills that were attempted at the next step.
            mask = (attempted != 0)
            labels = torch.cat([labels, correct.masked_select(mask)])
            preds = torch.cat([preds, batch_out.masked_select(mask)])
            progress_bar.update(student.size(0))
    labels = labels.cpu().numpy()
    preds = preds.cpu().numpy()
    return roc_auc_score(labels, preds), \
        sqrt(mean_squared_error(labels, preds)), \
        mean_absolute_error(labels, preds)
def main(argv):
    """CLI driver: parse arguments, run r repetitions of k-fold training for
    every (lambda_o, lambda_w1, lambda_w2) combination, and print the mean
    and standard deviation of ROC AUC / RMSE / MAE for each combination."""
    # sns.set()
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-p', '--problems', type=str, help='file path to problems.json')
    parser.add_argument('-s', '--submissions', type=str, help='file path to user_submissions.json')
    parser.add_argument('-D', '--dir', type=str, help='dir to models')
    parser.add_argument('-b', '--batch', type=int, default=32, help='batch size')
    parser.add_argument('-e', '--epochs', type=int, default=30, help='number of epochs')
    parser.add_argument('-H', '--hidden', type=int, default=64, help='DKT hidden layer size')
    parser.add_argument('-l', type=int, default=200, help='max length')
    parser.add_argument('-o', type=float, nargs='+', default=[0.], help='lambda_o')
    parser.add_argument('-w1', type=float, nargs='+', default=[0.], help='lambda_w1')
    parser.add_argument('-w2', type=float, nargs='+', default=[0.], help='lambda_w2')
    parser.add_argument('--dropout', type=float, default=.1, help='dropout probability')
    parser.add_argument('--skills', action='store_true', default=False, help='train skills DKT instead of standard DKT (use skill-level tags instead of exercise-level tags)')
    parser.add_argument('--dump', action='store_true', default=False, help='dump user profiles for skills DKT')
    parser.add_argument('--shuffle', action='store_true', default=False, help='random shuffle training set data')
    parser.add_argument('--compact-loss', action='store_true', default=False, help='use a compact form of loss function')
    parser.add_argument('--alpha', type=float, default=.001, help='adam-alpha')
    parser.add_argument('--betas', type=float, nargs=2, default=[.9, .999], help='adam-betas')
    parser.add_argument('--max-grad-norm', type=float, default=2., help='max grad norm allowed when clipping')
    parser.add_argument('--patience', type=int, default=10, help='number of epochs to wait when AUC does not improve')
    parser.add_argument('-r', '--repeat', type=int, default=1, help='times of repetition')
    parser.add_argument('-k', type=int, default=1, help='k-fold cross validation')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    args = parser.parse_args(argv)
    # Fix both RNGs so repeated runs are reproducible.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    k = args.k
    r = args.repeat
    start_time = time()
    # Grid search over the three DKT+ regularization weights.
    for lambda_o in args.o:
        for lambda_w1 in args.w1:
            for lambda_w2 in args.w2:
                # Flat metric arrays indexed by repetition*k + fold.
                auc = np.zeros(r * k)
                rmse = np.zeros(r * k)
                mae = np.zeros(r * k)
                for j in range(r):
                    print('#%d:' % j)
                    for i in range(k):
                        print('group %d:' % i)
                        auc[j * k + i], rmse[j * k + i], mae[j * k + i] = train(args.problems, args.submissions, args.dir, j, i, lambda_o, lambda_w1, lambda_w2,
                                                                               hidden_size=args.hidden, dropout=args.dropout,
                                                                               lr=args.alpha, betas=args.betas, max_grad_norm=args.max_grad_norm,
                                                                               batch_size=args.batch, num_epochs=args.epochs, patience=args.patience,
                                                                               max_q=args.l, skills=args.skills, dump=args.dump,
                                                                               shuffle=args.shuffle, compact=args.compact_loss)
                        print('-' * 30)
                        print()
                # Summary for this hyper-parameter combination.
                print('=' * 30)
                pattern = '{name}: {mean} (+/- {std})'
                print('o = %.1f, w1 = %.1f, w2 = %.1f' % (lambda_o, lambda_w1, lambda_w2))
                print(pattern.format(name='ROC AUC', mean=auc.mean(), std=auc.std()))
                print(pattern.format(name='RMSE', mean=rmse.mean(), std=rmse.std()))
                print(pattern.format(name='MAE', mean=mae.mean(), std=mae.std()))
                print('=' * 30)
                print()
    print()
    print('Elapsed time: ' + str(time() - start_time))
# CLI entry point: strip the program name and hand the rest to main().
if __name__ == '__main__':
    main(sys.argv[1:])
|
997,445 | d514938370232370a63b213b374f2de288b22f23 | import math
def evenarray(*args, **kwargs):
    """Return `length` evenly spaced values starting at `lower`.

    Call as evenarray(upper, length) (lower defaults to 0) or
    evenarray(lower, upper, length). Keyword options: include_lower
    (default True), include_upper (default False), and round (default
    False; floors each value to an int). Raises ValueError for any other
    positional arity; length < 1 yields [lower].
    """
    if len(args) == 2:
        lower, upper, length = 0, args[0], args[1]
    elif len(args) == 3:
        lower, upper, length = args
    else:
        raise ValueError("Evenarray takes 2 or 3 args.")
    include_lower = kwargs.get('include_lower', True)
    include_upper = kwargs.get('include_upper', False)
    rounded = kwargs.get('round', False)
    if length < 1:
        return [lower]
    if not include_lower:
        # Generate one extra point, then drop the lower bound at the end.
        length = length + 1
    if include_upper:
        divider = length - 1 if length > 1 else 1
    else:
        divider = length
    values = [lower + float(x * (upper - lower)) / float(divider) for x in range(length)]
    if rounded:
        values = [int(math.floor(v)) for v in values]
    return values if include_lower else values[1:]
997,446 | 221baf93ac6bb3b763c64e3dec4f20b677dc6dfb | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import bitalino # pylint: disable=E0401
import time
import BITalino
import numpy # pylint: disable=E0401
import requests # pylint: disable=E0401
# %%
# Set variables
# Acquisition parameters handed to the Electromyography helper below.
srate = 1000  # sampling rate (presumably Hz) — confirm against BITalino docs
channels = [0]  # analog channel indices to acquire
nframes = 100  # frames returned per aquire_signals() call
threshold = 5  # envelope threshold used to toggle the LED in main()
# %%
# Mac OS
# macAddress = "/dev/tty.BITalino-XX-XX-DevB"
# Windows/Container/Raspberry
macAddress = "98:D3:41:FD:4F:19"
# Opens the Bluetooth connection to the board.
device = bitalino.BITalino(macAddress)
# %%
# Wrap the raw device in the project's EMG helper class.
emg = BITalino.Electromyography(
    device, srate=srate, channels=channels, nframes=nframes, threshold=threshold)
def main():
    """Poll the EMG stream and drive an LED from the signal envelope.

    Stops when the second signal column drops below 1 (presumably an
    end-of-session marker — confirm against the BITalino helper), always
    shutting the device down cleanly.
    """
    pin = 23
    try:
        while True:
            frame = emg.aquire_signals()
            if numpy.mean(frame[:, 1]) < 1:
                break
            # Mean absolute first difference of the EMG column as an envelope.
            envelope = numpy.mean(abs(numpy.diff(frame[:, -1])))
            status = "on" if envelope > threshold else "off"
            emg.set_led_status(pin=pin, status=status)
    finally:
        print("STOP")
        device.stop()
        device.close()


if __name__ == "__main__":
    main()

# %%
|
997,447 | cd8d88113b239409b3d23b92381b31822481d453 | #All variables from Fokker 100 Data sheet
#Everything defined as meter, Newton, or deg
C_a = 0.505 # [m]
l_a = 1.611 # [m]
x_1 = 0.125 # [m]
x_2 = 0.498 # [m]
x_3 = 1.494 # [m]
x_a = 0.245 # [m]
h = 0.161 # [m]
t_sk = 0.0011 # [m]
t_sp = 0.0024 # [m]
t_st = 0.0012 # [m]
h_st = 0.012 # [m]
w_st = 0.017 # [m]
n_st = 11
d_1 = 0.00389 # [m]
d_3 = 0.01245 # [m]
theta = 30 # [degrees]
P = 49200 # [Newton] |
997,448 | 45a4aa24961cd5a486e86ac316330f5b34745c13 | #!/usr/bin/env python
import hashlib, os, sys, stat, time, gdbm
# TODO exclude and include filters
# CONSTANTS:
# This list represents files that may linger in directories
# preventing this algorithm from recognizing them as empty.
# we mark them as deletable, even if we do NOT have other
# copies available:
deleteList = [ "album.dat", "album.dat.lock", "photos.dat", "photos.dat.lock", "Thumbs.db", ".lrprev", "Icon\r", '.dropbox.cache', '.DS_Store' ]
# This list describes files and directories we do not want to risk
# messing with. If we encounter these, never mark them as deletable.
# NOTE(review): name is missing an 'e' ("doNotDeleteList"); kept as-is
# for compatibility with any code referencing it.
doNotDeletList = []
# size of hashing buffer:
BUF_SIZE = 65536
# default to quiet mode:
verbose=False
def resolve_candidates(candidates, currentDepth=None):
    """Helper function which examines a list of candidate objects with identical
    contents (as determined elsewhere) to determine which of the candidates is
    the "keeper" (or winner). The other candidates are designated losers.
    The winner is selected by incrementally increasing the directory depth
    (from 0) until one of the candidates is encountered; within a depth, the
    shorter pathname wins. Returns (winner, losers) or (None, None) when
    nothing resolves at this depth or the winner is an empty directory.
    TODO - other criteria?
    """
    depthMap={}
    losers = []
    for candidate in candidates:
        if currentDepth != None and candidate.depth > currentDepth:
            # this candidate is too deep
            continue
        if candidate.depth not in depthMap:
            # encountered a new candidate, lets store it
            depthMap[candidate.depth] = candidate
        else:
            # found another candidate at the same depth
            incumbent = depthMap[candidate.depth]
            # use pathname length as a tie-breaker
            if len(incumbent.pathname) > len(candidate.pathname):
                depthMap[candidate.depth] = candidate
    # NOTE: Python-2 semantics — dict.keys() returns a sortable/poppable list.
    k=depthMap.keys()
    if len(k) == 0:
        # nothing to resolve at this depth
        return None, None
    k.sort()
    md=k.pop(0)
    # we choose the candidate closest to the root
    # deeper candidates are the losers
    winner=depthMap[md]
    if isinstance(winner, DirObj) and winner.is_empty():
        # we trim empty directories using DirObj.prune_empty()
        # because it produces less confusing output
        return None, None
    # once we have a winner, mark all the other candidates as losers
    for candidate in candidates:
        if candidate != winner:
            losers.append(candidate)
    return winner, losers
def issocket(path):
    """Return True when *path* refers to a socket (os.path provides isfile
    and isdir but no socket check)."""
    return stat.S_ISSOCK(os.stat(path).st_mode)
def generate_delete(filename):
    """Print an `rm -rf` shell command for *filename*, quoting it so the
    shell parses it as a single word.

    Names containing any of ' ( ) are wrapped in double quotes; all
    other names are wrapped in single quotes.
    """
    # Portability fix: use the print() call form, which behaves identically
    # under Python 2 (single argument) and Python 3, instead of the
    # Python-2-only print statement.
    # characters that we will wrap with double quotes:
    delimTestChars = set("'()")
    if any((c in delimTestChars) for c in filename):
        print('rm -rf "' + filename + '"')
    else:
        print("rm -rf '" + filename + "'")
def check_int(s):
    """Return True when *s* is an optionally signed decimal integer string."""
    body = s[1:] if s[0] in ('-', '+') else s
    return body.isdigit()
def check_level(pathname):
    """Split an optional numeric "weight:" prefix off *pathname*.

    Returns (weight, remainder) when the text before the first ':' parses
    as an integer; otherwise returns (0, pathname) unchanged.
    """
    pieces = pathname.split(':')
    if len(pieces) > 1:
        prefix = pieces.pop(0)
        rest = ':'.join(pieces)
        if check_int(prefix):
            return int(prefix), rest
    # if anything goes wrong just fail back to assuming the whole thing is a path
    return 0, pathname
class EntryList:
"""A container for all source directories and files to examine"""
    def __init__(self, arguments, databasePathname, staggerPaths):
        """Build the entry tree from command-line *arguments*.

        Each argument may carry a "weight:" prefix (see check_level) and may
        be a file or a directory; directories are walked bottom-up. When
        *databasePathname* is given, a gdbm hash cache is opened and its
        mtime is used to decide cache freshness. When *staggerPaths* is
        true, each successive argument's weight is offset by the depth of
        the previous one.
        """
        self.contents = {}
        self.modTime = None
        self.db = None
        stagger=0;
        if databasePathname != None:
            try:
                self.modTime = os.stat(databasePathname).st_mtime
            except OSError:
                print "# db " + databasePathname + " doesn't exist yet"
                self.modTime = None
            self.db = gdbm.open(databasePathname, 'c')
            # A brand-new db gets "now" as its reference time.
            if self.modTime == None:
                self.modTime = time.time()
            print '# db last modification time is ' + str(time.time() - self.modTime) + ' seconds ago'
        # walk arguments adding files and directories
        for entry in arguments:
            # strip trailing slashes, they are not needed
            entry=entry.rstrip('/')
            # check if a weight has been provided for this argument
            weightAdjust, entry = check_level(entry)
            if os.path.isfile(entry):
                if staggerPaths:
                    weightAdjust=weightAdjust + stagger
                self.contents[entry]=FileObj(entry, dbTime=self.modTime, db=self.db, weightAdjust=weightAdjust)
                if staggerPaths:
                    stagger=stagger + self.contents[entry].depth
            elif issocket(entry):
                print '# Skipping a socket ' + entry
            elif os.path.isdir(entry):
                if staggerPaths:
                    weightAdjust=weightAdjust + stagger
                topDirEntry=DirObj(entry, weightAdjust)
                self.contents[entry]=topDirEntry
                # Bottom-up walk so children exist before parents are examined.
                for dirName, subdirList, fileList in os.walk(entry, topdown=False):
                    dirEntry=topDirEntry.place_dir(dirName, weightAdjust)
                    for fname in fileList:
                        if issocket(dirEntry.pathname + '/' + fname):
                            print '# Skipping a socket ' + dirEntry.pathname + '/' + fname
                        else:
                            dirEntry.files[fname]=FileObj(fname, parent=dirEntry, dbTime=self.modTime, db=self.db, weightAdjust=weightAdjust)
                if staggerPaths:
                    stagger=topDirEntry.max_depth()
            else:
                print "I don't know what this is" + entry
                sys.exit()
        if self.db != None:
            self.db.close()
def count_deleted_bytes(self): # EntryList.count_deleted_bytes
"""Returns a count of all the sizes of the deleted objects within"""
bytes=0
for name, e in self.contents.iteritems():
bytes = bytes + e.count_deleted_bytes()
return bytes
def count_deleted(self): # EntryList.count_deleted
"""Returns a count of all the deleted objects within"""
count=0
for name, e in self.contents.iteritems():
count = count + e.count_deleted()
return count
def prune_empty(self): # EntryList.prune_empty
"""Crawls through all directories and deletes the children of the deleted"""
prevCount = self.count_deleted()
for name, e in allFiles.contents.iteritems():
e.prune_empty()
return allFiles.count_deleted() - prevCount
def walk(self): # EntryList.walk
for name, topLevelItem in allFiles.contents.iteritems():
for item in topLevelItem.walk():
yield item
def generate_commands(self): # EntryList.generate_commands
"""Generates delete commands to dedup all contents"""
selectDirMap={}
selectFileMap={}
emptyMap={}
# TODO file removals should be grouped by the winner for better reviewing
for name, e in self.contents.iteritems():
e.generate_commands(selectDirMap, selectFileMap, emptyMap)
winnerList=selectDirMap.keys()
if len(winnerList):
print '####################################################################'
print '# redundant directories:'
winnerList.sort()
for winner in winnerList:
losers=selectDirMap[winner]
print "# '" + winner + "'"
for loser in losers:
generate_delete(loser)
print
winnerList=selectFileMap.keys()
if len(winnerList):
print '####################################################################'
print '# redundant files:'
winnerList.sort()
for winner in winnerList:
losers=selectFileMap[winner]
print "# '" + winner + "'"
for loser in losers:
generate_delete(loser)
print
emptyDirs=emptyMap.keys()
if len(emptyDirs):
print '####################################################################'
print '# directories that are or will be empty after resolving duplicates:'
emptyDirs.sort()
for emptyDir in emptyDirs:
generate_delete(emptyDir)
class HashMap:
"""A wrapper to a python dict with some helper functions"""
def __init__(self,allFiles):
self.contentHash = {}
self.minDepth = 1
self.maxDepth = 0
self.allFiles=allFiles # we will use this later to count deletions
for name, e in allFiles.contents.iteritems():
if isinstance(e, FileObj):
self.add_entry(e)
else:
for dirEntry in e.dirwalk():
#print '\n# adding dir ' + dirEntry.pathname
if not dirEntry.deleted:
for name, fileEntry in dirEntry.files.iteritems():
if not fileEntry.deleted:
self.add_entry(fileEntry)
#print '# added file ' + fileEntry.pathname
else:
#print '# skipping deleted file ' + fileEntry.pathname
pass
dirEntry.finalize()
self.add_entry(dirEntry)
#print '# added dir ' + dirEntry.pathname
else:
#print '# skipping deleted dir ' + dirEntry.pathname
pass
maxd=e.max_depth()
if self.maxDepth < maxd:
self.maxDepth=maxd
def add_entry(self, entry): # Hashmap.add_entry
"""Store a file or directory in the HashMap, indexed by it's hash"""
if entry.hexdigest in self.contentHash:
self.contentHash[entry.hexdigest].append(entry)
else:
self.contentHash[entry.hexdigest] = [ entry ]
if entry.depth < self.minDepth:
self.minDepth = entry.depth
def display(self): # Hashmap.display
"""Generate a human readable report."""
for hashval, list in self.contentHash.iteritems():
for entry in list:
entry.display(False, False)
def delete(self, entry): # Hashmap.delete
"""Marks an entry as deleted then remove it from the HashMap"""
entry.delete()
# remove the entry from the hashmap
list=self.contentHash[entry.hexdigest]
newlist = []
for e in list:
if e != entry:
newlist.append(e)
# if there are no more entries for this hashval, remove
# it from the dictionary m
if len(newlist):
self.contentHash[entry.hexdigest] = newlist
else:
del self.contentHash[entry.hashval]
# also remove all the deleted children from the hashmap
self.prune()
def prune(self): # HashMap.prune
"""Removes deleted objects from the HashMap"""
for hashval, list in self.contentHash.iteritems():
newlist=[]
for entry in list:
if not entry.deleted:
newlist.append(entry)
self.contentHash[hashval]=newlist
def resolve(self): # HashMap.resolve
"""Compares all entries and where hash collisions exists, pick a keeper"""
prevCount = self.allFiles.count_deleted()
# no need to resolve uniques, so remove them from the HashMap
deleteList=[]
for hashval, list in self.contentHash.iteritems():
if len(list) == 1:
deleteList.append(hashval)
for e in deleteList:
del self.contentHash[e]
# delete the directories first, in order of
# increasing depth
if verbose:
print '# checking candidates from depth ' + str(self.minDepth) + ' through ' + str(self.maxDepth)
for currentDepth in xrange(self.minDepth-1,self.maxDepth+1):
for hashval, list in self.contentHash.iteritems():
example = list[0]
if isinstance(example, DirObj):
winner, losers = resolve_candidates(list, currentDepth)
if losers != None:
for loser in losers:
if not loser.deleted:
if verbose:
print '# dir "' + loser.pathname + '" covered by "' + winner.pathname + '"'
self.delete(loser)
loser.winner = winner
self.prune()
for hashval, list in self.contentHash.iteritems():
example = list[0]
if isinstance(example, FileObj):
winner, losers = resolve_candidates(list)
for loser in losers:
if not loser.deleted:
if verbose:
print '# file "' + loser.pathname + '" covered by "' + winner.pathname + '"'
self.delete(loser)
loser.winner = winner
return self.allFiles.count_deleted() - prevCount
class DirObj():
    """A directory object which can hold metadata and references to files and subdirectories.

    Mirrors one on-disk directory: 'files' maps basename -> FileObj and
    'subdirs' maps basename -> DirObj.  'deleted' marks entries scheduled
    for removal; 'winner' records the duplicate being kept instead.
    """
    def __init__(self, name, weightAdjust=0, parent=None):
        # weightAdjust: extra depth offset (used by --stagger-paths);
        # parent: enclosing DirObj, or None for a top-level argument.
        self.name=name
        self.files={}
        self.deleted=False
        self.winner = None
        self.subdirs={}
        self.weightAdjust=weightAdjust
        self.parent=parent
        ancestry=self.get_lineage()
        self.pathname='/'.join(ancestry)
        self.depth=len(ancestry) + self.weightAdjust
        # directories whose basename is in the module-level deleteList are
        # ignored when deciding whether a directory counts as "empty"
        self.ignore=self.name in deleteList
        #if verbose:
        #    print '# ' + self.pathname + ' has an adjusted depth of ' + str(self.depth)
    def get_lineage(self): # DirObj.get_lineage
        """Crawls back up the directory tree and returns a list of parents"""
        if self.parent == None:
            # a top-level entry may itself be a multi-component path
            return self.name.split('/')
        ancestry=self.parent.get_lineage()
        ancestry.append(self.name)
        return ancestry
    def max_depth(self): # DirObj.max_depth
        """Determine the deepest point from this directory"""
        md=self.depth
        if len(self.subdirs.keys()):
            for name, entry in self.subdirs.iteritems():
                if not entry.deleted:
                    td = entry.max_depth()
                    if td > md:
                        md=td
            return md
        elif len(self.files.keys()):
            # files sit one level below the directory that holds them
            return md + 1
        else:
            return md
    def display(self, contents=False, recurse=False): # DirObj.display
        """Generate a human readable report.
        'contents' controls if files are displayed
        'recurse' controls if subdirs are displayed
        """
        if recurse:
            for name, entry in self.subdirs.iteritems():
                entry.display(contents, recurse)
        if contents:
            for name, entry in self.files.iteritems():
                entry.display(contents, recurse);
        print '# Directory\t' + str(self.deleted) + '\t' + str(self.ignore) + '\t' + str(self.depth) + '\t' + self.hexdigest + ' ' + self.pathname
    def place_dir(self, inputDirName, weightAdjust): # DirObj.place_dir
        """Matches a pathname to a directory structure and returns a DirObj.

        Walks (creating as needed) the subdir chain for inputDirName, which
        must start with this directory's own path components.
        """
        #print "looking to place " + inputDirName + " in " + self.name
        inputDirList=inputDirName.split('/')
        nameList=self.name.split('/')
        # consume the shared prefix; any mismatch means the caller handed us
        # a path that does not live under this directory
        while (len(inputDirList) and len(nameList)):
            x=inputDirList.pop(0)
            y=nameList.pop(0)
            if x != y:
                print x + ' and ' + y + ' do not match'
                raise LookupError
        if len(inputDirList) == 0:
            return self
        nextDirName=inputDirList[0]
        if nextDirName in self.subdirs:
            #print "found " + nextDirName + " in " + self.name
            return self.subdirs[nextDirName].place_dir('/'.join(inputDirList), weightAdjust)
        #print "did not find " + nextDirName + " in " + self.name
        nextDir=DirObj(nextDirName, weightAdjust, self)
        self.subdirs[nextDirName]=nextDir
        return nextDir.place_dir('/'.join(inputDirList), weightAdjust)
    def dirwalk(self, topdown=False): # DirObj.dirwalk
        """A generator which traverses just subdirectories"""
        if topdown:
            yield self
        for name, d in self.subdirs.iteritems():
            for dirEntry in d.dirwalk():
                yield dirEntry
        if not topdown:
            # default is bottom-up, so children are visited before parents
            yield self
    def walk(self): # DirObj.walk
        """A generator which traverses files and subdirs"""
        for name, subdir in self.subdirs.iteritems():
            for e in subdir.walk():
                yield e
        for name, fileEntry in self.files.iteritems():
            yield fileEntry
        yield self
    def delete(self): # DirObj.delete
        """Mark this directory and all children as deleted"""
        self.deleted=True
        for name, d in self.subdirs.iteritems():
            d.delete()
        for name, f in self.files.iteritems():
            f.delete()
    def generate_commands(self, selectDirMap, selectFileMap, emptyMap): # DirObj.generate_commands
        """Generates delete commands to dedup all contents of this dir.

        Deleted dirs with a recorded winner are grouped under that winner in
        selectDirMap; deleted dirs without one are reported via emptyMap.
        """
        if self.deleted:
            if self.winner != None:
                if self.winner.pathname in selectDirMap:
                    selectDirMap[self.winner.pathname].append(self.pathname)
                else:
                    selectDirMap[self.winner.pathname] = [ self.pathname ]
            else:
                emptyMap[self.pathname]=True
        else:
            for fileName, fileEntry in self.files.iteritems():
                fileEntry.generate_commands(selectDirMap, selectFileMap, emptyMap)
            for dirName, subdir in self.subdirs.iteritems():
                subdir.generate_commands(selectDirMap, selectFileMap, emptyMap)
    def is_empty(self): # DirObj.is_empty
        """Checks if the dir is empty, ignoring items marked as deleted or ignored"""
        for fileName, fileEntry in self.files.iteritems():
            if not fileEntry.deleted and not fileEntry.ignore:
                return False
        for dirName, subdir in self.subdirs.iteritems():
            if not subdir.deleted and not subdir.is_empty() and not subdir.ignore:
                return False
        return True
    def prune_empty(self): # DirObj.prune_empty
        """Crawls through all directories and marks the shallowest empty entiries for deletion"""
        # delete an empty dir only when its parent is NOT also empty;
        # otherwise the parent will be deleted on a later pass, taking this
        # one with it, which keeps the reported deletions shallow
        if self.is_empty() and not self.deleted and self.parent == None:
            self.delete()
        elif self.is_empty() and not self.deleted and self.parent != None and not self.parent.is_empty():
            self.delete()
        else:
            for dirname, dirEntry in self.subdirs.iteritems():
                dirEntry.prune_empty()
    def finalize(self): # DirObj.finalize
        """Once no more files or directories are to be added, we can
        create a meta-hash of all the hashes therein. This allows us to
        test for directories which have the same contents.
        """
        digests=[]
        for filename, fileEntry in self.files.iteritems():
            digests.append(fileEntry.hexdigest)
        for dirname, dirEntry in self.subdirs.iteritems():
            digests.append(dirEntry.hexdigest)
        # sort so the meta-hash does not depend on dict iteration order
        digests.sort()
        sha1 = hashlib.sha1()
        for d in digests:
            sha1.update(d)
        self.hexdigest=sha1.hexdigest()
    def count_deleted_bytes(self): # DirObj.count_deleted_bytes
        """returns a count of all the sizes of the deleted objects within"""
        bytes=0
        for name, d in self.subdirs.iteritems():
            bytes = bytes + d.count_deleted_bytes()
        for name, f in self.files.iteritems():
            if f.deleted:
                bytes = bytes + f.count_deleted_bytes()
        return bytes
    def count_deleted(self): # DirObj.count_deleted
        """returns a count of all the deleted objects within"""
        if self.deleted:
            deleted=1
        else:
            deleted=0
        for name, d in self.subdirs.iteritems():
            deleted = deleted + d.count_deleted()
        for name, f in self.files.iteritems():
            if f.deleted:
                deleted = deleted + 1
        return deleted
class FileObj():
    """A file object which stores some metadata.

    Computes (or retrieves from the gdbm cache) the SHA-1 of the file's
    contents; 'hexdigest' is what the dedup pass compares.
    """
    def __init__(self, name, parent=None, dbTime=None, db=None, weightAdjust=0):
        # parent: owning DirObj (None for files given directly as arguments);
        # dbTime: db mtime used to decide if a cached hash is still valid;
        # db: open gdbm handle, or None to always hash from disk.
        self.name=name;
        self.winner=None
        self.parent = parent
        self.deleted=False
        self.weightAdjust=weightAdjust
        self.ignore=self.name in deleteList
        if self.parent != None:
            ancestry=self.parent.get_lineage()
            self.pathname='/'.join(ancestry) + '/' + self.name
            self.depth=len(ancestry) + self.weightAdjust
        else:
            self.pathname=self.name
            self.depth=self.weightAdjust
        #if verbose:
        #    print '# ' + self.pathname + ' has an adjusted depth of ' + str(self.depth)
        statResult = os.stat(self.pathname)
        self.modTime = statResult.st_mtime
        self.createTime = statResult.st_ctime
        self.bytes = statResult.st_size
        if self.bytes == 0:
            # empty files are never treated as duplicates; the constant below
            # is simply the SHA-1 of the empty string
            self.ignore = True
            self.hexdigest='da39a3ee5e6b4b0d3255bfef95601890afd80709'
            return
        if db != None:
            # (debug hook: how stale is this file relative to the db?)
            pass
        if db != None and self.pathname in db:
            # we've a cached hash value for this pathname
            if self.modTime > dbTime:
                # file is newer than db: fall through and rehash below
                pass
            else:
                # db is newer than file: trust the cached digest
                if verbose:
                    print '# ' + self.pathname + ' already in db'
                self.hexdigest=db[self.pathname]
                return
        elif db != None:
            # (debug hook: pathname not yet cached)
            pass
        # open and read the file in BUF_SIZE chunks so large files do not
        # have to fit in memory
        sha1 = hashlib.sha1()
        with open(self.pathname, 'rb') as f:
            while True:
                data = f.read(BUF_SIZE)
                if not data:
                    break
                sha1.update(data)
        self.hexdigest=sha1.hexdigest()
        if verbose:
            print '# computed new hash for ' + self.pathname
        if db != None:
            # add/update the cached hash value for this entry
            db[self.pathname]=self.hexdigest
    def max_depth(self): # FileObj.max_depth
        """A file's depth is just its own (weight-adjusted) depth."""
        return self.depth
    def walk(self): # FileObj.walk
        """Used to fit into other generators"""
        yield self
    def delete(self): # FileObj.delete
        """Mark for deletion"""
        self.deleted=True
    def generate_commands(self, selectDirMap, selectFileMap, emptyMap): # FileObj.generate_commands
        """Generates delete commands to dedup all contents"""
        if self.deleted and not self.ignore:
            if self.winner != None:
                # sanity check: identical hashes must imply identical sizes
                if self.bytes != self.winner.bytes:
                    print '# BIRTHDAY CRISIS! matched hashes and mismatched sizes!'
                    sys.exit(-1)
                if self.winner.pathname in selectFileMap:
                    selectFileMap[self.winner.pathname].append(self.pathname)
                else:
                    selectFileMap[self.winner.pathname] = [self.pathname]
            else:
                emptyMap[self.pathname] = True
    def prune_empty(self): # FileObj.prune_empty
        """Crawls through all directories and deletes the children of the deleted"""
        return False # can't prune a file
    def display(self, contents=False, recurse=False): # FileObj.display
        """Generate a human readable report."""
        print '# File\t\t' + str(self.deleted) + '\t' + str(self.ignore) + '\t' + str(self.depth) + '\t' + self.hexdigest + ' ' + self.pathname + ' '
    def count_deleted_bytes(self): # FileObj.count_deleted_bytes
        """Returns a count of all the sizes of the deleted objects within"""
        if self.deleted:
            return self.bytes
        else:
            return 0
    def count_deleted(self): # FileObj.count_deleted
        """Returns a count of all the deleted objects within"""
        if self.deleted:
            return 1
        else:
            return 0
def clean_database(databasePathname):
    """function to remove dead nodes from the hash db.

    Opens the gdbm cache read-write, stats every cached pathname, drops
    entries whose file no longer exists, then compacts the database.
    Prints '.' per live entry and '*' per removed entry as progress.
    """
    print '# loading database ' + databasePathname
    try:
        db = gdbm.open(databasePathname, 'w')
    except:
        print "# " + databasePathname + " could not be loaded"
        sys.exit(-1)
    # even though gdbm supports memory efficient iteration over
    # all keys, I want to order my traversal across similar
    # paths to leverage caching of directory files:
    allKeys=db.keys()
    print '# finished loaded keys from ' + databasePathname
    allKeys.sort()
    print '# finished sorting keys from ' + databasePathname
    print '# deleting dead nodes'
    count=0
    for currKey in allKeys:
        try:
            os.stat(currKey)
            sys.stdout.write('.')
        except OSError:
            # stat failed: the file is gone, drop its cached hash
            del db[currKey]
            sys.stdout.write('*')
            count=count+1
        sys.stdout.flush()
    print "\n# reorganizing " + databasePathname
    # reclaim the space freed by the deletions, then flush to disk
    db.reorganize()
    db.sync()
    db.close()
    print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'
if __name__ == '__main__':
    startTime=time.time()
    sys.argv.pop(0) # do away with the command itself
    # defaults
    # NOTE(review): 'verbose' is only ever assigned when -v is passed below;
    # unless a module-level default exists above this chunk, a run without
    # -v would hit a NameError wherever verbose is consulted -- confirm a
    # default is defined earlier in the file.
    databasePathname=None
    cleanDatabase=False
    staggerPaths=False
    again=True
    # ad-hoc option parsing: keep looping while the head of argv is a
    # recognized switch; the first non-switch argument stops the loop
    while again:
        try:
            nextArg=sys.argv[0] # peek ahead
        except IndexError:
            break # no more args
        again=False
        if nextArg == '-v' or nextArg == '--verbose':
            sys.argv.pop(0)
            again=True
            verbose=True
        if nextArg == '-db' or nextArg == '--database':
            sys.argv.pop(0)
            try:
                databasePathname=sys.argv.pop(0)
            except IndexError:
                print '# argument needed for -db switch'
                sys.exit(-1)
            again=True
        if nextArg == '-cdb' or nextArg == '--clean-database':
            sys.argv.pop(0)
            cleanDatabase=True
            again=True
        if nextArg == '-s' or nextArg == '--stagger-paths':
            sys.argv.pop(0)
            staggerPaths=True
            again=True
    if databasePathname != None:
        print '# set to use database: ' + databasePathname
        if cleanDatabase:
            clean_database(databasePathname)
            sys.exit(0)
    elif cleanDatabase:
        print '# database file must be specified for --clean-database command (use -db)'
        sys.exit(-1)
    # remaining argv entries are the paths to examine
    allFiles = EntryList(sys.argv, databasePathname, staggerPaths)
    print '# files loaded'
    passCount=0
    deleted=1 # fake value to get the loop started
    # alternate prune/resolve passes until a fixed point is reached
    while deleted > 0: # while things are still being removed, keep working
        h = HashMap(allFiles)
        deletedDirectories = allFiles.prune_empty()
        h = HashMap(allFiles)
        deletedHashMatches = h.resolve()
        deleted = deletedDirectories + deletedHashMatches
        passCount = passCount + 1
        if deleted > 0:
            print '# ' + str(deleted) + ' entries deleted on pass ' + str(passCount)
    allFiles.generate_commands()
    #for e in allFiles.walk():
    #    e.display(False,False)
    endTime=time.time()
    print '# total bytes marked for deletion (not including directory files): ' + str(allFiles.count_deleted_bytes()) + '\n'
    print '# total running time: ' + str(endTime - startTime) + ' seconds.'
# vim: set expandtab sw=4 ts=4:
|
997,449 | 5482664e5ec937137344333aa3a4965387e71d64 | from mdp_util import print_policy,generate_small_mdp,print_action
import mdptoolbox.example
import matplotlib.pyplot as plt
import numpy as np
def small_value_iteration(P, R):
    """Run Value Iteration on the small grid MDP for several epsilon
    convergence thresholds; print each resulting policy and plot both the
    per-state utilities and the iteration count for each epsilon.

    P, R: transition and reward arrays as accepted by mdptoolbox.
    """
    epsilon_list = [0.01, 0.001, 0.0001, 0.00001]
    x_ticks = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
    num_iter_list = []
    fig, ax = plt.subplots(figsize=(12, 6))
    for i in range(4):
        my_mdp = mdptoolbox.mdp.ValueIteration(P, R, 0.99, epsilon=epsilon_list[i])
        my_mdp.run()
        print('\n')
        print('policy for \nepsilon=' + str(epsilon_list[i]))
        print_policy(my_mdp.policy)
        num_iter_list.append(my_mdp.iter)
        # offset each run's bars so the four epsilons sit side by side;
        # the terminal state (last entry of V) is dropped from the plot.
        # (bug fix: the legend previously read 'espsilon=')
        ax.bar(x_ticks - 0.2 * (2 - i), list(my_mdp.V)[:-1], 0.2,
               label='epsilon=' + str(epsilon_list[i]))
    ax.set_title('Utility for different epsilon')
    ax.set_xticks(x_ticks)
    ax.set_xlabel('state')
    ax.set_ylabel('utility')
    ax.legend()
    print(num_iter_list)
    _, ax_iter = plt.subplots()
    ax_iter.bar([1, 2, 3, 4], num_iter_list)
    ax_iter.set_xticks([1, 2, 3, 4])
    ax_iter.set_xticklabels(['0.01', '0.001', '0.0001', '0.00001'])
    ax_iter.set_xlabel('epsilon')
    ax_iter.set_ylabel('num iteration')
    ax_iter.set_title('num of iteration for different epsilon')
    plt.show()
def small_policy_iteration(P, R):
    """Solve the small grid MDP with Policy Iteration and compare against
    Value Iteration (epsilon=1e-5): prints the PI policy, then plots
    per-state utilities and iteration counts side by side."""
    state_ticks = np.arange(1, 17)
    pi_solver = mdptoolbox.mdp.PolicyIteration(P, R, 0.99)
    pi_solver.run()
    print('Optimal policy for grid MDP using Policy Iteration:')
    print_policy(pi_solver.policy)
    # drop the absorbing terminal state from both utility vectors
    pi_utilities = list(pi_solver.V)[:-1]
    vi_solver = mdptoolbox.mdp.ValueIteration(P, R, 0.99, epsilon=0.00001)
    vi_solver.run()
    vi_utilities = list(vi_solver.V)[:-1]
    iteration_counts = [pi_solver.iter, vi_solver.iter]
    _, util_ax = plt.subplots()
    util_ax.bar(state_ticks - 0.3, pi_utilities, 0.3, label='policy iteration')
    util_ax.bar(state_ticks, vi_utilities, 0.2, label='value iteration')
    util_ax.set_xticks(state_ticks)
    util_ax.legend()
    util_ax.set_xlabel('state')
    util_ax.set_ylabel('utility value')
    util_ax.set_title('utility value between PI and VI')
    _, iter_ax = plt.subplots(figsize=(4, 4))
    iter_ax.bar([1, 2], iteration_counts, width=0.5)
    iter_ax.set_xticks([1, 2])
    iter_ax.set_xticklabels(['PI', 'VI'])
    iter_ax.set_title('number of iterations between PI and VI')
    iter_ax.set_ylabel('iterations')
    plt.show()
def big_value_iteration(P, R):
    """Run Value Iteration on the 500-state forest MDP for several epsilon
    values; print each policy, then plot utilities and iteration counts."""
    epsilons = [0.01, 0.001, 0.0001, 0.00001]
    utilities = []
    iteration_counts = []
    for eps in epsilons:
        solver = mdptoolbox.mdp.ValueIteration(P, R, 0.99, epsilon=eps)
        solver.run()
        print('policy for epsilon=' + str(eps))
        print_action(solver.policy, eps)
        utilities.append(solver.V)
        iteration_counts.append(solver.iter)
    states = np.arange(500) + 1
    for eps, values in zip(epsilons, utilities):
        plt.plot(states, values, label='epsilon=' + str(eps))
    plt.legend()
    plt.xlabel('state')
    plt.ylabel('utility value')
    plt.title('utility for different epsilon')
    plt.show()
    _, ax = plt.subplots(figsize=(4, 4))
    ax.bar([1, 2, 3, 4], iteration_counts, width=0.5)
    ax.set_xticks([1, 2, 3, 4])
    ax.set_xticklabels(['0.01', '0.001', '0.0001', '0.00001'])
    ax.set_title('num of iterations for different epsilon')
    ax.set_ylabel('iterations')
    plt.show()
def big_policy_iteration(P, R):
    """Solve the forest MDP with Policy Iteration and compare against Value
    Iteration (epsilon=1e-5): prints the PI policy and solver times, then
    plots utilities, iteration counts, and wall-clock times."""
    pi_solver = mdptoolbox.mdp.PolicyIteration(P, R, 0.99)
    pi_solver.run()
    print('optimal policy for PI on forest MDP:')
    print_action(pi_solver.policy)
    print(pi_solver.time)
    print('\n')
    vi_solver = mdptoolbox.mdp.ValueIteration(P, R, 0.99, epsilon=0.00001)
    vi_solver.run()
    print(vi_solver.time)
    states = np.arange(500) + 1
    plt.plot(states, pi_solver.V, label='PI')
    plt.plot(states, vi_solver.V, label='VI')
    plt.legend()
    plt.xlabel('states')
    plt.ylabel('utility value')
    plt.title('utility value between PI and VI')
    plt.show()
    _, iter_ax = plt.subplots(figsize=(4, 4))
    iter_ax.bar([1, 2], [pi_solver.iter, vi_solver.iter], width=0.5)
    iter_ax.set_xticks([1, 2])
    iter_ax.set_xticklabels(['PI', 'VI'])
    iter_ax.set_title('num of iteration between PI and VI')
    plt.show()
    _, time_ax = plt.subplots(figsize=(4, 4))
    time_ax.bar([1, 2], [pi_solver.time, vi_solver.time], width=0.5)
    time_ax.set_xticks([1, 2])
    time_ax.set_xticklabels(['PI', 'VI'])
    time_ax.set_title('time comparison between PI and VI')
    plt.show()
if __name__=='__main__':
    # Small grid MDP with a per-step penalty of -0.01
    P_small,R_small=generate_small_mdp(-0.01)
    small_value_iteration(P_small,R_small)
    small_policy_iteration(P_small,R_small)
    # Forest-management example MDP: 500 states, r1=10, r2=5, p(fire)=0.1
    P_big,R_big=mdptoolbox.example.forest(500,10,5,0.1)
    big_value_iteration(P_big,R_big)
    big_policy_iteration(P_big,R_big)
|
997,450 | 243fa4787ef4d1ed1d703b60c139f29d923bfd30 | T = open("sample.txt", "r")
# Count word frequencies in sample.txt (read via the handle T opened above),
# print them, then append the counts to the end of the same file.
d = dict()
for line in T:
    line = line.strip()
    line = line.lower()
    words = line.split(" ")
    for word in words:
        # dict.get collapses the original if/else counting branch
        d[word] = d.get(word, 0) + 1
T.close()  # bug fix: the read handle was never closed
for k in list(d.keys()):
    print(k, ":", d[k])
# bug fix: use a context manager so the append handle is closed even if a
# write fails (the original relied on an explicit close at the very end)
with open("sample.txt", "a+") as f:
    for k in list(d.keys()):
        f.write("\n")
        f.write(k)
        f.write(" : ")
        f.write(str(d[k]))
997,451 | e910992988c808631d27d162d278fb0f3cd87e58 | # encoding: utf-8
import boto3
from config import auth_config
from config import config
class AthenaConnector(object):
    """Thin wrapper around the boto3 Athena client.

    Region and credentials are pulled from the project config modules at
    construction time.
    """
    def __init__(self):
        self.region_name = config.region_name
        self.aws_access_key_id = auth_config.aws_access_key_id
        self.aws_secret_access_key = auth_config.aws_secret_access_key

    def get_athena_connector(self):
        """Build and return a fresh boto3 Athena client.

        :return: athena client
        """
        return boto3.client(
            service_name="athena",
            region_name=self.region_name,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
        )

    def execute_sql(
        self,
        request  # type: AthenaRequestEntity
    ):
        """Start an Athena query execution described by *request*.

        The request supplies QueryString, QueryExecutionContext (database)
        and ResultConfiguration (output location).
        """
        client = self.get_athena_connector()
        client.start_query_execution(**request.get_request_object())
class AthenaRequestEntity(object):
    """Value object bundling an Athena query with its execution context."""

    def __init__(self, sql, database, result_path):
        self.sql = sql
        self.database = database
        self.result_path = result_path

    def get_request_object(self):
        """Return the kwargs dict expected by start_query_execution."""
        return {
            "QueryString": self.sql,
            "QueryExecutionContext": {"Database": self.database},
            "ResultConfiguration": {"OutputLocation": self.result_path},
        }
|
997,452 | ba9bcf4d584ad1c421efa6edfbfc9dd26884fe74 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/27 上午 11:27
# @Author : Aaron Chou
# @Site : https://github.com/InsaneLife
import numpy as np
# Load the saved embedding file and rebuild it row by row as a single 2-D
# ndarray (useful when the saved object is an array of ragged/object rows).
out_path = "./vocab/google_in_vocab_embedding.npy"
ss = np.load(out_path)
new = []
for each in ss:
    new.append(np.array(each))
new = np.array(new)
# Load the alternate file for comparison; the save of the rebuilt matrix
# is intentionally left commented out below.
out_path = "./vocab/google_in_vocab_embedding1.npy"
ss1 = np.load(out_path)
# np.save(out_path, new)
pass
997,453 | 43d6569a45f6ada64a1d82440d2e6a857448b055 | class Node(object):
def __init__(self, value):
    """Create a singly linked list node holding *value* with no successor."""
    self.value = value
    self.next = None
def swap_nodes(head, left_index, right_index):
    """Swap the values held at positions left_index and right_index
    (0-based, left_index <= right_index) of the linked list at *head*.

    Returns the (unchanged) head.  Improvements over the original:
    a single traversal instead of two, and indices past the end of the
    list are now a safe no-op rather than an AttributeError/NameError.
    """
    node = head
    position = 0
    left_node = right_node = None
    # One pass: remember the two nodes whose values must be exchanged.
    while node is not None and position <= right_index:
        if position == left_index:
            left_node = node
        if position == right_index:
            right_node = node
        position += 1
        node = node.next
    if left_node is not None and right_node is not None:
        left_node.value, right_node.value = right_node.value, left_node.value
    return head
def test_function(test_case):
    """Print the list, swap the requested pair of nodes, print the result.

    test_case is [head, left_index, right_index].  Returns None.
    """
    head, left_index, right_index = test_case[0], test_case[1], test_case[2]
    print_linked_list(head)
    updated = swap_nodes(head, left_index, right_index)
    print_linked_list(updated)
# helper functions for testing purpose
def create_linked_list(arr):
    """Build a linked list from the values in *arr* and return its head
    (None for an empty sequence)."""
    if not arr:
        return None
    head = Node(arr[0])
    tail = head
    for value in arr[1:]:
        tail.next = Node(value)
        tail = tail.next
    return head
def print_linked_list(head):
    """Print node values space-separated on a single line."""
    node = head
    while node is not None:
        print(node.value, end=" ")
        node = node.next
    print()
# Three manual demo runs, each on a fresh list built from the same values.
# Note: test_function has no return statement, so updated_head is None here;
# the swapped list is shown via the prints inside test_function.
arr = [3, 4, 5, 2, 6, 1, 9]
head = create_linked_list(arr)
left_index = 3
right_index = 4
test_case = [head, left_index, right_index]
updated_head = test_function(test_case)
# swap positions 2 and 4
arr = [3, 4, 5, 2, 6, 1, 9]
left_index = 2
right_index = 4
head = create_linked_list(arr)
test_case = [head, left_index, right_index]
updated_head = test_function(test_case)
# swap the first two nodes
arr = [3, 4, 5, 2, 6, 1, 9]
left_index = 0
right_index = 1
head = create_linked_list(arr)
test_case = [head, left_index, right_index]
updated_head = test_function(test_case)
997,454 | b3ee57ace00237ca7612ea2ccf643e72ef9e32db | TEMPLATE_p1="""
/* ****** ****** ****** ****** ****** ****** ****** ****** ****** ******
MBMC - Tracer POST-HOOK (C) CMU SEI 2013: Will Casey, Jeff Gennari,
Jose Morales, Evan Wright, Jono Spring.
Michael Appel
NYU CIMS 2013: Bud Mishra, Thomson Nguyen
*/
#include "pin.H"
namespace WINDOWS
{
#include<Windows.h>
}
#include <iostream>
#include <fstream>
#include <iomanip>
/* ===================================================================== */
/* Global Variables */
/* ===================================================================== */
std::ofstream TraceFile;
int call_stack_depth = 0;
/* Supporting global code from extras.py */
"""
# Second fragment of the generated Pin tool: the output-file knob, the
# usage message, and the start of the analysis-routine section.  The string
# contents are emitted verbatim into the generated C++ source.
TEMPLATE_p1a="""
/* done with supporting extras.py */
/* ===================================================================== */
/* Commandline Switches */
/* ===================================================================== */
KNOB<string> KnobOutputFile(KNOB_MODE_WRITEONCE, "pintool",
"o", "V7.out", "specify trace file name");
/* ===================================================================== */
/* Print Help Message */
/* ===================================================================== */
INT32 Usage()
{
cerr << "This tool produces a trace of calls to RtlAllocateHeap.";
cerr << endl << endl;
cerr << KNOB_BASE::StringKnobSummary();
cerr << endl;
return -1;
}
/* ===================================================================== */
/* Analysis routines */
/* ===================================================================== */
"""
#VOID Before(CHAR * name, WINDOWS::HANDLE hHeap,
# WINDOWS::DWORD dwFlags, WINDOWS::DWORD dwBytes)
#{
# TraceFile << "Before: " << name << "(" << hex << hHeap << ", "
# << dwFlags << ", " << dwBytes << ")" << dec << endl;
#}
#
#VOID After(CHAR * name, ADDRINT ret)
#{
# TraceFile << "After: " << name << " returns " << hex
# << ret << dec << endl;
#}
# Third fragment: instrumentation routines -- a syscall hook plus the start
# of the Image callback that walks every symbol and opens its routine for
# instrumentation (per-routine insert-calls are spliced in after this).
TEMPLATE_p2="""
/* ===================================================================== */
/* Instrumentation routines */
/* ===================================================================== */
VOID instr_hook(INS instr, VOID *v)
{
if(INS_IsSyscall(instr))
{
TraceFile << "got SYS call "<< IARG_INST_PTR << endl;
}
}
VOID Image(IMG img, VOID *v)
{
// Walk through the symbols in the symbol table.
//
for (SYM sym = IMG_RegsymHead(img); SYM_Valid(sym); sym = SYM_Next(sym))
{
string undFuncName = PIN_UndecorateSymbolName(SYM_Name(sym), UNDECORATION_NAME_ONLY);
// Find the RtlAllocHeap() function.
//if (undFuncName == "RtlAllocateHeap")
{
RTN allocRtn = RTN_FindByAddress(IMG_LowAddress(img) + SYM_Value(sym));
if (RTN_Valid(allocRtn))
{
// Instrument to print the input argument value and the return value.
RTN_Open(allocRtn);
"""
# RTN_InsertCall(allocRtn, IPOINT_BEFORE, (AFUNPTR)Before,
# IARG_ADDRINT, "RtlAllocateHeap",
# IARG_FUNCARG_ENTRYPOINT_VALUE, 0,
# IARG_FUNCARG_ENTRYPOINT_VALUE, 1,
# IARG_FUNCARG_ENTRYPOINT_VALUE, 2,
# IARG_END);
# RTN_InsertCall(allocRtn, IPOINT_AFTER, (AFUNPTR)After,
# IARG_ADDRINT, "RtlAllocateHeap",
# IARG_FUNCRET_EXITPOINT_VALUE,
# IARG_END);
# Final fragment: closes the braces opened in TEMPLATE_p2, then the Fini
# callback and main() that wires up the Pin instrumentation and starts the
# traced program (PIN_StartProgram never returns).
TEMPLATE_p3="""
RTN_Close(allocRtn);
}
}
}
}
/* ===================================================================== */
VOID Fini(INT32 code, VOID *v)
{
TraceFile.close();
}
/* ===================================================================== */
/* Main */
/* ===================================================================== */
int main(int argc, char *argv[])
{
// Initialize pin & symbol manager
PIN_InitSymbols();
if( PIN_Init(argc,argv) )
{
return Usage();
}
// Write to a file since cout and cerr maybe closed by the application
TraceFile.open(KnobOutputFile.Value().c_str());
TraceFile << hex;
TraceFile.setf(ios::showbase);
// Register Image to be called to instrument functions.
IMG_AddInstrumentFunction(Image, 0);
INS_AddInstrumentFunction(instr_hook, 0);
PIN_AddFiniFunction(Fini, 0);
// Never returns
PIN_StartProgram();
return 0;
}
/* ===================================================================== */
/* eof */
/* ===================================================================== */
"""
|
997,455 | 58520ac8720c356479893658cf37b5346012d55b | # Licensed under the MIT license
# Copyright (c) 2016 Yves Van Belle (yvanbelle@brother.be)
import os
import sys
import glob
import socket
import cherrypy
from xml.dom.minidom import parseString
from mail_pdf_att import mail_with_pdf
import tesseract_ocr
import barcode2pdfs
import move_files
HTTP_PORT = 9999
IP_ADDRESS = socket.gethostbyname(socket.gethostname())
class Bsi(object):
@staticmethod
def get_xml_file(filename):
    """Return the contents of the XML template xml/<filename>.

    The explicit f.close() the original carried inside the with-block was
    redundant: the context manager already closes the handle.
    """
    with open('xml/' + filename) as f:
        return f.read()
# Main BSI screen
@cherrypy.expose
def index(self, **kwargs):
    """Serve the top-level BSI link screen."""
    return self.get_xml_file('linkscreen_img.xml')
# Scan to CIFS interface
@cherrypy.expose
def orderscan(self, **kwargs):
    """Serve the order-scan entry screen."""
    screen_xml = self.get_xml_file('orderscan.xml')
    return screen_xml
# Scan to CIFS actual scan
@cherrypy.expose
def scan2cifs(self, **kwargs):
    """Serve the scan-to-CIFS job with server, share, and credential
    placeholders filled in from the module-level configuration."""
    substitutions = (
        ('cifsserver_ip', cifsserver_ip),
        ('cifsserver_share', cifsserver_share),
        ('cifsuser_loging', cifsuser_loging),
        ('cifsuser_passwd', cifsuser_passwd),
    )
    job_xml = self.get_xml_file('scan2cifs.xml')
    for placeholder, value in substitutions:
        job_xml = job_xml.replace(placeholder, value)
    return job_xml
# Scan to email - email send by Brother device
@cherrypy.expose
def scan2mail(self, **kwargs):
    """Serve the scan-to-mail job with the target address substituted."""
    return self.get_xml_file('scan2mail.xml').replace('email_address', email_address)
# Scan to FTP server - Scan JPEG files to OCR later
@cherrypy.expose
def scan2ocr(self, **kwargs):
    """Serve the scan-to-OCR job pointing at the FTP server."""
    return self.get_xml_file('scan2ocr.xml').replace('ftpipaddress', ftpserver_ip)
# OCR JPEG files and create 1 PDF
@cherrypy.expose
def ocr_and_pdf(self, **kwargs):
    """Run OCR over the scanned images in the FTP drop directory and merge
    them into one PDF, then serve the completion screen.

    Bug fix: the working directory is now restored even if the OCR/merge
    step raises, which the original did not guarantee.
    """
    working_dir = os.getcwd()
    os.chdir(ftpserver_dir)
    try:
        tesseract_ocr.ocr_pdf_merge()
    finally:
        os.chdir(working_dir)
    return self.get_xml_file('end.xml')
# Scan to FTP server
@cherrypy.expose
def scan2ftp(self, **kwargs):
    # The device POSTs its form values as an XML document in the 'xml' field.
    post_data = (kwargs['xml'])
    # NOTE(review): the order number is pulled out by substring search on the
    # first <Value>...</Value> pair rather than a real XML parse -- fragile if
    # the payload ever contains more than one Value element; confirm the
    # device always sends the order number first.
    order_number = post_data[(post_data.find('<Value>') + len('<Value>')):post_data.find('</Value>')]
    scan2ftp = self.get_xml_file('scan2ftp.xml')
    scan2ftp = scan2ftp.replace('ordernr', order_number)
    scan2ftp = scan2ftp.replace('ftpipaddress', ftpserver_ip)
    return scan2ftp
# Scan to multiple emails - interface
@cherrypy.expose
def selector(self, **kwargs):
    """Serve the recipient-selection screen."""
    return self.get_xml_file('selector.xml')
# Scan to multiple emails - extraction of email addresses & scan attachment to FTP server
emaillist = ''
@cherrypy.expose
def selectoraction(self, **kwargs):
xmldata = parseString(kwargs['xml'])
for selected in xmldata.getElementsByTagName('Value'):
email = selected.toxml()
email = email[(email.find('<Value>')+len('<Value>')):email.find('</Value>')]
self.emaillist = self.emaillist + email + ';'
scan_att = self.get_xml_file('scan_selector_att.xml')
scan_att = scan_att.replace('ftpserver_ip', ftpserver_ip)
return scan_att
# Scan to multiple emails - sending email via SMTP server with the attachment
@cherrypy.expose
def selectoraction2(self, **kwargs):
from_email_address = 'brother.bsi@brother.be'
to_email_address = self.emaillist
email_subject = 'BSI send email with attachment'
# Get file to attach
brother_bsi_files = ftpserver_dir + os.sep + 'BrotherBSI*.pdf'
brother_bsi_files = glob.glob(brother_bsi_files)
attachment = brother_bsi_files[0]
email_body = '''BSI email with PDF attachment
Scan with BSI to file, then send email(s) from server with PDF attachment.'''
mail_with_pdf(smtpserver, from_email_address, to_email_address, email_subject, attachment, email_body)
# Remove scanned file
working_dir = os.getcwd()
os.chdir(ftpserver_dir)
for file in brother_bsi_files:
os.remove(file)
os.chdir(working_dir)
# Create message with email addresses
msg = self.get_xml_file('message_with_image.xml')
email_addresses = self.emaillist.split(';')
email_addresses = str.join(' ', email_addresses)
msg = msg.replace('EMAILS', email_addresses)
self.emaillist = ''
return msg
# Scan pages with QR codes - scan pages to FTP server
@cherrypy.expose
def scan_qrcodes(self, **kwargs):
qr_code = self.get_xml_file('scan_qrcodes.xml')
qr_code = qr_code.replace('ftpserver_ip', ftpserver_ip)
return qr_code
# Scan pages with QR codes - convert JPG images to PDF files - move to the PDF files correct directory
@cherrypy.expose
def qrimages2pdfs(self, **kwargs):
barcode2pdfs.createpdfs(ftpserver_dir) # convert JPG images to PDF files
move_files.move_files(ftpserver_dir) # move PDF files to the correct directory
end = self.get_xml_file('end.xml')
return end
# Read RfID Card
@cherrypy.expose
def read_rfid(self, **kwargs):
readrfid = self.get_xml_file('read_rfid.xml')
return readrfid
# Show message with RFID card number
@cherrypy.expose
def show_rfidnr(self, **kwargs):
post_data = (kwargs['xml'])
rfid_number = post_data[(post_data.find('<Value>') + len('<Value>')):post_data.find('</Value>')]
showrfidnr = self.get_xml_file('show_rfidnr.xml')
showrfidnr = showrfidnr.replace('rfidnumber', rfid_number)
return showrfidnr
def run():
    """Start the CherryPy server on all interfaces, serving the Bsi app.

    The 'img' subdirectory next to this file is mounted as static content
    under /img.
    """
    banner = r'Starting HTTP server: http://' + IP_ADDRESS + ':' + str(HTTP_PORT) + r'/'
    print(banner)
    cherrypy.server.socket_host = '0.0.0.0'
    cherrypy.server.socket_port = HTTP_PORT
    static_img = {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': os.path.join(os.path.dirname(__file__), 'img'),
    }
    cherrypy.quickstart(Bsi(), config={'/img': static_img})
if __name__ == "__main__":
    # Runtime configuration comes from the command line, positional:
    #   1 ftp server ip      2 ftp upload directory
    #   3 cifs server ip     4 cifs share name
    #   5 cifs user login    6 cifs user password
    #   7 smtp server        8 destination email address
    ftpserver_ip = sys.argv[1]
    ftpserver_dir = sys.argv[2]
    cifsserver_ip = sys.argv[3]
    cifsserver_share = sys.argv[4]
    cifsuser_loging = sys.argv[5]
    cifsuser_passwd = sys.argv[6]
    smtpserver = sys.argv[7]
    email_address = sys.argv[8]
    run()
|
997,456 | 5ffacf3d7790994c5b448d8daff876c5f70ac079 | n = int(input())
# DP over items sorted by their third field: dp[i] is the best achievable sum
# of second-field values for a chain ending at item i, where the chain's
# first fields must be strictly increasing.
arr = [list(map(int, input().split())) for _ in range(n)]
lst = sorted(arr, key=lambda x: x[2])
lst.insert(0, [0, 0, 0])  # sentinel so dp indices start at 1
dp = [0] * (n + 1)
dp[1] = lst[1][1]
res = 0
for i in range(1, len(lst)):
    # BUGFIX(idiom): renamed the accumulator from 'max' — it shadowed the
    # builtin max() for the rest of the script.
    best = 0
    for j in range(i - 1, 0, -1):
        if lst[i][0] > lst[j][0] and dp[j] >= best:
            best = dp[j]
    dp[i] = lst[i][1] + best
    if dp[i] > res:
        res = dp[i]
print(res)
997,457 | 41dbb951d060e77e89067e8c641d6fe413d52dfe |
import numpy as np
import theano
from theano import tensor
from blocks.bricks import MLP, Rectifier, Softmax
from blocks.initialization import Constant, IsotropicGaussian
from blocks.bricks.cost import CategoricalCrossEntropy, BinaryCrossEntropy
from blocks.graph import ComputationGraph
from blocks.filter import VariableFilter
from blocks.roles import INPUT, OUTPUT, WEIGHT, BIAS
def run_experiment():
np.random.seed(42)
X = tensor.matrix('features')
y = tensor.matrix('targets')
mlp = MLP( activations=[Rectifier(), Rectifier(), Softmax()],
dims=[100, 50, 50, 10],
weights_init=IsotropicGaussian(std=0.1), biases_init=IsotropicGaussian(std=0.01))
mlp.initialize()
y_hat = mlp.apply(X)
# This whole thing is thrown out of whack by the fact that Blocks divises
# the cost by N internally because it calls .mean() on the output.
# This is a fine thing to do, but it throws off the computation
# of the individual gradients because they find themselves divided
# by that factor of N which has nothing to do with them.
cost = CategoricalCrossEntropy().apply(y, y_hat) * X.shape[0]
#cost = CategoricalCrossEntropy().apply(y_hat, y)
#cost = BinaryCrossEntropy().apply(y.flatten(), y_hat.flatten())
cg = ComputationGraph([y_hat])
"""
print "--- INPUT ---"
for v in VariableFilter(bricks=mlp.linear_transformations, roles=[INPUT])(cg.variables):
print v.tag.annotations[0].name
print "--- OUTPUT ---"
#print(VariableFilter(bricks=mlp.linear_transformations, roles=[OUTPUT])(cg.variables))
for v in VariableFilter(bricks=mlp.linear_transformations, roles=[OUTPUT])(cg.variables):
print v.tag.annotations[0].name
print "--- WEIGHT ---"
#print(VariableFilter(bricks=mlp.linear_transformations, roles=[WEIGHT])(cg.variables))
for v in VariableFilter(bricks=mlp.linear_transformations, roles=[WEIGHT])(cg.variables):
print v.tag.annotations[0].name
print "--- BIAS ---"
#print(VariableFilter(bricks=mlp.linear_transformations, roles=[BIAS])(cg.variables))
for v in VariableFilter(bricks=mlp.linear_transformations, roles=[BIAS])(cg.variables):
print v.tag.annotations[0].name
"""
# check out .tag on the variables to see which layer they belong to
print "----------------------------"
from expression_builder import SumGradSquareNormAndVariance
sgsnav = SumGradSquareNormAndVariance.make_from_blocks(mlp, cg, cost)
sgsnav_grad = sgsnav.get_sum_gradient_square_norm()
sgsnav_var = sgsnav.get_sum_gradient_variance()
#L_grads = [tensor.grad(cost, p) for p in cg.parameters]
L_grads = [tensor.grad(cost, v) for v in VariableFilter(roles=[WEIGHT, BIAS])(cg.variables)]
# works on the sum of the gradients in a mini-batch
sum_square_norm_gradients = sum([tensor.sqr(g).sum() for g in L_grads])
N = 10
# Option 1.
Xtrain = np.random.randn(N, 100).astype(np.float32)
ytrain = np.zeros((N, 10), dtype=np.float32)
for n in range(N):
label = np.random.randint(low=0, high=10)
ytrain[n, label] = 1.0
# Option 2.
#Xtrain = np.ones((N, 100)).astype(np.float32)
#ytrain = np.ones((N, 10), dtype=np.float32)
def grad_covariance(Xtrain, ytrain):
N = Xtrain.shape[0]
assert N == ytrain.shape[0]
# add the BIAS here roles=[WEIGHT, BIAS] when you want to include it again
fc = theano.function([X, y], [tensor.grad(cost, v) for v in VariableFilter(roles=[WEIGHT, BIAS])(cg.variables)])
L_minibatch_grads = fc(Xtrain, ytrain)
LL_single_grads = []
for n in range(N):
LL_single_grads.append(fc(Xtrain[n,:].reshape((1,100)), ytrain[n,:].reshape((1,10))))
result = np.zeros((N,))
for (n, L_single_grads) in enumerate(LL_single_grads):
#print "n : %d" % n
#print L_single_grads
for (minibatch_grad, single_grad) in zip(L_minibatch_grads, L_single_grads):
#print single_grad.shape
#print minibatch_grad.shape
B = (single_grad - minibatch_grad)**2
#print B.shape
#print B.sum()
result[n] += B.sum()
return result
f = theano.function([X,y],
[cost,
sgsnav_grad,
sgsnav_var,
sum_square_norm_gradients])
[c0, measured_sgsnav_grad_norm, measured_sgsnav_var, _] = f(Xtrain, ytrain)
L_c, L_measured_grad_norm = ([], [])
for n in range(N):
[c, _, _, measured_grad_norm] = f(Xtrain[n,:].reshape((1,100)), ytrain[n,:].reshape((1,10)))
L_c.append(c0)
L_measured_grad_norm.append(measured_grad_norm)
print "Cost for whole mini-batch in single shot : %f." % c
print "Cost for whole mini-batch accumulated : %f." % sum(L_c)
print ""
print "Square-norm of all gradients for each data point in single shot :"
print measured_sgsnav_grad_norm.reshape((1,-1))
print "Square-norm of all gradients for each data point iteratively :"
print np.array(L_measured_grad_norm).reshape((1,-1))
print ""
print "Difference max abs : %f." % np.max(np.abs(measured_sgsnav_grad_norm - np.array(L_measured_grad_norm)))
#print "Difference max abs : %f." % np.max(np.abs(v0 - np.array(L_gs2)))
print ""
#print "Ratios (from experimental method): "
#print np.array(L_gs1).reshape((1,-1)) / v0.reshape((1,-1))
#print "Ratios (from scan) : "
#print np.array(L_gs1).reshape((1,-1)) / sc0.reshape((1,-1))
print ""
print ""
print "measured_sgsnav_var"
print measured_sgsnav_var
print "grad_covariance(Xtrain, ytrain)"
print grad_covariance(Xtrain, ytrain)
if __name__ == "__main__":
run_experiment()
|
997,458 | b4647b69e1d30269870ab9da2109343cece36936 | # coding=utf-8
import requests
import dateutil
import dateutil.parser
import logging
from plugins.packagetracker.provider import Package
__author__ = "tigge"
class PostnordPackage(Package):
    """Package-tracking provider backed by the Postnord track-and-trace REST API."""

    API_URL = "https://api2.postnord.com"
    FIND_IDENTIFIER = (
        API_URL
        + "/rest/shipment/v1/trackandtrace/findByIdentifier.json"
        + "?id={id}&locale={locale}&apikey={apikey}"
    )
    # Must be set via set_apikey() before any request is made.
    apikey = None

    @classmethod
    def get_type(cls):
        """Human-readable provider name."""
        return "Postnord"

    @classmethod
    def set_apikey(cls, id):
        """Store the API key used for all subsequent requests."""
        cls.apikey = id

    @staticmethod
    def format_address(address):
        """Render a Postnord address dict as one comma-separated line."""
        # BUGFIX: logging.info() was called with a bare extra argument and no
        # %-placeholder, so the logging module raised a string-formatting
        # error instead of logging the address.
        logging.info("PostnordPackage.format_address %s", address)
        result = ""
        if "street1" in address:
            result += address["street1"]
        if "street2" in address:
            result += ", " + address["street2"]
        if len(result) > 0:
            result += ", "
        if "postCode" in address:
            result += address["postCode"]
        if "city" in address:
            result += " " + address["city"]
        if "country" in address:
            result += ", " + address["country"]
        return result

    @staticmethod
    def create_event(event):
        """Convert one API event dict into a Package.Event."""
        e = PostnordPackage.Event()
        e.datetime = dateutil.parser.parse(event["eventTime"])
        e.description = "{0} ({1})".format(
            event["eventDescription"], event["location"]["displayName"]
        )
        return e

    @classmethod
    def is_package(cls, package_id):
        """Return True when the API reports at least one shipment for the id."""
        data = cls._get_data(package_id)
        response = data.get("TrackingInformationResponse", {})
        return len(response.get("shipments", [])) > 0

    @classmethod
    def _get_url(cls, package_id, locale="en"):
        """Build the findByIdentifier request URL for *package_id*."""
        return PostnordPackage.FIND_IDENTIFIER.format(
            id=package_id, locale=locale, apikey=PostnordPackage.apikey
        )

    @classmethod
    def _get_data(cls, package_id, locale="en"):
        """Fetch and decode the tracking JSON; return {} on a decode failure."""
        try:
            return requests.get(PostnordPackage._get_url(package_id, locale)).json()
        except ValueError:
            logging.exception("Exception while getting package")
            return {}

    def update(self):
        """Refresh this package from the API, firing on_event for new events."""
        res = self._get_data(self.id)
        try:
            data = res["TrackingInformationResponse"]
            for shipment in data["shipments"]:
                self.service = shipment["service"]["name"]
                self.consignor = shipment["consignor"]["name"]
                if "address" in shipment["consignor"]:
                    self.consignor += ", " + PostnordPackage.format_address(
                        shipment["consignor"]["address"]
                    )
                self.consignee = PostnordPackage.format_address(
                    shipment["consignee"]["address"]
                )
                if "totalWeight" in shipment:
                    self.weight = (
                        shipment["totalWeight"]["value"]
                        + " "
                        + shipment["totalWeight"]["unit"]
                    )
                if "totalVolume" in shipment:
                    self.volume = (
                        shipment["totalVolume"]["value"]
                        + " "
                        + shipment["totalVolume"]["unit"]
                    )
                if (
                    "statusText" in shipment
                    and "header" in shipment["statusText"]
                    and "body" in shipment["statusText"]
                ):
                    self.status = (
                        shipment["statusText"]["header"]
                        + ": "
                        + shipment["statusText"]["body"]
                    )
                # Emit only events newer than the previous update while
                # tracking the newest timestamp seen across all items.
                last_updated = self.last_updated
                for postnord_item in shipment["items"]:
                    for postnord_event in postnord_item["events"]:
                        event = self.create_event(postnord_event)
                        if event.datetime > last_updated:
                            last_updated = event.datetime
                        if event.datetime > self.last_updated:
                            self.on_event(PostnordPackage.create_event(postnord_event))
                self.last_updated = last_updated
        except Exception:
            logging.exception("Exception while updating package")
            logging.debug("Data: %r", res)
|
997,459 | 7a55bf87fa5f5efc8c2286f5f31d5937b6abe35c | import numpy as np
def gaussianGen(num_k, num_v, means, covs):
    """Sample a labelled mixture-of-Gaussians data set.

    Parameters
    ----------
    num_k : int
        Number of Gaussian components.
    num_v : sequence of int
        Samples to draw per component (length num_k).
    means, covs : sequences
        Per-component 2-D mean vectors and 2x2 covariance matrices.

    Returns
    -------
    ndarray of shape (sum(num_v), 3): columns 0-1 are sample coordinates,
    column 2 is the integer component label.
    """
    num = np.sum(num_v)
    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
    data = np.zeros(shape=(num, 3), dtype=float)
    counter = 0
    for i in range(num_k):
        data[counter:counter + num_v[i], 0:2] = np.random.multivariate_normal(means[i], covs[i], num_v[i])
        data[counter:counter + num_v[i], 2] = i
        counter = counter + num_v[i]
    return data
def init_means(data, num_k):
    """Pick num_k random rows of *data* as initial 2-D cluster means.

    data : ndarray of shape (n, >=2); columns 0-1 are the coordinates.
    Returns an ndarray of shape (num_k, 2).
    """
    num_v = data.shape[0]
    # BUGFIX: the indices were cast to np.int8, which silently overflows
    # (and can go negative) for any data set with more than 127 rows.
    sam = np.array(np.round(np.random.sample(num_k) * (num_v - 1)), dtype=np.intp)
    init_u = np.zeros(shape=(num_k, 2), dtype=np.float64)
    for i in range(num_k):
        init_u[i, :] = data[sam[i], 0:2]
    return init_u
997,460 | a709e83fcdf2e209b0eb2b9a0b6fc7acee42ea9a | import sys
import scipy.io
import numpy as np
#Load Data
#Do download the mnist_data.mat for label 7 and 8 (Handwritten Digits)
data = scipy.io.loadmat('mnist_data.mat')
#Understanding Labels and Data
#7 = 0
#8 = 1
label_index = {"7":0, "8":1}
#Categorising into corresponding values
trX = data['trX']
trY = data['trY']
tsX = data['tsX']
tsY = data['tsY']
#Feauture Count
f_count = 2
#New Feature Sets
trX_Final = np.zeros(trX.shape[0]*f_count).reshape(trX.shape[0],f_count)
tsX_Final = np.zeros(tsX.shape[0]*f_count).reshape(tsX.shape[0],f_count)
shape = [trX.shape[0],f_count]
#Calculating Priors
sample_count = np.zeros(len(set(trY[0])))
for i in trY[0]:
if(i==0):
sample_count[0] += 1
else:
sample_count[1] += 1
sample_count_test_label = np.zeros(len(set(tsY[0])))
for i in tsY[0]:
if(i==0):
sample_count_test_label[0] += 1
else:
sample_count_test_label[1] += 1
priors = []
for i in range(len(sample_count)):
priors.append(sample_count[i]/sum(sample_count))
print("Prior: ",priors)
#Understanding Guassian parameters
means = np.zeros(f_count*len(sample_count)).reshape((f_count,len(sample_count)))
variances = np.zeros(f_count*len(sample_count)).reshape((f_count,len(sample_count)))
#Feature Extraction
for i in range(trX.shape[0]):
trX_Final[i][0] = np.mean(trX[i])
trX_Final[i][1] = np.var(trX[i])
for i in range(tsX.shape[0]):
tsX_Final[i][0] = np.mean(tsX[i])
tsX_Final[i][1] = np.var(tsX[i])
trX_7 = [trX_Final[i] for i in range(trX.shape[0]) if trY[0][i]==0]
trX_8 = [trX_Final[i] for i in range(trX.shape[0]) if trY[0][i]==1]
trX_7, trX_8 = np.array(trX_7), np.array(trX_8)
#Categorising into particular labels
trX_7_mean =np.array([x[0] for x in trX_7])
trX_8_mean =np.array([x[0] for x in trX_8])
trX_7_var = np.array([x[1] for x in trX_7])
trX_8_var = np.array([x[1] for x in trX_8])
trX_means = np.array([trX_7_mean, trX_8_mean])
trX_vars = np.array([trX_7_var, trX_8_var])
#Mean Matrix
means[0][0] = np.mean(trX_means[0])
means[0][1] = np.mean(trX_means[1])
means[1][0] = np.mean(trX_vars[0])
means[1][1] = np.mean(trX_vars[1])
means_T = means.transpose()
means_T = np.matrix(means_T)
print("Mean Matrix: ",means)
#Variance Matrix
variances[0][0] = np.var(trX_means[0])
variances[0][1] = np.var(trX_means[1])
variances[1][0] = np.var(trX_vars[0])
variances[1][1] = np.var(trX_vars[1])
variances_T = variances.transpose()
variances_T = np.matrix(variances_T)
print("Variance Matrix: ",variances)
#Covariance Matrix
print("-Covariance Matrix-")
#Covariance of 7
cov_7 = np.zeros(4).reshape((2,2))
cov_7[0][0] = variances[0][0]
cov_7[1][1] = variances[1][0]
cov_7 = np.matrix(cov_7)
print("Covariance Matrix (Digit 7): ",cov_7)
# #If dependent
# cov_7_dep = np.matrix(np.cov(trX_7_mean,trX_7_var))
#Covariance of 8
cov_8 = np.zeros(4).reshape((2,2))
cov_8[0][0] = variances[0][1]
cov_8[1][1] = variances[1][1]
cov_8 = np.matrix(cov_8)
print("Covariance Matrix (Digit 8): ",cov_8)
# #If dependent
# cov_8_dep = np.matrix(np.cov(trX_8_mean,trX_8_var))
#Naive Bayes
def naive_bayes(feature):
    """Classify a (2, 1) feature vector (image mean, image variance).

    Uses the module-level Gaussian parameters (means_T, cov_7, cov_8) and
    class priors; returns 0 for digit 7 and 1 for digit 8, whichever has
    the larger posterior.
    """
    prob = [0, 0]
    # BUGFIX: the likelihoods below now use the 'feature' parameter; the
    # original referenced the module-level 'feature_arg' instead, which only
    # worked because the caller happened to set that global first.
    # P(X|Y=0) likelihood, then posterior P(Y=0|X) up to the evidence term.
    diff0 = np.subtract(feature, means_T[0].reshape((2, 1)))
    likelihood_0 = (1 / np.sqrt(((2 * np.pi) ** 2) * (np.linalg.det(cov_7)))) * np.exp((-0.5) * np.dot(np.dot(diff0.transpose(), np.linalg.inv(cov_7)), diff0))
    prob[0] = likelihood_0 * priors[0]
    # P(X|Y=1) likelihood, then posterior P(Y=1|X).
    diff1 = np.subtract(feature, means_T[1].reshape((2, 1)))
    likelihood_1 = (1 / np.sqrt(((2 * np.pi) ** 2) * (np.linalg.det(cov_8)))) * np.exp((-0.5) * np.dot(np.dot(diff1.transpose(), np.linalg.inv(cov_8)), diff1))
    prob[1] = likelihood_1 * priors[1]
    print("Posterior Value: ", prob)
    # Label the image
    if (prob[0] > prob[1]):
        return 0
    else:
        return 1
#Logistic Regression
#Sigmoid Function
def sigmoid(z):
    """Logistic function: map any real (or array) input into (0, 1)."""
    denom = 1 + np.exp(-z)
    return 1 / denom
#Log Likelihood
def log_likelihood(features, target, weights):
    """Bernoulli log-likelihood of *target* under a logistic model."""
    z = features @ weights
    per_sample = target * z - np.log(1 + np.exp(z))
    return per_sample.sum()
#Logistic Regression
def logistic_regression(features, labels, iterations, learning_rate):
    """Train logistic-regression weights by full-batch gradient ascent.

    A bias column of ones is prepended to *features* and the weights start
    at zero; progress (log-likelihood and weights) is printed every 500
    iterations.  Returns the final weight vector (bias first).
    """
    design = np.hstack((np.ones((features.shape[0], 1)), features))
    weights = np.zeros(design.shape[1])
    # weights = np.array([-3.0, 300.0, -400.0])
    for step in range(iterations):
        predictions = sigmoid(np.dot(design, weights))
        # Gradient of the log-likelihood with respect to the weights.
        weights += np.dot(design.T, labels - predictions) * learning_rate
        if step % 500 == 0:
            print("----------------------------------")
            print("Iteration: ", step)
            print("Log Likelihood: ", log_likelihood(design, labels, weights))
            print("Weights: ", weights)
    return weights
#Test Labels
labels_gaussian = [0]*len(tsX_Final)
labels_logistic = [0]*len(tsX_Final)
#Call Test Data
#Gaussian
for i in range(len(tsX_Final)):
feature_arg = [tsX_Final[i][0], tsX_Final[i][1]]
feature_arg = np.array(feature_arg).reshape((2,1))
print("----------------------------------")
print("Labelling Image: ",i)
labels_gaussian[i] = naive_bayes(feature_arg)
#Logistic
#Training
learning_rate = 0.001
iterations = 100000
weights = logistic_regression(trX_Final, trY[0],iterations,learning_rate)
#Testing
test_data = np.hstack((np.ones((len(tsX_Final), 1)),tsX_Final))
label_values = np.dot(test_data, weights)
labels_logistic = np.round(sigmoid(label_values))
#Accuracy Calculator
print("----------------------------------")
print("------------Accuracies------------")
print("----------------------------------")
print("Gaussian Accuracy (Total): ",(labels_gaussian == tsY[0]).sum().astype(float)*100/len(labels_gaussian),"%")
count = [0,0]
for i in range(len(tsY[0])):
if(labels_gaussian[i]==tsY[0][i] and labels_gaussian[i]==0):
count[0]+=1
if(labels_gaussian[i]==tsY[0][i] and labels_gaussian[i]==1):
count[1]+=1
print("Gaussian Accuracy Label 7: ",count[0]*100/sample_count_test_label[0],"%")
print("Gaussian Accuracy Label 8: ",count[1]*100/sample_count_test_label[1],"%")
print("\nLogistic Accuracy (Total): ",(labels_logistic == tsY[0]).sum().astype(float)*100/len(labels_logistic),"%")
count = [0,0]
for i in range(len(tsY[0])):
if(labels_logistic[i]==tsY[0][i] and labels_logistic[i]==0):
count[0]+=1
if(labels_logistic[i]==tsY[0][i] and labels_logistic[i]==1):
count[1]+=1
print("Logistic Accuracy Label 7: ",count[0]*100/sample_count_test_label[0],"%")
print("Logistic Accuracy Label 8: ",count[1]*100/sample_count_test_label[1],"%") |
997,461 | cf341a89700ee156b49f98f43d56385fdd780977 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 10:59:29 2020
@author: appedu
"""
from mcpi.minecraft import Minecraft
import random
mc = Minecraft.create()
x, y, z = mc.player.getPos()
# Random walk: lay 500 five-block lines of stone (block id 1), each in a
# randomly chosen axis direction, advancing the cursor after every segment.
# BUGFIX: random.randrange(1, 5) only yields 1-4, so the two vertical
# branches (r == 5 and r == 6) were unreachable dead code; randrange(1, 7)
# covers all six directions the branches were written for.
for i in range(500):
    r = random.randrange(1, 7)
    if r == 1:
        mc.setBlocks(x, y, z, x, y, z + 5, 1)
        z = z + 5
    elif r == 2:
        mc.setBlocks(x, y, z, x, y, z - 5, 1)
        z = z - 5
    elif r == 3:
        mc.setBlocks(x, y, z, x + 5, y, z, 1)
        x = x + 5
    elif r == 4:
        mc.setBlocks(x, y, z, x - 5, y, z, 1)
        x = x - 5
    elif r == 5:
        mc.setBlocks(x, y, z, x, y + 5, z, 1)
        y = y + 5
    elif r == 6:
        mc.setBlocks(x, y, z, x, y - 5, z, 1)
        y = y - 5
997,462 | e183099efedc4dec417813a0a9fba4a4d3fa7416 | class Solution:
def minStartValue(self, nums: List[int]) -> int:
s= 0
m= 0
for i in nums:
s += i
m = min(m,s)
return 1-m |
997,463 | eeafea3ee221ab1d4940054107d58ce8b4ddb667 | from cargo import Cargo
# Cargo definition for "pipe" (piece goods, no town-growth effect).
# NOTE(review): the keyword values are NFO/NML expression strings consumed
# verbatim by the Cargo template — confirm units (weight, price_factor,
# capacity_multiplier) against the Cargo class before changing them.
cargo = Cargo(id='pipe',
              type_name='string(STR_CARGO_NAME_PIPE)',
              unit_name='string(STR_CARGO_NAME_PIPE)',
              type_abbreviation='string(STR_CID_PIPE)',
              sprite='NEW_CARGO_SPRITE',
              weight='1.0',
              cargo_payment_list_colour='198',
              is_freight='1',
              cargo_classes='bitmask(CC_PIECE_GOODS)',
              cargo_label='PIPE',
              town_growth_effect='TOWNGROWTH_NONE',
              town_growth_multiplier='1.0',
              units_of_cargo='80',
              items_of_cargo='string(STR_CARGO_UNIT_PIPE)',
              penalty_lowerbound='12',
              single_penalty_length='255',
              price_factor='129',
              capacity_multiplier='1',
              icon_indices=(7, 3))
|
997,464 | f6fe122e63dee664a7fb1f37a3eae6eae406c0ff | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: tasdik
# @Contributers : Branden (Github: @bardlean86)
# @Date: 2016-01-17
# @Email: prodicus@outlook.com Github: @tasdikrahman
# @Last Modified by: tasdik
# @Last Modified by: Branden
# @Last Modified by: Dic3
# @Last Modified time: 2016-10-16
# MIT License. You can find a copy of the License @ http://prodicus.mit-license.org
## Game music Attribution
##Frozen Jam by tgfcoder <https://twitter.com/tgfcoder> licensed under CC-BY-3 <http://creativecommons.org/licenses/by/3.0/>
## Additional assets by: Branden M. Ardelean (Github: @bardlean86)
from __future__ import division
import pygame
import random
import time
import os
import sqlite3
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, 'data')
from os import path
## assets folder
img_dir = path.join(path.dirname(__file__), 'assets')
sound_folder = path.join(path.dirname(__file__), 'sounds')
###############################
## to be placed in "constant.py" later
WIDTH = 480
HEIGHT = 600
FPS = 60
POWERUP_TIME = 5000
BAR_LENGTH = 100
BAR_HEIGHT = 10
# Define Colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
###############################
###############################
## to placed in "__init__.py" later
## initialize pygame and create window
pygame.init()
pygame.mixer.init() ## For sound
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Space Shooter")
clock = pygame.time.Clock() ## For syncing the FPS
###############################
font_name = pygame.font.match_font('arial')
# wave (stage) helper: translate the score counter into the current stage
def count_wave(wavecounter):
    """Map the score counter onto a stage number 1-4 (200 points per stage)."""
    if 0 <= wavecounter <= 200:
        return 1
    if 200 < wavecounter <= 400:
        return 2
    if 400 < wavecounter <= 600:
        return 3
    if wavecounter > 600:
        return 4
    # A negative counter matches no branch and falls through to None,
    # exactly as before.
def main_menu():
global screen
menu_song = pygame.mixer.music.load(path.join(sound_folder, "menu.ogg"))
pygame.mixer.music.play(-1)
showHiScores = False
pygame.display.update()
font = pygame.font.Font(None, 36)
hiScores = Database.getScores()
highScoreTexts = [
font.render("NAME", 1, WHITE),
font.render("SCORE", 1, WHITE)
]
highScorePos = [highScoreTexts[0].get_rect(
topleft=screen.get_rect().inflate(-100, -100).topleft),
highScoreTexts[1].get_rect(
topright=screen.get_rect().inflate(-100, -100).topright)]
for hs in hiScores:
highScoreTexts.extend([font.render(str(hs[x]), 1, BLUE)
for x in range(2)])
highScorePos.extend([highScoreTexts[x].get_rect(
topleft=highScorePos[x].bottomleft) for x in range(-2, 0)])
while True:
title = pygame.image.load(path.join(img_dir, "main.png")).convert()
title = pygame.transform.scale(title, (WIDTH, HEIGHT), screen)
score_background = pygame.image.load(path.join(img_dir, "starfield1.png")).convert()
pygame.display.update()
for event in pygame.event.get():
# if event.type == pygame.KEYDOWN:
# showHiScores = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
#pygame.mixer.music.stop()
ready = pygame.mixer.Sound(path.join(sound_folder, 'getready.ogg'))
ready.play()
screen.fill(BLACK)
draw_text(screen, "GET READY!", 40, WIDTH/2, HEIGHT/2)
pygame.display.update()
return
elif event.type == pygame.KEYDOWN and event.key == pygame.K_q:
pygame.quit()
quit()
# 스코어 추가
elif event.type == pygame.KEYDOWN and event.key == pygame.K_s:
showHiScores = True
elif (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE and showHiScores == True):
showHiScores = False
screen.fill((0,0,0))
continue
elif event.type == pygame.QUIT:
pygame.quit()
quit()
if showHiScores:
screen.blit(score_background, (0, 0))
textOverlays = zip(highScoreTexts, highScorePos)
for txt, pos in textOverlays:
screen.blit(txt, pos)
else:
# TODO: 이상하게 스코어를 조회하고 다시 돌아오면 화면이 안돌아온다...
screen.blit(title, (0, 0))
draw_text(screen, "Press [ENTER] To Begin", 30, WIDTH/2, HEIGHT/2)
draw_text(screen, "or [Q] To Quit", 30, WIDTH/2, (HEIGHT/2)+40)
draw_text(screen, "or [S] To Score", 30, WIDTH/2, (HEIGHT/2)+80)
pygame.display.flip()
class Keyboard(object):
keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D',
pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H',
pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L',
pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P',
pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T',
pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X',
pygame.K_y: 'Y', pygame.K_z: 'Z'}
def scoreBorder(highScore):
font = pygame.font.Font(None, 36)
hiScores = Database.getScores()
isHiScore = len(hiScores) < Database.numScores or score > hiScores[-1][1]
name = ''
nameBuffer = []
score_background = pygame.image.load(path.join(img_dir, "starfield1.png")).convert()
while True:
# Event Handling
for event in pygame.event.get():
if (event.type == pygame.QUIT
or not isHiScore
and event.type == pygame.KEYDOWN
and event.key == pygame.K_ESCAPE):
return False
elif (event.type == pygame.KEYDOWN
and event.key == pygame.K_RETURN
and not isHiScore):
return True
elif (event.type == pygame.KEYDOWN
and event.key in Keyboard.keys.keys()
and len(nameBuffer) < 8):
nameBuffer.append(Keyboard.keys[event.key])
name = ''.join(nameBuffer)
elif (event.type == pygame.KEYDOWN
and event.key == pygame.K_BACKSPACE
and len(nameBuffer) > 0):
nameBuffer.pop()
name = ''.join(nameBuffer)
elif (event.type == pygame.KEYDOWN
and event.key == pygame.K_RETURN
and len(name) > 0):
Database.setScore(hiScores, (name, score))
return True
if isHiScore:
hiScoreText = font.render('NEW HIGH SCORE', 1, WHITE)
hiScorePos = hiScoreText.get_rect(
midbottom=screen.get_rect().center)
scoreText = font.render(str(score), 1, GREEN)
scorePos = scoreText.get_rect(midtop=hiScorePos.midbottom)
enterNameText = font.render('LEAVE YOUR LOG:', 1, WHITE)
enterNamePos = enterNameText.get_rect(midtop=scorePos.midbottom)
nameText = font.render(name, 1, BLUE)
namePos = nameText.get_rect(midtop=enterNamePos.midbottom)
textOverlay = zip([hiScoreText, scoreText,
enterNameText, nameText],
[hiScorePos, scorePos,
enterNamePos, namePos])
else:
gameOverText = font.render('GAME OVER', 1, GREEN)
gameOverPos = gameOverText.get_rect(
center=screen.get_rect().center)
scoreText = font.render('SCORE: {}'.format(score), 1, GREEN)
scorePos = scoreText.get_rect(midtop=gameOverPos.midbottom)
textOverlay = zip([gameOverText, scoreText],
[gameOverPos, scorePos])
# Update and draw all sprites
screen.blit(score_background, (0, 0))
for txt, pos in textOverlay:
screen.blit(txt, pos)
pygame.display.flip()
# High-score persistence class (SQLite-backed)
class Database(object):
    """Tiny SQLite-backed high-score store (top `numScores` name/score rows)."""

    path = os.path.join(data_dir, 'score.db')
    numScores = 15

    @staticmethod
    def getScores():
        """Return all saved (name, score) rows, highest score first."""
        conn = sqlite3.connect(Database.path)
        try:
            c = conn.cursor()
            c.execute('''CREATE TABLE if not exists scores (name text, score integer)''')
            c.execute("SELECT * FROM scores ORDER BY score DESC")
            hiScores = c.fetchall()
        finally:
            # BUGFIX: the connection leaked when a query raised.
            conn.close()
        return hiScores

    @staticmethod
    def setScore(hiScores, entry):
        """Insert *entry*; when the table is full, evict the lowest score first."""
        conn = sqlite3.connect(Database.path)
        try:
            c = conn.cursor()
            if len(hiScores) == Database.numScores:
                lowScoreName = hiScores[-1][0]
                lowScore = hiScores[-1][1]
                c.execute("DELETE FROM scores WHERE (name = ? AND score = ?)", (lowScoreName, lowScore))
            c.execute("INSERT INTO scores VALUES (?,?)", entry)
            conn.commit()
        finally:
            conn.close()
def draw_text(surf, text, size, x, y):
    """Render *text* anti-aliased in white, blitted with its top centre at (x, y)."""
    font = pygame.font.Font(font_name, size)
    rendered = font.render(text, True, WHITE)
    rect = rendered.get_rect(midtop=(x, y))
    surf.blit(rendered, rect)
def draw_shield_bar(surf, x, y, pct):
    """Draw the shield meter at (x, y): a green fill proportional to *pct*
    (0-100) inside a white 2-px outline of BAR_LENGTH x BAR_HEIGHT."""
    # Never draw a negative-width fill. (Commented-out dead code removed.)
    pct = max(pct, 0)
    fill = (pct / 100) * BAR_LENGTH
    outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)
    fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)
    pygame.draw.rect(surf, GREEN, fill_rect)
    pygame.draw.rect(surf, WHITE, outline_rect, 2)
def draw_lives(surf, x, y, lives, img):
    """Blit *lives* copies of *img* in a row starting at (x, y), 30 px apart."""
    for slot in range(lives):
        icon_rect = img.get_rect()
        icon_rect.x = x + 30 * slot
        icon_rect.y = y
        surf.blit(img, icon_rect)
def newmob(wave):
    """Spawn one enemy appropriate for *wave* and register it.

    Waves 2-4 swap the enemy artwork by mutating the Mob class attribute
    'image_orig' before instantiating, then call the matching mobchange*()
    method to adjust the new sprite.  The mob is added to the module-level
    'all_sprites' and 'mobs' groups.
    """
    if wave == 1:
        mob_element = Mob()
        all_sprites.add(mob_element)
        mobs.add(mob_element)
    elif wave == 2:
        # NOTE(review): mutating the class attribute affects every Mob
        # created afterwards, not just this one — confirm that is intended.
        Mob.image_orig = random.choice(ghost_images)
        mob_element = Mob()
        mob_element.mobchange()
        all_sprites.add(mob_element)
        mobs.add(mob_element)
    elif wave == 3:
        Mob.image_orig = random.choice(cockpit_images)
        mob_element = Mob()
        mob_element.mobchange2()
        all_sprites.add(mob_element)
        mobs.add(mob_element)
    elif wave == 4:
        Mob.image_orig = random.choice(cockpit2_images)
        mob_element = Mob()
        mob_element.mobchange3()
        all_sprites.add(mob_element)
        mobs.add(mob_element)
class Explosion(pygame.sprite.Sprite):
    """Frame-by-frame explosion animation played at a fixed position.

    'size' selects one of the pre-loaded frame sequences in the module-level
    'explosion_anim' dict; the sprite kills itself after the last frame.
    """
    def __init__(self, center, size):
        pygame.sprite.Sprite.__init__(self)
        self.size = size
        self.image = explosion_anim[self.size][0]
        self.rect = self.image.get_rect()
        self.rect.center = center
        self.frame = 0  # index of the frame currently shown
        self.last_update = pygame.time.get_ticks()
        self.frame_rate = 75  # milliseconds between animation frames
    def update(self):
        # Advance one frame once 'frame_rate' ms have elapsed.
        now = pygame.time.get_ticks()
        if now - self.last_update > self.frame_rate:
            self.last_update = now
            self.frame += 1
            if self.frame == len(explosion_anim[self.size]):
                self.kill()  # animation finished — remove the sprite
            else:
                # Re-centre the rect because frames can differ in size.
                center = self.rect.center
                self.image = explosion_anim[self.size][self.frame]
                self.rect = self.image.get_rect()
                self.rect.center = center
class Player(pygame.sprite.Sprite):
    """The player's ship: movement, shooting, bombs, power-ups and respawn."""

    # X offsets (px) of the 17-bomb fan fired by bomb_shoot(); the paired
    # y offset is abs(dx) // 2, and the center bomb launches from the nose.
    _BOMB_X_OFFSETS = range(-160, 161, 20)

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Scale the raw player image down to ship size.
        self.image = pygame.transform.scale(player_img, (50, 38))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.radius = 20  # circle-collision radius
        self.rect.centerx = WIDTH / 2
        self.rect.bottom = HEIGHT - 10
        self.speedx = 0
        self.speedy = 0
        self.shield = 100
        self.shoot_delay = 250  # minimum ms between shots
        self.last_shot = pygame.time.get_ticks()
        self.lives = 3
        self.hidden = False  # True while respawning off-screen
        self.hide_timer = pygame.time.get_ticks()
        self.power = 1
        self.invincibility = 2000  # invincibility window after death: 2 s
        self.power_count = 30      # remaining powered-up shots
        self.power_count_text = "∞"
        self.bomb_count = 1

    def update(self):
        # Power-ups time out after power_count shots (not a wall-clock timer).
        if self.power >= 2 and self.power_count == 0:
            self.power = 1
            self.power_count = 30
        # Unhide (respawn) one second after dying.
        if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1000:
            self.hidden = False
            self.rect.centerx = WIDTH / 2
            self.rect.bottom = HEIGHT - 30
        # The ship is static unless an arrow key is held this frame.
        self.speedx = 0
        self.speedy = 0
        # Snapshot of all keys currently held down.
        keystate = pygame.key.get_pressed()
        # Diagonal movement: the combined checks must come before the
        # single-axis ones, or the elif chain would never reach them.
        if keystate[pygame.K_UP] and keystate[pygame.K_RIGHT]:
            self.speedx = 5
            self.speedy = -5
        elif keystate[pygame.K_UP] and keystate[pygame.K_LEFT]:
            self.speedx = -5
            self.speedy = -5
        elif keystate[pygame.K_DOWN] and keystate[pygame.K_RIGHT]:
            self.speedx = 5
            self.speedy = +5
        elif keystate[pygame.K_DOWN] and keystate[pygame.K_LEFT]:
            self.speedx = -5
            self.speedy = +5
        elif keystate[pygame.K_LEFT]:
            self.speedx = -5
        elif keystate[pygame.K_RIGHT]:
            self.speedx = 5
        elif keystate[pygame.K_UP]:
            self.speedy = -5
        elif keystate[pygame.K_DOWN]:
            self.speedy = +5
        # Fire weapons by holding spacebar; Z launches a bomb fan.
        if keystate[pygame.K_SPACE]:
            self.shoot()
        if keystate[pygame.K_z]:
            self.bomb_shoot()
        # BUG FIX: move first, then clamp to the screen borders. The
        # original clamped before applying the speed, so the ship could
        # sit one frame outside the left/right/bottom limits.
        self.rect.x += self.speedx
        self.rect.y += self.speedy
        if self.rect.right > WIDTH:
            self.rect.right = WIDTH
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.bottom > HEIGHT - 10:  # keep above the bottom margin
            self.rect.bottom = HEIGHT - 10

    def bomb_shoot(self):
        """Launch a 17-bomb fan if a bomb is available and off cooldown."""
        now = pygame.time.get_ticks()
        if self.bomb_count >= 1 and now - self.last_shot > self.shoot_delay:
            self.last_shot = now
            for dx in self._BOMB_X_OFFSETS:
                if dx == 0:
                    # The center bomb launches from the ship's nose.
                    bomb = Bomb(self.rect.centerx, self.rect.top)
                else:
                    bomb = Bomb(self.rect.centerx + dx,
                                self.rect.centery + abs(dx) // 2)
                all_sprites.add(bomb)
                bombs.add(bomb)
            shooting_sound.play()
            self.bomb_count -= 1

    def shoot(self):
        """Fire the current weapon loadout, rate-limited by shoot_delay."""
        now = pygame.time.get_ticks()
        if now - self.last_shot > self.shoot_delay:
            self.last_shot = now
            if self.power == 1:
                # Single shot from the nose; unlimited ammo.
                bullet = Bullet(self.rect.centerx, self.rect.top)
                all_sprites.add(bullet)
                bullets.add(bullet)
                self.power_count_text = "∞"
                shooting_sound.play()
            if self.power == 2:
                # Twin shots from the wingtips; limited by power_count.
                bullet1 = Bullet(self.rect.left, self.rect.centery)
                bullet2 = Bullet(self.rect.right, self.rect.centery)
                for shot in (bullet1, bullet2):
                    all_sprites.add(shot)
                    bullets.add(shot)
                self.power_count_text = str(self.power_count)
                shooting_sound.play()
                self.power_count -= 1  # count down powered-up shots
            # MOAR POWAH: twin bullets plus a missile from the nose.
            if self.power >= 3:
                bullet1 = Bullet(self.rect.left, self.rect.centery)
                bullet2 = Bullet(self.rect.right, self.rect.centery)
                missile1 = Missile(self.rect.centerx, self.rect.top)
                for shot in (bullet1, bullet2, missile1):
                    all_sprites.add(shot)
                    bullets.add(shot)
                self.power_count_text = str(self.power_count)
                shooting_sound.play()
                missile_sound.play()
                self.power_count -= 1  # count down powered-up shots

    def powerup(self):
        """Raise the weapon level and refill the powered-shot counter."""
        self.power += 1
        self.power_count = 30

    def hide(self):
        """On death: park the ship off-screen and reset its loadout."""
        self.hidden = True
        self.hide_timer = pygame.time.get_ticks()
        self.rect.center = (WIDTH / 2, HEIGHT + 200)
        self.power = 1        # reset weapon power on death
        self.power_count = 30 # reset powered-shot counter on death
        self.bomb_count = 1   # reset bomb stock on death
# defines the enemies
class Mob(pygame.sprite.Sprite):
    """An enemy sprite; its appearance changes per wave via mobchange*()."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image_orig = random.choice(meteor_images)
        self.image_orig.set_colorkey(BLACK)
        self.image = self.image_orig.copy()
        self.rect = self.image.get_rect()
        self.radius = int(self.rect.width * .90 / 2)
        self.rect.x = random.randrange(0, WIDTH - self.rect.width)
        self.rect.y = random.randrange(-150, -100)
        self.speedy = random.randrange(5, 20)  # randomized fall speed
        self.speedx = random.randrange(-3, 3)  # slight sideways drift
        # Rotation state — only meteor-skinned mobs spin (see update()).
        self.rotation = 0
        self.rotation_speed = random.randrange(-8, 8)
        self.last_update = pygame.time.get_ticks()  # last rotation timestamp

    def rotate(self):
        """Advance the spin animation, re-rotating from the pristine image
        every 50 ms so repeated rotations don't accumulate blur."""
        time_now = pygame.time.get_ticks()
        if time_now - self.last_update > 50:  # in milliseconds
            self.last_update = time_now
            self.rotation = (self.rotation + self.rotation_speed) % 360
            new_image = pygame.transform.rotate(self.image_orig, self.rotation)
            old_center = self.rect.center
            self.image = new_image
            self.rect = self.image.get_rect()
            self.rect.center = old_center

    def _change_appearance(self, images):
        """Re-skin this mob with a random image from *images* and respawn it
        above the screen (shared body of the three mobchange methods)."""
        self.image_orig = random.choice(images)
        self.image_orig.set_colorkey(BLACK)
        self.image = self.image_orig.copy()
        self.rect = self.image.get_rect()
        self.radius = int(self.rect.width * .90 / 2)
        self.rect.x = random.randrange(0, WIDTH - self.rect.width)
        self.rect.y = random.randrange(-150, -100)
        self.speedy = random.randrange(5, 20)

    def mobchange(self):
        """Switch to the wave-2 (ghost) appearance."""
        self._change_appearance(ghost_images)

    def mobchange2(self):
        """Switch to the wave-3 (cockpit) appearance."""
        self._change_appearance(cockpit_images)

    def mobchange3(self):
        """Switch to the wave-4 (cockpit2) appearance."""
        self._change_appearance(cockpit2_images)

    def update(self):
        # BUG FIX: the original compared against random.choice(meteor_images),
        # so a meteor only rotated on frames where the random pick happened to
        # be its own image. Rotate whenever this mob wears a meteor skin.
        if self.image_orig in meteor_images:
            self.rotate()
        self.rect.x += self.speedx
        self.rect.y += self.speedy
        # Respawn above the screen once the mob drifts out of view.
        if (self.rect.top > HEIGHT + 10) or (self.rect.left < -25) or (self.rect.right > WIDTH + 20):
            self.rect.x = random.randrange(0, WIDTH - self.rect.width)
            self.rect.y = random.randrange(-100, -40)
            self.speedy = random.randrange(1, 8)  # re-randomize fall speed
## defines the sprite for Powerups
## defines the sprite for Powerups
class Pow(pygame.sprite.Sprite):
    """A power-up item that drops from a destroyed mob's position."""

    def __init__(self, center):
        pygame.sprite.Sprite.__init__(self)
        # One of four random pickup kinds; keys index powerup_images.
        self.type = random.choice(['shield', 'gun', 'bomb', 'heart'])
        self.image = powerup_images[self.type]
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # Spawn at the position of the mob that dropped it.
        self.rect.center = center
        self.speedy = 2  # slow downward drift

    def update(self):
        """Drift downward; remove once past the bottom of the screen."""
        self.rect.y += self.speedy
        # Kill the sprite after it falls below the bottom border.
        if self.rect.top > HEIGHT:
            self.kill()
#폭탄을 위한 Bomb 클래스 생성
class Bomb(pygame.sprite.Sprite):
    """A bomb projectile that travels straight up from (x, y)."""

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = bomb_img
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # Anchor the bomb so its bottom edge starts at the launch point.
        self.rect.centerx = x
        self.rect.bottom = y
        self.speedy = -10  # pixels per frame, upward

    def update(self):
        """Fly upward; despawn once fully above the top of the screen."""
        self.rect.y += self.speedy
        if self.rect.bottom < 0:
            self.kill()
## defines the sprite for bullets
## defines the sprite for bullets
class Bullet(pygame.sprite.Sprite):
    """A standard laser shot fired upward from the player's position."""

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = bullet_img
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # Place the bullet at the muzzle point passed by the caller
        # (the player's current position when firing).
        self.rect.bottom = y
        self.rect.centerx = x
        self.speedy = -10  # pixels per frame, upward

    def update(self):
        """Fly upward; despawn once fully above the top of the screen."""
        self.rect.y += self.speedy
        # Kill the sprite after it moves over the top border.
        if self.rect.bottom < 0:
            self.kill()
## now we need a way to shoot
## lets bind it to "spacebar".
## adding an event for it in Game loop
## FIRE ZE MISSILES
## FIRE ZE MISSILES
class Missile(pygame.sprite.Sprite):
    """A missile shot (power level >= 3) fired upward from the ship's nose."""

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = missile_img
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # Spawn at the muzzle point passed by the caller.
        self.rect.bottom = y
        self.rect.centerx = x
        self.speedy = -10  # pixels per frame, upward

    def update(self):
        """Fly upward; despawn once fully above the top of the screen."""
        self.rect.y += self.speedy
        if self.rect.bottom < 0:
            self.kill()
###################################################
## Load all game images
#배경화면 이미지 리스트로 저장
# Background images, one per wave (index = wave - 1).
starfield = ['starfield1.png', 'starfield2.png', 'starfield3.png', 'starfield4.png']


def setbackground(wave):
    """Return the loaded background Surface for *wave* (1-4).

    The original if/elif chain left ``background`` unbound (and raised
    UnboundLocalError) for any wave outside 1-4; out-of-range waves are
    now clamped into the valid range instead.
    """
    index = min(max(wave, 1), len(starfield)) - 1
    return pygame.image.load(path.join(img_dir, starfield[index])).convert()
# --- Player, projectile and enemy artwork -------------------------------
player_img = pygame.image.load(path.join(img_dir, 'playerShip1_orange.png')).convert()
player_mini_img = pygame.transform.scale(player_img, (25, 19))  # HUD life icon
player_mini_img.set_colorkey(BLACK)
bullet_img = pygame.image.load(path.join(img_dir, 'laserRed16.png')).convert()
bomb_img = pygame.image.load(path.join(img_dir, 'spaceMissiles_006.png')).convert()
missile_img = pygame.image.load(path.join(img_dir, 'missile.png')).convert_alpha()
# meteor_img = pygame.image.load(path.join(img_dir, 'meteorBrown_med1.png')).convert()
# Meteor sprites used by the wave-1 mobs.
meteor_images = []
meteor_list = [
    'meteorBrown_big1.png',
    'meteorBrown_big2.png',
    'meteorBrown_med1.png',
    'meteorBrown_med3.png',
    'meteorBrown_small1.png',
    'meteorBrown_small2.png',
    'meteorBrown_tiny1.png'
]
for image in meteor_list:
    meteor_images.append(pygame.image.load(path.join(img_dir, image)).convert())
# Alien (ghost) images for the wave-2 mobs.
ghost_images = []
ghost_list = [
    'ghost1.png',
    'ghost2.png',
    'ghost3.png',
    'ghost4.png'
]
for image in ghost_list:
    ghost_images.append(pygame.image.load(path.join(img_dir, image)).convert())
# Spaceship (cockpit) images for the wave-3 mobs.
cockpit_images = []
cockpit_list = [
    'cockpit1Green.png',
    'cockpit2Green.png',
    'cockpit3Green.png',
    'cockpit1Grey.png',
    'cockpit2Grey.png',
    'cockpit3Grey.png',
    'cockpit1Red.png',
    'cockpit2Red.png',
    'cockpit3Red.png',
    'cockpit1Yellow.png',
    'cockpit2Yellow.png',
    'cockpit3Yellow.png'
]
for image in cockpit_list:
    cockpit_images.append(pygame.image.load(path.join(img_dir, image)).convert())
# Second set of spaceship images for the wave-4 mobs.
cockpit2_images = []
cockpit2_list = [
    'cockpit4Green.png',
    'cockpit5Green.png',
    'sampleShip1.png',
    'cockpit4Grey.png',
    'cockpit5Grey.png',
    'sampleShip2.png',
    'cockpit4Red.png',
    'cockpit5Red.png',
    'sampleShip3.png',
    'cockpit4Yellow.png',
    'cockpit5Yellow.png'
]
for image in cockpit2_list:
    cockpit2_images.append(pygame.image.load(path.join(img_dir, image)).convert())
## meteor explosion
# Frame lists for the three explosion animations, keyed by size/kind:
# 'lg' and 'sm' for mob hits, 'player' for the ship's death animation.
explosion_anim = {}
explosion_anim['lg'] = []
explosion_anim['sm'] = []
explosion_anim['player'] = []
for i in range(9):
    filename = 'regularExplosion0{}.png'.format(i)
    img = pygame.image.load(path.join(img_dir, filename)).convert()
    img.set_colorkey(BLACK)
    ## resize the explosion to the two mob-hit sizes
    img_lg = pygame.transform.scale(img, (75, 75))
    explosion_anim['lg'].append(img_lg)
    img_sm = pygame.transform.scale(img, (32, 32))
    explosion_anim['sm'].append(img_sm)
    ## player explosion frames come from a separate sprite set
    filename = 'sonicExplosion0{}.png'.format(i)
    img = pygame.image.load(path.join(img_dir, filename)).convert()
    img.set_colorkey(BLACK)
    explosion_anim['player'].append(img)
## load power ups (keys match Pow.type)
powerup_images = {}
powerup_images['shield'] = pygame.image.load(path.join(img_dir, 'shield_gold.png')).convert()
powerup_images['gun'] = pygame.image.load(path.join(img_dir, 'bolt_gold.png')).convert()
powerup_images['bomb'] = pygame.image.load(path.join(img_dir, 'bomb_gold.png')).convert()
# bomb pickup image
powerup_images['heart'] = pygame.image.load(path.join(img_dir, 'heart.png')).convert()
# heart (extra life) pickup image
###################################################
###################################################
### Load all game sounds
shooting_sound = pygame.mixer.Sound(path.join(sound_folder, 'pew.wav'))
missile_sound = pygame.mixer.Sound(path.join(sound_folder, 'rocket.ogg'))
expl_sounds = []
for sound in ['expl3.wav', 'expl6.wav']:
    expl_sounds.append(pygame.mixer.Sound(path.join(sound_folder, sound)))
## main background music
#pygame.mixer.music.load(path.join(sound_folder, 'tgfcoder-FrozenJam-SeamlessLoop.ogg'))
pygame.mixer.music.set_volume(0.2)  ## simmered the sound down a little
player_die_sound = pygame.mixer.Sound(path.join(sound_folder, 'rumble1.ogg'))
###################################################
## TODO: make the game music loop over again and again. play(loops=-1) is not working
# Error :
# TypeError: play() takes no keyword arguments
#pygame.mixer.music.play()
#############################
## Game loop
# NOTE(review): main_menu, newmob, count_wave, Explosion, scoreBorder,
# draw_text, draw_shield_bar, draw_lives, clock, FPS and screen are
# defined earlier in this file (outside this excerpt).
running = True
menu_display = True  # show the menu (and reset game state) on next pass
while running:
    if menu_display:
        # Show the menu, then (re)initialize a fresh game session.
        main_menu()
        pygame.time.wait(3000)
        # Stop menu music
        pygame.mixer.music.stop()
        # Play the gameplay music
        pygame.mixer.music.load(path.join(sound_folder, 'tgfcoder-FrozenJam-SeamlessLoop.ogg'))
        pygame.mixer.music.play(-1)  ## makes the gameplay sound in an endless loop
        menu_display = False
        bossStage = False
        wavecounter = 0  # total kills so far (drives wave progression)
        wave = 1         # current stage (wave)
        background = setbackground(wave)
        background_rect = setbackground(wave).get_rect()
        ## group all the sprites together for ease of update
        all_sprites = pygame.sprite.Group()
        player = Player()
        all_sprites.add(player)
        ## spawn a group of mob
        mobs = pygame.sprite.Group()
        for i in range(8):  ## 8 mobs
            newmob(wave)
        ## group for bullets
        bullets = pygame.sprite.Group()
        powerups = pygame.sprite.Group()
        bombs = pygame.sprite.Group()  # group holding live bombs
        #### Score board variable
        score = 0
    # 1 Process input/events
    clock.tick(FPS)  ## will make the loop run at the same speed all the time
    for event in pygame.event.get():
        ## listening for the the X button at the top
        if event.type == pygame.QUIT:
            running = False
        ## Press ESC to exit game
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                running = False
    # 2 Update
    all_sprites.update()
    ## check if a bullet hit a mob (both sprites are removed on hit)
    hits = pygame.sprite.groupcollide(mobs, bullets, True, True)
    ## respawn a mob for each one destroyed so the count stays constant
    for hit in hits:
        wavecounter += 1
        wave = count_wave(wavecounter)
        # Refresh the background shortly after a wave transition.
        if wavecounter % 200 <= 10:
            background = setbackground(wave)
            background_rect = setbackground(wave).get_rect()
        score += 100 - hit.radius  ## smaller targets are worth more points
        random.choice(expl_sounds).play()
        expl = Explosion(hit.rect.center, 'lg')
        all_sprites.add(expl)
        # 5% chance to drop a power-up at the kill position.
        if random.random() > 0.95:
            pow = Pow(hit.rect.center)
            all_sprites.add(pow)
            powerups.add(pow)
        newmob(wave)  ## spawn a new mob
    #########################
    ## bomb vs. mob collisions — same handling as bullet hits above
    b_hits = pygame.sprite.groupcollide(mobs, bombs, True, True)
    for hit in b_hits:
        wavecounter += 1
        wave = count_wave(wavecounter)
        if wavecounter % 200 <= 10:
            background = setbackground(wave)
            background_rect = setbackground(wave).get_rect()
        score += 100 - hit.radius  ## smaller targets are worth more points
        random.choice(expl_sounds).play()
        expl = Explosion(hit.rect.center, 'lg')
        all_sprites.add(expl)
        if random.random() > 0.95:
            pow = Pow(hit.rect.center)
            all_sprites.add(pow)
            powerups.add(pow)
        newmob(wave)
    ## check if the player collides with a mob
    # gives back a list; True removes the colliding mob
    hits = pygame.sprite.spritecollide(
        player, mobs, True, pygame.sprite.collide_circle)
    for hit in hits:
        player.shield -= hit.radius * 2  # bigger mobs deal more damage
        expl = Explosion(hit.rect.center, 'sm')
        all_sprites.add(expl)
        newmob(wave)
        if player.shield <= 0:
            player_die_sound.play()
            death_explosion = Explosion(player.rect.center, 'player')
            all_sprites.add(death_explosion)
            player.hide()
            player.lives -= 1
            player.shield = 100
    ## if the player picked up a power-up, apply its effect (capped)
    hits = pygame.sprite.spritecollide(player, powerups, True)
    for hit in hits:
        if hit.type == 'shield':
            player.shield += random.randrange(10, 30)
            if player.shield >= 100:
                player.shield = 100
        if hit.type == 'gun':
            player.powerup()
        if hit.type == 'bomb':
            player.bomb_count += 1  # one extra bomb, capped at 3
            if player.bomb_count >= 3:
                player.bomb_count = 3
        if hit.type == 'heart':  # extra life, capped at 3
            player.lives += 1
            if player.lives >= 3:
                player.lives = 3
    ## if player died and the death explosion has finished, return to menu
    # NOTE(review): death_explosion is only bound after the first death;
    # lives only reaches 0 through that same code path, so this is safe.
    if player.lives == 0 and not death_explosion.alive():
        scoreBorder(score)
        menu_display = True
        continue
    # 3 Draw/render
    screen.fill(BLACK)
    ## draw the background image
    screen.blit(background, background_rect)
    all_sprites.draw(screen)
    # HUD: score, ammo, shield bar, kill count, wave number
    draw_text(screen, 'score: ' + str(score), 18, WIDTH / 8, 15)
    draw_text(screen, 'bullet: ' + str(player.power_count_text), 18, WIDTH / 10, 35)
    draw_shield_bar(screen, 5, 5, player.shield)
    draw_text(screen, 'kill ' + str(wavecounter), 18, WIDTH / 2, 25)
    draw_text(screen, 'wave ' + str(wave), 18, WIDTH / 2, 40)
    # Draw lives
    draw_lives(screen, WIDTH - 100, 5, player.lives, player_mini_img)
    # Draw remaining bombs on the HUD
    draw_lives(screen, WIDTH - 100, 25, player.bomb_count, powerup_images['bomb'])
    ## Done after drawing everything to the screen
    pygame.display.flip()
pygame.quit()
|
997,465 | 9df725ba6c5e5d18e2072e95bbc0629e21c8a10b | import configparser
import os
class Controller(object):
    """A controller class for getting and setting key/value pairs in the
    config file (config.ini next to this module)."""

    # Section used when the caller does not specify one.
    section_default = 'BunqAPI'

    def __init__(self):
        """Load config.ini from the directory containing this module.

        ConfigParser.read() silently tolerates a missing file, so a fresh
        install starts with an empty configuration.
        """
        self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'
        self.config = configparser.ConfigParser()
        self.config.read(self.path)

    def get(self, name, section=section_default):
        """Return the value stored under *name* in *section*.

        Raises KeyError if the section or entry does not exist.
        """
        return self.config[section][name]

    def set(self, name, val, section=section_default):
        """Set *name* to *val* in *section* and persist the change.

        :param name: The entry whose value should be changed
        :param val: The new value for the specified entry
        :param section: [Optional] The section in which an entry should be changed
        :return: Nothing, but happiness
        """
        if section not in self.config.sections():
            self.config.add_section(section)
        self.config.set(section, name, val)
        self.save()

    def save(self):
        """Write the current configuration back to config.ini.

        Uses a context manager so the file handle is always closed — the
        original leaked the handle if ConfigParser.write() raised.
        """
        with open(self.path, 'w') as file:
            self.config.write(file)
|
997,466 | 13056696553c7c873432fb0c66f55fdbdc2aa774 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import os
import sys
import unittest.mock as mock
import pytest
import txaio
if os.environ.get('USE_ASYNCIO', False):
from autobahn.asyncio.component import Component
    @pytest.mark.skipif(sys.version_info < (3, 5), reason="requires Python 3.5+")
    @pytest.mark.asyncio(forbid_global_loop=True)
    def test_asyncio_component(event_loop):
        """A Component whose only transport is unreachable must exhaust its
        retries and fail with 'Exhausted all transport connect attempts'."""
        # Point txaio at the test's event loop for the duration of the test.
        orig_loop = txaio.config.loop
        txaio.config.loop = event_loop
        comp = Component(
            transports=[
                {
                    # Nothing listens on port 12, so every connect fails.
                    "url": "ws://localhost:12/bogus",
                    "max_retries": 1,
                    "max_retry_delay": 0.1,
                }
            ]
        )
        # if having trouble, try starting some logging (and use
        # "py.test -s" to get real-time output)
        # txaio.start_logging(level="debug")
        f = comp.start(loop=event_loop)
        txaio.config.loop = event_loop
        finished = txaio.create_future()
        def fail():
            # Safety net: fail the test if the component never completes.
            finished.set_exception(AssertionError("timed out"))
            txaio.config.loop = orig_loop
        txaio.call_later(4.0, fail)
        def done(f):
            # Completion callback: success is the wrong outcome here —
            # we expect the retry-exhaustion RuntimeError.
            try:
                f.result()
                finished.set_exception(AssertionError("should get an error"))
            except RuntimeError as e:
                if 'Exhausted all transport connect attempts' not in str(e):
                    finished.set_exception(AssertionError("wrong exception caught"))
            finished.set_result(None)
            txaio.config.loop = orig_loop
            assert comp._done_f is None
        f.add_done_callback(done)
        return finished
    @pytest.mark.skipif(sys.version_info < (3, 5), reason="requires Python 3.5+")
    @pytest.mark.asyncio(forbid_global_loop=True)
    def test_asyncio_component_404(event_loop):
        """
        If something connects but then gets aborted, it should still try
        to re-connect (in real cases this could be e.g. wrong path,
        TLS failure, WebSocket handshake failure, etc)
        """
        orig_loop = txaio.config.loop
        txaio.config.loop = event_loop
        class FakeTransport(object):
            # Minimal stand-in for an asyncio transport.
            def close(self):
                pass
            def write(self, data):
                pass
        fake_transport = FakeTransport()
        actual_protocol = [None]  # set in a closure below
        def create_connection(protocol_factory=None, server_hostname=None, host=None, port=None, ssl=False):
            # First call: hand back a fake connected transport/protocol pair.
            # Second call: fail outright, so the component must give up.
            if actual_protocol[0] is None:
                protocol = protocol_factory()
                actual_protocol[0] = protocol
                protocol.connection_made(fake_transport)
                return txaio.create_future_success((fake_transport, protocol))
            else:
                return txaio.create_future_error(RuntimeError("second connection fails completely"))
        with mock.patch.object(event_loop, 'create_connection', create_connection):
            event_loop.create_connection = create_connection
            comp = Component(
                transports=[
                    {
                        "url": "ws://localhost:12/bogus",
                        "max_retries": 1,
                        "max_retry_delay": 0.1,
                    }
                ]
            )
            # if having trouble, try starting some logging (and use
            # "py.test -s" to get real-time output)
            # txaio.start_logging(level="debug")
            f = comp.start(loop=event_loop)
            txaio.config.loop = event_loop
            # now that we've started connecting, we *should* be able
            # to connetion_lost our transport .. but we do a
            # call-later to ensure we're after the setup stuff in the
            # event-loop (because asyncio doesn't synchronously
            # process already-completed Futures like Twisted does)
            def nuke_transport():
                actual_protocol[0].connection_lost(None)  # asyncio can call this with None
            txaio.call_later(0.1, nuke_transport)
            finished = txaio.create_future()
            def fail():
                # Safety net: fail the test if the component never completes.
                finished.set_exception(AssertionError("timed out"))
                txaio.config.loop = orig_loop
            txaio.call_later(1.0, fail)
            def done(f):
                # The component must end with the retry-exhaustion error
                # after the second (failing) connection attempt.
                try:
                    f.result()
                    finished.set_exception(AssertionError("should get an error"))
                except RuntimeError as e:
                    if 'Exhausted all transport connect attempts' not in str(e):
                        finished.set_exception(AssertionError("wrong exception caught"))
                finished.set_result(None)
                txaio.config.loop = orig_loop
            f.add_done_callback(done)
            return finished
|
997,467 | 240bf606c9825580221d33615c6835c0bf421d89 | # coding=utf-8
import collections
from django.db.models import Q
from applications.activity.share import expert
from applications.common.services import area_name
from applications.user.models import Area
from applications.work.models import Work
from applications.work.share import struct_work
from utils.const_def import TRUE_INT, FALSE_INT, WORK_SHOW_STATUS_SUBJUDGE_DONE, WORK_SHOW_STATUS_SUBJUDGE_DOING
from utils.utils_type import str2bool
def struct_subjudge_expert(subjudge, e):
    """Serialize expert *e* via expert(), tagging the payload with the
    id of the sub-judging round it belongs to."""
    payload = expert(e)
    payload['subjudge_id'] = str(subjudge.id)
    return payload
def struct_work_subjudge(work, subjudge_status, subjudge_score, subjudge_team, subjudge_rule):
    """Build an ordered dict describing *work* plus its sub-judging state.

    Starts from struct_work() and layers on the sub-judge score (rendered
    through the rule's display logic, '/' when not yet scored), status and
    the judging team's id/name. *subjudge_score* may be None.
    """
    result = collections.OrderedDict()
    result.update(struct_work(work, None))
    result['subjudge_score_id'] = str(subjudge_score.id) if subjudge_score else ''
    # '/' marks a work that has no sub-judge score yet.
    result['subjudge_score_display'] = subjudge_rule.parse_rule().display_judge(score_obj=subjudge_score) if subjudge_score else '/'
    result['subjudge_status'] = subjudge_status
    result['subjudge_team_id'] = str(subjudge_team.id)
    result['subjudge_team_name'] = subjudge_team.name
    return result
def struct_work_subjudge_leader(
        subjudge_rule, work, judge_status, subjudge_score, subjudge_team, expert_count, expert_judge):
    """Build the leader's view of a sub-judged work: the common sub-judge
    fields plus one display entry per expert slot.

    *expert_judge* maps an expert's slot index to that expert's score
    object (or a falsy value); slots without an entry render as '-'.
    NOTE(review): uses xrange — this module appears to target Python 2.
    """
    result = collections.OrderedDict()
    result.update(struct_work_subjudge(work, judge_status, subjudge_score, subjudge_team, subjudge_rule))
    expert_score_list = list()
    for i in xrange(expert_count):
        if i in expert_judge:
            # Render the expert's score via the rule; '-' when unscored.
            expert_score_list.append(subjudge_rule.parse_rule().display_judge(score_obj=expert_judge[i])
                                     if expert_judge[i] else '-')
        else:
            expert_score_list.append('-')
    result['subjudge_expert_score_list'] = expert_score_list
    return result
def mutate_string(string, position, character):
    """Return *string* with the character at index *position* replaced by
    *character*.

    Strings are immutable, so the replacement is done on a character list
    and joined back. (A slicing approach would behave differently for
    negative indices, so the list form is kept deliberately.)
    """
    chars = list(string)
    chars[position] = character
    return "".join(chars)
if __name__ == '__main__':
    # Read the target string, then the index and replacement character
    # (space-separated) from stdin, and print the mutated result.
    s = input()
    i, c = input().split()
    s_new = mutate_string(s, int(i), c)
    print(s_new)
997,469 | 6ea5361d3ab57570520f3c36a3d150349a78c892 | import io
import unittest
import yaml
from backup_tool.pipeline import BackupPipeline
class PipelineTest(unittest.TestCase):
    """Exercises BackupPipeline construction and command rendering against
    the pipeline.yaml fixture in the working directory."""

    def setUp(self):
        # safe_load replaces the deprecated Loader-less yaml.load, which can
        # execute arbitrary Python via YAML tags in an untrusted file.
        with open('pipeline.yaml') as f:
            self.pipeline_data = yaml.safe_load(f)

    def test_load(self):
        """The pipeline definition should construct without raising."""
        BackupPipeline(self.pipeline_data['pipeline'], {})

    def test_forward(self):
        """forward() should render a command for a sample context."""
        pipeline = BackupPipeline(self.pipeline_data['pipeline'], {})
        print(pipeline.forward({
            'connection': {
                'host': 'jbnas',
                'remote_user': 'jb',
                'dest_folder': '/data',
            },
            'folders': '/tmp/foo /tmp/bar /tmp/baz',
            'pipeline': pipeline
        }))
997,470 | f105914d918fd5dd51930bc192935540a81edf3f | from django.contrib import admin
from users.models import Follow, UserCustom
@admin.register(Follow)
class FollowUsers(admin.ModelAdmin):
    """Admin listing for Follow relations, filterable by either side."""
    list_display = ('id', 'follower', 'author', )
    list_display_links = ('id', 'follower', )
    list_filter = ('follower', 'author', )
@admin.register(UserCustom)
class CustomUsers(admin.ModelAdmin):
    """Admin listing for custom users, searchable by username."""
    list_display = ('id', 'username', 'first_name', 'last_name', 'email', )
    list_display_links = ('id', 'username', 'first_name', 'last_name', )
    search_fields = ('username', )
997,471 | e7e68fa17f5842a6691bf34bda96d7465e3c80fd | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 01:53:05 2019
@author: b18007
"""
# importing libraries
import numpy as np


def estimate_shared_birthday_probability(n_people=23, trials=100000, seed=None):
    """Monte-Carlo estimate of the birthday-paradox probability.

    Draws *trials* groups of *n_people* uniform birthdays (1-365) and
    returns the fraction of groups containing at least one shared
    birthday. *seed* makes the simulation reproducible.

    Generalizes the original hard-coded 23-person / 100000-trial script;
    the string-multiplication duplicate counting is replaced by the
    equivalent (and much simpler) set-size comparison.
    """
    rng = np.random.RandomState(seed)
    hits = 0
    for _ in range(trials):
        birthdays = rng.randint(1, 366, n_people)
        # A collision occurred iff deduplication shrank the sample.
        if len(set(birthdays)) < n_people:
            hits += 1
    return hits / trials


if __name__ == '__main__':
    # Same output format as the original script.
    print("probability", estimate_shared_birthday_probability())
997,472 | 461c84781c32ef846f658e12a6958cfd47a1261c | import argparse
import yaml
import torch.backends.cudnn as cudnn
import torch
from PIL import Image
import numpy as np
import os
from sklearn import metrics
import matplotlib.pyplot as plt
from tqdm import tqdm
import ast
from itertools import product
from numpy.linalg import norm
from util import trainer_util, metrics
from util.iter_counter import IterationCounter
from models.dissimilarity_model import DissimNet, DissimNetPrior
# --- Command-line options ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, help='Path to the config file.')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#parser.add_argument('--weights', type=str, default='[0.70, 0.1, 0.1, 0.1]', help='weights for ensemble testing [model, entropy, mae, distance]')
opts = parser.parse_args()  # NOTE: parsed at import time — this module is a script
cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms
#weights = ast.literal_eval(opts.weights)
def normalize(weights):
    """Scale *weights* so its L1 norm is 1.

    An all-zero vector is returned unchanged (nothing to scale). For any
    other input the division by a numpy scalar yields an ndarray, even
    when *weights* is a plain tuple or list.
    """
    l1 = norm(weights, 1)
    if l1 == 0.0:
        # Avoid dividing by zero on an all-zero weight vector.
        return weights
    # numpy's reflected division converts the sequence to an ndarray.
    return weights / l1
# grid search weights
def grid_search(model_num=4):
    """Exhaustive grid search over ensemble weight combinations.

    Tries every cartesian product of the levels [0, 1, 2, 3] across
    *model_num* ensemble components, L1-normalizes each candidate, skips
    all-equal vectors and duplicates, and keeps the combination with the
    lowest FPR@95%TPR reported by evaluate_ensemble().

    Returns (best_weights, best_fpr95, best_auroc, best_ap).
    """
    # define weights to consider
    d = {}  # memo of already-evaluated normalized weight vectors
    w = [0, 1, 2, 3]
    # best_score tracks FPR@95%TPR, so lower is better (worst case 1.0).
    best_score, best_roc, best_ap, best_weights = 1.0, 0, 0, None
    # iterate all possible combinations (cartesian product)
    for weights in product(w, repeat=model_num):
        # skip if all weights are equal (normalization makes them identical)
        if len(set(weights)) == 1:
            continue
        # hack, normalize weight vector so different raw levels that
        # normalize to the same mixture are only evaluated once
        weights = normalize(weights)
        if str(weights) in d:
            continue
        else:
            d[str(weights)] = 0
        # evaluate weights
        score_roc, score_ap, score_fp = evaluate_ensemble(weights)
        print('Weights: %s Score_FP: %.3f Score_ROC:%.3f Score_AP:%.3f' % (weights, score_fp, score_roc, score_ap))
        if score_fp < best_score:
            best_score, best_weights, best_roc, best_ap = score_fp, weights, score_roc, score_ap
            print('>BEST SO FAR %s Score_FP: %.3f Score_ROC:%.3f Score_AP:%.3f' % (best_weights, best_score, best_roc, best_ap))
    return list(best_weights), best_score, best_roc, best_ap
def evaluate_ensemble(weights_f):
    """Score one ensemble weight vector over the whole test set.

    Combines the dissimilarity model's anomaly map with the entropy, mae
    and distance priors using weights_f = [model, entropy, mae, distance],
    writes per-image prediction images, and returns
    (auroc, AP, FPR@95%TPR).

    NOTE(review): when the global ``prior`` is False, ``entropy``/``mae``/
    ``distance`` are never assigned yet are still used to build
    ``soft_pred`` below -- that path would raise NameError. Confirm this
    script is only run with prior-enabled configs.
    """
    # create memory locations for results to save time while running the code
    dataset = cfg_test_loader['dataset_args']
    h = int((dataset['crop_size']/dataset['aspect_ratio']))
    w = int(dataset['crop_size'])
    flat_pred = np.zeros(w*h*len(test_loader), dtype='float32')
    flat_labels = np.zeros(w*h*len(test_loader), dtype='float32')

    with torch.no_grad():
        for i, data_i in enumerate(test_loader):
            original = data_i['original'].cuda()
            semantic = data_i['semantic'].cuda()
            synthesis = data_i['synthesis'].cuda()
            label = data_i['label'].cuda()

            if prior:
                entropy = data_i['entropy'].cuda()
                mae = data_i['mae'].cuda()
                distance = data_i['distance'].cuda()
                outputs = softmax(diss_model(original, synthesis, semantic, entropy, mae, distance))
            else:
                outputs = softmax(diss_model(original, synthesis, semantic))
            (softmax_pred, predictions) = torch.max(outputs,dim=1)

            # weighted sum of the anomaly channel and the prior maps
            soft_pred = outputs[:,1,:,:]*weights_f[0] + entropy*weights_f[1] + mae*weights_f[2] + distance*weights_f[3]
            flat_pred[i*w*h:i*w*h+w*h] = torch.flatten(soft_pred).detach().cpu().numpy()
            flat_labels[i*w*h:i*w*h+w*h] = torch.flatten(label).detach().cpu().numpy()

            # Save results (hard prediction, soft map, ground-truth label)
            predicted_tensor = predictions * 1
            label_tensor = label * 1

            file_name = os.path.basename(data_i['original_path'][0])
            label_img = Image.fromarray(label_tensor.squeeze().cpu().numpy().astype(np.uint8))
            soft_img = Image.fromarray((soft_pred.squeeze().cpu().numpy()*255).astype(np.uint8))
            predicted_img = Image.fromarray(predicted_tensor.squeeze().cpu().numpy().astype(np.uint8))
            predicted_img.save(os.path.join(store_fdr_exp, 'pred', file_name))
            soft_img.save(os.path.join(store_fdr_exp, 'soft', file_name))
            label_img.save(os.path.join(store_fdr_exp, 'label', file_name))

    if config['test_dataloader']['dataset_args']['roi']:
        # drop pixels labelled 255 (outside the region of interest)
        invalid_indices = np.argwhere(flat_labels == 255)
        flat_labels = np.delete(flat_labels, invalid_indices)
        flat_pred = np.delete(flat_pred, invalid_indices)

    results = metrics.get_metrics(flat_labels, flat_pred)
    return results['auroc'], results['AP'], results['FPR@95%TPR']
if __name__ == '__main__':
    # Load experiment setting from the YAML config given on the CLI
    with open(opts.config, 'r') as stream:
        config = yaml.load(stream, Loader=yaml.FullLoader)

    # get experiment information
    exp_name = config['experiment_name']
    save_fdr = config['save_folder']
    epoch = config['which_epoch']
    store_fdr = config['store_results']
    store_fdr_exp = os.path.join(config['store_results'], exp_name)

    # create result folders (label/pred/soft) on first run
    if not os.path.isdir(store_fdr):
        os.mkdir(store_fdr)
    if not os.path.isdir(store_fdr_exp):
        os.mkdir(store_fdr_exp)
    if not os.path.isdir(os.path.join(store_fdr_exp, 'pred')):
        os.mkdir(os.path.join(store_fdr_exp, 'label'))
        os.mkdir(os.path.join(store_fdr_exp, 'pred'))
        os.mkdir(os.path.join(store_fdr_exp, 'soft'))

    # Activate GPUs
    config['gpu_ids'] = opts.gpu_ids
    gpu_info = trainer_util.activate_gpus(config)

    # checks if we are using prior images (entropy / mae / distance maps)
    prior = config['model']['prior']

    # Get data loaders
    cfg_test_loader = config['test_dataloader']
    # adds logic to dataloaders (avoid repetition in config file)
    cfg_test_loader['dataset_args']['prior'] = prior
    test_loader = trainer_util.get_dataloader(cfg_test_loader['dataset_args'], cfg_test_loader['dataloader_args'])

    # build the dissimilarity model matching the config
    if config['model']['prior']:
        diss_model = DissimNetPrior(**config['model']).cuda()
    elif 'vgg' in config['model']['architecture']:
        diss_model = DissimNet(**config['model']).cuda()
    else:
        raise NotImplementedError()

    # load the checkpoint for the requested epoch and switch to eval mode
    diss_model.eval()
    model_path = os.path.join(save_fdr, exp_name, '%s_net_%s.pth' % (epoch, exp_name))
    model_weights = torch.load(model_path)
    diss_model.load_state_dict(model_weights)

    softmax = torch.nn.Softmax(dim=1)

    # grid-search ensemble weights and report the best combination
    best_weights, best_score, best_roc, best_ap = grid_search()
    print('Best weights: %s Score_FP: %.3f Score_ROC:%.3f Score_AP:%.3f' % (best_weights, best_score, best_roc, best_ap))
# To change this template, choose Tools | Templates
# and open the template in the editor.
__author__="Tim Sobolewski (TJSO)"
__date__ ="$Sep 1, 2010 11:34:13 AM$"
__version__="2.1"
# Last addition: added writepidfile()
import os.path
import datetime
import os
debug = False
class Filewriter:
    """A simple class for creating & writing out various kinds of files.

    Supported naming schemes (nametype, case-insensitive):
      - None:                 use the filename as given
      - DATESTAMP/TIMESTAMP:  insert a UTC date/time stamp before the extension
      - OTSNCD:               mutually-exclusive .VOP/.OTS/.NCD triplet handling
      - INC:                  incrementing names (not yet supported)

    NOTE: this is Python 2 code (print statements, the <> operator).
    """
    #    filename = None

    # Initialize and create file by type
    def __init__(self, filename, nametype=None, debug = False):
        if debug:
            self.debug = True
        else:
            self.debug = False
        if nametype != None:
            nametype = nametype.upper()  # scheme name is case-insensitive
        self.nametype = nametype
        if nametype == None:
            self.filename = filename
        elif nametype == "DATESTAMP" or nametype == "TIMESTAMP":
            self.setdsfilename(filename, nametype)
        elif nametype == "OTSNCD":
            self.filename = filename
        elif nametype == "INC":
            # use incrementing nametype (placeholder -- not implemented)
            print
        else:
            # unknown scheme: fall back to the plain filename
            self.filename = filename
            self.nametype = None
    # End __init__ function

    def setdsfilename(self, filename, nametype):
        """Set self.filename with a UTC date/time stamp inserted before the extension."""
        now = datetime.datetime.utcnow()
        if nametype == 'DATESTAMP':
            datestring = now.strftime('%Y%m%d')
        elif nametype == 'TIMESTAMP':
            datestring = now.strftime('%Y%m%d%H%M%S')
        if filename.count('.') > 0:
            # insert the stamp between the basename and the last extension
            extindex = filename.rfind('.')
            filenametemp = filename[:extindex]
            ext = filename[extindex:]
            self.filename = ("%s_%s%s" % (filenametemp, datestring, ext))
        else:
            self.filename = filename + '_' + datestring

    def write(self, buffer=None):
        """Append buffer to the file, honouring the configured naming scheme.

        NOTE(review): the OTSNCD branch tests the module-level ``debug``
        flag, not ``self.debug`` -- confirm which one is intended.
        """
        if self.nametype == 'DATESTAMP' or self.nametype == 'TIMESTAMP' or self.nametype == None:
            self.writefile(buffer)
        elif self.nametype == 'OTSNCD':
            # OTS/NCD/VOP files are mutually exclusive: writing one kind
            # removes or renames its siblings with the other extensions.
            if os.path.exists(self.filename):
                # File already exists, do nothing
                if debug:
                    print ('File %s exists.' % self.filename)
                None
            else:
                if debug:
                    print ('File %s does not exist.' % self.filename)
                length = len(self.filename)
                if self.filename.endswith('.VOP'):
                    # Delete OTS or NCD sibling and write the VOP file
                    delfile = self.filename
                    delfile = delfile[:length - 4] + '.OTS'
                    # If the OTS file exists, delete it
                    if os.path.exists(delfile):
                        self.removefile(delfile)
                    delfile = delfile[:length - 4] + '.NCD'
                    # If the NCD file exists, delete it
                    if os.path.exists(delfile):
                        self.removefile(delfile)
                    self.writefile(buffer)
                elif self.filename.endswith('.OTS'):
                    # Delete the VOP sibling; rename an NCD sibling to OTS,
                    # otherwise write a fresh OTS file
                    delfile = self.filename
                    delfile = delfile[:length - 4] + '.VOP'
                    # If the VOP file exists, delete it
                    if os.path.exists(delfile):
                        self.removefile(delfile)
                    delfile = delfile[:length - 4] + '.NCD'
                    # If the NCD file exists, rename it into place
                    if os.path.exists(delfile):
                        self.renamefile(delfile, self.filename)
                    #if not os.path.exists(self.filename):
                    else:
                        self.writefile(buffer)
                elif self.filename.endswith('.NCD'):
                    # Delete the VOP sibling; rename an OTS sibling to NCD,
                    # otherwise write a fresh NCD file
                    delfile = self.filename
                    delfile = delfile[:length - 4] + '.VOP'
                    # If the VOP file exists, delete it
                    if os.path.exists(delfile):
                        self.removefile(delfile)
                    delfile = delfile[:length - 4] + '.OTS'
                    # If the OTS file exists, rename it into place
                    if os.path.exists(delfile):
                        self.renamefile(delfile, self.filename)
                    #if not os.path.exists(self.filename):
                    else:
                        self.writefile(buffer)
        elif self.nametype == 'INC':
            # incremental file naming is not implemented yet
            print 'nametype = INC <not yet supported>'
    # End write function

    def removefile(self, filename=None):
        """Delete filename (defaults to self.filename); errors are printed, not raised."""
        if filename == None:
            filename = self.filename
        try:
            os.remove(filename)
        except Exception as e:
            print e

    def renamefile(self, oldfile, newfile):
        """Rename oldfile to newfile; errors are printed, not raised."""
        try:
            os.rename(oldfile, newfile)
        except Exception as e:
            print e

    def writefile(self, buffertemp):
        """Append buffertemp (if not None) to self.filename."""
        try:
            file = open( self.filename, "a")
            if buffertemp <> None:
                file.write(buffertemp)
            file.close()
        except Exception as e:
            print e

    def logentry(self, buffertemp):
        """Append buffertemp to the file as a UTC-timestamped log line."""
        datestring = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        try:
            file = open( self.filename, "a")
            if buffertemp <> None:
                buffertemp = (datestring + ' : ' + buffertemp + '\n')
                file.write(buffertemp)
            file.close()
        except Exception as e:
            print e

    # Creates a file containing the process identification number
    # - Useful for killing a process or confirming that a process is running
    def writepidfile(self):
        filename = None
        try:
            if self.filename <> None:
                # strip any existing '.pid' suffix from the base name;
                # .index raises ValueError when absent (handled below)
                extindex = self.filename.index('.pid')
                if extindex > 0:
                    filename = self.filename[:extindex]
                else: filename = self.filename
        except Exception as e:
            print e
        if filename <> None and filename <> '':
            try:
                pid = os.getpid()
                self.filename = ('%s_%s.pid' % (filename, pid))
                file = open( self.filename, "w")
                if pid <> None:
                    file.write(str(pid))
                file.close()
                return pid
            except Exception as e:
                print ('Cannot write PID file.')
                print e

    def test(self):
        """Print the current filename and nametype (debug helper)."""
        print ('filename: %s' % self.filename)
        print ('nametype: %s' % self.nametype)
    # End test function
# For debug
#x = Filewriter('/TEST2/testfiletxt', 'timestamp')
#x = Filewriter('/TEST2/ABCOBS.vop', 'otsncd')
#x.test()
#x.write()
#x = Filewriter('/TEST2/filewriterclass_test.pid')
#x.writepidfile()
#x.removefile('/TEST2/filewriterclass_test.pid')
|
"""
Name: PatternDNA
Coder: HaoLing ZHANG (BGI-Research)[V1]
Current Version: 1
Functions:
(1) Initiate Pattern DNA from a DNA single strand or a protein;
(2) Get Information of the created Pattern DNA.
"""
import copy
from methods.inherent import *
# noinspection PyMethodMayBeStatic
class PatternDNA(object):
    """A Pattern DNA: a template strand plus its derived complementary strand."""

    def __init__(self, indices, protein=None, dna_strand=None):
        """
        Initiate the pattern DNA by protein or DNA single strand.
        A binary choice: pass exactly one of protein or dna_strand.
        :param indices: protein indices identifying this Pattern DNA.
        :param protein: the input protein (int[:]).
        :param dna_strand: the obtained DNA single strand (int[:]).
        :raises ValueError: when both or neither of protein/dna_strand is given.
        """
        if protein is None and dna_strand is None:
            raise ValueError("Pattern DNA " + str(indices) + " is Error, because the value cannot be obtained.")
        elif protein is not None and dna_strand is not None:
            raise ValueError("Pattern DNA " + str(indices) + " is Error, because the value cannot be selected.")

        self._indices = indices
        if protein is not None:
            self._t_strand = self._obtain_t_strand(protein)
            self._c_strand = self._obtain_c_strand(self._t_strand)
        else:
            self._t_strand = copy.deepcopy(dna_strand)
            self._c_strand = self._obtain_c_strand(dna_strand)

    def _obtain_t_strand(self, protein):
        """
        Get the corresponding template DNA strand from the protein.
        :param protein: the obtained protein.
        :return: corresponding information of the template DNA strand
                 (2 rows: primary bases, alternative bases or 0).
        """
        strand = [[], []]
        for amino_acid in protein:
            # look up the codon alternatives for this amino acid
            codon_chooser = base_maps[numpy.where(codons == amino_acid)]
            for index in range(3):
                strand[0].append(codon_chooser[0][index])
                # second row holds the alternative base, or 0 when unique
                if len(codon_chooser) > 1:
                    strand[1].append(codon_chooser[1][index])
                else:
                    strand[1].append(0)
        return numpy.array(strand)

    def _obtain_c_strand(self, template_strand):
        """
        Get the corresponding complementary DNA strand from the normal DNA strand.
        :param template_strand: normal strand of the pattern DNA.
        :return: corresponding information of complementary DNA strand
                 (reverse-complement; 0 placeholders preserved).
        """
        input_strand = numpy.transpose(copy.deepcopy(template_strand))
        strand = [[], []]
        for bases in input_strand:
            # insert at the front => reverse order of the template
            strand[0].insert(0, c_pairing[numpy.where(t_pairing == bases[0])][0])
            if bases[1] != 0:
                strand[1].insert(0, c_pairing[numpy.where(t_pairing == bases[1])][0])
            else:
                strand[1].insert(0, 0)
        return numpy.array(strand)

    def get_strand(self, strand_type=0):
        """
        Get the strand by strand type.
        1 is the template strand; -1 is the complementary strand; anything
        else returns both strands.
        :return: list of strand arrays.
        """
        if strand_type == 1:
            return [self._t_strand]
        elif strand_type == -1:
            return [self._c_strand]
        else:
            return [self._t_strand, self._c_strand]

    def get_dna_fragment(self, start_position=0, stop_position=0):
        """
        Get the intuitive images of Pattern DNA.
        :param start_position: start position of reading the Pattern DNA.
        :param stop_position: stop position of reading the Pattern DNA. (positive number)
        :return: intuitive strand information of this Pattern DNA.

        NOTE(review): stop_position is currently ignored and both branches
        below produce the same slice -- confirm whether trimming the tail
        was intended.
        """
        if start_position != 0:
            information = [["".join([chr(data) for data in self._t_strand[0]][start_position: ]),
                            "".join([chr(data) for data in self._t_strand[1]][start_position: ])],
                           ["".join([chr(data) for data in self._c_strand[0]][start_position: ]),
                            "".join([chr(data) for data in self._c_strand[1]][start_position: ])]]
        else:
            information = [["".join([chr(data) for data in self._t_strand[0]][start_position:]),
                            "".join([chr(data) for data in self._t_strand[1]][start_position:])],
                           ["".join([chr(data) for data in self._c_strand[0]][start_position:]),
                            "".join([chr(data) for data in self._c_strand[1]][start_position:])]]
        return information

    def get_indices(self):
        """
        Get the protein indices of this Pattern DNA.
        :return: protein indices of this Pattern DNA.
        """
        return self._indices

    def get_all_proteins(self):
        # not implemented yet
        pass

    def __str__(self):
        """
        Get the whole intuitive information of this Pattern DNA.
        :return: whole intuitive information of this Pattern DNA.
        """
        information = self.get_dna_fragment()
        return "protein indices = " + str(self._indices) + ": \n" + \
               "t~ strand = " + str(information[0][0]) + "\n" + \
               "            " + str(information[0][1]) + "\n" + \
               "c~ strand = " + str(information[1][0]) + "\n" + \
               "            " + str(information[1][1]) + "\n"
|
997,475 | fac3e8439eb2c5868833217539bc1f6c04e049f6 | from .models import person , Message , roomName
from rest_framework import serializers
class person_serializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing a person's image, username and password."""
    class Meta:
        model = person
        fields = ['image' , 'username' , 'password']
class Message_serializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing every field of the Message model."""
    class Meta :
        model = Message
        fields = '__all__'
class Room_serializer (serializers.ModelSerializer):
    """DRF serializer exposing every field of the roomName model."""
    class Meta:
        model = roomName
        fields = '__all__'
997,476 | 4f0fff11fd04aaf174d3bc7074205845edbcbaaa | """Custom briefy.alexandria events for model Collection."""
from briefy.alexandria import logger
from briefy.ws.resources import events
class CollectionCreatedEvent(events.ObjectCreatedEvent):
    """Event to notify collection creation."""

    # identifier for this event type
    event_name = 'collection.created'
    logger = logger
class CollectionUpdatedEvent(events.ObjectUpdatedEvent):
    """Event to notify collection update."""

    # identifier for this event type
    event_name = 'collection.updated'
    logger = logger
class CollectionDeletedEvent(events.ObjectDeletedEvent):
    """Event to notify collection delete.

    NOTE(review): unlike the created/updated events above, no event_name
    or logger is set here -- confirm the base class supplies them.
    """
class CollectionLoadedEvent(events.ObjectLoadedEvent):
    """Event to notify collection load.

    NOTE(review): no event_name or logger is set here -- confirm the base
    class supplies them.
    """
|
997,477 | 88b8606b06f375ef3e8ba70201571a39a09ae4b8 | '''
Generates recommendation for the user based on
bmi, smoking, tobacco usage, alcohol consumption, exercise
travel time, sleep time, job type.
'''
import csv, re
featureWeights_dict={}
healthy_bmi = 0
moderate_travel = 1
excess_travel = 2
low_sleep = 0
moderate_sleep = 1
no_exercise = 0
moderate_exercise = 1
optimal_exercise = 2
def preprocessData(data):
    """Normalize raw survey answers in *data* into [level, num_levels] pairs.

    Mutates and returns the same dict: three-level features keep their raw
    value, boolean features become 0/1, free-text ailment/job fields are
    tokenized and matched against risk keyword sets, and age/BMI collapse
    to healthy(0)/unhealthy(1) flags.
    """
    # Three-level features: keep the raw value, record the level count.
    for feature in ("exercise", "travel_time", "sleep_time"):
        data[feature] = [data[feature], 3]
    # Boolean lifestyle features become two-level 0/1 features.
    for feature in ("drink", "tobacco", "smoke"):
        data[feature] = [1 if data[feature] else 0, 2]

    # Bag of words to identify past ailments and dangerous job types.
    risky_ailments = {'heart', 'brain', 'kidney', 'liver', 'breating', 'asthema'}
    risky_jobs = {'army', 'defence', 'factory'}
    tokenizer = re.compile(r"\s+,*\s*")

    def _tokens(text):
        # split on whitespace (optionally followed by commas), drop empties
        return {token for token in tokenizer.split(text) if token}

    data["ailments"] = [1 if _tokens(data["ailments"]) & risky_ailments else 0, 2]
    data["job_type"] = [1 if _tokens(data["job_type"]) & risky_jobs else 0, 2]

    # Age flag: 0 (healthy) when strictly between 18 and 45.
    data["age"] = [0 if 18 < data["age"] < 45 else 1, 2]
    # Imperial BMI = weight(lb) / height(in)^2 * 703; healthy is (18.5, 24.9).
    bmi_value = data["weight"] / (data["height"] * data["height"]) * 703
    data["bmi"] = [0 if 18.5 < bmi_value < 24.9 else 1, 2]
    return data
def initialize_feature_weights():
    """Load feature weights from the tab-separated CSV into the module dict.

    Each row looks like "name<TAB>weight<TAB>relationship"; the feature
    name becomes the key and the remaining columns the value list.
    :return: the populated featureWeights_dict (also mutated in place).
    """
    reader = csv.reader(open('pyScripts/feature_weights.csv'))
    for row in reader:
        value=[]
        # the whole row is one comma-free cell; split it on tabs ourselves
        split_row= row[0].split('\t')
        key=split_row[0]
        value=split_row[1:]
        featureWeights_dict[key]=value
    # print(featureWeights_dict)
    return featureWeights_dict
def getPointsForImprovement(current, levels, weight, maxHealthScore):
    """Healthscore points gained by fixing a feature.

    Computed as weight * maxHealthScore * (current / levels), rounded to
    two decimals. *weight* may arrive as a string (from the CSV).
    """
    fraction = current / levels
    return round(float(weight) * maxHealthScore * fraction, 2)
def getBmiRec(bmi_data):
    """Return a BMI recommendation string, or None when BMI is healthy.

    :param bmi_data: BMI flag (the module-level ``healthy_bmi`` value, 0,
                     means healthy).

    Fix: the original compared the undefined name ``bmi_number``, which
    raised NameError on every call; it now tests the ``bmi_data`` argument.
    """
    if bmi_data != healthy_bmi:
        return ("If you get your bmi (body-mass-index) in the healthly range "
                "(18.5 - 24. 9) your healthscore will improve 100 points.")
    return None
def getDrinkRec(drinks):
    """Recommendation to stop drinking, or None if the user does not drink."""
    if not drinks:
        return None
    return "If you stop drinking alcohol your healthscore will improve by  50 points."
def getExerciseRec(exercise):
    """Exercise recommendation keyed on the activity level; None when optimal."""
    messages = {
        no_exercise: "If start exercising 6 hours a week your healthscore will improve  17 points.",
        moderate_exercise: "If you exercise more than 15 hours a week  your healthscore will improve 17 points.",
    }
    # unknown / optimal levels fall through to None
    return messages.get(exercise)
def getSmokeRec(smokes):
    """Recommendation to quit smoking, or None for non-smokers."""
    return "If you quit smoking your healthscore will improve 50 points." if smokes else None
def getTobaccoRec(uses_tobacco):
    """Recommendation to stop tobacco, or None for non-users."""
    return "If you stop using tobacco your healthscore will improve 50 points." if uses_tobacco else None
def getTravelRec(travel_time):
    """Travel-time recommendation keyed on the travel level; None when fine."""
    messages = {
        excess_travel: ("If you reduce your travel_time to under 10 hours "
                        "your healthscore will improve 17 points."),
        moderate_travel: ("If you reduce your travel_time to under 5 hours "
                          "your healthscore will improve 17 points."),
    }
    # the lowest travel level falls through to None
    return messages.get(travel_time)
def getSleepRec(sleep):
    """Sleep recommendation keyed on the sleep level; None when sufficient."""
    messages = {
        low_sleep: ("If you increase sleep to more than 6 hours a day "
                    "your healthscore will improve 17 points."),
        moderate_sleep: ("If you increase your sleep to more than 8 hours a day "
                         "your healthscore will improve 17 points."),
    }
    # the highest sleep level falls through to None
    return messages.get(sleep)
def getRecommendationPointsForKey(data, featureWeight, maxHealthScore):
    """Dispatch to the negative/positive calculator for one feature.

    :param data: [current_level, num_levels] pair for the feature.
    :param featureWeight: [weight, relationship] from the weights CSV.
    """
    weight, relationship = featureWeight[0], featureWeight[1]
    if relationship == 'negative':
        return getNegativeRecommendation(data, weight, maxHealthScore)
    return getPositiveRecommendation(data, weight, maxHealthScore)
def getNegativeRecommendation(data, weight, maxHealthScore):
    """Improvement points for a negatively-correlated feature.

    Returns None when the feature is already at level 0 (nothing to fix).
    """
    current, levels = data[0], data[1]
    if current == 0:
        return None
    return getPointsForImprovement(current, levels, weight, maxHealthScore)
#Calculates improvement for a key that has a positive relationship
def getPositiveRecommendation(data, weight, maxHealthScore):
    """Improvement points for a positively-correlated feature.

    *data* is a [current_level, num_levels] pair; the user can climb
    (num_levels - current_level - 1) levels, each worth
    weight * maxHealthScore / num_levels points. Returns None when the
    feature is already at the top level (2).

    Fix: removed leftover debug prints ("New method", type/weight dumps)
    that spammed stdout on every call, plus a dead commented-out call.
    """
    current, levels = data[0], data[1]
    if current == 2:
        return None
    return float(weight) * ((levels - current - 1) / levels) * maxHealthScore
def initializeStrDic():
    """Map each feature to its per-level recommendation fragments.

    Index 0 is the "already fine" fragment (empty for binary features);
    higher indices describe the change needed at that level.
    """
    return {
        "smoke": ["", "stop smoking"],
        "exercise": ["increase your exercise to atleast 6 hours a week",
                     "increase your exercise to more than 15 hours a week"],
        "sleep_time": ["increase the amount you sleep to atleast 6 hours a day",
                       "increase the amount you sleep to above 8 hours a day"],
        "bmi": ["", "get your bmi in the healthy range (18.5 - 24 .9)"],
        "drink": ["", "stop drinking"],
        "tobacco": ["", "stop using tobacco"],
        "travel_time": ["", "reduce the travel time to less than 5 hours",
                        "reduce the travel to less than 10 hours"],
    }
def processRecommendations(data, maxHealthScore):
    """Build recommendation sentences and the projected healthcare saving.

    Preprocesses *data*, scores each lifestyle feature against the CSV
    feature weights, and returns (all_recommendations, savings): the first
    entry of all_recommendations is the combined sentence, the rest are
    per-feature sentences; savings is the points fraction applied to
    data["healthcare_costs"], rounded to 2 decimals.
    """
    '''
    recs = {}
    recs["bmi"] = getBmiRec(data["bmi"], featureWeights["bmi"], maxHealthScore)
    recs["drink"] = getDrinkRec(data["drink"][0], featureWeights["drink"], maxHealthScore)
    recs["exercise"] = getExerciseRec(data["exercise"][0], featureWeights["exercise"], maxHealthScore)
    recs["smoke"] = getSmokeRec(data["smoke"][0], featureWeights["smoke"], maxHealthScore)
    recs["tobacco"] = getTobaccoRec(data["tobacco"][0], featureWeights["tobacco"], maxHealthScore)
    recs["travel_time"] = getTravelRec(data["travel_time"][0],
    featureWeights["travel_time"], maxHealthScore)
    recs["sleep_time"] = getSleepRec(data["sleep_time"][0], featureWeights["sleep_time"], maxHealthScore )
    '''
    print("processRecommendations")
    data = preprocessData(data)
    print(data)
    all_recommendations = []
    print("end")
    featureWeights = initialize_feature_weights()
    points = 0.0
    resultStrings = []
    recStrDic = initializeStrDic()
    print("recStrDict : ",recStrDic)
    # score every feature; collect per-feature sentences and a running total
    for key in ["exercise","sleep_time","drink","tobacco","smoke","bmi","travel_time"]:
        result = getRecommendationPointsForKey(data[key], featureWeights[key], maxHealthScore)
        if result is not None:
            points += result
            print("Result is ",result);
            resultStrings.append(recStrDic[key][data[key][0]])
            all_recommendations.append(getRecommendationString([recStrDic[key][data[key][0]]],result))
    # append the combined sentence covering every recommendation
    all_recommendations.append(getRecommendationString(resultStrings, points))
    # for key in ["exercise","sleep_time","drink","tobacco","smoke","bmi","travel_time"]:
    #    all_recommendations.append(getRecommendationString([recStrDic[key][data[key][0]]],getRecommendationPointsForKey(data[key], featureWeights[key], maxHealthScore)))
    # move the combined sentence to the front of the list
    all_recommendations = [all_recommendations[-1]]+all_recommendations[0:len(all_recommendations)-1]
    return all_recommendations,round(((points/maxHealthScore)*data["healthcare_costs"]),2)
def getRecommendationString(resultStrings, points):
    """Join recommendation fragments into one sentence with the total points.

    Returns the sentence as a str; with no fragments it returns the list
    ["You are in good shape."] (quirky return type kept for callers).
    """
    if not resultStrings:
        return ["You are in good shape."]
    suffix = " your healthscore will improve by " + str(round(points, 2)) + " points."
    if len(resultStrings) == 1:
        return "If you " + resultStrings[0] + suffix
    # comma-join all but the last fragment, then "and <last>"
    leading = "".join(fragment + ", " for fragment in resultStrings[:-1])
    return "If you " + leading + "and " + resultStrings[-1] + suffix
if __name__ == "__main__":
data = {}
data["exercise"] = [0,3]
data["travel_time"] = [0,3]
data["sleep_time"] = [0,3]
data["drink"] = [1,2]
data["tobacco"] = [1,2]
data["smoke"] = [1,2]
data["bmi"] = [1,2]
featureWeights = {'age': ['0.1', 'negative'], 'bmi': ['0.2', 'negative'],
'ailments': ['0.2', 'negative'], 'tobacco': ['0.1', 'negative'], 'smoke': ['0.1', 'negative'],
'drink': ['0.1', 'negative'], 'exercise': ['0.05', 'positive'], 'travel_time': ['0.05', 'negative'],
'sleep_time': ['0.05', 'positive'], 'job_type': ['0.05', 'negative']}
print(processRecommendations(data, 1000))
|
class osztaly():
    def __init__(self, *args, **kwargs):
        # NOTE(review): super().__init__ is called with 'initial' still in
        # kwargs; with a plain object base this raises TypeError -- confirm
        # the intended base class consumes **kwargs.
        super(osztaly, self).__init__(*args, **kwargs)
        initial = kwargs.pop('initial')
        # keep the 'valtozo' entry of the initial dict on the instance
        self.valtozo = initial['valtozo']
#instance = osztaly(initial={"valtozo": "ertek"})
#print (instance.valtozo)
'''
def test(*args, **kwargs):
initial = kwargs.pop('initial')
keresztnev = initial['keresztnev']
return keresztnev
print (test, {'keresztnev': "Éva"})
def test(**kwargs):
print (kwargs['a'])
print (kwargs['b'])
print (kwargs['c'])
args = { 'b': 2, 'c': 3}
test( a=1, **args )
'''
def test(**kwargs):
    """Print the 'a', 'b' and 'c' keyword arguments, one per line."""
    for name in ('a', 'b', 'c'):
        print(kwargs[name])
initial = {'nev': "Éva"}
args = initial
def mytest(*args, **kwargs):
    """Return the 'nev' entry of the required 'initial' keyword argument."""
    initial = kwargs.pop('initial')
    return initial['nev']
print(mytest(initial = {"nev": "Eva"})) |
997,479 | 3545f0e6995ed0570d1cbf22b6203ad1c5c63c9f | import featuretools as ft
import categorical_encoding as ce
from featuretools.tests.testing_utils import make_ecommerce_entityset
def create_feature_matrix():
    """Build a small feature matrix from the featuretools e-commerce test entityset.

    :return: (feature_matrix, features, f1, f2, es, ids) where f1/f2 are
             the product_id and value features over the "log" entity and
             ids are the instance ids used for calculation.
    """
    es = make_ecommerce_entityset()
    f1 = ft.Feature(es["log"]["product_id"])
    f2 = ft.Feature(es["log"]["value"])
    features = [f1, f2]
    ids = [0, 1, 2, 3, 4, 5]
    feature_matrix = ft.calculate_feature_matrix(features, es, instance_ids=ids)
    return feature_matrix, features, f1, f2, es, ids
997,480 | 18faaa9298baa632efbfdd1c593673d878ecf7a8 | import os
import sys
import yaml
config_file = "config/certman-sample.conf"
def load_config(config_file):
    """Load a YAML configuration file.

    :param config_file: path to the YAML file.
    :return: the parsed config, or False when the file does not exist.

    Fix: the original shadowed the ``config_file`` path argument with the
    open file handle; the handle now has its own name, and the early
    return for a missing file makes the happy path unindented.
    """
    if not os.path.isfile(config_file):
        print('Config file ' + config_file + ' does not exist!')
        return False
    with open(config_file) as stream:
        # FullLoader parses plain YAML without arbitrary object construction
        return yaml.load(stream, Loader=yaml.FullLoader)
c = load_config(config_file) |
997,481 | 9881374f9704dca54d8eea42fba44477d89d15f3 | # Enter your code here. Read input from STDIN. Print output to STDOUT
# For each test case, read two whitespace-separated tokens and print their
# quotient, reporting conversion errors and division by zero instead of
# crashing (HackerRank "Exceptions" style exercise).
for _ in range(int(input())):
    ip = input().split()
    try:
        a, b = int(ip[0]), int(ip[1])
        result = a/b
        print(result)
    except ValueError as e:
        # non-integer token: echo Python's own conversion message
        print("Error Code:", e)
        # print("Error Code:", e)
    except ZeroDivisionError as e:
        print("Error Code: integer division or modulo by zero")
997,482 | 4031384487e30db3258e6aff25516c3f96a809aa | #BaseNet contain basic functions exists in alny other classes. Nothing fancy. For example, get parameters, init weights
import torch as t
import torchvision as tv
import torch.nn.utils.parametrizations
import numpy as np
import math
import lib
class BaseNet(t.nn.Module):
    """Base module with shared helpers: parameter counting and weight init."""

    def __init__(self):
        super(BaseNet, self).__init__()

    def get_info(self):
        """Print the net's class name and parameter count (in millions).

        :return: the raw parameter count as an int.

        Fix: the original accumulated parameter *tensors* (``num_params +=
        param``), which produces a shape error instead of a count; it now
        sums ``param.numel()``.
        """
        if isinstance(self, list):
            # original guarded against being handed a [net] list; keep the
            # guard but raise a meaningful error instead of a bare `raise`
            raise TypeError("get_info expects a module, not a list")
        num_params = sum(param.numel() for param in self.parameters())
        print(f'Net name: {type(self).__name__}, Net size: {num_params/1e6} Millions')
        return num_params

    def init_weights(self, init_type='normal', gain=0.02):
        """Recursively initialize Conv/Linear/InstanceNorm2d weights.

        :param init_type: 'normal', 'xavier', 'xavier_uniform', 'kaiming',
                          'orthogonal' or 'none' (pytorch default init).
        :param gain: scale used by most init schemes.
        :raises NotImplementedError: for an unknown init_type.
        """
        def init_func(m):
            classname = m.__class__.__name__
            if classname.find('InstanceNorm2d') != -1:
                # norm layers: unit weight, zero bias
                if hasattr(m, 'weight') and m.weight is not None:
                    t.nn.init.constant_(m.weight.data, 1.0)
                if hasattr(m, 'bias') and m.bias is not None:
                    t.nn.init.constant_(m.bias.data, 0.0)
            elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                if init_type == 'normal':
                    t.nn.init.normal_(m.weight.data, 0.0, gain)
                elif init_type == 'xavier':
                    t.nn.init.xavier_normal_(m.weight.data, gain=gain)
                elif init_type == 'xavier_uniform':
                    t.nn.init.xavier_uniform_(m.weight.data, gain=1.0)
                elif init_type == 'kaiming':
                    t.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    t.nn.init.orthogonal_(m.weight.data, gain=gain)
                elif init_type == 'none':  # uses pytorch's default init method
                    m.reset_parameters()
                else:
                    raise NotImplementedError(
                        'initialization method [%s] is not implemented' % init_type)
                if hasattr(m, 'bias') and m.bias is not None:
                    t.nn.init.constant_(m.bias.data, 0.0)

        self.apply(init_func)  # applies init_func to all submodules recursively
        # let submodules with their own init_weights override the generic one
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)

    def forward(self, masked_frames, masks):
        """Subclasses must implement.

        Fix: a bare `raise` outside an except block raises
        RuntimeError('No active exception...'); use NotImplementedError.
        """
        raise NotImplementedError

    def infer(self, feat, masks):
        """Subclasses must implement."""
        raise NotImplementedError
class SuperResV0(BaseNet):
    """Baseline super-resolution net reconstructing the architecture from the
    super-res paper: bicubic upsample -> conv stem -> residual encoder stack
    -> depthwise/pointwise decoder with a transpose-conv back to RGB."""

    def __init__(self, hidden, stack_num, scale_factor, init_weights=True):
        super(SuperResV0, self).__init__()
        self.scale_factor = scale_factor
        self.hidden = hidden
        self.stack_num = stack_num

        # Stem: lift the RGB input into the hidden feature space.
        self.encoder0 = t.nn.Sequential(
            t.nn.Conv2d(3, self.hidden, kernel_size=3, stride=1, padding=1),
            t.nn.LeakyReLU(0.2, inplace=True),
            t.nn.BatchNorm2d(self.hidden),
        )

        # (stack_num - 1) residual feed-forward blocks.
        self.encoders = t.nn.Sequential(
            *[FeedForward(channels_in=self.hidden, channels_out=self.hidden)
              for _ in range(self.stack_num - 1)]
        )

        # Decoder: depthwise conv, pointwise conv, then upsample to RGB.
        self.decoders = t.nn.Sequential(
            t.nn.Conv2d(self.hidden, self.hidden, kernel_size=3, stride=1, padding=1, groups=self.hidden),
            t.nn.LeakyReLU(0.2, inplace=True),
            t.nn.Conv2d(self.hidden, self.hidden, kernel_size=1),
            t.nn.ConvTranspose2d(self.hidden, 3, kernel_size=2, stride=2),
        )

        if init_weights:
            self.init_weights()

    def forward(self, x):
        upsampled = t.nn.functional.interpolate(x, scale_factor=self.scale_factor, mode='bicubic')
        features = self.encoder0(upsampled)
        # global residual connection around the encoder stack
        features = features + self.encoders(features)
        return t.tanh(self.decoders(features))
class SuperRes(SuperResV0):
    """SuperResV0 variant whose encoder stack uses depthwise-separable blocks."""

    def __init__(self, hidden, stack_num, scale_factor, init_weights=True):
        super(SuperRes, self).__init__(hidden, stack_num, scale_factor, init_weights)
        # Replace the parent's plain conv blocks with depthwise-separable ones.
        replacement = [FeedForwardDWSConv(channels_in=self.hidden, channels_out=self.hidden)
                       for _ in range(self.stack_num - 1)]
        self.encoders = t.nn.Sequential(*replacement)
####################################
class FeedForward(t.nn.Module):
    """Residual conv block: x + BN(LeakyReLU(Conv3x3(x)))."""

    def __init__(self, channels_in, channels_out):
        super(FeedForward, self).__init__()
        self.conv = t.nn.Sequential(
            t.nn.Conv2d(channels_in, channels_out, kernel_size=3, stride=1, padding=1),
            t.nn.LeakyReLU(0.2, inplace=True),
            t.nn.BatchNorm2d(channels_out),
        )

    def forward(self, x):
        # residual connection around the conv branch
        return x + self.conv(x)
class FeedForwardDWSConv(t.nn.Module):
    """Residual block from a depthwise 3x3 conv followed by a pointwise 1x1 conv.

    Pattern adapted from:
    https://discuss.pytorch.org/t/how-to-modify-a-conv2d-to-depthwise-separable-convolution/15843/6
    """

    def __init__(self, channels_in, channels_out):
        super(FeedForwardDWSConv, self).__init__()
        self.conv = t.nn.Sequential(
            # depthwise 3x3: one filter per input channel (groups=channels_in)
            t.nn.Conv2d(channels_in, channels_out, kernel_size=3, stride=1, padding=1, groups=channels_in),
            t.nn.LeakyReLU(0.2, inplace=True),
            t.nn.BatchNorm2d(channels_out),
            # pointwise 1x1: mixes information across channels
            t.nn.Conv2d(channels_in, channels_out, kernel_size=1),
            t.nn.LeakyReLU(0.2, inplace=True),
            t.nn.BatchNorm2d(channels_out),
        )

    def forward(self, x):
        # residual connection around the separable conv branch
        return x + self.conv(x)
######Discriminator code#############
class Discriminator(BaseNet):
    """3D-conv video discriminator with spectral-normalized layers.

    Fixes vs. the original:
    * ``super(DiscriminatorV0, ...)`` referenced a non-existent class name
      and raised NameError on construction; it now uses ``Discriminator``.
    * ``torch.sigmoid`` / ``torch.transpose`` referenced the unimported
      name ``torch`` (this file imports the module as ``t``).

    NOTE(review): the ``use_spectral_norm`` flag is accepted but never
    consulted (spectral norm is always applied) -- confirm intent.
    """

    def __init__(self, in_channels=3, use_sigmoid=False, use_spectral_norm=True, init_weights=True):
        super(Discriminator, self).__init__()
        self.use_sigmoid = use_sigmoid
        nf = 64
        sptrnorm = t.nn.utils.spectral_norm
        # Six Conv3d stages; temporal size is preserved (stride 1, pad 1)
        # while the spatial size is halved at every stage (stride 2).
        self.conv = t.nn.Sequential(
            sptrnorm(t.nn.Conv3d(in_channels=in_channels, out_channels=nf*1, kernel_size=(3, 5, 5), stride=(1, 2, 2),
                                 padding=1, bias=False)),
            t.nn.LeakyReLU(0.2, inplace=True),
            sptrnorm(t.nn.Conv3d(nf*1, nf*2, kernel_size=(3, 5, 5), stride=(1, 2, 2),
                                 padding=(1, 2, 2), bias=False)),
            t.nn.LeakyReLU(0.2, inplace=True),
            sptrnorm(t.nn.Conv3d(nf * 2, nf * 4, kernel_size=(3, 5, 5), stride=(1, 2, 2),
                                 padding=(1, 2, 2), bias=False)),
            t.nn.LeakyReLU(0.2, inplace=True),
            sptrnorm(t.nn.Conv3d(nf * 4, nf * 4, kernel_size=(3, 5, 5), stride=(1, 2, 2),
                                 padding=(1, 2, 2), bias=False)),
            t.nn.LeakyReLU(0.2, inplace=True),
            sptrnorm(t.nn.Conv3d(nf * 4, nf * 4, kernel_size=(3, 5, 5), stride=(1, 2, 2),
                                 padding=(1, 2, 2), bias=False)),
            t.nn.LeakyReLU(0.2, inplace=True),
            t.nn.Conv3d(nf * 4, nf * 4, kernel_size=(3, 5, 5),
                        stride=(1, 2, 2), padding=(1, 2, 2))
        )
        if init_weights:
            self.init_weights()

    def forward(self, xs):
        """Score a batch of clips.

        :param xs: tensor laid out (B, T, C, H, W); swapped to
                   (B, C, T, H, W) for Conv3d and swapped back on output.
        """
        xs_t = t.transpose(xs, 1, 2)
        feat = self.conv(xs_t)
        if self.use_sigmoid:
            feat = t.sigmoid(feat)
        out = t.transpose(feat, 1, 2)  # back to (B, T, C', H', W')
        return out
997,483 | bcbacacc17842d6199846cb82699268915eda0f5 | n = int(input())
a = n // 11  # complete groups of 11 in n
b = n - a * 11  # leftover after removing the groups of 11
# Each full group of 11 contributes 2 to the answer; a remainder of 1-6
# adds one more, 7-10 adds two.
# NOTE(review): presumably a competitive-programming counting problem
# (e.g. minimum items summing to n) -- confirm against the original task.
if (b == 0):
    ans = a * 2
elif (b <= 6):
    ans = 1 + (a * 2)
else:
    ans = 2 + (a * 2)
print(ans)
997,484 | 37248372cbba905157fd3f9d5ecc1c193881b72c | # from VideoInfo import Mp4Url
# mp4 = Mp4Url('http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4')
# mp4.getAll()
# print(mp4)
# Parse a local JPEG file with the project's Jpeg parser and dump its
# segment list to stdout (ad-hoc debugging script).
from temp.jpeg import Jpeg

j = Jpeg.from_file(
    r"C:\\Users\\kentxxq\Desktop\\a01de1df06bbc2563316d5e579cb9b79_1200x500.jpg"
)
print(j.segments)
997,485 | 444176091686e5189f43e2c4931c724fa587df42 | # -*- coding: utf-8 -*-
# file: tests.py
# author: JinTian
# time: 17/04/2017 2:40 PM
# Copyright 2017 JinTian. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
from utils.cookies import get_cookie_from_network
from settings.accounts import accounts
from settings.config import COOKIES_SAVE_PATH, DEFAULT_USER_ID
import pickle
import requests
def test():
    """Smoke-test login: fetch Weibo cookies for the first configured account."""
    cookies = get_cookie_from_network(accounts[0]['id'], accounts[0]['password'])
    print(cookies)
def test_for_cookies():
    """Load the pickled cookie store from disk and inspect its contents."""
    with open(COOKIES_SAVE_PATH, 'rb') as f:
        cookies_dict = pickle.load(f)
    print(cookies_dict)
    user_id = '15116123160'
    url = 'http://weibo.cn/u/%s?filter=%s&page=1' % (DEFAULT_USER_ID, 0)
    print(url)
    # NOTE(review): `cookie` is built but never used afterwards.
    cookie = {
        "Cookie": cookies_dict[user_id]
    }
    print(list(cookies_dict.keys()))
def test_numric():
    """Demonstrate float() parsing of strings.

    A numeric string converts cleanly; a path-like string does not. The
    original version crashed with an unhandled ValueError on the second
    conversion — the failure is now caught and reported instead.
    """
    a = '124556'
    b = './huogeh/grjtioh'
    print(float(a))
    try:
        print(float(b))
    except ValueError as err:
        # float() raises ValueError for non-numeric strings such as paths.
        print('cannot convert %r to float: %s' % (b, err))
if __name__ == '__main__':
    # Only the numeric-parsing check runs when invoked as a script.
    test_numric()
997,486 | 19b522684bc575dd531e0e07291bfe0938c762c7 | # -*- coding=utf-8 -*-
import os
import cv2
from tqdm import tqdm
import subprocess
# Assemble super-resolved JPEG frame sequences (100 frames per clip) back
# into .mp4 videos.
img_size = (3840, 2160)
img_det_dir = '../../mmsr_dest/'
assert os.path.exists(img_det_dir), "出错, 路径{}不存在。。。 ".format(img_det_dir)
# img_size = (960, 540)
# img_det_dir = '../../SDR_540p_jpg/'
dest_vids = '../../mmsr_dest_vids/'
if not os.path.exists(dest_vids):
    os.mkdir(dest_vids)
video_names = os.listdir(img_det_dir)
# first_file_name = file_name_list[0]
# temp_file_path = first_file_name[0:first_file_name.rfind("_")] + ".txt"
# merge_file_path = first_file_name[0:first_file_name_rfind("_")] + ".mp4"
#
# cmd = "ffmpeg -f concat -loglevel error -safe 0 -i " + \
#     temp_file_path + " -g 10 -s 640*340 -q 20 -c -copy " + merge_file_path
# print(cmd)
# subprocess.call(cmd, shell=True)
fps = 30
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
for vid in tqdm(video_names):
    video_dir = os.path.join(dest_vids, vid) + '.mp4'
    file_names = os.listdir(os.path.join(img_det_dir, vid))
    file_names.sort()
    # Only process complete 100-frame clips that have not been written yet.
    if len(file_names) == 100 and not os.path.isfile(video_dir):
        # NOTE(review): each clip is encoded twice — once via the OpenCV
        # VideoWriter below and once via the ffmpeg command; one is redundant.
        video_writer = cv2.VideoWriter(video_dir, fourcc, fps, img_size)
        for file_name in file_names:
            img_path = os.path.join(img_det_dir, vid, file_name)
            img = cv2.imread(img_path)
            # print("Loading img from: {}".format(img_path))
            video_writer.write(img)
        # NOTE(review): this command looks broken — "- threads" and "- i"
        # contain stray spaces, and "%%d" reaches ffmpeg literally as "%%d"
        # because str.format does not consume percent signs; confirm before
        # relying on the ffmpeg path.
        cmd = "ffmpeg - threads 4 -r 24000/1001 - i {}/%%d.jpg -vcodec libx265 -pix_fmt yuv422p -crf 10 {}{}.mp4" \
            .format(os.path.join(img_det_dir, vid), dest_vids, vid)
        # cmd = "ffmpeg - i {}/%%d.jpg - c:v libx264 - preset veryslow - crf 18 -c:a copy D:\dest1.mp4"
        print(cmd)
        subprocess.call(cmd, shell=True)
        video_writer.release()
    else:
        print("{} already exsists, next....".format(video_dir))
        continue
# (Translated) "Image-sequence encoding: the organizers recommend the
# following sample command/parameters:
# ffmpeg -r 24000/1001 -i pngs/out%4d.png -vcodec libx265 -pix_fmt yuv422p -crf 10 test.mp4
# Hereby noted."
997,487 | 2f5c9a8a56b5d8a9cecfb49c82d123603327f236 | """ Helper functions for uploading image to Flickr """
import flickr_api
from django.conf import settings
from django.core.files.storage import FileSystemStorage
def handle_uploaded_file(uploaded_file, duck_id, duck_name, comments):
    """Persist an uploaded duck-location photo to disk and push it to Flickr.

    Returns the Flickr photo-info dictionary produced by upload_to_flickr.
    """
    title = 'Duck #%s (%s)' % (duck_id, duck_name)
    tags = "duckiehunt"
    file_path = write_upload_to_file(uploaded_file, settings.UPLOAD_PATH)
    return upload_to_flickr(file_path, title, comments,
                            settings.FLICKR_PHOTO_IS_PUBLIC, tags)
def write_upload_to_file(photo_file, upload_path):
    """Write an in-memory uploaded file to disk; return its absolute path."""
    storage = FileSystemStorage()
    saved_name = storage.save(upload_path + photo_file.name, photo_file)
    return storage.path(saved_name)
def upload_to_flickr(photo_file, title, comments, is_public, tags):
    """Upload a photo to Flickr; return its info dict with sizes attached."""
    # NOTE(review): the is_public flag reportedly has no effect upstream.
    photo = flickr_api.upload(photo_file=photo_file, title=title,
                              is_public=is_public, tags=tags,
                              description=comments)
    info = photo.getInfo()
    info['sizes'] = photo.getSizes()
    return info
|
997,488 | a899e8df0199c4e409010c38b83f65f7e1a786f2 | """
You are given a non-empty list of words.
Write a function that returns the *k* most frequent elements.
The list that you return should be sorted by frequency from highest to lowest.
If two words have the same frequency, then the word with the lower alphabetical
order should come first.
Example 1:
```plaintext
Input:
words = ["lambda", "school", "rules", "lambda", "school", "rocks"]
k = 2
Output:
["lambda", "school"]
Explanation:
"lambda" and "school" are the two most frequent words.
```
Example 2:
```plaintext
Input:
words = ["the", "sky", "is", "cloudy", "the", "the", "the", "cloudy", "is", "is"]
k = 4
Output:
["the", "is", "cloudy", "sky"]
Explanation:
"the", "is", "cloudy", and "sky" are the four most frequent words. The words
are sorted from highest frequency to lowest.
```
Notes:
- `k` is always valid: `1 <= k <= number of unique elements`.
- words in the input list only contain lowercase letters.
```
"""
def top_k_frequent(words, k):
    """Return the k most frequent words, highest frequency first.

    Ties are broken by ascending alphabetical order, per the module docstring.

    Args:
        words: non-empty list of lowercase words.
        k: valid count, 1 <= k <= number of unique words.

    Returns:
        List[str] of length k, sorted by (descending frequency, ascending word).
    """
    # Counter replaces the hand-rolled frequency dict; debug prints removed.
    from collections import Counter
    counts = Counter(words)
    # Negated count gives descending frequency; the word itself breaks ties
    # alphabetically in a single sort key.
    return sorted(counts, key=lambda w: (-counts[w], w))[:k]
print(top_k_frequent(["lambda", "school", "rules", "lambda", "school", "rocks"], 2))
print(
top_k_frequent(
["the", "sky", "is", "cloudy", "the", "the", "the", "cloudy", "is", "is"], 4
)
)
# uper
# input array of strings
# frequency number k
# output needs to be sorted by frequency from highest to lowest
# plan:
# find the frequencies
# then can we get k most frequent
# build a frequency table / dictionary
# start with an empty dictionary
# write a for loop and set the words as keys and the counts of the words as the value
# if word exists count += 1
# else add it to the dictionary
# dict.items()
# list(dict)
# sort by value
# if they are the same value / equal -> sort by key
|
997,489 | ab056435854a00949989a287be6b0719922227d5 | # Generated by Django 3.1.5 on 2021-06-24 02:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Ruc table and refreshes the static
    # default of Cedula.fecha_alta.
    # NOTE(review): fecha_alta defaults to a timestamp string frozen at
    # generation time ('23/06/2021 22:46:25 hs'), not the row-creation time —
    # likely a modeling mistake, but generated migrations are kept verbatim.

    dependencies = [
        ('utiles', '0008_cedula'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ruc',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nro_ruc', models.CharField(blank=True, max_length=500, null=True)),
                ('fecha_alta', models.CharField(blank=True, default='23/06/2021 22:46:25 hs', max_length=500, null=True)),
            ],
        ),
        migrations.AlterField(
            model_name='cedula',
            name='fecha_alta',
            field=models.CharField(blank=True, default='23/06/2021 22:46:25 hs', max_length=500, null=True),
        ),
    ]
|
997,490 | ff67c5845973edb64bd30487823a1ebbaf7bc27b | from flask import Flask, render_template, request, jsonify
import pyodbc
import redis
from flask import request
import time
import os
from datetime import datetime
import json
from datetime import timedelta
import random
import pandas as pd
import matplotlib.pyplot as plt
#port = int(os.getenv("VCAP_APP_PORT"))
port = int(os.getenv('PORT', 5000))
# redis connection code
r = redis.StrictRedis(host='redisCacheAssignemnt3.redis.cache.windows.net',
port=6380, password='CK6g5axYGYf4e0T08H9vBc6+obM7GtWJfZOPqrGgkBE=', ssl=True)
#print(cnxn)
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Serve the client landing page."""
    return render_template('client_homePage.html')
# method to connect to Db
def sqlConnect():
    """Open a pyodbc connection to the Azure SQL database.

    Connection settings can be overridden via environment variables
    (DB_SERVER, DB_NAME, DB_USER, DB_PASSWORD); the fallbacks preserve the
    original hard-coded values so existing deployments keep working.

    SECURITY NOTE: credentials were hard-coded here (and remain as fallbacks).
    Move them fully into environment/secret storage and rotate the password.
    """
    server = os.getenv('DB_SERVER', 'mysqlserver09.database.windows.net,1433')
    database = os.getenv('DB_NAME', 'AKVDB')
    username = os.getenv('DB_USER', 'azureuser@mysqlserver09')
    password = os.getenv('DB_PASSWORD', '12345Ajuvad')
    driver = '{ODBC Driver 17 for SQL Server}'
    return pyodbc.connect(
        'DRIVER=' + driver + ';SERVER=' + server + ';PORT=1433;DATABASE=' +
        database + ';UID=' + username + ';PWD=' + password)
#function to check in cache
def readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString):
    """Read-through cache: serve results from Redis when *cache* exists,
    otherwise run *queryString* on the DB, store the rows as JSON, and return.

    Returns a (payload, isCacheOnboolean, status_message) tuple.

    NOTE(review): the payload shape differs by cache state — a miss returns
    the raw DB rows while a hit returns the JSON-encoded bytes from Redis.
    Callers must handle both; confirm this asymmetry is intentional.
    """
    if r.get(cache) == None:
        # Cache miss: execute the query and memoize the rows as a JSON list.
        dataList=[]
        #startTime = time.time()
        print(queryString)
        cursor.execute(queryString)
        data = cursor.fetchall()
        print(data)
        for row in data:
            # DB rows are not JSON-serializable; copy each into a plain list.
            dataList.append([x for x in row])
        print('.........',dataList)
        #endTime = time.time()
        #execTime = endTime - startTime
        #data_string = data
        #r.rpush(cache, data)
        #r.set(cache, str(data_string)) # redis requires data to be in Convert to a byte, string or number first.
        r.set(cache, json.dumps(dataList))
        #finalList = data
        print("No Cache")
        #print("No cache Time : ", execTime)
        isCacheOn = 'No cache'
    else:
        # Cache hit: return the stored JSON bytes verbatim.
        #startTime = time.time()
        #data = json.loads(r.get(cache))
        data = r.get(cache)
        print(data)
        #endTime = time.time()
        #execTime = endTime - startTime
        #print("Cache Time is", execTime)
        print("Cache")
        #print("Cache Time is", execTime)
        isCacheOn = 'In cache'
        isCacheOnboolean = True
    return data, isCacheOnboolean, isCacheOn
#read from DB
def readfromDb(cache, isCacheOnboolean, cursor, queryString):
    """Execute *queryString* directly against the database, bypassing Redis.

    Args:
        cache: unused; kept for signature parity with the cache-aware readers.
        isCacheOnboolean: passed through unchanged (DB reads never set it).
        cursor: open DB-API cursor.
        queryString: SQL text to execute.

    Returns:
        (rows, isCacheOnboolean, status_message), mirroring the other readers.
    """
    # Removed: unused dataList accumulator and debug prints.
    cursor.execute(queryString)
    data = cursor.fetchall()
    return data, isCacheOnboolean, 'From Db Only'
#read from cache :- Make sure that data is in cache otherwise throw error
def readFromCache(cache, isCacheOnboolean, cursor, queryString):
    """Read a value from Redis only; the database is never queried.

    Returns (payload, flag, status): the raw cached bytes with flag=True when
    the key exists, otherwise an empty list with an explanatory status and
    the incoming flag unchanged. *cursor* and *queryString* are unused but
    kept for signature parity with the other readers.
    """
    if r.get(cache) is None:
        return [], isCacheOnboolean, 'key is not in cache'
    data = r.get(cache)
    print("Cache")
    return data, True, 'In cache'
#delete Cache
def deleteCache(delete_cache_string):
    """Delete a single Redis key and return the key name for confirmation."""
    r.delete(delete_cache_string)
    return delete_cache_string
def convert_fig_to_html(fig):
    """Render *fig* to PNG and return the bytes base64-encoded for an
    inline HTML <img> tag.

    Fixes the original implementation, which ignored the *fig* argument and
    serialized whatever pyplot's current figure happened to be at call time.
    """
    from io import BytesIO
    import base64
    buffer = BytesIO()
    fig.savefig(buffer, format='png')
    return base64.b64encode(buffer.getvalue())
@app.route("/client_homePage" , methods=['GET'])
def routerFunction():
#delete cache
if request.args.get('delete_cache') == 'delete_cache':
delete_cache_string = request.args.get('symbol_operator')
deleteCache(delete_cache_string)
return "deleted cache "+delete_cache_string
if request.args.get('clear_cache') == 'clear_cache':
r.flushdb()
return "Flush Redis"
if request.args.get('form') == 'Submit' or request.args.get('load_db_form') == 'load_db_form' or request.args.get('load_cache_form') == 'load_cache_form':
isCacheOnboolean = False
isLoadFromDb = False
isLoadFromCache = False
if request.args.get('load_db_form') == 'load_db_form':
print('i am in isLoadFromDb')
isLoadFromDb = True
if request.args.get('load_cache_form') == 'load_cache_form':
isLoadFromCache = True
# db = sqlConnect()
# cursor = db.cursor()
# mag = request.args.get('eathquake_mag')
# oper = request.args.get('symbol_operator')
# count = int(request.args.get('count_from_user'))
# cache = "mycache1"+oper+mag
depth1 = float(request.args.get('depth1'))
depth2 = float(request.args.get('depth2'))
count = int(request.args.get('count_from_user'))
cache = "mycache1"
#randomdepth1 = random.uniform(depth1,depth2)
#randomdepth2 = random.uniform(depth1, depth2)
print('This is cache name for your input', cache)
time_list =[]
randomdepth_list1 = []
randomdepth_list2 = []
count_list = []
# connect to db
db = sqlConnect()
cursor = db.cursor()
#For (count)
startTimecumulative = time.time()
for i in range(count):
startTime = time.time()
randomdepth1 = str(random.uniform(depth1, depth2))
randomdepth2 = str(random.uniform(depth1, depth2))
randomdepth_list1.append(randomdepth1)
randomdepth_list2.append(randomdepth2)
print(i)
queryString = "SELECT COUNT(*) FROM quakequiz3updated2 WHERE depthError >="+randomdepth1+ " AND depthError <="+randomdepth2
if(isLoadFromDb):
data, isCacheOnboolean, isCacheOn = readfromDb(cache, isCacheOnboolean, cursor, queryString)
#db function
elif(isLoadFromCache):
#readFromCache
data, isCacheOnboolean, isCacheOn = readFromCache(cache, isCacheOnboolean, cursor, queryString)
else:
data, isCacheOnboolean, isCacheOn = readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString)
#print('.......this is i............',i)
endTime = time.time()
execTime = endTime - startTime
time_list.append(execTime)
print('data..........',data)
count_list.append(data)
endTimecumulative = time.time()
execTimecumulative = endTimecumulative - startTimecumulative
db.close()
if (isCacheOnboolean):
return render_template("table.html", data=count_list, timetaken=str(execTimecumulative),
isCacheOn=isCacheOn, randomdepthlist1=randomdepth_list1,
randomdepthlist2=randomdepth_list2, timetaken1=time_list)
# return render_template("cachetable.html", data=json.loads(count_list), timetaken=str(execTime), isCacheOn=isCacheOn, randomdepthlist1 = randomdepth_list1, randomdepthlist2 = randomdepth_list2, timetaken1 = time_list)
else:
return render_template("table.html", data=count_list, timetaken=str(execTimecumulative), isCacheOn=isCacheOn, randomdepthlist1 = randomdepth_list1, randomdepthlist2 = randomdepth_list2, timetaken1 = time_list)
if request.args.get('redis_cache_load') == 'redis_cache_load':
isCacheOnboolean = False
cache = "mycache"
magnitudeVal = request.args.get('mag')
count = int(request.args.get('count_from_user'))
finalList = []
# connect to db
db = sqlConnect()
cursor = db.cursor()
startTime = time.time()
for i in range(count):
queryString = "select * from earthquakeAssignment3"
data, isCacheOnboolean, isCacheOn = readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString)
endTime = time.time()
execTime = endTime - startTime
db.close()
if (isCacheOnboolean):
return render_template("cachetable.html", data = data, timetaken = str(execTime) , isCacheOn = isCacheOn)
else:
return render_template("table.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
#Within some range
#input latitude , longitude, radius and magintude
if request.args.get('form_find_earthquake_in_range') == 'Submit':
isCacheOnboolean = False
cache = "mycache2"
lat = request.args.get('lat')
lon = request.args.get('lon')
rad = request.args.get('rad')
count = int(request.args.get('count_from_user'))
db = sqlConnect()
cursor = db.cursor()
startTime = time.time()
# generate query string
for i in range(count):
queryString = "SELECT id, latitude, longitude, depth, mag, magType "
queryString = queryString + "FROM earthquakeAssignment3 where "
queryString = queryString + "( 6371 * acos( cos( radians(" + lat + ") )"
queryString = queryString + "* cos( radians( latitude ) )"
queryString = queryString + "* cos( radians(longitude) - radians(" + lon + ")) + sin(radians(" + lat + ")) "
queryString = queryString + "* sin( radians(latitude)))) <= " + rad
data, isCacheOnboolean, isCacheOn = readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString)
endTime = time.time()
execTime = endTime - startTime
db.close()
if (isCacheOnboolean):
return render_template("cachetable.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
else:
return render_template("table.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
#Search by location
if request.args.get('form_find_earthquake_in_location') == 'Submit':
isCacheOnboolean = False
cache = "mycache3"
location =request.args.get('location')
count = int(request.args.get('count_from_user'))
db = sqlConnect()
cursor = db.cursor()
# generate query string
#'''SELECT * FROM BVC79655.TBEARTHQUAKE WHERE "mag" > ''' + mag1
startTime = time.time()
for i in range(count):
queryString = '''SELECT * FROM earthquakeAssignment3 WHERE "locationSource" = \'''' + location +'''\''''
data, isCacheOnboolean, isCacheOn = readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString)
endTime = time.time()
execTime = endTime - startTime
db.close()
if (isCacheOnboolean):
return render_template("cachetable.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
else:
return render_template("table.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
#search within time range
if request.args.get('form_find_earthquake_in_timerange') == 'Submit':
isCacheOnboolean = False
cache = "mycache4"
date_time1 = request.args.get('time1')
date_time2 = request.args.get('time2')
count = int(request.args.get('count_from_user'))
f = "%Y-%m-%dT%H:%M"
qot = "\'"
print(date_time1)
print(date_time2)
db = sqlConnect()
cursor = db.cursor()
startTime = time.time()
for i in range(count):
queryString = '''SELECT * FROM earthquakeAssignment3 WHERE "time" BETWEEN ''' + "\'" + str(
datetime.strptime(date_time1, f)) + "\'" ''' AND ''' + "\'" + str(datetime.strptime(date_time2, f)) + "\'"
data, isCacheOnboolean, isCacheOn = readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString)
endTime = time.time()
execTime = endTime - startTime
db.close()
if (isCacheOnboolean):
return render_template("cachetable.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
else:
return render_template("table.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
if request.args.get('form_find_earthquake_in_magrange') == 'Submit':
isCacheOnboolean = False
cache = "mycache5"
mag1 = request.args.get('mag1')
mag2 = request.args.get('mag2')
count = int(request.args.get('count_from_user'))
db = sqlConnect()
cursor = db.cursor()
startTime =time.time()
for i in range(count):
queryString = '''SELECT * FROM earthquakeAssignment3 WHERE "mag" BETWEEN ''' + mag1 +''' and ''' +mag2
data, isCacheOnboolean, isCacheOn = readOrLoadfromCache(cache, isCacheOnboolean, cursor, queryString)
db.close()
endTime = time.time()
execTime = endTime - startTime
if (isCacheOnboolean):
return render_template("cachetable.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
else:
return render_template("table.html", data=data, timetaken=str(execTime), isCacheOn=isCacheOn)
#Assignment 4
if request.args.get('Assignment_4') == "Assignment_4":
#depth1 = float(request.args.get('depth1'))
#depth2 = float(request.args.get('depth2'))
mag = int(request.args.get('mag'))
count = int(request.args.get('count_from_user'))
lstDictionaryDataDisplay = []
magStart = 0
magEnd = magStart+mag
while magEnd<=count:
startTime = time.time()
# randomdepth1 = str(random.uniform(depth1, depth2))
# randomdepth2 = str(random.uniform(depth1, depth2))
queryString = "SELECT COUNT(*) AS counts FROM quakequiz3updated2 WHERE mag BETWEEN " + str(magStart) + " AND " + str(magEnd)
#queryString = "SELECT COUNT(*) AS counts FROM quakequiz3updated2 WHERE depthError >=" + randomdepth1 + " AND depthError <=" + randomdepth2
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
quakeCount = cursor.fetchone()
print('.........', quakeCount)
#print('.............',depthCount)
print(str(mag))
#lstDictionaryDataDisplay.append({"depthError": depthCount[0]})
lstDictionaryDataDisplay.append({"# People": quakeCount[0], "Age Range": str(magStart) + " to " + str(magEnd)})
print(lstDictionaryDataDisplay)
magStart = magEnd
magEnd = magEnd + mag
db.close()
#return "Hello"
return render_template('assignment4.html', tableDatal=lstDictionaryDataDisplay)
#return render_template('barchartforloop.html', result=json.dumps(lstDictionaryDataDisplay))
if request.args.get('Assignment_4') == "Assignment_4_bar":
mag = request.args.get('mag')
lstDictionaryDataDisplay = []
# randomdepth1 = str(random.uniform(depth1, depth2))
# randomdepth2 = str(random.uniform(depth1, depth2))
queryString = "SELECT COUNT(*) AS counts, mag FROM quakequiz3updated2 GROUP BY mag"
#queryString = "SELECT COUNT(*) AS counts FROM quakequiz3updated2 WHERE depthError >=" + randomdepth1 + " AND depthError <=" + randomdepth2
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
quakeCount = cursor.fetchall()
print('.......',quakeCount)
#print('.........', quakeCount)
#print('.............',depthCount)
#print(str(mag))
#lstDictionaryDataDisplay.append({"depthError": depthCount[0]})
#lstDictionaryDataDisplay.append({"# People": quakeCount[0], "Age Range": str(magStart) + " to " + str(magEnd)})
#print(lstDictionaryDataDisplay)
db.close()
return render_template('barchart.html', result=quakeCount)
if request.args.get('Assignment_4') == "Assignment_4_pie":
mag = request.args.get('mag')
lstDictionaryDataDisplay = []
# randomdepth1 = str(random.uniform(depth1, depth2))
# randomdepth2 = str(random.uniform(depth1, depth2))
queryString = "SELECT COUNT(*) AS counts, mag FROM quakequiz3updated2 GROUP BY mag"
#queryString = "SELECT COUNT(*) AS counts FROM quakequiz3updated2 WHERE depthError >=" + randomdepth1 + " AND depthError <=" + randomdepth2
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
quakeCount = cursor.fetchall()
print('.......', quakeCount)
#print('.........', quakeCount)
#print('.............',depthCount)
#print(str(mag))
#lstDictionaryDataDisplay.append({"depthError": depthCount[0]})
#lstDictionaryDataDisplay.append({"# People": quakeCount[0], "Age Range": str(magStart) + " to " + str(magEnd)})
#print(lstDictionaryDataDisplay)
db.close()
return render_template('piechart.html', result=quakeCount)
if request.args.get('Assignment_4') == "Assignment_4_pie_depth":
depth1 = float(request.args.get('depth1'))
depth2 = float(request.args.get('depth2'))
count = int(request.args.get('count'))
final_result = []
for i in range(count):
randomdepth1 = str(random.uniform(depth1, depth2))
randomdepth2 = str(random.uniform(depth1, depth2))
queryString = "SELECT"+randomdepth1+"-"+randomdepth2+" COUNT(*) AS counts FROM quakequiz3updated2 WHERE depth BETWEEN "+randomdepth1 +" AND "+randomdepth2
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
result = cursor.fetchall()
final_result.append(result)
print('.......', final_result)
result = final_result
db.close()
return render_template('piechartdepth.html', result = result)
if request.args.get('Assignment_4') == "Asssignment_4_bar_graph-1":
queryString = "select t.range as magnitudes, count(*) as occurences from ( select case when mag >= 0 and mag < 1 then 0 when mag >= 1 and mag < 2 then 1 when mag >= 2 and mag < 3 then 2 when mag >= 3 and mag < 4 then 3 when mag >= 4 and mag < 5 then 4 when mag >= 5 and mag < 6 then 5 when mag >= 6 and mag < 7 then 6 when mag >= 7 and mag < 8 then 7 when mag >= 8 and mag < 9 then 8 when mag >= 9 and mag < 10 then 9 else -1 end as range from quakequiz3updated2) t group by t.range order by magnitudes;"
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
result = cursor.fetchall()
print('..........',result)
db.close()
return render_template('barchart.html', result = result)
if request.args.get('Assignment_4') == "Asssignment_4_pie_graph-1":
queryString = "select t.range as magnitudes, count(*) as occurences from ( select case when mag >= 0 and mag < 1 then 0 when mag >= 1 and mag < 2 then 1 when mag >= 2 and mag < 3 then 2 when mag >= 3 and mag < 4 then 3 when mag >= 4 and mag < 5 then 4 when mag >= 5 and mag < 6 then 5 when mag >= 6 and mag < 7 then 6 when mag >= 7 and mag < 8 then 7 when mag >= 8 and mag < 9 then 8 when mag >= 9 and mag < 10 then 9 else -1 end as range from quakequiz3updated2) t group by t.range order by magnitudes;"
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
result = cursor.fetchall()
print('..........',result)
db.close()
return render_template('piechart.html', result = result)
if request.args.get('Quiz4_Q1') == "Quiz4_Q1":
pop1 = request.args.get('pop1')
pop2 = request.args.get('pop2')
queryString = '''SELECT StateName FROM TBquizupdated4 WHERE TotalPop BETWEEN ''' + pop1 + ''' and ''' + pop2
db = sqlConnect()
cursor = db.cursor()
cursor.execute(queryString)
result = cursor.fetchall()
print('result', result)
db.close()
return render_template('custom_table.html', data = result)
if request.args.get('Quiz4_Q2') == "Quiz4_Q2":
mlist = []
pop1 = int(request.args.get('pop1'))
pop2 = int(request.args.get('pop2'))
interval=int(request.args.get('intv'))
for i in range(pop1,pop2,interval):
l1 = []
l = []
query = "SELECT TotalPop FROM TBquizupdated4 where TotalPop >"+str(i)+" and TotalPop<="+str(i+interval)+""
print(query)
db = sqlConnect()
cursor = db.cursor()
cursor.execute(query)
rows = cursor.fetchone()
l = str(i) + "--" + str(i + interval)
l1.append(l)
print(l1)
l1.append(rows[0])
print(l1)
mlist.append(l1)
y = pd.DataFrame(mlist)
print(y[1])
fig = plt.figure()
plt.pie(y[1], autopct='%1.1f%%', labels=y[0])
plt.legend()
plot = convert_fig_to_html(fig)
return render_template('quest6-1.html', data=plot.decode('utf8'))
if request.args.get('Quiz4_Q8') == "Quiz4_Q8":
num = request.args.get('num')
return render_template('quest8.html', result=num)
|
997,491 | 4c1d8e7e46e0a19ff5e2801cf00ffd792451c622 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 26 11:56:17 2016
@author: calum
"""
import numpy as np
import kcorrect
import math
# K-correction pipeline over an SDSS MGS sample array (machine-specific path).
# Per-row layout appears to be: [.., redshift, 5 magnitudes, 5 magnitude
# errors] given the slices below — TODO confirm against the array builder.
array = np.load('/home/calum/Documents/MPhysProj/mgs_sample/mgs_kcorrect_array.npy')
# set things up
#kcorrect.load_templates()
#kcorrect.load_filters()
#Note that the conversion to the inverse variances from the maggies and the magnitude errors is (0.4 ln(10) × maggies × magerr)-2
# maggies are simply related to magnitudes by 10−0.4m
#−2.5 log10(maggies/maggies.z0)
for row in array[:1]:
    # Magnitudes -> maggies: 10**(-0.4 * m).
    maggies = np.array(pow(10,-0.4*row[2:7]))
    # Inverse variances: (0.4 ln10 * maggies * magerr)**-2, per the note above.
    invar = pow(0.4*math.log(10)*maggies*np.array(row[7:]),-2)
    # kcorrect input: [redshift, maggies..., invariances...].
    k_tuple = np.concatenate(([row[1]],maggies,invar))
    kcorrect_tuple = kcorrect.fit_coeffs(k_tuple)
    k_coeff = kcorrect.reconstruct_maggies(kcorrect_tuple)
    final_input = maggies/np.array(k_coeff[1:])
    # K-corrections in magnitudes: -2.5 log10(maggies / reconstructed maggies).
    # NOTE(review): kcorrection_array is overwritten each iteration and never
    # stored — results are discarded for all but the last row processed.
    kcorrection_array = [-2.5*math.log(item,10) for item in final_input]
|
997,492 | 2188e7cc63f366e8c9beaaa7e48625a1dbe94936 | __author__ = 'loic'
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
class machine(object):
    """Base class pairing a scikit-learn model with a standard scaler.

    Subclasses implement train(); use() then scales unseen samples with the
    fitted scaler and delegates prediction to the fitted model.
    """

    def __init__(self):
        # Both are populated by train(); use() fails until training happened.
        self.scaler = None
        self.model = None

    def train(self, X, Y):
        """Fit the scaler and model on samples X with labels Y.

        Subclasses must override this.
        """
        raise NotImplementedError()

    def use(self, X):
        """Return predicted classes for the unknown samples X."""
        return self.model.predict(self.scaler.transform(X))

    def _train_scaler(self, X):
        # Fit a zero-mean / unit-variance scaler on the training distribution.
        self.scaler = preprocessing.StandardScaler().fit(X)
class logistic(machine):
    """Logistic-regression classifier on standardized features."""

    def train(self, X, Y):
        """Fit the scaler, then an L2-regularized logistic model (C=1)."""
        self._train_scaler(X)
        self.model = LogisticRegression(C=1.)
        self.model.fit(self.scaler.transform(X), Y)
class lda(machine):
    """Linear discriminant analysis classifier on standardized features."""

    def train(self, X, Y):
        """Fit the scaler, then a linear discriminant analysis model.

        Fixes two defects in the original: `LDA` was never imported anywhere
        in this module (NameError at call time), and `C=1.` is not an LDA
        parameter — it was copy-pasted from the LogisticRegression subclass.
        """
        # Local import keeps this fix self-contained; sklearn is already a
        # dependency of this module.
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
        self._train_scaler(X)
        Xnorm = self.scaler.transform(X)
        self.model = LinearDiscriminantAnalysis()
        self.model.fit(Xnorm, Y)
|
997,493 | ff7b8d8bcf5e7ae8843e5cc646d9e6c8af1a079d | import pandas as pd
import numpy as np
from numpy.random import randn
# Pandas DataFrame tutorial: constructing frames, then selecting columns with
# [], rows with .loc (label-based) / .iloc (position-based), and scalars.
np.random.seed(100)
data = randn(5,4) # 5 rows, 4 columns
print(data)
df = pd.DataFrame(data, index='A B C D E'.split(), columns='가 나 다 라'.split())
print(df)
data2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16],[17,18,19,20]] # 5,4
df2 = pd.DataFrame(data2, index=['A','B','C','D','E'], columns=['가','나','다','라'])
print(df2)
# 가 나 다 라
# A 1 2 3 4
# B 5 6 7 8
# C 9 10 11 12
# D 13 14 15 16
# E 17 18 19 20
dff = pd.DataFrame(data2)
print(dff)
# 0 1 2 3
# 0 1 2 3 4
# 1 5 6 7 8
# 2 9 10 11 12
# 3 13 14 15 16
# 4 17 18 19 20
print("dff[0] : \n",dff[0])
# 0 1
# 1 5
# 2 9
# 3 13
# 4 17
df3 = pd.DataFrame(np.array([[1,2,3],[1,2,3]]))
print(df3)
# Column selection
print("df2['나'] : \n",df2['나'])
# A 2
# B 6
# C 10
# D 14
# E 18
# Name: 나, dtype: int64
print("df2[['나','라']] : \n",df2[['나','라']])
# 나 라
# A 2 4
# B 6 8
# C 10 12
# D 14 16
# E 18 20
# print("df2[0] : \n",df2[0]) # error: must select by column name
# ROW
# print("df2.loc['나'] : \n",df2.loc['나']) # error: .loc is for row labels here
# NOTE(review): the label string below says "df2.iloc['A']" but the call is
# df2.loc['A'] — the printed label is misleading (runtime string, left as-is).
print("df2.iloc['A'] : \n",df2.loc['A']) # loc : location
# 가 1
# 나 2
# 다 3
# 라 4
print("df2.iloc[:,1] : \n",df2.iloc[:,1]) # iloc : index location
# A 2
# B 6
# C 10
# D 14
# E 18
# NOTE(review): same label/call mismatch on the next line (iloc vs .loc).
print("df2.iloc[['A','C']] : \n",df2.loc[['A','C']]) # loc : location
# 가 나 다 라
# A 1 2 3 4
# C 9 10 11 12
# print("df2.iloc[0] : \n",df2.loc[0]) # error loc
print("df2.iloc[[0,2]] : \n",df2.iloc[[0,2]]) # iloc : index location
# 가 나 다 라
# A 1 2 3 4
# C 9 10 11 12
# Row-and-column selection
print("df2.loc[['A','B'],['나']['다']] : \n",df2.loc[['A','B'],['나','다']])
# 나 다
# A 2 3
# B 6 7
# Access a single scalar value
print("df2.loc['E','다'] : \n",df2.loc['E','다']) # 19
print("df2.iloc[4,2] : \n",df2.iloc[4,2]) # 19
print("df2.iloc[4][2] : \n",df2.iloc[4][2]) # 19
997,494 | fb7e36210bd6ea197b90fec898cccc655972b13a | #!/usr/bin/env python
import json
import os
def main():
    """Emit an Ansible dynamic-inventory JSON document for localhost.

    Discovers environment-specific paths by shelling out:
    - `which python` for the active interpreter,
    - `/usr/libexec/java_home` (macOS-specific) for JAVA_HOME,
    - `which gskkserv` for the Google IME SKK server binary.
    """
    python_interp = os.popen('which python').read().rstrip()
    java_home = os.popen('/usr/libexec/java_home').read().rstrip()
    google_ime = os.popen('which gskkserv').read().rstrip()
    # Inventory shape: one 'localhost' group plus per-host vars under _meta.
    print(json.dumps({
        'localhost': {
            'hosts': [
                '127.0.0.1'
            ]
        },
        '_meta': {
            'hostvars': {
                '127.0.0.1': {
                    'ansible_connection': 'local',
                    'ansible_python_interpreter': python_interp,
                    'ansible_java_path': '%s/bin/java' % java_home,
                    'ansible_google_ime_path': google_ime,
                    'ansible_user_dir': os.environ['HOME'],
                    'ansible_user_id': os.environ['USER'],
                    'ansible_repos_dir': '%s/repos' % os.environ['HOME']
                }
            }
        }
    }))

if __name__ == '__main__':
    main()
|
997,495 | 34c2245689b7772332747e62c28deddc9a6c992a | """
A set of tools which allow specifying a model consisting of sheets
organized in levels, and projections connecting these sheets. The
sheets have an attribute matchconditions allowing to specify which
other (incoming) sheets a sheet should connect to.
Instances of the LabelDecorator decorator are offered for setting
parameters/matchconditions for a sheet within a level, as well as
parameters for projections.
"""
import itertools
from collections import defaultdict
import param
import lancet
import numbergen
from holoviews.core.tree import AttrTree
import topo.sheet, topo.projection
from specifications import SheetSpec, ProjectionSpec, ModelSpec, ArraySpec # pyflakes:ignore (API import)
from topo.misc.commandline import global_params
def select(index, *decorators):
    """
    A meta-decorator that applies one-of-N possible decorators based
    on the index. The index may be a boolean value for selecting
    between two options.
    """
    def dispatch(*args, **kwargs):
        # int() maps booleans onto positions 0/1 and leaves ints untouched.
        chosen = decorators[int(index)]
        return chosen(*args, **kwargs)
    return dispatch
def order_projections(model, connection_order):
    """
    Helper function for reproducing random streams when
    time_dependent=False (where the order of projection instantiation
    matters). This function should only be used for legacy reasons and
    should not be used with new models.

    The first argument is an instance of Model (with projections
    configured) and the second is the projection ordering, specified
    by the (decorated) method names generating those projections.

    This function allows sorting on a single source sheet property. To
    specify such an ordering, use a tuple where the first element is
    the relevant method name and the second element is a source sheet
    property dictionary to match. For instance, specifying the
    connection_order list as:

    [('V1_afferent_projections', {'polarity':'On'}),
     ('V1_afferent_projections', {'polarity':'Off'})]

    will order the 'On' projections generated by the
    V1_afferent_projections method before the 'Off' projections.
    """
    # Normalize plain method names to (method_name, None) pairs.
    connection_list = [el if isinstance(el, tuple) else (el, None)
                       for el in connection_order]
    for spec in model.projections:
        matches = [(i, el) for i, el in enumerate(connection_list)
                   if el[0] == spec.matchname]
        if len(matches) == 0:
            raise Exception("Could not order projection %r" % spec)
        elif len(matches) == 1:
            # Unique method-name match: its list position is the precedence.
            (i, (k, v)) = matches[0]
            spec.sort_precedence = i
            continue
        # Multiple entries share this method name; disambiguate on exactly
        # one source-sheet property (e.g. polarity 'On' vs 'Off').
        property_keys = [pdict.keys() for (_, (_, pdict)) in matches]
        if not all(len(pkeys)==1 for pkeys in property_keys):
            raise Exception("Please specify only a single property to sort on")
        if not all(pkey[0]==property_keys[0][0] for pkey in property_keys):
            raise Exception("Please specify only a single property to sort on")
        key = property_keys[0][0]
        spec_property_value = spec.src.properties[key]
        match = [ind for (ind, (_, pdict)) in matches if pdict[key] == spec_property_value]
        if len(match) != 1:
            raise Exception("Could not order projection %r by property %r" % (spec, key))
        spec.sort_precedence = match[0]
class MatchConditions(object):
    """Decorator class for matchconditions.

    Calling an instance with (level, method_name) yields a decorator that
    tags the wrapped function with those values for later registration.
    """

    def __call__(self, level, method_name):
        def mark(fn):
            # Annotate in place and hand the same function back unchanged.
            fn._matchcondition_level = level
            fn._target_method_name = method_name
            return fn
        return mark

    def __repr__(self):
        return "MatchConditions()"
class ComponentDecorator(object):
    """
    Decorator class that can be instantiated with a component type to
    create a decorator used to associate methods with the
    corresponding component.

    It works by setting a '_component_type' attribute on each decorated
    method. Methods annotated this way may then be tracked in classes
    decorated with the ComponentRegistry class decorator.
    """
    def __init__(self, name, object_type):
        self.name = name
        self.type = object_type
        # Rewrite the params() docstring so IPython tab completion shows
        # the keyword arguments accepted by this component type.
        pieces = ["%s=%s" % (pname, type(pobj.default))
                  for (pname, pobj) in object_type.params().items()]
        self.params.__func__.__doc__ = 'params(%s)' % ", ".join(pieces)

    def params(self, **kwargs):
        """
        A convenient way of generating parameter dictionaries with
        tab-completion in IPython.
        """
        return kwargs

    def __call__(self, f):
        f._component_type = self.type
        return f

    def __repr__(self):
        return "ComponentDecorator(%s, %s)" % (self.name, self.type.name)
class ComponentRegistry(object):
    """
    An instance of this class is to be used as class decorator. Any
    decorated class using ComponentDecorators (or matchcondition
    decorators) on their methods will be registered with their
    corresponding component types.
    """
    def __init__(self):
        # class name -> {method name: method object}
        self.method_registry = defaultdict(dict)
        # class name -> {method name: component type}
        self.type_registry = defaultdict(dict)
        # List of (class name, level, target method name, method) tuples.
        self.matchconditions = []

    def compute_conditions(self, level, model, properties):
        """
        Collect the matchcondition dictionary for a particular level
        given a certain Model instance and sheet properties. If no
        matchconditions are available, an empty dictionary is
        returned.

        Respects the appropriate method resolution order (mro) of the
        given model instance.
        """
        mro = [el.__name__ for el in model.__class__.mro()]
        filtered = [(target, fn) for (cls, lvl, target, fn) in self.matchconditions
                    if (cls in mro) and lvl == level]
        return dict((target, fn(model, properties)) for (target, fn) in filtered)

    def lookup(self, cls, name, mode):
        """
        Given a class, a method name and a mode, return either the
        component type (if mode='type') or the appropriate method (if
        mode='method').

        Raises KeyError if no entry is found anywhere in the mro.
        """
        mro = [el.__name__ for el in cls.mro()]
        registry = self.method_registry if mode == 'method' else self.type_registry
        for class_name in mro:
            entries = registry[class_name]
            if name in entries:
                return entries[name]
        raise KeyError("Could not find method named %r."
                       " Please ensure classes using component decorators"
                       " are decorated with the Model.definition"
                       " class decorator." % name)

    def __call__(self, cls):
        # dict.items() (rather than Python 2's iteritems) keeps this
        # decorator working on both Python 2 and Python 3.
        class_name = cls.__name__
        for name, method in cls.__dict__.items():
            component_type = getattr(method, "_component_type", False)
            if component_type:
                method_name = method.__name__
                self.method_registry[class_name].update({method_name: method})
                self.type_registry[class_name].update({method_name: component_type})

            match_level = getattr(method, '_matchcondition_level', False)
            target_method_name = getattr(method, '_target_method_name', False)
            if match_level and target_method_name:
                info = (class_name, match_level, target_method_name, method)
                self.matchconditions.append(info)
        return cls
# Singleton registry shared by all Model subclasses that use the
# component/matchcondition decorators (exposed as Model.definition).
definition = ComponentRegistry()
class Model(param.Parameterized):
    """
    The available setup options are:

    :'training_patterns': fills the training_patterns AttrTree
    with pattern generator instances. The path is the name of the
    input sheet. Usually calls PatternCoordinator to do this.

    :'sheets_setup': determines the number of sheets, their types
    and names sets sheet parameters according to the registered
    methods in level sets sheet matchconditions according to the
    registered methods in matchconditions

    :'projections': determines which connections should be present
    between the sheets according to the matchconditions of
    SheetSpec objects, using connect to specify the
    connection type and sets their parameters according to the
    registered methods in connect

    The available instantiate options are:

    :'sheets': instantiates all sheets and registers them in
    topo.sim

    :'projections': instantiates all projections and registers
    them in topo.sim
    """

    random_seed = param.Integer(default=None, doc="""
        Overrides the default global seed on param.random_seed when not None.""")

    __abstract = True

    # A convenient handle on the definition class decorator
    definition = definition

    matchconditions = MatchConditions()

    # Decorators registered via register_decorator, split by component kind.
    sheet_decorators = set()
    projection_decorators = set()

    @classmethod
    def register_decorator(cls, object_type):
        """
        Create a ComponentDecorator for the given type, attach it to the
        Model class under the type's name and record it in the appropriate
        decorator set (sheets and/or projections).
        """
        name = object_type.name
        decorator = ComponentDecorator(name, object_type)
        setattr(cls, name, decorator)

        if issubclass(object_type, topo.sheet.Sheet):
            cls.sheet_decorators.add(decorator)
        if issubclass(object_type, topo.projection.Projection):
            cls.projection_decorators.add(decorator)

    @property
    def modified_parameters(self):
        "Dictionary of modified model parameters"
        return {k: v for k, v in self.get_param_values(onlychanged=True)}

    def __init__(self, register=True, time_dependent=True, **params):
        # NOTE: this sets a *global* flag on numbergen, affecting every
        # TimeAware object, not just this model instance.
        numbergen.TimeAware.time_dependent = time_dependent
        if register:
            self._register_global_params(params)
        super(Model, self).__init__(**params)

        self.specification = None
        self.properties = {}
        # Training patterns need to be accessed by GeneratorSheets
        self.properties['training_patterns'] = AttrTree()

    def _register_global_params(self, params):
        """
        Register the parameters of this object as global parameters
        available for users to set from the command line. Values
        supplied as global parameters will override those of the given
        dictionary of params.
        """
        for name, obj in self.params().items():
            global_params.add(**{name: obj})

        for name, val in params.items():
            global_params.params(name).default = val

        # Command-line-supplied global values take precedence.
        params.update(global_params.get_param_values())
        params["name"] = self.name

    #==============================================#
    # Public methods to be implemented by modelers #
    #==============================================#

    def property_setup(self, properties):
        """
        Method to precompute any useful properties from the class
        parameters. For instance, if there is a ``num_lags``
        parameter, this method could compute the actual projection
        delays and store it as properties['lags']. The return value is
        the updated 'properties' dictionary.

        In addition, this method can be used to configure class
        attributes of the model components.
        """
        return properties

    def training_pattern_setup(self, **overrides):
        """
        Returns a dictionary of PatternGenerators to be added to
        self.training_patterns, with the target sheet name keys and
        pattern generator values.

        The overrides keywords can be used by a subclass to
        parameterize the training patterns e.g. override the default
        parameters of a PatternCoordinator object.
        """
        raise NotImplementedError

    def sheet_setup(self):
        """
        Returns a dictionary of properties or equivalent Lancet.Args
        object. Each outer key must be the level name and the values
        are lists of property dictionaries for the sheets at that
        level (or equivalent Lancet Args object). For instance, two
        LGN sheets at the 'LGN' level could be defined by either:

        {'LGN':[{'polarity':'ON'}, {'polarity':'OFF'}]}
        OR
        {'LGN':lancet.List('polarity', ['ON', 'OFF'])}

        The specified properties are used to initialize the sheets
        AttrTree with SheetSpec objects.
        """
        raise NotImplementedError

    def analysis_setup(self):
        """
        Set up appropriate defaults for analysis functions in
        topo.analysis.featureresponses.
        """
        pass

    #====================================================#
    # Remaining methods should not need to be overridden #
    #====================================================#

    def setup(self, setup_options=True):
        """
        This method can be used to setup certain parts of the
        submodel. If setup_options=True, all setup methods are
        called. setup_options can also be a list, whereas all list
        items of available_setup_options are accepted.

        Available setup options are:
        'training_patterns','sheets','projections' and 'analysis'.

        This method returns a ModelSpec object which is also set as
        the value of the 'specification' attribute.

        Please consult the docstring of the Model class for more
        information about each setup option.
        """
        available_setup_options = ['attributes',
                                   'training_patterns',
                                   'sheets',
                                   'projections',
                                   'analysis']

        if self.random_seed is not None:
            param.random_seed = self.random_seed

        if setup_options == True:
            setup_options = available_setup_options

        if 'attributes' in setup_options:
            self.properties = self.property_setup({})

        model = ModelSpec(self, self.properties)

        if 'training_patterns' in setup_options:
            training_patterns = self.training_pattern_setup()
            for name, training_pattern in training_patterns.items():
                model.training_patterns.set_path(name, training_pattern)
            self.properties['training_patterns'] = training_patterns
        if 'sheets' in setup_options:
            sheet_properties = self.sheet_setup()
            enumeration = enumerate(sheet_properties.items())
            for (ordering, (level, property_list)) in enumeration:
                sheet_type = self.definition.lookup(self.__class__, level, mode='type')
                # Normalize the three accepted property_list forms
                # (lancet.Identity, lancet.Args, plain list of dicts).
                if isinstance(property_list, lancet.Identity):
                    property_list = [{}]
                elif isinstance(property_list, lancet.Args):
                    property_list = property_list.specs
                # If an empty list or Args()
                elif not property_list:
                    continue

                for properties in property_list:
                    spec_properties = dict(level=level, **properties)
                    sheet_spec = SheetSpec(sheet_type, spec_properties)
                    sheet_spec.sort_precedence = ordering
                    model.sheets.set_path(str(sheet_spec), sheet_spec)
            model = self._update_sheet_spec_parameters(model)
        if 'projections' in setup_options:
            model = self._compute_projection_specs(model)
        if 'analysis' in setup_options:
            self.analysis_setup()
        self.specification = model
        return model

    def _update_sheet_spec_parameters(self, model):
        """Apply the registered parameter method to each SheetSpec."""
        for sheet_spec in model.sheets.data.values():
            param_method = self.definition.lookup(self.__class__, sheet_spec.level, 'method')
            if not param_method:
                raise Exception("Parameters for sheet level %r not specified" % sheet_spec.level)

            updated_params = param_method(self, sheet_spec.properties)
            sheet_spec.update(**updated_params)
        return model

    def _matchcondition_holds(self, matchconditions, src_sheet):
        """
        Given a dictionary of properties to match and a target sheet
        spec, return True if the matchcondition holds else False.
        """
        matches = True
        if matchconditions is None:
            return False
        for incoming_key, incoming_value in matchconditions.items():
            # A condition fails if the sheet has the key with a different
            # (string-compared) value, or lacks the key while a non-None
            # value is required.
            if (incoming_key in src_sheet.properties and \
                str(src_sheet.properties[incoming_key]) != str(incoming_value)) \
                or (incoming_key not in src_sheet.properties and incoming_value is not None):
                matches = False
                break
        return matches

    def _compute_projection_specs(self, model):
        """
        Loop through all possible combinations of SheetSpec objects in
        self.sheets If the src_sheet fulfills all criteria specified
        in dest_sheet.matchconditions, create a new ProjectionSpec
        object and add this item to self.projections.
        """
        sheetspec_product = itertools.product(model.sheets.data.values(),
                                              model.sheets.data.values())
        for src_sheet, dest_sheet in sheetspec_product:

            conditions = self.definition.compute_conditions(dest_sheet.level, self,
                                                            dest_sheet.properties)
            for matchname, matchconditions in conditions.items():
                if self._matchcondition_holds(matchconditions, src_sheet):
                    paramfn = self.definition.lookup(self.__class__, matchname, 'method')
                    projtype = self.definition.lookup(self.__class__, matchname, 'type')
                    paramsets = paramfn(self, src_sheet.properties, dest_sheet.properties)
                    # A single parameter dict means a single projection.
                    paramsets = [paramsets] if isinstance(paramsets, dict) else paramsets
                    for paramset in paramsets:
                        proj = ProjectionSpec(projtype, src_sheet, dest_sheet)
                        proj.update(**paramset)
                        # Only used when time_dependent=False
                        # (which is to be deprecated)
                        proj.matchname = matchname
                        path = (str(dest_sheet), paramset['name'])
                        model.projections.set_path(path, proj)
        return model

    def __call__(self, setup_options=True, instantiate_options=True, verbose=False):
        """
        A convenient way to setup a model object, instantiate it and
        return it.
        """
        model = self.setup(setup_options)
        model(instantiate_options, verbose)
        return model

    def __getitem__(self, key):
        "Convenient property access."
        return self.properties[key]

    def __setitem__(self, key, val):
        raise NotImplementedError("Models must define properties via the property_setup method")

    def keys(self):
        "The list of available property keys."
        return self.properties.keys()

    def items(self):
        "The property items."
        return self.properties.items()
# Register the sheets and projections available in Topographica
def register_submodel_decorators(classes, superclass=None):
    """
    Register a Model decorator for each of the non-abstract classes provided.

    Only registers those that are subclasses of the given superclass, if
    specified.
    """
    with param.logging_level('CRITICAL'):
        for c in classes:
            # NOTE(review): the abstract check looks up the name-mangled
            # attribute via c.name (a param-level attribute), not
            # c.__name__ -- presumably intentional for param classes;
            # confirm if porting this elsewhere.
            if (isinstance(c, type) and
                (superclass is None or issubclass(c, superclass)) and
                (not hasattr(c, "_%s__abstract" % c.name))):
                Model.register_decorator(c)
# Register decorator entry points for every concrete Sheet and Projection
# class currently known to Topographica.
register_submodel_decorators(param.concrete_descendents(topo.sheet.Sheet).values())
register_submodel_decorators(param.concrete_descendents(topo.projection.Projection).values())
survey_obs = {
"resourceType": "Observation",
"id": "id",
"meta": {
"extension": [
{
"url": "http://hl7.org/fhir/StructureDefinition/instance-name",
"valueString": "Prapare Example [loinc]"
},
{
"url": "http://hl7.org/fhir/StructureDefinition/instance-description",
"valueMarkdown": "This is a Prapare Multiselect Example for the *US Core Screening Response Observation Profile*."
}
],
"profile": [
"http://hl7.org/fhir/us/core/StructureDefinition/us-core-observation-screening-assessment"
]
},
"status": "final",
"category": [
{
"coding": [
{
"system": "http://terminology.hl7.org/CodeSystem/observation-category",
"code": "survey",
"display": "Survey"
}
]
}
],
"code": {
"coding": [
{
"system": "http://loinc.org",
"code": "code",
        "display": "display"
}
]
},
"subject": {
"reference": "Patient/example"
},
"effectiveDateTime": "2022-03-28T18:30:40-07:00",
"performer": [
{
"reference": "Patient/example"
}
],
"hasMember": [
{
"reference": "reference",
"display": "display"
}
],
"derivedFrom": [
{
"reference": "reference",
"display": "display"
}
]
}
docref = {
"resourceType" : "DocumentReference",
"id" : "docref",
"identifier" : [{
"system" : "urn:ietf:rfc:3986",
"value" : "urn:oid:2.16.840.1.113883.19.5.99999.1"
}],
"status" : "current",
"type" : {
"coding" : [{
"system" : "http://loinc.org",
"code" : "34133-9",
"display" : "Summary of episode note"
}],
"text" : "CCD Document"
},
"category" : [{
"coding" : [{
"system" : "http://hl7.org/fhir/us/core/CodeSystem/us-core-documentreference-category",
"code" : "clinical-note",
"display" : "Clinical Note"
}],
"text" : "Clinical Note"
}],
"subject": {
"reference": "Patient/example"
},
"created": "2022-01-01T12:00:00Z",
"indexed": "2022-01-01T12:00:00Z",
"author": [
{
"reference": "Practitioner/example"
}
],
"content": [
{
"attachment": {
"contentType": "application/pdf",
"data": "JVBERi0xLjQKJ...",
"title": "PatientHealthQuestionnaire-2_v1.0_2014Jul2"
}
}
]
}
import sqlalchemy
# The Postgres and SQLite driver has different syntax for parameterized SQL. SQLAlchemy handles that with the text()
# wrapper and gives us a driver neutral way of creating a new connection.
from sqlalchemy.sql.expression import text
import argparse
# Python 2 command-line script: copies rows of the Geocode cache table from
# one database to another, inserting new queries and filling in lat/lng for
# existing rows that have none.
parser = argparse.ArgumentParser(description='Copy the geocode cache data from one database to another.')
parser.add_argument('fromdb')
parser.add_argument('todb')
args = parser.parse_args()
import_into = sqlalchemy.create_engine(args.todb).connect()
import_from = sqlalchemy.create_engine(args.fromdb).connect()
print 'existing records:', import_into.execute('select count(*) from Geocode').fetchone()
print 'importing records:', import_from.execute('select count(*) from Geocode').fetchone()
updated = 0
count = 0
inserted = 0
trans = import_into.begin()
for row in import_from.execute('select query, lat, lng from Geocode'):
    count += 1
    query, lat, lng = row
    # Keep the tail of over-long queries so they fit the 255-char column.
    if len(query) > 255:
        query = query[-255:]
    try:
        existing = import_into.execute(text('select lat, lng from Geocode where query=:query'), query=query).fetchone()
        if existing:
            # Only overwrite None values
            if existing[0] is None and not lat is None:
                updated += 1
                import_into.execute(text('update Geocode set lat=:lat, lng=:lng where query=:query'),
                                    lat=lat, lng=lng, query=query)
        else:
            # source/json are not copied; only coordinates are migrated.
            import_into.execute(
                text('insert into Geocode (query, lat, lng, source, json) values (:query, :lat, :lng, :source, :json)'),
                query=query, lat=lat, lng=lng, source=None, json=None)
            inserted += 1
    except sqlalchemy.exc.DataError:
        print "Bad Unicode: ", query
        # NOTE(review): committing after a DataError assumes the transaction
        # is still usable -- some backends (e.g. Postgres) abort it; a
        # rollback may be required here. Confirm against the target driver.
        trans.commit()
        trans = import_into.begin()
    # Commit in batches of 1000 and print progress.
    if count % 1000 == 0:
        print count, updated, inserted
        trans.commit()
        trans = import_into.begin()
trans.commit()
print 'final records:', import_into.execute('select count(*) from Geocode').fetchone()
import os
import unittest
from carbon.hashing import ConsistentHashRing
class HashIntegrityTest(unittest.TestCase):
    """
    Verify that ConsistentHashRing clusters of 2-9 nodes place every node
    entry at a unique ring position (no positional collisions).
    """

    def _check_positional_integrity(self, node_count):
        """Build a ring with `node_count` nodes; assert all ring positions are unique."""
        ring = ConsistentHashRing([])
        for n in range(node_count):
            ring.add_node(("192.168.10.%s" % str(10 + n), "%s" % str(10 + n)))
        positions = [entry[0] for entry in ring.ring]
        self.assertEqual(len(positions), len(set(positions)))

    def test_2_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(2)

    def test_3_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(3)

    def test_4_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(4)

    def test_5_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(5)

    def test_6_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(6)

    def test_7_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(7)

    def test_8_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(8)

    def test_9_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._check_positional_integrity(9)
import dataset
import xlrd
import math
import numpy as np
import matplotlib.pyplot as plt
# Script setup: load per-board calibration data from an Excel workbook into
# an in-memory `dataset` table so PPM()/plot() below can look rows up by board.
# Example inputs for the functions below:
# BOARDNAME = '04A3E2BD29' # name of the board
# CHEMICAL = 'NO2' # chemical (either IRR, IAQ, SO2, H2S, OZO, NO2, or CMO)
# INSTANTTEMP = 26.0 # the temperature at the moment of measurement
# INSTANTCURRENT = 2492 # the current at the moment of measurement
ZEROTEMP = 25.0 # initial temperature
directory = "C:/Users/Caeley/Documents/Argonne/Code/Chemsense_Spec_Data.xlsx" # directory of the sensor data
# creates a dataset and opens the excel sensor data
db = dataset.connect()
table = db.create_table('stats')
xl = xlrd.open_workbook(directory, "rb")
sheet = xl.sheet_by_name('Sheet1')
# inserts every column of the excel workbook into the dataset
# column names: board, IRR_baseline,IAQ_baseline, SO2_baseline, H2S_baseline,
#               OZO_baseline, NO2_baseline, CMO_baseline, IRR_M, IAQ_M, SO2_M,
#               H2S_M, OZO_M, NO2_M,CMO_M, IRR_Sensitivity, IAQ_Sensitivity,
#               SO2_Sensitivity, H2S_Sensitivity, OZO_Sensitivity,
#               NO2_Sensitivity, and CMO_Sensitivity
# NOTE: row 0 of the sheet is inserted too -- presumably a header row; confirm.
for rownum in range(sheet.nrows):
    rowValues = sheet.row_values(rownum)
    table.insert(dict(board = rowValues[0], IRR_baseline = rowValues[1],
                      IAQ_baseline = rowValues[2], SO2_baseline = rowValues[3],
                      H2S_baseline = rowValues[4], OZO_baseline = rowValues[5],
                      NO2_baseline = rowValues[6], CMO_baseline = rowValues[7],
                      IRR_M = rowValues[8], IAQ_M = rowValues[9], SO2_M = rowValues[10],
                      H2S_M = rowValues[11], OZO_M = rowValues[12], NO2_M = rowValues[13],
                      CMO_M = rowValues[14], IRR_Sensitivity = rowValues[15],
                      IAQ_Sensitivity = rowValues[16], SO2_Sensitivity = rowValues[17],
                      H2S_Sensitivity = rowValues[18], OZO_Sensitivity = rowValues[19],
                      NO2_Sensitivity = rowValues[20], CMO_Sensitivity = rowValues[21]))
# function returns the PPM
def PPM(BOARDNAME, CHEMICAL, INSTANTTEMP, INSTANTCURRENT):
    """
    Return the concentration in PPM (as a string rounded to two decimal
    places) for a reading from the given board.

    BOARDNAME      -- board name to look up in the calibration table
    CHEMICAL       -- chemical column prefix (IRR, IAQ, SO2, H2S, OZO, NO2, CMO)
    INSTANTTEMP    -- temperature at the moment of measurement
    INSTANTCURRENT -- current at the moment of measurement
    """
    results = table.find(board=BOARDNAME)
    # Take calibration values from the matching row (if the board appears
    # more than once, the last row wins, as in the original implementation).
    baseline = MValue = sensitivity = None
    for row in results:
        baseline = float(row[CHEMICAL + '_baseline'])
        MValue = float(row[CHEMICAL + '_M'])
        sensitivity = float(row[CHEMICAL + '_Sensitivity'])
    # An 'INFINITY' spreadsheet cell parses to float('inf'), so the float
    # must be tested with math.isinf (the old comparison of the float
    # against the string 'INFINITY' was always False; both branches happen
    # to agree numerically because exp(x/inf) == exp(0) == 1).
    if math.isinf(MValue):
        # Infinite time constant: no temperature correction applies.
        ICORRECTED = INSTANTCURRENT - baseline
    else:
        ICORRECTED = INSTANTCURRENT - baseline * math.exp((INSTANTTEMP - ZEROTEMP) / MValue)
    return str(round(ICORRECTED / sensitivity, 2))
# print(PPM('04A3E2BD29', 'NO2', 26.0, 2492))
#function plots PPM vs. Temperature
def plot(BOARDNAME, CHEMICAL, INSTANTCURRENT):
    """
    Plot PPM versus temperature over -40..44 degrees for the given board,
    chemical and instantaneous current, then show the figure.
    """
    results = table.find(board=BOARDNAME)
    baseline = MValue = sensitivity = None
    for row in results:
        baseline = float(row[CHEMICAL + '_baseline'])
        MValue = float(row[CHEMICAL + '_M'])
        sensitivity = float(row[CHEMICAL + '_Sensitivity'])
    temps = range(-40, 45)
    ICorrectedList = []
    for INSTANTTEMP in temps:
        # See PPM(): float('INFINITY') parses to inf, so test the float
        # directly instead of comparing it to the string 'INFINITY'.
        if math.isinf(MValue):
            ICORRECTED = INSTANTCURRENT - baseline  # no temperature correction
        else:
            ICORRECTED = INSTANTCURRENT - baseline * math.exp((INSTANTTEMP - ZEROTEMP) / MValue)
        ICorrectedList.append(ICORRECTED / sensitivity)
    plt.xlabel('Temperature')
    plt.ylabel('PPM')
    plt.title('Temperature vs PPM')
    plt.grid(True)
    plt.plot(np.array(temps), np.array(ICorrectedList))
    plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.