text stringlengths 957 885k |
|---|
<filename>GoogleCloudMLEngine/experiments/transferLearningTest1.py<gh_stars>0
# trying the advice of this site:
# https://towardsdatascience.com/keras-transfer-learning-for-beginners-6c9b8b7143e
# Note that I had to create a test set and rename images to test in the downloaded gs://sportseventdetection-football directory
# so that it would confirm to the flow_from_directory convention:
# https://medium.com/@vijayabhaskar96/tutorial-image-classification-with-keras-flow-from-directory-and-generators-95f75ebe5720
import pandas as pd
import numpy as np
import os
import keras
import matplotlib.pyplot as plt
from keras.layers import Dense,GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
# Build a transfer-learning classifier: MobileNet convolutional base (ImageNet
# weights, classification head removed) + a new dense head for 2 classes.
base_model=MobileNet(weights='imagenet',include_top=False) #imports the mobilenet model and discards the last 1000 neuron layer.
x=base_model.output
x=GlobalAveragePooling2D()(x)  # collapse spatial dims to one vector per image
x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
x=Dense(1024,activation='relu')(x) #dense layer 2
x=Dense(512,activation='relu')(x) #dense layer 3
preds=Dense(2,activation='softmax')(x) #final layer with softmax activation. First argument is the number of classes
model=Model(inputs=base_model.input,outputs=preds) #specify the inputs and outputs
#now a model has been created based on our architecture
# for layer in model.layers:
# layer.trainable=False
# Freeze only the first 5 layers; everything after layer 5 (including the new
# head) is fine-tuned. NOTE(review): an earlier comment said "first 10 layers",
# but the slice below freezes 5 — confirm which was intended.
for layer in model.layers[:5]:
    layer.trainable=False
for layer in model.layers[5:]:
    layer.trainable=True
# Training pipeline: stream images from class-named subdirectories, applying
# MobileNet's own preprocessing to each batch.
train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input) #included in our dependencies
train_generator=train_datagen.flow_from_directory('./../images/sportseventdetection-football/train/',
                                                  target_size=(224,224),  # MobileNet's expected input size
                                                  color_mode='rgb',
                                                  batch_size=8,
                                                  class_mode='categorical',
                                                  shuffle=True)
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Adam optimizer
# loss function will be categorical cross entropy
# evaluation metric will be accuracy
# One epoch = enough steps to see (approximately) every training image once.
step_size_train=train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
                    steps_per_epoch=step_size_train,
                    epochs=5)
model.save('model3.h5')  # persist full model (architecture + weights)
# Evaluation: run the trained model over the held-out test set and write one
# prediction per image to results.csv.
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)  # included in our dependencies
test_generator = test_datagen.flow_from_directory(
    directory='./../images/sportseventdetection-football/test/',
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=1,     # one image per step so every file gets exactly one prediction
    class_mode=None,  # unlabeled: the generator yields images only
    shuffle=False,    # keep order aligned with test_generator.filenames
    seed=42
)
STEP_SIZE_TEST = test_generator.n // test_generator.batch_size
test_generator.reset()
pred = model.predict_generator(test_generator,
                               steps=STEP_SIZE_TEST,
                               verbose=1)
predicted_class_indices = np.argmax(pred, axis=1)
# Invert the class->index mapping learned from the training directories so we
# can report human-readable class names.
labels = train_generator.class_indices
labels = dict((v, k) for k, v in labels.items())
predictions = [labels[k] for k in predicted_class_indices]
filenames = test_generator.filenames
results = pd.DataFrame({"Filename": filenames,
                        "Predictions": predictions})
# Bug fix: the original final line carried a stray trailing '|' character
# (extraction residue) which made the statement a syntax error.
results.to_csv("results.csv", index=False)
<reponame>gwk/glossy<gh_stars>0
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
import re
from dataclasses import dataclass
from functools import singledispatchmethod
from typing import Iterable, List, Optional, Tuple
from docutils import frontend as _frontend, nodes as _nodes
from docutils.nodes import Node as Node, Text
from docutils.parsers.rst import Parser as _RstParser
from docutils.utils import new_document as _new_document
from ..io import errL
from ..tree import OmitNode, transform_tree
from . import Syntax
def parse_rst(path:str, text:str) -> Syntax:
  '''
  Parse reStructuredText `text` (read from `path`) into a `Syntax` tree.

  Uses docutils to produce a document node tree, then walks it with
  `transform_tree`, dispatching each node through `_Ctx.visit` to attach
  line/column positions (which docutils does not track for all nodes).
  '''
  parser = _RstParser()
  settings = _frontend.OptionParser(components=(_RstParser,)).get_default_values()
  document = _new_document(path, settings=settings)
  parser.parse(text, document)
  # keepends=True so matched text (including trailing newlines) aligns with source lines.
  ctx = _Ctx(path=path, text=text, lines=text.splitlines(keepends=True))
  return transform_tree(document, _get_children, ctx.visit)
@dataclass
class Ref:
  '''A parsed RST reference: display text plus an optional link target.'''
  text: Syntax
  target: Optional[Syntax]

  def __iter__(self):
    # Yields the text, then the target only when present (so the target is
    # skipped when the URI was already part of the visible text).
    yield self.text
    if self.target: yield self.target
def _get_children(node:Node) -> Iterable[Node]:
  '''Child accessor passed to `transform_tree` for docutils nodes.'''
  return node.children
@dataclass
class _Ctx:
  '''
  Parse context: tracks a cursor (line, col) over the source text so that each
  visited docutils node can be assigned an accurate source position.
  '''
  path: str         # source file path, used for diagnostics and Syntax nodes.
  text: str         # full source text.
  lines: List[str]  # source split with keepends=True.
  line: int = 0     # current 0-based line of the match cursor.
  col: int = 0      # current 0-based column of the match cursor.

  @property
  def curr_line_text(self) -> str: return self.lines[self.line]

  def match_text(self, text:str, skip_leading:bool, label:str, raises=False) -> Syntax.Pos:
    'Match a single line of text, advancing self.line and self.col.'
    if skip_leading:
      # Skip over lines that are pure RST markup (adornments, whitespace)
      # from the current column onward.
      while rst_syntax_re.fullmatch(self.curr_line_text, self.col):
        #errL(f'SKIPPED: {self.curr_line_text[self.col:]!r}')
        self.line += 1
        self.col = 0
    line = self.line
    col = self.curr_line_text.find(text, self.col)
    if col == -1:
      # Not found on the current line: either raise or warn and fall back.
      if raises: raise ValueError(text)
      errL(Syntax.diagnostic(self.path, line=self.line, col=self.col,
        msg=f'warning: {label} not matched: {text!r}\n {self.curr_line_text!r}'))
      col = self.col # better than nothing.
      end_col = -1
    else: # matched.
      end_col = col+len(text)
      if text.endswith('\n'):
        # Matched through end-of-line: advance cursor to the next line.
        self.line +=1
        self.col = 0
      else:
        self.col = end_col
    #errL(f'MATCHED: {self.line}:{self.col}: {text!r}')
    return Syntax.Pos(line=line, col=col, end_line=self.line, end_col=end_col)

  @singledispatchmethod
  def visit(self, node:Node, stack:Tuple[Node, ...], children:List[Node]) -> Syntax:
    'Default visitor.'
    # docutils lines are 1-based when present; -1 flags "unknown".
    pos = Syntax.Pos(line=(-1 if node.line is None else node.line-1), enclosed=children)
    return Syntax(path=self.path, pos=pos, kind=_kind_for(node), content=children)

  @visit.register # type: ignore[no-redef]
  def visit(self, node:Text, stack:Tuple[Node, ...], children:List[Node]) -> Syntax:
    'Text visitor. Determines line/col position post-hoc, which docutils does not provide.'
    assert node.line is None # Text never has line number.
    text = node.astext()
    text_lines = text.splitlines(keepends=True)
    if len(text_lines) == 1:
      pos = self.match_text(text, skip_leading=True, label='text')
      return Syntax(path=self.path, pos=pos, kind='text', content=text)
    # multiline text blocks: match each physical line separately, only
    # skipping markup before the first line.
    children = []
    for i, text_line in enumerate(text_lines):
      is_lead = not i
      label = 'lead multiline text' if is_lead else 'tail multiline text'
      pos = self.match_text(text_line, skip_leading=is_lead, label=label)
      children.append(Syntax(path=self.path, pos=pos, kind='text', content=text_line))
    pos = Syntax.Pos(enclosed=children)
    return Syntax(path=self.path, pos=pos, kind='lines', content=tuple(children))

  @visit.register # type: ignore[no-redef]
  def visit(self, node:_nodes.reference, stack:Tuple[Node, ...], children:List[Node]) -> Syntax:
    # A reference wraps exactly one already-visited text child.
    assert len(children) == 1
    text = children[0]
    assert isinstance(text, Syntax)
    attrs = node.attributes
    uri = attrs.get('refuri')
    target = None
    if uri:
      try: pos = self.match_text(uri, skip_leading=True, label='ref uri', raises=True)
      except ValueError: pass # might have been extracted from the text.
      else: target = Syntax(path=self.path, pos=pos, kind='target', content=uri)
    pos = Syntax.Pos(enclosed=(text, target))
    content = Ref(text=text, target=target)
    return Syntax(path=self.path, pos=pos, kind='ref', content=content)

  @visit.register # type: ignore[no-redef]
  def visit(self, node:_nodes.target, stack:Tuple[Node, ...], children:List[Node]) -> None:
    # Target nodes carry no content of their own; drop them from the tree.
    raise OmitNode
# Matches (possibly empty) runs of RST adornment/markup characters; used to
# skip lines that are only markup syntax and not content.
rst_syntax_re = re.compile(r'[~=\s\-_<>`:]*') # matches lines that are only markup syntax and not content.

def _kind_for(node:Node) -> str: return type(node).__name__
|
from __future__ import with_statement
import logging
import time
from concurrence import unittest, Tasklet, TaskLocal, TaskInstance
class TestTaskLocal(unittest.TestCase):
    """test tasklet local storage"""
    # NOTE(review): this is Python-2-era code (assertEquals, range() compared
    # to a list) for the stackless `concurrence` framework — confirm target
    # interpreter before modernizing.

    def testSingleTask(self):
        # Basic attribute get/set/delete semantics within a single task.
        local = TaskLocal()
        local.piet = 10
        self.assertEquals(10, local.piet)
        try:
            x = local.klaas
            self.fail('expected attribute error')
        except AttributeError:
            pass
        local.piet = 20
        self.assertEquals(True, hasattr(local, 'piet'))
        self.assertEquals(False, hasattr(local, 'klaas'))
        self.assertEquals(20, local.piet)
        del local.piet
        self.assertEquals(False, hasattr(local, 'piet'))
        try:
            x = local.piet
            self.fail('expected attribute error')
        except AttributeError:
            pass

    def testMultiTask(self):
        # Two tasks write the same attribute; each must see only its own value.
        local = TaskLocal()
        def t():
            local.piet = []
            for i in range(10):
                local.piet.append(i)
                Tasklet.yield_()  # interleave with the sibling task
            self.assertEquals(range(10), local.piet)
        t1 = Tasklet.new(t)()
        t2 = Tasklet.new(t)()
        Tasklet.join_all([t1,t2])
        self.assertEquals(2, len(local._d.keys())) #the 2 tasks are sill around, so local keeps their values
        #check that values are gone from dict
        #when tasks are gone
        del t1
        del t2
        #we need to yield, because our 2 tasks were suspended by the join
        #yield will run the scheduler again, so our tasks can properly finish
        #the only strange thing is we need 2 yields for python, stackless requires just 1
        Tasklet.yield_()
        Tasklet.yield_()
        self.assertEquals([], local._d.keys())

    def testRecursive(self):
        #non-recursive: a child task must NOT see the parent's value.
        local = TaskLocal()
        local.piet = 20
        def t():
            try:
                local.piet
                self.fail('expected attr error')
            except AttributeError:
                pass
        Tasklet.join(Tasklet.new(t)())
        #recursive: TaskLocal(True) lets child tasks inherit the parent's value.
        local = TaskLocal(True)
        local.piet = 30
        def t():
            self.assertEquals(30, local.piet)
        Tasklet.join(Tasklet.new(t)())
class Adder(object):
    """Minimal value object used by the TaskInstance tests."""

    def __init__(self, x):
        # Base operand, read back by sum().
        self.x = x

    def sum(self, y):
        """Return self.x + y."""
        total = self.x + y
        return total
class TestTaskInstance(unittest.TestCase):
    """Tests TaskInstance: a proxy whose backing object is scoped to a task
    (and, with TaskInstance(True), inherited by child tasks)."""

    def testTaskInstance(self):
        AdderInstance = TaskInstance(True)
        # No instance set yet: attribute access must fail.
        try:
            AdderInstance.sum(10)
            self.fail('expected attribute error')
        except AttributeError:
            pass
        def t():
            return AdderInstance.sum(20)
        with AdderInstance.set(Adder(10)):
            self.assertEquals(30, AdderInstance.sum(20))
            #check that child task can also find it
            self.assertEquals(30, Tasklet.join(Tasklet.new(t)()))
        #should have been unset when the with-block exited
        try:
            AdderInstance.sum(10)
            self.fail('expected attribute error')
        except AttributeError:
            pass

    def testTaskInstance2(self):
        AdderInstance = TaskInstance(True)
        with AdderInstance.set(Adder(10)):
            self.assertEquals(30, AdderInstance.sum(20))
            #now start 2 child tasks
            def t():
                self.assertEquals(30, AdderInstance.sum(20)) #expect to find parents instance
                #now set my own instance
                with AdderInstance.set(Adder(20)):
                    self.assertEquals(40, AdderInstance.sum(20))
                #now it must be unset, and we will find parents instance instead
                self.assertEquals(30, AdderInstance.sum(20))
            t1 = Tasklet.new(t)()
            t2 = Tasklet.new(t)()
            Tasklet.join_all([t1, t2])
            self.assertEquals(30, AdderInstance.sum(20))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from datetime import date, datetime
from .rest_client import RestAPIClient
class Tempo(RestAPIClient):
    """
    Basic Client for accessing Tempo Rest API as provided by api.tempo.io.

    Every ``get_*`` helper delegates to :meth:`get`, which transparently
    follows Tempo's ``metadata.next`` pagination links and returns the
    concatenated ``results`` list (or the raw payload for single items).
    """

    def __init__(self, auth_token, base_url="https://api.tempo.io/core/3", limit=1000):
        """
        :param auth_token: Tempo API bearer token, passed to RestAPIClient.
        :param base_url: API root; all request paths are joined onto this.
        :param limit: page size for paginated endpoints (1000 is the Tempo maximum).
        """
        self._limit = limit  # default limit for pagination (1000 is maximum for Tempo API)
        self._base_url = base_url
        super().__init__(auth_token=auth_token)

    def _resolve_date(self, value):
        """Coerce ``value`` (datetime, date, or 'YYYY-MM-DD' string) to a ``date``.

        :raises ValueError: if a string value does not match ``%Y-%m-%d``.
        """
        if isinstance(value, datetime):
            return value.date()
        if isinstance(value, date):
            return value
        return datetime.strptime(value, r"%Y-%m-%d").date()

    def get(self, path, data=None, flags=None, params=None, headers=None, not_json_response=None, trailing=None):
        """GET ``path`` under the configured base URL.

        Payloads without a ``results`` key are returned as-is (single item).
        Otherwise all pages are fetched by following ``metadata.next`` and the
        combined ``results`` list is returned.
        """
        path_absolute = super().url_joiner(self._base_url, path)
        resp = super().get(path_absolute, data=data, flags=flags, params=params, headers=headers,
                           not_json_response=not_json_response, trailing=trailing)
        # single item returned
        if 'results' not in resp:
            return resp
        # multiple items
        results = resp['results']
        # Robustness fix: `or {}` guards against a payload with 'results' but
        # no 'metadata' key, which previously raised TypeError ('in' on None).
        while 'next' in (resp.get('metadata') or {}):
            resp = super().get(resp['metadata']['next'])
            results.extend(resp['results'])
        return results

    # Accounts
    def get_accounts(self):
        """
        Retrieves existing accounts.
        """
        return self.get("/accounts")

    # Account - Categories
    def get_account_categories(self):
        """
        Retrieves existing account categories.
        """
        return self.get("/account-categories")

    # Account - Category - Types
    def get_account_category_types(self):
        """
        Retrieves existing account category types.
        (Doc fix: the original docstring was copy-pasted from the periods endpoint.)
        """
        return self.get("/account-category-types")

    # Account - Links
    ## TBD

    # Customers
    def get_customers(self, key=None):
        """
        Retrieves all customers, or a single customer.
        :param key: Return customer for ```key```.
        """
        url = "/customers"
        if key:
            url += f"/{key}"
        return self.get(url)

    # Plans
    def get_plans(self, dateFrom, dateTo, assigneeType=None, planItemType=None, updatedFrom=None, id=None, userId=None):
        """
        Retrieves plans or plan.
        :param dateFrom:
        :param dateTo:
        :param assigneeType:
        :param planItemType:
        :param updatedFrom:
        :param id: Plan id
        :param userId: ```AccountId``` for user in Tempo
        """
        params = {
            "from": self._resolve_date(dateFrom).isoformat(),
            "to": self._resolve_date(dateTo).isoformat(),
            "offset": 0,
            "limit": self._limit
        }
        if assigneeType:
            params['assigneeType'] = assigneeType
        if planItemType:
            params['planItemType'] = planItemType
        if updatedFrom:
            params['updatedFrom'] = self._resolve_date(updatedFrom).isoformat()
        url = "/plans"
        if id:
            url += f"/{id}"
        elif userId:
            # Bug fix: the original appended "/plans/user/{userId}" onto the
            # existing "/plans", producing "/plans/plans/user/{userId}".
            url += f"/user/{userId}"
        return self.get(url, params=params)

    # Programs
    ## TBD

    # Roles
    ## TBD

    # Teams
    def get_teams(self, teamId=None):
        """
        Returns teams information.
        :param teamId: Returns details for team ```teamId```.
        """
        url = "/teams"
        if teamId:
            url += f"/{teamId}"
        return self.get(url)

    def get_team_members(self, teamId):
        """
        Returns members for particular team.
        :param teamId: teamId
        """
        return self.get(f"/teams/{teamId}/members")

    # Team - Links
    ## TBD

    # Team - Memberships
    def get_team_memberships(self, membershipId):
        """
        Returns members.
        :param membershipId:
        """
        return self.get(f"/team-memberships/{membershipId}")

    def get_account_team_membership(self, teamId, accountId):
        """
        Returns the active team membership.
        :param accountId:
        :param teamId:
        """
        return self.get(f"/teams/{teamId}/members/{accountId}")

    def get_account_team_memberships(self, teamId, accountId):
        """
        Returns all team memberships.
        :param accountId:
        :param teamId:
        """
        return self.get(f"/teams/{teamId}/members/{accountId}/memberships")

    # Periods
    def get_periods(self, dateFrom, dateTo):
        """
        Retrieves all periods for a given date range as a list.
        :param dateFrom:
        :param dateTo:
        """
        params = {
            "from": self._resolve_date(dateFrom).isoformat(),
            "to": self._resolve_date(dateTo).isoformat()
        }
        return self.get("/periods", params=params)

    # Timesheet Approvals
    def get_timesheet_approvals_waiting(self):
        """
        Retrieve waiting timesheet approvals
        """
        return self.get("/timesheet-approvals/waiting")

    def get_timesheet_approvals(self, dateFrom=None, dateTo=None, userId=None, teamId=None):
        """
        Retrieves timesheet approvals.
        :param dateFrom:
        :param dateTo:
        :param userId:
        :param teamId:
        """
        params = {}
        if dateFrom:
            params["from"] = self._resolve_date(dateFrom).isoformat()
        if dateTo:
            params["to"] = self._resolve_date(dateTo).isoformat()
        url = "/timesheet-approvals"
        if userId:
            url += f"/user/{userId}"
        elif teamId:
            url += f"/team/{teamId}"
        return self.get(url, params=params)

    # User Schedule
    def get_user_schedule(self, dateFrom, dateTo, userId=None):
        """
        Returns user schedule.
        :param dateFrom:
        :param dateTo:
        :param userId:
        """
        params = {
            "from": self._resolve_date(dateFrom).isoformat(),
            "to": self._resolve_date(dateTo).isoformat()
        }
        url = "/user-schedule"
        if userId:
            url += f"/{userId}"
        return self.get(url, params=params)

    # Work Attributes
    def get_work_attributes(self):
        """
        Returns worklog attributes.
        """
        return self.get("/work-attributes")

    # Workload Schemes
    def get_workload_schemes(self, id=None):
        """Returns all workload schemes, or the one identified by ``id``."""
        url = "/workload-schemes"
        if id:
            url += f"/{id}"
        return self.get(url)

    # Holiday Schemes
    def get_holiday_schemes(self, holidaySchemeId=None):
        """Returns all holiday schemes, or the holidays of ``holidaySchemeId``."""
        url = "/holiday-schemes"
        if holidaySchemeId:
            url += f"/{holidaySchemeId}/holidays"
        return self.get(url)

    # Worklogs
    def get_worklogs(self, dateFrom, dateTo, updatedFrom=None, worklogId=None, jiraWorklogId=None, jiraFilterId=None,
                     accountKey=None, projectKey=None, teamId=None, accountId=None, issueId=None):
        """
        Returns worklogs for particular parameters. The first non-None id/key
        argument (in the order below) selects the endpoint variant.
        :param dateFrom:
        :param dateTo:
        :param updatedFrom:
        :param worklogId:
        :param jiraWorklogId:
        :param jiraFilterId:
        :param accountKey:
        :param projectKey:
        :param teamId:
        :param accountId:
        :param issueId:
        """
        params = {
            "from": self._resolve_date(dateFrom).isoformat(),
            "to": self._resolve_date(dateTo).isoformat(),
            "offset": 0,
            "limit": self._limit
        }
        if updatedFrom:
            params["updatedFrom"] = self._resolve_date(updatedFrom).isoformat()
        url = "/worklogs"
        if worklogId:
            url += f"/{worklogId}"
        elif jiraWorklogId:
            url += f"/jira/{jiraWorklogId}"
        elif jiraFilterId:
            url += f"/jira/filter/{jiraFilterId}"
        elif accountKey:
            url += f"/account/{accountKey}"
        elif projectKey:
            url += f"/project/{projectKey}"
        elif teamId:
            url += f"/team/{teamId}"
        elif accountId:
            url += f"/user/{accountId}"
        elif issueId:
            url += f"/issue/{issueId}"
        return self.get(url, params=params)
|
# This file is called separately from the rest of the program. This file takes the original data and creates cleaner csvs for app.py to use
import gsw
import numpy as np
import pandas as pd
# all of the parameters from the full data: 'yyyy-mm-ddThh:mm:ss.sss', 'Longitude [degrees_east]', 'Latitude [degrees_north]',
# 'PRESSURE [dbar]', 'DEPTH [m]', 'CTDTMP [deg C]', 'CTDSAL', 'SALINITY_D_CONC_BOTTLE', 'SALINITY_D_CONC_PUMP',
# 'SALINITY_D_CONC_FISH', 'SALINITY_D_CONC_UWAY', 'NITRATE_D_CONC_BOTTLE [umol/kg]', 'NITRATE_D_CONC_PUMP [umol/kg]',
# 'NITRATE_D_CONC_FISH [umol/kg]', 'NITRATE_D_CONC_UWAY [umol/kg]', 'NITRATE_LL_D_CONC_BOTTLE [umol/kg]',
# 'NITRATE_LL_D_CONC_FISH [umol/kg]', 'NO2+NO3_D_CONC_BOTTLE [umol/kg]', 'NO2+NO3_D_CONC_FISH [umol/kg]',
# 'Fe_D_CONC_BOTTLE [nmol/kg]', 'Fe_D_CONC_FISH [nmol/kg]', 'Fe_II_D_CONC_BOTTLE [nmol/kg]', 'Fe_II_D_CONC_FISH [nmol/kg]',
# 'Fe_S_CONC_BOTTLE [nmol/kg]', 'Fe_S_CONC_FISH [nmol/kg]'
def average_data(cruise_data):
    """Average together rows taken at the exact same station, position and depth.

    Grouping is by (Latitude, Longitude, Station, Depth); all remaining numeric
    columns are replaced by their group mean.
    """
    # from https://stackoverflow.com/questions/48830324/pandas-average-columns-with-same-value-in-other-columns
    group_cols = ["Latitude", "Longitude", "Station", "Depth"]
    averaged = cruise_data.groupby(group_cols, as_index=False).mean()
    return averaged
def remove_empty_data(cruise_data):
    """Drop every station whose Iron column is entirely NaN.

    Rows are grouped by (Latitude, Longitude, Station); a group with at least
    one non-NaN Iron value is kept intact. Original row order is preserved.
    """
    indices_to_drop = []
    for _, group in cruise_data.groupby(["Latitude", "Longitude", "Station"]):
        if group["Iron"].isna().values.all():
            indices_to_drop.extend(group.index)
    return cruise_data.drop(indices_to_drop)
def get_nitrate(cruise_data, index, row):
    """Average nitrate near a sample's depth at the same lat/lon position.

    The averaging window widens with depth: +/-5 m for samples at or above
    100 m, +/-10 m below 100 m.

    :param cruise_data: full cruise DataFrame with Depth/Longitude/Latitude/Nitrate.
    :param index: row index (unused; kept for interface compatibility).
    :param row: the sample row whose neighborhood is averaged.
    :return: mean nitrate in the window (NaN if no rows match).
    """
    current_depth = row["Depth"]
    # Fix: locals renamed from `min`/`max`, which shadowed the builtins.
    # NaN depth now yields an empty window and NaN (previously: TypeError
    # from comparing against an unset None bound).
    window = 5 if current_depth <= 100 else 10
    depth_min, depth_max = current_depth - window, current_depth + window
    lon = row["Longitude"]
    lat = row["Latitude"]
    avg_nitrate = cruise_data["Nitrate"][
        (
            (cruise_data.Depth <= depth_max)
            & (cruise_data.Depth >= depth_min)
            & (cruise_data.Longitude == lon)
            & (cruise_data.Latitude == lat)
        )
    ].mean()
    return avg_nitrate
def add_ratio_data(cruise_data):
    """Add 'Averaged Nitrate' and 'Ratio' (nitrate/iron) columns in place."""
    # Average nitrate in a depth window around each sample point.
    averaged_nitrate = [
        get_nitrate(cruise_data, idx, sample)
        for idx, sample in cruise_data.iterrows()
    ]
    cruise_data["Averaged Nitrate"] = averaged_nitrate
    # Ratio = averaged nitrate divided element-wise by dissolved iron.
    cruise_data["Ratio"] = np.array(averaged_nitrate) / cruise_data["Iron"]
def add_density_data(cruise_data):
    """Add a 'Density' column (potential density anomaly sigma0) in place.

    Uses the gsw library: http://www.teos-10.org/pubs/gsw/html/gsw_sigma0.html
    """
    # Convert practical salinity to absolute salinity, which sigma0 requires.
    absolute_salinity = gsw.SA_from_SP(
        cruise_data["Salinity"],
        cruise_data["Pressure"],
        cruise_data["Longitude"],
        cruise_data["Latitude"],
    )
    cruise_data["Density"] = gsw.sigma0(absolute_salinity, cruise_data["Temperature"])
# read in original data: one CSV per GEOTRACES cruise section.
GA03_data = pd.read_csv("./data/GA03w.csv")
GIPY05_data = pd.read_csv("./data/GIPY05e.csv")
GP02_data = pd.read_csv("./data/GP02w.csv")
GIPY04_data = pd.read_csv("./data/GIPY04.csv")
# the headers for our clean data; order must match the per-cruise `data`
# lists concatenated below.
headers = [
    "Station",
    "Date",
    "Latitude",
    "Longitude",
    "Depth",
    "Temperature",
    "Salinity",
    "Nitrate",
    "Iron",
    "Pressure",
]
# make GA03 dataframe and csv
data = [
    GA03_data["Station"],
    GA03_data["yyyy-mm-ddThh:mm:ss.sss"],
    GA03_data["Latitude [degrees_north]"],
    GA03_data["Longitude [degrees_east]"],
    GA03_data["DEPTH [m]"],
    GA03_data["CTDTMP [deg C]"],
    GA03_data["CTDSAL"],
    GA03_data["NITRATE_D_CONC_BOTTLE [umol/kg]"],
    GA03_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
    GA03_data["PRESSURE [dbar]"],
]
GA03 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats: keep 295-300 degE (i.e. 65W-60W) or >= 335 degE.
GA03 = GA03[
    ((GA03.Longitude <= 360 - 60) & (GA03.Longitude >= 360 - 65))
    | (GA03.Longitude >= 360 - 25)
]
# GA03 = average_data(GA03)
add_ratio_data(GA03)
add_density_data(GA03)
GA03 = remove_empty_data(GA03)  # remove empty iron data
GA03 = GA03[(GA03.Depth <= 500)]  # only keep data less than 500m depth
GA03["Date"] = GA03.Date.str.split("T").str[
    0
]  # only keep the day,month,year of the date
# Stations 10 and 11 appear on both sides of ~310 degE; disambiguate them by
# suffixing "W" (west of 310) or "E" (east of 310).
GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude < 310), "Station"] = (
    GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude < 310), "Station"].astype(
        str
    )
    + "W"
)
GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude < 310), "Station"] = (
    GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude < 310), "Station"].astype(
        str
    )
    + "W"
)
GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude > 310), "Station"] = (
    GA03.loc[(GA03.Station == "Station 10") & (GA03.Longitude > 310), "Station"].astype(
        str
    )
    + "E"
)
GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude > 310), "Station"] = (
    GA03.loc[(GA03.Station == "Station 11") & (GA03.Longitude > 310), "Station"].astype(
        str
    )
    + "E"
)
# Build the list of distinct (lat, lon) profiles, in row order.
stations = []
positions = []
for i in range(len(GA03)):
    station = GA03["Station"].values[i]
    lat = GA03["Latitude"].values[i]
    lon = GA03["Longitude"].values[i]
    if len(positions) == 0 or [lat, lon] != positions[-1]:
        positions.append([lat, lon])
        stations.append(station)
# print(stations)
for i in [4]:  # choosing specific profiles to exclude (hand-picked index)
    GA03 = GA03.drop(
        GA03[
            (GA03.Latitude == positions[i][0]) & (GA03.Longitude == positions[i][1])
        ].index
    )
GA03.to_csv("./data/GA03_filtered.csv", index=False)
# make GIPY05 dataframe and csv (same pipeline as GA03; nitrate comes from
# the NO2+NO3 bottle column for this cruise).
data = [
    GIPY05_data["Station"],
    GIPY05_data["yyyy-mm-ddThh:mm:ss.sss"],
    GIPY05_data["Latitude [degrees_north]"],
    GIPY05_data["Longitude [degrees_east]"],
    GIPY05_data["DEPTH [m]"],
    GIPY05_data["CTDTMP [deg C]"],
    GIPY05_data["CTDSAL"],
    GIPY05_data["NO2+NO3_D_CONC_BOTTLE [umol/kg]"],
    GIPY05_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
    GIPY05_data["PRESSURE [dbar]"],
]
GIPY05 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats: keep only latitudes north of -45 or south of -65.
GIPY05 = GIPY05[(GIPY05.Latitude >= -45) | (GIPY05.Latitude <= -65)]
# GIPY05 = average_data(GIPY05)
add_ratio_data(GIPY05)
add_density_data(GIPY05)
GIPY05 = remove_empty_data(GIPY05)
GIPY05 = GIPY05[(GIPY05.Depth <= 500)]
GIPY05["Date"] = GIPY05.Date.str.split("T").str[0]  # keep only the date part
# Build the list of distinct (lat, lon) profiles, in row order.
positions = []
stations = []
for i in range(len(GIPY05)):
    station = GIPY05["Station"].values[i]
    lat = GIPY05["Latitude"].values[i]
    lon = GIPY05["Longitude"].values[i]
    if len(positions) == 0 or [lat, lon] != positions[-1]:
        positions.append([lat, lon])
        stations.append(station)
# print(stations)
for i in [0]:  # choosing specific profiles to exclude (hand-picked index)
    GIPY05 = GIPY05.drop(
        GIPY05[
            (GIPY05.Latitude == positions[i][0]) & (GIPY05.Longitude == positions[i][1])
        ].index
    )
GIPY05.to_csv("./data/GIPY05_filtered.csv", index=False)
# make GP02 dataframe and csv (same pipeline; no profiles excluded here).
data = [
    GP02_data["Station"],
    GP02_data["yyyy-mm-ddThh:mm:ss.sss"],
    GP02_data["Latitude [degrees_north]"],
    GP02_data["Longitude [degrees_east]"],
    GP02_data["DEPTH [m]"],
    GP02_data["CTDTMP [deg C]"],
    GP02_data["CTDSAL"],
    GP02_data["NO2+NO3_D_CONC_BOTTLE [umol/kg]"],
    GP02_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
    GP02_data["PRESSURE [dbar]"],
]
GP02 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats: keep longitudes <= 155 degE or >= 180 degE.
GP02 = GP02[(GP02.Longitude <= 155) | (GP02.Longitude >= 180)]
# GP02 = average_data(GP02)
add_ratio_data(GP02)
add_density_data(GP02)
GP02 = remove_empty_data(GP02)
GP02 = GP02[(GP02.Depth <= 500)]
GP02["Date"] = GP02.Date.str.split("T").str[0]  # keep only the date part
# Build the list of distinct (lat, lon) profiles, in row order.
positions = []
stations = []
for i in range(len(GP02)):
    station = GP02["Station"].values[i]
    lat = GP02["Latitude"].values[i]
    lon = GP02["Longitude"].values[i]
    if len(positions) == 0 or [lat, lon] != positions[-1]:
        positions.append([lat, lon])
        stations.append(station)
# print(stations)
# for i in []: #choosing specific profiles
# GP02 = GP02.drop(GP02[(GP02.Latitude == positions[i][0]) & (GP02.Longitude == positions[i][1])].index)
GP02.to_csv("./data/GP02_filtered.csv", index=False)
# make GIPY04 dataframe and csv (same pipeline; additionally drops a few
# hand-identified noisy depths at one station).
data = [
    GIPY04_data["Station"],
    GIPY04_data["yyyy-mm-ddThh:mm:ss.sss"],
    GIPY04_data["Latitude [degrees_north]"],
    GIPY04_data["Longitude [degrees_east]"],
    GIPY04_data["DEPTH [m]"],
    GIPY04_data["CTDTMP [deg C]"],
    GIPY04_data["CTDSAL"],
    GIPY04_data["NITRATE_D_CONC_BOTTLE [umol/kg]"],
    GIPY04_data["Fe_D_CONC_BOTTLE [nmol/kg]"],
    GIPY04_data["PRESSURE [dbar]"],
]
GIPY04 = pd.concat(data, axis=1, keys=headers)
# remove unwanted lons and lats: keep only latitudes north of -45.
GIPY04 = GIPY04[(GIPY04.Latitude >= -45)]
# GIPY04 = average_data(GIPY04)
add_ratio_data(GIPY04)
add_density_data(GIPY04)
GIPY04 = remove_empty_data(GIPY04)
GIPY04 = GIPY04[(GIPY04.Depth <= 500)]
# remove specific noisy data at station "18 (Super 1)"
indexes = GIPY04[
    (GIPY04.Station == "18 (Super 1)")
    & (
        (GIPY04.Depth == 78.6)
        | (GIPY04.Depth == 98.6)
        | (GIPY04.Depth == 149.8)
        | (GIPY04.Depth == 172.8)
    )
].index
GIPY04.drop(indexes, inplace=True)
GIPY04["Date"] = GIPY04.Date.str.split("T").str[0]  # keep only the date part
# Build the list of distinct (lat, lon) profiles, in row order.
positions = []
stations = []
for i in range(len(GIPY04)):
    station = GIPY04["Station"].values[i]
    lat = GIPY04["Latitude"].values[i]
    lon = GIPY04["Longitude"].values[i]
    if len(positions) == 0 or [lat, lon] != positions[-1]:
        positions.append([lat, lon])
        stations.append(station)
# print(stations)
for i in [0, 2, 4]:  # choosing specific profiles to exclude (hand-picked indices)
    GIPY04 = GIPY04.drop(
        GIPY04[
            (GIPY04.Latitude == positions[i][0]) & (GIPY04.Longitude == positions[i][1])
        ].index
    )
GIPY04.to_csv("./data/GIPY04_filtered.csv", index=False)
|
<filename>common_classes/para_db_update_prep.py
''' ParaDbUpdatePrep - further documentation:
usage: scripts/db_updater_1
detailed: scripts/documentation/update_process.md'''
# pylint: pylint: disable=unused-import
import sys
import constants.crud as crud
import constants.scripts as constants
import constants.sql_substrings as sql_substrings
import utilities.date_time as dt
import utilities.json_methods as json_helper
import utilities.random_methods as utils
from helpers.no_import_common_class.paragraph_dictionaries import ParagraphDictionaries as para_dict
from projects.models.paragraphs import (Category, Group, # noqa: F401
GroupParagraph, Paragraph,
ParagraphReference, Reference)
from common_classes.para_db_methods import ParaDbMethods
class ParaDbUpdatePrep(ParaDbMethods):
'''
The ParaDbUpdatePrep class retrieves the existing data and relationships used to update
paragraphs.
'''
    def __init__(self, input_data, updating=False):
        '''
        Based on the input data, we collect information to be edited in order to update the
        database. These variables will eventually be written to JSON to be manually updated
        and used as input to the update process

        :param input_data: run parameters; 'for_prod' and 'file_data' are popped
            off here, the remainder is kept as self.input_data.
        :param updating: forwarded to ParaDbMethods; False appears to mean a
            read-only / dry run.
        '''
        # https://stackoverflow.com/questions/8653516/python-list-of-dictionaries-search
        # this is the output data
        super().__init__(updating)
        self.for_prod = input_data.pop('for_prod', False)
        self.file_data = input_data.pop('file_data')
        self.input_data = input_data
        # record ids already captured, keyed by record type (avoids duplicates
        # when accumulating retrieved data into output_data).
        self.included_ids = {
            'categories': [],
            'references': [],
            'paragraphs': [],
            'groups': [],
            'group_paragraph': [],
            'paragraph_reference': [],
        }
        # retrieved/copied records destined for the step-1 JSON output file.
        self.output_data = {
            'categories': [],
            'references': [],
            'paragraphs': [],
            'groups': [],
            'group_paragraph': [],
            'paragraph_reference': [],
        }
    def collect_data_and_write_json(self):
        '''
        collect_data_and_write_json is Step 1 of the update process. It reads the input file,
        which is created manually and passed in by scripts/db_updater_s1 (some input can only
        be passed in as parameters. See documentation in script)

        Step 1 will never be run in production, since data is updated only in development
        '''
        self.process_input_and_output()
        # Write the accumulated output to a JSON file; the prefix distinguishes
        # prod-bound output from the default development output.
        params = {}
        params['directory_path'] = self.input_data['output_directory']
        params['prefix'] = constants.PROD_PROCESS_IND if self.for_prod else constants.DEFAULT_PREFIX
        json_helper.write_dictionary_to_file(self.output_data, **params)
    def process_input_and_output(self):
        '''
        process_input_and_output is the process where we prepare the data to be updated.

        1. Validate the keys to limit careless mistakes.
        2. Retrieve the existing data, so that we know what to update.
           * Note - The whole prep can be eliminated if there are only only the add_* or delete_*
             keys
        3. Copy any input that does not depend on data retrieval.  These keys are in
           crud.COPY_DIRECTLY_TO_OUTPUT
        '''
        self.validate_input_keys()
        if self.for_prod:
            # production runs look up records by unique key rather than id.
            self.unique_key_lookup_to_output()
        self.retrieve_existing_data()
        self.copy_directly_to_output()
    def validate_input_keys(self):
        '''
        validate_input_keys ensures that the user is doing careful work and does not make a careless
        mistake by running tests on the input keys.

        If there is an error, then processing stops (sys.exit) with a message indicating the
        necessary correction.
        '''
        if self.invalid_keys():
            sys.exit((f'Input error: there is at least one invalid key: {self.file_data}; '
                      f'The valid keys are {crud.VALID_RETRIEVAL_KEYS + crud.COPY_DIRECTLY_TO_OUTPUT}'))
        if self.for_prod_with_adds():
            sys.exit(f'Input error: no explicit creates if for_prod: {self.file_data}')
        if not self.valid_input_keys():
            sys.exit(f'Input error: Must be at least one valid key: {self.file_data}')
    def copy_directly_to_output(self):
        '''
        Copy_directly_to_output is a convenience feature to make it so the user can itemize all the
        work needed in step 1.  This prepatory process step retrieves data for updates and must be
        run beforehand (to capture the existing data and relationships), but the input can also have
        some add_* or delete_* keys, which are not prepared for or implemented until step 3.

        For COPY_DIRECTLY_TO_OUTPUT data, you can prepare the input and it will carry over to the
        next step by copying it directly to the manual json file.
        '''
        for key in crud.COPY_DIRECTLY_TO_OUTPUT:
            if utils.key_not_in_dictionary(self.file_data, key):
                continue  # key not supplied in the input file; nothing to copy.
            self.output_data[key] = self.file_data[key]
    def retrieve_existing_data(self):
        '''
        retrieve_existing_data is necessary for updating records.  It builds the query according to
        the input critera, retrieves the records from the database, creates a dictionary with the
        information and then writes the dictionary to the output directory as a JSON file.

        After any manual updates (step 2), the file becomes input to the db_update_s3 process.
        '''
        for key in crud.VALID_RETRIEVAL_KEYS:
            if utils.key_not_in_dictionary(self.file_data, key):
                continue  # this retrieval criterion was not supplied.
            query = self.build_sql(key)
            if query is None:
                continue  # no where-clause could be built; warning already printed.
            # print(f'Retrieval query == {query}')
            raw_queryset = ParaDbMethods.class_based_rawsql_retrieval(query, Paragraph)
            self.add_existing_data_to_output(raw_queryset)
# Validation routines
def invalid_keys(self):
    '''
    invalid_keys reports whether the input contains any unrecognized key.

    :return: True if at least one key is not a valid retrieval or copy-through key
    :rtype: bool
    '''
    allowed = crud.VALID_RETRIEVAL_KEYS + crud.COPY_DIRECTLY_TO_OUTPUT
    return any(key not in allowed for key in self.file_data.keys())
def for_prod_with_adds(self):
    '''
    for_prod_with_adds detects the disallowed combination of the for_prod flag
    with explicit creates (input keys beginning with add_) on associations,
    categories, groups or references.

    :return: True when for_prod is set and at least one input key starts with add_
    :rtype: bool
    '''
    if not self.for_prod:
        return False
    return utils.dictionary_key_begins_with_substring(self.file_data, 'add_')
def valid_input_keys(self):
    '''
    valid_input_keys reports whether the input contains at least one recognized key.

    :return: True if any valid retrieval or copy-through key is present
    :rtype: bool
    '''
    return any(
        utils.key_in_dictionary(self.file_data, key)
        for key in crud.VALID_RETRIEVAL_KEYS + crud.COPY_DIRECTLY_TO_OUTPUT
    )
# retrieval routines
def build_sql(self, key):
    '''
    build_sql assembles the complete retrieval query for the given input key.

    The where clause is derived from the JSON input file and appended to the
    constant part of the paragraph query, together with a fixed ordering.

    :return: complete sql query, or None when no where clause could be built
    :rtype: str
    '''
    where = self.get_where_statement(key)
    if where is None:
        print(f'No where for key=={key}, therefore not editing existing records{self.file_data}')
        return None
    base = ParaDbUpdatePrep.complete_query_from_constants()
    return base + where + ' order by c.id, g.cat_sort, gp.order'
@staticmethod
def complete_query_from_constants():
    '''
    complete_query_from_constants builds the constant portion of the paragraph
    query (select list plus from/join clauses); only the where clause varies.

    :return: query for paragraphs without a where clause
    :rtype: str
    '''
    select_part = ', '.join((
        sql_substrings.BEGIN_SELECT,
        sql_substrings.COMPLETE_CATEGORY_SELECT,
        sql_substrings.COMPLETE_GP_SELECT,
        sql_substrings.COMPLETE_GROUP_SELECT,
        sql_substrings.COMPLETE_PR_SELECT,
        sql_substrings.COMPLETE_PARAGRAPH_SELECT,
        sql_substrings.COMPLETE_REFERENCE_SELECT,
    ))
    from_part = (sql_substrings.FROM_PARA + sql_substrings.JOIN_GROUP_TO_PARA
                 + sql_substrings.JOIN_CATEGORY_TO_GROUP
                 + sql_substrings.JOIN_REFERENCES_TO_PARA)
    return select_part + from_part
def get_where_statement(self, key):
    '''
    get_where_statement builds the where clause implied by the given input key.

    Id-list keys ('group_ids', 'category_ids', 'paragraph_ids', 'reference_ids')
    become an "in (...)" test on the matching table alias (first letter of the
    key); 'updated_at' delegates to the timestamp-based where builder.

    :return: where clause, or None for keys that do not drive a retrieval
    :rtype: str
    '''
    id_list_keys = ('group_ids', 'category_ids', 'paragraph_ids', 'reference_ids')
    if key in id_list_keys:
        alias = key[0]
        return 'where ' + alias + '.id in (' + self.get_where_ids(key) + ')'
    if key == 'updated_at':
        return self.get_updated_at_where()
    return None
def get_updated_at_where(self):
    '''
    get_updated_at_where builds a where clause selecting recently updated
    paragraph data, based on the 'updated_at' dict from the input file
    (its 'oper' and 'units' entries describe an offset from now).

    :return: a where statement based on the computed cutoff timestamp
    :rtype: str
    '''
    params = self.file_data['updated_at']
    cutoff = dt.timediff_from_now_for_where(params['oper'], params['units'])
    use_date = f"'{cutoff}'"
    return self.upated_at_loop_through_tables(use_date)
@staticmethod
def upated_at_loop_through_tables(use_date):
    '''
    upated_at_loop_through_tables builds a where clause matching any paragraph
    table whose updated_at is at or after the given cutoff.

    NOTE(review): the method name has a typo ("upated"); it is kept unchanged
    so existing callers keep working.

    :param use_date: date in a format that works with postgres timestamps with time zones
    :type use_date: str
    :return: where statement ORing the updated_at test across all table abbreviations
    :rtype: str
    '''
    where = 'where'
    connector = ''
    for abbrev in crud.TABLE_ABBREV:
        where += f' {connector} {abbrev}.updated_at >= {use_date}'
        connector = 'or'
    return where
def get_where_ids(self, key):
    '''
    get_where_ids renders the id list stored under the given key as a
    comma-separated string for use inside a sql "in (...)" clause.

    Ids may be ints or strings; each is converted with str() so the user can
    supply plain ints without quoting (otherwise joining would raise ValueError).

    :param key: key to a python list of ids ('group_ids', 'category_ids',
                'paragraph_ids' or 'reference_ids')
    :type key: str
    :return: string with ids in sql format, such as '1, 2, 3'
    :rtype: str
    '''
    id_strings = [str(identifier) for identifier in self.file_data[key]]
    return ', '.join(id_strings)
def add_existing_data_to_output(self, queryset):
    '''
    add_existing_data_to_output folds every row of the queryset into the output.

    Each row is offered to each per-record assigner; an assigner creates a
    dictionary representation of its record unless it was already added.
    The resulting lists of dictionaries are later used to find-and-update
    (or, for production output, find-or-create) the records.

    :param queryset: result from the database retrieval using the input file parameters
    :type queryset: raw queryset
    '''
    assigners = (
        self.assign_category,
        self.assign_reference,
        self.assign_paragraph,
        self.assign_group,
        self.assign_groupparagraph,
        self.assign_paragraphreference,
    )
    for row in queryset:
        for assign in assigners:
            assign(row)
def assign_category(self, row):
    '''
    assign_category adds the row's category record to the output exactly once.

    Rows without a category, or whose category was already added, are skipped.
    For production runs the id/slug lookup entry is recorded as well.

    :param row: queryset row
    :type row: queryset row
    '''
    cat_id = row.category_id
    if cat_id is None or cat_id in self.included_ids['categories']:
        return
    self.included_ids['categories'].append(cat_id)
    category = para_dict.category_dictionary()
    category['id'] = cat_id
    category['title'] = row.category_title
    category['slug'] = row.category_slug
    category['category_type'] = row.category_type
    self.output_data['categories'].append(category)
    if self.for_prod:
        self.for_prod_lookup('categories', cat_id, row.category_slug)
def for_prod_lookup(self, top_key, key, value):
    '''
    for_prod_lookup records a two-way association between a development primary
    key and the environment-independent unique key of a record.

    Under output_data['record_lookups'][top_key], str(dev id) maps to the
    unique key, and the unique key maps to a dict holding the dev id.  In
    production (or when testing that production will work) this keeps
    associations in sync even though primary keys differ between environments.

    :param top_key: valid top keys are in <UPDATE_RECORD_KEYS> (import constants.crud as crud)
    :type top_key: str
    :param key: primary id of the record in the development database
    :type key: int
    :param value: unique key that is different from the primary key
    :type value: str
    '''
    try:
        str_key = str(key)
    except ValueError:
        sys.exit(f'can not convert key to string {key}')
    lookup = self.output_data['record_lookups'][top_key]
    lookup[str_key] = value
    lookup[value] = {'dev_id': key}
def assign_reference(self, row):
    '''
    assign_reference adds the row's reference record to the output exactly once.

    Rows without a reference, or whose reference was already added, are skipped.
    For production runs the id/slug lookup entry is recorded as well.

    :param row: queryset row
    :type row: queryset row
    '''
    ref_id = row.reference_id
    if ref_id is None or ref_id in self.included_ids['references']:
        return
    self.included_ids['references'].append(ref_id)
    ref = para_dict.reference_dictionary()
    ref['id'] = ref_id
    ref['link_text'] = row.reference_link_text
    ref['slug'] = row.reference_slug
    ref['url'] = row.reference_url
    ref['short_text'] = row.short_text
    self.output_data['references'].append(ref)
    if self.for_prod:
        self.for_prod_lookup('references', ref_id, row.reference_slug)
def assign_paragraph(self, row):
    '''
    assign_paragraph adds the row's paragraph record to the output exactly once.

    Rows without a paragraph, or whose paragraph was already added, are skipped.
    For production runs the id/guid lookup entry is recorded as well.

    :param row: queryset row
    :type row: queryset row
    '''
    para_id = row.para_id
    if para_id is None or para_id in self.included_ids['paragraphs']:
        return
    self.included_ids['paragraphs'].append(para_id)
    para = para_dict.paragraph_dictionary()
    para['id'] = para_id
    para['subtitle'] = row.para_subtitle
    para['short_title'] = row.short_title
    para['note'] = row.para_note
    para['text'] = row.para_text
    para['standalone'] = row.para_standalone
    para['image_path'] = row.para_image_path
    para['image_info_key'] = row.para_image_info_key
    para['guid'] = row.para_guid
    para['slug'] = row.para_slug
    self.output_data['paragraphs'].append(para)
    if self.for_prod:
        # paragraphs use the guid (not the slug) as their unique key
        self.for_prod_lookup('paragraphs', para_id, row.para_guid)
def assign_group(self, row):
    '''
    assign_group adds the row's group record to the output exactly once.

    Rows without a group, or whose group was already added, are skipped.
    For production runs the id/slug lookup entry is recorded, as is the
    lookup for the group's category.

    :param row: queryset row
    :type row: queryset row
    '''
    grp_id = row.group_id
    if grp_id is None or grp_id in self.included_ids['groups']:
        return
    self.included_ids['groups'].append(grp_id)
    group = para_dict.group_dictionary()
    group['id'] = grp_id
    group['title'] = row.group_title
    group['slug'] = row.group_slug
    group['note'] = row.group_note
    group['category_id'] = row.group_category_id
    group['short_name'] = row.group_short_name
    group['cat_sort'] = row.cat_sort
    group['group_type'] = row.group_type
    self.output_data['groups'].append(group)
    if self.for_prod:
        self.for_prod_lookup('groups', grp_id, row.group_slug)
        self.record_lookups('categories', row.group_category_id, Category)
def assign_groupparagraph(self, row):
    '''
    assign_groupparagraph adds the row's group-paragraph association record to
    the output exactly once; for production runs, lookups for both ends of the
    association are recorded as well.

    :param row: queryset row
    :type row: queryset row
    '''
    gp_id = row.gp_id
    if gp_id is None or gp_id in self.included_ids['group_paragraph']:
        return
    self.included_ids['group_paragraph'].append(gp_id)
    group_para = para_dict.groupparagraph_dictionary()
    group_para['id'] = gp_id
    group_para['group_id'] = row.gp_group_id
    group_para['paragraph_id'] = row.gp_para_id
    group_para['order'] = row.gp_order
    self.output_data['group_paragraph'].append(group_para)
    if self.for_prod:
        self.record_lookups('groups', row.gp_group_id, Group)
        self.record_lookups('paragraphs', row.gp_para_id, Paragraph)
def assign_paragraphreference(self, row):
    '''
    assign_paragraphreference adds the row's paragraph-reference association
    record to the output exactly once; for production runs, lookups for both
    ends of the association are recorded as well.

    :param row: queryset row
    :type row: queryset row
    '''
    pr_id = row.pr_id
    if pr_id is None or pr_id in self.included_ids['paragraph_reference']:
        return
    self.included_ids['paragraph_reference'].append(pr_id)
    para_ref = para_dict.paragraphreference_dictionary()
    para_ref['id'] = pr_id
    para_ref['reference_id'] = row.pr_reference_id
    para_ref['paragraph_id'] = row.pr_para_id
    self.output_data['paragraph_reference'].append(para_ref)
    if self.for_prod:
        self.record_lookups('references', row.pr_reference_id, Reference)
        self.record_lookups('paragraphs', row.pr_para_id, Paragraph)
def record_lookups(self, top_key, pk_id, class_):
    '''
    record_lookups will make sure that there is a way to uniquely identify records
    that may have a different primary key in production

    If the record is not yet in the lookup table it is fetched from the database
    and registered through for_prod_lookup (guid for paragraphs, slug otherwise).

    :param top_key: top key to lookup table: plural form of the main four paragraph records
    :type top_key: str
    :param pk_id: key for lookup: primary key of the record we need to look up
    :type pk_id: int
    :param class_: models.Model class for the lookup
    :type class_: models.Model
    '''
    # groups may legitimately have no category; nothing to look up in that case
    if top_key == 'categories' and pk_id is None:
        return
    dict_to_check = self.output_data['record_lookups'][top_key]
    # NOTE(review): for_prod_lookup stores keys as str(pk), while pk_id here is
    # an int — confirm the membership test matches the stored key type, otherwise
    # already-registered records are re-fetched from the database.
    if utils.key_not_in_dictionary(dict_to_check, pk_id):
        rec = class_.objects.get(pk=pk_id)
        if top_key == 'paragraphs':
            # paragraphs are uniquely identified by guid rather than slug
            self.for_prod_lookup(top_key, rec.id, rec.guid)
            return
        self.for_prod_lookup(top_key, rec.id, rec.slug)
def unique_key_lookup_to_output(self):
    '''
    unique_key_lookup_to_output initializes the empty record-lookup framework.

    The lookup table associates development primary keys (which may differ
    between environments) with unique keys that are the same everywhere, so
    development associations make it to production intact (development is the
    source of truth).  Only the top-level keys are created here; entries are
    filled in later by for_prod_lookup.
    '''
    lookup_tables = ('categories', 'references', 'paragraphs', 'groups')
    self.output_data['record_lookups'] = {name: {} for name in lookup_tables}
|
import time
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import precision_recall_fscore_support, pairwise_distances, pairwise
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.preprocessing import normalize
from scipy.stats import entropy
from modules.models.generic_model import GenericModel
from modules.models.model_hyperps import LDA_Model_Hyperp
from modules.utils import similarity_measures as sm
from modules.utils.tokenizers import PorterStemmerBased_Tokenizer
class SimilarityMeasure:
    '''
    Jensen-Shannon divergence similarity measure.

    Instances carry the JSD measure name from the shared similarity-measures
    module; jsd() computes the divergence itself.
    '''
    def __init__(self):
        self.name = sm.SimilarityMeasure.JSD

    # BUG FIX: this was commented "static method" but lacked the decorator, so
    # instance access (obj.jsd(p, q)) would have bound `p` to self.  Class-level
    # access (SimilarityMeasure.jsd) keeps working exactly as before.
    @staticmethod
    def jsd(p, q):
        '''
        Return the Jensen-Shannon divergence between distributions p and q.

        JSD(p, q) = (KL(p || m) + KL(q || m)) / 2 with m = (p + q) / 2.
        Inputs are assumed to already be normalized probability vectors
        (the explicit normalization was deliberately left disabled here).
        '''
        p = np.asarray(p)
        q = np.asarray(q)
        # normalize
        #p /= p.sum()
        #q /= q.sum()
        m = (p + q) / 2
        return (entropy(p, m) + entropy(q, m)) / 2
"""
params_dict = {
'lda__name' : 'LDA',
'lda__similarity_measure' : SimilarityMeasure.COSINE,
'lda__vectorizer' : TfidfVectorizer(),
'lda__vectorizer__stop_words' : 'english',
'lda__vectorizer__tokenizer' : Tokenizer(),
'lda__vectorizer__use_idf' : True, # optional if type(Vectorizer) == TfidfVectorizer
'lda__vectorizer__smooth_idf' : True, # optional if type(Vectorizer) == TfidfVectorizer
'lda__vectorizer__ngram_range' : (1,2),
'lda__lda_model' : TruncatedSVD(),
'lda__lda_model__n_components' : 5
}
"""
class LDA(GenericModel):
    """
    Latent Dirichlet Allocation trace-recovery model.

    Vectorizes a corpus (test cases) and a query set (bug reports), projects
    both into LDA topic space, and fills a test-case x bug-report similarity
    matrix using the configured measure (cosine, Jensen-Shannon or euclidean).
    Hyper-parameters arrive as 'lda__...' keys in **kwargs (see the example
    params_dict above).
    """
    def __init__(self, **kwargs):
        # Fitted document-term matrix and query vectors (set in recover_links).
        self._corpus_matrix = None
        self._query_vector = None
        self.vectorizer = None
        self.lda_model = LatentDirichletAllocation(n_jobs=-1, random_state=42)
        super().__init__()
        self.similarity_measure = None
        self.set_basic_params(**kwargs)
        self.set_vectorizer(**kwargs)
        self.set_lda_model(**kwargs)

    def set_name(self, name):
        super().set_name(name)

    def set_model_gen_name(self, gen_name):
        super().set_model_gen_name(gen_name)

    def set_basic_params(self, **kwargs):
        # Fall back to defaults when the hyper-parameter keys are absent.
        self.set_name('LDA' if LDA_Model_Hyperp.NAME.value not in kwargs.keys() else kwargs[LDA_Model_Hyperp.NAME.value])
        self.set_model_gen_name('lda')
        self.set_similarity_measure(sm.SimilarityMeasure.COSINE if LDA_Model_Hyperp.SIMILARITY_MEASURE.value not in kwargs.keys() else kwargs[LDA_Model_Hyperp.SIMILARITY_MEASURE.value])

    def set_similarity_measure(self, sim_measure):
        self.similarity_measure = sim_measure

    def set_vectorizer(self, **kwargs):
        self.vectorizer = TfidfVectorizer(stop_words='english',
                                          use_idf=True,
                                          smooth_idf=True) if LDA_Model_Hyperp.VECTORIZER.value not in kwargs.keys() else kwargs[LDA_Model_Hyperp.VECTORIZER.value]
        # Keys look like 'lda__vectorizer__<param>'; index 2 is the bare param name.
        vec_params = {key.split('__')[2]:kwargs[key] for key,val in kwargs.items() if '__vectorizer__' in key}
        self.vectorizer.set_params(**vec_params)

    def set_lda_model(self, **kwargs):
        # Keys look like 'lda__lda_model__<param>'; index 2 is the bare param name.
        lda_model_params = {key.split('__')[2]:kwargs[key] for key,val in kwargs.items() if '__lda_model__' in key}
        self.lda_model.set_params(**lda_model_params)

    def recover_links(self, corpus, query, test_cases_names, bug_reports_names):
        """
        Fit the vectorizer and LDA model on the corpus, transform the query,
        and build the similarity DataFrame (rows: test cases, cols: bug reports).
        """
        starttime = time.time()
        self._corpus_matrix = self.vectorizer.fit_transform(corpus)
        self._query_vector = self.vectorizer.transform(query)
        self.out_1 = self.lda_model.fit_transform(self._corpus_matrix)
        self.out_2 = self.lda_model.transform(self._query_vector)
        metric = self.similarity_measure
        # NOTE(review): cosine fills the matrix with similarities while the JSD
        # and euclidean branches fill it with distances — confirm downstream
        # thresholding accounts for the inverted ordering.
        if metric == sm.SimilarityMeasure.COSINE:
            self._sim_matrix = pairwise.cosine_similarity(X=self.out_1, Y=self.out_2)
        elif metric == sm.SimilarityMeasure.JSD:
            self._sim_matrix = pairwise_distances(X=self.out_1, Y=self.out_2, metric=SimilarityMeasure.jsd)
        elif metric == sm.SimilarityMeasure.EUCLIDIAN_DISTANCE:
            self._sim_matrix = pairwise_distances(X=self.out_1, Y=self.out_2, metric='euclidean')
        #self._sim_matrix = super().normalize_sim_matrix(self._sim_matrix)
        self._sim_matrix = pd.DataFrame(data=self._sim_matrix, index=test_cases_names, columns=bug_reports_names)
        self._record_docs_feats(corpus, query, test_cases_names, bug_reports_names)
        endtime = time.time()
        print(f' ..Total processing time: {round(endtime-starttime,2)} seconds')

    def _record_docs_feats(self, corpus, query, test_cases_names, bug_reports_names):
        # Collect most-relevant-words (mrw) and document-length (dl) features
        # for both artifact sets into one DataFrame indexed by artifact name.
        self.mrw_tcs = self._recover_mrw_list(test_cases_names, corpus)
        self.mrw_brs = self._recover_mrw_list(bug_reports_names, query)
        self.dl_tcs = self._recover_dl_list(test_cases_names, corpus)
        self.dl_brs = self._recover_dl_list(bug_reports_names, query)
        index = list(test_cases_names) + list(bug_reports_names)
        self.docs_feats_df = pd.DataFrame(index=index,
                                          columns=['mrw','dl'])
        for tc_name, mrw in self.mrw_tcs:
            self.docs_feats_df.at[tc_name, 'mrw'] = mrw
        for tc_name, dl in self.dl_tcs:
            self.docs_feats_df.at[tc_name, 'dl'] = dl
        for br_name, mrw in self.mrw_brs:
            self.docs_feats_df.at[br_name, 'mrw'] = mrw
        for br_name, dl in self.dl_brs:
            self.docs_feats_df.at[br_name, 'dl'] = dl

    def _recover_dl_list(self, artf_names, artf_descs):
        # Document length = number of stemmed tokens in the description.
        tokenizer = PorterStemmerBased_Tokenizer()
        dl_list = []
        for artf_name, artf_desc in zip(artf_names, artf_descs):
            dl_list.append((artf_name, len(tokenizer.__call__(artf_desc))))
        return dl_list

    def _recover_mrw_list(self, artf_names, artf_descs):
        # Top-N vocabulary tokens by tf-idf weight for each artifact description.
        N_REL_WORDS = 6
        mrw_list = [] # list of tuples (artf_name, mrw_list={})
        for artf_name, artf_desc in zip(artf_names, artf_descs):
            X = self.vectorizer.transform([artf_desc])
            df1 = pd.DataFrame(X.T.toarray())
            # NOTE(review): get_feature_names() was removed in scikit-learn 1.2
            # (replaced by get_feature_names_out) — confirm the pinned version.
            df1['token'] = self.vectorizer.get_feature_names()
            df1.sort_values(by=0, ascending=False, inplace=True)
            mrw = list(df1.iloc[0:N_REL_WORDS,1].values)
            mrw_list.append((artf_name, mrw))
        return mrw_list

    def model_setup(self):
        """Return a snapshot of the model configuration for reporting."""
        return {"Setup" :
                [
                    {"Name" : self.get_name()},
                    {"Similarity Measure and Minimum Threshold" : self.get_sim_measure_min_threshold()},
                    {"Top Value" : self.get_top_value()},
                    {"LDA Model" : self.lda_model.get_params()},
                    {"Vectorizer" : self.vectorizer.get_params()},
                    {"Vectorizer Type" : type(self.vectorizer)}
                ]
               }

    def get_name(self):
        return super().get_name()

    def get_model_gen_name(self):
        return super().get_model_gen_name()

    def get_similarity_measure(self):
        return self.similarity_measure

    def get_sim_matrix(self):
        return super().get_sim_matrix()

    def get_tokenizer_type(self):
        # NOTE(review): self.tokenizer is never assigned in this class —
        # presumably set by GenericModel; verify before relying on this method.
        return type(self.tokenizer)

    def save_sim_matrix(self):
        super().save_sim_matrix()

    def get_query_vector(self):
        return self._query_vector

    def get_corpus_matrix(self):
        return self._corpus_matrix

    def get_vectorizer_type(self):
        return type(self.vectorizer)

    def print_topics(self):
        # Print the ten highest-weighted terms of every fitted LDA topic.
        feature_names = self.vectorizer.get_feature_names()
        n_top_words = 10
        for topic_idx, topic in enumerate(self.lda_model.components_):
            message = "Topic #%d: " % topic_idx
            message += " ".join([feature_names[i]
                                 for i in topic.argsort()[:-n_top_words - 1:-1]])
            print(message)
|
"""
Based on: https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
"""
import random
from typing import List, Tuple
import numpy as np
from pyderl.utils.data_structures import SumSegmentTree, MinSegmentTree
class PrioritizedReplayBuffer:
    """ Prioritized replay buffer.

    Args:
        size (int): Max number of transitions to store in the buffer. When the
            buffer overflows the old memories are dropped.
        alpha (float): How much prioritization is used (0 for no
            prioritization and 1 for full prioritization).
    """

    def __init__(self, size: int, alpha: float) -> None:
        self._storage = []       # ring buffer of transition tuples
        self._maxsize = size
        self._next_idx = 0       # slot that the next add() will fill/overwrite

        assert alpha >= 0
        self._alpha = alpha

        # The segment trees need a power-of-two capacity >= size.
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2

        self._it_sum = SumSegmentTree(it_capacity)   # prefix sums for sampling
        self._it_min = MinSegmentTree(it_capacity)   # min priority for weights
        self._max_priority = 1.0  # new transitions get the max priority seen so far

    def __len__(self):
        # Number of transitions currently stored (<= maxsize).
        return len(self._storage)

    def _add_to_storage(self, obs_t, action, reward, obs_tp1, done):
        # Grow while below capacity; afterwards overwrite the oldest slot.
        data = (obs_t, action, reward, obs_tp1, done)
        if self._next_idx >= len(self._storage):
            self._storage.append(data)
        else:
            self._storage[self._next_idx] = data
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _encode_sample(self, idxes):
        # Gather the transitions at `idxes` into batched numpy arrays.
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        for i in idxes:
            data = self._storage[i]
            obs_t, action, reward, obs_tp1, done = data
            obses_t.append(np.array(obs_t, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            obses_tp1.append(np.array(obs_tp1, copy=False))
            dones.append(done)
        return (np.array(obses_t),
                np.array(actions),
                np.array(rewards),
                np.array(obses_tp1),
                np.array(dones))

    def add(self, obs_t, action, reward, obs_tp1, done):
        """ Store a transition; it receives the maximum priority seen so far
        (so every new transition is sampled at least once before decaying). """
        idx = self._next_idx
        self._add_to_storage(obs_t, action, reward, obs_tp1, done)
        self._it_sum[idx] = self._max_priority ** self._alpha
        self._it_min[idx] = self._max_priority ** self._alpha

    def _sample_proportional(self, batch_size):
        # Stratified proportional sampling: draw one index from each of
        # batch_size equal slices of the total priority mass.
        res = []
        p_total = self._it_sum.sum(0, len(self._storage) - 1)
        every_range_len = p_total / batch_size
        for i in range(batch_size):
            mass = random.random() * every_range_len + i * every_range_len
            idx = self._it_sum.find_prefixsum_idx(mass)
            res.append(idx)
        return res

    def sample(self, batch_size: int, beta: float) -> Tuple[np.ndarray, ...]:
        """ Sample a batch of experiences.

        Compared to uniform sampling, this method also returns the importance
        weights and idxes of sampled experiences.

        Args:
            batch_size (int): How many transitions to sample.
            beta (float): To what degree to use importance weights (0 means no
                corrections and 1 means full correction).

        Returns:
            Tuple of numpy arrays. More specifically:
                * obs_batch: Batch of observations.
                * act_batch: Batch of actions executed given obs_batch.
                * rew_batch: Rewards received as results of executing act_batch.
                * next_obs_batch: Next set of observations seen after executing
                    act_batch.
                * done_mask: done_mask[i] = 1 if executing act_batch[i] resulted
                    in the end of an episode and 0 otherwise.
                * weights: Array of shape (batch_size,) and dtype np.float32
                    denoting importance weight of each sampled transition.
                * idxes: Array of shape (batch_size,) and dtype np.int32 indices
                    in buffer of sampled experiences.
        """
        assert beta > 0

        idxes = self._sample_proportional(batch_size)

        weights = []
        # Normalize by the largest possible weight so weights lie in (0, 1].
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self._storage)) ** (-beta)

        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self._storage)) ** (-beta)
            weights.append(weight / max_weight)

        weights = np.array(weights)
        encoded_sample = self._encode_sample(idxes)
        return tuple(list(encoded_sample) + [weights, idxes])

    def update_priorities(self,
                          idxes: List[int],
                          priorities: List[float]) -> None:
        """ Update priorities of the sampled transitions.

        Sets the priority of a transition at index idxes[i] in the buffer to
        priorities[i].

        Args:
            idxes (List[int]): List with the indices of the sampled transitions.
            priorities (List[float]): List with the updated priorities
                corresponding to the transitions at the sampled indices, denoted
                by variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        for idx, priority in zip(idxes, priorities):
            assert priority > 0
            assert 0 <= idx < len(self._storage)
            # alpha is applied here, not at sampling time
            self._it_sum[idx] = priority ** self._alpha
            self._it_min[idx] = priority ** self._alpha
            self._max_priority = max(self._max_priority, priority)
|
from reclaimer.constants import *
from reclaimer.util import fourcc_to_int
# These are the 4 fourCC that I've found in tag headers
# and this is what I believe the order of their age is.
# Maps the engine-id fourCC found in a tag header to the engine version name.
engine_id_to_name = {
    "BLM_": "halo_2",
    "LAMB": "halo_2_old",
    "MLAB": "halo_2_older",
    "ambl": "halo_2_oldest",
}
# Here are some of the differences between engine versions:
# ambl: the tbfd struct was 12 bytes instead of 16, with the second and
# third UInt32 replaced by UInt16, which are the version and bcount
# MLAB: the ascii_str32 instances were replaced with ascii_str_varlen
# LAMB: ???
# BLM!: newest version.
# DO NOT CHANGE THE ORDER OF THESE
# (presumably indexed by position from the map header — TODO confirm)
HALO2_MAP_TYPES = ("local", "mainmenu", "shared", "single_player_shared")

# bitmap formats
# NOTE(review): spelling "PALLETIZED" kept as-is; values come from
# reclaimer.constants.
PALLETIZED_FORMATS = (FORMAT_P8_BUMP, FORMAT_P8)

# maps tag class four character codes(fccs) in
# their string encoding to their int encoding.
h2_tag_class_fcc_to_be_int = {}  # big-endian int encodings
h2_tag_class_fcc_to_le_int = {}  # little-endian int encodings

# maps tag class four character codes(fccs) in
# their int encoding to their string encoding.
h2_tag_class_be_int_to_fcc = {}
h2_tag_class_le_int_to_fcc = {}
# maps tag class four character codes to the tags file extension
# 120 classes, 97 of which are NOT marked with OLD?
# NOTE: "OLD?" marks classes believed to exist only in older engine revisions.
h2_tag_class_fcc_to_ext = {
    "adlg": "ai_dialogue_globals",
    "mdlg": "ai_mission_dialogue",
    "ant!": "antenna",
    "bipd": "biped",
    "bitm": "bitmap",
    "bsdt": "breakable_surface",
    "$#!+": "cache_file_sound",  # OLD?
    "trak": "camera_track",
    "devo": "cellular_automata",
    "whip": "cellular_automata2d",
    "char": "character",
    "gldf": "chocolate_mountain",
    "clwd": "cloth",
    "coll": "collision_model",
    "coln": "colony",
    "colo": "color_table",
    "cont": "contrail",
    "bloc": "crate",
    "crea": "creature",
    "jpt!": "damage_effect",
    "deca": "decal",
    "DECR": "decorator_set",
    "DECP": "decorators",
    "dobc": "detail_object_collection",
    "devi": "device",
    "ctrl": "device_control",
    "lifi": "device_light_fixture",
    "mach": "device_machine",
    "udlg": "dialogue",
    "effe": "effect",
    "eqip": "equipment",
    "garb": "garbage",
    "matg": "globals",
    "grhi": "grenade_hud_interface",  # OLD?
    "hudg": "hud_globals",
    "hmt ": "hud_message_text",
    "hud#": "hud_number",
    "item": "item",
    "itmc": "item_collection",
    "lens": "lens_flare",
    "ligh": "light",
    "MGS2": "light_volume",
    "tdtl": "liquid",
    "foot": "material_effects",
    "mpdt": "material_physics",
    "metr": "meter",
    "hlmt": "model",
    "jmad": "model_animation_graph",
    "mcsr": "mouse_cursor_definition",
    "unic": "multilingual_unicode_string_list",
    "mulg": "multiplayer_globals",
    "mply": "multiplayer_scenario_description",
    "goof": "multiplayer_variant_settings_interface_definition",
    "nhdt": "new_hud_definition",
    "obje": "object",
    'pctl': "particle_system",  # still in use in the halo 2 alpha
    "part": "particle_old",  # still in use in the halo 2 alpha
    "prt3": "particle",
    "PRTM": "particle_model",
    "pmov": "particle_physics",
    "fpch": "patchy_fog",
    "phys": "physics",
    "phmo": "physics_model",
    "pixl": "pixel_shader",
    "fog ": "planar_fog",
    "pphy": "point_physics",
    "proj": "projectile",
    "mode": "render_model",
    "sbsp": "scenario_structure_bsp",
    "ltmp": "scenario_structure_lightmap",
    "scnr": "scenario",
    "ai**": "scenario_ai_resource",  # OLD?
    "*ipd": "scenario_bipeds_resource",  # OLD?
    "cin*": "scenario_cinematics_resource",  # OLD?
    "clu*": "scenario_cluster_data_resource",  # OLD?
    "/**/": "scenario_comments_resource",  # OLD?
    "*rea": "scenario_creature_resource",  # OLD?
    "dec*": "scenario_decals_resource",  # OLD?
    "dc*s": "scenario_decorators_resource",  # OLD?
    "dgr*": "scenario_devices_resource",  # OLD?
    "*qip": "scenario_equipment_resource",  # OLD?
    "hsc*": "scenario_hs_source_file",  # OLD?
    "*cen": "scenario_scenery_resource",  # OLD?
    "*sce": "scenario_sound_scenery_resource",  # OLD?
    "sslt": "scenario_structure_lighting_resource",  # OLD?
    "*igh": "scenario_lights_resource",  # OLD?
    "trg*": "scenario_trigger_volumes_resource",  # OLD?
    "*ehi": "scenario_vehicles_resource",  # OLD?
    "*eap": "scenario_weapons_resource",  # OLD?
    "scen": "scenery",
    "egor": "screen_effect",
    "shad": "shader",
    "stem": "shader_template",
    "slit": "shader_light_response",
    "spas": "shader_pass",
    "sky ": "sky",
    "snd!": "sound",
    "ugh!": "sound_cache_file_gestalt",
    "sncl": "sound_classes",
    "spk!": "sound_dialogue_constants",
    "<fx>": "sound_effect_template",
    "sfx+": "sound_effect_collection",
    "snde": "sound_environment",
    "lsnd": "sound_looping",
    "snmx": "sound_mix",
    "ssce": "sound_scenery",
    "BooM": "stereo_system",
    "styl": "style",
    "sily": "text_value_pair_definition",
    "unit": "unit",
    "unhi": "unit_hud_interface",  # OLD?
    "wgtz": "user_interface_globals_definition",
    "skin": "user_interface_list_skin_definition",
    "wgit": "user_interface_screen_widget_definition",
    "wigl": "user_interface_shared_globals_definition",
    "vehi": "vehicle",
    "vehc": "vehicle_collection",
    "vrtx": "vertex_shader",
    "weap": "weapon",
    "wphi": "weapon_hud_interface",  # OLD?
    "weat": "weather_system",
    "wind": "wind",
    }
# Populate the string<->int tag-class lookup tables from the extension map.
# NOTE(review): both the big- and little-endian tables receive the same
# fourcc_to_int value — confirm whether the LE table should pass a byteorder
# argument to fourcc_to_int.
for tag_cls in h2_tag_class_fcc_to_ext:
    int_code = fourcc_to_int(tag_cls)
    h2_tag_class_fcc_to_be_int[tag_cls] = int_code
    h2_tag_class_be_int_to_fcc[int_code] = tag_cls
    h2_tag_class_fcc_to_le_int[tag_cls] = int_code
    h2_tag_class_le_int_to_fcc[int_code] = tag_cls
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
import odoo
import odoo.tools as tools
from . import print_fnc
from odoo.models import BaseModel
from odoo.tools.safe_eval import safe_eval
class InheritDict(dict):
    """
    Dictionary with chained lookup: keys missing locally are resolved through
    an optional parent mapping (useful for name lookup in call or eval nodes).
    """
    def __init__(self, parent=None):
        # The parent is consulted only when a key is absent here; a falsy
        # (None or empty) parent terminates the lookup chain.
        self.parent = parent

    def __getitem__(self, name):
        if name in self:
            return super(InheritDict, self).__getitem__(name)
        if not self.parent:
            # BUG FIX: was a bare `raise KeyError`, which discarded the
            # missing key and made failures impossible to diagnose.
            raise KeyError(name)
        return self.parent[name]
def tounicode(val):
    """
    Coerce *val* to a text (unicode) string.

    Byte strings are decoded as UTF-8, text passes through unchanged, and
    anything else is converted with the text constructor.

    BUG FIX: the original referenced the ``unicode`` builtin unconditionally,
    which raises NameError on Python 3; this version behaves identically on
    Python 2 and also works on Python 3 (where ``str`` is the text type).
    """
    try:
        text_type = unicode  # Python 2
    except NameError:
        text_type = str  # Python 3
    if isinstance(val, bytes):
        # Python 2 `str` is bytes, so this matches the original str branch.
        return val.decode('utf-8')
    if isinstance(val, text_type):
        return val
    return text_type(val)
class document(object):
def __init__(self, cr, uid, datas, func=False):
    '''Create a new document renderer bound to a cursor, user and input data.'''
    self.bin_datas = {}        # binary attachments collected while parsing
    self.func = func or {}     # optional mapping of custom node functions
    self.cr = cr               # database cursor
    self.uid = uid             # id of the acting user
    self.datas = datas         # input data (e.g. wizard form values)
def node_attrs_get(self, node):
    '''Return the node's attribute mapping, or an empty dict when it has none.'''
    attrs = node.attrib
    return attrs if len(attrs) else {}
def get_value(self, browser, field_path):
    '''
    Walk a dotted *field_path* starting from *browser* and return the value.

    Recordsets and lists are narrowed to their first element at each step;
    an empty recordset (or a falsy final value) yields the empty string.
    '''
    parts = field_path.split('.')
    if not len(parts):
        # split() never yields an empty list, so this is only a safety net
        return ''
    current = browser
    for part in parts:
        if isinstance(current, (BaseModel, list)):
            if not current:
                return ''
            current = current[0]
        current = current[part]
    return current or ''
def get_value2(self, browser, field_path):
    '''Like get_value, but collapse a record result to its database id.'''
    value = self.get_value(browser, field_path)
    return value.id if isinstance(value, BaseModel) else value
def eval(self, record, expr):
    '''Safely evaluate *expr* with the current record exposed as ``obj``.'''
    #TODO: support remote variables (eg address.title) in expr
    # how to do that: parse the string, find dots, replace those dotted variables by temporary
    # "simple ones", fetch the value of those variables and add them (temporarily) to the _data
    # dictionary passed to eval
    #FIXME: it wont work if the data hasn't been fetched yet... this could
    # happen if the eval node is the first one using this Record
    # the next line is a workaround for the problem: it causes the resource to be loaded
    # safe_eval restricts builtins; only `obj` is available to the expression.
    return safe_eval(expr, {}, {'obj': record})
def parse_node(self, node, parent, browser, datas=None):
    '''
    Recursively render one template node into the output tree under *parent*.

    The node's 'type' attribute selects the behavior: field, attachment, data,
    function, eval, fields (group-by), call (invoke a model method) or zoom
    (recurse into a related record).  Nodes without a 'type' are copied and
    their children parsed recursively.

    NOTE(review): this body contains several Python-2-only constructs
    (iteritems, StringIO import, base64.decodestring, dict.keys().sort(),
    the unicode builtin) — confirm the targeted interpreter before reuse.
    '''
    env = odoo.api.Environment(self.cr, self.uid, {})
    attrs = self.node_attrs_get(node)
    if 'type' in attrs:
        if attrs['type']=='field':
            # Emit the value of a (possibly dotted) field of the record.
            value = self.get_value(browser, attrs['name'])
            #TODO: test this
            if value == '' and 'default' in attrs:
                value = attrs['default']
            el = etree.SubElement(parent, node.tag)
            el.text = tounicode(value)
            #TODO: test this
            # NOTE(review): iteritems() does not exist on Python 3 dicts.
            for key, value in attrs.iteritems():
                if key not in ('type', 'name', 'default'):
                    el.set(key, value)
        elif attrs['type']=='attachment':
            # Emit an index into self.bin_datas for an image attachment.
            model = browser._name
            value = self.get_value(browser, attrs['name'])
            atts = env['ir.attachment'].search([('res_model','=',model),('res_id','=',int(value))])
            datas = atts.read()
            if len(datas):
                # if there are several, pick first
                datas = datas[0]
                fname = str(datas['datas_fname'])
                ext = fname.split('.')[-1].lower()
                if ext in ('jpg','jpeg', 'png'):
                    import base64
                    # NOTE(review): StringIO module and base64.decodestring
                    # are Python-2-only (use io.BytesIO / base64.decodebytes).
                    from StringIO import StringIO
                    dt = base64.decodestring(datas['datas'])
                    fp = StringIO()
                    fp.write(dt)
                    i = str(len(self.bin_datas))
                    self.bin_datas[i] = fp
                    el = etree.SubElement(parent, node.tag)
                    el.text = i
        elif attrs['type']=='data':
            #TODO: test this
            # Emit a value taken from the wizard form data.
            txt = self.datas.get('form', {}).get(attrs['name'], '')
            el = etree.SubElement(parent, node.tag)
            el.text = txt
        elif attrs['type']=='function':
            # Custom functions registered on the document take precedence
            # over the shared print_fnc helpers.
            if attrs['name'] in self.func:
                txt = self.func[attrs['name']](node)
            else:
                txt = print_fnc.print_fnc(attrs['name'], node)
            el = etree.SubElement(parent, node.tag)
            el.text = txt
        elif attrs['type']=='eval':
            # Emit the result of a safe-eval'd expression on the record.
            value = self.eval(browser, attrs['expr'])
            el = etree.SubElement(parent, node.tag)
            el.text = str(value)
        elif attrs['type']=='fields':
            # Group the records by a tuple of field values and render the
            # node's children once per group (optionally in descending order).
            fields = attrs['name'].split(',')
            vals = {}
            for b in browser:
                value = tuple([self.get_value2(b, f) for f in fields])
                if not value in vals:
                    vals[value]=[]
                vals[value].append(b)
            # NOTE(review): keys() returns a view on Python 3, which has no
            # sort()/reverse(); this only works on Python 2.
            keys = vals.keys()
            keys.sort()
            if 'order' in attrs and attrs['order']=='desc':
                keys.reverse()
            v_list = [vals[k] for k in keys]
            for v in v_list:
                el = etree.SubElement(parent, node.tag)
                for el_cld in node:
                    self.parse_node(el_cld, el, v)
        elif attrs['type']=='call':
            if len(attrs['args']):
                #TODO: test this
                # fetches the values of the variables which names where passed in the args attribute
                args = [self.eval(browser, arg) for arg in attrs['args'].split(',')]
            else:
                args = []
            # get the object
            if 'model' in attrs:
                obj = env[attrs['model']]
            else:
                obj = browser  # the record(set) is an instance of the model
            # get the ids
            if 'ids' in attrs:
                ids = self.eval(browser, attrs['ids'])
            else:
                ids = browser.ids
            # call the method itself
            newdatas = getattr(obj, attrs['name'])(*args)

            def parse_result_tree(node, parent, datas):
                # Render the method's result(s) following the template shape.
                if not node.tag == etree.Comment:
                    el = etree.SubElement(parent, node.tag)
                    atr = self.node_attrs_get(node)
                    if 'value' in atr:
                        # NOTE(review): the unicode builtin does not exist on
                        # Python 3; this isinstance check would raise NameError.
                        if not isinstance(datas[atr['value']], (str, unicode)):
                            txt = str(datas[atr['value']])
                        else:
                            txt = datas[atr['value']]
                        el.text = txt
                    else:
                        for el_cld in node:
                            parse_result_tree(el_cld, el, datas)
            if not isinstance(newdatas, (BaseModel, list)):
                newdatas = [newdatas]
            for newdata in newdatas:
                parse_result_tree(node, parent, newdata)
        elif attrs['type']=='zoom':
            # Recurse into a related record (or each record of a recordset).
            value = self.get_value(browser, attrs['name'])
            if value:
                if not isinstance(value, (BaseModel, list)):
                    v_list = [value]
                else:
                    v_list = value
                for v in v_list:
                    el = etree.SubElement(parent, node.tag)
                    for el_cld in node:
                        self.parse_node(el_cld, el, v)
    else:
        # if there is no "type" attribute in the node, copy it to the xml data and parse its children
        if not node.tag == etree.Comment:
            if node.tag == parent.tag:
                el = parent
            else:
                el = etree.SubElement(parent, node.tag)
            for el_cld in node:
                self.parse_node(el_cld,el, browser)
def xml_get(self):
return etree.tostring(self.doc,encoding="utf-8",xml_declaration=True,pretty_print=True)
def parse_tree(self, ids, model, context=None):
env = odoo.api.Environment(self.cr, self.uid, context or {})
browser = env[model].browse(ids)
self.parse_node(self.dom, self.doc, browser)
def parse_string(self, xml, ids, model, context=None):
# parses the xml template to memory
self.dom = etree.XML(xml)
# create the xml data from the xml template
self.parse_tree(ids, model, context)
def parse(self, filename, ids, model, context=None):
# parses the xml template to memory
src_file = tools.file_open(filename)
try:
self.dom = etree.XML(src_file.read())
self.doc = etree.Element(self.dom.tag)
self.parse_tree(ids, model, context)
finally:
src_file.close()
def close(self):
self.doc = None
self.dom = None
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, AI Research, Data Technology Centre, Volkswagen Group"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "<NAME>"
import logging
import json
import argparse
import time
import paho.mqtt.client as mqtt
import numpy as np
import cv2
import gdk.config as config
import gdk.utils as utils
import gdk.common as common
common.setup_logging(level=logging.DEBUG)
logger = logging.getLogger(__name__)
from ext.OnAVOS.segmenter import create_engine
from gdk.network_interface import net_segment_image, net_segment_image_batch
from ext.OnAVOS.LatentSpace import create_latent_engine
from debugger import debugger
# from ext.OnAVOS.LatentSpace import create_small_latent_engine2 as create_latent_engine
def _subscribe_img_topic(client, userdata, flags, rc):
    """on_connect callback: log the broker's result code and subscribe to the
    "img" topic so that _receive_img starts receiving frames."""
    rc_text = str(rc)
    logger.debug("Connected with result code " + rc_text)
    logger.debug("Subscribing to topic=img")
    client.subscribe("img")
def _receive_img(client, userdata, msg):
    """on_message callback: decode the incoming frame, run segmentation, and
    publish the resulting latents/coords/boxes on the "seg_img" topic.

    Optionally returns the mask (when the request asks for it), shows debug
    windows (when started with -viz), and feeds the latent debugger.
    """
    started = time.time()
    func, args = json.loads(msg.payload.decode())
    image = utils.decode_numpy_array(args["image"])
    logger.debug("%s %s", image.shape, image.dtype)
    logger.debug("Doing Segmentation")
    latents, coords, mask, boxes, small_ims = net_segment_image(
        image,
        userdata["engine"],
        userdata["latent_engine"],
        occlude=args["occlude"],
        compute_latent=args["compute_latent"],
        final_latent=args["final_latent"],
        scale=args["scale"],
    )
    logger.debug("Publishing Coords/Latents")
    logger.debug("got %d latents, and %d coords and %d boxes",
                 len(latents), len(coords), len(boxes))
    data = {"latents": latents, "coords": coords, "boxes": boxes}
    if "return_mask" in args and args["return_mask"]:
        logger.debug("Appending mask to message!")
        data["mask"] = mask
    client.publish(topic="seg_img", payload=utils.encode_cmd(data), retain=False)
    logger.debug("seg time: %f", time.time() - started)

    if userdata["viz"]:
        # Mark the configured tracking point and show mask/image windows.
        track_x, track_y = config.XY_TRACK_POINT[0], config.XY_TRACK_POINT[1]
        cv2.circle(image, (track_x, track_y), 10, (0, 255, 0), 1)
        cv2.imshow("mask", utils.decode_numpy_array(mask))
        cv2.imshow("image", image)
        cv2.waitKey(1)
    if latents and not args["final_latent"]:
        dbg = userdata["debugger"]
        dbg.add(small_ims, latents)
        dbg.cluster()
def init_img_topic(client):
    """Wire this module's MQTT callbacks onto *client*."""
    client.on_connect = _subscribe_img_topic
    client.on_message = _receive_img
def main():
    """Start the segmentation slave: build the engines, connect to the MQTT
    broker, and serve "img" messages until interrupted.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): default is redundant with required=True; kept for
    # interface compatibility.
    parser.add_argument('--name', '-n', type=str, default='net_seg', required=True,
                        help='Name this client/slave announces to the server. Needs to match on server side.')
    parser.add_argument('--ip', '-ip', type=str, default='', required=False,
                        help='Name this client/slave announces to the server. Needs to match on server side.')
    parser.add_argument('-viz', action='store_true', help="Activate this to enable visualization output")
    args = parser.parse_args()

    size = (480, 640)
    logger.info("Starting engine")
    engine = create_engine()
    logger.info("Starting Latent Engine")
    latent_engine = create_latent_engine()
    debg = debugger()

    # Userdata package handed to every MQTT callback.
    broker_data = {
        "size": size,
        "engine": engine,
        "latent_engine": latent_engine,
        "viz": args.viz,
        "debugger": debg
    }
    if args.ip:
        config.BROKER_IP = args.ip
    else:
        config.BROKER_IP = utils.find_master(args.name)

    logger.info("Broker: Connecting")
    client = mqtt.Client(userdata=broker_data)
    client.connect(config.BROKER_IP, config.BROKER_PORT, keepalive=60)
    logger.info("Initialize IMG Client Functions")
    init_img_topic(client)

    logger.info("Entering Forever Loop")
    # BUG FIX: loop_forever() blocks until the client is stopped, so an
    # unconditional disconnect() placed after it was unreachable. Running the
    # loop under try/finally guarantees the broker connection is released on
    # KeyboardInterrupt or any error.
    try:
        client.loop_forever()
    finally:
        client.disconnect()
if __name__ == "__main__":
main()
|
<reponame>tsvstar/vk_downloader
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from os import path
from sys import stdout
from time import clock
class BaseYoutubeError(Exception):
    """Base class for all errors raised by this module."""
    pass
class FileExistsError(BaseYoutubeError):
    """Raised when the download target file already exists on disk."""
    pass
class MultipleObjectsReturned(BaseYoutubeError):
    """Raised when a lookup expected one result but found several."""
    pass
class YouTubeError(BaseYoutubeError):
    """Raised for errors reported by (or while talking to) YouTube."""
    pass
class CipherError(BaseYoutubeError):
    """Raised when the stream signature/cipher cannot be decoded."""
    pass
##import argparse
##class FullPaths(argparse.Action):
## """Expand user- and relative-paths"""
## def __call__(self, parser, namespace, values, option_string=None):
## setattr(namespace, self.dest, path.abspath(path.expanduser(values)))
def safe_filename(text, max_length=200):
    """Sanitizes filenames for many operating systems.

    :params text: The unsanitized pending filename.
    :params max_length: Maximum length of the returned filename.
    """
    # Quickly truncates long filenames (maxsplit=0 leaves the string whole,
    # so this is a plain character-count truncation).
    truncate = lambda t: t[:max_length].rsplit(' ', 0)[0]

    # Tidy up ugly formatted filenames.
    text = text.replace('_', ' ')
    text = text.replace(':', ' -')

    # NTFS forbids filenames containing characters in range 0-31 (0x00-0x1F).
    # BUG FIX: the original used range(0, 31), which skipped chr(31) despite
    # the comment above.
    ntfs = [re.escape(chr(i)) for i in range(0, 32)]

    # Characters that are unsafe on a wide range of operating systems.
    # BUG FIX: the original built this alternation from hand-escaped strings;
    # a lone '\\' entry fused with the '|' join separator into a stray
    # literal-pipe branch. re.escape produces the same removed-character set
    # without the fragile manual escaping.
    paranoid = [re.escape(c) for c in '"#$%\'*,./:;<>?\\^|~']

    blacklist = re.compile('|'.join(ntfs + paranoid), re.UNICODE)
    filename = blacklist.sub('', text)
    return truncate(filename)
def sizeof(bytes):
    """Takes the size of file or folder in bytes and returns size formatted in
    KB, MB, GB, TB or PB.

    :params bytes: size of the file in bytes
    """
    # Walk the units from largest to smallest and return at the first one
    # that fits; anything below 1 KB falls through to the byte/bytes case.
    for exponent, unit in ((5, ' PB'), (4, ' TB'), (3, ' GB'), (2, ' MB'), (1, ' KB')):
        scale = 1024 ** exponent
        if bytes >= scale:
            count = int(bytes / scale)
            return "%s%s" % (str(count), unit)
    count = int(bytes)
    label = ' byte' if count == 1 else ' bytes'
    return "%s%s" % (str(count), label)
def print_status(progress, file_size, start):
    """
    This function - when passed as `on_progress` to `Video.download` - prints
    out the current download progress.

    :params progress: The length of the currently downloaded bytes.
    :params file_size: The total size of the video.
    :params start: time when started
    """
    # NOTE(review): relies on the module-level `from time import clock`;
    # time.clock was removed in Python 3.8 — confirm the targeted runtime.
    percent_done = int(progress) * 100. / file_size
    filled = int(50 * progress / int(file_size))
    elapsed = clock() - start
    if elapsed > 0:
        stdout.write("\r [%s%s][%3.2f%%] %s at %s/s\r " %
                     ('=' * filled, ' ' * (50 - filled), percent_done,
                      sizeof(file_size), sizeof(progress // elapsed)))
        stdout.flush()
"""============ REPLACE URLLIB -- because 'latin1' is unknown ================"""
"""-------------------"""
import sys, traceback
class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.

        :param msg: human-readable error message
        :param tb: optional original traceback object
        :param expected: True for ordinary, non-bug failures (suppresses the bug-report hint)
        :param cause: optional underlying exception
        :param video_id: optional id of the video being processed (prefixed to msg)
        """
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            # BUG FIX: update_cmd was referenced but never assigned (its
            # definition was commented out), so every unexpected error raised
            # a NameError instead of the intended message.
            update_cmd = 'see https://yt-dl.org/update on how to update'
            msg += '; please report this issue on https://yt-dl.org/bug .'
            msg += ' Make sure you are using the latest version; %s.' % update_cmd
            msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
        super(ExtractorError, self).__init__(msg)
        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        """Return the stored traceback formatted as a string, or None if absent."""
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
|
<filename>pypureclient/flasharray/FA_2_4/models/__init__.py
# coding: utf-8
from __future__ import absolute_import
class ReferenceType(object):
    """Type-annotation-only stand-in for reference arguments.

    Lets callers pass collections of Model objects to api functions without
    first converting them to ids or names. Should become a Protocol once the
    typing module supports it.
    """
    def __init__(self):
        # Both attributes start empty; real reference objects fill them in.
        self.id, self.name = '', ''
def quoteString(s):
    r"""Quote string according to
    https://wiki.purestorage.com/display/UXReviewers/Filtering

    Backslashes and single quotes are escaped, and the result is wrapped in
    single quotes. None is passed through unchanged.

    BUG FIX (docstring only): the examples called an undefined name `quote`,
    and the None example showed `None` as expected output, which doctest
    would flag as a failure (a None return prints nothing).

    >>> quoteString("a")
    "'a'"
    >>> quoteString("a\\b")
    "'a\\\\b'"
    >>> quoteString("a'b")
    "'a\\'b'"
    >>> quoteString(None) is None
    True
    """
    if s is None:
        return None
    quoted = str(s).replace("\\", "\\\\").replace("'", "\\'")
    return "'{}'".format(quoted)
def quoteStrings(s):
    """Apply quoteString to every element of *s*; None passes through."""
    return None if s is None else [quoteString(item) for item in s]
# import models into model package
from .active_directory import ActiveDirectory
from .active_directory_get_response import ActiveDirectoryGetResponse
from .active_directory_post import ActiveDirectoryPost
from .active_directory_response import ActiveDirectoryResponse
from .admin import Admin
from .admin_api_token import AdminApiToken
from .admin_api_token_get_response import AdminApiTokenGetResponse
from .admin_api_token_response import AdminApiTokenResponse
from .admin_cache import AdminCache
from .admin_cache_get_response import AdminCacheGetResponse
from .admin_cache_response import AdminCacheResponse
from .admin_get_response import AdminGetResponse
from .admin_patch import AdminPatch
from .admin_post import AdminPost
from .admin_response import AdminResponse
from .admin_role import AdminRole
from .admin_settings import AdminSettings
from .admin_settings_response import AdminSettingsResponse
from .aggregate_replication_performance import AggregateReplicationPerformance
from .alert import Alert
from .alert_event import AlertEvent
from .alert_event_get_response import AlertEventGetResponse
from .alert_event_response import AlertEventResponse
from .alert_get_response import AlertGetResponse
from .alert_response import AlertResponse
from .alert_watcher import AlertWatcher
from .alert_watcher_get_response import AlertWatcherGetResponse
from .alert_watcher_patch import AlertWatcherPatch
from .alert_watcher_post import AlertWatcherPost
from .alert_watcher_response import AlertWatcherResponse
from .api_client import ApiClient
from .api_client_get_response import ApiClientGetResponse
from .api_client_patch import ApiClientPatch
from .api_client_post import ApiClientPost
from .api_client_response import ApiClientResponse
from .api_token import ApiToken
from .api_version_response import ApiVersionResponse
from .app import App
from .app_get_response import AppGetResponse
from .app_node import AppNode
from .app_node_get_response import AppNodeGetResponse
from .app_node_response import AppNodeResponse
from .app_response import AppResponse
from .array import Array
from .array_connection import ArrayConnection
from .array_connection_get_response import ArrayConnectionGetResponse
from .array_connection_key import ArrayConnectionKey
from .array_connection_key_get_response import ArrayConnectionKeyGetResponse
from .array_connection_path import ArrayConnectionPath
from .array_connection_path_get_response import ArrayConnectionPathGetResponse
from .array_connection_path_response import ArrayConnectionPathResponse
from .array_connection_post import ArrayConnectionPost
from .array_connection_response import ArrayConnectionResponse
from .array_factory_reset_token import ArrayFactoryResetToken
from .array_factory_reset_token_get_response import ArrayFactoryResetTokenGetResponse
from .array_factory_reset_token_response import ArrayFactoryResetTokenResponse
from .array_get_response import ArrayGetResponse
from .array_performance import ArrayPerformance
from .array_performance_get_response import ArrayPerformanceGetResponse
from .array_response import ArrayResponse
from .array_space import ArraySpace
from .array_space_get_response import ArraySpaceGetResponse
from .arrays import Arrays
from .audit import Audit
from .audit_get_response import AuditGetResponse
from .audit_response import AuditResponse
from .built_in import BuiltIn
from .built_in_relationship import BuiltInRelationship
from .built_in_resource_no_id import BuiltInResourceNoId
from .certificate import Certificate
from .certificate_get_response import CertificateGetResponse
from .certificate_post import CertificatePost
from .certificate_response import CertificateResponse
from .certificate_signing_request import CertificateSigningRequest
from .certificate_signing_request_post import CertificateSigningRequestPost
from .certificate_signing_request_response import CertificateSigningRequestResponse
from .chap import Chap
from .connection import Connection
from .connection_get_response import ConnectionGetResponse
from .connection_post import ConnectionPost
from .connection_response import ConnectionResponse
from .controller import Controller
from .controller_get_response import ControllerGetResponse
from .controllers import Controllers
from .destroyed_patch_post import DestroyedPatchPost
from .directory import Directory
from .directory_export import DirectoryExport
from .directory_export_get_response import DirectoryExportGetResponse
from .directory_export_post import DirectoryExportPost
from .directory_export_response import DirectoryExportResponse
from .directory_get_response import DirectoryGetResponse
from .directory_patch import DirectoryPatch
from .directory_performance import DirectoryPerformance
from .directory_performance_get_response import DirectoryPerformanceGetResponse
from .directory_policy_export_post import DirectoryPolicyExportPost
from .directory_policy_post import DirectoryPolicyPost
from .directory_post import DirectoryPost
from .directory_response import DirectoryResponse
from .directory_service import DirectoryService
from .directory_service_get_response import DirectoryServiceGetResponse
from .directory_service_management import DirectoryServiceManagement
from .directory_service_response import DirectoryServiceResponse
from .directory_service_role import DirectoryServiceRole
from .directory_service_role_get_response import DirectoryServiceRoleGetResponse
from .directory_service_role_response import DirectoryServiceRoleResponse
from .directory_snapshot import DirectorySnapshot
from .directory_snapshot_get_response import DirectorySnapshotGetResponse
from .directory_snapshot_patch import DirectorySnapshotPatch
from .directory_snapshot_post import DirectorySnapshotPost
from .directory_snapshot_response import DirectorySnapshotResponse
from .directory_space import DirectorySpace
from .directorypolicyexportpost_policies import DirectorypolicyexportpostPolicies
from .directorypolicypost_policies import DirectorypolicypostPolicies
from .dns import Dns
from .dns_get_response import DnsGetResponse
from .dns_patch import DnsPatch
from .dns_response import DnsResponse
from .drive import Drive
from .drive_get_response import DriveGetResponse
from .drive_response import DriveResponse
from .eula import Eula
from .eula_get_response import EulaGetResponse
from .eula_response import EulaResponse
from .eula_signature import EulaSignature
from .file_system import FileSystem
from .file_system_get_response import FileSystemGetResponse
from .file_system_patch import FileSystemPatch
from .file_system_response import FileSystemResponse
from .fixed_name_resource_no_id import FixedNameResourceNoId
from .fixed_reference import FixedReference
from .fixed_reference_no_id import FixedReferenceNoId
from .fixed_reference_with_type import FixedReferenceWithType
from .hardware import Hardware
from .hardware_get_response import HardwareGetResponse
from .hardware_patch import HardwarePatch
from .hardware_response import HardwareResponse
from .host import Host
from .host_get_response import HostGetResponse
from .host_group import HostGroup
from .host_group_get_response import HostGroupGetResponse
from .host_group_patch import HostGroupPatch
from .host_group_performance import HostGroupPerformance
from .host_group_performance_by_array import HostGroupPerformanceByArray
from .host_group_response import HostGroupResponse
from .host_group_space import HostGroupSpace
from .host_patch import HostPatch
from .host_performance import HostPerformance
from .host_performance_balance import HostPerformanceBalance
from .host_performance_balance_get_response import HostPerformanceBalanceGetResponse
from .host_performance_by_array import HostPerformanceByArray
from .host_port_connectivity import HostPortConnectivity
from .host_post import HostPost
from .host_response import HostResponse
from .host_space import HostSpace
from .inline_response400 import InlineResponse400
from .inline_response401 import InlineResponse401
from .kmip import Kmip
from .kmip_get_response import KmipGetResponse
from .kmip_patch import KmipPatch
from .kmip_post import KmipPost
from .kmip_response import KmipResponse
from .kmip_test_result import KmipTestResult
from .kmip_test_result_get_response import KmipTestResultGetResponse
from .maintenance_window import MaintenanceWindow
from .maintenance_window_post import MaintenanceWindowPost
from .maintenance_windows_get_response import MaintenanceWindowsGetResponse
from .maintenance_windows_response import MaintenanceWindowsResponse
from .member import Member
from .member_get_response import MemberGetResponse
from .member_no_id_all import MemberNoIdAll
from .member_no_id_all_get_response import MemberNoIdAllGetResponse
from .member_no_id_all_response import MemberNoIdAllResponse
from .member_no_id_group import MemberNoIdGroup
from .member_response import MemberResponse
from .network_interface import NetworkInterface
from .network_interface_eth import NetworkInterfaceEth
from .network_interface_fc import NetworkInterfaceFc
from .network_interface_get_response import NetworkInterfaceGetResponse
from .network_interface_patch import NetworkInterfacePatch
from .network_interface_performance import NetworkInterfacePerformance
from .network_interface_performance_eth import NetworkInterfacePerformanceEth
from .network_interface_performance_fc import NetworkInterfacePerformanceFc
from .network_interface_performance_get_response import NetworkInterfacePerformanceGetResponse
from .network_interface_post import NetworkInterfacePost
from .network_interface_response import NetworkInterfaceResponse
from .networkinterfacepatch_eth import NetworkinterfacepatchEth
from .networkinterfacepost_eth import NetworkinterfacepostEth
from .new_name import NewName
from .oauth_token_response import OauthTokenResponse
from .offload import Offload
from .offload_azure import OffloadAzure
from .offload_get_response import OffloadGetResponse
from .offload_google_cloud import OffloadGoogleCloud
from .offload_nfs import OffloadNfs
from .offload_post import OffloadPost
from .offload_response import OffloadResponse
from .offload_s3 import OffloadS3
from .override_check import OverrideCheck
from .page_info import PageInfo
from .performance import Performance
from .pod import Pod
from .pod_array_status import PodArrayStatus
from .pod_get_response import PodGetResponse
from .pod_patch import PodPatch
from .pod_performance import PodPerformance
from .pod_performance_by_array import PodPerformanceByArray
from .pod_performance_replication import PodPerformanceReplication
from .pod_performance_replication_by_array import PodPerformanceReplicationByArray
from .pod_performance_replication_by_array_get_response import PodPerformanceReplicationByArrayGetResponse
from .pod_performance_replication_by_array_response import PodPerformanceReplicationByArrayResponse
from .pod_performance_replication_get_response import PodPerformanceReplicationGetResponse
from .pod_performance_replication_response import PodPerformanceReplicationResponse
from .pod_post import PodPost
from .pod_replica_link import PodReplicaLink
from .pod_replica_link_get_response import PodReplicaLinkGetResponse
from .pod_replica_link_lag import PodReplicaLinkLag
from .pod_replica_link_lag_get_response import PodReplicaLinkLagGetResponse
from .pod_replica_link_lag_response import PodReplicaLinkLagResponse
from .pod_replica_link_patch import PodReplicaLinkPatch
from .pod_replica_link_performance import PodReplicaLinkPerformance
from .pod_replica_link_performance_replication import PodReplicaLinkPerformanceReplication
from .pod_replica_link_performance_replication_get_response import PodReplicaLinkPerformanceReplicationGetResponse
from .pod_replica_link_performance_replication_response import PodReplicaLinkPerformanceReplicationResponse
from .pod_replica_link_response import PodReplicaLinkResponse
from .pod_response import PodResponse
from .pod_space import PodSpace
from .policy import Policy
from .policy_get_response import PolicyGetResponse
from .policy_member import PolicyMember
from .policy_member_export import PolicyMemberExport
from .policy_member_export_get_response import PolicyMemberExportGetResponse
from .policy_member_export_post import PolicyMemberExportPost
from .policy_member_export_response import PolicyMemberExportResponse
from .policy_member_get_response import PolicyMemberGetResponse
from .policy_member_post import PolicyMemberPost
from .policy_member_response import PolicyMemberResponse
from .policy_patch import PolicyPatch
from .policy_post import PolicyPost
from .policy_response import PolicyResponse
from .policy_rule_nfs_client import PolicyRuleNfsClient
from .policy_rule_nfs_client_get_response import PolicyRuleNfsClientGetResponse
from .policy_rule_nfs_client_post import PolicyRuleNfsClientPost
from .policy_rule_nfs_client_response import PolicyRuleNfsClientResponse
from .policy_rule_smb_client import PolicyRuleSmbClient
from .policy_rule_smb_client_get_response import PolicyRuleSmbClientGetResponse
from .policy_rule_smb_client_post import PolicyRuleSmbClientPost
from .policy_rule_smb_client_response import PolicyRuleSmbClientResponse
from .policy_rule_snapshot import PolicyRuleSnapshot
from .policy_rule_snapshot_get_response import PolicyRuleSnapshotGetResponse
from .policy_rule_snapshot_post import PolicyRuleSnapshotPost
from .policy_rule_snapshot_response import PolicyRuleSnapshotResponse
from .policy_smb import PolicySmb
from .policy_smb_get_response import PolicySmbGetResponse
from .policy_smb_patch import PolicySmbPatch
from .policy_smb_post import PolicySmbPost
from .policy_smb_response import PolicySmbResponse
from .policymemberexportpost_members import PolicymemberexportpostMembers
from .policymemberpost_members import PolicymemberpostMembers
from .policyrulenfsclientpost_rules import PolicyrulenfsclientpostRules
from .policyrulesmbclientpost_rules import PolicyrulesmbclientpostRules
from .policyrulesnapshotpost_rules import PolicyrulesnapshotpostRules
from .port import Port
from .port_common import PortCommon
from .port_get_response import PortGetResponse
from .port_initiator import PortInitiator
from .port_initiators_get_response import PortInitiatorsGetResponse
from .protection_group import ProtectionGroup
from .protection_group_get_response import ProtectionGroupGetResponse
from .protection_group_performance import ProtectionGroupPerformance
from .protection_group_performance_array import ProtectionGroupPerformanceArray
from .protection_group_performance_array_response import ProtectionGroupPerformanceArrayResponse
from .protection_group_performance_by_array import ProtectionGroupPerformanceByArray
from .protection_group_performance_response import ProtectionGroupPerformanceResponse
from .protection_group_response import ProtectionGroupResponse
from .protection_group_snapshot import ProtectionGroupSnapshot
from .protection_group_snapshot_get_response import ProtectionGroupSnapshotGetResponse
from .protection_group_snapshot_patch import ProtectionGroupSnapshotPatch
from .protection_group_snapshot_post import ProtectionGroupSnapshotPost
from .protection_group_snapshot_response import ProtectionGroupSnapshotResponse
from .protection_group_snapshot_transfer import ProtectionGroupSnapshotTransfer
from .protection_group_snapshot_transfer_get_response import ProtectionGroupSnapshotTransferGetResponse
from .protection_group_snapshot_transfer_response import ProtectionGroupSnapshotTransferResponse
from .protection_group_space import ProtectionGroupSpace
from .protection_group_target import ProtectionGroupTarget
from .protection_group_target_get_response import ProtectionGroupTargetGetResponse
from .protection_group_target_response import ProtectionGroupTargetResponse
from .qos import Qos
from .reference import Reference
from .reference_no_id import ReferenceNoId
from .reference_with_type import ReferenceWithType
from .remote_pod import RemotePod
from .remote_pods_response import RemotePodsResponse
from .remote_protection_group import RemoteProtectionGroup
from .remote_protection_group_get_response import RemoteProtectionGroupGetResponse
from .remote_protection_group_response import RemoteProtectionGroupResponse
from .remote_protection_group_snapshot import RemoteProtectionGroupSnapshot
from .remote_protection_group_snapshot_get_response import RemoteProtectionGroupSnapshotGetResponse
from .remote_protection_group_snapshot_post import RemoteProtectionGroupSnapshotPost
from .remote_protection_group_snapshot_response import RemoteProtectionGroupSnapshotResponse
from .remote_protection_group_snapshot_transfer import RemoteProtectionGroupSnapshotTransfer
from .remote_protection_group_snapshot_transfer_get_response import RemoteProtectionGroupSnapshotTransferGetResponse
from .remote_protection_group_snapshot_transfer_response import RemoteProtectionGroupSnapshotTransferResponse
from .remote_volume_snapshot import RemoteVolumeSnapshot
from .remote_volume_snapshot_get_response import RemoteVolumeSnapshotGetResponse
from .remote_volume_snapshot_response import RemoteVolumeSnapshotResponse
from .remote_volume_snapshot_transfer import RemoteVolumeSnapshotTransfer
from .remote_volume_snapshot_transfer_get_response import RemoteVolumeSnapshotTransferGetResponse
from .remote_volume_snapshot_transfer_response import RemoteVolumeSnapshotTransferResponse
from .replica_link_lag import ReplicaLinkLag
from .replica_link_performance_replication import ReplicaLinkPerformanceReplication
from .replication_performance_with_total import ReplicationPerformanceWithTotal
from .replication_schedule import ReplicationSchedule
from .resource import Resource
from .resource_fixed_non_unique_name import ResourceFixedNonUniqueName
from .resource_no_id import ResourceNoId
from .resource_performance import ResourcePerformance
from .resource_performance_by_array import ResourcePerformanceByArray
from .resource_performance_by_array_get_response import ResourcePerformanceByArrayGetResponse
from .resource_performance_get_response import ResourcePerformanceGetResponse
from .resource_performance_no_id import ResourcePerformanceNoId
from .resource_performance_no_id_by_array import ResourcePerformanceNoIdByArray
from .resource_performance_no_id_by_array_get_response import ResourcePerformanceNoIdByArrayGetResponse
from .resource_performance_no_id_get_response import ResourcePerformanceNoIdGetResponse
from .resource_pod_space import ResourcePodSpace
from .resource_pod_space_get_response import ResourcePodSpaceGetResponse
from .resource_space import ResourceSpace
from .resource_space_get_response import ResourceSpaceGetResponse
from .resource_space_no_id import ResourceSpaceNoId
from .resource_space_no_id_get_response import ResourceSpaceNoIdGetResponse
from .retention_policy import RetentionPolicy
from .session import Session
from .session_get_response import SessionGetResponse
from .smis import Smis
from .smis_get_response import SmisGetResponse
from .smis_response import SmisResponse
from .smtp_server import SmtpServer
from .smtp_server_get_response import SmtpServerGetResponse
from .smtp_server_response import SmtpServerResponse
from .snapshot import Snapshot
from .snapshot_schedule import SnapshotSchedule
from .snmp_agent import SnmpAgent
from .snmp_agent_get_response import SnmpAgentGetResponse
from .snmp_agent_mib import SnmpAgentMib
from .snmp_agent_mib_get_response import SnmpAgentMibGetResponse
from .snmp_agent_mib_response import SnmpAgentMibResponse
from .snmp_agent_response import SnmpAgentResponse
from .snmp_manager import SnmpManager
from .snmp_manager_get_response import SnmpManagerGetResponse
from .snmp_manager_post import SnmpManagerPost
from .snmp_manager_response import SnmpManagerResponse
from .snmp_v2c import SnmpV2c
from .snmp_v3 import SnmpV3
from .software import Software
from .software_get_response import SoftwareGetResponse
from .software_installation import SoftwareInstallation
from .software_installation_patch import SoftwareInstallationPatch
from .software_installation_post import SoftwareInstallationPost
from .software_installation_step import SoftwareInstallationStep
from .software_installation_steps import SoftwareInstallationSteps
from .software_installation_steps_checks import SoftwareInstallationStepsChecks
from .software_installation_steps_get_response import SoftwareInstallationStepsGetResponse
from .software_installation_steps_response import SoftwareInstallationStepsResponse
from .software_installations import SoftwareInstallations
from .software_installations_get_response import SoftwareInstallationsGetResponse
from .software_installations_response import SoftwareInstallationsResponse
from .software_response import SoftwareResponse
from .software_upgrade_plan import SoftwareUpgradePlan
from .space import Space
from .start_end_time import StartEndTime
from .subnet import Subnet
from .subnet_get_response import SubnetGetResponse
from .subnet_patch import SubnetPatch
from .subnet_post import SubnetPost
from .subnet_response import SubnetResponse
from .support import Support
from .support_get_response import SupportGetResponse
from .support_patch import SupportPatch
from .support_remote_assist_paths import SupportRemoteAssistPaths
from .support_response import SupportResponse
from .syslog_server import SyslogServer
from .syslog_server_get_response import SyslogServerGetResponse
from .syslog_server_response import SyslogServerResponse
from .syslog_server_settings import SyslogServerSettings
from .syslog_server_settings_get_response import SyslogServerSettingsGetResponse
from .syslog_server_settings_response import SyslogServerSettingsResponse
from .tag import Tag
from .tag_get_response import TagGetResponse
from .tag_response import TagResponse
from .target_protection_group import TargetProtectionGroup
from .target_protection_group_post_patch import TargetProtectionGroupPostPatch
from .test_result import TestResult
from .test_result_get_response import TestResultGetResponse
from .test_result_response import TestResultResponse
from .test_result_with_resource import TestResultWithResource
from .test_result_with_resource_get_response import TestResultWithResourceGetResponse
from .test_result_with_resource_response import TestResultWithResourceResponse
from .throttle import Throttle
from .time_window import TimeWindow
from .total_item_count_response import TotalItemCountResponse
from .transfer import Transfer
from .username import Username
from .username_response import UsernameResponse
from .volume import Volume
from .volume_common import VolumeCommon
from .volume_get_response import VolumeGetResponse
from .volume_group import VolumeGroup
from .volume_group_get_response import VolumeGroupGetResponse
from .volume_group_performance import VolumeGroupPerformance
from .volume_group_post import VolumeGroupPost
from .volume_group_response import VolumeGroupResponse
from .volume_group_space import VolumeGroupSpace
from .volume_patch import VolumePatch
from .volume_performance import VolumePerformance
from .volume_performance_by_array import VolumePerformanceByArray
from .volume_post import VolumePost
from .volume_response import VolumeResponse
from .volume_snapshot import VolumeSnapshot
from .volume_snapshot_get_response import VolumeSnapshotGetResponse
from .volume_snapshot_patch import VolumeSnapshotPatch
from .volume_snapshot_post import VolumeSnapshotPost
from .volume_snapshot_response import VolumeSnapshotResponse
from .volume_snapshot_transfer import VolumeSnapshotTransfer
from .volume_snapshot_transfer_get_response import VolumeSnapshotTransferGetResponse
from .volume_snapshot_transfer_response import VolumeSnapshotTransferResponse
from .volume_space import VolumeSpace
|
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sys
import os
import glob
from .command import Command
from sgtk.platform.qt import QtGui, QtCore
from .sgtk_file_dialog import SgtkFileDialog
def _create_invoker():
    """
    Create a callable that executes a function (with arbitrary arguments) on
    the main GUI thread and returns its result to the calling thread.

    :returns: A direct pass-through lambda when already on the main thread,
        otherwise a MainThreadInvoker instance that marshals the call.
    """
    # If we are already in the main thread, no need for an invoker, invoke directly in this thread.
    if QtCore.QThread.currentThread() == QtGui.QApplication.instance().thread():
        return lambda fn, *args, **kwargs: fn(*args, **kwargs)

    class MainThreadInvoker(QtCore.QObject):
        """
        Class that allows sending message to the main thread. This can be useful
        when a background thread needs to prompt the user via a dialog. The
        method passed into the invoker will be invoked on the main thread and
        the result, either a return value or exception, will be brought back
        to the invoking thread as if it was the thread that actually executed
        the code.
        """

        def __init__(self):
            """
            Constructor.
            """
            QtCore.QObject.__init__(self)
            self._res = None
            self._exception = None
            # Make sure that the invoker is bound to the main thread
            self.moveToThread(QtGui.QApplication.instance().thread())

        def __call__(self, fn, *args, **kwargs):
            """
            Asks the MainThreadInvoker to call a function with the provided parameters in the main
            thread.

            :param fn: Function to call in the main thread.
            :param args: Array of arguments for the method.
            :param kwargs: Dictionary of named arguments for the method.
            :returns: The result from the function.
            """
            self._fn = lambda: fn(*args, **kwargs)
            self._res = None
            # Blocks until the slot has finished executing on the main thread.
            QtCore.QMetaObject.invokeMethod(self, "_do_invoke", QtCore.Qt.BlockingQueuedConnection)
            # If an exception has been thrown, rethrow it.
            if self._exception:
                raise self._exception
            return self._res

        @QtCore.Slot()
        def _do_invoke(self):
            """
            Execute function and return result
            """
            try:
                self._res = self._fn()
            # Fixed Python-3-incompatible "except Exception, e" syntax; the
            # "as" form is valid on Python 2.6+ as well.
            except Exception as e:
                self._exception = e

    return MainThreadInvoker()
class ExecuteTankCommandError(Exception):
    """
    Raised when a Toolkit (tank/shotgun) command cannot be located on disk or
    is invoked with an invalid pipeline configuration.
    """
    pass
class ProcessManager(object):
    """
    OS Interface for Shotgun Commands.

    Base class; platform-specific behavior (``open`` and the platform name)
    is provided by the subclasses returned from :meth:`create`.
    """

    # Overridden by the per-platform subclasses (mac / win / linux).
    platform_name = "unknown"

    def _get_toolkit_script_name(self):
        """Preferred file name of the toolkit launcher script."""
        return "shotgun"

    def _get_toolkit_fallback_script_name(self):
        """Legacy file name of the toolkit launcher script."""
        return "tank"

    def _get_launcher(self):
        """
        Get Launcher file name from environment.
        This provides an alternative way to launch applications and open files, instead of os-standard open.

        :returns: String Default Launcher filename. None if none was found.
        """
        return os.environ.get("SHOTGUN_PLUGIN_LAUNCHER")

    def _verify_file_open(self, filepath):
        """
        Verify that a file can be opened.

        :param filepath: String file path that should be opened.
        :raises: Exception If filepath cannot be opened.
        """
        if not os.path.exists(filepath):
            raise Exception("Error opening path [%s]. Path not found." % filepath)

    def _get_full_toolkit_path(self, pipeline_config_path):
        """
        Get the full path of the toolkit script.

        :param pipeline_config_path: String Pipeline folder
        :return: String File path of toolkit script (eg: c:/temp/tank)
        """
        exec_script = os.path.join(pipeline_config_path, self._get_toolkit_script_name())
        # Fall back to the legacy "tank" name when "shotgun" is not present.
        if not os.path.isfile(exec_script):
            exec_script = os.path.join(pipeline_config_path, self._get_toolkit_fallback_script_name())
        return exec_script

    def _verify_pipeline_configuration(self, pipeline_config_path):
        """
        Verify that the pipeline configuration provided is valid.

        :param pipeline_config_path: String Pipeline configuration path
        :raises: ExecuteTankCommandError On invalid toolkit pipeline configuration.
        """
        if not os.path.isdir(pipeline_config_path):
            raise ExecuteTankCommandError("Could not find the Pipeline Configuration on disk: " + pipeline_config_path)
        exec_script = self._get_full_toolkit_path(pipeline_config_path)
        if not os.path.isfile(exec_script):
            raise ExecuteTankCommandError("Could not find the Toolkit command on disk: " + exec_script)

    def _launch_process(self, launcher, filepath, message_error="Error executing command."):
        """
        Standard way of starting a process and handling errors.

        :params launcher: Path of executable
        :params filepath: File to pass as executable argument.
        :params message_error: String to prefix error message in case of an error.
        :returns: Bool If the operation was successful
        """
        args = [launcher, filepath]
        return_code, out, err = Command.call_cmd(args)
        has_error = return_code != 0
        if has_error:
            # Do not log the command line, it might contain sensitive information.
            raise Exception("{message_error}\nReturn code: {return_code}\nOutput: {std_out}\nError: {std_err}"
                            .format(message_error=message_error, return_code=return_code, std_out=out, std_err=err))
        return True

    def open(self, filepath):
        """
        Opens a file with default os association or launcher found in environments. Not blocking.

        :param filepath: String file path (ex: "c:/file.mov")
        :return: Bool If the operation was successful
        """
        raise NotImplementedError("Open not implemented in base class!")

    def execute_toolkit_command(self, pipeline_config_path, command, args):
        """
        Execute Toolkit Command

        :param pipeline_config_path: String Pipeline configuration path
        :param command: Commands
        :param args: List Script arguments
        :returns: (stdout, stderr, returncode) Returns standard process output
        """
        self._verify_pipeline_configuration(pipeline_config_path)
        # Only "shotgun*" commands are allowed through this entry point.
        if not command.startswith("shotgun"):
            raise ExecuteTankCommandError("ExecuteTankCommand error. Command needs to be a shotgun command [{command}]".format(command=command))
        try:
            #
            # Get toolkit Script Path
            exec_script = self._get_full_toolkit_path(pipeline_config_path)
            # Get toolkit script argument list
            script_args = [command] + args
            #
            # Launch script
            exec_command = [exec_script] + script_args
            return_code, out, err = Command.call_cmd(exec_command)
            return (out, err, return_code)
        # Fixed Python-3-incompatible "except Exception, e"; also replaced the
        # Python-2-only e.message attribute with str(e).
        except Exception as e:
            # call_cmd is not including sensitive information in the error message, so this won't
            # either.
            raise Exception("Error executing toolkit command: " + str(e))

    def _add_action_output(self, actions, out, err, code):
        """
        Simple shortcut to quickly add process output to a dictionary
        """
        actions['out'] = out
        actions['err'] = err
        actions['retcode'] = code

    def get_project_actions(self, pipeline_config_paths):
        """
        Get all actions for all environments from project path

        Overly complicated way of keeping track of toolkit's get/cache action command.
        Currently creates a dictionary to keep track of all output (error code, stderr/stdout) from toolkit command.
        This code was previously part of the shotgun client and is therefore made to match the exact same behavior
        as a starting point, in order to always output the same error messages.
        It can (and should) be simplified to only output a single error (if any), at the end of all commands,
        without any return code or convoluted stderr/stdout embedded dictionaries.

        :param pipeline_config_paths: [String] Pipeline configuration paths
        """
        project_actions = {}
        for pipeline_config_path in pipeline_config_paths:
            try:
                self._verify_pipeline_configuration(pipeline_config_path)
                env_path = os.path.join(pipeline_config_path, "config", "env")
                env_glob = os.path.join(env_path, "shotgun_*.yml")
                env_files = glob.glob(env_glob)
                project_actions[pipeline_config_path] = {}
                shotgun_get_actions_dict = project_actions[pipeline_config_path]["shotgun_get_actions"] = {}
                shotgun_cache_actions_dict = project_actions[pipeline_config_path]["shotgun_cache_actions"] = {}
                for env_filepath in env_files:
                    env_filename = os.path.basename(env_filepath)
                    # "shotgun_shot.yml" -> entity "shot"
                    entity = os.path.splitext(env_filename.replace("shotgun_", ""))[0]
                    cache_filename = "shotgun_" + self.platform_name + "_" + entity + ".txt"
                    # Need to store where actions have occurred in order to give proper error message to client
                    # This could be made much better in the future by creating the actual final actions from here instead.
                    shotgun_get_actions_dict[env_filename] = {}
                    shotgun_cache_actions_dict[cache_filename] = {}
                    (out, err, code) = self.execute_toolkit_command(pipeline_config_path,
                                                                    "shotgun_get_actions",
                                                                    [cache_filename, env_filename])
                    self._add_action_output(shotgun_get_actions_dict[env_filename], out, err, code)
                    # Return code 1 means the cache is stale/missing: rebuild it
                    # and retry the get once.
                    if code == 1:
                        (out, err, code) = self.execute_toolkit_command(pipeline_config_path,
                                                                        "shotgun_cache_actions",
                                                                        [entity, cache_filename])
                        self._add_action_output(shotgun_cache_actions_dict[cache_filename], out, err, code)
                        if code == 0:
                            (out, err, code) = self.execute_toolkit_command(pipeline_config_path,
                                                                            "shotgun_get_actions",
                                                                            [cache_filename, env_filename])
                            self._add_action_output(shotgun_get_actions_dict[env_filename], out, err, code)
            # Fixed Python-3-incompatible "except ExecuteTankCommandError, e".
            except ExecuteTankCommandError as e:
                # Something is wrong with the pipeline configuration,
                # Clear any temporary result we might have accumulated for that pipeline
                # configuration.
                project_actions[pipeline_config_path] = {}
                # Report the error that just happened.
                project_actions[pipeline_config_path]["error"] = True
                project_actions[pipeline_config_path]["error_message"] = str(e)
                # Move on to the next pipeline configuration.
                continue
        # We'll keep track of errors in pipeline configurations locally so that
        # errors can be tracked on a per pipeline basis, just like before.
        return project_actions

    def _pick_file_or_directory_in_main_thread(self, multi=False):
        """
        Show the file/directory picker dialog. Must run on the main thread.

        :param multi: Boolean Allow selecting multiple elements.
        :returns: List of selected paths; directories get a trailing separator.
        """
        dialog = SgtkFileDialog(multi, None)
        dialog.setResolveSymlinks(False)
        # Get result.
        result = dialog.exec_()
        files = []
        if result:
            selected_files = dialog.selectedFiles()
            for f in selected_files:
                # Mark directories with a trailing separator so callers can
                # tell them apart from plain files.
                if os.path.isdir(f):
                    f += os.path.sep
                files.append(f)
        return files

    def pick_file_or_directory(self, multi=False):
        """
        Pop-up a file selection window.

        Note: Currently haven't been able to get the proper native dialog to multi select
        both file and directories. Using this work-around for now.

        :param multi: Boolean Allow selecting multiple elements.
        :returns: List of files that were selected with file browser.
        """
        return _create_invoker()(self._pick_file_or_directory_in_main_thread, multi=multi)

    @staticmethod
    def create():
        """
        Create Process Manager according to current context (such as os, etc..)

        :returns: ProcessManager
        """
        # Explicit relative imports (the file already uses "from .command
        # import Command"); the old implicit form is a Python-2-only feature.
        if sys.platform == "darwin":
            from .process_manager_mac import ProcessManagerMac
            return ProcessManagerMac()
        elif sys.platform == "win32":
            from .process_manager_win import ProcessManagerWin
            return ProcessManagerWin()
        elif sys.platform.startswith("linux"):
            from .process_manager_linux import ProcessManagerLinux
            return ProcessManagerLinux()
        else:
            raise RuntimeError("Unsupported platform: %s" % sys.platform)
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Timewarrior extension: catreport
# Author: <NAME>
# License: MIT
import sys
from timewreport.parser import TimeWarriorParser
class Node(dict):
    '''This node represents a task or a category of tasks and may contain other nodes.

    A Node is a dict mapping child names to child Nodes; ``intervals`` holds
    the timewarrior intervals recorded directly on this node.
    '''

    def __init__(self, name, parent):
        '''
        Parameters
        ----------
        name: string
        parent: instance of Node() (None for the root)
        '''
        self.intervals = []
        self.name = name
        self.parent = parent
        dict.__init__(self)

    def get_node(self, path_to_node, create_if_not_existing=False):
        '''Return a node which is somewhere deeper in the hierarchy.

        If the specified node does not exist return None. If create_if_not_existing
        is True and the specified node does not exist, create the node and return
        the new node.

        Fixed: the original implementation popped elements off *path_to_node*,
        mutating the caller's list; traversal is now non-destructive.

        Parameters
        ----------
        path_to_node: list of strings, e. g. ["projectA", "subprojectA1", "task13"]
        create_if_not_existing: bool, optional, default is False.
        '''
        if len(path_to_node) == 0:
            return self
        child, rest = path_to_node[0], path_to_node[1:]
        if child in self:
            return self[child].get_node(rest, create_if_not_existing)
        elif create_if_not_existing:
            self[child] = Node(child, self)
            return self[child].get_node(rest, create_if_not_existing)
        else:
            return None

    def add_node(self, path_to_node):
        '''Add a new node (creating intermediate levels as needed) and return it.

        Parameters
        ----------
        path_to_node: list of strings, e. g. ["projectA", "subprojectA1", "task13"]
        '''
        return self.get_node(path_to_node, create_if_not_existing=True)

    def is_leaf(self):
        '''Return True, if the node has no child nodes, and False, if it has child nodes.'''
        return len(self) == 0

    def get_duration(self):
        '''Return the total number of seconds spent in this task/category excluding time spent in subcategories.'''
        return sum([i.get_duration().total_seconds() for i in self.intervals])

    def get_cumulated_duration(self):
        '''Return the total number of seconds spent in this task/category including time spent in subcategories.'''
        return self.get_duration() + sum([child.get_cumulated_duration() for child in self.values()])
def store_intervals_in_tree(intervals):
    '''Create and return a tree structure containing all tracked time intervals.

    Each tag of an interval is interpreted as a dot-separated path
    (e.g. "projectA.task1") and the interval is attached to that node.

    Parameters
    ----------
    intervals: list of intervals, as returned by TimeWarriorParser(stdin).get_intervals()
    '''
    tree = Node('root', None)
    for iv in intervals:
        for tag in iv.get_tags():
            target = tree.add_node(tag.split('.'))
            target.intervals.append(iv)
    return tree
def print_report(root):
    '''Create the catreport.

    Prints a three-column table (task name, hours, share of parent) to stdout,
    walking the tree depth-first with one indentation level per depth.

    Parameters
    ----------
    root: instance of class Node, as returned by store_intervals_in_tree()
    '''
    #tabular layout
    width_col1 = 60
    width_col2 = 30
    width_col3 = 30
    #print header
    print("\n")
    print("{0:<{wc1}}{1:<{wc2}}{2:<{wc3}}".format('Task', 'Time [h]', 'Share [%]', wc1 = width_col1, wc2 = width_col2, wc3 = width_col3))
    print((width_col1+width_col2+width_col3)*"=")
    #print data
    def print_recursively(node, level = 0):
        #print line
        # Cumulated duration includes subcategories; share is relative to the
        # parent's cumulated duration (the root is defined as 100 %).
        hours = node.get_cumulated_duration()/(60*60)
        if node.parent is None:
            share = 100
        else:
            share = 100 * hours / (node.parent.get_cumulated_duration()/(60*60))
        shift = level * ' '
        print("{0:<{wc1}}{1:<{wc2}}{2:<{wc3}}".format(shift + node.name, shift + "{:.1f}".format(hours), shift + "{:.1f}".format(share) , wc1 = width_col1, wc2 = width_col2, wc3 = width_col3))
        #go down the tree
        for key in sorted(node.keys()):
            print_recursively(node[key], level + 1)
        # Time tracked directly on a non-leaf node (not attributable to any
        # child) is reported as an extra synthetic "unknown" row.
        if node.get_duration() > 0 and len(node) > 0:
            h = node.get_duration()/(60*60)
            s = 100 * h / hours
            shift = (level + 1) * ' '
            print("{0:<{wc1}}{1:<{wc2}}{2:<{wc3}}".format(shift + 'unknown', shift + "{:.1f}".format(h), shift + "{:.1f}".format(s) , wc1 = width_col1, wc2 = width_col2, wc3 = width_col3))
    print_recursively(root)
    print("\n")
def main(stdin):
    '''Parse timewarrior report data from *stdin* and print the category report.

    Removed the unused tw_config local and dead commented-out code; behavior
    is unchanged.
    '''
    parser = TimeWarriorParser(stdin)
    root = store_intervals_in_tree(parser.get_intervals())
    print_report(root)
# Entry point: timewarrior pipes the report data on stdin when this extension
# is invoked (e.g. `timew catreport`).
if __name__ == "__main__":
    main(sys.stdin)
    # Debug helper: comment out main(...) and uncomment the next line to dump
    # the raw report input (used to capture ./static-data, see load_testdata).
    #print(stdin.read())
######################################################################
## The following code is just for development and debugging. ##
######################################################################
def load_testdata(filename):
    '''This function allows testing functions with a static data set instead of the real data from timewarrior.

    To create a static data set from your real data, comment main(sys.stdin) and uncomment
    print(stdin.read()) in if __name__ == "__main__", and then run timew catreport > static-data
    '''
    with open(filename, "r") as handle:
        static_parser = TimeWarriorParser(handle)
        cfg = static_parser.get_config()
        ivs = static_parser.get_intervals()
        return cfg.get_dict(), ivs
def test():
    '''Run the full report pipeline against the static data set (see load_testdata).'''
    config,intervals = load_testdata("./static-data")
    root = store_intervals_in_tree(intervals)
    print_report(root)
def show_intervals_with(keyword, intervals):
    '''Print date and tags of every interval carrying *keyword* as a tag.'''
    tagged = (iv for iv in intervals if keyword in iv.get_tags())
    for iv in tagged:
        print(iv.get_date(), iv.get_tags())
|
import argparse
import os
from getpass import getpass
import configparser
from github import Github
# INI file where repo, credentials, directory and action settings persist
# between runs (written next to the current working directory).
CONFIG_FILE_NAME = 'manage-service.ini'
def save_file(config):
    """Persist *config* to the on-disk settings file (CONFIG_FILE_NAME)."""
    with open(CONFIG_FILE_NAME, 'w') as handle:
        config.write(handle)
def set_and_save_sha(config):
    # Record the latest commit sha of the watched branch and persist it.
    # NOTE(review): relies on the module-level `branch` created in the
    # __main__ block below — only safe to call after that is set up; consider
    # passing `branch` in explicitly.
    config["github"]["current_commit"] = branch.commit.sha
    save_file(config)
if __name__ == "__main__":
    # Each setting can come from the command line (and is then saved to the
    # config file) or, on later runs, from the config file itself.
    parser = argparse.ArgumentParser(description='Script to check a Github repo, pull contents and run'
                                     + "pre and post flight commands.\n All arguments are required on first run.\nSubsequent runs"
                                     + 'will attempt to load them from the config. Provide them again to override')
    parser.add_argument('--repo', help='Github repository. e.g. <user>/<repo_name>')
    parser.add_argument('--user', help='Github username')
    # Fixed the broken '<PASSWORD>' placeholder help text.
    parser.add_argument('--pwd', help='Github password')
    parser.add_argument('--dir', help='Directory to install to')
    parser.add_argument('--pre', help='Preflight script to run before downloading the files from Github')
    parser.add_argument('--post', help='Postflight script to run after downloading is complete')
    args = parser.parse_args()
    repo = args.repo
    user = args.user
    pwd = args.pwd
    dirl = args.dir
    pre = args.pre
    post = args.post
    config = configparser.RawConfigParser()
    config.read(CONFIG_FILE_NAME, encoding='utf-8')
    if not repo:
        if 'github' not in config:
            raise ValueError("Could not find an existing config and argument 'repo' was not provided. Check " + 'the help section for more details. -h')
        repo = config['github']['repo']
    else:
        config["github"] = {"repo": repo}
        save_file(config)
    if not user or not pwd:
        if 'login' not in config:
            raise ValueError('Could not find existing config and arguments for username or password missing. '
                             + 'Check the help section for more details. -h')
        user = config['login']['user']
        pwd = config['login']['password']
    else:
        # Fixed the sanitization-broken '<PASSWORD>' token: the parsed --pwd
        # value is what must be persisted here.
        config['login'] = {'user': user, 'password': pwd}
        save_file(config)
    if not dirl:
        if 'directory' not in config:
            raise ValueError('Could not find existing config and arguments for directory are missing. Check '
                             + 'the help section for more details. -h')
        dirl = config['directory']['dir']
        # NOTE(review): this save is redundant (nothing changed) but kept to
        # preserve the original on-disk behavior.
        save_file(config)
    else:
        config['directory'] = {'dir': dirl}
        save_file(config)
    if not pre or not post:
        if 'actions' not in config:
            raise ValueError('Could not find existing config and arguments for pre or post flight missing. '
                             + 'Check the help section for more details. -h')
        pre = config['actions']['pre_update']
        post = config['actions']['post_update']
    else:
        config["actions"] = {"pre_update": pre, "post_update": post}
        save_file(config)
    # using username and password
    g = Github(user, pwd)
    repo = g.get_repo(repo)
    branch = repo.get_branch("master")
    # Update when this is the first run (no recorded sha) or when the branch
    # head moved since the last run.
    run_update = False
    if "current_commit" not in config["github"]:
        set_and_save_sha(config)
        run_update = True
    if config["github"]["current_commit"] != branch.commit.sha:
        set_and_save_sha(config)
        run_update = True
    # BUG FIX: removed a stray unconditional `run_update = True` left over from
    # debugging — it made the commit-sha comparison above meaningless and
    # forced a full re-download on every run.
    if run_update:
        os.system(pre)
        # Walk the repo tree breadth-first, mirroring it into `dirl`.
        contents = repo.get_contents("")
        while contents:
            file_content = contents.pop()
            full_path = os.path.join(dirl, file_content.path)
            if file_content.type == "dir":
                if not os.path.isdir(full_path):
                    os.mkdir(full_path)
                contents.extend(repo.get_contents(file_content.path))
            else:
                # Create file
                with open(full_path, 'wb') as f:
                    f.write(file_content.decoded_content)
        os.system(post)
|
#!/usr/bin/env python3
"""
Requires:
python-mnist
numpy
sklearn
"""
import sys
sys.path.insert(0, 'src/')
import mnist
import numpy as np
from numpy.linalg import norm as l21_norm
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
import os
# Regularization weight for the fuzzy-membership entropy term — TODO confirm
# against the paper's notation.
gamma = .1
# Capping threshold for the robust (capped-norm) distance in update_S.
epsilon = 30
# Seed is overridable via the `seed` environment variable for reproducibility.
np.random.seed(int(os.environ.get('seed', '42')))
# Download t10k_* from http://yann.lecun.com/exdb/mnist/
# Change to directory containing unzipped MNIST data
mndata = mnist.MNIST('/home/hsfzxjy/srcs/RSFKM/data/MNIST-10K/')
def solve_huang_eq_24(u):
    """Newton iteration for the scalar multiplier of Huang et al., Eq. (24).

    Finds x with mean(clip(x - u, 0)) == x, starting from min(u); stops when
    successive iterates differ by less than 1e-4.
    """
    n = len(u)
    tol = 1e-4

    def value(x):
        return np.clip(x - u, 0, None).sum() / n - x

    def slope(x):
        return (x > u).sum() / n - 1

    cur = np.min(u)
    while True:
        nxt = cur - value(cur) / slope(cur)
        if np.abs(nxt - cur) < tol:
            return nxt
        cur = nxt
def solve_huang_eq_13(v):
    """
    min || alpha - v ||^2, subject to \sum{alpha}=1, alpha >= 0

    Projection of v onto the probability simplex via the multiplier returned
    by solve_huang_eq_24.
    """
    n = len(v)
    ones = np.ones(n)
    shifted = v - np.ones((n, n)) @ v / (n) + ones / (n)
    lam_bar = solve_huang_eq_24(shifted)
    lam = (lam_bar - shifted) + np.clip(shifted - lam_bar, 0, None)
    return shifted + lam - lam_bar * ones
def solve_U(s, x, v, gamma):
    """Update the membership matrix U row by row (Huang et al., Eq. 13).

    Uses the module globals C (cluster count) and ndim (feature dimension).
    """
    n = s.shape[0]
    U = np.zeros((n, C))
    for i in range(n):
        tiled = np.repeat(x[i, :].reshape((1, ndim)), C, axis=0)
        cost = s[i, :] * l21_norm(tiled - v, axis=1) ** 2
        U[i, :] = solve_huang_eq_13((-cost) / (2 * gamma))
    return U
def update_V(s, u, x):
    """Recompute centroids as s*u-weighted averages of the data rows.

    Uses the module globals C (cluster count) and ndim (feature dimension).
    """
    weights = s * u
    v = np.zeros((C, ndim))
    for k in range(C):
        v[k, :] = np.average(x, axis=0, weights=weights[:, k])
    return v
def update_S(x, v, epsilon, capped):
    # Reweighting step of the capped-norm objective: points within `epsilon`
    # of a centroid get weight 1/(2*||x_i - v_k||); points farther away are
    # treated as outliers and get weight 0.
    # NOTE(review): the `capped` argument is accepted but never used —
    # presumably it was meant to toggle the capping branch; confirm against
    # the non-capped variant before removing it.
    # NOTE(review): a zero distance (x_i exactly equal to v_k) divides by
    # zero here.
    s = np.ones((size, C))  # `size` and `C` are module globals set by the caller
    for i in range(len(x)):
        for k in range(C):
            norm_ = l21_norm(x[i, :] - v[k, :])
            if norm_ < epsilon:
                s[i, k] = 1 / (2 * norm_)
            else:
                s[i, k] = 0
    return s
def NMI(U):
    """Normalized mutual information between hard assignments of U and the
    module-global ground-truth `labels`."""
    predicted = np.argmax(U, axis=1)
    return nmi(labels, predicted)
def search_U(S, X, V, gammas=(.005,)):
    # Originally a grid search over gamma (see the disabled code below);
    # currently just solves U for the first candidate value.
    return solve_U(S, X, V, gammas[0])
    # best_g, best_U, best_NMI = 0, 0, 0
    # for gamma in gammas:
    #     U = solve_U(S, X, V, gamma)
    #     # result = NMI(U)
    #     # if result > best_NMI:
    #     #     best_g = gamma
    #     #     best_U = U
    #     #     best_NMI = result
    # print(best_g, best_NMI)
    # return best_U
# Shared "globals" used by the update routines; real values are assigned in
# orig() and in the __main__ block below.
ndim = size = C = 0
# NOTE(review): this import deliberately overrides the pure-Python solve_U /
# update_V / update_S defined above with compiled variants — the functions
# above are kept as the reference implementation.
from basics.orig._numba import solve_U, update_V, update_S
def orig(X, U, V, error, gamma=gamma):
    """Run the alternating U/V/S optimization until U changes by < error.

    Mutates the module globals ndim/size/C to match X and V, then iterates
    the three update steps. Returns the final (U, V).
    """
    global ndim, size, C
    ndim = len(X[0])
    size = len(X)
    C = len(V)
    S = np.ones((size, C))
    t = 1
    while True:
        print('--- ORIG ---')
        print('== t = ', t)
        new_U = solve_U(S, X, V, gamma)
        # Convergence is measured on the change in the membership matrix.
        delta = l21_norm(U - new_U)
        U = new_U
        old_V = V
        V = update_V(S, U, X)
        print('DELTA V', l21_norm(V - old_V))
        S = update_S(X, V, epsilon, True)
        # print('NMI', NMI(U))
        print('DELTA', delta)
        # t != 1 guards against a spuriously small first-step delta.
        if delta < error and t != 1:
            print('Converged at step', t)
            break
        t += 1
    return U, V
if __name__ == '__main__':
    # Load the MNIST 10k test split and normalize pixel values to [0, 1].
    images, labels = mndata.load_testing()
    ndim = 784
    size = len(labels)
    C = 10
    X = np.array(images).reshape((size, ndim)) / 255
    print(np.linalg.norm(X, axis=1))
    t = 0
    # Random centroids; each sample initially assigned to one cluster.
    # NOTE(review): argmax picks the *farthest* centroid here — looks like it
    # should be argmin; confirm against the reference implementation.
    V = np.random.random((C, ndim))
    U = np.zeros((size, C))
    for i in range(size):
        xi = np.repeat(X[i, :].reshape((1, ndim)), C, axis=0)
        U[i, np.argmax(l21_norm(xi - V, axis=1))] = 1
    S = np.ones((size, C))
    # Alternating optimization loop, same scheme as orig() but with progress
    # diagnostics printed every step.
    while True:
        print('-------------')
        print('== t = ', t)
        new_U = solve_U(S, X, V, gamma)
        delta = l21_norm(U - new_U)
        print('DELTA U', delta)
        U = new_U
        old_V = V
        V = update_V(S, U, X)
        print('DELTA V', l21_norm(V - old_V))
        old_S = S
        S = update_S(X, V, epsilon, True)
        print('DELTA S', l21_norm(S - old_S))
        print('NMI', NMI(U))
        if delta < 1e-1:
            print('Converged at step', t)
            break
        t += 1
|
from tensorflow.keras.layers import Layer
import tensorflow as tf
import numpy as np
class OSELM(Layer):
    """Online Sequential Extreme Learning Machine output layer.

    Maintains the running inverse-covariance matrix P and output weights beta
    as non-trainable variables; the first call performs the batch (initial)
    ELM solve, subsequent training calls apply the recursive OS-ELM update.
    The regression target must be set via the `target` property before each
    training call.
    """
    def __init__(self,
                 n_hidden_nodes,
                 n_output_nodes,
                 batch_size,
                 reg_weight=5,
                 trainable=True,
                 name='oselm',
                 **kwargs):
        super().__init__(name=name, trainable=trainable, **kwargs)
        self.n_hidden_nodes = n_hidden_nodes
        self.n_output_nodes = n_output_nodes
        self.reg_weight = reg_weight  # L2 regularization strength for the initial solve
        self.batch_size = batch_size
        # P approximates (H^T H + reg*I)^-1; beta are the output weights.
        self._p = self.add_weight(shape=[n_hidden_nodes, n_hidden_nodes], initializer=tf.zeros_initializer(), trainable=False, name=self.name + '/p')
        self._beta = self.add_weight(shape=[n_hidden_nodes, n_output_nodes], initializer=tf.zeros_initializer(), trainable=False, name=self.name + '/beta')
    def build(self, input_shape):
        # When Tensorflow is building the network graph _target must have a valid shape
        self._target = tf.zeros([self.batch_size, self.n_output_nodes])
        # Boolean state flag: False until the initial batch solve has run.
        self.is_finished_init_train = self.add_weight(shape=[], initializer=tf.zeros_initializer(), trainable=False, name=self.name + '/cond', dtype=tf.bool)
    def call(self, H, training=True):
        if training:
            HT = tf.transpose(H)
            # NOTE(review): truthiness of a tf.bool Variable in a Python `if`
            # only works reliably in eager mode — confirm this layer is never
            # traced into a tf.function/graph context.
            if not self.is_finished_init_train:
                # Initial training
                num_feats = tf.shape(H)[1]
                Id = tf.linalg.eye(num_feats)
                HTH = tf.matmul(HT, H)
                # P0 = (H^T H + reg*I)^-1 ; beta0 = P0 H^T T
                p = tf.linalg.inv(HTH + self.reg_weight * Id)
                p = self._p.assign(p)
                pHT = tf.matmul(p, HT)
                pHTt = tf.matmul(pHT, self._target)
                self._beta.assign(pHTt)
                self.is_finished_init_train.assign(tf.constant(True))
            else:
                # Sequential training
                Id = tf.linalg.eye(self.batch_size)
                Hp = tf.matmul(H, self._p)
                HpHT = tf.matmul(Hp, HT)
                # Pseudo-inverse computed on the host via numpy for stability.
                temp = tf.numpy_function(np.linalg.pinv, [Id + HpHT], tf.float32, name="np.linalg.pinv")
                pHT = tf.matmul(self._p, HT)
                # Sherman-Morrison-Woodbury style recursive update of P,
                # followed by the beta correction toward the new targets.
                p = self._p.assign_sub(tf.matmul(tf.matmul(pHT, temp), Hp))
                pHT = tf.matmul(p, HT)
                Hbeta = tf.matmul(H, self._beta)
                self._beta.assign_add(tf.matmul(pHT, self._target - Hbeta))
        return tf.matmul(H, self._beta)
    def _set_target(self, value):
        self._target = value
    # Write-only property: set the regression targets for the next call().
    target = property(fset=_set_target)
    @property
    def beta(self):
        # Snapshot of the current output weights.
        return self._beta.read_value()
    def get_config(self):
        config = super().get_config()
        config.update({'n_hidden_nodes': self.n_hidden_nodes,
                       'n_output_nodes': self.n_output_nodes,
                       'reg_weight': self.reg_weight,
                       'batch_size': self.batch_size,
                       'trainable': self.trainable,
                       'name': self.name})
        return config
# @custom_export('models.WOSELM')
# class WOSELM(OSELM):
# """docstring for WOSELM"""
# def __init__(self,
# n_hidden_nodes=None,
# n_output_nodes=None,
# reg_weight=5,
# trainable=True,
# name='woselm',
# **kwargs):
# super().__init__(name=name,
# trainable=trainable,
# n_hidden_nodes=n_hidden_nodes,
# n_output_nodes=n_output_nodes,
# reg_weight=reg_weight,
# **kwargs)
# def _class_weights(self):
# true_positive = tf.math.count_nonzero(self._target, dtype=tf.int32)
# total = tf.size(self._target)
# true_negative = total - true_positive
# w_neg = 1
# w_pos = true_negative / true_positive
# class_weight = tf.squeeze(tf.where(self._target == 1, w_pos, w_neg))
# return tf.cast(class_weight, tf.float32)
# def _train_graph(self, H, training=True):
# if training:
# class_weight = self._class_weights()
# class_weight = tf.linalg.diag(class_weight)
# HT = tf.transpose(H)
# if not self.is_finished_init_train:
# # Initial training
# num_feats = tf.shape(H)[1]
# Id = tf.linalg.eye(num_feats)
# HTW = tf.matmul(HT, class_weight)
# HTH = tf.matmul(HTW, H)
# p = tf.linalg.inv(HTH + self.reg_weight * Id)
# p = self._p.assign(p)
# pHT = tf.matmul(p, HT)
# pHTt = tf.matmul(pHT, tf.matmul(class_weight, self._target))
# self._beta.assign(pHTt)
# self.is_finished_init_train = True
# else:
# # Sequential training
# inv_class_weights = np.linalg.inv(class_weight)
# Hp = tf.matmul(H, self._p)
# HpHT = tf.matmul(Hp, HT)
# temp = np.linalg.pinv(inv_class_weights + HpHT)
# pHT = tf.matmul(self._p, HT)
# p = self._p.assign_sub(tf.matmul(tf.matmul(pHT, temp), Hp))
# pHT = tf.matmul(p, HT)
# pHTW = tf.matmul(pHT, class_weight)
# Hbeta = tf.matmul(H, self._beta)
# self._beta.assign_add(tf.matmul(pHTW, self._target - Hbeta))
# return self.infer(H)
|
<filename>app/parsing.py
from newspaper import Article
from urllib.parse import urlparse
from htmlTagsExtractor import extract_tags
from factsExtractor import extract_facts
from googleApiSentiment import get_sentiments
from keywordsFinder import KeywordsFinder
from caching import get_cached_article, get_cached_features
from caching import cache_article, cache_features
# parsers and classifiers inits
# Shared keyword finder, created once at import time so per-request calls
# don't pay its initialization cost.
kf = KeywordsFinder()
def check_fake_source(url):
    """Return True when *url*'s host appears in the known-fake-sources list.

    The list is re-read from ./data/fakes.csv (one hostname per line) on
    every call.
    """
    host = '{uri.netloc}'.format(uri=urlparse(url))
    with open('./data/fakes.csv', 'r') as fh:
        known_fakes = {line.strip() for line in fh}
    return host in known_fakes
def download_article(url, cache=True):
    """Fetch, parse and summarize the article at *url* via newspaper.

    Returns a plain dict of the extracted fields; when *cache* is true the
    dict is also written to the article cache.
    """
    art = Article(url, keep_article_html=True)
    art.download()
    art.parse()
    art.nlp()
    source_host = art.source_url[art.source_url.find("//") + 2:].split("/")[0]
    result = {
        "author": ", ".join(art.authors),
        "source": source_host,
        "title": art.title,
        "image": art.top_image,
        "url": art.url,
        "publishedAt": art.publish_date,
        "html": art.article_html,
        "text": art.text,
        "summary": art.summary,
        "keywords": art.keywords,
    }
    if cache:
        cache_article(url, result)
    return result
def get_article(url, cache=True):
    """Return the article dict for *url*, preferring the cache when enabled."""
    if cache:
        cached = get_cached_article(url)
        if cached is not None:
            return cached
    return download_article(url, cache=cache)
def populate_with_features_old(article):
    """Legacy flat feature extraction (kept for backward compatibility).

    Mutates *article* in place (its 'text' is replaced with the tag-stripped
    raw text) and returns a flat result dict; the newer schema lives in
    populate_with_features().
    """
    url = article['url']
    result = {'url': url, 'error': False, 'post': article, 'fake': False}
    # extract_tags must run first: the raw text it produces feeds every
    # extractor below.
    tags, raw_text = extract_tags(result['post']['html'])
    result['html'] = tags
    result['post']['text'] = raw_text
    result['fake'] = check_fake_source(url)
    result['checkFacts'] = extract_facts(result['post'])
    result['stopwords'] = kf.find_keywords(raw_text)
    result['entities'] = get_sentiments(raw_text)
    return result
def get_text_entities(text):
    """Placeholder for plain-text entity extraction; currently always
    returns an empty list."""
    return []
def populate_with_features(article, cache=True):
    """Build the feature payload for *article*: offset-annotated entities
    (key phrases, sentiment mentions, stopwords) plus article-level flags.

    Mutates article['text'] in place; results are cached per URL when
    *cache* is true.
    """
    url = article['url']
    if cache:
        features = get_cached_features(url)
        if features is not None:
            return features
    features = {'url': url, 'error': False, 'article': article}
    tags, raw_text = extract_tags(article['html'])
    features['html_tags'] = tags
    features['article']['text'] = raw_text
    # entity-based features
    features['entities'] = []
    # find key phrases
    facts = extract_facts(article)
    features['entities'].extend({
        'offset': e['offset'],
        'type': 'key_phrase',
        'content': e['content'],
        'properties': {}
    } for e in facts)
    # Sentiment mentions: keep only strongly polarized ones
    # (|sentiment| > 0.5) and tag them by sign.
    sents_google = get_sentiments(raw_text)
    features['entities'].extend({
        'offset': e['offset'],
        'type': 'sentiment_positive_google' if e["sentiment"] > 0 else "sentiment_negative_google",
        'content': e['content'],
        'properties': {
            'magnitude': e['magnitude'],
            'sentiment': e['sentiment'],
        }
    } for m in sents_google for e in m["mentions"] if e["sentiment"] ** 2 > 0.5 ** 2)  # just not to use np.abs()
    stopwords = kf.find_keywords(raw_text)
    features['entities'].extend({
        'offset': e['offset'],
        'type': e['type'],
        'content': e['content'],
        'properties': {}
    } for e in stopwords)
    # article-based features
    features['features'] = {}
    features['features']['source_had_fake_news'] = check_fake_source(url)
    if cache:
        cache_features(url, features)
    return features
|
#coding:utf-8
#-----------------------------------------------------------------
# pycparser: explore_ast.py
# Walk and explore the AST produced by pycparser
# This example demonstrates how to "explore" the AST created by
# pycparser to understand its structure. The AST is a n-nary tree
# of nodes, each node having several children, each with a name.
# Just read the code, and let the comments guide you. The lines
# beginning with #~ can be uncommented to print out useful
# information from the AST.
# It helps to have the pycparser/_c_ast.cfg file in front of you.
#
# <NAME> [https://eli.thegreenplace.net/]
# License: BSD
#-----------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast
# This is some C source to parse. Note that pycparser must begin
# at the top level of the C file, i.e. with either declarations
# or function definitions (this is called "external declarations"
# in C grammar lingo)
#
# Also, a C parser must have all the types declared in order to
# build the correct AST. It doesn't matter what they're declared
# to, so I've inserted the dummy typedef in the code to let the
# parser know Hash and Node are types. You don't need to do it
# when parsing real, correct C code.
text = r"""
typedef int Node, Hash;
void HashPrint(Hash* hash, void (*PrintFunc)(char*, char*))
{
unsigned int i;
if (hash == NULL || hash->heads == NULL)
return;
for (i = 0; i < hash->table_size; ++i)
{
Node* temp = hash->heads[i];
while (temp != NULL)
{
PrintFunc(temp->entry->key, temp->entry->value);
temp = temp->next;
}
}
}
"""
# Create the parser and ask to parse the text. parse() will throw
# a ParseError if there's an error in the code
#
parser = c_parser.CParser()
ast = parser.parse(text, filename='<none>')
# Uncomment the following line to see the AST in a nice, human
# readable way. show() is the most useful tool in exploring ASTs
# created by pycparser. See the c_ast.py file for the options you
# can pass it.
#ast.show(showcoord=True)
# OK, we've seen that the top node is FileAST. This is always the
# top node of the AST. Its children are "external declarations",
# and are stored in a list called ext[] (see _c_ast.cfg for the
# names and types of Nodes and their children).
# As you see from the printout, our AST has two Typedef children
# and one FuncDef child.
# Let's explore FuncDef more closely. As I've mentioned, the list
# ext[] holds the children of FileAST. Since the function
# definition is the third child, it's ext[2]. Uncomment the
# following line to show it:
#ast.ext[2].show()
# A FuncDef consists of a declaration, a list of parameter
# declarations (for K&R style function definitions), and a body.
# First, let's examine the declaration.
function_decl = ast.ext[2].decl
# function_decl, like any other declaration, is a Decl. Its type child
# is a FuncDecl, which has a return type and arguments stored in a
# ParamList node
#function_decl.type.show()
#function_decl.type.args.show()
# The following displays the name and type of each argument:
#for param_decl in function_decl.type.args.params:
#print('Arg name: %s' % param_decl.name)
#print('Type:')
#param_decl.type.show(offset=6)
# The body is of FuncDef is a Compound, which is a placeholder for a block
# surrounded by {} (You should be reading _c_ast.cfg parallel to this
# explanation and seeing these things with your own eyes).
# Let's see the block's declarations:
function_body = ast.ext[2].body
# The following displays the declarations and statements in the function
# body
#for decl in function_body.block_items:
#decl.show()
# We can see a single variable declaration, i, declared to be a simple type
# declaration of type 'unsigned int', followed by statements.
# block_items is a list, so the third element is the For statement:
for_stmt = function_body.block_items[2]
#for_stmt.show()
# As you can see in _c_ast.cfg, For's children are 'init, cond,
# next' for the respective parts of the 'for' loop specifier,
# and stmt, which is either a single stmt or a Compound if there's
# a block.
#
# Let's dig deeper, to the while statement inside the for loop:
while_stmt = for_stmt.stmt.block_items[1]
#while_stmt.show()
# While is simpler, it only has a condition node and a stmt node.
# The condition:
while_cond = while_stmt.cond
#while_cond.show()
# Note that it's a BinaryOp node - the basic constituent of
# expressions in our AST. BinaryOp is the expression tree, with
# left and right nodes as children. It also has the op attribute,
# which is just the string representation of the operator.
#print(while_cond.op)
#while_cond.left.show()
#while_cond.right.show()
# That's it for the example. I hope you now see how easy it is to explore the
# AST created by pycparser. Although on the surface it is quite complex and has
# a lot of node types, this is the inherent complexity of the C language every
# parser/compiler designer has to cope with.
# Using the tools provided by the c_ast package it's easy to explore the
# structure of AST nodes and write code that processes them.
# Specifically, see the cdecl.py example for a non-trivial demonstration of what
# you can do by recursively going through the AST.
|
<filename>targets/ios/config/target.py
import os
from pygemstones.io import file as f
from core import module as m
# -----------------------------------------------------------------------------
def _arch_entry(
    arch,
    conan_arch,
    conan_profile,
    min_version,
    supported_platform,
    enable_bitcode,
    group,
    subsystem_ios_version=None,
):
    """Build one architecture descriptor dict.

    Key order matches the original inline literals; the optional
    subsystem_ios_version key (Mac Catalyst only) is appended last.
    """
    entry = {
        "arch": arch,
        "conan_arch": conan_arch,
        "conan_profile": conan_profile,
        "min_version": min_version,
        "supported_platform": supported_platform,
        "enable_bitcode": enable_bitcode,
        "group": group,
    }
    if subsystem_ios_version is not None:
        entry["subsystem_ios_version"] = subsystem_ios_version
    return entry


def run(proj_path, target_name, params):
    """Return the target configuration (project metadata + arch matrix).

    The arch list covers iOS, tvOS, watchOS and Mac Catalyst, each
    toggleable via the has_* flags below. Simulator slices never enable
    bitcode; device slices do (except x86_64 Catalyst).
    """
    # archs
    has_ios = True
    has_tvos = True
    has_watchos = True
    has_mac_catalyst = True

    archs = []

    # ios
    if has_ios:
        archs.extend(
            [
                _arch_entry("armv7", "armv7", "nativium_ios_profile", "9.0", "iPhoneOS", True, "ios"),
                _arch_entry("arm64", "armv8", "nativium_ios_profile", "9.0", "iPhoneOS", True, "ios"),
                _arch_entry("x86_64", "x86_64", "nativium_ios_profile", "9.0", "iPhoneSimulator", False, "ios_simulator"),
            ]
        )

    # tvos
    if has_tvos:
        archs.extend(
            [
                _arch_entry("arm64", "armv8", "nativium_tvos_profile", "11.0", "AppleTVOS", True, "tvos"),
                _arch_entry("x86_64", "x86_64", "nativium_tvos_profile", "11.0", "AppleTVSimulator", False, "tvos_simulator"),
            ]
        )

    # watchos
    if has_watchos:
        archs.extend(
            [
                _arch_entry("armv7k", "armv7k", "nativium_watchos_profile", "5.0", "WatchOS", True, "watchos"),
                _arch_entry("arm64_32", "armv8_32", "nativium_watchos_profile", "5.0", "WatchOS", True, "watchos"),
                _arch_entry("x86_64", "x86_64", "nativium_watchos_profile", "5.0", "WatchSimulator", False, "watchos_simulator"),
            ]
        )

    # mac catalyst
    if has_mac_catalyst:
        archs.extend(
            [
                _arch_entry("x86_64", "x86_64", "nativium_catalyst_profile", "10.15", "MacOSX", False, "ios_catalyst", subsystem_ios_version="13.1"),
                _arch_entry("arm64", "armv8", "nativium_catalyst_profile", "10.15", "MacOSX", True, "ios_catalyst", subsystem_ios_version="13.1"),
            ]
        )

    return {
        "project_name": "nativium",
        "product_name": "Nativium",
        "version": "1.0.0",
        "version_code": "1",
        "build_types": ["Debug", "Release"],
        "archs": archs,
        "umbrella_header": "Nativium.h",
        "install_headers": get_header_dir_list(
            proj_path,
            target_name,
            params,
        ),
    }
# -----------------------------------------------------------------------------
def get_header_dir_list(proj_path, target_name, params):
    """Collect Obj-C header directories (generated + implementation) for
    every module in the project, skipping any names in the filter lists."""
    headers = []
    filter_gen_src = []
    filter_impl_src = []
    modules_root = os.path.join(proj_path, "modules")

    for module_name in m.get_list(proj_path):
        generated_dir = os.path.join(
            modules_root, module_name, "gluecode", "generated-src", "objc"
        )
        implementation_dir = os.path.join(
            modules_root, module_name, "implementation", "objc"
        )

        # generated src
        if module_name not in filter_gen_src and f.dir_exists(generated_dir):
            headers.append({"type": "dir", "path": generated_dir})

        # implementation
        if module_name not in filter_impl_src and f.dir_exists(implementation_dir):
            headers.append({"type": "dir", "path": implementation_dir})

    return headers
|
<reponame>ioz9/tools
#!/usr/bin/env python
# Filename tools.py
__author__ = '<EMAIL> (duanqz)'
### Import blocks
import os
import shutil
import commands
import tempfile
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
### Class blocks
class Toolkit:
    """
    Registry of boot-image pack/unpack tool pairs, loaded from toolkit.xml.
    (This module is Python 2: it uses `commands` and print statements.)
    """
    # Directory holding this script, toolkit.xml and the tool binaries.
    __DIR = os.path.dirname(os.path.abspath(__file__)) + "/"
    __TOOLKIT_XML = __DIR + "toolkit.xml"
    # File name used to persist the detected image type inside an unpack dir.
    __BOOTIMG_TYPE_FILE = "type.config"
    # Maps image type -> {"UNPACK": tool path, "PACK": tool path}.
    # NOTE(review): class attribute, so it is shared across all instances.
    allTools = {}
    def __init__(self):
        """
        Initialize the tools factory from toolkit.xml.
        """
        tree = ET.parse(Toolkit.__TOOLKIT_XML)
        for tool in tree.findall("tool"):
            type = tool.attrib["type"]
            unpackTool = tool.find("unpack").text
            packTool = tool.find("pack").text
            self.allTools[type] = { "UNPACK" : Toolkit.__DIR + unpackTool,
                                    "PACK" : Toolkit.__DIR + packTool }
    def getType(self, bootfile):
        """
        Match appropriate tools for the boot image file.
        Returns the matching type name, or None if nothing could unpack it.
        """
        toolsType = None
        # Try to unpack boot image for each type,
        # choose the appropriate one.
        for type in self.allTools.keys():
            # Try to unpack the boot image by unpack tool
            unpackTool = self.getTools(type, "UNPACK")
            if ToolsMatcher.tryUnpack(unpackTool, bootfile) == True:
                toolsType = type
                break
        ToolsMatcher.clearTempDir()
        return toolsType
    def getTools(self, type, attrib=None):
        """
        Get tools by type: the whole {"UNPACK", "PACK"} dict, or a single
        entry when attrib is given.
        """
        tools = self.allTools.get(type)
        if attrib == None :
            return tools
        else:
            return tools[attrib]
    @staticmethod
    def storeType(type, dir):
        # Serialize: persist the detected type into <dir>/type.config.
        fileHandle = open(os.path.join(dir, Toolkit.__BOOTIMG_TYPE_FILE), "w")
        fileHandle.write(type)
        fileHandle.close()
    @staticmethod
    def retrieveType(dir):
        # De-serialize: read back the stored type, defaulting to COMMON.
        try:
            fileHandle = open(os.path.join(dir, Toolkit.__BOOTIMG_TYPE_FILE), "r")
            type = fileHandle.read().rstrip()
            fileHandle.close()
        except:
            # NOTE(review): bare except treats ANY failure as "no config",
            # which also hides real I/O errors — intended as best-effort.
            print " >>> Can not find type.config, use COMMON as image type by default"
            type = "COMMON"
        return type
### End of class Toolkit
class ToolsMatcher:
    """
    Match out the appropriate tools by trial-unpacking into a temp dir.
    """
    # Directory for temporary data storage.
    TEMP_DIR=tempfile.mkdtemp()
    @staticmethod
    def tryUnpack(unpackTool, bootimg):
        """
        Try to unpack the boot image into TEMP_DIR.
        Return whether unpack successfully or not.
        """
        ToolsMatcher.clearTempDir()
        # The unpack tools take "<image> <output dir>" on the command line.
        cmd = unpackTool + " " + bootimg + " " + ToolsMatcher.TEMP_DIR
        result = commands.getstatusoutput(cmd)
        # Debug code. Useless for release version
        ToolsMatcher.__debug("Try " + cmd)
        ToolsMatcher.__debug(result)
        return ToolsMatcher.isUnpackSuccess(result)
    @staticmethod
    def isUnpackSuccess(result):
        """
        Check whether unpack the boot image successfully or not:
        the command output must be clean AND an expected file must exist.
        """
        kernel = ToolsMatcher.TEMP_DIR + "/kernel"
        initrc = ToolsMatcher.TEMP_DIR + "/RAMDISK/init.rc"
        # True : Result is correct and one the file exists
        return ToolsMatcher.isCorretResult(result) and \
               (os.path.exists(kernel) or os.path.exists(initrc))
    @staticmethod
    def isCorretResult(result):
        """
        Check whether the result contains error or not
        """
        errKey1 = "Could not find any embedded ramdisk images"
        errKey2 = "Aborted"
        strResult = str(result)
        # True : all the error keys are not found in result
        return strResult.find(errKey1) < 0 and \
               strResult.find(errKey2) < 0
    @staticmethod
    def clearTempDir():
        """
        Clear the temporary directory
        """
        if os.path.exists(ToolsMatcher.TEMP_DIR) == True:
            shutil.rmtree(ToolsMatcher.TEMP_DIR)
    @staticmethod
    def __debug(msg):
        # Debug logging toggle; flip the constant to enable output
        # (Python 2 print statement).
        if False: print msg
### End of class ToolsMatcher
|
<reponame>MattTurnock/PlanetarySciencesMatt<filename>Ass5/Q1.py
from astropy import units as u
from json_to_dict import constants
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import pylab
pi = np.pi  # NOTE(review): defined but not used below — confirm before removing
######################################################################################
######################################################################################
#PartB
# Measured isotope ratios per sample (normalized to stable Os-188).
#leftcol is daughter
leftcol = [0.12193,
0.12260,
0.12322,
0.12371,
0.12570,
0.13283,
0.13362,
0.13656,
0.13744,
0.13286,
0.13354,
0.11953,
0.14143]
#rightcol is parent
rightcol = [0.3360,
0.3446,
0.3521,
0.3588,
0.3835,
0.4747,
0.4839,
0.5232,
0.5330,
0.4746,
0.4828,
0.3058,
0.5841]
# calc the trendline
# Least-squares line through (parent, daughter); the isochron's y-intercept
# at parent ratio 0 gives the initial daughter ratio Nd(t0).
z = np.polyfit(rightcol, leftcol, 1)
p = np.poly1d(z)
trendXs = np.linspace(0, 0.8, 100)
Nd_t0 = p(0)
print("Initial daughter to stable nuclide ratio Nd_t0 (Os-187 / Os-188) occurs at y-intercept and is: %s" %Nd_t0)
# Output toggles for the figures below.
show = False
save = False
do_Gas=True
plt.figure()
legend_plots = ['Linear Trendline']
legend_scatters = ['Plotted Points']
legend = legend_plots + legend_scatters
plt.scatter(rightcol, leftcol, marker='o', s=30)
pylab.plot(trendXs,p(trendXs), color='red', linewidth=1)
plt.grid()
# plt.xlim([0,0.15])
plt.xlabel("Np(t) ratio (Re-187 / Os-188)")
plt.ylabel("Nd(t) ratio (Os-187 / Os-188)")
if show: plt.legend(legend)
if save: plt.savefig("isochron_1.pdf")
if show: plt.show()
####################################################################################################################
# Part D
def doGaplot(t, t12, y_int, trendXs, t0=0):
    """Plot the theoretical isochron line for age t on the current figure.

    Parameters:
        t       -- age (years) at which to draw the isochron
        t12     -- parent half-life (years)
        y_int   -- y-intercept, i.e. the initial daughter ratio Nd(t0)
        trendXs -- x-values (parent ratios) to draw the line over
        t0      -- formation-time offset (default 0)

    Cleanup vs. original: removed a no-op `trendXs = trendXs` assignment,
    a trivial nested helper and commented-out debug code; behavior is
    unchanged.
    """
    lmbda = np.log(2)/t12                       # decay constant
    slope = np.exp(lmbda*(t-t0)) - 1            # isochron slope e^(lambda t) - 1
    trendYs = slope*np.asarray(trendXs) + y_int
    plt.plot(trendXs, trendYs, linestyle='--')
    return None
# Re-187 half-life in years.
t12 = 41.2e9
if do_Gas:
    # Overlay theoretical isochrons at 0-4 Ga on the measured data.
    Gas = [0,1,2,3,4]
    for Ga in Gas:
        t = Ga*10**9
        doGaplot(t, t12, Nd_t0, trendXs, t0=0)
        legend_plots.append("Theoretical isochron at t=%s Ga" %Ga)
legend = legend_plots + legend_scatters
plt.legend(legend)
if save: plt.savefig("isochron_2.pdf")
plt.show()
####################################################################################################################
# Part E: meteorite age from the mean isotope ratios (verify the logarithm
# base used in get_tmt0 — the decay law requires the natural log)
def get_tmt0(tau12, Np_t, Nd_t, Nd_t0):
    """Return the elapsed time t - t0 implied by parent/daughter ratios.

    From Np(t) = Np(t0) * exp(-lambda * (t - t0)) with
    Np(t0) = Np(t) + Nd(t) - Nd(t0) and lambda = ln(2) / tau12:

        t - t0 = -(tau12 / ln 2) * ln( Np(t) / (Np(t) + Nd(t) - Nd(t0)) )

    Bug fix: the original used np.log2 here, which scales the result by an
    extra 1/ln(2); the radioactive-decay law requires the NATURAL logarithm
    (this matched the author's own "THIS IS CURRENTLY WRONG" note).
    """
    term1 = tau12/np.log(2)
    frac = Np_t/(Np_t + Nd_t - Nd_t0)
    tmt0 = -term1*np.log(frac)
    return tmt0
tau12 = t12
# Use the sample means as representative present-day ratios.
Np_t = np.mean(rightcol)
Nd_t = np.mean(leftcol)
Nd_t0 = Nd_t0
print("Half life = %s Gyrs" %(tau12*10**-9))
print("Np(t) = %s \nNd(t) = %s \nNd(t0) = %s \n" %(Np_t, Nd_t, Nd_t0))
# Sanity check: recover the age from the averaged ratios.
age = get_tmt0(tau12, Np_t, Nd_t, Nd_t0)
print("CHECK Meteorite age = %s Gyrs" %(age*10**-9))
|
import os
import pytest
import re
import tempfile
import time
import yatest.common
import yatest.common.network
import yatest.yt
from common_helpers import * # noqa
import zipfile
from testpath.tempdir import TemporaryDirectory
def get_catboost_binary_path():
    """Resolve the catboost CLI binary path inside the build tree."""
    binary = yatest.common.binary_path("catboost/app/catboost")
    return binary
def append_params_to_cmdline(cmd, params):
    """Append params to cmd in place.

    params is either a dict (each key/value pair becomes two string
    arguments) or a flat iterable of ready-made arguments.
    """
    if isinstance(params, dict):
        for key, value in params.items():
            cmd.extend(("{}".format(key), "{}".format(value)))
    else:
        cmd.extend(params)
def data_file(*path):
    """Absolute source path of a data file under catboost/pytest/data."""
    relative = os.path.join("catboost", "pytest", "data", *path)
    return yatest.common.source_path(relative)
@yatest.common.misc.lazy
def get_cuda_setup_error():
    """Return a reason string if GPU training cannot run here, else None.

    First honors an explicit HAVE_CUDA=0/no/false build flag; otherwise
    probes by running a tiny 2-iteration GPU fit on synthetic data and
    inspecting the failure message. Evaluated lazily (cached by yatest).
    """
    for flag in pytest.config.option.flags:
        if re.match('HAVE_CUDA=(0|no|false)', flag, flags=re.IGNORECASE):
            return flag
    # Tiny synthetic pool: ten "<target>\t<feature>" rows plus a cd file.
    # NOTE(review): writing str to NamedTemporaryFile (binary mode by default
    # on Python 3) suggests this code targets Python 2 — confirm.
    train = tempfile.NamedTemporaryFile(delete=False)
    train.write('\n'.join(['%i\t%i' % (x, x + 1) for x in range(10)]) + '\n')
    train.close()
    cd = tempfile.NamedTemporaryFile(delete=False)
    cd.write('0\tTarget\n')
    cd.close()
    try:
        cmd = (get_catboost_binary_path(), 'fit',
               '--task-type', 'GPU',
               '--devices', '0',
               '-i', '2',
               '-f', train.name,
               '--column-description', cd.name
               )
        yatest.common.execute(cmd)
    except Exception as e:
        # Map the two well-known setup failures to stable reason strings.
        for reason in ['GPU support was not compiled', 'CUDA driver version is insufficient']:
            if reason in str(e):
                return reason
        return str(e)
    finally:
        os.unlink(train.name)
        os.unlink(cd.name)
    return None
def run_nvidia_smi():
    """Shell out to nvidia-smi for GPU diagnostics (output goes to the log)."""
    import subprocess
    nvidia_smi_binary = '/usr/bin/nvidia-smi'
    subprocess.call([nvidia_smi_binary])
# params is either dict or iterable
# devices used only if task_type == 'GPU'
def execute_catboost_fit(task_type, params, devices='0', stdout=None, timeout=None, env=None):
    """Run `catboost fit` with the given parameters on CPU or GPU.

    Raises if task_type is not 'CPU'/'GPU'. GPU runs additionally pin the
    device list and a 0.25 GPU-RAM share.
    """
    if task_type not in ('CPU', 'GPU'):
        raise Exception('task_type must be "CPU" or "GPU"')
    cmd = [
        get_catboost_binary_path(),
        'fit',
        '--task-type', task_type
    ]
    # Consistency: reuse the shared helper instead of duplicating its
    # dict-vs-iterable handling inline (same behavior, one code path).
    append_params_to_cmdline(cmd, params)
    if task_type == 'GPU':
        cmd.extend(
            [
                '--devices', devices,
                '--gpu-ram-part', '0.25'
            ]
        )
    # Pin MKL's conditional bitwise reproducibility mode so CPU results are
    # stable across host CPU generations.
    mkl_cbwr_env = dict(env) if env else dict()
    mkl_cbwr_env.update(MKL_CBWR='SSE4_2')
    yatest.common.execute(cmd, stdout=stdout, timeout=timeout, env=mkl_cbwr_env)
# cd_path should be None for yt-search-proto pools
def apply_catboost(model_file, pool_path, cd_path, eval_file, output_columns=None, has_header=False, args=None):
    """Run `catboost calc` to apply model_file to pool_path, writing
    RawFormulaVal predictions into eval_file."""
    calc_cmd = [
        get_catboost_binary_path(),
        'calc',
        '--input-path', pool_path,
        '-m', model_file,
        '--output-path', eval_file,
        '--prediction-type', 'RawFormulaVal',
    ]
    if cd_path:
        calc_cmd += ['--column-description', cd_path]
    if output_columns:
        calc_cmd += ['--output-columns', ','.join(output_columns)]
    if has_header:
        calc_cmd.append('--has-header')
    if args:
        calc_cmd += args.strip().split()
    yatest.common.execute(tuple(calc_cmd))
def get_limited_precision_dsv_diff_tool(diff_limit, have_header=False):
    """Build the dsv diff-tool command with an optional precision limit
    and header flag."""
    tool_binary = yatest.common.binary_path(
        "catboost/tools/limited_precision_dsv_diff/limited_precision_dsv_diff")
    diff_tool = [tool_binary]
    if diff_limit is not None:
        diff_tool.extend(['--diff-limit', str(diff_limit)])
    if have_header:
        diff_tool.append('--have-header')
    return diff_tool
def get_limited_precision_json_diff_tool(diff_limit):
    """Build the json diff-tool command with an optional precision limit."""
    tool_binary = yatest.common.binary_path(
        "catboost/tools/limited_precision_json_diff/limited_precision_json_diff")
    diff_tool = [tool_binary]
    if diff_limit is not None:
        diff_tool.extend(['--diff-limit', str(diff_limit)])
    return diff_tool
def local_canonical_file(*args, **kwargs):
    """Canonical-file wrapper that always compares against the local copy."""
    return yatest.common.canonical_file(*args, local=True, **kwargs)
def format_crossvalidation(is_inverted, n, k):
    """Render a crossvalidation spec string, e.g. 'Classical:5;2'."""
    if is_inverted:
        cv_type = 'Inverted'
    else:
        cv_type = 'Classical'
    return '%s:%s;%s' % (cv_type, n, k)
def execute_dist_train(cmd):
    """Run distributed CPU training: two local workers plus a master.

    Starts two `run-worker` processes on free ports, writes a hosts file
    pointing at them, waits until both ports are bound, then runs the
    master via execute_catboost_fit and joins the workers.
    """
    hosts_path = yatest.common.test_output_path('hosts.txt')
    with yatest.common.network.PortManager() as pm:
        port0 = pm.get_port()
        port1 = pm.get_port()
        with open(hosts_path, 'w') as hosts:
            hosts.write('localhost:' + str(port0) + '\n')
            hosts.write('localhost:' + str(port1) + '\n')
        catboost_path = yatest.common.binary_path("catboost/app/catboost")
        worker0 = yatest.common.execute((catboost_path, 'run-worker', '--node-port', str(port0),), wait=False)
        worker1 = yatest.common.execute((catboost_path, 'run-worker', '--node-port', str(port1),), wait=False)
        # Busy-wait until both workers have actually bound their ports.
        while pm.is_port_free(port0) or pm.is_port_free(port1):
            time.sleep(1)
        execute_catboost_fit(
            'CPU',
            cmd + ('--node-type', 'Master', '--file-with-hosts', hosts_path,)
        )
        worker0.wait()
        worker1.wait()
@pytest.fixture(scope="module")
def compressed_data():
    """Module-scoped fixture: extract every .zip from the test data dir into
    one temporary directory and return the TemporaryDirectory object
    (callers use .name for the path).

    NOTE(review): cleanup happens when the returned object is finalized,
    not via fixture teardown — confirm the lifetime is intentional.
    """
    data_path = yatest.common.source_path(os.path.join("catboost", "pytest", "data"))
    tmp_dir = TemporaryDirectory()
    for file_name in os.listdir(data_path):
        if file_name.endswith('.zip'):
            with zipfile.ZipFile(os.path.join(data_path, file_name)) as zip_file:
                zip_file.extractall(path=tmp_dir.name)
    return tmp_dir
|
<gh_stars>1-10
#!/usr/bin/env python3
# Purpose: Create EMR bootstrap script bucket and deploy the cfn stack
# Author: <NAME> (December 2020)
# Reference: https://gist.github.com/svrist/73e2d6175104f7ab4d201280acba049c
# Usage Example: python3 ./create_cfn_stack.py \
# --ec2-key-name emr-demo-123456789012-us-east-1 \
# --ec2-subnet-id subnet-06aa61f790a932b32 \
# --environment dev
import argparse
import json
import logging
import os
import boto3
from botocore.exceptions import ClientError
# Shared AWS clients/resources for the whole script. The first boto3.client()
# call creates the default session; its region is then reused explicitly.
sts_client = boto3.client('sts')
cfn_client = boto3.client('cloudformation')
region = boto3.DEFAULT_SESSION.region_name
s3_client = boto3.client('s3', region_name=region)
s3 = boto3.resource('s3')
logging.basicConfig(format='[%(asctime)s] %(levelname)s - %(message)s', level=logging.INFO)
def main():
    """Create/tag the bootstrap bucket, upload the EMR bootstrap script and
    launch the CloudFormation stack for the chosen environment."""
    args = parse_args()
    # create and tag bucket (name is unique per account + region)
    account_id = sts_client.get_caller_identity()['Account']
    bucket_name = f'superset-emr-demo-bootstrap-{account_id}-{region}'
    create_bucket(bucket_name)
    tag_bucket(bucket_name)
    # upload bootstrap script
    dir_path = os.path.dirname(os.path.realpath(__file__))
    upload_file(f'{dir_path}/bootstrap_emr/bootstrap_actions.sh', bucket_name, 'bootstrap_actions.sh')
    # set variables
    stack_name = f'emr-superset-demo-{args.environment}'
    cfn_template_path = f'{dir_path}/cloudformation/superset-emr-demo.yml'
    cfn_params_path = f'{dir_path}/cloudformation/superset-emr-demo-params-{args.environment}.json'
    ec2_key_name = args.ec2_key_name
    # append new parameters (CLI-provided values + generated bucket name)
    cfn_params = _parse_parameters(cfn_params_path)
    cfn_params.append({'ParameterKey': 'Ec2KeyName', 'ParameterValue': ec2_key_name})
    cfn_params.append({'ParameterKey': 'Ec2SubnetId', 'ParameterValue': args.ec2_subnet_id})
    cfn_params.append({'ParameterKey': 'BootstrapBucket', 'ParameterValue': bucket_name})
    logging.info(json.dumps(cfn_params, indent=4))
    # create the cfn stack
    create_stack(stack_name, cfn_template_path, cfn_params)
def create_bucket(bucket_name):
    """Create an S3 bucket using the module-level client.

    :param bucket_name: Bucket to create
    :return: True if bucket created, else False
    """
    try:
        s3_client.create_bucket(Bucket=bucket_name)
        logging.info(f'New bucket name: {bucket_name}')
    except ClientError as e:
        logging.error(e)
        return False
    return True
def tag_bucket(bucket_name):
    """Apply the common 'Name' tag to the bucket; return True on success."""
    tagging = {
        'TagSet': [
            {
                'Key': 'Name',
                'Value': 'EMR Demo Project'
            },
        ]
    }
    try:
        response = s3.BucketTagging(bucket_name).put(Tagging=tagging)
        logging.info(f'Response: {response}')
    except Exception as e:
        logging.error(e)
        return False
    return True
def upload_file(file_name, bucket_name, object_name):
    """Upload a local file to an S3 bucket.

    :param file_name: File to upload
    :param bucket_name: Bucket to upload to
    :param object_name: S3 object name
    :return: True if file was uploaded, else False
    """
    try:
        s3_client.upload_file(file_name, bucket_name, object_name)
        logging.info(f'File {file_name} uploaded to bucket {bucket_name} as object {object_name}')
    except ClientError as e:
        logging.error(e)
        return False
    return True
def create_stack(stack_name, cfn_template, cfn_params):
    """Create the EMR cluster CloudFormation stack.

    :param stack_name: Stack name
    :param cfn_template: Path to the template file (validated before use)
    :param cfn_params: List of ParameterKey/ParameterValue dicts
    :return: True if the create call succeeded, else False
    """
    template_data = _parse_template(cfn_template)
    create_stack_params = {
        'StackName': stack_name,
        'TemplateBody': template_data,
        'Parameters': cfn_params,
        'TimeoutInMinutes': 60,
        # The stack creates named IAM resources, so this capability is required.
        'Capabilities': [
            'CAPABILITY_NAMED_IAM',
        ],
        'Tags': [
            {
                'Key': 'Project',
                'Value': 'Superset EMR Demo'
            },
        ]
    }
    try:
        response = cfn_client.create_stack(**create_stack_params)
        logging.info(f'Response: {response}')
    except ClientError as e:
        logging.error(e)
        return False
    return True
def _parse_template(template):
    """Read the CloudFormation template file and validate it via the API."""
    with open(template) as template_file:
        body = template_file.read()
    cfn_client.validate_template(TemplateBody=body)
    return body
def _parse_parameters(parameters):
with open(parameters) as parameter_file_obj:
parameter_data = json.load(parameter_file_obj)
return parameter_data
def parse_args():
    """Parse required command-line arguments.

    :return: argparse namespace with `environment`, `ec2_key_name` and
             `ec2_subnet_id` attributes
    """
    parser = argparse.ArgumentParser(description='Arguments required for script.')
    parser.add_argument('-e', '--environment', required=True, choices=['dev', 'test', 'prod'], help='Environment')
    parser.add_argument('-k', '--ec2-key-name', required=True, help='Ec2KeyName: Name of EC2 Keypair')
    # Bug fix: the -s help text was a copy/paste of the keypair help
    # ("Name of EC2 Keypair"); it now describes the subnet id.
    parser.add_argument('-s', '--ec2-subnet-id', required=True, help='Ec2SubnetId: Id of EC2 Subnet')
    args = parser.parse_args()
    return args
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
|
<filename>scripts/slave/unittests/annotated_run_test.py
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests that the tools/build annotated_run wrapper actually runs."""
import collections
import contextlib
import json
import logging
import os
import subprocess
import sys
import tempfile
import unittest
import test_env # pylint: disable=W0403,W0611
import mock
from common import chromium_utils
from common import env
from slave import annotated_run
from slave import gce
from slave import infra_platform
# Directory two levels above this test file (the scripts/ root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Lightweight stand-in for annotated_run's parsed options object; the fields
# mirror the LogDog-related command-line flags consumed by _exec_recipe.
MockOptions = collections.namedtuple('MockOptions',
    ('dry_run', 'logdog_force', 'logdog_butler_path', 'logdog_annotee_path',
     'logdog_verbose', 'logdog_service_account_json', 'logdog_pubsub_topic',
     'logdog_host'))
class AnnotatedRunTest(unittest.TestCase):
  """Tests for the annotated_run.py entry points."""

  def test_example(self):
    """Smoke test: annotated_run.py exits 0 for a trivial property set."""
    build_properties = {
      'recipe': 'annotated_run_test',
      'true_prop': True,
      'num_prop': 123,
      'string_prop': '321',
      'dict_prop': {'foo': 'bar'},
    }

    script_path = os.path.join(BASE_DIR, 'annotated_run.py')
    exit_code = subprocess.call([
        'python', script_path,
        '--build-properties=%s' % json.dumps(build_properties)])
    self.assertEqual(exit_code, 0)

  @mock.patch('slave.annotated_run._run_command')
  @mock.patch('slave.annotated_run.main')
  @mock.patch('sys.platform', return_value='win')
  @mock.patch('tempfile.mkstemp', side_effect=Exception('failure'))
  @mock.patch('os.environ', {})
  def test_update_scripts_must_run(self, _tempfile_mkstemp, _sys_platform,
                                   main, run_command):
    """When main() raises, shell_main must still gclient-sync and re-exec."""
    annotated_run.main.side_effect = Exception('Test error!')
    annotated_run._run_command.return_value = (0, "")
    annotated_run.shell_main(['annotated_run.py', 'foo'])

    gclient_path = os.path.join(env.Build, os.pardir, 'depot_tools',
                                'gclient.bat')
    run_command.assert_has_calls([
        mock.call([gclient_path, 'sync', '--force', '--verbose', '--jobs=2',
                   '--break_repo_locks'],
                  cwd=env.Build),
        mock.call([sys.executable, 'annotated_run.py', 'foo']),
    ])
    main.assert_not_called()
class _AnnotatedRunExecTestBase(unittest.TestCase):
  """Shared fixture for _exec_recipe tests.

  Patches command execution, platform detection and parts of `os`, and
  builds default options/properties; subclasses exercise _exec_recipe.
  """

  def setUp(self):
    logging.basicConfig(level=logging.ERROR+1)  # silence test logging
    self.maxDiff = None

    self._patchers = []
    map(self._patch, (
        mock.patch('slave.annotated_run._run_command'),
        mock.patch('slave.infra_platform.get'),
        mock.patch('os.path.exists'),
        mock.patch('os.getcwd'),
        mock.patch('os.environ', {}),
        ))

    self.rt = annotated_run.Runtime()
    self.basedir = self.rt.tempdir()
    self.tdir = self.rt.tempdir()
    self.opts = MockOptions(
        dry_run=False,
        logdog_force=False,
        logdog_annotee_path=None,
        logdog_butler_path=None,
        logdog_verbose=False,
        logdog_service_account_json=None,
        logdog_pubsub_topic=None,
        logdog_host=None)
    self.properties = {
      'recipe': 'example/recipe',
      'mastername': 'master.random',
      'buildername': 'builder',
    }
    self.cwd = os.path.join('home', 'user')
    self.rpy_path = os.path.join(env.Build, 'scripts', 'slave', 'recipes.py')
    # Expected recipe invocation; reused by the assertion helpers below.
    self.recipe_args = [
        sys.executable, '-u', self.rpy_path, '--verbose', 'run',
        '--workdir=%s' % (self.cwd,),
        '--properties-file=%s' % (self._tp('recipe_properties.json'),),
        'example/recipe']

    # Use public recipes.py path.
    os.getcwd.return_value = self.cwd
    os.path.exists.return_value = False

    # Pretend we're 64-bit Linux by default.
    infra_platform.get.return_value = ('linux', 64)

  def tearDown(self):
    self.rt.close()
    for p in reversed(self._patchers):
      p.stop()

  def _bp(self, *p):
    """Join path components under the base dir."""
    return os.path.join(*((self.basedir,) + p))

  def _tp(self, *p):
    """Join path components under the temp dir."""
    return os.path.join(*((self.tdir,) + p))

  def _patch(self, patcher):
    """Start a mock patcher and register it for tearDown."""
    self._patchers.append(patcher)
    patcher.start()
    return patcher

  def _config(self):
    return annotated_run.get_config()

  def _assertRecipeProperties(self, value):
    """Assert the properties JSON written for the recipe equals `value`."""
    # Double-translate "value", since JSON converts strings to unicode.
    value = json.loads(json.dumps(value))
    with open(self._tp('recipe_properties.json')) as fd:
      self.assertEqual(json.load(fd), value)
class AnnotatedRunExecTest(_AnnotatedRunExecTestBase):
  """_exec_recipe behavior without the LogDog bootstrap."""

  def test_exec_successful(self):
    """A zero-exit recipe run returns 0 and writes the properties file."""
    annotated_run._run_command.return_value = (0, '')

    rv = annotated_run._exec_recipe(self.rt, self.opts, self.basedir, self.tdir,
                                    self._config(), self.properties)
    self.assertEqual(rv, 0)

    self._assertRecipeProperties(self.properties)
    annotated_run._run_command.assert_called_once_with(self.recipe_args,
                                                      dry_run=False)
class AnnotatedRunLogDogExecTest(_AnnotatedRunExecTestBase):
  def setUp(self):
    """Install a test whitelist, LogDog-eligible properties and a GCE stub."""
    super(AnnotatedRunLogDogExecTest, self).setUp()
    self._orig_whitelist = annotated_run.LOGDOG_WHITELIST_MASTER_BUILDERS
    annotated_run.LOGDOG_WHITELIST_MASTER_BUILDERS = {
      'master.some': [
        'yesbuilder',
      ],
      'master.all': [
        annotated_run.WHITELIST_ALL,
        'blacklisted',
      ],
    }
    self.properties.update({
        'mastername': 'master.some',
        'buildername': 'nobuilder',
        'buildnumber': 1337,
    })

    # Controls what the patched gce.Authenticator.is_gce reports.
    self.is_gce = False
    def is_gce():
      return self.is_gce
    is_gce_patch = mock.patch('slave.gce.Authenticator.is_gce',
                              side_effect=is_gce)
    is_gce_patch.start()
    self._patchers.append(is_gce_patch)
  def tearDown(self):
    """Restore the real whitelist before the base-class cleanup runs."""
    annotated_run.LOGDOG_WHITELIST_MASTER_BUILDERS = self._orig_whitelist
    super(AnnotatedRunLogDogExecTest, self).tearDown()
  def _assertAnnoteeCommand(self, value):
    """Assert the JSON args file written for annotee equals `value`."""
    # Double-translate "value", since JSON converts strings to unicode.
    value = json.loads(json.dumps(value))
    with open(self._tp('logdog_annotee_cmd.json')) as fd:
      self.assertEqual(json.load(fd), value)
  def test_should_run_logdog(self):
    """Whitelist semantics: explicit builder, wildcard, and blacklist."""
    self.assertFalse(annotated_run._should_run_logdog({
        'mastername': 'master.undefined', 'buildername': 'any'}))
    self.assertFalse(annotated_run._should_run_logdog({
        'mastername': 'master.some', 'buildername': 'nobuilder'}))
    self.assertTrue(annotated_run._should_run_logdog({
        'mastername': 'master.some', 'buildername': 'yesbuilder'}))
    self.assertTrue(annotated_run._should_run_logdog({
        'mastername': 'master.all', 'buildername': 'anybuilder'}))
    self.assertFalse(annotated_run._should_run_logdog({
        'mastername': 'master.all', 'buildername': 'blacklisted'}))
  @mock.patch('os.path.isfile')
  @mock.patch('slave.annotated_run._get_service_account_json')
  def test_exec_with_whitelist_builder_runs_logdog(self, service_account,
                                                   isfile):
    """A whitelisted builder runs the full butler/annotee bootstrap and the
    exact command line is asserted end-to-end."""
    self.properties['buildername'] = 'yesbuilder'
    isfile.return_value = True
    butler_path = self._bp('.recipe_logdog_cipd', 'logdog_butler')
    annotee_path = self._bp('.recipe_logdog_cipd', 'logdog_annotee')
    service_account.return_value = 'creds.json'
    annotated_run._run_command.return_value = (0, '')
    self._patch(mock.patch('tempfile.mkdtemp', return_value='foo'))

    config = self._config()
    rv = annotated_run._exec_recipe(self.rt, self.opts, self.basedir, self.tdir,
                                    config, self.properties)
    self.assertEqual(rv, 0)

    streamserver_uri = 'unix:%s' % (os.path.join('foo', 'butler.sock'),)
    service_account.assert_called_once_with(
        self.opts, config.logdog_platform.credential_path)
    annotated_run._run_command.assert_called_with(
        [butler_path,
         '-log-level', 'warning',
         '-prefix', 'bb/master.some/yesbuilder/1337',
         '-output', 'pubsub,topic="projects/luci-logdog/topics/logs"',
         '-service-account-json', 'creds.json',
         '-output-max-buffer-age', '15s',
         'run',
         '-stdout', 'tee=stdout',
         '-stderr', 'tee=stderr',
         '-streamserver-uri', streamserver_uri,
         '--',
         annotee_path,
         '-log-level', 'warning',
         '-butler-stream-server', streamserver_uri,
         '-logdog-host', 'luci-logdog',
         '-annotate', 'tee',
         '-name-base', 'recipes',
         '-print-summary',
         '-tee',
         '-json-args-path', self._tp('logdog_annotee_cmd.json'),
        ],
        dry_run=False)
    self._assertRecipeProperties(self.properties)
    self._assertAnnoteeCommand(self.recipe_args)
  @mock.patch('slave.annotated_run._logdog_bootstrap', return_value=0)
  def test_runs_bootstrap_when_forced(self, lb):
    """--logdog-force bootstraps even for non-whitelisted builders."""
    opts = self.opts._replace(logdog_force=True)

    rv = annotated_run._exec_recipe(self.rt, opts, self.basedir, self.tdir,
                                    self._config(), self.properties)
    self.assertEqual(rv, 0)
    lb.assert_called_once()
    annotated_run._run_command.assert_called_once()
@mock.patch('slave.annotated_run._logdog_bootstrap', return_value=2)
def test_forwards_error_code(self, lb):
  """A non-zero bootstrap return code is forwarded to the caller."""
  forced_opts = self.opts._replace(logdog_force=True)
  retcode = annotated_run._exec_recipe(
      self.rt, forced_opts, self.basedir, self.tdir, self._config(),
      self.properties)
  self.assertEqual(retcode, 2)
  lb.assert_called_once()
@mock.patch('slave.annotated_run._logdog_bootstrap',
            side_effect=Exception('Unhandled situation.'))
def test_runs_directly_if_bootstrap_fails(self, lb):
  """If the bootstrap blows up, the recipe still runs directly."""
  annotated_run._run_command.return_value = (123, '')
  result = annotated_run._exec_recipe(
      self.rt, self.opts, self.basedir, self.tdir, self._config(),
      self.properties)
  # The direct recipe invocation's return code is propagated.
  self.assertEqual(result, 123)
  lb.assert_called_once()
  annotated_run._run_command.assert_called_once_with(
      self.recipe_args, dry_run=False)
@mock.patch('os.path.isfile')
@mock.patch('slave.annotated_run._logdog_install_cipd')
@mock.patch('slave.annotated_run._get_service_account_json')
def test_runs_directly_if_logdog_error(self, service_account, cipd, isfile):
  """When the butler itself fails, the recipe is re-run directly.

  Exercised on a Windows platform so the named-pipe streamserver URI and
  the ``.exe`` binary names are covered as well.
  """
  self.properties['buildername'] = 'yesbuilder'
  # Test Windows builder this time.
  infra_platform.get.return_value = ('win', 64)
  isfile.return_value = True
  cipd.return_value = ('logdog_butler.exe', 'logdog_annotee.exe')
  service_account.return_value = 'creds.json'
  def error_for_logdog(args, **kw):
    # Fail only the butler invocation; the direct recipe run returns 4.
    if len(args) > 0 and args[0] == 'logdog_butler.exe':
      return (250, '')
    return (4, '')
  annotated_run._run_command.side_effect = error_for_logdog
  config = self._config()
  self._patch(mock.patch('tempfile.mkdtemp', return_value='foo'))
  rv = annotated_run._exec_recipe(self.rt, self.opts, self.basedir, self.tdir,
                                  config, self.properties)
  # The fallback (direct) run's return code wins, not the butler's 250.
  self.assertEqual(rv, 4)
  streamserver_uri = 'net.pipe:LUCILogDogButler'
  service_account.assert_called_once_with(
      self.opts, config.logdog_platform.credential_path)
  # Two invocations: the failing bootstrap, then the direct recipe run.
  annotated_run._run_command.assert_has_calls([
      mock.call([
          'logdog_butler.exe',
          '-log-level', 'warning',
          '-prefix', 'bb/master.some/yesbuilder/1337',
          '-output', 'pubsub,topic="projects/luci-logdog/topics/logs"',
          '-service-account-json', 'creds.json',
          '-output-max-buffer-age', '15s',
          'run',
          '-stdout', 'tee=stdout',
          '-stderr', 'tee=stderr',
          '-streamserver-uri', streamserver_uri,
          '--',
          'logdog_annotee.exe',
          '-log-level', 'warning',
          '-butler-stream-server', streamserver_uri,
          '-logdog-host', 'luci-logdog',
          '-annotate', 'tee',
          '-name-base', 'recipes',
          '-print-summary',
          '-tee',
          '-json-args-path', self._tp('logdog_annotee_cmd.json'),
          ], dry_run=False),
      mock.call(self.recipe_args, dry_run=False),
      ])
@mock.patch('os.path.isfile')
def test_can_find_credentials(self, isfile):
  """An existing credentials file path is returned unchanged."""
  isfile.return_value = True
  creds = annotated_run._get_service_account_json(self.opts, 'creds.json')
  self.assertEqual(creds, 'creds.json')
def test_uses_no_credentials_on_gce(self):
  """On GCE the metadata service supplies auth, so no JSON file is used."""
  self.is_gce = True
  self.assertIsNone(
      annotated_run._get_service_account_json(self.opts, ('foo', 'bar')))
def test_cipd_install(self):
  """CIPD install resolves both packages and returns their binary paths."""
  annotated_run._run_command.return_value = (0, '')
  pkgs = annotated_run._logdog_install_cipd(self.basedir,
      annotated_run.CipdBinary('infra/foo', 'v0', 'foo'),
      annotated_run.CipdBinary('infra/bar', 'v1', 'baz'),
      )
  # Returned paths are rooted at basedir and use each binary's local name.
  self.assertEqual(pkgs, (self._bp('foo'), self._bp('baz')))
  # One cipd.py invocation installs both packages at their pinned versions.
  annotated_run._run_command.assert_called_once_with([
      sys.executable,
      os.path.join(env.Build, 'scripts', 'slave', 'cipd.py'),
      '--dest-directory', self.basedir,
      '--json-output', os.path.join(self.basedir, 'packages.json'),
      '-P', 'infra/foo@v0',
      '-P', 'infra/bar@v1',
      ])
def test_cipd_install_failure_raises_bootstrap_error(self):
  """A non-zero cipd exit code surfaces as LogDogBootstrapError."""
  annotated_run._run_command.return_value = (1, '')
  with self.assertRaises(annotated_run.LogDogBootstrapError):
    annotated_run._logdog_install_cipd(
        self.basedir,
        annotated_run.CipdBinary('infra/foo', 'v0', 'foo'),
        annotated_run.CipdBinary('infra/bar', 'v1', 'baz'))
def test_will_not_bootstrap_if_recursive(self):
  """Bootstrapping refuses to nest inside an existing LogDog session."""
  os.environ['LOGDOG_STREAM_PREFIX'] = 'foo'
  with self.assertRaises(annotated_run.LogDogNotBootstrapped):
    annotated_run._logdog_bootstrap(
        self.rt, self.opts, self.basedir, self.tdir, self._config(),
        self.properties, [])
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
<reponame>huseinzol05/Gather-Tensorflow-Serving<filename>misc/6.graph-dependencies/pyan/main.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pyan.py - Generate approximate call graphs for Python programs.
This program takes one or more Python source files, does a superficial
analysis, and constructs a directed graph of the objects in the combined
source, and how they define or use each other. The graph can be output
for rendering by e.g. GraphViz or yEd.
"""
import logging
from glob import glob
from optparse import OptionParser # TODO: migrate to argparse
from .analyzer import CallGraphVisitor
from .visgraph import VisualGraph
from .writers import TgfWriter, DotWriter, YedWriter
def main():
    """Command-line entry point: parse options, analyze files, emit a graph.

    Exits via ``parser.error`` when no filenames are supplied.  The
    ``--dot``/``--tgf``/``--yed`` flags are independent, so a single run may
    emit several output formats.
    """
    usage = """usage: %prog FILENAME... [--dot|--tgf|--yed]"""
    # NOTE: adjacent string literals concatenate with no separator, so each
    # fragment must carry its own whitespace (previously rendered as
    # "generate anapproximate call graph").
    desc = (
        'Analyse one or more Python source files and generate an'
        ' approximate call graph of the modules, classes and functions'
        ' within them.'
    )
    parser = OptionParser(usage = usage, description = desc)
    parser.add_option(
        '--dot',
        action = 'store_true',
        default = False,
        help = 'output in GraphViz dot format',
    )
    parser.add_option(
        '--tgf',
        action = 'store_true',
        default = False,
        help = 'output in Trivial Graph Format',
    )
    parser.add_option(
        '--yed',
        action = 'store_true',
        default = False,
        help = 'output in yEd GraphML Format',
    )
    parser.add_option(
        '-f',
        '--file',
        dest = 'filename',
        help = 'write graph to FILE',
        metavar = 'FILE',
        default = None,
    )
    parser.add_option(
        '-l',
        '--log',
        dest = 'logname',
        help = 'write log to LOG',
        metavar = 'LOG',
    )
    parser.add_option(
        '-v',
        '--verbose',
        action = 'store_true',
        default = False,
        dest = 'verbose',
        help = 'verbose output',
    )
    parser.add_option(
        '-V',
        '--very-verbose',
        action = 'store_true',
        default = False,
        dest = 'very_verbose',
        help = 'even more verbose output (mainly for debug)',
    )
    parser.add_option(
        '-d',
        '--defines',
        action = 'store_true',
        default = True,
        dest = 'draw_defines',
        help = "add edges for 'defines' relationships [default]",
    )
    parser.add_option(
        '-n',
        '--no-defines',
        action = 'store_false',
        default = True,
        dest = 'draw_defines',
        help = "do not add edges for 'defines' relationships",
    )
    parser.add_option(
        '-u',
        '--uses',
        action = 'store_true',
        default = True,
        dest = 'draw_uses',
        help = "add edges for 'uses' relationships [default]",
    )
    parser.add_option(
        '-N',
        '--no-uses',
        action = 'store_false',
        default = True,
        dest = 'draw_uses',
        help = "do not add edges for 'uses' relationships",
    )
    parser.add_option(
        '-c',
        '--colored',
        action = 'store_true',
        default = False,
        dest = 'colored',
        help = 'color nodes according to namespace [dot only]',
    )
    parser.add_option(
        '-G',
        '--grouped-alt',
        action = 'store_true',
        default = False,
        dest = 'grouped_alt',
        help = 'suggest grouping by adding invisible defines edges [only useful with --no-defines]',
    )
    parser.add_option(
        '-g',
        '--grouped',
        action = 'store_true',
        default = False,
        dest = 'grouped',
        help = 'group nodes (create subgraphs) according to namespace [dot only]',
    )
    parser.add_option(
        '-e',
        '--nested-groups',
        action = 'store_true',
        default = False,
        dest = 'nested_groups',
        help = 'create nested groups (subgraphs) for nested namespaces (implies -g) [dot only]',
    )
    parser.add_option(
        '--dot-rankdir',
        default = 'TB',
        dest = 'rankdir',
        help = (
            "specifies the dot graph 'rankdir' property for "
            'controlling the direction of the graph. '
            "Allowed values: ['TB', 'LR', 'BT', 'RL']. "
            '[dot only]'
        ),
    )
    parser.add_option(
        '-a',
        '--annotated',
        action = 'store_true',
        default = False,
        dest = 'annotated',
        help = 'annotate with module and source line number',
    )
    options, args = parser.parse_args()
    # Report the usage error before doing any filesystem work.
    if len(args) == 0:
        parser.error('Need one or more filenames to process')
    # Expand glob patterns that the shell did not expand (e.g. on Windows).
    filenames = [fn2 for fn in args for fn2 in glob(fn)]
    if options.nested_groups:
        # Nested groups only make sense with grouping on (-e implies -g).
        options.grouped = True
    graph_options = {
        'draw_defines': options.draw_defines,
        'draw_uses': options.draw_uses,
        'colored': options.colored,
        'grouped_alt': options.grouped_alt,
        'grouped': options.grouped,
        'nested_groups': options.nested_groups,
        'annotated': options.annotated,
    }
    # TODO: use an int argument for verbosity
    logger = logging.getLogger(__name__)
    if options.very_verbose:
        logger.setLevel(logging.DEBUG)
    elif options.verbose:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARN)
    logger.addHandler(logging.StreamHandler())
    if options.logname:
        # Mirror log output to the requested file in addition to stderr.
        handler = logging.FileHandler(options.logname)
        logger.addHandler(handler)
    # Static-analysis pass over all input files, then graph construction.
    v = CallGraphVisitor(filenames, logger)
    graph = VisualGraph.from_visitor(
        v, options = graph_options, logger = logger
    )
    if options.dot:
        writer = DotWriter(
            graph,
            options = ['rankdir=' + options.rankdir],
            output = options.filename,
            logger = logger,
        )
        writer.run()
    if options.tgf:
        writer = TgfWriter(graph, output = options.filename, logger = logger)
        writer.run()
    if options.yed:
        writer = YedWriter(graph, output = options.filename, logger = logger)
        writer.run()
# Script entry point when invoked directly (not imported).
if __name__ == '__main__':
    main()
|
<reponame>MaLei666/oms
######################################
# Django 模块
######################################
from django.shortcuts import render, HttpResponseRedirect, redirect, reverse
from django.views import View
from django.http import HttpResponse
from django.db.models import Q
from django.urls import reverse
######################################
# 第三方模块
######################################
from pure_pagination import PageNotAnInteger, Paginator, EmptyPage
######################################
# 系统模块
######################################
import json
import datetime
######################################
# 自建模块
######################################
from utils.login_check import LoginStatusCheck
from .forms import *
from .models import *
from operation_record.models import UserOperationRecord
from oms.settings import WEBSSH_IP, WEBSSH_PORT
##############################################################################
# 主机资产模块
##############################################################################
######################################
# 主机列表
######################################
class HostListView(LoginStatusCheck, View):
    """Paginated host inventory with project / use / keyword filters.

    Only users with ``role < 3`` may view the list; everyone else gets 403.
    """

    def get(self, request):
        if request.user.role <3:
            # Navigation-state flags consumed by the base template.
            web_chose_left_1 = 'host_management'
            web_chose_left_2 = 'host'
            web_chose_middle = ''
            # Active choices (status=1) for the filter / form drop-downs.
            systems = OperatingSystemInfo.objects.filter(status=1)
            projects = ProjectInfo.objects.filter(status=1)
            uses = UseInfo.objects.filter(status=1)
            users = UserProfile.objects.filter(status=1)
            # All active hosts, most recently updated first.
            host_records = HostInfo.objects.filter(status=1).order_by('-update_time')
            # Optional project filter; 0 means "all projects".
            project = int(request.GET.get('project', '0'))
            if project != 0:
                host_records = host_records.filter(project_id=project)
            # Optional use (purpose) filter; 0 means "all uses".
            use = int(request.GET.get('use', '0'))
            if use != 0:
                host_records = host_records.filter(use_id=use)
            # Free-text search across hostname, use, project and description.
            keyword = request.GET.get('keyword', '')
            if keyword != '':
                host_records = host_records.filter(Q(hostname__icontains=keyword) | Q(
                    use__name__icontains=keyword) | Q(project__name__icontains=keyword) | Q(desc__icontains=keyword))
            # Total count after filtering (shown in the template).
            record_nums = host_records.count()
            # Normalize the requested page number.
            try:
                page = request.GET.get('page', 1)
            except PageNotAnInteger:
                page = 1
            # Paginate the filtered queryset, 16 records per page.
            p = Paginator(host_records, 16, request=request)
            # The slice for the current page replaces the full queryset.
            host_records = p.page(page)
            context = {
                'web_chose_left_1': web_chose_left_1,
                'web_chose_left_2': web_chose_left_2,
                'web_chose_middle': web_chose_middle,
                'systems': systems,
                'projects': projects,
                'uses': uses,
                'users': users,
                'project': project,
                'use': use,
                'keyword': keyword,
                'host_records': host_records,
                'record_nums': record_nums,
                'WEBSSH_IP': WEBSSH_IP,
                'WEBSSH_PORT': WEBSSH_PORT,
            }
            return render(request, 'host_management/host/host_list.html', context=context)
        else:
            return HttpResponse(status=403)
########################################################################################################################
## wessh主机视图
########################################################################################################################
class WebSSHView(LoginStatusCheck, View):
    """Return a host's SSH connection parameters as JSON for the web SSH client."""

    def post(self, request, host_id):
        host = HostInfo.objects.get(id=int(host_id))
        ret = {}
        try:
            # Prefer the public IP; fall back to the internal address.
            if host.out_ip:
                ip = host.out_ip
            else:
                ip = host.in_ip
            port = host.ssh_port
            username = host.admin_user
            password = host.admin_pass
            # NOTE(review): the key 'static' looks like a typo for 'status' --
            # confirm against the front-end consumer before renaming it.
            ret = {"ip": ip, 'port': port, "username": username, 'password': password, "static": True}
        except Exception as e:
            ret['status'] = False
            ret['error'] = '请求错误,{}'.format(e)
        # Returning here (instead of inside ``finally``) keeps the observable
        # behavior while no longer suppressing exceptions raised during the
        # except-handler itself (return-in-finally anti-pattern).
        return HttpResponse(json.dumps(ret))
######################################
# 添加主机
######################################
class AddHostInfoView(LoginStatusCheck, View):
    """Create a new host record and write an audit entry (role < 3 only).

    Rejects duplicates by internal IP among active (status=1) hosts.
    """

    def post(self, request):
        if request.user.role <3:
            add_host_info_form = AddHostInfoForm(request.POST)
            if add_host_info_form.is_valid():
                # One active host per internal IP.
                in_ip = request.POST.get('in_ip')
                if HostInfo.objects.filter(in_ip=in_ip).filter(status=1):
                    return HttpResponse('{"status":"failed", "msg":"该 IP 的主机已经存在,请检查!"}',
                                        content_type='application/json')
                host = HostInfo()
                host.in_ip = request.POST.get('in_ip')
                host.out_ip = request.POST.get('out_ip', '')
                host.system_id = int(request.POST.get('system'))
                host.hostname = request.POST.get('hostname')
                host.ssh_port = int(request.POST.get('ssh_port'))
                host.use_id = int(request.POST.get('use'))
                host.project_id = int(request.POST.get('project'))
                host.root_ssh = request.POST.get('root_ssh')
                host.admin_user = request.POST.get('admin_user')
                # Restored from a redaction-corrupted line; mirrors the
                # 'admin_pass' handling in EditHostInfoView below.
                host.admin_pass = request.POST.get('admin_pass')
                host.update_user = request.user
                host.desc = request.POST.get('desc', '')
                host.save()
                # Audit record (operation=1: create).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = host.id
                op_record.operation = 1
                op_record.action = "添加主机:%s" % (host.in_ip)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"主机信息添加成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"主机信息填写错误,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 主机详情
######################################
class HostInfoView(LoginStatusCheck, View):
    """Host detail page: basic info, active services and any linked database."""

    def get(self, request, host_id):
        # Navigation-state flags consumed by the base template.
        web_chose_left_1 = 'host_management'
        web_chose_left_2 = 'host'
        web_chose_middle = ''
        # Active choices (status=1) for the inline edit form drop-downs.
        systems = OperatingSystemInfo.objects.filter(status=1)
        projects = ProjectInfo.objects.filter(status=1)
        users = UserProfile.objects.filter(status=1)
        uses = UseInfo.objects.filter(status=1)
        # The host being displayed.
        records = HostInfo.objects.get(id=host_id)
        # Active services registered on this host.
        services = HostServiceInfo.objects.filter(host_id=host_id).filter(status=1)
        # Whether a database record is attached to this host.  If several are
        # active, the loop keeps the id of the LAST one found.
        is_install_db = DatabaseInfo.objects.filter(host_id=int(host_id)).filter(status=1)
        if is_install_db:
            for each in is_install_db:
                have_db_id = each.id
        else:
            have_db_id = ''
        context = {
            'web_chose_left_1': web_chose_left_1,
            'web_chose_left_2': web_chose_left_2,
            'web_chose_middle': web_chose_middle,
            'records': records,
            'systems': systems,
            'projects': projects,
            'uses': uses,
            'users': users,
            'services': services,
            'have_db_id': have_db_id,
        }
        return render(request, 'host_management/host/host_info.html', context=context)
######################################
# 删除主机
######################################
class DeleteHostView(LoginStatusCheck, View):
    """Delete a host record, writing an audit entry first.

    NOTE(review): unlike the other mutating views in this module there is no
    ``request.user.role < 3`` check here -- confirm whether that is intended.
    """

    def post(self, request):
        try:
            host_id = request.POST.get('host_id')
            host = HostInfo.objects.get(id=int(host_id))
            # Audit record (operation=4: delete), written before the delete so
            # the host id is still available.
            op_record = UserOperationRecord()
            op_record.op_user = request.user
            op_record.belong = 1
            op_record.status = 1
            op_record.op_num = host.id
            op_record.operation = 4
            op_record.action = "删除主机:%s" % (host.in_ip)
            op_record.save()
            host.delete()
            return HttpResponse('{"status":"success", "msg":"主机删除成功!"}', content_type='application/json')
        except Exception:
            # Fixed typo: the failure status used to read "falied".
            return HttpResponse('{"status":"failed", "msg":"主机删除失败!"}', content_type='application/json')
######################################
# 修改主机
######################################
class EditHostInfoView(LoginStatusCheck, View):
    """Update an existing host record and write an audit entry (role < 3 only)."""

    def post(self, request):
        if request.user.role <3:
            edit_host_info_form = EditHostInfoForm(request.POST)
            if edit_host_info_form.is_valid():
                # Fetch the host being edited by its posted id.
                host = HostInfo.objects.get(id=int(request.POST.get('host_id')))
                host.in_ip = request.POST.get('in_ip')
                host.out_ip = request.POST.get('out_ip', '')
                host.system_id = int(request.POST.get('system'))
                host.hostname = request.POST.get('hostname')
                host.ssh_port = int(request.POST.get('ssh_port'))
                host.use_id = int(request.POST.get('use'))
                host.project_id = int(request.POST.get('project'))
                host.admin_user = request.POST.get('admin_user')
                # NOTE(review): stored as plain text -- confirm whether
                # encryption is expected for credentials.
                host.admin_pass = request.POST.get('admin_pass')
                host.op_user_id = int(request.POST.get('op_user'))
                host.update_user = request.user
                host.desc = request.POST.get('desc', '')
                host.save()
                # Audit record (operation=2: edit).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = host.id
                op_record.operation = 2
                op_record.action = "修改主机:%s" % host.in_ip
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"主机信息修改成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"主机信息填写错误,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 数据库列表
######################################
class DatabaseListView(LoginStatusCheck, View):
    """Paginated list of all active database records."""

    def get(self, request):
        # Navigation-state flags consumed by the base template.
        web_chose_left_1 = 'host_management'
        web_chose_left_2 = 'database'
        web_chose_middle = ''
        # Active hosts and users for the form drop-downs.
        hosts = HostInfo.objects.filter(status=1)
        users = UserProfile.objects.filter(status=1)
        # Active database records, most recently updated first.
        db_records = DatabaseInfo.objects.filter(status=1).order_by('-update_time')
        # Total count (shown in the template).
        record_nums = db_records.count()
        # Normalize the requested page number.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Paginate, 16 records per page.
        p = Paginator(db_records, 16, request=request)
        # The slice for the current page replaces the full queryset.
        db_records = p.page(page)
        context = {
            'web_chose_left_1': web_chose_left_1,
            'web_chose_left_2': web_chose_left_2,
            'web_chose_middle': web_chose_middle,
            'hosts': hosts,
            'users': users,
            'record_nums': record_nums,
            'db_records': db_records,
        }
        return render(request, 'host_management/host/db_list.html', context=context)
######################################
# 数据库详情
######################################
class DatabaseInfoView(LoginStatusCheck, View):
    """Database detail page: the record itself plus its schemas and users."""

    def get(self, request, db_id):
        # Navigation-state flags consumed by the base template.
        web_chose_left_1 = 'host_management'
        web_chose_left_2 = 'database'
        web_chose_middle = ''
        # Active users and hosts for the form drop-downs.
        users = UserProfile.objects.filter(status=1)
        hosts = HostInfo.objects.filter(status=1)
        # The database record being displayed.
        db_records = DatabaseInfo.objects.get(id=int(db_id))
        # Active schemas (databases) belonging to this record, newest first.
        db_db_records = DatabaseDBInfo.objects.filter(db_id=int(db_id)).filter(status=1).order_by('-update_time')
        # Active database users belonging to this record, newest first.
        db_user_records = DatabaseUserInfo.objects.filter(db_id=int(db_id)).filter(status=1).order_by('-update_time')
        context = {
            'web_chose_left_1': web_chose_left_1,
            'web_chose_left_2': web_chose_left_2,
            'web_chose_middle': web_chose_middle,
            'db_records': db_records,
            'db_db_records': db_db_records,
            'db_user_records': db_user_records,
            'users': users,
            'hosts': hosts,
        }
        return render(request, 'host_management/host/db_info.html', context=context)
######################################
# 添加数据库记录
######################################
class AddDatabaseInfoView(LoginStatusCheck, View):
    """Create a database record for a host and write an audit entry (role < 3).

    Enforces at most one active database record per host.
    """

    def post(self, request):
        if request.user.role <3:
            # Reject if the host already has an active database record.
            if DatabaseInfo.objects.filter(host_id=int(request.POST.get('host_id'))).filter(status=1):
                return HttpResponse('{"status":"failed", "msg":"该主机的记录已经存在,请检查!"}', content_type='application/json')
            add_db_info_form = AddDatabaseInfoForm(request.POST)
            if add_db_info_form.is_valid():
                db_info = DatabaseInfo()
                db_info.host_id = int(request.POST.get('host_id'))
                db_info.db_name = request.POST.get('db_name')
                db_info.db_version = request.POST.get('db_version')
                db_info.db_admin_user = request.POST.get('db_admin_user')
                # NOTE(review): admin password stored as plain text -- confirm
                # whether encryption is expected.
                db_info.db_admin_pass = request.POST.get('db_admin_pass')
                db_info.desc = request.POST.get('desc', '')
                db_info.add_user = request.user
                db_info.update_user = request.user
                db_info.status = 1
                db_info.save()
                # Audit record (operation=1: create).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = db_info.id
                op_record.operation = 1
                op_record.action = "添加数据库记录:%s" % (db_info.host.in_ip)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"添加成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 修改数据库记录
######################################
class EditDatabaseInfoView(LoginStatusCheck, View):
    """Update an existing database record and write an audit entry (role < 3)."""

    def post(self, request):
        if request.user.role <3:
            edit_db_info_form = EditDatabaseInfoForm(request.POST)
            if edit_db_info_form.is_valid():
                db_info = DatabaseInfo.objects.get(id=int(request.POST.get('db_id')))
                # If the record is being moved to a different host, that host
                # must not already have an active database record.
                db_host = int(request.POST.get('host_id'))
                if db_info.host_id != db_host:
                    if DatabaseInfo.objects.filter(host_id=db_host).filter(status=1):
                        return HttpResponse('{"status":"failed", "msg":"该主机的记录已经存在,请检查!"}',
                                            content_type='application/json')
                # No duplicate -- apply the edits.
                db_info.host_id = db_host
                db_info.db_name = request.POST.get('db_name')
                db_info.db_version = request.POST.get('db_version')
                db_info.db_admin_user = request.POST.get('db_admin_user')
                db_info.db_admin_pass = request.POST.get('db_admin_pass')
                db_info.desc = request.POST.get('desc', '')
                db_info.update_user = request.user
                db_info.save()
                # Audit record (operation=2: edit).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = db_info.id
                op_record.operation = 2
                op_record.action = "修改数据库:%s" % (db_info.db_name)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"修改成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 删除数据库记录
######################################
class DeleteDatabaseInfoView(LoginStatusCheck, View):
    """Delete a database record and write an audit entry (role < 3 only)."""

    def post(self, request):
        # Guard clause: only privileged roles may delete.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        record = DatabaseInfo.objects.get(id=int(request.POST.get('db_id')))
        # Write the audit entry (operation=4) before removing the record.
        audit = UserOperationRecord()
        audit.op_user = request.user
        audit.belong = 1
        audit.status = 1
        audit.op_num = record.id
        audit.operation = 4
        audit.action = "停用数据库记录:%s" % (record.host.in_ip)
        audit.save()
        record.delete()
        return HttpResponse('{"status":"success", "msg":"删除成功!"}', content_type='application/json')
######################################
# 添加数据库库表
######################################
class AddDatabaseDBView(LoginStatusCheck, View):
    """Create a schema (database) entry under a database record (role < 3)."""

    def post(self, request):
        if request.user.role <3:
            # Reject duplicates: same name under the same database record.
            if DatabaseDBInfo.objects.filter(db_id=int(request.POST.get('db_id'))).filter(
                    name=request.POST.get('name')).filter(status=1):
                return HttpResponse('{"status":"failed", "msg":"该记录已经存在,请检查!"}', content_type='application/json')
            add_db_form = AddDatabaseDBForm(request.POST)
            if add_db_form.is_valid():
                db_info = DatabaseDBInfo()
                db_info.db_id = request.POST.get('db_id')
                db_info.name = request.POST.get('name')
                db_info.add_time=datetime.datetime.now()
                # db_info.use = request.POST.get('use')
                db_info.desc = request.POST.get('desc', '')
                db_info.add_user = request.user
                # db_info.update_user = request.user
                db_info.status = 1
                db_info.save()
                # Audit record (operation=1: create).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = db_info.id
                op_record.operation = 1
                op_record.action = "添加主机 [ %s ] 的数据库:%s" % (db_info.db.host.in_ip, db_info.name)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"添加成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 编辑数据库库
######################################
class EditDatabaseDBView(LoginStatusCheck, View):
    """Update a schema (database) entry and write an audit entry (role < 3).

    NOTE(review): the record is looked up by POST['db_id'] but re-parented
    from POST['db_db_id'] below -- confirm which field names the form posts;
    the naming looks inverted relative to AddDatabaseDBView.
    """

    def post(self, request):
        if request.user.role <3:
            db_info = DatabaseDBInfo.objects.get(id=int(request.POST.get('db_id')))
            # If renaming, reject a duplicate name under the target record.
            if db_info.name != request.POST.get('name'):
                if DatabaseDBInfo.objects.filter(db_id=int(request.POST.get('db_db_id'))).filter(
                        name=request.POST.get('name')).filter(status=1):
                    return HttpResponse('{"status":"failed", "msg":"该记录已经存在,请检查!"}', content_type='application/json')
            edit_db_form = EditDatabaseDBForm(request.POST)
            if edit_db_form.is_valid():
                db_info.db_id = int(request.POST.get('db_db_id'))
                db_info.name = request.POST.get('name')
                # db_info.use = request.POST.get('use')
                db_info.desc = request.POST.get('desc', '')
                db_info.update_user = request.user
                db_info.save()
                # Audit record (operation=2: edit).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = db_info.id
                op_record.operation = 2
                op_record.action = "修改主机 [ %s ] 的数据库:%s" % (db_info.db.host.in_ip, db_info.name)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"修改成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 删除数据库库
######################################
class DeleteDatabaseDBView(LoginStatusCheck, View):
    """Delete a schema (database) entry and write an audit entry (role < 3)."""

    def post(self, request):
        if request.user.role <3:
            db_info = DatabaseDBInfo.objects.get(id=int(request.POST.get('db_id')))
            # Audit record (operation=4: delete).  ``op_num`` was missing here
            # while every sibling delete view records the target id, so it is
            # now set for consistency.
            op_record = UserOperationRecord()
            op_record.op_user = request.user
            op_record.belong = 1
            op_record.status = 1
            op_record.op_num = db_info.id
            op_record.operation = 4
            op_record.action = "删除主机 [ %s ] 的数据库:%s" % (db_info.db.host.in_ip, db_info.name)
            op_record.save()
            db_info.delete()
            return HttpResponse('{"status":"success", "msg":"删除成功!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 添加数据库用户
######################################
class AddDatabaseUserView(LoginStatusCheck, View):
    """Create a database user with schema grants and write an audit entry (role < 3)."""

    def post(self, request):
        if request.user.role <3:
            # Reject duplicate usernames under the same database record.
            # NOTE(review): unlike the sibling duplicate checks this one does
            # not filter on status=1 -- confirm whether disabled users should
            # block re-creation.
            db_user = DatabaseUserInfo.objects.filter(db_id=int(request.POST.get('db_id'))).filter(
                username=request.POST.get('username'))
            if db_user:
                return HttpResponse('{"status":"failed", "msg":"该用户已存在,请检查!"}', content_type='application/json')
            add_db_user_form = AddDatabaseUserForm(request.POST)
            if add_db_user_form.is_valid():
                db_user = DatabaseUserInfo()
                db_user.db_id = int(request.POST.get('db_id'))
                db_user.username = request.POST.get('username')
                db_user.password = request.POST.get('password')
                db_user.grant_login = request.POST.get('grant_login')
                db_user.desc = request.POST.get('desc', '')
                db_user.add_user = request.user
                db_user.update_user = request.user
                db_user.status = 1
                # First save assigns a primary key, required before adding
                # many-to-many grant relations below.
                db_user.save()
                for each in request.POST.getlist('dbs'):
                    db = DatabaseDBInfo.objects.get(id=int(each))
                    db_user.grant_db.add(db)
                db_user.save()
                # Audit record (operation=1: create).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = db_user.id
                op_record.operation = 1
                op_record.action = "添加主机 [ %s ] 的数据库用户:%s" % (db_user.db.host.in_ip, db_user.username)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"添加成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 编辑数据库用户
######################################
class EditDatabaseUserView(LoginStatusCheck, View):
    """Update a database user (credentials and schema grants) and log it (role < 3)."""

    def post(self, request):
        if request.user.role <3:
            db_user = DatabaseUserInfo.objects.get(id=int(request.POST.get('db_user_id')))
            # If renaming, reject a username already in use by an active user.
            new_username = request.POST.get('username')
            if db_user.username != new_username:
                if DatabaseUserInfo.objects.filter(username=new_username).filter(status=1):
                    return HttpResponse('{"status":"failed", "msg":"该用户已存在,请检查!"}', content_type='application/json')
            edit_db_user_form = EditDatabaseUserForm(request.POST)
            if edit_db_user_form.is_valid():
                db_user.username = request.POST.get('username')
                # Restored from a redaction-corrupted line; mirrors the
                # 'password' handling in AddDatabaseUserView above.
                db_user.password = request.POST.get('password')
                db_user.grant_login = request.POST.get('grant_login')
                db_user.desc = request.POST.get('desc', '')
                db_user.update_user = request.user
                # Replace the grant set: clear, then re-add the posted schemas.
                db_user.grant_db.clear()
                db_user.save()
                for each in request.POST.getlist('dbs'):
                    db = DatabaseDBInfo.objects.get(id=int(each))
                    db_user.grant_db.add(db)
                db_user.save()
                # Audit record (operation=2: edit).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = db_user.id
                op_record.operation = 2
                op_record.action = "修改主机 [ %s ] 的数据库用户:%s" % (db_user.db.host.in_ip, db_user.username)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"修改成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 删除数据库用户
######################################
class DeleteDatabaseUserView(LoginStatusCheck, View):
    """Delete a database user record and write an audit entry (role < 3 only)."""

    def post(self, request):
        # Guard clause: only privileged roles may delete.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        target = DatabaseUserInfo.objects.get(id=int(request.POST.get('db_user_id')))
        # Write the audit entry (operation=4) before removing the record.
        audit = UserOperationRecord()
        audit.op_user = request.user
        audit.belong = 1
        audit.status = 1
        audit.op_num = target.id
        audit.operation = 4
        audit.action = "停用主机 [ %s ] 的数据库用户:%s" % (target.db.host.in_ip, target.username)
        audit.save()
        target.delete()
        return HttpResponse('{"status":"success", "msg":"删除成功!"}', content_type='application/json')
##############################################################################
# 基础配置模块
##############################################################################
######################################
# 添加系统服务
######################################
class AddHostServiceView(LoginStatusCheck, View):
    """Register a service running on a host and write an audit entry (role < 3)."""

    def post(self, request):
        if request.user.role <3:
            add_service_form = AddHostServiceForm(request.POST)
            if add_service_form.is_valid():
                service = HostServiceInfo()
                host = int(request.POST.get('host_id'))
                service.host_id = host
                service.name = request.POST.get('name')
                service.version = request.POST.get('version')
                service.listen_user = request.POST.get('listen_user')
                service.listen_port = request.POST.get('listen_port')
                service.ins_path = request.POST.get('ins_path')
                service.log_path = request.POST.get('log_path')
                service.backup_path = request.POST.get('backup_path', '')
                service.start_cmd = request.POST.get('start_cmd')
                service.desc = request.POST.get('desc', '')
                service.add_user = request.user
                service.update_user = request.user
                service.status = 1
                service.save()
                # Audit record (operation=1: create).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = service.id
                op_record.operation = 1
                op_record.action = "添加主机 [ %s ] 的服务:%s" % (service.host.in_ip, service.name)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"主机服务添加成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"主机服务填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 编辑系统服务
######################################
class EditHostServiceView(LoginStatusCheck, View):
    """Update a host service record and write an audit entry (role < 3 only)."""

    def post(self, request):
        if request.user.role <3:
            edit_service_form = EditHostServiceForm(request.POST)
            if edit_service_form.is_valid():
                service = HostServiceInfo.objects.get(id=int(request.POST.get('ser_id')))
                service.name = request.POST.get('name')
                service.version = request.POST.get('version')
                service.listen_user = request.POST.get('listen_user')
                service.listen_port = request.POST.get('listen_port')
                service.ins_path = request.POST.get('ins_path')
                service.log_path = request.POST.get('log_path')
                service.backup_path = request.POST.get('backup_path', '')
                service.start_cmd = request.POST.get('start_cmd')
                service.desc = request.POST.get('desc', '')
                service.update_user = request.user
                service.save()
                # Audit record (operation=2: edit).
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = service.id
                op_record.operation = 2
                op_record.action = "修改主机 [ %s ] 的服务:%s" % (service.host.in_ip, service.name)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"主机服务修改成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"主机服务填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# Delete host service (删除服务)
######################################
class DeleteHostServiceView(LoginStatusCheck, View):
    """Delete a HostServiceInfo record, writing an audit entry first.

    Any error (missing/invalid id, lookup failure) is reported as a JSON
    failure response rather than propagating to the client as a 500.
    """

    def post(self, request):
        try:
            ser_id = request.POST.get('ser_id')
            service = HostServiceInfo.objects.get(id=int(ser_id))
            # Audit record: operation=4 ("disable/remove").
            op_record = UserOperationRecord()
            op_record.op_user = request.user
            op_record.belong = 1
            op_record.status = 1
            op_record.op_num = service.id
            op_record.operation = 4
            op_record.action = "停用主机 [ %s ] 的服务:%s" % (service.host.in_ip, service.name)
            op_record.save()
            service.delete()
            return HttpResponse('{"status":"success", "msg":"服务删除成功!"}', content_type='application/json')
        except Exception:
            # BUG FIX: the status value was misspelled "falied", which breaks
            # any client-side check for status == "failed".
            return HttpResponse('{"status":"failed", "msg":"服务删除失败!"}', content_type='application/json')
######################################
# Operating systems (操作系统)
######################################
class OSListView(LoginStatusCheck, View):
    """List active operating systems with keyword search and pagination."""

    def get(self, request):
        if request.user.role < 3:
            # Navigation state consumed by the template.
            web_chose_left_1 = 'basic_setting'
            web_chose_left_2 = 'os'
            web_chose_middle = ''
            # Active OS records, newest updates first.
            systems = OperatingSystemInfo.objects.filter(status=1).order_by('-update_time')
            # Optional keyword filter over name/version/desc and user names.
            keyword = request.GET.get('keyword', '')
            if keyword != '':
                # BUG FIX: the user-name lookups used a misspelled field
                # "user_namee"; every other list view in this file filters on
                # UserProfile.user_name, and the typo makes the search raise
                # FieldError at query time.
                systems = systems.filter(
                    Q(name__icontains=keyword) | Q(version__icontains=keyword) | Q(desc__icontains=keyword) | Q(
                        add_user__user_name__icontains=keyword) | Q(update_user__user_name__icontains=keyword))
            # Total count before pagination.
            system_nums = systems.count()
            # Page number falls back to 1 when it is not an integer.
            try:
                page = request.GET.get('page', 1)
            except PageNotAnInteger:
                page = 1
            # 16 records per page.
            p = Paginator(systems, 16, request=request)
            systems = p.page(page)
            context = {
                'web_chose_left_1': web_chose_left_1,
                'web_chose_left_2': web_chose_left_2,
                'web_chose_middle': web_chose_middle,
                'systems': systems,
                'keyword': keyword,
                'system_nums': system_nums,
            }
            return render(request, 'host_management/other/system_list.html', context=context)
        else:
            return HttpResponse(status=403)
######################################
# Add operating system (添加操作系统)
######################################
class AddOSView(LoginStatusCheck, View):
    """Create an OperatingSystemInfo record, rejecting active duplicates."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        form = AddOsForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        name = request.POST.get('name')
        version = request.POST.get('version')
        bit = int(request.POST.get('bit'))
        # Reject an identical active (name, version, bit) combination.
        if OperatingSystemInfo.objects.filter(name=name, version=version, bit=bit, status=1):
            return HttpResponse('{"status":"failed", "msg":"该记录已经存在,请检查!"}', content_type='application/json')
        # "system" instead of "os" so the builtin-ish module name is not shadowed.
        system = OperatingSystemInfo()
        system.name = name
        system.version = version
        system.bit = bit
        system.desc = request.POST.get('desc', '')
        system.add_user = request.user
        system.update_user = request.user
        system.status = 1
        system.save()
        # Audit record: operation=1 ("add").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = system.id
        record.operation = 1
        record.action = "添加操作系统:%s %s ( %s )" % (system.name, system.version, system.bit)
        record.save()
        return HttpResponse('{"status":"success", "msg":"操作系统添加成功!"}', content_type='application/json')
######################################
# Edit operating system (编辑操作系统)
######################################
class EditOSView(LoginStatusCheck, View):
    """Update an OperatingSystemInfo record and write an audit entry."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        form = EditOsForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        system = OperatingSystemInfo.objects.get(id=int(request.POST.get('sys_id')))
        system.name = request.POST.get('name')
        system.version = request.POST.get('version')
        system.bit = int(request.POST.get('bit'))
        system.desc = request.POST.get('desc', '')
        system.update_user = request.user
        system.save()
        # Audit record: operation=2 ("edit").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = system.id
        record.operation = 2
        record.action = "修改操作系统:%s %s ( %s )" % (system.name, system.version, system.bit)
        record.save()
        return HttpResponse('{"status":"success", "msg":"操作系统修改成功!"}', content_type='application/json')
######################################
# Delete operating system (删除操作系统)
######################################
class DeleteOSView(LoginStatusCheck, View):
    """Delete an OperatingSystemInfo record, writing an audit entry first."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        target = OperatingSystemInfo.objects.get(id=int(request.POST.get('sys_id')))
        # Audit record before the row disappears: operation=4 ("disable/remove").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = target.id
        record.operation = 4
        record.action = "停用操作系统:%s %s ( %s )" % (target.name, target.version, target.bit)
        record.save()
        target.delete()
        return HttpResponse('{"status":"success", "msg":"操作系统删除成功!"}', content_type='application/json')
######################################
# Project management (项目管理)
######################################
class ProjectListView(LoginStatusCheck, View):
    """List active projects with keyword search and pagination."""

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        # Active users, offered as operators in the add/edit dialogs.
        users = UserProfile.objects.filter(status=1)
        # Active projects, newest updates first.
        projects = ProjectInfo.objects.filter(status=1).order_by('-update_time')
        # Optional keyword filter over name, run env and related user names.
        keyword = request.GET.get('keyword', '')
        if keyword != '':
            projects = projects.filter(
                Q(name__icontains=keyword) | Q(run_env__icontains=keyword) | Q(
                    add_user__user_name__icontains=keyword) | Q(
                    update_user__user_name__icontains=keyword) | Q(op_user__user_name__icontains=keyword))
        project_nums = projects.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 16 records per page.
        projects = Paginator(projects, 16, request=request).page(page)
        context = {
            'web_chose_left_1': 'basic_setting',
            'web_chose_left_2': 'project',
            'web_chose_middle': '',
            'users': users,
            'projects': projects,
            'keyword': keyword,
            'project_nums': project_nums,
        }
        return render(request, 'host_management/other/project_list.html', context=context)
######################################
# Add project (添加项目)
######################################
class AddProjectView(LoginStatusCheck, View):
    """Create a ProjectInfo record, rejecting duplicate active names."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        form = AddProjectForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        name = request.POST.get('name')
        # Reject an active project with the same name.
        if ProjectInfo.objects.filter(name=name, status=1):
            return HttpResponse('{"status":"failed", "msg":"该记录已经存在,请检查!"}', content_type='application/json')
        project = ProjectInfo()
        project.name = name
        project.op_user_id = int(request.POST.get('op_user'))
        project.run_env = request.POST.get('run_env')
        project.add_user = request.user
        project.update_user = request.user
        project.status = 1
        project.save()
        # Audit record: operation=1 ("add").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = project.id
        record.operation = 1
        record.action = "添加项目:%s" % project.name
        record.save()
        return HttpResponse('{"status":"success", "msg":"项目添加成功!"}', content_type='application/json')
######################################
# Edit project (编辑项目)
######################################
class EditProjectView(LoginStatusCheck, View):
    """Update a ProjectInfo record and write an audit entry."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        form = EditProjectForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        project = ProjectInfo.objects.get(id=int(request.POST.get('pro_id')))
        project.name = request.POST.get('name')
        project.op_user_id = int(request.POST.get('op_user'))
        project.run_env = request.POST.get('run_env')
        project.update_user = request.user
        project.save()
        # Audit record: operation=2 ("edit").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = project.id
        record.operation = 2
        record.action = "修改项目:%s" % project.name
        record.save()
        return HttpResponse('{"status":"success", "msg":"项目修改成功!"}', content_type='application/json')
######################################
# Delete project (删除项目)
######################################
class DeleteProjectView(LoginStatusCheck, View):
    """Delete a ProjectInfo record, writing an audit entry first."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        target = ProjectInfo.objects.get(id=int(request.POST.get('pro_id')))
        # Audit record before the row disappears: operation=4 ("disable/remove").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = target.id
        record.operation = 4
        record.action = "停用项目:%s" % target.name
        record.save()
        target.delete()
        return HttpResponse('{"status":"success", "msg":"项目删除成功!"}', content_type='application/json')
######################################
# Port mapping list (端口映射列表)
######################################
class PortToPortListView(LoginStatusCheck, View):
    """List active port-to-port mappings with keyword search and pagination."""

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        # Active mappings, newest updates first.
        records = PortToPortInfo.objects.filter(status=1).order_by('-update_time')
        # Keyword matches IPs/ports exactly, use/desc as substrings.
        keyword = request.GET.get('keyword', '')
        if keyword != '':
            records = records.filter(
                Q(ip_in=keyword) | Q(port_in=keyword) | Q(ip_out=keyword) | Q(port_out=keyword) | Q(
                    use__icontains=keyword) | Q(desc__icontains=keyword))
        record_nums = records.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 16 records per page.
        records = Paginator(records, 16, request=request).page(page)
        context = {
            'web_chose_left_1': 'port_domain',
            'web_chose_left_2': 'port_port',
            'web_chose_middle': '',
            'records': records,
            'keyword': keyword,
            'record_nums': record_nums,
        }
        return render(request, 'host_management/port/port_to_port_list.html', context=context)
######################################
# Add port mapping (添加映射)
######################################
class AddPortToPortView(LoginStatusCheck, View):
    """Create a PortToPortInfo mapping, rejecting duplicate (ip_in, port_in)."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        ip_in = request.POST.get('ip_in')
        port_in = request.POST.get('port_in')
        # The inner (ip, port) pair must be unique among active mappings.
        if PortToPortInfo.objects.filter(ip_in=ip_in, port_in=port_in, status=1):
            return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
        form = AddPortToPortForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        mapping = PortToPortInfo()
        mapping.ip_out = request.POST.get('ip_out', '')
        mapping.port_out = request.POST.get('port_out')
        mapping.ip_in = ip_in
        mapping.port_in = port_in
        mapping.use = request.POST.get('use')
        mapping.desc = request.POST.get('desc', '')
        mapping.add_user = request.user
        mapping.update_user = request.user
        mapping.status = 1
        mapping.save()
        # Audit record: operation=1 ("add").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = mapping.id
        record.operation = 1
        record.action = "添加 [ %s:%s ] 映射:[ %s:%s ]" % (mapping.ip_out, mapping.port_out, mapping.ip_in, mapping.port_in)
        record.save()
        return HttpResponse('{"status":"success", "msg":"添加映射成功!"}', content_type='application/json')
######################################
# Edit port mapping (编辑映射)
######################################
class EditPortToPortView(LoginStatusCheck, View):
    """Update a PortToPortInfo mapping, keeping (ip_in, port_in) unique."""

    def post(self, request):
        if request.user.role < 3:
            port_info = PortToPortInfo.objects.get(id=int(request.POST.get('p_id')))
            ip_in = request.POST.get('ip_in')
            port_in = request.POST.get('port_in')
            # BUG FIX: was "and", which skipped the duplicate check whenever
            # only ONE half of the (ip_in, port_in) pair changed — allowing a
            # duplicate mapping to be saved. Run the check when the pair
            # differs from the stored one at all.
            if (port_info.ip_in != ip_in) or (port_info.port_in != port_in):
                if PortToPortInfo.objects.filter(ip_in=ip_in).filter(port_in=port_in).filter(status=1):
                    return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
            edit_port_to_port_form = EditPortToPortForm(request.POST)
            if edit_port_to_port_form.is_valid():
                port_info.ip_out = request.POST.get('ip_out', '')
                port_info.port_out = request.POST.get('port_out')
                port_info.ip_in = ip_in
                port_info.port_in = port_in
                port_info.use = request.POST.get('use')
                port_info.desc = request.POST.get('desc', '')
                port_info.update_user = request.user
                port_info.save()
                # Audit record: operation=2 ("edit").
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = port_info.id
                op_record.operation = 2
                op_record.action = "编辑 [ %s:%s ] 映射:[ %s:%s ]" % (port_info.ip_out, port_info.port_out, port_info.ip_in, port_info.port_in)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"编辑映射成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# Delete port mapping (删除映射)
######################################
class DeletePortToPortView(LoginStatusCheck, View):
    """Delete a PortToPortInfo mapping, writing an audit entry first."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        mapping = PortToPortInfo.objects.get(id=int(request.POST.get('p_id')))
        # Audit record before the row disappears: operation=4 ("disable/remove").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = mapping.id
        record.operation = 4
        record.action = "停用 [ %s:%s ] 映射:[ %s:%s ]" % (mapping.ip_out, mapping.port_out, mapping.ip_in, mapping.port_in)
        record.save()
        mapping.delete()
        return HttpResponse('{"status":"success", "msg":"停用映射成功!"}', content_type='application/json')
######################################
# Domain name list (域名列表)
######################################
class DomainNameListView(LoginStatusCheck, View):
    """List active domain names with keyword search and pagination."""

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        records = DomainNameInfo.objects.filter(status=1).order_by('-update_time')
        # Optional keyword filter over name and description.
        keyword = request.GET.get('keyword', '')
        if keyword != '':
            records = records.filter(
                Q(name__icontains=keyword) | Q(desc__icontains=keyword))
        record_nums = records.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 16 records per page.
        records = Paginator(records, 16, request=request).page(page)
        context = {
            'web_chose_left_1': 'port_domain',
            'web_chose_left_2': 'domain_name',
            'web_chose_middle': '',
            'records': records,
            'keyword': keyword,
            'record_nums': record_nums,
        }
        return render(request, 'host_management/port/domain_name_list.html', context=context)
######################################
# Add domain name (添加域名)
######################################
class AddDomainNameView(LoginStatusCheck, View):
    """Create a DomainNameInfo record, rejecting duplicate active names."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        name = request.POST.get('name')
        # Reject an active domain with the same name.
        if DomainNameInfo.objects.filter(name=name, status=1):
            return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
        form = AddDomainNameForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        domain = DomainNameInfo()
        domain.name = name
        domain.desc = request.POST.get('desc', '')
        domain.add_user = request.user
        domain.update_user = request.user
        domain.status = 1
        domain.save()
        # Audit record: operation=1 ("add").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = domain.id
        record.operation = 1
        record.action = "添加域名:%s" % domain.name
        record.save()
        return HttpResponse('{"status":"success", "msg":"添加域名成功!"}', content_type='application/json')
######################################
# Edit domain name (修改域名)
######################################
class EditDomainNameView(LoginStatusCheck, View):
    """Update a DomainNameInfo record, keeping active names unique."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        domain = DomainNameInfo.objects.get(id=int(request.POST.get('do_id')))
        name = request.POST.get('name')
        # Only check for duplicates when the name actually changed.
        if domain.name != name:
            if DomainNameInfo.objects.filter(name=name, status=1):
                return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
        form = EditDomainNameForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        domain.name = request.POST.get('name')
        domain.desc = request.POST.get('desc', '')
        domain.update_user = request.user
        domain.save()
        # Audit record: operation=2 ("edit").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = domain.id
        record.operation = 2
        record.action = "修改域名:%s" % domain.name
        record.save()
        return HttpResponse('{"status":"success", "msg":"修改域名成功!"}', content_type='application/json')
######################################
# Delete domain name (删除域名)
######################################
class DeleteDomainNameView(LoginStatusCheck, View):
    """Delete a DomainNameInfo record, writing an audit entry first."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        domain = DomainNameInfo.objects.get(id=int(request.POST.get('do_id')))
        # Audit record before the row disappears: operation=4 ("disable/remove").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = domain.id
        record.operation = 4
        record.action = "停用域名:%s" % domain.name
        record.save()
        domain.delete()
        return HttpResponse('{"status":"success", "msg":"停用域名成功!"}', content_type='application/json')
######################################
# Domain resolve list (域名解析列表)
######################################
class DomainNameResolveListView(LoginStatusCheck, View):
    """List active domain-name resolutions with keyword search and pagination."""

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        records = DomainNameResolveInfo.objects.filter(status=1).order_by('-update_time')
        # Keyword matches the IP exactly, domain name and desc as substrings.
        keyword = request.GET.get('keyword', '')
        if keyword != '':
            records = records.filter(Q(ip=keyword) | Q(domain_name__name__icontains=keyword) | Q(desc__icontains=keyword))
        record_nums = records.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 16 records per page.
        records = Paginator(records, 16, request=request).page(page)
        # Active domains, offered in the add/edit dialogs.
        domains = DomainNameInfo.objects.filter(status=1)
        context = {
            'web_chose_left_1': 'port_domain',
            'web_chose_left_2': 'domain_resolve',
            'web_chose_middle': '',
            'records': records,
            'keyword': keyword,
            'record_nums': record_nums,
            'domains': domains,
        }
        return render(request, 'host_management/port/domain_name_resolve_list.html', context=context)
######################################
# Add domain resolve (添加域名解析)
######################################
class AddDomainNameResolveView(LoginStatusCheck, View):
    """Create a DomainNameResolveInfo record, rejecting active duplicates."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        name = request.POST.get('name')
        domain_name_id = int(request.POST.get('domain_name'))
        # The (name, domain) pair must be unique among active records.
        if DomainNameResolveInfo.objects.filter(name=name, domain_name_id=domain_name_id, status=1):
            return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
        form = AddDomainNameResolveForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        resolve = DomainNameResolveInfo()
        resolve.name = name
        resolve.domain_name_id = domain_name_id
        resolve.desc = request.POST.get('desc', '')
        resolve.ip = request.POST.get('ip')
        resolve.add_user = request.user
        resolve.update_user = request.user
        resolve.status = 1
        resolve.save()
        # Audit record: operation=1 ("add").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = resolve.id
        record.operation = 1
        record.action = "添加域名解析:%s" % (resolve.name)
        record.save()
        return HttpResponse('{"status":"success", "msg":"添加域名解析成功!"}', content_type='application/json')
######################################
# Edit domain resolve (修改域名解析)
######################################
class EditDomainNameResolveView(LoginStatusCheck, View):
    """Update a DomainNameResolveInfo record, keeping (name, domain) unique."""

    def post(self, request):
        if request.user.role < 3:
            domain_info = DomainNameResolveInfo.objects.get(id=int(request.POST.get('do_id')))
            name = request.POST.get('name')
            domain_name_id = int(request.POST.get('domain_name'))
            # BUG FIX: was "and", which skipped the duplicate check whenever
            # only ONE half of the (name, domain) pair changed — allowing a
            # duplicate resolution to be saved. Run the check when the pair
            # differs from the stored one at all.
            if (domain_info.name != name) or (domain_info.domain_name_id != domain_name_id):
                if DomainNameResolveInfo.objects.filter(name=name).filter(domain_name_id=domain_name_id).filter(status=1):
                    return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
            edit_domain_reslove_form = EditDomainNameResolveForm(request.POST)
            if edit_domain_reslove_form.is_valid():
                domain_info.name = name
                domain_info.domain_name_id = domain_name_id
                domain_info.ip = request.POST.get('ip')
                domain_info.desc = request.POST.get('desc', '')
                domain_info.update_user = request.user
                domain_info.save()
                # Audit record: operation=2 ("edit").
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 1
                op_record.status = 1
                op_record.op_num = domain_info.id
                op_record.operation = 2
                op_record.action = "修改域名解析:%s.%s" % (domain_info.name, domain_info.domain_name.name)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"修改域名解析成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# Delete domain resolve (删除域名解析)
######################################
class DeleteDomainNameResolveView(LoginStatusCheck, View):
    """Delete a DomainNameResolveInfo record, writing an audit entry first."""

    def post(self, request):
        if request.user.role < 3:
            domain_info = DomainNameResolveInfo.objects.get(id=int(request.POST.get('do_id')))
            # Audit record before the row disappears: operation=4 ("disable/remove").
            op_record = UserOperationRecord()
            op_record.op_user = request.user
            op_record.belong = 1
            op_record.status = 1
            op_record.op_num = domain_info.id
            op_record.operation = 4
            op_record.action = "停用域名解析:%s.%s" % (domain_info.name, domain_info.domain_name.name)
            op_record.save()
            domain_info.delete()
            # BUG FIX: the success message said "停用域名成功" (domain name),
            # copy-pasted from DeleteDomainNameView; this view removes a
            # domain RESOLUTION record.
            return HttpResponse('{"status":"success", "msg":"停用域名解析成功!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# Usage management (用途管理)
######################################
class UseListView(LoginStatusCheck, View):
    """List active usage entries with keyword search and pagination."""

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        # Active usage records, newest updates first.
        uses = UseInfo.objects.filter(status=1).order_by('-update_time')
        # Optional keyword filter over name/desc and related user names.
        keyword = request.GET.get('keyword', '')
        if keyword != '':
            uses = uses.filter(
                Q(name__icontains=keyword) | Q(desc__icontains=keyword) | Q(
                    add_user__user_name__icontains=keyword) | Q(
                    update_user__user_name__icontains=keyword))
        use_nums = uses.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 16 records per page.
        uses = Paginator(uses, 16, request=request).page(page)
        context = {
            'web_chose_left_1': 'basic_setting',
            'web_chose_left_2': 'use',
            'web_chose_middle': '',
            'uses': uses,
            'keyword': keyword,
            'use_nums': use_nums,
        }
        return render(request, 'host_management/other/use_list.html', context=context)
######################################
# Add usage (添加用途)
######################################
class AddUseView(LoginStatusCheck, View):
    """Create a UseInfo record, rejecting duplicate active names."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        form = AddUseForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        name = request.POST.get('name')
        # Reject an active usage entry with the same name.
        if UseInfo.objects.filter(name=name, status=1):
            return HttpResponse('{"status":"failed", "msg":"该记录已经存在,请检查!"}', content_type='application/json')
        use = UseInfo()
        use.name = name
        use.desc = request.POST.get('desc', '')
        use.add_user = request.user
        use.update_user = request.user
        use.status = 1
        use.save()
        # Audit record: operation=1 ("add").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = use.id
        record.operation = 1
        record.action = "添加用途:%s" % use.name
        record.save()
        return HttpResponse('{"status":"success", "msg":"用途添加成功!"}', content_type='application/json')
######################################
# Edit usage (编辑用途)
######################################
class EditUseView(LoginStatusCheck, View):
    """Update a UseInfo record and write an audit entry."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        form = EditUseForm(request.POST)
        if not form.is_valid():
            return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        use = UseInfo.objects.get(id=int(request.POST.get('use_id')))
        use.name = request.POST.get('name')
        use.desc = request.POST.get('desc', '')
        use.update_user = request.user
        use.save()
        # Audit record: operation=2 ("edit").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = use.id
        record.operation = 2
        record.action = "修改用途:%s" % use.name
        record.save()
        return HttpResponse('{"status":"success", "msg":"用途修改成功!"}', content_type='application/json')
######################################
# Delete usage (删除用途)
######################################
class DeleteUseView(LoginStatusCheck, View):
    """Delete a UseInfo record, writing an audit entry first."""

    def post(self, request):
        # Management actions are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        target = UseInfo.objects.get(id=int(request.POST.get('use_id')))
        # Audit record before the row disappears: operation=4 ("disable/remove").
        record = UserOperationRecord()
        record.op_user = request.user
        record.belong = 1
        record.status = 1
        record.op_num = target.id
        record.operation = 4
        record.action = "停用用途:%s" % target.name
        record.save()
        target.delete()
        return HttpResponse('{"status":"success", "msg":"用途删除成功!"}', content_type='application/json')
######################################
# Host operation log (主机操作记录)
######################################
class HostOperationView(LoginStatusCheck, View):
    """List host-module audit records with keyword and operation-type filters."""

    # Maps the "user_check" query value to the stored operation code:
    # add=1, edit=2, enable=3, disable=4; any other value means "all".
    _OPERATION_FILTER = {'add': 1, 'edit': 2, 'up': 3, 'down': 4}

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        records = UserOperationRecord.objects.filter(belong=1).order_by('-add_time')
        # Keyword matches the operator name exactly, the action as a substring.
        keyword = request.GET.get('keyword', '')
        if keyword != '':
            records = records.filter(
                Q(op_user__user_name=keyword) | Q(action__icontains=keyword))
        # Optional operation-type filter.
        user_check = request.GET.get('user_check', 'all')
        if user_check in self._OPERATION_FILTER:
            records = records.filter(operation=self._OPERATION_FILTER[user_check])
        record_nums = records.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 19 records per page.
        records = Paginator(records, 19, request=request).page(page)
        context = {
            'web_chose_left_1': 'log_management',
            'web_chose_left_2': 'op_log',
            'web_chose_middle': '',
            'records': records,
            'keyword': keyword,
            'record_nums': record_nums,
            'user_check': user_check,
        }
        return render(request, 'host_management/other/host_op_record.html', context=context)
######################################
# Data dictionary list (数据字典列表)
######################################
class DictListView(LoginStatusCheck, View):
    """List all data-dictionary entries with pagination (no keyword filter)."""

    def get(self, request):
        # Management pages are limited to users with role < 3.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        # All dictionary rows (no status filter on this model's listing).
        dicts = DataDictInfo.objects.filter()
        record_nums = dicts.count()
        # Page number falls back to 1 when it is not an integer.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # 16 records per page.
        dicts = Paginator(dicts, 16, request=request).page(page)
        context = {
            'web_chose_left_1': 'basic_setting',
            'web_chose_left_2': 'dict',
            'web_chose_middle': '',
            'dicts': dicts,
            'record_nums': record_nums,
        }
        return render(request, 'host_management/other/data_dict_list.html', context=context)
######################################
# Add data dictionary (添加数据字典)
######################################
class AddDictView(LoginStatusCheck, View):
    """Create a DataDictInfo entry, rejecting duplicate (name, value) pairs."""

    def post(self, request):
        if request.user.role < 3:
            name = request.POST.get('name')
            value = request.POST.get('value')
            # Reject an identical (name, value) pair.
            if DataDictInfo.objects.filter(name=name).filter(value=value):
                return HttpResponse('{"status":"failed", "msg":"该记录已存在,请检查!"}', content_type='application/json')
            add_dict_form = AddDictForm(request.POST)
            if add_dict_form.is_valid():
                dict_info = DataDictInfo()
                dict_info.name = name
                dict_info.value = value
                dict_info.remarks = request.POST.get('remarks', '')
                dict_info.create_by = request.user.id
                dict_info.save()
                # Audit record: belong=6 (data-dict module), operation=1 ("add").
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 6
                op_record.status = 1
                op_record.op_num = dict_info.id
                op_record.operation = 1
                op_record.action = "添加数据字典:%s" % (dict_info.name)
                op_record.save()
                # BUG FIX: the success message said "添加域名解析成功"
                # (domain resolve), copy-pasted from AddDomainNameResolveView;
                # this view adds a data-dictionary entry.
                return HttpResponse('{"status":"success", "msg":"添加数据字典成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# Edit data dictionary (修改数据字典)
######################################
class EditDictView(LoginStatusCheck, View):
    """Update a DataDictInfo entry, keeping (name, value) pairs unique."""

    def post(self, request):
        if request.user.role < 3:
            # NOTE(review): the record is looked up by POST 'name' while the
            # delete view uses 'id' — confirm the client actually posts the
            # (unchanged) name here; lookup by a mutable field is fragile.
            dict_info = DataDictInfo.objects.get(name=request.POST.get('name'))
            name = request.POST.get('name')
            value = request.POST.get('value')
            # BUG FIX: was "and". Because the record is fetched BY name,
            # dict_info.name always equals name, so with "and" the duplicate
            # check could never run. With "or" it runs whenever the pair
            # differs from the stored one.
            if (dict_info.name != name) or (dict_info.value != value):
                if DataDictInfo.objects.filter(name=name).filter(value=value):
                    return HttpResponse('{"status":"failed", "msg":"该数据已存在,请检查!"}', content_type='application/json')
            edit_dict_form = EditDictForm(request.POST)
            if edit_dict_form.is_valid():
                dict_info.name = name
                dict_info.value = value
                dict_info.remarks = request.POST.get('remarks')
                dict_info.save()
                # Audit record: belong=6 (data-dict module), operation=2 ("edit").
                op_record = UserOperationRecord()
                op_record.op_user = request.user
                op_record.belong = 6
                op_record.status = 1
                op_record.op_num = dict_info.id
                op_record.operation = 2
                op_record.action = "修改数据字典:%s.%s" % (dict_info.name, dict_info.value)
                op_record.save()
                return HttpResponse('{"status":"success", "msg":"修改数据字典成功!"}', content_type='application/json')
            else:
                return HttpResponse('{"status":"failed", "msg":"填写不合法,请检查!"}', content_type='application/json')
        else:
            return HttpResponse(status=403)
######################################
# 删除数据字典
######################################
class DeleteDictView(LoginStatusCheck, View):
    """Delete a data-dictionary entry (requires role < 3)."""

    def post(self, request):
        """Remove the DataDictInfo row named by POST['id'] and log it."""
        # Guard clause: anything but a privileged role is rejected.
        if request.user.role >= 3:
            return HttpResponse(status=403)
        entry = DataDictInfo.objects.get(id=int(request.POST.get('id')))
        # Write the audit record first, while the row's fields are still
        # available (belong=6: data dictionary, operation=4: delete).
        log_entry = UserOperationRecord()
        log_entry.op_user = request.user
        log_entry.belong = 6
        log_entry.status = 1
        log_entry.op_num = entry.id
        log_entry.operation = 4
        log_entry.action = "删除数据字典:%s.%s" % (entry.name, entry.value)
        log_entry.save()
        entry.delete()
        return HttpResponse('{"status":"success", "msg":"删除数据字典成功!"}', content_type='application/json')
|
<reponame>aws-solutions/discovering-hot-topics-using-machine-learning
#!/usr/bin/env python
######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import os
import unittest
from functools import wraps
from test.fixtures.event_bus_fixture import get_event_bus_stubber
from test.test_stream_helper import stream_setup
import boto3
import mock
import pytest
from botocore import stub
from moto import mock_dynamodb, mock_kinesis, mock_sts
from shared_util import custom_boto_config
def create_event_data_for_ddb(url: str):
    """Serialize a fake DDB-sourced config event detail for *url*."""
    detail = {}
    detail["platform"] = "fakeplatform"
    detail["account"] = "fakeaccount"
    detail["query"] = "fakequery"
    detail["url"] = url
    return json.dumps(detail)
def create_event_data_for_json_str(url: str):
    """Serialize a newsfeeds/url_params config event detail for *url*."""
    detail = {}
    detail["platform"] = "newsfeeds"
    detail["account"] = "url_params"
    detail["query"] = "fakequery"
    detail["url"] = url
    return json.dumps(detail)
def create_cw_schedule_event():
    """Return a canned CloudWatch scheduled-event payload."""
    event = {}
    event["id"] = "fakeid"
    event["detail-type"] = "Scheduled Event"
    event["source"] = "aws.events"
    event["account"] = "fakeaccount"
    event["time"] = "1970-01-01T00:00:00Z"
    event["region"] = "us-east-1"
    event["resources"] = ["arn:aws:events:us-east-1:fakeaccount/FakeRule"]
    event["detail"] = {}
    return event
def create_event_bus_consumer_event():
    """Return a canned EventBridge 'newscatcher' event for the consumer.

    The source is read from the INGESTION_NAMESPACE environment variable.
    """
    inner_detail = {}
    inner_detail["platform"] = "newsfeeds"
    inner_detail["account"] = "url_params"
    inner_detail["search_query"] = "fakequery"
    inner_detail["url"] = "cnn.com"
    event = {}
    event["version"] = "0"
    event["id"] = "fakeID"
    event["detail-type"] = "newscatcher"
    event["source"] = os.environ["INGESTION_NAMESPACE"]
    event["account"] = "fakeAccountID"
    event["time"] = "2020-06-13T23:14:19Z"
    event["region"] = "us-east-1"
    event["detail"] = inner_detail
    return event
def create_event_bus_mocked_response():
    """Return the stubbed put_events response (one success, no failures)."""
    return {
        "Entries": [{"EventId": "fakeeventid"}],
        "FailedEntryCount": 0,
    }
def create_kinesis_streams():
    """Create the Kinesis stream named by STREAM_NAME via the shared test helper."""
    stream_setup(os.environ["STREAM_NAME"])
def created_ddb_for_tracker():
    """Create the tracker DynamoDB table (TARGET_DDB_TABLE) for the consumer test."""
    table_name = os.environ["TARGET_DDB_TABLE"]
    from test.test_query_ddb_helper import ddb_setup
    dynamodb = ddb_setup(table_name)
def create_ddb_table_for_US_en():
    """Create the config DynamoDB table and seed one enabled US/en row.

    Returns the dynamodb resource created by the shared test helper.
    """
    # setting up ddb
    table_name = os.environ["DDB_CONFIG_TABLE_NAME"]
    from test.test_config_ddb_helper import ddb_setup
    dynamodb = ddb_setup(table_name)
    table = dynamodb.Table(table_name)
    # Single enabled config record; the handler should expand it into one
    # event per US/en news site.
    item = {
        "account": "fakeaccount",
        "platform": "fakeplatform",
        "query": "fakequery",
        "enabled": True,
        "country": "US",
        "language": "en",
    }
    table.put_item(Item=item)
    return dynamodb
def get_news_sites_for_US_en():
    """Return the fixed, ordered list of US/en news-site domains."""
    domains = (
        "cnn.com reuters.com wsj.com washingtonpost.com usatoday.com "
        "wired.com cnbc.com digg.com cbsnews.com theverge.com "
        "mashable.com foxnews.com engadget.com nbcnews.com fortune.com "
        "aljazeera.com thedailybeast.com nymag.com techradar.com "
        "washingtontimes.com msnbc.com denverpost.com ew.com "
        "ycombinator.com breitbart.com bleacherreport.com propublica.org "
        "nationalreview.com axios.com alternet.org dailydot.com "
        "dailycaller.com democracynow.org heavy.com newser.com "
        "firstcoastnews.com mintpressnews.com goodnewsnetwork.org "
        "thegrio.com pantagraph.com pjstar.com dailyvoice.com "
        "ctmirror.org newsy.com conservativereview.com"
    )
    return domains.split()
def lambda_event_bus_event():
    """Return a canned EventBridge 'config' event for the lambda handler.

    The source is read from the INGESTION_NAMESPACE environment variable.
    """
    event = {}
    event["version"] = "0"
    event["id"] = "fakeid"
    event["detail-type"] = "config"
    event["source"] = os.environ["INGESTION_NAMESPACE"]
    event["account"] = "FAKEACCOUNT"
    event["time"] = "2020-06-24T17:16:02Z"
    event["region"] = "us-west-2"
    event["resources"] = []
    event["detail"] = {}
    return event
@mock_sts
@mock_dynamodb
def test_invoke_lambda_for_ddb_config(get_event_bus_stubber):
    """Handler driven by DDB config should emit one event per US/en site."""
    lambda_event = create_cw_schedule_event()
    create_ddb_table_for_US_en()
    site_list = get_news_sites_for_US_en()
    # Register one expected put_events call per site, in list order.
    for site in site_list:
        get_event_bus_stubber.add_response(
            "put_events",
            {"Entries": [{"EventId": "fakeeventid"}], "FailedEntryCount": 0},
            {
                "Entries": [
                    {
                        "EventBusName": os.environ["EVENT_BUS_NAME"],
                        "Source": os.environ["INGESTION_NAMESPACE"],
                        "Detail": create_event_data_for_ddb(site),
                        "DetailType": "config",
                    }
                ]
            },
        )
    get_event_bus_stubber.activate()
    # Import deferred so the module picks up the stubbed client/env.
    from lambda_function import publish_config_handler
    assert None == publish_config_handler(lambda_event, None)
@mock_sts
@mock_dynamodb
@mock.patch.dict(os.environ, {"CONFIG_PARAM": '{"country":"US", "language":"en"}'})
@mock.patch.dict(os.environ, {"SEARCH_QUERY": "fakequery"})
def test_invoke_lambda_for_json_str(get_event_bus_stubber):
    """Handler driven by CONFIG_PARAM env JSON should emit one event per site."""
    lambda_event = create_cw_schedule_event()
    site_list = get_news_sites_for_US_en()
    # Register one expected put_events call per site, in list order.
    for site in site_list:
        get_event_bus_stubber.add_response(
            "put_events",
            {"Entries": [{"EventId": "fakeeventid"}], "FailedEntryCount": 0},
            {
                "Entries": [
                    {
                        "EventBusName": os.environ["EVENT_BUS_NAME"],
                        "Source": os.environ["INGESTION_NAMESPACE"],
                        "Detail": create_event_data_for_json_str(site),
                        "DetailType": "config",
                    }
                ]
            },
        )
    get_event_bus_stubber.activate()
    # Import deferred so the module picks up the stubbed client/env.
    from lambda_function import publish_config_handler
    assert None == publish_config_handler(lambda_event, None)
@mock_sts
@mock_dynamodb
def test_invoke_lambda_for_ddb_config_with_failed_count(get_event_bus_stubber):
    """Handler should still complete when put_events reports failed entries."""
    lambda_event = create_cw_schedule_event()
    create_ddb_table_for_US_en()
    site_list = get_news_sites_for_US_en()
    # Same expectations as the happy path, but every response reports
    # FailedEntryCount=1.
    for site in site_list:
        get_event_bus_stubber.add_response(
            "put_events",
            {"Entries": [{"EventId": "fakeeventid"}], "FailedEntryCount": 1},
            {
                "Entries": [
                    {
                        "EventBusName": os.environ["EVENT_BUS_NAME"],
                        "Source": os.environ["INGESTION_NAMESPACE"],
                        "Detail": create_event_data_for_ddb(site),
                        "DetailType": "config",
                    }
                ]
            },
        )
    get_event_bus_stubber.activate()
    # Import deferred so the module picks up the stubbed client/env.
    from lambda_function import publish_config_handler
    assert None == publish_config_handler(lambda_event, None)
@mock_sts
@mock_dynamodb
@mock_kinesis
def test_invoke_process_config_handler():
    """Consumer handler should process an event-bus event without error."""
    lambda_event = create_event_bus_consumer_event()
    from lambda_function import process_config_handler
    # Stream and tracker table must exist before the handler runs.
    create_kinesis_streams()
    created_ddb_for_tracker()
    assert None == process_config_handler(lambda_event, None)
|
import torch.nn as nn
import torch.nn.functional as F
import torch
from im2scene.common import (
arange_pixels, image_points_to_world, origin_to_world
)
import numpy as np
from scipy.spatial.transform import Rotation as Rot
from im2scene.camera import get_camera_mat, get_random_pose, get_camera_pose
import pdb
from .resnet import resnet34
class Generator(nn.Module):
    ''' GIRAFFE Generator Class.

    Encodes an input image with a ResNet into (uv, shape, appearance)
    codes, then volume-renders the scene three times: with the predicted
    camera, with the batch-flipped ("swapped") camera, and with a
    randomly sampled camera.

    Args:
        device (pytorch device): pytorch device
        z_dim (int): dimension of latent code z
        z_dim_bg (int): dimension of background latent code z_bg
        decoder (nn.Module): decoder network
        range_u (tuple): rotation range (0 - 1)
        range_v (tuple): elevation range (0 - 1)
        n_ray_samples (int): number of samples per ray
        range_radius(tuple): radius range
        depth_range (tuple): near and far depth plane
        background_generator (nn.Module): background generator
        bounding_box_generator (nn.Module): bounding box generator
        resolution_vol (int): resolution of volume-rendered image
        neural_renderer (nn.Module): neural renderer
        fov (float): field of view
        backround_rotation_range (tuple): background rotation range
            (0 - 1)
        sample_object_existance (bool): whether to sample the existence
            of objects; only used for clevr2345
        use_max_composition (bool): whether to use the max
            composition operator instead
    '''

    def __init__(self, device, z_dim=256, z_dim_bg=128, decoder=None,
                 range_u=(0, 0), range_v=(0.25, 0.25), n_ray_samples=64,
                 range_radius=(2.732, 2.732), depth_range=[0.5, 6.],
                 background_generator=None,
                 bounding_box_generator=None, resolution_vol=16,
                 neural_renderer=None,
                 fov=49.13,
                 backround_rotation_range=[0., 0.],
                 sample_object_existance=False,
                 use_max_composition=False, **kwargs):
        super().__init__()
        self.device = device
        self.n_ray_samples = n_ray_samples
        self.range_u = range_u
        self.range_v = range_v
        self.resolution_vol = resolution_vol
        self.range_radius = range_radius
        self.depth_range = depth_range
        self.bounding_box_generator = bounding_box_generator
        self.fov = fov
        self.backround_rotation_range = backround_rotation_range
        self.sample_object_existance = sample_object_existance
        self.z_dim = z_dim
        self.z_dim_bg = z_dim_bg
        self.use_max_composition = use_max_composition
        # Image encoder predicting (uv camera params, shape code,
        # appearance code).
        self.resnet = resnet34(pretrained=True, shape_dim=self.z_dim,
                               app_dim=self.z_dim)
        self.camera_matrix = get_camera_mat(fov=fov).to(device)  # intrinsic camera

        if decoder is not None:
            self.decoder = decoder.to(device)
        else:
            self.decoder = None
        if background_generator is not None:
            self.background_generator = background_generator.to(device)
        else:
            self.background_generator = None
        if bounding_box_generator is not None:
            self.bounding_box_generator = bounding_box_generator.to(device)
        else:
            self.bounding_box_generator = bounding_box_generator
        if neural_renderer is not None:
            self.neural_renderer = neural_renderer.to(device)
        else:
            self.neural_renderer = None

    def forward(self, img, batch_size=32, latent_codes=None, camera_matrices=None,
                transformations=None, bg_rotation=None, mode="training", it=0,
                return_alpha_map=False,
                not_render_background=False,
                only_render_background=False,
                need_uv=False):
        """Render img's predicted view plus swapped and random views.

        Returns (rgb, swap_rgb, rand_rgb), optionally followed by the
        corresponding uv parameters when need_uv is True, or just the
        alpha map when return_alpha_map is True.
        """
        batch_size = img.shape[0]
        # Encode the image (same input the discriminator receives).
        uv, shape, appearance = self.resnet(img)
        if latent_codes is None:
            latent_codes = shape.unsqueeze(1), appearance.unsqueeze(1)
        # NOTE(review): if a caller passes camera_matrices explicitly, the
        # swap/rand cameras below are never defined and the later renders
        # raise NameError -- presumably callers always leave it None.
        if camera_matrices is None:
            u, v = uv[:, 0], uv[:, 1]
            v = v/2
            camera_matrices = self.get_random_camera(u, v, batch_size)  # (camera mat, world mat)
            # Swapped view: pair each image with the reversed batch order.
            swap_cam_matrices = self.get_random_camera(u.flip(0), v.flip(0), batch_size)
            # Random view: uniform sample inside the configured u/v ranges.
            random_u = torch.rand(batch_size)*(self.range_u[1] - self.range_u[0]) + self.range_u[0]
            random_v = torch.rand(batch_size)*(self.range_v[1] - self.range_v[0]) + self.range_v[0]
            rand_cam_matrices = self.get_random_camera(random_u, random_v, batch_size)
        if transformations is None:
            # Dummy value here: transform_points_to_box is currently a
            # no-op, so these transformations have no effect.
            transformations = self.get_random_transformations(batch_size)

        if return_alpha_map:
            # Fix: transformations is a required positional argument of
            # volume_render_image; the original omitted it here.
            rgb_v, alpha_map = self.volume_render_image(
                latent_codes, camera_matrices, transformations,
                mode=mode, it=it, return_alpha_map=True,
                not_render_background=not_render_background)
            return alpha_map
        else:
            rgb_v = self.volume_render_image(
                latent_codes, camera_matrices, transformations,
                mode=mode, it=it, not_render_background=not_render_background,
                only_render_background=only_render_background)
            swap_rgb_v = self.volume_render_image(
                latent_codes, swap_cam_matrices, transformations,
                mode=mode, it=it, not_render_background=not_render_background,
                only_render_background=only_render_background)
            rand_rgb_v = self.volume_render_image(
                latent_codes, rand_cam_matrices, transformations,
                mode=mode, it=it, not_render_background=not_render_background,
                only_render_background=only_render_background)
            if self.neural_renderer is not None:
                rgb = self.neural_renderer(rgb_v)
                swap_rgb = self.neural_renderer(swap_rgb_v)
                rand_rgb = self.neural_renderer(rand_rgb_v)
            else:
                # Fix: original read the not-yet-defined names swap_rgb /
                # rand_rgb (NameError); pass the volume renders through.
                rgb = rgb_v
                swap_rgb = swap_rgb_v
                rand_rgb = rand_rgb_v

            if need_uv:
                return rgb, swap_rgb, rand_rgb, (uv, uv.flip(0), torch.cat((random_u.unsqueeze(-1), random_v.unsqueeze(-1)), dim=-1))
            else:
                return rgb, swap_rgb, rand_rgb

    def get_n_boxes(self):
        """Number of object bounding boxes (1 when no generator is set)."""
        if self.bounding_box_generator is not None:
            n_boxes = self.bounding_box_generator.n_boxes
        else:
            n_boxes = 1
        return n_boxes

    def get_latent_codes(self, batch_size=32, tmp=1.):
        """Sample object/background shape and appearance codes."""
        z_dim, z_dim_bg = self.z_dim, self.z_dim_bg
        n_boxes = self.get_n_boxes()

        def sample_z(x): return self.sample_z(x, tmp=tmp)
        z_shape_obj = sample_z((batch_size, n_boxes, z_dim))
        z_app_obj = sample_z((batch_size, n_boxes, z_dim))
        z_shape_bg = sample_z((batch_size, z_dim_bg))
        z_app_bg = sample_z((batch_size, z_dim_bg))
        return z_shape_obj, z_app_obj, z_shape_bg, z_app_bg

    def sample_z(self, size, to_device=True, tmp=1.):
        """Draw N(0, tmp^2) noise of the given size (on self.device)."""
        z = torch.randn(*size) * tmp
        if to_device:
            z = z.to(self.device)
        return z

    def get_vis_dict(self, batch_size=32):
        """Assemble a fixed dict of inputs for visualization."""
        # NOTE(review): u=v=-1 is outside the documented (0 - 1) range;
        # presumably get_random_pose treats it as a sentinel -- confirm.
        vis_dict = {
            'batch_size': batch_size,
            'latent_codes': self.get_latent_codes(batch_size),
            'camera_matrices': self.get_random_camera(-1, -1, batch_size),
            'transformations': self.get_random_transformations(batch_size),
        }
        return vis_dict

    def get_random_camera(self, u=None, v=None, batch_size=32, to_device=True):
        """Return (intrinsics, random world matrix) for the given u/v."""
        camera_mat = self.camera_matrix.repeat(batch_size, 1, 1)  # intrinsics
        world_mat = get_random_pose(
            u, v, self.range_radius, batch_size)  # (batch, 4, 4)
        if to_device:
            world_mat = world_mat.to(self.device)
        return camera_mat, world_mat

    def get_camera(self, val_u=0.5, val_v=0.5, val_r=0.5, batch_size=32,
                   to_device=True):
        """Return (intrinsics, world matrix) at fixed relative u/v/radius."""
        camera_mat = self.camera_matrix.repeat(batch_size, 1, 1)
        world_mat = get_camera_pose(
            self.range_u, self.range_v, self.range_radius, val_u, val_v,
            val_r, batch_size=batch_size)
        if to_device:
            world_mat = world_mat.to(self.device)
        return camera_mat, world_mat

    def get_random_bg_rotation(self, batch_size, to_device=True):
        """Sample one z-axis background rotation shared across the batch."""
        if self.backround_rotation_range != [0., 0.]:
            bg_r = self.backround_rotation_range
            r_random = bg_r[0] + np.random.rand() * (bg_r[1] - bg_r[0])
            # as_matrix() replaces as_dcm(), which was removed in SciPy 1.6.
            R_bg = [
                torch.from_numpy(Rot.from_euler(
                    'z', r_random * 2 * np.pi).as_matrix()
                ) for i in range(batch_size)]
            R_bg = torch.stack(R_bg, dim=0).reshape(
                batch_size, 3, 3).float()
        else:
            R_bg = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1).float()
        if to_device:
            R_bg = R_bg.to(self.device)
        return R_bg

    def get_bg_rotation(self, val, batch_size=32, to_device=True):
        """Background rotation at relative position val in the range."""
        if self.backround_rotation_range != [0., 0.]:
            bg_r = self.backround_rotation_range
            r_val = bg_r[0] + val * (bg_r[1] - bg_r[0])
            # as_matrix() replaces as_dcm(), which was removed in SciPy 1.6.
            r = torch.from_numpy(
                Rot.from_euler('z', r_val * 2 * np.pi).as_matrix()
            ).reshape(1, 3, 3).repeat(batch_size, 1, 1).float()
        else:
            r = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1).float()
        if to_device:
            r = r.to(self.device)
        return r

    def get_random_transformations(self, batch_size=32, to_device=True):
        """Sample (scale, translation, rotation) from the box generator."""
        device = self.device
        s, t, R = self.bounding_box_generator(batch_size)
        if to_device:
            s, t, R = s.to(device), t.to(device), R.to(device)
        return s, t, R

    def get_transformations(self, val_s=[[0.5, 0.5, 0.5]],
                            val_t=[[0.5, 0.5, 0.5]], val_r=[0.5],
                            batch_size=32, to_device=True):
        """Deterministic (scale, translation, rotation) at relative values."""
        device = self.device
        s = self.bounding_box_generator.get_scale(
            batch_size=batch_size, val=val_s)
        t = self.bounding_box_generator.get_translation(
            batch_size=batch_size, val=val_t)
        R = self.bounding_box_generator.get_rotation(
            batch_size=batch_size, val=val_r)
        if to_device:
            s, t, R = s.to(device), t.to(device), R.to(device)
        return s, t, R

    def get_transformations_in_range(self, range_s=[0., 1.], range_t=[0., 1.],
                                     range_r=[0., 1.], n_boxes=1,
                                     batch_size=32, to_device=True):
        """Sample per-item transformations uniformly within given ranges."""
        s, t, R = [], [], []

        def rand_s(): return range_s[0] + \
            np.random.rand() * (range_s[1] - range_s[0])

        def rand_t(): return range_t[0] + \
            np.random.rand() * (range_t[1] - range_t[0])

        def rand_r(): return range_r[0] + \
            np.random.rand() * (range_r[1] - range_r[0])

        for i in range(batch_size):
            val_s = [[rand_s(), rand_s(), rand_s()] for j in range(n_boxes)]
            val_t = [[rand_t(), rand_t(), rand_t()] for j in range(n_boxes)]
            val_r = [rand_r() for j in range(n_boxes)]
            si, ti, Ri = self.get_transformations(
                val_s, val_t, val_r, batch_size=1, to_device=to_device)
            s.append(si)
            t.append(ti)
            R.append(Ri)
        s, t, R = torch.cat(s), torch.cat(t), torch.cat(R)
        if to_device:
            device = self.device
            s, t, R = s.to(device), t.to(device), R.to(device)
        return s, t, R

    def get_rotation(self, val_r, batch_size=32, to_device=True):
        """Rotation matrices from the box generator at relative val_r."""
        device = self.device
        R = self.bounding_box_generator.get_rotation(
            batch_size=batch_size, val=val_r)
        if to_device:
            R = R.to(device)
        return R

    def add_noise_to_interval(self, di):
        """Stratified sampling: jitter each depth sample within its bin."""
        di_mid = .5 * (di[..., 1:] + di[..., :-1])
        di_high = torch.cat([di_mid, di[..., -1:]], dim=-1)
        di_low = torch.cat([di[..., :1], di_mid], dim=-1)
        noise = torch.rand_like(di_low)
        ti = di_low + (di_high - di_low) * noise
        return ti

    def transform_points_to_box(self, p, transformations, box_idx=0,
                                scale_factor=1.):
        """Identity pass-through; the original GIRAFFE box transform
        (kept below for reference) is intentionally disabled here.

        p: (batch, n_points, 3) world-space points.
        """
        # bb_s, bb_t, bb_R = transformations  # s: (batch, 1, 3), t: (batch, 1, 3), R: (batch, 1, 3, 3)
        # p_box = (bb_R[:, box_idx] @ (p - bb_t[:, box_idx].unsqueeze(1)
        #                              ).permute(0, 2, 1)).permute(
        #     0, 2, 1) / bb_s[:, box_idx].unsqueeze(1) * scale_factor
        return p

    def get_evaluation_points_bg(self, pixels_world, camera_world, di,
                                 rotation_matrix):
        """Sample points/ray dirs along background rays (rotated frame)."""
        batch_size = pixels_world.shape[0]
        n_steps = di.shape[-1]
        camera_world = (rotation_matrix @
                        camera_world.permute(0, 2, 1)).permute(0, 2, 1)
        pixels_world = (rotation_matrix @
                        pixels_world.permute(0, 2, 1)).permute(0, 2, 1)
        ray_world = pixels_world - camera_world
        p = camera_world.unsqueeze(-2).contiguous() + \
            di.unsqueeze(-1).contiguous() * \
            ray_world.unsqueeze(-2).contiguous()
        r = ray_world.unsqueeze(-2).repeat(1, 1, n_steps, 1)
        assert(p.shape == r.shape)
        p = p.reshape(batch_size, -1, 3)
        r = r.reshape(batch_size, -1, 3)
        return p, r

    def get_evaluation_points(self, pixels_world, camera_world, di,
                              transformations):
        """Sample points/ray dirs along object rays (box frame)."""
        batch_size = pixels_world.shape[0]
        n_steps = di.shape[-1]
        pixels_world_i = self.transform_points_to_box(
            pixels_world, transformations)
        camera_world_i = self.transform_points_to_box(
            camera_world, transformations)
        ray_i = pixels_world_i - camera_world_i
        p_i = camera_world_i.unsqueeze(-2).contiguous() + \
            di.unsqueeze(-1).contiguous() * ray_i.unsqueeze(-2).contiguous()
        ray_i = ray_i.unsqueeze(-2).repeat(1, 1, n_steps, 1)
        assert(p_i.shape == ray_i.shape)
        p_i = p_i.reshape(batch_size, -1, 3)
        ray_i = ray_i.reshape(batch_size, -1, 3)
        return p_i, ray_i

    def composite_function(self, sigma, feat):
        """Combine per-object densities/features into a single field."""
        n_boxes = sigma.shape[0]
        if n_boxes > 1:
            if self.use_max_composition:
                bs, rs, ns = sigma.shape[1:]
                sigma_sum, ind = torch.max(sigma, dim=0)
                feat_weighted = feat[ind, torch.arange(bs).reshape(-1, 1, 1),
                                     torch.arange(rs).reshape(
                                         1, -1, 1), torch.arange(ns).reshape(
                                             1, 1, -1)]
            else:
                # Density-weighted average of features.
                denom_sigma = torch.sum(sigma, dim=0, keepdim=True)
                denom_sigma[denom_sigma == 0] = 1e-4
                w_sigma = sigma / denom_sigma
                sigma_sum = torch.sum(sigma, dim=0)
                feat_weighted = (feat * w_sigma.unsqueeze(-1)).sum(0)
        else:
            sigma_sum = sigma.squeeze(0)
            feat_weighted = feat.squeeze(0)
        return sigma_sum, feat_weighted

    def calc_volume_weights(self, z_vals, ray_vector, sigma, last_dist=1e10):
        """Standard NeRF alpha-compositing weights along each ray."""
        dists = z_vals[..., 1:] - z_vals[..., :-1]
        dists = torch.cat([dists, torch.ones_like(
            z_vals[..., :1]) * last_dist], dim=-1)
        dists = dists * torch.norm(ray_vector, dim=-1, keepdim=True)
        alpha = 1.-torch.exp(-F.relu(sigma)*dists)
        weights = alpha * \
            torch.cumprod(torch.cat([
                torch.ones_like(alpha[:, :, :1]),
                (1. - alpha + 1e-10), ], dim=-1), dim=-1)[..., :-1]
        return weights

    def get_object_existance(self, n_boxes, batch_size=32):
        '''
        Note: We only use this setting for Clevr2345, so that we can hard-code
        the probabilties here. If you want to apply it to a different scenario,
        you would need to change these.
        '''
        probs = [
            .19456788355146545395,
            .24355003312266127155,
            .25269546846185522711,
            .30918661486401804737,
        ]

        n_objects_prob = np.random.rand(batch_size)
        # Use builtin int/bool: np.int and np.bool were removed in
        # NumPy 1.24 (deprecated aliases of the builtins).
        n_objects = np.zeros_like(n_objects_prob).astype(int)
        p_cum = 0
        obj_n = [i for i in range(2, n_boxes + 1)]
        for idx_p in range(len(probs)):
            n_objects[
                (n_objects_prob >= p_cum) &
                (n_objects_prob < p_cum + probs[idx_p])
            ] = obj_n[idx_p]
            p_cum = p_cum + probs[idx_p]
            assert(p_cum <= 1.)

        object_existance = np.zeros((batch_size, n_boxes))
        for b_idx in range(batch_size):
            n_obj = n_objects[b_idx]
            if n_obj > 0:
                idx_true = np.random.choice(
                    n_boxes, size=(n_obj,), replace=False)
                object_existance[b_idx, idx_true] = True
        object_existance = object_existance.astype(bool)
        return object_existance

    def volume_render_image(self, latent_codes, camera_matrices,
                            transformations, mode='training',
                            it=0, return_alpha_map=False,
                            not_render_background=False,
                            only_render_background=False):
        """Volume-render a low-resolution feature map for the batch.

        transformations currently has no effect (see
        transform_points_to_box).
        """
        res = self.resolution_vol  # e.g. 16
        device = self.device
        n_steps = self.n_ray_samples  # e.g. 64
        n_points = res * res
        depth_range = self.depth_range  # [near, far]
        batch_size = latent_codes[0].shape[0]
        z_shape_obj, z_app_obj = latent_codes
        assert(not (not_render_background and only_render_background))

        # Arrange pixels: coordinates scaled to (-1, 1) on a meshgrid.
        pixels = arange_pixels((res, res), batch_size,
                               invert_y_axis=False)[1].to(device)  # (batch, res*res, 2)
        # Flip the y axis. NOTE(review): sign convention, presumably to
        # match invert_y_axis=False above -- confirm.
        pixels[..., -1] *= -1.
        # Project to 3D world.
        pixels_world = image_points_to_world(
            pixels, camera_mat=camera_matrices[0],
            world_mat=camera_matrices[1])  # (batch, res*res, 3) at unit depth
        camera_world = origin_to_world(
            n_points, camera_mat=camera_matrices[0],
            world_mat=camera_matrices[1])
        ray_vector = pixels_world - camera_world
        # batch_size x n_points x n_steps
        di = depth_range[0] + \
            torch.linspace(0., 1., steps=n_steps).reshape(1, 1, -1) * (
                depth_range[1] - depth_range[0])  # (1, 1, n_steps)
        di = di.repeat(batch_size, n_points, 1).to(device)
        if mode == 'training':
            # Stratified sampling instead of uniform depth samples.
            di = self.add_noise_to_interval(di)

        n_boxes = latent_codes[0].shape[1]  # single object -> 1
        feat, sigma = [], []
        p_i, r_i = self.get_evaluation_points(
            pixels_world, camera_world, di, transformations)
        z_shape_i, z_app_i = z_shape_obj[:, 0], z_app_obj[:, 0]

        feat_i, sigma_i = self.decoder(p_i, r_i, z_shape_i, z_app_i)

        if mode == 'training':
            # As done in NeRF, add noise during training.
            sigma_i += torch.randn_like(sigma_i)

        # Mask out values outside the (padded) unit box.
        padd = 0.1
        mask_box = torch.all(
            p_i <= 1. + padd, dim=-1) & torch.all(
                p_i >= -1. - padd, dim=-1)
        sigma_i[mask_box == 0] = 0.

        # Reshape
        sigma_i = sigma_i.reshape(batch_size, n_points, n_steps)
        feat_i = feat_i.reshape(batch_size, n_points, n_steps, -1)

        feat.append(feat_i)
        sigma.append(sigma_i)

        sigma = F.relu(torch.stack(sigma, dim=0))
        feat = torch.stack(feat, dim=0)

        if self.sample_object_existance:
            object_existance = self.get_object_existance(n_boxes, batch_size)
            # add ones for bg
            object_existance = np.concatenate(
                [object_existance, np.ones_like(
                    object_existance[..., :1])], axis=-1)
            object_existance = object_existance.transpose(1, 0)
            sigma_shape = sigma.shape
            sigma = sigma.reshape(sigma_shape[0] * sigma_shape[1], -1)
            object_existance = torch.from_numpy(object_existance).reshape(-1)
            # set alpha to 0 for respective objects
            sigma[object_existance == 0] = 0.
            sigma = sigma.reshape(*sigma_shape)

        # Composite
        sigma_sum, feat_weighted = self.composite_function(sigma, feat)

        # Get Volume Weights
        weights = self.calc_volume_weights(di, ray_vector, sigma_sum)
        feat_map = torch.sum(weights.unsqueeze(-1) * feat_weighted, dim=-2)

        # Reformat output
        feat_map = feat_map.permute(0, 2, 1).reshape(
            batch_size, -1, res, res)  # B x feat x h x w
        feat_map = feat_map.permute(0, 1, 3, 2)  # new to flip x/y

        if return_alpha_map:
            n_maps = sigma.shape[0]
            acc_maps = []
            for i in range(n_maps - 1):
                sigma_obj_sum = torch.sum(sigma[i:i+1], dim=0)
                weights_obj = self.calc_volume_weights(
                    di, ray_vector, sigma_obj_sum, last_dist=0.)
                acc_map = torch.sum(weights_obj, dim=-1, keepdim=True)
                acc_map = acc_map.permute(0, 2, 1).reshape(
                    batch_size, -1, res, res)
                acc_map = acc_map.permute(0, 1, 3, 2)
                acc_maps.append(acc_map)
            acc_map = torch.cat(acc_maps, dim=1)
            return feat_map, acc_map
        else:
            return feat_map
|
<reponame>SmartAcoustics/Kea
from myhdl import *
import random
import copy
from kea.test_utils.base_test import (
KeaTestCase, KeaVivadoVHDLTestCase, KeaVivadoVerilogTestCase)
from kea.axi import AxiStreamInterface
from .axis_constant_pad import axis_constant_pad
class TestAxisConstantPad(KeaTestCase):
    """Cosimulation tests for the axis_constant_pad AXI-stream block."""

    def setUp(self):
        # Common signals and interfaces shared by every test.
        self.clock = Signal(False)
        self.enable = Signal(True)
        self.axis_in = AxiStreamInterface(4, use_TLAST=False)
        self.axis_out = AxiStreamInterface(4, use_TLAST=False)

        # Drive the input side randomly; TREADY is produced by the DUT.
        axis_in_signal_types = {
            'TDATA': 'random',
            'TVALID': 'random',
            'TREADY': 'output'}

        # Output side is produced by the DUT; downstream TREADY is random.
        axis_out_signal_types = {
            'TDATA': 'output',
            'TVALID': 'output',
            'TREADY': 'random'}

        self.default_args = {
            'clock': self.clock,
            'enable': self.enable,
            'axis_in': self.axis_in,
            'axis_out': self.axis_out}

        self.default_arg_types = {
            'clock': 'clock',
            'enable': 'custom',
            'axis_in': axis_in_signal_types,
            'axis_out': axis_out_signal_types}

    def test_axis_in_interface(self):
        '''The axis_in port should be an instance of
        ``kea.axi.AxiStreamInterface``.

        Anything else should raise a ValueError.
        '''
        args = self.default_args.copy()

        args['axis_in'] = 'not a valid interface'

        self.assertRaisesRegex(
            ValueError, 'Invalid axis_in port', axis_constant_pad, **args)

    def test_axis_out_interface(self):
        '''The axis_out port should be an instance of
        ``kea.axi.AxiStreamInterface``.

        Anything else should raise a ValueError.
        '''
        args = self.default_args.copy()

        args['axis_out'] = 'not a valid interface'

        self.assertRaisesRegex(
            ValueError, 'Invalid axis_out port', axis_constant_pad, **args)

    def test_zero_output(self):
        '''The axis_constant_pad block should always output a value if it is
        enabled.

        If the input is a valid transaction, then the output TDATA should
        be the input TDATA.

        If the input is not a valid transaction then the output TDATA should
        be constant. If constant is not set, it should default to zero.

        No attempt should be made to buffer inputs, so axis_out.TREADY is
        ignored.
        '''
        samples = 500

        @block
        def enable_driver(clock, enable):
            # Randomly deassert enable ~10% of cycles to exercise the
            # disabled state.

            @always(clock.posedge)
            def driver():
                if random.random() > 0.9:
                    enable.next = False
                else:
                    enable.next = True

            return driver

        @block
        def checker(clock, enable, axis_in, axis_out):
            # Holds the value the DUT is expected to present on the NEXT
            # cycle (one-cycle pipeline through the pad block).
            check_data = {
                'next_output': None}

            @always(clock.posedge)
            def check():

                if enable:
                    assert axis_in.TREADY == True

                    if check_data['next_output'] is not None:
                        assert axis_out.TDATA == check_data['next_output']
                        assert axis_out.TVALID == True

                        if axis_in.TREADY and axis_in.TVALID:
                            check_data['next_output'] = (
                                copy.copy(axis_in.TDATA.val))
                        else:
                            # No valid input -> pad with the default zero.
                            check_data['next_output'] = 0

                    elif axis_in.TREADY and axis_in.TVALID:
                        check_data['next_output'] = (
                            copy.copy(axis_in.TDATA.val))

                    else:
                        check_data['next_output'] = None

                else:
                    # Disabled: no output and no backpressure release.
                    assert axis_out.TVALID == False
                    assert axis_in.TREADY == False
                    check_data['next_output'] = None

            return check

        custom_sources = [
            (enable_driver, (self.clock, self.enable), {}),
            (checker,
             (self.clock, self.enable, self.axis_in, self.axis_out), {})]

        dut_results, ref_results = self.cosimulate(
            samples, axis_constant_pad, axis_constant_pad,
            self.default_args, self.default_arg_types,
            custom_sources=custom_sources)

        self.assertTrue(dut_results == ref_results)

    def test_constant_output(self):
        '''The axis_constant_pad block should always output a value if it is
        enabled.

        If the input is a valid transaction, then the output TDATA should
        be the input TDATA.

        If the input is not a valid transaction then the output TDATA should
        be constant.

        No attempt should be made to buffer inputs, so axis_out.TREADY is
        ignored.
        '''
        samples = 500

        # Pad with an explicit constant of 1 instead of the default zero.
        self.default_args['constant'] = 1
        self.default_arg_types['constant'] = 'non-signal'

        @block
        def enable_driver(clock, enable):
            # Randomly deassert enable ~10% of cycles.

            @always(clock.posedge)
            def driver():
                if random.random() > 0.9:
                    enable.next = False
                else:
                    enable.next = True

            return driver

        @block
        def checker(clock, enable, axis_in, axis_out):
            # Expected DUT output on the next cycle (see test_zero_output).
            check_data = {
                'next_output': None}

            @always(clock.posedge)
            def check():

                if enable:
                    assert axis_in.TREADY == True

                    if check_data['next_output'] is not None:
                        assert axis_out.TDATA == check_data['next_output']
                        assert axis_out.TVALID == True

                        if axis_in.TREADY and axis_in.TVALID:
                            check_data['next_output'] = (
                                copy.copy(axis_in.TDATA.val))
                        else:
                            # No valid input -> pad with the constant (1).
                            check_data['next_output'] = 1

                    elif axis_in.TREADY and axis_in.TVALID:
                        check_data['next_output'] = (
                            copy.copy(axis_in.TDATA.val))

                    else:
                        check_data['next_output'] = None

                else:
                    assert axis_out.TVALID == False
                    assert axis_in.TREADY == False
                    check_data['next_output'] = None

            return check

        custom_sources = [
            (enable_driver, (self.clock, self.enable), {}),
            (checker,
             (self.clock, self.enable, self.axis_in, self.axis_out), {})]

        dut_results, ref_results = self.cosimulate(
            samples, axis_constant_pad, axis_constant_pad,
            self.default_args, self.default_arg_types,
            custom_sources=custom_sources)

        self.assertTrue(dut_results == ref_results)
class TestAxisConstantPadVivadoVHDL(
    KeaVivadoVHDLTestCase, TestAxisConstantPad):
    # Re-runs TestAxisConstantPad through Vivado VHDL cosimulation.
    pass
class TestAxisConstantPadVivadoVerilog(
    KeaVivadoVerilogTestCase, TestAxisConstantPad):
    # Re-runs TestAxisConstantPad through Vivado Verilog cosimulation.
    pass
|
from __future__ import division
from visual import *
def vector3d_add(vector1, vector2):
    """Component-wise sum of two 3-vectors, returned as a new list."""
    return [vector1[axis] + vector2[axis] for axis in range(3)]
def vector3d_sub(vector1, vector2):
    """Component-wise difference (vector1 - vector2) of two 3-vectors."""
    return [vector1[axis] - vector2[axis] for axis in range(3)]
def vector3d_scale(vector1, scaling_factor):
    """Multiply each component of a 3-vector by scaling_factor."""
    return [vector1[axis] * scaling_factor for axis in range(3)]
def vector3d_dot(vector1, vector2):
    """Dot product of two 3-vectors."""
    return sum(vector1[axis] * vector2[axis] for axis in range(3))
def vector3d_cross(vector1, vector2):
    """Right-handed cross product of two 3-vectors."""
    a1, a2, a3 = vector1[0], vector1[1], vector1[2]
    b1, b2, b3 = vector2[0], vector2[1], vector2[2]
    return [a2 * b3 - a3 * b2,
            a3 * b1 - a1 * b3,
            a1 * b2 - a2 * b1]
def vector3d_normalize(vector):
    """Return *vector* scaled to unit length (a new 3-element list).

    Bug fix: the original assigned into ``norm_vector`` without ever
    creating it, so every call raised NameError.
    """
    mag_vector = pow((vector[0]**2 + vector[1]**2 + vector[2]**2), .5)
    norm_vector = [0, 0, 0]
    norm_vector[0] = vector[0]/mag_vector
    norm_vector[1] = vector[1]/mag_vector
    norm_vector[2] = vector[2]/mag_vector
    return norm_vector
def dcm_orthonormalize(dcm):
    """Re-orthonormalize a direction cosine matrix in place and return it.

    Splits the X/Y row non-orthogonality error equally between the two
    rows, rebuilds Z as X x Y, then renormalizes all rows (the standard
    Premerlani/Bizard DCM renormalization step).

    Bug fix: the original corrected row 1 from the ALREADY-corrected row
    0 (``dcm[1] = dcm[0] + err*X``); both corrections must use the
    original rows, i.e. ``dcm[1] = dcm[1] + err*X_original``.
    """
    err = -vector3d_dot(dcm[0], dcm[1])/2
    # Correction terms, both computed from the ORIGINAL rows.
    scaled_dcm_0 = vector3d_scale(dcm[0], err)
    scaled_dcm_1 = vector3d_scale(dcm[1], err)
    dcm[0] = vector3d_add(dcm[0], scaled_dcm_1)
    dcm[1] = vector3d_add(dcm[1], scaled_dcm_0)
    # Z = X x Y
    dcm[2] = vector3d_cross(dcm[0], dcm[1])
    # re-normalize
    dcm[0] = vector3d_normalize(dcm[0])
    dcm[1] = vector3d_normalize(dcm[1])
    dcm[2] = vector3d_normalize(dcm[2])
    return dcm
def dcm_rotate(dcm, vector):
    """Rotate the DCM by the small rotation *vector* and return it.

    Update formula: R(t+1) = R(t) + dR(t) = R(t) + w x R(t), applied row
    by row, then re-orthonormalized.

    Bug fix: the original called the undefined name ``vectored_add``
    (typo for ``vector3d_add``), raising NameError on every call.
    """
    for i in range(0, 3):
        dr = vector3d_cross(vector, dcm[i])
        dcm[i] = vector3d_add(dcm[i], dr)
    # make matrix orthonormal again
    dcm_orthonormalize(dcm)
    return dcm
# Identity initial attitude estimate (rows are the body X/Y/Z axes).
dcmGyro = [[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1]]
interval_ms = 20  # sensor sampling period in milliseconds

# NOTE(review): Kacc, Imag, w, ACC_WEIGHT and MAG_WEIGHT are never defined
# in this file -- presumably they come from omitted sensor-acquisition
# code (see the get_*_data() placeholders). Confirm before running.
while 1:
    # get_Acc_data(); fills the Kacc[3] accelerometer vector.
    # Fix: vector3d_normalize returns a new list (it does not mutate its
    # argument), so the result must be assigned back.
    Kacc = vector3d_normalize(Kacc)
    # Correction vector pulling dcmGyro's K (Z) row towards the measured
    # gravity direction. (Fix: was bound to 'wa' but read below as wA.)
    wA = vector3d_cross(dcmGyro[2], Kacc)
    # In the absence of a magnetometer, assume the North vector (I) is
    # always in the XZ plane of the device (y coordinate is 0).
    Imag[0] = pow((1 - dcmGyro[0][2]**2), .5)
    Imag[1] = 0
    Imag[2] = dcmGyro[0][2]
    # (Fix: was bound to 'wm' but read below as wM.)
    wM = vector3d_cross(dcmGyro[0], Imag)
    # --------
    # dcmGyro
    # --------
    # get_Gyro_data() in w[3]
    for i in range(0, 3):
        # Integrate the gyro rate over the sample interval (was a bare
        # literal 20 duplicating interval_ms).
        w[i] *= interval_ms
        # Weighted fusion of gyro, accelerometer and magnetometer
        # corrections. (Fix: 'MAG*WEIGHT' was a typo for MAG_WEIGHT.)
        w[i] = (w[i] + ACC_WEIGHT*wA[i] + MAG_WEIGHT*wM[i])/(1.0 + ACC_WEIGHT + MAG_WEIGHT)
    dcmGyro = dcm_rotate(dcmGyro, w)
|
<filename>openmdao/components/meta_model.py
""" Metamodel provides basic Meta Modeling capability."""
import sys
import numpy as np
from copy import deepcopy
from openmdao.core.component import Component, _NotSet
from six import iteritems
class MetaModel(Component):
    """Class that creates a reduced order model for outputs from
    parameters. Each output may have its own surrogate model.
    Training inputs and outputs are automatically created with
    'train:' prepended to the corresponding parameter/output name.
    For a Float variable, the training data is an array of length m.

    Options
    -------
    deriv_options['type'] : str('user')
        Derivative calculation type ('user', 'fd', 'cs')
        Default is 'user', where derivative is calculated from
        user-supplied derivatives. Set to 'fd' to finite difference
        this system. Set to 'cs' to perform the complex step
        if your components support it.
    deriv_options['form'] : str('forward')
        Finite difference mode. (forward, backward, central)
    deriv_options['step_size'] : float(1e-06)
        Default finite difference stepsize
    deriv_options['step_calc'] : str('absolute')
        Set to absolute, relative
    deriv_options['check_type'] : str('fd')
        Type of derivative check for check_partial_derivatives. Set
        to 'fd' to finite difference this system. Set to
        'cs' to perform the complex step method if
        your components support it.
    deriv_options['check_form'] : str('forward')
        Finite difference mode: ("forward", "backward", "central")
        During check_partial_derivatives, the difference form that is used
        for the check.
    deriv_options['check_step_calc'] : str('absolute')
        Set to 'absolute' or 'relative'. Default finite difference
        step calculation for the finite difference check in check_partial_derivatives.
    deriv_options['check_step_size'] : float(1e-06)
        Default finite difference stepsize for the finite difference check
        in check_partial_derivatives
    deriv_options['linearize'] : bool(False)
        Set to True if you want linearize to be called even though you are using FD.
    """

    def __init__(self):
        super(MetaModel, self).__init__()
        # This surrogate will be used for all outputs that don't have
        # a specific surrogate assigned to them
        self.default_surrogate = None
        # keep list of params and outputs that are not the training vars
        self._surrogate_param_names = []
        self._surrogate_output_names = []
        # training will occur on first execution
        self.train = True
        self._training_input = np.zeros(0)
        self._training_output = {}
        # When set to False (default), the metamodel retrains with the new
        # dataset whenever the training data values are changed. When set to
        # True, the new data is appended to the old data and all of the data
        # is used to train.
        self.warm_restart = False
        # keeps track of which sur_<name> slots are full
        self._surrogate_overrides = set()
        # total flattened width of all surrogate inputs
        self._input_size = 0

    def add_param(self, name, val=_NotSet, training_data=None, **kwargs):
        """ Add a `param` input to this component and a corresponding
        training parameter.

        Args
        ----
        name : string
            Name of the input.
        val : float or ndarray or object
            Initial value for the input.
        training_data : float or ndarray
            training data for this variable. Optional, can be set
            by the problem later.
        """
        if training_data is None:
            training_data = []
        super(MetaModel, self).add_param(name, val, **kwargs)
        # The shadow 'train:<name>' param holds the raw training samples.
        super(MetaModel, self).add_param('train:'+name, val=training_data, pass_by_obj=True)
        input_size = self._init_params_dict[name]['size']
        self._surrogate_param_names.append((name, input_size))
        self._input_size += input_size

    def add_output(self, name, val=_NotSet, training_data=None, **kwargs):
        """ Add an output to this component and a corresponding
        training output.

        Args
        ----
        name : string
            Name of the variable output.
        val : float or ndarray
            Initial value for the output. While the value is overwritten during
            execution, it is useful for inferring size.
        training_data : float or ndarray
            training data for this variable. Optional, can be set
            by the problem later.
        """
        if training_data is None:
            training_data = []
        super(MetaModel, self).add_output(name, val, **kwargs)
        super(MetaModel, self).add_param('train:'+name, val=training_data, pass_by_obj=True)
        try:
            output_shape = self._init_unknowns_dict[name]['shape']
        except KeyError:  # then its some kind of object, and just assume scalar training data
            output_shape = 1
        self._surrogate_output_names.append((name, output_shape))
        self._training_output[name] = np.zeros(0)
        # Record whether this output will fall back on the default surrogate.
        if self._init_unknowns_dict[name].get('surrogate'):
            self._init_unknowns_dict[name]['default_surrogate'] = False
        else:
            self._init_unknowns_dict[name]['default_surrogate'] = True

    def _setup_variables(self):
        """Returns our params and unknowns dictionaries,
        re-keyed to use absolute variable names.

        Also instantiates surrogates for the output variables
        that use the default surrogate.
        """
        # create an instance of the default surrogate for outputs that
        # did not have a surrogate specified
        if self.default_surrogate is not None:
            for name, shape in self._surrogate_output_names:
                if self._init_unknowns_dict[name].get('default_surrogate'):
                    # deepcopy so each output trains its own surrogate state
                    surrogate = deepcopy(self.default_surrogate)
                    self._init_unknowns_dict[name]['surrogate'] = surrogate
        # training will occur on first execution after setup
        self.train = True
        return super(MetaModel, self)._setup_variables()

    def check_setup(self, out_stream=sys.stdout):
        """Write a report to the given stream indicating any potential problems found
        with the current configuration of this ``MetaModel``.

        Args
        ----
        out_stream : a file-like object, optional
        """
        # All outputs must have surrogates assigned
        # either explicitly or through the default surrogate
        if self.default_surrogate is None:
            no_sur = []
            for name, shape in self._surrogate_output_names:
                surrogate = self._init_unknowns_dict[name].get('surrogate')
                if surrogate is None:
                    no_sur.append(name)
            if len(no_sur) > 0:
                msg = ("No default surrogate model is defined and the following"
                       " outputs do not have a surrogate model:\n%s\n"
                       "Either specify a default_surrogate, or specify a "
                       "surrogate model for all outputs."
                       % no_sur)
                out_stream.write(msg)

    def solve_nonlinear(self, params, unknowns, resids):
        """Predict outputs.
        If the training flag is set, train the metamodel first.

        Args
        ----
        params : `VecWrapper`, optional
            `VecWrapper` containing parameters. (p)
        unknowns : `VecWrapper`, optional
            `VecWrapper` containing outputs and states. (u)
        resids : `VecWrapper`, optional
            `VecWrapper` containing residuals. (r)
        """
        # Train first
        if self.train:
            self._train()
        # Now Predict for current inputs
        inputs = self._params_to_inputs(params)
        for name, shape in self._surrogate_output_names:
            surrogate = self._init_unknowns_dict[name].get('surrogate')
            if surrogate:
                unknowns[name] = surrogate.predict(inputs)
            else:
                raise RuntimeError("Metamodel '%s': No surrogate specified for output '%s'"
                                   % (self.pathname, name))

    def _params_to_inputs(self, params, out=None):
        """
        Converts from a dictionary of parameters to the ndarray input.
        Complex param values promote the whole input array to complex
        (needed for complex-step derivatives).
        """
        array_real = True
        if out is None:
            inputs = np.zeros(self._input_size)
        else:
            inputs = out
        idx = 0
        for name, sz in self._surrogate_param_names:
            val = params[name]
            if isinstance(val, list):
                val = np.array(val)
            if isinstance(val, np.ndarray):
                if array_real and np.issubdtype(val.dtype, complex):
                    array_real = False
                    inputs = inputs.astype(complex)
                inputs[idx:idx + sz] = val.flat
                idx += sz
            else:
                inputs[idx] = val
                idx += 1
        return inputs

    def linearize(self, params, unknowns, resids):
        """
        Returns the Jacobian as a dictionary whose keys are tuples of the form
        ('unknown', 'param') and whose values are ndarrays.

        Args
        ----
        params : `VecWrapper`
            `VecWrapper` containing parameters. (p)
        unknowns : `VecWrapper`
            `VecWrapper` containing outputs and states. (u)
        resids : `VecWrapper`
            `VecWrapper` containing residuals. (r)

        Returns
        -------
        dict
            Dictionary whose keys are tuples of the form ('unknown', 'param')
            and whose values are ndarrays.
        """
        jac = {}
        inputs = self._params_to_inputs(params)
        for uname, _ in self._surrogate_output_names:
            # check_setup() reports missing surrogates before we get here
            surrogate = self._init_unknowns_dict[uname].get('surrogate')
            sjac = surrogate.linearize(inputs)
            idx = 0
            for pname, sz in self._surrogate_param_names:
                jac[(uname, pname)] = sjac[:, idx:idx+sz]
                idx += sz
        return jac

    def _train(self):
        """
        Train the metamodel, if necessary, using the provided training data.
        Raises RuntimeError if the training arrays disagree on sample count.
        """
        num_sample = None
        for name, sz in self._surrogate_param_names:
            val = self.params['train:' + name]
            if num_sample is None:
                num_sample = len(val)
            elif len(val) != num_sample:
                msg = "MetaModel: Each variable must have the same number"\
                      " of training points. Expected {0} but found {1} "\
                      "points for '{2}'."\
                      .format(num_sample, len(val), name)
                raise RuntimeError(msg)
        for name, shape in self._surrogate_output_names:
            val = self.params['train:' + name]
            if len(val) != num_sample:
                msg = "MetaModel: Each variable must have the same number" \
                      " of training points. Expected {0} but found {1} " \
                      "points for '{2}'." \
                      .format(num_sample, len(val), name)
                raise RuntimeError(msg)
        if self.warm_restart:
            # append new samples after the previously collected ones
            num_old_pts = self._training_input.shape[0]
            inputs = np.zeros((num_sample + num_old_pts, self._input_size))
            if num_old_pts > 0:
                inputs[:num_old_pts, :] = self._training_input
            new_input = inputs[num_old_pts:, :]
        else:
            inputs = np.zeros((num_sample, self._input_size))
            new_input = inputs
        self._training_input = inputs
        # add training data for each input
        if num_sample > 0:
            idx = 0
            for name, sz in self._surrogate_param_names:
                val = self.params['train:' + name]
                if isinstance(val[0], float):
                    new_input[:, idx] = val
                    idx += 1
                else:
                    for row_idx, v in enumerate(val):
                        if not isinstance(v, np.ndarray):
                            v = np.array(v)
                        new_input[row_idx, idx:idx+sz] = v.flat
                    # Bug fix: idx was never advanced for array-valued params,
                    # so a subsequent param would overwrite these columns.
                    idx += sz
        # add training data for each output
        for name, shape in self._surrogate_output_names:
            if num_sample > 0:
                output_size = np.prod(shape)
                if self.warm_restart:
                    outputs = np.zeros((num_sample + num_old_pts,
                                        output_size))
                    if num_old_pts > 0:
                        outputs[:num_old_pts, :] = self._training_output[name]
                    self._training_output[name] = outputs
                    new_output = outputs[num_old_pts:, :]
                else:
                    outputs = np.zeros((num_sample, output_size))
                    self._training_output[name] = outputs
                    new_output = outputs
                val = self.params['train:' + name]
                if isinstance(val[0], float):
                    new_output[:, 0] = val
                else:
                    for row_idx, v in enumerate(val):
                        if not isinstance(v, np.ndarray):
                            v = np.array(v)
                        new_output[row_idx, :] = v.flat
            surrogate = self._init_unknowns_dict[name].get('surrogate')
            if surrogate is not None:
                surrogate.train(self._training_input, self._training_output[name])
        self.train = False

    def _get_fd_params(self):
        """
        Get the list of parameters that are needed to perform a
        finite difference on this `Component`.

        Returns
        -------
        list of str
            List of names of params for this `Component` .
        """
        # exclude pass-by-object vars and the 'train:' shadow params
        return [k for k, acc in iteritems(self.params._dat)
                if not (acc.pbo or k.startswith('train'))]

    def _get_fd_unknowns(self):
        """
        Get the list of unknowns that are needed to perform a
        finite difference on this `Component`.

        Returns
        -------
        list of str
            List of names of unknowns for this `Component`.
        """
        return [k for k, acc in iteritems(self.unknowns._dat)
                if not (acc.pbo or k.startswith('train'))]
|
# Import the pulp linear-programming library
from pulp import LpProblem, LpStatus, lpSum, LpVariable, LpMinimize
class Metodo_exato(object):
    """Exact traveling-salesman-style delivery router.

    Builds a binary assignment model with Miller-Tucker-Zemlin (MTZ)
    sub-tour-elimination constraints and solves it with PuLP.
    """

    def __init__(self, lista_destinos, lista_distancias):
        # Mapping of destination id (string) -> data record with at least
        # the "Cliente" and "Endereço" fields -- TODO confirm schema.
        self.lista_destinos = lista_destinos
        # Distance matrix keyed by (origin, destination) index pairs.
        self.lista_distancias = lista_distancias

    # Function that returns the delivery sequence by client name or address
    def decodificar(self, percurso, indice):
        """Decode the chosen x_i_j variable names into an ordered list of
        field *indice* ("Cliente" or "Endereço") per destination record."""
        # Variable that stores the successor map of the route
        lista = {}
        # Loop that turns the routing program's output (variable names of
        # the form "x_i_j") into a successor map: lista[i] = j
        for i in range(len(percurso)):
            valor = percurso[i].split("_")
            lista[valor[1]] = valor[2]
        n = 0
        i = "0"
        sequencia = []
        # Walk the successor chain starting from the depot node "0"
        while n < len(self.lista_destinos):
            sequencia.append(i)
            i = lista[i]
            n = n + 1
        # Build the final delivery sequence with the requested name/address field
        sequencia_final = []
        for i in sequencia:
            dados = self.lista_destinos[str(i)]
            sequencia_final.append(dados[indice])
        return sequencia_final

    def calcular(self):
        """Build and solve the routing model; return total distance plus the
        delivery sequences by client name and by address."""
        # Number of destinations
        n = len(self.lista_destinos)
        # Loop that fills the diagonal (equal origin and destination) with a
        # large penalty so the solver never selects i -> i
        for i in range(n):
            self.lista_distancias[i,i] = 100000
        # Create the model (minimization)
        model = LpProblem(name="PCV", sense=LpMinimize)
        # Decision variables: x[i][j] = 1 if leg i->j is used; u[i] = visit order
        x = LpVariable.dicts("x",(range(n), range(n)), cat="Binary")
        u = LpVariable.dicts("u",range(n),lowBound=0, upBound = n-1, cat="Integer")
        # Objective function: total traveled distance
        obj_func = (lpSum(x[i][j] * self.lista_distancias[i,j] for i in range(n) for j in range(n)))
        model += obj_func
        # Row constraints: leave each node exactly once
        for i in range(n):
            model += (lpSum(x[i][j] for j in range(n) if j!=i) == 1)
        # Column constraints: enter each node exactly once
        for j in range(n):
            model += (lpSum(x[i][j] for i in range(n) if i!=j) == 1)
        # MTZ constraints to prevent disconnected sub-tours
        for i in range(1,n):
            for j in range(1,n):
                if i!=j:
                    model += (u[i] - u[j] + x[i][j]*n <= n-1)
                else:
                    model += (u[i] - u[i] == 0)
        # Run the solver
        model.solve()
        # Optimal objective value (total distance)
        otimo = model.objective.value()
        # Keep only the selected routing variables (names starting with "x")
        percurso = []
        for var in model.variables():
            if var.value() > 0:
                nome = var.name
                if nome[0] == "x":
                    percurso.append(var.name)
                    print(var.name)
        # Decode the route into the sequence of client names
        sequencia_final_nome = self.decodificar(percurso, "Cliente")
        # Decode the route into the sequence of client addresses
        sequencia_final_endereco = self.decodificar(percurso, "Endereço")
        # Store the results in a dictionary for the return value
        roteiro = {}
        roteiro["distancia"] = otimo
        roteiro["sequencia"] = sequencia_final_nome
        roteiro["sequencia_endereco"] = sequencia_final_endereco
        # Return the result
        return roteiro
|
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
import os
import os.path
import re
import subprocess
import shutil
import Core.Common.FUtils as FUtils
from Core.Logic.FSettingEntry import *
from Scripts.FApplication import *
class FXComposer (FApplication):
    """ Introduces FX Composer 2 into the testing framework.

    Generates a Python script that FX Composer executes to import, render,
    and export COLLADA test files (Python 2 codebase).
    """
    __SCRIPT_EXTENSION = ".py"
    __EXTENSION = ".dae"
    __HEX_PATTERN = "^0x[\dabcdef]{8}$" # Describes a valid RGBA hex pattern
    # Names of the render settings exposed to the test framework UI.
    __RENDER_PORT = "Render Port"
    __RENDER_BACKGROUND = "Background Color"
    __RENDER_CAMERA = "Camera"
    __RENDER_ANIMATION_START = "Animation Start Frame"
    __RENDER_ANIMATION_END = "Animation End Frame"
    __RENDER_ANIMATION_FRAMES = "Animation Frames"
    __RENDER_STILL_START = "Non-Animation Start Time"
    __RENDER_STILL_END = "Non-Animation End Time"
    __RENDER_STILL_FRAMES = "Non-Animation Frames"
    __RENDER_OUTPUT_FORMAT = "Output Format"
    __RENDER_WIDTH = "X resolution"
    __RENDER_HEIGHT = "Y resolution"
    __RENDER_PORT_OPTIONS = ["OpenGL", "Direct3D9"]
    __RENDER_PORT_OGL = 0
    __RENDER_PORT_D3D = 1
    # (name, description, default) triples fed to FSettingEntry.
    __RENDER_OPTIONS = [
        (__RENDER_PORT, "", __RENDER_PORT_OPTIONS[__RENDER_PORT_OGL]),
        (__RENDER_BACKGROUND, "", "0xFFFFFFFF"),
        (__RENDER_CAMERA, "", "testCamera"),
        (__RENDER_ANIMATION_START, "", "0.0"),
        (__RENDER_ANIMATION_END, "", "2.0"),
        (__RENDER_ANIMATION_FRAMES, "", "15"),
        (__RENDER_STILL_START, "", "0.0"),
        (__RENDER_STILL_END, "", "0.0"),
        (__RENDER_STILL_FRAMES, "", "1"),
        (__RENDER_WIDTH, "", "512"),
        (__RENDER_HEIGHT, "", "512"),
        (__RENDER_OUTPUT_FORMAT, "", "png")]

    def __init__(self, configDict):
        """__init__(configDict) -> FXComposer"""
        FApplication.__init__(self, configDict)
        self.__script = None          # file handle of the generated FXC script
        self.__workingDir = None
        self.__testCount = 0
        self.__hex_prog = re.compile(self.__HEX_PATTERN)

    def GetPrettyName(self):
        """GetPrettyName() -> str
        Implements FApplication.GetPrettyName()
        """
        return "NVIDIA FX Composer 2"

    def GetOperationsList(self):
        """GetOperationsList() -> list_of_str
        Implements FApplication.GetOperationsList()
        """
        return [IMPORT, RENDER, EXPORT]

    def GetSettingsForOperation(self, operation):
        """GetSettingsForOperation(operation) -> list_of_FSettingEntry
        Implements FApplication.GetSettingsForOperation()
        """
        if (operation == RENDER):
            options = []
            for entry in FXComposer.__RENDER_OPTIONS:
                options.append(FSettingEntry(*entry))
            return options
        else:
            return []

    def BeginScript(self, workingDir):
        """BeginScript(workingDir) -> None
        Implements FApplication.BeginScript()

        Opens the generated FXC script and writes the helper-function
        preamble. NOTE: several string lines below intentionally omit a
        trailing '+'; adjacent string literals concatenate implicitly.
        """
        filename = ("script" + str(self.applicationIndex) +
                    FXComposer.__SCRIPT_EXTENSION)
        self.__script = open(os.path.join(workingDir, filename) , "w")
        # Write FXC specific script imports
        self.__script.write(
            "from fxcapi import *\n"+
            "from FXComposer.Scene.Commands import *\n"+
            "from FXComposer.UI.Animation import *\n"+ # For animation playback
            "from FXComposer.IDE.Services import *\n\n"+
            "import os, re\n\n"+
            "CTS_DEFAULT_CAMERAS = re.compile(r\"(Top)|(Front)|(Right)|(Perspective)$\")\n\n"
            # Define functions to overcome FXC's Python script limit from the command line
            # SaveFile: Save imported assets to a COLLADA file.
            # @param path location to save the COLLADA file
            # @param name name of the COLLADA file
            # @param log error log
            "def ctsSaveFile(path, name, log):\n"+
            " try:\n"+
            "  FXProjectService.Instance.SaveAssetCollection(FXRuntime.Instance.Library.ActiveAssetCollection, FXUri(path))\n"+
            " except:\n"+
            "  log.write(\"Error: %s failed to save!\\n\" % name)\n"+
            " else:\n"+
            "  log.write(\"%s saved successfully.\\n\" % name)\n\n"+
            # ImportFile: Import a pre-existing COLLADA file into FXC and then save a copy.
            # @see SaveFile
            # @param path the import COLLADA file's location
            # @param name name of the saved COLLADA file
            # @param logname error log's filepath
            # @param output location to save the COLLADA file
            # @return a boolean flag that is true iff the COLLADA file successfully imports
            "def ctsImportFile(path,name,logname,output):\n"+
            " FXProjectService.Instance.ResetProject()\n"+
            " import_error_log = open(logname,'w')\n"+
            " import_successful = True\n"+
            " try:\n"+
            "  FXProjectService.Instance.AddDocument(FXUri(path))\n"+
            " except:\n"+
            "  import_error_log.write(\"Error: %s failed to load!\\n\" % name)\n"+
            "  print \"Error: %s failed to load!\" % name\n"+
            "  import_successful = False\n"+
            " else:\n"+
            "  import_error_log.write(\"%s loaded successfully.\\n\" % name)\n"+
            "  ctsSaveFile(output,name,import_error_log)\n"
            " import_error_log.close()\n"+
            " return import_successful\n\n"+
            # BindToTestLight: Bind all materials to the first light found in the active scene.
            # @param scene the active scene
            "def ctsBindToTestLight(scene):\n"+
            " lightlist = []\n"+
            " [lightlist.append(x) for x in scene.FindItems(FXLightInstance)]\n"+
            " if len(lightlist) > 0:\n"+
            "  top_light = lightlist[0]\n"+
            "  FXDefaultLightBinding.BindMaterialsToLightInstance(top_light.Uri,top_light)\n\n"+
            # GetTestCamera: Find a render camera whose name matches the render camera test setting
            # If there is no match, use the first non-default camera found in the scene.
            # If there are only default lights in the scene, use the default freeform camera.
            # @param scene the active scene
            # @param camera_name the render camera test setting to match
            # @param log error log
            # @return the render camera
            "def ctsGetTestCamera(scene,camera_name,log):\n"+
            " test_candidates = []\n"+
            " [test_candidates.append(x) for x in scene.FindItems(FXCameraInstance) if CTS_DEFAULT_CAMERAS.match(x.Name) == None]\n"+
            " if test_candidates == []:\n"+
            "  log.write(\"Warning: Can't find user specified camera. Using default perspective camera instead.\\n\")\n"+
            "  return scene.GetDefaultCamera(FXCameraType.Freeform)\n"+
            " else:\n"+
            "  templist = [test_candidates.pop()]\n"+
            "  [templist.append(x) for x in test_candidates if x.ParentNode.Name.lower().startswith(camera_name) or x.Camera.Name.lower().startswith(camera_name)]\n"+
            "  return templist.pop()\n\n"+
            # SetupTestCamera: Find a suitable render camera for the test and bind it to a render port.
            # @see GetTestCamera
            # @param port the render port
            "def ctsSetupTestCamera(port,scene,camera_name,log):\n"+
            " testcamera = ctsGetTestCamera(scene,camera_name,log)\n"+
            " if testcamera != None:\n"+
            "  port.CurrentCamera = testcamera\n\n"+
            # SetupRender: Bind the test lights, set the background color and dimensions of the render port,
            # hide all non-geometry (lights, grid, HUD), and activate the render port.
            # @see BindToTestLight
            # @param port the render port
            # @param scene the active scene
            # @param width desired render port width
            # @param height desired render port height
            # @param backgroundColor desired render port background color
            "def ctsSetupRender(port,scene,width,height,backgroundColor):\n"+
            " ctsBindToTestLight(scene)\n"+
            " FxcScene.ShowGrid(scene, 0)\n"+
            " FxcScene.ShowCameras(scene, 0)\n"+
            " FxcScene.ShowLights(scene, 0)\n"+
            " FxcRender.EnableRenderPortHud(port, 0)\n"+
            " FxcScene.SetBackgroundColor(FXMatrix.Vector4(backgroundColor[0],backgroundColor[1],backgroundColor[2],backgroundColor[3]))\n"+
            " FxcRender.SetActiveRenderPort(port)\n"+
            " FxcRender.SetRenderPortSize(port,width,height)\n\n"+
            # Clamp: A standard clamp function -- clamp value x into the range [min_val,max_val].
            # @param x value to clamp
            # @param min_val minimum
            # @param max_val maximum
            # @return the clamped value
            "def ctsClamp(x,min_val,max_val):\n"+
            " curr_val = x\n"
            " if curr_val > max_val:\n"+
            "  curr_val = max_val\n"+
            " elif curr_val < min_val:\n"+
            "  curr_val = min_val\n"+
            " return curr_val\n\n"+
            # GetOutputFile: Format a render file name into the form [filename]0*x.[extension] where x is the frame number
            # @param filename the output file's path or basename
            # @param numDigits the maximum number digits required to write the largest frame number (in absolute value)
            # @param frame an integer representing current frame number
            # @param frameNumber a string representing the frame number iff the scene is animated
            # @param extension the output file's extensions (.png, .bmp, etc)
            # @return the formatted output file name
            "def ctsGetOutputFile(filename,numDigit,frame,frameNumber,extension):\n"
            " out_file = filename\n" +
            " paddingCount = numDigit - len(str(frame))\n"+
            " for j in range(paddingCount):\n"+
            "  out_file = out_file + '0'\n"+
            " return out_file + frameNumber + extension\n\n"+
            # RenderFrames: Render out all the frames.
            # @see GetOutputFile
            # @param port the active renderport
            # @param frameCount number of frames to render out
            # @param filename the output files' path + basename
            # @param cts_start start of the animation (via CTS test setting)
            # @param cts_end end of the animation (via CTS test setting)
            # @param outputFormat the output files' file format extension (.png, .bmp, etc]
            "def ctsRenderFrames(port,frameCount,cts_start,cts_end,filename,outputFormat):\n"+
            " starttime, endtime = FxcAnimationPlayback.GetStartFrame(), FxcAnimationPlayback.GetEndFrame()\n"+
            " numDigits = len(str(frameCount))\n"+
            " for frame in range(frameCount):\n"+
            # Prepare a safe value for linear interpolation
            "  if frameCount > 1:\n"+
            "   safe_fc = float(frameCount-1)\n"+
            "   safe_fn = str(frame)\n"+
            "  else:\n"+
            "   safe_fc = 1.0\n"+
            "   safe_fn = \"\"\n"+
            "  lerp = frame/safe_fc\n"
            "  currtime = ctsClamp(cts_start*(1.0-lerp)+cts_end*lerp,starttime,endtime)\n"+
            "  FxcAnimationPlayback.SetCurrentFrame(currtime)\n"+
            "  ForceRedraw()\n"+
            "  outfile = ctsGetOutputFile(filename,numDigits,frame,safe_fn,outputFormat)\n"+
            "  FxcRender.SaveRenderPortImage(port, outfile)\n\n"+
            # InvalidSceneOrPort: Assuming an invalid scene and/or render port, specifically report the error.
            # @param port the possibly invalid render port (i.e. it wasn't found)
            # @param scene the possibly invalid active scene (i.e. it wasn't found)
            # @param log error log (from the render stage)
            "def ctsInvalidSceneOrPort(scene,port,log):\n"+
            " if scene is None:\n"+
            "  log.write(\"Error: Could not find render scene.\\n\")\n"+
            " if port is None:\n"+
            "  log.write(\"Error: Could not find render port.\\n\")\n\n"+
            "FxcCommand.BeginGroup(\"FXC Conformance Test Import and Rendering\")\n")
        self.__testCount = 0
        self.__workingDir = workingDir

    def EndScript(self):
        """EndScript() -> None
        Implements FApplication.EndScript()
        """
        self.__script.write(
            "FxcCommand.EndGroup()\n"+
            # Undo everything the regression script just did.
            # "Undo()\n"+
            "FXProjectService.Instance.ResetProject()\n\n"+
            "try:\n"+
            " Fxc.Exit()\n"
            "except:\n"+
            " print \"An unexpected error occurred while closing. Please close FXC 2.0 manually.\"\n")
        self.__script.close()

    def RunScript(self):
        """RunScript() -> None
        Implements FApplication.RunScript()

        Launches FX Composer on the generated script; returns True on a
        clean exit (exit code 0) or when FXC is not installed.
        """
        if (not os.path.isfile(self.configDict["FXComposerPath"])):
            print "NVIDIA FX Composer 2 does not exist"
            return True
        file_name = os.path.basename(self.__script.name)
        print ("start running " + file_name)
        command = ("\"" + self.configDict["FXComposerPath"] +
                   "\" \"" + self.__script.name + "\"")
        returnValue = self.RunApplication(command, self.__workingDir)
        if (returnValue == 0):
            print "finished running " + os.path.basename(self.__script.name)
        else:
            print "crashed running " + os.path.basename(self.__script.name)
        return (returnValue == 0)

    def WriteImport(self, filename, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):
        """WriteImport(filename, logname, outputDir, settings, isAnimated, cameraRig, lightingRig) -> list_of_str
        Implements FApplication.WriteImport().
        """
        self.__testCount = self.__testCount + 1
        name_only = FUtils.GetProperFilename(filename)
        # Remember the base name so WriteRender/WriteExport can refer to it.
        self.__currentImportProperName = name_only
        output = (os.path.join(outputDir, name_only))+self.__EXTENSION
        self.__script.write(
            "if ctsImportFile(r\""+filename+"\",\""+name_only+"\",r\""+logname+"\",r\""+output+"\"):\n"+
            " print \""+name_only+"\",\"loaded.\"\n"
            )
        return [output, ]

    def WriteRender(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):
        """WriteRender(logname, outputDir, settings, isAnimated, cameraRig, lightingRig) -> list_of_str
        Implements FApplication.WriteRender()
        """
        name_only = self.__currentImportProperName
        for setting in settings:
            prettyName = setting.GetPrettyName()
            # Animation: start, end and total number of frames
            if (prettyName == FXComposer.__RENDER_ANIMATION_START):
                if (not isAnimated):
                    continue
                start = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                               setting, float)
            elif (prettyName == FXComposer.__RENDER_ANIMATION_END):
                if (not isAnimated):
                    continue
                end = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                             setting, float)
            elif (prettyName == FXComposer.__RENDER_ANIMATION_FRAMES):
                if (not isAnimated):
                    continue
                frameCount = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                                    setting, int)
            # Non-animation: start, end and total number of frames
            elif (prettyName == FXComposer.__RENDER_STILL_START):
                if (isAnimated):
                    continue
                start = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                               setting, float)
            elif (prettyName == FXComposer.__RENDER_STILL_END):
                if (isAnimated):
                    continue
                end = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                             setting, float)
            elif (prettyName == FXComposer.__RENDER_STILL_FRAMES):
                if (isAnimated):
                    continue
                frameCount = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                                    setting, int)
            # Render image dimensions: width and height
            elif (prettyName == FXComposer.__RENDER_WIDTH):
                width = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                               setting, int)
            elif (prettyName == FXComposer.__RENDER_HEIGHT):
                height = self.GetSettingValueAs(FXComposer.__RENDER_OPTIONS,
                                                setting, int)
            # Render image format
            elif (prettyName == FXComposer.__RENDER_OUTPUT_FORMAT):
                value = setting.GetValue().strip()
                if (value == ""):
                    value = self.FindDefault(FXComposer.__RENDER_OPTIONS,
                                             FXComposer.__RENDER_OUTPUT_FORMAT)
                outputFormat = "." + value
            # Render camera
            elif (prettyName == FXComposer.__RENDER_CAMERA):
                value = setting.GetValue().strip()
                if (value == ""):
                    value = self.FindDefault(FXComposer.__RENDER_OPTIONS,
                                             FXComposer.__RENDER_CAMERA)
                outputCamera = value.lower()
            # Render background: a hex value 0x[R][G][B][A] where [*] is a byte
            elif (prettyName == FXComposer.__RENDER_BACKGROUND):
                value = setting.GetValue().strip().lower()
                # If the string is not a hex pattern, use the default
                if not self.__hex_prog.match(value):
                    value = self.FindDefault(FXComposer.__RENDER_OPTIONS,
                                             FXComposer.__RENDER_BACKGROUND)
                # Strip opening characters
                value = value[value.find("0x")+2:]
                backgroundColor = []
                # Populate RGBA values as floats in [0,1]
                [backgroundColor.append(int(value[i-2:i], 16)/255.0) for i in [2, 4, 6, 8]]
            # Render port
            elif (prettyName == FXComposer.__RENDER_PORT):
                value = setting.GetValue().strip()
                # Since render port names are very specific, intelligently find closest match
                vallow = value.lower()
                if vallow.find("d3d") != -1 or vallow.find("direct") != -1:
                    value = FXComposer.__RENDER_PORT_OPTIONS[FXComposer.__RENDER_PORT_D3D]
                elif vallow.find("gl") != -1 or vallow.find("open") != -1 :
                    value = FXComposer.__RENDER_PORT_OPTIONS[FXComposer.__RENDER_PORT_OGL]
                # If blank, use the default port option
                elif vallow == "":
                    value = self.FindDefault(FXComposer.__RENDER_OPTIONS,
                                             FXComposer.__RENDER_PORT)
                # Otherwise, assume the user knows what they're doing
                outputPort = value
        outputList = []
        # Rendered file list (you'll see this routine in other CTS scripts)
        if frameCount == 1:
            outputList.append(os.path.join(outputDir, name_only + outputFormat))
        else:
            numDigit = len(str(frameCount))
            for i in range(0, frameCount):
                outputTemp = name_only
                paddingCount = numDigit - len(str(i))
                for j in range(0, paddingCount):
                    outputTemp = outputTemp + "0"
                outputTemp = outputTemp + str(i) + outputFormat
                outputList.append(os.path.join(outputDir, outputTemp))
        self.__script.write(
            # Resume script after importing the COLLADA file after a try-except-else statement
            " render_error_log = open(r\""+logname+"\",'w')\n"+
            " port = FxcRender.FindRenderPort(\""+outputPort+"\")\n"+
            " scene = FXSceneService.Instance.ActiveScene\n\n"+
            " if scene != None and port != None:\n"+
            "  ctsSetupTestCamera(port,scene,r\""+str(outputCamera)+"\",render_error_log)\n"+
            "  ctsSetupRender(port,scene,"+str(width)+","+str(height)+","+str(backgroundColor)+")\n"+
            "  ctsRenderFrames(port," +str(frameCount)+","+str(start)+","+str(end)
            +",r\""+os.path.join(outputDir, name_only)+"\",\""+outputFormat+"\")\n"+
            "  render_error_log.write(\""+name_only+" has successfully rendered.\\n\")\n"+
            " else:\n"+
            "  ctsInvalidSceneOrPort(scene,port,render_error_log)\n"+
            " render_error_log.close()\n\n")
        return outputList

    def WriteExport(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):
        """WriteExport(logname, outputDir, settings, isAnimated, cameraRig, lightingRig) -> list_of_str
        Implements FApplication.WriteExport().
        """
        name_only = self.__currentImportProperName
        output = (os.path.join(outputDir, name_only))+self.__EXTENSION
        self.__script.write(
            " output_error_log = open(r\""+logname+"\",'w')\n"+
            " ctsSaveFile(r\""+output+"\",\""+name_only+"\",output_error_log)\n"+
            " output_error_log.close()\n"
            )
        return [output, ]
|
<filename>neuwon/nmodl/kinetic_models.py
from scipy.linalg import expm
# TODO: What are the units on atol? How does the timestep factor into it?
# TODO: Make this accept a list of pointers instead of "input_ranges"
# TODO: Convert kinetics into a function.
# TODO: Use impulse response integration method in place of sparse solver...
# TODO: How to dispatch to it?
# TODO: Update the kinetic model to use Derivative function instead of sparse deriv equations.
# TODO: Write function to check that derivative_functions are Linear &
# time-invariant. Put this in the KineticModel class.
# NOTE(review): this function takes `self` but is defined at module level in
# this chunk -- presumably destined for a class per the module TODOs; confirm
# before calling it standalone.
def _compile_derivative_blocks(self):
    """ Replace the derivative_blocks with compiled functions in the form:
            f(state_vector, **block.arguments) -> Δstate_vector/Δt

    Only derivative blocks referenced by a SolveStatement whose method is
    "sparse" are compiled into self.derivative_functions.
    """
    self.derivative_functions = {}
    solve_statements = {stmt.block: stmt
            for stmt in self.breakpoint_block if isinstance(stmt, SolveStatement)}
    for name, block in self.derivative_blocks.items():
        if name not in solve_statements: continue
        if solve_statements[name].method == "sparse":
            self.derivative_functions[name] = self._compile_derivative_block(block)
def _compile_derivative_block(self, block):
    """ Returns function in the form:
            f(state_vector, **block.arguments) -> derivative_vector

    Generates Python source that unpacks the state vector into named locals,
    zero-initializes each d<state>, inlines the block body (with CONSERVE
    statements stripped), returns the derivative list, and njit-compiles it.
    """
    block = copy.deepcopy(block)
    globals_ = {}
    locals_ = {}
    py = "def derivative(%s, %s):\n"%(code_gen.mangle2("state"), ", ".join(block.arguments))
    for idx, name in enumerate(self.states):
        py += "    %s = %s[%d]\n"%(name, code_gen.mangle2("state"), idx)
    for name in self.states:
        py += "    %s = 0\n"%code_gen.mangle('d' + name)
    # CONSERVE statements do not contribute to the derivative; drop them.
    block.map(lambda x: [] if isinstance(x, _ConserveStatement) else [x])
    py += block.to_python(indent="    ")
    py += "    return [%s]\n"%", ".join(code_gen.mangle('d' + x) for x in self.states)
    code_gen.py_exec(py, globals_, locals_)
    return numba.njit(locals_["derivative"])
def _compute_propagator_matrix(self, block, time_step, kwargs):
1/0
f = self.derivative_functions[block]
n = len(self.states)
A = np.zeros((n,n))
for i in range(n):
state = np.array([0. for x in self.states])
state[i] = 1
A[:, i] = f(state, **kwargs)
return expm(A * time_step)
class KineticModel:
    """Integrates a linear kinetic (Markov) scheme via precomputed propagators.

    Builds an interpolation grid of impulse-response matrices
    (expm(A * time_step)) sampled across the input range, then advances state
    vectors on the GPU by multiplying each one with the matrix interpolated
    at its input value.

    NOTE(review): relies on module-level names not shown in this chunk
    (np, cp, scipy, itertools, Real, Callable, _1d, _conserve_sum).
    """
    def __init__(self, time_step, input_pointers, num_states, kinetics,
            conserve_sum=False,
            atol=1e-3):
        """Build and refine the interpolation table for the given kinetics.

        time_step      -- integration time step (units not stated here -- TODO confirm).
        input_pointers -- NOTE(review): accepted but never stored; the body
                          reads `input_ranges` and `self.input_pointers`,
                          neither of which is defined, so this raises
                          NameError as written.  The module TODO ("accept a
                          list of pointers instead of input_ranges") suggests
                          a refactor in progress -- confirm intended signature.
        num_states     -- number of states in the scheme.
        kinetics       -- iterable of (src, dst, coef, func) rate entries.
        conserve_sum   -- if truthy, states are rescaled to sum to this value.
        atol           -- absolute error target for the interpolation grid.
        """
        # Save and check the arguments.
        self.time_step = float(time_step)
        self.kinetics = kinetics
        self.input_ranges = np.array(input_ranges, dtype=Real)
        self.input_ranges.sort(axis=1)  # each row becomes (low, high)
        self.lower, self.upper = zip(*self.input_ranges)
        self.num_inputs = len(self.input_pointers)
        self.num_states = int(num_states)
        self.conserve_sum = float(conserve_sum) if conserve_sum else None
        self.atol = float(atol)
        assert(isinstance(self.kinetics, Callable))
        assert(len(self.input_ranges.shape) == 2 and self.input_ranges.shape[1] == 2)
        assert(self.num_inputs > 0)
        assert(self.num_states > 0)
        assert(self.atol > 0)
        # Determine how many interpolation points to use: grow the grid until
        # the estimated interpolation error falls below atol.
        self.grid_size = np.full((self.num_inputs,), 2)
        self._compute_interpolation_grid()
        while self._estimate_min_accuracy() >= self.atol:
            self.grid_size += 1
            self.grid_size *= 2
            self._compute_interpolation_grid()
        self.data = cp.array(self.data)  # move the finished table to the GPU

    def _compute_impulse_response_matrix(self, inputs):
        """Assemble the rate matrix A at the given input values and return its
        one-time-step propagator, expm(A * time_step).

        NOTE(review): only A[dst, src] accumulation is visible here; no
        negative diagonal (outflow) terms are added -- confirm the kinetics
        iterable already encodes them.
        """
        A = np.zeros([self.num_states] * 2, dtype=float)
        for src, dst, coef, func in self.kinetics:
            if func is not None:
                A[dst, src] += coef * func(*inputs)
            else:
                A[dst, src] += coef
        return scipy.linalg.expm(A * self.time_step)

    def _compute_interpolation_grid(self):
        """ Assumes self.grid_size is already set. """
        # Guard against zero-width input ranges to avoid division by zero.
        grid_range = np.subtract(self.upper, self.lower)
        grid_range[grid_range == 0] = 1
        self.grid_factor = np.subtract(self.grid_size, 1) / grid_range
        self.data = np.empty(list(self.grid_size) + [self.num_states]*2, dtype=Real)
        # Visit every location on the new interpolation grid.
        grid_axes = [list(enumerate(np.linspace(*args, dtype=float)))
                for args in zip(self.lower, self.upper, self.grid_size)]
        for inputs in itertools.product(*grid_axes):
            index, inputs = zip(*inputs)
            self.data[index] = self._compute_impulse_response_matrix(inputs)

    def _estimate_min_accuracy(self):
        """Sample random points and return the worst observed absolute error
        between the exact propagator and its grid interpolation."""
        atol = 0
        # NOTE(review): np.product is deprecated in newer NumPy (use np.prod).
        num_points = np.product(self.grid_size)
        num_test_points = max(int(round(num_points / 10)), 100)
        for _ in range(num_test_points):
            inputs = np.random.uniform(self.lower, self.upper)
            exact = self._compute_impulse_response_matrix(inputs)
            interp = self._interpolate_impulse_response_matrix(inputs)
            atol = max(atol, np.max(np.abs(exact - interp)))
        return atol

    def _interpolate_impulse_response_matrix(self, inputs):
        """Multilinearly interpolate the propagator matrix at *inputs* from
        the precomputed grid (CPU-side reference implementation)."""
        assert(len(inputs) == self.num_inputs)
        inputs = np.array(inputs, dtype=Real)
        assert(all(inputs >= self.lower) and all(inputs <= self.upper)) # Bounds check the inputs.
        # Determine which grid box the inputs are inside of.
        inputs = self.grid_factor * np.subtract(inputs, self.lower)
        lower_idx = np.array(np.floor(inputs), dtype=int)
        upper_idx = np.array(np.ceil(inputs), dtype=int)
        upper_idx = np.minimum(upper_idx, self.grid_size - 1) # Protect against floating point error.
        # Prepare to find the interpolation weights, by finding the distance
        # from the input point to each corner of its grid box.
        inputs -= lower_idx
        corner_weights = [np.subtract(1, inputs), inputs]
        # Visit each corner of the grid box and accumulate the results.
        irm = np.zeros([self.num_states]*2, dtype=Real)
        for corner in itertools.product(*([(0,1)] * self.num_inputs)):
            idx = np.choose(corner, [lower_idx, upper_idx])
            weight = np.product(np.choose(corner, corner_weights))
            irm += weight * np.squeeze(self.data[idx])
        return irm

    def advance(self, inputs, states):
        """Advance every state vector by one time step on the GPU.

        inputs -- sequence of num_inputs device arrays (one value per row).
        states -- (num_instances, num_states) device array of dtype Real,
                  updated in place.
        """
        numba.cuda.synchronize()
        assert(len(inputs) == self.num_inputs)
        assert(len(states.shape) == 2 and states.shape[1] == self.num_states)
        assert(states.dtype == Real)
        for l, u, x in zip(self.lower, self.upper, inputs):
            assert(x.shape[0] == states.shape[0])
            assert(cp.all(cp.logical_and(x >= l, x <= u))) # Bounds check the inputs.
        if self.num_inputs == 1:
            # Scratch accumulator must start at zero for the kernel.
            scratch = cp.zeros(states.shape, dtype=Real)
            threads = 64
            blocks = (states.shape[0] + (threads - 1)) // threads
            _1d[blocks,threads](inputs[0], states, scratch,
                    self.lower[0], self.grid_size[0], self.grid_factor[0], self.data)
        else:
            raise TypeError("KineticModel is unimplemented for more than 1 input dimension.")
        numba.cuda.synchronize()
        # Enforce the invariant sum of states.
        if self.conserve_sum is not None:
            threads = 64
            blocks = (states.shape[0] + (threads - 1)) // threads
            _conserve_sum[blocks,threads](states, self.conserve_sum)
            numba.cuda.synchronize()
@numba.cuda.jit()
def _1d(inputs, states, scratch, input_lower_bound, grid_size, grid_factor, data):
    """CUDA kernel: advance one state vector by linearly interpolating between
    the two precomputed propagator matrices that bracket its input value.

    One thread per row of `states`.  `scratch` is a per-row accumulator and
    must be zero-initialized by the caller.
    """
    index = numba.cuda.grid(1)
    if index >= states.shape[0]:
        return
    inpt = inputs[index]
    state = states[index]
    accum = scratch[index]
    # Determine which grid box the inputs are inside of.
    inpt = (inpt - input_lower_bound) * grid_factor
    lower_idx = int(math.floor(inpt))
    upper_idx = int(math.ceil(inpt))
    upper_idx = min(upper_idx, grid_size - 1) # Protect against floating point error.
    inpt -= lower_idx  # fractional position inside the box, in [0, 1]
    # Visit each corner of the grid box and accumulate the results.
    _weighted_matrix_vector_multiplication(
        1 - inpt, data[lower_idx], state, accum)
    _weighted_matrix_vector_multiplication(
        inpt, data[upper_idx], state, accum)
    # Copy the accumulated result back into the state row in place.
    for i in range(len(state)):
        state[i] = accum[i]
@numba.cuda.jit(device=True)
def _weighted_matrix_vector_multiplication(w, m, v, results):
    """ Computes: results += weight * (matrix * vector)

    Arguments:
        [w]eight  - scalar interpolation weight
        [m]atrix  - square matrix, len(v) x len(v)
        [v]ector  - input vector
        results   - output accumulator (same length as v) """
    l = len(v)
    for r in range(l):
        # Plain row-by-row dot product; no shared memory or reductions.
        dot = 0
        for c in range(l):
            dot += m[r, c] * v[c]
        results[r] += w * dot
@numba.cuda.jit()
def _conserve_sum(states, target_sum):
    """CUDA kernel: rescale each state vector so its components sum to
    target_sum (one thread per row of `states`).

    NOTE(review): divides by the row's current sum -- an all-zero row would
    produce a division by zero.
    """
    index = numba.cuda.grid(1)
    if index >= states.shape[0]:
        return
    state = states[index]
    accumulator = 0.
    num_states = len(state)
    for i in range(num_states):
        accumulator += state[i]
    correction_factor = target_sum / accumulator
    for i in range(num_states):
        state[i] *= correction_factor
|
import itertools
import sys
import os
import re
import fnmatch
from os.path import isdir, isfile
from PIL import Image
IMAGE_STRUCT = '<p align="center">\n\
<img title="{}" src="{}">\n\
</p>'
def quit(msg=None):
    """Print an optional red error message, announce the exit, and terminate.

    (Shadows the builtin ``quit``; name kept for interface compatibility.)
    """
    if msg:
        print(f" \033[91m {msg}\033[00m")
    print("Exiting..")
    sys.exit()
"""
Return the given path as an ordered list of path components
Can be used to easily determine a file's depth
"""
def splitpath(path, maxdepth=20):
(head, tail) = os.path.split(path)
return splitpath(head, maxdepth - 1) + [tail] if maxdepth and head and head != path else [head or tail]
"""
Recursively find all files of a certain type under some root directory
"""
def list_files(rootdir, ext):
file_list = list()
for root, dirs, files in os.walk(rootdir):
[file_list.append(os.path.join(root, file)) for file in files if file.endswith(ext)]
return file_list
"""
Downscales the image target image size if necessary
"""
def enforce_image_width(image_filepath, target_width):
img = Image.open(image_filepath)
# print(img.format)
# print(img.mode)
# print(img.size[0])
if img.size[0] > target_width:
print("resizing", image_filepath)
wpercent = target_width / float(img.size[0])
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((target_width, hsize), Image.ANTIALIAS)
img.save(image_filepath)
"""
Get something like "pork-brownies.jpg" from "src/desserts/pork-brownies.md"
"""
def recipe_filename_from_photo_filename(path):
return os.path.splitext(os.path.basename(path))[0] + ".md"
"""
Get something like "pork-brownies.jpg" from "src/desserts/pork-brownies.md"
"""
def photo_filename_from_recipe_path(path):
return os.path.splitext(os.path.basename(path))[0] + ".jpg"
"""
Get something like "../../assets/port-brownies.jpg" from "src/desserts/pork-brownies.md"
"""
def photo_relpath_from_recipe_path(path, photo_dir):
image_rel_link = str()
filename = photo_filename_from_recipe_path(path)
depth_diff = len(splitpath(os.path.normpath(path))) - len(splitpath(os.path.normpath(photo_dir)))
for num in range(depth_diff):
image_rel_link = os.path.join("..", image_rel_link)
rel_path = os.path.join(image_rel_link, os.path.basename(photo_dir), filename)
return rel_path
"""
Generate the image structure that can be pasted into a recipe.
"""
def image_struct_from_file(target_file, photo_dir):
rel_path = photo_relpath_from_recipe_path(target_file, photo_dir)
title = re.sub(r"-", " ", os.path.splitext(os.path.basename(target_file))[0])
output = IMAGE_STRUCT.format(title.title(), rel_path)
return output
"""
Place an image structure between the title and the next section
"""
def insert_image_link(target_file, photo_dir):
with open(target_file, "r") as fi:
file_data = fi.read()
image_struct_str = image_struct_from_file(target_file, photo_dir)
new_data = re.sub( r"^(# .+)\n((?:\s)*)(## .+)", r"\1\n\n" + image_struct_str + r"\n\n\3", file_data, flags=re.MULTILINE)
with open(target_file, "w") as fi:
fi.write(new_data)
"""
Replace the current image <p> structure in a recipe with a new one. Good for updating rel paths
"""
def fix_image_link(target_file, photo_dir):
current_link = search_img_link_in_file(target_file)
if current_link:
with open(target_file, "r") as fi:
file_data = fi.read()
image_struct_str = image_struct_from_file(target_file, photo_dir)
new_data = re.sub(r'^(#.+)\n*((?:<.+>\s)+)\n*(#.+)', r"\1\n\n" + image_struct_str + r"\n\n\3", file_data, flags=re.MULTILINE )
with open(target_file, "w") as fi:
fi.write(new_data)
if file_data != new_data:
return True
else:
return False
"""
Find a specific recipe file in the recipe src
"""
def find(root, file, first=True):
for d, subD, f in os.walk(root):
if file in f:
# print("{0} : {1}".format(file, d))
if first == True:
return os.path.join(d, file)
return None
"""
Search for the img <p> in a recipe file, returning the match object
The match object has two groups: the image title and rel link
"""
def search_img_link_in_file(file):
with open(file, "r") as fi:
file_data = fi.read()
img_match = re.search(r"^(?:#.+)\n+((?:<.+>\s)+)", file_data, flags=re.MULTILINE )
if img_match:
return img_match
else:
return None
"""
Adds an image link if it does not yet exist. Only call this for files
that definitely should have a link
"""
def enforce_image_link_exists( file, photo_dir ):
img_match = search_img_link_in_file(file)
if not img_match:
insert_image_link( file, photo_dir )
"""
Fixes an already-existing image link
"""
def enforce_image_link_correct( md_file, photo_dir ):
img_match = search_img_link_in_file(md_file)
if img_match and fix_image_link(md_file, photo_dir):
print(" Adjusted image reference in", md_file)
"""
Ensures a linked image actually exists (only use after all links are added)
"""
def enforce_linked_image_exists( file, photo_dir ):
img_match = search_img_link_in_file(file)
asset_img = os.path.join(photo_dir, photo_filename_from_recipe_path(file))
if img_match:
# Search within the entire link tag to verify the written file's existence
link_data_match = re.search( r"<(?:.*title\s*=\s*\")(.+)(?:\"\s*src\s*=\s*\")(.*)(?:\"\s*>)", img_match.group(1), flags=re.MULTILINE )
title = link_data_match.group(1)
rel_link = link_data_match.group(2)
abs_link = os.path.join( photo_dir, os.path.basename(rel_link) )
if not isfile(abs_link):
print("Could not find", asset_img, "for", file)
quit()
if asset_img != abs_link:
print("Link appears to be incorrect")
print("asset_img", asset_img)
print("abs_link", abs_link)
quit()
"""
Just determine if a recipe has a tags section header or not
"""
def get_recipe_tag_section(file):
with open(file, "r") as fi:
file_data = fi.read()
# Add a newline to the data to handle files without an empty line at the end of the file
return re.search(r"^#*\s*[tT]ags:?\n*(^\w.+\s{0,2})*", file_data + "\n", flags=re.MULTILINE)
"""
Return a list of tags, empty if no tags present in file
"""
def get_recipe_tags(file):
tag_list = list()
tag_section = get_recipe_tag_section(file)
if tag_section:
# Found the tags section, now return a list of the tags. Return an empty list is no tags are present
tags_text_match = re.search(r"^([^#].+\s)+", tag_section.group(0), flags=re.MULTILINE)
if tags_text_match:
tag_list = tags_text_match.group(0).splitlines()
tag_list = [tag for tag in tag_list if tag != ""]
return tag_list
"""
Always normalizes the tag section text
"""
def add_recipe_tags(file, tag_list):
curr_tag_section = get_recipe_tag_section(file)
curr_tags = list()
new_tag_section = "## Tags\n"
# Get current tags
if curr_tag_section:
curr_tags = get_recipe_tags(file)
[curr_tags.append(tag) for tag in tag_list if tag not in curr_tags]
curr_tags.sort()
for tag in curr_tags:
new_tag_section += tag + "\n"
# Write back to the recipe file
with open(file, "r") as fi:
file_data = fi.read().rstrip("\n")
if curr_tag_section:
new_data = re.sub(r"^#*\s*[tT]ags:?\n*(^\w.+\s{0,2})*", new_tag_section, file_data + "\n", flags=re.MULTILINE,)
else:
new_data = file_data + "\n\n" + new_tag_section
# print(" adding", tag_list, "to", file)
with open(file, "w") as fi:
fi.write(new_data)
return curr_tags
"""
Always normalizes the tag section text - UNTESTED
"""
def remove_recipe_tags(file, tag_list):
curr_tag_section = get_recipe_tag_section(file)
curr_tags = list()
new_tag_section = "## Tags\n"
# Get current tags
if curr_tag_section:
curr_tags = get_recipe_tags(file)
[curr_tags.remove(tag) for tag in tag_list if tag in curr_tags]
curr_tags.sort()
for tag in curr_tags:
new_tag_section += tag + "\n"
# Write back to the recipe file
with open(file, "r") as fi:
file_data = fi.read().rstrip("\n")
new_data = re.sub(curr_tag_section.group(0), new_tag_section, file_data + "\n", flags=re.MULTILINE)
print("Untested function, exiting..")
sys.exit()
with open(file, "w") as fi:
fi.write(new_data)
return curr_tags
"""
Main photo processing function
"""
def photo_processor( src_path, src_excludes, assets_dir, photo_excludes, target_width_px ):
print( "Collecting photos from", assets_dir )
image_files = list_files(assets_dir, ".jpg")
md_files = list_files(src_path, ".md")
md_exclude_paths = [os.path.join( src_path, md) for md in src_excludes]
[md_files.remove( md ) for md in md_exclude_paths if md in md_files]
md_file_basenames = [os.path.basename(fi) for fi in md_files]
photo_exclude_paths = [os.path.join( assets_dir, img ) for img in photo_excludes]
[image_files.remove( img ) for img in photo_exclude_paths if img in image_files]
print( "Checking photo sizes" )
[enforce_image_width( img, target_width_px ) for img in image_files ]
print( "Checking for unreferenced photos" )
orphaned_photos = [img for img in image_files if recipe_filename_from_photo_filename(img) not in md_file_basenames ]
if orphaned_photos:
quit( "Error: these photos are orphaned: " + str(orphaned_photos) )
# print( "If recipe file exists, open and check for photo link, add one if not present" )
# Open each theretical recipe file, and look inside it, check for correct link, add if not present, correct if wrong
recipes_to_check = [find(src_path, recipe_filename_from_photo_filename(img)) for img in image_files]
# print(recipes_to_check)
assert(len(recipes_to_check) == len(image_files))
[enforce_image_link_exists(recipe, assets_dir) for recipe in recipes_to_check]
# Do these things for all md files in the book with an image link
photod_recipes = [file for file in md_files if search_img_link_in_file( file )]
[enforce_image_link_correct(recipe, assets_dir) for recipe in photod_recipes]
[enforce_linked_image_exists(recipe, assets_dir) for recipe in photod_recipes]
[add_recipe_tags(recipe, ["verified"]) for recipe in photod_recipes]
|
<reponame>PerkinsAndWill-IO/compute.rhino3d<gh_stars>0
from http.server import HTTPServer, BaseHTTPRequestHandler
from enum import Enum
import json
import rhino3dm
import base64
class ParamAccess(Enum):
    """How a Grasshopper parameter receives data: one item, a list, or a tree."""
    ITEM = 0
    LIST = 1
    TREE = 2
class ParamManager(object):
    """Accumulates parameter metadata dictionaries for a Hops component."""
    def __init__(self):
        # Ordered list of parameter metadata dicts.
        self._items = []
    def to_meta_list(self):
        """Return the raw metadata list, as embedded in the component meta."""
        return self._items
    def parameter_name(self, i: int):
        """Return the name of the i-th registered parameter."""
        return self._items[i]['Name']
    def _create_param(self, nickname: str, description: str,
                      access: ParamAccess, paramtype: str,
                      resulttype: str):
        """Append one parameter record and return its index.

        NOTE(review): the public add_* methods pass a `name` argument that is
        never forwarded here; only `nickname` is recorded as 'Name' --
        confirm against the Hops metadata schema.
        """
        param = {
            'Name': nickname,
            'Description': description,
            'AtLeast': 1,
            'ParamType': paramtype,
            'ResultType': resulttype
        }
        if access == ParamAccess.ITEM:
            # Item access means at most one value as well.
            param['AtMost'] = 1
        self._items.append(param)
        return len(self._items) - 1
    def add_number_parameter(self, name: str, nickname: str, description: str,
                             access: ParamAccess):
        """Register a numeric (System.Double) parameter."""
        return self._create_param(nickname, description, access, 'Number', 'System.Double')
    def add_curve_parameter(self, name: str, nickname: str, description: str,
                            access: ParamAccess):
        """Register a curve parameter."""
        return self._create_param(nickname, description, access, 'Curve', 'Rhino.Geometry.Curve')
    def add_point_parameter(self, name: str, nickname: str, description: str,
                            access: ParamAccess):
        """Register a 3D point parameter."""
        return self._create_param(nickname, description, access, 'Point', 'Rhino.Geometry.Point3d')
    def add_surface_parameter(self, name: str, nickname: str, description: str,
                              access: ParamAccess):
        """Register a surface parameter (results are delivered as Breps)."""
        return self._create_param(nickname, description, access, 'Surface', 'Rhino.Geometry.Brep')
class InputParamManager(ParamManager):
    """ParamManager specialization that collects a component's inputs."""
    def __init__(self):
        super().__init__()
class OutputParamManager(ParamManager):
    """ParamManager specialization that also serializes solved output values."""
    def __init__(self):
        super().__init__()
    def create_result(self, i, data):
        """Wrap *data* as a Hops result record for output parameter *i*.

        *i* may be an integer index or a parameter name.  Surfaces are
        converted to Breps (matching the declared ResultType); objects that
        expose Encode() are serialized through a custom JSON encoder.
        """
        class __Rhino3dmEncoder(json.JSONEncoder):
            # Serialize rhino3dm objects via their Encode() dictionary.
            def default(self, o):
                if hasattr(o, "Encode"):
                    return o.Encode()
                return json.JSONEncoder.default(self, o)
        param = None
        if isinstance(i, int):
            param = self._items[i]
        else:
            # Linear lookup by parameter name.
            for item in self._items:
                if item['Name'] == i:
                    param = item
                    break
        if isinstance(data, rhino3dm.Surface):
            data = rhino3dm.Brep.CreateFromSurface(data)
        result = {
            'ParamName': param['Name'],
            'InnerTree': {
                '0': [{
                    'type': param['ResultType'],
                    'data': json.dumps(data, cls=__Rhino3dmEncoder)
                }]
            }
        }
        return result
def _coerce_input(item):
data = json.loads(item['data'])
itemtype = item['type']
coercers = {
'System.Double': lambda x: float(x),
'Rhino.Geometry.Point2d': lambda x: rhino3dm.Point2d(x['X'], x['Y']),
'Rhino.Geometry.Point3d': lambda x: rhino3dm.Point3d(x['X'], x['Y'], x['Z']),
'Rhino.Geometry.Vector3d': lambda x: rhino3dm.Vector3d(x['X'], x['Y'], x['Z'])
}
if itemtype in coercers:
rc = coercers[itemtype](data)
return rc
if itemtype.startswith('Rhino.Geometry.'):
rc = rhino3dm.CommonObject.Decode(data)
return rc
return data
class DataAccess(object):
    """Mediates a component's access to its input values and output results
    during a single solve (mirrors Grasshopper's IGH_DataAccess pattern)."""
    def __init__(self, component, inputs):
        self._inputs = inputs        # {param name: InnerTree dict}
        self._component = component
        self._results = []           # results accumulated via setdata()
    def getdata(self, i):
        """Fetch input *i* (integer index or parameter name).

        Returns (True, value).  NOTE(review): only the first item of branch
        '0' is read -- list/tree access is not handled here.
        """
        name = i
        if isinstance(i, int):
            name = self._component._inputs.parameter_name(i)
        data = self._inputs[name]['0'][0]
        data = _coerce_input(data)
        return (True, data)
    def setdata(self, i, data):
        """Record *data* as the value of output parameter *i*."""
        result = self._component._outputs.create_result(i, data)
        self._results.append(result)
    def output_list(self):
        """Return every result recorded so far."""
        return self._results
class Component(object):
    """Base class for a Hops component: identifying metadata plus hooks.

    Subclasses override register_input_params / register_output_params to
    declare their parameters, and solve_instance to compute outputs.
    """
    def __init__(self, name: str, nickname: str, description: str,
                 category: str, subcategory: str):
        self._name = name
        self._nickname = nickname
        self._description = description
        self._category = category
        self._subcategory = subcategory
        self._meta = None   # lazily-built metadata dict (see meta())
        self._icon = None   # optional base64-encoded icon bytes
    def set_icon(self, path):
        """Load the icon file at *path* and store it base64-encoded.

        (Uses a context manager; the original leaked the file handle.)
        """
        with open(path, 'rb') as image:
            base64_bytes = base64.b64encode(image.read())
        self._icon = base64_bytes.decode('ascii')
    def build_params(self):
        """Create the in/out parameter managers and let the subclass register
        its parameters.  Must run before meta() is first called."""
        self._inputs = InputParamManager()
        self.register_input_params(self._inputs)
        self._outputs = OutputParamManager()
        self.register_output_params(self._outputs)
    def name(self):
        return self._name
    def nickname(self):
        return self._nickname
    def meta(self):
        """Return (and cache) the component's metadata dictionary."""
        if self._meta is None:
            meta = {'Description': self._description}
            meta['Inputs'] = self._inputs.to_meta_list()
            meta['Outputs'] = self._outputs.to_meta_list()
            if (self._icon):
                meta['Icon'] = self._icon
            self._meta = meta
        return self._meta
    def register_input_params(self, inputs):
        """Subclass hook: declare input parameters."""
        pass
    def register_output_params(self, outputs):
        """Subclass hook: declare output parameters."""
        pass
    def solve_instance(self, data_access):
        """Subclass hook: compute outputs through the given DataAccess."""
        pass
class ComponentCollection(object):
    """Static registry of the components served by this process."""
    components = None  # populated by start_server()
    @staticmethod
    def component_nicknames():
        """Return the nicknames of every registered component."""
        names = [c.nickname() for c in ComponentCollection.components]
        return names
    @staticmethod
    def find_component(name: str) -> Component:
        """Case-insensitive lookup by full name first, then by nickname;
        returns None when nothing matches."""
        name = name.lower()
        for component in ComponentCollection.components:
            if name == component.name().lower():
                return component
        for component in ComponentCollection.components:
            if name == component.nickname().lower():
                return component
        return None
    @staticmethod
    def component_description(name: str):
        """Return the named component's metadata dict.

        NOTE(review): no None check -- an unknown name raises AttributeError.
        """
        component = ComponentCollection.find_component(name)
        description = component.meta()
        return description
    @staticmethod
    def solve_component(name: str, inputs: list):
        """Solve the named component against *inputs*; returns the DataAccess
        holding its results."""
        component = ComponentCollection.find_component(name)
        da = DataAccess(component, inputs)
        component.solve_instance(da)
        return da
class __HopsServer(BaseHTTPRequestHandler):
    """HTTP handler implementing the Hops protocol.

    GET /        -> JSON list of component nicknames
    GET /<name>  -> JSON metadata for one component
    POST         -> solve a component; body is {'pointer': name, 'values': [...]}
    """
    def _set_headers(self):
        # Every response is 200 with a JSON body.
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
    def do_GET(self):
        # Split off any query string; only the path is used.
        tokens = self.path.split('?')
        path = tokens[0]
        if (path == '/'):
            self._set_headers()
            output = ComponentCollection.component_nicknames()
            s = json.dumps(output)
            self.wfile.write(s.encode(encoding='utf_8'))
            return
        self._set_headers()
        # Strip the leading '/' to get the component name.
        component_name = tokens[0][1:]
        output = ComponentCollection.component_description(component_name)
        s = json.dumps(output)
        self.wfile.write(s.encode(encoding='utf_8'))
    def do_HEAD(self):
        self._set_headers()
    def do_POST(self):
        # read the message and convert it into a python dictionary
        length = int(self.headers.get('Content-Length'))
        jsoninput = self.rfile.read(length)
        data = json.loads(jsoninput)
        comp_name = data['pointer']
        # Re-key the incoming values by parameter name.
        values = {}
        for d in data['values']:
            paramname = d['ParamName']
            values[paramname] = d['InnerTree']
        da = ComponentCollection.solve_component(comp_name, values)
        output = da.output_list()
        output = {
            'values': output
        }
        self._set_headers()
        s = json.dumps(output)
        self.wfile.write(s.encode(encoding='utf_8'))
def start_server(components: list, port: int):
    """Build parameter metadata for each component, register the collection,
    and serve the Hops HTTP API on localhost:*port*.  Blocks forever."""
    for component in components:
        component.build_params()
    ComponentCollection.components = components
    location = ('localhost', port)
    httpd = HTTPServer(location, __HopsServer)
    print(f"Starting hops python server on {location[0]}:{location[1]}")
    httpd.serve_forever()
|
<filename>MuPythonLibrary/Uefi/EdkII/Parsers/BaseParser.py<gh_stars>1-10
# @file BaseParser.py
# Code to support parsing EDK2 files
##
# Copyright (c) 2016, Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
###
import os
import logging
class BaseParser(object):
    """Common machinery for parsing EDK2 build description files.

    Tracks parsed lines, local/input variable dictionaries, a stack of
    active !if/!ifdef conditionals, and the search paths used to resolve
    referenced files.
    """

    def __init__(self, log):
        self.Logger = logging.getLogger(log)
        self.Lines = []
        self.LocalVars = {}          # variables defined while parsing
        self.InputVars = {}          # variables supplied by the caller
        self.CurrentSection = ""
        self.CurrentFullSection = ""
        self.Parsed = False
        self.ConditionalStack = []   # truth values of nested conditionals
        self.RootPath = ""
        self.PPs = []                # package search paths
        self.TargetFile = None
        self.TargetFilePath = None

    #
    # For include files set the base root path
    #
    def SetBaseAbsPath(self, path):
        """Set the root path used to resolve files; returns self."""
        self.RootPath = path
        return self

    def SetPackagePaths(self, pps=None):
        """Set the package search paths; returns self.

        (The original used a mutable default argument pps=[]; a None
        sentinel keeps identical observable behavior.)
        """
        self.PPs = [] if pps is None else pps
        return self

    def SetInputVars(self, inputdict):
        """Set the caller-supplied variable dictionary; returns self."""
        self.InputVars = inputdict
        return self

    def FindPath(self, *p):
        """Resolve path components against RootPath, then the target file's
        directory, then each package path; when nothing exists, log an error
        and return the root-relative path anyway."""
        # NOTE: Some of this logic should be replaced
        # with the path resolution from Edk2Module code.
        Path = os.path.join(self.RootPath, *p)
        if os.path.exists(Path):
            return Path
        # If that fails, check a path relative to the target file.
        if self.TargetFilePath is not None:
            Path = os.path.join(self.TargetFilePath, *p)
            if os.path.exists(Path):
                return Path
        # If that fails, check in every possible Pkg path.
        for Pkg in self.PPs:
            Path = os.path.join(self.RootPath, Pkg, *p)
            if os.path.exists(Path):
                return Path
        # log invalid file path
        Path = os.path.join(self.RootPath, *p)
        self.Logger.error("Invalid file path %s" % Path)
        return Path

    def WriteLinesToFile(self, filepath):
        """Write every parsed line to *filepath*, one per line."""
        self.Logger.debug("Writing all lines to file: %s" % filepath)
        # Context manager closes the handle even on error (original leaked
        # it on exception).
        with open(filepath, "w") as f:
            for l in self.Lines:
                f.write(l + "\n")

    #
    # do logical comparisons
    #
    def ComputeResult(self, value, cond, value2):
        """Evaluate a binary comparison of two string operands.

        Equality operators compare case-insensitively as strings; ordering
        operators convert both sides with ConvertToInt.  Returns None for an
        unrecognized operator (preserved from the original).
        """
        if(cond == "=="):
            return (value.upper() == value2.upper())
        elif (cond == "!="):
            return (value.upper() != value2.upper())
        elif (cond == "<"):
            return (self.ConvertToInt(value) < (self.ConvertToInt(value2)))
        elif (cond == "<="):
            return (self.ConvertToInt(value) <= (self.ConvertToInt(value2)))
        elif (cond == ">"):
            return (self.ConvertToInt(value) > (self.ConvertToInt(value2)))
        elif (cond == ">="):
            return (self.ConvertToInt(value) >= (self.ConvertToInt(value2)))

    def ConvertToInt(self, value):
        """Convert a decimal or 0x/0X-prefixed hexadecimal string to int."""
        if(value.upper().startswith("0X")):
            return int(value, 16)
        return int(value, 10)

    def PushConditional(self, v):
        """Push a conditional truth value onto the stack."""
        self.ConditionalStack.append(v)

    def PopConditional(self):
        """Pop and return the top conditional value.

        Popping an empty stack logs a critical error and then pops anyway,
        deliberately crashing with a traceback (preserved behavior).
        NOTE(review): the message formats self.CurrentLine, which is never
        set in this class -- the error path itself would raise.
        """
        if(len(self.ConditionalStack) > 0):
            return self.ConditionalStack.pop()
        self.Logger.critical("Tried to pop an empty conditional stack. Line Number %d" % self.CurrentLine)
        return self.ConditionalStack.pop()  # this should cause a crash but will give trace.

    def ReplaceVariables(self, line):
        """Replace each $(TOKEN) in *line* from LocalVars, then InputVars.

        Unknown tokens are logged and left in place (required for !ifdef
        support).  TRUE/FALSE values are upper-cased as a workaround.
        """
        rep = line.count("$")   # upper bound on replacements to attempt
        result = line
        index = 0
        while(rep > 0):
            start = line.find("$(", index)
            end = line.find(")", start)
            token = line[start + 2:end]
            retoken = line[start:end + 1]
            self.Logger.debug("Token is %s" % token)
            v = self.LocalVars.get(token)
            self.Logger.debug("Trying to replace %s" % retoken)
            if(v is not None):
                # fixme: This should just be a workaround!!!!!
                if (v.upper() == "TRUE" or v.upper() == "FALSE"):
                    v = v.upper()
                self.Logger.debug("with %s [From Local Vars]" % v)
                result = result.replace(retoken, v, 1)
            else:
                # use the passed in Env
                v = self.InputVars.get(token)
                if(v is None):
                    self.Logger.error("Unknown variable %s in %s" % (token, line))
                    # just skip it because we need to support ifdef
                else:
                    # fixme: This should just be a workaround!!!!!
                    if (v.upper() == "TRUE" or v.upper() == "FALSE"):
                        v = v.upper()
                    self.Logger.debug("with %s [From Input Vars]" % v)
                    result = result.replace(retoken, v, 1)
            index = end + 1
            rep = rep - 1
        return result

    def ProcessConditional(self, text):
        """Handle !if/!ifdef/!ifndef/!else/!endif lines.

        Returns True when *text* was a conditional directive, False otherwise.
        """
        tokens = text.split()
        if(tokens[0].lower() == "!if"):
            # need to add support for OR/AND
            if(len(tokens) < 4):
                self.Logger.error("!if conditionals need to be formatted correctly (spaces between each token)")
                raise Exception("Invalid conditional", text)
            con = self.ComputeResult(tokens[1].strip(), tokens[2].strip(), tokens[3].strip())
            self.PushConditional(con)
            return True
        elif(tokens[0].lower() == "!ifdef"):
            # Defined iff variable replacement already consumed the "$(...)".
            self.PushConditional((tokens[1].count("$") == 0))
            return True
        elif(tokens[0].lower() == "!ifndef"):
            self.PushConditional((tokens[1].count("$") > 0))
            return True
        elif(tokens[0].lower() == "!else"):
            v = self.PopConditional()
            self.PushConditional(not v)
            return True
        elif(tokens[0].lower() == "!endif"):
            self.PopConditional()
            return True
        return False

    def InActiveCode(self):
        """True when every enclosing conditional on the stack is active."""
        return all(self.ConditionalStack)

    def IsGuidString(self, l):
        """True when the line looks like
        name = { 0xD3B36F2C, 0xD551, 0x11D4, { 0x9A, ..., 0x4D }}"""
        return (l.count("{") == 2 and l.count("}") == 2
                and l.count(",") == 10 and l.count("=") == 1)

    def _GuidHexChunk(self, entry, width):
        """Clean one comma-separated GUID entry (' { 0x9A') down to its hex
        digits and zero-pad to *width* characters.

        Mirrors the original per-entry cleanup: strip spaces/braces, take the
        first whitespace-separated token, drop the '0x' prefix, pad.
        """
        token = entry.strip(' {}')
        parts = token.split()
        if parts:
            token = parts[0]
        token = token.lstrip(' 0').lstrip('x')
        return token.zfill(width)

    def ParseGuid(self, l):
        """Parse a registry-format GUID literal such as
        { 0xD3B36F2C, 0xD551, 0x11D4, { 0x9A, 0x46, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D }}
        into the canonical 8-4-4-4-12 string (upper case).

        (The original repeated the same strip/pad code eleven times; the
        per-entry work now lives in _GuidHexChunk.)
        """
        entries = l.lstrip(' {').rstrip(' }').split(',')
        widths = [8, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2]
        parts = [self._GuidHexChunk(e, w) for e, w in zip(entries, widths)]
        gu = (parts[0] + "-" + parts[1] + "-" + parts[2] + "-"
              + parts[3] + parts[4] + "-" + "".join(parts[5:]))
        return gu.upper()

    def ResetParserState(self):
        """Clear per-file parsing state so the parser can be reused."""
        self.ConditionalStack = []
        self.CurrentSection = ''
        self.CurrentFullSection = ''
        self.Parsed = False
#
# Base Class for Edk2 build files that use # for comments
#
class HashFileParser(BaseParser):
    """Base class for EDK2 build files that use '#' for comments."""

    def __init__(self, log):
        BaseParser.__init__(self, log)

    def StripComment(self, l):
        """Drop everything from the first '#' onward and trim whitespace."""
        code, _sep, _comment = l.partition('#')
        return code.strip()

    def ParseNewSection(self, l):
        """Return (True, section_name) when *l* is a '[...]' section header,
        else (False, "").  Also records CurrentFullSection as a side effect.
        """
        if l.count("[") == 1 and l.count("]") == 1:  # new section
            stripped = l.strip().lstrip("[")
            self.CurrentFullSection = stripped.split(",")[0].rstrip("]").strip()
            section = stripped.split(".")[0].split(",")[0].rstrip("]").strip()
            return (True, section)
        return (False, "")
|
<filename>scripts/data_to_JSON.py<gh_stars>0
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""Converts data from DGS (Direction Générale de la Santé)
in CSV format to a JSON object.
"""
#
# Modules to import
#
import csv
import json
import datetime
def format_date(date):
    """Reverse a '/'-separated date, e.g. '13/04/2020' -> '2020-04-13'.

    Dates without a '/' are assumed to be already normalised and are
    returned unchanged.  NOTE(review): the original docstring claimed
    YYYY/MM/DD -> DD-MM-YYYY; the code simply reverses the three fields,
    so either direction holds — confirm the actual input format.
    """
    if '/' not in date:
        return date
    parts = date.split('/')
    return f'{parts[2]}-{parts[1]}-{parts[0]}'
def get_recent_date():
    """Return the most recent 'jour' value found in the incidence CSV.

    The dates appear to be ISO 'YYYY-MM-DD' strings, so the lexicographic
    maximum is also the chronological maximum (same assumption as the
    original code's max-over-set).  Improvement: stream the column through
    ``max`` instead of materialising every date in a set first.
    """
    with open('./sp-pe-std-quot-dep.csv', newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=';')
        return max(row['jour'] for row in reader)
#
# Main function
#
def main():
    """Build the enriched geoJSON map and the per-sector JSON metric files.

    Reads the DGS hospital and incidence CSV files, aggregates the figures
    per department and nationwide ('france'), then writes:
      * ../data/covid-france.json      geoJSON with metrics per department
      * ../data/metrics-<sector>.json  one file per department + 'france'
    """
    # Now
    date = datetime.datetime.now()
    # NOTE(review): `today` is computed but never used below — confirm
    # whether it was meant to stamp the output instead of get_recent_date().
    today = f'{date.year}-{date.month}-{date.day}'
    # Paths to the files
    path_to_geo = './departements.geojson'
    path_to_data = './donnees-hospitalieres.csv'
    path_to_incidence = './sp-pe-std-quot-dep.csv'
    path_to_incidence_fr = './sp-pe-std-quot-fra.csv'
    path_to_geo_full = '../data/covid-france.json'
    # Useful structures to analyse the data
    accounts = dict()    # Dictionary with the accounts
    dates = set()        # All the dates listed
    departments = set()  # All the departments listed
    # Reading the CSV file — first pass collects every date and department
    # code present in the data.
    with open(path_to_data, newline='') as csvfile:
        fieldnames = ['code', 'sex', 'date', 'hosp', 'rea', 'hosp_conv', 'ssr_usld', 'autres', 'rad', 'dc']
        lines = csv.DictReader(csvfile, delimiter=';', fieldnames=fieldnames)
        for idx, line in enumerate(lines):
            # Skips the header (fieldnames are forced, so row 0 is the header)
            if idx != 0:
                # Dates do not all respect the same format
                date = format_date(line['date'])
                # Lists all the dates and all the departments
                dates.add(date)
                departments.add(line['code'])
    # Each department is set up with an empty account for each date.
    # NOTE(review): the "france" entry is rebuilt identically on every loop
    # iteration; it only needs to be added once.
    for department in departments:
        accounts.update({
            department: {
                'deceased': { date: dict() for date in dates },
                'rea': { date: dict() for date in dates },
                'hosp': { date: dict() for date in dates },
                'incidence': { date: dict() for date in dates },
            },
            "france": {
                'deceased': {
                    date: {
                        "0": int(),
                        "1": int(),
                        "2": int()
                    }
                    for date in dates
                },
                'rea': {
                    date: {
                        "0": int(),
                        "1": int(),
                        "2": int()
                    }
                    for date in dates
                },
                'hosp': {
                    date: {
                        "0": int(),
                        "1": int(),
                        "2": int()
                    }
                    for date in dates
                },
                'incidence': {
                    date: float() for date in dates
                }
            }
        })
    # Reading the data CSV files — second pass fills the accounts
    with open(path_to_data, newline='') as hospi_file,\
        open(path_to_incidence, newline='') as incidence_file,\
        open(path_to_incidence_fr, newline='') as incidence_fr_file :
        # Fieldnames
        hospi_fieldnames = ['code', 'sex', 'date', 'hosp', 'rea', 'hosp_conv', 'ssr_usld', 'autres', 'rad', 'dc']
        incidence_fieldnames = ['code', 'date', 'pop', 'P', 'tx_std']
        # Fetch rows
        hospi_rows = csv.DictReader(hospi_file, delimiter=';', fieldnames=hospi_fieldnames)
        incidence_rows = csv.DictReader(incidence_file, delimiter=';', fieldnames=incidence_fieldnames)
        incidence_fr_rows = csv.DictReader(incidence_fr_file, delimiter=';', fieldnames=incidence_fieldnames)
        # Firstly, the data about hospitalisations
        for idx, row in enumerate(hospi_rows):
            if idx != 0:
                # Dates do not all respect the same format
                date = format_date(row['date'])
                # Updates the account of each department with:
                # - the total amount of deceased people;
                # - the number of people in reanimation at the day;
                # - the number of people admitted in hospital at the day.
                accounts[row['code']]['deceased'][date][row['sex']] = row['dc']
                accounts[row['code']]['rea'][date][row['sex']] = row['rea']
                accounts[row['code']]['hosp'][date][row['sex']] = row['hosp']
                # Updates the nationwide metrics (summed as ints, unlike the
                # per-department values which stay strings)
                accounts['france']['deceased'][date][row['sex']] += int(row['dc'])
                accounts['france']['rea'][date][row['sex']] += int(row['rea'])
                accounts['france']['hosp'][date][row['sex']] += int(row['hosp'])
        # Secondly, data about the incidence rate
        for idx, row in enumerate(incidence_rows):
            # Updates the account of each department with :
            # - the standard incidence rate.
            # Codes 975/977/978 are skipped — presumably overseas areas not
            # present in the hospital data; TODO confirm.
            if idx != 0 and row['code'] not in ['975', '977', '978']:
                accounts[row['code']]['incidence'][row['date']] = row['tx_std']
        # Lastly, the same data as above, but nationwide
        for idx, row in enumerate(incidence_fr_rows):
            if idx != 0:
                accounts['france']['incidence'][row['date']] = row['tx_std']
    # Sorts the metrics by date (ISO date strings sort chronologically)
    for dept in accounts:
        accounts[dept]['deceased'] = dict(sorted(accounts[dept]['deceased'].items(), key=lambda item: item[0]))
        accounts[dept]['rea'] = dict(sorted(accounts[dept]['rea'].items(), key=lambda item: item[0]))
        accounts[dept]['hosp'] = dict(sorted(accounts[dept]['hosp'].items(), key=lambda item: item[0]))
        accounts[dept]['incidence'] = dict(sorted(accounts[dept]['incidence'].items(), key=lambda item: item[0]))
    # Reading the geoJSON file of the departments
    with open(path_to_geo) as geojson:
        output = json.load(geojson)
    # Writes a geoJSON file
    with open(path_to_geo_full, 'w') as jsonfile:
        # The most recent recorded date
        recent = get_recent_date()
        jsonfile.write('{"date":"' + recent + '", "type":"FeatureCollection","features":[')
        for idx, department in enumerate(output['features']):
            # Code of the department (e.g.: 75, 01, 37…)
            code = department['properties']['code']
            # Looks up in the accounts the records for this particular
            # department.  NOTE(review): accounts.get(code) returns None for
            # a geoJSON code absent from the CSV, which would raise
            # TypeError here — verify the two data sets always agree.
            department['properties'].update({
                'deceased': accounts.get(code)['deceased'],
                'rea': accounts.get(code)['rea'],
                'hosp': accounts.get(code)['hosp'],
                'incidence': accounts.get(code)['incidence'],
            })
            # As a JSON formatted stream
            json.dump(department, jsonfile)
            if (idx + 1) < len(output['features']):
                jsonfile.write(',\n')
        # Closes the FeatureCollection and the geoJSON file
        jsonfile.write('\n]}\n')
    # Writes the nationwide metrics and the specific ones to a department
    for sector in accounts:
        if sector:
            with open(f'../data/metrics-{sector}.json', 'w') as jsonfile:
                json.dump(accounts[sector], jsonfile)
#
# Main
#
# Script entry point: build the enriched geoJSON and per-sector metric files.
if __name__ == '__main__':
    main()
|
<reponame>chrinide/AutoML_Alex
from .base import *
import lightgbm as lgb
import numpy as np
import pandas as pd
class LightGBM(ModelBase):
    """
    LightGBM model wrapper for the AutoML pipeline.

    Args:
        params (dict or None): parameters for model.
                               If None default params are fetched.
    """
    __name__ = 'LightGBM'

    def _init_default_wrapper_params(self,):
        """
        Default wrapper_params (early stopping disabled by default).
        """
        wrapper_params = {
            'early_stopping':False,
        }
        return(wrapper_params)

    def _init_default_model_param(self,):
        """
        Default model_param handed to lgb.train.
        """
        model_param = {'random_seed': self._random_state,
                       'early_stopping_rounds': 50,
                       'num_iterations': 200,
                       'verbose': -1,
                       'device_type': 'gpu' if self._gpu else 'cpu',
                       }
        if self.type_of_estimator == 'classifier':
            model_param['objective'] = 'binary'
        # With early stopping enabled, allow a much larger iteration budget
        # and, for ROC-AUC runs, track 'auc' as the stopping metric.
        if self.wrapper_params['early_stopping']:
            model_param['num_iterations'] = 1000
            if self.metric is not None:
                if self.metric.__name__ == 'roc_auc_score':
                    model_param['metric'] = 'auc'
        return(model_param)

    #@staticmethod
    def get_model_opt_params(self, trial, model, opt_lvl, metric_name):
        """
        Sample a hyper-parameter set from an Optuna *trial*.

        Higher *opt_lvl* values progressively widen the search space; the
        `== lvl` blocks hold ranges specific to that level, the `>= lvl`
        blocks accumulate.

        Return:
            dict of DistributionWrappers
        """
        model_param = model._init_default_model_param()
        ################################# LVL 1 ########################################
        if opt_lvl == 1:
            model_param['num_leaves'] = trial.suggest_int('lgbm_num_leaves', 2, 50,)
        if opt_lvl >= 1:
            # Scale min_child_samples with the training-set size when there
            # is enough data to matter.
            if len(self._data.X_train) > 1000:
                model_param['min_child_samples'] = trial.suggest_int('lgbm_min_child_samples', 2, \
                    (len(self._data.X_train)//100))
            else:
                model_param['min_child_samples'] = trial.suggest_int('lgbm_min_child_samples', 2, 7)
        ################################# LVL 2 ########################################
        if opt_lvl == 2:
            model_param['learning_rate'] = trial.suggest_int('lgbm_learning_rate', 1, 11)/100
            model_param['num_leaves'] = trial.suggest_int('lgbm_num_leaves', 2, 50,)
            if not model.wrapper_params['early_stopping']:
                model_param['num_iterations'] = trial.suggest_int('lgbm_num_iterations', 1, 3,)*100
        if opt_lvl >= 2:
            model_param['bagging_fraction'] = trial.suggest_discrete_uniform('lgbm_bagging_fraction', 0.4, 1., 0.1)
            # Only tune the complementary knobs when bagging is actually on.
            if model_param['bagging_fraction'] < 1.:
                model_param['feature_fraction'] = trial.suggest_discrete_uniform('lgbm_feature_fraction', 0.3, 1., 0.1)
                model_param['bagging_freq'] = trial.suggest_int('lgbm_bagging_freq', 2, 11,)
        ################################# LVL 3 ########################################
        if opt_lvl == 3:
            model_param['learning_rate'] = trial.suggest_int('lgbm_learning_rate', 1, 100)/1000
            if not model.wrapper_params['early_stopping']:
                model_param['num_iterations'] = trial.suggest_int('lgbm_num_iterations', 1, 11,)*100
        if opt_lvl >= 3:
            model_param['num_leaves'] = trial.suggest_int('lgbm_num_leaves', 2, 100,)
        ################################# LVL 4 ########################################
        if opt_lvl == 4:
            model_param['learning_rate'] = trial.suggest_loguniform('lgbm_learning_rate', 1e-3, .1)
        if opt_lvl >= 4:
            model_param['boosting'] = trial.suggest_categorical('lgbm_boosting', ['gbdt', 'dart',])
            # DART is incompatible with early stopping, so it is disabled
            # and an explicit (large) iteration budget is sampled instead.
            if model_param['boosting'] == 'dart':
                model_param['early_stopping_rounds'] = 0
                model_param['uniform_drop'] = trial.suggest_categorical('lgbm_uniform_drop', [True, False])
                model_param['xgboost_dart_mode'] = trial.suggest_categorical('lgbm_xgboost_dart_mode', [True, False])
                model_param['drop_rate'] = trial.suggest_loguniform('lgbm_drop_rate', 1e-8, 1.0)
                model_param['max_drop'] = trial.suggest_int('lgbm_max_drop', 0, 100)
                model_param['skip_drop'] = trial.suggest_loguniform('lgbm_skip_drop', 1e-3, 1.0)
                model_param['num_iterations'] = trial.suggest_int('lgbm_num_iterations', 1, 6,)*1000
            if model.type_of_estimator == 'classifier':
                model_param['objective'] = trial.suggest_categorical('lgbm_objective',
                    [
                    'binary',
                    'cross_entropy',
                    ])
            elif model.type_of_estimator == 'regression':
                model_param['objective'] = trial.suggest_categorical('lgbm_objective',
                    [
                    'regression',
                    'regression_l1',
                    'mape',
                    'huber',
                    'quantile',
                    ])
        ################################# LVL 5 ########################################
        if opt_lvl >= 5:
            model_param['max_cat_threshold'] = trial.suggest_int('lgbm_max_cat_threshold', 1, 100)
            model_param['min_child_weight'] = trial.suggest_loguniform('lgbm_min_child_weight', 1e-6, 1.0)
            model_param['learning_rate'] = trial.suggest_loguniform('lgbm_learning_rate', 1e-5, .1)
            model_param['reg_lambda'] = trial.suggest_loguniform('lgbm_reg_lambda', 1e-8, 1.0)
            model_param['reg_alpha'] = trial.suggest_loguniform('lgbm_reg_alpha', 1e-8, 1.0)
            model_param['max_bin'] = trial.suggest_int('lgbm_max_bin', 1, 5,)*50
            #self.model_param['extra_trees'] = trial.suggest_categorical('lgbm_extra_trees', [True, False])
            model_param['enable_bundle'] = trial.suggest_categorical('lgbm_enable_bundle', [True, False])
        ################################# Other ########################################
        # For threshold-sensitive classification metrics, also tune the
        # positive-class weight.
        if model.type_of_estimator == 'classifier':
            if metric_name not in ['roc_auc_score', 'log_loss', 'brier_score_loss']:
                model_param['scale_pos_weight'] = trial.suggest_discrete_uniform('lgbm_scale_pos_weight', 0.1, 1., 0.1)
        return(model_param)

    def _fit(self, model=None, X_train=None, y_train=None, X_test=None, y_test=None,):
        """
        Train a LightGBM booster.

        Args:
            X (pd.DataFrame, shape (n_samples, n_features)): the input data
            y (pd.DataFrame, shape (n_samples, ) or (n_samples, n_outputs)): the target data
        Return:
            model (Class)
        """
        if model is None:
            model = self
        if (X_train is None) or (y_train is None):
            X_train = model._data.X_train
            y_train = model._data.y_train
        dtrain = lgb.Dataset(X_train, y_train,)
        params = model.model_param.copy()
        num_iterations = params.pop('num_iterations')
        if model.wrapper_params['early_stopping'] and (X_test is not None):
            # 'early_stopping_rounds' is deliberately left inside params:
            # lgb.train reads it from there and stops on the validation set.
            dtest = lgb.Dataset(X_test, y_test,)
            model.model = lgb.train(params,
                dtrain,
                num_boost_round=num_iterations,
                valid_sets=(dtrain, dtest),
                verbose_eval=False,
                )
        else:
            # No validation set: strip early stopping and train the full budget.
            early_stopping_rounds = params.pop('early_stopping_rounds')
            model.model = lgb.train(
                params,
                dtrain,
                num_boost_round=num_iterations,
                verbose_eval=False,
                )
        # Drop dataset references so they can be garbage-collected.
        dtrain=None
        dtest=None
        return model

    def _predict(self, X=None):
        """
        Predict labels (classifier, via 0.5-rounded probabilities) or values
        (regression).

        Args:
            X (np.array, shape (n_samples, n_features)): the input data
        Return:
            np.array, shape (n_samples, n_classes)
        """
        if self.model is None:
            raise Exception("No fit models")
        if X is None:
            X = self._data.X_test
        if self.type_of_estimator == 'classifier':
            # Binary objective: booster outputs probabilities; round to labels.
            predicts = np.round(self.model.predict(X),0)
        elif self.type_of_estimator == 'regression':
            predicts = self.model.predict(X)
        return predicts

    def is_possible_predict_proba(self):
        """
        Return:
            bool, whether model can predict proba
        """
        return True

    def _predict_proba(self, X=None):
        """
        Predict class probabilities.

        Args:
            X (np.array, shape (n_samples, n_features)): the input data
        Return:
            np.array, shape (n_samples, n_classes)
        """
        if X is None:
            X = self._data.X_test
        if self.model is None:
            raise Exception("No fit models")
        if not self.is_possible_predict_proba():
            raise Exception("Model cannot predict probability distribution")
        return self.model.predict(X)

    def is_possible_feature_importance(self):
        """
        Return:
            bool, whether model can report feature importance
        """
        return True

    def _get_feature_importance(self, train_x, importance_type='gain',):
        """
        Feature importances of the fitted booster.

        Return:
            list feature_importance (as a DataFrame indexed by column name)
        """
        if not self.is_possible_feature_importance():
            raise Exception("Model cannot get feature_importance")
        fe_lst = self.model.feature_importance(importance_type=importance_type)
        return (pd.DataFrame(fe_lst, index=train_x.columns))
class LightGBMClassifier(LightGBM):
    """LightGBM model wrapper configured for binary classification."""
    type_of_estimator='classifier'
class LightGBMRegressor(LightGBM):
    """LightGBM model wrapper configured for regression.

    Fix: removed a stray trailing '|' artifact (file-concatenation residue)
    that made the line a syntax error.
    """
    type_of_estimator='regression'
#!/usr/bin/env python
# coding: utf-8
import sys
import os
from datetime import datetime, timedelta
import urllib
import matplotlib as mpl
# mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from scipy.integrate import odeint
import scipy.signal
import pandas as pd
import seaborn as sns
# Global plotting style: paper context with slightly larger fonts, plus a
# named red/blue/green Set1 palette used to color per-country curves.
sns.set_context('paper', font_scale=1.3)
red, blue, green = sns.color_palette('Set1', 3)
colors = {'red':red, 'blue':blue, 'green':green}
from click_spinner import spinner
from inference import get_last_NPI_date
from inference import get_first_NPI_date
from inference import params_bounds
from inference import get_model_class
from inference import find_start_day
from model.normal_prior_model import NormalPriorModel
from model.fixed_tau_model import FixedTauModel
from sklearn.metrics import mean_squared_error
def int_to_dt(t):
    """Map day offset *t* to a timestamp relative to the global start_date."""
    base = pd.to_datetime(start_date)
    return base + timedelta(days=t)
def date_to_int(x):
    """Map a 'Mon DD' string (assumed year 2020) to a day offset from the
    global start_date."""
    parsed = datetime.strptime(x + ' 2020', '%b %d %Y')
    return (parsed - start_date).days
def date_to_date(x):
    """Parse a 'Mon DD' string as a datetime in the year 2020 (e.g. 'Apr 11')."""
    return datetime.strptime(x + ' 2020', '%b %d %Y')
def τ_to_string(τ, start_date):
    """Format day offset *τ* from *start_date* as a 'Mon DD' label."""
    moment = pd.to_datetime(start_date) + timedelta(days=τ)
    return moment.strftime('%b %d')
def load_chain(job_id=None, fname=None, delete_chain_less_than=None, nburn=2_000_000):
    """Load an MCMC chain saved by the inference step.

    :param job_id: used (with the global output_folder/country) to build the
        npz path when *fname* is not given
    :param fname: explicit path to the .npz archive
    :param delete_chain_less_than: if set, drop the single chain whose τ
        sample at step 1,000,000 is below this threshold
    :param nburn: number of leading samples discarded from every chain
    :return: (flattened chain, logliks, Td1, Td2, model_type, X, start_date, N)
    """
    with spinner():
        if fname is None:
            fname = os.path.join(output_folder, job_id, 'inference', '{}.npz'.format(country))
        inference_data = np.load(fname)
        chain = inference_data['chain']
        var_names = list(inference_data['var_names'])
        nsteps, ndim, N, Td1, Td2, model_type = inference_data['params']
        X = inference_data['incidences']
        start_date = inference_data['start_date']
        logliks = inference_data['logliks']
        nchains, nsteps, ndim = chain.shape
        if delete_chain_less_than:
            # BUGFIX: the original tested len(mask.nonzero()) > 1, but
            # ndarray.nonzero() returns a 1-tuple of index arrays, so the
            # "too many bad chains" guard could never fire.  Inspect the
            # index array itself instead.
            bad_chains = (chain[:, 1_000_000, var_names.index('τ')] < delete_chain_less_than).nonzero()[0]
            if len(bad_chains) > 1:
                raise AssertionError('too many bad chains')
            bad_chain_ind = bad_chains[0]
            chain = np.delete(chain, bad_chain_ind, axis=0)
        # Discard burn-in and flatten (chains*steps, ndim).
        chain = chain[:, nburn:, :]
        chain = chain.reshape((-1, ndim))
        # logliks are reshaped with the pre-deletion chain count, then the
        # bad chain's row is dropped to stay aligned with `chain`.
        logliks = logliks.reshape((nchains, nsteps))
        if delete_chain_less_than:
            logliks = np.delete(logliks, bad_chain_ind, axis=0)
        logliks = logliks[:, nburn:].ravel()
    return chain, logliks, Td1, Td2, model_type, X, start_date, N
def posterior_prediction(chain, model, nreps):
    """Draw *nreps* parameter vectors from *chain* (with replacement) and
    simulate a daily-cases trajectory for each one."""
    sampled_rows = np.random.choice(chain.shape[0], nreps)
    simulations = [model.generate_daily_cases(theta) for theta in chain[sampled_rows]]
    return np.array(simulations)
def load_data(country_name, up_to_date=None):
    """Load the daily-cases series for *country_name*.

    :param country_name: 'Wuhan' (local Incidence.csv) or an ECDC country name
    :param up_to_date: optional cutoff date; later rows are dropped
    :return: (X, T) — X is the np.array of daily case counts from the
        detected outbreak start day onward, T the matching date Series.
    NOTE(review): the population N is computed in both branches but never
    returned — confirm whether callers were meant to receive it.
    """
    if country_name=='Wuhan':
        df = pd.read_csv('../data/Incidence.csv')
        df['date'] = pd.to_datetime(df['Date'], dayfirst=True)
        df['cases'] = df[country_name]
        df = df[::-1] # TODO why?
        N = pd.read_csv('../data/pop.csv', index_col='City').loc[country_name].values[0]
    else:
        # ECDC data mirrored by Imperial College; downloaded once and cached.
        url = 'https://github.com/ImperialCollegeLondon/covid19model/raw/master/data/COVID-19-up-to-date.csv'
        fname = '../data/COVID-19-up-to-date_master.csv'
        if not os.path.exists(fname):
            urllib.request.urlretrieve(url, fname)
        df = pd.read_csv(fname, encoding='iso-8859-1')
        df['date'] = pd.to_datetime(df['dateRep'], format='%d/%m/%Y')
        df = df[df['countriesAndTerritories'] == country_name]
        N = df.iloc[0]['popData2018']
    # Source files are newest-first; reverse into chronological order.
    cases_and_dates = df.iloc[::-1][['cases','date']]
    if up_to_date:
        # Truncate for out-of-sample evaluation.
        cases_and_dates = cases_and_dates[cases_and_dates['date']<=up_to_date]
    start_date = find_start_day(cases_and_dates)
    X = cases_and_dates.loc[cases_and_dates['date'] >= start_date, 'cases'].values
    T = cases_and_dates.loc[cases_and_dates['date'] >= start_date, 'date']
    return X, T
if __name__ == '__main__':
    # Posterior-predictive check: simulate daily cases from the fitted
    # posterior and compare against observations before/after the NPI
    # evaluation threshold; writes a p-value, RMSEs and a figure.
    nreps = 1000
    date_threshold = datetime(2020, 4, 11)
    last_date = datetime(2020, 4, 11) + timedelta(15)
    output_folder = r'../../output-tmp'
    job_id = sys.argv[1]
    country = sys.argv[2]
    # BUGFIX: this tested `len(sys.argv) > 2` before reading sys.argv[3],
    # which raised IndexError whenever only job_id and country were given.
    if len(sys.argv) > 3:
        color = sys.argv[3]
        # Named palette colors are resolved; anything else (e.g. a hex
        # string) is passed through to matplotlib as-is.
        if color in colors:
            color = colors[color]
    else:
        color = blue
    X, T = load_data(country, up_to_date=last_date)
    idx = date_threshold < T
    ndays = len(X)
    chain_fname = os.path.join(output_folder, job_id, 'inference', '{}.npz'.format(country))
    delete_chain_less_than = None
    if job_id=='7M' and country=='Spain': #TODO make input parameter
        delete_chain_less_than = 15
    chain, _, Td1, Td2, model_type, _, start_date, N = load_chain(fname=chain_fname,delete_chain_less_than=delete_chain_less_than)
    # Lightly smoothed observations for plotting (window 3, poly order 1).
    X_mean = scipy.signal.savgol_filter(X, 3, 1)
    model_class = get_model_class(model_type)
    model = model_class(country, X, pd.to_datetime(start_date), N, get_last_NPI_date(country), get_first_NPI_date(country), params_bounds, Td1, Td2)
    X_pred = posterior_prediction(chain, model, nreps)
    pvalue = (X_pred[:,idx].max(axis=1) > X[idx].max()).mean() # P(max(X_pred) > max(X))
    pvalue_file = os.path.join(output_folder, job_id, 'figures', 'ppc_pvalue.txt')
    with open(pvalue_file, 'at') as f:
        print("{}\t{:.4g}".format(country, pvalue), file=f)
    # RMSE over the unseen 7- and 14-day windows past the threshold.
    unseen_idxs_14 = T > date_threshold
    unseen_idxs_7 = (T > date_threshold) & (T < date_threshold+timedelta(8))
    rmse7 = np.sqrt([mean_squared_error(X[unseen_idxs_7],pred) for pred in X_pred[:,unseen_idxs_7]]).mean()
    rmse14 = np.sqrt([mean_squared_error(X[unseen_idxs_14],pred) for pred in X_pred[:,unseen_idxs_14]]).mean()
    rmse_file = os.path.join(output_folder, job_id, 'figures', 'ppc_rmse.csv')
    with open(rmse_file, 'at') as f:
        print("{}\t{:.4g}\t{:.4g}".format(country, rmse7, rmse14), file=f)
    fig, ax = plt.subplots(1, 1, figsize=(6, 4), sharex=True, sharey=True)
    ymax = min(X.max()*2, max(X.max(), X_pred.max()))
    t = np.arange(0, ndays)
    # Observed (markers) and smoothed (lines) data; dashed past threshold.
    ax.plot(t[~idx], X[~idx], 'o', color='k', alpha=0.5)
    ax.plot(t[~idx], X_mean[~idx], '-', color='k')
    ax.plot(t[idx], X[idx], '*', color='k', alpha=0.5)
    ax.plot(t[idx], X_mean[idx], '--', color='k')
    ax.plot(X_pred.T, color=color, alpha=0.01)
    ax.axvline((date_threshold-pd.to_datetime(start_date)).days, color='k', ls='--', lw=2)
    labels = [τ_to_string(int(d), start_date) for d in t[::5]]
    ax.set_xticks(t[::5])
    ax.set_xticklabels(labels, rotation=45)
    ax.set(ylabel='Daily cases', ylim=(-10, ymax))
    # Black arrow: date of the last NPI for this country.
    NPI_dates = pd.read_csv('../data/NPI_dates.csv')
    last_date = pd.to_datetime(NPI_dates.loc[NPI_dates['Country'] == country.replace('_', ' '), 'Last'].values[0])
    last_date_days = (last_date - pd.to_datetime(start_date)).days
    ax.annotate("", xy=(last_date_days, 0), xytext=(last_date_days-0.5, ymax*0.075), arrowprops=dict(arrowstyle="-|>",facecolor='black'))
    # White arrow: posterior median of the free τ parameter (models 1/2).
    if model_type==2 or model_type==1: #have free param τ
        τ_med = np.median(chain[:,-1])
        ax.annotate(r'', xy=(τ_med, 0), xytext=(τ_med-0.5, ymax*0.075), arrowprops=dict(arrowstyle="-|>",facecolor='white'))
    # fig.suptitle(country.replace('_', ' '))
    fig.tight_layout()
    sns.despine()
    # plt.show()
    fig_filename = os.path.join(output_folder, job_id, 'figures', '{}_ppc_long.pdf'.format(country))
    print("Saving to {}".format(fig_filename))
    fig.savefig(fig_filename)
|
<reponame>sdolenc/aws-elastic-beanstalk-cli
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
from zipfile import ZipFile
from cement.utils.misc import minimal_logger
from ebcli.operations import gitops, buildspecops, commonops, statusops
from ebcli.operations.tagops import tagops
from ebcli.operations.tagops.taglist import TagList
from ebcli.lib import cloudformation, elasticbeanstalk, heuristics, iam, utils
from ebcli.lib.aws import InvalidParameterValueError
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import NotAuthorizedError
from ebcli.resources.strings import strings, responses, prompts
from ebcli.resources.statics import iam_attributes
import json
LOG = minimal_logger(__name__)
DEFAULT_ROLE_NAME = 'aws-elasticbeanstalk-ec2-role'
DEFAULT_SERVICE_ROLE_NAME = 'aws-elasticbeanstalk-service-role'
DEFAULT_SERVICE_ROLE_POLICIES = [
'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth',
'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService'
]
def make_new_env(
        env_request,
        branch_default=False,
        process_app_version=False,
        nohang=False,
        interactive=True,
        timeout=None,
        source=None,
):
    """Create a new Elastic Beanstalk environment end to end.

    Resolves IAM roles, creates/locates an application version (from a
    remote source, CodeCommit, the project directory, or a dummy sample),
    creates the environment, and optionally streams creation events.

    :param env_request: environment create request (mutated in place)
    :param branch_default: make this environment the branch default
    :param process_app_version: wait for app-version pre-processing
    :param nohang: return immediately after the create call
    :param interactive: allow user prompts
    :param timeout: minutes to wait for success events
    :param source: optional remote source spec for the app version
    """
    resolve_roles(env_request, interactive)
    build_config = None
    if fileoperations.build_spec_exists():
        build_config = fileoperations.get_build_configuration()
        LOG.debug("Retrieved build configuration from buildspec: {0}".format(build_config.__str__()))
    codecommit_setup = gitops.git_management_enabled()
    # Pick how the application version is produced, unless the caller asked
    # for the sample app or supplied a version label already.
    if not env_request.sample_application and not env_request.version_label:
        if source is not None:
            io.log_info('Creating new application version using remote source')
            io.echo("Starting environment deployment via remote source")
            env_request.version_label = commonops.create_app_version_from_source(
                env_request.app_name, source, process=process_app_version, label=env_request.version_label,
                build_config=build_config)
            process_app_version = True
        elif codecommit_setup:
            io.log_info('Creating new application version using CodeCommit')
            io.echo("Starting environment deployment via CodeCommit")
            env_request.version_label = \
                commonops.create_codecommit_app_version(env_request.app_name, process=process_app_version,
                                                        build_config=build_config)
            process_app_version = True
        else:
            io.log_info('Creating new application version using project code')
            env_request.version_label = \
                commonops.create_app_version(env_request.app_name, process=process_app_version,
                                             build_config=build_config)
            # A buildspec-driven build streams its own progress here.
            if build_config is not None:
                buildspecops.stream_build_configuration_app_version_creation(
                    env_request.app_name,
                    env_request.version_label,
                    build_config
                )
    elif process_app_version is True:
        success = commonops.wait_for_processed_app_versions(
            env_request.app_name,
            [env_request.version_label],
            timeout=timeout or 5
        )
        if not success:
            return
    # Fall back to the sample/dummy application version when needed.
    if env_request.version_label is None or env_request.sample_application:
        env_request.version_label = \
            commonops.create_dummy_app_version(env_request.app_name)
    if env_request.key_name:
        commonops.upload_keypair_if_needed(env_request.key_name)
    download_sample_app = None
    if interactive:
        download_sample_app = should_download_sample_app()
    io.log_info('Creating new environment')
    result, request_id = create_env(env_request,
                                    interactive=interactive)
    env_name = result.name
    # Make this environment the branch default if none exists (or forced).
    default_env = commonops.get_current_branch_environment()
    if not default_env or branch_default:
        commonops.set_environment_for_current_branch(env_name)
        if codecommit_setup:
            io.echo("Setting up default branch")
            gitops.set_branch_default_for_current_environment(gitops.get_default_branch())
            gitops.set_repo_default_for_current_environment(gitops.get_default_repository())
    if download_sample_app:
        download_and_extract_sample_app(env_name)
    result.print_env_details(
        io.echo,
        elasticbeanstalk.get_environments,
        elasticbeanstalk.get_environment_resources,
        health=False
    )
    statusops.alert_environment_status(result)
    if nohang:
        return
    io.echo('Printing Status:')
    commonops.wait_for_success_events(request_id,
                                      timeout_in_minutes=timeout)
def should_download_sample_app():
    """
    Method determines whether the present directory is empty. If yes, it allows
    the user to choose to download the sample application that the environment
    will be launched with.

    Improvements: early return flattens the original nesting, and the
    redundant ``True if ... else False`` is replaced by the boolean test
    itself.  Behavior is unchanged (non-empty directory -> False).

    :return: User's choice of whether the sample application should be downloaded
    """
    if not heuristics.directory_is_empty():
        return False
    io.echo(strings['create.sample_application_download_option'])
    user_input = download_sample_app_user_choice()
    # Re-prompt until a recognisable y/n answer is given.
    while user_input not in ['y', 'n', 'Y', 'N']:
        io.echo(strings['create.user_choice_error'].format(user_choice=user_input))
        user_input = download_sample_app_user_choice()
    return user_input in ['y', 'Y']
def download_and_extract_sample_app(env_name):
    """
    Method orchestrates the retrieval, and extraction of application version.

    :param env_name: The name of the environment whose application version will be downloaded.
    :return: None
    """
    try:
        url = retrieve_application_version_url(env_name)
        zip_file_location = '.elasticbeanstalk/.sample_app_download.zip'
        io.echo('INFO: {}'.format(strings['create.downloading_sample_application']))
        download_application_version(url, zip_file_location)
        # allowZip64 permits extracting unusually large bundles; the archive
        # is removed once its contents are in the working directory.
        ZipFile(zip_file_location, 'r', allowZip64=True).extractall()
        os.remove(zip_file_location)
        io.echo('INFO: {}'.format(strings['create.sample_application_download_complete']))
    except NotAuthorizedError as e:
        # The download is best-effort: warn and keep creating the environment.
        io.log_warning('{} Continuing environment creation.'.format(e.message))
    except cloudformation.CFNTemplateNotFound as e:
        io.log_warning('{} Continuing environment creation.'.format(e.message))
def download_application_version(url, zip_file_location):
    """
    Method downloads the application version from the URL, 'url', and
    writes them at the location specified by `zip_file_location`

    :param url: the URL of the application version.
    :param zip_file_location: path on the user's system to write the application version ZIP file to.
    :return: None
    """
    # 30s timeout guards against a hung download blocking env creation.
    data = utils.get_data_from_url(url, timeout=30)
    fileoperations.write_to_data_file(zip_file_location, data)
def retrieve_application_version_url(env_name):
    """
    Method retrieves the URL of the application version of the environment, 'env_name',
    for the CLI to download from.

    The method waits for the CloudFormation stack associated with `env_name` to come
    into existence, after which, it retrieves the 'url' of the application version.

    :param env_name: Name of the environment that launched with the sample application
    :return: The URL of the application version, or None if the template has
        no AppSource parameter.
    """
    env = elasticbeanstalk.get_environment(env_name=env_name)
    # Beanstalk names its backing stack 'awseb-<env-id>-stack'.
    cloudformation_stack_name = 'awseb-' + env.id + '-stack'
    cloudformation.wait_until_stack_exists(cloudformation_stack_name)
    template = cloudformation.get_template(cloudformation_stack_name)
    url = None
    try:
        url = template['TemplateBody']['Parameters']['AppSource']['Default']
    except KeyError:
        # Template has no AppSource parameter; caller receives None.
        io.log_warning('{}. '.format(strings['cloudformation.cannot_find_app_source_for_environment']))
    return url
def download_sample_app_user_choice():
    """
    Method accepts the user's choice of whether the sample application should be downloaded.
    Defaults to 'y' when none is provided.

    :return: user's choice of whether the sample application should be downloaded
    """
    return io.get_input('(Y/n)', default='y')
def create_env(env_request, interactive=True):
    """Call CreateEnvironment, recovering from known parameter errors.

    Retries in a loop after fixing whichever InvalidParameterValueError the
    service reported (missing app, missing platform, taken CNAME/name).

    :param env_request: environment create request (may be mutated)
    :param interactive: allow prompting the user for CNAME/name conflicts
    :return: (environment result, request id) from the service
    """
    # A saved configuration template supplies the platform; withhold ours
    # unless the service complains that none was resolved.
    if env_request.template_name:
        platform = env_request.platform
        env_request.platform = None
    else:
        platform = None
    while True:
        try:
            return elasticbeanstalk.create_environment(env_request)
        except InvalidParameterValueError as e:
            if e.message == responses['app.notexists'].replace(
                    '{app-name}', '\'' + env_request.app_name + '\''):
                # App doesn't exist, must be a new region: create and retry.
                commonops.create_app(env_request.app_name)
            elif e.message == responses['create.noplatform']:
                if platform:
                    env_request.platform = platform
                else:
                    raise
            elif interactive:
                LOG.debug('creating env returned error: ' + e.message)
                if re.match(responses['env.cnamenotavailable'], e.message):
                    io.echo(prompts['cname.unavailable'])
                    io.prompt_for_cname()
                elif re.match(responses['env.nameexists'], e.message):
                    io.echo(strings['env.exists'])
                    current_environments = elasticbeanstalk.get_all_environment_names()
                    unique_name = utils.get_unique_name(env_request.env_name,
                                                        current_environments)
                    env_request.env_name = io.prompt_for_environment_name(
                        default_name=unique_name)
                elif e.message == responses['app.notexists'].replace(
                        '{app-name}', '\'' + env_request.app_name + '\''):
                    # NOTE(review): unreachable — the identical test at the
                    # top of this chain always matches first.
                    commonops.create_app(env_request.app_name)
                else:
                    raise
            else:
                raise
def get_service_role():
    """Return the default service role name, or None when it is known to be absent.

    When IAM listing is not authorized, optimistically assume the role
    exists and return its name anyway (matching the original behavior).
    """
    try:
        existing_roles = iam.get_role_names()
    except NotAuthorizedError:
        return DEFAULT_SERVICE_ROLE_NAME
    if DEFAULT_SERVICE_ROLE_NAME in existing_roles:
        return DEFAULT_SERVICE_ROLE_NAME
    return None
def create_default_service_role():
    """
    Create the default service role with the default trust document and
    managed policies (enhanced health + beanstalk service).

    :return: the default service role name
    :raises NotAuthorizedError: when the caller lacks IAM permissions,
        re-raised with a user-facing hint.
    """
    io.log_info('Creating service role {} with default permissions.'
                .format(DEFAULT_SERVICE_ROLE_NAME))
    trust_document = _get_default_service_trust_document()
    role_name = DEFAULT_SERVICE_ROLE_NAME
    try:
        iam.create_role_with_policy(role_name, trust_document,
                                    DEFAULT_SERVICE_ROLE_POLICIES)
    except NotAuthorizedError as e:
        # NO permissions to create or attach policies: surface a prompt that
        # tells the user which role to create manually.
        raise NotAuthorizedError(prompts['create.servicerole.nopermissions']
                                 .format(DEFAULT_SERVICE_ROLE_NAME, e))
    return DEFAULT_SERVICE_ROLE_NAME
def resolve_roles(env_request, interactive):
    """
    Resolves instance-profile and service-role

    :param env_request: environment request (mutated in place)
    :param interactive: boolean — whether the user may be shown the service
        role policies before the default role is created

    Fix: the local variable previously named ``input`` shadowed the builtin;
    renamed to ``view_choice``.
    """
    LOG.debug('Resolving roles')
    # Use/create the default instance profile unless a saved configuration
    # template will provide one.
    if (
        not env_request.instance_profile or
        env_request.instance_profile == iam_attributes.DEFAULT_ROLE_NAME
    ) and not env_request.template_name:
        env_request.instance_profile = commonops.create_default_instance_profile()
    # Enhanced-health platforms need a service role; create the default one
    # when none was supplied.
    if (
        env_request.platform and
        env_request.platform.has_healthd_support and
        not env_request.service_role and
        not env_request.template_name
    ):
        role = get_service_role()
        if role is None:
            if interactive:
                io.echo()
                io.echo(prompts['create.servicerole.info'])
                view_choice = io.get_input(prompts['create.servicerole.view'],
                                           default='')
                # Typing "view" prints each managed policy document first.
                if view_choice.strip('"').lower() == 'view':
                    for policy_arn in DEFAULT_SERVICE_ROLE_POLICIES:
                        document = iam.get_managed_policy_document(policy_arn)
                        io.echo(json.dumps(document, indent=4))
                    io.get_input(prompts['general.pressenter'])
            role = create_default_service_role()
        env_request.service_role = role
def _get_default_service_trust_document():
    """
    Return the trust-policy JSON (as a string) for the default service role.

    Allows elasticbeanstalk.amazonaws.com to assume the role, restricted by
    the 'elasticbeanstalk' external id.  Includes newlines for pretty
    printing :)
    """
    return """{
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "",
        "Effect": "Allow",
        "Principal": {
            "Service": "elasticbeanstalk.amazonaws.com"
        },
        "Action": "sts:AssumeRole",
        "Condition": {
            "StringEquals": {
                "sts:ExternalId": "elasticbeanstalk"
            }
        }
    }]
}"""
|
import re
import requests
from requests.auth import HTTPBasicAuth
from utils.io_utils import IOUtils
class RestApiService:
    def __init__(self, connection):
        # connection: dict-like holding protocol, ip, port, endpoint, cert,
        # username and password used for every request to the remote agent.
        self.conn = connection
def upload_file(self, remote_path, local_path):
url_format = f"{self.conn.get('protocol')}://{self.conn.get('ip')}:{self.conn.get('port')}/file"
headers = {
"File-Path": remote_path,
"Content-Type": "application/octet-stream"
}
response = requests.post(url_format, headers=headers, data=IOUtils.read_file(local_path),
verify=self.conn.get('cert'),
auth=HTTPBasicAuth(self.conn.get('username'), self.conn.get('password')))
# error, server sent non 200 OK response code
if response.status_code != 200:
raise BaseException("Error: Http code: {}. Http body: {}".format(response.status_code, response.text))
body = response.json()
return body.get('description')
def download_file(self, remote_path, local_path):
url_format = f"{self.conn.get('protocol')}://{self.conn.get('ip')}:{self.conn.get('port')}/file"
headers = {
"File-Path": remote_path,
"Content-Type": "application/octet-stream"
}
response = requests.get(url_format, headers=headers, stream=True, verify=self.conn.get('cert'),
auth=HTTPBasicAuth(self.conn.get('username'), self.conn.get('password')))
response.raw.decode_content = True
# error, server sent non 200 OK response code
if response.status_code != 200:
raise BaseException("Error: Http code: {}.".format(response.status_code))
IOUtils.write_to_file_binary(local_path, raw_response=response.raw)
return f"Saved at location {local_path}"
def send(self, command, keep_state=False, wd=".", wd_cmd="pwd"):
command = command.strip()
url_format = f"{self.conn.get('protocol')}://{self.conn.get('ip')}:{self.conn.get('port')}{self.conn.get('endpoint')}"
headers = {
"Content-Type": "text/plain"
}
cd_cmd = f"cd {wd}&&{command}&&{wd_cmd}" if keep_state else command
response = requests.post(url_format, headers=headers, data=cd_cmd, timeout=5,
verify=self.conn.get('cert'),
auth=HTTPBasicAuth(self.conn.get('username'), self.conn.get('password')))
# error, server sent non 200 OK response code
if response.status_code != 200:
raise BaseException("Error: Http code: {}. Http body: {}".format(response.status_code, response.text))
body = response.json()
# error, the type should be dict
if isinstance(body['description'], str):
return body.get('description'), wd
details = body.get('description').get('commands').get(cd_cmd).get('details')
if re.compile(r"cd\s+(.*)").search(command) and details.get('out') and keep_state:
wd = details.get('out').split("\n")[-2]
return details.get('out') if details.get('err') == "" else details.get('err'), wd.rstrip()
def about(self):
url_format = f"{self.conn.get('protocol')}://{self.conn.get('ip')}:{self.conn.get('port')}/about"
headers = {
"Content-Type": "application/json"
}
response = requests.get(url_format, headers=headers, timeout=5, verify=self.conn.get('cert'),
auth=HTTPBasicAuth(self.conn.get('username'), self.conn.get('password')))
# error, server sent non 200 OK response code
if response.status_code != 200:
raise BaseException("Error: Http code: {}. Http body: {}".format(response.status_code, response.text))
body = response.json()
# error, the type should be dict
if isinstance(body['description'], str):
return body.get('description')
return body.get('description')
def get_os(self):
return self.about().get("system")
def get_wd_cmd(self):
pwd_cmd = "cd" if self.get_os().lower() == "Windows".lower() else "pwd"
return pwd_cmd
|
<reponame>Robertzzel/DHCP-Client_Server<gh_stars>0
from tkinter import StringVar
from tkinter import Tk, NORMAL, DISABLED, END
from Dhcp.packet import Packet
from Dhcp.server_options import ServerOptions
from threading import Thread
from Commons.timer import Timer
from typing import Optional
from datetime import datetime, timedelta
from Interfaces.base_interface import BaseInterface
from queue import Queue
from Backend.client import Client
class ClientInterface(BaseInterface):
    """Tkinter front-end for the DHCP client.

    Builds DHCP request packets from the form widgets, runs the client on
    background threads, and renders logging output, lease details and the
    history of IPs received from the server.
    """

    def __init__(self):
        super().__init__()
        self.__timer: Optional[Timer] = None
        self.__last_request_packet: Optional[Packet] = None
        self._logging_queue = Queue()
        # Drain one logging message every 200ms.
        self._logging_timer = Timer(interval=1/5, action=self._handle_logging)
        self.__ip_history_list = []
        self._client: Optional[Client] = None
        self._window = Tk()
        self._window.geometry("830x720")

        # Action buttons; handlers run on worker threads so the UI stays responsive.
        self.__connect_button = self._create_button(text="CONNECT", x_position=20, y_position=20,
                                                    command=lambda: Thread(target=self.__connect, args=()).start())
        self._create_button(text="GEN. DEFAULT", x_position=100, y_position=20,
                            command=lambda: Thread(target=self.__generate_default, args=()).start())
        self._create_button(text="DISCONNECT", command=lambda: Thread(target=self.__disconnect, args=()).start(),
                            x_position=203, y_position=20)

        # Checkboxes selecting which DHCP options to request.
        _, self.__subnet_mask_option = self._create_checkbutton(text='Subnet Mask', x_pos=20, y_pos=280)
        _, self.__router_option = self._create_checkbutton(text="Router", x_pos=20, y_pos=320)
        _, self.__domain_server_option = self._create_checkbutton("Domain Server", 20, 360)
        _, self.__broadcast_address_option = self._create_checkbutton("Broadcast Address", 20, 400)
        _, self.__lease_time_option = self._create_checkbutton("Lease Time", 20, 440)
        _, self.__renewal_time_option = self._create_checkbutton("Renewal Time", 20, 480)

        # Static labels for the request input fields.
        self._create_label(x_pos=20, y_pos=70, text="HOST NAME")
        self._create_label(20, 110, text="ADDRESS REQUEST")
        self._create_label(20, 150, text="CLIENT ID")
        self._create_label(20, 190, text="MAC")
        self._create_label(20, 230, text="CLIENT IP ADDRESS")

        # Value labels populated from the server's ACK packet.
        _, self.__subnet_mask_value = self._create_label(150, 280, variable_type=StringVar)
        _, self.__router_value = self._create_label(150, 320, variable_type=StringVar)
        _, self.__domain_server_value = self._create_label(150, 360, variable_type=StringVar)
        _, self.__broadcast_address_value = self._create_label(150, 400, variable_type=StringVar)
        _, self.__lease_time_value = self._create_label(150, 440, variable_type=StringVar)
        _, self.__renewal_time_value = self._create_label(150, 480, variable_type=StringVar)
        self._create_label(400, 46, text="Logging")
        self._create_label(20, 570, text="_" * 122)
        _, self.__renew_datetime_value = self._create_label(150, 650, variable_type=StringVar)
        self._create_label(20, 650, text="Renew date")
        self._create_label(400, 690, text="Current IP")
        _, self.__current_ip_value = self._create_label(453, 690, variable_type=StringVar)
        self._create_label(400, 600, text="Ip history")

        # Request input fields; the literal string "None" means "unset".
        _, self.__host_name_value = self._create_entry(x_position=150, y_position=70, width=180, height=20)
        _, self.__address_request_value = self._create_entry(150, 110, 180, 20)
        _, self.__client_id_value = self._create_entry(150, 150, 180, 20)
        _, self.__hardware_address_value = self._create_entry(150, 190, 180, 20)
        _, self.__client_ip_address_value = self._create_entry(150, 230, 180, 20)
        self.__logging_text, _ = self._create_text(x_pos=400, y_pos=70, height=30, width=49, with_state=True)
        # NOTE(review): unlike the call above, this result is not unpacked —
        # confirm _create_text returns the widget itself when with_state is omitted.
        self.__ip_history_text = self._create_text(400, 630, 3, 49)

    def __inputs_to_packet(self) -> Packet:
        """Creates a packet from inputs
        :return: Packet
        """
        server_options = []
        if self.__subnet_mask_option.get():
            server_options.append(ServerOptions(1))
        if self.__router_option.get():
            server_options.append(ServerOptions(3))
        if self.__domain_server_option.get():
            server_options.append(ServerOptions(6))
        if self.__broadcast_address_option.get():
            server_options.append(ServerOptions(28))
        if self.__lease_time_option.get():
            server_options.append(ServerOptions(51))
        if self.__renewal_time_option.get():
            server_options.append(ServerOptions(58))

        new_packet = Packet(packet=None)
        new_packet.server_options = server_options
        # Entries holding the literal "None" are treated as unset fields.
        new_packet.host_name = self.__host_name_value.get() if self.__host_name_value.get() != 'None' else None
        new_packet.address_request = self.__address_request_value.get() if self.__address_request_value.get() != 'None' else None
        new_packet.client_id = self.__client_id_value.get() if self.__client_id_value.get() != 'None' else None
        mac = self.__hardware_address_value.get()
        if mac != "None":
            new_packet.client_hardware_address = mac
        cia = self.__client_ip_address_value.get()
        if cia != "None":
            new_packet.client_ip_address = cia
        return new_packet

    def __append_to_logging(self, text: str):
        """Writes text to logging window
        :param text: Text to be written
        """
        # The widget is kept disabled so users cannot edit the log; enable
        # it just long enough to insert.
        self.__logging_text.config(state='normal')
        self.__logging_text.insert(END, f" {text}\n")
        self.__logging_text.config(state='disabled')

    def __add_ip_in_history(self, ip: str):
        """Adds an ip to the history
        :param ip: Added ip address
        """
        if ip not in self.__ip_history_list:
            self.__ip_history_list.append(ip)
            self.__ip_history_text.config(state=NORMAL)
            self.__ip_history_text.insert(END, f" {ip}\n")
            self.__ip_history_text.config(state=DISABLED)

    def __set_fields_from_dhcpack(self, packet_ack: Packet):
        """Fills the widgets with the info from the ACK packet
        :param packet_ack: packet from which to read
        """
        self.__add_ip_in_history(packet_ack.your_ip_address)
        # Renew after renewal_time, or half the lease when the server sent
        # no renewal time.
        renew_seconds = packet_ack.renewal_time if packet_ack.renewal_time else (
            packet_ack.lease_time // 2 if packet_ack.lease_time else None)
        if renew_seconds is not None:
            self.__renew_datetime_value.set(f"{datetime.now() + timedelta(seconds=renew_seconds)}")
        else:
            # Bug fix: the original passed the string "None" to
            # timedelta(seconds=...), raising TypeError when neither
            # renewal_time nor lease_time was present.
            self.__renew_datetime_value.set("None")
        self.__subnet_mask_value.set(packet_ack.subnet_mask if packet_ack.subnet_mask else "None")
        self.__router_value.set(packet_ack.router if packet_ack.router else "None")
        self.__domain_server_value.set(packet_ack.domain_server if packet_ack.domain_server else "None")
        self.__broadcast_address_value.set(packet_ack.broadcast_address if packet_ack.broadcast_address else "None")
        self.__lease_time_value.set(packet_ack.lease_time if packet_ack.lease_time else "None")
        self.__renewal_time_value.set(packet_ack.renewal_time if packet_ack.renewal_time else
                                      packet_ack.lease_time//2 if packet_ack.lease_time else "None")
        self.__current_ip_value.set(packet_ack.your_ip_address)

    def __reset_fields(self):
        """Fills widgets with '...'"""
        self.__renew_datetime_value.set("...")
        self.__subnet_mask_value.set("...")
        self.__router_value.set("...")
        self.__domain_server_value.set("...")
        self.__broadcast_address_value.set("...")
        self.__lease_time_value.set("...")
        self.__renewal_time_value.set("...")
        self.__current_ip_value.set("...")

    def __generate_default(self):
        """Fills the inputs with default values"""
        self.__host_name_value.set("None")
        self.__address_request_value.set("None")
        self.__client_id_value.set("None")
        self.__hardware_address_value.set("None")
        self.__client_ip_address_value.set("None")
        self.__subnet_mask_option.set(True)
        self.__router_option.set(True)
        self.__domain_server_option.set(True)
        self.__broadcast_address_option.set(True)
        self.__lease_time_option.set(True)
        self.__renewal_time_option.set(True)

    def __connect(self):
        """Connects to a DHCP Server"""
        self.__connect_button["state"] = DISABLED
        packet = self.__inputs_to_packet()
        self._client = Client(server_options=packet.server_options, host_name=packet.host_name,
                              address_request=packet.address_request, client_id=packet.client_id,
                              mac=packet.client_hardware_address, client_ip_address=packet.client_ip_address,
                              logging_queue=self._logging_queue)
        self._logging_timer = Timer(interval=1/5, action=self._handle_logging)
        self._logging_timer.start()
        self._client.connect()

    def __disconnect(self):
        """Disconnects from the current server"""
        # Bug fix: DISCONNECT could be pressed before any connection was
        # made, which crashed on self._client being None.
        if self._client is None:
            return
        self._client.disconnect()
        self.__reset_fields()
        self.__connect_button["state"] = NORMAL
        self._logging_timer.cancel()

    def _handle_logging(self):
        """Dispatch one queued message: "reset" clears the fields, other
        strings are appended to the log, bytes are parsed as a DHCP ACK."""
        # NOTE(review): Queue.get() blocks until a message arrives — confirm
        # the Timer action is expected to block between messages.
        message = self._logging_queue.get()
        if type(message) is str and message == "reset":
            self.__reset_fields()
        elif type(message) is str:
            self.__append_to_logging(message)
        elif type(message) is bytes:
            self.__set_fields_from_dhcpack(Packet(message))
if __name__ == "__main__":
    # Launch the client GUI when executed as a script.
    app = ClientInterface()
    app.start()
|
<gh_stars>10-100
from copy import deepcopy
import csv
import io
import json
import logging
import re
import time
import uuid
import hashlib
import arrow
from psycopg2 import sql
from psycopg2.extras import LoggingConnection, LoggingCursor
from target_postgres import json_schema, singer
from target_postgres.exceptions import PostgresError
from target_postgres.sql_base import SEPARATOR, SQLInterface
# Sentinel written into the CSV stream for SQL NULLs; matched by the
# `COPY ... NULL AS` clause when loading.
RESERVED_NULL_DEFAULT = 'NULL'
def _update_schema_0_to_1(table_metadata, table_schema):
    """
    Given a `table_schema` of version 0, update it to version 1.

    Version 1 records an explicit datetime format on every datetime mapping.

    :param table_metadata: Table Metadata
    :param table_schema: TABLE_SCHEMA
    :return: Table Metadata
    """
    properties = table_schema['schema']['properties']
    for field_name, field_schema in properties.items():
        if json_schema.is_datetime(field_schema):
            table_metadata['mappings'][field_name]['format'] = json_schema.DATE_TIME_FORMAT

    table_metadata['schema_version'] = 1

    return table_metadata
def _update_schema_1_to_2(table_metadata, table_path):
"""
Given a `table_metadata` of version 1, update it to version 2.
:param table_metadata: Table Metadata
:param table_path: [String, ...]
:return: Table Metadata
"""
table_metadata['path'] = tuple(table_path)
table_metadata['schema_version'] = 2
table_metadata.pop('table_mappings', None)
return table_metadata
class _MillisLoggingCursor(LoggingCursor):
    """
    An implementation of LoggingCursor which tracks duration of queries.

    A monotonic start time is stamped on the cursor before delegating, so
    the owning connection's `filter` can report elapsed milliseconds.
    """

    def execute(self, query, vars=None):
        # Read back by MillisLoggingConnection.filter.
        self.timestamp = time.monotonic()
        return super().execute(query, vars)

    def callproc(self, procname, vars=None):
        self.timestamp = time.monotonic()
        return super().callproc(procname, vars)
class MillisLoggingConnection(LoggingConnection):
    """
    An implementation of LoggingConnection which tracks duration of queries.
    """

    def filter(self, msg, curs):
        # curs.timestamp is stamped by _MillisLoggingCursor just before the
        # statement runs.
        elapsed_millis = int((time.monotonic() - curs.timestamp) * 1000)
        return "MillisLoggingConnection: {} millis spent executing: {}".format(
            elapsed_millis,
            msg
        )

    def cursor(self, *args, **kwargs):
        # Default to the timing cursor unless the caller supplied their own.
        kwargs.setdefault('cursor_factory', _MillisLoggingCursor)
        return LoggingConnection.cursor(self, *args, **kwargs)
class TransformStream:
    """Minimal file-like adapter: every `read()` returns `fun()`'s value.

    Used to feed lazily-produced chunks to APIs that expect a readable
    stream (e.g. `copy_expert`).
    """

    def __init__(self, fun):
        self.fun = fun

    def read(self, *args, **kwargs):
        # Size hints and other arguments are accepted for API compatibility
        # but deliberately ignored.
        return self.fun()
class PostgresTarget(SQLInterface):
    # Singer target that persists record batches into PostgreSQL tables,
    # storing table/column mapping metadata as JSON in table comments.

    ## NAMEDATALEN _defaults_ to 64 in PostgreSQL. The maximum length for an identifier is
    ## NAMEDATALEN - 1.
    # TODO: Figure out way to `SELECT` value from commands
    IDENTIFIER_FIELD_LENGTH = 63
    def __init__(self, connection, *args,
                 postgres_schema='public',
                 logging_level=None,
                 persist_empty_tables=False,
                 add_upsert_indexes=True,
                 **kwargs):
        """
        :param connection: established psycopg2 connection (optionally a
            LoggingConnection — see the initialize() call below)
        :param postgres_schema: schema all tables are created/looked up in
        :param logging_level: optional level name applied to LOGGER
        :param persist_empty_tables: also create tables for empty streams
        :param add_upsert_indexes: create indexes for new tables (see
            new_table_indexes)
        """
        self.LOGGER.info(
            'PostgresTarget created with established connection: `{}`, PostgreSQL schema: `{}`'.format(connection.dsn,
                                                                                                       postgres_schema))

        if logging_level:
            level = logging.getLevelName(logging_level)
            self.LOGGER.setLevel(level)

        try:
            # LoggingConnection requires a logger before use; plain
            # connections have no initialize() and raise AttributeError.
            connection.initialize(self.LOGGER)
            self.LOGGER.debug('PostgresTarget set to log all queries.')
        except AttributeError:
            self.LOGGER.debug('PostgresTarget disabling logging all queries.')

        self.conn = connection
        self.postgres_schema = postgres_schema
        self.persist_empty_tables = persist_empty_tables
        self.add_upsert_indexes = add_upsert_indexes

        if self.persist_empty_tables:
            self.LOGGER.debug('PostgresTarget is persisting empty tables')

        # Run in-place metadata migrations at startup.
        with self.conn.cursor() as cur:
            self._update_schemas_0_to_1(cur)
            self._update_schemas_1_to_2(cur)
def _update_schemas_0_to_1(self, cur):
"""
Given a Cursor for a Postgres Connection, upgrade table schemas at version 0 to version 1.
:param cur: Cursor
:return: None
"""
cur.execute(sql.SQL('''
SELECT c.relname, obj_description(c.oid, 'pg_class')
FROM pg_namespace AS n
INNER JOIN pg_class AS c ON n.oid = c.relnamespace
WHERE n.nspname = {};
''').format(sql.Literal(self.postgres_schema)))
for mapped_name, raw_json in cur.fetchall():
metadata = None
if raw_json:
try:
metadata = json.loads(raw_json)
except:
pass
if metadata and metadata.get('schema_version', 0) == 0:
self.LOGGER.info('Migrating `{}` from schema_version 0 to 1'.format(mapped_name))
table_schema = self.__get_table_schema(cur, mapped_name)
version_1_metadata = _update_schema_0_to_1(metadata, table_schema)
self._set_table_metadata(cur, mapped_name, version_1_metadata)
def _update_schemas_1_to_2(self, cur):
"""
Given a Cursor for a Postgres Connection, upgrade table schemas at version 1 to version 2.
:param cur: Cursor
:return: None
"""
cur.execute(sql.SQL('''
SELECT c.relname, obj_description(c.oid, 'pg_class')
FROM pg_namespace AS n
INNER JOIN pg_class AS c ON n.oid = c.relnamespace
WHERE n.nspname = {};
''').format(sql.Literal(self.postgres_schema)))
for mapped_name, raw_json in cur.fetchall():
metadata = None
if raw_json:
try:
metadata = json.loads(raw_json)
except:
pass
if metadata and metadata.get('schema_version', 0) == 1 and metadata.get('table_mappings'):
self.LOGGER.info('Migrating root_table `{}` children from schema_version 1 to 2'.format(mapped_name))
table_path = tuple()
for mapping in metadata.get('table_mappings'):
table_name = mapping['to']
table_path = mapping['from']
table_metadata = self._get_table_metadata(cur, table_name)
self.LOGGER.info('Migrating `{}` (`{}`) from schema_version 1 to 2'.format(table_path, table_name))
version_2_metadata = _update_schema_1_to_2(table_metadata, table_path)
self._set_table_metadata(cur, table_name, version_2_metadata)
root_version_2_metadata = _update_schema_1_to_2(metadata, table_path[0:1])
self._set_table_metadata(cur, mapped_name, root_version_2_metadata)
def metrics_tags(self):
return {'database': self.conn.get_dsn_parameters().get('dbname', None),
'schema': self.postgres_schema}
def setup_table_mapping_cache(self, cur):
self.table_mapping_cache = {}
cur.execute(sql.SQL('''
SELECT c.relname, obj_description(c.oid, 'pg_class')
FROM pg_namespace AS n
INNER JOIN pg_class AS c ON n.oid = c.relnamespace
WHERE n.nspname = {};
''').format(sql.Literal(self.postgres_schema)))
for mapped_name, raw_json in cur.fetchall():
table_path = None
if raw_json:
table_path = json.loads(raw_json).get('path', None)
self.LOGGER.info("Mapping: {} to {}".format(mapped_name, table_path))
if table_path:
self.table_mapping_cache[tuple(table_path)] = mapped_name
    def write_batch(self, stream_buffer):
        """
        Persist all buffered records for `stream_buffer.stream` inside a
        single transaction, validating that key properties have not changed
        and routing records to a versioned table when a newer table version
        is being streamed. Returns the helper's batch details, or None when
        the batch is skipped (empty, or from an earlier table version).
        """
        if not self.persist_empty_tables and stream_buffer.count == 0:
            return None

        with self.conn.cursor() as cur:
            try:
                cur.execute('BEGIN;')

                self.setup_table_mapping_cache(cur)

                root_table_name = self.add_table_mapping_helper((stream_buffer.stream,), self.table_mapping_cache)['to']
                current_table_schema = self.get_table_schema(cur, root_table_name)

                current_table_version = None

                if current_table_schema:
                    current_table_version = current_table_schema.get('version', None)

                    # Key properties are immutable once a table exists: both
                    # the set of keys and each key's SQL type must match.
                    if set(stream_buffer.key_properties) \
                            != set(current_table_schema.get('key_properties')):
                        raise PostgresError(
                            '`key_properties` change detected. Existing values are: {}. Streamed values are: {}'.format(
                                current_table_schema.get('key_properties'),
                                stream_buffer.key_properties
                            ))

                    for key_property in stream_buffer.key_properties:
                        canonicalized_key, remote_column_schema = self.fetch_column_from_path((key_property,),
                                                                                              current_table_schema)
                        if self.json_schema_to_sql_type(remote_column_schema) \
                                != self.json_schema_to_sql_type(stream_buffer.schema['properties'][key_property]):
                            raise PostgresError(
                                ('`key_properties` type change detected for "{}". ' +
                                 'Existing values are: {}. ' +
                                 'Streamed values are: {}, {}, {}').format(
                                    key_property,
                                    json_schema.get_type(current_table_schema['schema']['properties'][key_property]),
                                    json_schema.get_type(stream_buffer.schema['properties'][key_property]),
                                    self.json_schema_to_sql_type(
                                        current_table_schema['schema']['properties'][key_property]),
                                    self.json_schema_to_sql_type(stream_buffer.schema['properties'][key_property])
                                ))

                target_table_version = current_table_version or stream_buffer.max_version

                self.LOGGER.info('Stream {} ({}) with max_version {} targetting {}'.format(
                    stream_buffer.stream,
                    root_table_name,
                    stream_buffer.max_version,
                    target_table_version
                ))

                # NOTE(review): this resets root_table_name to the raw stream
                # name, discarding the mapped name resolved above — confirm
                # this is intentional.
                root_table_name = stream_buffer.stream
                if current_table_version is not None and \
                        stream_buffer.max_version is not None:
                    if stream_buffer.max_version < current_table_version:
                        # Stale records from an older table version: skip batch.
                        self.LOGGER.warning('{} - Records from an earlier table version detected.'
                                            .format(stream_buffer.stream))
                        cur.execute('ROLLBACK;')
                        return None

                    elif stream_buffer.max_version > current_table_version:
                        # Newer version: write to a suffixed, versioned table
                        # (swapped in later by activate_version).
                        root_table_name += SEPARATOR + str(stream_buffer.max_version)
                        target_table_version = stream_buffer.max_version

                self.LOGGER.info('Root table name {}'.format(root_table_name))

                written_batches_details = self.write_batch_helper(cur,
                                                                  root_table_name,
                                                                  stream_buffer.schema,
                                                                  stream_buffer.key_properties,
                                                                  stream_buffer.get_batch(),
                                                                  {'version': target_table_version})

                cur.execute('COMMIT;')

                return written_batches_details
            except Exception as ex:
                cur.execute('ROLLBACK;')
                message = 'Exception writing records'
                self.LOGGER.exception(message)
                raise PostgresError(message, ex)
def activate_version(self, stream_buffer, version):
with self.conn.cursor() as cur:
try:
cur.execute('BEGIN;')
self.setup_table_mapping_cache(cur)
root_table_name = self.add_table_mapping(cur, (stream_buffer.stream,), {})
current_table_schema = self.get_table_schema(cur, root_table_name)
if not current_table_schema:
self.LOGGER.error('{} - Table for stream does not exist'.format(
stream_buffer.stream))
elif current_table_schema.get('version') is not None and current_table_schema.get('version') >= version:
self.LOGGER.warning('{} - Table version {} already active'.format(
stream_buffer.stream,
version))
else:
versioned_root_table = root_table_name + SEPARATOR + str(version)
names_to_paths = dict([(v, k) for k, v in self.table_mapping_cache.items()])
cur.execute(sql.SQL('''
SELECT tablename FROM pg_tables
WHERE schemaname = {} AND tablename like {};
''').format(
sql.Literal(self.postgres_schema),
sql.Literal(versioned_root_table + '%')))
for versioned_table_name in map(lambda x: x[0], cur.fetchall()):
table_name = root_table_name + versioned_table_name[len(versioned_root_table):]
table_path = names_to_paths[table_name]
cur.execute(sql.SQL('''
ALTER TABLE {table_schema}.{stream_table} RENAME TO {stream_table_old};
ALTER TABLE {table_schema}.{version_table} RENAME TO {stream_table};
DROP TABLE {table_schema}.{stream_table_old};
COMMIT;
''').format(
table_schema=sql.Identifier(self.postgres_schema),
stream_table_old=sql.Identifier(table_name +
SEPARATOR +
'old'),
stream_table=sql.Identifier(table_name),
version_table=sql.Identifier(versioned_table_name)))
metadata = self._get_table_metadata(cur, table_name)
self.LOGGER.info('Activated {}, setting path to {}'.format(
metadata,
table_path
))
metadata['path'] = table_path
self._set_table_metadata(cur, table_name, metadata)
except Exception as ex:
cur.execute('ROLLBACK;')
message = '{} - Exception activating table version {}'.format(
stream_buffer.stream,
version)
self.LOGGER.exception(message)
raise PostgresError(message, ex)
def _validate_identifier(self, identifier):
if not identifier:
raise PostgresError('Identifier must be non empty.')
if self.IDENTIFIER_FIELD_LENGTH < len(identifier):
raise PostgresError('Length of identifier must be less than or equal to {}. Got {} for `{}`'.format(
self.IDENTIFIER_FIELD_LENGTH,
len(identifier),
identifier
))
if not re.match(r'^[a-z_].*', identifier):
raise PostgresError(
'Identifier must start with a lower case letter, or underscore. Got `{}` for `{}`'.format(
identifier[0],
identifier
))
if not re.match(r'^[a-z0-9_$]+$', identifier):
raise PostgresError(
'Identifier must only contain lower case letters, numbers, underscores, or dollar signs. Got `{}` for `{}`'.format(
re.findall(r'[^0-9]', '1234a567')[0],
identifier
))
return True
def canonicalize_identifier(self, identifier):
if not identifier:
identifier = '_'
return re.sub(r'[^\w\d_$]', '_', identifier.lower())
def add_key_properties(self, cur, table_name, key_properties):
if not key_properties:
return None
metadata = self._get_table_metadata(cur, table_name)
if not 'key_properties' in metadata:
metadata['key_properties'] = key_properties
self._set_table_metadata(cur, table_name, metadata)
def add_table(self, cur, path, name, metadata):
self._validate_identifier(name)
create_table_sql = sql.SQL('CREATE TABLE {}.{}').format(
sql.Identifier(self.postgres_schema),
sql.Identifier(name))
cur.execute(sql.SQL('{} ();').format(create_table_sql))
self._set_table_metadata(cur, name, {'path': path,
'version': metadata.get('version', None),
'schema_version': metadata['schema_version']})
def add_table_mapping(self, cur, from_path, metadata):
mapping = self.add_table_mapping_helper(from_path, self.table_mapping_cache)
if not mapping['exists']:
self.table_mapping_cache[from_path] = mapping['to']
return mapping['to']
    def _get_update_sql(self, target_table_name, temp_table_name, key_properties, columns, subkeys):
        """
        Compose the upsert SQL: delete target rows superseded by the
        deduplicated temp-table rows, insert the deduplicated new rows, then
        drop the temp table. Deduplication keeps the row with the highest
        singer SEQUENCE per key (and per key+subkey set for inserts when
        subkeys are present).
        """
        full_table_name = sql.SQL('{}.{}').format(
            sql.Identifier(self.postgres_schema),
            sql.Identifier(target_table_name))
        full_temp_table_name = sql.SQL('{}.{}').format(
            sql.Identifier(self.postgres_schema),
            sql.Identifier(temp_table_name))

        # Per-key fragments reused across the DELETE and INSERT statements.
        pk_temp_select_list = []
        pk_where_list = []
        pk_null_list = []
        cxt_where_list = []
        for pk in key_properties:
            pk_identifier = sql.Identifier(pk)
            pk_temp_select_list.append(sql.SQL('{}.{}').format(full_temp_table_name,
                                                               pk_identifier))

            pk_where_list.append(
                sql.SQL('{table}.{pk} = "dedupped".{pk}').format(
                    table=full_table_name,
                    temp_table=full_temp_table_name,
                    pk=pk_identifier))

            pk_null_list.append(
                sql.SQL('{table}.{pk} IS NULL').format(
                    table=full_table_name,
                    pk=pk_identifier))

            cxt_where_list.append(
                sql.SQL('{table}.{pk} = "pks".{pk}').format(
                    table=full_table_name,
                    pk=pk_identifier))
        pk_temp_select = sql.SQL(', ').join(pk_temp_select_list)
        pk_where = sql.SQL(' AND ').join(pk_where_list)
        pk_null = sql.SQL(' AND ').join(pk_null_list)
        cxt_where = sql.SQL(' AND ').join(cxt_where_list)

        # Only delete rows that are older than (or tied with) the incoming row.
        sequence_join = sql.SQL(' AND "dedupped".{} >= {}.{}').format(
            sql.Identifier(singer.SEQUENCE),
            full_table_name,
            sql.Identifier(singer.SEQUENCE))

        distinct_order_by = sql.SQL(' ORDER BY {}, {}.{} DESC').format(
            pk_temp_select,
            full_temp_table_name,
            sql.Identifier(singer.SEQUENCE))

        if len(subkeys) > 0:
            # Inserts deduplicate on key properties + subkey columns.
            pk_temp_subkey_select_list = []
            for pk in (key_properties + subkeys):
                pk_temp_subkey_select_list.append(sql.SQL('{}.{}').format(full_temp_table_name,
                                                                          sql.Identifier(pk)))
            insert_distinct_on = sql.SQL(', ').join(pk_temp_subkey_select_list)

            insert_distinct_order_by = sql.SQL(' ORDER BY {}, {}.{} DESC').format(
                insert_distinct_on,
                full_temp_table_name,
                sql.Identifier(singer.SEQUENCE))
        else:
            insert_distinct_on = pk_temp_select
            insert_distinct_order_by = distinct_order_by

        insert_columns_list = []
        dedupped_columns_list = []
        for column in columns:
            insert_columns_list.append(sql.SQL('{}').format(sql.Identifier(column)))
            dedupped_columns_list.append(sql.SQL('{}.{}').format(sql.Identifier('dedupped'),
                                                                 sql.Identifier(column)))
        insert_columns = sql.SQL(', ').join(insert_columns_list)
        dedupped_columns = sql.SQL(', ').join(dedupped_columns_list)

        return sql.SQL('''
            DELETE FROM {table} USING (
                    SELECT "dedupped".*
                    FROM (
                        SELECT *,
                               ROW_NUMBER() OVER (PARTITION BY {pk_temp_select}
                                                  {distinct_order_by}) AS "pk_ranked"
                        FROM {temp_table}
                        {distinct_order_by}) AS "dedupped"
                    JOIN {table} ON {pk_where}{sequence_join}
                    WHERE pk_ranked = 1
                ) AS "pks" WHERE {cxt_where};

            INSERT INTO {table}({insert_columns}) (
                SELECT {dedupped_columns}
                FROM (
                    SELECT *,
                           ROW_NUMBER() OVER (PARTITION BY {insert_distinct_on}
                                              {insert_distinct_order_by}) AS "pk_ranked"
                    FROM {temp_table}
                    {insert_distinct_order_by}) AS "dedupped"
                LEFT JOIN {table} ON {pk_where}
                WHERE pk_ranked = 1 AND {pk_null}
            );

            DROP TABLE {temp_table};
            ''').format(table=full_table_name,
                        temp_table=full_temp_table_name,
                        pk_temp_select=pk_temp_select,
                        pk_where=pk_where,
                        cxt_where=cxt_where,
                        sequence_join=sequence_join,
                        distinct_order_by=distinct_order_by,
                        pk_null=pk_null,
                        insert_distinct_on=insert_distinct_on,
                        insert_distinct_order_by=insert_distinct_order_by,
                        insert_columns=insert_columns,
                        dedupped_columns=dedupped_columns)
def serialize_table_record_null_value(self, remote_schema, streamed_schema, field, value):
if value is None:
return RESERVED_NULL_DEFAULT
return value
    def serialize_table_record_datetime_value(self, remote_schema, streamed_schema, field, value):
        # Normalize any arrow-parsable datetime to a Postgres-friendly
        # 'YYYY-MM-DD HH:mm:ss.SSSSZZ' string (with UTC offset).
        return arrow.get(value).format('YYYY-MM-DD HH:mm:ss.SSSSZZ')
    def persist_csv_rows(self,
                         cur,
                         remote_schema,
                         temp_table_name,
                         columns,
                         csv_rows):
        """
        COPY the streamed CSV rows into the temp table, then execute the
        upsert SQL which merges them into the target table and drops the
        temp table.
        """
        copy = sql.SQL('COPY {}.{} ({}) FROM STDIN WITH CSV NULL AS {}').format(
            sql.Identifier(self.postgres_schema),
            sql.Identifier(temp_table_name),
            sql.SQL(', ').join(map(sql.Identifier, columns)),
            sql.Literal(RESERVED_NULL_DEFAULT))
        cur.copy_expert(copy, csv_rows)

        # Subkey columns are those matching singer's LEVEL_FMT naming
        # (presumably the `_sdc_level_<n>_*` columns of nested tables);
        # they participate in insert deduplication.
        pattern = re.compile(singer.LEVEL_FMT.format('[0-9]+'))
        subkeys = list(filter(lambda header: re.match(pattern, header) is not None, columns))

        canonicalized_key_properties = [self.fetch_column_from_path((key_property,), remote_schema)[0]
                                        for key_property in remote_schema['key_properties']]

        update_sql = self._get_update_sql(remote_schema['name'],
                                          temp_table_name,
                                          canonicalized_key_properties,
                                          columns,
                                          subkeys)
        cur.execute(update_sql)
def write_table_batch(self, cur, table_batch, metadata):
remote_schema = table_batch['remote_schema']
## Create temp table to upload new data to
target_table_name = self.canonicalize_identifier('tmp_' + str(uuid.uuid4()))
cur.execute(sql.SQL('''
CREATE TABLE {schema}.{temp_table} (LIKE {schema}.{table})
''').format(
schema=sql.Identifier(self.postgres_schema),
temp_table=sql.Identifier(target_table_name),
table=sql.Identifier(remote_schema['name'])
))
## Make streamable CSV records
csv_headers = list(remote_schema['schema']['properties'].keys())
rows_iter = iter(table_batch['records'])
def transform():
try:
row = next(rows_iter)
with io.StringIO() as out:
writer = csv.DictWriter(out, csv_headers)
writer.writerow(row)
return out.getvalue()
except StopIteration:
return ''
csv_rows = TransformStream(transform)
## Persist csv rows
self.persist_csv_rows(cur,
remote_schema,
target_table_name,
csv_headers,
csv_rows)
return len(table_batch['records'])
def add_column(self, cur, table_name, column_name, column_schema):
cur.execute(sql.SQL('''
ALTER TABLE {table_schema}.{table_name}
ADD COLUMN {column_name} {data_type};
''').format(
table_schema=sql.Identifier(self.postgres_schema),
table_name=sql.Identifier(table_name),
column_name=sql.Identifier(column_name),
data_type=sql.SQL(self.json_schema_to_sql_type(column_schema))))
def migrate_column(self, cur, table_name, from_column, to_column):
cur.execute(sql.SQL('''
UPDATE {table_schema}.{table_name}
SET {to_column} = {from_column};
''').format(
table_schema=sql.Identifier(self.postgres_schema),
table_name=sql.Identifier(table_name),
to_column=sql.Identifier(to_column),
from_column=sql.Identifier(from_column)))
def drop_column(self, cur, table_name, column_name):
cur.execute(sql.SQL('''
ALTER TABLE {table_schema}.{table_name}
DROP COLUMN {column_name};
''').format(
table_schema=sql.Identifier(self.postgres_schema),
table_name=sql.Identifier(table_name),
column_name=sql.Identifier(column_name)))
def make_column_nullable(self, cur, table_name, column_name):
cur.execute(sql.SQL('''
ALTER TABLE {table_schema}.{table_name}
ALTER COLUMN {column_name} DROP NOT NULL;
''').format(
table_schema=sql.Identifier(self.postgres_schema),
table_name=sql.Identifier(table_name),
column_name=sql.Identifier(column_name)))
def add_index(self, cur, table_name, column_names):
index_name = 'tp_{}_{}_idx'.format(table_name, "_".join(column_names))
if len(index_name) > self.IDENTIFIER_FIELD_LENGTH:
index_name_hash = hashlib.sha1(index_name.encode('utf-8')).hexdigest()[0:60]
index_name = 'tp_{}'.format(index_name_hash)
cur.execute(sql.SQL('''
CREATE INDEX {index_name}
ON {table_schema}.{table_name}
({column_names});
''').format(
index_name=sql.Identifier(index_name),
table_schema=sql.Identifier(self.postgres_schema),
table_name=sql.Identifier(table_name),
column_names=sql.SQL(', ').join(sql.Identifier(column_name) for column_name in column_names)))
    def _set_table_metadata(self, cur, table_name, metadata):
        """
        Given a Metadata dict, set it as the comment on the given table.
        :param self: Postgres
        :param cur: Pscyopg.Cursor
        :param table_name: String
        :param metadata: Metadata Dict
        :return: None
        """
        # Metadata is persisted as JSON in the table's COMMENT; read back by
        # _get_table_metadata and setup_table_mapping_cache.
        cur.execute(sql.SQL('COMMENT ON TABLE {}.{} IS {};').format(
            sql.Identifier(self.postgres_schema),
            sql.Identifier(table_name),
            sql.Literal(json.dumps(metadata))))
def _get_table_metadata(self, cur, table_name):
cur.execute(sql.SQL('''
SELECT EXISTS (
SELECT 1 FROM pg_tables
WHERE schemaname = {} AND
tablename = {});
''').format(
sql.Literal(self.postgres_schema),
sql.Literal(table_name)))
table_exists = cur.fetchone()[0]
if not table_exists:
return None
cur.execute(
sql.SQL('SELECT description FROM pg_description WHERE objoid = {}::regclass;').format(
sql.Literal(
'"{}"."{}"'.format(self.postgres_schema, table_name))))
comment = cur.fetchone()[0]
if comment:
try:
comment_meta = json.loads(comment)
except:
self.LOGGER.exception('Could not load table comment metadata')
raise
else:
comment_meta = None
return comment_meta
def add_column_mapping(self, cur, table_name, from_path, to_name, mapped_schema):
metadata = self._get_table_metadata(cur, table_name)
if not metadata:
metadata = {}
if not 'mappings' in metadata:
metadata['mappings'] = {}
mapping = {'type': json_schema.get_type(mapped_schema),
'from': from_path}
if 't' == json_schema.shorthand(mapped_schema):
mapping['format'] = 'date-time'
metadata['mappings'][to_name] = mapping
self._set_table_metadata(cur, table_name, metadata)
def drop_column_mapping(self, cur, table_name, mapped_name):
metadata = self._get_table_metadata(cur, table_name)
if not metadata:
metadata = {}
if not 'mappings' in metadata:
metadata['mappings'] = {}
metadata['mappings'].pop(mapped_name, None)
self._set_table_metadata(cur, table_name, metadata)
def new_table_indexes(self, schema):
if self.add_upsert_indexes:
upsert_index_column_names = deepcopy(schema.get('key_properties', []))
for column_name__or__path in schema['schema']['properties'].keys():
column_path = column_name__or__path
if isinstance(column_name__or__path, str):
column_path = (column_name__or__path,)
if len(column_path) == 1:
if column_path[0] == '_sdc_sequence' or column_path[0].startswith('_sdc_level_'):
upsert_index_column_names.append(column_path[0])
return [list(map(self.canonicalize_identifier, upsert_index_column_names))]
else:
return []
    def is_table_empty(self, cur, table_name):
        """
        Return True when the table contains no rows at all.

        :param cur: psycopg2 Cursor
        :param table_name: String
        :return: Boolean
        """
        cur.execute(sql.SQL('SELECT EXISTS (SELECT * FROM {}.{});').format(
            sql.Identifier(self.postgres_schema),
            sql.Identifier(table_name)))
        return not cur.fetchall()[0][0]
    def get_table_schema(self, cur, name):
        """Public wrapper around the name-mangled private schema fetch."""
        return self.__get_table_schema(cur, name)
    def __get_table_schema(self, cur, name):
        """
        Reconstruct the table's metadata dict (name/version/type/schema) from
        information_schema columns plus the JSON comment metadata.

        Purely exists for migration purposes. DO NOT CALL DIRECTLY.

        :param cur: psycopg2 Cursor
        :param name: String table name
        :return: Metadata Dict, or None when the table has neither columns
                 nor comment metadata
        """
        cur.execute(sql.SQL('''
            SELECT column_name, data_type, is_nullable FROM information_schema.columns
            WHERE table_schema = {} and table_name = {};
        ''').format(
            sql.Literal(self.postgres_schema), sql.Literal(name)))
        properties = {}
        for column in cur.fetchall():
            # is_nullable comes back as the string 'YES'/'NO'
            properties[column[0]] = self.sql_type_to_json_schema(column[1], column[2] == 'YES')
        metadata = self._get_table_metadata(cur, name)
        if metadata is None and not properties:
            return None
        if metadata is None:
            # table exists but carries no comment metadata yet
            metadata = {'version': None}
        metadata['name'] = name
        metadata['type'] = 'TABLE_SCHEMA'
        metadata['schema'] = {'properties': properties}
        return metadata
def sql_type_to_json_schema(self, sql_type, is_nullable):
"""
Given a string representing a SQL column type, and a boolean indicating whether
the associated column is nullable, return a compatible JSONSchema structure.
:param sql_type: string
:param is_nullable: boolean
:return: JSONSchema
"""
_format = None
if sql_type == 'timestamp with time zone':
json_type = 'string'
_format = 'date-time'
elif sql_type == 'bigint':
json_type = 'integer'
elif sql_type == 'double precision':
json_type = 'number'
elif sql_type == 'boolean':
json_type = 'boolean'
elif sql_type == 'text':
json_type = 'string'
else:
raise PostgresError('Unsupported type `{}` in existing target table'.format(sql_type))
json_type = [json_type]
if is_nullable:
json_type.append(json_schema.NULL)
ret_json_schema = {'type': json_type}
if _format:
ret_json_schema['format'] = _format
return ret_json_schema
    def json_schema_to_sql_type(self, schema):
        """
        Map a JSONSchema fragment to a Postgres column type string, appending
        ' NOT NULL' unless the schema's type list contains null.

        :param schema: JSONSchema dict
        :return: String, e.g. 'bigint NOT NULL'
        """
        _type = json_schema.get_type(schema)
        not_null = True
        ln = len(_type)
        if ln == 1:
            _type = _type[0]
        if ln == 2 and json_schema.NULL in _type:
            not_null = False
            # keep whichever of the two entries is not the null marker
            if _type.index(json_schema.NULL) == 0:
                _type = _type[1]
            else:
                _type = _type[0]
        elif ln > 2:
            raise PostgresError('Multiple types per column not supported')
        # NOTE(review): two non-null types (ln == 2 without NULL) fall through
        # with _type still a list, so every comparison below misses and the
        # column silently becomes 'text' — confirm that is intended.
        sql_type = 'text'
        if 'format' in schema and \
                schema['format'] == 'date-time' and \
                _type == 'string':
            sql_type = 'timestamp with time zone'
        elif _type == 'boolean':
            sql_type = 'boolean'
        elif _type == 'integer':
            sql_type = 'bigint'
        elif _type == 'number':
            sql_type = 'double precision'
        if not_null:
            sql_type += ' NOT NULL'
        return sql_type
|
#!/usr/bin/env python
#
#
# Table Of Contents
# -----------------
# 1. Configuration Parser
# 2. Container Types
# 3. Source Control
# 4. Node Model
# 5. Command-Line Interface
from __future__ import print_function, division
import os, sys, subprocess, re, socket, shutil, collections
import time, tempfile, shlex
import pwd, signal, smtplib, getpass
from os import path
try: # python 3
    from configparser import ConfigParser as SafeConfigParser, RawConfigParser
    from io import StringIO
    from urllib.request import urlretrieve
except ImportError: # python 2
    # Bug fix: ModuleNotFoundError does not exist on Python 2, so the except
    # clause itself raised NameError there. ImportError covers both runtimes
    # (ModuleNotFoundError subclasses it on Python 3).
    from ConfigParser import SafeConfigParser, RawConfigParser
    from StringIO import StringIO
    from urllib import urlretrieve
from glob import glob
import pkg_resources # part of setuptools
try:
VERSION = pkg_resources.require("jvmctl")[0].version
except pkg_resources.DistributionNotFound:
VERSION = 'unknown'
#----------------------------------------------------------------------
# 1. Configuration Parser
#----------------------------------------------------------------------
# System-wide override file and the per-app config directory.
SYSTEM_CONF = '/etc/jvmctl.conf'
CONF_ROOT = '/etc/jvmctl/apps'
# Legacy installs kept configs under /opt/jetty/conf; prefer it when present.
if path.exists('/opt/jetty/conf'):
    CONF_ROOT = '/opt/jetty/conf'
# Baseline option values; every per-app config file is layered on top.
DEFAULTS = """
[jetty]
REPO=https://repo1.maven.org/maven2/org/eclipse/jetty/jetty-distribution/
[jvm]
CONTAINER=jetty
HEAP_SIZE=128m
HEAP_DUMP_PATH=/var/tmp/${NODE}.hprof
USER=webapp
JAVA_HOME=/usr/lib/jvm/java-1.8.0
JETTY_VERSION=9.2.5.v20141112
GIT_BRANCH=HEAD
NLA_ENVIRON=devel
ROOT_URL_PREFIX=/
JAVA_OPTS=
OOM_EMAIL=root@localhost
SOCKET=
SOCKET_USER=root
SOCKET_GROUP=root
SOCKET_MODE=0660
EXEC_PREFIX=
GC_LOG_OPTS=
WEBAPPS_PATH=
[systemd.service.Unit]
After=network.target remote-fs.target
[systemd.service.Service]
Restart=on-failure
StartLimitInterval=120
StartLimitBurst=5
StandardOutput=journal
StandardError=journal
[systemd.service.Install]
WantedBy=multi-user.target
"""
# Root of the installed control-tools tree (two levels above this file).
control_tools_root = path.dirname(path.dirname(path.realpath(__file__)))
# CLI registry: command name -> function, group name -> ordered command list.
commands = {}
groups = collections.OrderedDict()
def cli_command(group=None):
    """Decorator factory: register the decorated function in `commands` and
    append it to its help-listing group."""
    groups.setdefault(group, [])

    def register(func):
        commands[func.__name__] = func
        groups[group].append(func)
        return func

    return register
def parse_shell_arrays(data):
    r"""
    Convert shell array syntax in legacy configs to ConfigParser's indentation
    based multiline values.

    (foo\nbar) => foo\n bar

    :param data: String config file contents
    :return: String with array bodies rewritten
    """
    out = ""
    pos = 0
    for m in re.finditer(r"(?m)^(?:\s*|^[^=]*=\s*)\(([^)]*)\)\s*$", data):
        # indent continuation lines so ConfigParser treats them as one value
        body = m.group(1).replace('\n', '\n ')
        # Bug fix: this previously appended data[:m.start(1)], re-emitting
        # everything from the start of the file before every match after the
        # first; only the text since the previous match belongs here.
        out += data[pos:m.start(1)] + body
        pos = m.end()
    out += data[pos:]
    return out
class RawConfig(RawConfigParser):
    """RawConfigParser variant that keeps option case and writes bare
    `key=value` lines (systemd unit-file style, no spaces around '=')."""

    def optionxform(self, option):
        """Keep option names exactly as given (no lowercasing)."""
        return option

    def write(self, fp):
        """Write every section as `key=value` lines with no '=' padding."""
        for section, options in self._sections.items():
            fp.write("[%s]\n" % section)
            for key, value in options.items():
                if key == "__name__":
                    continue
                fp.write("%s=%s\n" % (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
class Config(SafeConfigParser):
    """Parser for a node's shell-style config file, layered on top of
    DEFAULTS and the optional system-wide config."""

    def __init__(self, configfile):
        SafeConfigParser.__init__(self)
        # Idiom: context manager instead of try/finally around open().
        with open(configfile) as f:
            text = f.read()
        # Strip comments, rewrite legacy shell arrays and drop `export`
        # prefixes so the shell-style file parses as INI under [jvm].
        text = re.sub(r"(?m)^\s*#.*", "", text)
        text = parse_shell_arrays(text)
        text = '[jvm]\n' + text
        text = re.sub(r"(?m)^\s*export\s+", "", text)
        # Order matters: defaults first, system config, then the node's own
        # file, so later layers override earlier ones.
        # NOTE(review): readfp is kept for Python 2 compatibility but was
        # removed in Python 3.12 — switch to read_file once Py2 is dropped.
        self.readfp(StringIO(DEFAULTS))
        self.load_system_config()
        self.readfp(StringIO(text))
        self.migrate_legacy()

    def load_system_config(self):
        """Layer in /etc/jvmctl.conf when it exists."""
        if os.path.exists(SYSTEM_CONF):
            with open(SYSTEM_CONF) as f:
                self.readfp(f)

    def optionxform(self, option):
        """Override optionxform to preserve case"""
        return option

    def migrate_rename(self, oldname, newname):
        """Rename a [jvm] option when the legacy name is present."""
        if self.has_option('jvm', oldname):
            self.set('jvm', newname, self.get('jvm', oldname))
            self.remove_option('jvm', oldname)

    def migrate_legacy(self):
        """Migrate legacy config fields to the new format"""
        self.migrate_rename('JETTY_USER', 'USER')

    def systemd_environment(self):
        """Render [jvm] options as a systemd Environment= value string."""
        s = ""
        for k, v in self.items('jvm'):
            if '"' in v:
                s += '"' + k + '=' + v.replace('"', '\\"') + '" '
            else:
                s += k + '=' + v + ' '
        return s
#----------------------------------------------------------------------
# 2. Container Types
#----------------------------------------------------------------------
class NoneContainer:
    """Container type for bare (non-Jetty) JVM apps: no deploy step, JVM
    arguments come straight from the APP_OPTS config option."""

    def __init__(self, node):
        self.node = node
        self.properties = {}
        # NOTE(review): APP_OPTS has no entry in DEFAULTS, so this raises
        # unless the app's own config defines it — confirm that is intended.
        self.jvm_opts = node.config.get('jvm', 'APP_OPTS').split(' ')

    def deploy(self):
        """Nothing to deploy for a bare JVM."""
        pass
# Template for the contexts file written to {node.basedir}/context.xml; one
# JETTY_CONTEXT_XML entry is interpolated into {context_xml} per webapp.
JETTY_XML = """<?xml version="1.0"?>
<!-- Auto-generated by jvmctl. Edit {node.config_file} instead -->
<Configure id="Contexts" class="org.eclipse.jetty.server.handler.ContextHandlerCollection">
{context_xml}
</Configure>
"""
# One webapp entry: mounts {war} at {context_path}; the empty HashLoginService
# backed by /dev/null satisfies webapps that demand a security realm.
JETTY_CONTEXT_XML = """
<Call name="addHandler"><Arg><New class="org.eclipse.jetty.webapp.WebAppContext">
<Set name="contextPath">{context_path}</Set>
<Set name="war">{war}</Set>
<Get name="securityHandler">
<Set name="loginService">
<New class="org.eclipse.jetty.security.HashLoginService">
<Set name="name">Realm</Set>
<Set name="config">/dev/null</Set>
</New>
</Set>
</Get>
</New></Arg></Call>
"""
# Jetty 9 HTTP connector definition (written to {node.basedir}/http.xml);
# jetty.port is supplied via start.ini, other properties use defaults.
JETTY_HTTP_XML = """<?xml version="1.0"?>
<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">
<!-- Auto-generated by jvmctl. Edit {node.config_file} instead -->
<Configure id="Server" class="org.eclipse.jetty.server.Server">
<Call name="addConnector">
<Arg>
<New class="org.eclipse.jetty.server.ServerConnector">
<Arg name="server"><Ref refid="Server" /></Arg>
<Arg name="acceptors" type="int"><Property name="http.acceptors" default="-1"/></Arg>
<Arg name="selectors" type="int"><Property name="http.selectors" default="-1"/></Arg>
<Arg name="factories">
<Array type="org.eclipse.jetty.server.ConnectionFactory">
<Item>
<New class="org.eclipse.jetty.server.HttpConnectionFactory">
<Arg name="config"><Ref refid="httpConfig" /></Arg>
</New>
</Item>
</Array>
</Arg>
<Set name="host"><Property name="jetty.host" /></Set>
<Set name="port"><Property name="jetty.port" default="80" /></Set>
<Set name="idleTimeout"><Property name="http.timeout" default="30000"/></Set>
<Set name="soLingerTime"><Property name="http.soLingerTime" default="-1"/></Set>
<Set name="acceptorPriorityDelta"><Property name="http.acceptorPriorityDelta" default="0"/></Set>
<Set name="acceptQueueSize"><Property name="http.acceptQueueSize" default="0"/></Set>
<Set name="inheritChannel"><Property name="http.inheritChannel" default="true"/></Set>
</New>
</Arg>
</Call>
</Configure>
"""
# Customizer so apps behind a reverse proxy see the original client details.
JETTY_FORWARDED_XML = """<?xml version="1.0"?>
<!-- Auto-generated by jvmctl. Edit {node.config_file} instead -->
<!-- Obey X-Forwared-* headers -->
<Configure id="httpConfig" class="org.eclipse.jetty.server.HttpConfiguration">
<Call name="addCustomizer">
<Arg><New class="org.eclipse.jetty.server.ForwardedRequestCustomizer"/></Arg>
</Call>
</Configure>
"""
class JettyContainer:
    """Downloads a Jetty distribution and generates the per-node Jetty
    configuration (contexts, connector, start.ini) under node.basedir."""

    # Shared cache of unpacked Jetty distributions, keyed by version.
    cachedir = '/var/cache/jvmctl/container/'

    def __init__(self, node):
        self.node = node
        self.version = node.config.get('jvm', 'JETTY_VERSION')
        self.home = path.join(self.cachedir, "jetty-" + self.version)
        # jetty.base points start.jar at the per-node configuration dir
        self.properties = {
            'jetty.base': self.node.basedir
        }
        self.jvm_opts = ['-jar', path.join(self.home, "start.jar")]
        self.webapps_path = node.config.get('jvm', 'WEBAPPS_PATH')
        if not self.webapps_path:
            self.webapps_path = self.node.apps_path

    def deploy(self):
        """Fetch the Jetty distribution (if needed) and regenerate config."""
        self.fetch_jetty()
        self.configure_jetty()

    def fetch_jetty(self):
        """Download the requested version of Jetty into the shared cache."""
        if path.exists(self.home):
            return
        url = self.node.config.get('jetty','REPO') + self.version + "/jetty-distribution-" + self.version + ".tar.gz"
        if not path.exists(self.cachedir):
            os.makedirs(self.cachedir)
        # Security fix: tempfile.mktemp is race-prone (another process can
        # claim the name before we use it); mkstemp creates the file
        # atomically. Only the name is needed, so close the fd immediately.
        fd, f = tempfile.mkstemp(prefix='jetty-' + self.version + '-', suffix='.tar.gz')
        os.close(fd)
        try:
            print("Downloading Jetty from " + url)
            urlretrieve(url, f)
            subprocess.check_call(["tar", "-x", "-C", self.cachedir, "-f", f])
        finally:
            os.remove(f)
        os.rename(path.join(self.cachedir, 'jetty-distribution-' + self.version), self.home)

    def configure_jetty(self):
        """Generate jetty XML configuration"""
        node = self.node
        if not path.exists(node.basedir):
            os.makedirs(node.basedir)
        if self.version.startswith("8."):
            self.configure_jetty8()
        else:
            self.configure_jetty9()

    def configure_jetty8(self):
        """Write context.xml and a Jetty 8 style OPTIONS-based start.ini."""
        node = self.node
        liblink = path.join(node.basedir, 'lib')
        if not path.exists(liblink):
            os.symlink(path.join(self.home, 'lib'), liblink)
        with open(path.join(node.basedir, "context.xml"), 'w') as f:
            contexts = []
            for war in self.discover_contexts():
                contexts.append(JETTY_CONTEXT_XML.format(context_path=self.context_path_for_war(war), war=war))
            f.write(JETTY_XML.format(node=node, context_xml=''.join(contexts)))
        with open(path.join(node.basedir, "start.ini"), 'w') as f:
            f.write("# Auto-generated by jvmctl. Edit " + node.config_file + " instead\n")
            f.write("OPTIONS=Server,jsp,jmx,resources,websocket,ext,plus,annotations")
            f.write("\n")
            f.write('jetty.port=' + node.config.get('jvm', 'PORT') + '\n')
            f.write("\n")
            f.write(self.home + "/etc/jetty.xml\n")
            f.write(self.home + "/etc/jetty-annotations.xml\n")
            f.write(node.basedir + "/context.xml\n")

    def configure_jetty9(self):
        """Write context/http/forwarded XML and a Jetty 9 module start.ini."""
        node = self.node
        with open(path.join(node.basedir, "context.xml"), 'w') as f:
            contexts = []
            for war in self.discover_contexts():
                contexts.append(JETTY_CONTEXT_XML.format(context_path=self.context_path_for_war(war), war=war))
            f.write(JETTY_XML.format(node=node, context_xml=''.join(contexts)))
        with open(path.join(node.basedir, "http.xml"), 'w') as f:
            f.write(JETTY_HTTP_XML.format(node=node))
        with open(path.join(node.basedir, "forwarded.xml"), 'w') as f:
            f.write(JETTY_FORWARDED_XML.format(node=node))
        with open(path.join(node.basedir, "start.ini"), 'w') as f:
            f.write("# Auto-generated by jvmctl. Edit " + node.config_file + " instead\n")
            f.write("--module=server\n--module=webapp\n--module=jsp\n")
            f.write("\n")
            f.write('jetty.port=' + node.config.get('jvm', 'PORT') + '\n')
            f.write("\n")
            f.write(node.basedir + "/context.xml\n")
            f.write(node.basedir + "/http.xml\n")
            f.write(node.basedir + "/forwarded.xml\n")

    def discover_contexts(self):
        """Yield deployable webapps (directories or *.war) in webapps_path."""
        if not path.exists(self.webapps_path):
            return
        for fn in os.listdir(self.webapps_path):
            f = path.join(self.webapps_path, fn)
            if fn.startswith('.'):
                continue
            elif path.isdir(f) or fn.endswith('.war'):
                yield f

    def context_path_for_war(self, war):
        """Map a webapp filename to its URL context path (ROOT => prefix)."""
        prefix = self.node.config.get('jvm', 'ROOT_URL_PREFIX')
        basename = path.basename(war)
        if basename == 'ROOT' or basename == 'ROOT.war':
            return prefix
        if basename.endswith('.war'):
            return path.join(prefix, basename[:-4])
        return path.join(prefix, basename)
# Maps the CONTAINER config option to its implementation class.
CONTAINER_TYPES = {
    'jetty': JettyContainer,
    'none': NoneContainer,
}
#----------------------------------------------------------------------
# 3. Source Control
#----------------------------------------------------------------------
class Repo:
    """A source-control repository URL associated with a node."""

    def __init__(self, url, node):
        self.url = url
        self.node = node

    @property
    def module(self):
        """
        Munge the URL to try to come up with some sort of basic name
        for this module.

        eg /whatsit/tags/1.2.3 => whatsit
           /whatsit/trunk@1234 => whatsit
           /whatsit
        """
        module = re.sub(r'/trunk/*', '', self.url)
        module = re.sub(r'/tags/[^/][^/]*/*', '', module)
        module = re.sub(r'/*$', '', module)
        module = re.sub(r'.*/', '', module)
        module = re.sub(r'@[^@/]*$', '', module)
        if module == 'target' or not module:
            # Bug fix: was `self.name`, which Repo never defines and so
            # raised AttributeError; fall back to the node's name instead.
            module = self.node.name
        return module
class GitRepo(Repo):
    """Checkout via git (through a bare local cache), recording the deployed
    revision into the target directory."""

    def checkout(self, dest, target):
        branch = self.node.config.get('jvm', 'GIT_BRANCH')
        url = self.url
        # cache clones under ~/gitcache keyed by a sanitized URL
        cachekey = re.sub("[@:/]", "_", url)
        gitdir = path.join(os.environ['HOME'], 'gitcache', cachekey)
        if not os.path.exists(gitdir):
            subprocess.check_call(['git', 'clone', '--bare', url, gitdir])
        env = dict(os.environ)
        env['GIT_DIR'] = gitdir
        # refresh the cache and pin the requested branch as 'todeploy'
        subprocess.check_call(['git', '--bare', 'fetch', '-f', url, branch], env=env)
        subprocess.check_call(['git', 'branch', '-f', 'todeploy', 'FETCH_HEAD'], env=env)
        subprocess.check_call(['git', 'clone', '--branch', 'todeploy', gitdir, dest])
        with open(path.join(target, 'git-revision'), 'a') as f:
            f.write(branch + '\n')
            f.flush()
            os.chdir(dest)
            # Bug fix: this call sat outside the `with`, handing git a closed
            # file object as stdout; it must run while f is still open.
            subprocess.call(['git', 'log', '-n1'], stdout=f)
class SvnRepo(Repo):
    """Checkout via subversion, recording the revision into the target dir."""

    def checkout(self, dest, target):
        # NOTE(review): the `id` call prints the effective uid/gid into the
        # build log — looks like leftover debug output; confirm before removal.
        subprocess.call(['id'])
        subprocess.call(['svn', 'co', self.url, dest])
        with open(path.join(target, 'svn-revision'), 'a') as f:
            os.chdir(dest)
            subprocess.call(['svn', 'info'], stdout=f)
def open_repo(url, node):
    """Choose a Repo implementation from the URL: git for git/GitHub URLs,
    subversion for everything else."""
    looks_like_git = url.startswith('git') or url.startswith('https://github.com')
    return GitRepo(url, node) if looks_like_git else SvnRepo(url, node)
#----------------------------------------------------------------------
# 4. Node Model
#----------------------------------------------------------------------
class Node:
    """A managed jvm application: its config file, paths and systemd unit."""

    def __init__(self, name):
        if not name:
            raise ValueError('node name cannot be empty')
        self.name = name
        self.config_file = path.join(CONF_ROOT, self.name) + '.conf'
        self.svc = 'jvm:' + name
        self.apps_path = path.join('/apps', name)
        self.log_file = path.join('/logs', name, 'jetty.log')
        self._config = None      # lazily parsed Config
        self._container = None   # lazily constructed container
        self.basedir = '/var/cache/jvmctl/base/' + name

    def __lt__(self, other):
        return self.name < other.name

    @property
    def container(self):
        """The container instance selected by the CONTAINER config option."""
        if self._container is None:
            type_name = self.config.get('jvm', 'CONTAINER')
            ctype = CONTAINER_TYPES.get(type_name)
            if ctype is None:
                sys.stderr.write('Unknown CONTAINER type: ' + type_name + '\n')
                sys.exit(1)
            self._container = ctype(self)
        return self._container

    @property
    def config(self):
        """Parsed configuration, loaded on first access."""
        if self._config is None:
            self._config = Config(self.config_file)
        return self._config

    def ensure_valid(self):
        """Die unless the config file exists and passes a bash syntax check."""
        if not path.isfile(self.config_file):
            die(self.config_file + ': not found')
        if subprocess.call(['/bin/bash', '-n', self.config_file]) != 0:
            die(self.config_file + ': syntax error')

    def spawnctl(self, command):
        """Dispatch a service command to systemd or the legacy spawnctl tool."""
        if os.path.exists('/usr/bin/systemctl'):
            return subprocess.call(['systemctl', command, self.svc])
        else:
            return subprocess.call([control_tools_root + '/bin/spawnctl', command, self.svc])

    def autoregister(self):
        systemd_register(self)

    def port(self):
        """Return the PORT setting as a string, or None when not configured."""
        # Idiom: context manager instead of try/finally around open();
        # raw strings avoid invalid-escape warnings in the regexes.
        with open(self.config_file) as f:
            for line in f:
                match = re.match(r"^\s*PORT=[\"'(]*([0-9]+).*", line)
                if match:
                    return match.group(1)

    def version(self):
        """Best-effort deployed version from git-revision/svn-revision files."""
        try:
            with open(path.join(self.apps_path, "git-revision")) as f:
                for line in f:
                    match = re.match(r'^(?:refs/)?tags/(.*)', line)
                    if match: return match.group(1)
                    match = re.match(r'^commit (\w{7}).*', line)
                    if match: return match.group(1)
        except IOError:
            pass
        try:
            with open(path.join(self.apps_path, "svn-revision")) as f:
                for line in f:
                    match = re.match(r'^URL: .*/tags/(?:[a-z_-]+/)(?:[a-z]+-)([^/]*)$', line)
                    if match: return match.group(1).strip()
                    match = re.match(r'^Revision: (.*)', line)
                    if match: return 'r' + match.group(1)
        except IOError:
            pass

    def pid(self):
        """MainPID of the systemd unit, or None when systemctl fails."""
        process = subprocess.Popen(['systemctl', 'show', self.svc, '--property=MainPID'], stdout=subprocess.PIPE)
        out = process.communicate()[0].decode("utf-8")
        retcode = process.poll()
        if retcode:
            return None
        return int(out.split('=')[1].strip())

    @property
    def repos(self):
        """One Repo per whitespace-separated URL in the REPO option."""
        return [open_repo(url, self) for url in self.config.get('jvm', 'REPO').split()]

    @property
    def java_home(self):
        return self.config.get('jvm', 'JAVA_HOME')
#----------------------------------------------------------------------
# 5. Command-line Interface
#----------------------------------------------------------------------
@cli_command(group='Process management')
def start(node):
    """start the jvm (regenerating its configuration first)"""
    node.ensure_valid()
    # Refresh container config and the systemd unit so the started process
    # picks up any config edits made since the last start.
    reconfigure(node)
    node.autoregister()
    node.spawnctl('enable')
    sys.exit(node.spawnctl('start'))
@cli_command(group="Process management")
def stop(node):
"""stop the jvm"""
sys.exit(node.spawnctl('stop'))
@cli_command(group="Process management")
def disable(node):
"""stop the jvm and prevent it from running on startup"""
sys.exit(node.spawnctl('disable'))
@cli_command(group="Process management")
def enable(node):
"""run the jvm auomatically on startup"""
node.ensure_valid()
reconfigure(node)
node.autoregister()
sys.exit(node.spawnctl('enable'))
@cli_command(group="Process management")
def restart(node):
"""stop and then start the jvm"""
if os.getuid() != 0:
print("restart requires sudo")
sys.exit(1)
node.ensure_valid()
reconfigure(node)
node.autoregister()
node.spawnctl('enable')
sys.exit(node.spawnctl('restart'))
@cli_command(group="Process management")
def status(node):
"""check whether the jvm is running"""
port = node.port()
if port is not None:
print('URL: http://' + socket.gethostname() + ':' + port)
print('Webapp path: ' + node.apps_path)
print('Version: ' + (node.version() or 'uknown'))
print('')
sys.exit(subprocess.call(['systemctl', 'status', node.svc] + sys.argv[3:]))
@cli_command(group="Configuration")
def version(*args):
"""print jvmctl version"""
print('jvmctl ' + VERSION)
@cli_command(group="Configuration")
def delete(node):
"""delete the jvm's binaries and configuration"""
node.spawnctl('stop')
node.spawnctl('disable')
if path.exists('/usr/sbin/svccfg'):
subprocess.call(['/usr/sbin/svccfg', '-s', 'svc:/site/jetty', 'delete', '-f', node.name])
elif path.exists(path.join('/etc/spawn', node.svc)):
shutil.rmtree(path.join('/etc/spawn', node.svc))
if path.exists(node.apps_path):
print("Removing", node.apps_path)
shutil.rmtree(node.apps_path)
if path.exists(node.config_file):
print("Removing", node.config_file)
os.unlink(node.config_file)
@cli_command(group="Debugging")
def log(node):
"""browse the jvm's log file (use -f to follow tail)"""
logfile = '/logs/%s/stdio.log' % (node.name,)
if os.path.exists(logfile):
os.execvp('less', ['less', '-R -n +F', logfile])
if os.getuid() != 0:
print("Hint: try with sudo")
if 'SYSTEMD_PAGER' not in os.environ:
os.environ['SYSTEMD_PAGER'] = "LESS=FRXM less"
os.execvp('journalctl', ['journalctl', '-u', node.svc] + sys.argv[3:])
def find_new_port():
    """Pick a port 10 above the highest PORT configured on any node
    (8091 when nothing higher than the 8081 baseline is in use)."""
    highest = 8081
    for node in iter_nodes():
        configured = node.port()
        if configured is not None and int(configured) > highest:
            highest = int(configured)
    return highest + 10
@cli_command(group="Configuration")
def new(node):
"""configure a new jvm"""
if path.exists(node.config_file):
die(node.config_file + ': already exists')
if path.exists(node.apps_path):
die(node.apps_path + ': already exists')
port = find_new_port()
f = open(node.config_file, 'w')
try:
print('PORT=' + str(port), file=f)
print('', file=f)
print('#REPO=svn://...', file=f)
print('#JAVA_OPTS=-Dfoo=bar -Dtop.speed=fast', file=f)
print('#HEAP_SIZE=128m', file=f)
finally:
f.close()
@cli_command(group="Configuration")
def show(node):
"""show the jvm's configuration"""
f = open(node.config_file)
try:
sys.stdout.write(f.read())
finally:
f.close()
@cli_command(group="Configuration")
def dump(node):
"""show the jvm's parsed configuration (including defaults)"""
post_config(node)
node.config.write(sys.stdout)
@cli_command(group="Configuration")
def config(node):
"""edit the jvm's configuration"""
if not path.isfile(node.config_file):
die(node.config_file + ': not found\nTo create it use: jvmctl ' + node.name + ' new')
if os.getuid() != 0:
die('config requires sudo')
if 'EDITOR' not in os.environ:
os.environ['EDITOR'] = 'vi'
result = subprocess.call(['sudoedit', node.config_file])
reconfigure(node)
os.chdir(CONF_ROOT)
try:
subprocess.check_call(['git', 'rev-parse', '--is-inside-work-tree'], stdout=open(os.devnull, 'wb'))
except:
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'add', node.config_file])
if subprocess.call(['git', 'diff-index', '--quiet', 'HEAD']):
os.environ['GIT_COMMITTER_NAME'] = 'jvmctl'
os.environ['GIT_COMMITTER_EMAIL'] = 'root@'+os.uname()[1]
subprocess.check_call(['git', 'commit', '--author="{0} <{<EMAIL>>"'.format(os.getlogin()), '-m "Config change for {}"'.format(node.name)])
return result
@cli_command(group="Configuration")
def changed(node):
"""show the last change to the config"""
os.chdir(CONF_ROOT)
hash = fetch_hash(node)
subprocess.check_call(['git', 'show', hash])
@cli_command(group="Configuration")
def revert(node):
"""revert the last change to the config"""
os.chdir(CONF_ROOT)
if os.getuid() != 0:
die('revert requires sudo')
hash = fetch_hash(node)
subprocess.check_call(['git', 'revert', hash, '--no-edit'])
@cli_command(group="Process management")
def pid(node):
"""print the process ID of the jvm"""
pid = node.pid()
if pid is None:
die('not running')
print(pid)
@cli_command(group="Debugging")
def lsof(node):
"""list the jvm's open files and sockets"""
return subprocess.call(['lsof', '-p', str(node.pid())])
@cli_command(group="Debugging")
def stack(node):
"""print a strack trace for all jvm threads"""
pid = node.pid()
stat = os.stat('/proc/%d' % pid)
jstack = path.join(node.java_home, 'bin/jstack')
return subprocess.call([jstack, str(pid)],
preexec_fn=switchuid(stat.st_uid, stat.st_gid))
@cli_command(group="Debugging")
def gcutil(node):
"""print garbage collection statistics"""
pid = node.pid()
stat = os.stat('/proc/%d' % pid)
jstat = path.join(node.java_home, 'bin/jstat')
return subprocess.call([jstat, '-gcutil', str(pid)] + sys.argv[3:],
preexec_fn=switchuid(stat.st_uid, stat.st_gid))
def build(node, workarea, args):
    """
    Check out each of the node's repos and build them into workarea/target.

    Builds run via nla-deploy.sh when present, otherwise `mvn package`;
    resulting wars are unpacked into target (a single war becomes ROOT),
    plain jars are copied in. Exits the process when neither build entry
    point exists. Pass '-d' in args for a debug shell before the build.
    """
    target = path.join(workarea, 'target')
    os.makedirs(target)
    os.environ['PATH'] = node.config.get('jvm', 'JAVA_HOME') + '/bin:/usr/local/bin:/bin:/usr/bin'
    for repo in node.repos:
        moduledir = path.join(workarea, repo.module)
        repo.checkout(moduledir, target)
        os.chdir(moduledir)
        nla_environ = node.config.get('jvm', 'NLA_ENVIRON')
        nla_deploy = path.join(moduledir, 'nla-deploy.sh')
        pom = path.join(moduledir, 'pom.xml')
        # expose the whole [jvm] config to the build as environment variables
        env = dict(os.environ)
        for k, v in node.config.items('jvm'):
            env[k] = v
        if '-d' in args:
            print('\nDropping into debug shell. Type "exit" to continue deploy or "exit 1" to abort.')
            subprocess.check_call(os.environ.get('SHELL', '/bin/sh'), env=env)
        if path.exists(nla_deploy):
            subprocess.call(['/bin/bash', '-e', 'nla-deploy.sh', target, nla_environ, node.apps_path], env=env)
        elif path.exists(pom):
            if nla_environ:
                subprocess.call(['mvn', 'package', '-P', nla_environ], env=env)
            else:
                subprocess.call(['mvn', 'package'], env=env)
            wars = glob(path.join(moduledir, 'target/*.war'))
            for war in wars:
                if len(wars) == 1:
                    # a single war is deployed at the root context
                    basename = 'ROOT'
                else:
                    # Bug fix: was `basename = target,re.sub(...)`, which
                    # built a tuple and made path.join below raise TypeError.
                    basename = re.sub(r'\.war$', '', path.basename(war))
                unpack = path.join(target, basename)
                os.makedirs(unpack)
                subprocess.call(['unzip', '-d', unpack, war])
            if not wars:
                jars = glob(path.join(moduledir, 'target/*.jar'))
                for jar in jars:
                    # skip maven-shade's pre-shading 'original-' artifacts
                    if not path.basename(jar).startswith('original-'):
                        print("Copying", jar, "to target")
                        shutil.copy(jar, path.join(target, path.basename(jar)))
        else:
            # Message fix: the script file is named nla-deploy.sh
            print('nla-deploy.sh and pom.xml not found')
            print('At least one of them must exist. Bailing.')
            sys.exit(1)
@cli_command(group="Configuration")
def deploy(node, *args):
"""(re)build and redeploy the application"""
node.ensure_valid()
if not os.access('/apps', os.W_OK):
die("Need permission to write to /apps (maybe try sudo?)")
node.config # ensure config has been read before dropping privileges
timestamp = time.strftime('%Y%m%d%H%M%S', time.localtime())
workarea = '/var/tmp/jvmctl-build-%s-%s' % (node.name, timestamp)
target = path.join(workarea, 'target')
dest = node.apps_path
pw = pwd.getpwnam('builder')
os.chdir('/') # workaround selinux permission problem when we switchuid
env = dict(os.environ)
pid = os.fork()
if pid == 0:
switchuid(pw.pw_uid, pw.pw_gid)()
if '-s' in args:
os.environ['MAVEN_OPTS'] = '-Dmaven.test.skip=true'
os.environ['HOME'] = pw.pw_dir
os.environ['WEBAPPS_PATH'] = dest
os.environ['NODE'] = node.name
os.environ['WORKAREA'] = workarea
build(node, workarea, args)
sys.exit(0)
else:
pid, result = os.wait()
if result != 0:
die('Build failed. You may inspect ' + workarea)
if not [f for f in os.listdir(target) if not f.endswith('-revision')]:
die('Oh dear! ' + target + ' is empty. I guess the build failed. Bailing out.')
newdest = dest + '.new.' + timestamp
olddest = dest + '.old.' + timestamp
print()
print('Copying %s to %s...' % (target, newdest))
shutil.copytree(target, newdest, symlinks=True)
print('Stopping %s...' % node.name)
node.spawnctl('stop')
print('Swapping in the the version...')
if path.exists(dest):
os.rename(dest, olddest)
os.rename(newdest, dest)
print('Configuring container...')
node.container.deploy()
node.autoregister()
print('Starting %s...' % node.name)
if node.spawnctl('start') == 0:
print("Success! Cleaning up the working area...")
shutil.rmtree(workarea)
if os.path.exists(olddest):
print("Deleting the old version, mwahahaha!")
shutil.rmtree(olddest)
node.spawnctl('enable')
else:
print("Uh.... something seems to have gone wrong starting up")
print("I'm leaving the old version for you in %s" % olddest)
print()
status(node)
def quote(s):
    """Wrap s in double quotes when it contains a space, else return as-is."""
    return '"' + s + '"' if ' ' in s else s
def fmt_properties(properties):
    """Render a dict as space-separated, quoted -Dkey=value JVM arguments."""
    return ' '.join(quote('-D' + key + '=' + value)
                    for key, value in properties.items())
def quote_list(list):
    """Join values into one space-separated string, quoting as needed."""
    return " ".join(quote(item) for item in list)
@cli_command(group="Hidden")
def reconfigure(node):
node.container.deploy()
systemd_register(node)
@cli_command(group="Hidden")
def oomkill(node, pid):
try_kill_jvm(node, pid)
try_rename_heap_dump(node)
send_oom_email(node, pid)
def send_oom_email(node, pid):
    """Email each OOM_EMAIL recipient that the jvm hit an OOM and was
    restarted, via the local SMTP server."""
    oom_emails = node.config.get('jvm', 'OOM_EMAIL').split()
    mail_from = getpass.getuser() + '@' + socket.gethostname()
    smtp = smtplib.SMTP('localhost')
    try:
        for mail_to in oom_emails:
            # Bug fix: RFC 5322 requires an empty line between the headers
            # and the body; without it the first body line was parsed as a
            # (malformed) header and the message body was lost.
            message = """\
From: {mail_from}
To: {mail_to}
Subject: JVM-OOM: {name} @ {hostname}

JVM {name} on {hostname} with pid {pid} ran out of memory and was restarted.
Heap dump path: {heap_dump_path}
--
{script}\n""".format(mail_from=mail_from,
                     mail_to=mail_to,
                     name=node.name,
                     pid=pid,
                     hostname=socket.gethostname(),
                     heap_dump_path=node.config.get('jvm', 'HEAP_DUMP_PATH'),
                     script=path.realpath(__file__))
            smtp.sendmail(mail_from, mail_to, message)
    finally:
        smtp.quit()
def try_rename_heap_dump(node):
    """Promote the temporary heap dump to its permanent name, replacing any
    previous dump so only the most recent one is kept."""
    dump_path = node.config.get('jvm', 'HEAP_DUMP_PATH')
    if not dump_path:
        return
    tmp_path = dump_path + '.tmp'
    if not os.path.isfile(tmp_path):
        return
    try:
        os.rename(tmp_path, dump_path)
    except OSError:
        # permissions problem or similar: best effort only, just give up
        pass
def try_kill_jvm(node, pid):
    """Force-kill the jvm process, ignoring failure (it may be gone already)."""
    print('jvmctl oomkill', node.name, pid)
    target = int(pid)
    try:
        os.kill(target, signal.SIGKILL)
    except OSError:
        pass
def set_unless_present(config, section, option, value):
    """Set a config option only when the section doesn't already define it,
    so explicit user settings always win over derived defaults."""
    if config.has_option(section, option):
        return
    config.set(section, option, value)
def post_config(node):
    # Assemble the JVM command line and inject the derived settings into the
    # [systemd.service.*] sections before the unit file is written.
    properties = {
        'jvmctl.node': node.name
    }
    property_opts = fmt_properties(properties) + ' ' + fmt_properties(node.container.properties)
    jvm_opts = ['-Xmx' + node.config.get('jvm', 'HEAP_SIZE'),
                '-XX:OnOutOfMemoryError=/usr/bin/jvmctl oomkill ' + node.name + ' %p' + ' -Dlog4j2.formatMsgNoLookups=true' ]
    heap_dump_path = node.config.get('jvm', 'HEAP_DUMP_PATH')
    if heap_dump_path:
        jvm_opts.append('-XX:+HeapDumpOnOutOfMemoryError')
        if heap_dump_path.endswith("/") or os.path.isdir(heap_dump_path):
            jvm_opts.append('-XX:HeapDumpPath=' + heap_dump_path)
        else:
            # dump to a .tmp file; oomkill renames it into place afterwards
            jvm_opts.append('-XX:HeapDumpPath=' + heap_dump_path + ".tmp")
    jvm_opts += shlex.split(node.config.get('jvm', 'GC_LOG_OPTS'))
    jvm_opts += shlex.split(node.config.get('jvm', 'JAVA_OPTS'))
    jvm_opts += node.container.jvm_opts
    exec_prefix = node.config.get('jvm', 'EXEC_PREFIX')
    if exec_prefix:
        cmd = exec_prefix + ' '
    else:
        cmd = ''
    cmd += node.java_home + "/bin/java " + property_opts + ' ' + quote_list(jvm_opts)
    # NOTE: this local deliberately shadows the `socket` module in here
    socket = node.config.get('jvm', 'SOCKET')
    env_file = path.join(node.basedir, 'environment')
    set_unless_present(node.config, 'systemd.service.Service', 'WorkingDirectory', node.basedir)
    set_unless_present(node.config, 'systemd.service.Service', 'EnvironmentFile', env_file)
    set_unless_present(node.config, 'systemd.service.Service', 'User', node.config.get('jvm', 'USER'))
    # '%' is quadrupled — presumably one doubling for ConfigParser
    # interpolation and one for systemd specifier escaping; TODO confirm.
    set_unless_present(node.config, 'systemd.service.Service', 'ExecStart', cmd.replace('%', '%%%%'))
    if socket:
        # socket-activated service: order/require the matching .socket unit
        socket_unit = 'jvm:' + node.name + '.socket'
        old_after = node.config.get('systemd.service.Unit', 'After')
        node.config.set('systemd.service.Unit', 'After', old_after + ' ' + socket_unit)
        set_unless_present(node.config, 'systemd.service.Unit', 'Requires', socket_unit)
        set_unless_present(node.config, 'systemd.service.Service', 'StandardInput', 'socket')
@cli_command(group="Hidden")
def systemd_register(node):
    # Write out the systemd artefacts for *node* and reload systemd:
    #   * <basedir>/environment                      (EnvironmentFile, mode 0600)
    #   * /etc/systemd/system/<svc>.service          (from [systemd.service.*])
    #   * /etc/systemd/system/<svc>.socket           (only when jvm/SOCKET is set)
    post_config(node)
    if not path.exists(node.basedir):
        os.makedirs(node.basedir)
    env_file = node.config.get('systemd.service.Service', 'EnvironmentFile')
    # 0600: the environment may contain secrets; O_TRUNC rewrites in place
    with os.fdopen(os.open(env_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600), 'w') as f:
        f.write("# Auto-generated by jvmctl. Edit " + node.config_file + " instead\n")
        f.write('NODE=' + node.name + '\n')
        for k, v in node.config.items('jvm'):
            # environment files are line oriented; flatten multi-line values
            v = v.replace('\n', ' ')
            f.write(k + "=" + v + "\n")
    socket = node.config.get('jvm', 'SOCKET')
    with open('/etc/systemd/system/' + node.svc + '.service', 'w') as f:
        f.write("# Auto-generated by jvmctl. Edit " + node.config_file + " instead\n")
        # copy every [systemd.service.X] section into the unit file as [X];
        # RawConfig is defined elsewhere -- presumably a RawConfigParser
        # variant that writes ini-style output
        conf = RawConfig()
        for section in node.config.sections():
            if not section.startswith('systemd.service.'): continue
            out_section = section.replace('systemd.service.', '')
            conf.add_section(out_section)
            for k, v in node.config.items(section):
                conf.set(out_section, k, v)
        conf.write(f)
    socket_config = '/etc/systemd/system/' + node.svc + '.socket'
    if socket:
        with open(socket_config, 'w') as f:
            f.write("""[Socket]
ListenStream={socket}
SocketMode={socket_mode}
SocketUser={socket_user}
SocketGroup={socket_group}
[Install]
WantedBy=sockets.target
""".format(socket=socket,
           socket_user=node.config.get('jvm', 'SOCKET_USER'),
           socket_group=node.config.get('jvm', 'SOCKET_GROUP'),
           socket_mode=node.config.get('jvm', 'SOCKET_MODE')))
    elif path.exists(socket_config):
        # socket support was turned off for this node: drop the stale unit
        os.unlink(socket_config)
    subprocess.call(['systemctl', 'daemon-reload'])
    if socket:
        subprocess.call(['systemctl', 'enable', node.svc + '.socket'])
@cli_command(group="Debugging")
def run(node):
    """run the application interactively"""
    # NOTE(review): the body is a stub. The docstring is kept verbatim
    # because usage() prints func.__doc__ as the help text. The actual
    # foreground-run behaviour is presumably implemented elsewhere (the
    # command name may be special-cased by the dispatcher) -- confirm.
    pass
def fetch_hash(node):
    """Return the git commit hash of the latest change to *node*'s config file.

    Exits via die() when the file is missing, is not tracked by git, or
    has no recorded history.
    """
    if not path.isfile(node.config_file):
        die(node.config_file + ': not found\nTo create it use: jvmctl ' + node.name + ' new')
    try:
        commit = subprocess.check_output(['git', 'log', '-n 1', '--pretty=format:%H', node.config_file])
    except (subprocess.CalledProcessError, OSError):
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only trap git failures (non-zero exit status
        # or a missing git binary).
        die('{} was not found in version control. Try editing the config with \n jvmctl config {}'.format(node.config_file, node.name))
    if not commit:
        die('no changes found for ' + node.name)
    return commit
def switchuid(uid, gid):
    """Return a callable that drops privileges to *uid*/*gid*.

    Suitable as a subprocess ``preexec_fn``: supplementary groups are
    cleared first, and the gid is set before the uid (setuid first would
    forfeit the right to change groups).
    """
    def _drop_privileges():
        os.setgroups([])
        os.setgid(gid)
        os.setuid(uid)
    return _drop_privileges
def iter_nodes():
    """Yield a Node for every ``*.conf`` file found under CONF_ROOT."""
    for entry in os.listdir(CONF_ROOT):
        if not entry.endswith('.conf'):
            continue
        yield Node(entry.split('.', 2)[0])
def list_nodes():
    """Print a status table (name, version, port, state) of all nodes."""
    row = '%-30s %7s %5s %s'
    print(row % ('NODE', 'VERSION', 'PORT', 'STATUS'))
    for node in sorted(iter_nodes(), key=lambda n: n.name):
        pid = node.pid()
        state = 'running as ' + str(pid) if pid else 'stopped'
        print(row % (node.name,
                     node.version() or "-",
                     node.port() or "-",
                     state))
def die(msg):
    """Write *msg* to stderr, prefixed with the program name, and exit 1."""
    sys.stderr.write('%s: %s\n' % (sys.argv[0], msg))
    sys.exit(1)
def usage():
    """Print command help, one section per command group, then exit 1."""
    prog = sys.argv[0]
    print('Usage: %s <app> <command>' % prog)
    print('Control and deploy JVM applications')
    for group, funcs in groups.items():
        if group == 'Hidden':
            continue  # internal commands are not advertised
        print()
        print('%s:' % group)
        for fn in sorted(funcs, key=lambda f: f.__name__):
            print('  %-10s- %s' % (fn.__name__, fn.__doc__))
    sys.exit(1)
def main():
    """CLI entry point.

    Accepts ``jvmctl list`` / ``jvmctl version`` as one-argument forms,
    otherwise dispatches ``jvmctl <node> <command> [args...]``; the order
    ``jvmctl <command> <node>`` is tolerated as well.
    """
    if len(sys.argv) == 2:
        if sys.argv[1] == 'list':
            list_nodes()
        elif sys.argv[1] in ('version', '--version'):
            version()
        else:
            # BUG FIX: an unrecognised single argument used to fall
            # through silently and exit 0; show usage (which exits 1).
            usage()
    elif len(sys.argv) < 3:
        usage()
    else:
        node = sys.argv[1]
        cmd = sys.argv[2]
        args = sys.argv[3:]
        if cmd in commands:
            sys.exit(commands[cmd](Node(node), *args) or 0)
        elif node in commands:
            # tolerate "jvmctl <command> <node>" argument order
            sys.exit(commands[node](Node(cmd), *args) or 0)
        else:
            print('Unknown command', file=sys.stderr)
            usage()


if __name__ == '__main__': main()
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# Copyright (C) 2014-2017 <NAME> <<EMAIL>>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
import threading
from datetime import date, timedelta
from django.conf import settings
from .utils import DUMMY_BMP_DATA
import factory
from taiga.permissions.choices import MEMBERS_PERMISSIONS
class Factory(factory.DjangoModelFactory):
    """Abstract base for every factory in this module.

    Replaces factory_boy's per-factory sequence counter with one
    process-wide, lock-protected counter, so objects created from
    different factories (or threads) never collide on Sequence-derived
    unique fields.
    """
    class Meta:
        strategy = factory.CREATE_STRATEGY
        model = None
        abstract = True

    # shared across ALL subclasses on purpose (class attribute lookup
    # resolves to this base class)
    _SEQUENCE = 1
    _SEQUENCE_LOCK = threading.Lock()

    @classmethod
    def _setup_next_sequence(cls):
        # increment under the lock so two threads never see the same value
        with cls._SEQUENCE_LOCK:
            cls._SEQUENCE += 1
            return cls._SEQUENCE
class ProjectTemplateFactory(Factory):
    """Project template that ProjectFactory builds projects from.

    ``django_get_or_create = ("slug",)`` makes repeated creation with the
    same slug reuse a single row, so tests can "create" the default
    template freely.  The empty lists mirror the template's list-valued
    configuration fields.
    """
    class Meta:
        strategy = factory.CREATE_STRATEGY
        model = "projects.ProjectTemplate"
        django_get_or_create = ("slug",)

    name = "Template name"
    slug = settings.DEFAULT_PROJECT_TEMPLATE
    description = factory.Sequence(lambda n: "Description {}".format(n))

    epic_statuses = []
    us_statuses = []
    us_duedates = []
    points = []
    task_statuses = []
    task_duedates = []
    issue_statuses = []
    issue_types = []
    issue_duedates = []
    priorities = []
    severities = []
    roles = []
    epic_custom_attributes = []
    us_custom_attributes = []
    task_custom_attributes = []
    issue_custom_attributes = []
    default_owner_role = "tester"
class ProjectFactory(Factory):
    """Project with a unique name/slug, an owner and a creation template."""
    class Meta:
        model = "projects.Project"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Project {}".format(n))
    slug = factory.Sequence(lambda n: "project-{}-slug".format(n))
    logo = factory.django.FileField(data=DUMMY_BMP_DATA)
    description = "Project description"
    owner = factory.SubFactory("tests.factories.UserFactory")
    creation_template = factory.SubFactory("tests.factories.ProjectTemplateFactory")
class ProjectModulesConfigFactory(Factory):
    """Per-project record of which optional modules are enabled."""
    class Meta:
        model = "projects.ProjectModulesConfig"
        strategy = factory.CREATE_STRATEGY

    project = factory.SubFactory("tests.factories.ProjectFactory")


class RoleFactory(Factory):
    """Project role; name and slug are unique via the shared sequence."""
    class Meta:
        model = "users.Role"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Role {}".format(n))
    slug = factory.Sequence(lambda n: "test-role-{}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class PointsFactory(Factory):
    """Estimation points choice belonging to a project."""
    class Meta:
        model = "projects.Points"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Points {}".format(n))
    value = 2
    project = factory.SubFactory("tests.factories.ProjectFactory")


class RolePointsFactory(Factory):
    """Points assigned to one role on one user story."""
    class Meta:
        model = "userstories.RolePoints"
        strategy = factory.CREATE_STRATEGY

    user_story = factory.SubFactory("tests.factories.UserStoryFactory")
    role = factory.SubFactory("tests.factories.RoleFactory")
    points = factory.SubFactory("tests.factories.PointsFactory")
class EpicAttachmentFactory(Factory):
    """Generic Attachment whose content_object is an epic."""
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    content_object = factory.SubFactory("tests.factories.EpicFactory")
    attached_file = factory.django.FileField(data=b"File contents")

    class Meta:
        model = "attachments.Attachment"
        strategy = factory.CREATE_STRATEGY


class UserStoryAttachmentFactory(Factory):
    """Generic Attachment whose content_object is a user story."""
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    content_object = factory.SubFactory("tests.factories.UserStoryFactory")
    attached_file = factory.django.FileField(data=b"File contents")

    class Meta:
        model = "attachments.Attachment"
        strategy = factory.CREATE_STRATEGY


class TaskAttachmentFactory(Factory):
    """Generic Attachment whose content_object is a task."""
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    content_object = factory.SubFactory("tests.factories.TaskFactory")
    attached_file = factory.django.FileField(data=b"File contents")

    class Meta:
        model = "attachments.Attachment"
        strategy = factory.CREATE_STRATEGY


class IssueAttachmentFactory(Factory):
    """Generic Attachment whose content_object is an issue."""
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    content_object = factory.SubFactory("tests.factories.IssueFactory")
    attached_file = factory.django.FileField(data=b"File contents")

    class Meta:
        model = "attachments.Attachment"
        strategy = factory.CREATE_STRATEGY
class WikiAttachmentFactory(Factory):
    """Generic Attachment whose content_object is a wiki page.

    BUG FIX: content_object previously referenced
    "tests.factories.WikiFactory", which is not defined anywhere in this
    module -- the wiki page factory is WikiPageFactory.
    """
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    content_object = factory.SubFactory("tests.factories.WikiPageFactory")
    attached_file = factory.django.FileField(data=b"File contents")

    class Meta:
        model = "attachments.Attachment"
        strategy = factory.CREATE_STRATEGY
class UserFactory(Factory):
    """User of the configured Django auth model.

    ``username`` is unique via the shared sequence; the password is set
    (hashed) to the username so tests can authenticate easily.
    """
    class Meta:
        model = settings.AUTH_USER_MODEL
        strategy = factory.CREATE_STRATEGY

    username = factory.Sequence(lambda n: "user{}".format(n))
    # BUG FIX: the previous expression was syntactically invalid (a bare
    # redaction placeholder); derive the address from the username.
    email = factory.LazyAttribute(lambda obj: "%s@email.com" % obj.username)
    # set_password() hashes the value; runs after the instance exists
    password = factory.PostGeneration(lambda obj, *args, **kwargs: obj.set_password(obj.username))
    accepted_terms = True
    read_new_terms = True
class MembershipFactory(Factory):
    """Membership linking a user to a project through a role."""
    class Meta:
        model = "projects.Membership"
        strategy = factory.CREATE_STRATEGY

    # uuid1 gives a unique invitation/confirmation token per membership
    token = factory.LazyAttribute(lambda obj: str(uuid.uuid1()))
    project = factory.SubFactory("tests.factories.ProjectFactory")
    role = factory.SubFactory("tests.factories.RoleFactory")
    user = factory.SubFactory("tests.factories.UserFactory")
class InvitationFactory(Factory):
    """Pending invitation: a Membership row with an e-mail but no user."""
    class Meta:
        model = "projects.Membership"
        strategy = factory.CREATE_STRATEGY

    token = factory.LazyAttribute(lambda obj: str(uuid.uuid1()))
    project = factory.SubFactory("tests.factories.ProjectFactory")
    role = factory.SubFactory("tests.factories.RoleFactory")
    # BUG FIX: the previous expression was syntactically invalid (a bare
    # redaction placeholder); generate a unique address per invitation.
    email = factory.Sequence(lambda n: "invitee{}@email.com".format(n))
class WebhookFactory(Factory):
    """Outgoing webhook endpoint registered on a project."""
    class Meta:
        model = "webhooks.Webhook"
        strategy = factory.CREATE_STRATEGY

    project = factory.SubFactory("tests.factories.ProjectFactory")
    url = "http://localhost:8080/test"
    key = "factory-key"
    name = "Factory-name"


class WebhookLogFactory(Factory):
    """Delivery-log entry for a webhook call (request/response snapshot)."""
    class Meta:
        model = "webhooks.WebhookLog"
        strategy = factory.CREATE_STRATEGY

    webhook = factory.SubFactory("tests.factories.WebhookFactory")
    url = "http://localhost:8080/test"
    status = "200"
    request_data = {"text": "test-request-data"}
    response_data = {"text": "test-response-data"}


class StorageEntryFactory(Factory):
    """Per-user key/value storage entry; value is a small JSON-ish dict."""
    class Meta:
        model = "userstorage.StorageEntry"
        strategy = factory.CREATE_STRATEGY

    owner = factory.SubFactory("tests.factories.UserFactory")
    key = factory.Sequence(lambda n: "key-{}".format(n))
    value = factory.Sequence(lambda n: {"value": "value-{}".format(n)})
class EpicFactory(Factory):
    """Epic with project, owner and status dependencies."""
    class Meta:
        model = "epics.Epic"
        strategy = factory.CREATE_STRATEGY

    ref = factory.Sequence(lambda n: n)
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    subject = factory.Sequence(lambda n: "Epic {}".format(n))
    description = factory.Sequence(lambda n: "Epic {} description".format(n))
    status = factory.SubFactory("tests.factories.EpicStatusFactory")


# NOTE(review): class name lacks the usual "Factory" suffix; renaming
# would break existing imports, so it is only flagged here.
class RelatedUserStory(Factory):
    """Through-model linking an epic to one of its user stories."""
    class Meta:
        model = "epics.RelatedUserStory"
        strategy = factory.CREATE_STRATEGY

    epic = factory.SubFactory("tests.factories.EpicFactory")
    user_story = factory.SubFactory("tests.factories.UserStoryFactory")


class MilestoneFactory(Factory):
    """Milestone (sprint) spanning one week from today."""
    class Meta:
        model = "milestones.Milestone"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Milestone {}".format(n))
    owner = factory.SubFactory("tests.factories.UserFactory")
    project = factory.SubFactory("tests.factories.ProjectFactory")
    estimated_start = factory.LazyAttribute(lambda o: date.today())
    estimated_finish = factory.LazyAttribute(lambda o: o.estimated_start + timedelta(days=7))
class UserStoryFactory(Factory):
    """User story with project/owner/status/milestone dependencies."""
    class Meta:
        model = "userstories.UserStory"
        strategy = factory.CREATE_STRATEGY

    ref = factory.Sequence(lambda n: n)
    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    subject = factory.Sequence(lambda n: "User Story {}".format(n))
    description = factory.Sequence(lambda n: "User Story {} description".format(n))
    status = factory.SubFactory("tests.factories.UserStoryStatusFactory")
    milestone = factory.SubFactory("tests.factories.MilestoneFactory")
    tags = factory.Faker("words")
    due_date = factory.LazyAttribute(lambda o: date.today() + timedelta(days=7))
    due_date_reason = factory.Faker("words")

    @factory.post_generation
    def assigned_users(self, create, users_list, **kwargs):
        # Runs after the story exists, so
        # ``UserStoryFactory(assigned_users=[u1, u2])`` can populate the
        # many-to-many, which plain attribute declarations cannot do.
        if not create:
            return
        if users_list:
            for user in users_list:
                self.assigned_users.add(user)
class TaskFactory(Factory):
    """Task linked to a user story, milestone, status, owner and project."""
    class Meta:
        model = "tasks.Task"
        strategy = factory.CREATE_STRATEGY

    ref = factory.Sequence(lambda n: n)
    subject = factory.Sequence(lambda n: "Task {}".format(n))
    description = factory.Sequence(lambda n: "Task {} description".format(n))
    owner = factory.SubFactory("tests.factories.UserFactory")
    project = factory.SubFactory("tests.factories.ProjectFactory")
    status = factory.SubFactory("tests.factories.TaskStatusFactory")
    milestone = factory.SubFactory("tests.factories.MilestoneFactory")
    user_story = factory.SubFactory("tests.factories.UserStoryFactory")
    tags = factory.Faker("words")
    due_date = factory.LazyAttribute(lambda o: date.today() + timedelta(days=7))
    due_date_reason = factory.Faker("words")


class IssueFactory(Factory):
    """Issue with the full set of classification dependencies."""
    class Meta:
        model = "issues.Issue"
        strategy = factory.CREATE_STRATEGY

    ref = factory.Sequence(lambda n: n)
    subject = factory.Sequence(lambda n: "Issue {}".format(n))
    description = factory.Sequence(lambda n: "Issue {} description".format(n))
    owner = factory.SubFactory("tests.factories.UserFactory")
    project = factory.SubFactory("tests.factories.ProjectFactory")
    status = factory.SubFactory("tests.factories.IssueStatusFactory")
    severity = factory.SubFactory("tests.factories.SeverityFactory")
    priority = factory.SubFactory("tests.factories.PriorityFactory")
    # "type" matches the model field name (shadows the builtin by design)
    type = factory.SubFactory("tests.factories.IssueTypeFactory")
    milestone = factory.SubFactory("tests.factories.MilestoneFactory")
    tags = factory.Faker("words")
    due_date = factory.LazyAttribute(lambda o: date.today() + timedelta(days=7))
    due_date_reason = factory.Faker("words")
class WikiPageFactory(Factory):
    """Wiki page identified by a unique slug within its project."""
    class Meta:
        model = "wiki.WikiPage"
        strategy = factory.CREATE_STRATEGY

    project = factory.SubFactory("tests.factories.ProjectFactory")
    owner = factory.SubFactory("tests.factories.UserFactory")
    slug = factory.Sequence(lambda n: "wiki-page-{}".format(n))
    content = factory.Sequence(lambda n: "Wiki Page {} content".format(n))


class WikiLinkFactory(Factory):
    """Sidebar link to a wiki page, ordered within the project."""
    class Meta:
        model = "wiki.WikiLink"
        strategy = factory.CREATE_STRATEGY

    project = factory.SubFactory("tests.factories.ProjectFactory")
    title = factory.Sequence(lambda n: "Wiki Link {} title".format(n))
    href = factory.Sequence(lambda n: "link-{}".format(n))
    order = factory.Sequence(lambda n: n)
# Per-project classification values. Each factory below creates one value
# (status / due-date policy / severity / priority / type) attached to a
# project; names are unique via the shared sequence counter.

class EpicStatusFactory(Factory):
    """Epic status value for a project."""
    class Meta:
        model = "projects.EpicStatus"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Epic status {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class UserStoryStatusFactory(Factory):
    """User story status value for a project."""
    class Meta:
        model = "projects.UserStoryStatus"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "User Story status {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class UserStoryDueDateFactory(Factory):
    """User story due-date policy for a project."""
    class Meta:
        model = "projects.UserStoryDueDate"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "User Story due date {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class TaskStatusFactory(Factory):
    """Task status value for a project."""
    class Meta:
        model = "projects.TaskStatus"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Task status {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class IssueStatusFactory(Factory):
    """Issue status value for a project."""
    class Meta:
        model = "projects.IssueStatus"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Issue Status {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class SeverityFactory(Factory):
    """Issue severity value for a project."""
    class Meta:
        model = "projects.Severity"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Severity {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class PriorityFactory(Factory):
    """Issue priority value for a project."""
    class Meta:
        model = "projects.Priority"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Priority {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class IssueTypeFactory(Factory):
    """Issue type value for a project."""
    class Meta:
        model = "projects.IssueType"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Issue Type {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")
# Custom attribute *definitions* (the schema side); the values live in the
# *CustomAttributesValues factories further below.

class EpicCustomAttributeFactory(Factory):
    """Custom attribute definition for epics."""
    class Meta:
        model = "custom_attributes.EpicCustomAttribute"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Epic Custom Attribute {}".format(n))
    description = factory.Sequence(lambda n: "Description for Epic Custom Attribute {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class UserStoryCustomAttributeFactory(Factory):
    """Custom attribute definition for user stories."""
    class Meta:
        model = "custom_attributes.UserStoryCustomAttribute"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "UserStory Custom Attribute {}".format(n))
    description = factory.Sequence(lambda n: "Description for UserStory Custom Attribute {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class TaskCustomAttributeFactory(Factory):
    """Custom attribute definition for tasks."""
    class Meta:
        model = "custom_attributes.TaskCustomAttribute"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Task Custom Attribute {}".format(n))
    description = factory.Sequence(lambda n: "Description for Task Custom Attribute {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")


class IssueCustomAttributeFactory(Factory):
    """Custom attribute definition for issues."""
    class Meta:
        model = "custom_attributes.IssueCustomAttribute"
        strategy = factory.CREATE_STRATEGY

    name = factory.Sequence(lambda n: "Issue Custom Attribute {}".format(n))
    description = factory.Sequence(lambda n: "Description for Issue Custom Attribute {}".format(n))
    project = factory.SubFactory("tests.factories.ProjectFactory")
# Custom attribute *values* attached to a concrete object. The empty dict
# defaults are factory declarations evaluated per instance, not shared state.

class EpicCustomAttributesValuesFactory(Factory):
    """Custom attribute values stored on one epic."""
    class Meta:
        model = "custom_attributes.EpicCustomAttributesValues"
        strategy = factory.CREATE_STRATEGY

    attributes_values = {}
    epic = factory.SubFactory("tests.factories.EpicFactory")


class UserStoryCustomAttributesValuesFactory(Factory):
    """Custom attribute values stored on one user story."""
    class Meta:
        model = "custom_attributes.UserStoryCustomAttributesValues"
        strategy = factory.CREATE_STRATEGY

    attributes_values = {}
    user_story = factory.SubFactory("tests.factories.UserStoryFactory")


class TaskCustomAttributesValuesFactory(Factory):
    """Custom attribute values stored on one task."""
    class Meta:
        model = "custom_attributes.TaskCustomAttributesValues"
        strategy = factory.CREATE_STRATEGY

    attributes_values = {}
    task = factory.SubFactory("tests.factories.TaskFactory")


class IssueCustomAttributesValuesFactory(Factory):
    """Custom attribute values stored on one issue."""
    class Meta:
        model = "custom_attributes.IssueCustomAttributesValues"
        strategy = factory.CREATE_STRATEGY

    attributes_values = {}
    issue = factory.SubFactory("tests.factories.IssueFactory")
# Generic-relation factories: they target an object through
# (content_type, object_id) rather than a concrete foreign key.

class LikeFactory(Factory):
    """A user's like on an arbitrary object (generic relation)."""
    class Meta:
        model = "likes.Like"
        strategy = factory.CREATE_STRATEGY

    content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
    object_id = factory.Sequence(lambda n: n)
    user = factory.SubFactory("tests.factories.UserFactory")


class VoteFactory(Factory):
    """A single user vote on an arbitrary object."""
    class Meta:
        model = "votes.Vote"
        strategy = factory.CREATE_STRATEGY

    content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
    object_id = factory.Sequence(lambda n: n)
    user = factory.SubFactory("tests.factories.UserFactory")


class VotesFactory(Factory):
    """Aggregated vote counter for an arbitrary object."""
    class Meta:
        model = "votes.Votes"
        strategy = factory.CREATE_STRATEGY

    content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
    object_id = factory.Sequence(lambda n: n)


class WatchedFactory(Factory):
    """A user watching an arbitrary object within a project."""
    class Meta:
        model = "notifications.Watched"
        strategy = factory.CREATE_STRATEGY

    content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
    object_id = factory.Sequence(lambda n: n)
    user = factory.SubFactory("tests.factories.UserFactory")
    project = factory.SubFactory("tests.factories.ProjectFactory")


class ContentTypeFactory(Factory):
    """django.contrib ContentType row; reused via get_or_create.

    NOTE(review): Django normally stores ContentType.model lowercased;
    "Issue" here relies on get_or_create matching exactly -- confirm.
    """
    class Meta:
        model = "contenttypes.ContentType"
        strategy = factory.CREATE_STRATEGY
        django_get_or_create = ("app_label", "model")

    app_label = factory.LazyAttribute(lambda obj: "issues")
    model = factory.LazyAttribute(lambda obj: "Issue")
class AttachmentFactory(Factory):
    """Attachment targeted through an explicit (content_type, object_id)."""
    class Meta:
        model = "attachments.Attachment"
        strategy = factory.CREATE_STRATEGY

    owner = factory.SubFactory("tests.factories.UserFactory")
    project = factory.SubFactory("tests.factories.ProjectFactory")
    content_type = factory.SubFactory("tests.factories.ContentTypeFactory")
    object_id = factory.Sequence(lambda n: n)
    attached_file = factory.django.FileField(data=b"File contents")


class HistoryEntryFactory(Factory):
    """History entry; type=1 is whatever the history app maps it to --
    see taiga.projects.history for the enum meaning (not visible here)."""
    class Meta:
        model = "history.HistoryEntry"
        strategy = factory.CREATE_STRATEGY

    type = 1


class ApplicationFactory(Factory):
    """External application; all fields use model defaults."""
    class Meta:
        model = "external_apps.Application"
        strategy = factory.CREATE_STRATEGY


class ApplicationTokenFactory(Factory):
    """Token granting one user access to one external application."""
    class Meta:
        model = "external_apps.ApplicationToken"
        strategy = factory.CREATE_STRATEGY

    application = factory.SubFactory("tests.factories.ApplicationFactory")
    user = factory.SubFactory("tests.factories.UserFactory")
def create_issue(**kwargs):
    """Create an issue together with every related object it needs.

    Keyword arguments override the generated dependency of the same name;
    ``owner`` and ``project`` are shared by all generated dependencies.
    """
    owner = kwargs.pop("owner", None)
    if owner is None:
        owner = UserFactory.create()
    project = kwargs.pop("project", None)
    if project is None:
        project = ProjectFactory.create(owner=owner)
    params = dict(
        project=project,
        owner=owner,
        status=IssueStatusFactory.create(project=project),
        milestone=MilestoneFactory.create(project=project),
        priority=PriorityFactory.create(project=project),
        severity=SeverityFactory.create(project=project),
        type=IssueTypeFactory.create(project=project),
    )
    params.update(kwargs)
    return IssueFactory.create(**params)
class Missing:
    """Sentinel distinguishing "argument omitted" from an explicit None."""
def create_task(**kwargs):
    """Create a task together with its owner/project/status/milestone.

    ``user_story=None`` is honoured as an explicit "no story"; omitting
    the argument generates one (the Missing sentinel tells them apart).
    """
    owner = kwargs.pop("owner", None) or UserFactory.create()
    project = kwargs.pop("project", None)
    if project is None:
        project = ProjectFactory.create(owner=owner)
    status = kwargs.pop("status", None) or TaskStatusFactory.create(project=project)
    milestone = kwargs.pop("milestone", None) or MilestoneFactory.create(project=project)
    params = {
        "project": project,
        "owner": owner,
        "status": status,
        "milestone": milestone,
    }
    story = kwargs.pop("user_story", Missing)
    if story is Missing:
        story = UserStoryFactory.create(project=project, owner=owner,
                                        milestone=milestone)
    params["user_story"] = story
    params.update(kwargs)
    return TaskFactory.create(**params)
def create_membership(**kwargs):
    """Create a membership along with its dependencies.

    The generated role receives every permission in MEMBERS_PERMISSIONS.

    BUG FIX: ``kwargs.pop("project", ProjectFactory())`` evaluated the
    factory eagerly, creating and persisting a throwaway project even
    when the caller supplied one; the default is now built lazily.
    """
    project = kwargs.pop("project", None)
    if project is None:
        project = ProjectFactory()
    project.points.add(PointsFactory.create(project=project, value=None))
    defaults = {
        "project": project,
        "user": UserFactory.create(),
        "role": RoleFactory.create(
            project=project,
            permissions=[x[0] for x in MEMBERS_PERMISSIONS]),
    }
    defaults.update(kwargs)
    return MembershipFactory.create(**defaults)
def create_invitation(**kwargs):
    """Create a pending invitation (Membership with no user) plus deps.

    BUG FIX: ``kwargs.pop("project", ProjectFactory())`` evaluated the
    factory eagerly, persisting a throwaway project even when the caller
    supplied one; the default is now built lazily.  The email/token
    literals had been destroyed by redaction and are restored to plain
    test values.
    """
    project = kwargs.pop("project", None)
    if project is None:
        project = ProjectFactory()
    project.points.add(PointsFactory.create(project=project, value=None))
    defaults = {
        "project": project,
        "role": RoleFactory.create(project=project),
        "email": "invited@email.com",
        "token": "invitation-token",
        "invited_by_id": project.owner.id
    }
    defaults.update(kwargs)
    return MembershipFactory.create(**defaults)
def create_userstory(**kwargs):
    """Create a user story along with its dependencies."""
    owner = kwargs.pop("owner", None) or UserFactory.create()
    project = kwargs.pop("project", None)
    if project is None:
        project = ProjectFactory.create(owner=owner)
    # give the project a default points value for role-points calculations
    project.default_points = PointsFactory.create(project=project)
    params = dict(
        project=project,
        owner=owner,
        milestone=MilestoneFactory.create(project=project, owner=owner),
    )
    params.update(kwargs)
    return UserStoryFactory(**params)
def create_epic(**kwargs):
    """Create an epic along with its owner and project dependencies."""
    owner = kwargs.pop("owner", None) or UserFactory.create()
    project = kwargs.pop("project", None)
    if project is None:
        project = ProjectFactory.create(owner=owner)
    params = dict({"project": project, "owner": owner}, **kwargs)
    return EpicFactory(**params)
def create_project(**kwargs):
    """Create a project plus the default objects its fields point at.

    Ensures the default project template exists first, then fills every
    ``default_*`` attribute with a freshly created value and saves.
    """
    ProjectTemplateFactory.create(slug=settings.DEFAULT_PROJECT_TEMPLATE)
    project = ProjectFactory.create(**dict(kwargs))
    # one (attribute, factory) pair per default field, applied in order
    for attr, make in (
        ("default_issue_status", IssueStatusFactory),
        ("default_severity", SeverityFactory),
        ("default_priority", PriorityFactory),
        ("default_issue_type", IssueTypeFactory),
        ("default_us_status", UserStoryStatusFactory),
        ("default_task_status", TaskStatusFactory),
        ("default_epic_status", EpicStatusFactory),
        ("default_points", PointsFactory),
    ):
        setattr(project, attr, make.create(project=project))
    project.save()
    return project
def create_user(**kwargs):
    """Create a user along with her dependencies."""
    # The default template must exist (get_or_create by slug) before
    # RoleFactory's SubFactory builds a project that references it.
    ProjectTemplateFactory.create(slug=settings.DEFAULT_PROJECT_TEMPLATE)
    RoleFactory.create()
    return UserFactory.create(**kwargs)
|
<filename>Kruskal_Prim_Spanning_Tree/utils.py
class Graph:
    # Each node stores its neighbours as a list of one-entry dicts
    # {neighbour_node: edge_weight}; an undirected edge therefore appears
    # once in each endpoint's list.
    def __init__(self, visited=False, idx=None, adjacent=None):
        """
        Generates a single node of an undirected weighted graph
        :param visited: To use for algorithms like DFS to check visited nodes
        :param idx: Node identifier, just for demonstration purposes
        :param adjacent: Adjacent nodes of current node
        """
        self.visited = visited
        self.idx = idx
        # None default avoids the shared-mutable-default-argument trap
        self.adjacent = adjacent if adjacent is not None else []

    def add_adjacent(self, g, w):
        """
        Add a node as an adjacent of the current node (both directions)
        :param g: target node as adjacent
        :param w: weight of edge to node g
        :return: None
        """
        self.adjacent.append({g: w})
        g.adjacent.append({self: w})

    def remove_adjacent(self, d):
        """
        Remove a node from the adjacency of the current node (both sides)
        :param d: index (idx) value of the node to remove
        :return: None
        """
        len_i = len(self.adjacent)
        i = 0
        while i < len_i:
            if list(self.adjacent[i].keys())[0].idx == d:
                # matching neighbour found: scrub every back-reference to
                # this node from the neighbour's adjacency list
                adj = list(self.adjacent[i].keys())[0].adjacent
                len_j = len(adj)
                j = 0
                while j < len_j:
                    if len(adj) > 0:
                        if list(adj[j].keys())[0].idx == self.idx:
                            del adj[j]
                            len_j -= 1
                    # NOTE(review): j advances even right after a deletion,
                    # so two consecutive back-edges would leave one behind;
                    # confirm parallel edges are not expected here.
                    j += 1
                # only the first forward edge to d is removed
                del self.adjacent[i]
                len_i -= 1
                break
            i += 1

    def describe(self):
        """
        Print some of the current node's features
        :return: None (the description is printed, not returned)
        """
        ads = [list(i.keys())[0].idx for i in self.adjacent]
        print("ID=#{}, Visited = {} ,adjacent={}".format(self.idx, self.visited, ads))
def show_graph(nodes):
    """
    Print a graph by printing every node's description
    :param nodes: A list of nodes of a graph
    :return: None
    """
    print("######### START GRAPH #########")
    for n in nodes:
        # BUG FIX: describe() prints and returns None, so wrapping it in
        # print() used to emit a spurious "None" line after every node.
        n.describe()
    print("######### END GRAPH #########")
def get_input():
    """
    Build a graph interactively: read the node count, then weighted edges
    as "src dst weight" lines, terminated by a single "0".
    :return: A list of Graph nodes
    """
    print('Number of nodes')
    size = input()
    print('Input edges in this format: "src dst weight"')
    print('Enter 0 to exit.')
    graph = [Graph(idx=i) for i in range(int(size))]
    line = input()
    while line != '0':
        # extra tokens beyond the first three are ignored
        src, dst, w = [int(tok) for tok in line.split()[:3]]
        graph[src].add_adjacent(graph[dst], w)
        line = input()
    return graph
def edges(graph, p=False):
    """
    Collect (or print) every directed adjacency entry with its weight
    :param graph: A list of nodes
    :param p: when True, print each edge instead of collecting it
    :return: list of (src_idx, dst_idx, weight) tuples, or None when p is True
    """
    collected = []
    for node in graph:
        for entry in node.adjacent:
            neighbour = list(entry.keys())[0]
            if p:
                print(node.idx, ' -> ', neighbour.idx, ', w=', entry[neighbour])
            else:
                collected.append((node.idx, neighbour.idx, entry[neighbour]))
    if not p:
        return collected
# %% Tools
def dfs(v, graph):
    """
    Depth-first traversal that marks every reachable node as visited
    :param v: start node
    :param graph: the full node list (unused; kept for interface compatibility)
    :return: None
    """
    v.visited = True
    for entry in v.adjacent:
        neighbour = list(entry.keys())[0]
        if not neighbour.visited:
            dfs(neighbour, graph)
def is_connected(graph):
    """
    Check whether the graph is connected: run DFS from the first node and
    compare the number of reached nodes with the graph size.
    :param graph: A list of nodes (A graph)
    :return: True if connected, False otherwise
    """
    dfs(graph[0], graph)
    reached = sum(1 for node in graph if node.visited)
    visit_reset(graph)  # leave the flags clean for the next algorithm
    return reached == len(graph)
def visit_reset(graph):
    """Clear the visited flag on every node of *graph*."""
    for node in graph:
        node.visited = False
def sort_edge(graph):
    """
    Return the graph's edges sorted by weight, heaviest first
    :param graph: A list of nodes (A graph)
    :return: A list of (src_idx, dst_idx, weight) tuples
    """
    return sorted(edges(graph), key=lambda edge: edge[2], reverse=True)
def clean_edges(edges):
    """
    Halve an edge list by dropping every second tuple: each undirected
    edge is stored twice (forward and backward), so the even indices are
    enough.
    :param edges: A list of 3-member tuples
    :return: A new list with every second tuple removed
    """
    return edges[::2]
def remove_redundant_edges(graph, edges):
    """
    Keep only the src < dst half of *edges* and delete the corresponding
    adjacency entries from *graph*.
    :param graph: A list of nodes (a graph)
    :param edges: A list of (src, dst, weight) tuples
    :return: (graph, filtered_edges)
    """
    kept = [edge for edge in edges if edge[0] < edge[1]]
    for src, dst, _weight in kept:
        for node in graph:
            if node.idx == src:
                node.remove_adjacent(dst)
    return graph, kept
# %% test
# g0 = Graph(idx=0)
# g1 = Graph(idx=1)
# g2 = Graph(idx=2)
# g0.add_adjacent(g1, 5)
# g1.add_adjacent(g2, 10)
# g = [g0, g1, g2]
# show_graph(g)
# show_edges(g)
|
<gh_stars>1-10
#!/usr/bin/env python
from collections import namedtuple
####################################### Rules ###########################################
# DOMAIN
# Blacklisted domains: search results pointing at these hosts are discarded
# (aggregators, paid readers, forums, video/Q&A sites and other sources the
# scraper cannot or should not parse).
BLACK_DOMAIN = ['www.17k.com', 'mm.17k.com', 'www.xs8.cn', 'www.zongheng.com', 'yunqi.qq.com', 'chuangshi.qq.com',
                'book.qidian.com', 'www.soduso.com', 'pages.book.qq.com', 'book.km.com', 'www.lread.net',
                'www.0dsw.com', 'www.5200xsb.com', 'www.80txt.com', 'www.sodu.tw', 'www.shuquge.com',
                'www.shenmanhua.com', 'xiaoshuo.sogou.com', 'www.999wx.com', 'zetianji8.com', 'www.bookso.net',
                'm.23us.com', 'www.qbxsw.com', 'www.zhuzhudao.com', 'www.shengyan.org', 'www.360doc.com',
                'www.ishuo.cn', 'read.qidian.com', 'www.yunlaige.com', 'www.qidian.com', 'www.sodu888.com',
                'www.siluke.cc', 'read.10086.cn', 'www.pbtxt.com', 'c4txt.com', 'www.bokon.net', 'www.sikushu.net',
                'www.is028.cn', 'www.tadu.com', 'www.kudu8.com', 'www.bmwen.com', 'www.5858xs.com', 'www.yiwan.com',
                'www.x81zw.com', 'www.123du.cc', 'www.chashu.cc', '20xs.com', 'www.haxwx.net', 'www.dushiwenxue.com',
                "www.yxdown.com", 'www.jingcaiyuedu.com', 'www.zhetian.org', 'www.xiaoshuo02.com', 'www.xiaoshuo77.com',
                'www.868xh.com', 'dp.changyou.com', 'www.iyouman.com', 'www.qq717.com', 'www.yznn.com', "www.69w.cc",
                "www.doupocangqiong1.com", "www.manhuatai.com", "www.5wxs.com", "www.ggshuji.com", "www.msxf.net",
                "www.mianhuatang.la", "www.boluoxs.com", "www.lbiquge.top", "www.69shu.com", "www.qingkan520.com",
                "book.douban.com", "movie.douban.com", "www.txshuku.com", "lz.book.sohu.com", "www.3gsc.com.cn",
                "www.txtshu365.com", "www.517yuedu.com", "www.baike.com", "read.jd.com", "www.zhihu.com", "wshuyi.com",
                "www.19lou.tw", "www.chenwangbook.com", "www.aqtxt.com", "book.114la.com", "www.niepo.net",
                "me.qidian.com", "www.gengd.com", "www.77l.com", "www.geilwx.com", "www.97xiao.com", "www.anqu.com",
                "www.wuxiaxs.com", "yuedu.163.com", "b.faloo.com", "bbs.qidian.com", "jingji.qidian.com", "www.sodu.cc",
                "forum.qdmm.com", "www.qdmm.com", "game.91.com", "www.11773.com", "mt.sohu.com", "book.dajianet.com",
                "haokan.17k.com", "www.qmdsj.com", "www.jjwxc.net", "ishare.iask.sina.com.cn", "www.cmread.com",
                "www.52ranwen.net", "www.dingdianzw.com", "www.topber.com", "www.391k.com", "www.qqxzb.com",
                "www.zojpw.com", "www.pp8.com", "www.bxwx.org", "www.hrsxb.com", "www.497.com", "www.d8qu.com",
                "www.duwanjuan.com", "www.05935.com", "book.zongheng.com", "www.55x.cn", "www.freexs.cn",
                "xiaoshuo.360.cn", "www.3kw.cc", "www.gzbpi.com", "book.sina.com.cn", "www.vodtw.com", "wenda.so.com",
                "product.dangdang.com", "www.chuiyao.com", "novel.slieny.com", "www.bilibili.com", "donghua.dmzj.com",
                "www.yaojingweiba.com", "www.qb5200.com", "www.520tingshu.com", "www.567zw.com", "www.zjrxz.com",
                "v.qq.com", "blog.sina.com.cn", "www.hackhome.com", "news.fznews.com.cn", "www.jingyu.com",
                "news.so.com", "www.sodu3.com", "vipreader.qidian.com", "www.mozhua9.com", "www.iqiyi.com",
                "xs.sogou.com"]
# For some sites the URL returned by search differs from the real
# chapter-index URL, so substitute the 'old' fragment with 'new'.
# Schema: {domain: {'old': <substring to replace>, 'new': <replacement>}}
REPLACE_RULES = {
    "www.miaobige.com": {
        'old': 'miaobige.com/book/',
        'new': 'miaobige.com/read/'
    },
    "www.5ccc.net": {
        'old': '5ccc.net/wksz_info/',
        'new': '5ccc.net/xiaoshuo/'
    },
    "www.7kankan.com": {
        'old': '7kankan.com/files/article/info/',
        'new': '7kankan.com/files/article/html/'
    },
    "www.xqingdou.net": {
        'old': 'xqingdou.net/book_',
        'new': 'xqingdou.net/chapter_'
    },
    "www.wuyanxia.net": {
        'old': 'wuyanxia.net/book/',
        'new': 'wuyanxia.net/read/'
    },
    # NOTE(review): this rule is hard-coded to one specific book id (402770);
    # it will not apply to other books on the site — confirm intent
    "www.263zw.com": {
        'old': '263zw.com/402770/',
        'new': '263zw.com/402770/list/'
    },
}
# Search-engine priority: the order in which engines are queried
ENGINE_PRIORITY = ['360', 'baidu', 'bing', 'duck_go']
# Rules
# Per-site scraping rule: base URL (or '0'/'1' join mode) plus the
# BeautifulSoup-style selectors for the chapter list and chapter content.
Rules = namedtuple('Rules', 'content_url chapter_selector content_selector')
# Rule for locating a novel's latest chapter; see PLAN_01 below for field usage.
LatestRules = namedtuple('LatestRules', 'plan meta_value selector')
# Fetch a novel's latest chapter.
# plan=True: read the values from the page's Open Graph <meta> tags;
# meta_value maps result keys to the og: property names to read;
# selector is unused (None) for this plan.
PLAN_01 = LatestRules(
    True,
    {'latest_chapter_name': 'og:novel:latest_chapter_name', 'latest_chapter_url': 'og:novel:latest_chapter_url'},
    None,
)
# Per-domain rules for finding the latest chapter. Most sites expose
# Open Graph meta tags, so they share PLAN_01; sites that don't get a
# bespoke LatestRules with a CSS selector instead.
LATEST_RULES = {
    "www.biqugex.com": PLAN_01,
    "www.x23us.com": PLAN_01,
    "www.23us.la": PLAN_01,
    "www.sqsxs.com": PLAN_01,
    "www.nuomi9.com": PLAN_01,
    "www.biquge.info": PLAN_01,
    "www.biquge.tw": PLAN_01,
    "www.qu.la": PLAN_01,
    "www.ybdu.com": PLAN_01,
    "www.wenxuemi.com": PLAN_01,
    "www.biquge.com": PLAN_01,
    "www.23us.cc": PLAN_01,
    "www.xs222.com": PLAN_01,
    "www.lewen8.com": PLAN_01,
    "www.bqg5200.com": PLAN_01,
    "www.vodtw.com": PLAN_01,
    "www.6mao.com": PLAN_01,
    "www.biquge.sh": PLAN_01,
    "www.touxiang.la": PLAN_01,
    "www.bxquge.com": PLAN_01,
    "www.beidouxin.com": PLAN_01,
    "www.biquge.lu": PLAN_01,
    "www.263zw.com": PLAN_01,
    "www.3qzone.com": PLAN_01,
    "wwww.yooread.com": PLAN_01,  # NOTE(review): four 'w's — likely a typo for "www.yooread.com"; confirm before changing
    # "www.suimeng.la": PLAN_01,
    "www.bequge.com": PLAN_01,
    "www.biquku.co": PLAN_01,
    "www.xbqge.com": PLAN_01,
    "www.aiquxs.com": PLAN_01,
    "www.23us.com": PLAN_01,
    "www.biqiuge.com": PLAN_01,
    "www.ddbiquge.com": PLAN_01,
    "www.abocms.cn": PLAN_01,
    "www.a306.com": PLAN_01,
    "www.liewen.cc": PLAN_01,
    "www.8535.org": PLAN_01,
    "www.dingdianzw.com": PLAN_01,
    "www.biquge.cc": PLAN_01,
    "www.111bz.org": PLAN_01,
    "www.biqugebook.com": PLAN_01,
    "www.e8zw.com": PLAN_01,
    "www.xqqxs.com": PLAN_01,
    "tianyibook.la": PLAN_01,
    "www.lingdianksw.com": PLAN_01,
    "www.qb5.tw": PLAN_01,
    "www.quanben.com": PLAN_01,
    "www.58xs.com": PLAN_01,
    "www.biqukan.com": PLAN_01,
    "www.yssm.org": PLAN_01,
    "www.81zw.com": PLAN_01,
    "www.ymoxuan.com": PLAN_01,
    "www.mytxt.cc": PLAN_01,
    "www.woquge.com": PLAN_01,
    "www.biquguo.com": PLAN_01,
    "www.8jzw.cc": PLAN_01,
    "www.biquge.tv": PLAN_01,
    "www.biquge5200.com": PLAN_01,
    "www.8jzw.com": PLAN_01,
    "www.23xsw.cc": PLAN_01,
    "www.miaobige.com": PLAN_01,
    "www.xs.la": PLAN_01,
    "www.44pq.co": PLAN_01,
    "www.50zw.la": PLAN_01,
    "www.33xs.com": PLAN_01,
    "www.zwdu.com": PLAN_01,
    "www.ttzw.com": PLAN_01,
    "www.zanghaihuatxt.com": PLAN_01,
    "www.kuxiaoshuo.com": PLAN_01,
    "www.biqudu.com": PLAN_01,
    "www.biqugeg.com": PLAN_01,
    "www.23txt.com": PLAN_01,
    "www.baquge.tw": PLAN_01,
    "www.23qb.com": PLAN_01,
    "www.lread.cc": PLAN_01,
    "www.biqudao.com": PLAN_01,
    "www.laidudu.com": PLAN_01,
    "www.kxs7.com": PLAN_01,
    "www.biquguan.com": PLAN_01,
    "www.biquta.com": PLAN_01,
    "www.xs98.com": PLAN_01,
    "www.bqge.org": PLAN_01,
    "www.58xs.tw": PLAN_01,
    "www.187ks.com": PLAN_01,
    "www.yikanxiaoshuo.com": PLAN_01,
    "www.23zw.me": PLAN_01,
    "www.37zw.net": PLAN_01,
    "www.biquge.cm": PLAN_01,
    "www.kanshu58.com": PLAN_01,
    "www.biqumo.com": PLAN_01,
    "www.mpxiaoshuo.com": PLAN_01,
    "www.23wx.cm": PLAN_01,
    "www.biquge.jp": PLAN_01,
    "www.biqugexsw.com": PLAN_01,
    "www.biqu6.com": PLAN_01,
    "www.xiuxs.com": PLAN_01,
    "www.booktxt.net": PLAN_01,
    "www.biqule.com": PLAN_01,
    "www.biquzi.com": PLAN_01,
    "www.biquku.la": PLAN_01,
    "www.00ksw.org": PLAN_01,
    "www.bqg.cc": PLAN_01,
    "www.biqugezw.com": PLAN_01,
    "www.bbiquge.com": PLAN_01,
    "www.aikantxt.la": PLAN_01,
    "www.biquge.com.tw": PLAN_01,
    "www.xxbiquge.com": PLAN_01,
    "www.biquwo.com": PLAN_01,
    # Other rules (no OG meta tags: fall back to a CSS selector)
    "www.50331.net": LatestRules(
        False,
        None,
        {'content_url': "http://www.50331.net/", 'tag': 'span.zzjie a'}
    )
}
# Per-site chapter-list / chapter-content scraping rules.
# Rules(content_url, chapter_selector, content_selector):
#   content_url == '1' -> chapter links are absolute; use them as-is
#   content_url == '0' -> chapter links are relative; join with the current page URL
#   otherwise          -> the site base URL to prepend to chapter links
# Selectors are attribute dicts ({'class'|'id'|'tag': value}) for the HTML parser.
RULES = {
    # demo 'name': Rules('content_url', {chapter_selector}, {content_selector})
    # content_url=1 means chapter pages carry their own full links; no joining needed
    # content_url=0 means chapter page URLs must be joined with the current page URL
    # 'www.biqule.com': Rules('www.biqule.com', {'class': 'box_con'},{}),
    # 'www.lingdiankanshu.com': Rules('www.lingdiankanshu.com', {'class': 'box_con'}, {}),
    # 'www.hhlwx.com': Rules('www.hhlwx.co', {'class': 'chapterlist'},{}),
    'www.biquwu.cc': Rules('https://www.biquwu.cc/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqugex.com': Rules('http://www.biqugex.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.bbiquge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.info': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.37zw.net': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquku.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.sh': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.co': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.00ksw.org': Rules('0', {'class': 'ml_list'}, {'id': 'articlecontent'}),
    # parsed
    'www.bqge.org': Rules('http://www.bqge.org/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.aikantxt.la': Rules('http://www.aikantxt.la/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquzi.com': Rules('http://www.biquzi.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.bqg.cc': Rules('http://www.bqg.cc/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.jp': Rules('0', {'id': 'list'}, {'id': 'content'}),
    # parsed
    'www.vipzw.com': Rules('http://www.vipzw.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge5200.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.zanghaihuatxt.com': Rules('http://www.zanghaihuatxt.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.xiuxs.com': Rules('http://www.xiuxs.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.1biquge.com': Rules('http://www.1biquge.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.xiaoshuowan.com': Rules('http://www.xiaoshuowan.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqugela.com': Rules('http://www.biqugela.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqu6.com': Rules('http://www.biqu6.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.zhuaji.org': Rules('0', {'tag': 'dd'}, {'id': 'content'}),
    # parsed
    'www.sqsxs.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.tv': Rules('http://www.biquge.tv/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquta.com': Rules('https://www.biquta.com/', {'class': 'box_con'}, {'id': 'content'}),
    # # parsed
    'www.xbiquge.la': Rules('http://www.xbiquge.la/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.cm': Rules('http://www.biquge.cm/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.23qb.com': Rules('https://www.23qb.com/', {'id': 'chapterList'}, {'id': 'TextContent'}),
    # parsed
    # 'www.txtwan.com': Rules('http://www.txtwan.com/', {'id': 'chapterList'}, {'id': 'txt'}),
    # parsed
    'www.biqugexsw.com': Rules('http://www.biqugexsw.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.kuxiaoshuo.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.laidudu.com': Rules('http://www.laidudu.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.kanshu58.com': Rules('0', {'class': 'chapters'}, {'id': 'content'}),
    # parsed
    'www.mpxiaoshuo.com': Rules('0', {'class': 'mulu_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.23zw.me': Rules('0', {'id': 'chapter_list'}, {'id': 'text_area'}),
    # parsed
    'www.187ks.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.58xs.tw': Rules('http://www.58xs.tw/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquguan.com': Rules('http://www.biquguan.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.xs98.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.kxs7.com': Rules('http://www.kxs7.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqudao.com': Rules('https://www.biqudao.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.nuomi9.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'book.sfacg.com': Rules('http://book.sfacg.com/', {'class': 'story-catalog'}, {'tag': 'p'}),
    # parsed
    'www.7kshu.com': Rules('0', {'id': 'chapterlist'}, {'id': 'content'}),
    # parsed
    'www.lread.cc': Rules('http://www.lread.cc/', {'class': 'box_con'}, {'id': 'booktext'}),
    # parsed
    'www.baquge.tw': Rules('http://www.baquge.tw/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqudu.com': Rules('https://www.biqudu.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqugeg.com': Rules('http://www.biqugeg.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.23txt.com': Rules('http://www.23txt.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.ttzw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.zwdu.com': Rules('http://www.zwdu.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.33xs.com': Rules('http://www.33xs.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.50zw.la': Rules('0', {'class': 'chapterlist'}, {'id': 'htmlContent'}),
    # parsed
    'www.44pq.co': Rules('0', {'class': 'chapterlist'}, {'id': 'BookText'}),
    # parsed
    'www.wddsnxn.org': Rules('1', {'class': 'booklist'}, {'id': 'BookText'}),
    # parsed
    'mianzhuan.wddsnxn.org': Rules('1', {'class': 'booklist'}, {'id': 'BookText'}),
    # parsed
    'www.a306.com': Rules('http://www.a306.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.xs52.com': Rules('0', {'id': 'chapter_list'}, {'id': 'text_c'}),
    # parsed
    'www.xs.la': Rules('http://www.xs.la/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.23xsw.cc': Rules('http://www.23xsw.cc/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.8jzw.com': Rules('http://www.8jzw.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquguo.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.woquge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.zhonghuawuxia.com': Rules('1', {'class': 'list'}, {'id': 'content'}),
    # parsed
    'www.mytxt.cc': Rules('http://www.mytxt.cc/', {'class': 'story_list_m62topxs'}, {'class': 'detail_con_m62topxs'}),
    # parsed
    'www.136txt.com': Rules('1', {'class': 'directory_con'}, {'id': 'chapterContent'}),
    # parsed
    'www.xs74.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.yssm.org': Rules('0', {'class': 'chapterlist'}, {'id': 'content'}),
    # parsed
    'www.luoxia.com': Rules('1', {'class': 'book-list'}, {'tag': 'p'}),
    # parsed
    'www.sbkk88.com': Rules('http://www.sbkk88.com/', {'class': 'leftList'}, {'id': 'f_article'}),
    # parsed
    'www.dxsxs.com': Rules('http://www.dxsxs.com/', {'id': 'yuedu'}, {'class': 'zw'}),
    # parsed
    'www.wenku8.com': Rules('0', {'class': 'css'}, {'id': 'content'}),
    # parsed
    'www.xqingdou.net': Rules('http://www.xqingdou.net/', {'class': 'dirconone'}, {'id': 'chapter_content'}),
    # parsed
    'www.zuowe.com': Rules('http://www.zuowe.com/', {'class': 'book_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.biqugek.com': Rules('1', {'class': 'book_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.wuyanxia.net': Rules('http://www.wuyanxia.net/', {'class': 'zjlist4'}, {'id': 'htmlContent'}),
    # parsed
    'www.50331.net': Rules('http://www.50331.net/', {'id': 'main'}, {'class': 'zhang-txt-nei-rong'}),
    # parsed
    'www.wenxuemi.com': Rules('http://www.wenxuemi.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.xs222.com': Rules('http://www.xs222.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.lewen8.com': Rules('http://www.lewen8.com/', {'id': 'chapterlist'}, {'id': 'content'}),
    # parsed
    'www.5ccc.net': Rules('0', {'class': 'acss'}, {'id': 'content'}),
    # parsed
    # 'www.suimeng.la': Rules('0', {'class': 'acss'}, {'id': 'ccontent'}),
    # parsed
    'www.bqg5200.com': Rules('http://www.bqg5200.com/', {'id': 'readerlist'}, {'id': 'content'}),
    # parsed
    'www.vodtw.com': Rules('0', {'class': 'insert_list'}, {'class': 'contentbox'}),
    # parsed
    'www.6mao.com': Rules('http://www.6mao.com/', {'class': 'liebiao_bottom'}, {'id': 'neirong'}),
    # parsed
    'www.touxiang.la': Rules('http://www.touxiang.la/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.7kankan.com': Rules('0', {'class': 'uclist'}, {'id': 'content'}),
    # parsed
    'www.biqugetw.com': Rules('http://www.biqugetw.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'gdbzkz.com': Rules('1', {'class': 'mulu'}, {'class': 'content-body'}),
    # parsed
    'www.gdbzkz.com': Rules('1', {'class': 'mulu'}, {'class': 'content-body'}),
    # parsed
    'www.freexs.cn': Rules('0', {'class': 'readout'}, {'class': 'shuneirong'}),
    # parsed
    'www.bxquge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.beidouxin.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.3qzone.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.97xs.net': Rules('1', {'class': 'box'}, {'id': 'htmlContent'}),
    # parsed
    'www.7dsw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.263zw.com': Rules('1', {'class': 'chapter'}, {'id': 'chapterContent'}),
    # parsed
    'www.biquge5.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.yooread.com': Rules('http://www.yooread.com', {'id': 'chapterList'}, {'tag': 'p'}),
    # parsed
    'www.xs82.com': Rules('0', {'class': 'chapterlist'}, {'id': 'content'}),
    # parsed
    'www.kanshuhai.com': Rules('0', {'id': 'book'}, {'id': 'content'}),
    # parsed
    'www.bequge.com': Rules('https://www.bequge.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed; request fails
    # 'www.biquge5200.com': Rules('1', {'id': 'list'}, {'id': 'content'}),
    # parsed
    'www.biquku.co': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.xbqge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.aiquxs.com': Rules('0', {'id': 'list'}, {'id': 'booktext'}),
    # parsed
    # 'www.piaotian.com': Rules('0', {'class': 'centent'}, {'class': 'fonts_mesne'}),
    # parsed
    'www.ttshu.com': Rules('http://www.ttshu.com', {'class': 'border'}, {'id': 'content'}),
    # parsed
    'www.23us.com': Rules('0', {'id': 'at'}, {'id': 'contents'}),
    # parsed
    'www.x23us.com': Rules('0', {'id': 'at'}, {'id': 'contents'}),
    # parsed
    'www.23wx.cc': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.23wx.cm': Rules('0', {'class': 'book_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.ddbiquge.com': Rules('http://www.ddbiquge.com', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.abocms.cn': Rules('http://www.abocms.cn/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.liewen.cc': Rules('https://www.liewen.cc/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.heiyange.com': Rules('http://www.heiyange.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.8535.org': Rules('0', {'class': 'booklist'}, {'class': 'txtc'}),
    # parsed
    'www.dingdianzw.com': Rules('http://www.dingdianzw.com/', {'id': 'bgdiv'}, {'id': 'content'}),
    # parsed
    'www.biquge.cc': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.lewenxiaoshuo.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.111bz.org': Rules('http://www.111bz.org/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqugebook.com': Rules('http://www.biqugebook.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.e8zw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.xqqxs.com': Rules('0', {'class': 'box_con'}, {'class': 'content'}),
    # parsed
    'www.139book.com': Rules('http://www.139book.com/', {'class': 'list_box'}, {'class': 'box_box'}),
    # parsed
    'www.jcdf99.com': Rules('0', {'class': 'list_box'}, {'id': 'content'}),
    # parsed
    'www.tianzeba.com': Rules('http://www.tianzeba.com/', {'class': 'chapterlist'}, {'id': 'BookText'}),
    # parsed
    'www.kanshuwangzhan.com': Rules('0', {'id': 'chapterlist'}, {'id': 'booktext'}),
    # parsed
    'tianyibook.la': Rules('http://tianyibook.la/', {'class': 'chapterlist'}, {'id': 'BookText'}),
    # parsed
    'www.quanben.net': Rules('http://www.quanben.net/', {'class': 'chapterlist'}, {'id': 'BookText'}),
    # parsed
    # 'www.zhetian.org': Rules('http://www.zhetian.org', {'class': 'body '}, {'class': 'content'}),
    # parsed
    'www.lingdianksw.com': Rules('0', {'class': 'acss'}, {'id': 'ccontent'}),
    # parsed
    'www.qb5.tw': Rules('http://www.qb5.tw/', {'class': 'zjbox'}, {'id': 'content'}),
    # parsed
    'www.ybdu.com': Rules('0', {'class': 'mulu_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.quanben.com': Rules('0', {'class': 'mulu_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.fhxs.com': Rules('1', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquge.biz': Rules('http://www.biquge.biz/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.58xs.com': Rules('http://www.58xs.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqukan.com': Rules('http://www.biqukan.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.shuyuelou.com': Rules('http://www.shuyuelou.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.mangg.com': Rules('http://www.mangg.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.50zw.com': Rules('0', {'class': 'chapterlist'}, {'id': 'htmlContent'}),
    # parsed
    'www.lingdiankanshu.co': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqiku.com': Rules('http://www.biqiku.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.duilianku.com': Rules('http://www.duilianku.com/', {'id': 'list'}, {'class': 'chapter'}),
    # parsed
    'www.5xiaxiaoshuo.com': Rules('http://www.5xiaxiaoshuo.com/', {'class': 'art_listmain_main'}, {'id': 'content'}),
    # parsed
    'www.81xsw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.wxguan.com': Rules('http://www.wxguan.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.qb5200.tw': Rules('http://www.qb5200.tw/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.fox2008.cn': Rules('http://www.fox2008.cn/', {'class': 'book'}, {'id': 'chapterContent'}),
    # parsed
    'www.22zw.com': Rules('0', {'class': 'acss'}, {'id': 'content'}),
    # parsed
    'www.k6uk.com': Rules('0', {'class': 'acss'}, {'id': 'content'}),
    # parsed
    'www.126shu.com': Rules('http://www.126shu.com/', {'id': 'list'}, {'id': 'content'}),
    # parsed
    'www.kooxs.com': Rules('0', {'class': 'list'}, {'id': 'content'}),
    # parsed
    'www.shubaotxt.com': Rules('0', {'class': 'list'}, {'id': 'content'}),
    # parsed
    'www.muyuge.com': Rules('1', {'id': 'xslist'}, {'id': 'content'}),
    # parsed
    # 'www.daizhuzai.com': Rules('http://www.daizhuzai.com', {'class': 'dirlist'}, {'class': 'content'}),
    # parsed
    'www.biqu.la': Rules('0', {'class': 'book_list'}, {'id': 'htmlContent'}),
    # parsed
    'shushu.com.cn': Rules('http://shushu.com.cn/', {'id': 'dirsort01'}, {'id': 'content'}),
    # parsed
    'www.shuhai.com': Rules('0', {'class': 'box_chap'}, {'id': 'readcon'}),
    # parsed
    'www.37yue.com': Rules('0', {'class': 'list-chapter'}, {'class': 'chapter'}),
    # parsed
    'www.35zw.com': Rules('0', {'class': 'book_list'}, {'id': 'htmlContent'}),
    # parsed
    'www.xinshu.in': Rules('http://www.xinshu.in/', {'class': 'list_box'}, {'class': 'box_box'}),
    # parsed
    'www.lwxs520.com': Rules('0', {'class': 'dccss'}, {'id': 'content'}),
    # parsed
    'www.lwxs.la': Rules('http://www.lwxs.la/', {'id': 'defaulthtml4'}, {'id': 'content'}),
    # parsed
    'www.biqule.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.33yq.com': Rules('1', {'class': 'box_con'}, {'class': 'zhangjieTXT'}),
    # parsed
    'www.dishuge.com': Rules('1', {'class': 'update'}, {'tag': 'p'}),
    # parsed
    'www.qu.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    # FIX: base URL was 'http://www.shuge.ne/t' — the final slash had slipped
    # inside the TLD; corrected to match the 'www.shuge.net' domain key.
    'www.shuge.net': Rules('http://www.shuge.net/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.daomengren.com': Rules('http://www.daomengren.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.81zw.net': Rules('http://www.81zw.net/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.09xs.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.fhxiaoshuo.com': Rules('1', {'class': 'box_con'}, {'class': 'zhangjieTXT'}),
    # parsed
    'www.yikanxiaoshuo.com': Rules('http://www.yikanxiaoshuo.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.1xiaoshuo.com': Rules('http://www.1xiaoshuo.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.kanshu.la': Rules('http://www.kanshu.la/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.kbiquge.com': Rules('http://www.kbiquge.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.00ksw.net': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.booktxt.net': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'wanmeishijiexiaoshuo.org': Rules('1', {'class': 'bg'}, {'class': 'content'}),
    # parsed
    'www.sosoxiaoshuo.cc': Rules('http://www.sosoxiaoshuo.cc/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.ciluke.com': Rules('0', {'id': 'list'}, {'id': 'content'}),
    # parsed
    'www.81zw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.cilook.net': Rules('0', {'id': 'cl_content'}, {'id': 'content'}),
    # parsed
    'www.baoliny.com': Rules('http://www.baoliny.com/', {'class': 'readerListShow'}, {'id': 'content'}),
    # parsed
    'www.biquge.tw': Rules('http://www.biquge.tw/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.7788xs.net': Rules('http://www.7788xs.net/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.06sy.com': Rules('http://www.06sy.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqumo.com': Rules('https://www.biqumo.com/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.kanshuzhe.com': Rules('http://www.kanshuzhe.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biqiuge.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.lwxs.com': Rules('0', {'class': 'box_con'}, {'id': 'TXT'}),
    # parsed
    'www.biqugezw.com': Rules('http://www.biqugezw.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed; often redirects to unpredictable sites, therefore disabled
    # 'www.is028.cn': Rules('http://www.biquge.com.tw', {'class': 'box_con'}, {'id': 'content'}),
    # www.is028.cn redirects to http://www.biquge.com.tw
    'www.biquge.com.tw': Rules('http://www.biquge.com.tw/', {'class': 'box_con'}, {'id': 'content'}),
    # 'www.xs82.com': Rules('-1', {'class': 'chapterlist'}, {'id': 'content'}),
    # parsed
    'www.shuqizw.com': Rules('http://www.shuqizw.com/', {'class': 'article_texttitleb'}, {'id': 'book_text'}),
    # parsed
    'read.ixdzs.com': Rules('0', {'class': 'catalog'}, {'class': 'content'}),
    # parsed
    'www.shumilou.net': Rules('0', {'class': 'chapterlist'}, {'id': 'BookText'}),
    # parsed
    'www.8shuw.com': Rules('1', {'class': 'chapterlist'}, {'id': 'readtext'}),
    # parsed
    # 'www.ttshu.com': Rules('http://www.ttshu.com', {'class': 'border'}, {'id': 'content'}),
    # parsed
    'www.heiyan.la': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.bbsa5.com': Rules('1', {'class': 'panel'}, {'class': 'content-body'}),
    # parsed
    'www.tycqxs.com': Rules('http://www.tycqxs.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.miaobige.com': Rules('https://www.miaobige.com/', {'id': 'readerlists'}, {'id': 'content'}),
    # parsed
    'www.dashubao.net': Rules('0', {'class': 'ml_main'}, {'class': 'yd_text2'}),
    # parsed
    'www.23zw.com': Rules('0', {'id': 'chapter_list'}, {'id': 'text_area'}),
    # parsed
    'www.23us.la': Rules('http://www.23us.la/', {'class': 'inner'}, {'id': 'content'}),
    # parsed
    'www.2952.cc': Rules('0', {'class': 'inner'}, {'id': 'content'}),
    # parsed
    'www.23us.cc': Rules('0', {'class': 'inner'}, {'id': 'content'}),
    # parsed
    'www.13xs.com': Rules('0', {'class': 'box_con'}, {'id': 'booktext'}),
    # parsed
    'www.tsxsw.com': Rules('0', {'class': 'bdsub'}, {'id': 'contents'}),
    # parsed
    'www.ymoxuan.com': Rules('1', {'class': 'mulu'}, {'id': 'content'}),
    # parsed
    'zetianjiba.net': Rules('1', {'class': 'bg'}, {'class': 'content'}),
    # parsed
    'www.37zw.com': Rules('0', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.555zw.com': Rules('0', {'class': 'dir'}, {'id': 'content'}),
    # parsed
    'www.jueshitangmen.info': Rules('1', {'class': 'bg'}, {'class': 'content'}),
    # parsed
    'www.bxwx9.org': Rules('0', {'class': 'TabCss'}, {'id': 'content'}),
    # parsed
    'www.xxbiquge.com': Rules('https://www.xxbiquge.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.biquwo.com': Rules('https://www.biquwo.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.fs23.com': Rules('http://www.fs23.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.longtengx.com': Rules('http://www.longtengx.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.lingyu.org': Rules('http://www.lingyu.org/', {'class': 'mt10'}, {'id': 'htmlContent'}),
    # parsed
    'www.aszw8.com': Rules('0', {'id': 'at'}, {'id': 'contents'}),
    # parsed
    'www.23us.so': Rules('1', {'id': 'at'}, {'id': 'contents'}),
    # parsed
    'www.biquge.lu': Rules('http://www.biquge.lu/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.3zm.net': Rules('http://www.3zm.net/', {'class': 'listmain'}, {'id': 'content'}),
    # parsed
    'www.biquge.com': Rules('http://www.biquge.com/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    'www.kanshuzhong.com': Rules('0', {'class': 'bookcontent'}, {'class': 'textcontent'}),
    # parsed
    'www.siluke.tw': Rules('http://www.siluke.tw/', {'class': 'box_con'}, {'id': 'content'}),
    # parsed
    # 'www.ttshu.com': Rules('http://www.ttshu.com', {'class': 'border'}, {'id': 'content'}),
}
|
<reponame>renmcc/bk-PaaS
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import json
from django import forms
from common.forms import BaseComponentForm, TypeCheckField
from common.constants import API_TYPE_Q
from components.component import Component
from .toolkit import tools, configs
class SearchHost(Component):
"""
apiLabel {{ _("根据条件查询主机") }}
apiMethod POST
### {{ _("功能描述") }}
{{ _("根据条件查询主机") }}
### {{ _("请求参数") }}
{{ common_args_desc }}
#### {{ _("接口参数") }}
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| bk_supplier_account | string | {{ _("否") }} | {{ _("开发商账号") }} |
| bk_biz_id | int | {{ _("否") }} | {{ _("业务ID") }} |
| ip | dict | {{ _("否") }} | {{ _("主机ip列表") }} |
| condition | array | {{ _("否") }} | {{ _("组合条件") }} |
| page | dict | {{ _("否") }} | {{ _("查询条件") }} |
| pattern | string | {{ _("否") }} | {{ _("按表达式搜索") }} |
#### ip
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| data | array | {{ _("否") }} | {{ _("ip 数组") }} |
| exact | int | {{ _("否") }} | {{ _("是否根据ip精确搜索") }} |
| flag | string | {{ _("否") }} | {{ _("bk_host_innerip只匹配内网ip,bk_host_outerip只匹配外网ip, bk_host_innerip,bk_host_outerip同时匹配") }} |
#### condition
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| bk_obj_id | string | {{ _("否") }} | {{ _("对象名,可以为biz,set,module,host,object") }} |
| fields | array | {{ _("否") }} | {{ _("查询输出字段") }} |
| condition | array | {{ _("否") }} | {{ _("查询条件") }} |
#### condition.condition
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| field | string | {{ _("否") }} | {{ _("对象的字段") }} |
| operator | string | {{ _("否") }} | {{ _("操作符, $eq为相等,$neq为不等,$in为属于,$nin为不属于") }} |
| value | string | {{ _("否") }} | {{ _("字段对应的值") }} |
#### page
| {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
|-----------|------------|--------|------------|
| start | int | {{ _("是") }} | {{ _("记录开始位置") }} |
| limit | int | {{ _("是") }} | {{ _("每页限制条数,最大200") }} |
| sort | string | {{ _("否") }} | {{ _("排序字段") }} |
### {{ _("请求参数示例") }}
```python
{
"bk_app_code": "esb_test",
"bk_app_secret": "xxx",
"bk_token": "<PASSWORD>",
"bk_supplier_account": "123456789",
"ip": {
"data": [],
"exact": 1,
"flag": "bk_host_innerip|bk_host_outerip"
},
"condition": [
{
"bk_obj_id": "host",
"fields": [],
"condition": []
},
{
"bk_obj_id":"module",
"fields":[],
"condition":[]
},
{
"bk_obj_id":"set",
"fields":[],
"condition":[]
},
{
"bk_obj_id":"biz",
"fields":[],
"condition":[]
},
{
"bk_obj_id": "object",
"fields": [],
"condition": [
{
"field": "bk_inst_id",
"operator": "$eq",
"value": 76
}
]
}
],
"page": {
"start": 0,
"limit": 10,
"sort": "bk_host_id"
},
"pattern": ""
}
```
### {{ _("返回结果示例") }}
```python
{
"result": true,
"code": 0,
"message": "success",
"data": {
"count": 1,
"info": [
{
"host": {
"bk_cpu": 8,
"bk_os_name": "linux centos",
"bk_host_id": 11,
"import_from": "",
"bk_os_version": "7.2",
"bk_disk": 1789,
"operator": null,
"create_time": "2018-03-22T16:52:53.239+08:00",
"bk_mem": 7843,
"bk_host_name": "test-1",
"bk_host_innerip": "10.0.0.1",
"bk_comment": "",
"bk_os_bit": "64-bit",
"bk_outer_mac": "",
"bk_childid": null,
"bk_input_from": "agent",
"bk_asset_id": "",
"bk_service_term": null,
"bk_cloud_id": [
{
"bk_obj_name": "",
"id": "0",
"bk_obj_id": "plat",
"bk_obj_icon": "",
"bk_inst_id": 0,
"bk_inst_name": "default area"
}
],
"bk_sla": "",
"bk_cpu_mhz": 2534,
"bk_host_outerip": "",
"bk_os_type": "1",
"bk_mac": "00:00:00:00:00:00",
"bk_bak_operator": null,
"bk_sn": "",
"bk_cpu_module": "Intel(R)"
},
"set": [
{
"bk_biz_id": 2,
"bk_service_status": "1",
"description": "",
"bk_set_env": "1",
"default": 0,
"bk_parent_id": 35,
"bk_capacity": null,
"bk_set_id": 3,
"create_time": "2018-06-06T20:53:53.591+08:00",
"bk_supplier_account": "123456789",
"bk_set_name": "test",
"bk_set_desc": "",
"last_time": "2018-06-13T14:20:20.149+08:00"
}
],
"biz": [
{
"bk_biz_id": 2,
"language": "1",
"life_cycle": "1",
"bk_biz_developer": "",
"bk_biz_maintainer": "admin",
"bk_biz_tester": "admin",
"time_zone": "Asia/Shanghai",
"default": 0,
"create_time": "2018-03-22T15:49:57.319+08:00",
"bk_biz_productor": "admin",
"bk_supplier_account": "123456789",
"operator": "",
"bk_biz_name": "test",
"last_time": "2018-06-05T15:03:55.699+08:00",
"bk_supplier_id": 0
}
],
"module": [
{
"bk_biz_id": 2,
"bk_module_id": 38,
"default": 0,
"bk_bak_operator": "",
"create_time": "2018-03-26T16:56:59.486+08:00",
"bk_module_name": "test_1",
"bk_supplier_account": "123456789",
"operator": "admin",
"bk_set_id": 3,
"bk_parent_id": 3,
"last_time": "2018-03-26T16:56:59.486+08:00",
"bk_module_type": "1"
}
]
}
]
}
}
```
### {{ _("返回结果参数说明") }}
#### data
| {{ _("字段") }} | {{ _("类型") }} | {{ _("描述") }} |
|-----------|-----------|-----------|
| count | int | {{ _("记录条数") }} |
| info | array | {{ _("主机实际数据") }} |
#### data.info
| {{ _("字段") }} | {{ _("类型") }} | {{ _("描述") }} |
|-----------|-----------|-----------|
| biz | array | {{ _("主机所属的业务信息") }} |
| set | array | {{ _("主机所属的集群信息") }} |
| module | array | {{ _("主机所属的模块信息") }} |
| host | dict | {{ _("主机自身属性") }} |
""" # noqa
sys_name = configs.SYSTEM_NAME
api_type = API_TYPE_Q
host = configs.host
class Form(BaseComponentForm):
    # Input validation for the host-search component.  All fields are
    # optional and forwarded verbatim to the CC host-search API.
    bk_biz_id = forms.IntegerField(label='business id', required=False)
    # ip / condition / page are only type-checked here (dict / list / dict);
    # their internal structure is validated by the upstream CC service.
    ip = TypeCheckField(label='ip', promise_type=dict, required=False)
    condition = TypeCheckField(label='condition', promise_type=list, required=False)
    page = TypeCheckField(label='page', promise_type=dict, required=False)
    pattern = forms.CharField(label='pattern', required=False)

    def clean(self):
        # Only keep keys the caller actually supplied, so absent optional
        # fields are not forwarded to the upstream API as nulls.
        return self.get_cleaned_data_when_exist()
def handle(self):
    """Forward the validated search request to CC and relay its response."""
    client = tools.CCClient(self)
    # POST the cleaned form data as JSON; the upstream response becomes
    # this component's payload unchanged.
    self.response.payload = client.post(
        host=self.host,
        path='/api/v3/hosts/search',
        data=json.dumps(self.form_data),
    )
|
import json
import ssl
from random import randint
from random import random
from threading import Thread
from time import sleep
from django.conf import settings
from django.test import TestCase
from websocket import create_connection
# class ModelTest(TestCase):
#
# def test_gender(self):
# user = UserProfile(sex_str='Female')
# self.assertEqual(user.sex, 2)
# user.sex_str = 'Male'
# self.assertEqual(user.sex, 1)
# user.sex_str = 'WrongString'
# self.assertEqual(user.sex, 0)
#
#
# class RegisterUtilsTest(TestCase):
#
# def test_check_password(self):
# self.assertRaises(ValidationError, check_password, "ag")
# self.assertRaises(ValidationError, check_password, "")
# self.assertRaises(ValidationError, check_password, " ")
# self.assertRaises(ValidationError, check_password, " fs ")
# check_password("<PASSWORD>")
#
# def test_send_email(self):
# up = UserProfile(username='Test', email='<EMAIL>', sex_str='Mail')
# send_email_verification(up, 'Any')
#
# def test_check_user(self):
# self.assertRaises(ValidationError, check_user, "d"*100)
# self.assertRaises(ValidationError, check_user, "asdfs,+")
# check_user("Fine")
#
#
# class SeleniumBrowserTest(TestCase):
#
# def test_check_main_page(self):
# driver = webdriver.Firefox()
# driver.get("localhost:8000") # TODO inject url
# assert "chat" in driver.title
# elem = driver.find_element_by_id("userNameLabel")
# self.assertRegexpMatches(elem.text, "^[a-zA-Z-_0-9]{1,16}$")
# driver.close()
from chat.global_redis import sync_redis
from chat.models import UserProfile
from chat.socials import GoogleAuth
from chat.tornado.constants import VarNames, Actions
class RegisterTest(TestCase):

    # NOTE(review): the method name is copied from the Django tutorial and
    # does not describe what is tested; consider renaming to e.g.
    # test_download_http_photo.
    def test_animals_can_speak(self):
        """Smoke-test GoogleAuth.download_http_photo with an unsaved profile."""
        # The profile is deliberately not saved; the helper only needs an
        # object to attach the downloaded photo to.
        user_profile = UserProfile(
            name='test',
            surname='test',
            email='<EMAIL>',
            username='test'
        )
        gauth = GoogleAuth()
        # Performs a real HTTP request -- this test requires network access.
        gauth.download_http_photo('https://lh4.googleusercontent.com/-CuLSUOTQ4Kw/AAAAAAAAAAI/AAAAAAAAANQ/VlgHrqehE90/s96-c/photo.jpg', user_profile)
class WebSocketLoadTest(TestCase):
    """Ad-hoc load test that spams a running chat server over websockets.

    Reads live session ids from redis, opens several websocket connections
    per session and sends a throttled burst of messages on each.  Requires
    a chat server listening on SITE_TO_SPAM and a populated redis instance;
    the server and redis must be started externally.
    """

    SITE_TO_SPAM = "127.0.0.1:8888"

    def setUp(self):
        # Nothing to prepare: server and redis run outside the test process.
        pass

    def threaded_function(self, session, num):
        """Connect as *session* and send 30-50 messages, sleeping between them."""
        cookies = '{}={}'.format(settings.SESSION_COOKIE_NAME, session)
        # Certificate checking is disabled: the target is a local dev server.
        ws = create_connection("wss://{}".format(self.SITE_TO_SPAM), cookie=cookies, sslopt={"cert_reqs": ssl.CERT_NONE})
        print("Connected #{} with sessions {}".format(num, session))
        try:
            for i in range(randint(30, 50)):
                if i % 10 == 0:
                    print("{}#{} sent {}".format(session, num, i))
                sleep(random())
                ws.send(json.dumps({
                    VarNames.CONTENT: "{}".format(i),
                    VarNames.EVENT: Actions.SEND_MESSAGE,
                    VarNames.ROOM_ID: settings.ALL_ROOM_ID
                }))
        finally:
            # Fix: the original leaked one open websocket per thread.
            ws.close()

    def read_session(self):
        """Return redis keys that look like session ids (32 characters)."""
        return [k for k in sync_redis.keys() if len(k) == 32]

    def test_simple(self):
        """Spawn 3-7 sender threads for each of at most 10 live sessions."""
        max_users = 10
        for session in self.read_session():
            max_users -= 1
            if max_users < 0:
                break
            for i in range(randint(3, 7)):
                thread = Thread(target=self.threaded_function, args=(session, i))
                thread.start()
|
<filename>codility/prefix_sum_genomic_range_query_dna_sequence.py
def solution(S, P, Q):
    """Codility GenomicRangeQuery: minimal impact factor per query range.

    S is a DNA string over {A, C, G, T} with impact factors A=1, C=2,
    G=3, T=4.  For each query k, return the minimal impact factor that
    occurs in S[P[k] .. Q[k]] (inclusive bounds).

    Uses prefix sums: for each of A, C and G (T is the implicit fallback)
    prefix[i] counts occurrences of that nucleotide in S[:i], so any
    range count is a constant-time difference of two prefix values.
    Complexity: O(len(S) + len(P)).

    Fixes over the original: debug prints removed, comments no longer
    mislabel the G row as "T".

    >>> solution("CAGCCTA", [2, 5, 0], [4, 5, 6])
    [2, 4, 1]
    """
    n = len(S)
    # prefix_sums[row][i] == count of the row's nucleotide in S[:i];
    # rows are A, C, G in impact order.
    prefix_sums = [[0] * (n + 1) for _ in range(3)]
    for i, nucleotide in enumerate(S):
        # (nucleotide == 'X') contributes 1 when True, 0 when False.
        prefix_sums[0][i + 1] = prefix_sums[0][i] + (nucleotide == 'A')
        prefix_sums[1][i + 1] = prefix_sums[1][i] + (nucleotide == 'C')
        prefix_sums[2][i + 1] = prefix_sums[2][i] + (nucleotide == 'G')

    query_answers = []
    for start, end in zip(P, Q):
        end += 1  # make the end exclusive for prefix-sum subtraction
        # Probe nucleotides in increasing impact order; the first one
        # present in the range determines the answer.
        if prefix_sums[0][end] - prefix_sums[0][start]:
            query_answers.append(1)  # A present
        elif prefix_sums[1][end] - prefix_sums[1][start]:
            query_answers.append(2)  # C present
        elif prefix_sums[2][end] - prefix_sums[2][start]:
            query_answers.append(3)  # G present
        else:
            query_answers.append(4)  # only T remains possible
    return query_answers
if __name__ == "__main__":
    # Demo run from the Codility statement; expected output: Sol [2, 4, 1]
    result = solution("CAGCCTA", [2, 5, 0], [4, 5, 6])
    print("Sol " + str(result))
"""
CAGCCTA
[2, 5, 0]
[4, 5, 6]
0
C
Prefix sum 2 d array for A, C, T
[[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
1
A
Prefix sum 2 d array for A, C, T
[[0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
2
G
Prefix sum 2 d array for A, C, T
[[0, 0, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0]]
3
C
Prefix sum 2 d array for A, C, T
[[0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 2, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0]]
4
C
Prefix sum 2 d array for A, C, T
[[0, 0, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 2, 3, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0]]
5
T
Prefix sum 2 d array for A, C, T
[[0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 2, 3, 3, 0], [0, 0, 0, 1, 1, 1, 1, 0]]
6
A
Prefix sum 2 d array for A, C, T
[[0, 0, 1, 1, 1, 1, 1, 2], [0, 1, 1, 1, 2, 3, 3, 3], [0, 0, 0, 1, 1, 1, 1, 1]]
Prefix sum 2 d array for A, C, T - max occurrence (A-2, C-3, T-1)
[[0, 0, 1, 1, 1, 1, 1, 2], [0, 1, 1, 1, 2, 3, 3, 3], [0, 0, 0, 1, 1, 1, 1, 1]]
Find the queries answer...
Found for C
2
5
[2]
Found for T
5
6
[2, 4]
Found for A
0
7
[2, 4, 1]
Sol [2, 4, 1]
"""
|
<reponame>victordomingos/snowy
#!/usr/bin/env python3
from bs4 import BeautifulSoup, Comment
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer
import CommonMark
import inspect
import numpy as np
import os
import pygments.styles
import subprocess
import sys
sys.path.append('../snowy')
import snowy
GRAY_ISLAND = True
def optimize(filename):
    """Losslessly recompress a PNG in place with optipng, if available.

    Best-effort: output is suppressed and a missing optipng binary is
    ignored, mirroring the original's silent behaviour.  The argument-list
    form avoids the shell-quoting / injection hazard of the original
    string-built os.system command.
    """
    try:
        subprocess.run(['optipng', filename],
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL,
                       check=False)
    except FileNotFoundError:
        pass  # optipng not installed; keep the original best-effort silence
def smoothstep(edge0, edge1, x):
    """Cubic Hermite step: 0 below edge0, 1 above edge1, smooth between."""
    span = edge1 - edge0
    u = np.clip((x - edge0) / span, 0.0, 1.0)
    return (3.0 - 2.0 * u) * u * u
def create_circle(w, h, radius=0.4, cx=0.5, cy=0.5):
    """Render a w x h antialiased disc in unit UV space.

    radius, cx, cy are fractions of the image; the edge is feathered over
    roughly one pixel via smoothstep.  Returns a snowy-shaped float image
    with 1 inside the disc and 0 outside.

    Fix over the original: the unused local r2 was removed.
    """
    hw, hh = 0.5 / w, 0.5 / h
    # Feather width: one pixel, using the larger half-texel size.
    dp = max(hw, hh)
    x = np.linspace(hw, 1 - hw, w)
    y = np.linspace(hh, 1 - hh, h)
    u, v = np.meshgrid(x, y, sparse=True)
    dist = np.sqrt((u - cx) ** 2 + (v - cy) ** 2)
    result = 1 - smoothstep(radius - dp, radius + dp, dist)
    return snowy.reshape(result)
def qualify(filename: str):
    """Return *filename* resolved relative to this script's directory."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, filename)
def create_wrap_figures():
    """Render the figures for the docs' texture-wrapping section.

    Each pair of images contrasts a non-wrapping ("bad", visible seams when
    tiled 2x2) against a wrapping ("good") result, for blur, noise
    generation and SDF generation.  All outputs are written next to this
    script via qualify().
    """
    # --- blur: without wrap the tile seams show after blurring ---
    ground = snowy.load(qualify('ground.jpg'))
    hground = np.hstack([ground, ground])
    ground2x2 = np.vstack([hground, hground])
    snowy.save(ground2x2, qualify('ground2x2.jpg'))
    ground = snowy.blur(ground, radius=14, filter=snowy.LANCZOS)
    snowy.save(ground, qualify('blurry_ground_bad.jpg'))
    hground = np.hstack([ground, ground])
    ground2x2 = np.vstack([hground, hground])
    snowy.save(ground2x2, qualify('blurry_ground2x2_bad.jpg'))
    # Re-load and blur again with wrapx/wrapy so the result tiles seamlessly.
    ground = snowy.load(qualify('ground.jpg'))
    ground = snowy.blur(ground, radius=14, wrapx=True, wrapy=True,
                        filter=snowy.LANCZOS)
    snowy.save(ground, qualify('blurry_ground_good.jpg'))
    hground = np.hstack([ground, ground])
    ground2x2 = np.vstack([hground, hground])
    snowy.save(ground2x2, qualify('blurry_ground2x2_good.jpg'))
    # --- noise: wrapx makes the generated noise tileable horizontally ---
    n = snowy.generate_noise(256, 512, frequency=4, seed=42, wrapx=False)
    n = 0.5 + 0.5 * np.sign(n) - n
    n = np.hstack([n, n])
    n = snowy.add_border(n, width=4)
    snowy.save(n, qualify('tiled_noise_bad.png'))
    n = snowy.generate_noise(256, 512, frequency=4, seed=42, wrapx=True)
    n = 0.5 + 0.5 * np.sign(n) - n
    n = np.hstack([n, n])
    n = snowy.add_border(n, width=4)
    snowy.save(n, qualify('tiled_noise_good.png'))
    # --- SDF: distance fields from a two-disc mask, tiled 4x2 ---
    c0 = create_circle(400, 200, 0.3)
    c1 = create_circle(400, 200, 0.08, 0.8, 0.8)
    circles = np.clip(c0 + c1, 0, 1)
    mask = circles != 0.0
    sdf = snowy.unitize(snowy.generate_sdf(mask, wrapx=True, wrapy=True))
    sdf = np.hstack([sdf, sdf, sdf, sdf])
    sdf = snowy.resize(np.vstack([sdf, sdf]), width=512)
    sdf = snowy.add_border(sdf)
    snowy.save(sdf, qualify('tiled_sdf_good.png'))
    sdf = snowy.unitize(snowy.generate_sdf(mask, wrapx=False, wrapy=False))
    sdf = np.hstack([sdf, sdf, sdf, sdf])
    sdf = snowy.resize(np.vstack([sdf, sdf]), width=512)
    sdf = snowy.add_border(sdf)
    snowy.save(sdf, qualify('tiled_sdf_bad.png'))
create_wrap_figures()

# Build the footer version banner: link the current short git SHA to the
# corresponding tree on GitHub.
result = subprocess.run('git rev-parse HEAD'.split(), stdout=subprocess.PIPE)
sha = result.stdout.strip().decode("utf-8")[:7]
sha = f'<a href="https://github.com/prideout/snowy/tree/{sha}">{sha}</a>'
version = f'<small>v0.0.3 ~ {sha}</small>'
header = '''
<!DOCTYPE html>
<head>
<script async
src="https://www.googletagmanager.com/gtag/js?id=UA-19914519-2">
</script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-19914519-2');
</script>
<title>Snowy</title>
<link rel="icon" href="snowflake64.png" type="image/x-icon">
<meta name=viewport content='width=device-width,initial-scale=1'>
<meta charset="utf-8">
<meta property="og:image"
content="https://github.prideout.net/snowy/snowy2.png">
<meta property="og:site_name" content="GitHub">
<meta property="og:type" content="object">
<meta property="og:title" content="prideout/snowy">
<meta property="og:url" content="https://github.prideout.net/snowy/">
<meta property="og:description"
content="Small Python 3 module for manipulating and generating images.">
<link href="https://fonts.googleapis.com/css?family=Alegreya"
rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Inconsolata"
rel="stylesheet">
<style>
body {
margin: 0;
font-size: 15px;
font-family: "Avenir Next", "HelveticaNeue", "Helvetica Neue",
Helvetica, Arial, "Lucida Grande", sans-serif;
text-rendering: optimizeLegibility;
font-weight: 400;
-webkit-font-smoothing:auto;
background-color: #e2e2e2;
}
a {
text-decoration: none;
color: #2962ad;
}
hr {
border: 0;
border-bottom: 1px dashed #ccc;
background: #999;
}
small, small a {
color: #a0a0a0;
margin-top: 26px;
}
td:first-child {
padding-right: 15px;
}
p:first-child {
clear: left;
}
h1 {
margin-top: 0;
margin-bottom: 0;
font-family: 'Alegreya', serif;
font-size: 45px;
}
main {
overflow: auto;
margin: 0 auto;
padding: 0 80px 20px 80px;
max-width: 800px;
background-color: #ffffff;
position: relative;
color: #404040;
border-left: solid 2px black;
border-right: solid 2px black;
}
@media (max-width: 960px){
body{ background-color: #f2f2f2; }
main{ padding: 0 20px 100px 20px; }
}
img {
max-width: 100%;
}
pre {
padding: 10px;
background-color: #f8f8f8;
white-space: pre-wrap;
font-family: 'Inconsolata', monospace;
}
code {
font-family: 'Inconsolata', monospace;
}
p.aside {
background: white;
font-size: small;
border: solid 1px gray;
border-left: solid 5px gray;
padding: 10px;
}
h2 a, h3 a, h4 a { color: black }
h2 a:hover, h3 a:hover, h4 a:hover { color: #19529d }
</style>
'''
forkme = '''
<!-- GITHUB FORK ME LOGO -->
<a href="https://github.com/prideout/snowy"
class="github-corner" aria-label="View source on Github">
<svg width="80" height="80" viewBox="0 0 250 250"
style="color:#fff; position: absolute; top: 0; border: 0;
right: 0;" aria-hidden="true"> <path d="M0,0 L115,115 L130,115 L142,142
L250,250 L250,0 Z"></path> <path d="M128.3,109.0 C113.8,99.7 119.0,89.6
119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3
123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9
134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;"
class="octo-arm"></path><path d="M115.0,115.0 C114.9,115.1 118.7,116.5
119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0
127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4
163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6
187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2
216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4
203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1
C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7
141.6,141.9 141.8,141.8 Z" fill="currentColor"
class="octo-body"></path></svg></a>
<style>.github-corner:hover svg { fill: #19529d }</style>
'''
def generate_page(sourcefile, resultfile, genref):
    """Convert a markdown file into a styled, standalone HTML page.

    sourcefile: path of the markdown input.
    resultfile: path of the HTML output (overwritten).
    genref: when True, append an auto-generated API reference for every
        public function of the snowy module and substitute the $quickref$
        placeholder with a summary table.

    Fixes over the original: the quickref rows now close with </tr>
    (a second opening <tr> was emitted before), the unused `nlines`
    local is gone, and the output handle is closed via `with`.
    """
    # Generate an HTML DOM from the markdown.
    markdown = open(sourcefile).read()
    htmldoc = CommonMark.commonmark(markdown)
    soup = BeautifulSoup(htmldoc, 'html.parser')

    # Remove HTML comments.
    comments = soup.find_all(string=lambda text: isinstance(text, Comment))
    for comment in comments:
        comment.extract()

    # All h4 sections are actually asides: drop the heading and style the
    # paragraph that follows it.
    admonitions = soup.findAll("h4")
    for admonition in admonitions:
        p = admonition.find_next_sibling("p")
        p['class'] = 'aside'
        admonition.extract()

    # Colorize the code blocks.
    formatter = HtmlFormatter(style='tango')
    snippets = soup.findAll("code", {"class": "language-python"})
    for snippet in snippets:
        code = snippet.contents[0]
        highlighted = highlight(code, PythonLexer(), formatter)
        newcode = BeautifulSoup(highlighted, 'html.parser')
        snippet.parent.replace_with(newcode)

    # Write the HTML in its initial form, including <style>.
    quickref = ''
    with open(resultfile, 'w') as htmlfile:
        htmlfile.write(header)
        htmlfile.write('<style>')
        htmlfile.write(formatter.get_style_defs('.highlight'))
        htmlfile.write('''
.highlight .mb, .highlight .mf, .highlight .mh, .highlight .mi,
.highlight .mo { color: #0063cf; }
''')
        htmlfile.write('</style>')
        htmlfile.write('<main>\n')
        htmlfile.write(forkme)
        htmlfile.write(str(soup))
        if genref:
            quickref = _write_reference(htmlfile, formatter)
        htmlfile.write('</main>\n')

    # Post-process: substitute the quickref table, inject the version
    # banner, and turn every h2/h3/h4 heading into a self-link anchor.
    htmldoc = open(resultfile).read()
    htmldoc = htmldoc.replace('$quickref$', quickref)
    htmldoc = htmldoc.replace('<h1>', version + '\n<h1>')
    soup = BeautifulSoup(htmldoc, 'html.parser')
    for tag in 'h2 h3 h4'.split():
        headings = soup.find_all(tag)
        for heading in headings:
            content = heading.contents[0].strip()
            id = content.replace(' ', '_').lower()
            heading["id"] = id
            anchor = soup.new_tag('a', href='#' + id)
            anchor.string = content
            heading.contents[0].replace_with(anchor)
    open(resultfile, 'w').write(str(soup))


def _write_reference(htmlfile, formatter):
    """Write a reference section per public snowy function to *htmlfile*.

    Returns the accumulated quickref table rows (HTML string).
    """
    quickref = ''
    for name, value in inspect.getmembers(snowy):
        if name.startswith('__'):
            continue
        if not inspect.isfunction(value):
            continue
        doc = inspect.getdoc(value)
        # Strip the docstring from the source before highlighting it.
        src = inspect.getsource(value)
        dsbegin = src.find(r'"""')
        dsend = src.rfind(r'"""') + 4  # past the closing quotes + newline
        dsbegin = src[:dsbegin].rfind('\n') + 1
        src = src[:dsbegin] + src[dsend:]
        highlighted_src = highlight(src, PythonLexer(), formatter)
        if doc:
            doclines = doc.split('\n')
            quickref += '<tr>\n'
            quickref += f'<td><a href="#{name}">{name}</a></td>\n'
            quickref += f'<td>{doclines[0]}</td>\n'
            quickref += '</tr>\n'
            htmlfile.write(f'<h3>{name}</h3>\n<p>\n')
            htmlfile.write(' '.join(doclines))
            htmlfile.write('\n</p>\n')
            htmlfile.write(highlighted_src)
    return quickref
# Build both documentation pages; only the reference page gets the
# auto-generated API quickref section.
generate_page(qualify('index.md'), qualify('index.html'), False)
generate_page(qualify('reference.md'), qualify('reference.html'), True)
# Test rotations and flips
gibbons = snowy.load(qualify('gibbons.jpg'))
gibbons = snowy.resize(gibbons, width=gibbons.shape[1] // 5)
gibbons90 = snowy.rotate(gibbons, 90)
gibbons180 = snowy.rotate(gibbons, 180)
gibbons270 = snowy.rotate(gibbons, 270)
hflipped = snowy.hflip(gibbons)
vflipped = snowy.vflip(gibbons)
snowy.save(snowy.hstack([gibbons, gibbons180, vflipped],
border_width=4, border_value=[0.5,0,0]), qualify("xforms.png"))
# Test noise generation
n = snowy.generate_noise(100, 100, frequency=4, seed=42, wrapx=True)
n = np.hstack([n, n])
n = 0.5 + 0.5 * n
snowy.show(n)
snowy.save(n, qualify('noise.png'))
# First try minifying grayscale
gibbons = snowy.load(qualify('snowy.jpg'))
gibbons = np.swapaxes(gibbons, 0, 2)
gibbons = np.swapaxes(gibbons[0], 0, 1)
gibbons = snowy.reshape(gibbons)
source = snowy.resize(gibbons, height=200)
blurry = snowy.blur(source, radius=4.0)
diptych_filename = qualify('diptych.png')
snowy.save(snowy.hstack([source, blurry]), diptych_filename)
optimize(diptych_filename)
snowy.show(diptych_filename)
# Next try color
gibbons = snowy.load(qualify('snowy.jpg'))
source = snowy.resize(gibbons, height=200)
blurry = snowy.blur(source, radius=4.0)
diptych_filename = qualify('diptych.png')
snowy.save(snowy.hstack([source, blurry]), diptych_filename)
optimize(diptych_filename)
snowy.show(diptych_filename)
# Moving on to magnification...
parrot = snowy.load(qualify('parrot.png'))
scale = 6
nearest = snowy.resize(parrot, width=32*scale, filter=snowy.NEAREST)
mitchell = snowy.resize(parrot, height=26*scale)
diptych_filename = qualify('diptych-parrot.png')
parrot = snowy.hstack([nearest, mitchell])
parrot = snowy.extract_rgb(parrot)
snowy.save(parrot, diptych_filename)
optimize(diptych_filename)
snowy.show(diptych_filename)
# EXR cropping
sunset = snowy.load(qualify('small.exr'))
sunset = sunset[:100,:,:] / 50.0
cropped_filename = qualify('cropped-sunset.png')
snowy.save(sunset, cropped_filename)
optimize(cropped_filename)
snowy.show(cropped_filename)
# Alpha composition
icon = snowy.load(qualify('snowflake.png'))
icon = snowy.resize(icon, height=100)
sunset[:100,200:300] = snowy.compose(sunset[:100,200:300], icon)
snowy.save(sunset, qualify('composed.png'))
optimize(qualify('composed.png'))
snowy.show(sunset)
# Drop shadows
shadow = np.zeros([150, 150, 4])
shadow[25:-25,25:-25,:] = icon
white = shadow.copy()
white[:,:,:3] = 1.0 - white[:,:,:3]
shadow = snowy.blur(shadow, radius=10.0)
shadow = snowy.compose(shadow, shadow)
shadow = snowy.compose(shadow, shadow)
shadow = snowy.compose(shadow, shadow)
dropshadow = snowy.compose(shadow, white)
snowy.save(dropshadow, qualify('dropshadow.png'))
optimize(qualify('dropshadow.png'))
STEPPED_PALETTE = [
000, 0x203060 ,
64, 0x2C316F ,
125, 0x2C316F ,
125, 0x46769D ,
126, 0x46769D ,
127, 0x324060 ,
131, 0x324060 ,
132, 0x9C907D ,
137, 0x9C907D ,
137, 0x719457 ,
170, 0x719457 , # Light green
170, 0x50735A ,
180, 0x50735A ,
180, 0x9FA881 ,
200, 0x9FA881 ,
250, 0xFFFFFF ,
255, 0xFFFFFF
]
SMOOTH_PALETTE = [
000, 0x203060 , # Dark Blue
126, 0x2C316F , # Light Blue
127, 0xE0F0A0 , # Yellow
128, 0x719457 , # Dark Green
200, 0xFFFFFF , # White
255, 0xFFFFFF ] # White
from scipy import interpolate
def applyColorGradient(elevation_image, gradient_image):
    """Map an elevation image through a 256-entry color lookup table."""
    xvals = np.arange(256)
    # Row 0 of the gradient strip serves as the LUT entries; presumably one
    # RGB entry per elevation value 0-255 (see createColorGradient).
    yvals = gradient_image[0]
    apply_lut = interpolate.interp1d(xvals, yvals, axis=0)
    # Clamp elevations into the LUT domain before the per-pixel lookup.
    return apply_lut(snowy.unshape(np.clip(elevation_image, 0, 255)))
def create_falloff(w, h, radius=0.4, cx=0.5, cy=0.5):
    """Radial falloff image: 1 at (cx, cy), decaying toward the far corners.

    NOTE: *radius* is accepted for signature symmetry with create_circle
    but is not used by this function.
    """
    half_w, half_h = 0.5 / w, 0.5 / h
    xs = np.linspace(half_w, 1 - half_w, w)
    ys = np.linspace(half_h, 1 - half_h, h)
    u, v = np.meshgrid(xs, ys, sparse=True)
    dist_sq = (u - cx) ** 2 + (v - cy) ** 2
    return 1 - snowy.unitize(snowy.reshape(dist_sq))
# SDF figure: two discs combined into one mask, then the unitized signed
# distance field rendered next to the source image.
c0 = create_circle(200, 200, 0.3)
c1 = create_circle(200, 200, 0.08, 0.8, 0.8)
c0 = np.clip(c0 + c1, 0, 1)
circles = snowy.add_border(c0, value=1)
sdf = snowy.unitize(snowy.generate_sdf(circles != 0.0))
stack = snowy.hstack([circles, sdf])
snowy.save(stack, qualify('sdf.png'))
snowy.show(stack)
# Islands
def create_island(seed, gradient, freq=3.5):
    """Generate a 750x512 island image from seeded fractal noise.

    With GRAY_ISLAND set (the module default is True), returns a grayscale
    heightfield; otherwise colorizes the elevation through *gradient*.
    """
    w, h = 750, 512
    falloff = create_falloff(w, h)
    # Four noise octaves: amplitude halves while frequency doubles; each
    # octave gets its own seed offset.
    n1 = 1.000 * snowy.generate_noise(w, h, freq*1, seed+0)
    n2 = 0.500 * snowy.generate_noise(w, h, freq*2, seed+1)
    n3 = 0.250 * snowy.generate_noise(w, h, freq*4, seed+2)
    n4 = 0.125 * snowy.generate_noise(w, h, freq*8, seed+3)
    # The radial falloff keeps elevation low near the borders so land
    # stays in the middle of the image.
    elevation = falloff * (falloff / 2 + n1 + n2 + n3 + n4)
    # Threshold into a mask (presumably sea vs. land -- TODO confirm) and
    # use the distance to the boundary as normalized elevation.
    mask = elevation < 0.4
    elevation = snowy.unitize(snowy.generate_sdf(mask))
    if GRAY_ISLAND:
        return (1 - mask) * np.power(elevation, 3.0)
    # Color path -- unreachable while GRAY_ISLAND is True at module level.
    elevation = snowy.generate_sdf(mask) - 100 * n4
    mask = np.where(elevation < 0, 1, 0)
    # Remap elevation so the mask boundary sits at LUT index 128.
    el = 128 + 127 * elevation / np.amax(elevation)
    return applyColorGradient(el, gradient)
def createColorGradient(pal):
    """Expand an interleaved palette into a 256x32 RGB gradient strip.

    *pal* interleaves breakpoint positions (0-255) with packed 0xRRGGBB
    colors; channels are linearly interpolated between breakpoints.

    Fix over the original: the LUT is sampled with np.arange(0, 256)
    instead of np.arange(0, 255), which silently dropped entry 255.
    """
    inds = pal[0::2]
    cols = np.array(pal[1::2])
    # Split the packed 0xRRGGBB ints into per-channel byte values.
    red, grn, blu = cols >> 16, cols >> 8, cols
    cols = [c & 0xff for c in [red, grn, blu]]
    cols = [interpolate.interp1d(inds, c) for c in cols]
    img = np.arange(0, 256)
    img = np.dstack([fn(img) for fn in cols])
    return snowy.resize(img, 256, 32)
gradient = createColorGradient(STEPPED_PALETTE)
snowy.save(snowy.add_border(gradient), qualify('gradient.png'))

# Render six islands from different seeds, save one individually, then a
# filmstrip of all six.
isles = []
for i in range(6):
    isle = create_island(i * 5, gradient)
    # Downscale to one third width for the web page.
    isle = snowy.resize(isle, width=isle.shape[1] // 3)
    isles.append(isle)
snowy.save(isles[2], qualify('island.png'))
optimize(qualify('island.png'))
isles = snowy.hstack(isles)
snowy.save(isles, qualify('isles.png'))
|
<filename>mcpython/client/gui/InventoryPlayerHotbar.py<gh_stars>1-10
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import sys
import time
import traceback
import typing
import mcpython.client.gui.ContainerRenderer
import mcpython.client.gui.ContainerRenderingManager
import mcpython.client.gui.Slot
import mcpython.engine.event.EventHandler
import mcpython.engine.ResourceLoader
import mcpython.util.opengl
import mcpython.util.texture
import PIL.Image
import pyglet
from mcpython import shared
from mcpython.engine import logger
class _TEXTURES:
    """Namespace holding the hotbar's GUI textures.

    All attributes start empty/None and are filled by reload(), which
    rebinds the module-level TEXTURES alias to a populated class.
    """
    # Heart icons, [row][frame]; see the row comments in reload().
    hearts = []
    # Armor icons (three variants -- presumably empty/half/full, confirm
    # against the atlas).
    armor = []
    # Hunger icons, mirroring the hearts layout.
    hunger = []
    # Hotbar background image and its pixel size (width, height).
    bar = None
    bar_size = None
    # Frame drawn around the currently selected hotbar slot.
    selection = None
    # XP bar strips (two variants cropped from the icons atlas).
    xp_bars = []


# Module-level handle used by the renderer; reload() replaces it.
TEXTURES = _TEXTURES
async def reload():
    """Load the hotbar GUI textures and publish them via the TEXTURES global.

    Reads the icons and widgets atlases through the resource loader, slices
    them into the individual heart / armor / hunger / bar / xp images, and
    rebinds the module-level TEXTURES name to a freshly built class.
    On failure, switches to the loading-exception screen instead of raising.
    """
    import mcpython.engine.ResourceLoader as ResourceLoader

    try:
        base: pyglet.image.AbstractImage = await ResourceLoader.read_pyglet_image(
            "gui/icons"
        )
    # NOTE(review): bare except -- catches everything (including
    # KeyboardInterrupt); the handler logs and shows the error screen.
    except:
        logger.print_exception("[FATAL] failed to load hotbar image")
        import mcpython.common.state.LoadingExceptionViewState as StateLoadingException

        StateLoadingException.error_occur(traceback.format_exc())
        return

    def _get_tex_region(rx, ry, rex, rey):
        # Cut a sub-image out of the icons atlas.  Coordinates are given as
        # fractions of 255 and scaled to the atlas's real pixel size; the y
        # coordinate is flipped (1 - rey/255), i.e. regions are specified
        # from the top of the atlas.
        image = base.get_region(
            round(rx / 255 * base.width),
            round((1 - rey / 255) * base.height),
            round((rex - rx) / 255 * base.width),
            round(((rey - ry) / 255) * base.height),
        )
        return image

    base1 = await ResourceLoader.read_image("minecraft:gui/widgets")
    base2 = await mcpython.engine.ResourceLoader.read_image("minecraft:gui/icons")

    class Textures:
        # todo: make %-based
        # Heart icons, [row][frame]; rows are texture variants, frames are
        # the horizontal neighbours in the atlas.
        hearts = [
            [  # base, regenerate
                _get_tex_region(16, 0, 25, 9),
                _get_tex_region(25, 0, 34, 9),
                _get_tex_region(34, 0, 43, 9),
                _get_tex_region(43, 0, 52, 9),
            ],
            [  # normal, hit
                _get_tex_region(52, 0, 61, 9),
                _get_tex_region(61, 0, 70, 9),
                _get_tex_region(70, 0, 79, 9),
                _get_tex_region(79, 0, 88, 9),
            ],
            [  # poison, hit
                _get_tex_region(88, 0, 97, 9),
                _get_tex_region(97, 0, 106, 9),
                _get_tex_region(106, 0, 115, 9),
                _get_tex_region(115, 0, 124, 9),
            ],
            [  # wither, hit
                _get_tex_region(124, 0, 133, 9),
                _get_tex_region(133, 0, 142, 9),
                _get_tex_region(142, 0, 151, 9),
                _get_tex_region(151, 0, 160, 9),
            ],
            [  # absorption
                _get_tex_region(160, 0, 169, 9),
                _get_tex_region(169, 0, 178, 9),
            ],
        ]
        # Armor icons: three horizontally adjacent variants.
        armor = [
            _get_tex_region(16, 9, 25, 18),
            _get_tex_region(25, 9, 34, 18),
            _get_tex_region(34, 9, 43, 18),
        ]
        # Hunger icons, same [row][frame] layout as the hearts.
        hunger = [
            [  # background
                _get_tex_region(16, 27, 25, 36),
                _get_tex_region(25, 27, 34, 36),
                _get_tex_region(34, 27, 43, 36),
                _get_tex_region(43, 27, 52, 36),
            ],
            [  # normal, regen
                _get_tex_region(52, 27, 61, 36),
                _get_tex_region(61, 27, 70, 36),
                _get_tex_region(70, 27, 79, 36),
                _get_tex_region(79, 27, 88, 36),
            ],
            [  # hunger, regen
                _get_tex_region(88, 27, 97, 36),
                _get_tex_region(97, 27, 106, 36),
                _get_tex_region(106, 27, 115, 36),
                _get_tex_region(115, 27, 124, 36),
            ],
        ]
        # Hotbar background: 182x22 source pixels scaled 2x with
        # nearest-neighbour filtering to keep the pixel-art look.
        bar = mcpython.util.texture.to_pyglet_image(
            base1.crop((0, 0, 182, 22)).resize((364, 44), PIL.Image.NEAREST)
        )
        bar_size = (364, 44)
        # Selection frame for the active slot, also scaled 2x.
        selection = mcpython.util.texture.to_pyglet_image(
            base1.crop((0, 22, 24, 46)).resize((48, 48), PIL.Image.NEAREST)
        )
        # Two xp bar strips cropped from adjacent rows of the icons atlas.
        xp_bars = [
            mcpython.util.texture.to_pyglet_image(
                base2.crop((0, 69, 182, 74)).resize((364, 10), PIL.Image.NEAREST)
            ),
            mcpython.util.texture.to_pyglet_image(
                base2.crop((0, 64, 182, 69)).resize((364, 10), PIL.Image.NEAREST)
            ),
        ]

    # Publish the freshly loaded textures to the rest of the module.
    global TEXTURES
    TEXTURES = Textures
# Rebuild the textures whenever a data reload is triggered.
mcpython.engine.event.EventHandler.PUBLIC_EVENT_BUS.subscribe(
    "data:reload:work", reload
)
# On the client, also schedule an initial texture load for the next tick.
if shared.IS_CLIENT:
    shared.tick_handler.schedule_once(reload())
class InventoryPlayerHotbar(mcpython.client.gui.ContainerRenderer.ContainerRenderer):
    """
    Always-visible hotbar inventory renderer: 9 slots, selection frame,
    and (in the relevant gamemodes) hearts, hunger, armor and xp bars.
    """

    # Pool of freed instances reused by create() to avoid re-allocation.
    INSTANCES: typing.List["InventoryPlayerHotbar"] = []

    @classmethod
    def create(cls, player):
        """Return a pooled instance rebound to *player*, or a new one."""
        if len(cls.INSTANCES) > 0:
            instance = cls.INSTANCES.pop()
            instance.player = player
            return instance
        return cls(player)

    def __init__(self, player):
        super().__init__()
        self.player = player
        # Shared label, used for the held-item name popup and the xp level.
        # NOTE(review): "lable" is a typo for "label", kept for compatibility.
        self.lable = pyglet.text.Label(color=(255, 255, 255, 255))
        # Last seen selected slot index / item name; changes restart the
        # 5-second item-name popup timer.
        self.last_index = 0
        self.last_item = None
        self.time_since_last_change = 0
        # NOTE(review): defined but never drawn -- draw_xp_level() reuses
        # self.lable instead; confirm whether this is intentional.
        self.xp_level_lable = pyglet.text.Label(color=(92, 133, 59), anchor_x="center")

    def free(self):
        """Return this instance to the reuse pool (see create())."""
        InventoryPlayerHotbar.INSTANCES.append(self)

    @staticmethod
    def get_config_file():
        # Layout configuration (slot positions, selected_delta, ...).
        return "assets/config/inventory/player_inventory_hotbar.json"

    def is_blocking_interactions(self) -> bool:
        # The hotbar never captures input; the world stays interactive.
        return False

    # todo: move to container
    async def create_slot_renderers(self) -> list:
        """Create the 9 hotbar slot renderers."""
        return [mcpython.client.gui.Slot.Slot() for _ in range(9)]

    async def on_activate(self):
        pass

    async def on_deactivate(self):
        pass

    def draw(self, hovering_slot=None):
        """Render the hotbar: background, slots, selection frame, status
        bars, and the transient held-item name popup."""
        self.bg_image_size = TEXTURES.bar_size
        x, y = self.get_position()
        y += 40
        TEXTURES.bar.blit(x, y)
        for slot in self.slots:
            slot.draw(
                x, y
            )  # change to default implementation: do NOT render hovering entry
        selected_slot = shared.world.get_active_player().get_active_inventory_slot()
        x, y = selected_slot.position
        # Offset of the selection frame relative to the slot position,
        # configurable via "selected_delta" in the config file.
        dx, dy = (
            tuple(self.config["selected_delta"])
            if "selected_delta" in self.config
            else (8, 8)
        )
        x -= dx
        y -= dy
        dx, dy = self.get_position()
        x += dx
        y += dy
        TEXTURES.selection.blit(x, y + 39)
        # Restart the popup timer whenever the selected slot or item changes.
        if (
            self.last_index != shared.world.get_active_player().active_inventory_slot
            or selected_slot.get_itemstack().get_item_name() != self.last_item
        ):
            self.time_since_last_change = time.time()
            self.last_index = shared.world.get_active_player().active_inventory_slot
            self.last_item = selected_slot.get_itemstack().get_item_name()
        pyglet.gl.glColor3d(1.0, 1.0, 1.0)
        # Status bars only in gamemodes 0 and 2 (presumably survival and
        # adventure -- confirm the gamemode ids).
        if shared.world.get_active_player().gamemode in (0, 2):
            x, y = self.get_position()
            y += 40
            self.draw_hearts(x, y)
            self.draw_hunger(x, y)
            self.draw_xp_level(x, y)
            if shared.world.get_active_player().armor_level > 0:
                self.draw_armor(x, y)
        # Show the held item's name for 5 seconds after a change.
        if (
            selected_slot.get_itemstack().get_item_name()
            and time.time() - self.time_since_last_change <= 5.0
        ):
            self.lable.text = str(selected_slot.get_itemstack().get_item_name())
            self.lable.x = round(
                shared.window.get_size()[0] // 2 - self.lable.content_width // 2
            )
            self.lable.y = 90
            self.lable.draw()
        for slot in self.slots:
            slot.draw_label()

    def draw_hearts(self, hx, hy):
        """Draw the 10 heart icons on the left above the hotbar."""
        wx, _ = shared.window.get_size()
        x = wx // 2 - 10 * 16 - 22
        y = hy + 75
        hearts = round(shared.world.get_active_player().hearts)
        for _ in range(10):
            # Background (empty) heart first, then the filled overlay.
            TEXTURES.hearts[0][0].blit(x, y, width=18, height=18)
            if hearts > 0:
                # Frame 1 when exactly one point remains (presumably the
                # half-heart icon), frame 0 otherwise.
                TEXTURES.hearts[1][bool(hearts == 1)].blit(x, y, width=18, height=18)
            hearts -= 2
            x += 16

    def draw_hunger(self, hx, hy):
        """Draw the 10 hunger icons on the right, mirroring the hearts."""
        wx, _ = shared.window.get_size()
        x = wx // 2 + 22 + 10 * 16
        y = hy + 75
        hunger = round(shared.world.get_active_player().hunger)
        for _ in range(10):
            TEXTURES.hunger[0][0].blit(x, y, width=18, height=18)
            if hunger > 0:
                # Frame 1 when exactly one point remains (see draw_hearts).
                TEXTURES.hunger[1][int(hunger == 1)].blit(x, y, width=18, height=18)
            hunger -= 2
            x -= 16  # hunger row grows right-to-left

    def draw_xp_level(self, hx, hy):
        """Draw the xp bar and, if non-zero, the current xp level number."""
        wx, _ = shared.window.get_size()
        x = wx // 2 - 182
        y = hy + 55
        TEXTURES.xp_bars[1].blit(x, y)
        active_progress = (
            shared.world.get_active_player().xp
            / shared.world.get_active_player().get_needed_xp_for_next_level()
        )
        # Overlay the left part of the bar proportional to progress.
        # NOTE(review): both blits use xp_bars[1]; the full-width background
        # is presumably meant to be xp_bars[0] -- confirm.
        TEXTURES.xp_bars[1].get_region(
            x=0, y=0, height=10, width=round(362 * active_progress) + 1
        ).blit(x, y)
        if shared.world.get_active_player().xp_level != 0:
            self.lable.x = wx // 2
            self.lable.y = hy + 65
            self.lable.text = str(shared.world.get_active_player().xp_level)
            self.lable.draw()

    def draw_armor(self, hx, hy):
        """Draw the 10 armor icons (caller skips this at armor_level 0)."""
        wx, _ = shared.window.get_size()
        x = wx // 2 - 10 * 16 - 22
        y = hy + 95
        armor = round(shared.world.get_active_player().armor_level)
        for _ in range(10):
            TEXTURES.armor[0].blit(x, y, width=18, height=18)
            if armor > 0:
                # Variant index 2 while two or more points remain, 1 for a
                # final single point (presumably full vs. half icon).
                TEXTURES.armor[int(armor != 1) + 1].blit(x, y, width=18, height=18)
            armor -= 2
            x += 16

    def is_closable_by_escape(self) -> bool:
        return False

    def is_always_open(self) -> bool:
        # The hotbar stays visible regardless of other open containers.
        return True
|
<reponame>signorecello/babybuddy<gh_stars>0
# -*- coding: utf-8 -*-
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone, translation
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from rest_framework.authtoken.models import Token
class Settings(models.Model):
    """Per-user application preferences, linked one-to-one to ``User``.

    A row is created automatically for every new user by the
    ``create_user_settings`` signal receiver in this module.
    """

    # Owner of these settings; removed together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Dashboard auto-refresh interval; ``None`` disables refreshing.
    dashboard_refresh_rate = models.DurationField(
        verbose_name=_('Refresh rate'),
        help_text=_('If supported by browser, the dashboard will only refresh '
                    'when visible, and also when receiving focus.'),
        blank=True,
        null=True,
        default=timezone.timedelta(minutes=1),
        choices=[
            (None, _('disabled')),
            (timezone.timedelta(minutes=1), _('1 min.')),
            (timezone.timedelta(minutes=2), _('2 min.')),
            (timezone.timedelta(minutes=3), _('3 min.')),
            (timezone.timedelta(minutes=4), _('4 min.')),
            (timezone.timedelta(minutes=5), _('5 min.')),
            (timezone.timedelta(minutes=10), _('10 min.')),
            (timezone.timedelta(minutes=15), _('15 min.')),
            (timezone.timedelta(minutes=30), _('30 min.')),
        ])
    # Hide dashboard cards that have no data to show.
    dashboard_hide_empty = models.BooleanField(
        verbose_name=_('Hide Empty Dashboard Cards'),
        default=False,
        editable=True
    )
    # Exclude dashboard entries older than this cutoff; ``None`` shows all.
    dashboard_hide_age = models.DurationField(
        verbose_name=_('Hide data older than'),
        help_text=_('This setting controls which data will be shown '
                    'in the dashboard.'),
        blank=True,
        null=True,
        default=None,
        choices=[
            (None, _('show all data')),
            (timezone.timedelta(days=1), _('1 day')),
            (timezone.timedelta(days=2), _('2 days')),
            (timezone.timedelta(days=3), _('3 days')),
            (timezone.timedelta(weeks=1), _('1 week')),
            (timezone.timedelta(weeks=4), _('4 weeks')),
        ])
    # UI language; choices come from the project's LANGUAGES setting.
    language = models.CharField(
        choices=settings.LANGUAGES,
        default=settings.LANGUAGE_CODE,
        max_length=255,
        verbose_name=_('Language')
    )
    # Preferred timezone, restricted to pytz's "common" zone names.
    timezone = models.CharField(
        choices=tuple(zip(pytz.common_timezones, pytz.common_timezones)),
        default=timezone.get_default_timezone_name(),
        max_length=100,
        verbose_name=_('Timezone')
    )
    # Visual theme; choices come from the project's THEMES setting.
    theme = models.CharField(
        choices=settings.THEMES,
        default=settings.DEFAULT_THEME,
        max_length=255,
        verbose_name=_('Theme')
    )

    def __str__(self):
        return str(format_lazy(_('{user}\'s Settings'), user=self.user))

    def api_key(self, reset=False):
        """
        Get or create an API key for the associated user.
        :param reset: If True, delete the existing key and create a new one.
        :return: The user's API key.
        """
        if reset:
            # NOTE(review): assumes a token already exists when resetting;
            # Token.DoesNotExist would propagate otherwise — confirm intended.
            Token.objects.get(user=self.user).delete()
        return Token.objects.get_or_create(user=self.user)[0]

    @property
    def dashboard_refresh_rate_milliseconds(self):
        """
        Convert seconds to milliseconds to be used in a Javascript setInterval
        function call.
        :return: the refresh rate in milliseconds or None.
        """
        if self.dashboard_refresh_rate:
            # ``.seconds`` ignores any whole-day component, which is safe
            # here because the largest selectable rate is 30 minutes.
            return self.dashboard_refresh_rate.seconds * 1000
        return None
@receiver(post_save, sender=User)
def create_user_settings(sender, instance, created, **kwargs):
    """Create a Settings row for every newly created User."""
    if not created:
        return
    Settings.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_settings(sender, instance, **kwargs):
    """Persist the related Settings whenever the User itself is saved."""
    # Relies on the one-to-one reverse accessor; assumes the Settings row
    # already exists (created by create_user_settings on first save).
    instance.settings.save()
@receiver(user_logged_in)
def user_logged_in_callback(sender, request, user, **kwargs):
    """Activate the user's language and timezone preferences on login."""
    if user.settings.language:
        translation.activate(user.settings.language)
        # TODO: Change this behavior as session-based language is deprecated.
        request.session[
            translation.LANGUAGE_SESSION_KEY] = user.settings.language
    if user.settings.timezone:
        timezone.activate(user.settings.timezone)
        # Stored so middleware/views can re-activate it on later requests.
        request.session['user_timezone'] = user.settings.timezone
|
# flake8: noqa
"""
更新主力合约
"""
import os
import sys
import json
from collections import OrderedDict
import pandas as pd
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
from vnpy.data.tdx.tdx_future_data import *
from vnpy.trader.util_wechat import send_wx_msg
from vnpy.trader.utility import load_json, save_json
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f'请输入{vnpy_root}下检查目录,例如 prod/account01', file=sys.stderr)
exit()
print(sys.argv)
for account_folder in sys.argv[1:]:
cta_path = os.path.abspath(os.path.join(vnpy_root, account_folder))
if not os.path.exists(cta_path):
print(f'{cta_path}不存在', file=sys.stderr)
continue
print(f'开始检查{cta_path}下的策略运行配置文件')
account_name = account_folder.split('/')[-1]
# 创建API对象
api_01 = TdxFutureData()
# 更新本地合约缓存信息
api_01.update_mi_contracts()
setting_file_path = os.path.abspath(os.path.join(cta_path, 'cta_strategy_pro_setting.json'))
settings = load_json(setting_file_path, auto_save=False)
if len(settings) == 0:
print('无策略配置')
os._exit(0)
changed = False
for strategy_name, setting in settings.items():
vt_symbol = setting.get('vt_symbol')
if not vt_symbol:
print(f'{strategy_name}配置中无vt_symbol', file=sys.stderr)
continue
if '.' in vt_symbol:
symbol, exchange = vt_symbol.split('.')
else:
symbol = vt_symbol
exchange = None
if exchange == Exchange.SPD:
print(f"暂不处理自定义套利合约{vt_symbol}")
continue
full_symbol = get_full_symbol(symbol).upper()
underlying_symbol = get_underlying_symbol(symbol).upper()
contract_info = api_01.future_contracts.get(underlying_symbol)
if not contract_info:
print(f'{account_name}主力合约配置中,找不到{underlying_symbol}', file=sys.stderr)
continue
if 'mi_symbol' not in contract_info or 'exchange' not in contract_info or 'full_symbol' not in contract_info:
print(f'{account_name}主力合约配置中,找不到mi_symbol/exchange/full_symbol. {contract_info}', file=sys.stderr)
continue
new_mi_symbol = contract_info.get('mi_symbol')
new_exchange = contract_info.get('exchange')
new_vt_symbol = '.'.join([new_mi_symbol, new_exchange])
new_full_symbol = contract_info.get('full_symbol', '').upper()
if full_symbol >= new_full_symbol:
print(f'{account_name}策略配置:长合约{full_symbol}, 主力长合约{new_full_symbol},不更新')
continue
if exchange:
if len(vt_symbol) != len(new_vt_symbol):
print(f'{account_name}配置中,合约{vt_symbol} 与{new_vt_symbol} 长度不匹配,不更新', file=sys.stderr)
continue
else:
if len(symbol) != len(new_mi_symbol):
print(f'{account_name}配置中,合约{vt_symbol} 与{new_mi_symbol} 长度不匹配,不更新', file=sys.stderr)
continue
setting.update({'vt_symbol': new_vt_symbol})
send_wx_msg(f'{account_name}{strategy_name} 主力合约更换:{vt_symbol} => {new_vt_symbol} ')
changed = True
if changed:
save_json(setting_file_path, settings)
print(f'保存{account_name}新配置')
print('更新完毕')
os._exit(0)
|
from os.path import join
import pandas as pd
import qiime2
import biom
import pkg_resources
import q2templates
from mmvec.heatmap import ranks_heatmap, paired_heatmaps
# Directory containing the q2templates assets (index.html) rendered below.
TEMPLATES = pkg_resources.resource_filename('mmvec.q2', 'assets')
def heatmap(output_dir: str,
            ranks: pd.DataFrame,
            microbe_metadata: qiime2.CategoricalMetadataColumn = None,
            metabolite_metadata: qiime2.CategoricalMetadataColumn = None,
            method: str = 'average',
            metric: str = 'euclidean',
            color_palette: str = 'seismic',
            margin_palette: str = 'cubehelix',
            x_labels: bool = False,
            y_labels: bool = False,
            level: int = -1,
            row_center: bool = True) -> None:
    """Render a clustered heatmap of conditional ranks and write a QIIME 2
    visualization (PDF + PNG + index page) into *output_dir*."""
    # Metadata columns become plain pandas Series for plotting.
    if microbe_metadata is not None:
        microbe_metadata = microbe_metadata.to_series()
    if metabolite_metadata is not None:
        metabolite_metadata = metabolite_metadata.to_series()
    # Transpose so microbes become columns, then optionally centre columns.
    ranks = ranks.T
    if row_center:
        ranks = ranks - ranks.mean(axis=0)
    fig = ranks_heatmap(ranks, microbe_metadata, metabolite_metadata,
                        method, metric, color_palette, margin_palette,
                        x_labels, y_labels, level)
    for ext in ('pdf', 'png'):
        fig.savefig(join(output_dir, 'heatmap.' + ext), bbox_inches='tight')
    q2templates.render(join(TEMPLATES, 'index.html'), output_dir, context={
        'title': 'Rank Heatmap',
        'pdf_fp': 'heatmap.pdf',
        'png_fp': 'heatmap.png'})
def paired_heatmap(output_dir: str,
                   ranks: pd.DataFrame,
                   microbes_table: biom.Table,
                   metabolites_table: biom.Table,
                   features: str = None,
                   top_k_microbes: int = 2,
                   keep_top_samples: bool = True,
                   microbe_metadata: qiime2.CategoricalMetadataColumn = None,
                   normalize: str = 'log10',
                   color_palette: str = 'magma',
                   top_k_metabolites: int = 50,
                   level: int = -1,
                   row_center: bool = True) -> None:
    """Render paired microbe/metabolite abundance heatmaps plus TSV tables
    of the selected features, and write a QIIME 2 visualization."""
    if microbe_metadata is not None:
        microbe_metadata = microbe_metadata.to_series()
    # Transpose so microbes become columns, then optionally centre columns.
    ranks = ranks.T
    if row_center:
        ranks = ranks - ranks.mean(axis=0)
    select_microbes, select_metabolites, fig = paired_heatmaps(
        ranks, microbes_table, metabolites_table, microbe_metadata, features,
        top_k_microbes, top_k_metabolites, keep_top_samples, level, normalize,
        color_palette)
    for ext in ('pdf', 'png'):
        fig.savefig(join(output_dir, 'heatmap.' + ext), bbox_inches='tight')
    # Persist the selected feature abundances alongside the figures.
    select_microbes.to_csv(join(output_dir, 'select_microbes.tsv'), sep='\t')
    select_metabolites.to_csv(
        join(output_dir, 'select_metabolites.tsv'), sep='\t')
    q2templates.render(join(TEMPLATES, 'index.html'), output_dir, context={
        'title': 'Paired Feature Abundance Heatmaps',
        'pdf_fp': 'heatmap.pdf',
        'png_fp': 'heatmap.png',
        'table1_fp': 'select_microbes.tsv',
        'download1_text': 'Download microbe abundances as TSV',
        'table2_fp': 'select_metabolites.tsv',
        'download2_text': 'Download top k metabolite abundances as TSV'})
|
<reponame>bagustris/emotion
import logging
import time
from typing import Any, Callable, Dict, Optional, Sequence, Union
import joblib
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.metrics import (
accuracy_score,
f1_score,
get_scorer,
make_scorer,
precision_score,
recall_score,
)
from sklearn.model_selection import BaseCrossValidator, LeaveOneGroupOut
from sklearn.utils.multiclass import unique_labels
from tensorflow.keras.models import Model
from .dataset import LabelledDataset
from .sklearn.classification import sk_cross_validate
from .tensorflow.classification import tf_cross_validate
from .utils import get_cv_splitter
def binary_accuracy_score(
    y_true, y_pred, *, labels=None, average="binary", sample_weight=None
):
    """Calculate binary accuracy. Binary accuracy is the same as
    accuracy considering only a single class.

    Args:
    -----
    y_true:
        Ground truth labels.
    y_pred:
        Predicted labels.
    labels: list, optional
        Labels to include for average != "binary". If None, all unique
        labels in y_true or y_pred are included.
    average: str, optional
        Method to compute average. If "binary" then simply return
        accuracy. If "macro" then return mean binary accuracy. If
        "weighted" then weight the mean binary accuracy by ground truth
        support. If None then return an array of values, one for each
        label in labels.
    sample_weight: array-like, optional
        Per-sample weights forwarded to accuracy_score().

    Returns:
    --------
    label_accs: float or list
        Binary accuracies for labels in labels or average if `average`
        is not None.

    Raises:
    -------
    ValueError: for an unrecognised `average`, non-1D inputs, or when
        average="binary" is used with more than two labels.
    """
    if average not in ("binary", "macro", "weighted", None):
        # BUGFIX: an unrecognised `average` previously fell through and
        # silently returned None.
        raise ValueError(f"Unknown value for `average`: {average!r}")
    all_labels = unique_labels(y_true, y_pred)
    if average == "binary":
        if len(all_labels) != 2:
            raise ValueError("Must only have two labels when `average` is 'binary'.")
        return accuracy_score(y_true, y_pred, sample_weight=sample_weight)
    if y_true.ndim != 1 or y_pred.ndim != 1:
        raise ValueError("y_true and y_pred must be 1D arrays.")
    if labels is None:
        labels = all_labels
    # One-vs-rest accuracy per requested label; labels absent from the
    # data keep their initial 0.
    accs = {lab: 0 for lab in labels}
    for lab in all_labels:
        accs[lab] = accuracy_score(
            y_true == lab, y_pred == lab, sample_weight=sample_weight
        )
    label_accs = [accs[lab] for lab in labels]
    if average == "macro":
        return np.mean(label_accs)
    if average == "weighted":
        counts = [np.count_nonzero(y_true == lab) for lab in labels]
        return np.average(label_accs, weights=counts)
    # average is None: unwrap a singleton result for convenience.
    return label_accs[0] if len(label_accs) == 1 else label_accs
def standard_class_scoring(classes: Sequence[str]):
    """Build the default scoring dict: overall UAR/WAR/F1 metrics plus
    per-class recall, precision, F1 and binary accuracy scorers."""
    # FIXME: Use labels param with macro and micro recall instead of
    # (balanced) accuracy in order to account for absent classes from
    # test data? Or rely on user to create valid train/test splits.
    scoring = {
        "uar": get_scorer("balanced_accuracy"),
        "war": get_scorer("accuracy"),
        "microf1": make_scorer(f1_score, average="micro"),
        "macrof1": make_scorer(f1_score, average="macro"),
    }
    per_class_metrics = [
        ("rec", recall_score),
        ("prec", precision_score),
        ("f1", f1_score),
    ]
    for index, name in enumerate(classes):
        for suffix, metric in per_class_metrics:
            scoring[f"{name}_{suffix}"] = make_scorer(
                metric, average=None, labels=[index], zero_division=0
            )
        scoring[f"{name}_ba"] = make_scorer(
            binary_accuracy_score, average=None, labels=[index]
        )
    return scoring
def within_corpus_cross_validation(
    clf: Union[BaseEstimator, Callable[..., Model]],
    dataset: LabelledDataset,
    clf_lib: Optional[str] = None,
    partition: Optional[str] = None,
    cv: Union[BaseCrossValidator, int] = 10,
    verbose: int = 0,
    n_jobs: int = 1,
    scoring=None,
    fit_params: Optional[Dict[str, Any]] = None,
) -> pd.DataFrame:
    """Cross validates a `Classifier` instance on a single dataset.

    Parameters:
    -----------
    clf: class that implements fit() and predict()
        The classifier to test.
    dataset: LabelledDataset
        The dataset for within-corpus cross-validation.
    clf_lib: str
        One of {"sk", "tf"} to select which library-specific
        cross-validation method to use, since they're not all quite
        compatible.
    partition: str, optional
        The name of the partition to cross-validate over. If None, then
        don't use group cross-validation.
    cv: int or BaseCrossValidator
        A splitter used for cross-validation. Default is KFold(10) for
        10 fold cross-validation.
    verbose: bool
        Passed to cross_validate().
    n_jobs: bool
        Passed to cross_validate().
    scoring: optional
        Scoring dict passed to cross_validate(); defaults to
        ``standard_class_scoring(dataset.classes)``.
    fit_params: dict, optional
        Extra keyword arguments forwarded to the classifier's fit().

    Returns:
    --------
    df: pandas.DataFrame
        A dataframe holding the results from all runs with this model.

    Raises:
    -------
    ValueError: if `clf_lib` is not one of the supported values.
    """
    # BUGFIX: `fit_params={}` was a shared mutable default argument.
    if fit_params is None:
        fit_params = {}
    groups = None if partition is None else dataset.get_group_indices(partition)
    if isinstance(cv, int):
        cv = get_cv_splitter(bool(partition), cv)
    if scoring is None:
        scoring = standard_class_scoring(dataset.classes)
    if clf_lib == "sk":
        cross_validate_fn = sk_cross_validate
    elif clf_lib == "tf":
        # Keras models are not picklable, so keep everything in-process.
        n_jobs = 1
        cross_validate_fn = tf_cross_validate
    else:
        # BUGFIX: any other value previously fell through to a NameError
        # on `cross_validate_fn`; fail fast with a clear message instead.
        raise ValueError(f"Unsupported clf_lib: {clf_lib!r}")
    start_time = time.perf_counter()
    with joblib.Parallel(n_jobs=n_jobs, verbose=verbose):
        scores = cross_validate_fn(
            clf,
            dataset.x,
            dataset.y,
            cv=cv,
            scoring=scoring,
            groups=groups,
            verbose=verbose,
            fit_params=fit_params,
        )
    total_time = time.perf_counter() - start_time
    logging.info(f"Cross-validation complete in {total_time:.2f}s")
    n_folds = len(next(iter(scores.values())))
    # Name folds after the group names for leave-one-group-out, else 1..n.
    if isinstance(cv, LeaveOneGroupOut) and partition is not None:
        index = pd.Index(dataset.get_group_names(partition), name="fold")
    else:
        index = pd.RangeIndex(1, n_folds + 1, name="fold")
    score_df = pd.DataFrame(
        {k[5:]: v for k, v in scores.items() if k.startswith("test_")}, index=index
    )
    return score_df
|
<reponame>Prithwijit-Chak/simpeg<filename>examples/_archived/plot_inv_dcip_dipoledipole_2_5Dinversion_irls.py
"""
2.5D DC inversion of with Iterative Reweighted Least Squares
============================================================
This is an example for 2.5D DC Inversion with Iterative Reweighted
Least Squares (IRLS). Earth includes a topography,
and below the topography conductive and resistive cylinders are embedded.
User is promoted to try different p, qx, qz.
For instance a set of paraemters (default):
* p=0 (sparse model, m)
* qx=2 (smooth model, m in x-direction)
* qz=2 (smooth model, m in z-direction)
But if you want share edges of the model, you can try:
* p=0 (sparse model, m)
* qx=0 (smooth model, m in x-direction)
* qz=2 (smooth model, m in z-direction)
"""
from SimPEG.electromagnetics.static import resistivity as DC
from SimPEG.electromagnetics.static.utils import gen_DCIPsurvey, genTopography
from SimPEG import (
maps,
utils,
data_misfit,
regularization,
optimization,
inversion,
inverse_problem,
directives,
)
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from pylab import hist
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
def run(plotIt=True, survey_type="dipole-dipole", p=0.0, qx=2.0, qz=2.0):
    """Build a synthetic 2.5D DC resistivity example and invert with IRLS.

    Parameters
    ----------
    plotIt : bool
        If True, plot the true model, pseudo-section, data histogram and
        the recovered models.
    survey_type : str
        Geometry passed to ``gen_DCIPsurvey`` (e.g. "dipole-dipole").
    p, qx, qz : float
        Sparsity norms applied to the model and its x/z gradients (see
        the module docstring for suggested combinations).
    """
    np.random.seed(1)
    # Initiate I/O class for DC
    IO = DC.IO()
    # Obtain ABMN locations
    xmin, xmax = 0.0, 200.0
    ymin, ymax = 0.0, 0.0
    zmin, zmax = 0, 0
    endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
    # Generate DC survey object
    survey = gen_DCIPsurvey(endl, survey_type=survey_type, dim=2, a=10, b=10, n=10)
    survey = IO.from_abmn_locations_to_survey(
        survey.locations_a,
        survey.locations_b,
        survey.locations_m,
        survey.locations_n,
        survey_type,
        data_dc_type="volt",
    )
    # Obtain 2D TensorMesh
    mesh, actind = IO.set_mesh()
    topo, mesh1D = genTopography(mesh, -10, 0, its=100)
    actind = utils.surface2ind_topo(mesh, np.c_[mesh1D.vectorCCx, topo])
    survey.drape_electrodes_on_topography(mesh, actind, option="top")
    # Build a conductivity model: a conductive and a resistive sphere
    # embedded below a near-surface layer.
    blk_inds_c = utils.model_builder.getIndicesSphere(
        np.r_[60.0, -25.0], 12.5, mesh.gridCC
    )
    blk_inds_r = utils.model_builder.getIndicesSphere(
        np.r_[140.0, -25.0], 12.5, mesh.gridCC
    )
    layer_inds = mesh.gridCC[:, 1] > -5.0
    sigma = np.ones(mesh.nC) * 1.0 / 100.0
    sigma[blk_inds_c] = 1.0 / 10.0
    sigma[blk_inds_r] = 1.0 / 1000.0
    sigma[~actind] = 1.0 / 1e8  # air cells
    rho = 1.0 / sigma
    # Show the true conductivity model
    if plotIt:
        fig = plt.figure(figsize=(12, 3))
        ax = plt.subplot(111)
        temp = rho.copy()
        temp[~actind] = np.nan
        out = mesh.plotImage(
            temp,
            grid=True,
            ax=ax,
            gridOpts={"alpha": 0.2},
            clim=(10, 1000),
            pcolorOpts={"cmap": "viridis", "norm": colors.LogNorm()},
        )
        ax.plot(
            survey.electrode_locations[:, 0], survey.electrode_locations[:, 1], "k."
        )
        ax.set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())
        ax.set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())
        cb = plt.colorbar(out[0])
        cb.set_label("Resistivity (ohm-m)")
        ax.set_aspect("equal")
        plt.show()
    # Use Exponential Map: m = log(rho)
    actmap = maps.InjectActiveCells(mesh, indActive=actind, valInactive=np.log(1e8))
    mapping = maps.ExpMap(mesh) * actmap
    # Generate mtrue
    mtrue = np.log(rho[actind])
    # Generate 2.5D DC problem
    # "N" means potential is defined at nodes
    prb = DC.Simulation2DNodal(
        mesh, survey=survey, rhoMap=mapping, storeJ=True, Solver=Solver, verbose=True
    )
    # Make synthetic DC data with 5% Gaussian noise
    data = prb.make_synthetic_data(mtrue, relative_error=0.05, add_noise=True)
    IO.data_dc = data.dobs
    # Show apparent resistivity pseudo-section
    if plotIt:
        IO.plotPseudoSection(data=data.dobs / IO.G, data_type="apparent_resistivity")
    # Show apparent resistivity histogram
    if plotIt:
        fig = plt.figure()
        out = hist(data.dobs / IO.G, bins=20)
        # BUGFIX: raw string — "\O" is an invalid escape sequence
        # (DeprecationWarning, future SyntaxError); label typo fixed too.
        plt.xlabel(r"Apparent Resistivity ($\Omega$m)")
        plt.show()
    # Set initial model based upon histogram
    m0 = np.ones(actmap.nP) * np.log(100.0)
    # Set standard_deviation
    # floor
    eps = 10 ** (-3.2)
    # percentage
    relative = 0.05
    dmisfit = data_misfit.L2DataMisfit(simulation=prb, data=data)
    uncert = abs(data.dobs) * relative + eps
    dmisfit.standard_deviation = uncert
    # Map for a regularization
    regmap = maps.IdentityMap(nP=int(actind.sum()))
    # Related to inversion
    reg = regularization.Sparse(
        mesh, indActive=actind, mapping=regmap, gradientType="components"
    )
    # gradientType = 'components'
    reg.norms = np.c_[p, qx, qz, 0.0]
    IRLS = directives.Update_IRLS(
        max_irls_iterations=20, minGNiter=1, beta_search=False, fix_Jmatrix=True
    )
    opt = optimization.InexactGaussNewton(maxIter=40)
    invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
    # NOTE(review): beta, target and update_Jacobi are constructed but not
    # added to directiveList below — kept for parity with the original
    # example; confirm whether they were meant to be included.
    beta = directives.BetaSchedule(coolingFactor=5, coolingRate=2)
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
    target = directives.TargetMisfit()
    update_Jacobi = directives.UpdatePreconditioner()
    inv = inversion.BaseInversion(invProb, directiveList=[betaest, IRLS])
    prb.counter = opt.counter = utils.Counter()
    opt.LSshorten = 0.5
    opt.remember("xc")
    # Run inversion
    mopt = inv.run(m0)
    rho_est = mapping * mopt
    rho_est_l2 = mapping * invProb.l2model
    rho_est[~actind] = np.nan
    rho_est_l2[~actind] = np.nan
    rho_true = rho.copy()
    rho_true[~actind] = np.nan
    # show recovered conductivity
    if plotIt:
        vmin, vmax = rho.min(), rho.max()
        fig, ax = plt.subplots(3, 1, figsize=(20, 9))
        out1 = mesh.plotImage(
            rho_true,
            clim=(10, 1000),
            pcolorOpts={"cmap": "viridis", "norm": colors.LogNorm()},
            ax=ax[0],
        )
        out2 = mesh.plotImage(
            rho_est_l2,
            clim=(10, 1000),
            pcolorOpts={"cmap": "viridis", "norm": colors.LogNorm()},
            ax=ax[1],
        )
        out3 = mesh.plotImage(
            rho_est,
            clim=(10, 1000),
            pcolorOpts={"cmap": "viridis", "norm": colors.LogNorm()},
            ax=ax[2],
        )
        out = [out1, out2, out3]
        titles = ["True", "L2", ("L%d, Lx%d, Lz%d") % (p, qx, qz)]
        for i in range(3):
            ax[i].plot(
                survey.electrode_locations[:, 0], survey.electrode_locations[:, 1], "kv"
            )
            ax[i].set_xlim(IO.grids[:, 0].min(), IO.grids[:, 0].max())
            ax[i].set_ylim(-IO.grids[:, 1].max(), IO.grids[:, 1].min())
            cb = plt.colorbar(out[i][0], ax=ax[i])
            # BUGFIX: raw string — "\O" is an invalid escape sequence.
            cb.set_label(r"Resistivity ($\Omega$m)")
            ax[i].set_xlabel("Northing (m)")
            ax[i].set_ylabel("Elevation (m)")
            ax[i].set_aspect("equal")
            ax[i].set_title(titles[i])
        plt.tight_layout()
        plt.show()
if __name__ == "__main__":
run()
|
<filename>dobot_gym/envs/sim/dobot_env.py
import numpy as np
from gym.envs.robotics import robot_env, utils, rotations
def goal_distance(goal_a, goal_b):
    """Euclidean distance between two equally-shaped goals (last axis)."""
    assert goal_a.shape == goal_b.shape
    delta = goal_a - goal_b
    return np.linalg.norm(delta, axis=-1)
class DobotSimGoalEnv(robot_env.RobotEnv):
    """Goal-based MuJoCo simulation environment for a Dobot arm.

    Follows the gym ``GoalEnv`` convention: observations are dicts with
    ``observation`` / ``achieved_goal`` / ``desired_goal`` keys and the
    reward is sparse (0 on success, -1 otherwise).
    """

    def __init__(self, model_path, n_substeps, n_actions, initial_qpos, distance_threshold):
        # Parent initialisation loads the MuJoCo model and invokes
        # ``_env_setup`` below.
        super().__init__(model_path=model_path, n_actions=n_actions, n_substeps=n_substeps, initial_qpos=initial_qpos)
        # Maximum goal distance that still counts as success.
        self.distance_threshold = distance_threshold
        self.n_actions = n_actions
        # NOTE(review): ``self.has_object`` is read in _get_obs but never
        # assigned here — presumably set by a subclass or elsewhere; confirm.

    def compute_reward(self, achieved_goal, goal, info):
        # Compute distance between goal and the achieved goal.
        # Sparse reward: 0 within the threshold, -1 otherwise.
        d = goal_distance(achieved_goal, goal)
        return -(d > self.distance_threshold).astype(np.float32)

    def _set_action(self, action):
        """Apply a [dx, dy, dz, gripper] action to the simulation."""
        action = action.copy()  # ensure that we don't change the action outside of this scope
        pos_ctrl, gripper_ctrl = action[:3], action[3]
        pos_ctrl *= 0.05  # limit maximum change in position
        rot_ctrl = [1., 0., 1., 0.]  # fixed rotation of the end effector, expressed as a quaternion
        # Same control for both fingers keeps the gripper symmetric.
        gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
        action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
        # Apply action to simulation.
        utils.ctrl_set_action(self.sim, action)
        utils.mocap_set_action(self.sim, action)

    def _get_obs(self):
        """Assemble the goal-env observation dict from simulator state."""
        # positions
        grip_pos = self.sim.data.get_site_xpos('robot0:grip')
        # Velocities are scaled by the effective timestep of one env step.
        dt = self.sim.nsubsteps * self.sim.model.opt.timestep
        grip_velp = self.sim.data.get_site_xvelp('robot0:grip') * dt
        robot_qpos, robot_qvel = utils.robot_get_obs(self.sim)
        if self.has_object:
            object_pos = self.sim.data.get_site_xpos('object0')
            # rotations
            object_rot = rotations.mat2euler(self.sim.data.get_site_xmat('object0'))
            # velocities
            object_velp = self.sim.data.get_site_xvelp('object0') * dt
            object_velr = self.sim.data.get_site_xvelr('object0') * dt
            # gripper state
            object_rel_pos = object_pos - grip_pos
            object_velp -= grip_velp
        else:
            # No object: fill all object slots with empty arrays.
            object_pos = object_rot = object_velp = object_velr = object_rel_pos = np.zeros(0)
        gripper_state = robot_qpos[-2:]
        gripper_vel = robot_qvel[-2:] * dt  # change to a scalar if the gripper is made symmetric
        # The achieved goal is the gripper (reach) or object (manipulation)
        # position.
        if not self.has_object:
            achieved_goal = grip_pos.copy()
        else:
            achieved_goal = np.squeeze(object_pos.copy())
        obs = np.concatenate([
            grip_pos, object_pos.ravel(), object_rel_pos.ravel(), gripper_state, object_rot.ravel(),
            object_velp.ravel(), object_velr.ravel(), grip_velp, gripper_vel,
        ])
        return {
            'observation': obs.copy(),
            'achieved_goal': achieved_goal.copy(),
            'desired_goal': self.goal.copy(),
        }

    def _viewer_setup(self):
        """Point the camera at the gripper with a fixed distance/angle."""
        body_id = self.sim.model.body_name2id('robot0:gripper_link')
        lookat = self.sim.data.body_xpos[body_id]
        for idx, value in enumerate(lookat):
            self.viewer.cam.lookat[idx] = value
        self.viewer.cam.distance = 2.5
        self.viewer.cam.azimuth = 132.
        self.viewer.cam.elevation = -14.

    def _render_callback(self):
        # Visualize target.
        sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
        site_id = self.sim.model.site_name2id('target0')
        self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]
        self.sim.forward()

    def _reset_sim(self):
        """Restore the initial simulator state; always reports success."""
        self.sim.set_state(self.initial_state)
        self.sim.forward()
        return True

    def _sample_goal(self):
        """Sample a goal uniformly within ±0.15 m of the initial gripper."""
        goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-0.15, 0.15, size=3)
        return goal.copy()

    def _is_success(self, achieved_goal, desired_goal):
        """1.0 when within the distance threshold of the goal, else 0.0."""
        d = goal_distance(achieved_goal, desired_goal)
        return (d < self.distance_threshold).astype(np.float32)

    def _env_setup(self, initial_qpos):
        """Set initial joint positions and let the simulation settle."""
        for name, value in initial_qpos.items():
            self.sim.data.set_joint_qpos(name, value)
        utils.reset_mocap_welds(self.sim)
        self.sim.forward()
        # A few steps so mocap welds and contacts stabilise.
        for _ in range(10):
            self.sim.step()
        # Extract information for sampling goals.
        self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()
|
# GUI of ECG Signal Generator with Spectral Analysis and Filtering
# Editor: <NAME>
# Date: 24.01.2020
# Detailed script documentation in form of comments right next to the code
# Enjoy using the program!
# Installing and upgrading all necessary packages
from subprocess import call
# NOTE(review): installing packages at import time via pip is fragile (needs
# network access, mutates the user site); consider a requirements.txt instead.
my_packages = ['matplotlib', 'scipy', 'numpy==1.17.4']


def upgrade(package_list):
    """Install/upgrade the given pip packages into the user site."""
    call(['pip', 'install', '--upgrade', '--user'] + package_list)


upgrade(my_packages)
# import of libraries
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
from scipy.misc import electrocardiogram
import numpy as np
import tkinter as tk
from tkinter import *
from tkinter import ttk
from math import pi
import scipy.fftpack as sf
import scipy.signal as sig
LARGE_FONT= ("Verdana", 12)  # (family, size) used for body text on all pages
class window(tk.Tk):
    """Root application window hosting exactly one page frame at a time."""

    def __init__(self):
        super().__init__()
        self.wm_title("GUI ECG Signal with Advanced Filtering and Spectral_Analysis")
        self._frame = None
        self.switch_frame(StartPage)

    def switch_frame(self, frame_class):
        """Destroys current frame and replaces it with a new one."""
        replacement = frame_class(self)
        previous, self._frame = self._frame, replacement
        if previous is not None:
            previous.destroy()
        replacement.pack()
class StartPage(tk.Frame):
    """Landing page: shows the raw demo ECG plus navigation buttons."""

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="Welcome", font=("Verdana", 16))
        label.pack(pady=10,padx=10)
        self.pack(expand=True, fill='both')
        # Navigation to the other pages of the application.
        ttk.Button(self, text="Analyzer", command=lambda: master.switch_frame(Analyzer)).pack()
        ttk.Button(self, text="Filters", command=lambda: master.switch_frame(Filters)).pack()
        label_1 = tk.Label(self, text="Sampling Rate / Hz") # nice way of sorting widgets and grid to type in text :)
        label_1.pack()
        label_1_1 = tk.Label(self, text="360") # nice way of sorting widgets and grid to type in text :)
        label_1_1.pack()
        label_2 = tk.Label(self, text="Beats per Minutes / bpm")
        label_2.pack()
        # NOTE: `label_2` is reused; the widget above was already packed,
        # so losing the reference is harmless.
        label_2 = tk.Label(self, text="60")
        label_2.pack()
        label = tk.Label(self, text="This is the generated ECG! Analyze and filter your new ECG signal by clicking the buttons above!", font=LARGE_FONT)
        label.pack(pady=10,padx=10)
        self.ecg()

    def ecg(self):
        """Embed a matplotlib plot of scipy's demo ECG in this frame."""
        ecg = electrocardiogram()
        fs = 360  # sampling rate of the demo ECG in Hz
        time = np.arange(ecg.size) / fs
        f = Figure(figsize=(10,6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        # Zoom to a few representative beats.
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(time, ecg)
        canvas = FigureCanvasTkAgg(f, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
class Analyzer(tk.Frame):
    """Page showing the demo ECG trace together with its magnitude spectrum."""

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="Analyzer", font=("Verdana", 16))
        label.pack(pady=10,padx=10)
        ttk.Button(self, text="Back to Home", command=lambda: master.switch_frame(StartPage)).pack()
        ttk.Button(self, text="Filters", command=lambda: master.switch_frame(Filters)).pack()
        label = tk.Label(self, text="The powerspectrum of the generated ECG signal is analyzed here!", font=LARGE_FONT)
        label.pack(pady=10,padx=10)
        self.spectral_analysis()

    def spectral_analysis(self):
        """Plot the ECG (top canvas) and its FFT spectrum (bottom canvas)."""
        # Plotting ECG
        Fs = 360  # sampling rate of scipy's demo ECG in Hz
        x = electrocardiogram()
        n = np.arange(x.size) / Fs
        f = Figure(figsize=(10,6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(n, x)
        canvas = FigureCanvasTkAgg(f, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        # Spectral Analysis
        x_fft = abs(sf.fft(x))
        l = np.size(x)
        # BUGFIX: np.linspace requires an integer sample count; `l / 2` is
        # a float and raises a TypeError on modern NumPy.
        fr = (Fs/2)*np.linspace(0, 1, l // 2)
        x_magnitude = (2 / l)* abs(x_fft[0:np.size(fr)])
        f2 = Figure(figsize=(10,6), dpi=100)
        # BUGFIX: the spectrum axes were added to figure `f` while the
        # second canvas rendered the empty figure `f2`; attach them to `f2`.
        b = f2.add_subplot(111)
        b.set_xlabel('Frequency / Hz')
        b.set_ylabel('Magnitude / dB')
        b.set_title("Spectral analysis of the ECG")
        # NOTE(review): the plotted value is a linear magnitude scaled by
        # 20, not 20*log10 — confirm whether true decibels were intended.
        b.plot(fr, 20*x_magnitude)
        canvas2 = FigureCanvasTkAgg(f2, self)
        canvas2.draw()
        canvas2.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar2 = NavigationToolbar2Tk(canvas2, self)
        toolbar2.update()
        canvas2._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        f.tight_layout()
        f2.tight_layout()
class Filters(tk.Frame):
    """Filter-selection page: shows the ECG and a button per filter type."""

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="Filters", font=("Verdana", 16))
        label.pack(pady=10,padx=10)
        ttk.Button(self, text="Back to Home", command=lambda: master.switch_frame(StartPage)).pack()
        ttk.Button(self, text="Analyzer", command=lambda: master.switch_frame(Analyzer)).pack()
        # One navigation button per filter page (these frame classes are
        # defined elsewhere in this module).
        ttk.Button(self, text="High Pass Filtering",
                   command=lambda: master.switch_frame(Highpass_Filter)).pack()
        ttk.Button(self, text="Low Pass Filtering",
                   command=lambda: master.switch_frame(Lowpass_Filter)).pack()
        ttk.Button(self, text="Band Pass Filtering",
                   command=lambda: master.switch_frame(Bandpass_Filter)).pack()
        ttk.Button(self, text="Band Stop Filtering",
                   command=lambda: master.switch_frame(Bandstop_Filter)).pack()
        label = tk.Label(self, text="This is the ECG we are going to filter! Please select your filter :)", font=LARGE_FONT)
        label.pack(pady=10,padx=10)
        self.ecg()

    def ecg(self):
        """Embed a matplotlib plot of scipy's demo ECG in this frame."""
        ecg = electrocardiogram()
        fs = 360  # sampling rate of the demo ECG in Hz
        time = np.arange(ecg.size) / fs
        f = Figure(figsize=(10,6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        # Zoom to a few representative beats.
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(time, ecg)
        canvas = FigureCanvasTkAgg(f, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
class Bandpass_Filter(tk.Frame):
    """Frame that asks for band-pass parameters via dialogs and shows three
    plots: the filter's frequency response, the filtered ECG and the raw ECG.
    """

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="Band Pass Filtering", font=("Verdana", 16))
        label.pack(pady=10, padx=10)
        ttk.Button(self, text="Analyzer",
                   command=lambda: master.switch_frame(Analyzer)).pack()
        ttk.Button(self, text="Filters",
                   command=lambda: master.switch_frame(Filters)).pack()
        # Query the filter design parameters with modal dialogs.
        Lower_CutoffFrequency = tk.simpledialog.askfloat(
            "Lower CutoffFrequency", "Which lower Cut off Frequency do you want?")
        Upper_CutoffFrequency = tk.simpledialog.askfloat(
            "Upper CutoffFrequency", "Which upper Cut off Frequency do you want?")
        Ordernumber = tk.simpledialog.askinteger(
            "Ordernumber", "Which Ordernumber do you want?")
        # Echo the chosen parameters back to the user.
        for caption, value in (
            ("Lower Cut off Frequency / Hz", Lower_CutoffFrequency),
            ("Upper Cut off Frequency / Hz", Upper_CutoffFrequency),
            ("Ordernumber", Ordernumber),
        ):
            tk.Label(self, text=caption).pack(padx=2, pady=2)
            tk.Label(self, text=value).pack(padx=2, pady=2)
        self.Bandpass_Filter(Lower_CutoffFrequency, Upper_CutoffFrequency, Ordernumber)

    def Bandpass_Filter(self, Lower_CutoffFrequency, Upper_CutoffFrequency, Ordernumber):
        """Design a Butterworth band-pass filter, apply it to the scipy
        example ECG, and embed response/filtered/raw plots in the frame.

        Cut-off frequencies are in Hz; Ordernumber is the filter order.
        """
        matplotlib.pyplot.close('all')
        Fs = 360  # sampling rate of the scipy ECG record / Hz
        x = electrocardiogram()
        n = np.arange(x.size) / Fs
        cut_off_f = np.array([Lower_CutoffFrequency, Upper_CutoffFrequency])
        # butter() expects frequencies normalized to the Nyquist rate (Fs/2).
        normalized = 2 * cut_off_f / Fs
        b, c = sig.butter(Ordernumber, normalized, btype='bandpass')
        # Frequency response of the designed filter.
        W, h = sig.freqz(b, c, worN=1024)
        W = Fs * W / (2 * pi)  # rad/sample -> Hz
        # Each plot gets its own single-axes figure on its own canvas.
        f = Figure(figsize=(10, 6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel('Frequency / Hz')
        a.set_ylabel("Magnitude / dB")
        a.set_title('Band Pass Filter Frequency Response')
        # BUG FIX: take the magnitude of the complex response before log10.
        a.plot(W, 20 * np.log10(np.abs(h)))
        self._embed(f)
        # Band-pass filtered signal.
        x_filtered = sig.lfilter(b, c, x)
        f2 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: the axes were added to figure f, leaving f2's canvas empty.
        a = f2.add_subplot(111)
        a.set_xlabel('Time / s')
        a.set_ylabel("Amplitude / mV")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.set_title('Band Pass Filtered ECG')
        a.plot(n, x_filtered)
        self._embed(f2)
        # Raw ECG for comparison.
        f3 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: same as above -- plot into f3, not f.
        a = f3.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(n, x)
        self._embed(f3, padx=2, pady=2)
        f.tight_layout()
        f2.tight_layout()
        f3.tight_layout()

    def _embed(self, figure, **pack_opts):
        """Create a Tk canvas plus navigation toolbar for *figure* and pack
        both into this frame.

        BUG FIX: the original attached the second toolbar to the *first*
        canvas; here each toolbar is bound to its own canvas.
        """
        canvas = FigureCanvasTkAgg(figure, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True, **pack_opts)
class Highpass_Filter(tk.Frame):
    """Frame that asks for a high-pass cut-off and order via dialogs and shows
    the filter's frequency response, the filtered ECG and the raw ECG.
    """

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="High Pass Filtering", font=("Verdana", 16))
        label.pack(pady=10, padx=10)
        ttk.Button(self, text="Analyzer",
                   command=lambda: master.switch_frame(Analyzer)).pack()
        ttk.Button(self, text="Filters",
                   command=lambda: master.switch_frame(Filters)).pack()
        # Query the filter design parameters with modal dialogs.
        CutoffFrequency = tk.simpledialog.askfloat(
            "CutoffFrequency", "Which Cut off Frequency do you want?")
        Ordernumber = tk.simpledialog.askinteger(
            "Ordernumber", "Which Ordernumber do you want?")
        # Echo the chosen parameters back to the user.
        for caption, value in (
            ("Cut off Frequency / Hz", CutoffFrequency),
            ("Ordernumber", Ordernumber),
        ):
            tk.Label(self, text=caption).pack(padx=2, pady=2)
            tk.Label(self, text=value).pack(padx=2, pady=2)
        self.Highpass_Filter(CutoffFrequency, Ordernumber)

    def Highpass_Filter(self, CutoffFrequency, Ordernumber):
        """Design a Butterworth high-pass filter, apply it to the scipy
        example ECG, and embed response/filtered/raw plots in the frame.
        """
        Fs = 360  # sampling rate of the scipy ECG record / Hz
        x = electrocardiogram()
        n = np.arange(x.size) / Fs
        cut_off_f = np.array([CutoffFrequency])
        # butter() expects frequencies normalized to the Nyquist rate (Fs/2).
        normalized = 2 * cut_off_f / Fs
        b, c = sig.butter(Ordernumber, normalized, btype='highpass')
        # Frequency response of the designed filter.
        W, h = sig.freqz(b, c, worN=1024)
        W = Fs * W / (2 * pi)  # rad/sample -> Hz
        f = Figure(figsize=(10, 6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel('Frequency / Hz')
        a.set_ylabel("Magnitude / dB")
        a.set_title('High Pass Filter Frequency Response')
        # BUG FIX: take the magnitude of the complex response before log10.
        a.plot(W, 20 * np.log10(np.abs(h)))
        self._embed(f)
        # High-pass filtered signal.
        x_filtered = sig.lfilter(b, c, x)
        f2 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: the axes were added to figure f, leaving f2's canvas empty.
        a = f2.add_subplot(111)
        a.set_xlabel('Time / s')
        a.set_ylabel("Amplitude / mV")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.set_title('High Pass Filtered ECG')
        a.plot(n, x_filtered)
        self._embed(f2)
        # Raw ECG for comparison.
        f3 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: same as above -- plot into f3, not f.
        a = f3.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(n, x)
        self._embed(f3, padx=2, pady=2)
        f.tight_layout()
        f2.tight_layout()
        f3.tight_layout()

    def _embed(self, figure, **pack_opts):
        """Create a Tk canvas plus navigation toolbar for *figure* and pack
        both into this frame.

        BUG FIX: the original attached the second toolbar to the *first*
        canvas; here each toolbar is bound to its own canvas.
        """
        canvas = FigureCanvasTkAgg(figure, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True, **pack_opts)
class Lowpass_Filter(tk.Frame):
    """Frame that asks for a low-pass cut-off and order via dialogs and shows
    the filter's frequency response, the filtered ECG and the raw ECG.
    """

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="Low Pass Filtering", font=("Verdana", 16))
        label.pack(pady=10, padx=10)
        ttk.Button(self, text="Analyzer",
                   command=lambda: master.switch_frame(Analyzer)).pack()
        ttk.Button(self, text="Filters",
                   command=lambda: master.switch_frame(Filters)).pack()
        # Query the filter design parameters with modal dialogs.
        CutoffFrequency = tk.simpledialog.askfloat(
            "CutoffFrequency", "Which Cut off Frequency do you want?")
        Ordernumber = tk.simpledialog.askinteger(
            "Ordernumber", "Which Ordernumber do you want?")
        # Echo the chosen parameters back to the user.
        for caption, value in (
            ("Cut off Frequency / Hz", CutoffFrequency),
            ("Ordernumber", Ordernumber),
        ):
            tk.Label(self, text=caption).pack(padx=2, pady=2)
            tk.Label(self, text=value).pack(padx=2, pady=2)
        self.Lowpass_Filter(CutoffFrequency, Ordernumber)

    def Lowpass_Filter(self, CutoffFrequency, Ordernumber):
        """Design a Butterworth low-pass filter, apply it to the scipy
        example ECG, and embed response/filtered/raw plots in the frame.
        """
        Fs = 360  # sampling rate of the scipy ECG record / Hz
        x = electrocardiogram()
        n = np.arange(x.size) / Fs
        cut_off_f = np.array([CutoffFrequency])
        # butter() expects frequencies normalized to the Nyquist rate (Fs/2).
        normalized = 2 * cut_off_f / Fs
        b, c = sig.butter(Ordernumber, normalized, btype='lowpass')
        # Frequency response of the designed filter.
        W, h = sig.freqz(b, c, worN=1024)
        W = Fs * W / (2 * pi)  # rad/sample -> Hz
        f = Figure(figsize=(10, 6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel('Frequency / Hz')
        a.set_ylabel("Magnitude / dB")
        a.set_title('Low Pass Filter Frequency Response')
        # BUG FIX: take the magnitude of the complex response before log10.
        a.plot(W, 20 * np.log10(np.abs(h)))
        self._embed(f)
        # Low-pass filtered signal (original comment wrongly said "Highpass").
        x_filtered = sig.lfilter(b, c, x)
        f2 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: the axes were added to figure f, leaving f2's canvas empty.
        a = f2.add_subplot(111)
        a.set_xlabel('Time / s')
        a.set_ylabel("Amplitude / mV")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.set_title('Low Pass Filtered ECG')
        a.plot(n, x_filtered)
        self._embed(f2)
        # Raw ECG for comparison.
        f3 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: same as above -- plot into f3, not f.
        a = f3.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(n, x)
        self._embed(f3, padx=2, pady=2)
        f.tight_layout()
        f2.tight_layout()
        f3.tight_layout()

    def _embed(self, figure, **pack_opts):
        """Create a Tk canvas plus navigation toolbar for *figure* and pack
        both into this frame.

        BUG FIX: the original attached the second toolbar to the *first*
        canvas; here each toolbar is bound to its own canvas.
        """
        canvas = FigureCanvasTkAgg(figure, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True, **pack_opts)
class Bandstop_Filter(tk.Frame):
    """Frame that asks for band-stop parameters via dialogs and shows the
    filter's frequency response, the filtered ECG and the raw ECG.
    """

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        label = tk.Label(self, text="Band Stop Filtering", font=("Verdana", 16))
        label.pack(pady=10, padx=10)
        ttk.Button(self, text="Analyzer",
                   command=lambda: master.switch_frame(Analyzer)).pack()
        ttk.Button(self, text="Filters",
                   command=lambda: master.switch_frame(Filters)).pack()
        # Query the filter design parameters with modal dialogs.
        Lower_CutoffFrequency = tk.simpledialog.askfloat(
            "Lower CutoffFrequency", "Which lower Cut off Frequency do you want?")
        Upper_CutoffFrequency = tk.simpledialog.askfloat(
            "Upper CutoffFrequency", "Which upper Cut off Frequency do you want?")
        Ordernumber = tk.simpledialog.askinteger(
            "Ordernumber", "Which Ordernumber do you want?")
        # Echo the chosen parameters back to the user.
        for caption, value in (
            ("Lower Cut off Frequency / Hz", Lower_CutoffFrequency),
            ("Upper Cut off Frequency / Hz", Upper_CutoffFrequency),
            ("Ordernumber", Ordernumber),
        ):
            tk.Label(self, text=caption).pack(padx=2, pady=2)
            tk.Label(self, text=value).pack(padx=2, pady=2)
        self.Bandstop_Filter(Lower_CutoffFrequency, Upper_CutoffFrequency, Ordernumber)

    def Bandstop_Filter(self, Lower_CutoffFrequency, Upper_CutoffFrequency, Ordernumber):
        """Design a Butterworth band-stop filter, apply it to the scipy
        example ECG, and embed response/filtered/raw plots in the frame.
        """
        Fs = 360  # sampling rate of the scipy ECG record / Hz
        x = electrocardiogram()
        n = np.arange(x.size) / Fs
        cut_off_f = np.array([Lower_CutoffFrequency, Upper_CutoffFrequency])
        # butter() expects frequencies normalized to the Nyquist rate (Fs/2).
        normalized = 2 * cut_off_f / Fs
        b, c = sig.butter(Ordernumber, normalized, btype='bandstop')
        # Frequency response of the designed filter.
        W, h = sig.freqz(b, c, worN=1024)
        W = Fs * W / (2 * pi)  # rad/sample -> Hz
        f = Figure(figsize=(10, 6), dpi=100)
        a = f.add_subplot(111)
        a.set_xlabel('Frequency / Hz')
        a.set_ylabel("Magnitude / dB")
        a.set_title('Band Stop Filter Frequency Response')
        # BUG FIX: take the magnitude of the complex response before log10.
        a.plot(W, 20 * np.log10(np.abs(h)))
        self._embed(f)
        # Band-stop filtered signal.
        x_filtered = sig.lfilter(b, c, x)
        f2 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: the axes were added to figure f, leaving f2's canvas empty.
        a = f2.add_subplot(111)
        a.set_xlabel('Time / s')
        a.set_ylabel("Amplitude / mV")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.set_title('band stop filtered ECG')
        a.plot(n, x_filtered)
        self._embed(f2)
        # Raw ECG for comparison.
        f3 = Figure(figsize=(10, 6), dpi=100)
        # BUG FIX: same as above -- plot into f3, not f.
        a = f3.add_subplot(111)
        a.set_xlabel("time in s")
        a.set_ylabel("ECG in mV")
        a.set_title("ECG Signal")
        a.set_xlim(46.5, 50)
        a.set_ylim(-2, 1.5)
        a.plot(n, x)
        self._embed(f3, padx=2, pady=2)
        f.tight_layout()
        f2.tight_layout()
        f3.tight_layout()

    def _embed(self, figure, **pack_opts):
        """Create a Tk canvas plus navigation toolbar for *figure* and pack
        both into this frame.

        BUG FIX: the original attached the second toolbar to the *first*
        canvas; here each toolbar is bound to its own canvas.
        """
        canvas = FigureCanvasTkAgg(figure, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        toolbar = NavigationToolbar2Tk(canvas, self)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True, **pack_opts)
# Instantiate the Tk application (class `window` is defined earlier in this
# file) and enter the Tk event loop; blocks until the window is closed.
app = window()
app.mainloop()
|
<reponame>wright-group/bluesky-cmds
from collections import defaultdict
import itertools
import json
import toolz
from qtpy import QtWidgets
from bluesky_queueserver.manager.comms import zmq_single_request
from bluesky_hwproxy import zmq_single_request as hwproxy_request
import WrightTools as wt
from bluesky_cmds.project import widgets as pw
from bluesky_cmds.project import classes as pc
# Ask the queueserver which devices the "admin" user group may use;
# the reply payload maps device name -> description dict.
devices_all_json = zmq_single_request("devices_allowed", {"user_group": "admin"})[0][
    "devices_allowed"
]
# Flat {dotted_name: description} map, filled in below from the nested tree.
devices_all = {}
from pprint import pprint  # NOTE(review): import-time debug output below -- consider removing
pprint(devices_all_json)
def get_all_components(k, v):
    """Flatten a device description tree into ``{dotted_name: description}``.

    ``k`` is the device's (possibly dotted) name and ``v`` its description;
    sub-components found under ``v["components"]`` are recursed into with
    names joined by ``"."``.
    """
    flattened = {k: v}
    for child_name, child_desc in v.get("components", {}).items():
        flattened.update(get_all_components(f"{k}.{child_name}", child_desc))
    return flattened
def get_units(device):
    """Return the units string the hardware proxy reports for *device*.

    Dotted component names ("opa.motor") are resolved by describing the base
    device and looking up the underscore-joined key; returns None when the
    proxy reports no units.
    """
    base_name = device.partition(".")[0]
    key_name = device.replace(".", "_")
    description = hwproxy_request("describe", {"device": base_name})[0]["return"]
    return description.get(key_name, {}).get("units", None)
# Flatten the nested component tree into devices_all.
for k, v in devices_all_json.items():
    devices_all.update(get_all_components(k, v))
# Partition the flattened devices by capability for the widgets below.
devices_movable = list(filter(lambda x: devices_all[x]["is_movable"], devices_all))
devices_not_movable = list(filter(lambda x: not devices_all[x]["is_movable"], devices_all))
# Devices that expose sub-components (e.g. OPAs with tuning motors).
devices_with_deps = list(filter(lambda x: "components" in devices_all[x], devices_all))
class PlanUI:
    """Composite Qt form describing the arguments of one queueserver plan.

    ``items`` is an ordered list of widgets. Each widget exposes ``nargs``
    (-1 = consumes all remaining positional args, 0 = keyword-only,
    n > 0 = consumes exactly n positional args) plus ``args``/``kwargs``
    properties used to serialize and restore the form.
    """

    def __init__(self, items=None):
        if items is None:
            # Generic fallback: metadata plus free-form args/kwargs entry.
            self.items = [
                MetadataWidget(),
                ArgsWidget(),
                KwargsWidget(),
            ]
        else:
            self.items = items
        self.frame = QtWidgets.QWidget()
        self.frame.setLayout(QtWidgets.QVBoxLayout())
        layout = self.frame.layout()
        layout.setContentsMargins(0, 0, 0, 0)
        for x in self.items:
            layout.addWidget(x.frame)

    @property
    def args(self):
        """Concatenated positional args contributed by all item widgets."""
        return list(itertools.chain(*[x.args for x in self.items]))

    @args.setter
    def args(self, args):
        # Distribute the flat positional-arg list over the items in order;
        # a variadic item (nargs < 0) swallows everything that remains.
        for item in self.items:
            if item.nargs < 0:
                item.args = args
                break
            elif item.nargs > 0:
                item.args = args[: item.nargs]
                args = args[item.nargs :]

    @property
    def kwargs(self):
        """Merged keyword args contributed by all item widgets."""
        out = {}
        for x in self.items:
            out.update(x.kwargs)
        return out

    @kwargs.setter
    def kwargs(self, kwargs):
        for item in self.items:
            item.kwargs = kwargs
        # Queueserver items may carry positional args under the "args" key;
        # hand those to the variadic item, if any.
        if "args" in kwargs:
            for item in self.items:
                if item.nargs < 0:
                    item.args = kwargs["args"]
                    break

    def load(self, *args, **kwargs):
        """Populate the whole form from a plan's stored args and kwargs."""
        for x in self.items:
            if args:
                if x.nargs < 0:
                    x.args = args
                    args = []
                elif x.nargs > 0:
                    x.args = args[: x.nargs]
                    args = args[x.nargs :]
            x.kwargs = kwargs
class MetadataWidget:
    """Collects scan metadata (name, info, experimenter) as the ``md`` kwarg."""

    def __init__(self):
        self.nargs = 0  # keyword-only widget
        self.fields = {
            "Name": pc.String(),
            "Info": pc.String(),
            "Experimentor": pc.Combo(["Kyle", "Emily", "Kelson", "Dan"]),
        }

    @property
    def frame(self):
        # NOTE(review): builds a fresh InputTable on every access; callers
        # must read this exactly once per layout or widgets duplicate.
        frame = pw.InputTable()
        frame.add("Metadata", None)
        for k, v in self.fields.items():
            frame.add(k, v)
        return frame

    @property
    def args(self):
        # Contributes no positional arguments.
        return []

    @args.setter
    def args(self, arg):
        pass

    @property
    def kwargs(self):
        return {"md": {k: v.read() for k, v in self.fields.items()}}

    @kwargs.setter
    def kwargs(self, kwargs):
        # Restore only the fields present in the stored metadata dict.
        md = kwargs.get("md", {})
        for k, v in self.fields.items():
            if k in md:
                v.write(md[k])
class ArgsWidget:
    """Free-form JSON entry for a plan's positional arguments.

    ``nargs`` is -1: this widget consumes every remaining positional arg.
    """

    def __init__(self):
        self.nargs = -1
        self.args_input = pc.String()
        table = pw.InputTable()
        table.add("Args", self.args_input)
        self.frame = table

    @property
    def args(self):
        raw = self.args_input.read()
        return json.loads(raw) if raw else []

    @args.setter
    def args(self, args):
        self.args_input.write(json.dumps(args))

    @property
    def kwargs(self):
        # Contributes no keyword arguments.
        return {}

    @kwargs.setter
    def kwargs(self, kwargs):
        pass
class KwargsWidget:
    """Free-form JSON entry for a plan's keyword arguments."""

    def __init__(self):
        self.nargs = 0
        self.kwargs_input = pc.String()
        table = pw.InputTable()
        table.add("Kwargs", self.kwargs_input)
        self.frame = table

    @property
    def kwargs(self):
        raw = self.kwargs_input.read()
        return json.loads(raw) if raw else {}

    @kwargs.setter
    def kwargs(self, kwargs):
        self.kwargs_input.write(json.dumps(kwargs))

    @property
    def args(self):
        # Contributes no positional arguments.
        return []

    @args.setter
    def args(self, args):
        pass
class SingleWidget:
    """Base widget for a single scalar plan argument.

    Subclasses must assign ``self.input`` *before* calling this ``__init__``,
    because the frame is built from it. If ``kwarg`` is given the value is
    emitted as that keyword argument, otherwise as one positional argument.
    """

    def __init__(self, name, kwarg=None, kw_only=False):
        self.nargs = 1
        if kw_only:
            # Keyword-only: consume no positional slots when loading args.
            # NOTE(review): with kw_only=True but kwarg=None the args
            # property still emits a positional value -- confirm intended.
            self.nargs = 0
        self.frame = pw.InputTable()
        self.frame.add(name, self.input)
        self.kwarg = kwarg

    @property
    def args(self):
        return [self.input.read()] if not self.kwarg else []

    @args.setter
    def args(self, arg):
        if arg:
            self.input.write(arg[0])

    @property
    def kwargs(self):
        return {self.kwarg: self.input.read()} if self.kwarg else {}

    @kwargs.setter
    def kwargs(self, kwargs):
        if self.kwarg in kwargs:
            # Route through the args setter so subclasses can override once.
            self.args = [kwargs[self.kwarg]]
class BoolWidget(SingleWidget):
    """Check-box input for one boolean plan argument."""

    def __init__(self, name, kwarg=None):
        # The input must exist before SingleWidget.__init__ builds the frame.
        self.input = pc.Bool()
        super().__init__(name, kwarg=kwarg)
class StrWidget(SingleWidget):
    """Text input for one string plan argument."""

    def __init__(self, name, kwarg=None):
        # The input must exist before SingleWidget.__init__ builds the frame.
        self.input = pc.String()
        super().__init__(name, kwarg=kwarg)
class IntWidget(SingleWidget):
    """Numeric input for one integer plan argument.

    Overrides args/kwargs to coerce the stored value to ``int``.
    """

    def __init__(self, name, kwarg=None, default=0):
        # Zero decimals makes the number entry integer-valued.
        self.input = pc.Number(decimals=0, initial_value=default)
        super().__init__(name, kwarg=kwarg)

    @property
    def args(self):
        if self.kwarg:
            return []
        return [int(self.input.read())]

    @args.setter
    def args(self, arg):
        if arg:
            self.input.write(arg[0])

    @property
    def kwargs(self):
        if not self.kwarg:
            return {}
        return {self.kwarg: int(self.input.read())}

    @kwargs.setter
    def kwargs(self, kwargs):
        if self.kwarg in kwargs:
            self.args = [kwargs[self.kwarg]]
class FloatWidget(SingleWidget):
    """Numeric input for one float plan argument."""

    def __init__(self, name, kwarg=None, default=0):
        # The input must exist before SingleWidget.__init__ builds the frame.
        self.input = pc.Number(initial_value=default)
        super().__init__(name, kwarg=kwarg)
class EnumWidget(SingleWidget):
    """Drop-down widget mapping display labels to argument values.

    ``options`` maps the label shown in the combo box to the value emitted
    in args/kwargs.
    """

    def __init__(self, name, options: dict, kwarg=None):
        # The input must exist before SingleWidget.__init__ builds the frame.
        self.input = pc.Combo(options.keys())
        super().__init__(name, kwarg)
        self._options = options

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, value):
        self.input.set_allowed_values(value.keys())
        self._options = value

    @property
    def args(self):
        return [self.options[self.input.read()]] if not self.kwarg else []

    @args.setter
    def args(self, arg):
        if arg:
            for k, v in self.options.items():
                # BUG FIX: compare the first element, not the whole list.
                # The original `arg == v` compared list-to-value and never
                # matched, so loading stored plans left the combo unchanged.
                if arg[0] == v:
                    self.input.write(k)
                    break

    @property
    def kwargs(self):
        return {self.kwarg: self.options[self.input.read()]} if self.kwarg else {}

    @kwargs.setter
    def kwargs(self, kwargs):
        if self.kwarg in kwargs:
            self.args = [kwargs[self.kwarg]]
class DeviceListWidget:
    """Check-box list selecting which (non-movable) devices to record.

    Contributes one positional argument: the list of selected device names.
    """

    def __init__(self):
        self.nargs = 1
        # Default: every read-only device enabled.
        self.inputs = {k: pc.Bool(True) for k in devices_not_movable}
        self.frame = pw.InputTable()
        self.frame.add("Devices", None)
        for k, v in self.inputs.items():
            self.frame.add(k, v)

    @property
    def kwargs(self):
        # Contributes no keyword arguments.
        return {}

    @kwargs.setter
    def kwargs(self, kwargs):
        pass

    @property
    def args(self):
        return [[k for k, v in self.inputs.items() if v.read()]]

    @args.setter
    def args(self, args):
        # args is a one-element list wrapping the device-name list.
        arg = args[0]
        for device in self.inputs:
            if device in arg:
                self.inputs[device].write(True)
            else:
                self.inputs[device].write(False)
class ConstantWidget:
    """Editor for the ``constants`` kwarg: a growable list of Constant rows
    managed with ADD/REMOVE buttons."""

    def __init__(self):
        self.nargs = 0  # keyword-only widget
        self.frame = QtWidgets.QWidget()
        self.frame.setLayout(QtWidgets.QVBoxLayout())
        self.frame.layout().setContentsMargins(0, 0, 0, 0)
        label = pw.InputTable()
        label.add("Constants", None)
        self.frame.layout().addWidget(label)
        # Container that holds the per-constant rows.
        self.constants_container_widget = QtWidgets.QWidget()
        self.constants_container_widget.setLayout(QtWidgets.QVBoxLayout())
        self.constants_container_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.constants = []
        self.frame.layout().addWidget(self.constants_container_widget)
        add_button = pw.SetButton("ADD")
        remove_button = pw.SetButton("REMOVE", "stop")
        add_button.clicked.connect(self.add_constant)
        remove_button.clicked.connect(self.remove_constant)
        self.frame.layout().addWidget(add_button)
        self.frame.layout().addWidget(remove_button)

    def add_constant(self, hardware=None, units="ps", terms=None):
        """Append a Constant row; defaults to the first movable device."""
        # TODO better default
        if not hardware:
            hardware = devices_movable[0]
        if terms is None:
            terms = [[1, "d1"]]
        const = Constant(hardware, units, terms)
        self.constants.append(const)
        self.constants_container_widget.layout().addWidget(const)

    def remove_constant(self):
        """Remove the most recently added Constant row (no-op when empty)."""
        if not self.constants:
            return
        const = self.constants[-1]
        self.constants = self.constants[:-1]
        self.constants_container_widget.layout().removeWidget(const)

    @property
    def args(self):
        # Contributes no positional arguments.
        return []

    @args.setter
    def args(self, args):
        pass

    @property
    def kwargs(self):
        return {"constants": [c.args for c in self.constants]}

    @kwargs.setter
    def kwargs(self, kwargs):
        # Rebuild the rows from stored plan parameters.
        while self.constants:
            self.remove_constant()
        for c in kwargs.get("constants", []):
            self.add_constant(*c)
class Constant(pw.InputTable):
    """Editor for one scan constant: a hardware held at a linear expression
    of other hardware positions, e.g. ``1*d1 + 0.5``."""

    def __init__(self, hardware, units, terms):
        super().__init__()
        self.add("Constant", None)
        self.hardware = pc.Combo(devices_movable)
        self.hardware.write(hardware)
        self.hardware.updated.connect(self.on_hardware_updated)
        self.add("Hardware", self.hardware)
        self.units = pc.Combo(wt.units.blessed_units)
        self.units.write(units)
        self.add("Units", self.units)
        self.expression = pc.String()
        # Render [(coeff, hardware_or_None), ...] as "c1*hw1 + c2" text.
        self.expression.write(
            " + ".join(f"{coeff}*{hw}" if hw else f"{coeff}" for coeff, hw in terms)
        )
        self.add("Expression", self.expression)
        self.on_hardware_updated()

    @property
    def args(self):
        """Serialize as [hardware, units_or_None, terms]."""
        units = self.units.read()
        if units == "None":
            units = None
        return [self.hardware.read(), units, self.terms]

    @property
    def terms(self):
        """Parse the expression into [(coefficient, hardware_name_or_None), ...]."""
        import sympy  # local import: sympy is only needed when reading terms
        expr = sympy.parse_expr(self.expression.read())
        coeffs = expr.as_coefficients_dict()
        # Re-key: sympy symbols -> their names; the numeric term -> None.
        for k, v in list(coeffs.items()):
            del coeffs[k]
            if isinstance(k, sympy.Number):
                coeffs[None] = float(k * v)
            else:
                coeffs[k.name] = float(v)
        return [(v, k) for k, v in coeffs.items()]

    def on_hardware_updated(self):
        """Restrict the units combo to conversions valid for the hardware."""
        hw_name = self.hardware.read()
        native = get_units(hw_name)
        units_list = [
            i for i in (native,) + wt.units.get_valid_conversions(native) if i != "mm_delay"
        ]
        self.units.set_allowed_values(units_list)
class GenericScanArgsWidget:
    """Base widget managing a growable list of axis rows.

    ``partition`` is the number of positional args each axis row contributes.
    Subclasses must implement ``add_axis`` (it is called once here to seed
    the form with a default axis).
    """

    def __init__(self, partition):
        self.nargs = -1  # variadic: consumes all remaining positional args
        self.partition = partition
        self.frame = QtWidgets.QWidget()
        self.frame.setLayout(QtWidgets.QVBoxLayout())
        self.frame.layout().setContentsMargins(0, 0, 0, 0)
        label = pw.InputTable()
        label.add("Axes", None)
        self.frame.layout().addWidget(label)
        # Container that holds the per-axis rows.
        self.axis_container_widget = QtWidgets.QWidget()
        self.axis_container_widget.setLayout(QtWidgets.QVBoxLayout())
        self.axis_container_widget.layout().setContentsMargins(0, 0, 0, 0)
        self.axes = []
        self.frame.layout().addWidget(self.axis_container_widget)
        add_button = pw.SetButton("ADD")
        remove_button = pw.SetButton("REMOVE", "stop")
        add_button.clicked.connect(self.add_axis)
        remove_button.clicked.connect(self.remove_axis)
        self.frame.layout().addWidget(add_button)
        self.frame.layout().addWidget(remove_button)
        self.add_axis()

    def add_axis(self, *args):
        # Abstract: subclasses append a concrete axis row.
        raise NotImplementedError

    def remove_axis(self):
        """Remove the most recently added axis row (no-op when empty)."""
        if not self.axes:
            return
        ax = self.axes[-1]
        self.axes = self.axes[:-1]
        self.axis_container_widget.layout().removeWidget(ax)

    @property
    def args(self):
        """Flattened positional args from all axis rows."""
        return list(itertools.chain(*[a.args for a in self.axes]))

    @args.setter
    def args(self, args):
        # Rebuild the axis rows, `partition` args at a time.
        while self.axes:
            self.remove_axis()
        for c in toolz.partition(self.partition, args):
            self.add_axis(*c)

    @property
    def kwargs(self):
        # Contributes no keyword arguments.
        return {}

    @kwargs.setter
    def kwargs(self, kwargs):
        pass
class GridscanAxis(pw.InputTable):
    """Input row for one grid-scan axis: hardware, start, stop, number of
    points and units."""

    def __init__(self, hardware, start, stop, npts, units):
        super().__init__()
        self.add("Axis", None)
        self.hardware = pc.Combo(devices_movable)
        self.hardware.write(hardware)
        self.hardware.updated.connect(self.on_hardware_updated)
        self.add("Hardware", self.hardware)
        self.start = pc.Number(start)
        self.add("Start", self.start)
        self.stop = pc.Number(stop)
        self.add("Stop", self.stop)
        self.npts = pc.Number(npts, decimals=0)
        self.add("Npts", self.npts)
        self.units = pc.Combo(wt.units.blessed_units)
        self.units.write(units)
        self.add("Units", self.units)
        self.on_hardware_updated()

    @property
    def args(self):
        """Positional args consumed by the *_grid_scan_wp plans."""
        units = self.units.read()
        if units == "None":
            units = None
        return [
            self.hardware.read(),
            self.start.read(),
            self.stop.read(),
            int(self.npts.read()),
            units,
        ]

    def on_hardware_updated(self):
        """Restrict the units combo to conversions valid for the hardware.

        CONSISTENCY FIX: use the shared get_units() helper, as ScanAxis and
        ListAxis already do, instead of duplicating the hwproxy describe
        lookup inline; the helper also tolerates devices whose description
        lacks the units key.
        """
        hw_name = self.hardware.read()
        native = get_units(hw_name)
        units_list = [
            i for i in (native,) + wt.units.get_valid_conversions(native) if i != "mm_delay"
        ]
        self.units.set_allowed_values(units_list)
class GridscanArgsWidget(GenericScanArgsWidget):
    """Axes editor for grid scans; each axis contributes 5 positional args
    (hardware, start, stop, npts, units)."""

    def __init__(self):
        super().__init__(5)

    def add_axis(self, hardware=None, start=0, stop=1, npts=11, units="ps"):
        """Append a GridscanAxis row, defaulting to the first movable device."""
        chosen = hardware if hardware else devices_movable[0]
        row = GridscanAxis(chosen, start, stop, npts, units)
        self.axes.append(row)
        self.axis_container_widget.layout().addWidget(row)
class ScanAxis(pw.InputTable):
    """Input row for one scan axis: hardware, start, stop and units
    (the shared point count lives in a separate IntWidget)."""

    def __init__(self, hardware, start, stop, units):
        super().__init__()
        self.add("Axis", None)
        self.hardware = pc.Combo(devices_movable)
        self.hardware.write(hardware)
        self.add("Hardware", self.hardware)
        self.hardware.updated.connect(self.on_hardware_updated)
        self.start = pc.Number(start)
        self.add("Start", self.start)
        self.stop = pc.Number(stop)
        self.add("Stop", self.stop)
        self.units = pc.Combo(wt.units.blessed_units)
        self.units.write(units)
        self.add("Units", self.units)
        self.on_hardware_updated()

    @property
    def args(self):
        """Positional args consumed by the *_scan_wp plans."""
        units = self.units.read()
        if units == "None":
            units = None
        return [
            self.hardware.read(),
            self.start.read(),
            self.stop.read(),
            units,
        ]

    def on_hardware_updated(self):
        """Restrict the units combo to conversions valid for the hardware."""
        hw_name = self.hardware.read()
        native = get_units(hw_name)
        units_list = [
            i for i in (native,) + wt.units.get_valid_conversions(native) if i != "mm_delay"
        ]
        self.units.set_allowed_values(units_list)
class ScanArgsWidget(GenericScanArgsWidget):
    """Axes editor for scans; each axis contributes 4 positional args
    (hardware, start, stop, units)."""

    def __init__(self):
        super().__init__(4)

    def add_axis(self, hardware=None, start=0, stop=1, units="ps"):
        """Append a ScanAxis row, defaulting to the first movable device."""
        chosen = hardware if hardware else devices_movable[0]
        row = ScanAxis(chosen, start, stop, units)
        self.axes.append(row)
        self.axis_container_widget.layout().addWidget(row)
class ListAxis(pw.InputTable):
    """Input row for one list-scan axis: hardware, an explicit JSON list of
    positions, and units.

    NOTE(review): the ``list`` parameter shadows the builtin.
    """

    def __init__(self, hardware, list, units):
        super().__init__()
        self.add("Axis", None)
        self.hardware = pc.Combo(devices_movable)
        self.hardware.write(hardware)
        self.add("Hardware", self.hardware)
        self.hardware.updated.connect(self.on_hardware_updated)
        self.list = pc.String()
        # Store the position list as JSON text in a string field.
        self.list.write(json.dumps(list) or "[]")
        self.add("List", self.list)
        self.units = pc.Combo(wt.units.blessed_units)
        self.units.write(units)
        self.add("Units", self.units)
        self.on_hardware_updated()

    @property
    def args(self):
        """Positional args consumed by the *_list_scan_wp plans."""
        units = self.units.read()
        if units == "None":
            units = None
        return [
            self.hardware.read(),
            json.loads(self.list.read()) or [],
            units,
        ]

    def on_hardware_updated(self):
        """Restrict the units combo to conversions valid for the hardware."""
        hw_name = self.hardware.read()
        native = get_units(hw_name)
        units_list = [
            i for i in (native,) + wt.units.get_valid_conversions(native) if i != "mm_delay"
        ]
        self.units.set_allowed_values(units_list)
class ListscanArgsWidget(GenericScanArgsWidget):
    """Axes editor for list scans; each axis contributes 3 positional args
    (hardware, list-of-positions, units)."""

    def __init__(self):
        super().__init__(3)

    def add_axis(self, hardware=None, list=None, units="ps"):
        """Append a ListAxis row, defaulting to the first movable device.

        (Parameter name ``list`` kept for backward compatibility even
        though it shadows the builtin.)
        """
        # BUG FIX: the default was a mutable ``list=[]`` shared across all
        # calls; use None and build a fresh list per call instead.
        if list is None:
            list = []
        if not hardware:
            hardware = devices_movable[0]
        axis = ListAxis(hardware, list, units)
        self.axes.append(axis)
        self.axis_container_widget.layout().addWidget(axis)
class OpaSelectorWidget(EnumWidget):
    """Drop-down of OPA devices (devices exposing more than one component)."""

    def __init__(self, name="opa"):
        # Only devices with at least two sub-components are offered; single-
        # component devices are not treated as tunable OPAs here.
        super().__init__(name, options={x:x for x in devices_with_deps if len(devices_all_json.get(x, {}).get("components", {})) > 1})
class OpaMotorSelectorWidget(EnumWidget):
    """Drop-down of the motors belonging to the currently selected OPA.

    Must be paired with an OpaSelectorWidget; the options track that
    selector's current choice.
    """

    def __init__(self, name="motor", opa_selector=None):
        if opa_selector is None:
            raise ValueError("Must specify associated opa selector")
        self.opa_selector = opa_selector
        # Placeholder options; replaced immediately by on_opa_selected().
        super().__init__(name, options={"None": None})
        self.on_opa_selected()
        self.opa_selector.input.updated.connect(self.on_opa_selected)
        # TODO mutual exclusion

    def on_opa_selected(self):
        """Refresh the motor options from the selected OPA's components."""
        motors = {
            x: x
            for x in devices_all_json[self.opa_selector.args[0]]["components"]
        }
        if not motors:
            motors = {"None": None}
        self.options = motors
class OpaMotorAxis(pw.InputTable):
    """Input row describing how one OPA motor is scanned during motortune."""

    def __init__(self, motor, method, center, width, npts, opa_selector):
        super().__init__()
        self.opa_selector = opa_selector
        if motor is None:
            # Default to the first component of the currently selected OPA.
            motor = list(devices_all_json[self.opa_selector.args[0]]["components"].keys())[0]
        self.add("Motor Axis", None)
        self.motor = pc.Combo(devices_all_json[self.opa_selector.args[0]]["components"].keys())
        self.motor.write(motor)
        self.add("Motor", self.motor)
        self.center = pc.Number(center)
        self.add("Center", self.center)
        self.width = pc.Number(width)
        self.add("Width", self.width)
        self.npts = pc.Number(npts, decimals=0)
        self.add("npts", self.npts)
        # NOTE(review): the `method` parameter is accepted but never stored;
        # kwargs always reports method="scan" (see TODO below).
        self.opa_selector.input.updated.connect(self.on_opa_updated)

    @property
    def kwargs(self):
        """Per-motor scan parameters for the ``motors`` kwarg."""
        # TODO 'static' method does not work so I don't give it a gui element yet -- 2022-05-16 KFS
        return {"method": "scan", "center": self.center.read(), "width": self.width.read(), "npts": int(self.npts.read())}

    def on_opa_updated(self):
        """Refresh the motor choices when the associated OPA changes."""
        self.motor.set_allowed_values(devices_all_json[self.opa_selector.args[0]]["components"].keys())
class OpaMotorFullWidget(GenericScanArgsWidget):
    """Motor-axes editor for motortune; keyword-only, emitting the
    ``motors`` kwarg mapping motor name -> scan parameters."""

    def __init__(self, opa_selector):
        self.opa_selector = opa_selector
        # partition is unused because args/kwargs are overridden below.
        # NOTE(review): the inherited args.setter would fail with
        # partition=None if it were ever invoked -- confirm it is unused.
        super().__init__(None)
        self.nargs = 0  # keyword-only; overrides the -1 set by the base
        self.kwarg = "motors"

    def add_axis(self, motor=None, method="scan", center=0, width=1, npts=11):
        """Append an OpaMotorAxis row."""
        if not motor:
            # NOTE(review): devices_movable[0] is a full device name, not a
            # component of the selected OPA -- confirm this default.
            motor = devices_movable[0]
        axis = OpaMotorAxis(motor, method, center, width, npts, opa_selector=self.opa_selector)
        self.axes.append(axis)
        self.axis_container_widget.layout().addWidget(axis)

    @property
    def args(self):
        # Contributes no positional arguments.
        return []

    @property
    def kwargs(self):
        return {"motors":{a.motor.read(): a.kwargs for a in self.axes}}

    @kwargs.setter
    def kwargs(self, value):
        # Rebuild the motor rows from stored plan parameters.
        while self.axes:
            self.remove_axis()
        if "motors" in value:
            for mot, params in value["motors"].items():
                self.add_axis(motor=mot, **params)
class SpectrometerWidget(pw.InputTable):
    """Configuration block for a spectrometer-like device.

    Contributes a single kwarg ``{name: {...}}`` (or ``{name: None}``);
    which fields are included depends on the selected method.
    """

    def __init__(self, name="spectrometer", include_center=True):
        super().__init__()
        self.nargs = 0  # keyword-only widget
        self.name = name
        self.frame = self  # this InputTable is its own frame
        self.add("Spectrometer", None)
        # Offer only component-free devices whose units convert to nm.
        spec_devices = []
        for dev in devices_all_json:
            if dev not in devices_with_deps and wt.units.is_valid_conversion(get_units(dev), "nm"):
                spec_devices.append(dev)
        self.device = pc.Combo(["None"] + spec_devices)
        self.add("Device", self.device)
        self.method = pc.Combo(["none", "static", "zero", "track", "scan"])
        self.add("Method", self.method)
        self.center = pc.Number()
        self.add("Center", self.center)
        self.width = pc.Number(-250)
        self.add("Width", self.width)
        self.units = pc.Combo(("wn",) + wt.units.get_valid_conversions("wn"))
        self.add("Units", self.units)
        self.npts = pc.Number(11, decimals=0)
        self.add("Npts", self.npts)
        # Fields relevant for each method.
        # BUG FIX: "zero"/"track" used ("device"), which is a plain string,
        # not a 1-tuple; membership tests like `"center" in "device"` then
        # did substring checks that only worked by accident.
        self.used = {
            "none": (),
            "static": ("device", "center", "units"),
            "zero": ("device",),
            "track": ("device",),
            "scan": ("device", "center", "width", "units", "npts"),
        }
        if not include_center:
            self.used["scan"] = ("device", "width", "units", "npts")
        self.device.updated.connect(self.on_device_selected)
        self.method.updated.connect(self.on_method_selected)
        self.on_device_selected()

    @property
    def kwargs(self):
        """{name: parameter-dict} or {name: None} when disabled."""
        device = self.device.read()
        method = self.method.read()
        if device == "None" or method == "none":
            return {self.name: None}
        # Keep only the fields relevant for the chosen method.
        out = {
            k: v
            for k, v in {
                "device": device,
                "method": method,
                "center": self.center.read(),
                "width": self.width.read(),
                "npts": int(self.npts.read()),
            }.items()
            if k in self.used[method] or k == "method"
        }
        return {self.name: out}

    @property
    def args(self):
        # Contributes no positional arguments.
        return []

    def on_device_selected(self):
        """Disable all numeric fields when no device is selected."""
        if self.device.read() == "None":
            for var in ("center", "width", "units", "npts"):
                getattr(self, var).set_disabled(True)
        else:
            self.on_method_selected()

    def on_method_selected(self):
        """Enable exactly the fields the chosen method uses."""
        method = self.method.read()
        for var in ("device", "center", "width", "units", "npts"):
            getattr(self, var).set_disabled(var not in self.used[method])
# Registry mapping queueserver plan name -> PlanUI form. Unknown plan names
# fall back to the generic Metadata/Args/Kwargs form via defaultdict.
plan_ui_lookup = defaultdict(PlanUI)
plan_ui_lookup["grid_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        GridscanArgsWidget(),
        ConstantWidget(),
    ]
)
plan_ui_lookup["rel_grid_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        GridscanArgsWidget(),
        ConstantWidget(),
    ]
)
plan_ui_lookup["scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        ScanArgsWidget(),
        IntWidget("Npts", "num", 11),
        ConstantWidget(),
    ]
)
plan_ui_lookup["rel_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        ScanArgsWidget(),
        IntWidget("Npts", "num", 11),
        ConstantWidget(),
    ]
)
plan_ui_lookup["list_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        ListscanArgsWidget(),
        ConstantWidget(),
    ]
)
plan_ui_lookup["rel_list_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        ListscanArgsWidget(),
        ConstantWidget(),
    ]
)
plan_ui_lookup["list_grid_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        ListscanArgsWidget(),
        ConstantWidget(),
    ]
)
plan_ui_lookup["rel_list_grid_scan_wp"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        ListscanArgsWidget(),
        ConstantWidget(),
    ]
)
plan_ui_lookup["count"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        IntWidget("Npts", "num", 1),
        FloatWidget("Delay", "delay", 0),
    ]
)
plan_ui_lookup["run_tune_test"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        OpaSelectorWidget(),
        SpectrometerWidget(include_center=False),
    ]
)
# The tuning plans below each need the motor selector(s) to track the OPA
# choice, so `opa` is rebound to a fresh OpaSelectorWidget per plan and
# shared between the widgets of that plan only.
opa=OpaSelectorWidget()
plan_ui_lookup["run_setpoint"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        opa,
        OpaMotorSelectorWidget(opa_selector=opa),
        FloatWidget("Width", "width", 1),
        IntWidget("Npts", "npts", 11),
        SpectrometerWidget(include_center=False),
    ]
)
opa=OpaSelectorWidget()
plan_ui_lookup["run_intensity"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        opa,
        OpaMotorSelectorWidget(opa_selector=opa),
        FloatWidget("Width", "width", 1),
        IntWidget("Npts", "npts", 11),
        SpectrometerWidget(include_center=False),
    ]
)
opa=OpaSelectorWidget()
plan_ui_lookup["run_holistic"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        opa,
        # Two motor selectors: holistic tuning scans two motors of one OPA.
        OpaMotorSelectorWidget(opa_selector=opa),
        OpaMotorSelectorWidget(opa_selector=opa),
        FloatWidget("Width", "width", 1),
        IntWidget("Npts", "npts", 11),
        SpectrometerWidget(include_center=False),
    ]
)
opa=OpaSelectorWidget()
plan_ui_lookup["motortune"] = PlanUI(
    [
        MetadataWidget(),
        DeviceListWidget(),
        opa,
        BoolWidget("Use Tune Points", "use_tune_points"),
        OpaMotorFullWidget(opa_selector=opa),
        SpectrometerWidget(),
    ]
)
|
import os
import numpy as np
import h5py
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import argparse
import ConfigParser
import sys
import shlex
from string import Template
import time
import argparse
import distutils.util
# Command-line interface.  fromfile_prefix_chars='@' lets a single '@file'
# argument name a response file whose contents supply the options.
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('-n', dest='network', default='SG', help='SG, Args, Nargs, Attr, Cat' )
# Required inputs: vector file, model architecture JSON, trained weights, split name.
parser.add_argument('-v', dest='vectorFn', required = True, help=' ')
parser.add_argument('-m', dest='modelFn', required = True, help=' ')
parser.add_argument('-w', dest='weightsFn', required = True, help=' ')
parser.add_argument('-s', dest='sType', required = True, help='train, test, dev')
# Optional outputs.
parser.add_argument('-r', dest='resultsFn', default='', help=' ')
parser.add_argument('-p', dest='pickleFn', default='', help='store yvals and targets')
# Runtime knobs.  NOTE: distutils.util.strtobool returns 0/1 (int), not bool.
parser.add_argument('--gpuMem', dest='gpuMem', default=0.0, type=float, help='0.0=no gpu, 1.0=all memory')
parser.add_argument('--hardSG', dest='hardSG', default=False, type=distutils.util.strtobool, help='force HardSG from soft')
parser.add_argument('--forceSenna', dest='forceSenna', default=False, type=distutils.util.strtobool, help='translate from Glove to Senna')
parser.add_argument('--forceGlove', dest='forceGlove', default=False, type=distutils.util.strtobool, help='translate from Senna to Glove')
parser.add_argument('--debug', dest='debug', default=False, type=distutils.util.strtobool, help='Debug')
parser.add_argument('--maxSamples', dest='maxSamples', default=None, type=int, help='Maximum Samples from train, test, dev')
parser.add_argument('--noSG', dest='noSG', default=False, type=distutils.util.strtobool, help='no SG Feature input')
parser.add_argument('--testBatch', dest='testBatch', default=256, type=int, help='batch size for test')
if len(sys.argv) == 1:
# add default option string here
aString = ' '
sys.argv = [''] + aString.split(' ')
print sys.argv
if sys.argv[1].startswith('@'):
args, unknown = parser.parse_known_args()
args, unknown = parser.parse_known_args( shlex.split(open(sys.argv[1][1:]).read()) )
if unknown:
print '\n' * 10
print 'Warning, unknown args', unknown
print '\n' * 10
else:
args = parser.parse_args()
s = []
for arg in vars(args):
s.append( '%-20s = %-20s %-20s ' % (arg, getattr(args, arg), '(' + str(type(getattr(args, arg))) + ')' ) )
s.sort()
#print '\n'.join(s)
# With (near-)zero GPU memory requested, hide all CUDA devices so TensorFlow
# falls back to CPU.  This must happen BEFORE tensorflow is imported below.
if (args.gpuMem < 0.01):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
def get_session(gpu_fraction=0.6):
    '''Return a TF session limited to gpu_fraction of the GPU's memory.

    If OMP_NUM_THREADS is set, it also caps intra-op CPU parallelism.
    '''
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        # Environment variables are strings; the ConfigProto field is an
        # integer, so convert explicitly (the original passed the raw str).
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=int(num_threads)))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# A GPU fraction was requested: install a memory-capped session before Keras
# creates its default one.
if args.gpuMem >= 0.01:
    KTF.set_session(get_session(gpu_fraction=args.gpuMem))
import keras
from keras import backend as K
from keras.layers import Input, Embedding, LSTM, Dense, Reshape, merge, Concatenate
from keras.layers import Activation, Lambda, Dropout, Layer, Masking, TimeDistributed, Bidirectional
from keras.models import Model, model_from_json
from SGGenerator import *
from pprint import pprint as p
from keras.regularizers import l2
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import plot_model
ga = AMRDataGenerator.getGeneralArch(args.vectorFn)
if ga['target']=='L0':
agt = SGGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples)
elif ga['target']=='args':
agt = ArgsGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples)
elif ga['target']=='nargs':
agt = NargsGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples)
elif ga['target']=='attr':
agt = AttrGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples)
elif ga['target']=='ncat':
agt = CatGenerator(args.vectorFn, args.sType, args.testBatch , maxItems=args.maxSamples)
else:
print 'Type of network is not determined by the vector genArch:'
p(ga)
print ga
exit(1)
# Rebuild the network from its serialized JSON architecture, then attach the
# trained weights and print a layer summary.
with open(args.modelFn, 'r') as json_file:
    loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
model.load_weights(args.weightsFn)
model.summary()
distSG_embedding_matrix = agt.readAMRDBFeatureInfo()
layers = model.layers
loadCount=0
for i in range(len(layers)):
name = layers[i].name
if 'distSGTable' == name:
print 'loading weights from vectors into model for ', name
w = model.get_layer(name).get_weights()
print w[0].shape
print distSG_embedding_matrix.shape
w[0][:distSG_embedding_matrix.shape[0]] = distSG_embedding_matrix
model.get_layer(name).set_weights(w)
loadCount+=1
elif 'logDistSGTable' == name:
log_em = np.log(distSG_embedding_matrix + 1e-20) # don't allow zero, log is inf.
log_em[0] *= 0.0
print 'loading weights from vectors into model for ', name
w = model.get_layer(name).get_weights()
print w[0].shape
print log_em.shape
w[0][:log_em.shape[0]] = log_em
model.get_layer(name).set_weights(w)
loadCount+=1
if loadCount != 1:
print 'WARNING, load count is', loadCount
numberOfBatches = (agt.numberOfItems())/args.testBatch
if numberOfBatches * args.testBatch < agt.numberOfItems():
numberOfBatches += 1
y_vals = model.predict_generator(agt.generate(), numberOfBatches)[0:agt.numberOfItems()]
agt.setCurrentIX(0)
targets = agt.getTargets(agt.numberOfItems() )
print 'yvals, targets: ', len(y_vals), len(targets)
if args.pickleFn:
pickle.dump ( (y_vals, targets), open(args.pickleFn, 'wb') )
if args.resultsFn:
df, sm, rc, sc, precision, recall, f1, cString = agt.writeAMRResultsDatabase(args.resultsFn, y_vals, targets)
else:
df, sm, rc, sc, precision, recall, f1, cString = agt.getConfusionStats(y_vals, targets)
print cString
print df
print 'test sm, rc, sc, precision, recall, f1:', sm, rc, sc, precision, recall, f1
print 'Done'
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.datastore_admin_v1.services.datastore_admin import (
DatastoreAdminAsyncClient,
)
from google.cloud.datastore_admin_v1.services.datastore_admin import (
DatastoreAdminClient,
)
from google.cloud.datastore_admin_v1.services.datastore_admin import pagers
from google.cloud.datastore_admin_v1.services.datastore_admin import transports
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.longrunning import operations_pb2
from google.oauth2 import service_account
def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair for mTLS option tests."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Swap a localhost default endpoint for a fixed non-local test host."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis hosts to their mTLS twins."""
    convert = DatastoreAdminClient._get_default_mtls_endpoint
    assert convert(None) is None
    # Plain and sandbox API hosts gain the ".mtls." infix; hosts that already
    # have it are unchanged; non-googleapis hosts pass through untouched.
    cases = [
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert convert(endpoint) == expected
@pytest.mark.parametrize(
    "client_class", [DatastoreAdminClient, DatastoreAdminAsyncClient]
)
def test_datastore_admin_client_from_service_account_file(client_class):
    """Both file/json factory classmethods wire the loaded credentials into
    the transport, which targets the production host."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        for constructor in (
            client_class.from_service_account_file,
            client_class.from_service_account_json,
        ):
            client = constructor("dummy/file/path.json")
            assert client.transport._credentials == creds
        assert client.transport._host == "datastore.googleapis.com:443"
def test_datastore_admin_client_get_transport_class():
    """Transport lookup resolves to gRPC both by default and by name."""
    assert (
        DatastoreAdminClient.get_transport_class()
        == transports.DatastoreAdminGrpcTransport
    )
    assert (
        DatastoreAdminClient.get_transport_class("grpc")
        == transports.DatastoreAdminGrpcTransport
    )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DatastoreAdminClient, transports.DatastoreAdminGrpcTransport, "grpc"),
        (
            DatastoreAdminAsyncClient,
            transports.DatastoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    DatastoreAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DatastoreAdminClient),
)
@mock.patch.object(
    DatastoreAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DatastoreAdminAsyncClient),
)
def test_datastore_admin_client_client_options(
    client_class, transport_class, transport_name
):
    """Exercise every ClientOptions / env-var combination that selects the
    transport: an explicit transport instance, a transport name, an explicit
    api_endpoint, each GOOGLE_API_USE_MTLS_ENDPOINT value, an invalid
    GOOGLE_API_USE_CLIENT_CERTIFICATE value, and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(DatastoreAdminClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(DatastoreAdminClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                ssl_channel_credentials=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                ssl_channel_credentials=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            ssl_channel_credentials=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (DatastoreAdminClient, transports.DatastoreAdminGrpcTransport, "grpc", "true"),
        (
            DatastoreAdminAsyncClient,
            transports.DatastoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (DatastoreAdminClient, transports.DatastoreAdminGrpcTransport, "grpc", "false"),
        (
            DatastoreAdminAsyncClient,
            transports.DatastoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    DatastoreAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DatastoreAdminClient),
)
@mock.patch.object(
    DatastoreAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DatastoreAdminAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_datastore_admin_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the endpoint must switch to the
    mTLS host exactly when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a
    client certificate is available (from options or from ADC)."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            ssl_channel_creds = mock.Mock()
            with mock.patch(
                "grpc.ssl_channel_credentials", return_value=ssl_channel_creds
            ):
                patched.return_value = None
                client = client_class(client_options=options)
                if use_client_cert_env == "false":
                    expected_ssl_channel_creds = None
                    expected_host = client.DEFAULT_ENDPOINT
                else:
                    expected_ssl_channel_creds = ssl_channel_creds
                    expected_host = client.DEFAULT_MTLS_ENDPOINT
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=expected_host,
                    scopes=None,
                    ssl_channel_credentials=expected_ssl_channel_creds,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
            ):
                with mock.patch(
                    "google.auth.transport.grpc.SslCredentials.is_mtls",
                    new_callable=mock.PropertyMock,
                ) as is_mtls_mock:
                    with mock.patch(
                        "google.auth.transport.grpc.SslCredentials.ssl_credentials",
                        new_callable=mock.PropertyMock,
                    ) as ssl_credentials_mock:
                        if use_client_cert_env == "false":
                            is_mtls_mock.return_value = False
                            ssl_credentials_mock.return_value = None
                            # NOTE(review): `client` here is still the instance
                            # from the previous block -- the new client is only
                            # constructed below.  DEFAULT_ENDPOINT is a class
                            # attribute, so the expected value is the same,
                            # but confirm this was intentional.
                            expected_host = client.DEFAULT_ENDPOINT
                            expected_ssl_channel_creds = None
                        else:
                            is_mtls_mock.return_value = True
                            ssl_credentials_mock.return_value = mock.Mock()
                            expected_host = client.DEFAULT_MTLS_ENDPOINT
                            expected_ssl_channel_creds = (
                                ssl_credentials_mock.return_value
                            )
                        patched.return_value = None
                        client = client_class()
                        patched.assert_called_once_with(
                            credentials=None,
                            credentials_file=None,
                            host=expected_host,
                            scopes=None,
                            ssl_channel_credentials=expected_ssl_channel_creds,
                            quota_project_id=None,
                            client_info=transports.base.DEFAULT_CLIENT_INFO,
                        )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
            ):
                with mock.patch(
                    "google.auth.transport.grpc.SslCredentials.is_mtls",
                    new_callable=mock.PropertyMock,
                ) as is_mtls_mock:
                    is_mtls_mock.return_value = False
                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=client.DEFAULT_ENDPOINT,
                        scopes=None,
                        ssl_channel_credentials=None,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DatastoreAdminClient, transports.DatastoreAdminGrpcTransport, "grpc"),
        (
            DatastoreAdminAsyncClient,
            transports.DatastoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_datastore_admin_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via client_options are forwarded to the transport."""
    requested_scopes = ["1", "2"]
    options = client_options.ClientOptions(scopes=requested_scopes,)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=requested_scopes,
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DatastoreAdminClient, transports.DatastoreAdminGrpcTransport, "grpc"),
        (
            DatastoreAdminAsyncClient,
            transports.DatastoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_datastore_admin_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file from client_options is handed to the transport."""
    creds_path = "credentials.json"
    options = client_options.ClientOptions(credentials_file=creds_path)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=creds_path,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_datastore_admin_client_client_options_from_dict():
    """A plain dict is accepted wherever a ClientOptions instance is."""
    transport_path = (
        "google.cloud.datastore_admin_v1.services.datastore_admin.transports."
        "DatastoreAdminGrpcTransport.__init__"
    )
    with mock.patch(transport_path) as grpc_transport:
        grpc_transport.return_value = None
        client = DatastoreAdminClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            ssl_channel_credentials=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_export_entities(
    transport: str = "grpc", request_type=datastore_admin.ExportEntitiesRequest
):
    """export_entities issues one RPC and wraps the result in a future."""
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to exercise the code path.
    req = request_type()
    with mock.patch.object(type(client.transport.export_entities), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.export_entities(req)
    # Exactly one underlying stub call, carrying an empty request proto.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == datastore_admin.ExportEntitiesRequest()
    # The client wraps the raw Operation in a long-running future.
    assert isinstance(response, future.Future)
def test_export_entities_from_dict():
    """The request may also be given as a plain dict."""
    test_export_entities(request_type=dict)
@pytest.mark.asyncio
async def test_export_entities_async(
    transport: str = "grpc_asyncio", request_type=datastore_admin.ExportEntitiesRequest
):
    """Async variant: export_entities awaits the stub and returns a future."""
    client = DatastoreAdminAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_entities), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.export_entities(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == datastore_admin.ExportEntitiesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_entities_async_from_dict():
    """The async path also accepts a plain dict request."""
    await test_export_entities_async(request_type=dict)
def test_export_entities_flattened():
    """Flattened keyword args are copied onto the generated request."""
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.export_entities), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Pass a truthy value for every flattened field.
        client.export_entities(
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
            output_url_prefix="output_url_prefix_value",
        )
        # One stub call whose request carries each keyword value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        sent = call_args[0]
        assert sent.project_id == "project_id_value"
        assert sent.labels == {"key_value": "value_value"}
        assert sent.entity_filter == datastore_admin.EntityFilter(
            kinds=["kinds_value"]
        )
        assert sent.output_url_prefix == "output_url_prefix_value"
def test_export_entities_flattened_error():
    """Mixing a request object with flattened kwargs raises ValueError."""
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.export_entities(
            datastore_admin.ExportEntitiesRequest(),
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
            output_url_prefix="output_url_prefix_value",
        )
@pytest.mark.asyncio
async def test_export_entities_flattened_async():
    """Flattened kwargs reach the request proto on the async client too."""
    client = DatastoreAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_entities), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (A plain Operation used to be assigned here first and was then
        # immediately overwritten -- that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.export_entities(
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
            output_url_prefix="output_url_prefix_value",
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].labels == {"key_value": "value_value"}
    assert args[0].entity_filter == datastore_admin.EntityFilter(
        kinds=["kinds_value"]
    )
    assert args[0].output_url_prefix == "output_url_prefix_value"
@pytest.mark.asyncio
async def test_export_entities_flattened_error_async():
    """The async client also rejects request-object + flattened kwargs."""
    client = DatastoreAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.export_entities(
            datastore_admin.ExportEntitiesRequest(),
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
            output_url_prefix="output_url_prefix_value",
        )
def test_import_entities(
    transport: str = "grpc", request_type=datastore_admin.ImportEntitiesRequest
):
    """import_entities issues one RPC and wraps the result in a future."""
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to exercise the code path.
    req = request_type()
    with mock.patch.object(type(client.transport.import_entities), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.import_entities(req)
    # Exactly one underlying stub call, carrying an empty request proto.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == datastore_admin.ImportEntitiesRequest()
    # The client wraps the raw Operation in a long-running future.
    assert isinstance(response, future.Future)
def test_import_entities_from_dict():
    """The request may also be given as a plain dict."""
    test_import_entities(request_type=dict)
@pytest.mark.asyncio
async def test_import_entities_async(
    transport: str = "grpc_asyncio", request_type=datastore_admin.ImportEntitiesRequest
):
    """Async variant: import_entities awaits the stub and returns a future."""
    client = DatastoreAdminAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_entities), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.import_entities(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == datastore_admin.ImportEntitiesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_entities_async_from_dict():
    """The async path also accepts a plain dict request."""
    await test_import_entities_async(request_type=dict)
def test_import_entities_flattened():
    """Flattened keyword args are copied onto the generated request."""
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.import_entities), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Pass a truthy value for every flattened field.
        client.import_entities(
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            input_url="input_url_value",
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
        )
        # One stub call whose request carries each keyword value.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        sent = call_args[0]
        assert sent.project_id == "project_id_value"
        assert sent.labels == {"key_value": "value_value"}
        assert sent.input_url == "input_url_value"
        assert sent.entity_filter == datastore_admin.EntityFilter(
            kinds=["kinds_value"]
        )
def test_import_entities_flattened_error():
    """Mixing a request object with flattened kwargs raises ValueError."""
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.import_entities(
            datastore_admin.ImportEntitiesRequest(),
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            input_url="input_url_value",
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
        )
@pytest.mark.asyncio
async def test_import_entities_flattened_async():
    """Flattened kwargs reach the request proto on the async client too."""
    client = DatastoreAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_entities), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (A plain Operation used to be assigned here first and was then
        # immediately overwritten -- that dead assignment is removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.import_entities(
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            input_url="input_url_value",
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].labels == {"key_value": "value_value"}
    assert args[0].input_url == "input_url_value"
    assert args[0].entity_filter == datastore_admin.EntityFilter(
        kinds=["kinds_value"]
    )
@pytest.mark.asyncio
async def test_import_entities_flattened_error_async():
    """The async client also rejects request-object + flattened kwargs."""
    client = DatastoreAdminAsyncClient(credentials=credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.import_entities(
            datastore_admin.ImportEntitiesRequest(),
            project_id="project_id_value",
            labels={"key_value": "value_value"},
            input_url="input_url_value",
            entity_filter=datastore_admin.EntityFilter(kinds=["kinds_value"]),
        )
def test_get_index(
    transport: str = "grpc", request_type=datastore_admin.GetIndexRequest
):
    """get_index returns a fully-populated Index from the stubbed RPC."""
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # All proto3 fields are optional and the API is mocked, so an empty
    # request is enough to exercise the code path.
    req = request_type()
    expected = dict(
        project_id="project_id_value",
        index_id="index_id_value",
        kind="kind_value",
        ancestor=index.Index.AncestorMode.NONE,
        state=index.Index.State.CREATING,
    )
    with mock.patch.object(type(client.transport.get_index), "__call__") as rpc:
        rpc.return_value = index.Index(**expected)
        response = client.get_index(req)
    # Exactly one underlying stub call, carrying an empty request proto.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == datastore_admin.GetIndexRequest()
    # The stubbed fields survive the round trip unchanged.
    assert isinstance(response, index.Index)
    for field_name, value in expected.items():
        assert getattr(response, field_name) == value
def test_get_index_from_dict():
    """The request may also be given as a plain dict."""
    test_get_index(request_type=dict)
@pytest.mark.asyncio
async def test_get_index_async(
    transport: str = "grpc_asyncio", request_type=datastore_admin.GetIndexRequest
):
    """get_index (async): the stub is awaited and the Index fields round-trip."""
    client = DatastoreAdminAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_index), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            index.Index(
                project_id="project_id_value",
                index_id="index_id_value",
                kind="kind_value",
                ancestor=index.Index.AncestorMode.NONE,
                state=index.Index.State.CREATING,
            )
        )
        response = await client.get_index(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore_admin.GetIndexRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, index.Index)
    assert response.project_id == "project_id_value"
    assert response.index_id == "index_id_value"
    assert response.kind == "kind_value"
    assert response.ancestor == index.Index.AncestorMode.NONE
    assert response.state == index.Index.State.CREATING
@pytest.mark.asyncio
async def test_get_index_async_from_dict():
    """get_index (async) also accepts a plain dict as the request object."""
    await test_get_index_async(request_type=dict)
def test_list_indexes(
    transport: str = "grpc", request_type=datastore_admin.ListIndexesRequest
):
    """list_indexes (sync): the stub is called once and a pager wrapping the response is returned."""
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FIX: the token literal had been corrupted by "<PASSWORD>" placeholders
        # (and no longer matched the assertion below, so the test could never
        # pass). Restored to "next_page_token_value", consistent with the
        # async variant of this test.
        call.return_value = datastore_admin.ListIndexesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_indexes(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore_admin.ListIndexesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListIndexesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_indexes_from_dict():
    """list_indexes also accepts a plain dict as the request object."""
    test_list_indexes(request_type=dict)
@pytest.mark.asyncio
async def test_list_indexes_async(
    transport: str = "grpc_asyncio", request_type=datastore_admin.ListIndexesRequest
):
    """list_indexes (async): the stub is awaited and an async pager wrapping the response is returned."""
    client = DatastoreAdminAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Designate an appropriate return value for the call.
        # FIX: the token literal had been corrupted to "<PASSWORD>" and no
        # longer matched the assertion below; restored so the designated
        # value and the assertion agree.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datastore_admin.ListIndexesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_indexes(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datastore_admin.ListIndexesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListIndexesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_indexes_async_from_dict():
    """list_indexes (async) also accepts a plain dict as the request object."""
    await test_list_indexes_async(request_type=dict)
def test_list_indexes_pager():
    """Iterating the sync pager transparently walks all pages and yields every Index."""
    # NOTE(review): AnonymousCredentials is passed as the class (no parens)
    # here, unlike the RPC tests above — presumably tolerated by the client
    # constructor; confirm / normalise to AnonymousCredentials().
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            datastore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,  # raised only if the pager over-fetches past the last page
        )
        metadata = ()
        pager = client.list_indexes(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 indexes across the four pages.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, index.Index) for i in results)
def test_list_indexes_pages():
    """The sync pager's `pages` view exposes each raw page with its next_page_token."""
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            datastore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,  # raised only if the pager over-fetches past the last page
        )
        pages = list(client.list_indexes(request={}).pages)
        # The final page has the proto default token: the empty string.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_indexes_async_pager():
    """Async-iterating the async pager walks all pages and yields every Index."""
    client = DatastoreAdminAsyncClient(credentials=credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_indexes), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            datastore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,  # raised only if the pager over-fetches past the last page
        )
        async_pager = await client.list_indexes(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 indexes across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, index.Index) for i in responses)
@pytest.mark.asyncio
async def test_list_indexes_async_pages():
    """The async pager's `pages` view exposes each raw page with its next_page_token."""
    client = DatastoreAdminAsyncClient(credentials=credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_indexes), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            datastore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            datastore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,  # raised only if the pager over-fetches past the last page
        )
        pages = []
        async for page_ in (await client.list_indexes(request={})).pages:
            pages.append(page_)
        # The final page has the proto default token: the empty string.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
    """Combining a transport instance with credentials, a credentials file, or scopes must raise."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.DatastoreAdminGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DatastoreAdminClient(
            credentials=credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.DatastoreAdminGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DatastoreAdminClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.DatastoreAdminGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DatastoreAdminClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client may be instantiated with a custom transport instance, which it adopts as-is."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.DatastoreAdminGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    client = DatastoreAdminClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both the sync and asyncio gRPC transports expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.DatastoreAdminGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.DatastoreAdminGrpcAsyncIOTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DatastoreAdminGrpcTransport,
        transports.DatastoreAdminGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """A client should use the gRPC transport by default."""
    # A client should use the gRPC transport by default.
    client = DatastoreAdminClient(credentials=credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.DatastoreAdminGrpcTransport,)
def test_datastore_admin_base_transport_error():
    """Passing both a credentials object and credentials_file should raise DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(exceptions.DuplicateCredentialArgs):
        transport = transports.DatastoreAdminTransport(
            credentials=credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_datastore_admin_base_transport():
    """Every RPC method and the LRO client on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.datastore_admin_v1.services.datastore_admin.transports.DatastoreAdminTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.DatastoreAdminTransport(
            credentials=credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "export_entities",
        "import_entities",
        "get_index",
        "list_indexes",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_datastore_admin_base_transport_with_credentials_file():
    """A credentials file is loaded with the Datastore scopes and the quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        auth, "load_credentials_from_file"
    ) as load_creds, mock.patch(
        "google.cloud.datastore_admin_v1.services.datastore_admin.transports.DatastoreAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.DatastoreAdminTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id="octopus",
        )
def test_datastore_admin_base_transport_with_adc():
    """ADC is consulted when neither credentials nor a credentials file is supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(auth, "default") as adc, mock.patch(
        "google.cloud.datastore_admin_v1.services.datastore_admin.transports.DatastoreAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.DatastoreAdminTransport()
        adc.assert_called_once()
def test_datastore_admin_auth_adc():
    """The client requests ADC with the Datastore scopes and no quota project."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        DatastoreAdminClient()
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id=None,
        )
def test_datastore_admin_transport_auth_adc():
    """The gRPC transport requests ADC with the Datastore scopes and the given quota project."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.DatastoreAdminGrpcTransport(
            host="squid.clam.whelk", quota_project_id="octopus"
        )
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id="octopus",
        )
def test_datastore_admin_host_no_port():
    """An api_endpoint without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="datastore.googleapis.com")
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == "datastore.googleapis.com:443"
def test_datastore_admin_host_with_port():
    """An api_endpoint with an explicit port is used verbatim."""
    options = client_options.ClientOptions(api_endpoint="datastore.googleapis.com:8000")
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == "datastore.googleapis.com:8000"
def test_datastore_admin_grpc_transport_channel():
    """A user-supplied channel is adopted verbatim by the sync gRPC transport."""
    channel = grpc.insecure_channel("http://localhost/")
    # Check that channel is used if provided.
    transport = transports.DatastoreAdminGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # FIX: use `is None` rather than `== None` (PEP 8 / E711) — identity
    # comparison is the correct check for the None singleton.
    assert transport._ssl_channel_credentials is None
def test_datastore_admin_grpc_asyncio_transport_channel():
    """A user-supplied channel is adopted verbatim by the asyncio gRPC transport."""
    channel = aio.insecure_channel("http://localhost/")
    # Check that channel is used if provided.
    transport = transports.DatastoreAdminGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DatastoreAdminGrpcTransport,
        transports.DatastoreAdminGrpcAsyncIOTransport,
    ],
)
def test_datastore_admin_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """mTLS via a client cert source: SSL creds are built from the callback and passed to create_channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel", autospec=True
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = credentials.AnonymousCredentials()
            # api_mtls_endpoint/client_cert_source are deprecated, hence the warning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            # The cert/key bytes come from client_cert_source_callback (defined
            # earlier in this module).
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/datastore",
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DatastoreAdminGrpcTransport,
        transports.DatastoreAdminGrpcAsyncIOTransport,
    ],
)
def test_datastore_admin_transport_channel_mtls_with_adc(transport_class):
    """mTLS without a cert source: SSL creds come from google.auth SslCredentials (ADC path)."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel", autospec=True
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # api_mtls_endpoint is deprecated, hence the warning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/datastore",
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_datastore_admin_grpc_lro_client():
    """The sync transport lazily builds a single cached OperationsClient."""
    client = DatastoreAdminClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_datastore_admin_grpc_lro_async_client():
    """The asyncio transport lazily builds a single cached OperationsAsyncClient."""
    client = DatastoreAdminAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
    """Building the billing-account resource path produces the expected template."""
    assert (
        DatastoreAdminClient.common_billing_account_path("squid")
        == "billingAccounts/squid"
    )
def test_parse_common_billing_account_path():
    """Parsing a built billing-account path recovers the original fields (round-trip)."""
    fields = {"billing_account": "clam"}
    path = DatastoreAdminClient.common_billing_account_path(**fields)
    # Check that the path construction is reversible.
    assert DatastoreAdminClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """Building the folder resource path produces the expected template."""
    assert DatastoreAdminClient.common_folder_path("whelk") == "folders/whelk"
def test_parse_common_folder_path():
    """Parsing a built folder path recovers the original fields (round-trip)."""
    fields = {"folder": "octopus"}
    path = DatastoreAdminClient.common_folder_path(**fields)
    # Check that the path construction is reversible.
    assert DatastoreAdminClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """Building the organization resource path produces the expected template."""
    assert (
        DatastoreAdminClient.common_organization_path("oyster")
        == "organizations/oyster"
    )
def test_parse_common_organization_path():
    """Parsing a built organization path recovers the original fields (round-trip)."""
    fields = {"organization": "nudibranch"}
    path = DatastoreAdminClient.common_organization_path(**fields)
    # Check that the path construction is reversible.
    assert DatastoreAdminClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """Building the project resource path produces the expected template."""
    assert DatastoreAdminClient.common_project_path("cuttlefish") == "projects/cuttlefish"
def test_parse_common_project_path():
    """Parsing a built project path recovers the original fields (round-trip)."""
    fields = {"project": "mussel"}
    path = DatastoreAdminClient.common_project_path(**fields)
    # Check that the path construction is reversible.
    assert DatastoreAdminClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """Building the location resource path produces the expected template."""
    assert (
        DatastoreAdminClient.common_location_path("winkle", "nautilus")
        == "projects/winkle/locations/nautilus"
    )
def test_parse_common_location_path():
    """Parsing a built location path recovers the original fields (round-trip)."""
    fields = {"project": "scallop", "location": "abalone"}
    path = DatastoreAdminClient.common_location_path(**fields)
    # Check that the path construction is reversible.
    assert DatastoreAdminClient.parse_common_location_path(path) == fields
def test_client_withDEFAULT_CLIENT_INFO():
    """A custom client_info is forwarded to _prep_wrapped_messages for both client and transport."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Instantiating the client forwards client_info to the transport's
    # message-wrapping step.
    with mock.patch.object(
        transports.DatastoreAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        client = DatastoreAdminClient(
            credentials=credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Instantiating the transport class directly does the same.
    with mock.patch.object(
        transports.DatastoreAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = DatastoreAdminClient.get_transport_class()
        transport = transport_class(
            credentials=credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
# ---- file boundary: the remainder of this chunk is a separate module ----
# repo: remotesensinginfo/eodatadown  (concatenation artifact kept as a comment so the text parses)
#!/usr/bin/env python
"""
EODataDown - a set of functions to provide a simplified python interface to ARCSI.
"""
# This file is part of 'EODataDown'
# A tool for automating Earth Observation Data Downloading.
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Purpose: Provides a set of functions to provide a simplified python interface to ARCSI.
#
# Author: <NAME>
# Email: <EMAIL>
# Date: 09/08/2018
# Version: 1.0
#
# History:
# Version 1.0 - Created.
import logging
import json
import os.path
import rsgislib.vectorutils
import rsgislib.imageutils
from eodatadown.eodatadownutils import EODataDownException
import eodatadown.eodatadownutils
logger = logging.getLogger(__name__)
def run_arcsi_landsat(input_mtl, dem_file, output_dir, tmp_dir, spacecraft_str, sensor_str, reproj_outputs, proj_wkt_file, projabbv):
    """
    Run ARCSI for a Landsat scene through the python API rather than the
    command line interface.

    :param input_mtl: path to the Landsat MTL header file for the scene.
    :param dem_file: DEM covering the scene.
    :param output_dir: directory for the ARCSI output products.
    :param tmp_dir: directory for intermediate/temporary files.
    :param spacecraft_str: spacecraft identifier from the scene metadata (e.g. "LANDSAT_8").
    :param sensor_str: sensor identifier from the scene metadata (e.g. "OLI_TIRS").
    :param reproj_outputs: if True, reproject outputs using proj_wkt_file/projabbv.
    :param proj_wkt_file: WKT file defining the output projection (ignored when not reprojecting).
    :param projabbv: abbreviation for the output projection (ignored when not reprojecting).
    :return:
    """
    import arcsilib.arcsirun
    # Map the (spacecraft, sensor) pair from the metadata onto ARCSI's
    # internal sensor identifiers.
    sensor_lookup = {
        ("LANDSAT_8", "OLI_TIRS"): "ls8",
        ("LANDSAT_7", "ETM"): "ls7",
        ("LANDSAT_5", "TM"): "ls5tm",
        ("LANDSAT_4", "TM"): "ls4tm",
    }
    arcsi_sensor_str = sensor_lookup.get((spacecraft_str, sensor_str))
    if arcsi_sensor_str is None:
        logger.error("Did not recognise the spacecraft and sensor combination. (" + spacecraft_str + ", " + sensor_str + ")")
        raise EODataDownException("Did not recognise the spacecraft and sensor combination.")
    if not reproj_outputs:
        proj_wkt_file = None
        projabbv = None
    logger.info("Starting to run ARCSI for: "+input_mtl)
    arcsilib.arcsirun.runARCSI(input_mtl, None, None, arcsi_sensor_str, None, "KEA",
                               output_dir, None, proj_wkt_file, None, projabbv, None, None,
                               ["CLOUDS", "DOSAOTSGL", "STDSREF", "SATURATE", "TOPOSHADOW", "FOOTPRINT", "METADATA"],
                               True, None, None, arcsilib.DEFAULT_ARCSI_AEROIMG_PATH, arcsilib.DEFAULT_ARCSI_ATMOSIMG_PATH,
                               "GreenVegetation", 0, None, None, False, None, None, None, None, False,
                               None, None, tmp_dir, 0.05, 0.5, 0.1, 0.4, dem_file, None, None, True,
                               20, False, False, 1000, "cubic", "near", 3000, 3000, 1000, 21,
                               True, False, False, None, None, False, None, 'LSMSK')
    logger.info("Finished running ARCSI for: " + input_mtl)
def run_arcsi_sentinel2(input_hdr, dem_file, output_dir, tmp_dir, reproj_outputs, proj_wkt_file, projabbv, low_res=False):
    """
    Run ARCSI for a Sentinel-2 scene using python rather than the
    command line interface. (Docstring previously said "landsat" — copy-paste.)

    :param input_hdr: the input image header file for the scene.
    :param dem_file: DEM covering the scene.
    :param output_dir: directory for the ARCSI output products.
    :param tmp_dir: directory for intermediate/temporary files.
    :param reproj_outputs: if True, reproject outputs using proj_wkt_file/projabbv.
    :param proj_wkt_file: WKT file defining the output projection (ignored when not reprojecting).
    :param projabbv: abbreviation for the output projection (ignored when not reprojecting).
    :param low_res: if True, process without the "SHARP" (sharpening) product
                    and pass the low-resolution flag through to runARCSI.
    :return:
    """
    import arcsilib.arcsirun
    if not reproj_outputs:
        proj_wkt_file = None
        projabbv = None
    # The original duplicated the entire 8-line runARCSI call for the two
    # resolutions; the calls differed only in the product list (full-res adds
    # "SHARP") and in the low-res boolean, so build one call instead.
    products = ["CLOUDS", "DOSAOTSGL", "STDSREF", "SATURATE", "TOPOSHADOW", "FOOTPRINT", "METADATA"]
    if not low_res:
        products.append("SHARP")
    logger.info("Starting to run ARCSI for: "+input_hdr)
    arcsilib.arcsirun.runARCSI(input_hdr, None, None, "sen2", None, "KEA",
                               output_dir, None, proj_wkt_file, None, projabbv, None, None,
                               products,
                               True, None, None, arcsilib.DEFAULT_ARCSI_AEROIMG_PATH, arcsilib.DEFAULT_ARCSI_ATMOSIMG_PATH,
                               "GreenVegetation", 0, None, None, False, None, None, None, None, False,
                               None, None, tmp_dir, 0.05, 0.5, 0.1, 0.4, dem_file, None, None, True,
                               20, False, False, 1000, "cubic", "near", 3000, 3000, 1000, 21,
                               True, False, False, None, None, low_res, None, 'S2LESSFMSK')
    logger.info("Finished running ARCSI for: " + input_hdr)
def run_arcsi_rapideye(input_xml, dem_file, output_dir, tmp_dir, reproj_outputs, proj_wkt_file, projabbv):
    """
    A function to run ARCSI for a RapidEye scene using python rather than
    the command line interface. (Docstring previously said "landsat" — copy-paste.)
    :param input_xml: the input RapidEye metadata XML file for the scene.
    :param reproj_outputs: if True, reproject outputs using proj_wkt_file/projabbv.
    :param proj_wkt_file: WKT file defining the output projection.
    :param projabbv: abbreviation for the output projection.
    :param dem_file: DEM covering the scene.
    :param output_dir: directory for the ARCSI output products.
    :param tmp_dir: directory for intermediate/temporary files.
    :return:
    """
    import arcsilib.arcsirun
    if not reproj_outputs:
        proj_wkt_file = None
        projabbv = None
    # Debug mode off here (the planetscope wrapper has it on).
    debug_mode = False
    logger.info("Starting to run ARCSI for: "+input_xml)
    arcsilib.arcsirun.runARCSI(input_xml, None, None, "rapideye", None, "KEA",
                               output_dir, None, proj_wkt_file, None, projabbv, None, None,
                               ["DOSAOTSGL", "STDSREF", "SATURATE", "TOPOSHADOW", "FOOTPRINT", "METADATA"],
                               True, None, None, arcsilib.DEFAULT_ARCSI_AEROIMG_PATH, arcsilib.DEFAULT_ARCSI_ATMOSIMG_PATH,
                               "GreenVegetation", 0, None, None, False, None, None, None, None, False,
                               None, None, tmp_dir, 0.05, 0.5, 0.1, 0.4, dem_file, None, None, True,
                               20, False, debug_mode, 1000, "cubic", "near", 3000, 3000, 1000, 21,
                               True, False, False, None, None, False, None, 'FMASK')
    logger.info("Finished running ARCSI for: " + input_xml)
def run_arcsi_planetscope(input_xml, output_dir, tmp_dir, reproj_outputs, proj_wkt_file, projabbv):
    """
    A function to run ARCSI for a planetscope scene using python rather than
    the command line interface.
    :param reproj_outputs: if True, reproject outputs using proj_wkt_file/projabbv.
    :param proj_wkt_file: WKT file defining the output projection.
    :param projabbv: abbreviation for the output projection.
    :param input_xml: the input PlanetScope metadata XML file for the scene.
    :param output_dir: directory for the ARCSI output products.
    :param tmp_dir: directory for intermediate/temporary files.
    :return:
    """
    import arcsilib.arcsirun
    if not reproj_outputs:
        proj_wkt_file = None
        projabbv = None
    # PlanetScope is processed here without a DEM (only TOA/DOS products requested).
    dem_file = None
    # NOTE(review): debug mode is left ON here, unlike the rapideye wrapper
    # which uses False — confirm this is intentional and not a leftover.
    debug_mode = True
    logger.info("Starting to run ARCSI for: " + input_xml)
    arcsilib.arcsirun.runARCSI(input_xml, None, None, "planetscope", None, "KEA",
                               output_dir, None, proj_wkt_file, None, projabbv, None, None,
                               ["TOA", "DOS", "SATURATE", "FOOTPRINT", "METADATA"],
                               True, None, None, arcsilib.DEFAULT_ARCSI_AEROIMG_PATH, arcsilib.DEFAULT_ARCSI_ATMOSIMG_PATH,
                               "GreenVegetation", 0, None, None, False, None, None, None, None, False,
                               None, None, tmp_dir, 0.05, 0.5, 0.1, 0.4, dem_file, None, None, True,
                               20, False, debug_mode, 1000, "cubic", "near", 3000, 3000, 1000, 21,
                               True, False, False, None, None, False, None, 'FMASK')
    logger.info("Finished running ARCSI for: " + input_xml)
def move_arcsi_stdsref_products(arcsi_out_dir, ard_products_dir, use_roi, intersect_vec_file, intersect_vec_lyr,
                                subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir):
    """
    A function to copy the outputs from ARCSI to the appropriate directory for EODataDown.

    Products are read from the ARCSI *meta.json file. Scenes with >= 95 % cloud
    cover (or with no ARCSI_CLOUD_COVER entry at all) are rejected. When use_roi
    is True the scene must intersect the ROI and each raster product is
    subset/masked before conversion to a cloud-optimised GeoTIFF; otherwise the
    products are converted directly.

    :param arcsi_out_dir: the output directory for arcsi where files should be copied from
    :param ard_products_dir: the directory where the appropriate files should be copied too.
    :param use_roi: if True, test intersection with the ROI and subset/mask the outputs.
    :param intersect_vec_file: vector file used for the ROI intersection test.
    :param intersect_vec_lyr: layer name within intersect_vec_file.
    :param subset_vec_file: vector file used to subset the imagery.
    :param subset_vec_lyr: layer name within subset_vec_file.
    :param mask_outputs: if True, also mask the subset imagery with mask_vec_file/lyr.
    :param mask_vec_file: vector file used to mask the imagery.
    :param mask_vec_lyr: layer name within mask_vec_file.
    :param tmp_dir: directory for intermediate files.
    :return: bool True - valid result and task completed.
                  False - invalid result ARD not produced (e.g., 100% cloud cover)
    """
    eoddutils = eodatadown.eodatadownutils.EODataDownUtils()
    # ARCSI writes a single *meta.json file describing all products it produced.
    metadata_file = eoddutils.findFile(arcsi_out_dir, "*meta.json")
    with open(metadata_file) as f:
        meta_data_json = json.load(f)
    json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()
    if json_parse_helper.doesPathExist(meta_data_json, ["ProductsInfo","ARCSI_CLOUD_COVER"]):
        # Cloud cover is a fraction in [0, 1].
        cloud_cover = json_parse_helper.getNumericValue(meta_data_json, ["ProductsInfo","ARCSI_CLOUD_COVER"], valid_lower=0.0, valid_upper=1.0)
        # Maps product keys to the final (GeoTIFF) file names; replaces the
        # FileInfo section of the metadata before it is re-written below.
        out_file_info_dict = dict()
        if cloud_cover < 0.95:
            if use_roi:
                valid_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "VALID_MASK"])
                # Only keep the scene if its valid-data footprint intersects the ROI.
                if rsgislib.vectorutils.does_vmsk_img_intersect(os.path.join(arcsi_out_dir, valid_msk_image), intersect_vec_file, intersect_vec_lyr, tmp_dir, vec_epsg=None):
                    # For each raster product: subset/mask into tmp_dir, then
                    # convert to a cloud-optimised GeoTIFF in ard_products_dir.
                    sref_mskd_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "STD_SREF_IMG"])
                    sref_mskd_image_path = os.path.join(arcsi_out_dir, sref_mskd_image)
                    sref_mskd_image_sub_path = os.path.join(tmp_dir, sref_mskd_image)
                    eoddutils.subsetMaskImg(sref_mskd_image_path, sref_mskd_image_sub_path, "KEA", subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir)
                    sref_mskd_image_tif = eoddutils.translateCloudOpGTIFF(sref_mskd_image_sub_path, ard_products_dir)
                    out_file_info_dict["STD_SREF_IMG"] = sref_mskd_image_tif
                    sref_full_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "STD_SREF_WHOLE_IMG"])
                    sref_full_image_path = os.path.join(arcsi_out_dir, sref_full_image)
                    sref_full_image_sub_path = os.path.join(tmp_dir, sref_full_image)
                    eoddutils.subsetMaskImg(sref_full_image_path, sref_full_image_sub_path, "KEA", subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir)
                    sref_full_image_tif = eoddutils.translateCloudOpGTIFF(sref_full_image_sub_path, ard_products_dir)
                    out_file_info_dict["STD_SREF_WHOLE_IMG"] = sref_full_image_tif
                    # The cloud mask is optional (e.g. not generated for some sensors).
                    try:
                        cloud_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "CLOUD_MASK"])
                        cloud_msk_image_path = os.path.join(arcsi_out_dir, cloud_msk_image)
                        cloud_msk_image_sub_path = os.path.join(tmp_dir, cloud_msk_image)
                        eoddutils.subsetMaskImg(cloud_msk_image_path, cloud_msk_image_sub_path, "KEA", subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir)
                        cloud_msk_image_tif = eoddutils.translateCloudOpGTIFF(cloud_msk_image_sub_path, ard_products_dir)
                        out_file_info_dict["CLOUD_MASK"] = cloud_msk_image_tif
                    except Exception as e:
                        logger.info("Cloud mask was not available - assume it wasn't calculated")
                    valid_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "VALID_MASK"])
                    valid_msk_image_path = os.path.join(arcsi_out_dir, valid_msk_image)
                    valid_msk_image_sub_path = os.path.join(tmp_dir, valid_msk_image)
                    eoddutils.subsetMaskImg(valid_msk_image_path, valid_msk_image_sub_path, "KEA", subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir)
                    valid_msk_image_tif = eoddutils.translateCloudOpGTIFF(valid_msk_image_sub_path, ard_products_dir)
                    out_file_info_dict["VALID_MASK"] = valid_msk_image_tif
                    topo_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "TOPO_SHADOW_MASK"])
                    topo_msk_image_path = os.path.join(arcsi_out_dir, topo_msk_image)
                    topo_msk_image_sub_path = os.path.join(tmp_dir, topo_msk_image)
                    eoddutils.subsetMaskImg(topo_msk_image_path, topo_msk_image_sub_path, "KEA", subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir)
                    topo_msk_image_tif = eoddutils.translateCloudOpGTIFF(topo_msk_image_sub_path, ard_products_dir)
                    out_file_info_dict["TOPO_SHADOW_MASK"] = topo_msk_image_tif
                    view_angle_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "VIEW_ANGLE"])
                    view_angle_image_path = os.path.join(arcsi_out_dir, view_angle_image)
                    view_angle_image_sub_path = os.path.join(tmp_dir, view_angle_image)
                    eoddutils.subsetMaskImg(view_angle_image_path, view_angle_image_sub_path, "KEA", subset_vec_file, subset_vec_lyr, mask_outputs, mask_vec_file, mask_vec_lyr, tmp_dir)
                    view_angle_image_tif = eoddutils.translateCloudOpGTIFF(view_angle_image_sub_path, ard_products_dir)
                    out_file_info_dict["VIEW_ANGLE"] = view_angle_image_tif
                    # The footprint vector may consist of several sidecar files
                    # sharing a common basename, hence moveFilesWithBase2DIR.
                    footprint_vec = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "FOOTPRINT"])
                    eoddutils.moveFilesWithBase2DIR(os.path.join(arcsi_out_dir, footprint_vec), ard_products_dir)
                    out_file_info_dict["FOOTPRINT"] = footprint_vec
                    out_file_info_dict["METADATA"] = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "METADATA"])
                    out_file_info_dict["ProviderMetadata"] = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "ProviderMetadata"])
                    out_file_info_dict["FileBaseName"] = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "FileBaseName"])
                    # Re-write the metadata with FileInfo pointing at the new files.
                    meta_data_json["FileInfo"] = out_file_info_dict
                    metadata_json_file = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "METADATA"])
                    # NOTE(review): the updated metadata is written back into
                    # arcsi_out_dir (not ard_products_dir) - presumably collected
                    # by the caller; confirm this is intentional.
                    output_meta_data_file = os.path.join(arcsi_out_dir, metadata_json_file)
                    with open(output_meta_data_file, 'w') as outfile:
                        json.dump(meta_data_json, outfile, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
                else:
                    # Scene does not intersect the ROI.
                    return False
            else:
                # No ROI: translate the full products directly to GeoTIFF.
                sref_mskd_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "STD_SREF_IMG"])
                sref_mskd_image_tif = eoddutils.translateCloudOpGTIFF(os.path.join(arcsi_out_dir, sref_mskd_image), ard_products_dir)
                out_file_info_dict["STD_SREF_IMG"] = sref_mskd_image_tif
                sref_full_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "STD_SREF_WHOLE_IMG"])
                sref_full_image_tif = eoddutils.translateCloudOpGTIFF(os.path.join(arcsi_out_dir, sref_full_image), ard_products_dir)
                out_file_info_dict["STD_SREF_WHOLE_IMG"] = sref_full_image_tif
                # The cloud mask is optional (e.g. not generated for some sensors).
                try:
                    cloud_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "CLOUD_MASK"])
                    cloud_msk_image_tif = eoddutils.translateCloudOpGTIFF(os.path.join(arcsi_out_dir, cloud_msk_image), ard_products_dir)
                    out_file_info_dict["CLOUD_MASK"] = cloud_msk_image_tif
                except Exception as e:
                    logger.info("Cloud mask was not available - assume it wasn't calculated")
                valid_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "VALID_MASK"])
                valid_msk_image_tif = eoddutils.translateCloudOpGTIFF(os.path.join(arcsi_out_dir, valid_msk_image), ard_products_dir)
                out_file_info_dict["VALID_MASK"] = valid_msk_image_tif
                topo_msk_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "TOPO_SHADOW_MASK"])
                topo_msk_image_tif = eoddutils.translateCloudOpGTIFF(os.path.join(arcsi_out_dir, topo_msk_image), ard_products_dir)
                out_file_info_dict["TOPO_SHADOW_MASK"] = topo_msk_image_tif
                view_angle_image = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "VIEW_ANGLE"])
                view_angle_image_tif = eoddutils.translateCloudOpGTIFF(os.path.join(arcsi_out_dir, view_angle_image), ard_products_dir)
                out_file_info_dict["VIEW_ANGLE"] = view_angle_image_tif
                footprint_vec = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "FOOTPRINT"])
                eoddutils.moveFilesWithBase2DIR(os.path.join(arcsi_out_dir, footprint_vec), ard_products_dir)
                out_file_info_dict["FOOTPRINT"] = footprint_vec
                out_file_info_dict["METADATA"] = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "METADATA"])
                out_file_info_dict["ProviderMetadata"] = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "ProviderMetadata"])
                out_file_info_dict["FileBaseName"] = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "FileBaseName"])
                # Re-write the metadata with FileInfo pointing at the new files.
                meta_data_json["FileInfo"] = out_file_info_dict
                metadata_json_file = json_parse_helper.getStrValue(meta_data_json, ["FileInfo", "METADATA"])
                # NOTE(review): written into arcsi_out_dir, as in the ROI branch.
                output_meta_data_file = os.path.join(arcsi_out_dir, metadata_json_file)
                with open(output_meta_data_file, 'w') as outfile:
                    json.dump(meta_data_json, outfile, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
        else:
            # Too cloudy (>= 95 %).
            return False
    else:
        # No cloud-cover entry in the metadata: treat as invalid.
        return False
    return True
def move_arcsi_dos_products(arcsi_out_dir, ard_products_dir):
    """
    Move the ARCSI DOS outputs into the EODataDown products directory.

    The products are located via the *meta.json file ARCSI writes alongside
    its outputs.

    :param arcsi_out_dir: the output directory for arcsi where files should be copied from
    :param ard_products_dir: the directory where the appropriate files should be copied too.
    :return: bool True - valid result and task completed.
             False - invalid result ARD not produced
    """
    eodd_utils = eodatadown.eodatadownutils.EODataDownUtils()
    parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()
    # Load the ARCSI metadata JSON describing the produced files.
    with open(eodd_utils.findFile(arcsi_out_dir, "*meta.json")) as meta_fh:
        meta = json.load(meta_fh)

    def _product_path(product_key):
        # Resolve a product's absolute path from the metadata 'FileInfo' section.
        return os.path.join(arcsi_out_dir, parse_helper.getStrValue(meta, ["FileInfo", product_key]))

    # DOS surface reflectance image and the valid-data mask are single files.
    eodd_utils.moveFile2DIR(_product_path("SREF_DOS_IMG"), ard_products_dir)
    eodd_utils.moveFile2DIR(_product_path("VALID_MASK"), ard_products_dir)
    # The footprint vector may have several sidecar files sharing a basename.
    eodd_utils.moveFilesWithBase2DIR(_product_path("FOOTPRINT"), ard_products_dir)
    # Keep a copy (not a move) of the metadata JSON with the products.
    eodd_utils.copyFile2DIR(_product_path("METADATA"), ard_products_dir)
    return True
|
#https://raw.githubusercontent.com/PIA-Group/BioSPPy/master/biosppy/signals/tools.py
from __future__ import absolute_import, division, print_function
from six.moves import range
import six
import utils
# 3rd party
import numpy as np
import scipy.signal as ss
from scipy import interpolate, optimize
from scipy.stats import stats
def _get_window(kernel, size, **kwargs):
"""Return a window with the specified parameters.
Parameters
----------
kernel : str
Type of window to create.
size : int
Size of the window.
``**kwargs`` : dict, optional
Additional keyword arguments are passed to the underlying
scipy.signal.windows function.
Returns
-------
window : array
Created window.
"""
# mimics scipy.signal.get_window
if kernel in ['blackman', 'black', 'blk']:
winfunc = ss.blackman
elif kernel in ['triangle', 'triang', 'tri']:
winfunc = ss.triang
elif kernel in ['hamming', 'hamm', 'ham']:
winfunc = ss.hamming
elif kernel in ['bartlett', 'bart', 'brt']:
winfunc = ss.bartlett
elif kernel in ['hanning', 'hann', 'han']:
winfunc = ss.hann
elif kernel in ['blackmanharris', 'blackharr', 'bkh']:
winfunc = ss.blackmanharris
elif kernel in ['parzen', 'parz', 'par']:
winfunc = ss.parzen
elif kernel in ['bohman', 'bman', 'bmn']:
winfunc = ss.bohman
elif kernel in ['nuttall', 'nutl', 'nut']:
winfunc = ss.nuttall
elif kernel in ['barthann', 'brthan', 'bth']:
winfunc = ss.barthann
elif kernel in ['flattop', 'flat', 'flt']:
winfunc = ss.flattop
elif kernel in ['kaiser', 'ksr']:
winfunc = ss.kaiser
elif kernel in ['gaussian', 'gauss', 'gss']:
winfunc = ss.gaussian
elif kernel in ['general gaussian', 'general_gaussian', 'general gauss',
'general_gauss', 'ggs']:
winfunc = ss.general_gaussian
elif kernel in ['boxcar', 'box', 'ones', 'rect', 'rectangular']:
winfunc = ss.boxcar
elif kernel in ['slepian', 'slep', 'optimal', 'dpss', 'dss']:
winfunc = ss.slepian
elif kernel in ['cosine', 'halfcosine']:
winfunc = ss.cosine
elif kernel in ['chebwin', 'cheb']:
winfunc = ss.chebwin
else:
raise ValueError("Unknown window type.")
try:
window = winfunc(size, **kwargs)
except TypeError as e:
raise TypeError("Invalid window arguments: %s." % e)
return window
def smoother(signal=None, kernel='boxzen', size=10, mirror=True, **kwargs):
    """Smooth a signal using an N-point moving average [MAvg]_ filter.
    This implementation uses the convolution of a filter kernel with the input
    signal to compute the smoothed signal [Smit97]_.
    Available kernels: median, boxzen, boxcar, triang, blackman, hamming, hann,
    bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann,
    kaiser (needs beta), gaussian (needs std), general_gaussian (needs power,
    width), slepian (needs width), chebwin (needs attenuation).
    Parameters
    ----------
    signal : array
        Signal to smooth.
    kernel : str, array, optional
        Type of kernel to use; if array, use directly as the kernel.
    size : int, optional
        Size of the kernel; ignored if kernel is an array.
        Clipped to [1, len(signal) - 1] for string kernels.
    mirror : bool, optional
        If True, signal edges are extended to avoid boundary effects.
    ``**kwargs`` : dict, optional
        Additional keyword arguments are passed to the underlying
        scipy.signal.windows function.
    Returns
    -------
    signal : array
        Smoothed signal.
    params : dict
        Smoother parameters.
    Notes
    -----
    * When the kernel is 'median', mirror is ignored.
    References
    ----------
    .. [MAvg] Wikipedia, "Moving Average",
       http://en.wikipedia.org/wiki/Moving_average
    .. [Smit97] <NAME>, "Moving Average Filters - Implementation by
       Convolution", http://www.dspguide.com/ch15/1.htm, 1997
    """
    # check inputs
    if signal is None:
        raise TypeError("Please specify a signal to smooth.")
    length = len(signal)
    if isinstance(kernel, six.string_types):
        # check length: clamp the kernel size into a usable range
        if size > length:
            size = length - 1
        if size < 1:
            size = 1
        if kernel == 'boxzen':
            # hybrid method: boxcar pass followed by a parzen pass
            # 1st pass - boxcar kernel
            aux, _ = smoother(signal,
                              kernel='boxcar',
                              size=size,
                              mirror=mirror)
            # 2nd pass - parzen kernel
            smoothed, _ = smoother(aux,
                                   kernel='parzen',
                                   size=size,
                                   mirror=mirror)
            params = {'kernel': kernel, 'size': size, 'mirror': mirror}
            args = (smoothed, params)
            names = ('signal', 'params')
            return utils.ReturnTuple(args, names)
        elif kernel == 'median':
            # median filter (mirror is not applicable here)
            if size % 2 == 0:
                raise ValueError(
                    "When the kernel is 'median', size must be odd.")
            smoothed = ss.medfilt(signal, kernel_size=size)
            params = {'kernel': kernel, 'size': size, 'mirror': mirror}
            args = (smoothed, params)
            names = ('signal', 'params')
            return utils.ReturnTuple(args, names)
        else:
            # any other named kernel is built via _get_window
            win = _get_window(kernel, size, **kwargs)
    elif isinstance(kernel, np.ndarray):
        # caller supplied the kernel directly; size argument is ignored
        win = kernel
        size = len(win)
        # check length
        if size > length:
            raise ValueError("Kernel size is bigger than signal length.")
        if size < 1:
            raise ValueError("Kernel size is smaller than 1.")
    else:
        raise TypeError("Unknown kernel type.")
    # convolve with the normalised (unit-sum) kernel
    w = win / win.sum()
    if mirror:
        # pad both ends with the edge values, then crop the padding back off
        aux = np.concatenate(
            (signal[0] * np.ones(size), signal, signal[-1] * np.ones(size)))
        smoothed = np.convolve(w, aux, mode='same')
        smoothed = smoothed[size:-size]
    else:
        smoothed = np.convolve(w, signal, mode='same')
    # output
    params = {'kernel': kernel, 'size': size, 'mirror': mirror}
    params.update(kwargs)
    args = (smoothed, params)
    names = ('signal', 'params')
    return utils.ReturnTuple(args, names)
def zero_cross(signal=None, detrend=False):
    """Locate the indices where the signal crosses zero.
    Parameters
    ----------
    signal : array
        Input signal.
    detrend : bool, optional
        If True, remove signal mean before computation.
    Returns
    -------
    zeros : array
        Indices of zero crossings.
    Notes
    -----
    * When the signal crosses zero between samples, the first index
      is returned.
    """
    if signal is None:
        raise TypeError("Please specify an input signal.")
    data = signal - np.mean(signal) if detrend else signal
    # A crossing appears as a step in the sequence of signs.
    sign_steps = np.diff(np.sign(data))
    crossings = np.nonzero(np.abs(sign_steps) > 0)[0]
    return utils.ReturnTuple((crossings,), ('zeros',))
|
<filename>PythonScripts/get_Rho.py
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import canyon_tools.readout_tools as rout
#from MITgcmutils import rdmds # cant make it work
# --- Input files (run38 is the reference run for the grid and pressure) ---
CGrid = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/gridGlob.nc' # Smallest volume grid, closed bdy, no canyon.
phiHyd = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/phiHydGlob.nc'
pout = Dataset(phiHyd)
CGridOut = Dataset(CGrid)
# General input: model grid dimensions
nx = 360
ny = 360
nz = 90
nt = 19 # t dimension size
# Grid geometry fields (some loaded for reference; not all are used below)
rc = CGridOut.variables['RC']
xc = rout.getField(CGrid, 'XC') # x coords tracer cells
yc = rout.getField(CGrid, 'YC') # y coords tracer cells
drF = CGridOut.variables['drF'] # vertical distance between faces
drC = CGridOut.variables['drC'] # vertical distance between centers
hFacC = rout.getField(CGrid, 'HFacC')
MaskC = rout.getMask(CGrid, 'HFacC')
rA = rout.getField(CGrid, 'rA')
Tp = pout.variables['T']
bathy = rout.getField(CGrid, 'Depth')
# STATIONS: (y, x) grid indices and labels of the profile stations
ys = [262,220,262,227,100,245,245,262,220]
xs = [60,60,180,180,180,160,200,300,300]
stations = ['UpSh','UpSl','CH','CM','CO','UpC','DnC','DnSh','DnSl']
#All experiments in CNT and 3D including no canyon one (run07)
expList = ['/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run36',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run37',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run43',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run44',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run45',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run46',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run51',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run52',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run55',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run56',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run57',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run61',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run62',
           '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run63',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DVISC/run01',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DVISC/run02',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DVISC/run03',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DVISC/run04',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DDIFF/run04',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DDIFF/run05',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DDIFF/run06',
           '/ocean/kramosmu/MITgcm/TracerExperiments/3DDIFF/run07']
# Short labels used in the output CSV file names (same order as expList)
expNames = ['CNTDIFF_run36',
            'CNTDIFF_run37',
            'CNTDIFF_run38',
            'CNTDIFF_run43',
            'CNTDIFF_run44',
            'CNTDIFF_run45',
            'CNTDIFF_run46',
            'CNTDIFF_run51',
            'CNTDIFF_run52',
            'CNTDIFF_run55',
            'CNTDIFF_run56',
            'CNTDIFF_run57',
            'CNTDIFF_run61',
            'CNTDIFF_run62',
            'CNTDIFF_run63',
            '3DVISC_run01',
            '3DVISC_run02',
            '3DVISC_run03',
            '3DVISC_run04',
            '3DDIFF_run04',
            '3DDIFF_run05',
            '3DDIFF_run06',
            '3DDIFF_run07']
#RhoRef = np.squeeze(rdmds('/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run38/RhoRef')) # I cannot make this function work
RhoRef = 999.79998779 # It is constant throughout my runs
# Assorted index limits (presumably subdomain bounds; several are unused
# in this script - TODO confirm against sibling analysis scripts)
nzlim = 30
zfin = 30
xi = 180
yi = 50
xh1=120
xh2=240
yh1=227
yh2=267
# Linear equation-of-state parameters
g = 9.81 # ms^-2
alpha = 2.0E-4 # 1/degC
beta = 7.4E-4
# Output time indices (model snapshots) written to the CSV columns below
times = [0,2,4,6,8,10,12,14,16,18]
# For each experiment, compute density profiles (linear equation of state)
# at every station and time, and write one CSV per (experiment, station).
for exp, runs in zip(expList, expNames):
    print(runs)
    CState = ('%s/stateGlob.nc' % exp)
    Temp = rout.getField(CState, 'Temp')
    S = rout.getField(CState, 'S')
    # NOTE(review): the original also read P = rout.getField(phiHyd, 'phiHyd')
    # here, but the value was never used; the expensive per-iteration read
    # has been removed.
    # Broadcast the land/ocean mask over the time axis.
    MaskExpand = np.expand_dims(MaskC, 0)
    maskExp = MaskExpand + np.zeros((Temp).shape)
    TempMask = np.ma.array(Temp, mask=maskExp)
    SMask = np.ma.array(S, mask=maskExp)
    print(runs, 'done reading')
    # Station indices renamed (st_y, st_x) so they no longer shadow the
    # module-level yi/xi defined above.
    for st_y, st_x, sname in zip(ys, xs, stations):
        Rho = np.ma.empty((len(times), nz))
        for ii, tt in enumerate(times):
            # Linear eq. of state: rho = RhoRef * (1 - alpha*T + beta*S)
            rho = RhoRef * (np.ones(np.shape(TempMask[tt, :, st_y, st_x]))
                            - alpha * (TempMask[tt, :, st_y, st_x])
                            + beta * (SMask[tt, :, st_y, st_x]))
            Rho[ii, :] = rho
        raw_data = {'drC' : drC[:-1],'rho_tt00': Rho[0,:],'rho_tt02': Rho[1,:],'rho_tt04': Rho[2,:],'rho_tt06': Rho[3,:],
                    'rho_tt08': Rho[4,:],'rho_tt10': Rho[5,:],'rho_tt12': Rho[6,:],'rho_tt14': Rho[7,:],'rho_tt16': Rho[8,:],
                    'rho_tt18': Rho[9,:]}
        df = pd.DataFrame(raw_data, columns = ['drC', 'rho_tt00', 'rho_tt02', 'rho_tt04', 'rho_tt06', 'rho_tt08','rho_tt10',
                                               'rho_tt12','rho_tt14', 'rho_tt16','rho_tt18' ])
        filename1 = ('../results/metricsDataFrames/rho_%s_%s.csv' % (runs, sname))
        df.to_csv(filename1)
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on: https://github.com/nutonomy/second.pytorch.git
#
# MIT License
# Copyright (c) 2018
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numba
import pathlib
import numpy as np
import pickle
from functools import partial
from collections import defaultdict
from utils import box_np_ops
from datasets import kitti_common as kitti
def merge_second_batch(batch_list, _unused=False):
    """Collate a list of per-example dicts into a single batched dict.

    Per-key batching rules:
      * variable-length keys (voxels, num_points, num_gt, gt_boxes,
        voxel_labels, match_indices, match_indices_num) are concatenated
        along axis 0;
      * 'coordinates' additionally gets the batch index prepended as an
        extra leading column so per-example voxels stay distinguishable;
      * 'num_voxels' is per-example bookkeeping and is dropped;
      * everything else is stacked along a new leading batch axis.

    :param batch_list: list of example dicts, all with the same keys.
    :param _unused: ignored; kept for interface compatibility.
    :return: dict of batched numpy arrays.
    """
    example_merged = defaultdict(list)
    for example in batch_list:
        for k, v in example.items():
            example_merged[k].append(v)
    ret = {}
    example_merged.pop("num_voxels")
    # Fixed: the original had a separate elif for 'match_indices_num' whose
    # body duplicated this exact concatenation; merged into one branch.
    concat_keys = {
        'voxels', 'num_points', 'num_gt', 'gt_boxes', 'voxel_labels',
        'match_indices', 'match_indices_num'
    }
    for key, elems in example_merged.items():
        if key in concat_keys:
            ret[key] = np.concatenate(elems, axis=0)
        elif key == 'coordinates':
            coors = []
            for i, coor in enumerate(elems):
                # Prepend the batch index i as a new first column.
                coor_pad = np.pad(
                    coor, ((0, 0), (1, 0)),
                    mode='constant',
                    constant_values=i)
                coors.append(coor_pad)
            ret[key] = np.concatenate(coors, axis=0)
        else:
            ret[key] = np.stack(elems, axis=0)
    return ret
def _read_and_prep_v9(info, root_path, num_point_features, prep_func):
    """read data from KITTI-format infos, then call prep function.

    Loads the (reduced) velodyne point cloud and calibration matrices for one
    frame, assembles an input dict, attaches ground-truth annotations when
    present, and runs prep_func over the result.

    :param info: one KITTI info dict (paths, calibration, optional 'annos').
    :param root_path: dataset root directory.
    :param num_point_features: number of float32 features per point in the
        velodyne file (used to reshape the flat binary read).
    :param prep_func: preprocessing callable taking input_dict= and returning
        the training example.
    """
    # velodyne_path = str(pathlib.Path(root_path) / info['velodyne_path'])
    # velodyne_path += '_reduced'
    # Redirect the lidar path into the sibling "<dir>_reduced" directory.
    v_path = pathlib.Path(root_path) / info['velodyne_path']
    v_path = v_path.parent.parent / (
        v_path.parent.stem + "_reduced") / v_path.name
    points = np.fromfile(
        str(v_path), dtype=np.float32,
        count=-1).reshape([-1, num_point_features])
    image_idx = info['image_idx']
    # Calibration: rectification and velodyne->camera transforms, camera P2.
    rect = info['calib/R0_rect'].astype(np.float32)
    Trv2c = info['calib/Tr_velo_to_cam'].astype(np.float32)
    P2 = info['calib/P2'].astype(np.float32)
    input_dict = {
        'points': points,
        'rect': rect,
        'Trv2c': Trv2c,
        'P2': P2,
        'image_shape': np.array(info["img_shape"], dtype=np.int32),
        'image_idx': image_idx,
        'image_path': info['img_path'],
        # 'pointcloud_num_features': num_point_features,
    }
    if 'annos' in info:
        annos = info['annos']
        # we need other objects to avoid collision when sample
        annos = kitti.remove_dontcare(annos)
        loc = annos["location"]
        dims = annos["dimensions"]
        rots = annos["rotation_y"]
        gt_names = annos["name"]
        # print(gt_names, len(loc))
        # Boxes as [x, y, z, l, h, w, ry] in camera coordinates.
        gt_boxes = np.concatenate(
            [loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
        # gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)
        difficulty = annos["difficulty"]
        input_dict.update({
            'gt_boxes': gt_boxes,
            'gt_names': gt_names,
            'difficulty': difficulty,
        })
        if 'group_ids' in annos:
            input_dict['group_ids'] = annos["group_ids"]
    example = prep_func(input_dict=input_dict)
    example["image_idx"] = image_idx
    example["image_shape"] = input_dict["image_shape"]
    if "anchors_mask" in example:
        # Compact the boolean mask for cheaper storage/transfer.
        example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
    return example
class Dataset(object):
    """Abstract base class for map-style (pytorch-like) datasets.

    Concrete datasets must override ``__len__`` (the dataset size) and
    ``__getitem__`` (integer indexing from 0 to ``len(self) - 1``).
    """

    def __len__(self):
        raise NotImplementedError

    def __getitem__(self, index):
        raise NotImplementedError
class KittiDataset(Dataset):
    """Map-style dataset over pickled KITTI info records.

    Loads the info list once, pre-computes the anchor grid for the given
    feature-map size, and binds it into prep_func so per-item preparation
    does not regenerate anchors.
    """
    def __init__(self, info_path, root_path, num_point_features,
                 target_assigner, feature_map_size, prep_func):
        # info_path is a pickle of the KITTI info dicts created offline.
        with open(info_path, 'rb') as f:
            infos = pickle.load(f)
        #self._kitti_infos = kitti.filter_infos_by_used_classes(infos, class_names)
        self._root_path = root_path
        self._kitti_infos = infos
        self._num_point_features = num_point_features
        print("remain number of infos:", len(self._kitti_infos))
        # generate anchors cache
        # [352, 400]
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        # Flatten to one row per anchor: [x, y, z, w, l, h, ry].
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        # Axis-aligned birds-eye-view boxes from the rotated 2D anchors.
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
        anchor_cache = {
            "anchors": anchors,
            "anchors_bv": anchors_bv,
            "matched_thresholds": matched_thresholds,
            "unmatched_thresholds": unmatched_thresholds,
        }
        # Bake the anchor cache into the preprocessing function.
        self._prep_func = partial(prep_func, anchor_cache=anchor_cache)
    def __len__(self):
        return len(self._kitti_infos)
    @property
    def kitti_infos(self):
        # Read-only access to the raw info records.
        return self._kitti_infos
    def __getitem__(self, idx):
        # Load and preprocess a single frame on demand.
        return _read_and_prep_v9(
            info=self._kitti_infos[idx],
            root_path=self._root_path,
            num_point_features=self._num_point_features,
            prep_func=self._prep_func)
# Numba nopython kernel: fills bev_map and coor_to_voxelidx IN PLACE.
# bev_map[-1] accumulates a per-cell point count; the first D channels hold a
# per-height-slice normalised max height; if with_reflectivity, bev_map[-2]
# records the reflectivity of the highest point seen so far in that cell.
@numba.jit(nopython=True)
def _points_to_bevmap_reverse_kernel(points,
                                     voxel_size,
                                     coors_range,
                                     coor_to_voxelidx,
                                     # coors_2d,
                                     bev_map,
                                     height_lowers,
                                     # density_norm_num=16,
                                     with_reflectivity=False,
                                     max_voxels=40000):
    # put all computations to one loop.
    # we shouldn't create large array in main jit code, otherwise
    # reduce performance
    N = points.shape[0]
    # Spatial dims only; the last point feature (reflectivity) is excluded.
    ndim = points.shape[1] - 1
    # ndim = 3
    ndim_minus_1 = ndim - 1
    grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
    # np.round(grid_size)
    # grid_size = np.round(grid_size).astype(np.int64)(np.int32)
    grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)
    height_slice_size = voxel_size[-1]
    coor = np.zeros(shape=(3, ), dtype=np.int32)  # DHW
    voxel_num = 0
    failed = False
    for i in range(N):
        failed = False
        # Voxelise point i; note coor is filled in REVERSED (DHW) order.
        for j in range(ndim):
            c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j])
            if c < 0 or c >= grid_size[j]:
                failed = True
                break
            coor[ndim_minus_1 - j] = c
        if failed:
            # Point lies outside coors_range: skip it.
            continue
        voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]
        if voxelidx == -1:
            # First point in this voxel: allocate a new voxel index.
            voxelidx = voxel_num
            if voxel_num >= max_voxels:
                # Voxel budget exhausted: stop processing further points.
                break
            voxel_num += 1
            coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx
            # coors_2d[voxelidx] = coor[1:]
        # Per-cell point count lives in the last channel.
        bev_map[-1, coor[1], coor[2]] += 1
        height_norm = bev_map[coor[0], coor[1], coor[2]]
        # Height of this point normalised within its height slice.
        incomimg_height_norm = (
            points[i, 2] - height_lowers[coor[0]]) / height_slice_size
        if incomimg_height_norm > height_norm:
            # Keep the max normalised height per (slice, cell).
            bev_map[coor[0], coor[1], coor[2]] = incomimg_height_norm
            if with_reflectivity:
                bev_map[-2, coor[1], coor[2]] = points[i, 3]
    # return voxel_num
def points_to_bev(points,
                  voxel_size,
                  coors_range,
                  with_reflectivity=False,
                  density_norm_num=16,
                  max_voxels=40000):
    """Convert a KITTI point cloud (N, 4) into a birds-eye-view map [C, H, W].

    Built on the same voxelisation scheme as points_to_voxel; takes about
    5 ms on a reduced point cloud with voxel_size=[0.1, 0.1, 0.8].

    Args:
        points: [N, ndim] float tensor; points[:, :3] are xyz and
            points[:, 3] holds reflectivity.
        voxel_size: [3] list/tuple or array, float; xyz voxel size.
        coors_range: [6] list/tuple or array, float; voxel range as
            xyzxyz (minmax).
        with_reflectivity: bool; if True an intensity channel is added.
        max_voxels: cap on the number of occupied voxels considered.

    Returns:
        bev_map: [num_height_maps + 1(2), H, W] float tensor.
            `WARNING`: bev_map[-1] is a per-cell point COUNT map, NOT a
            density map (computing density is slower on cpu than gpu).
            If with_reflectivity is True, bev_map[-2] is an intensity map.
    """
    out_dtype = points.dtype
    if not isinstance(voxel_size, np.ndarray):
        voxel_size = np.array(voxel_size, dtype=out_dtype)
    if not isinstance(coors_range, np.ndarray):
        coors_range = np.array(coors_range, dtype=out_dtype)
    # Voxel counts per axis (xyz) reversed into DHW ordering.
    extent = coors_range[3:] - coors_range[:3]
    grid_dhw = tuple(np.round(extent / voxel_size).astype(np.int32).tolist())[::-1]
    coor_to_voxelidx = -np.ones(shape=grid_dhw, dtype=np.int32)
    # One extra channel for the point-count map, one more for intensity.
    num_channels = grid_dhw[0] + (2 if with_reflectivity else 1)
    # Lower z bound of each height slice.
    height_lowers = np.linspace(
        coors_range[2], coors_range[5], grid_dhw[0], endpoint=False)
    bev_map = np.zeros(shape=[num_channels] + list(grid_dhw[1:]), dtype=out_dtype)
    _points_to_bevmap_reverse_kernel(
        points, voxel_size, coors_range, coor_to_voxelidx, bev_map,
        height_lowers, with_reflectivity, max_voxels)
    return bev_map
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    class_names=['Car'],
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    reduce_valid_area=False,
                    remove_unknown=False,
                    gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                    gt_loc_noise_std=[1.0, 1.0, 1.0],
                    global_rotation_noise=[-np.pi / 4, np.pi / 4],
                    global_scaling_noise=[0.95, 1.05],
                    global_loc_noise_std=(0.2, 0.2, 0.2),
                    global_random_rot_range=[0.78, 2.35],
                    generate_bev=False,
                    without_reflectivity=False,
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    add_rgb_to_points=False,
                    lidar_input=False,
                    unlabeled_db_sampler=None,
                    out_size_factor=2,
                    min_gt_point_dict=None,
                    bev_only=False,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths
    exists.

    In this trimmed version the steps performed are: optional point shuffle,
    voxelisation via voxel_generator, anchor generation (or reuse from
    anchor_cache), an anchors mask from voxel occupancy, and optionally a
    birds-eye-view map.  NOTE(review): many parameters (augmentation noise,
    samplers, etc.) are accepted for interface compatibility but are unused
    here - the corresponding logic is in the disabled string block below.
    """
    points = input_dict["points"]
    rect = input_dict["rect"]
    Trv2c = input_dict["Trv2c"]
    P2 = input_dict["P2"]
    '''
    if reference_detections is not None:
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        # frustums = np.linalg.inv(R) @ frustums.T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]
    if remove_outside_points and not lidar_input:
        image_shape = input_dict["image_shape"]
        points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
                                                  image_shape)
    if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_names, class_names)
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]
        points = prep.remove_points_outside_boxes(points, gt_boxes)
    '''
    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)
    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]
    # Voxelise the cloud; num_voxels is kept for downstream batching.
    voxels, coordinates, num_points = voxel_generator.generate(points, max_voxels)
    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }
    example.update({
        'rect': rect,
        'Trv2c': Trv2c,
        'P2': P2,
    })
    # if not lidar_input:
    # Feature-map size at the network output stride, in [D, H, W]-style order.
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        # Reuse the precomputed anchors (see KittiDataset.__init__).
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None
    if anchor_area_threshold >= 0:
        # Mark anchors that overlap enough occupied voxels; a 2D cumulative
        # sum over the voxel occupancy allows O(1) area queries per anchor.
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if generate_bev:
        # BEV grid uses half the xy resolution and double the z slice size.
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map
    return example
|
<reponame>Ensteinjun/mediapipe
# Copyright 2020 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mediapipe.python.solutions.pose."""
import math
import os
from absl.testing import absltest
from absl.testing import parameterized
import cv2
import numpy as np
import numpy.testing as npt
# resources dependency
from mediapipe.python.solutions import pose as mp_pose
# Directory (relative to the runfiles root) that holds the test image.
TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata'
# Maximum allowed per-coordinate deviation from the golden landmarks, in
# pixels.  NOTE(review): the name carries a typo ("THRESHOLOD") but it is
# referenced by the tests below, so it is kept unchanged here.
DIFF_THRESHOLOD = 30  # pixels
# Golden (x, y) pixel coordinates of the 33 pose landmarks expected on the
# test image; the upper-body tests compare only the first 25 entries.
EXPECTED_POSE_COORDINATES_PREDICTION = [[593, 645], [593, 626], [599, 621],
                                        [605, 617], [575, 637], [569, 640],
                                        [563, 643], [621, 616], [565, 652],
                                        [617, 652], [595, 667], [714, 662],
                                        [567, 749], [792, 559], [497, 844],
                                        [844, 435], [407, 906], [866, 403],
                                        [381, 921], [859, 392], [366, 922],
                                        [850, 405], [381, 918], [707, 948],
                                        [631, 940], [582, 1122], [599, 1097],
                                        [495, 1277], [641, 1239], [485, 1300],
                                        [658, 1257], [453, 1332], [626, 1308]]
class PoseTest(parameterized.TestCase):
  """End-to-end tests for the MediaPipe Pose solution."""

  def _verify_output_landmarks(self, landmark_list, image_shape, num_landmarks):
    """Assert landmark count and pixel-space closeness to the golden values."""
    self.assertLen(landmark_list.landmark, num_landmarks)
    image_rows, image_cols, _ = image_shape
    # Landmark coordinates are normalized to [0, 1]; scale back to pixels.
    pose_coordinates = [(math.floor(landmark.x * image_cols),
                         math.floor(landmark.y * image_rows))
                        for landmark in landmark_list.landmark]
    prediction_error = np.abs(
        np.asarray(pose_coordinates) -
        np.asarray(EXPECTED_POSE_COORDINATES_PREDICTION[:num_landmarks]))
    # Every coordinate must be within DIFF_THRESHOLOD pixels of the golden.
    npt.assert_array_less(prediction_error, DIFF_THRESHOLOD)

  def test_invalid_image_shape(self):
    """A non 3-channel input must be rejected with a ValueError."""
    pose = mp_pose.Pose()
    with self.assertRaisesRegex(
        ValueError, 'Input image must contain three channel rgb data.'):
      pose.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4))

  def test_blank_image(self):
    """A featureless white image must yield no pose landmarks."""
    pose = mp_pose.Pose()
    image = np.zeros([100, 100, 3], dtype=np.uint8)
    image.fill(255)
    results = pose.process(image)
    self.assertIsNone(results.pose_landmarks)
    pose.close()

  @parameterized.named_parameters(('static_image_mode', True, 3),
                                  ('video_mode', False, 3))
  def test_upper_body_model(self, static_image_mode, num_frames):
    """Upper-body model: 25 landmarks, verified over several frames."""
    image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg')
    pose = mp_pose.Pose(static_image_mode=static_image_mode,
                        upper_body_only=True)
    image = cv2.imread(image_path)
    for _ in range(num_frames):
      # cv2 loads BGR; the solution expects RGB.
      results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
      self._verify_output_landmarks(results.pose_landmarks, image.shape, 25)
    pose.close()

  @parameterized.named_parameters(('static_image_mode', True, 3),
                                  ('video_mode', False, 3))
  def test_full_body_model(self, static_image_mode, num_frames):
    """Full-body model: all 33 landmarks, verified over several frames."""
    image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg')
    pose = mp_pose.Pose(static_image_mode=static_image_mode)
    image = cv2.imread(image_path)
    for _ in range(num_frames):
      results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
      self._verify_output_landmarks(results.pose_landmarks, image.shape, 33)
    pose.close()
if __name__ == '__main__':
  # Run all tests in this module via absl's test runner.
  absltest.main()
|
import pandas as pd
from datetime import datetime
import math
import pathlib
import sys
import os
# Read data downloaded from the crawler
def read_data(path):
    """Read an ODF spreadsheet at *path* into a pandas DataFrame.

    On any read failure, writes a message to stderr and terminates the
    process with os._exit(1), matching the original CLI behaviour.
    """
    try:
        data = pd.read_excel(path, engine="odf")
        return data
    except Exception as excep:
        # BUG FIX: the original concatenated the Exception object directly
        # onto a str, which itself raised TypeError and masked the real
        # error message.  Convert it explicitly.
        sys.stderr.write(
            "'Não foi possível ler o arquivo: "
            + path
            + ". O seguinte erro foi gerado: "
            + str(excep)
        )
        os._exit(1)
# NaN is the only value that does not compare equal to itself, so a
# self-comparison detects it without importing math/numpy.
# Source: https://stackoverflow.com/a/944712/5822594
def isNaN(string):
    """Return True when *string* is a float NaN, False for anything else."""
    equal_to_itself = string == string
    return not equal_to_itself
def get_begin_row(rows, begin_string):
    """Return the index of the first data row following the header cell.

    Scans until a row whose first cell equals *begin_string*, then skips
    the NaN formatting rows that follow it in the spreadsheet layout.
    """
    idx = 0
    for entry in rows:
        idx += 1
        if entry[0] == begin_string:
            break
    # Skip formatting rows: a NaN first cell never equals itself.
    while rows[idx][0] != rows[idx][0]:
        idx += 1
    return idx
def get_end_row(rows, begin_row):
end_row = 0
for row in rows:
# First goes to begin_row.
if end_row < begin_row:
end_row += 1
continue
# Then keep moving until find a blank row.
if isNaN(row[0]):
break
end_row += 1
end_row -= 1
return end_row
def format_value(element):
    """Convert a spreadsheet cell to float, handling Brazilian formatting.

    "3.045,99" -> 3045.99 (thousands "." removed, decimal "," converted),
    "123,45"   -> 123.45,
    NaN        -> 0.0,
    numbers and plain numeric strings pass straight through float().
    """
    # NaN is the only value not equal to itself (self-contained NaN check).
    if element != element:
        return 0.0
    # isinstance is the idiomatic type check (was: type(element) == str).
    if isinstance(element, str):
        if "." in element and "," in element:
            # Brazilian format: "." separates thousands, "," is the decimal.
            element = element.replace(".", "").replace(",", ".")
        elif "," in element:
            element = element.replace(",", ".")
    return float(element)
# Used when the employee is not on the indemnity list
def parse_employees(file_name):
    """Parse the base remuneration spreadsheet into a dict keyed by matricula.

    Each value follows the project's employee schema (reg/name/role/income/
    discounts).  Column indices assume the crawler's spreadsheet layout --
    TODO(review): confirm against a sample file.
    """
    rows = read_data(file_name).to_numpy()
    begin_string = "Matrícula"
    begin_row = get_begin_row(rows, begin_string)
    end_row = get_end_row(rows, begin_row)
    employees = {}
    curr_row = 0
    for row in rows:
        # Skip the header/formatting rows that precede the data block.
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = row[0]
        if type(matricula) != str:
            matricula = str(matricula)
        # Column 1 holds "name / role" in a single cell.
        nome = row[1].split("/")[0]
        cargo_efetivo = row[1].split("/")[1]
        lotacao = row[2]
        if isNaN(lotacao):
            lotacao = "Não informado"
        remuneracao_cargo_efetivo = format_value(row[4])
        outras_verbas_remuneratorias = format_value(row[5])
        confianca_comissao = format_value(
            row[6]
        )  # Função de Confiança ou Cargo em Comissão
        grat_natalina = format_value(row[7])  # Gratificação Natalina
        ferias = format_value(row[8])
        permanencia = format_value(row[9])  # Abono de Permanência
        previdencia = format_value(row[13])  # Contribuição Previdenciária
        imp_renda = format_value(row[14])  # Imposto de Renda
        teto_constitucional = format_value(row[15])  # Retenção por Teto Constitucional
        total_desconto = previdencia + imp_renda + teto_constitucional
        total_gratificacoes = grat_natalina + ferias + permanencia + confianca_comissao
        total_bruto = (
            remuneracao_cargo_efetivo
            + outras_verbas_remuneratorias
            + total_gratificacoes
        )
        employees[matricula] = {
            "reg": matricula,
            "name": nome,
            "role": cargo_efetivo,
            "type": "membro",
            "workplace": lotacao,
            "active": True,
            "income": {
                "total": round(total_bruto, 2),
                # REMUNERAÇÃO BÁSICA = Remuneração Cargo Efetivo + Outras Verbas Remuneratórias, Legais ou Judiciais
                "wage": round(
                    remuneracao_cargo_efetivo + outras_verbas_remuneratorias, 2
                ),
                "other": {  # Gratificações
                    "total": round(total_gratificacoes, 2),
                    "trust_position": confianca_comissao,
                    "others_total": round(grat_natalina + ferias + permanencia, 2),
                    "others": {
                        "Gratificação Natalina": grat_natalina,
                        "Férias (1/3 constitucional)": ferias,
                        "Abono de Permanência": permanencia,
                    },
                },
            },
            "discounts": {  # Discounts Object. Using abs to garantee numbers are positivo (spreadsheet have negative discounts).
                "total": round(total_desconto, 2),
                "prev_contribution": previdencia,
                # Retenção por teto constitucional
                "ceil_retention": teto_constitucional,
                "income_tax": imp_renda,
            },
        }
        curr_row += 1
        # Stop once the data block (delimited by get_end_row) is exhausted.
        if curr_row > end_row:
            break
    return employees
def update_employee_indemnity(file_name, employees):
    """Merge per-employee indemnity values ("verbas indenizatórias") into
    the *employees* map built by parse_employees.

    Raises KeyError (handled by the caller) when a matricula appears in
    this sheet but not in *employees*.
    """
    rows = read_data(file_name).to_numpy()
    begin_string = "Matrícula"  # word before starting data
    begin_row = get_begin_row(rows, begin_string)
    end_row = get_end_row(rows, begin_row)
    curr_row = 0
    # If the spreadsheet does not contain employees
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = row[0]
        if type(matricula) != str:
            matricula = str(matricula)
        # Column meanings follow the indemnity sheet layout.
        abono_pecuniario = format_value(row[2])
        creche = format_value(row[3])
        ajuda_de_custo = format_value(row[4])
        natalidade = format_value(row[5])
        alimentacao = format_value(row[6])
        transporte = format_value(row[7])
        ferias_indenizada = format_value(row[8])
        banco_de_horas_indenizado = format_value(row[9])
        moradia = format_value(row[10])
        lp_pecunia = format_value(row[11])
        total_indenizacoes = (
            abono_pecuniario
            + creche
            + ajuda_de_custo
            + natalidade
            + alimentacao
            + transporte
            + ferias_indenizada
            + banco_de_horas_indenizado
            + moradia
            + lp_pecunia
        )
        emp = employees[matricula]
        # Fold the indemnity total into the overall income total ...
        emp["income"].update(
            {
                "total": round(emp["income"]["total"] + total_indenizacoes, 2),
            }
        )
        # ... and attach the per-perk breakdown.
        emp["income"].update(
            {
                "perks": {
                    "total": round(total_indenizacoes, 2),
                    "food": alimentacao,
                    "pre_school": creche,
                    "transportation": transporte,
                    "housing_aid": moradia,
                    "vacation": ferias_indenizada,
                    "pecuniary": round(abono_pecuniario + banco_de_horas_indenizado, 2),
                    "subsistence": ajuda_de_custo,
                    "birth_aid": natalidade,
                    "premium_license_pecuniary": lp_pecunia,
                }
            }
        )
        employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
def update_employee_temporary_remuneration(file_name, employees):
    """Merge temporary remuneration values ("verbas temporárias") into the
    *employees* map built by parse_employees.

    Totals are added to income["total"], income["other"]["total"] and
    income["other"]["others_total"]; the per-item breakdown is merged into
    income["other"]["others"].  Raises KeyError (handled by the caller)
    when a matricula is missing from *employees*.
    """
    rows = read_data(file_name).to_numpy()
    begin_string = "Matrícula"  # word before starting data
    begin_row = get_begin_row(rows, begin_string)
    end_row = get_end_row(rows, begin_row)
    curr_row = 0
    # If the spreadsheet does not contain employees
    for row in rows:
        if curr_row < begin_row:
            curr_row += 1
            continue
        matricula = row[0]
        if type(matricula) != str:
            matricula = str(matricula)
        # PSSS = Plano de Seguridade Social do Servidor Público
        substituicao_membros = format_value(row[2])  # Substituição de Membros
        funcao_substituicao = format_value(row[3])  # Função de Substituição
        grat_encargo_curso = format_value(row[4])  # Gratificação por Encargo de Curso
        insalubridade = format_value(row[5])  # Adicional de Insalubridade
        grat_encargo_concurso = format_value(
            row[6]
        )  # Gratificação por Encargo de Concurso
        periculosidade = format_value(row[7])  # Periculosidade
        exercicio_cumulativo_sem_psss = format_value(
            row[8]
        )  # Gratificação de Exercício Cumulativo com Ofício Sem Psss
        exercicio_cumulativo_com_psss = format_value(
            row[9]
        )  # Gratificação Exercício Cumulativo com Ofício Com Psss
        membros_substituicao = format_value(row[10])  # Membros Substituição
        hora_extra_sem_pass = format_value(row[11])  # Hora Extra Sem Psss
        adic_noturno_sem_pass = format_value(row[12])  # Adicional Noturno Sem Psss
        subs_membros_ms2013 = format_value(row[13])  # Substituição Membros MS2013
        adic_penosidade = format_value(row[14])  # Adicional Penosidade
        total_temporario = (
            substituicao_membros
            + funcao_substituicao
            + grat_encargo_curso
            + insalubridade
            + grat_encargo_concurso
            + periculosidade
            + exercicio_cumulativo_sem_psss
            + exercicio_cumulativo_com_psss
            + membros_substituicao
            + hora_extra_sem_pass
            + adic_noturno_sem_pass
            + subs_membros_ms2013
            + adic_penosidade
        )
        emp = employees[matricula]
        emp["income"].update(
            {
                "total": round(emp["income"]["total"] + total_temporario, 2),
            }
        )
        emp["income"]["other"].update(
            {
                "others_total": round(
                    emp["income"]["other"]["others_total"] + total_temporario, 2
                ),
                "total": round(emp["income"]["other"]["total"] + total_temporario, 2),
            }
        )
        emp["income"]["other"]["others"].update(
            {
                "Substituição de Membros": substituicao_membros,
                "Função de Substituição": funcao_substituicao,
                "Gratificação por Encargo de Curso": grat_encargo_curso,
                "Adicional de Insalubridade": insalubridade,
                "Gratificação por Encargo de Concurso": grat_encargo_concurso,
                "Adicional de Periculosidade": periculosidade,
                "Gratificação de Exercício Cumulativo com Ofício Sem Psss": exercicio_cumulativo_sem_psss,
                "Gratificação Exercício Cumulativo com Ofício Com Psss": exercicio_cumulativo_com_psss,
                "Membros Substituição": membros_substituicao,
                "Hora Extra Sem Psss": hora_extra_sem_pass,
                "Adicional Noturno Sem Psss": adic_noturno_sem_pass,
                "Substituição Membros MS2013": subs_membros_ms2013,
                "Adicional Penosidade": adic_penosidade,
            }
        )
        employees[matricula] = emp
        curr_row += 1
        if curr_row > end_row:
            break
    return employees
def parse(file_names):
    """Parse all spreadsheets into the final employee list.

    Base remuneration sheets are read first; indemnity ("Verbas
    Indenizatorias") and temporary ("Verbas Temporarias") sheets are then
    merged into the existing records by matricula.
    """
    employees = {}
    for fn in file_names:
        if "Verbas Indenizatorias" not in fn and "Verbas Temporarias" not in fn:
            # Puts all parsed employees in the big map
            employees.update(parse_employees(fn))
    try:
        for fn in file_names:
            if "Verbas Indenizatorias" in fn:
                update_employee_indemnity(fn, employees)
            elif "Verbas Temporarias" in fn:
                update_employee_temporary_remuneration(fn, employees)
    except KeyError as e:
        # A matricula present only in an extra sheet aborts the whole run.
        # NOTE(review): the message mentions only "verbas indenizatórias"
        # even when the error came from the temporary-remuneration sheet.
        sys.stderr.write(
            "Registro inválido ao processar verbas indenizatórias: {}".format(e)
        )
        os._exit(1)
    return list(employees.values())
|
<reponame>tswicegood/bokeh
# -*- coding: utf-8 -*-
# Plots sampled histograms of several probability distributions, each
# overlaid with its analytic PDF and CDF.
# NOTE(review): this uses the legacy procedural bokeh.plotting API
# (hold/figure/quad/line/legend/axis as module-level functions) and only
# runs against old bokeh releases.
import numpy as np
import scipy.special
from bokeh.plotting import *

# --- Normal distribution -------------------------------------------------
mu, sigma = 0, 0.5
measured = np.random.normal(mu, sigma, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(-2, 2, 1000)
# Analytic PDF/CDF of N(mu, sigma).
pdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2
output_file('histogram.html')
# hold() keeps subsequent glyphs on the same figure (legacy API).
hold()
figure(title="Normal Distribution (μ=0, σ=0.5)",tools="previewsave",
       background_fill="#E8DDCB")
quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
     fill_color="#036564", line_color="#033649",\
)
# Use `line` renderers to display the PDF and CDF
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "top_left"
xax, yax = axis()
xax.axis_label = 'x'
yax.axis_label = 'Pr(x)'

# --- Log-normal distribution ---------------------------------------------
figure(title="Log Normal Distribution (μ=0, σ=0.5)", tools="previewsave",
       background_fill="#E8DDCB")
mu, sigma = 0, 0.5 # NOTE: you can tinker with these values if you like
measured = np.random.lognormal(mu, sigma, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0, 8.0, 1000)
pdf = 1/(x* sigma * np.sqrt(2*np.pi)) * np.exp(-(np.log(x)-mu)**2 / (2*sigma**2))
cdf = (1+scipy.special.erf((np.log(x)-mu)/(np.sqrt(2)*sigma)))/2
quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
     fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "bottom_right"
xax, yax = axis()
xax.axis_label = 'x'
yax.axis_label = 'Pr(x)'

# --- Gamma distribution ---------------------------------------------------
figure(title="Gamma Distribution (k=1, θ=2)", tools="previewsave",
       background_fill="#E8DDCB")
k, theta = 1.0, 2.0
measured = np.random.gamma(k, theta, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
# compute ideal values
x = np.linspace(0, 20.0, 1000)
pdf = x**(k-1) * np.exp(-x/theta) / (theta**k * scipy.special.gamma(k))
cdf = scipy.special.gammainc(k, x/theta) / scipy.special.gamma(k)
quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
     fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "top_left"
xax, yax = axis()
xax.axis_label = 'x'
yax.axis_label = 'Pr(x)'

# --- Beta distribution ----------------------------------------------------
figure(title="Beta Distribution (α=2, β=2)", tools="previewsave",
       background_fill="#E8DDCB")
alpha, beta = 2.0, 2.0
measured = np.random.beta(alpha, beta, 1000)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0, 1, 1000)
pdf = x**(alpha-1) * (1-x)**(beta-1) / scipy.special.beta(alpha, beta)
cdf = scipy.special.btdtr(alpha, beta, x)
quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
     fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
xax, yax = axis()
xax.axis_label = 'x'
yax.axis_label = 'Pr(x)'

# --- Weibull distribution (sampled via inverse-CDF of uniforms) -----------
figure(title="Weibull Distribution (λ=1, k=1.25)", tools="previewsave",
       background_fill="#E8DDCB")
lam, k = 1, 1.25
measured = lam*(-np.log(np.random.uniform(0, 1, 1000)))**(1/k)
hist, edges = np.histogram(measured, density=True, bins=50)
x = np.linspace(0, 8, 1000)
pdf = (k/lam)*(x/lam)**(k-1) * np.exp(-(x/lam)**k)
cdf = 1 - np.exp(-(x/lam)**k)
quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
     fill_color="#036564", line_color="#033649"
)
line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
line(x, cdf, line_color="white", line_width=2, alpha=0.7, legend="CDF")
legend().orientation = "top_left"
xax, yax = axis()
xax.axis_label = 'x'
yax.axis_label = 'Pr(x)'

show()
|
<reponame>vltmedia/ROMP
import cv2
import keyboard
import imageio
import torch
import numpy as np
import random
from transforms3d.axangles import axangle2mat
import pickle
from PIL import Image
import torchvision
import time
import os,sys
import config
import constants
from config import args
from utils import save_obj
def get_video_bn(video_file_path):
    """Return the file's basename with any known video extension stripped.

    Extensions are taken from constants.video_exts.
    """
    basename = os.path.basename(video_file_path)
    for ext in constants.video_exts:
        # BUG FIX: str.replace returns a new string; the original code
        # discarded the result, so the extension was never removed.
        basename = basename.replace(ext, '')
    return basename
def save_meshes(reorganize_idx, outputs, output_dir, smpl_faces):
    """Write one .obj mesh per detected subject for each source image.

    reorganize_idx maps each batch entry back to its source image/video id;
    files are named <image basename>_<subject_idx>.obj under output_dir.
    """
    vids_org = np.unique(reorganize_idx)
    for idx, vid in enumerate(vids_org):
        # All batch entries belonging to this image/video id.
        verts_vids = np.where(reorganize_idx==vid)[0]
        img_path = outputs['meta_data']['imgpath'][verts_vids[0]]
        obj_name = os.path.join(output_dir, '{}'.format(os.path.basename(img_path))).replace('.mp4','').replace('.jpg','').replace('.png','')+'.obj'
        for subject_idx, batch_idx in enumerate(verts_vids):
            # Vertices cast to float16 to keep the .obj files small.
            save_obj(outputs['verts'][batch_idx].detach().cpu().numpy().astype(np.float16), \
                smpl_faces,obj_name.replace('.obj', '_{}.obj'.format(subject_idx)))
class OpenCVCapture:
    """Thin wrapper over cv2.VideoCapture for webcam or video-file input."""
    def __init__(self, video_file=None, show=False):
        # video_file None -> open the webcam selected by args().cam_id.
        if video_file is None:
            self.cap = cv2.VideoCapture(int(args().cam_id))
        else:
            self.cap = cv2.VideoCapture(video_file)
        # Total frame count (0/indeterminate for live webcam streams).
        self.length = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.whether_to_show=show
    def read(self, return_rgb=True):
        """Return the next frame (RGB by default) or None at end of stream."""
        flag, frame = self.cap.read()
        if not flag:
            return None
        if self.whether_to_show:
            # Live preview of the (downscaled) captured frame.
            cv2.imshow('webcam',cv2.resize(frame, (240,320)))
            cv2.waitKey(1)
        if return_rgb:
            frame = np.flip(frame, -1).copy() # BGR to RGB
        return frame
class Image_Reader:
    """Cycles through the images of a folder, returning RGB frames."""
    def __init__(self, image_folder):
        self.image_folder = image_folder
        # NOTE(review): os.listdir order is arbitrary -- frames may come
        # back unsorted; confirm whether sorted order is required.
        self.image_list = os.listdir(self.image_folder)
        self.current_num=0
    def read(self):
        """Read the next image; wraps around at the end of the folder."""
        frame = cv2.imread(os.path.join(self.image_folder,self.image_list[self.current_num]))
        self.current_num+=1
        if self.current_num==len(self.image_list):
            self.current_num=0
        return np.flip(frame, -1).copy() # BGR to RGB
class Time_counter():
    """Accumulates per-frame runtime statistics, discarding outliers.

    Intervals longer than *thresh* seconds (e.g. stalls between videos)
    are ignored so they do not skew the average/FPS figures.
    """

    def __init__(self, thresh=0.1):
        self.thresh = thresh
        self.runtime = 0
        self.frame_num = 0

    def start(self):
        """Mark the beginning of a timed interval."""
        self.start_time = time.time()

    def count(self, frame_num=1):
        """Fold the elapsed interval into the totals, then restart the clock."""
        elapsed = time.time() - self.start_time
        if elapsed < self.thresh:
            self.runtime += elapsed
            self.frame_num += frame_num
        self.start()

    def fps(self):
        """Print the accumulated average runtime and frames-per-second."""
        print('average per-frame runtime:', self.runtime / self.frame_num)
        print('FPS: {}, not including visualization time. '.format(self.frame_num / self.runtime))

    def reset(self):
        """Clear the accumulated statistics."""
        self.runtime = 0
        self.frame_num = 0
def video2frame(video_name, frame_save_dir=None):
    """Dump every frame of *video_name* as JPEGs into *frame_save_dir*.

    Returns the list of written frame paths.  Frames are written in BGR
    as read (return_rgb=False), which is what cv2.imwrite expects.
    """
    cap = OpenCVCapture(video_name)
    os.makedirs(frame_save_dir, exist_ok=True)
    frame_list = []
    for frame_id in range(int(cap.length)):
        frame = cap.read(return_rgb=False)
        save_path = os.path.join(frame_save_dir, '{:06d}.jpg'.format(frame_id))
        cv2.imwrite(save_path, frame)
        frame_list.append(save_path)
    return frame_list
def frames2video(images_path, video_name,fps=30):
    """Encode the images at *images_path* (in order) into an mp4 video."""
    writer = imageio.get_writer(video_name, format='mp4', mode='I', fps=fps)
    for path in images_path:
        image = imageio.imread(path)
        writer.append_data(image)
    writer.close()
# coding:utf-8
"""
pybilibili.network
----------------------
所有爬虫函数
:copyright: (c) 2016 <NAME>
:license: BSD
"""
import json
import math
import os
import re
import sys
import time
import urllib
import urllib.request

import requests
from bs4 import BeautifulSoup
from colorama import Fore, Style

import dicts
class Network:
    """Scraper/client for bilibili.com pages and JSON endpoints.

    Each print_* method either prints its colored report to stdout or, when
    a filename is given, appends a color-stripped copy to that file.
    """

    def __init__(self):
        # Fetch the homepage once; get_web_online() reads from this soup.
        self.homepage_res = requests.get('http://www.bilibili.com')
        self.homepage_cont = self.homepage_res.content
        self.homepage_soup = BeautifulSoup(self.homepage_cont, 'html.parser', from_encoding='utf-8')

    def _clear_file(self, filename):
        # Remove a previous output file so a fresh report starts empty.
        if os.path.exists(filename):
            os.remove(filename)

    def _print_or_output(self, content, output, filename):
        """Print *content*, or append it (ANSI colors stripped) to *filename*."""
        if output == False:
            print(content)
        else:
            # Strip every colorama foreground escape sequence before writing.
            for f in vars(Fore).items():
                content = content.replace(f[1], '')
            file = open(filename, 'a')
            file.write(content)
            file.close()
            print(Fore.LIGHTWHITE_EX + '下载完成! 文件路径: ' + Fore.GREEN + os.path.abspath(filename))

    def get_web_online(self):
        """Return the "online" viewer counter text scraped from the homepage."""
        return self.homepage_soup.find('a', href='/video/online.html').em.string

    def print_video_stat(self, aid, filename, newfile):
        """Report title, author, date, category and counters for video av<aid>."""
        # Stats come from the JSON API; title/author/date from the HTML page.
        json_res = requests.get('http://api.bilibili.com/archive_stat/stat?aid=%s' % aid)
        json_str = json_res.content
        json_list = json.loads(json_str.decode('utf-8'))
        data = json_list['data']
        html_res = requests.get('http://www.bilibili.com/video/av%s/' % aid)
        html_cont = html_res.content.decode('utf-8')
        html_soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        title = html_soup.find('div', class_='v-title').string
        author = html_soup.find('div', class_='usname').a.string
        time_ = html_soup.find('time', itemprop="startDate").i.string
        category_list = html_soup.find_all('a', rel='v:url')
        category = str()
        output = False
        if filename is not None:
            output = True
            if newfile:
                self._clear_file(filename)
        # Join the category breadcrumb (skipping the first link) as "a > b".
        for a in category_list[1:]:
            category += a.string
            if category_list.index(a) != len(category_list) - 1:
                category += ' > '
        self._print_or_output(Fore.CYAN + '[av%s] ' % aid +
                              Fore.RESET + '%s ' % title +
                              Fore.YELLOW + '作者: ' + Fore.RESET + '%s' % author + '\n', output, filename)
        self._print_or_output(Fore.WHITE + time_ + Fore.RESET +
                              ' | ' + Fore.WHITE + category + '\n', output, filename)
        self._print_or_output(Fore.GREEN + '播放: ' + Fore.RESET + '%d ' % data['view'] +
                              Fore.GREEN + '弹幕: ' + Fore.RESET + '%d ' % data['danmaku'] +
                              Fore.GREEN + '评论: ' + Fore.RESET + '%d ' % data['reply'] +
                              Fore.GREEN + '收藏: ' + Fore.RESET + '%d ' % data['favorite'] +
                              Fore.GREEN + '硬币: ' + Fore.RESET + '%d ' % data['coin'] +
                              Fore.GREEN + '分享: ' + Fore.RESET + '%d ' % data['share'], output, filename)
    # Sample output (Chinese UI text):
    # [av123456] 感觉身体被掏空by彩虹室内合唱团 作者:上海彩虹室内合唱团
    # 2016-07-27 10:07 | 音乐 > 原创音乐
    # 播放:1407731 弹幕:4167 评论:15916 收藏:15916 硬币:21166 分享:8637

    def print_people_info(self, uid, filename, newfile):
        """Report profile and upload/follower counters for user <uid>."""
        req_info = requests.get('http://space.bilibili.com/ajax/member/GetInfo?mid=%s' % uid)
        json_info_str = req_info.content
        json_info_list = json.loads(json_info_str.decode('utf-8'))
        data = json_info_list['data']
        req_video = requests.get('http://space.bilibili.com/ajax/member/getSubmitVideos?mid=%s&tid=0' % uid)
        json_video_str = req_video.content
        json_video_list = json.loads(json_video_str.decode('utf-8'))
        # Sample output (Chinese UI text):
        # [3875443] 哟唷喲yo Lv4 | 闲的无聊转转视频,做做视频(╯‵□′)╯︵┴─┴
        # 性别: 男 注册于: 2014-4-7 生日: 01-01 地址: 未填写
        # 投稿视频: 1 | 关注: 61 粉丝: 4
        regtime = time.gmtime(data['regtime'])
        place = data['place']
        if place == '':
            place = '未填写'
        sign = data['sign']
        if sign == '':
            sign = '无简介'
        output = False
        if filename is not None:
            output = True
            if newfile:
                self._clear_file(filename)
        self._print_or_output(Fore.CYAN + '[%s] ' % uid +
                              Fore.RESET + '%s ' % data['name'] +
                              Fore.LIGHTYELLOW_EX + 'Lv%s ' % data['level_info']['current_level'] +
                              Fore.LIGHTWHITE_EX + '| ' +
                              Fore.RESET + '%s' % sign + '\n', output, filename)
        self._print_or_output(Fore.GREEN + '性别: ' + Fore.RESET + '%s ' % data['sex'] +
                              Fore.GREEN + '注册于: ' + Fore.RESET + '%d-%d-%d ' % (regtime[0], regtime[1], regtime[2]) +
                              Fore.GREEN + '生日: ' + Fore.RESET + '%s ' % data['birthday'] +
                              Fore.GREEN + '地址: ' + Fore.RESET + '%s ' % place + '\n', output, filename)
        self._print_or_output(Fore.GREEN + '投稿视频: ' + Fore.RESET + '%d ' % json_video_list['data']['count'] + Fore.LIGHTWHITE_EX + '| ' + Fore.LIGHTRED_EX + '关注: ' + Fore.RESET + '%d ' % data['friend'] + Fore.LIGHTRED_EX + '粉丝: ' + Fore.RESET + '%d' % data['fans'] + '\n', output, filename)

    def print_ranking_list(self, ranking_name, category_fenqu, is_recent, scope, filename, newfile):
        """Report the site ranking list for the given category and scope."""
        try:
            # Map CLI names to the site's internal ranking/category keys.
            ranking_list_name = dicts.ranking_list_name[ranking_name]
            category_name = dicts.ranking_category_name[category_fenqu]
        except:
            # TODO: error handling for unknown ranking/category names.
            pass
        if is_recent == True:
            # "Recent" rankings use a zero-prefixed scope in the JSON URL.
            scope = '0' + scope
        output = False
        if filename is not None:
            output = True
            if newfile:
                self._clear_file(filename)
        res = requests.get('http://www.bilibili.com/index/rank/%s-%s-%s.json' % (ranking_list_name, scope, category_name))
        json_str = res.content
        json_list = json.loads(json_str.decode('utf-8'))
        ranking = 1
        for video in json_list['rank']['list']:
            # Sample output (Chinese UI text):
            # 1: [av123456] 【多素材】解开只穿一件衬衣的扣子 综合评分: 1147182
            # 播放: 666191 评论: 4160 作者: 科学超电磁炮F
            try:
                self._print_or_output(Fore.RED + "%d: " % ranking +
                                      Fore.CYAN + "[av%d] " % video['aid'] +
                                      Fore.RESET + "%s " % video['title'] +
                                      Fore.YELLOW + "综合评分: %d" % video['pts'] + '\n' +
                                      Fore.GREEN + "\t播放: " + Fore.RESET + "%d" % video['play'] +
                                      Fore.GREEN + " 评论: " + Fore.RESET + "%d" % video['video_review'] +
                                      Fore.GREEN + " 作者: " + Fore.RESET + video['author'] + '\n', output, filename)
            except UnicodeEncodeError:
                # Skip entries whose title cannot be encoded on this console.
                pass
            ranking += 1

    def download_danmu(self, aid, filename):
        """Download the danmaku (comment overlay) XML for video av<aid>."""
        req_video = requests.get('http://www.bilibili.com/video/av%s/' % aid)
        html = req_video.content
        soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
        title = soup.find('div', class_='v-title').h1.string
        # The cid (comment id) is embedded in the EmbedPlayer script text.
        player = soup.find(text = re.compile('^EmbedPlayer'))
        cid = ''
        for i in player.string[(player.string.find('cid') + 4):]:
            if i == '&':
                break
            cid += i
        req_xml = requests.get('http://comment.bilibili.com/%s.xml' % cid)
        xml = req_xml.content
        if filename == '':
            filename = '弹幕_av%s.xml' % aid
        f = open(filename, 'w', encoding = 'utf-8')
        try:
            f.writelines(xml.decode('utf-8'))
        except UnicodeEncodeError:
            pass
        f.close()
        result = ''
        # Remove blank lines from the saved XML.
        f = open(filename, 'r', encoding = 'utf-8')
        lines = f.readlines()
        for li in lines:
            if li.split():
                result += li
        f.close()
        f = open(filename, 'w', encoding = 'utf-8')
        f.writelines(result)
        f.close()
        print(Fore.LIGHTWHITE_EX + '下载完成! 文件路径: ' + Fore.GREEN + os.path.abspath(filename))

    def _download_schedule(self, a, b, c):
        """urlretrieve reporthook: a=blocks done, b=block size, c=total size."""
        per = 100.0 * a * b / c
        sys.stdout.write('\r')
        # 50-char '=' progress bar plus a percentage readout.
        sys.stdout.write('[%-50s] %s' % ( '=' * int(math.floor(per / 2)), '%.2f%%' % per))
        sys.stdout.flush()
        if per >= 100:
            sys.stdout.write('\n')

    def download_video(self, aid, quality, type, output):
        """Download video av<aid> after an interactive confirmation.

        quality: 1 = standard ("流畅"), 2 = high definition ("高清")
        type: mp4 / flv
        """
        req_video = requests.get('http://www.bilibili.com/video/av%s/' % aid)
        html = req_video.content.decode('utf-8')
        soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')
        title = soup.find('div', class_='v-title').string
        # Extract the cid from the EmbedPlayer script, as in download_danmu.
        player = soup.find(text = re.compile('^EmbedPlayer'))
        cid = ''
        for i in player.string[(player.string.find('cid') + 4):]:
            if i == '&':
                break
            cid += i
        json_str = requests.get('http://interface.bilibili.com/playurl?otype=json&appkey=86385cdc024c0f6c&cid=%s&quality=%s&type=%s' % (cid, quality, type)).content
        json_list = json.loads(json_str.decode('utf-8'))
        time_length = json_list['timelength']
        size = json_list['durl'][0]['size']
        download_link = json_list['durl'][0]['url']
        # Convert the millisecond duration into a "min:sec" display string.
        length_all = float('%f' % (time_length / 1000 / 60))
        length_min = math.floor(float('%f' % (time_length / 1000 / 60)))
        length_sec = int(60 * (length_all - math.floor(length_all)))
        length_str = '%d:%d' % (length_min, length_sec)
        size_mb = '%.2fMB' % (size / (1024 * 1024))
        if type == 'mp4':
            type = 'hdmp4'
        if output == '':
            output = '%s.%s' % (title, type)
        # Sample output (Chinese UI text):
        # [av123456] 【史诗级误解】甲铁城x罪恶王冠的超同步套路剧场(吐血完整版)——.mp4
        # 时长: 12:43 | 大小: 119.0MB
        # 开始下载? (y/n):
        print(Fore.CYAN + '[av%s] ' % aid + Fore.RESET + title)
        print(Fore.GREEN + '时长: ' + Fore.RESET + length_str +
              Fore.LIGHTWHITE_EX + ' | ' +
              Fore.GREEN + '大小: ' + Fore.RESET + size_mb)
        confirm = input('开始下载? (y/n): ')
        if confirm == 'y':
            # Download with the progress-bar reporthook above.
            urllib.request.urlretrieve(download_link, output, self._download_schedule)
            print(Fore.LIGHTWHITE_EX + '下载成功! 文件路径: ' + Fore.GREEN + os.path.abspath(output))
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow_first_window.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWinodwFirstWindow(object):
    """pyuic5-generated UI class for the "First Window" form.

    NOTE: generated from mainwindow_first_window.ui -- regenerate with
    pyuic5 rather than editing by hand (see the header warning above).
    """

    def setupUi(self, MainWinodwFirstWindow):
        """Build the widget tree on *MainWinodwFirstWindow* (a QMainWindow)."""
        MainWinodwFirstWindow.setObjectName("MainWinodwFirstWindow")
        MainWinodwFirstWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWinodwFirstWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Title label.
        self.label_moil_template = QtWidgets.QLabel(self.centralwidget)
        self.label_moil_template.setGeometry(QtCore.QRect(140, 80, 511, 101))
        font = QtGui.QFont()
        font.setPointSize(24)
        font.setBold(True)
        font.setWeight(75)
        self.label_moil_template.setFont(font)
        self.label_moil_template.setAlignment(QtCore.Qt.AlignCenter)
        self.label_moil_template.setObjectName("label_moil_template")
        # "Data:" label and its input field.
        self.label_data = QtWidgets.QLabel(self.centralwidget)
        self.label_data.setGeometry(QtCore.QRect(230, 270, 67, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.label_data.setFont(font)
        self.label_data.setObjectName("label_data")
        self.lineedit_date = QtWidgets.QLineEdit(self.centralwidget)
        self.lineedit_date.setGeometry(QtCore.QRect(300, 270, 271, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.lineedit_date.setFont(font)
        self.lineedit_date.setObjectName("lineedit_date")
        # "Go !" button.
        self.pushbtn_go = QtWidgets.QPushButton(self.centralwidget)
        self.pushbtn_go.setGeometry(QtCore.QRect(340, 400, 89, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.pushbtn_go.setFont(font)
        self.pushbtn_go.setObjectName("pushbtn_go")
        MainWinodwFirstWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWinodwFirstWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
        self.menubar.setObjectName("menubar")
        MainWinodwFirstWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWinodwFirstWindow)
        self.statusbar.setObjectName("statusbar")
        MainWinodwFirstWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWinodwFirstWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWinodwFirstWindow)

    def retranslateUi(self, MainWinodwFirstWindow):
        """Apply (re-)translated display strings to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWinodwFirstWindow.setWindowTitle(_translate("MainWinodwFirstWindow", "First Window"))
        self.label_moil_template.setText(_translate("MainWinodwFirstWindow", "MoilApp - Tutorial Sample\n"
"\n"
"First Window"))
        self.label_data.setText(_translate("MainWinodwFirstWindow", "Data:"))
        self.lineedit_date.setPlaceholderText(_translate("MainWinodwFirstWindow", "Set second window\'s data here"))
        self.pushbtn_go.setText(_translate("MainWinodwFirstWindow", "Go !"))
if __name__ == "__main__":
    # Standalone preview: instantiate the generated UI and show the window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWinodwFirstWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWinodwFirstWindow()
    ui.setupUi(MainWinodwFirstWindow)
    MainWinodwFirstWindow.show()
    sys.exit(app.exec_())
|
import argparse
from collections import OrderedDict
import getpass
import sys
import textwrap
import uuid
from course_seeder import CourseSeeder
from discussions_seeder import DiscussionsSeeder
def setup_course_in_studio(args):
"""
Setup studio and return the created course_id
"""
print "Studio url set to: {}".format(args.studio)
email = args.email or raw_input("Studio Email: ")
password = args.password or <PASSWORD>("Password: ")
seeder = CourseSeeder(studio_url=args.studio)
seeder.login_to_studio(email, password)
unique_id = uuid.uuid4().hex[:5]
course_data = {
"org": "dapi",
"course": args.action,
"run": "test_{}".format(unique_id),
"display_name": "dapi_name_{}".format(unique_id),
}
course_id = seeder.create_course(course_data=course_data)
seeder.import_tarfile(course_id=course_id, tarfile=args.tarfile)
return course_id
def setup_discussion_seeder(args):
print "LMS url set to: {}".format(args.lms)
seeder = DiscussionsSeeder(lms_url=args.lms)
email = args.email or raw_input("LMS Email: ")
password = args.password or <PASSWORD>("Password: ")
seeder.login_to_lms(email, password)
return seeder
def save_course_thread_list_to_file(args, seeder=None):
    """
    Saves all threads in the course to a file.
    Arguments:
        args: Parser args.
        seeder: A DiscussionSeeder.
    """
    seeder = seeder if seeder else setup_discussion_seeder(args)
    if not args.course:
        print "Requires a course_id"
        return
    # NOTE(review): the output filename is taken from args.action -- confirm
    # this is the intended naming convention for saved thread lists.
    file_name = args.action
    save_threads_to_file(seeder, file_name, course_id=args.course, thread_id_list=None)
def save_threads_to_file(seeder, file_name, course_id, thread_id_list=None):
    """
    Handles printing necessary output when writing threads to a file.
    Arguments:
        seeder (DiscussionSeeder): The DiscussionSeeder.
        file_name (str): The filename to be written to.
        course_id (str): The course containing the threads.
        thread_id_list (list): A list of thread ids, or None to read all
            thread ids from the course.
    """
    print "Saving thread_ids to: {}".format(file_name)
    seeder.create_thread_data_file(file_name=file_name, course_id=course_id, thread_id_list=thread_id_list)
    # Remind the operator how to feed the seeded data into locust.
    print "Run locust with SEEDED_DATA={}".format(file_name)
def get_course_id(args=None, course_id=None):
    """
    Returns the course id to be used seeding. If a course id is provided, it is
    used. Otherwise a new course is created and its course id is returned.

    Arguments:
        args: Optional parser args that could include the course, as well as
            details for setting up a new course.
        course_id: Optional course id.

    Returns:
        The course id to be used for seeding.
    """
    if course_id:
        seed_course_id = course_id
    elif args and args.course:
        seed_course_id = args.course
    else:
        # No id supplied anywhere: create a fresh course in Studio.
        seed_course_id = setup_course_in_studio(args)
    # print() function form works identically on Python 2 and 3 here.
    print("Run locust with COURSE_ID={}".format(seed_course_id))
    return seed_course_id
def create_threads(args, course_id=None, seeder=None, save_threads=True):
    """
    Seed threads in batches of 10 and print the locust command line.

    Each thread has a ~250 character body. Of every 10 threads created:
    4 have no comments/responses, 3 have some sort of flag
    (abused/voted/following/endorsed), 1 has a response and a comment,
    and 3 have a response. Threads=10, Responses=4, comments=1.
    """
    course_id = get_course_id(args, course_id)
    if seeder is None:
        seeder = setup_discussion_seeder(args)
    batch_count = int(args.batches or raw_input("How many threads in multiples of 10?"))
    seeder.seed_threads(course_id=course_id, posts=batch_count)
    output_file = args.action + str(batch_count * 10)
    if save_threads:
        save_threads_to_file(seeder, output_file, course_id)
def create_comments(args, course_id=None, seeder=None, save_threads=True):
    """
    Seed threads, responses and comments as supplied in args or via prompt.

    Arguments:
        args: Parser args.
        course_id: The course id where the comments will be added.
        seeder: The DiscussionSeeder.
        save_threads: True if the new threads should be saved to a file, and False otherwise.
    """
    course_id = get_course_id(args, course_id)
    if seeder is None:
        seeder = setup_discussion_seeder(args)
    thread_count = int(args.threads or raw_input("How many threads "))
    response_count = int(args.responses or raw_input("How many responses for thread "))
    comment_count = int(args.comments or raw_input("How many comments for each response "))
    seeded_threads = seeder.seed_comments(course_id=course_id, posts=thread_count,
                                          responses=response_count, child_comments=comment_count)
    output_file = args.action + str(response_count * comment_count)
    if save_threads:
        save_threads_to_file(seeder, output_file, course_id, thread_id_list=seeded_threads)
def main():
    """
    Parse command-line arguments and run the selected seeding action.

    Returns:
        -1 when the requested action is unknown (after printing help),
        None otherwise.
    """
    # Maps the --action value to its handler and the help text for the epilog.
    actions = OrderedDict([
        ("CreateThreads", {
            'function': create_comments,
            'help': 'Creates threads with the specified number of responses and comments.',
        }),
        ("CreateThreadBatches", {
            'function': create_threads,
            'help': 'Creates batches of 10 threads with random numbers of responses and comments and random attributes.',
        }),
        ("DumpCourseThreads", {
            'function': save_course_thread_list_to_file,
            'help': 'Dumps all threads in a course to a file to use with the locust tests.',
        }),
    ])
    parser = argparse.ArgumentParser(
        description="This script can be used to seed data and output help for running locust.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('-a', '--action', help='Script action (see below).', default='', required=True)
    parser.add_argument('-s', '--studio', help='Studio url.', default='', required=True)
    parser.add_argument('-l', '--lms', help='LMS url.', default='', required=True)
    parser.add_argument('-f', '--tarfile', help='Name of the course tarfile.', default='dapi_course.tar.gz')
    parser.add_argument('-e', '--email', help='Email for login for both LMS/Studio. You will be prompted if needed and not supplied.', default='')
    parser.add_argument('-p', '--password', help='Password for both LMS/Studio. You will be prompted if needed and not supplied.', default='')
    parser.add_argument('-b', '--batches', help='Number of custom 10 thread batches. You will be prompted if needed and not supplied.', default='')
    parser.add_argument('-t', '--threads', help='Number of threads. You will be prompted if needed and not supplied.', default='')
    parser.add_argument('-r', '--responses', help='Number of responses per thread. You will be prompted if needed and not supplied.', default='')
    parser.add_argument('-m', '--comments', help='Number of comments per response. You will be prompted if needed and not supplied.', default='')
    parser.add_argument('-c', '--course', help='Course id for adding threads. If not supplied, a course will be created for you.', default='')
    # Build the epilog listing every action and its wrapped help text.
    parser.epilog = "script actions/tasks:"
    # Hoisted out of the loop: these wrap-width constants are loop-invariant.
    LINE_LENGTH = 80
    TWO_INDENTS = 8
    for action in actions:
        parser.epilog += "\n {}".format(action)
        for line in textwrap.wrap(actions[action]['help'], LINE_LENGTH - TWO_INDENTS):
            parser.epilog += "\n {}".format(line)
    args = parser.parse_args()
    if args.action not in actions:
        parser.print_help()
        return -1
    actions[args.action]['function'](args)
# Script entry point: propagate main()'s return value as the process exit code
# (None -> 0 on success, -1 on an unknown action).
if __name__ == "__main__":
    sys.exit(main())
|
# coding: utf-8
# std
import string
from datetime import timedelta, datetime
import csv
import os
import shutil
import sys
# math
import math
import numpy as np
from scipy.sparse import *
# mabed
import mabsed.utils as utils
import json
class Corpus:
    """
    Loads tweets from tab-separated CSV files and builds the vocabulary and
    the per-time-slice frequency matrices used by the event-detection process.
    """

    def __init__(self, input_files, stopwords, corpus_directory, min_absolute_freq, max_relative_freq,
                 separator, save_voc=False):
        """
        Scan every input file once to compute the corpus size, its time span
        and the frequency-filtered vocabulary.

        Arguments:
            input_files: list of TSV files with at least the 'date' and
                'lemmatizedText' columns.
            stopwords: path of the stop-word file.
            corpus_directory: directory where per-time-slice files are written.
            min_absolute_freq: minimum absolute frequency to keep a word.
            max_relative_freq: maximum relative frequency to keep a word.
            separator: separator used in the CSV files.
                NOTE(review): stored but the readers below hard-code '\t' —
                confirm whether this parameter should be honored.
            save_voc: when True, dump the raw and filtered vocabularies to disk.
        """
        self.input_files = input_files
        self.size = 0                            # number of tweets in the corpus
        self.start_date = '3000-01-01 00:00:00'  # date of the oldest tweet (sentinel start)
        self.end_date = '1000-01-01 00:00:00'    # date of the most recent tweet (sentinel start)
        self.separator = separator               # separator used in the CSV files

        # load stop-words
        self.stopwords = utils.load_stopwords(stopwords)

        # set corpus output directory
        self.corpus_directory = corpus_directory

        # Count how many times each word appears across all tweets.
        word_frequency = {}
        for input_path in self.input_files:
            with open(input_path, 'r') as input_file:
                reader = csv.DictReader(input_file, delimiter='\t')
                tweets = list(reader)
                for tweet in tweets:
                    self.size += 1
                    tweet_date = tweet['date']
                    # String comparison works because dates are ISO formatted.
                    if tweet_date > self.end_date:
                        self.end_date = tweet_date
                    if tweet_date < self.start_date:
                        self.start_date = tweet_date
                    words = self.tokenize(tweet['lemmatizedText'])
                    # update word frequency
                    for word in words:
                        if len(word) > 1:
                            word_frequency[word] = word_frequency.get(word, 0) + 1

        # Sort the vocabulary by decreasing frequency.
        vocabulary = list(word_frequency.items())
        vocabulary.sort(key=lambda x: x[1], reverse=True)
        if save_voc:
            with open('vocabulary.txt', 'w') as output_file:
                output_file.write(str(vocabulary))

        # Keep words that pass both frequency thresholds and are not
        # stop-words; the value is the word's rank by frequency
        # (0 = most frequent, 1 = second most frequent, ...).
        self.vocabulary = {}
        vocabulary_size = 0
        for word, frequency in vocabulary:
            if frequency > min_absolute_freq and frequency / self.size < max_relative_freq \
                    and word not in self.stopwords:
                self.vocabulary[word] = vocabulary_size
                vocabulary_size += 1
        if save_voc:
            with open('self_vocabulary.txt', 'w') as output_file:
                output_file.write(str(self.vocabulary))

        # Convert the date bounds from string to datetime.
        self.start_date = datetime.strptime(self.start_date, "%Y-%m-%d %H:%M:%S")
        self.end_date = datetime.strptime(self.end_date, "%Y-%m-%d %H:%M:%S")
        print(' Corpus: %i tweets, spanning from %s to %s' % (self.size,
                                                              self.start_date,
                                                              self.end_date))
        print(' Vocabulary: %d distinct words' % vocabulary_size)

        self.time_slice_count = None   # number of time slices needed to cover the dataset
        self.tweet_count = None        # number of tweets in each time slice
        self.global_freq = None        # CSR matrix: frequency of each word per time slice
        self.mention_freq = None       # CSR matrix: mention count of each word per time slice
        self.user_freq = None          # CSR matrix: distinct users per word per time slice
        self.time_slice_length = None  # duration of a time slice, in minutes

    def tokenize(self, text):
        """
        Return the lower-cased tokens of *text*, dropping links and stripping
        punctuation; tokens of length <= 1 are discarded.
        """
        # split the documents into tokens based on whitespaces
        words = text.split()
        # Drop links.
        words_without_links = [word for word in words if 'http' not in word]
        # Replace punctuation marks with spaces in case words are glued
        # together. Fix: str.maketrans requires both string arguments to have
        # equal length, so the original two-argument call with a single-space
        # replacement raised ValueError; map every character to one space.
        chars_to_blank = "'!¡?¿.,\"()…“"
        t = str.maketrans(chars_to_blank, ' ' * len(chars_to_blank))
        raw_tokens = ' '.join(words_without_links).translate(t).split()
        # strip() only removes punctuation at the start/end of each token.
        # string.punctuation holds: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~
        punctuation = string.punctuation
        return [token.strip(punctuation).lower() for token in raw_tokens if len(token) > 1]

    def compute_matrices(self, time_slice_length):
        """
        Partition the corpus into time slices and build the frequency matrices
        used by the detection process.

        Arguments:
            time_slice_length: duration of one time slice, in minutes.
        """
        self.time_slice_length = time_slice_length

        # clean the data directory
        if os.path.exists(self.corpus_directory):
            shutil.rmtree(self.corpus_directory)
        os.makedirs(self.corpus_directory)

        # compute the total number of time-slices (always rounding up)
        time_delta = (self.end_date - self.start_date)
        time_delta = time_delta.total_seconds() / 60
        self.time_slice_count = int(math.ceil(time_delta / self.time_slice_length))
        self.tweet_count = np.zeros(self.time_slice_count)
        print(' Number of time-slices: %d' % self.time_slice_count)

        # Create (truncate) the per-slice files. Fix: the original opened the
        # handles without ever closing them; "with" closes each one promptly.
        for time_slice in range(self.time_slice_count):
            with open(self.corpus_directory + str(time_slice), 'w') as dummy_file:
                dummy_file.write('')

        # compute word frequency (dok_matrix allows cheap incremental updates)
        self.global_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.int32)
        self.mention_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.int32)
        self.user_freq = dok_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.int32)
        for input_path in self.input_files:
            with open(input_path, 'r') as input_file:
                reader = csv.DictReader(input_file, delimiter='\t')
                tweets = list(reader)
                # word -> set of users that already used it.
                # NOTE(review): this buffer is reset per input file, not per
                # time slice — confirm that is the intended de-duplication scope.
                user_buffer = {}
                for tweet in tweets:
                    tweet_date = datetime.strptime(tweet['date'], "%Y-%m-%d %H:%M:%S")
                    tweet_user = tweet['authorId']
                    time_delta = (tweet_date - self.start_date)
                    # Minutes elapsed between this tweet and the dataset start.
                    time_delta = time_delta.total_seconds() / 60
                    time_slice = int(time_delta / self.time_slice_length)  # in [0, time_slice_count - 1]
                    self.tweet_count[time_slice] += 1

                    # tokenize the tweet and update word frequency
                    tweet_text = tweet['lemmatizedText']
                    words = self.tokenize(tweet_text)
                    mention = '@' in tweet_text
                    for word in set(words):  # the set drops duplicates within one tweet
                        word_id = self.vocabulary.get(word)
                        if word_id is not None:
                            self.global_freq[word_id, time_slice] += 1  # dok_matrix indexing
                            if mention:
                                self.mention_freq[word_id, time_slice] += 1
                            # Count each user at most once per word.
                            if word in user_buffer:
                                if tweet_user in user_buffer[word]:
                                    continue
                                self.user_freq[word_id, time_slice] += 1
                                user_buffer[word].add(tweet_user)
                                continue
                            user_buffer[word] = set()
                            self.user_freq[word_id, time_slice] += 1
                            user_buffer[word].add(tweet_user)

                    # Append the tweet to its time-slice file as one JSON line.
                    with open(self.corpus_directory + str(time_slice), 'a') as time_slice_file:
                        tweet_json = {
                            'tweetId': tweet['tweetId'],
                            'authorId': tweet['authorId'],
                            'coordinates': tweet['coordinates'],
                            'date': tweet['date'],
                            'text': tweet['text'],
                            'lemmatizedText': tweet['lemmatizedText']
                        }
                        saving_tweet = json.dumps(tweet_json, ensure_ascii=False)
                        time_slice_file.write(saving_tweet + '\n')

        # Freeze the matrices into CSR format for fast row slicing.
        self.global_freq = self.global_freq.tocsr()
        self.mention_freq = self.mention_freq.tocsr()
        self.user_freq = self.user_freq.tocsr()

    def to_date(self, time_slice):
        """Convert a time-slice index (0, 13, 27, ...) back to its start datetime."""
        a_date = self.start_date + timedelta(minutes=time_slice * self.time_slice_length)
        return a_date

    def cooccurring_words(self, event, p):
        """
        Return the (at most) *p* vocabulary words that co-occur most often with
        the event's main word over the event's time span.

        Arguments:
            event: event tuple; event[1] is the (start, end) slice interval
                and event[2] the main word.
            p: maximum number of co-occurring words to return.
        """
        main_word = event[2]
        word_frequency = {}  # co-occurrence count of each word with the main word
        for i in range(event[1][0], event[1][1] + 1):
            with open(self.corpus_directory + str(i), 'r') as input_file:
                for line in input_file.readlines():
                    line_json = json.loads(line)
                    tweet_text = line_json['lemmatizedText']
                    words = self.tokenize(tweet_text)
                    if main_word in words:
                        for word in words:
                            if word != main_word:
                                if self.vocabulary.get(word) is not None:
                                    word_frequency[word] = word_frequency.get(word, 0) + 1
        # Sort words by decreasing co-occurrence frequency.
        vocabulary = list(word_frequency.items())
        vocabulary.sort(key=lambda x: x[1], reverse=True)
        top_cooccurring_words = []
        for word, frequency in vocabulary:
            top_cooccurring_words.append(word)
            if len(top_cooccurring_words) == p:
                break
        # Fix: also return when fewer than p words co-occur; the original
        # fell off the end of the loop and returned None in that case.
        return top_cooccurring_words
<filename>VideoCameraRayIntersection/VideoCameraRayIntersection.py
import os
import vtk
import qt
import slicer
import numpy as np
import logging
from slicer.ScriptedLoadableModule import ScriptedLoadableModule, ScriptedLoadableModuleWidget, ScriptedLoadableModuleLogic, ScriptedLoadableModuleTest
# VideoCameraRayIntersection
class VideoCameraRayIntersection(ScriptedLoadableModule):
  """Module descriptor: registers title, category, dependencies and credits with Slicer."""

  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    module = self.parent  # alias: all metadata hangs off the parent node
    module.title = "VideoCamera Ray Intersection"
    module.categories = ["VideoCameras"]
    module.dependencies = ["VideoCameras", "LinesIntersection", "Annotations"]
    module.contributors = ["<NAME> (Robarts Research Institute)"]
    module.helpText = """This module calculates the offset between ray intersections on an object from multiple videoCamera angles. """ + self.getDefaultModuleDocumentationLink()
    module.acknowledgementText = """This module was developed with support from the Natural Sciences and Engineering Research Council of Canada, the Canadian Foundation for Innovation, and the Virtual Augmentation and Simulation for Surgery and Therapy laboratory, Western University."""
# VideoCameraRayIntersectionWidget
class VideoCameraRayIntersectionWidget(ScriptedLoadableModuleWidget):
  """
  UI widget for the VideoCamera Ray Intersection module.

  Lets the user freeze a video frame, click a fiducial on it, and converts the
  selected pixel into a 3D ray in reference space using the camera intrinsics
  and the tracked camera transform. Rays are forwarded to the logic class,
  which estimates their common intersection point.
  """

  @staticmethod
  def get(widget, objectName):
    """Depth-first search of *widget*'s children for the one whose objectName matches; None if absent."""
    if widget.objectName == objectName:
      return widget
    else:
      for w in widget.children():
        resulting_widget = VideoCameraRayIntersectionWidget.get(w, objectName)
        if resulting_widget:
          return resulting_widget
      return None

  @staticmethod
  def emptyOrZeros(doubleArray):
    """Return True when the vtkDoubleArray is empty or contains only zeros."""
    count = doubleArray.GetNumberOfValues()
    result = True  # NOTE(review): unused — the loop returns directly
    for i in range(0, count):
      if doubleArray.GetValue(i) != 0.0:
        return False
    return True

  @staticmethod
  def areSameVTK4x4(a, b):
    """Element-wise exact equality of two vtkMatrix4x4 instances."""
    for i in range(0, 4):
      for j in range(0, 4):
        if a.GetElement(i,j) != b.GetElement(i,j):
          return False
    return True

  @staticmethod
  def areSameVTK3x3(a, b):
    """Element-wise exact equality of two vtkMatrix3x3 instances."""
    for i in range(0, 3):
      for j in range(0, 3):
        if a.GetElement(i,j) != b.GetElement(i,j):
          return False
    return True

  @staticmethod
  def vtk4x4ToNumpy(vtk4x4):
    """Convert a vtkMatrix4x4 to a 4x4 numpy matrix; returns None for None input."""
    if vtk4x4 is None:
      return
    val = np.asmatrix(np.eye(4, 4, dtype=np.float64))
    for i in range(0, 4):
      for j in range(0, 4):
        val[i, j] = vtk4x4.GetElement(i, j)
    return val

  @staticmethod
  def vtk3x3ToNumpy(vtk3x3):
    """Convert a vtkMatrix3x3 to a 3x3 numpy matrix; returns None for None input."""
    if vtk3x3 is None:
      return
    val = np.asmatrix(np.eye(3, 3, dtype=np.float64))
    for i in range(0, 3):
      for j in range(0, 3):
        val[i, j] = vtk3x3.GetElement(i, j)
    return val

  @staticmethod
  def loadPixmap(param, x, y):
    """Load Resources/Icons/<param>.png (from the videocameracalibration module) as an x-by-y pixmap."""
    iconPath = os.path.join(os.path.dirname(slicer.modules.videocameracalibration.path), 'Resources/Icons/', param + ".png")
    icon = qt.QIcon(iconPath)
    return icon.pixmap(icon.actualSize(qt.QSize(x, y)))

  def __init__(self, parent):
    """Initialize widget state; bails out early when the OpenCV python bindings are missing."""
    ScriptedLoadableModuleWidget.__init__(self, parent)

    # Probe for the OpenCV python bindings (provided by SlicerOpenCV);
    # the module cannot function without them.
    global OPENCV2_AVAILABLE
    try:
      global cv2
      import cv2
      OPENCV2_AVAILABLE = True
    except ImportError:
      OPENCV2_AVAILABLE = False

    if not OPENCV2_AVAILABLE:
      logging.error("OpenCV2 python interface not available.")
      return

    self.logic = VideoCameraRayIntersectionLogic()
    # Capture/selection state flags.
    self.canSelectFiducials = False
    self.isManualCapturing = False
    self.validVideoCamera = False

    self.centerFiducialSelectionNode = None
    self.copyNode = None            # frozen copy of the video frame during capture
    self.widget = None
    self.videoCameraIntrinWidget = None
    self.videoCameraSelector = None
    self.videoCameraNode = None
    self.videoCameraObserverTag = None
    self.videoCameraTransformNode = None
    self.videoCameraTransformObserverTag = None
    self.videoCameraTransformStatusLabel = None

    # Status pixmaps for the transform-tracking indicator.
    self.okPixmap = VideoCameraRayIntersectionWidget.loadPixmap('icon_Ok', 20, 20)
    self.notOkPixmap = VideoCameraRayIntersectionWidget.loadPixmap('icon_NotOk', 20, 20)

    # Inputs/Outputs
    self.imageSelector = None
    self.videoCameraTransformSelector = None

    # Actions
    self.captureButton = None
    self.resetButton = None
    self.actionContainer = None

    # Results
    self.resultsLabel = None

    self.tempMarkupNode = None      # fiducial node scheduled for deletion after capture
    self.sceneObserverTag = None
    self.videoCameraToReference = None  # tracker transform captured at freeze time

    # Identity matrices used to detect uninitialized camera/tracker data.
    self.identity3x3 = vtk.vtkMatrix3x3()
    self.identity4x4 = vtk.vtkMatrix4x4()

  def setup(self):
    """Load the .ui file, wire up selectors/buttons/observers and pick the red-slice layout."""
    ScriptedLoadableModuleWidget.setup(self)

    if not OPENCV2_AVAILABLE:
      self.layout.addWidget(qt.QLabel("OpenCV2 python is required and not available. Check installation/configuration of SlicerOpenCV."))
    else:
      # Load the UI From file
      scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
      scriptedModulesPath = os.path.dirname(scriptedModulesPath)
      path = os.path.join(scriptedModulesPath, 'Resources', 'UI', 'q' + self.moduleName + 'Widget.ui')
      self.widget = slicer.util.loadUI(path)
      self.layout.addWidget(self.widget)

      self.videoCameraIntrinWidget = VideoCameraRayIntersectionWidget.get(self.widget, "videoCameraIntrinsicsWidget")

      # Workaround for videoCamera selector
      self.videoCameraSelector = self.videoCameraIntrinWidget.children()[1].children()[1]

      # Inputs/Outputs
      self.imageSelector = VideoCameraRayIntersectionWidget.get(self.widget, "comboBox_ImageSelector")
      self.videoCameraTransformSelector = VideoCameraRayIntersectionWidget.get(self.widget, "comboBox_VideoCameraTransform")

      self.actionContainer = VideoCameraRayIntersectionWidget.get(self.widget, "widget_ActionContainer")
      self.captureButton = VideoCameraRayIntersectionWidget.get(self.widget, "pushButton_Capture")
      self.resetButton = VideoCameraRayIntersectionWidget.get(self.widget, "pushButton_Reset")
      self.actionContainer = VideoCameraRayIntersectionWidget.get(self.widget, "widget_ActionContainer")

      self.resultsLabel = VideoCameraRayIntersectionWidget.get(self.widget, "label_Results")
      self.videoCameraTransformStatusLabel = VideoCameraRayIntersectionWidget.get(self.widget, "label_VideoCameraTransform_Status")

      # Disable capture as image processing isn't active yet
      self.actionContainer.setEnabled(False)

      # UI file method does not do mrml scene connections, do them manually
      self.videoCameraIntrinWidget.setMRMLScene(slicer.mrmlScene)
      self.imageSelector.setMRMLScene(slicer.mrmlScene)
      self.videoCameraTransformSelector.setMRMLScene(slicer.mrmlScene)

      # Connections
      self.videoCameraSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onVideoCameraSelected)
      self.imageSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onImageSelected)
      self.videoCameraTransformSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onVideoCameraTransformSelected)
      self.captureButton.connect('clicked(bool)', self.onCapture)
      self.resetButton.connect('clicked(bool)', self.onReset)

      # Adding an observer to scene to listen for mrml node
      self.sceneObserverTag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent, self.onNodeAdded)

      # Choose red slice only
      lm = slicer.app.layoutManager()
      lm.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUpRedSliceView)

      # Refresh Apply button state
      self.onSelect()

  def onVideoCameraSelected(self):
    """Re-observe the newly selected camera node and re-validate its calibration."""
    if self.videoCameraNode is not None:
      # NOTE(review): the old observer tag is removed but not set to None here.
      self.videoCameraNode.RemoveObserver(self.videoCameraObserverTag)

    self.videoCameraNode = self.videoCameraSelector.currentNode()

    if self.videoCameraNode is not None:
      self.videoCameraObserverTag = self.videoCameraNode.AddObserver(vtk.vtkCommand.ModifiedEvent, self.onVideoCameraModified)

    self.checkVideoCamera()

  def cleanup(self):
    """Disconnect all signal/slot connections and the scene observer."""
    self.videoCameraSelector.disconnect("currentNodeChanged(vtkMRMLNode*)", self.onVideoCameraSelected)
    self.imageSelector.disconnect("currentNodeChanged(vtkMRMLNode*)", self.onImageSelected)
    self.videoCameraTransformSelector.disconnect("currentNodeChanged(vtkMRMLNode*)", self.onVideoCameraTransformSelected)

    self.captureButton.disconnect('clicked(bool)', self.onCapture)
    self.resetButton.disconnect('clicked(bool)', self.onReset)

    slicer.mrmlScene.RemoveObserver(self.sceneObserverTag)

  @vtk.calldata_type(vtk.VTK_OBJECT)
  def onVideoCameraModified(self, caller, event):
    """Camera node changed: re-run the calibration validity checks."""
    self.checkVideoCamera()

  def checkVideoCamera(self):
    """Validate the selected camera (intrinsics, distortion, tracker calibration) and report in the results label."""
    if self.videoCameraNode is None:
      self.validVideoCamera = False
      return()  # returns an empty tuple — effectively just an early exit
    self.validVideoCamera = True
    string = ""
    # Check state of selected videoCamera
    if VideoCameraRayIntersectionWidget.areSameVTK3x3(self.videoCameraNode.GetIntrinsicMatrix(), self.identity3x3):
      string += "No videoCamera intrinsics! "
      self.validVideoCamera = False
    if VideoCameraRayIntersectionWidget.emptyOrZeros(self.videoCameraNode.GetDistortionCoefficients()):
      string += "No distortion coefficients! "
      self.validVideoCamera = False
    # Missing tracker calibration is reported but does not invalidate the camera.
    if VideoCameraRayIntersectionWidget.areSameVTK4x4(self.videoCameraNode.GetMarkerToImageSensorTransform(), self.identity4x4):
      string += "No tracker calibration performed! "

    if len(string) > 0:
      self.resultsLabel.text = string
    else:
      self.resultsLabel.text = "VideoCamera ok!"

    self.onSelect()

  def onImageSelected(self):
    """Show the selected image in the red slice and verify its pixel spacing is 1mm."""
    # Set red slice to the copy node
    if self.imageSelector.currentNode() is not None:
      slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.imageSelector.currentNode().GetID())
      slicer.app.layoutManager().sliceWidget('Red').sliceLogic().FitSliceToAll()

      # Check pixel spacing, x and y must be 1px = 1mm in order for markups to produce correct pixel locations
      spacing = self.imageSelector.currentNode().GetImageData().GetSpacing()
      if spacing[0] != 1.0 or spacing[1] != 1.0:
        message = "Image does not have 1.0 spacing in x or y, markup fiducials will not represent pixels exactly!"
        logging.error(message)
        self.resultsLabel.text = message
        self.canSelectFiducials = False
      else:
        self.canSelectFiducials = True

    self.onSelect()

  def onReset(self):
    """Clear all accumulated rays in the logic."""
    self.resultsLabel.text = "Reset."
    self.logic.reset()

  def onSelect(self):
    """Enable the action container only when all inputs are selected and valid."""
    self.actionContainer.enabled = self.imageSelector.currentNode() \
                                   and self.videoCameraTransformSelector.currentNode() \
                                   and self.videoCameraSelector.currentNode() \
                                   and self.canSelectFiducials \
                                   and self.validVideoCamera

  def onCapture(self):
    """Freeze the current frame, record the tracker pose, and start fiducial placement (or cancel an active capture)."""
    if self.isManualCapturing:
      # Cancel button hit
      self.endManualCapturing()
      slicer.modules.annotations.logic().StopPlaceMode()
      return()  # returns an empty tuple — effectively just an early exit

    # Record tracker data at time of freeze and store
    videoCameraToReferenceVtk = vtk.vtkMatrix4x4()
    self.videoCameraTransformSelector.currentNode().GetMatrixTransformToParent(videoCameraToReferenceVtk)
    self.videoCameraToReference = VideoCameraRayIntersectionWidget.vtk4x4ToNumpy(videoCameraToReferenceVtk)

    # An identity transform means the sensor is not currently tracked.
    if VideoCameraRayIntersectionWidget.areSameVTK4x4(videoCameraToReferenceVtk, self.identity4x4):
      self.resultsLabel.text = "Invalid transform. Please try again with sensor in view."
      return()  # returns an empty tuple — effectively just an early exit

    # Reset view so that capture button always works
    if self.imageSelector.currentNode() is not None:
      slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.imageSelector.currentNode().GetID())
      slicer.app.layoutManager().sliceWidget('Red').sliceLogic().FitSliceToAll()
      self.onSelect()

    # Make a copy of the volume node (aka freeze cv capture) to allow user to play with detection parameters or click on center
    self.centerFiducialSelectionNode = slicer.mrmlScene.GetNodeByID(slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().GetBackgroundVolumeID())
    self.copyNode = slicer.mrmlScene.CopyNode(self.centerFiducialSelectionNode)
    imData = vtk.vtkImageData()
    imData.DeepCopy(self.centerFiducialSelectionNode.GetImageData())
    self.copyNode.SetAndObserveImageData(imData)
    self.copyNode.SetName('FrozenImage')
    slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.copyNode.GetID())

    # Initiate fiducial selection
    slicer.modules.markups.logic().StartPlaceMode(False)

    # Disable resetting while capture is active
    self.resetButton.setEnabled(False)
    self.isManualCapturing = True
    self.captureButton.setText('Cancel')

  @vtk.calldata_type(vtk.VTK_OBJECT)
  def onNodeAdded(self, caller, event, callData):
    """When a fiducial is placed during capture, convert the clicked pixel into a 3D ray and hand it to the logic."""
    if type(callData) is slicer.vtkMRMLMarkupsFiducialNode and self.isManualCapturing:
      self.endManualCapturing()

      # Calculate point and line pair
      arr = [0,0,0]
      callData.GetMarkupPoint(callData.GetNumberOfMarkups()-1, 0, arr)
      point = np.zeros((1,1,2),dtype=np.float64)
      # NOTE(review): abs() flips the RAS-negative slice coordinates into
      # pixel coordinates — confirm this matches the image orientation.
      point[0,0,0] = abs(arr[0])
      point[0,0,1] = abs(arr[1])

      # Get videoCamera parameters
      mtx = VideoCameraRayIntersectionWidget.vtk3x3ToNumpy(self.videoCameraSelector.currentNode().GetIntrinsicMatrix())

      # Copy the distortion coefficients into a 1xN array (empty if none).
      if self.videoCameraSelector.currentNode().GetDistortionCoefficients().GetNumberOfValues() != 0:
        dist = np.asarray(np.zeros((1, self.videoCameraSelector.currentNode().GetDistortionCoefficients().GetNumberOfValues()), dtype=np.float64))
        for i in range(0, self.videoCameraSelector.currentNode().GetDistortionCoefficients().GetNumberOfValues()):
          dist[0, i] = self.videoCameraSelector.currentNode().GetDistortionCoefficients().GetValue(i)
      else:
        dist = np.asarray([], dtype=np.float64)

      # Calculate the direction vector for the given pixel (after undistortion)
      pixel = np.vstack((cv2.undistortPoints(point, mtx, dist, P=mtx), np.array([1.0], dtype=np.float64)))

      # Get the direction based on selected pixel
      origin_sensor = np.asmatrix([[0.0],[0.0],[0.0],[1.0]], dtype=np.float64)
      # Normalized back-projected ray direction in sensor space (homogeneous, w=0).
      directionVec_sensor = np.vstack(((np.linalg.inv(mtx) * pixel) / np.linalg.norm(np.linalg.inv(mtx) * pixel), np.array([0.0], dtype=np.float64)))

      # Chain sensor -> camera marker -> reference using the pose frozen at capture time.
      sensorToVideoCamera = np.linalg.inv(VideoCameraRayIntersectionWidget.vtk4x4ToNumpy(self.videoCameraSelector.currentNode().GetMarkerToImageSensorTransform()))
      sensorToReference = self.videoCameraToReference * sensorToVideoCamera
      origin_ref = sensorToReference * origin_sensor
      directionVec_ref = sensorToReference * directionVec_sensor

      if self.developerMode:
        logging.debug("origin_ref: " + str(origin_ref).replace('\n',''))
        logging.debug("dir_ref: " + str(directionVec_ref).replace('\n',''))

      # Drop the homogeneous coordinate before handing the ray to the logic.
      result = self.logic.addRay(origin_ref[0:-1], directionVec_ref[0:-1])
      if result is not None:
        self.resultsLabel.text = "Point: " + str(result[0]) + "," + str(result[1]) + "," + str(result[2]) + ". Error: " + str(self.logic.getError())
        if self.developerMode:
          # For ease of copy pasting multiple entries, print it to the python console
          print "Intersection|" + str(result[0]) + "," + str(result[1]) + "," + str(result[2]) + "|" + str(self.logic.getError())

      # Allow markups module some time to process the new markup, but then quickly delete it
      # Avoids VTK errors in log
      self.tempMarkupNode = callData
      qt.QTimer.singleShot(10, self.removeMarkup)

  def endManualCapturing(self):
    """Leave capture mode: restore the live image, drop the frozen copy, re-enable reset."""
    self.isManualCapturing = False
    self.captureButton.setText('Capture')

    # Resume playback
    slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.centerFiducialSelectionNode.GetID())
    slicer.mrmlScene.RemoveNode(self.copyNode)
    self.copyNode = None

    # Re-enable UI
    self.resetButton.setEnabled(True)

  def removeMarkup(self):
    """Delete the temporary fiducial node created during capture (deferred via QTimer)."""
    if self.tempMarkupNode is not None:
      self.tempMarkupNode.RemoveAllMarkups()
      slicer.mrmlScene.RemoveNode(self.tempMarkupNode)
      self.tempMarkupNode = None

  def onVideoCameraTransformSelected(self):
    """Re-observe the newly selected camera transform node for tracking-status updates."""
    if self.videoCameraTransformObserverTag is not None:
      self.videoCameraTransformNode.RemoveObserver(self.videoCameraTransformObserverTag)
      self.videoCameraTransformObserverTag = None

    self.videoCameraTransformNode = self.videoCameraTransformSelector.currentNode()
    if self.videoCameraTransformNode is not None:
      self.videoCameraTransformObserverTag = self.videoCameraTransformNode.AddObserver(slicer.vtkMRMLTransformNode.TransformModifiedEvent, self.onVideoCameraTransformModified)

    self.onSelect()

  @vtk.calldata_type(vtk.VTK_OBJECT)
  def onVideoCameraTransformModified(self, caller, event):
    """Update the tracking-status pixmap: identity transform means the sensor is not tracked."""
    mat = vtk.vtkMatrix4x4()
    self.videoCameraTransformNode.GetMatrixTransformToParent(mat)
    if VideoCameraRayIntersectionWidget.areSameVTK4x4(mat, self.identity4x4):
      self.videoCameraTransformStatusLabel.setPixmap(self.notOkPixmap)
      self.captureButton.enabled = False
    else:
      self.videoCameraTransformStatusLabel.setPixmap(self.okPixmap)
      self.captureButton.enabled = True
# VideoCameraRayIntersectionLogic
class VideoCameraRayIntersectionLogic(ScriptedLoadableModuleLogic):
  """
  Accumulates 3D rays (origin + direction) and estimates their common
  intersection point via the vtkSlicerLinesIntersectionLogic helper.
  """

  def __init__(self):
    # Fix: the base-class initializer was never called, skipping
    # ScriptedLoadableModuleLogic's own setup.
    ScriptedLoadableModuleLogic.__init__(self)
    self.linesRegistrationLogic = slicer.vtkSlicerLinesIntersectionLogic()

  def reset(self):
    """Clear the accumulated list of rays."""
    self.linesRegistrationLogic.Reset()

  def addRay(self, origin, direction):
    """
    Add a ray and return the updated intersection estimate, or None while
    there are not yet enough rays.
    """
    self.linesRegistrationLogic.AddLine(origin, direction)
    # NOTE(review): Count() > 2 requires at least 3 rays before producing an
    # estimate, although 2 non-parallel lines suffice mathematically —
    # confirm the threshold is intentional.
    if self.linesRegistrationLogic.Count() > 2:
      return self.linesRegistrationLogic.Update()
    return None

  def getCount(self):
    """Return the number of rays added so far."""
    return self.linesRegistrationLogic.Count()

  def getPoint(self):
    """Return the current intersection estimate, or None if too few rays."""
    if self.linesRegistrationLogic.Count() > 2:
      return self.linesRegistrationLogic.Update()
    return None

  def getError(self):
    """Return the residual error of the latest intersection estimate."""
    return self.linesRegistrationLogic.GetError()
# VideoCameraRayIntersectionTest
class VideoCameraRayIntersectionTest(ScriptedLoadableModuleTest):
  """Scripted self-test scaffold for the VideoCameraRayIntersection module."""

  def setUp(self):
    """Reset the application state; clearing the MRML scene is enough here."""
    slicer.mrmlScene.Clear(0)

  def runTest(self):
    """Execute each test case after a fresh setUp."""
    self.setUp()
    self.test_VideoCameraRayIntersection1()

  def test_VideoCameraRayIntersection1(self):
    # Placeholder test: only confirms that the test harness runs end to end.
    self.delayDisplay("Starting the test")
    self.delayDisplay('Test passed!')
|
<gh_stars>0
"""
clock.py
Displays a clock with background image on a LILYGO® TTGO T-Display.
The buttons on the module can be used to set the time.
Background images courtesy of the NASA image and video gallery available at
https://images.nasa.gov/
The Font is Copyright 2018 The Pacifico Project Authors (https://github.com/googlefonts/Pacifico)
This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at:
http://scripts.sil.org/OFL
"""
import gc
import utime
from machine import Pin, SPI, RTC
import st7789
import pacifico60 as font
rtc = RTC()  # real-time clock used to keep and set the displayed time
background_lock = 0  # prevents background change while > 0
class Button:
    """
    Debounced pin handler
    Modified from https://gist.github.com/jedie/8564e62b0b8349ff9051d7c5a1312ed7
    """

    def __init__(self, pin, callback, trigger=Pin.IRQ_FALLING, debounce=350):
        """
        Arguments:
            pin: machine.Pin to watch.
            callback: function called with the pin on a debounced press.
            trigger: IRQ edge to react to.
            debounce: minimum milliseconds between accepted presses.
        """
        self.callback = callback
        self.debounce = debounce
        # Fix: use ticks_add/ticks_diff instead of plain "+"/">" so the
        # comparison stays correct when the millisecond tick counter wraps
        # around (MicroPython ticks are modular, not monotonic integers).
        self._next_call = utime.ticks_add(utime.ticks_ms(), self.debounce)
        pin.irq(trigger=trigger, handler=self.debounce_handler)

    def call_callback(self, pin):
        """Invoke the user callback."""
        self.callback(pin)

    def debounce_handler(self, pin):
        """IRQ handler: forward the event unless it falls inside the debounce window."""
        now = utime.ticks_ms()
        if utime.ticks_diff(now, self._next_call) > 0:
            self._next_call = utime.ticks_add(now, self.debounce)
            self.call_callback(pin)
def hour_pressed(pin):
    """Button IRQ: advance the clock by one hour."""
    global background_lock, rtc
    tm = rtc.datetime()
    # Fix: RTC.datetime() on the ESP32 port is (year, month, day, weekday,
    # hours, minutes, seconds, subseconds), so hours is tm[4]; the original
    # incremented tm[5], which is the minutes field.
    rtc.init((tm[0], tm[1], tm[2], tm[3], tm[4]+1, tm[5], tm[6], tm[7]))
    background_lock = 10  # hold the current background while the time is being set
def minute_pressed(pin):
    """Button callback: advance the RTC one minute and hold the background."""
    global background_lock, rtc
    tm = rtc.datetime()
    # RTC tuple is (year, month, day, weekday, hours, minutes, seconds, subseconds).
    # BUG FIX: minutes is tm[5]; the original incremented tm[4] (the hours),
    # i.e. the hour and minute buttons were swapped.
    rtc.init((tm[0], tm[1], tm[2], tm[3], tm[4], tm[5] + 1, tm[6], tm[7]))
    # Hold the current background for ~10 loop ticks while the time is being set.
    background_lock = 10
def main():
    """
    Initialize the display and show the time.

    Runs forever: draws a background image, then twice a second redraws only
    the digits that changed. The module buttons (see hour_pressed and
    minute_pressed) adjust the RTC and temporarily hold the background.
    """
    global background_lock
    # TTGO T-Display: 135x240 ST7789 panel on SPI1, rotation=3 -> landscape
    tft = st7789.ST7789(
        SPI(1, baudrate=30000000, sck=Pin(18), mosi=Pin(19)),
        135,
        240,
        reset=Pin(23, Pin.OUT),
        cs=Pin(5, Pin.OUT),
        dc=Pin(16, Pin.OUT),
        backlight=Pin(4, Pin.OUT),
        rotation=3)

    tft.init()

    # image, Time Column, Time Row, Time Color
    backgrounds = [
        ('1.jpg', 30, 50, st7789.WHITE),
        ('2.jpg', 30, 50, st7789.WHITE),
        ('3.jpg', 30, 50, st7789.WHITE),
        ('4.jpg', 30, 50, st7789.WHITE),
        ('5.jpg', 30, 50, st7789.WHITE),
        ('6.jpg', 30, 50, st7789.WHITE),
        ('7.jpg', 30, 50, st7789.WHITE),
        ('8.jpg', 30, 50, st7789.WHITE)]

    digit_columns = []
    background_change = True    # True forces a (re)draw of the background
    background_counter = 0      # index of the next background image to show
    time_col = 0
    time_row = 0
    time_color = 0
    last_time = "-----"         # sentinel: no digit matches, so all are drawn

    # Hour and minute set buttons on the module's two push buttons
    Button(pin=Pin(35, mode=Pin.IN, pull=Pin.PULL_UP), callback=hour_pressed)
    Button(pin=Pin(0, mode=Pin.IN, pull=Pin.PULL_UP), callback=minute_pressed)

    while True:
        # create new digit_backgrounds and change the background image
        if background_change:
            image, time_col, time_row, time_color = backgrounds[background_counter]
            background_counter += 1
            background_counter %= len(backgrounds)
            background_change = False

            # clear the old backgrounds and gc
            digit_background = []
            gc.collect()

            # draw the new background
            tft.jpg(image, 0, 0, st7789.SLOW)

            # calculate the starting column for each time digit
            digit_columns = [time_col + digit *
                             font.MAX_WIDTH for digit in range(5)]

            # nudge the ':' to the right since it is narrower then the digits
            digit_columns[2] += font.MAX_WIDTH // 4

            # get the background bitmap behind each clock digit from the jpg file and store it
            # in a list so it can be used to write each digit simulating transparency.
            digit_background = [
                tft.jpg_decode(
                    image,                  # jpg file name
                    digit_columns[digit],   # column to start bitmap at
                    time_row,               # row to start bitmap at
                    font.MAX_WIDTH,         # width of bitmap to save
                    font.HEIGHT)            # height of bitmap to save
                for digit in range(5)
            ]

            # cause all digits to be updated
            last_time = "-----"

        # get the current hour and minute
        _, _, _, hour, minute, second, _, _ = utime.localtime()

        # 12 hour time
        if hour == 0:
            hour = 12

        if hour > 12:
            hour -= 12

        # format time string as "HH:MM"; the ':' blinks off on odd seconds
        time_fmt = "{:02d}:{:02d}" if second % 2 == 0 else "{:02d} {:02d}"
        time = time_fmt.format(hour, minute)

        # loop through the time string
        for digit in range(5):
            # Check if this digit has changed
            if time[digit] != last_time[digit]:
                # digit 1 is the ones digit of the hour: when it changes (once
                # an hour) rotate the background, unless a button press has
                # locked it or this is the initial '-----' draw.
                if digit == 1 and last_time[digit] != '-' and background_lock == 0:
                    background_change = True

                # draw the changed digit, don't fill to the right
                # of the ':' because it is always the same width
                tft.write(
                    font,                       # the font to write to the display
                    time[digit],                # time string digit to write
                    digit_columns[digit],       # write to the correct column
                    time_row,                   # write on row
                    time_color,                 # color of time text
                    st7789.BLACK,               # transparent background color
                    digit_background[digit],    # use the background bitmap
                    digit != 2)                 # don't fill to the right of the ':'

        # save the current time
        last_time = time

        # count down the background hold set by the set buttons
        if background_lock:
            background_lock -= 1

        utime.sleep(0.5)
        gc.collect()


main()
|
import board
from kmk.boards.macropact import KMKKeyboard
from kmk.keys import KC, make_key
from kmk.rotary_encoder import Encoder
from kmk.ips import IPS, ips_config
keyboard = KMKKeyboard()
#keyboard.debug_enabled = True
def onRotateA(direction):
    """Encoder A default action: tap '}' or '{' depending on spin direction."""
    if direction == 0:
        return
    key = KC.RBRC if direction > 0 else KC.LBRC
    keyboard._state.tap_key(KC.LSFT(key))
def rgbv_onRotateA(direction):
    """Encoder A while RGBV is held: adjust LED brightness (HSV value)."""
    if direction == 0:
        return
    adjust = keyboard.pixels.increase_val if direction > 0 else keyboard.pixels.decrease_val
    adjust()
def rgbh_onRotateA(direction):
    """Encoder A while RGBH is held: adjust LED hue."""
    if direction == 0:
        return
    adjust = keyboard.pixels.increase_hue if direction > 0 else keyboard.pixels.decrease_hue
    adjust()
def rgbs_onRotateA(direction):
    """Encoder A while RGBS is held: adjust LED saturation."""
    if direction == 0:
        return
    adjust = keyboard.pixels.increase_sat if direction > 0 else keyboard.pixels.decrease_sat
    adjust()
def onRotateB(direction):
    """Encoder B action: tap ']' or '[' depending on spin direction."""
    if direction == 0:
        return
    key = KC.RBRC if direction > 0 else KC.LBRC
    keyboard._state.tap_key(key)
def set_default_handler(*args, **kwargs):
    """Key-release hook: restore encoder A's default bracket-typing handler."""
    encoder_a = keyboard.encoders[0]
    encoder_a.onRotate = onRotateA
def set_handler_rgbv(*args, **kwargs):
    """Key-press hook: route encoder A to brightness (value) control."""
    encoder_a = keyboard.encoders[0]
    encoder_a.onRotate = rgbv_onRotateA
def set_handler_rgbh(*args, **kwargs):
    """Key-press hook: route encoder A to hue control."""
    encoder_a = keyboard.encoders[0]
    encoder_a.onRotate = rgbh_onRotateA
def set_handler_rgbs(*args, **kwargs):
    """Key-press hook: route encoder A to saturation control."""
    encoder_a = keyboard.encoders[0]
    encoder_a.onRotate = rgbs_onRotateA
# Encoder A (GP0/GP1) defaults to typing braces; encoder B (GP2/GP3) types brackets.
keyboard.encoders = [Encoder(board.GP0, board.GP1, onRotateA), Encoder(board.GP2, board.GP3, onRotateB)]
# IPS display used to show one bitmap per active layer (L0/L1/L2.bmp).
keyboard.ips = IPS()

# Momentary layer keys that also swap the displayed layer bitmap on press/release.
LAYER1 = KC.MO(1)
LAYER1.after_press_handler(lambda *args, **kwargs: keyboard.ips.load_bitmap("L1.bmp"))
LAYER1.after_release_handler(lambda *args, **kwargs: keyboard.ips.load_bitmap("L0.bmp"))
LAYER2 = KC.MO(2)
LAYER2.after_press_handler(lambda *args, **kwargs: keyboard.ips.load_bitmap("L2.bmp"))
LAYER2.after_release_handler(lambda *args, **kwargs: keyboard.ips.load_bitmap("L0.bmp"))

# While held, these keys re-route encoder A to RGB value/hue/saturation control;
# releasing restores the default handler (see set_default_handler).
RGBV = make_key(on_press=set_handler_rgbv, on_release=set_default_handler)
RGBH = make_key(on_press=set_handler_rgbh, on_release=set_default_handler)
RGBS = make_key(on_press=set_handler_rgbs, on_release=set_default_handler)

# Layer 0: base keys; layer 1: function keys and modifiers/shortcuts;
# layer 2: RGB controls. Each row below is one 5-key physical row.
keyboard.keymap = [
    [KC.W, KC.E, KC.T, KC.Y, KC.NO,
     KC.S, KC.G, KC.J, KC.L, KC.NO,
     KC.Z, KC.C, KC.V, KC.B, KC.NO,
     KC.G, KC.O, KC.P, LAYER2, LAYER1,
     ],
    [KC.F1, KC.E, KC.LCMD(KC.J), KC.LCMD(KC.LSFT(KC.EQUAL)), KC.NO,
     KC.F6, KC.G, KC.LCMD(KC.LSFT(KC.J)), KC.LCMD(KC.MINUS), KC.NO,
     KC.Z, KC.C, KC.LCMD(KC.T), KC.LCMD(KC.N0), KC.NO,
     KC.LSFT, KC.LCTL, KC.LALT, KC.LCMD, KC.TRNS,
     ],
    [RGBV, KC.NO, KC.NO, KC.NO, KC.NO,
     RGBH, KC.NO, KC.NO, KC.NO, KC.NO,
     RGBS, KC.NO, KC.NO, KC.NO, KC.NO,
     KC.RGB_TOG, KC.NO, KC.NO, KC.TRNS, KC.TRNS,
     ],
]

# RGB pixel configuration: 7 pixels on GP28, defaults sat=0 / val=255, step 5.
keyboard.rgb_pixel_pin = board.GP28
keyboard.rgb_config['num_pixels'] = 7
keyboard.rgb_config['sat_default'] = 0
keyboard.rgb_config['val_default'] = 255
keyboard.rgb_config['val_step'] = 5
keyboard.rgb_config['hue_step'] = 5
keyboard.rgb_config['sat_step'] = 5

if __name__ == '__main__':
    # Show the base-layer bitmap, then enter the keyboard main loop.
    keyboard.ips.load_bitmap("L0.bmp")
    keyboard.go()
|
<filename>app/database.py
from sqlalchemy import create_engine, distinct, func, desc
from sqlalchemy.orm import sessionmaker
from geoalchemy2 import functions as geo_func
from datetime import datetime, timedelta
import config
from base import Base
from schema import Tweet, Hashtag, Picture, Division
import codecs, json
class Database:
    """Persistence facade for the tweet/hashtag/picture/division schema.

    Wraps a SQLAlchemy engine and session and provides the insert and
    reporting queries used by the application.
    """

    def __init__(self, uri, debug=False, drop_all=config.TRUNCATE_TABLES, divisions_file="data/divisions.json"):
        """Connect to the database at *uri* and prepare the schema.

        debug -- echo SQL statements
        drop_all -- when true, drop and recreate all tables (truncate mode)
        divisions_file -- GeoJSON file used to seed the divisions table when empty
        """
        self.__debug = debug
        self.__engine = create_engine(uri, echo=debug, client_encoding='utf8')
        self.__base = Base
        # Recreate the schema from scratch when truncating (typically debugging)
        if drop_all:
            self.__base.metadata.drop_all(self.__engine)
        self.__base.metadata.create_all(self.__engine)
        self.session = sessionmaker(bind=self.__engine)()
        # Seed the divisions table from GeoJSON when it is empty
        # (e.g. right after the tables were dropped above)
        count = self.session.query(func.count(Division.id)).scalar()
        if count == 0:
            features = self.load_geojson(divisions_file)
            self.create_divisions(features)

    def load_geojson(self, filename):
        """Parse *filename* as GeoJSON and return its list of feature objects."""
        # 'with' closes the file; the original's explicit close() was redundant,
        # and 'file' shadowed a builtin name.
        with codecs.open(filename, 'r', "utf-8") as fh:
            parsed = json.loads(fh.read())
        return parsed["features"]

    def create_divisions(self, features):
        """Insert one Division row per GeoJSON feature and commit."""
        for feature in features:
            self.session.add(Division(feature))
        self.session.commit()

    def get_division(self, geom):
        """Return the Division whose geometry contains *geom*, or None."""
        return self.session.query(Division).filter(Division.geom.ST_Contains(geom)).first()

    def close(self):
        """Dispose of the engine and its connection pool."""
        self.__engine.dispose()

    def extract_hashtags(self, hashtags, tweetClass):
        """Attach hashtag entities to the given database Tweet object.

        Reuses an existing Hashtag row when one with the same lowercased text
        exists, so the association table references a single row per tag.
        """
        # Convert hashtags to lowercase and remove duplicates
        ht_text = {ht["text"].lower() for ht in hashtags}
        for ht in ht_text:
            new_ht = self.session.query(Hashtag).filter(Hashtag.text == ht).first()
            if new_ht is None:
                new_ht = Hashtag(text=ht)
            tweetClass.hashtags.append(new_ht)

    def filter(self, tweet):
        """Return True when the tweet's author is blacklisted."""
        screen_name = tweet["user"]["screen_name"].lower()
        for name in config.BLACKLIST_SCREENNAMES_EQ:
            # BUG FIX: the original used 'is', an identity comparison that
            # practically never matches two distinct strings; compare
            # case-insensitively, consistent with the branches below.
            if screen_name == name.lower():
                return True
        for name in config.BLACKLIST_SCREENNAMES_STARTSWITH:
            if screen_name.startswith(name.lower()):
                return True
        for name in config.BLACKLIST_SCREENNAMES_ENDSWITH:
            if screen_name.endswith(name.lower()):
                return True
        return False

    def insert_tweet(self, tweet):
        """Insert an API tweet dict; return the Tweet row, or None when skipped."""
        if self.filter(tweet):
            return None
        db_tweet = Tweet(tweet)
        # Skip tweets whose point does not lie inside one of our divisions
        div = self.get_division(db_tweet.geom)
        if div is None:
            return None
        db_tweet.division_id = div.id
        if "hashtags" in tweet["entities"]:
            self.extract_hashtags(tweet["entities"]["hashtags"], db_tweet)
        # Add the tweet to the session
        self.session.add(db_tweet)
        self.session.commit()
        return db_tweet

    def get_heatmap_geom(self):
        """Return (lat, long) pairs for all tweets recorded in the past 24 hours."""
        hour_ago = datetime.now() - timedelta(hours=24)
        return self.session.query(geo_func.ST_Y(Tweet.geom), geo_func.ST_X(Tweet.geom)).filter(Tweet.created_at > hour_ago).all()

    def get_last_tweets(self):
        """Return tweets from the past hour throughout the entire AOI."""
        hour_ago = datetime.now() - timedelta(hours=1)
        return self.session.query(Tweet).filter(Tweet.created_at > hour_ago).all()

    def get_division_ids(self):
        """Return a list of all division ids."""
        # Materialize as a list (a lazy map object would be exhausted after one pass)
        return [row[0] for row in self.session.query(Division.id).all()]

    def get_tweet_counts_by_division(self, div_id=None):
        """Hourly tweet counts over the past 24 hours, optionally for one division.

        Equivalent SQL::

            with series as (
                select * from generate_series(
                    date_trunc('hour', now() - interval '1 week'),
                    now(),
                    interval '1 hour') time
            ), tw_count as (
                select distinct date_trunc('hour', created_at) as time,
                    count(*) over (partition by date_trunc('hour', created_at))
                from tweets.tweets
                where created_at > now() - interval '1 week'
                and division_id = 77
            )
            select series.time,
                coalesce(tw_count.count, 0) as count
            from series
            left join tw_count on (tw_count.time = series.time);

        Returns a list of {"time": iso8601-string, "count": int} dicts.
        """
        # One row per hour in the window, so hours with no tweets still appear
        series = self.session.query(
            func.generate_series(
                func.date_trunc('hour', datetime.utcnow() - timedelta(hours=24)),
                datetime.utcnow(),
                timedelta(hours=1)).label('time')).subquery()
        tw_count = self.session.query(
            func.date_trunc('hour', Tweet.created_at).label('time'),
            func.count('*').over(partition_by=func.date_trunc('hour', Tweet.created_at)).label('count')).\
            filter(Tweet.created_at > datetime.utcnow() - timedelta(hours=24)).\
            distinct()
        if div_id is not None:
            tw_count = tw_count.filter(Tweet.division_id == div_id)
        tw_count = tw_count.subquery()
        res = self.session.query(
            series.c.time,
            func.coalesce(tw_count.c.count, 0).label('count')).\
            select_from(series).\
            outerjoin(tw_count, tw_count.c.time == series.c.time).all()
        return [{"time": row[0].isoformat(), "count": row[1]} for row in res]

    def get_active_users_by_division(self, div_id):
        """Return the number of unique users tweeting in *div_id* over the
        past hour, day, and week, as {"hour": n, "day": n, "week": n}.

        TODO: Convert all to one query
        """
        users = self.session.query(Tweet.user_id).\
            filter(Tweet.division_id == div_id)
        queries = [
            users.filter(Tweet.created_at > datetime.utcnow() - timedelta(hours=1)).distinct().subquery(),
            users.filter(Tweet.created_at > datetime.utcnow() - timedelta(hours=24)).distinct().subquery(),
            users.filter(Tweet.created_at > datetime.utcnow() - timedelta(days=7)).distinct().subquery()
        ]
        # BUG FIX: the original indexed a lazy map object, which fails on
        # Python 3; use a list comprehension instead.
        resolved = [self.session.query(func.count(q.c.user_id)).scalar() for q in queries]
        return {
            "hour": resolved[0],
            "day": resolved[1],
            "week": resolved[2]
        }

    def get_pictures_in_extent(self, sw_lat, sw_long, ne_lat, ne_long):
        """Return up to 10 recent pictures taken inside the lat/long bounding box."""
        # Closed ring (first point repeated) in WGS84
        wkt = "SRID=4326;POLYGON(({} {}, {} {}, {} {}, {} {}, {} {}))".format(ne_long, ne_lat, sw_long, ne_lat, sw_long, sw_lat, ne_long, sw_lat, ne_long, ne_lat)
        pics = self.session.query(Picture).join(Tweet).filter(geo_func.ST_Within(Tweet.geom, wkt)).order_by(Tweet.created_at.desc()).limit(10)
        return pics
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetStacksResult',
'AwaitableGetStacksResult',
'get_stacks',
]
@pulumi.output_type
class GetStacksResult:
    """
    A collection of values returned by getStacks.
    """
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen). The
    # @pulumi.output_type decorator and @pulumi.getter(name=...) annotations
    # map these attributes onto the provider wire format, so the structure
    # must not be restructured by hand.
    def __init__(__self__, compartment_id=None, display_name=None, filters=None, id=None, stacks=None, state=None):
        # Each argument is type-checked (when present) and stored via pulumi.set()
        if compartment_id and not isinstance(compartment_id, str):
            raise TypeError("Expected argument 'compartment_id' to be a str")
        pulumi.set(__self__, "compartment_id", compartment_id)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if filters and not isinstance(filters, list):
            raise TypeError("Expected argument 'filters' to be a list")
        pulumi.set(__self__, "filters", filters)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if stacks and not isinstance(stacks, list):
            raise TypeError("Expected argument 'stacks' to be a list")
        pulumi.set(__self__, "stacks", stacks)
        if state and not isinstance(state, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", state)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        Unique identifier ([OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)) for the compartment where the stack is located.
        """
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        Human-readable display name for the stack.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def filters(self) -> Optional[Sequence['outputs.GetStacksFilterResult']]:
        """
        The filters that were supplied to the query, if any.
        """
        return pulumi.get(self, "filters")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Unique identifier ([OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)) for the stack.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def stacks(self) -> Sequence['outputs.GetStacksStackResult']:
        """
        The list of stacks.
        """
        return pulumi.get(self, "stacks")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        The current lifecycle state of the stack.
        """
        return pulumi.get(self, "state")
class AwaitableGetStacksResult(GetStacksResult):
    """Awaitable wrapper for GetStacksResult.

    Lets callers 'await' the result in async code: the 'if False: yield'
    makes __await__ a generator that never actually suspends, so awaiting
    resolves immediately to a plain GetStacksResult.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetStacksResult(
            compartment_id=self.compartment_id,
            display_name=self.display_name,
            filters=self.filters,
            id=self.id,
            stacks=self.stacks,
            state=self.state)
def get_stacks(compartment_id: Optional[str] = None,
               display_name: Optional[str] = None,
               filters: Optional[Sequence[pulumi.InputType['GetStacksFilterArgs']]] = None,
               id: Optional[str] = None,
               state: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStacksResult:
    """
    This data source provides the list of Stacks in Oracle Cloud Infrastructure Resource Manager service.

    Returns a list of stacks.
    - If called using the compartment ID, returns all stacks in the specified compartment.
    - If called using the stack ID, returns the specified stack.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_oci as oci

    test_stacks = oci.resourcemanager.get_stacks(compartment_id=var["compartment_id"],
        display_name=var["stack_display_name"],
        id=var["stack_id"],
        state=var["stack_state"])
    ```

    :param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) on which to filter.
    :param str display_name: Display name on which to query.
    :param str id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) on which to query for a stack.
    :param str state: A filter that returns only those resources that match the specified lifecycle state. The state value is case-insensitive.
    """
    # Invoke arguments use the provider's camelCase wire names
    __args__ = dict()
    __args__['compartmentId'] = compartment_id
    __args__['displayName'] = display_name
    __args__['filters'] = filters
    __args__['id'] = id
    __args__['state'] = state
    # Default the invoke options and pin the SDK version when unset
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke; the raw result is re-wrapped in the awaitable type
    __ret__ = pulumi.runtime.invoke('oci:resourcemanager/getStacks:getStacks', __args__, opts=opts, typ=GetStacksResult).value

    return AwaitableGetStacksResult(
        compartment_id=__ret__.compartment_id,
        display_name=__ret__.display_name,
        filters=__ret__.filters,
        id=__ret__.id,
        stacks=__ret__.stacks,
        state=__ret__.state)
|
from math import exp
from scipy import optimize
'''
References
-----------
[1] <NAME>, <NAME>, and <NAME>, "PV panel model based on datasheet values," in Industrial Electronics, 2007. ISIE 2007. IEEE International Symposium on, 2007, pp. 2392-2396.
'''
class ParameterExtraction(object):
    """Extract single-diode PV model parameters from datasheet values.

    Given the short-circuit current, open-circuit voltage and maximum power
    point from a panel datasheet, solves the three-equation system of [1]
    (see module docstring) for the series resistance, shunt resistance and
    diode quality factor.
    """

    # Physical constants (SI units) and the STC cell temperature in kelvin
    boltzmann_constant = 1.38065e-23
    charge_of_electron = 1.602e-19
    nominal_temperature = 25 + 273

    def __init__(self, short_circuit_current, open_circuit_voltage,
                 maximum_power_point_current, maximum_power_point_voltage,
                 number_of_cells_in_series=1,
                 **optional_keyword_arguments):
        """Store the datasheet values.

        Optional keyword argument:
        number_of_iterations -- 'maxfev' passed to the solver (None = solver default)
        """
        self.__short_circuit_current = short_circuit_current
        self.__open_circuit_voltage = open_circuit_voltage
        self.__maximum_power_point_current = maximum_power_point_current
        self.__maximum_power_point_voltage = maximum_power_point_voltage
        self.__number_of_cells_in_series = number_of_cells_in_series
        self.number_of_iterations = optional_keyword_arguments.get('number_of_iterations', None)

    #
    # Alias methods in order to make long equations readable:
    #
    def isc(self):
        """Short-circuit current."""
        return self.__short_circuit_current

    def voc(self):
        """Open-circuit voltage."""
        return self.__open_circuit_voltage

    def impp(self):
        """Current at the maximum power point."""
        return self.__maximum_power_point_current

    def vmpp(self):
        """Voltage at the maximum power point."""
        return self.__maximum_power_point_voltage

    def ns(self):
        """Number of cells in series."""
        return self.__number_of_cells_in_series
    #
    # End of alias methods definition
    #

    def calculate(self, parameter_estimates=(1, 1, 1)):
        """Solve for the model parameters and store them on the instance.

        parameter_estimates -- initial guesses as
            [series_resistance, shunt_resistance, diode_quality_factor].
            Note: internally the third unknown is the thermal voltage, so the
            quality-factor estimate is converted before solving.
        Returns the scipy.optimize result object; the solved values are also
        stored as series_resistance, shunt_resistance, thermal_voltage and
        diode_quality_factor attributes.
        """
        thermal_voltage_estimate = self.__thermal_voltage_estimate(parameter_estimates[2])
        initial_guess = [parameter_estimates[0], parameter_estimates[1], thermal_voltage_estimate]
        if self.number_of_iterations is None:
            solution = optimize.root(self.__function_of_three_equations, initial_guess)
        else:
            solution = optimize.root(self.__function_of_three_equations, initial_guess,
                                     options={'maxfev': self.number_of_iterations})
        self.series_resistance = solution.x[0]
        self.shunt_resistance = solution.x[1]
        self.thermal_voltage = solution.x[2]
        self.diode_quality_factor = self.__diode_quality_factor()
        return solution

    def __diode_quality_factor(self):
        """Convert the solved thermal voltage back to a diode quality factor."""
        return (self.thermal_voltage * self.charge_of_electron) / (self.boltzmann_constant * self.nominal_temperature)

    def __thermal_voltage_estimate(self, diode_quality_factor_estimate):
        """Convert a diode quality factor estimate to a thermal voltage."""
        return (diode_quality_factor_estimate * self.boltzmann_constant * self.nominal_temperature) / self.charge_of_electron

    def __function_of_three_equations(self, unknown_parameters_vector):
        """Residuals of the three-equation system of [1].

        unknown_parameters_vector = [series_resistance, shunt_resistance, thermal_voltage]
        """
        rs, rsh, vt = unknown_parameters_vector
        isc, voc = self.isc(), self.voc()
        impp, vmpp = self.impp(), self.vmpp()
        nsvt = self.ns() * vt  # Ns * thermal voltage, common to every exponent

        def didv(exponent_argument):
            # dI/dV of the single-diode model evaluated with the given
            # exponent argument; shared by equations (18) and (19) of [1].
            common = ((isc * rsh - voc + isc * rs) * exp(exponent_argument)) / (nsvt * rsh)
            return (-common - (1 / rsh)) / (1 + common + (rs / rsh))

        # Equation (12) of [1] with Impp moved to the right hand side ("0 = ...")
        eq_mpp_current = (isc
                          - ((vmpp + impp * rs - isc * rs) / rsh)
                          - (isc - ((voc - isc * rs) / rsh))
                          * exp((vmpp + impp * rs - voc) / nsvt)
                          - impp)
        # Equation (18) of [1]: dP/dV = 0 at the maximum power point
        eq_mpp_slope = impp + vmpp * didv((vmpp + impp * rs - voc) / nsvt)
        # Equation (19) of [1]: dI/dV = -1/Rsh at short circuit, with -1/Rsh
        # moved to the right hand side ("0 = ...")
        eq_sc_slope = didv((isc * rs - voc) / nsvt) + (1 / rsh)

        return [eq_mpp_current, eq_mpp_slope, eq_sc_slope]

    # Note: we rely on optimize.root's numerical Jacobian estimate; an
    # analytical Jacobian was started in an earlier revision but never
    # completed, and the partial derivation has been removed.
|
<gh_stars>0
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Generate a QC plot for BOLD fMRI motion estimates from an OpenFMRI dataset
"""
# magic line for manpage summary
# man: -*- % BOLD fMRI motion QC plot for an OpenFMRI dataset
import mvpa2
__docformat__ = 'restructuredtext'
def setup_parser(parser):
    """Configure *parser* with this command's options and return it.

    parser -- an argparse.ArgumentParser (or compatible) instance; it is
    modified in place and also returned for chaining.
    """
    parser.add_argument(
        '--path', metavar='PATH', required=True,
        help="""path to the root directory of the OpenFMRI dataset""")
    parser.add_argument(
        '-t', '--task', metavar='ID', type=int, default=1,
        help="""ID of the task with BOLD fMRI data for the QC plot.
        Default: 1""")
    parser.add_argument(
        '--estimate-fname', metavar='NAME', default='bold_moest.txt',
        help="""name of the files with the motion estimates in each BOLD
        task run folder. Default: bold_moest.txt""")
    # BUG FIX: corrected the "suject" typo in the user-visible help text
    parser.add_argument(
        '--exclude-subjs', metavar='ID', nargs='+',
        help="""list of space-separated subject IDs to exclude from the QC
        analysis""")
    parser.add_argument(
        '--outlier-minthresh', type=float, default=None,
        help="""minimum absolute threshold for outlier detection.
        Default: None""")
    parser.add_argument(
        '--outlier-stdthresh', type=float, default=None,
        help="""minimum threshold in units of standard deviation
        for outlier detection. Default: None""")
    parser.add_argument(
        '--savefig', default=None,
        help="""file name to store the QC figure under. Default: None""")
    return parser
def run(args):
    """Execute the motion QC plot command.

    Loads the per-run motion estimates for the selected task, reduces each
    volume's translation and rotation estimates to an L2-norm, and renders
    two stacked time series box plots. Shows the figure interactively, or
    saves it when --savefig was given.
    """
    import numpy as np
    import pylab as pl
    from mvpa2.datasets.sources.openfmri import OpenFMRIDataset
    from mvpa2.misc.plot import timeseries_boxplot, concat_ts_boxplot_stats
    from mvpa2.misc.stats import compute_ts_boxplot_stats

    of = OpenFMRIDataset(args.path)
    data = of.get_task_bold_attributes(
        args.task, args.estimate_fname, np.loadtxt,
        exclude_subjs=args.exclude_subjs)
    # number of volumes per run, used to mark run boundaries in the plots
    segment_sizes = [len(d[0]) for d in data]

    # figure setup
    pl.figure(figsize=(12, 5))
    ax = pl.subplot(211)

    # translation: L2-norm of the first three estimate columns
    run_stats = [compute_ts_boxplot_stats(
        d[..., :3],
        outlier_abs_minthresh=args.outlier_minthresh,
        outlier_thresh=args.outlier_stdthresh,
        aggfx=np.linalg.norm,
        greedy_outlier=True)
        for d in data]
    stats = concat_ts_boxplot_stats(run_stats)
    timeseries_boxplot(stats[0]['median'],
                       mean=stats[0]['mean'], std=stats[0]['std'], n=stats[0]['n'],
                       min=stats[0]['min'], max=stats[0]['max'],
                       p25=stats[0]['p25'], p75=stats[0]['p75'],
                       outlierd=stats[1], segment_sizes=segment_sizes)
    pl.title('translation')
    # suppress x tick labels on the upper panel; the lower panel carries them
    xp, xl = pl.xticks()
    pl.xticks(xp, ['' for i in xl])
    pl.xlim((0, len(stats[0]['n'])))
    pl.ylabel('estimate L2-norm in mm')

    # rotation: L2-norm of the remaining estimate columns
    ax = pl.subplot(212)
    run_stats = [compute_ts_boxplot_stats(
        d[..., 3:],
        outlier_abs_minthresh=args.outlier_minthresh,
        outlier_thresh=args.outlier_stdthresh,
        aggfx=np.linalg.norm,
        greedy_outlier=True)
        for d in data]
    stats = concat_ts_boxplot_stats(run_stats)
    timeseries_boxplot(stats[0]['median'],
                       mean=stats[0]['mean'], std=stats[0]['std'], n=stats[0]['n'],
                       min=stats[0]['min'], max=stats[0]['max'],
                       p25=stats[0]['p25'], p75=stats[0]['p75'],
                       outlierd=stats[1], segment_sizes=segment_sizes)
    pl.xlim((0, len(stats[0]['n'])))
    pl.title('rotation')
    pl.ylabel('estimate L2-norm in deg')
    pl.xlabel('time in fMRI volumes')

    if args.savefig is None:
        pl.show()
    else:
        # BUG FIX: the original read the non-existent 'args.safefig', raising
        # AttributeError whenever --savefig was actually used.
        pl.savefig(args.savefig)
|
<filename>arkane/kinetics.py
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2020 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
Arkane kinetics module
"""
import logging
import os.path
import string
import numpy as np
import rmgpy.quantity as quantity
from rmgpy.exceptions import SpeciesError, InputError
from rmgpy.kinetics.arrhenius import Arrhenius
from rmgpy.kinetics.tunneling import Wigner, Eckart
from rmgpy.molecule.draw import MoleculeDrawer, create_new_surface
from arkane.common import ArkaneSpecies
from arkane.output import prettify
from arkane.sensitivity import KineticsSensitivity as SensAnalysis
################################################################################
class KineticsJob(object):
    """
    A representation of an Arkane kinetics job. This job is used to compute
    and save the high-pressure-limit kinetics information for a single reaction.

    `usedTST` - a boolean representing if TST was used to calculate the kinetics
                if kinetics is already given in the input, then it is False.
    `three_params` - a boolean representing if the modified three-parameter Arrhenius equation is used to calculate
                     high pressure kinetic rate coefficients. If it is False, the classical two-parameter Arrhenius
                     equation is used.
    """

    def __init__(self, reaction, Tmin=None, Tmax=None, Tlist=None, Tcount=0, sensitivity_conditions=None,
                 three_params=True):
        """
        Initialize the kinetics job.

        Args:
            reaction: The reaction (with a transition state) to compute k(T) for.
            Tmin / Tmax: Temperature bounds; default to 298 K and 2500 K.
            Tlist: Explicit temperature list; overrides Tmin/Tmax/Tcount if given.
            Tcount: Number of temperatures; values <= 3 fall back to 50.
            sensitivity_conditions: Optional list of conditions for sensitivity analysis.
            three_params: Use modified (A, n, Ea) Arrhenius form if True, else (A, Ea).
        """
        self.usedTST = False
        self.Tmin = Tmin if Tmin is not None else (298, 'K')
        self.Tmax = Tmax if Tmax is not None else (2500, 'K')
        self.Tcount = Tcount if Tcount > 3 else 50
        self.three_params = three_params

        if Tlist is not None:
            self.Tlist = Tlist
            self.Tmin = (min(self.Tlist.value_si), 'K')
            self.Tmax = (max(self.Tlist.value_si), 'K')
            self.Tcount = len(self.Tlist.value_si)
        else:
            # Space the temperatures evenly in 1/T, which samples the Arrhenius
            # plot uniformly and gives a better-conditioned fit than even T spacing.
            self.Tlist = (1 / np.linspace(1 / self.Tmax.value_si, 1 / self.Tmin.value_si, self.Tcount), 'K')

        self.reaction = reaction
        self.k_units = None

        if sensitivity_conditions is not None:
            self.sensitivity_conditions = [quantity.Quantity(condition) for condition in sensitivity_conditions]
        else:
            self.sensitivity_conditions = None

        self.arkane_species = ArkaneSpecies(species=self.reaction.transition_state)

    @property
    def Tmin(self):
        """The minimum temperature at which the computed k(T) values are valid, or ``None`` if not defined."""
        return self._Tmin

    @Tmin.setter
    def Tmin(self, value):
        self._Tmin = quantity.Temperature(value)

    @property
    def Tmax(self):
        """The maximum temperature at which the computed k(T) values are valid, or ``None`` if not defined."""
        return self._Tmax

    @Tmax.setter
    def Tmax(self, value):
        self._Tmax = quantity.Temperature(value)

    @property
    def Tlist(self):
        """The temperatures at which the k(T) values are computed."""
        return self._Tlist

    @Tlist.setter
    def Tlist(self, value):
        self._Tlist = quantity.Temperature(value)

    def execute(self, output_directory=None, plot=True):
        """
        Execute the kinetics job, saving the results within
        the `output_directory`.

        If `plot` is True, then plots of the raw and fitted values for the kinetics
        will be saved.
        """
        self.generate_kinetics()
        if output_directory is not None:
            # Each output step is best-effort: a failure in one should not
            # prevent the others (or the sensitivity analysis) from running.
            try:
                self.write_output(output_directory)
            except Exception as e:
                logging.warning("Could not write kinetics output file due to error: "
                                "{0} in reaction {1}".format(e, self.reaction.label))
            try:
                self.write_chemkin(output_directory)
            except Exception as e:
                logging.warning("Could not write kinetics chemkin output due to error: "
                                "{0} in reaction {1}".format(e, self.reaction.label))
            if plot:
                try:
                    self.plot(output_directory)
                except Exception as e:
                    logging.warning("Could not plot kinetics due to error: "
                                    "{0} in reaction {1}".format(e, self.reaction.label))
                try:
                    self.draw(output_directory)
                except Exception as e:
                    logging.warning("Could not draw reaction {1} due to error: {0}".format(e, self.reaction.label))
        if self.sensitivity_conditions is not None:
            logging.info('\n\nRunning sensitivity analysis...')
            SensAnalysis(self, output_directory)
        logging.debug('Finished kinetics job for reaction {0}.'.format(self.reaction))
        logging.debug(repr(self.reaction))

    def generate_kinetics(self):
        """
        Generate the kinetics data for the reaction and fit it to a modified Arrhenius model.

        No-op if the reaction already carries Arrhenius kinetics (e.g. supplied
        in the input file); in that case ``usedTST`` stays False.
        """
        if isinstance(self.reaction.kinetics, Arrhenius):
            return None
        self.usedTST = True
        kinetics_class = 'Arrhenius'

        tunneling = self.reaction.transition_state.tunneling
        if isinstance(tunneling, Wigner) and tunneling.frequency is None:
            tunneling.frequency = (self.reaction.transition_state.frequency.value_si, "cm^-1")
        elif isinstance(tunneling, Eckart) and tunneling.frequency is None:
            # Eckart tunneling additionally needs the well energies on both
            # sides of the barrier (converted here from J/mol to kJ/mol).
            tunneling.frequency = (self.reaction.transition_state.frequency.value_si, "cm^-1")
            tunneling.E0_reac = (sum([reactant.conformer.E0.value_si
                                      for reactant in self.reaction.reactants]) * 0.001, "kJ/mol")
            tunneling.E0_TS = (self.reaction.transition_state.conformer.E0.value_si * 0.001, "kJ/mol")
            tunneling.E0_prod = (sum([product.conformer.E0.value_si
                                      for product in self.reaction.products]) * 0.001, "kJ/mol")
        elif tunneling is not None:
            if tunneling.frequency is not None:
                # Frequency was given by the user
                pass
            else:
                raise ValueError('Unknown tunneling model {0!r} for reaction {1}.'.format(tunneling, self.reaction))

        logging.debug('Generating {0} kinetics model for {1}...'.format(kinetics_class, self.reaction))
        klist = np.zeros_like(self.Tlist.value_si)
        for i, t in enumerate(self.Tlist.value_si):
            klist[i] = self.reaction.calculate_tst_rate_coefficient(t)

        # Convert from SI (m^3-based) to cm^3-based units for bimolecular and
        # termolecular reactions; unimolecular (order 1) is unchanged.
        order = len(self.reaction.reactants)
        klist *= 1e6 ** (order - 1)
        self.k_units = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order]
        self.K_eq_units = {2: 'mol^2/cm^6', 1: 'mol/cm^3', 0: ' ', -1: 'cm^3/mol', -2: 'cm^6/mol^2'}[
            len(self.reaction.products) - len(self.reaction.reactants)]
        self.k_r_units = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[len(self.reaction.products)]
        self.reaction.kinetics = Arrhenius().fit_to_data(self.Tlist.value_si, klist, kunits=self.k_units,
                                                         three_params=self.three_params)
        self.reaction.elementary_high_p = True

    def write_output(self, output_directory):
        """
        Save the results of the kinetics job to the `output.py` file located
        in `output_directory`.
        """
        reaction = self.reaction

        ks, k0s, k0_revs, k_revs = [], [], [], []

        logging.info('Saving kinetics for {0}...'.format(reaction))

        order = len(self.reaction.reactants)
        factor = 1e6 ** (order - 1)

        # Fix: use a context manager so the file handle is closed even if one
        # of the writes or fits below raises (the original open()/close() pair
        # leaked the handle on error).
        with open(os.path.join(output_directory, 'output.py'), 'a') as f:

            if self.usedTST:
                # If TST is not used, eg. it was given in 'reaction', then this will throw an error.
                f.write('# ======= =========== =========== =========== ===============\n')
                f.write('# Temp. k (TST) Tunneling k (TST+T) Units\n')
                f.write('# ======= =========== =========== =========== ===============\n')

                if self.Tlist is None:
                    t_list = np.array([300, 400, 500, 600, 800, 1000, 1500, 2000])
                else:
                    t_list = self.Tlist.value_si

                for T in t_list:
                    # Compute the uncorrected rate (tunneling disabled), then
                    # restore tunneling and compute the corrected rate so the
                    # transmission coefficient kappa = k / k0 can be reported.
                    tunneling = reaction.transition_state.tunneling
                    reaction.transition_state.tunneling = None
                    try:
                        k0 = reaction.calculate_tst_rate_coefficient(T) * factor
                    except SpeciesError:
                        k0 = 0
                    reaction.transition_state.tunneling = tunneling
                    try:
                        k = reaction.calculate_tst_rate_coefficient(T) * factor
                        kappa = k / k0
                    except (SpeciesError, ZeroDivisionError):
                        # Fall back to whatever kinetics the reaction already has.
                        k = reaction.get_rate_coefficient(T)
                        kappa = 0
                        logging.info("The species in reaction {0} do not have adequate information for TST, "
                                     "using default kinetics values.".format(reaction))
                    tunneling = reaction.transition_state.tunneling
                    ks.append(k)
                    k0s.append(k0)

                    f.write('# {0:4g} K {1:11.3e} {2:11g} {3:11.3e} {4}\n'.format(T, k0, kappa, k, self.k_units))
                f.write('# ======= =========== =========== =========== ===============\n')
                f.write('\n\n')

                f.write('# ======= ============ =========== ============ ============= =========\n')
                f.write('# Temp. Kc (eq) Units k_rev (TST) k_rev (TST+T) Units\n')
                f.write('# ======= ============ =========== ============ ============= =========\n')

                # Initialize Object for Converting Units
                if self.K_eq_units != ' ':
                    keq_unit_converter = quantity.Units(self.K_eq_units).get_conversion_factor_from_si()
                else:
                    keq_unit_converter = 1

                for n, T in enumerate(t_list):
                    k = ks[n]
                    k0 = k0s[n]
                    K_eq = keq_unit_converter * reaction.get_equilibrium_constant(T)  # returns SI units
                    # Reverse rates via detailed balance: k_rev = k_fwd / K_eq.
                    k0_rev = k0 / K_eq
                    k_rev = k / K_eq
                    k0_revs.append(k0_rev)
                    k_revs.append(k_rev)
                    f.write('# {0:4g} K {1:11.3e} {2} {3:11.3e} {4:11.3e} {5}\n'.format(
                        T, K_eq, self.K_eq_units, k0_rev, k_rev, self.k_r_units))

                f.write('# ======= ============ =========== ============ ============= =========\n')
                f.write('\n\n')

                kinetics_0_rev = Arrhenius().fit_to_data(t_list, np.array(k0_revs), kunits=self.k_r_units,
                                                         three_params=self.three_params)
                kinetics_rev = Arrhenius().fit_to_data(t_list, np.array(k_revs), kunits=self.k_r_units,
                                                       three_params=self.three_params)

                f.write('# k_rev (TST) = {0} \n'.format(kinetics_0_rev))
                f.write('# k_rev (TST+T) = {0} \n\n'.format(kinetics_rev))

                if self.three_params:
                    f.write('# kinetics fitted using the modified three-parameter Arrhenius equation '
                            'k = A * (T/T0)^n * exp(-Ea/RT) \n')
                else:
                    f.write('# kinetics fitted using the two-parameter Arrhenius equation k = A * exp(-Ea/RT) \n')

            # Reaction path degeneracy is INCLUDED in the kinetics itself!
            rxn_str = 'kinetics(label={0!r}, kinetics={1!r})'.format(reaction.label, reaction.kinetics)
            f.write('{0}\n\n'.format(prettify(rxn_str)))

    def write_chemkin(self, output_directory):
        """
        Appends the kinetics rates to `chem.inp` in `output_directory`
        """
        # obtain a unit conversion factor
        order = len(self.reaction.reactants)
        factor = 1e6 ** (order - 1)
        reaction = self.reaction
        kinetics = reaction.kinetics

        rxn_str = ''
        if reaction.kinetics.comment:
            for line in reaction.kinetics.comment.split("\n"):
                rxn_str += "! {0}\n".format(line)
        # Ea is stored in J/mol; Chemkin expects kcal/mol, hence the / 4184.
        rxn_str += '{0!s:51} {1:9.3e} {2:9.3f} {3:9.3f}\n'.format(
            reaction,
            kinetics.A.value_si * factor,
            kinetics.n.value_si,
            kinetics.Ea.value_si / 4184.,
        )

        with open(os.path.join(output_directory, 'chem.inp'), 'a') as f:
            f.write('{0}\n'.format(rxn_str))

    def save_yaml(self, output_directory):
        """
        Save a YAML file for TSs if structures of the respective reactant/s and product/s are known
        """
        if all([spc.molecule is not None and len(spc.molecule)
                for spc in self.reaction.reactants + self.reaction.products]):
            self.arkane_species.update_species_attributes(self.reaction.transition_state)
            self.arkane_species.reaction_label = self.reaction.label
            self.arkane_species.reactants = [{'label': spc.label, 'adjacency_list': spc.molecule[0].to_adjacency_list()}
                                             for spc in self.reaction.reactants]
            self.arkane_species.products = [{'label': spc.label, 'adjacency_list': spc.molecule[0].to_adjacency_list()}
                                            for spc in self.reaction.products]
            self.arkane_species.save_yaml(path=output_directory)

    def plot(self, output_directory):
        """
        Plot both the raw kinetics data and the Arrhenius fit versus
        temperature. The plot is saved to the file ``kinetics.pdf`` in the
        output directory. The plot is not generated if ``matplotlib`` is not
        installed.
        """
        # Imported lazily so the job still runs where matplotlib is absent.
        import matplotlib.pyplot as plt

        f, ax = plt.subplots()
        if self.Tlist is not None:
            t_list = [t for t in self.Tlist.value_si]
        else:
            t_list = 1000.0 / np.arange(0.4, 3.35, 0.05)
        klist = np.zeros_like(t_list)
        klist2 = np.zeros_like(t_list)
        for i in range(len(t_list)):
            klist[i] = self.reaction.calculate_tst_rate_coefficient(t_list[i])
            klist2[i] = self.reaction.kinetics.get_rate_coefficient(t_list[i])

        order = len(self.reaction.reactants)
        klist *= 1e6 ** (order - 1)
        klist2 *= 1e6 ** (order - 1)
        # Standard Arrhenius-plot abscissa: 1000 / T.
        t_list = [1000.0 / t for t in t_list]
        plt.semilogy(t_list, klist, 'ob', label='TST calculation')
        plt.semilogy(t_list, klist2, '-k', label='Fitted rate')
        plt.legend()
        reaction_str = '{0} {1} {2}'.format(
            ' + '.join([reactant.label for reactant in self.reaction.reactants]),
            '<=>', ' + '.join([product.label for product in self.reaction.products]))
        plt.title(reaction_str)
        plt.xlabel('1000 / Temperature (K^-1)')
        plt.ylabel('Rate coefficient ({0})'.format(self.k_units))

        plot_path = os.path.join(output_directory, 'plots')

        if not os.path.exists(plot_path):
            os.mkdir(plot_path)
        # Sanitize the reaction string so it is a safe filename.
        valid_chars = "-_.()<=> %s%s" % (string.ascii_letters, string.digits)
        filename = ''.join(c for c in reaction_str if c in valid_chars) + '.pdf'
        plt.savefig(os.path.join(plot_path, filename))
        plt.close()

    def draw(self, output_directory, file_format='pdf'):
        """
        Generate a PDF drawing of the reaction.
        This requires that Cairo and its Python wrapper be available; if not,
        the drawing is not generated.

        You may also generate different formats of drawings, by changing format to
        one of the following: `pdf`, `svg`, `png`.
        """
        drawing_path = os.path.join(output_directory, 'paths')

        if not os.path.exists(drawing_path):
            os.mkdir(drawing_path)
        valid_chars = "-_.()<=> %s%s" % (string.ascii_letters, string.digits)
        reaction_str = '{0} {1} {2}'.format(
            ' + '.join([reactant.label for reactant in self.reaction.reactants]),
            '<=>', ' + '.join([product.label for product in self.reaction.products]))
        # NOTE(review): the extension is hard-coded to '.pdf' even when
        # file_format is 'svg' or 'png'; kept as-is since downstream tooling
        # may rely on the existing file names — confirm before changing.
        filename = ''.join(c for c in reaction_str if c in valid_chars) + '.pdf'
        path = os.path.join(drawing_path, filename)

        KineticsDrawer().draw(self.reaction, file_format=file_format, path=path)
class KineticsDrawer(object):
    """
    This class provides functionality for drawing the potential energy surface
    for a high pressure limit reaction using the Cairo 2D graphics engine.
    The most common use case is simply::

        KineticsDrawer().draw(reaction, file_format='png', path='network.png')

    where ``reaction`` is the :class:`Reaction` object to draw. You can also
    pass a dict of options to the constructor to affect how the reaction is drawn.
    """

    def __init__(self, options=None):
        # Default drawing options; any entries in a user-supplied `options`
        # dict override these defaults.
        self.options = {
            'structures': True,
            'fontFamily': 'sans',
            'fontSizeNormal': 12,
            'Eunits': 'kJ/mol',
            'padding': 16,
            'wellWidth': 64,
            'wellSpacing': 64,
            'Eslope': 1.5,
            'TSwidth': 16,
            'E0offset': 0.0,
        }
        if options:
            self.options.update(options)
        self.clear()

    def clear(self):
        """Clear the drawer"""
        self.reaction = None
        self.wells = None
        # Bounding box of the drawing area and the Cairo surface/context,
        # populated during draw().
        self.left = 0.0
        self.top = 0.0
        self.right = 0.0
        self.bottom = 0.0
        self.surface = None
        self.cr = None

    def _get_energy_range(self):
        """
        Return the minimum and maximum energy in J/mol on the potential energy surface.
        """
        e0_min = min(self.wells[0].E0, self.wells[1].E0, self.reaction.transition_state.conformer.E0.value_si)
        e0_max = max(self.wells[0].E0, self.wells[1].E0, self.reaction.transition_state.conformer.E0.value_si)
        if e0_max - e0_min > 5e5:
            # the energy barrier in one of the reaction directions is larger than 500 kJ/mol, warn the user
            logging.warning('The energy differences between the stationary points of reaction {0} '
                            'seems too large.'.format(self.reaction))
            logging.warning('Got the following energies:\nWell 1: {0} kJ/mol\nTS: {1} kJ/mol\nWell 2: {2}'
                            ' kJ/mol'.format(self.wells[0].E0 / 1000., self.wells[1].E0 / 1000.,
                                             self.reaction.transition_state.conformer.E0.value_si / 1000.))
        return e0_min, e0_max

    def _use_structure_for_label(self, configuration):
        """
        Return ``True`` if the configuration should use molecular structures
        for its labels or ``False`` otherwise.
        """
        # Initialize with the current user option value
        use_structures = self.options['structures']

        # But don't use structures if one or more species in the configuration
        # do not have structure data
        for spec in configuration.species_list:
            if spec.molecule is None or len(spec.molecule) == 0:
                use_structures = False
                break

        return use_structures

    def _get_text_size(self, text, padding=2, file_format='pdf'):
        """
        Return a [0, 0, width, height] bounding rect for `text` at the normal
        font size, padded by `padding` pixels on each side.
        """
        try:
            import cairocffi as cairo
        except ImportError:
            import cairo

        # Use dummy surface to determine text extents
        surface = create_new_surface(file_format)
        cr = cairo.Context(surface)
        cr.set_font_size(self.options['fontSizeNormal'])
        extents = cr.text_extents(text)
        width = extents[2] + 2 * padding
        height = extents[3] + 2 * padding
        return [0, 0, width, height]

    def _draw_text(self, text, cr, x0, y0, padding=2):
        """
        Render `text` on context `cr` with its padded bounding box anchored at
        (x0, y0); return the [0, 0, width, height] rect that was occupied.
        """
        cr.save()
        cr.set_font_size(self.options['fontSizeNormal'])
        extents = cr.text_extents(text)
        # Offset by the glyph extents so the visible text, not the baseline
        # origin, lands at the requested anchor point.
        cr.move_to(x0 - extents[0] - padding, y0 - extents[1] + padding)
        cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        cr.show_text(text)
        cr.restore()
        width = extents[2] + 2 * padding
        height = extents[3] + 2 * padding
        return [0, 0, width, height]

    def _get_label_size(self, configuration, file_format='pdf'):
        """
        Return the [0, 0, width, height] rect needed to label `configuration`,
        stacking its species vertically with '+' separators between them.
        """
        width = 0
        height = 0
        bounding_rects = []
        if self._use_structure_for_label(configuration):
            for spec in configuration.species_list:
                rect = MoleculeDrawer().draw(spec.molecule[0], file_format=file_format)[2]
                bounding_rects.append(list(rect))
        else:
            for spec in configuration.species_list:
                bounding_rects.append(self._get_text_size(spec.label, file_format=file_format))

        plus_rect = self._get_text_size('+', file_format=file_format)

        for rect in bounding_rects:
            if width < rect[2]:
                width = rect[2]
            height += rect[3] + plus_rect[3]
        # One fewer '+' than species: drop the extra separator height.
        height -= plus_rect[3]
        return [0, 0, width, height]

    def _draw_label(self, configuration, cr, x0, y0, file_format='pdf'):
        """
        Draw the label (structures or text, with '+' separators) for
        `configuration` at (x0, y0); return its bounding rect.
        """
        bounding_rect = self._get_label_size(configuration, file_format=file_format)
        padding = 2
        use_structures = self._use_structure_for_label(configuration)
        y = y0
        for i, spec in enumerate(configuration.species_list):
            if i > 0:
                # Separator between stacked species, centered horizontally.
                rect = self._get_text_size('+', padding=padding, file_format=file_format)
                x = x0 - 0.5 * (rect[2] - bounding_rect[2]) + 2 * padding
                self._draw_text('+', cr, x, y)
                y += rect[3]

            if use_structures:
                molecule_drawer = MoleculeDrawer()
                cr.save()
                # First draw pass computes the bounding rect; the actual
                # rendering onto `cr` happens in render() below.
                rect = molecule_drawer.draw(spec.molecule[0], file_format=file_format)[2]
                cr.restore()
                x = x0 - 0.5 * (rect[2] - bounding_rect[2])
                cr.save()
                molecule_drawer.render(cr, offset=(x, y))
                cr.restore()
                y += rect[3]
            else:
                rect = self._get_text_size(spec.label, padding=padding, file_format=file_format)
                x = x0 - 0.5 * (rect[2] - bounding_rect[2]) + 2 * padding
                self._draw_text(spec.label, cr, x, y)
                y += rect[3]

        return bounding_rect

    def draw(self, reaction, file_format, path=None):
        """
        Draw the potential energy surface for the given `network` as a Cairo
        surface of the given `file_format`. If `path` is given, the surface is
        saved to that location on disk.
        """
        try:
            import cairocffi as cairo
        except ImportError:
            try:
                import cairo
            except ImportError:
                logging.warning('Cairo not found; potential energy surface will not be drawn.')
                return

        self.reaction = reaction
        # Exactly two wells for a single reaction: reactants and products.
        self.wells = [Well(self.reaction.reactants), Well(self.reaction.products)]

        # Generate the bounding rectangles for each configuration label
        label_rects = []
        for well in self.wells:
            label_rects.append(self._get_label_size(well, file_format=file_format))

        # Get energy range (use kJ/mol internally)
        e0_min, e0_max = self._get_energy_range()
        e0_min *= 0.001
        e0_max *= 0.001

        # Drawing parameters
        padding = self.options['padding']
        well_width = self.options['wellWidth']
        well_spacing = self.options['wellSpacing']
        e_slope = self.options['Eslope']
        ts_width = self.options['TSwidth']

        e0_offset = self.options['E0offset'] * 0.001

        # Choose multiplier to convert energies to desired units (on figure only)
        e_units = self.options['Eunits']
        try:
            e_mult = {'J/mol': 1.0, 'kJ/mol': 0.001, 'cal/mol': 1.0 / 4.184, 'kcal/mol': 1.0 / 4184.,
                      'cm^-1': 1.0 / 11.962}[e_units]
        except KeyError:
            raise InputError('Invalid value "{0}" for Eunits parameter.'.format(e_units))

        # Determine height required for drawing
        e_height = self._get_text_size('0.0', file_format=file_format)[3] + 6
        y_e0 = (e0_max - 0.0) * e_slope + padding + e_height
        height = (e0_max - e0_min) * e_slope + 2 * padding + e_height + 6
        for i in range(len(self.wells)):
            if 0.001 * self.wells[i].E0 == e0_min:
                # The lowest well's label hangs below its line; reserve room.
                height += label_rects[i][3]
                break

        # Determine naive position of each well (one per column)
        coordinates = np.zeros((len(self.wells), 2), np.float64)
        x = padding
        for i in range(len(self.wells)):
            well = self.wells[i]
            rect = label_rects[i]
            this_well_width = max(well_width, rect[2])
            e0 = 0.001 * well.E0
            y = y_e0 - e0 * e_slope
            coordinates[i] = [x + 0.5 * this_well_width, y]
            x += this_well_width + well_spacing
        width = x + padding - well_spacing

        # Determine the rectangles taken up by each well
        # We'll use this to merge columns safely so that wells don't overlap
        well_rects = []
        for i in range(len(self.wells)):
            l, t, w, h = label_rects[i]
            x, y = coordinates[i, :]
            if w < well_width:
                w = well_width
            t -= 6 + e_height
            h += 6 + e_height
            well_rects.append([l + x - 0.5 * w, t + y + 6, w, h])

        # Squish columns together from the left where possible until an isomer is encountered
        # NOTE(review): with n_left = -1 the loop below iterates range(-1, -1, -1),
        # which is empty, so this whole left-squish pass never runs for the
        # two-well case — presumably vestigial code inherited from the
        # multi-well network drawer; confirm before removing.
        old_left = np.min(coordinates[:, 0])
        n_left = - 1
        columns = []
        for i in range(n_left, -1, -1):
            top = well_rects[i][1]
            bottom = top + well_rects[i][3]
            for column in columns:
                for c in column:
                    top0 = well_rects[c][1]
                    # NOTE(review): looks like this should read `top0 + ...`
                    # (cf. the right-squish loop below); unreachable here since
                    # the enclosing loop body never executes — confirm.
                    bottom0 = top + well_rects[c][3]
                    if (top0 <= top <= bottom0) or (top <= top0 <= bottom):
                        # Can't put it in this column
                        break
                else:
                    # Can put it in this column
                    column.append(i)
                    break
            else:
                # Needs a new column
                columns.append([i])
        for column in columns:
            column_width = max([well_rects[c][2] for c in column])
            x = coordinates[column[0] + 1, 0] - 0.5 * well_rects[column[0] + 1][2] - well_spacing - 0.5 * column_width
            for c in column:
                delta = x - coordinates[c, 0]
                well_rects[c][0] += delta
                coordinates[c, 0] += delta
        new_left = np.min(coordinates[:, 0])
        coordinates[:, 0] -= new_left - old_left

        # Squish columns together from the right where possible until an isomer is encountered
        # NOTE(review): n_right = 3 exceeds len(self.wells) == 2, so
        # range(3, 2) is empty and this pass is likewise a no-op — confirm.
        n_right = 3
        columns = []
        for i in range(n_right, len(self.wells)):
            top = well_rects[i][1]
            bottom = top + well_rects[i][3]
            for column in columns:
                for c in column:
                    top0 = well_rects[c][1]
                    bottom0 = top0 + well_rects[c][3]
                    if (top0 <= top <= bottom0) or (top <= top0 <= bottom):
                        # Can't put it in this column
                        break
                else:
                    # Can put it in this column
                    column.append(i)
                    break
            else:
                # Needs a new column
                columns.append([i])
        for column in columns:
            column_width = max([well_rects[c][2] for c in column])
            x = coordinates[column[0] - 1, 0] + 0.5 * well_rects[column[0] - 1][2] + well_spacing + 0.5 * column_width
            for c in column:
                delta = x - coordinates[c, 0]
                well_rects[c][0] += delta
                coordinates[c, 0] += delta

        width = max([rect[2] + rect[0] for rect in well_rects]) - min([rect[0] for rect in well_rects]) + 2 * padding

        # Draw to the final surface
        surface = create_new_surface(file_format=file_format, target=path, width=width, height=height)
        cr = cairo.Context(surface)

        # Some global settings
        cr.select_font_face("sans")
        cr.set_font_size(self.options['fontSizeNormal'])

        # Fill the background with white
        cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        cr.paint()
        self._draw_text('E0 ({0})'.format(e_units), cr, 15, 10, padding=2)  # write units

        # Draw reactions
        e0_reac = self.wells[0].E0 * 0.001 - e0_offset
        e0_prod = self.wells[1].E0 * 0.001 - e0_offset
        e0_ts = self.reaction.transition_state.conformer.E0.value_si * 0.001 - e0_offset
        x1, y1 = coordinates[0, :]
        x2, y2 = coordinates[1, :]
        x1 += well_spacing / 2.0
        x2 -= well_spacing / 2.0
        if abs(e0_ts - e0_reac) > 0.1 and abs(e0_ts - e0_prod) > 0.1:
            # Distinct barrier: draw a TS bar and two Bezier arcs through it.
            # Shift the TS bar toward the bimolecular side when one exists.
            if len(self.reaction.reactants) == 2:
                if e0_reac < e0_prod:
                    x0 = x1 + well_spacing * 0.5
                else:
                    x0 = x2 - well_spacing * 0.5
            elif len(self.reaction.products) == 2:
                if e0_reac < e0_prod:
                    x0 = x2 - well_spacing * 0.5
                else:
                    x0 = x1 + well_spacing * 0.5
            else:
                x0 = 0.5 * (x1 + x2)
            y0 = y_e0 - (e0_ts + e0_offset) * e_slope
            width1 = (x0 - x1)
            width2 = (x2 - x0)
            # Draw horizontal line for TS
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.set_line_width(2.0)
            cr.move_to(x0 - ts_width / 2.0, y0)
            cr.line_to(x0 + ts_width / 2.0, y0)
            cr.stroke()
            # Add background and text for energy
            e0 = "{0:.1f}".format(e0_ts * 1000. * e_mult)
            extents = cr.text_extents(e0)
            x = x0 - extents[2] / 2.0
            y = y0 - 6.0
            cr.rectangle(x + extents[0] - 2.0, y + extents[1] - 2.0, extents[2] + 4.0, extents[3] + 4.0)
            cr.set_source_rgba(1.0, 1.0, 1.0, 0.75)
            cr.fill()
            cr.move_to(x, y)
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.show_text(e0)
            # Draw Bezier curve connecting reactants and products through TS
            cr.set_source_rgba(0.0, 0.0, 0.0, 0.5)
            cr.set_line_width(1.0)
            cr.move_to(x1, y1)
            cr.curve_to(x1 + width1 / 8.0, y1, x0 - width1 / 8.0 - ts_width / 2.0, y0, x0 - ts_width / 2.0, y0)
            cr.move_to(x0 + ts_width / 2.0, y0)
            cr.curve_to(x0 + width2 / 8.0 + ts_width / 2.0, y0, x2 - width2 / 8.0, y2, x2, y2)
            cr.stroke()
        else:
            # Barrierless (TS within 0.1 kJ/mol of a well): single smooth curve.
            width = (x2 - x1)
            # Draw Bezier curve connecting reactants and products through TS
            cr.set_source_rgba(0.0, 0.0, 0.0, 0.5)
            cr.set_line_width(1.0)
            cr.move_to(x1, y1)
            cr.curve_to(x1 + width / 4.0, y1, x2 - width / 4.0, y2, x2, y2)
            cr.stroke()

        # Draw wells (after path reactions so that they are on top)
        for i, well in enumerate(self.wells):
            x0, y0 = coordinates[i, :]
            # Draw horizontal line for well
            cr.set_line_width(4.0)
            cr.move_to(x0 - well_width / 2.0, y0)
            cr.line_to(x0 + well_width / 2.0, y0)
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.stroke()
            # Add background and text for energy
            e0 = well.E0 * 0.001 - e0_offset
            e0 = "{0:.1f}".format(e0 * 1000. * e_mult)
            extents = cr.text_extents(e0)
            x = x0 - extents[2] / 2.0
            y = y0 - 6.0
            cr.rectangle(x + extents[0] - 2.0, y + extents[1] - 2.0, extents[2] + 4.0, extents[3] + 4.0)
            cr.set_source_rgba(1.0, 1.0, 1.0, 0.75)
            cr.fill()
            cr.move_to(x, y)
            cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
            cr.show_text(e0)
            # Draw background and text for label
            x = x0 - 0.5 * label_rects[i][2]
            y = y0 + 6
            cr.rectangle(x, y, label_rects[i][2], label_rects[i][3])
            cr.set_source_rgba(1.0, 1.0, 1.0, 0.75)
            cr.fill()
            self._draw_label(well, cr, x, y, file_format=file_format)

        # Finish Cairo drawing
        if file_format == 'png':
            surface.write_to_png(path)
        else:
            surface.finish()
class Well(object):
    """
    Lightweight container bundling one side of a reaction for drawing.

    `species_list` holds the species that make up this well (at least one
    entry); `E0` caches their total ground-state energy, obtained by summing
    each species' ``conformer.E0.value_si`` (SI units, J/mol).
    """

    def __init__(self, species_list):
        self.species_list = species_list
        self.E0 = sum(spc.conformer.E0.value_si for spc in species_list)
|
<filename>parton/test_lhapdf.py
import unittest
from . import pdf, io
import shutil
import tempfile
lhapdf_CT10 ={
(-4, 1e-08, 10.0): 12.126009150675591,
(-4, 1e-08, 56.23413251903491): 41.02476133438564,
(-4, 1e-08, 316.22776601683796): 88.09606590653031,
(-4, 1e-08, 1778.2794100389228): 153.7780144766072,
(-4, 1e-08, 10000.0): 237.50159381331855,
(-4, 7.498942093324558e-07, 10.0): 4.335460451915329,
(-4, 7.498942093324558e-07, 56.23413251903491): 12.254992012880091,
(-4, 7.498942093324558e-07, 316.22776601683796): 23.066720358757788,
(-4, 7.498942093324558e-07, 1778.2794100389228): 36.273272727566784,
(-4, 7.498942093324558e-07, 10000.0): 51.40069478365495,
(-4, 5.623413251903491e-05, 10.0): 1.414605677409382,
(-4, 5.623413251903491e-05, 56.23413251903491): 3.130963246988818,
(-4, 5.623413251903491e-05, 316.22776601683796): 4.9789851557660745,
(-4, 5.623413251903491e-05, 1778.2794100389228): 6.873690044070669,
(-4, 5.623413251903491e-05, 10000.0): 8.761559269574471,
(-4, 0.004216965034285823, 10.0): 0.3392520292729965,
(-4, 0.004216965034285823, 56.23413251903491): 0.5402775654632718,
(-4, 0.004216965034285823, 316.22776601683796): 0.6880707116873284,
(-4, 0.004216965034285823, 1778.2794100389228): 0.8020515194422677,
(-4, 0.004216965034285823, 10000.0): 0.892139648134384,
(-4, 0.31622776601683794, 10.0): 0.002588510502494359,
(-4, 0.31622776601683794, 56.23413251903491): 0.002549447011017532,
(-4, 0.31622776601683794, 316.22776601683796): 0.002313635192150163,
(-4, 0.31622776601683794, 1778.2794100389228): 0.0020684044457654413,
(-4, 0.31622776601683794, 10000.0): 0.0018507518649738008,
(-3, 1e-08, 10.0): 16.756744962359846,
(-3, 1e-08, 56.23413251903491): 45.391216242724255,
(-3, 1e-08, 316.22776601683796): 92.27982481303964,
(-3, 1e-08, 1778.2794100389228): 157.82130933799775,
(-3, 1e-08, 10000.0): 241.43111563504945,
(-3, 7.498942093324558e-07, 10.0): 5.7895989791726095,
(-3, 7.498942093324558e-07, 56.23413251903491): 13.627086559893465,
(-3, 7.498942093324558e-07, 316.22776601683796): 24.38208662687902,
(-3, 7.498942093324558e-07, 1778.2794100389228): 37.544982601002665,
(-3, 7.498942093324558e-07, 10000.0): 52.63701932339524,
(-3, 5.623413251903491e-05, 10.0): 1.887473467779649,
(-3, 5.623413251903491e-05, 56.23413251903491): 3.579203838851147,
(-3, 5.623413251903491e-05, 316.22776601683796): 5.4102249095501165,
(-3, 5.623413251903491e-05, 1778.2794100389228): 7.29186292469213,
(-3, 5.623413251903491e-05, 10000.0): 9.169188137598681,
(-3, 0.004216965034285823, 10.0): 0.5228746551395594,
(-3, 0.004216965034285823, 56.23413251903491): 0.714124690080171,
(-3, 0.004216965034285823, 316.22776601683796): 0.8547590504861885,
(-3, 0.004216965034285823, 1778.2794100389228): 0.962994283665188,
(-3, 0.004216965034285823, 10000.0): 1.04825839995075,
(-3, 0.31622776601683794, 10.0): 0.005345303443615897,
(-3, 0.31622776601683794, 56.23413251903491): 0.004446805556731574,
(-3, 0.31622776601683794, 316.22776601683796): 0.0037490807510542792,
(-3, 0.31622776601683794, 1778.2794100389228): 0.0032125670405896553,
(-3, 0.31622776601683794, 10000.0): 0.002794619101109903,
(-2, 1e-08, 10.0): 17.605017846766618,
(-2, 1e-08, 56.23413251903491): 46.191048297236875,
(-2, 1e-08, 316.22776601683796): 93.04611385178964,
(-2, 1e-08, 1778.2794100389228): 158.56176695496683,
(-2, 1e-08, 10000.0): 242.15019914097294,
(-2, 7.498942093324558e-07, 10.0): 6.055052153385829,
(-2, 7.498942093324558e-07, 56.23413251903491): 13.877424836344732,
(-2, 7.498942093324558e-07, 316.22776601683796): 24.621957213835827,
(-2, 7.498942093324558e-07, 1778.2794100389228): 37.77683241186586,
(-2, 7.498942093324558e-07, 10000.0): 52.86233026936732,
(-2, 5.623413251903491e-05, 10.0): 1.9715303564164006,
(-2, 5.623413251903491e-05, 56.23413251903491): 3.6586617832598414,
(-2, 5.623413251903491e-05, 316.22776601683796): 5.486527459900575,
(-2, 5.623413251903491e-05, 1778.2794100389228): 7.365741064546534,
(-2, 5.623413251903491e-05, 10000.0): 9.241134839331808,
(-2, 0.004216965034285823, 10.0): 0.5570134353752993,
(-2, 0.004216965034285823, 56.23413251903491): 0.7477631930570984,
(-2, 0.004216965034285823, 316.22776601683796): 0.8880559387305795,
(-2, 0.004216965034285823, 1778.2794100389228): 0.9960158901669028,
(-2, 0.004216965034285823, 10000.0): 1.0810384205497796,
(-2, 0.31622776601683794, 10.0): 0.011876087898445954,
(-2, 0.31622776601683794, 56.23413251903491): 0.00915350813458153,
(-2, 0.31622776601683794, 316.22776601683796): 0.007410377531394808,
(-2, 0.31622776601683794, 1778.2794100389228): 0.006186424938281359,
(-2, 0.31622776601683794, 10000.0): 0.005281385118460772,
(-1, 1e-08, 10.0): 17.60538047787716,
(-1, 1e-08, 56.23413251903491): 46.19144584846197,
(-1, 1e-08, 316.22776601683796): 93.0465822705912,
(-1, 1e-08, 1778.2794100389228): 158.56172261762887,
(-1, 1e-08, 10000.0): 242.15095000133647,
(-1, 7.498942093324558e-07, 10.0): 6.055976149637256,
(-1, 7.498942093324558e-07, 56.23413251903491): 13.878406857813198,
(-1, 7.498942093324558e-07, 316.22776601683796): 24.62301282916305,
(-1, 7.498942093324558e-07, 1778.2794100389228): 37.777934326379324,
(-1, 7.498942093324558e-07, 10000.0): 52.86354511124354,
(-1, 5.623413251903491e-05, 10.0): 1.9742784526716286,
(-1, 5.623413251903491e-05, 56.23413251903491): 3.6617127531796454,
(-1, 5.623413251903491e-05, 316.22776601683796): 5.489815972869785,
(-1, 5.623413251903491e-05, 1778.2794100389228): 7.369227816743628,
(-1, 5.623413251903491e-05, 10000.0): 9.244800231131714,
(-1, 0.004216965034285823, 10.0): 0.5685067737284695,
(-1, 0.004216965034285823, 56.23413251903491): 0.7607974122201304,
(-1, 0.004216965034285823, 316.22776601683796): 0.9021788929879028,
(-1, 0.004216965034285823, 1778.2794100389228): 1.0109772765737797,
(-1, 0.004216965034285823, 10000.0): 1.0966774240831234,
(-1, 0.31622776601683794, 10.0): 0.010373439416445378,
(-1, 0.31622776601683794, 56.23413251903491): 0.007488490344510958,
(-1, 0.31622776601683794, 316.22776601683796): 0.005821543054307388,
(-1, 0.31622776601683794, 1778.2794100389228): 0.004721346325408865,
(-1, 0.31622776601683794, 10000.0): 0.003942215532844127,
(0, 1e-08, 10.0): 405.3255637220367,
(0, 1e-08, 56.23413251903491): 1436.880026381909,
(0, 1e-08, 316.22776601683796): 3156.596600706753,
(0, 1e-08, 1778.2794100389228): 5537.111363526038,
(0, 1e-08, 10000.0): 8521.652709276208,
(0, 7.498942093324558e-07, 10.0): 139.20380815306046,
(0, 7.498942093324558e-07, 56.23413251903491): 399.38652484266504,
(0, 7.498942093324558e-07, 316.22776601683796): 751.8778244490188,
(0, 7.498942093324558e-07, 1778.2794100389228): 1170.2119470580872,
(0, 7.498942093324558e-07, 10000.0): 1634.3156982020903,
(0, 5.623413251903491e-05, 10.0): 42.53967105156836,
(0, 5.623413251903491e-05, 56.23413251903491): 90.61388339659318,
(0, 5.623413251903491e-05, 316.22776601683796): 139.3955827757384,
(0, 5.623413251903491e-05, 1778.2794100389228): 186.30886678289167,
(0, 5.623413251903491e-05, 10000.0): 230.37005537249675,
(0, 0.004216965034285823, 10.0): 9.770246611236905,
(0, 0.004216965034285823, 56.23413251903491): 13.116612420226975,
(0, 0.004216965034285823, 316.22776601683796): 15.074265241424595,
(0, 0.004216965034285823, 1778.2794100389228): 16.274307327777525,
(0, 0.004216965034285823, 10000.0): 17.016579103299154,
(0, 0.31622776601683794, 10.0): 0.1511807636488401,
(0, 0.31622776601683794, 56.23413251903491): 0.09514987864524801,
(0, 0.31622776601683794, 316.22776601683796): 0.06625807729013862,
(0, 0.31622776601683794, 1778.2794100389228): 0.04919674628311809,
(0, 0.31622776601683794, 10000.0): 0.038236037093332384,
(1, 1e-08, 10.0): 17.60538047787716,
(1, 1e-08, 56.23413251903491): 46.191446527894946,
(1, 1e-08, 316.22776601683796): 93.0465822705912,
(1, 1e-08, 1778.2794100389228): 158.56172261762887,
(1, 1e-08, 10000.0): 242.15095000133647,
(1, 7.498942093324558e-07, 10.0): 6.056435894038626,
(1, 7.498942093324558e-07, 56.23413251903491): 13.879081655121778,
(1, 7.498942093324558e-07, 316.22776601683796): 24.62377099618751,
(1, 7.498942093324558e-07, 1778.2794100389228): 37.77889253256745,
(1, 7.498942093324558e-07, 10000.0): 52.86456969015634,
(1, 5.623413251903491e-05, 10.0): 1.9808741468734439,
(1, 5.623413251903491e-05, 56.23413251903491): 3.6702685383043385,
(1, 5.623413251903491e-05, 316.22776601683796): 5.499996546225309,
(1, 5.623413251903491e-05, 1778.2794100389228): 7.380844869741231,
(1, 5.623413251903491e-05, 10000.0): 9.257701768571732,
(1, 0.004216965034285823, 10.0): 0.6455876315279057,
(1, 0.004216965034285823, 56.23413251903491): 0.849645729138851,
(1, 0.004216965034285823, 316.22776601683796): 0.9994933659836224,
(1, 0.004216965034285823, 1778.2794100389228): 1.1149249499399423,
(1, 0.004216965034285823, 10000.0): 1.2060451257800577,
(1, 0.31622776601683794, 10.0): 0.16562096068835186,
(1, 0.31622776601683794, 56.23413251903491): 0.12631109703936957,
(1, 0.31622776601683794, 316.22776601683796): 0.10254991845260125,
(1, 0.31622776601683794, 1778.2794100389228): 0.0862699831275305,
(1, 0.31622776601683794, 10000.0): 0.07433886373595104,
(2, 1e-08, 10.0): 17.60508047787716,
(2, 1e-08, 56.23413251903491): 46.19114829723688,
(2, 1e-08, 316.22776601683796): 93.04631238950503,
(2, 1e-08, 1778.2794100389228): 158.56172261762887,
(2, 1e-08, 10000.0): 242.151017194184,
(2, 7.498942093324558e-07, 10.0): 6.055756222716645,
(2, 7.498942093324558e-07, 56.23413251903491): 13.87840084017821,
(2, 7.498942093324558e-07, 316.22776601683796): 24.6231668461724,
(2, 7.498942093324558e-07, 1778.2794100389228): 37.778282423294385,
(2, 7.498942093324558e-07, 10000.0): 52.864041745264316,
(2, 5.623413251903491e-05, 10.0): 1.981945214755691,
(2, 5.623413251903491e-05, 56.23413251903491): 3.6723532303490014,
(2, 5.623413251903491e-05, 316.22776601683796): 5.502970529354733,
(2, 5.623413251903491e-05, 1778.2794100389228): 7.384612901940605,
(2, 5.623413251903491e-05, 10000.0): 9.26223085528346,
(2, 0.004216965034285823, 10.0): 0.6876424544066511,
(2, 0.004216965034285823, 56.23413251903491): 0.9013554694966094,
(2, 0.004216965034285823, 316.22776601683796): 1.0585024492377975,
(2, 0.004216965034285823, 1778.2794100389228): 1.179878762750547,
(2, 0.004216965034285823, 10000.0): 1.2760218101155822,
(2, 0.31622776601683794, 10.0): 0.42801568253151273,
(2, 0.31622776601683794, 56.23413251903491): 0.33838274623018844,
(2, 0.31622776601683794, 316.22776601683796): 0.2816127781774709,
(2, 0.31622776601683794, 1778.2794100389228): 0.24143415857615186,
(2, 0.31622776601683794, 10000.0): 0.21124586323352432,
(3, 1e-08, 10.0): 16.756744962359846,
(3, 1e-08, 56.23413251903491): 45.391216242724255,
(3, 1e-08, 316.22776601683796): 92.27982481303964,
(3, 1e-08, 1778.2794100389228): 157.82130933799775,
(3, 1e-08, 10000.0): 241.43111563504945,
(3, 7.498942093324558e-07, 10.0): 5.7895989791726095,
(3, 7.498942093324558e-07, 56.23413251903491): 13.627086559893465,
(3, 7.498942093324558e-07, 316.22776601683796): 24.38208662687902,
(3, 7.498942093324558e-07, 1778.2794100389228): 37.544982601002665,
(3, 7.498942093324558e-07, 10000.0): 52.63701932339524,
(3, 5.623413251903491e-05, 10.0): 1.887473467779649,
(3, 5.623413251903491e-05, 56.23413251903491): 3.579203838851147,
(3, 5.623413251903491e-05, 316.22776601683796): 5.4102249095501165,
(3, 5.623413251903491e-05, 1778.2794100389228): 7.29186292469213,
(3, 5.623413251903491e-05, 10000.0): 9.169188137598681,
(3, 0.004216965034285823, 10.0): 0.5228746551395594,
(3, 0.004216965034285823, 56.23413251903491): 0.714124690080171,
(3, 0.004216965034285823, 316.22776601683796): 0.8547590504861885,
(3, 0.004216965034285823, 1778.2794100389228): 0.962994283665188,
(3, 0.004216965034285823, 10000.0): 1.04825839995075,
(3, 0.31622776601683794, 10.0): 0.005345303443615897,
(3, 0.31622776601683794, 56.23413251903491): 0.004446805556731574,
(3, 0.31622776601683794, 316.22776601683796): 0.0037490807510542792,
(3, 0.31622776601683794, 1778.2794100389228): 0.0032125670405896553,
(3, 0.31622776601683794, 10000.0): 0.002794619101109903,
(4, 1e-08, 10.0): 12.126009150675591,
(4, 1e-08, 56.23413251903491): 41.02476133438564,
(4, 1e-08, 316.22776601683796): 88.09606590653031,
(4, 1e-08, 1778.2794100389228): 153.7780144766072,
(4, 1e-08, 10000.0): 237.50159381331855,
(4, 7.498942093324558e-07, 10.0): 4.335460451915329,
(4, 7.498942093324558e-07, 56.23413251903491): 12.254992012880091,
(4, 7.498942093324558e-07, 316.22776601683796): 23.066720358757788,
(4, 7.498942093324558e-07, 1778.2794100389228): 36.273272727566784,
(4, 7.498942093324558e-07, 10000.0): 51.40069478365495,
(4, 5.623413251903491e-05, 10.0): 1.414605677409382,
(4, 5.623413251903491e-05, 56.23413251903491): 3.130963246988818,
(4, 5.623413251903491e-05, 316.22776601683796): 4.9789851557660745,
(4, 5.623413251903491e-05, 1778.2794100389228): 6.873690044070669,
(4, 5.623413251903491e-05, 10000.0): 8.761559269574471,
(4, 0.004216965034285823, 10.0): 0.3392520292729965,
(4, 0.004216965034285823, 56.23413251903491): 0.5402775654632718,
(4, 0.004216965034285823, 316.22776601683796): 0.6880707116873284,
(4, 0.004216965034285823, 1778.2794100389228): 0.8020515194422677,
(4, 0.004216965034285823, 10000.0): 0.892139648134384,
(4, 0.31622776601683794, 10.0): 0.002588510502494359,
(4, 0.31622776601683794, 56.23413251903491): 0.002549447011017532,
(4, 0.31622776601683794, 316.22776601683796): 0.002313635192150163,
(4, 0.31622776601683794, 1778.2794100389228): 0.0020684044457654413,
(4, 0.31622776601683794, 10000.0): 0.0018507518649738008}
# Reference values for the MSTW2008nlo90cl set, keyed by the 3-tuple passed
# to PDF.xfxQ below (presumably (parton id, x, Q) — confirm against the pdf
# module); values are the expected x*f results. The zero entry exercises the
# exact-equality branch in test_lhapdf_mstw.
lhapdf_MSTW = {
    (1, 0.1, 1): 0.43071,
    (1, 0.1, 3): 0.41700802616344,
    (1, 0.1, 10): 0.39461,
    (4, 0.1, 1): 0,
    (4, 0.1, 3): 0.017505781181267437,
    (4, 0.1, 10): 0.028645,
    }
class TestLHAPDF(unittest.TestCase):
    """Compare this package's PDF interpolation against reference values
    produced by LHAPDF (the ``lhapdf_CT10``/``lhapdf_MSTW`` dicts above)."""

    def test_lhapdf_ct10(self):
        # Bug fix: TemporaryDirectory guarantees cleanup even when an
        # assertion fails (the old trailing shutil.rmtree was skipped on
        # failure, leaking the download dir); also stops shadowing the
        # builtin `dir`.
        with tempfile.TemporaryDirectory() as pdfdir:
            io.download_pdfset('CT10', pdfdir)
            p = pdf.PDF('CT10', 0, pdfdir=pdfdir)
            for args, lv in lhapdf_CT10.items():
                # Compare as a ratio so the tolerance is relative.
                self.assertAlmostEqual(p.xfxQ(*args) / lv,
                                       1, delta=0.0002,
                                       msg="Failed for {}".format(args))

    def test_lhapdf_mstw(self):
        with tempfile.TemporaryDirectory() as pdfdir:
            io.download_pdfset('MSTW2008nlo90cl', pdfdir)
            p = pdf.PDF('MSTW2008nlo90cl', 0, pdfdir=pdfdir)
            for args, lv in lhapdf_MSTW.items():
                if lv == 0:
                    # Exact zero reference: a ratio test would divide by zero.
                    self.assertEqual(p.xfxQ(*args), lv,
                                     msg="Failed for {}".format(args))
                else:
                    self.assertAlmostEqual(p.xfxQ(*args) / lv,
                                           1, delta=0.0002,
                                           msg="Failed for {}".format(args))
|
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import logging
from scipy.spatial.distance import cdist
# ANSI terminal escape sequences used by str_color(); 'ENDC' resets styling.
colors = {'pink': '\033[95m', 'blue': '\033[94m', 'green': '\033[92m', 'yellow': '\033[93m', 'red': '\033[91m',
          'ENDC': '\033[0m', 'bold': '\033[1m', 'underline': '\033[4m'}
class AudioViewer:
    """Renders spectrograms and waveforms via librosa/matplotlib.

    If `save_path` is None, figures are shown interactively; otherwise they
    are saved into that directory, auto-numbered unless an explicit file
    name is given.
    """

    def __init__(self, save_path=None):
        self.save_path = save_path  # directory for saved figures, or None to show
        self.spec_ids = 0  # counter used to auto-name spectrogram files
        self.wave_ids = 0  # counter used to auto-name waveform files

    def draw_spec(self, y, file_name=None, x_axis='time', y_axis='linear', colorbar_format='%+2.0f dB'):
        """Draw a spectrogram of `y` and show it or save it to `save_path`.

        Args:
            y: 2-D spectrogram array (e.g. dB-scaled magnitudes).
            file_name: optional file name inside `save_path`.
            x_axis, y_axis: axis types forwarded to librosa.display.specshow.
            colorbar_format: matplotlib format string for colorbar tick labels.
        """
        plt.figure()
        librosa.display.specshow(y, x_axis=x_axis, y_axis=y_axis)
        # Bug fix: colorbar_format was previously accepted but ignored
        # (the format string was hard-coded to '%+2.0f dB').
        plt.colorbar(format=colorbar_format)
        if self.save_path is None:
            plt.show()
        else:
            if file_name is None:
                plt.savefig(os.path.join(self.save_path, 'spec_%d.png' % self.spec_ids))
            else:
                plt.savefig(os.path.join(self.save_path, file_name))
            # Counter advances even for explicitly named files (keeps the
            # original numbering behavior).
            self.spec_ids += 1

    def draw_wav(self, y, sr, file_name=None):
        """Draw the waveform of audio samples `y` at sample rate `sr`."""
        plt.figure()
        librosa.display.waveplot(y, sr)
        if self.save_path is None:
            plt.show()
        else:
            if file_name is None:
                plt.savefig(os.path.join(self.save_path, 'wave_%d.png' % self.wave_ids))
            else:
                plt.savefig(os.path.join(self.save_path, file_name))
            self.wave_ids += 1
def folder_size(path='.'):
    """Return the total size in bytes of all files under `path`, recursing
    into subdirectories."""
    return sum(
        entry.stat().st_size if entry.is_file()
        else (folder_size(entry.path) if entry.is_dir() else 0)
        for entry in os.scandir(path)
    )
def str_color(color, data):
    """Return str(data) wrapped in the ANSI escape codes for `color`
    (looked up in the module-level `colors` dict), reset afterwards."""
    prefix = colors[color]
    reset = colors['ENDC']
    return prefix + str(data) + reset
def set_log(filename=None):
    """Configure root logging: stream to stdout when `filename` is None,
    otherwise write to that file. Format/date format are identical either way."""
    fmt = '%(asctime)s [%(filename)s %(lineno)d] %(levelname)s: %(message)s'
    date_fmt = '%m-%d %H:%M:%S'
    if filename is None:
        logging.basicConfig(stream=sys.stdout, format=fmt, datefmt=date_fmt)
    else:
        logging.basicConfig(filename=filename, format=fmt, datefmt=date_fmt)
def get_score_matrix(embeddings, vectors, metric='cosine'):
    """Pairwise distance matrix between rows of `embeddings` and rows of
    `vectors`, using scipy's cdist with the given metric."""
    distances = cdist(embeddings, vectors, metric=metric)
    return np.array(distances)
def calc_acc(score_matrix, ys):
    """Top-1 accuracy: fraction of rows whose argmax over `score_matrix`
    equals the label. One-hot `ys` is reduced to dense indices first."""
    label = ys if ys.shape[-1] == 1 else np.argmax(ys, 1)
    pred = np.argmax(score_matrix, axis=1)
    n_correct = np.count_nonzero(label == pred)
    return n_correct / label.shape[0]
def calc_eer(score_matrix, ys, save_path, plot=True, threshold_up=1.0, threshold_down=-1.0, dot_num=100000):
    """Sweep a decision threshold over `score_matrix` to estimate an
    equal-error-rate-style metric, optionally saving a DET curve.

    Args:
        score_matrix: (N, C) array; entry [i][j] is the score of sample i
            against class j.
        ys: labels, either a 1-d dense-index array or a one-hot matrix
            (one-hot input is converted with argmax).
        save_path: file path for the DET-curve figure (used when plot=True).
        plot: whether to record per-threshold rates and save a DET plot.
        threshold_up, threshold_down: range swept by the threshold.
        dot_num: number of threshold steps (= number of plotted dots).

    Returns:
        The minimum over all thresholds of false_negative / false_positive.
        NOTE(review): this ratio is not a standard EER (EER is the common
        rate at the threshold where FPR == FNR), and it raises
        ZeroDivisionError once false_positive reaches 0 — confirm the
        intended semantics with the author.
    """
    if not isinstance(score_matrix, np.ndarray):
        score_matrix = np.array(score_matrix)
    if ys.shape[-1] != 1 and len(ys.shape) > 1:
        # One-hot labels: reduce to dense class indices.
        logging.warning("ys isn't 1-d array or dense index, converting.")
        ys = np.argmax(ys, -1)
    step_size = (threshold_up - threshold_down) / (dot_num + 1)
    threshold = threshold_up
    best_eer = 1000
    if plot: x_cord, y_cord = [], []
    for i in range(dot_num):
        threshold -= step_size
        false_negative = 0
        false_positive = 0
        # A genuine pair scoring below the threshold is a miss (FN); an
        # impostor pair scoring at/above it is a false alarm (FP). Scores
        # are treated as similarities here — NOTE(review): get_score_matrix
        # above produces *distances*; verify callers convert accordingly.
        for idx in range(score_matrix.shape[0]):
            for idy in range(score_matrix[idx].shape[0]):
                if score_matrix[idx][idy] < threshold and ys[idx] == idy: false_negative += 1
                if score_matrix[idx][idy] >= threshold and ys[idx] != idy: false_positive += 1
        if plot:
            # Rates are normalised by the full matrix size, not by the
            # respective genuine/impostor pair counts.
            x_cord.append(false_positive / (score_matrix.shape[0] * score_matrix.shape[1]))
            y_cord.append(false_negative / (score_matrix.shape[0] * score_matrix.shape[1]))
        best_eer = min(best_eer, false_negative / false_positive)
    if plot:
        figure = plt.figure()
        fig_1 = figure.add_subplot(111)
        fig_1.set_title('DET Curves')
        plt.xlabel('False Alarm probability (in%)')
        plt.ylabel('Miss Probability (in%)')
        fig_1.scatter(x_cord, y_cord, c='r', marker='.')
        plt.savefig(save_path)
    # Bug fix: best_eer was computed but never returned — the function
    # always fell off the end and returned None.
    return best_eer
|
<reponame>IoT-Inspector/unblob<filename>tests/handlers/executable/test_elf.py<gh_stars>10-100
import pytest
from helpers import unhex
from unblob.file_utils import File
from unblob.handlers.executable.elf import ELF64Handler
from unblob.models import ValidChunk
ELF_CONTENT = unhex(
"""\
00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............|
00000010 03 00 3e 00 01 00 00 00 60 10 00 00 00 00 00 00 |..>.....`.......|
00000020 40 00 00 00 00 00 00 00 30 10 00 00 00 00 00 00 |@.......0.......|
00000030 00 00 00 00 40 00 38 00 0c 00 40 00 03 00 02 00 |....@.8...@.....|
00000040 06 00 00 00 04 00 00 00 40 00 00 00 00 00 00 00 |........@.......|
00000050 40 00 00 00 00 00 00 00 40 00 00 00 00 00 00 00 |@.......@.......|
00000060 a0 02 00 00 00 00 00 00 a0 02 00 00 00 00 00 00 |................|
00000070 08 00 00 00 00 00 00 00 03 00 00 00 04 00 00 00 |................|
00000080 00 00 00 00 00 00 00 00 18 03 00 00 00 00 00 00 |................|
00000090 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000000a0 00 00 00 00 00 00 00 00 08 00 00 00 00 00 00 00 |................|
000000b0 01 00 00 00 04 00 00 00 00 00 00 00 00 00 00 00 |................|
000000c0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000000d0 e0 02 00 00 00 00 00 00 e0 02 00 00 00 00 00 00 |................|
000000e0 00 10 00 00 00 00 00 00 01 00 00 00 05 00 00 00 |................|
000000f0 00 10 00 00 00 00 00 00 00 10 00 00 00 00 00 00 |................|
00000100 00 10 00 00 00 00 00 00 1b 00 00 00 00 00 00 00 |................|
00000110 1b 00 00 00 00 00 00 00 00 10 00 00 00 00 00 00 |................|
00000120 01 00 00 00 04 00 00 00 e0 02 00 00 00 00 00 00 |................|
00000130 00 20 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |. ..............|
00000140 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
00000150 00 10 00 00 00 00 00 00 01 00 00 00 06 00 00 00 |................|
00000160 e0 02 00 00 00 00 00 00 b8 3d 00 00 00 00 00 00 |.........=......|
00000170 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
00000180 00 00 00 00 00 00 00 00 00 10 00 00 00 00 00 00 |................|
00000190 02 00 00 00 06 00 00 00 00 00 00 00 00 00 00 00 |................|
000001a0 c8 3d 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |.=..............|
000001b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000001c0 08 00 00 00 00 00 00 00 04 00 00 00 04 00 00 00 |................|
000001d0 00 00 00 00 00 00 00 00 38 03 00 00 00 00 00 00 |........8.......|
000001e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000001f0 00 00 00 00 00 00 00 00 08 00 00 00 00 00 00 00 |................|
00000200 04 00 00 00 04 00 00 00 00 00 00 00 00 00 00 00 |................|
00000210 68 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |h...............|
00000220 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
00000230 08 00 00 00 00 00 00 00 53 e5 74 64 04 00 00 00 |........S.td....|
00000240 00 00 00 00 00 00 00 00 38 03 00 00 00 00 00 00 |........8.......|
00000250 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
00000260 00 00 00 00 00 00 00 00 08 00 00 00 00 00 00 00 |................|
00000270 50 e5 74 64 04 00 00 00 00 00 00 00 00 00 00 00 |P.td............|
00000280 0c 20 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |. ..............|
00000290 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000002a0 08 00 00 00 00 00 00 00 51 e5 74 64 06 00 00 00 |........Q.td....|
000002b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
000002d0 00 00 00 00 00 00 00 00 08 00 00 00 00 00 00 00 |................|
000002e0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
00001000 f3 0f 1e fa 48 83 ec 08 48 8b 05 d9 2f 00 00 48 |....H...H.../..H|
00001010 85 c0 74 02 ff d0 48 83 c4 08 c3 00 2e 73 68 73 |..t...H......shs|
00001020 74 72 74 61 62 00 2e 69 6e 69 74 00 00 00 00 00 |trtab..init.....|
00001030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
*
00001070 0b 00 00 00 01 00 00 00 06 00 00 00 00 00 00 00 |................|
00001080 00 10 00 00 00 00 00 00 00 10 00 00 00 00 00 00 |................|
00001090 1b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000010a0 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000010b0 01 00 00 00 03 00 00 00 00 00 00 00 00 00 00 00 |................|
000010c0 00 00 00 00 00 00 00 00 1b 10 00 00 00 00 00 00 |................|
000010d0 11 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000010e0 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000010f0
"""
)
def test_chunk_is_calculated():
    """The handler should report one valid chunk spanning the whole ELF image."""
    file = File.from_bytes(ELF_CONTENT)
    chunk = ELF64Handler().calculate_chunk(file, 0)
    assert isinstance(chunk, ValidChunk)
    assert chunk.start_offset == 0
    assert chunk.end_offset == len(ELF_CONTENT)
@pytest.mark.parametrize(
    "offset, byte",
    [
        # Each case overwrites one ELF64 header field with an out-of-range
        # value (offsets per the case ids: e_type at 0x10, e_machine at 0x12,
        # e_version at 0x14).
        pytest.param(0x10, 0xFE, id="invalid e_type"),
        pytest.param(0x12, 0xFE, id="invalid e_machine"),
        pytest.param(0x14, 0xFE, id="invalid e_version"),
    ],
)
def test_invalid_header(offset, byte):
    """A corrupted ELF header must make the handler reject the file (None)."""
    file = File.from_bytes(ELF_CONTENT)
    file[offset] = byte  # corrupt the header in place
    chunk = ELF64Handler().calculate_chunk(file, 0)
    assert chunk is None
|
import numpy as np
from scipy.interpolate import splev
###############################################################################
class FinCurveFitMethod():
    # Marker "parent type" shared by the curve-fit classes below; each of
    # them records it as self._parentType rather than inheriting from it.
    pass
###############################################################################
class FinCurveFitPolynomial():
    """Yield-curve fit using a plain polynomial of the given power.

    Coefficients live in `_coeffs` (highest power first, the layout
    `np.polyval` expects) and are filled in by the external fitting routine.
    """

    def __init__(self, power=3):
        self._parentType = FinCurveFitMethod
        self._power = power
        self._coeffs = []

    def _interpolatedYield(self, t):
        """Evaluate the fitted polynomial at time(s) `t`."""
        return np.polyval(self._coeffs, t)
###############################################################################
class FinCurveFitNelsonSiegel():
    """Nelson-Siegel yield-curve parameterisation.

    y(t) = beta1 + beta2*(1-exp(-t/tau))/(t/tau)
                 + beta3*((1-exp(-t/tau))/(t/tau) - exp(-t/tau))
    """

    def __init__(self, tau=None, bounds=None):
        """
        Args:
            tau: optional fixed decay time; left to the fitter if None.
            bounds: (lower, upper) bound tuples for (beta1, beta2, beta3, tau).
                Fairly permissive defaults; only tau is restricted (0.5-100).
        """
        self._parentType = FinCurveFitMethod
        self._beta1 = None
        self._beta2 = None
        self._beta3 = None
        self._tau = tau
        # Bug fix: the default bounds list was a shared mutable default
        # argument; build a fresh default per instance instead.
        if bounds is None:
            bounds = [(-1, -1, -1, 0.5), (1, 1, 1, 100)]
        self._bounds = bounds

    def _interpolatedYield(self, t, beta1=None, beta2=None, beta3=None, tau=None):
        """Evaluate the NS curve at time(s) `t`; parameters default to the
        fitted instance values."""
        # Guard against t == 0, which would divide by zero in theta.
        t = np.maximum(t, 1e-10)
        if beta1 is None:
            beta1 = self._beta1
        if beta2 is None:
            beta2 = self._beta2
        if beta3 is None:
            beta3 = self._beta3
        if tau is None:
            tau = self._tau
        theta = t / tau
        expTerm = np.exp(-theta)
        yld = beta1
        yld += beta2 * (1.0 - expTerm) / theta
        yld += beta3 * ((1.0 - expTerm) / theta - expTerm)
        return yld
###############################################################################
class FinCurveFitNelsonSiegelSvensson():
    """Nelson-Siegel-Svensson yield-curve parameterisation.

    Extends Nelson-Siegel with a second hump term:
    y(t) = beta1 + beta2*f1 + beta3*(f1 - e1) + beta4*(f2 - e2)
    where fi = (1 - exp(-t/tau_i)) / (t/tau_i) and ei = exp(-t/tau_i).
    """

    def __init__(self, tau1=None,
                 tau2=None,
                 bounds=None):
        """
        Args:
            tau1, tau2: optional fixed decay times; left to the fitter if None.
            bounds: (lower, upper) bound tuples for
                (beta1, beta2, beta3, beta4, tau1, tau2). The defaults impose
                some bounds to help ensure a sensible result if the user
                does not provide any — especially for tau2.
        """
        self._parentType = FinCurveFitMethod
        self._beta1 = None
        self._beta2 = None
        self._beta3 = None
        self._beta4 = None
        self._tau1 = tau1
        self._tau2 = tau2
        # Bug fix: the default bounds list was a shared mutable default
        # argument; build a fresh default per instance instead.
        if bounds is None:
            bounds = [(0, -1, -1, -1, 0, 1), (1, 1, 1, 1, 10, 100)]
        self._bounds = bounds

    def _interpolatedYield(self, t, beta1=None, beta2=None, beta3=None,
                           beta4=None, tau1=None, tau2=None):
        """Evaluate the NSS curve at time(s) `t`; parameters default to the
        fitted instance values."""
        # Careful if we get a time zero point (division by zero in theta).
        t = np.maximum(t, 1e-10)
        if beta1 is None:
            beta1 = self._beta1
        if beta2 is None:
            beta2 = self._beta2
        if beta3 is None:
            beta3 = self._beta3
        if beta4 is None:
            beta4 = self._beta4
        if tau1 is None:
            tau1 = self._tau1
        if tau2 is None:
            tau2 = self._tau2
        theta1 = t / tau1
        theta2 = t / tau2
        expTerm1 = np.exp(-theta1)
        expTerm2 = np.exp(-theta2)
        yld = beta1
        yld += beta2 * (1.0 - expTerm1) / theta1
        yld += beta3 * ((1.0 - expTerm1) / theta1 - expTerm1)
        yld += beta4 * ((1.0 - expTerm2) / theta2 - expTerm2)
        return yld
###############################################################################
class FinCurveFitBSpline():
    """Yield-curve fit using a B-spline of the given power with fixed knots."""

    def __init__(self, power=3, knots=None):
        """
        Args:
            power: spline degree.
            knots: knot times; defaults to [1, 3, 5, 10].
        """
        self._parentType = FinCurveFitMethod
        self._power = power
        # Bug fix: the default knot list was a shared mutable default
        # argument; create a fresh list per instance so one fit cannot
        # mutate another instance's knots.
        self._knots = [1, 3, 5, 10] if knots is None else knots

    def _interpolatedYield(self, t):
        """Evaluate the fitted spline at time(s) `t`.

        NOTE(review): self._spline (a splrep-style tck tuple) is set by the
        external fitting routine and must exist before this is called.
        """
        # Guard against evaluating at exactly t == 0.
        t = np.maximum(t, 1e-10)
        return splev(t, self._spline)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# -*- coding: utf-8 -*-
import logging
import typing
import torch
from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table
from torch import nn
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from .logger import log_first_n
__all__ = [
"activation_count_operators",
"flop_count_operators",
"parameter_count_table",
"parameter_count",
]
FLOPS_MODE = "flops"
ACTIVATIONS_MODE = "activations"
# some extra ops to ignore from counting.
_IGNORED_OPS = [
"aten::add",
"aten::add_",
"aten::batch_norm",
"aten::constant_pad_nd",
"aten::div",
"aten::div_",
"aten::exp",
"aten::log2",
"aten::max_pool2d",
"aten::meshgrid",
"aten::mul",
"aten::mul_",
"aten::nonzero_numpy",
"aten::relu",
"aten::relu_",
"aten::rsub",
"aten::sigmoid",
"aten::sigmoid_",
"aten::softmax",
"aten::sort",
"aten::sqrt",
"aten::sub",
"aten::upsample_nearest2d",
"prim::PythonOp",
"torchvision::caffe_nms",
]
def flop_count_operators(
    model: nn.Module, inputs: list, **kwargs
) -> typing.DefaultDict[str, float]:
    """
    Implement operator-level flops counting using jit.

    This is a wrapper of fvcore.nn.flop_count that supports standard
    detection models in detectron2.

    Note:
        The function runs the input through the model to compute flops.
        The flops of a detection model is often input-dependent, for example,
        the flops of box & mask head depends on the number of proposals &
        the number of detected objects.
        Therefore, the flops counting using a single input may not accurately
        reflect the computation cost of a model.

    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): inputs to model, in detectron2's standard format.
            Must contain exactly one element (batch size 1).

    Returns:
        A defaultdict mapping operator name to its measured count, as
        produced by fvcore.nn.flop_count.
    """
    return _wrapper_count_operators(model=model, inputs=inputs, mode=FLOPS_MODE, **kwargs)
def activation_count_operators(
    model: nn.Module, inputs: list, **kwargs
) -> typing.DefaultDict[str, float]:
    """
    Implement operator-level activations counting using jit.

    This is a wrapper of fvcore.nn.activation_count that supports standard
    detection models in detectron2.

    Note:
        The function runs the input through the model to compute activations.
        The activations of a detection model is often input-dependent, for
        example, the activations of box & mask head depends on the number of
        proposals & the number of detected objects.

    Args:
        model: a detectron2 model that takes `list[dict]` as input.
        inputs (list[dict]): inputs to model, in detectron2's standard format.
            Must contain exactly one element (batch size 1).

    Returns:
        A defaultdict mapping operator name to its measured count, as
        produced by fvcore.nn.activation_count.
    """
    return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
def _flatten_to_tuple(outputs):
result = []
if isinstance(outputs, torch.Tensor):
result.append(outputs)
elif isinstance(outputs, (list, tuple)):
for v in outputs:
result.extend(_flatten_to_tuple(v))
elif isinstance(outputs, dict):
for _, v in outputs.items():
result.extend(_flatten_to_tuple(v))
elif isinstance(outputs, Instances):
result.extend(_flatten_to_tuple(outputs.get_fields()))
elif isinstance(outputs, (Boxes, BitMasks, ImageList)):
result.append(outputs.tensor)
else:
log_first_n(
logging.WARN,
f"Output of type {type(outputs)} not included in flops/activations count.",
n=10,
)
return tuple(result)
def _wrapper_count_operators(
    model: nn.Module, inputs: list, mode: str, **kwargs
) -> typing.DefaultDict[str, float]:
    """
    Shared implementation behind flop/activation counting: wraps the model so
    jit tracing sees plain Tensor inputs/outputs, runs the selected fvcore
    counter under no_grad, and restores the model's training mode afterwards.
    """
    # ignore some ops: map each op in _IGNORED_OPS to a handler that
    # contributes nothing; caller-supplied handlers take precedence.
    supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS}
    supported_ops.update(kwargs.pop("supported_ops", {}))
    kwargs["supported_ops"] = supported_ops
    assert len(inputs) == 1, "Please use batch size=1"
    tensor_input = inputs[0]["image"]
    class WrapModel(nn.Module):
        # Adapter around the detectron2 model: unwraps (Distributed)DataParallel
        # and converts between Tensor I/O (required by jit tracing) and the
        # model's list[dict] interface.
        def __init__(self, model):
            super().__init__()
            if isinstance(
                model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)
            ):
                self.model = model.module
            else:
                self.model = model
        def forward(self, image):
            # jit requires the input/output to be Tensors
            inputs = [{"image": image}]
            outputs = self.model.forward(inputs)
            # Only the subgraph that computes the returned tuple of tensor will be
            # counted. So we flatten everything we found to tuple of tensors.
            return _flatten_to_tuple(outputs)
    old_train = model.training  # remembered so the mode can be restored below
    with torch.no_grad():
        if mode == FLOPS_MODE:
            ret = flop_count(WrapModel(model).train(False), (tensor_input,), **kwargs)
        elif mode == ACTIVATIONS_MODE:
            ret = activation_count(WrapModel(model).train(False), (tensor_input,), **kwargs)
        else:
            raise NotImplementedError("Count for mode {} is not supported yet.".format(mode))
    # compatible with change in fvcore: newer versions return (counts, skips)
    # NOTE(review): tuple unpacking behavior presumed from this guard — confirm
    # against the installed fvcore version.
    if isinstance(ret, tuple):
        ret = ret[0]
    model.train(old_train)
    return ret
|
<reponame>souravsingh/chainercv
import itertools
import numpy as np
import os
import six
from chainercv.evaluations.eval_detection_coco import _redirect_stdout
from chainercv.evaluations.eval_detection_coco import _summarize
try:
import pycocotools.coco
import pycocotools.cocoeval
import pycocotools.mask as mask_tools
_available = True
except ImportError:
_available = False
def eval_instance_segmentation_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_areas=None, gt_crowdeds=None):
    """Evaluate instance segmentations based on evaluation code of MS COCO.

    This function evaluates predicted instance segmentations obtained from
    a dataset by using average precision for each class.
    The code is based on the evaluation code used in MS COCO.

    Args:
        pred_masks (iterable of numpy.ndarray): See the table below.
        pred_labels (iterable of numpy.ndarray): See the table below.
        pred_scores (iterable of numpy.ndarray): See the table below.
        gt_masks (iterable of numpy.ndarray): See the table below.
        gt_labels (iterable of numpy.ndarray): See the table below.
        gt_areas (iterable of numpy.ndarray): See the table below. If
            :obj:`None`, some scores are not returned.
        gt_crowdeds (iterable of numpy.ndarray): See the table below.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`pred_masks`, ":math:`[(R, H, W)]`", :obj:`bool`, --
        :obj:`pred_labels`, ":math:`[(R,)]`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"
        :obj:`pred_scores`, ":math:`[(R,)]`", :obj:`float32`, \
        --
        :obj:`gt_masks`, ":math:`[(R, H, W)]`", :obj:`bool`, --
        :obj:`gt_labels`, ":math:`[(R,)]`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"
        :obj:`gt_areas`, ":math:`[(R,)]`", \
        :obj:`float32`, --
        :obj:`gt_crowdeds`, ":math:`[(R,)]`", :obj:`bool`, --

    All inputs should have the same length. For more detailed explanation
    of the inputs, please refer to
    :class:`chainercv.datasets.COCOInstanceSegmentationDataset`.

    .. seealso::
        :class:`chainercv.datasets.COCOInstanceSegmentationDataset`.

    Returns:
        dict:

        The keys, value-types and the description of the values are listed
        below. The APs and ARs calculated with different iou
        thresholds, sizes of objects, and numbers of detections
        per image. For more details on the 12 patterns of evaluation metrics,
        please refer to COCO's official `evaluation page`_.

        .. csv-table::
            :header: key, type, description

            ap/iou=0.50:0.95/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_
            ap/iou=0.50/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_
            ap/iou=0.75/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_
            ap/iou=0.50:0.95/area=small/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_ [#coco_ins_eval_5]_
            ap/iou=0.50:0.95/area=medium/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_ [#coco_ins_eval_5]_
            ap/iou=0.50:0.95/area=large/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_ [#coco_ins_eval_5]_
            ar/iou=0.50:0.95/area=all/max_dets=1, *numpy.ndarray*, \
                [#coco_ins_eval_2]_
            ar/iou=0.50/area=all/max_dets=10, *numpy.ndarray*, \
                [#coco_ins_eval_2]_
            ar/iou=0.75/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_
            ar/iou=0.50:0.95/area=small/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_ [#coco_ins_eval_5]_
            ar/iou=0.50:0.95/area=medium/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_ [#coco_ins_eval_5]_
            ar/iou=0.50:0.95/area=large/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_ [#coco_ins_eval_5]_
            map/iou=0.50:0.95/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_3]_
            map/iou=0.50/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_3]_
            map/iou=0.75/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_3]_
            map/iou=0.50:0.95/area=small/max_dets=100, *float*, \
                [#coco_ins_eval_3]_ [#coco_ins_eval_5]_
            map/iou=0.50:0.95/area=medium/max_dets=100, *float*, \
                [#coco_ins_eval_3]_ [#coco_ins_eval_5]_
            map/iou=0.50:0.95/area=large/max_dets=100, *float*, \
                [#coco_ins_eval_3]_ [#coco_ins_eval_5]_
            mar/iou=0.50:0.95/area=all/max_dets=1, *float*, \
                [#coco_ins_eval_4]_
            mar/iou=0.50/area=all/max_dets=10, *float*, \
                [#coco_ins_eval_4]_
            mar/iou=0.75/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_4]_
            mar/iou=0.50:0.95/area=small/max_dets=100, *float*, \
                [#coco_ins_eval_4]_ [#coco_ins_eval_5]_
            mar/iou=0.50:0.95/area=medium/max_dets=100, *float*, \
                [#coco_ins_eval_4]_ [#coco_ins_eval_5]_
            mar/iou=0.50:0.95/area=large/max_dets=100, *float*, \
                [#coco_ins_eval_4]_ [#coco_ins_eval_5]_
            coco_eval, *pycocotools.cocoeval.COCOeval*, \
                result from :obj:`pycocotools`
            existent_labels, *numpy.ndarray*, \
                used labels \

    .. [#coco_ins_eval_1] An array of average precisions. \
        The :math:`l`-th value corresponds to the average precision \
        for class :math:`l`. If class :math:`l` does not exist in \
        either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
        value is set to :obj:`numpy.nan`.
    .. [#coco_ins_eval_2] An array of average recalls. \
        The :math:`l`-th value corresponds to the average precision \
        for class :math:`l`. If class :math:`l` does not exist in \
        either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
        value is set to :obj:`numpy.nan`.
    .. [#coco_ins_eval_3] The average of average precisions over classes.
    .. [#coco_ins_eval_4] The average of average recalls over classes.
    .. [#coco_ins_eval_5] Skip if :obj:`gt_areas` is :obj:`None`.

    """
    if not _available:
        raise ValueError(
            'Please install pycocotools \n'
            'pip install -e \'git+https://github.com/cocodataset/coco.git'
            '#egg=pycocotools&subdirectory=PythonAPI\'')

    gt_coco = pycocotools.coco.COCO()
    pred_coco = pycocotools.coco.COCO()

    # All per-image inputs are consumed lazily in lockstep below.
    pred_masks = iter(pred_masks)
    pred_labels = iter(pred_labels)
    pred_scores = iter(pred_scores)
    gt_masks = iter(gt_masks)
    gt_labels = iter(gt_labels)
    if gt_areas is None:
        # Without ground-truth areas, dummy areas are used and the
        # area-dependent (small/medium/large) metrics are skipped.
        compute_area_dependent_metrics = False
        gt_areas = itertools.repeat(None)
    else:
        compute_area_dependent_metrics = True
        gt_areas = iter(gt_areas)
    gt_crowdeds = (iter(gt_crowdeds) if gt_crowdeds is not None
                   else itertools.repeat(None))

    images = []
    pred_annos = []
    gt_annos = []
    # Dict used as a set of labels seen in predictions or ground truth.
    existent_labels = {}
    for i, (pred_mask, pred_label, pred_score, gt_mask, gt_label,
            gt_area, gt_crowded) in enumerate(six.moves.zip(
                pred_masks, pred_labels, pred_scores,
                gt_masks, gt_labels, gt_areas, gt_crowdeds)):
        # Image size is taken from the (R, H, W) prediction masks.
        size = pred_mask.shape[1:]
        if gt_area is None:
            gt_area = itertools.repeat(None)
        if gt_crowded is None:
            gt_crowded = itertools.repeat(None)
        # Starting ids from 1 is important when using COCO.
        img_id = i + 1

        for pred_msk, pred_lb, pred_sc in zip(
                pred_mask, pred_label, pred_score):
            pred_annos.append(
                _create_anno(pred_msk, pred_lb, pred_sc,
                             img_id=img_id, anno_id=len(pred_annos) + 1,
                             crw=0, ar=None))
            existent_labels[pred_lb] = True

        for gt_msk, gt_lb, gt_ar, gt_crw in zip(
                gt_mask, gt_label, gt_area, gt_crowded):
            gt_annos.append(
                _create_anno(gt_msk, gt_lb, None,
                             img_id=img_id, anno_id=len(gt_annos) + 1,
                             ar=gt_ar, crw=gt_crw))
            existent_labels[gt_lb] = True
        images.append({'id': img_id, 'height': size[0], 'width': size[1]})

    existent_labels = sorted(existent_labels.keys())
    pred_coco.dataset['categories'] = [{'id': i} for i in existent_labels]
    gt_coco.dataset['categories'] = [{'id': i} for i in existent_labels]
    pred_coco.dataset['annotations'] = pred_annos
    gt_coco.dataset['annotations'] = gt_annos
    pred_coco.dataset['images'] = images
    gt_coco.dataset['images'] = images

    # createIndex prints progress messages; silence them.
    with _redirect_stdout(open(os.devnull, 'w')):
        pred_coco.createIndex()
        gt_coco.createIndex()

    coco_eval = pycocotools.cocoeval.COCOeval(gt_coco, pred_coco, 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()

    results = {'coco_eval': coco_eval}
    p = coco_eval.params
    # Arguments shared by every _summarize call below.
    common_kwargs = {
        'prec': coco_eval.eval['precision'],
        'rec': coco_eval.eval['recall'],
        'iou_threshs': p.iouThrs,
        'area_ranges': p.areaRngLbl,
        'max_detection_list': p.maxDets}
    # One entry per reported metric; each maps a result key to the
    # _summarize selection arguments for that metric.
    all_kwargs = {
        'ap/iou=0.50:0.95/area=all/max_dets=100': {
            'ap': True, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 100},
        'ap/iou=0.50/area=all/max_dets=100': {
            'ap': True, 'iou_thresh': 0.5, 'area_range': 'all',
            'max_detection': 100},
        'ap/iou=0.75/area=all/max_dets=100': {
            'ap': True, 'iou_thresh': 0.75, 'area_range': 'all',
            'max_detection': 100},
        'ar/iou=0.50:0.95/area=all/max_dets=1': {
            'ap': False, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 1},
        'ar/iou=0.50:0.95/area=all/max_dets=10': {
            'ap': False, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 10},
        'ar/iou=0.50:0.95/area=all/max_dets=100': {
            'ap': False, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 100},
    }
    if compute_area_dependent_metrics:
        all_kwargs.update({
            'ap/iou=0.50:0.95/area=small/max_dets=100': {
                'ap': True, 'iou_thresh': None, 'area_range': 'small',
                'max_detection': 100},
            'ap/iou=0.50:0.95/area=medium/max_dets=100': {
                'ap': True, 'iou_thresh': None, 'area_range': 'medium',
                'max_detection': 100},
            'ap/iou=0.50:0.95/area=large/max_dets=100': {
                'ap': True, 'iou_thresh': None, 'area_range': 'large',
                'max_detection': 100},
            'ar/iou=0.50:0.95/area=small/max_dets=100': {
                'ap': False, 'iou_thresh': None, 'area_range': 'small',
                'max_detection': 100},
            'ar/iou=0.50:0.95/area=medium/max_dets=100': {
                'ap': False, 'iou_thresh': None, 'area_range': 'medium',
                'max_detection': 100},
            'ar/iou=0.50:0.95/area=large/max_dets=100': {
                'ap': False, 'iou_thresh': None, 'area_range': 'large',
                'max_detection': 100},
        })

    for key, kwargs in all_kwargs.items():
        kwargs.update(common_kwargs)
        metrics, mean_metric = _summarize(**kwargs)

        # pycocotools ignores classes that are not included in
        # either gt or prediction, but lies between 0 and
        # the maximum label id.
        # We set values for these classes to np.nan.
        results[key] = np.nan * np.ones(np.max(existent_labels) + 1)
        results[key][existent_labels] = metrics
        results['m' + key] = mean_metric

    results['existent_labels'] = existent_labels
    return results
def _create_anno(msk, lb, sc, img_id, anno_id, ar=None, crw=None):
    """Create a COCO-format annotation dict for a single instance mask.

    Args:
        msk (numpy.ndarray): A (H, W) boolean/binary mask of one instance.
        lb (int): Class label, stored as COCO ``category_id``.
        sc (float or None): Prediction score; when :obj:`None` (ground
            truth), no ``score`` key is added.
        img_id (int): COCO image id the annotation belongs to.
        anno_id (int): Unique annotation id (COCO requires ids starting
            from 1).
        ar (float or None): Instance area. When :obj:`None`, a dummy area
            is computed from the mask; area-dependent scores derived from
            it are ignored downstream.
        crw (bool/int or None): Crowd flag; :obj:`None` means not a crowd.

    Returns:
        dict: An annotation in the format expected by pycocotools.
    """
    if crw is None:
        crw = False
    # pycocotools' RLE encoder requires a Fortran-ordered uint8 mask.
    msk = np.asfortranarray(msk.astype(np.uint8))
    rle = mask_tools.encode(msk)
    if ar is None:
        # Dummy area passed to pycocotools; area-dependent scores computed
        # from it are ignored afterwards.
        ar = mask_tools.area(rle)
    anno = {
        'image_id': img_id, 'category_id': lb,
        'segmentation': rle,
        'area': ar,
        'id': anno_id,
        'iscrowd': crw}
    if sc is not None:
        anno.update({'score': sc})
    return anno
|
<filename>server_common/ioc_data_source.py
from __future__ import print_function, absolute_import, division, unicode_literals
import six
"""
Data source for ioc data
"""
from server_common.mysql_abstraction_layer import DatabaseError
from server_common.utilities import print_and_log
PV_INFO_FIELD_NAME = "info_field"
"""name of the info field on a pv to express its interest level and archive status"""

PV_DESCRIPTION_NAME = "description"
"""name of the description field on a pv"""

GET_PV_INFO_QUERY = """
SELECT s.iocname, p.pvname, lower(p.infoname), p.value
    FROM pvinfo p
    JOIN pvs s ON s.pvname = p.pvname
    WHERE lower(p.infoname) LIKE "log%"
    ORDER BY s.iocname, p.infoname"""
"""Query to return pv info for iocs from the ioc database"""

GET_PVS_WITH_DETAILS = """
SELECT DISTINCT pvinfo.pvname, pvs.record_type, pvs.record_desc, pvs.iocname
    FROM pvinfo
    INNER JOIN pvs ON pvs.pvname = pvinfo.pvname"""
"""Base query for pv details; extended with WHERE clauses below"""

GET_ACTIVE_IOC_INTERESTING_PVS = GET_PVS_WITH_DETAILS + """
    WHERE (pvs.iocname in
        (SELECT iocname
         FROM iocrt
         WHERE running=1)
    AND infoname='INTEREST')"""
"""Select interesting pvs from running active iocs"""

GET_PVS_WITH_TEMPLATED_INTEREST = GET_PVS_WITH_DETAILS + """
    WHERE (infoname='INTEREST' AND value='{interest}')"""
"""Select pvs at a given interest level; fill {interest} via str.format"""

GET_PVS_WITH_TEMPLATED_INTEREST_FOR_AN_IOC = GET_PVS_WITH_TEMPLATED_INTEREST + """
    AND iocname=%s"""
"""Select pvs at a given interest level restricted to one ioc"""

GET_PVS_WITH_DETAILS_FOR_AN_IOC = GET_PVS_WITH_DETAILS + """
    AND iocname=%s"""
"""Select pv details restricted to one ioc"""

GET_PVNAMES_IN_PVCATEGORY = """
SELECT DISTINCT pvinfo.pvname
    FROM pvinfo
    INNER JOIN pvs ON pvs.pvname = pvinfo.pvname
    WHERE infoname='PVCATEGORY'
    AND value LIKE %s
    AND pvinfo.pvname NOT LIKE '%:SP'"""
"""Get pvnames that are in a PV category but are not set points"""

GET_IOCS_AND_DESCRIPTIONS = "SELECT iocname, descr FROM iocs"
"""Return all IOC names and their descriptions"""

GET_IOCS_AND_RUNNING_STATUS = """
SELECT DISTINCT iocname, running
    FROM iocrt
    WHERE iocname NOT LIKE 'PSCTRL_%'"""
"""Sql query for getting iocnames and their running status"""

UPDATE_IOC_IS_RUNNING = "UPDATE iocrt SET running=%s WHERE iocname=%s"
"""Update whether an ioc is running"""

UPDATE_PV_INFO = "INSERT INTO pvinfo (pvname, infoname, value) VALUES (%s,%s,%s)"
"""Insert a pv info entry (pvname, infoname, value)"""

INSERT_PV_DETAILS = "INSERT INTO pvs (pvname, record_type, record_desc, iocname) VALUES (%s,%s,%s,%s)"
"""Insert PV details into the pvs table"""

INSERT_IOC_STARTED_DETAILS = "INSERT INTO iocrt (iocname, pid, start_time, stop_time, running, exe_path) " \
                             "VALUES (%s,%s,NOW(),'1970-01-01 00:00:01',1,%s)"
"""Insert details about the start of an IOC"""

DELETE_IOC_RUN_STATE = "DELETE FROM iocrt WHERE iocname=%s"
"""Delete ioc run state"""

DELETE_IOC_PV_DETAILS = "DELETE FROM pvs WHERE iocname=%s"
"""Delete ioc pv details, this cascades to pv info details"""
class IocDataSource(object):
    """
    A source for IOC data from the database.
    """
    def __init__(self, mysql_abstraction_layer):
        """
        Constructor.

        Args:
            mysql_abstraction_layer(server_common.mysql_abstraction_layer.AbstratSQLCommands): contact database with sql
        """
        self.mysql_abstraction_layer = mysql_abstraction_layer

    def _query_and_normalise(self, sqlquery, bind_vars=None):
        """
        Executes the given query against the database and converts bytearray
        fields in each row to unicode strings.

        Args:
            sqlquery: The query to execute.
            bind_vars: Any variables to bind to the query. Defaults to None.

        Returns:
            A list of lists representing the rows from the table; bytearray
            entries are decoded as utf-8.
        """
        # Get as a plain list of lists
        values = [list(element) for element in self.mysql_abstraction_layer.query(sqlquery, bind_vars)]

        # Convert any bytearrays the driver returned into text
        for i, row in enumerate(values):
            for j, element in enumerate(row):
                if isinstance(element, bytearray):
                    values[i][j] = element.decode("utf-8")
        return values

    def get_iocs_and_descriptions(self):
        """
        Gets a list of all the IOCs in the database and their descriptions.

        Returns:
            dict : IOCs and their descriptions; empty on a database error
        """
        try:
            ioc_and_description_list = self._query_and_normalise(GET_IOCS_AND_DESCRIPTIONS)
            return dict((element[0], dict(description=element[1])) for element in ioc_and_description_list)
        except Exception as err:
            print_and_log("could not get IOCS from database: %s" % err, "MAJOR", "DBSVR")
            return dict()

    def get_pars(self, category):
        """
        Gets parameters of a particular category from the IOC database.

        Args:
            category: substring matched against the PVCATEGORY info value

        Returns:
            list : A list of the names of PVs associated with the parameter category
        """
        try:
            # LIKE pattern: category may appear anywhere in the value.
            values = self._query_and_normalise(GET_PVNAMES_IN_PVCATEGORY, ("%{0}%".format(category),))
            return [six.text_type(val[0]) for val in values]
        except Exception as err:
            print_and_log("could not get parameters category %s from database: %s" % (category, err), "MAJOR", "DBSVR")
            return []

    def get_pv_logging_info(self):
        """
        Get pv info for annotations which start with LOG.

        Returns:
            dict: mapping from ioc name to a list of [pv name, info name, value]
        """
        data = self._query_and_normalise(GET_PV_INFO_QUERY)

        pv_logging_info = {}
        for iocname, pvname, infoname, value in data:
            ioc_values = pv_logging_info.get(iocname, [])
            ioc_values.append([pvname, infoname, value])
            pv_logging_info[iocname] = ioc_values
        return pv_logging_info

    def get_interesting_pvs(self, level="", ioc=None):
        """
        Queries the database for PVs based on their interest level and their IOC.

        Args:
            level (string, optional): The interest level to search for, either High, Medium, Low or Facility. Default to
                all interest levels.
            ioc (string, optional): The IOC to search. Default is all IOCs.

        Returns:
            list : A list of the PVs that match the search given by level and ioc
        """
        try:
            # Only the first letter of the level is significant.
            if level.lower().startswith('h'):
                interest = 'HIGH'
            elif level.lower().startswith('m'):
                interest = 'MEDIUM'
            elif level.lower().startswith('l'):
                interest = 'LOW'
            elif level.lower().startswith('f'):
                interest = 'FACILITY'
            else:
                # Try to get all pvs!
                interest = None

            # Pick the query variant matching the ioc/interest combination.
            if ioc is not None and ioc != "":
                bind_vars = (ioc, )
                if interest is not None:
                    sql_query = GET_PVS_WITH_TEMPLATED_INTEREST_FOR_AN_IOC.format(interest=interest)
                else:
                    sql_query = GET_PVS_WITH_DETAILS_FOR_AN_IOC
            else:
                bind_vars = None
                if interest is not None:
                    sql_query = GET_PVS_WITH_TEMPLATED_INTEREST.format(interest=interest)
                else:
                    sql_query = GET_PVS_WITH_DETAILS
            return self._query_and_normalise(sql_query, bind_vars)
        except Exception as err:
            print_and_log("issue with getting interesting PVs: %s" % err, "MAJOR", "DBSVR")
            return []

    def get_active_pvs(self):
        """
        Queries the database for interesting PVs from active iocs.

        Returns:
            list : A list of the PVs in running IOCs
        """
        try:
            return self._query_and_normalise(GET_ACTIVE_IOC_INTERESTING_PVS)
        except Exception as err:
            print_and_log("issue with getting active PVs: %s" % err, "MAJOR", "DBSVR")
            return []

    def get_iocs_and_running_status(self):
        """
        Get all the iocnames and whether they are running, but ignore IOCs associated with PSCTRL.

        Returns:
            list: iocs and running states
        """
        try:
            return self._query_and_normalise(GET_IOCS_AND_RUNNING_STATUS)
        except Exception as err:
            print_and_log("issue with reading IOC statuses before update: %s" % err, "MAJOR", "DBSVR")
            return []

    def update_ioc_is_running(self, ioc_name, running):
        """
        Update running state in the database.

        Args:
            ioc_name: iocs name
            running: the new running state
        """
        try:
            self.mysql_abstraction_layer.update(UPDATE_IOC_IS_RUNNING, (running, ioc_name))
        except Exception as err:
            print_and_log("Failed to update ioc running state in database ({ioc_name},{running}): {error}"
                          .format(ioc_name=ioc_name, running=running, error=err), "MAJOR", "DBSVR")

    def insert_ioc_start(self, ioc_name, pid, exe_path, pv_database, prefix):
        """
        Insert ioc start information into the database. This does a similar task to pvdump but for python server.

        Args:
            ioc_name: name of the ioc
            pid: process id of the program
            exe_path: executable's path
            pv_database: pv database used to construct the pv. To add a pv info field use entries in the pv for
                PV_INFO_FIELD_NAME.
                For example: {'pv name': {'info_field': {'archive': '', 'INTEREST': 'HIGH'}, 'type': 'float'}}
            prefix: prefix for the pv server
        """
        # Clear any stale state for this ioc before re-inserting it.
        self._remove_ioc_from_db(ioc_name)
        self._add_ioc_start_to_db(exe_path, ioc_name, pid)
        for pvname, pv in pv_database.items():
            pv_fullname = "{}{}".format(prefix, pvname)
            self._add_pv_to_db(ioc_name, pv, pv_fullname)
            for info_field_name, info_field_value in pv.get(PV_INFO_FIELD_NAME, {}).items():
                self._add_pv_info_to_db(info_field_name, info_field_value, pv_fullname)

    def _add_pv_info_to_db(self, info_field_name, info_field_value, pv_fullname):
        """
        Add pv info to the database.

        Args:
            info_field_name: name of the info field
            info_field_value: value of the info field
            pv_fullname: full pv name with prefix

        Returns: nothing.
        """
        try:
            self.mysql_abstraction_layer.update(UPDATE_PV_INFO, (pv_fullname, info_field_name, info_field_value))
        except Exception as err:
            print_and_log("Failed to insert pv info for pv '{pvname}' with name '{name}' and value "
                          "'{value}': {error}".format(pvname=pv_fullname, name=info_field_name,
                                                      value=info_field_value, error=err), "MAJOR", "DBSVR")

    def _add_pv_to_db(self, ioc_name, pv, pv_fullname):
        """
        Add a pv to the database.

        Args:
            ioc_name: name of the ioc
            pv: pv information
            pv_fullname: pv's full name
        """
        try:
            # Default type/description when the pv database omits them.
            pv_type = pv.get('type', "float")
            description = pv.get(PV_DESCRIPTION_NAME, "")
            self.mysql_abstraction_layer.update(INSERT_PV_DETAILS, (pv_fullname, pv_type, description, ioc_name))
        except DatabaseError as err:
            print_and_log("Failed to insert pv data for pv '{pvname}' with contents '{pv}': {error}"
                          .format(ioc_name=ioc_name, pvname=pv_fullname, pv=pv, error=err), "MAJOR", "DBSVR")

    def _add_ioc_start_to_db(self, exe_path, ioc_name, pid):
        """
        Add the ioc start to the database.

        Args:
            exe_path: the path to the executable
            ioc_name: the ioc name
            pid: the process id
        """
        try:
            self.mysql_abstraction_layer.update(INSERT_IOC_STARTED_DETAILS, (ioc_name, pid, exe_path))
        except DatabaseError as err:
            print_and_log("Failed to insert ioc into database ({ioc_name},{pid},{exepath}): {error}"
                          .format(ioc_name=ioc_name, pid=pid, exepath=exe_path, error=err), "MAJOR", "DBSVR")

    def _remove_ioc_from_db(self, ioc_name):
        """
        Remove the ioc data from the database.

        Args:
            ioc_name: name of the ioc
        """
        try:
            self.mysql_abstraction_layer.update(DELETE_IOC_RUN_STATE, (ioc_name,))
        except DatabaseError as err:
            print_and_log("Failed to delete ioc, '{ioc_name}', from iocrt: {error}"
                          .format(ioc_name=ioc_name, error=err), "MAJOR", "DBSVR")
        try:
            self.mysql_abstraction_layer.update(DELETE_IOC_PV_DETAILS, (ioc_name,))
        except DatabaseError as err:
            print_and_log("Failed to delete ioc, '{ioc_name}', from pvs: {error}"
                          .format(ioc_name=ioc_name, error=err), "MAJOR", "DBSVR")
|
<gh_stars>0
"""Package for learning complete games from data
The API of this individual module is still unstable and may change as
improvements or refinements are made.
There are two general game types in this module: learned games and deviation
games. Learned games vary by the method, but generally expose methods for
computing payoffs and many other features. Deviation games use learned games and
different functions to compute deviation payoffs via various methods.
"""
import warnings
import numpy as np
from numpy.lib import recfunctions
import sklearn
from sklearn import gaussian_process as gp
from gameanalysis import gamereader
from gameanalysis import paygame
from gameanalysis import restrict
from gameanalysis import rsgame
from gameanalysis import utils
class _DevRegressionGame(rsgame._CompleteGame):  # pylint: disable=protected-access
    """A game regression model that learns deviation payoffs

    This model functions as a game, but doesn't have a default way of computing
    deviation payoffs. It must be wrapped with another game that uses payoff
    data to compute deviation payoffs.
    """

    def __init__(  # pylint: disable=too-many-arguments
            self, game, regressors, offset, scale, min_payoffs, max_payoffs,
            rest):
        """Store per-strategy regressors and payoff (de)normalization.

        Args:
            game: game providing role/strategy structure.
            regressors: tuple with one fitted regressor per strategy.
            offset, scale: per-strategy values that undo the payoff
                normalization applied when the regressors were trained.
            min_payoffs, max_payoffs: per-strategy payoff bounds.
            rest: boolean mask embedding this game's strategies in the
                full (unrestricted) game's strategy space.
        """
        super().__init__(game.role_names, game.strat_names,
                         game.num_role_players)
        self._regressors = regressors
        # Freeze all arrays so learned model state can't be mutated.
        self._offset = offset
        self._offset.setflags(write=False)
        self._scale = scale
        self._scale.setflags(write=False)
        self._min_payoffs = min_payoffs
        self._min_payoffs.setflags(write=False)
        self._max_payoffs = max_payoffs
        self._max_payoffs.setflags(write=False)
        self._rest = rest
        self._rest.setflags(write=False)

    def deviation_payoffs(self, _, **_kw):  # pylint: disable=arguments-differ
        """Unsupported; wrap this game in a deviation game instead."""
        raise ValueError(
            "regression games don't define deviation payoffs and must be "
            'used as a model for a deviation game')

    def get_payoffs(self, profiles):
        """Predict payoffs for each played strategy of each profile."""
        utils.check(
            self.is_profile(profiles).all(), 'must pass valid profiles')
        payoffs = np.zeros(profiles.shape)
        for i, (off, scale, reg) in enumerate(zip(
                self._offset, self._scale, self._regressors)):
            # Only profiles where strategy i is actually played get a payoff.
            mask = profiles[..., i] > 0
            profs = profiles[mask]
            # Regressors were trained on profiles with the deviator removed
            # (see _dev_profpay), so drop one player of strategy i.
            profs[:, i] -= 1
            if profs.size:
                # Undo training-time payoff normalization via scale/offset.
                payoffs[mask, i] = reg.predict(restrict.translate(
                    profs, self._rest)).ravel() * scale + off
        return payoffs

    def get_dev_payoffs(self, dev_profs):
        """Compute the payoff for deviating

        This implementation is more efficient than the default since we don't
        need to compute the payoff for non deviators."""
        # Translate profiles into the full game's strategy space and roll the
        # role axis to the front, one profile slice per role.
        prof_view = np.rollaxis(restrict.translate(dev_profs.reshape(
            (-1, self.num_roles, self.num_strats)), self._rest), 1, 0)
        payoffs = np.empty(dev_profs.shape[:-2] + (self.num_strats,))
        pay_view = payoffs.reshape((-1, self.num_strats)).T
        # Each strategy's regressor predicts against its own role's profiles.
        for pays, profs, reg in zip(
                pay_view, utils.repeat(prof_view, self.num_role_strats),
                self._regressors):
            np.copyto(pays, reg.predict(profs))
        # Denormalize all predictions at once.
        return payoffs * self._scale + self._offset

    def max_strat_payoffs(self):
        """Per-strategy maximum payoffs (read-only view)."""
        return self._max_payoffs.view()

    def min_strat_payoffs(self):
        """Per-strategy minimum payoffs (read-only view)."""
        return self._min_payoffs.view()

    def restrict(self, restriction):
        """Restrict to a subset of strategies, keeping their regressors."""
        base = rsgame.empty_copy(self).restrict(restriction)
        # Compose with the existing mask so _rest always indexes into the
        # full, unrestricted strategy space.
        new_rest = self._rest.copy()
        new_rest[new_rest] = restriction
        regs = tuple(reg for reg, m in zip(self._regressors, restriction) if m)
        return _DevRegressionGame(
            base, regs, self._offset[restriction], self._scale[restriction],
            self._min_payoffs[restriction], self._max_payoffs[restriction],
            new_rest)

    def _add_constant(self, constant):
        """Shift all payoffs by a per-role constant."""
        off = np.broadcast_to(constant, self.num_roles).repeat(
            self.num_role_strats)
        return _DevRegressionGame(
            self, self._regressors, self._offset + off, self._scale,
            self._min_payoffs + off, self._max_payoffs + off, self._rest)

    def _multiply_constant(self, constant):
        """Scale all payoffs by a per-role constant."""
        mul = np.broadcast_to(constant, self.num_roles).repeat(
            self.num_role_strats)
        return _DevRegressionGame(
            self, self._regressors, self._offset * mul, self._scale * mul,
            self._min_payoffs * mul, self._max_payoffs * mul, self._rest)

    def _add_game(self, _):
        # Adding arbitrary games to a regression game is not supported.
        return NotImplemented

    def __eq__(self, othr):
        # pylint: disable-msg=protected-access
        # NOTE(review): min/max payoff bounds are not compared — presumably
        # they are derived from the other fields; confirm before relying on
        # equality for games built with differing bounds.
        return (super().__eq__(othr) and
                self._regressors == othr._regressors and
                np.allclose(self._offset, othr._offset) and
                np.allclose(self._scale, othr._scale) and
                np.all(self._rest == othr._rest))

    def __hash__(self):
        # Hash only on base-game identity and the restriction mask; equal
        # games must hash equal, and __eq__ compares at least these.
        return hash((super().__hash__(), self._rest.tobytes()))
def _dev_profpay(game):
    """Yield per-strategy deviation profiles and payoffs.

    For each strategy index ``s`` this yields ``(s, profiles, payoffs)``,
    where ``profiles`` have one player of strategy ``s`` removed (the
    deviator) and ``payoffs`` are the corresponding observed payoffs with
    missing (NaN) entries filtered out.
    """
    sample_game = paygame.samplegame_copy(game)
    flat_profiles = sample_game.flat_profiles()
    flat_payoffs = sample_game.flat_payoffs()

    for strat, strat_pays in enumerate(flat_payoffs.T):
        # Keep observations where the strategy was actually played and the
        # payoff is present.
        keep = (flat_profiles[:, strat] > 0) & ~np.isnan(strat_pays)
        utils.check(
            keep.any(), "couldn't find deviation data for a strategy")
        dev_profiles = flat_profiles[keep]
        # Remove the deviating player from their own strategy count.
        dev_profiles[:, strat] -= 1
        yield strat, dev_profiles, strat_pays[keep]
def nngame_train(  # pylint: disable=too-many-arguments,too-many-locals
        game, epochs=100, layer_sizes=(32, 32), dropout=0.2, verbosity=0,
        optimizer='sgd', loss='mean_squared_error'):
    """Train a neural network regression model

    This mostly exists as a proof of concept, individual testing should be done
    to make sure it is working sufficiently. This API will likely change to
    support more general architectures and training.

    Args:
        game: game with sample payoff data to learn from.
        epochs (int): training epochs per strategy model.
        layer_sizes (tuple of int): hidden layer widths; must be non-empty.
        dropout (float): dropout probability in [0, 1); 0 disables dropout.
        verbosity (int): keras fit verbosity.
        optimizer, loss: passed directly to keras ``compile``.
    """
    utils.check(layer_sizes, 'must have at least one layer')
    utils.check(0 <= dropout < 1, 'dropout must be a valid probability')
    # Import lazily so tensorflow/keras is only required when this function
    # is actually used.
    from keras import models, layers

    # Template network: full strategy-count vector in, single payoff out.
    # The sigmoid output constrains predictions to [0, 1], hence the payoff
    # normalization below.
    model = models.Sequential()
    lay_iter = iter(layer_sizes)
    model.add(layers.Dense(
        next(lay_iter), input_shape=[game.num_strats], activation='relu'))
    for units in lay_iter:
        model.add(layers.Dense(units, activation='relu'))
        if dropout:
            model.add(layers.Dropout(dropout))
    model.add(layers.Dense(1, activation='sigmoid'))

    regs = []
    offsets = np.empty(game.num_strats)
    scales = np.empty(game.num_strats)
    for i, profs, pays in _dev_profpay(game):
        # XXX Payoff normalization specific to sigmoid. If we accept alternate
        # models, we need a way to compute how to potentially normalize
        # payoffs.
        min_pay = pays.min()
        offsets[i] = min_pay
        max_pay = pays.max()
        # Guard against a zero scale when all payoffs are (nearly) equal.
        scale = 1 if np.isclose(max_pay, min_pay) else max_pay - min_pay
        scales[i] = scale
        # One model per strategy, cloned from the template architecture.
        # NOTE(review): clone_model is assumed to copy architecture with
        # fresh weights — confirm against the keras version in use.
        reg = models.clone_model(model)
        reg.compile(optimizer=optimizer, loss=loss)
        reg.fit(profs, (pays - min_pay) / scale, epochs=epochs,
                verbose=verbosity)
        regs.append(reg)

    return _DevRegressionGame(
        game, tuple(regs), offsets, scales, game.min_strat_payoffs(),
        game.max_strat_payoffs(), np.ones(game.num_strats, bool))
def sklgame_train(game, estimator):
    """Create a regression game from an arbitrary sklearn estimator

    Parameters
    ----------
    game : RsGame
        The game to learn, must have at least one payoff per strategy.
    estimator : sklearn estimator
        An estimator that supports clone, fit, and predict via the standard
        scikit-learn estimator API.
    """
    fitted = []
    for _, dev_profs, dev_pays in _dev_profpay(game):
        # Fresh unfitted copy of the estimator for every strategy.
        strat_model = sklearn.base.clone(estimator)
        strat_model.fit(dev_profs, dev_pays)
        fitted.append(strat_model)

    num_strats = game.num_strats
    # Identity offset/scale: sklearn models predict raw payoffs directly.
    return _DevRegressionGame(
        game, tuple(fitted), np.zeros(num_strats), np.ones(num_strats),
        game.min_strat_payoffs(), game.max_strat_payoffs(),
        np.ones(num_strats, bool))
class _RbfGpGame(rsgame._CompleteGame): # pylint: disable=too-many-instance-attributes,protected-access
"""A regression game using RBF Gaussian processes
This regression game has a build in deviation payoff based off of a
continuous approximation of the multinomial distribution.
"""
def __init__(  # pylint: disable=too-many-locals,too-many-arguments
        self, role_names, strat_names, num_role_players, offset, coefs,
        lengths, sizes, profiles, alpha):
    """Store fitted RBF-GP parameters and precompute payoff bounds.

    Args:
        role_names, strat_names, num_role_players: standard game
            descriptors forwarded to the base class.
        offset: per-strategy additive payoff offset.
        coefs: per-strategy multiplicative coefficient of the kernel sum.
        lengths: per-strategy RBF length scales, one row per strategy.
        sizes: number of training profiles per strategy; ``profiles`` and
            ``alpha`` are stored flat and segmented via ``_size_starts``.
        profiles: stacked training profiles of all strategies.
        alpha: per-training-profile weights (GP dual coefficients),
            aligned with ``profiles``.
    """
    super().__init__(role_names, strat_names, num_role_players)
    # All arrays are frozen so model state can't be mutated accidentally.
    self._offset = offset
    self._offset.setflags(write=False)
    self._coefs = coefs
    self._coefs.setflags(write=False)
    self._lengths = lengths
    self._lengths.setflags(write=False)
    self._sizes = sizes
    self._sizes.setflags(write=False)
    # Start index of each strategy's segment in the flat arrays.
    self._size_starts = np.insert(self._sizes[:-1].cumsum(), 0, 0)
    self._size_starts.setflags(write=False)
    self._profiles = profiles
    self._profiles.setflags(write=False)
    self._alpha = alpha
    self._alpha.setflags(write=False)

    # Useful member: opponent player counts per (strategy, role), i.e. with
    # the deviator removed from their own role.
    self._dev_players = np.repeat(
        self.num_role_players - np.eye(self.num_roles, dtype=int),
        self.num_role_strats, 0)
    self._dev_players.setflags(write=False)

    # Compute min and max payoffs.
    # TODO These are pretty conservative, and could maybe be made more
    # accurate
    sdp = self._dev_players.repeat(self.num_role_strats, 1)
    # minw is a lower bound on the RBF kernel over reachable profiles
    # (exp of minus half the maximum scaled squared distance).
    max_rbf = np.einsum('ij,ij,ij->i', sdp, sdp, 1 / self._lengths)
    minw = np.exp(-max_rbf / 2)  # pylint: disable=invalid-unary-operand-type
    # Split alpha into its positive and negative parts per strategy; the
    # extremes of the kernel sum then bound the payoffs.
    # NOTE(review): the bound assumes nonnegative _coefs — confirm.
    mask = self._alpha > 0
    pos = np.add.reduceat(self._alpha * mask, self._size_starts)
    neg = np.add.reduceat(self._alpha * ~mask, self._size_starts)
    self._min_payoffs = self._coefs * (pos * minw + neg) + self._offset
    self._min_payoffs.setflags(write=False)
    self._max_payoffs = self._coefs * (pos + neg * minw) + self._offset
    self._max_payoffs.setflags(write=False)
def get_payoffs(self, profiles):
    """Compute payoffs for each played strategy of each profile.

    Each strategy's GP is evaluated at the profile with one player of that
    strategy removed (the deviation view it was trained on). Strategies
    with zero players get a payoff of 0.
    """
    utils.check(
        self.is_profile(profiles).all(), 'must pass valid profiles')
    # One row per (profile, strategy): the profile minus one player of that
    # strategy, repeated to align with each GP's training profiles.
    dev_profiles = np.repeat(
        profiles[..., None, :] - np.eye(self.num_strats, dtype=int),
        self._sizes, -2)
    vec = ((dev_profiles - self._profiles) /
           self._lengths.repeat(self._sizes, 0))
    # Squared scaled distance to every training profile.
    rbf = np.einsum('...ij,...ij->...i', vec, vec)
    # Weighted kernel sum per strategy segment, then denormalize.
    payoffs = self._offset + self._coefs * np.add.reduceat(
        np.exp(-rbf / 2) * self._alpha, self._size_starts, -1)  # pylint: disable=invalid-unary-operand-type
    # Unplayed strategies have no payoff.
    payoffs[profiles == 0] = 0
    return payoffs
def get_dev_payoffs(self, dev_profs, *, jacobian=False):  # pylint: disable=arguments-differ
    """Compute the payoff of deviating into each strategy.

    Args:
        dev_profs: opponent profiles to evaluate against; assumes one
            profile per role along the second-to-last axis — TODO confirm
            against callers.
        jacobian: when True, also return the derivative of each payoff
            with respect to the profile entries.
    """
    # Repeat each role's profile once per training profile of each of its
    # strategies, aligning rows with the flat (_profiles, _alpha) layout.
    dev_profiles = dev_profs.repeat(
        np.add.reduceat(self._sizes, self.role_starts), -2)
    vec = ((dev_profiles - self._profiles) /
           self._lengths.repeat(self._sizes, 0))
    # Squared scaled distances -> alpha-weighted RBF kernel values.
    rbf = np.einsum('...ij,...ij->...i', vec, vec)
    exp = np.exp(-rbf / 2) * self._alpha  # pylint: disable=invalid-unary-operand-type
    payoffs = self._offset + self._coefs * np.add.reduceat(
        exp, self._size_starts, -1)

    if not jacobian:
        return payoffs

    # Analytic gradient of the kernel sum with respect to the profile.
    jac = -(self._coefs[:, None] / self._lengths *
            np.add.reduceat(exp[:, None] * vec, self._size_starts, 0))
    return payoffs, jac
def max_strat_payoffs(self):
    """Return the (conservative) per-strategy maximum payoffs.

    The result is a read-only view of internal state.
    """
    upper_bounds = self._max_payoffs
    return upper_bounds.view()
def min_strat_payoffs(self):
    """Return the (conservative) per-strategy minimum payoffs.

    The result is a read-only view of internal state.
    """
    lower_bounds = self._min_payoffs
    return lower_bounds.view()
def deviation_payoffs(self, mixture, *, jacobian=False, **_):  # pylint: disable=too-many-locals
    """Compute expected deviation payoffs under a mixed strategy.

    Uses a closed form for the expectation of the RBF kernel when the
    opponent profile distribution induced by ``mixture`` is approximated
    continuously (see the class docstring). NOTE(review): the algebra
    below follows that derivation — verify against it before modifying.
    """
    players = self._dev_players.repeat(self.num_role_strats, 1)
    # Mean opponent profile under the mixture.
    avg_prof = players * mixture
    # Combined (kernel + profile-variance) precision terms.
    diag = 1 / (self._lengths ** 2 + avg_prof)
    diag_sizes = diag.repeat(self._sizes, 0)
    diff = self._profiles - avg_prof.repeat(self._sizes, 0)
    det = 1 / (1 - self._dev_players * np.add.reduceat(
        mixture ** 2 * diag, self.role_starts, 1))
    det_sizes = det.repeat(self._sizes, 0)
    cov_diag = np.einsum('ij,ij,ij->i', diff, diff, diag_sizes)
    cov_outer = np.add.reduceat(
        mixture * diag_sizes * diff, self.role_starts, 1)
    sec_term = np.einsum(
        'ij,ij,ij,ij->i', self._dev_players.repeat(self._sizes, 0),
        det_sizes, cov_outer, cov_outer)
    exp = np.exp(-(cov_diag + sec_term) / 2)
    # Normalization factor from integrating the kernel analytically.
    coef = self._lengths.prod(1) * np.sqrt(diag.prod(1) * det.prod(1))
    avg = np.add.reduceat(self._alpha * exp, self._size_starts)
    payoffs = self._coefs * coef * avg + self._offset

    if not jacobian:
        return payoffs

    # Jacobian of the deviation payoffs with respect to the mixture.
    beta = 1 - players * mixture * diag
    jac_coef = (
        ((beta ** 2 - 1) * det.repeat(self.num_role_strats, 1) +
         players * diag) * avg[:, None])
    delta = np.repeat(cov_outer * det_sizes, self.num_role_strats, 1)
    jac_exp = -self._alpha[:, None] * exp[:, None] * (
        (delta * beta.repeat(self._sizes, 0) - diff * diag_sizes - 1) ** 2
        - (delta - 1) ** 2)
    jac_avg = (players * np.add.reduceat(jac_exp, self._size_starts, 0))
    jac = -self._coefs[:, None] * coef[:, None] * (jac_coef + jac_avg) / 2
    return payoffs, jac
# TODO Add function that creates sample game which draws payoffs from the
# gp distribution
def restrict(self, restriction):
    """Restrict this game to a boolean mask of strategies.

    Removed strategies are projected out of the stored training profiles:
    each profile keeps only the retained columns, and the RBF kernel
    factor its removed coordinates would contribute (evaluated against
    zero) is folded into its alpha weight.  Projected profiles that
    become duplicates are merged by summing their weights.
    """
    restriction = np.asarray(restriction, bool)
    base = rsgame.empty_copy(self).restrict(restriction)
    # mask selecting the sample rows that belong to retained strategies
    size_mask = restriction.repeat(self._sizes)
    sizes = self._sizes[restriction]
    profiles = self._profiles[size_mask]
    lengths = self._lengths[restriction]
    # scaled removed coordinates; exp(-||.||^2 / 2) is the kernel factor
    # the removed columns contribute when played with probability zero
    zeros = (profiles[:, ~restriction] /
             lengths[:, ~restriction].repeat(sizes, 0))
    removed = np.exp(-np.einsum('ij,ij->i', zeros, zeros) / 2)  # pylint: disable=invalid-unary-operand-type
    # unique (strategy-index, projected-profile) rows; inds maps each old
    # row to its merged row
    uprofs, inds = np.unique(
        recfunctions.merge_arrays([
            np.arange(restriction.sum()).repeat(sizes).view([('s', int)]),
            utils.axis_to_elem(profiles[:, restriction])], flatten=True),
        return_inverse=True)
    # merged alpha weights, scaled by the removed-coordinate factor
    new_alpha = np.bincount(inds, removed * self._alpha[size_mask])
    # recompute per-strategy sample counts from the strategy-index breaks
    new_sizes = np.diff(np.concatenate([
        [-1], np.flatnonzero(np.diff(uprofs['s'])),
        [new_alpha.size - 1]]))
    return _RbfGpGame(
        base.role_names, base.strat_names, base.num_role_players,
        self._offset[restriction], self._coefs[restriction],
        lengths[:, restriction], new_sizes, uprofs['axis'], new_alpha)
def _add_constant(self, constant):
    """Return a copy of this game with ``constant`` (per role) added to
    every payoff, implemented by shifting the offsets."""
    shift = np.broadcast_to(constant, self.num_roles).repeat(
        self.num_role_strats)
    return _RbfGpGame(
        self.role_names, self.strat_names, self.num_role_players,
        self._offset + shift, self._coefs, self._lengths, self._sizes,
        self._profiles, self._alpha)

def _multiply_constant(self, constant):
    """Return a copy of this game with payoffs scaled by ``constant``
    (per role), implemented by scaling offsets and coefficients."""
    scale = np.broadcast_to(constant, self.num_roles).repeat(
        self.num_role_strats)
    return _RbfGpGame(
        self.role_names, self.strat_names, self.num_role_players,
        self._offset * scale, self._coefs * scale, self._lengths,
        self._sizes, self._profiles, self._alpha)

def _add_game(self, _):
    """Adding two GP games is unsupported."""
    return NotImplemented
def to_json(self):
    """Serialize the game, including all GP parameters, to a json dict."""
    base = super().to_json()
    base['offsets'] = self.payoff_to_json(self._offset)
    base['coefs'] = self.payoff_to_json(self._coefs)
    # {role: {strategy: length-scales}} for each deviating strategy's GP
    lengths = {}
    for role, strats, lens in zip(
            self.role_names, self.strat_names,
            np.split(self._lengths, self.role_starts[1:])):
        lengths[role] = {s: self.payoff_to_json(l)
                         for s, l in zip(strats, lens)}
    base['lengths'] = lengths
    # training profiles: split rows per strategy, then group by role
    profs = {}
    for role, strats, data in zip(
            self.role_names, self.strat_names,
            np.split(np.split(self._profiles, self._size_starts[1:]),
                     self.role_starts[1:])):
        profs[role] = {strat: [self.profile_to_json(p) for p in dat]
                       for strat, dat in zip(strats, data)}
    base['profiles'] = profs
    # alpha weights, parallel to the profiles above
    alphas = {}
    for role, strats, alphs in zip(
            self.role_names, self.strat_names,
            np.split(np.split(self._alpha, self._size_starts[1:]),
                     self.role_starts[1:])):
        alphas[role] = {s: a.tolist() for s, a in zip(strats, alphs)}
    base['alphas'] = alphas
    base['type'] = 'rbf.1'
    return base
def __eq__(self, othr):
    """Equality: same base game, close GP parameters, and the same
    (strategy-index, profile, alpha) rows up to permutation."""
    # pylint: disable-msg=protected-access
    return (super().__eq__(othr) and
            np.allclose(self._offset, othr._offset) and
            np.allclose(self._coefs, othr._coefs) and
            np.allclose(self._lengths, othr._lengths) and
            np.all(self._sizes == othr._sizes) and
            utils.allclose_perm(
                np.concatenate([
                    np.arange(self.num_strats).repeat(
                        self._sizes)[:, None],
                    self._profiles, self._alpha[:, None]], 1),
                np.concatenate([
                    np.arange(othr.num_strats).repeat(
                        othr._sizes)[:, None],
                    othr._profiles, othr._alpha[:, None]], 1)))

@utils.memoize
def __hash__(self):
    """Hash from the sorted (strategy-index, profile) rows.  Alphas are
    excluded, which is consistent with __eq__ comparing them only
    approximately (allclose)."""
    hprofs = np.sort(utils.axis_to_elem(np.concatenate([
        np.arange(self.num_strats).repeat(self._sizes)[:, None],
        self._profiles], 1))).tobytes()
    return hash((super().__hash__(), hprofs))
def rbfgame_train(game, num_restarts=3):  # pylint: disable=too-many-locals
    """Train a regression game with an RBF Gaussian process

    This model is somewhat well tested and has a few added benefits over
    standard regression models due to the nature of its functional form.

    Parameters
    ----------
    game : RsGame
        The game to learn. Must have at least one payoff per strategy.
    num_restarts : int, optional
        The number of random restarts to make with the optimizer. Higher
        numbers will give a better fit (in expectation), but will take
        longer.
    """
    # opponents per role/strategy pair (at least 1 so the length-scale
    # bounds below stay positive)
    dev_players = np.maximum(game.num_role_players - np.eye(
        game.num_roles, dtype=int), 1).repeat(
            game.num_role_strats, 0).repeat(game.num_role_strats, 1)
    # length-scale search bounds per strategy: [1, number of opponents]
    bounds = np.insert(dev_players[..., None], 0, 1, 2)
    # TODO Add an alpha that is smaller for points near the edge of the
    # simplex, accounting for the importance of minimizing error at the
    # extrema.
    means = np.empty(game.num_strats)
    coefs = np.empty(game.num_strats)
    lengths = np.empty((game.num_strats, game.num_strats))
    profiles = []
    alpha = []
    sizes = []
    # fit one independent GP per deviating strategy on its deviation data
    for (strat, profs, pays), bound in zip(_dev_profpay(game), bounds):
        pay_mean = pays.mean()
        pays -= pay_mean  # center payoffs; the mean becomes the offset
        reg = gp.GaussianProcessRegressor(
            1.0 * gp.kernels.RBF(bound.mean(1), bound) +
            gp.kernels.WhiteKernel(1), n_restarts_optimizer=num_restarts,
            copy_X_train=False)
        reg.fit(profs, pays)
        means[strat] = pay_mean
        # fitted amplitude (constant kernel) and RBF length scales
        coefs[strat] = reg.kernel_.k1.k1.constant_value
        lengths[strat] = reg.kernel_.k1.k2.length_scale
        # merge duplicate training profiles, summing their dual weights
        uprofs, inds = np.unique(
            utils.axis_to_elem(profs), return_inverse=True)
        profiles.append(utils.axis_from_elem(uprofs))
        alpha.append(np.bincount(inds, reg.alpha_))
        sizes.append(uprofs.size)
    # a length scale stuck at a bound suggests the optimizer hit the wall
    if np.any(lengths[..., None] == bounds):
        warnings.warn(
            'some lengths were at their bounds, this may indicate a poor '
            'fit')
    return _RbfGpGame(
        game.role_names, game.strat_names, game.num_role_players, means, coefs,
        lengths, np.array(sizes), np.concatenate(profiles),
        np.concatenate(alpha))
def rbfgame_json(json):
    """Read an rbf game from json

    Inverse of _RbfGpGame.to_json: rebuilds offsets, coefficients, length
    scales, training profiles and alpha weights from the nested
    role/strategy dictionaries.
    """
    utils.check(json['type'].split('.', 1)[0] == 'rbf', 'incorrect type')
    base = rsgame.empty_json(json)
    offsets = base.payoff_from_json(json['offsets'])
    coefs = base.payoff_from_json(json['coefs'])
    # one row of length scales per deviating strategy
    lengths = np.empty((base.num_strats,) * 2)
    for role, strats in json['lengths'].items():
        for strat, pay in strats.items():
            ind = base.role_strat_index(role, strat)
            base.payoff_from_json(pay, lengths[ind])
    # training profiles for each strategy's GP
    profiles = [None] * base.num_strats
    for role, strats in json['profiles'].items():
        for strat, profs in strats.items():
            ind = base.role_strat_index(role, strat)
            profiles[ind] = np.stack([
                base.profile_from_json(p, verify=False) for p in profs])
    # alpha weights parallel to the profiles
    alphas = [None] * base.num_strats
    for role, strats in json['alphas'].items():
        for strat, alph in strats.items():
            ind = base.role_strat_index(role, strat)
            alphas[ind] = np.array(alph)
    sizes = np.fromiter(  # pragma: no branch
        (a.size for a in alphas), int, base.num_strats)
    return _RbfGpGame(
        base.role_names, base.strat_names, base.num_role_players, offsets,
        coefs, lengths, sizes, np.concatenate(profiles),
        np.concatenate(alphas))
class _DeviationGame(rsgame._CompleteGame):  # pylint: disable=abstract-method,protected-access
    """A game that adds deviation payoffs

    Wraps a complete payoff model; plain payoff queries delegate to the
    model, while subclasses supply a deviation_payoffs implementation.
    """

    def __init__(self, model_game):
        super().__init__(model_game.role_names, model_game.strat_names,
                         model_game.num_role_players)
        # deviation estimation requires payoffs for arbitrary profiles
        utils.check(
            model_game.is_complete(),
            'deviation models must be complete games')
        self.model = model_game

    def get_payoffs(self, profiles):
        """Delegate payoff lookup to the wrapped model."""
        return self.model.get_payoffs(profiles)

    def profiles(self):
        """Delegate to the wrapped model."""
        return self.model.profiles()

    def payoffs(self):
        """Delegate to the wrapped model."""
        return self.model.payoffs()

    def max_strat_payoffs(self):
        """Delegate to the wrapped model."""
        return self.model.max_strat_payoffs()

    def min_strat_payoffs(self):
        """Delegate to the wrapped model."""
        return self.model.min_strat_payoffs()

    def to_json(self):
        """Serialize, embedding the wrapped model under 'model'."""
        base = super().to_json()
        base['model'] = self.model.to_json()
        return base

    def __eq__(self, othr):
        return (super().__eq__(othr) and
                self.model == othr.model)

    @utils.memoize
    def __hash__(self):
        return hash((super().__hash__(), self.model))
class _SampleDeviationGame(_DeviationGame):
    """Deviation payoffs by sampling from mixture

    This model produces unbiased deviation payoff estimates, but they're noisy
    and random and take a while to compute. This is accurate in the limit as
    `num_samples` goes to infinity.

    Parameters
    ----------
    model : DevRegressionGame
        A payoff model
    num_samples : int, optional
        The number of samples to use for each deviation estimate. Higher means
        lower variance but higher computation time.
    """

    def __init__(self, model, num_samples=100):
        super().__init__(model)
        utils.check(num_samples > 0, 'num samples must be greater than 0')
        # TODO It might be interesting to play with a sample schedule, i.e.
        # change the number of samples based off of the query number to
        # deviation payoffs (i.e. reduce variance as we get close to
        # convergence)
        self.num_samples = num_samples

    def deviation_payoffs(self, mixture, *, jacobian=False, **_):
        """Compute the deviation payoffs

        The method computes the jacobian as if we were importance sampling
        the results, i.e. as if the function always sampled according to a
        fixed mixture m' and then importance-sampled to get the result for
        ``mixture``."""
        # one opponent-profile draw per sample, per deviating role
        profs = self.random_role_deviation_profiles(self.num_samples, mixture)
        payoffs = self.model.get_dev_payoffs(profs)
        dev_pays = payoffs.mean(0)
        if not jacobian:
            return dev_pays
        # importance weights: profile counts over mixture probabilities,
        # zero where the mixture has no support
        supp = mixture > 0
        weights = np.zeros(profs.shape)
        weights[..., supp] = profs[..., supp] / mixture[supp]
        jac = np.einsum('ij,ijk->jk', payoffs, weights.repeat(
            self.num_role_strats, 1)) / self.num_samples
        return dev_pays, jac

    def restrict(self, restriction):
        """Restrict the underlying model, keeping the sample count."""
        return _SampleDeviationGame(
            self.model.restrict(restriction), self.num_samples)

    def _add_constant(self, constant):
        return _SampleDeviationGame(self.model + constant, self.num_samples)

    def _multiply_constant(self, constant):
        return _SampleDeviationGame(self.model * constant, self.num_samples)

    def _add_game(self, othr):
        # NOTE(review): this relies on `assert`, which is stripped under
        # python -O; then games with mismatched num_samples would be merged
        # silently instead of returning NotImplemented.
        try:
            assert self.num_samples == othr.num_samples
            return _SampleDeviationGame(
                self.model + othr.model, self.num_samples)
        except (AttributeError, AssertionError):
            return NotImplemented

    def to_json(self):
        base = super().to_json()
        base['samples'] = self.num_samples
        base['type'] = 'sample.1'
        return base

    def __eq__(self, othr):
        return (super().__eq__(othr) and
                self.num_samples == othr.num_samples)

    @utils.memoize
    def __hash__(self):
        return hash((super().__hash__(), self.num_samples))
def sample(game, num_samples=100):
    """Create a sample game from a model

    Parameters
    ----------
    game : RsGame
        If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use it's underlying model.
    num_samples : int, optional
        The number of samples to take.
    """
    # unwrap an existing deviation game, otherwise use the model directly
    model = getattr(game, 'model', game)
    return _SampleDeviationGame(model, num_samples=num_samples)

def sample_json(json):
    """Read sample game from json"""
    utils.check(
        json['type'].split('.', 1)[0] == 'sample', 'incorrect type')
    model = gamereader.loadj(json['model'])
    return _SampleDeviationGame(model, num_samples=json['samples'])
class _PointDeviationGame(_DeviationGame):
    """Deviation payoffs by point approximation

    This model computes payoffs by finding the deviation payoffs from the point
    estimate of the mixture. It's fast but biased. This is accurate in the
    limit as the number of players goes to infinity.

    For this work, the underlying implementation of get_dev_payoffs must
    support floating point profiles, which only really makes sense for
    regression games. For deviation payoffs to have a jacobian, the underlying
    model must also support a jacobian for get_dev_payoffs.

    Parameters
    ----------
    model : DevRegressionGame
        A payoff model
    """

    def __init__(self, model):
        super().__init__(model)
        # opponents per role when one player deviates (diagonal removes
        # the deviator from its own role), expanded per strategy
        self._dev_players = np.repeat(self.num_role_players - np.eye(
            self.num_roles, dtype=int), self.num_role_strats, 1)

    def deviation_payoffs(self, mixture, *, jacobian=False, **_):
        """Evaluate the model at the single expected opponent profile."""
        if not jacobian:
            return self.model.get_dev_payoffs(self._dev_players * mixture)
        dev, jac = self.model.get_dev_payoffs(
            self._dev_players * mixture, jacobian=True)
        # chain rule: the expected profile scales linearly with mixture
        jac *= self._dev_players.repeat(self.num_role_strats, 0)
        return dev, jac

    def restrict(self, restriction):
        return _PointDeviationGame(self.model.restrict(restriction))

    def _add_constant(self, constant):
        return _PointDeviationGame(self.model + constant)

    def _multiply_constant(self, constant):
        return _PointDeviationGame(self.model * constant)

    def _add_game(self, othr):
        # NOTE(review): relies on `assert`, stripped under python -O; a
        # non-point game would then be merged instead of rejected.
        try:
            assert isinstance(othr, _PointDeviationGame)
            return _PointDeviationGame(self.model + othr.model)
        except (AttributeError, AssertionError):
            return NotImplemented

    def to_json(self):
        base = super().to_json()
        base['type'] = 'point.1'
        return base
def point(game):
    """Create a point game from a model

    Parameters
    ----------
    game : RsGame
        If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use it's underlying model.
    """
    # unwrap an existing deviation game, otherwise use the model directly
    model = getattr(game, 'model', game)
    return _PointDeviationGame(model)

def point_json(json):
    """Read point game from json"""
    utils.check(
        json['type'].split('.', 1)[0] == 'point', 'incorrect type')
    model = gamereader.loadj(json['model'])
    return _PointDeviationGame(model)
class _NeighborDeviationGame(_DeviationGame):
    """Create a neighbor game from a model

    This takes a normalized weighted estimate of the deviation payoffs by
    finding all profiles within `num_neighbors` of the maximum probability
    profile for the mixture and weighting them accordingly. This is biased, but
    accurate in the limit as `num_neighbors` approaches `num_players`. It also
    produces discontinuities every time the maximum probability profile
    switches.

    Parameters
    ----------
    game : RsGame
        If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use it's underlying model.
    num_neighbors : int, optional
        The number of deviations to take.
    """

    def __init__(self, model, num_neighbors=2):
        super().__init__(model)
        utils.check(num_neighbors >= 0, 'num devs must be nonnegative')
        self.num_neighbors = num_neighbors

    def deviation_payoffs(self, mixture, *, jacobian=False, **_):
        # TODO This is not smooth because there are discontinuities when the
        # maximum probability profile jumps at the boundary. If we wanted to
        # make it smooth, one option would be to compute the smoother
        # interpolation between this and lower probability profiles. All we
        # need to ensure smoothness is that the weight at profile
        # discontinuities is 0.
        # enumerate profiles near the modal profile and let a partial
        # paygame compute weighted deviation payoffs from them
        profiles = self.nearby_profiles(
            self.max_prob_prof(mixture), self.num_neighbors)
        payoffs = self.get_payoffs(profiles)
        game = paygame.game_replace(self, profiles, payoffs)
        return game.deviation_payoffs(mixture, ignore_incomplete=True,
                                      jacobian=jacobian)

    def restrict(self, restriction):
        return _NeighborDeviationGame(
            self.model.restrict(restriction), self.num_neighbors)

    def _add_constant(self, constant):
        return _NeighborDeviationGame(self.model + constant, self.num_neighbors)

    def _multiply_constant(self, constant):
        return _NeighborDeviationGame(self.model * constant, self.num_neighbors)

    def _add_game(self, othr):
        # NOTE(review): relies on `assert`, stripped under python -O
        try:
            assert self.num_neighbors == othr.num_neighbors
            return _NeighborDeviationGame(
                self.model + othr.model, self.num_neighbors)
        except (AttributeError, AssertionError):
            return NotImplemented

    def to_json(self):
        base = super().to_json()
        base['neighbors'] = self.num_neighbors
        base['type'] = 'neighbor.2'
        return base

    def __eq__(self, othr):
        return super().__eq__(othr) and self.num_neighbors == othr.num_neighbors

    @utils.memoize
    def __hash__(self):
        return hash((super().__hash__(), self.num_neighbors))
def neighbor(game, num_neighbors=2):
    """Create a neighbor game from a model

    Parameters
    ----------
    game : RsGame
        If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use it's underlying model.
    num_neighbors : int, optional
        The number of deviations to explore out.
    """
    # unwrap an existing deviation game, otherwise use the model directly
    model = getattr(game, 'model', game)
    return _NeighborDeviationGame(model, num_neighbors=num_neighbors)
def neighbor_json(json):
    """Read neighbor game from json

    Accepts both the current 'neighbors' key and the legacy 'devs' key.
    When neither key is present, fall back to the constructor default of
    2; the previous fallback of None would raise a TypeError inside the
    constructor's ``num_neighbors >= 0`` validation.
    """
    utils.check(
        json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
    num_neighbors = json.get('neighbors', json.get('devs', 2))
    return _NeighborDeviationGame(
        gamereader.loadj(json['model']), num_neighbors=num_neighbors)
|
<filename>InductionApplet.py
# -*- coding: cp1252 -*-
#!/usr/bin/jython
import copy, pawt, java # , re
from javax.swing import JApplet, JLabel, JList, JTextArea, JButton, JRadioButton, \
ButtonGroup, JTextPane, JPanel, JOptionPane, \
ListSelectionModel, DefaultListModel
from javax.swing.event import ListSelectionListener, ListSelectionEvent
from java.awt import Container, Canvas, BorderLayout, FlowLayout, GridLayout, \
Font, Color, Dimension
from java.util import regex
from PyPlotter import awtGfx, Graph, Gfx, Colors
import Induction
###############################################################################
#
# Flags: edit these to change the behaviour of the simulation
#
###############################################################################
# When True, Simulation adds an extra "miMean" / "non_miMean" pen and plots
# the corresponding mean success-rate curve each round (presumably the mean
# over meta-inductivists vs. the other predictors — confirm in Induction.py).
SHOW_MI_MEAN = False
SHOW_NON_MI_MEAN = False
###############################################################################
#
# utility functions and variables
#
###############################################################################
def bright(c):
    """Return True if the (r, g, b) color tuple `c` is bright enough to be
    plotted, judged by its squared channel magnitude exceeding 0.4."""
    r, g, b = c[0], c[1], c[2]
    return (r * r + g * g + b * b) > 0.4
# Pen pools grouped by hue, restricted to colors bright() judges visible;
# emptyPens suppresses drawing for curves that should not be shown.
redPens = [Gfx.Pen(c) for c in filter(bright,filter(Colors.RedFilter, Colors.colors))]
bluePens = [Gfx.Pen(c) for c in filter(bright,filter(Colors.BlueFilter, Colors.colors))]
greenPens = [Gfx.Pen(c) for c in filter(bright,filter(Colors.GreenFilter, Colors.colors))]
yellowPens = [Gfx.Pen(c) for c in filter(bright,filter(Colors.YellowFilter, Colors.colors))]
emptyPens = [Graph.DONT_DRAW_PEN]*10
#def ProxyPenGenerator(penList=[]):
# for pen in penList:
# yield pen
# if penList != []: print "Warning: pen list seems to be too short!"
# for c in Colors.colors:
# yield Gfx.Pen(c)
class RunAsThread(java.lang.Thread):
    """Wrap a zero-argument callable so it can be run on its own Java
    thread via Thread.start()."""

    def __init__(self, procedure):
        java.lang.Thread.__init__(self)
        self.procedure = procedure

    def run(self):
        """Thread entry point: invoke the wrapped callable."""
        self.procedure()
def re_sub(pattern, replacement, txt):
    """Replaces 'pattern' with 'replacement' in 'txt'."""
    # the Java security manager rejects the Python 're' module in the
    # applet sandbox, so use java.util.regex directly instead
    compiled = regex.Pattern.compile(pattern)
    return compiled.matcher(txt).replaceAll(replacement)
###############################################################################
#
# list of examples
#
###############################################################################
# Registry of demo scenarios keyed by the label shown in the selection list.
# Each value is a tuple of:
#   (graph title, predictor instances, pens (consumed one per predictor),
#    number of rounds, event generator, help text shown below the plot)
exampleList = {}

exampleList["A: MI Demonstration"] = \
    ("Example 01(MI): MI + Forecasters",
     [Induction.DelayedForecaster(0.7, 40, "Forecaster 1 (success; delay)"),
      Induction.ForecasterFromBottom(0.90, "Forecaster 2 (success; recovers from zero)"),
      Induction.MetaInductivist("Meta-Inductivist"),
      Induction.ObjectInductivist("Object-Inductivist")],
     bluePens[:2] + redPens[:1] + greenPens,
     500,
     lambda : Induction.getRandomEvent(2.0/3.0),
     """ "MI Demonstration" demonstrates how a meta-inductivist works:
The meta-inductivist simply follows the predictor that had the highest
success rate so far. This predictor is called its "favorite". If another
predictor becomes better than the favorite, the meta-inductivists choses this
predictor as its new favorite.
""")

exampleList["B: Amp. Oscillators"] = \
    ("Example 02(MI): MI + Amplitude-Oscillator + OI",
     [Induction.AmplitudeOscillator(0.3, 0.85, "AmpOsc"),
      Induction.MetaInductivist("Meta-Inductivist"),
      Induction.ObjectInductivist("Object-Inductivist")],
     bluePens[:1] + redPens[:1] + greenPens,
     500,
     lambda : Induction.getRandomEvent(2.0/3.0),
     """Hier noch eine Beschreibung einfügen...
""")

exampleList["C: Systematic Oscillator"] = \
    ("Example 03(MI): MI + 1 Systematic Oscillator + OI",
     [Induction.SystOscillator("SystOscillator"),
      Induction.MetaInductivist("Meta-Inductivist"),
      Induction.ObjectInductivist("Object-Inductivist")],
     bluePens[:1] + redPens[:1] + greenPens, 500,
     lambda : Induction.getRandomEvent(2.0/3.0),
     """An dieser Stelle sollten ein par nette Zeilen zur
Erklärung für den Nutzer stehen...
""")
###############################################################################
#
# the graphical user interface (java applet)
#
###############################################################################
class GraphCanvas(Canvas):
    """AWT canvas that forwards repaint requests to the owning applet."""

    def __init__(self):
        self.applet = None

    def setApplet(self, applet):
        """Attach the applet whose graph should be redrawn on paint."""
        self.applet = applet

    def paint(self, g):
        """AWT paint callback: ask the applet to refresh its graph."""
        if self.applet is not None:
            self.applet.refresh()
class InductionApplet(JApplet):
    """Swing applet that lets the user pick an induction example from a
    list, runs the corresponding simulation on a worker thread, and plots
    every predictor's success rate."""

    def init(self):
        """Applet lifecycle: build the GUI (example list on the left, plot
        canvas with a button row in the center, help text at the bottom)."""
        global exampleList
        self.thinFont = Font("Dialog", 0, 10)
        self.pane = self.getContentPane()
        # Jython 2: dict.keys() returns a plain list, so in-place sort works
        self.examples = exampleList.keys()
        self.examples.sort()
        self.exampleSelector = JList(self.examples, valueChanged=self.valueChanged)
        self.exampleSelector.setSelectionMode(ListSelectionModel.SINGLE_SELECTION)
        self.exampleSelector.setLayoutOrientation(JList.VERTICAL)
        self.exampleSelector.setPreferredSize(Dimension(150,500))
        self.exampleSelector.setBackground(Color(0.95, 0.95, 0.98))
        self.exampleSelector.setFont(self.thinFont)
        self.centerPanel = JPanel(BorderLayout())
        self.canvas = GraphCanvas()
        self.canvas.setApplet(self)
        self.buttonRow = JPanel(FlowLayout())
        self.backButton = JButton("<", actionPerformed = self.backAction)
        self.backButton.setFont(self.thinFont)
        self.continueButton = JButton("continue >",
                                      actionPerformed=self.continueAction)
        self.continueButton.setFont(self.thinFont)
        self.scaleGroup = ButtonGroup()
        self.linearButton = JRadioButton("linear scale",
                                         actionPerformed=self.linearAction)
        self.linearButton.setSelected(True)
        self.linearButton.setFont(self.thinFont)
        self.logarithmicButton = JRadioButton("logarithmic scale",
                                              actionPerformed=self.logarithmicAction)
        self.logarithmicButton.setFont(self.thinFont)
        self.aboutButton = JButton("About...",
                                   actionPerformed=self.aboutAction)
        self.aboutButton.setFont(self.thinFont)
        self.scaleGroup.add(self.linearButton)
        self.scaleGroup.add(self.logarithmicButton)
        self.buttonRow.add(self.backButton)
        self.buttonRow.add(self.continueButton)
        self.buttonRow.add(JLabel(" "*5))
        self.buttonRow.add(self.linearButton)
        self.buttonRow.add(self.logarithmicButton)
        self.buttonRow.add(JLabel(" "*20));
        self.buttonRow.add(self.aboutButton)
        self.centerPanel.add(self.canvas, BorderLayout.CENTER)
        self.centerPanel.add(self.buttonRow, BorderLayout.PAGE_END)
        self.helpText = JTextPane()
        self.helpText.setBackground(Color(1.0, 1.0, 0.5))
        self.helpText.setPreferredSize(Dimension(800,80))
        self.helpText.setText(re_sub("[ \\n]+", " ", """
            Please select one of the examples in the list on the left!
            """))
        self.pane.add(self.exampleSelector, BorderLayout.LINE_START)
        self.pane.add(self.centerPanel, BorderLayout.CENTER)
        self.pane.add(self.helpText, BorderLayout.PAGE_END)
        # simulation state, created lazily in start()/valueChanged()
        self.graph = None
        self.simulation = None
        self.touched = ""
        self.selected = ""
        self.gfxDriver = None

    def start(self):
        """Applet lifecycle: create the drawing driver and an empty graph."""
        self.gfxDriver = awtGfx.Driver(self.canvas)
        #self.gfxDriver.setAntialias(True)
        if self.gfxDriver.getSize()[0] < 200: # konqueror java bug work around
            self.gfxDriver.w = 650
            self.gfxDriver.h = 380
        self.graph = Graph.Cartesian(self.gfxDriver, 1, 0.0, 1000, 1.0,
                                     title="Results",
                                     xaxis="Rounds", yaxis="Success Rate")

    def stop(self):
        pass

    def destroy(self):
        pass

    def refresh(self):
        """Redraw the current graph (called from GraphCanvas.paint)."""
        if self.graph != None: self.graph.redraw()

    def valueChanged(self, e):
        """List-selection callback: update the help text for the touched
        entry and, once the selection settles, (re)start the simulation."""
        global exampleList
        newSelection = self.examples[self.exampleSelector.getSelectedIndex()]
        if newSelection != self.touched:
            self.touched = newSelection
            # last tuple element is the help text
            text = re_sub("[ \\n]+", " ", exampleList[self.touched][-1])
            self.helpText.setText(text)
        if not e.getValueIsAdjusting() and newSelection != self.selected:
            self.selected = newSelection
            smallFontPen = copy.copy(Gfx.BLACK_PEN)
            smallFontPen.fontSize = Gfx.SMALL
            ex = exampleList[self.selected]
            # preserve the linear/log choice across examples
            myStyleFlags = self.graph.styleFlags
            if self.simulation != None: self.simulation.stop()
            self.gfxDriver.resizedGfx() # konqueror 3.5.5 java bug workaround
            self.graph = Graph.Cartesian(self.gfxDriver, 1, 0.0, ex[3], 1.0,
                                         title=ex[0],
                                         xaxis="Rounds", yaxis="Success Rate",
                                         styleFlags = myStyleFlags,
                                         axisPen = smallFontPen,
                                         captionPen = smallFontPen)
            # zoomFrame is a stack of (x1, y1, x2, y2) view ranges
            self.zoomFrame = [(1, 0.0, ex[3], 1.0)]
            self.simulation = Simulation(self.graph, ex[1], ex[2], ex[3], ex[4])
            RunAsThread(self.simulation.simulation).start()

    def determineCurrentZoomFrame(self):
        """Index of the zoom frame containing the graph's current x-range."""
        i = 0
        for zf in self.zoomFrame:
            if self.graph.x2 <= zf[2]: break
            i += 1
        return i

    def backAction(self, e):
        """'<' button: stop a running simulation, or zoom back out one frame."""
        if self.simulation == None: return
        wasRunning = self.simulation.isRunning
        self.simulation.stop()
        if wasRunning or len(self.zoomFrame) <= 1: return
        zi = self.determineCurrentZoomFrame()
        if zi > 0 and zi < len(self.zoomFrame):
            x1, y1, x2, y2 = self.zoomFrame[zi-1]
            self.graph.adjustRange(x1, y1, x2, y2)

    def continueAction(self, e):
        """'continue >' button: extend the simulation (x2 rounds on a linear
        scale, x10 on a log scale) when at the outermost frame, otherwise
        zoom forward one frame."""
        if self.simulation == None: return
        wasRunning = self.simulation.isRunning
        self.simulation.stop()
        zi = self.determineCurrentZoomFrame()
        if zi == len(self.zoomFrame)-1:
            if wasRunning or self.simulation.world.round == self.zoomFrame[zi][2]:
                if self.graph.styleFlags & Graph.LOG_X == 0:
                    self.simulation.rounds *= 2
                else:
                    self.simulation.rounds *= 10
                self.zoomFrame.append((1, 0.0, self.simulation.rounds, 1.0))
                self.graph.adjustRange(1, 0.0, self.simulation.rounds, 1.0)
            # restart the worker thread to draw the extended range
            # NOTE(review): original indentation was lost in extraction;
            # restarting in the outer branch matches the observable behavior
            # of "continue" always resuming a stopped run — confirm.
            RunAsThread(self.simulation.simulation).start()
        else:
            x1, y1, x2, y2 = self.zoomFrame[zi+1]
            self.graph.adjustRange(x1, y1, x2, y2)

    def linearAction(self, e):
        """Radio button: switch the x-axis to a linear scale and replot."""
        if self.graph != None and (self.graph.styleFlags & Graph.LOG_X) != 0:
            if self.simulation != None: self.simulation.stop()
            self.graph.setStyle(self.graph.styleFlags & ~Graph.LOG_X, redraw=True)
            if self.simulation != None:
                RunAsThread(self.simulation.simulation).start()

    def logarithmicAction(self, e):
        """Radio button: switch the x-axis to a logarithmic scale and replot."""
        if self.graph != None and (self.graph.styleFlags & Graph.LOG_X) == 0:
            if self.simulation != None: self.simulation.stop()
            self.graph.setStyle(self.graph.styleFlags | Graph.LOG_X, redraw=True)
            if self.simulation != None:
                RunAsThread(self.simulation.simulation).start()

    def aboutAction(self, e):
        """Show the 'About' message dialog."""
        aboutText = """Induction Applet v. 0.1
        (c) 2007 University of Düsseldorf
        Authors: <NAME>, <NAME>
        """
        aboutText = re_sub(" +", " ", aboutText)
        JOptionPane.showMessageDialog(self.getContentPane(), aboutText)
###############################################################################
#
# the simulation
#
###############################################################################
class Simulation:
    """Drives an induction world forward and plots every predictor's
    success rate on a PyPlotter graph.

    Parameters
    ----------
    graph : Graph.Cartesian
        The graph the curves are drawn on.
    predictorList : list
        Prototype predictors; deep-copied so reruns start from a fresh
        state.
    penList : list of Gfx.Pen, optional
        One pen per predictor, consumed in order.
    rounds : int, optional
        Number of rounds to simulate.
    eventFunction : callable, optional
        Zero-argument function producing the next random event.
    """

    def __init__(self, graph, predictorList, penList=None, rounds=500,
                 eventFunction=lambda: Induction.getRandomEvent(2.0/3.0)):
        self.graph = graph
        self.rounds = rounds
        # deep copy so re-running an example does not reuse mutated state
        self.predictorList = copy.deepcopy(predictorList)
        self.world = Induction.World(eventFunction)
        for predictor in self.predictorList:
            self.world.register(predictor)
        # avoid the shared mutable-default pitfall (was `penList=[]`);
        # copy so the pens can be consumed destructively below
        penBox = list(penList) if penList is not None else []
        for predictor in self.predictorList:
            pen = penBox.pop(0)
            pen.lineWidth = Gfx.MEDIUM
            self.graph.addPen(str(predictor), pen, updateCaption=False)
        if SHOW_MI_MEAN:
            pen = Gfx.Pen((0.0, 0.0, 0.0), lineWidth=Gfx.THICK,
                          linePattern=Gfx.CONTINUOUS)
            self.graph.addPen("miMean", pen, updateCaption=False)
        if SHOW_NON_MI_MEAN:
            pen = Gfx.Pen((0.3, 0.0, 0.6), lineWidth=Gfx.THICK,
                          linePattern=Gfx.CONTINUOUS)
            self.graph.addPen("non_miMean", pen, updateCaption=False)
        self.graph.redrawCaption()
        self.interrupt = False
        self.isRunning = False
        self.last_xPixel = -1

    def simulation(self):
        """Main loop: run until `rounds` is reached or stop() is called."""
        self.isRunning = True
        if self.world.round >= 1:
            self.last_xPixel = self.graph._scaleX(self.world.round)
        else:
            self.last_xPixel = -1
        while not self.interrupt and self.world.round < self.rounds:
            self.nextRound()
        self.isRunning = False
        self.interrupt = False

    def nextRound(self):
        """Advance the world one round; plot new values only when the round
        maps to a fresh pixel column, to avoid redundant drawing."""
        self.world.nextRound()
        xPixel = self.graph._scaleX(self.world.round)
        if xPixel > self.last_xPixel:
            self.last_xPixel = xPixel
            for predictor in self.world.getPredictorList():
                self.graph.addValue(str(predictor), self.world.round,
                                    predictor.successRate)
            if SHOW_MI_MEAN:
                self.graph.addValue("miMean", self.world.round,
                                    self.world.miMean)
            if SHOW_NON_MI_MEAN:
                # BUG FIX: was `self.non_miMean`, an attribute Simulation
                # never defines (guaranteed AttributeError when this flag is
                # enabled); the mean lives on the world object, parallel to
                # `self.world.miMean` above.
                self.graph.addValue("non_miMean", self.world.round,
                                    self.world.non_miMean)

    def stop(self):
        """Ask the simulation thread to stop and busy-wait until it has."""
        self.interrupt = True
        while self.interrupt and self.isRunning:
            pass
        self.interrupt = False
###############################################################################
#
# for testing purposes this jython file can also be run as standalone
# program outside a web-page
#
###############################################################################
# Standalone entry point: when run directly (outside a browser), host the
# applet in pawt's test frame.
if __name__ == "__main__":
    applet = InductionApplet()
    pawt.test(applet, size=(800,500))
    applet.start()
    #applet.refresh()
|
<gh_stars>0
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines an :doc:`uproot.interpretation.Interpretation` and
temporary array for string data.
Note that :doc:`uproot.interpretation.strings.AsStrings` is an interpretation for
top-level strings, but :doc:`uproot.containers.AsString` can be nested within any
other :doc:`uproot.containers.AsContainer`.
The :doc:`uproot.interpretation.strings.StringArray` class only holds data while
an array is being built from ``TBaskets``. Its final form is determined by the
:doc:`uproot.interpretation.library.Library`.
"""
from __future__ import absolute_import
import struct
import numpy
import uproot
_string_4byte_size = struct.Struct(">I")
class AsStrings(uproot.interpretation.Interpretation):
"""
Args:
header_bytes (int): Number of bytes to skip at the beginning of each
entry.
length_bytes ("1-5" or "4"): Method used to determine the length of
a string: "1-5" means one byte if the length is less than 256,
otherwise the true length is in the next four bytes; "4" means
always four bytes.
typename (None or str): If None, construct a plausible C++ typename.
Otherwise, take the suggestion as given.
original (None, :doc:`uproot.model.Model`, or :doc:`uproot.containers.Container`): If
this interpretation is derived from
:ref:`uproot.interpretation.objects.AsObjects.simplify`, this is a
reminder of the original
:ref:`uproot.interpretation.objects.AsObjects.model`.
An :doc:`uproot.interpretation.Interpretation` for an array of strings.
This cannot be nested within other
:doc:`uproot.interpretation.Interpretation` objects; it can only represent
a ``TBranch`` that only contains strings (not strings within ``std::vector``,
for instance).
Note that the :doc:`uproot.containers.AsString` class is for strings nested
within other objects.
(:ref:`uproot.interpretation.objects.AsObjects.simplify` converts an
:doc:`uproot.interpretation.objects.AsObjects` of
:doc:`uproot.containers.AsString` into a
:doc:`uproot.interpretation.strings.AsStrings`.)
"""
def __init__(
    self, header_bytes=0, length_bytes="1-5", typename=None, original=None
):
    """Validate and store the interpretation's configuration."""
    # guard clause: reject invalid length encodings up front
    if length_bytes not in ("1-5", "4"):
        raise ValueError("length_bytes must be '1-5' or '4'")
    self._header_bytes = header_bytes
    self._length_bytes = length_bytes
    self._typename = typename
    self._original = original
@property
def header_bytes(self):
    """
    The number of bytes to skip at the beginning of each entry.
    """
    return self._header_bytes

@property
def length_bytes(self):
    """
    Method used to determine the length of a string: "1-5" means one byte
    if the length is less than 256, otherwise the true length is in the
    next four bytes; "4" means always four bytes.
    """
    return self._length_bytes

@property
def original(self):
    """
    If not None, this was the original
    :ref:`uproot.interpretation.objects.AsObjects.model` from an
    :doc:`uproot.interpretation.objects.AsObjects` that was simplified
    into this :doc:`uproot.interpretation.jagged.AsJagged`.
    """
    # NOTE(review): the docstring above says "AsJagged" but this class is
    # AsStrings — likely copied from the jagged module; confirm and fix
    # upstream.
    return self._original
def __repr__(self):
    """Show only the arguments that differ from their defaults."""
    non_defaults = []
    if self._header_bytes != 0:
        non_defaults.append("header_bytes={0}".format(self._header_bytes))
    if self._length_bytes != "1-5":
        non_defaults.append(
            "length_bytes={0}".format(repr(self._length_bytes))
        )
    return "AsStrings({0})".format(", ".join(non_defaults))

def __eq__(self, other):
    """Equal when the other object is an AsStrings with identical
    header and length-encoding configuration."""
    if not isinstance(other, AsStrings):
        return False
    return (
        self._header_bytes == other._header_bytes
        and self._length_bytes == other._length_bytes
    )
@property
def typename(self):
    """The C++ typename, defaulting to "char*" when none was supplied."""
    return "char*" if self._typename is None else self._typename

@property
def numpy_dtype(self):
    # strings are stored as Python objects in a NumPy array
    return numpy.dtype(object)
def awkward_form(
    self,
    file,
    index_format="i64",
    header=False,
    tobject_header=True,
    breadcrumbs=(),
):
    """Return the Awkward Array form for this interpretation: a list of
    1-byte characters tagged as a string array, carrying the uproot
    parameters needed to reconstruct this interpretation."""
    awkward = uproot.extras.awkward()
    return awkward.forms.ListOffsetForm(
        index_format,
        # inner form: single bytes ("B") marked as characters
        awkward.forms.NumpyForm((), 1, "B", parameters={"__array__": "char"}),
        parameters={
            "__array__": "string",
            "uproot": {
                "as": "strings",
                "header_bytes": self._header_bytes,
                "length_bytes": self._length_bytes,
            },
        },
    )
@property
def cache_key(self):
return "{0}({1},{2})".format(
type(self).__name__, self._header_bytes, repr(self._length_bytes)
)
    def basket_array(
        self, data, byte_offsets, basket, branch, context, cursor_offset, library
    ):
        """
        Decode one basket's raw bytes into a StringArray.

        ``data`` is the flat byte buffer of the basket payload;
        ``byte_offsets`` gives the byte position of each entry boundary
        within ``data``, or is None when the basket carries no offsets
        table (in which case entry boundaries are recovered from the
        per-string length headers).
        """
        self.hook_before_basket_array(
            data=data,
            byte_offsets=byte_offsets,
            basket=basket,
            branch=branch,
            context=context,
            cursor_offset=cursor_offset,
            library=library,
        )
        if byte_offsets is None:
            # Sequential scan: read each string's length header, then copy
            # its payload into `outdata`; `counts` collects string lengths.
            counts = numpy.empty(len(data), dtype=numpy.int32)
            outdata = numpy.empty(len(data), dtype=data.dtype)
            pos = 0
            entry_num = 0
            len_outdata = 0
            if self._length_bytes == "1-5":
                while True:
                    if pos >= len(data):
                        break
                    size = data[pos]
                    pos += 1
                    if size == 255:
                        # 0xff escape: the true length is in the next 4 bytes.
                        (size,) = _string_4byte_size.unpack(data[pos : pos + 4])
                        pos += 4
                    counts[entry_num] = size
                    entry_num += 1
                    outdata[len_outdata : len_outdata + size] = data[pos : pos + size]
                    len_outdata += size
                    pos += size
            elif self._length_bytes == "4":
                # Fixed 4-byte length prefix before every string.
                while True:
                    if pos >= len(data):
                        break
                    (size,) = _string_4byte_size.unpack(data[pos : pos + 4])
                    pos += 4
                    counts[entry_num] = size
                    entry_num += 1
                    outdata[len_outdata : len_outdata + size] = data[pos : pos + size]
                    len_outdata += size
                    pos += size
            else:
                # __init__ validated length_bytes; anything else is a logic error.
                raise AssertionError(repr(self._length_bytes))
            # Trim the over-allocated work buffers to what was actually filled.
            counts = counts[:entry_num]
            data = outdata[:len_outdata]
        else:
            # Vectorized path: entry boundaries are known, so compute each
            # string's payload span and carve out the header bytes with a
            # cumulative-sum mask instead of a Python loop.
            byte_starts = byte_offsets[:-1] + self._header_bytes
            byte_stops = byte_offsets[1:]
            if self._length_bytes == "1-5":
                # 1 header byte normally, 5 when the first byte is the 0xff escape.
                length_header_size = numpy.ones(len(byte_starts), dtype=numpy.int32)
                length_header_size[data[byte_starts] == 255] += 4
            elif self._length_bytes == "4":
                length_header_size = numpy.full(len(byte_starts), 4, dtype=numpy.int32)
            else:
                raise AssertionError(repr(self._length_bytes))
            byte_starts += length_header_size
            # mask becomes 1 inside payload spans and 0 elsewhere after cumsum:
            # +1 at each payload start, -1 at each payload stop.
            mask = numpy.zeros(len(data), dtype=numpy.int8)
            mask[byte_starts[byte_starts < len(data)]] = 1
            numpy.add.at(mask, byte_stops[byte_stops < len(data)], -1)
            numpy.cumsum(mask, out=mask)
            data = data[mask.view(numpy.bool_)]
            counts = byte_stops - byte_starts
        # Convert per-string lengths into a cumulative offsets array.
        offsets = numpy.empty(len(counts) + 1, dtype=numpy.int32)
        offsets[0] = 0
        numpy.cumsum(counts, out=offsets[1:])
        # tostring() is the pre-NumPy-1.9 spelling of tobytes().
        if hasattr(data, "tobytes"):
            data = data.tobytes()
        else:
            data = data.tostring()
        output = StringArray(offsets, data)
        self.hook_after_basket_array(
            data=data,
            byte_offsets=byte_offsets,
            basket=basket,
            branch=branch,
            context=context,
            output=output,
            cursor_offset=cursor_offset,
            library=library,
        )
        return output
    def final_array(
        self, basket_arrays, entry_start, entry_stop, entry_offsets, library, branch
    ):
        """
        Concatenate per-basket StringArrays into one array covering the
        requested entry window [entry_start, entry_stop), then hand it to
        the library for finalization.

        ``entry_offsets`` gives the global entry number at each basket
        boundary (one more element than there are baskets).
        """
        self.hook_before_final_array(
            basket_arrays=basket_arrays,
            entry_start=entry_start,
            entry_stop=entry_stop,
            entry_offsets=entry_offsets,
            library=library,
            branch=branch,
        )
        basket_offsets = {}
        basket_content = {}
        for k, v in basket_arrays.items():
            basket_offsets[k] = v.offsets
            basket_content[k] = v.content
        if entry_start >= entry_stop:
            # Empty request: a single-offset (zero-length) StringArray.
            output = StringArray(library.zeros((1,), numpy.int64), b"")
        else:
            # First pass: count how many entries of each basket fall inside
            # the requested window so `offsets` can be allocated exactly.
            length = 0
            start = entry_offsets[0]
            for _, stop in enumerate(entry_offsets[1:]):
                if start <= entry_start and entry_stop <= stop:
                    # basket contains the whole window
                    length += entry_stop - entry_start
                elif start <= entry_start < stop:
                    # basket contains the window's beginning
                    length += stop - entry_start
                elif start <= entry_stop <= stop:
                    # basket contains the window's end
                    length += entry_stop - start
                elif entry_start < stop and start <= entry_stop:
                    # basket is entirely inside the window
                    length += stop - start
                start = stop
            offsets = numpy.empty((length + 1,), numpy.int64)
            # Second pass: splice each overlapping basket's offsets (shifted
            # by `before`, the number of content bytes already emitted) and
            # collect the corresponding content slices.
            before = 0
            start = entry_offsets[0]
            contents = []
            for basket_num, stop in enumerate(entry_offsets[1:]):
                if start <= entry_start and entry_stop <= stop:
                    # basket contains the whole window
                    local_start = entry_start - start
                    local_stop = entry_stop - start
                    off, cnt = basket_offsets[basket_num], basket_content[basket_num]
                    offsets[:] = (
                        before - off[local_start] + off[local_start : local_stop + 1]
                    )
                    before += off[local_stop] - off[local_start]
                    contents.append(cnt[off[local_start] : off[local_stop]])
                elif start <= entry_start < stop:
                    # basket contains the window's beginning
                    local_start = entry_start - start
                    local_stop = stop - start
                    off, cnt = basket_offsets[basket_num], basket_content[basket_num]
                    offsets[: stop - entry_start + 1] = (
                        before - off[local_start] + off[local_start : local_stop + 1]
                    )
                    before += off[local_stop] - off[local_start]
                    contents.append(cnt[off[local_start] : off[local_stop]])
                elif start <= entry_stop <= stop:
                    # basket contains the window's end
                    local_start = 0
                    local_stop = entry_stop - start
                    off, cnt = basket_offsets[basket_num], basket_content[basket_num]
                    offsets[start - entry_start :] = (
                        before - off[local_start] + off[local_start : local_stop + 1]
                    )
                    before += off[local_stop] - off[local_start]
                    contents.append(cnt[off[local_start] : off[local_stop]])
                elif entry_start < stop and start <= entry_stop:
                    # basket is entirely inside the window
                    off, cnt = basket_offsets[basket_num], basket_content[basket_num]
                    offsets[start - entry_start : stop - entry_start + 1] = (
                        before - off[0] + off
                    )
                    before += off[-1] - off[0]
                    contents.append(cnt[off[0] : off[-1]])
                start = stop
            output = StringArray(offsets, b"".join(contents))
        self.hook_before_library_finalize(
            basket_arrays=basket_arrays,
            entry_start=entry_start,
            entry_stop=entry_stop,
            entry_offsets=entry_offsets,
            library=library,
            branch=branch,
            output=output,
        )
        output = library.finalize(output, branch, self, entry_start, entry_stop)
        self.hook_after_final_array(
            basket_arrays=basket_arrays,
            entry_start=entry_start,
            entry_stop=entry_stop,
            entry_offsets=entry_offsets,
            library=library,
            branch=branch,
            output=output,
        )
        return output
class StringArray(object):
    """
    Args:
        offsets (array of ``numpy.int32``): Start/stop index of each string;
            has one more element than the number of strings.
        content (array): All string bytes concatenated into one contiguous
            buffer.

    Intermediate array filled by
    :ref:`uproot.interpretation.strings.AsStrings.basket_array` and later
    converted into a NumPy, Awkward, or other array by the chosen
    :doc:`uproot.interpretation.library.Library`.
    """

    def __init__(self, offsets, content):
        self._offsets = offsets
        self._content = content

    def __repr__(self):
        # Abbreviate long content so the repr stays readable.
        if len(self._content) <= 100:
            shown = repr(self._content)
        else:
            shown = repr(self._content[:45]) + " ... " + repr(self._content[-45:])
        return "StringArray({0}, {1})".format(self._offsets, shown)

    @property
    def offsets(self):
        """
        Start/stop index of each string; one element longer than the number
        of strings.
        """
        return self._offsets

    @property
    def content(self):
        """
        Contiguous buffer holding the character data of every string.
        """
        return self._content

    def __getitem__(self, where):
        start, stop = self._offsets[where], self._offsets[where + 1]
        return uproot._util.ensure_str(self._content[start:stop])

    def __len__(self):
        return len(self._offsets) - 1

    def __iter__(self):
        offsets = self._offsets
        content = self._content
        for i in range(len(offsets) - 1):
            yield uproot._util.ensure_str(content[offsets[i] : offsets[i + 1]])
|
<gh_stars>0
import time
import math
import config
import constants as c
from nltk import Tree
import re
def log_print(*args, **kwargs):
    """
    Thin wrapper around the built-in print.

    Prints only when config.VERBOSE is set and the message's verbosity
    level (keyword ``verbosity``, default 1) does not exceed
    config.VERBOSITY.  Any other keyword arguments are forwarded to print.
    """
    verbosity = kwargs.pop("verbosity", 1)
    if config.VERBOSE and verbosity <= config.VERBOSITY:
        print(*args, **kwargs)
def tok_format(tok):
    """
    Build a compact summary string for a spaCy token, joining with underscores:
        tok.orth_  (the token text),
        tok.tag_   (the fine-grained POS tag),
        tok.pos_   (the coarse POS tag),
        tok.dep_   (the dependency relation label).

    (The previous docstring incorrectly listed ``tok.ent_id_``, which is not
    part of the output.)
    """
    return "_".join([tok.orth_, tok.tag_, tok.pos_, tok.dep_])
def to_nltk_tree(node):
    """
    Recursively convert a spaCy dependency subtree into an nltk.Tree.

    Call with the root token of a dependency parse; each node is labeled by
    tok_format and its syntactic children are converted recursively.
    """
    label = tok_format(node)
    if node.n_lefts + node.n_rights > 0:
        return Tree(label, [to_nltk_tree(child) for child in node.children])
    return Tree(label, [])
def clean_sentence(sentence):
    """
    Strip punctuation noise from a sentence and normalize it.

    Removes commas, square brackets, parentheses, and en/em dashes; replaces
    slashes with spaces; collapses runs of spaces; lowercases.

    :param sentence: a string, representing a sentence.
    :return: the cleaned, lower-cased sentence string.
        (The previous docstring wrongly claimed a (sentence, disambiguations)
        pair is returned.)
    """
    # Raw strings avoid the invalid-escape warnings of the old patterns.
    cleaned = re.sub(r"[,\[\]()–—]", "", sentence)
    cleaned = re.sub(r"/", " ", cleaned)
    cleaned = re.sub(r" +", " ", cleaned)
    return cleaned.lower()
def transform_word_custom(word):
    """
    Normalize every run of digits in a word to the numeric placeholder tag
    (constants.NUM_TAG), e.g. "09:00" or "01/01/2000" become tag-separated
    forms.  This reduces wasteful redundancy in a self-learned vocabulary.

    :param word: the word to modify.
    :return: the modified word (unchanged when it contains no digits).
    """
    # re.sub is a no-op when no digit run matches, so the previous
    # re.search pre-check was redundant; the earlier commented-out URL /
    # e-mail / repeated-character experiments were dead code and removed.
    return re.sub(r"[0-9]+", c.NUM_TAG, word)
def merge_concept_tags(words, tags):
    """
    Given token words and their per-token concept tags, merge contiguous
    runs of the same tag into concept mentions.

    :return: a dictionary {concept_tag: list of tokenized concept mentions}
        e.g. for "Is the University of Rome in Rome?":
        {
            "l": [["University", "of", "Rome"]],
            "r": [["Rome"]]
        }
    """
    type2conceptList = {c.LEFT_ENT_TAG: [], c.RIGHT_ENT_TAG: []}
    for tag in (c.LEFT_ENT_TAG, c.RIGHT_ENT_TAG):
        # positions of all tokens carrying this tag
        positions = [i for i, t in enumerate(tags) if t == tag]
        # contiguous index runs become half-open [start, stop) ranges
        for start, stop in get_ranges_from_indexes(positions):
            type2conceptList[tag].append(words[start:stop])
    return type2conceptList
def get_ranges_from_indexes(indexes):
    """
    Collapse integer indexes into half-open [start, stop) ranges of
    consecutive values, e.g. [1, 2, 3, 5, 6] -> [(1, 4), (5, 7)].

    :param indexes: sized iterable of ints, in any order.
    :return: list of (start, stop) tuples, ordered by start.
    """
    if len(indexes) == 0:
        return []
    ranges = []
    sorted_indexes = sorted(indexes)
    cur_min = sorted_indexes[0]
    cur_max = cur_min
    # BUG FIX: the original iterated the *unsorted* input while the bounds
    # came from the sorted copy, producing wrong/empty ranges whenever the
    # input was out of order.
    for idx in sorted_indexes:
        if idx == cur_max:
            cur_max = idx + 1
        else:
            ranges.append((cur_min, cur_max))
            cur_min = idx
            cur_max = idx + 1
    # The final range is always non-empty here, so no guard is needed.
    ranges.append((cur_min, cur_max))
    return ranges
def timeSince(since, percent):
    """
    Format the elapsed time since ``since`` (an epoch timestamp) together
    with the estimated remaining time, given the fraction of work done
    (``percent`` in (0, 1]).
    """
    elapsed = time.time() - since
    estimated_total = elapsed / percent
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
def asMinutes(s):
    """Format a duration in seconds as '<minutes>m <seconds>s'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def extract_concepts(type2concepts, from_question=True):
    """
    :param type2concepts: {concept_tag : list of tokenized concept mentions}
        e.g.
        Is the University of Rome in Rome?
        {
            "l": [ ["University", "of", "Rome"] ],
            "r": [ ["Rome"] ]
        }
        where "l" and "r" stand for "left" and "right" concept
    :param from_question: if the concepts come from a question or an answer
    :return: (c1, c2), the concept strings for the left and right concept
    """
    c1 = _pick_concept(type2concepts[c.LEFT_ENT_TAG])
    c2 = _pick_concept(type2concepts[c.RIGHT_ENT_TAG])
    if from_question:
        # A question carries its single concept in the left slot.
        if not c1 and c2:
            c1, c2 = c2, []
    else:
        # An answer carries its single concept in the right slot.
        # BUG FIX: the original did `c1 = c2; c2 = ""` here, which assigned
        # the *empty* c2 to c1 and then cleared c2, wiping both concepts
        # instead of moving c1 into the right slot.
        if not c2 and c1:
            c1, c2 = [], c1
    return " ".join(c1), " ".join(c2)


def _pick_concept(candidates):
    """
    Choose one concept mention from a list of tokenized candidates.

    With several candidates they are sorted by (token count, position) and
    the first — i.e. the shortest, earliest — is returned.  (The original
    comments said "pick the longest", but the code has always picked the
    shortest; that behavior is preserved.)
    """
    if len(candidates) == 1:
        return candidates[0]
    if len(candidates) == 0:
        return []
    ranked = sorted(enumerate(candidates), key=lambda x: (len(x[1]), x[0]))
    return ranked[0][1]
def rectify_answer(generated_answer):
    """Remove duplicate consecutive words.

    e.g. "My My Name Name" -> "My Name"

    :param generated_answer: the generated answer string.
    :return: the answer with consecutive duplicate words collapsed.
    """
    words = generated_answer.split()
    # BUG FIX: an empty or whitespace-only answer used to raise IndexError
    # on words[0].
    if not words:
        return ""
    deduped = [words[0]]
    for word in words[1:]:
        if deduped[-1] != word:
            deduped.append(word)
    return " ".join(deduped)
<reponame>sterin/pywrapper<filename>wrap_apis.py
import subprocess
import BeautifulSoup
import urllib2
import re
# Names of the CPython C-API documentation pages (Doc/c-api/<base>.rst,
# installed as /usr/share/doc/python/html/c-api/<base>.html) that are
# scanned for function prototypes.  "init" is deliberately skipped.
bases = [
    "abstract",
    "allocation",
    "arg",
    "bool",
    "buffer",
    "bytearray",
    "capsule",
    "cell",
    "class",
    "cobject",
    "codec",
    "code",
    "complex",
    "concrete",
    "conversion",
    "datetime",
    "descriptor",
    "dict",
    "exceptions",
    "file",
    "float",
    "function",
    "gcsupport",
    "gen",
    "import",
    "index",
#    "init",
    "intro",
    "int",
    "iterator",
    "iter",
    "list",
    "long",
    "mapping",
    "marshal",
    "memory",
    "method",
    "module",
    "none",
    "number",
    "objbuffer",
    "object",
    "objimpl",
    "refcounting",
    "reflection",
    "sequence",
    "set",
    "slice",
    "string",
    "structures",
    "sys",
    "tuple",
    "typeobj",
    "type",
    "unicode",
    "utilities",
    "veryhigh",
    "weakref",
]
main_file_template="""\
#ifndef PYTHONWRAPPER_{FILE}__H
#define PYTHONWRAPPER_{FILE}__H
#include <datetime.h>
#include <marshal.h>
#include <frameobject.h>
#include <pythonrun.h>
#include <unicodeobject.h>
{CONTENTS}
#endif // PYTHONWRAPPER_API_{FILE}__H
"""
file_template="""
#ifndef PYTHONWRAPPER_{FILE}__H
#define PYTHONWRAPPER_{FILE}__H
namespace py
{{
{CONTENTS}
}}
#endif // PYTHONWRAPPER_API_{FILE}__H
"""
safe_noref_template = """\
static inline {RETURNTYPE} {FUNC}({PARMS})
{{
return safe_noref( {ORIG}({ARGS}) );
}}\
"""
safe_ref_template = """\
static inline {REF}<{REFTYPE}> {FUNC}({PARMS})
{{
return safe_{REF}( {ORIG}({ARGS}) );
}}\
"""
void_template = """\
static inline void {FUNC}({PARMS})
{{
{ORIG}({ARGS});
exception::check();
}}\
"""
proto_exceptions = {
'Py_VISIT': None,
'PyCodec_KnownEncoding': None,
'PyRun_SimpleFileFlags': None,
'PyParser_SimpleParseStringFlagsFilename':None,
'PyOS_stricmp': 'int PyOS_stricmp(char *s1, char *s2)',
'PyOS_strnicmp': 'int PyOS_strnicmp(char *s1, char *s2, Py_ssize_t n)',
'PyCode_GetNumFree': 'Py_ssize_t PyCode_GetNumFree(PyCodeObject* o)',
'PyCode_NewEmpty': 'PyCodeObject *PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno)',
'PyFile_SetBufSize': 'void PyFile_SetBufSize(PyObject *f, int bufsize)',
'PyGILState_GetThisThreadState' : 'PyThreadState* PyGILState_GetThisThreadState(void)',
'PyUnicode_EncodeRawUnicodeEscape': 'PyObject* PyUnicode_EncodeRawUnicodeEscape(const Py_UNICODE *data, Py_ssize_t length)',
'PyUnicode_RichCompare': 'PyObject* PyUnicode_RichCompare(PyObject *left, PyObject *right, int op)',
'PyType_IS_GC': 'int PyType_IS_GC(PyTypeObject *o)',
'PyType_HasFeature': 'int PyType_HasFeature(PyTypeObject *o, int feature)',
'PyFile_SetEncoding':'int PyFile_SetEncoding(PyObject *p, const char *enc)',
'PyFile_SetEncodingAndErrors':'int PyFile_SetEncodingAndErrors(PyObject *p, const char *enc, char *errors)',
'PySeqIter_Check':'int PySeqIter_Check(PyObject* op)',
'PyCallIter_Check':'int PyCallIter_Check(PyObject* op)',
'PyModule_AddIntMacro':None,
'PyModule_AddStringMacro':None,
'PyOS_CheckStack':None,
'PyUnicode_DecodeMBCS':None,
'PyUnicode_DecodeMBCSStateful':None,
'PyUnicode_EncodeMBCS':None,
'PyUnicode_AsMBCSString':None,
'PyGen_New':'PyObject* PyGen_New(stolen_ref<PyFrameObject> frame)',
'PyList_SetItem':'int PyList_SetItem(PyObject *list, Py_ssize_t index, stolen_ref<PyObject> item)',
'PyList_SET_ITEM':'void PyList_SET_ITEM(PyObject *list, Py_ssize_t index, stolen_ref<PyObject> item)',
'PyModule_AddObject':'int PyModule_AddObject(PyObject *module, const char *name, stolen_ref<PyObject> value)',
'PyTuple_SetItem':'int PyTuple_SetItem(PyObject *p, Py_ssize_t pos, stolen_ref<PyObject> o)',
'PyTuple_SET_ITEM':'void PyTuple_SET_ITEM(PyObject *p, Py_ssize_t pos, stolen_ref<PyObject> o)',
'PyString_Concat':'void PyString_Concat(replace_ref<PyObject> string, PyObject *newpart)',
'PyString_ConcatAndDel':'void PyString_ConcatAndDel(replace_ref<PyObject> string, stolen_ref<PyObject> newpart)',
}
def download(base):
    """
    Parse the locally installed HTML documentation page for *base* and
    return its <dl class="function"> entries (one per documented C function).
    """
    # BUG FIX: the original opened the file and never closed it; the
    # with-statement releases the handle deterministically.
    with open('/usr/share/doc/python/html/c-api/%s.html' % base, 'r') as f:
        soup = BeautifulSoup.BeautifulSoup(f.read())
    return soup.findAll('dl', attrs={'class': 'function'})
def safe_split(s):
    """
    Split *s* on commas that are not nested inside parentheses.

    Note: only the final piece is stripped; interior pieces keep their
    surrounding whitespace (callers strip as needed).
    """
    depth = 0
    pieces = []
    current = []
    for ch in s:
        if ch == ',' and depth == 0:
            pieces.append(''.join(current))
            current = []
            continue
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        current.append(ch)
    if current:
        pieces.append(''.join(current).strip())
    return pieces
def split_parm(parm):
    """
    Split one C parameter declaration into (declaration, argument name).

    Special cases: a bare "ob" becomes a PyObject* parameter; "..." has no
    name; function-pointer parameters are searched for the inner name.
    Returns (parm, None) when no name can be extracted.
    """
    parm = parm.strip()
    if parm == '...':
        return '...', None
    if parm == 'ob':
        return "PyObject* ob", 'ob'
    if '(' in parm:
        fp = re.match(r'[^(]*\(\*([a-zA-Z0-9_]+)', parm)
        if fp:
            return parm, fp.group(1)
    named = re.match(r'^(.*[^a-zA-Z0-9_])([a-zA-Z0-9_]+)(\[\])?$', parm)
    if named:
        return parm, named.group(2)
    return parm, None
def split_parms(parms):
    """
    Parse a comma-separated C parameter list into (declaration, name) pairs,
    inventing argN names for anonymous parameters and renaming the C++
    keyword 'class' to 'class_'.
    """
    result = []
    for i, raw in enumerate(safe_split(parms)):
        decl, name = split_parm(raw)
        if decl not in ('...', 'void') and not name:
            # anonymous parameter: synthesize a positional name
            name = 'arg%d' % i
            result.append(('%s %s' % (decl, name), name))
        elif name == 'class':
            result.append((decl.replace('class', 'class_'), 'class_'))
        else:
            result.append((decl, name))
    return result
def split_proto1(proto):
    """
    Parse a C function prototype into (return type, function name, params),
    or None when the prototype does not match the expected shape.
    """
    m = re.match(r'^(.*[^a-zA-Z0-9_])([a-zA-Z0-9_]+)\((.*)\)$', proto.strip())
    if m is None:
        return None
    return (
        m.group(1).replace(' *', '*').strip(),
        m.group(2),
        split_parms(m.group(3)),
    )
def split_proto(proto):
    """
    Parse a prototype, substituting the hand-written override from
    proto_exceptions when a non-None one exists for the function.
    """
    returntype, funcname, parms = split_proto1(proto)
    override = proto_exceptions.get(funcname)
    if override:
        return split_proto1(override)
    return returntype, funcname, parms
def parse_file(base):
    """
    Generator over the documented C functions of one c-api page (Python 2).

    Yields (refcount, (returntype, funcname, parms), steal, platform) where
    ``refcount`` is the "Return value: ..." annotation text (or None),
    ``steal`` is True when the description mentions reference stealing, and
    ``platform`` is True when an "Availability" note marks the function as
    platform-dependent.
    """
    soup = download(base)
    for l in soup:
        # l.contents[1] is the <dt> holding the prototype text.
        text = []
        x = l.contents[1]
        for z in x.findAll(text=True):
            text.append(z)
        rc = l.findAll(attrs={'class':'refcount'})
        refcount = rc[0].text if rc else None
        # Normalize non-breaking spaces and strip the pilcrow (¶) anchor.
        prototype = "".join( t.replace(u' ', ' ').replace(u'\u00b6','') for t in text ).strip()
        steal = False
        if 'steal' in unicode(l.text).lower():
            steal = True
        if 'stolen' in unicode(l.text).lower():
            steal = True
        platform = False
        if 'Availability' in unicode(l.text):
            platform = True
        yield refcount, split_proto(prototype), steal, platform
# Return types that get ref<>/borrowed_ref<> wrappers instead of safe_noref.
refs = set( [
    "PyCodeObject*",
    "PyFrameObject*",
    "PyObject*",
    "PyVarObject*",
] )

# NOTE(review): `xxx` appears to be unused scaffolding.
xxx=set()

# Bases for which at least one wrapper was generated (drives the umbrella header).
used_bases = []

# Report buckets: (base, function name) pairs collected during generation.
undocumented_return_values = []
varargs_functions = []
va_list_functions = []
steal_in_description = []
weird_names = []
primitive_macros = []
platform_dependent = []
duplicated_functions = []

# All function names seen so far, to skip duplicates across pages.
unique_funcs = set()
# Main generation loop (Python 2): for every documentation page, parse each
# function prototype, classify it, and render the matching wrapper template.
for base in bases:
    funcs = []
    for rc, proto, steal, platform in parse_file(base):
        # Skip names already wrapped from an earlier page.
        if proto[1] in unique_funcs:
            duplicated_functions.append( (base,proto[1]) )
            continue
        unique_funcs.add( proto[1] )
        if steal:
            steal_in_description.append( (base, proto[1]) )
        if platform:
            platform_dependent.append( (base,proto[1]) )
        # proto_exceptions entries mapped to None are skipped entirely.
        if proto[1] in proto_exceptions and not proto_exceptions[proto[1]]:
            continue
        if proto[2]:
            # Varargs functions cannot be wrapped; record and skip.
            if '...' in [ x[0] for x in proto[2] ]:
                varargs_functions.append( (base,proto[1]) )
                continue
            if [ x[0] for x in proto[2] if 'va_list' in x[0] ]:
                va_list_functions.append((base,proto[1]))
            # "TYPE "-typed parameters indicate primitive macros; skip.
            if [ x[0] for x in proto[2] if x[0].startswith('TYPE ')]:
                primitive_macros.append( (base, proto[1]) )
                continue
        # Derive the wrapper name by stripping the Py/Py_/_Py prefix.
        orig_name = proto[1]
        if proto[1].startswith("Py_"):
            func_name = proto[1][3:]
        elif proto[1].startswith("Py"):
            func_name = proto[1][2:]
        elif proto[1].startswith("_Py"):
            func_name = "_"+proto[1][3:]
        else:
            weird_names.append( (base, proto[1]) )
            continue
        if proto[0] in refs:
            # PyObject-like return: wrap in ref<> or borrowed_ref<> depending
            # on the documented refcount annotation.
            borrow = True if rc and 'borrow' in rc.lower() else False
            if not rc:
                undocumented_return_values.append( (base, proto[1]) )
            try:
                args = {
                    'REFTYPE': proto[0][:-1],
                    'REF': 'borrowed_ref' if borrow else 'ref',
                    'FUNC': func_name,
                    'ORIG': orig_name,
                    'ARGS': ', '.join( p for _,p in proto[2] if p),
                    'PARMS': ', '.join( p for p,_ in proto[2] if p)
                }
                funcs.append( safe_ref_template.format(**args) )
            except Exception as e:
                print proto
                print e
                raise
        elif proto[0] == 'void':
            args = {
                'FUNC': func_name,
                'ORIG': orig_name,
                'ARGS': ', '.join( p for _,p in proto[2] if p),
                'PARMS': ', '.join( p for p,_ in proto[2] if p)
            }
            funcs.append( void_template.format(**args) )
        else:
            args = {
                'RETURNTYPE': proto[0],
                'FUNC': func_name,
                'ORIG': orig_name,
                'ARGS': ', '.join( p for _,p in proto[2] if p),
                'PARMS': ', '.join( p for p,_ in proto[2] if p)
            }
            funcs.append( safe_noref_template.format(**args) )
        # Platform-dependent functions are guarded for Windows builds.
        if platform:
            funcs[-1] = "#ifdef _WIN32\n\n%s\n\n#endif"%funcs[-1]
    if funcs:
        with open( 'pywrapper_api_%s.h'%base, 'w' ) as f:
            f.write( file_template.format( FILE='API_%s'%base.upper(), CONTENTS="\n\n".join( funcs ) ) )
        used_bases.append(base)
# Write the umbrella header including every generated per-page header.
# NOTE(review): `base` here leaks from the generation loop above, so the
# FILE macro is derived from the *last* base — probably a fixed name like
# 'API' was intended; confirm before changing the generated guards.
with open('pywrapper_api.h', 'w') as f:
    includes = [ '#include "pywrapper_api_%s.h"'%b for b in used_bases ]
    f.write( main_file_template.format( FILE='API_%s'%base.upper(), CONTENTS="\n".join( includes ) ) )

# Summary report (Python 2 print statements): everything that was skipped
# or flagged during generation, keyed by the documentation page.
print 'UNDOCUMENTED RETURN VALUE:'
for base, name in sorted(undocumented_return_values):
    print 'Doc/c-api/%s.rst: '%base, name

print 'ACCEPTS VARARGS:'
for base, name in sorted(varargs_functions):
    print 'Doc/c-api/%s.rst: '%base, name

print 'ACCEPTS VA_LIST:'
for base, name in sorted(va_list_functions):
    print 'Doc/c-api/%s.rst: '%base, name

print 'STEAL in description:'
for base, name in sorted(steal_in_description):
    print 'Doc/c-api/%s.rst: '%base, name

print 'WEIRD names:'
for base, name in sorted(weird_names):
    print 'Doc/c-api/%s.rst: '%base, name

print 'PRIMITIVE MACROS:'
for base, name in sorted(primitive_macros):
    print 'Doc/c-api/%s.rst: '%base, name

print 'PLATFORM DEPENDENT:'
for base, name in sorted(platform_dependent):
    print 'Doc/c-api/%s.rst: '%base, name

print 'DUPLICATED FUNCTIONS:'
for base, name in sorted(duplicated_functions):
    print 'Doc/c-api/%s.rst: '%base, name
|
import copy
import json
import os
from jsonschema.validators import Draft4Validator
from semDiff.compareEntities import EntityCoverage
from utils.schema2context import process_schema_name
class EntityMerge:
    """ A class that merges two schemas based on their semantic annotations

    :param schema1: dictionary of the first schema
    :param context1: dictionary of the first context as {"@context":{}}
    :param schema2: dictionary of the second schema
    :param context2: dictionary of the second context as {"@context":{}}
    """

    def __init__(self, schema1, context1, schema2, context2):
        # Start from deep copies of the first schema/context; material from
        # the second schema is merged into these copies.
        self.output_schema = copy.deepcopy(schema1)
        self.output_context = copy.deepcopy(context1)

        coverage = EntityCoverage(schema1, context1, schema2, context2)

        # Unmatched fields of the second schema that carry a semantic value:
        # copy both the context annotation and the property definition.
        for semantic_value, field_names in coverage.unmatched_with_sem.items():
            for field_name in field_names:
                self.output_context["@context"][field_name] = semantic_value
                self.output_schema['properties'][field_name] = \
                    schema2['properties'][field_name]

        # Unmatched fields without a semantic value: copy the property only
        # when the first schema does not already define it.
        for field_name in coverage.unmatched_without_sem:
            if field_name not in schema1["properties"]:
                self.output_schema["properties"][field_name] = \
                    schema2["properties"][field_name]
class MergeEntityFromDiff:
""" A class that merges network2 into network1 based on overlaps from FullDiff
:param overlaps: a variable containing
"""
def __init__(self, overlaps):
self.overlaps = overlaps["overlaps"]
self.output = {
"schemas": copy.deepcopy(overlaps["network1"]['schemas']),
"contexts": copy.deepcopy(overlaps["network1"]['contexts'])
}
self.content = overlaps
self.name_mapping = {} # {"oldName":"newName"}
self.output_name = \
self.content['network1']['name'].lower() + \
"_" + self.content['network2']['name'].lower() + "_merge"
self.output_dir = os.path.join(os.path.dirname(__file__),
"../tests/fullDiffOutput/merges/" + self.output_name + "/")
self.errors = {}
self.main_schema_name = overlaps['network1']['name'].lower().replace(' ', '_').capitalize()
if "fields_to_merge" not in overlaps:
print("Nothing to merge for current setup")
exit()
# Process mergings
for schemaName in overlaps['fields_to_merge']:
merging_schema_name = schemaName.replace('_schema.json', '')
merge_with_schema_name = overlaps['fields_to_merge'][schemaName][
'merge_with'].replace('_schema.json', '')
if merge_with_schema_name != merging_schema_name:
merged_schema_name = merge_with_schema_name + "_" \
+ merging_schema_name \
+ "_merged_schema.json"
merged_type = merge_with_schema_name.capitalize(
) + merging_schema_name.capitalize()
else:
merged_schema_name = merge_with_schema_name + "_merged_schema.json"
merged_type = merge_with_schema_name.capitalize() + 'Merged'
self.name_mapping[overlaps['fields_to_merge'][schemaName][
'merge_with']] = merged_schema_name
self.name_mapping[schemaName] = merged_schema_name
merged_title = overlaps["network1"]['schemas'][overlaps[
'fields_to_merge'][schemaName]['merge_with']]['title'] + " - " + \
overlaps["network2"]['schemas'][schemaName]['title'] + " merging"
merged_description = "Merge between the " + overlaps["network1"]['schemas'][overlaps[
'fields_to_merge'][schemaName]['merge_with']]['title'] + " and the " + \
overlaps["network2"]['schemas'][schemaName]['title']
merged_schema = copy.deepcopy(
overlaps["network1"]['schemas'][
overlaps['fields_to_merge'][schemaName]['merge_with']])
merged_context = copy.deepcopy(
overlaps["network1"]['contexts'][overlaps[
'fields_to_merge'][schemaName]['merge_with']])
del self.output['schemas'][overlaps['fields_to_merge'][schemaName]['merge_with']]
del self.output['contexts'][overlaps['fields_to_merge'][schemaName]['merge_with']]
# process the fields to merge
for field in overlaps['fields_to_merge'][schemaName]['fields']:
merged_schema['properties'][field] = overlaps['network2'][
'schemas'][schemaName]['properties'][field]
merged_schema['title'] = merged_title
merged_schema['description'] = merged_description
merged_context[field] = overlaps['network2']['contexts'][schemaName][field]
if not overlaps['network2']['contexts'][schemaName][field].startswith(
('http', 'https')
):
prefix = overlaps['network2']['contexts'][schemaName][field].split(':')[0]
if prefix not in merged_context:
merged_context[prefix] = overlaps['network2'][
'contexts'][schemaName][prefix]
self.find_references(
overlaps['network2']['schemas'][schemaName]['properties'][field])
if 'enum' in merged_schema['properties']['@type']:
type_iterator = 0
for schema_type in merged_schema['properties']['@type']['enum']:
if schema_type == merge_with_schema_name.capitalize():
del merged_schema['properties']['@type']['enum'][type_iterator]
merged_schema['properties']['@type']['enum'].append(merged_type)
type_iterator += 1
self.output['schemas'][merged_schema_name] = merged_schema
self.output['contexts'][merged_schema_name] = merged_context
# processing main schema name
for overlap in self.overlaps:
if self.main_schema_name in overlap[0] and float(overlap[1]['coverage'][0]) >= 100:
old_schema1_name = self.main_schema_name.lower()
old_schema2_name = overlap[0][1].lower()
new_schema_name = old_schema1_name \
+ "_" + old_schema2_name + '_merged_schema.json'
new_description = "A merge between " \
+ old_schema1_name + " and " \
+ old_schema2_name + " schemas"
new_title = "Merge between " + old_schema1_name + " and " + old_schema2_name
new_schema = self.content['network1']['schemas'][old_schema1_name
+ "_schema.json"]
new_schema['description'] = new_description
new_schema['title'] = new_title
if 'enum' in new_schema['properties']['@type']:
type_iterator = 0
for schema_type in new_schema['properties']['@type']['enum']:
if schema_type == self.main_schema_name:
del new_schema['properties']['@type']['enum'][type_iterator]
new_schema['properties']['@type']['enum'].append(
process_schema_name(new_schema_name))
self.output['schemas'][new_schema_name] = new_schema
del self.output['schemas'][old_schema1_name + "_schema.json"]
# Context
self.output['contexts'][new_schema_name] = self.content['network1'][
'contexts'][old_schema1_name + '_schema.json']
self.output['contexts'][new_schema_name][
process_schema_name(new_schema_name)] = self.output['contexts'][
old_schema1_name + '_schema.json'][process_schema_name(old_schema1_name)]
del self.output['contexts'][new_schema_name][process_schema_name(old_schema1_name)]
del self.output['contexts'][old_schema1_name + '_schema.json']
else:
schema_1_name = overlap[0][0].lower() + '_schema.json'
schema_2_name = overlap[0][1].lower() + '_schema.json'
if schema_2_name not in overlaps['fields_to_merge']:
self.name_mapping[schema_2_name] = schema_1_name
self.modify_references()
def find_references(self, field):
""" Find $ref at root, in items or in allOf, anyOf, oneOf, adds the schema/context
to the merge and change reference names
:param field: a schema field
:type field: dict
:return:
"""
look_for = ["anyOf", "oneOf", "allOf"]
# $ref at root
if '$ref' in field:
sub_schema_name = field['$ref'].replace("#", '')
self.add_schema(sub_schema_name)
# $ref in anyOf, oneOf or allOf
for item in look_for:
if item in field:
for sub_item in field[item]:
if '$ref' in sub_item:
sub_schema_name = sub_item['$ref'].replace("#", '')
self.add_schema(sub_schema_name)
# $ref in items
if 'items' in field:
if '$ref' in field['items']:
sub_schema_name = field['items']['$ref'].replace('#', '')
self.add_schema(sub_schema_name)
for item in look_for:
if item in field['items']:
for sub_item in field['items'][item]:
if '$ref' in sub_item:
sub_schema_name = sub_item['$ref']
self.add_schema(sub_schema_name)
def add_schema(self, schema_name):
""" Adds the schema to the merge
:param schema_name:
:return:
"""
if schema_name not in self.name_mapping:
if schema_name is not None and schema_name not in self.output['schemas']:
schema_name = schema_name.replace("#", '')
if schema_name[0] is not "/":
self.output['schemas'][schema_name] = \
self.content['network2']['schemas'][schema_name]
self.output['contexts'][schema_name] = \
self.content['network2']['contexts'][schema_name]
# self.find_references(self.content['network2']['schemas'][schema_name])
for field in self.content['network2']['schemas'][schema_name]['properties']:
self.find_references(self.content[
'network2']['schemas'][schema_name][
'properties'][field])
def modify_references(self):
""" Modify the $ref names
:return:
"""
look_for = ["anyOf", "oneOf", "allOf"]
delete_schemas = []
for schema in self.output['schemas']:
if schema in self.name_mapping:
delete_schemas.append(schema)
else:
if 'properties' in self.output['schemas'][schema]:
for item in self.output['schemas'][schema]['properties']:
field = self.output['schemas'][schema]['properties'][item]
if '$ref' in field:
field_ref = field['$ref'].replace('#', '')
if field_ref in self.name_mapping:
self.output['schemas'][schema]['properties'][item]['$ref'] = \
self.name_mapping[field_ref] + '#'
for reference in look_for:
if reference in field:
sub_item_iterator = 0
for sub_item in field[reference]:
if '$ref' in sub_item:
field_ref = sub_item['$ref']
if field_ref in self.name_mapping:
self.output['schemas'][schema]['properties'][
reference][sub_item_iterator]['$ref'] = \
self.name_mapping[field_ref] + "#"
sub_item_iterator += 1
if 'items' in field:
if '$ref' in field['items']:
field_ref = field['items']['$ref'].replace('#', '')
if field_ref in self.name_mapping:
self.output['schemas'][
schema]['properties'][item]['items']['$ref'] = \
self.name_mapping[field_ref] + '#'
for reference in look_for:
if reference in field['items']:
sub_item_iterator = 0
for sub_item in field['items'][reference]:
if '$ref' in sub_item:
field_ref = sub_item['$ref'].replace('#', '')
if field_ref in self.name_mapping:
self.output[
'schemas'][schema][
'properties'][item][
'items'][reference][sub_item_iterator][
'$ref'] = self.name_mapping[field_ref] + "#"
sub_item_iterator += 1
for schema in delete_schemas:
del self.output['schemas'][schema]
change_names = {v: k for k, v in self.name_mapping.items()}
for context in self.output['contexts']:
new_field_base_name = process_schema_name(context)
if context in change_names:
old_schema_name = change_names[context]
old_field_base_name = process_schema_name(old_schema_name)
# set the new context field
self.output["contexts"][context][new_field_base_name] = \
copy.copy(self.content['network2'][
"contexts"][old_schema_name][old_field_base_name])
def save(self, base_url):
    """ Saves the merge to disk and replace "id" attribute with the given base url
    + schema name

    :param base_url: URL prefix used to rewrite every schema "id"
        (schema id becomes base_url + "schema/" + schema name).
    :return: None
    """
    schema_dir = os.path.join(self.output_dir, 'schema')
    context_dir = os.path.join(self.output_dir, 'context')
    # Create the output tree only where it is missing.
    for directory in (self.output_dir, schema_dir, context_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)
    for schema_name, schema in self.output["schemas"].items():
        schema["id"] = base_url + "schema/" + schema_name
        schema_file_name = os.path.join(schema_dir, schema_name)
        context_name = schema_name.replace("_schema.json", '_context.jsonld')
        context_file_name = os.path.join(context_dir, context_name)
        # 'with' closes the file even if the write raises; no explicit
        # close() is needed.
        with open(schema_file_name, "w") as schema_file:
            schema_file.write(json.dumps(schema, indent=4))
        # Not every schema has a matching context; write one only when
        # the merge produced it.
        if schema_name in self.output['contexts']:
            with open(context_file_name, "w") as context_file:
                context_file.write(json.dumps(
                    {"@context": self.output['contexts'][schema_name]},
                    indent=4))
def validate_output(self):
    """ Validates the output of the merge

    Every merged schema is checked against JSON Schema draft 4; any
    validation error message is appended to self.errors under the
    schema's name.

    :return: None
    """
    for schema_name, schema_body in self.output['schemas'].items():
        try:
            Draft4Validator.check_schema(schema_body)
        except Exception as err:
            self.errors.setdefault(schema_name, []).append(str(err))
|
<filename>photoshop/api/application.py
"""The Adobe Adobe Photoshop CC application object.
Which is the root of the object model and provides access to all other
objects. This object provides application-wide information,
such as application defaults and available fonts. It provides many important
methods, such as those for opening files and loading documents.
app = Application()
app.documents.add(800, 600, 72, "docRef")
"""
# Import built-in modules
import os
from pathlib import Path
import time
from typing import List
# Import local modules
from photoshop.api._core import Photoshop
from photoshop.api._document import Document
from photoshop.api._documents import Documents
from photoshop.api._measurement_log import MeasurementLog
from photoshop.api._notifiers import Notifiers
from photoshop.api._preferences import Preferences
from photoshop.api._text_fonts import TextFonts
from photoshop.api.enumerations import DialogModes
from photoshop.api.solid_color import SolidColor
class Application(Photoshop):
    """The root Adobe Photoshop CC application object.

    Provides access to application-wide information (defaults, fonts,
    preferences) and methods such as opening files and loading documents.

    Example:
        app = Application()
        app.documents.add(800, 600, 72, "docRef")
    """

    def __init__(self, version=None):
        """Connect to Photoshop.

        Args:
            version: Optional Photoshop version identifier forwarded to
                the underlying COM connection.
        """
        super().__init__(ps_version=version)

    @property
    def activeLayer(self):
        """The selected art layer.

        NOTE(review): forwards to ``app.ArtLayer`` — confirm this is the
        intended COM attribute (vs. ``app.activeDocument.activeLayer``).
        """
        return self.app.ArtLayer

    @property
    def layerSets(self):
        """The layer sets (groups) collection."""
        return self.app.LayerSets

    @property
    def activeDocument(self):
        """The frontmost documents.

        Setting this property is equivalent to clicking an
        open document in the Adobe Photoshop CC
        application to bring it to the front of the screen.
        """
        return Document(self.app.activeDocument)

    @activeDocument.setter
    def activeDocument(self, document):
        self.app.activeDocument = document

    @property
    def backgroundColor(self):
        """The default background color and color style for documents.

        Returns:
            .solid_color.SolidColor: The SolidColor instance.
        """
        return SolidColor(self.app.backgroundColor)

    @backgroundColor.setter
    def backgroundColor(self, color):
        """Sets the default background color and color style for documents.

        Args:
            color (.solid_color.SolidColor): The SolidColor instance.
        """
        self.app.backgroundColor = color

    @property
    def build(self):
        """str: The information about the application."""
        return self.app.build

    @property
    def colorSettings(self):
        """The name of the current color settings,
        as selected with Edit > Color Settings.
        """
        return self.app.colorSettings

    @colorSettings.setter
    def colorSettings(self, settings):
        """The name of the current color settings.

        Args:
            settings (str): Name of the color settings to select.
        """
        # Assignment goes through JavaScript rather than the COM attribute.
        self.doJavaScript(f'app.colorSettings="{settings}"')

    @property
    def currentTool(self):
        """str: The name of the currently selected tool."""
        return self.app.currentTool

    @currentTool.setter
    def currentTool(self, tool_name):
        """Sets the current tool for select.

        Args:
            tool_name (str): The name of the tool to select.
        """
        self.app.currentTool = tool_name

    @property
    def displayDialogs(self) -> DialogModes:
        """The dialog mode for the document, which indicates whether or not
        Photoshop displays dialogs when the script runs."""
        return DialogModes(self.app.displayDialogs)

    @displayDialogs.setter
    def displayDialogs(self, dialog_mode: DialogModes):
        """The dialog mode for the document, which indicates whether or not
        Photoshop displays dialogs when the script runs.
        """
        self.app.displayDialogs = dialog_mode

    @property
    def documents(self) -> Documents:
        """._documents.Documents: The Documents instance."""
        return Documents(self.app.documents)

    @property
    def fonts(self) -> TextFonts:
        """._text_fonts.TextFonts: The fonts installed on this system."""
        return TextFonts(self.app.fonts)

    @property
    def foregroundColor(self):
        """Get default foreground color.

        Used to paint, fill, and stroke selections.

        Returns:
            .solid_color.SolidColor: The SolidColor instance.
        """
        return SolidColor(parent=self.app.foregroundColor)

    @foregroundColor.setter
    def foregroundColor(self, color: SolidColor):
        """Set the `foregroundColor`.

        Args:
            color (.solid_color.SolidColor): The SolidColor instance.
        """
        self.app.foregroundColor = color

    @property
    def freeMemory(self) -> float:
        """The amount of unused memory available to Photoshop."""
        return self.app.freeMemory

    @property
    def locale(self) -> str:
        """The language locale of the application."""
        return self.app.locale

    @property
    def macintoshFileTypes(self) -> List[str]:
        """A list of the image file types Photoshop can open."""
        return self.app.macintoshFileTypes

    @property
    def measurementLog(self):
        """The log of measurements taken."""
        return MeasurementLog(self.app.measurementLog)

    @property
    def name(self) -> str:
        """str: The application's name."""
        return self.app.name

    @property
    def notifiers(self):
        """The notifiers currently configured (in the Scripts Events Manager
        menu in the application)."""
        return Notifiers(self.app.notifiers)

    @property
    def notifiersEnabled(self):
        """If true, notifiers are enabled."""
        return self.app.notifiersEnabled

    @notifiersEnabled.setter
    def notifiersEnabled(self, value):
        self.app.notifiersEnabled = value

    @property
    def parent(self):
        """The object’s container."""
        return self.app.parent

    @property
    def path(self):
        """Path: The full path to the location of the Photoshop application."""
        return Path(self.app.path)

    @property
    def playbackDisplayDialogs(self):
        """The dialog mode used when playing back recorded actions."""
        return self.doJavaScript("app.playbackDisplayDialogs")

    @property
    def playbackParameters(self):
        """Stores and retrieves parameters used as part of a recorded action."""
        return self.app.playbackParameters

    @playbackParameters.setter
    def playbackParameters(self, value):
        self.app.playbackParameters = value

    @property
    def preferences(self):
        """._preferences.Preferences: The application preferences."""
        return Preferences(self.app.preferences)

    @property
    def preferencesFolder(self):
        """Path: The folder holding the application preference files."""
        return Path(self.app.preferencesFolder)

    @property
    def recentFiles(self):
        """The files most recently opened in the application."""
        return self.app.recentFiles

    @property
    def scriptingBuildDate(self):
        """The build date of the scripting interface."""
        return self.app.scriptingBuildDate

    @property
    def scriptingVersion(self):
        """The version of the scripting interface."""
        return self.app.scriptingVersion

    @property
    def systemInformation(self):
        """System information, as reported by the application."""
        return self.app.systemInformation

    @property
    def version(self):
        """The application version."""
        return self.app.version

    @property
    def windowsFileTypes(self):
        """A list of the image file types Photoshop can open on Windows."""
        return self.app.windowsFileTypes

    # Methods.
    def batch(self, *args, **kwargs):
        """Runs the batch automation routine.

        Similar to the **File** > **Automate** > **Batch** command.
        """
        # Bug fix: previously called the misspelled (non-existent)
        # ``self.app.bath``, which raised at runtime.
        self.app.batch(*args, **kwargs)

    def beep(self):
        """Causes a "beep" sound."""
        return self.eval_javascript("app.beep()")

    def bringToFront(self):
        """Bring the Photoshop window to the front of the screen."""
        return self.eval_javascript("app.bringToFront()")

    def changeProgressText(self, text):
        """Changes the text that appears in the progress window."""
        self.eval_javascript(f"app.changeProgressText('{text}')")

    def charIDToTypeID(self, char_id):
        """Convert a four-character ID into a runtime type ID."""
        return self.app.charIDToTypeID(char_id)

    @staticmethod
    def compareWithNumbers(first, second):
        """Return True if *first* is greater than *second*."""
        return first > second

    def doAction(self, action, action_from):
        """Plays the specified action from the Actions palette."""
        self.app.doAction(action, action_from)
        return True

    def doForcedProgress(self, title, javascript):
        """Run *javascript* while showing a non-cancellable progress bar."""
        script = "app.doForcedProgress('{}', '{}')".format(
            title,
            javascript,
        )
        self.eval_javascript(script)
        # Ensure the script execute success.
        time.sleep(1)

    def doProgress(self, title, javascript):
        """Performs a task with a progress bar. Other progress APIs must be
        called periodically to update the progress bar and allow cancelling.

        Args:
            title (str): String to show in the progress window.
            javascript (str): JavaScriptString to execute.
        """
        script = "app.doProgress('{}', '{}')".format(
            title,
            javascript,
        )
        self.eval_javascript(script)
        # Ensure the script execute success.
        time.sleep(1)

    def doProgressSegmentTask(self, segmentLength, done, total, javascript):
        """Run *javascript* for one segment of the progress bar."""
        script = "app.doProgressSegmentTask({}, {}, {}, '{}');".format(
            segmentLength,
            done,
            total,
            javascript,
        )
        self.eval_javascript(script)
        # Ensure the script execute success.
        time.sleep(1)

    def doProgressSubTask(self, index, limit, javascript):
        """Run *javascript* as a sub-task of the current progress task."""
        script = "app.doProgressSubTask({}, {}, '{}');".format(
            index,
            limit,
            javascript,
        )
        self.eval_javascript(script)
        # Ensure the script execute success.
        time.sleep(1)

    def doProgressTask(self, index, javascript):
        """Sections off a portion of the unused progress bar for execution of
        a subtask. Returns false on cancel.
        """
        script = f"app.doProgressTask({index}, '{javascript}');"
        self.eval_javascript(script)
        # Ensure the script execute success.
        time.sleep(1)

    def eraseCustomOptions(self, key):
        """Removes the specified user objects from the Photoshop registry."""
        self.app.eraseCustomOptions(key)

    def executeAction(self, event_id, descriptor, display_dialogs=2):
        """Play an ActionManager event."""
        return self.app.executeAction(event_id, descriptor, display_dialogs)

    def executeActionGet(self, reference):
        """Obtain an ActionDescriptor for the given ActionReference."""
        return self.app.executeActionGet(reference)

    def featureEnabled(self, name):
        """Determines whether the feature
        specified by name is enabled.

        The following features are supported
        as values for name:
        "photoshop/extended"
        "photoshop/standard"
        "photoshop/trial
        """
        return self.app.featureEnabled(name)

    def getCustomOptions(self, key):
        """Retrieves user objects in the Photoshop registry for the ID with
        value key."""
        return self.app.getCustomOptions(key)

    def open(
        self,
        document_file_path,
        document_type: str = None,
        as_smart_object: bool = False,
    ) -> Document:
        """Open the specified document file.

        Args:
            document_file_path: Path of the file to open.
            document_type: Optional file-type hint.
            as_smart_object: Open as a smart object instead of a document.

        Returns:
            The opened Document wrapper, or the raw COM object when
            opening as a smart object.
        """
        document = self.app.open(document_file_path, document_type, as_smart_object)
        if not as_smart_object:
            return Document(document)
        return document

    def load(self, document_file_path):
        """Loads a support document."""
        self.app.load(document_file_path)
        return self.activeDocument

    def doJavaScript(self, javascript, Arguments=None, ExecutionMode=None):
        """Execute a JavaScript string inside Photoshop."""
        return self.app.doJavaScript(javascript, Arguments, ExecutionMode)

    def isQuicktimeAvailable(self) -> bool:
        """Whether QuickTime is available to Photoshop."""
        return self.app.isQuicktimeAvailable

    def openDialog(self):
        """Show the platform Open dialog."""
        return self.app.openDialog()

    def purge(self, target):
        """Purges one or more caches.

        Args:
            target:
                .e.g:
                    0: Clears all caches.
                    1: Clears the clipboard.
                    2: Deletes all history states from the History palette.
                    3: Clears the undo cache.

        Returns:

        """
        self.app.purge(target)

    def putCustomOptions(self, key, custom_object, persistent):
        """Store a user object in the Photoshop registry under *key*."""
        self.app.putCustomOptions(key, custom_object, persistent)

    def refresh(self):
        """Pauses the script while the application refreshes.

        Ues to slow down execution and show the results to the user as the
        script runs.
        Use carefully; your script runs much more slowly when using this
        method.
        """
        self.app.refresh()

    def refreshFonts(self):
        """Force the font list to get refreshed."""
        return self.eval_javascript("app.refreshFonts();")

    def runMenuItem(self, menu_id):
        """Run a menu item given the menu ID."""
        return self.eval_javascript(
            f"app.runMenuItem({menu_id})",
        )

    def showColorPicker(self):
        """Returns false if dialog is cancelled, true otherwise."""
        return self.eval_javascript("app.showColorPicker();")

    def stringIDToTypeID(self, string_id):
        """Convert a string ID into a runtime type ID."""
        return self.app.stringIDToTypeID(string_id)

    def togglePalettes(self):
        """Toggle palette visibility."""
        return self.doJavaScript("app.togglePalettes()")

    def toolSupportsBrushes(self, tool):
        """Whether the given tool supports brushes."""
        return self.app.toolSupportsBrushes(tool)

    def toolSupportsBrushPresets(self, tool):
        """Whether the given tool supports brush presets.

        NOTE(review): forwards to COM ``toolSupportsPresets`` — confirm
        this is the correct underlying attribute name.
        """
        return self.app.toolSupportsPresets(tool)

    @staticmethod
    def system(command):
        """Run *command* through the OS shell."""
        os.system(command)

    def typeIDToStringID(self, type_id):
        """Convert a runtime type ID into a string ID."""
        return self.app.typeIDToStringID(type_id)

    def typeIDToCharID(self, type_id):
        """Convert a runtime type ID into a four-character ID."""
        return self.app.typeIDToCharID(type_id)

    def updateProgress(self, done, total):
        """Update the progress bar started by a doProgress* call."""
        self.eval_javascript(f"app.updateProgress({done}, {total})")
|
#!/bin/python3
#Utilities for downloading and parsing Final Fantasy 14 Loadstone content
#Copyright <NAME> 2016 BSD 3 clause license
import requests
from bs4 import BeautifulSoup
import re
def loastone_login():
    """Print the URL of the Lodestone login page.

    The actual login must be performed in a browser; the resulting
    ``ldst_sess`` cookie value is what the other helpers need.
    """
    login_url = 'http://na.finalfantasyxiv.com/lodestone/account/login/'
    print(login_url)
#Get a page from the Loadstone
def get_loadstone_page(url,session_id):
    """Download a Lodestone page and return it parsed with BeautifulSoup.

    :param url: full Lodestone URL to fetch
    :param session_id: value of the ``ldst_sess`` login session cookie
    :return: BeautifulSoup object for the page
    :raises Exception: if the HTTP response status is not 200
    """
    # Bug fix: the old code also put 'domain' and 'path' entries in this
    # dict; requests treats a plain dict strictly as {name: value}, so they
    # were sent as literal cookies named "domain" and "path". Only the
    # session cookie itself is needed.
    cookies = dict(ldst_sess=session_id)
    raw_page = requests.get(url, cookies=cookies)
    if(raw_page.status_code != 200):
        raise Exception("Unable to download web page!")
    return BeautifulSoup(raw_page.text,'html.parser')
#Each item has a separate detail page that must be loaded to determine if it's HQ or not
def is_item_hq(raw_item,session_id):
    """Return True if the parsed item row *raw_item* is a high-quality item.

    Fetches the item's tooltip page and looks for the HQ icon image.

    :param raw_item: BeautifulSoup tag for one item row
    :param session_id: value of the ``ldst_sess`` login session cookie
    """
    # Bug fix: the original referenced an undefined name ``item`` here,
    # which raised NameError; the parameter is ``raw_item``.
    tooltip_url = 'http://na.finalfantasyxiv.com/' + raw_item.find('div', attrs={"class": 'item_txt'})['data-lazy_load_url']
    tooltip_page = get_loadstone_page(tooltip_url,session_id)
    return bool(tooltip_page.find("img", src = re.compile(r'http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')))
#Debug function to write some data to 'test.html'
def write_data(data):
    """Dump ``str(data)`` to 'test.html' for debugging.

    :param data: any object; its ``str()`` form is written verbatim
    """
    # 'with' guarantees the file is closed even if the write raises;
    # the original opened/closed the file manually and could leak it.
    with open('test.html', 'w') as out_file:
        out_file.write(str(data))
#Debug helper: fetch a Lodestone page and dump its pretty-printed HTML to 'test.html'
def write_loadstone_page(url,session_id):
    page = get_loadstone_page(url, session_id)
    write_data(page.prettify().encode('utf8'))
#Use this to convert the provided items into something useful
def list_items_table(items):
    """Render a list of item dicts as the rows of an HTML <table> string.

    Each dict must supply 'image', 'name', 'quantity', 'location' and
    'sub_location' keys.
    """
    row_template = '<tr><td><img src="{image}"></img></td><td>{name}</td><td>{quantity}</td><td>{location}</td><td>{sub_location}</td></tr>\n'
    rows = [row_template.format(**entry) for entry in items]
    return '<table>\n' + ''.join(rows) + '</table>\n'
#Get all items in the Free company chest (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_fc_items(fc_id,session_id):
    chest_url = 'http://na.finalfantasyxiv.com/lodestone/freecompany/'+str(fc_id)+'/chest/'
    page = get_loadstone_page(chest_url, session_id)
    # Item rows are the <tr> elements carrying a data-default_sort attribute.
    rows = page.find_all("tr", attrs={"data-default_sort": True})
    results = []
    for row in rows:
        results.append({
            'name': row.find("h2", attrs={"class": 'db-tooltip__item__name'}).text.strip(),
            'quantity': int(row['data-stack']),
            'image': row.find("img")['src'],
            'location': 'Company Chest',
            # The enclosing <tbody> id identifies which chest tab holds the item.
            'sub_location': row.find_parent('tbody')['id'],
        })
    return results
#Get all items in a retainers inventory (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_retainer_items(char_id,retainer_id,session_id):
    bag_url = 'http://na.finalfantasyxiv.com/lodestone/character/'+str(char_id)+'/retainer/'+retainer_id+'/baggage/'
    page = get_loadstone_page(bag_url, session_id)
    # The retainer's display name, used to label each item's location.
    retainer_name = page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
    # Item rows are the <tr> elements carrying a data-default_sort attribute.
    rows = page.find_all("tr", attrs={"data-default_sort": True})
    results = []
    for row in rows:
        results.append({
            'name': row.find("a", attrs={"class": 'highlight'}).text.strip(),
            'quantity': int(row['data-stack']),
            'image': row.find("img")['src'],
            'location': 'Retainer: ' + retainer_name,
            'sub_location': 'Inventory',
        })
    return results
#Get all items a retainer is selling (does not get number of crystals or gil)
#HQ Item handling is suspect
#Note: This may return already sold items:
# sale_inventory is supposed to filter those out, but I din't think it's working correctly
def get_retainer_selling(char_id,retainer_id,session_id):
    market_url = 'http://na.finalfantasyxiv.com/lodestone/character/'+str(char_id)+'/retainer/'+retainer_id+'/market/'
    page = get_loadstone_page(market_url, session_id)
    # The retainer's display name, used to label each item's location.
    retainer_name = page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
    # Only the currently-active tab holds the sale inventory.
    sale_body = page.find("div", attrs={"class": 'active'}).find('tbody')
    if not sale_body:
        # Nothing is up for sale.
        return []
    # Presence of the HQ icon image marks a high-quality listing.
    hq_icon = re.compile(r'http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')
    results = []
    for row in sale_body.find_all("tr"):
        results.append({
            'name': row.find("a", attrs={"class": 'highlight'}).text.strip(),
            'quantity': int(row.find("td", attrs={"class": 'even'}).text.strip()),
            'image': row.find("img")['src'],
            'location': 'Retainer: ' + retainer_name,
            'sub_location': 'Selling',
            'is_hq': bool(row.find("img", src=hq_icon)),
        })
    return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.