max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
src/leetcode_359_logger_rate_limiter.py | sungho-joo/leetcode2github | 0 | 6619451 | <reponame>sungho-joo/leetcode2github
# @l2g 359 python3
# [359] Logger Rate Limiter
# Difficulty: Easy
# https://leetcode.com/problems/logger-rate-limiter
#
# Design a logger system that receives a stream of messages along with their timestamps.
# Each unique message should only be printed at most every 10 seconds (i.e.
# a message printed at timestamp t will prevent other identical messages from being printed until timestamp t + 10).
# All messages will come in chronological order. Several messages may arrive at the same timestamp.
# Implement the Logger class:
#
# Logger() Initializes the logger object.
# bool shouldPrintMessage(int timestamp,
# string message) Returns true if the message should be printed in the given timestamp,
# otherwise returns false.
#
#
# Example 1:
#
# Input
# ["Logger","shouldPrintMessage","shouldPrintMessage","shouldPrintMessage","shouldPrintMessage",
# "shouldPrintMessage","shouldPrintMessage"]
# [[], [1, "foo"], [2, "bar"], [3, "foo"], [8, "bar"], [10, "foo"], [11, "foo"]]
# Output
# [null, true, true, false, false, false, true]
#
# Explanation
# Logger logger = new Logger();
# logger.shouldPrintMessage(1,"foo"); // return true,next allowed timestamp for "foo" is 1 + 10 = 11
# logger.shouldPrintMessage(2,"bar"); // return true,next allowed timestamp for "bar" is 2 + 10 = 12
# logger.shouldPrintMessage(3, "foo"); // 3 < 11, return false
# logger.shouldPrintMessage(8, "bar"); // 8 < 12, return false
# logger.shouldPrintMessage(10, "foo"); // 10 < 11, return false
# logger.shouldPrintMessage(11,"foo"); // 11 >= 11,return true,
# next allowed timestamp for "foo" is 11 + 10 = 21
#
#
# Constraints:
#
# 0 <= timestamp <= 10^9
# Every timestamp will be passed in non-decreasing order (chronological order).
# 1 <= message.length <= 30
# At most 10^4 calls will be made to shouldPrintMessage.
#
#
class Logger:
def __init__(self):
self.message_printed_time = dict()
def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
if message not in self.message_printed_time:
self.message_printed_time[message] = timestamp
return True
else:
if (timestamp - self.message_printed_time[message]) >= 10:
self.message_printed_time[message] = timestamp
return True
else:
return False
# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_359.py")])
| # @l2g 359 python3
# [359] Logger Rate Limiter
# Difficulty: Easy
# https://leetcode.com/problems/logger-rate-limiter
#
# Design a logger system that receives a stream of messages along with their timestamps.
# Each unique message should only be printed at most every 10 seconds (i.e.
# a message printed at timestamp t will prevent other identical messages from being printed until timestamp t + 10).
# All messages will come in chronological order. Several messages may arrive at the same timestamp.
# Implement the Logger class:
#
# Logger() Initializes the logger object.
# bool shouldPrintMessage(int timestamp,
# string message) Returns true if the message should be printed in the given timestamp,
# otherwise returns false.
#
#
# Example 1:
#
# Input
# ["Logger","shouldPrintMessage","shouldPrintMessage","shouldPrintMessage","shouldPrintMessage",
# "shouldPrintMessage","shouldPrintMessage"]
# [[], [1, "foo"], [2, "bar"], [3, "foo"], [8, "bar"], [10, "foo"], [11, "foo"]]
# Output
# [null, true, true, false, false, false, true]
#
# Explanation
# Logger logger = new Logger();
# logger.shouldPrintMessage(1,"foo"); // return true,next allowed timestamp for "foo" is 1 + 10 = 11
# logger.shouldPrintMessage(2,"bar"); // return true,next allowed timestamp for "bar" is 2 + 10 = 12
# logger.shouldPrintMessage(3, "foo"); // 3 < 11, return false
# logger.shouldPrintMessage(8, "bar"); // 8 < 12, return false
# logger.shouldPrintMessage(10, "foo"); // 10 < 11, return false
# logger.shouldPrintMessage(11,"foo"); // 11 >= 11,return true,
# next allowed timestamp for "foo" is 11 + 10 = 21
#
#
# Constraints:
#
# 0 <= timestamp <= 10^9
# Every timestamp will be passed in non-decreasing order (chronological order).
# 1 <= message.length <= 30
# At most 10^4 calls will be made to shouldPrintMessage.
#
#
class Logger:
def __init__(self):
self.message_printed_time = dict()
def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
if message not in self.message_printed_time:
self.message_printed_time[message] = timestamp
return True
else:
if (timestamp - self.message_printed_time[message]) >= 10:
self.message_printed_time[message] = timestamp
return True
else:
return False
# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_359.py")]) | en | 0.610645 | # @l2g 359 python3 # [359] Logger Rate Limiter # Difficulty: Easy # https://leetcode.com/problems/logger-rate-limiter # # Design a logger system that receives a stream of messages along with their timestamps. # Each unique message should only be printed at most every 10 seconds (i.e. # a message printed at timestamp t will prevent other identical messages from being printed until timestamp t + 10). # All messages will come in chronological order. Several messages may arrive at the same timestamp. # Implement the Logger class: # # Logger() Initializes the logger object. # bool shouldPrintMessage(int timestamp, # string message) Returns true if the message should be printed in the given timestamp, # otherwise returns false. # # # Example 1: # # Input # ["Logger","shouldPrintMessage","shouldPrintMessage","shouldPrintMessage","shouldPrintMessage", # "shouldPrintMessage","shouldPrintMessage"] # [[], [1, "foo"], [2, "bar"], [3, "foo"], [8, "bar"], [10, "foo"], [11, "foo"]] # Output # [null, true, true, false, false, false, true] # # Explanation # Logger logger = new Logger(); # logger.shouldPrintMessage(1,"foo"); // return true,next allowed timestamp for "foo" is 1 + 10 = 11 # logger.shouldPrintMessage(2,"bar"); // return true,next allowed timestamp for "bar" is 2 + 10 = 12 # logger.shouldPrintMessage(3, "foo"); // 3 < 11, return false # logger.shouldPrintMessage(8, "bar"); // 8 < 12, return false # logger.shouldPrintMessage(10, "foo"); // 10 < 11, return false # logger.shouldPrintMessage(11,"foo"); // 11 >= 11,return true, # next allowed timestamp for "foo" is 11 + 10 = 21 # # # Constraints: # # 0 <= timestamp <= 10^9 # Every timestamp will be passed in non-decreasing order (chronological order). # 1 <= message.length <= 30 # At most 10^4 calls will be made to shouldPrintMessage. 
# # # Your Logger object will be instantiated and called as such: # obj = Logger() # param_1 = obj.shouldPrintMessage(timestamp,message) | 3.366982 | 3 |
src/spn/structure/leaves/parametric/MPE.py | AmurG/SPFlow | 0 | 6619452 | """
Created on July 02, 2018
@author: <NAME>
"""
from scipy.stats import multivariate_normal as mn
from spn.algorithms.MPE import get_mpe_top_down_leaf, add_node_mpe
from spn.structure.leaves.parametric.Inference import continuous_log_likelihood, gamma_log_likelihood, \
discrete_log_likelihood, categorical_log_likelihood, categorical_dictionary_log_likelihood
from spn.structure.leaves.parametric.Parametric import (
Gaussian,
Gamma,
LogNormal,
Poisson,
Bernoulli,
Categorical,
Geometric,
Exponential,
CategoricalDictionary,
NegativeBinomial,
Hypergeometric,
MultivariateGaussian
)
import numpy as np
import logging
logger = logging.getLogger(__name__)
def get_parametric_bottom_up_log_ll(ll_func, mode_func):
def param_bu_fn(node, data=None, dtype=np.float64):
probs = ll_func(node, data=data, dtype=dtype)
mpe_ids = np.isnan(data[:, node.scope[0]])
mode_data = np.ones((1, data.shape[1])) * mode_func(node)
probs[mpe_ids] = ll_func(node, data=mode_data, dtype=dtype)
return probs
return param_bu_fn
def get_parametric_top_down_ll(mode_func):
def param_td_fn(node, input_vals, data=None, lls_per_node=None):
get_mpe_top_down_leaf(
node,
input_vals,
data=data,
mode=mode_func(node))
return param_td_fn
def add_parametric_mpe_support():
def gaussian_mode(node):
return node.mean
add_node_mpe(
Gaussian,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, gaussian_mode),
get_parametric_top_down_ll(gaussian_mode),
)
def gamma_mode(node):
return (node.alpha - 1) / node.beta
add_node_mpe(
Gamma, get_parametric_bottom_up_log_ll(gamma_log_likelihood, gamma_mode), get_parametric_top_down_ll(gamma_mode)
)
def lognormal_mode(node):
return np.exp(node.mean - node.variance)
add_node_mpe(
LogNormal,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, lognormal_mode),
get_parametric_top_down_ll(lognormal_mode),
)
def poisson_mode(node):
return np.floor(node.mean)
add_node_mpe(
Poisson,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, poisson_mode),
get_parametric_top_down_ll(poisson_mode),
)
def bernoulli_mode(node):
if node.p > 0.5:
return 1
else:
return 0
add_node_mpe(
Bernoulli,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, bernoulli_mode),
get_parametric_top_down_ll(bernoulli_mode),
)
def categorical_mode(node):
return np.argmax(node.p)
add_node_mpe(
Categorical,
get_parametric_bottom_up_log_ll(categorical_log_likelihood, categorical_mode),
get_parametric_top_down_ll(categorical_mode),
)
def geometric_mode(node):
return 1
add_node_mpe(
Geometric,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, geometric_mode),
get_parametric_top_down_ll(geometric_mode),
)
def negative_binomial_mode(node):
if node.n <= 1:
return 0
else:
return np.floor(node.p * (node.n - 1) / (1 - node.p))
add_node_mpe(
NegativeBinomial,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, negative_binomial_mode),
get_parametric_top_down_ll(negative_binomial_mode),
)
def exponential_mode(node):
return 0
add_node_mpe(
Exponential,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, exponential_mode),
get_parametric_top_down_ll(exponential_mode),
)
def hypergeometric_mode(node):
return np.floor((node.n + 1) * (node.K + 1 / (node.N + 2)))
add_node_mpe(
Hypergeometric,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, hypergeometric_mode),
get_parametric_top_down_ll(hypergeometric_mode),
)
def categoricaldict_mode(node):
return node.params.keys()[np.argmax(node.params.values())]
add_node_mpe(
CategoricalDictionary,
get_parametric_bottom_up_log_ll(categorical_dictionary_log_likelihood, categoricaldict_mode),
get_parametric_top_down_ll(categoricaldict_mode),
)
##Compute the conditional distribution for a multivariate Gaussian when some entries are nan i.e. unseen##
def makeconditional(mean, cov):
def conditionalmodemvg(vec):
activeset = np.isnan(vec)
totalnans = np.sum(activeset)
if(totalnans == 0):
return mn.pdf(vec, mean, cov)
if(totalnans == (len(mean))):
return mn.pdf(mean, mean, cov)
cov1 = cov[activeset, :]
cov2 = cov[~activeset, :]
cov11, cov12 = cov1[:, activeset], cov1[:, ~activeset]
cov21, cov22 = cov2[:, activeset], cov2[:, ~activeset]
temp = np.matmul(cov12, np.linalg.inv(cov22))
schur = cov11 - np.matmul(temp, cov21)
return 1. / (np.sqrt(2 * 3.14 * np.linalg.det(schur)))
return conditionalmodemvg
##Infer the conditional mean when some entries are seen##
def conditionalmean(mean, cov):
def infercondnl(dvec):
for i in range(0, len(dvec)):
activeset = np.isnan(dvec[i])
totalnans = np.sum(activeset)
if(totalnans == 0):
continue
if(totalnans == (len(mean))):
dvec[i] = mean
else:
cov1 = cov[activeset, :]
cov2 = cov[~activeset, :]
cov11, cov12 = cov1[:, activeset], cov1[:, ~activeset]
cov21, cov22 = cov2[:, activeset], cov2[:, ~activeset]
mat = np.matmul(cov12, np.linalg.inv(cov22))
arr = dvec[i]
arr[activeset] = mean[activeset] + \
np.matmul(mat, (arr[~activeset] - mean[~activeset]))
return dvec
return infercondnl
def mvg_bu_ll(node, data, dtype=np.float64):
probs = np.ones((data.shape[0], 1))
effdat = data[:, node.scope]
for i in range(0, len(effdat)):
lambdacond = makeconditional(
np.asarray(
node.mean), np.asarray(
node.sigma))
probs[i] = lambdacond(effdat[i])
return probs
def mvg_td(
node,
input_vals,
data=None,
lls_per_node=None,
dtype=np.float64):
input_vals = input_vals[0]
if len(input_vals) == 0:
return None
temp = data[input_vals, :]
checksum = np.sum(temp[:, node.scope], axis=-1)
indices = np.isnan(checksum)
createcondmean = conditionalmean(
np.asarray(
node.mean), np.asarray(
node.sigma))
temp = data[input_vals[indices], :]
temp[:, node.scope] = createcondmean(temp[:, node.scope])
data[input_vals[indices], :] = temp
return
add_node_mpe(MultivariateGaussian, mvg_bu_ll, mvg_td)
| """
Created on July 02, 2018
@author: <NAME>
"""
from scipy.stats import multivariate_normal as mn
from spn.algorithms.MPE import get_mpe_top_down_leaf, add_node_mpe
from spn.structure.leaves.parametric.Inference import continuous_log_likelihood, gamma_log_likelihood, \
discrete_log_likelihood, categorical_log_likelihood, categorical_dictionary_log_likelihood
from spn.structure.leaves.parametric.Parametric import (
Gaussian,
Gamma,
LogNormal,
Poisson,
Bernoulli,
Categorical,
Geometric,
Exponential,
CategoricalDictionary,
NegativeBinomial,
Hypergeometric,
MultivariateGaussian
)
import numpy as np
import logging
logger = logging.getLogger(__name__)
def get_parametric_bottom_up_log_ll(ll_func, mode_func):
def param_bu_fn(node, data=None, dtype=np.float64):
probs = ll_func(node, data=data, dtype=dtype)
mpe_ids = np.isnan(data[:, node.scope[0]])
mode_data = np.ones((1, data.shape[1])) * mode_func(node)
probs[mpe_ids] = ll_func(node, data=mode_data, dtype=dtype)
return probs
return param_bu_fn
def get_parametric_top_down_ll(mode_func):
def param_td_fn(node, input_vals, data=None, lls_per_node=None):
get_mpe_top_down_leaf(
node,
input_vals,
data=data,
mode=mode_func(node))
return param_td_fn
def add_parametric_mpe_support():
def gaussian_mode(node):
return node.mean
add_node_mpe(
Gaussian,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, gaussian_mode),
get_parametric_top_down_ll(gaussian_mode),
)
def gamma_mode(node):
return (node.alpha - 1) / node.beta
add_node_mpe(
Gamma, get_parametric_bottom_up_log_ll(gamma_log_likelihood, gamma_mode), get_parametric_top_down_ll(gamma_mode)
)
def lognormal_mode(node):
return np.exp(node.mean - node.variance)
add_node_mpe(
LogNormal,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, lognormal_mode),
get_parametric_top_down_ll(lognormal_mode),
)
def poisson_mode(node):
return np.floor(node.mean)
add_node_mpe(
Poisson,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, poisson_mode),
get_parametric_top_down_ll(poisson_mode),
)
def bernoulli_mode(node):
if node.p > 0.5:
return 1
else:
return 0
add_node_mpe(
Bernoulli,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, bernoulli_mode),
get_parametric_top_down_ll(bernoulli_mode),
)
def categorical_mode(node):
return np.argmax(node.p)
add_node_mpe(
Categorical,
get_parametric_bottom_up_log_ll(categorical_log_likelihood, categorical_mode),
get_parametric_top_down_ll(categorical_mode),
)
def geometric_mode(node):
return 1
add_node_mpe(
Geometric,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, geometric_mode),
get_parametric_top_down_ll(geometric_mode),
)
def negative_binomial_mode(node):
if node.n <= 1:
return 0
else:
return np.floor(node.p * (node.n - 1) / (1 - node.p))
add_node_mpe(
NegativeBinomial,
get_parametric_bottom_up_log_ll(discrete_log_likelihood, negative_binomial_mode),
get_parametric_top_down_ll(negative_binomial_mode),
)
def exponential_mode(node):
return 0
add_node_mpe(
Exponential,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, exponential_mode),
get_parametric_top_down_ll(exponential_mode),
)
def hypergeometric_mode(node):
return np.floor((node.n + 1) * (node.K + 1 / (node.N + 2)))
add_node_mpe(
Hypergeometric,
get_parametric_bottom_up_log_ll(continuous_log_likelihood, hypergeometric_mode),
get_parametric_top_down_ll(hypergeometric_mode),
)
def categoricaldict_mode(node):
return node.params.keys()[np.argmax(node.params.values())]
add_node_mpe(
CategoricalDictionary,
get_parametric_bottom_up_log_ll(categorical_dictionary_log_likelihood, categoricaldict_mode),
get_parametric_top_down_ll(categoricaldict_mode),
)
##Compute the conditional distribution for a multivariate Gaussian when some entries are nan i.e. unseen##
def makeconditional(mean, cov):
def conditionalmodemvg(vec):
activeset = np.isnan(vec)
totalnans = np.sum(activeset)
if(totalnans == 0):
return mn.pdf(vec, mean, cov)
if(totalnans == (len(mean))):
return mn.pdf(mean, mean, cov)
cov1 = cov[activeset, :]
cov2 = cov[~activeset, :]
cov11, cov12 = cov1[:, activeset], cov1[:, ~activeset]
cov21, cov22 = cov2[:, activeset], cov2[:, ~activeset]
temp = np.matmul(cov12, np.linalg.inv(cov22))
schur = cov11 - np.matmul(temp, cov21)
return 1. / (np.sqrt(2 * 3.14 * np.linalg.det(schur)))
return conditionalmodemvg
##Infer the conditional mean when some entries are seen##
def conditionalmean(mean, cov):
def infercondnl(dvec):
for i in range(0, len(dvec)):
activeset = np.isnan(dvec[i])
totalnans = np.sum(activeset)
if(totalnans == 0):
continue
if(totalnans == (len(mean))):
dvec[i] = mean
else:
cov1 = cov[activeset, :]
cov2 = cov[~activeset, :]
cov11, cov12 = cov1[:, activeset], cov1[:, ~activeset]
cov21, cov22 = cov2[:, activeset], cov2[:, ~activeset]
mat = np.matmul(cov12, np.linalg.inv(cov22))
arr = dvec[i]
arr[activeset] = mean[activeset] + \
np.matmul(mat, (arr[~activeset] - mean[~activeset]))
return dvec
return infercondnl
def mvg_bu_ll(node, data, dtype=np.float64):
probs = np.ones((data.shape[0], 1))
effdat = data[:, node.scope]
for i in range(0, len(effdat)):
lambdacond = makeconditional(
np.asarray(
node.mean), np.asarray(
node.sigma))
probs[i] = lambdacond(effdat[i])
return probs
def mvg_td(
node,
input_vals,
data=None,
lls_per_node=None,
dtype=np.float64):
input_vals = input_vals[0]
if len(input_vals) == 0:
return None
temp = data[input_vals, :]
checksum = np.sum(temp[:, node.scope], axis=-1)
indices = np.isnan(checksum)
createcondmean = conditionalmean(
np.asarray(
node.mean), np.asarray(
node.sigma))
temp = data[input_vals[indices], :]
temp[:, node.scope] = createcondmean(temp[:, node.scope])
data[input_vals[indices], :] = temp
return
add_node_mpe(MultivariateGaussian, mvg_bu_ll, mvg_td)
| en | 0.852291 | Created on July 02, 2018 @author: <NAME> ##Compute the conditional distribution for a multivariate Gaussian when some entries are nan i.e. unseen## ##Infer the conditional mean when some entries are seen## | 1.958974 | 2 |
crawler/html.py | dbadrian/paradise_lost_crawler | 0 | 6619453 | import json
def wrap_inner_html(driver, el, front, back):
old_text_raw = el.get_attribute("innerHTML")
new_text = front + old_text_raw + back
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(new_text)};", el)
def append_to_inner_html(driver, el, text):
old_text_raw = el.get_attribute("innerHTML")
new_text = old_text_raw + text
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(new_text)};", el)
def replace_inner_html(driver, el, text):
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(text)};", el)
def modify_inner_html(driver, el, op):
old_text_raw = el.get_attribute("innerHTML")
new_text = op(old_text_raw)
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(new_text)};", el)
| import json
def wrap_inner_html(driver, el, front, back):
old_text_raw = el.get_attribute("innerHTML")
new_text = front + old_text_raw + back
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(new_text)};", el)
def append_to_inner_html(driver, el, text):
old_text_raw = el.get_attribute("innerHTML")
new_text = old_text_raw + text
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(new_text)};", el)
def replace_inner_html(driver, el, text):
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(text)};", el)
def modify_inner_html(driver, el, op):
old_text_raw = el.get_attribute("innerHTML")
new_text = op(old_text_raw)
driver.execute_script(f"arguments[0].innerHTML = {json.dumps(new_text)};", el)
| none | 1 | 2.991234 | 3 | |
app/main.py | acutaia/IPT-anonymizer | 0 | 6619454 | """
App main entry point
:author: <NAME>
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Third Party
from fastapi import FastAPI
# Internal
from .db.postgresql import get_database
from .internals.logger import get_logger
from .routers import user_feed, iot
# --------------------------------------------------------------------------------------------
# Instantiate
database = get_database()
app = FastAPI(redoc_url=None, openapi_url=None)
# Include routers
app.include_router(user_feed.router)
app.include_router(iot.router)
# Configure logger
@app.on_event("startup")
async def startup_logger_and_sessions():
get_logger()
await database.connect()
# Shutdown logger
@app.on_event("shutdown")
async def shutdown_logger_and_sessions():
logger = get_logger()
await database.disconnect()
await logger.shutdown()
| """
App main entry point
:author: <NAME>
:copyright: Copyright 2021, LINKS Foundation
:version: 1.0.0
..
Copyright 2021 LINKS Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Third Party
from fastapi import FastAPI
# Internal
from .db.postgresql import get_database
from .internals.logger import get_logger
from .routers import user_feed, iot
# --------------------------------------------------------------------------------------------
# Instantiate
database = get_database()
app = FastAPI(redoc_url=None, openapi_url=None)
# Include routers
app.include_router(user_feed.router)
app.include_router(iot.router)
# Configure logger
@app.on_event("startup")
async def startup_logger_and_sessions():
get_logger()
await database.connect()
# Shutdown logger
@app.on_event("shutdown")
async def shutdown_logger_and_sessions():
logger = get_logger()
await database.disconnect()
await logger.shutdown()
| en | 0.771201 | App main entry point :author: <NAME> :copyright: Copyright 2021, LINKS Foundation :version: 1.0.0 .. Copyright 2021 LINKS Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Third Party # Internal # -------------------------------------------------------------------------------------------- # Instantiate # Include routers # Configure logger # Shutdown logger | 1.858348 | 2 |
longclaw/orders/api.py | MakeCollective/longclaw | 0 | 6619455 | from rest_framework.decorators import action
from rest_framework import permissions, status, viewsets, filters
from rest_framework.response import Response
from rest_framework.pagination import LimitOffsetPagination
from longclaw.orders.models import Order
from longclaw.orders.serializers import OrderSerializer
class OrderViewSet(viewsets.ModelViewSet):
serializer_class = OrderSerializer
permission_classes = [permissions.IsAuthenticated]
queryset = Order.objects.all()
pagination_class = LimitOffsetPagination
filter_backends = [filters.SearchFilter]
search_fields = [
'=id', 'email',
'shipping_address__name', 'shipping_address__city',
]
@action(detail=True, methods=['post'])
def refund_order(self, request, pk):
"""Refund the order specified by the pk
"""
order = Order.objects.get(id=pk)
order.refund()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['post'])
def fulfill_order(self, request, pk):
"""Mark the order specified by pk as fulfilled
"""
order = Order.objects.get(id=pk)
order.fulfill()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['post'])
def unfulfill_order(self, request, pk):
"""Unmark the order specified by pk as fulfilled
"""
order = Order.objects.get(id=pk)
order.unfulfill()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=False, methods=['get'])
def order_statuses(self, request):
return Response({value: text for value, text in Order.ORDER_STATUSES}, status=200)
| from rest_framework.decorators import action
from rest_framework import permissions, status, viewsets, filters
from rest_framework.response import Response
from rest_framework.pagination import LimitOffsetPagination
from longclaw.orders.models import Order
from longclaw.orders.serializers import OrderSerializer
class OrderViewSet(viewsets.ModelViewSet):
serializer_class = OrderSerializer
permission_classes = [permissions.IsAuthenticated]
queryset = Order.objects.all()
pagination_class = LimitOffsetPagination
filter_backends = [filters.SearchFilter]
search_fields = [
'=id', 'email',
'shipping_address__name', 'shipping_address__city',
]
@action(detail=True, methods=['post'])
def refund_order(self, request, pk):
"""Refund the order specified by the pk
"""
order = Order.objects.get(id=pk)
order.refund()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['post'])
def fulfill_order(self, request, pk):
"""Mark the order specified by pk as fulfilled
"""
order = Order.objects.get(id=pk)
order.fulfill()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['post'])
def unfulfill_order(self, request, pk):
"""Unmark the order specified by pk as fulfilled
"""
order = Order.objects.get(id=pk)
order.unfulfill()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=False, methods=['get'])
def order_statuses(self, request):
return Response({value: text for value, text in Order.ORDER_STATUSES}, status=200)
| en | 0.878316 | Refund the order specified by the pk Mark the order specified by pk as fulfilled Unmark the order specified by pk as fulfilled | 2.115149 | 2 |
conformalmapping/helpers.py | TorbenFricke/cmtoolkit | 16 | 6619456 | <filename>conformalmapping/helpers.py
import functools
import numpy as np
def suppress_warnings(f):
"""Function decorator to prevent numpy raising warnings
"""
@functools.wraps(f)
def impl(*args, **kwargs):
oldsettings = {}
try:
oldsettings = np.seterr(all='ignore')
return f(*args, **kwargs)
finally:
np.seterr(**oldsettings)
return impl
def eps(z=1):
"""Wrapper around spacing that works for complex numbers
"""
zre = np.abs(np.real(z))
zim = np.abs(np.imag(z))
return np.spacing(np.max([zre, zim]))
def flipud(a):
return a[::-1]
| <filename>conformalmapping/helpers.py
import functools
import numpy as np
def suppress_warnings(f):
"""Function decorator to prevent numpy raising warnings
"""
@functools.wraps(f)
def impl(*args, **kwargs):
oldsettings = {}
try:
oldsettings = np.seterr(all='ignore')
return f(*args, **kwargs)
finally:
np.seterr(**oldsettings)
return impl
def eps(z=1):
"""Wrapper around spacing that works for complex numbers
"""
zre = np.abs(np.real(z))
zim = np.abs(np.imag(z))
return np.spacing(np.max([zre, zim]))
def flipud(a):
return a[::-1]
| en | 0.796545 | Function decorator to prevent numpy raising warnings Wrapper around spacing that works for complex numbers | 2.473123 | 2 |
sdk/python/pulumi_aws_native/amplify/branch.py | AaronFriel/pulumi-aws-native | 29 | 6619457 | <filename>sdk/python/pulumi_aws_native/amplify/branch.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BranchArgs', 'Branch']
@pulumi.input_type
class BranchArgs:
def __init__(__self__, *,
app_id: pulumi.Input[str],
basic_auth_config: Optional[pulumi.Input['BranchBasicAuthConfigArgs']] = None,
branch_name: Optional[pulumi.Input[str]] = None,
build_spec: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input['BranchEnvironmentVariableArgs']]]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input['BranchStage']] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['BranchTagArgs']]]] = None):
"""
The set of arguments for constructing a Branch resource.
"""
pulumi.set(__self__, "app_id", app_id)
if basic_auth_config is not None:
pulumi.set(__self__, "basic_auth_config", basic_auth_config)
if branch_name is not None:
pulumi.set(__self__, "branch_name", branch_name)
if build_spec is not None:
pulumi.set(__self__, "build_spec", build_spec)
if description is not None:
pulumi.set(__self__, "description", description)
if enable_auto_build is not None:
pulumi.set(__self__, "enable_auto_build", enable_auto_build)
if enable_performance_mode is not None:
pulumi.set(__self__, "enable_performance_mode", enable_performance_mode)
if enable_pull_request_preview is not None:
pulumi.set(__self__, "enable_pull_request_preview", enable_pull_request_preview)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if pull_request_environment_name is not None:
pulumi.set(__self__, "pull_request_environment_name", pull_request_environment_name)
if stage is not None:
pulumi.set(__self__, "stage", stage)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="basicAuthConfig")
def basic_auth_config(self) -> Optional[pulumi.Input['BranchBasicAuthConfigArgs']]:
return pulumi.get(self, "basic_auth_config")
@basic_auth_config.setter
def basic_auth_config(self, value: Optional[pulumi.Input['BranchBasicAuthConfigArgs']]):
pulumi.set(self, "basic_auth_config", value)
    @property
    @pulumi.getter(name="branchName")
    def branch_name(self) -> Optional[pulumi.Input[str]]:
        """Optional input mapped to wire name ``branchName``."""
        return pulumi.get(self, "branch_name")

    @branch_name.setter
    def branch_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "branch_name", value)
    @property
    @pulumi.getter(name="buildSpec")
    def build_spec(self) -> Optional[pulumi.Input[str]]:
        """Optional input mapped to wire name ``buildSpec``."""
        return pulumi.get(self, "build_spec")

    @build_spec.setter
    def build_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "build_spec", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Optional ``description`` input (wire name equals the Python name)."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="enableAutoBuild")
    def enable_auto_build(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean input mapped to wire name ``enableAutoBuild``."""
        return pulumi.get(self, "enable_auto_build")

    @enable_auto_build.setter
    def enable_auto_build(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_auto_build", value)
    @property
    @pulumi.getter(name="enablePerformanceMode")
    def enable_performance_mode(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean input mapped to wire name ``enablePerformanceMode``."""
        return pulumi.get(self, "enable_performance_mode")

    @enable_performance_mode.setter
    def enable_performance_mode(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_performance_mode", value)
    @property
    @pulumi.getter(name="enablePullRequestPreview")
    def enable_pull_request_preview(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean input mapped to wire name ``enablePullRequestPreview``."""
        return pulumi.get(self, "enable_pull_request_preview")

    @enable_pull_request_preview.setter
    def enable_pull_request_preview(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_pull_request_preview", value)
    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BranchEnvironmentVariableArgs']]]]:
        """Optional list input mapped to wire name ``environmentVariables``."""
        return pulumi.get(self, "environment_variables")

    @environment_variables.setter
    def environment_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BranchEnvironmentVariableArgs']]]]):
        pulumi.set(self, "environment_variables", value)
    @property
    @pulumi.getter(name="pullRequestEnvironmentName")
    def pull_request_environment_name(self) -> Optional[pulumi.Input[str]]:
        """Optional input mapped to wire name ``pullRequestEnvironmentName``."""
        return pulumi.get(self, "pull_request_environment_name")

    @pull_request_environment_name.setter
    def pull_request_environment_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pull_request_environment_name", value)
    @property
    @pulumi.getter
    def stage(self) -> Optional[pulumi.Input['BranchStage']]:
        """Optional ``stage`` input; value is a ``BranchStage`` enum member."""
        return pulumi.get(self, "stage")

    @stage.setter
    def stage(self, value: Optional[pulumi.Input['BranchStage']]):
        pulumi.set(self, "stage", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BranchTagArgs']]]]:
        """Optional ``tags`` input: a sequence of ``BranchTagArgs``."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BranchTagArgs']]]]):
        pulumi.set(self, "tags", value)
class Branch(pulumi.CustomResource):
    """A branch within an Amplify app, registered as ``aws-native:amplify:Branch``
    (see the type token passed to ``super().__init__`` in ``_internal_init``)."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 app_id: Optional[pulumi.Input[str]] = None,
                 basic_auth_config: Optional[pulumi.Input[pulumi.InputType['BranchBasicAuthConfigArgs']]] = None,
                 branch_name: Optional[pulumi.Input[str]] = None,
                 build_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_auto_build: Optional[pulumi.Input[bool]] = None,
                 enable_performance_mode: Optional[pulumi.Input[bool]] = None,
                 enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
                 environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchEnvironmentVariableArgs']]]]] = None,
                 pull_request_environment_name: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input['BranchStage']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchTagArgs']]]]] = None,
                 __props__=None):
        """
        The AWS::Amplify::Branch resource creates a new branch within an app.

        Overload taking each resource property as an individual keyword
        argument. Only ``app_id`` is effectively required (enforced at
        runtime in ``_internal_init``).

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BranchArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The AWS::Amplify::Branch resource creates a new branch within an app.

        Overload taking a pre-built :class:`BranchArgs` bundle.

        :param str resource_name: The name of the resource.
        :param BranchArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed overloads above: detect whether
        # the caller supplied a BranchArgs bundle or loose keyword arguments
        # and funnel both shapes into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(BranchArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 app_id: Optional[pulumi.Input[str]] = None,
                 basic_auth_config: Optional[pulumi.Input[pulumi.InputType['BranchBasicAuthConfigArgs']]] = None,
                 branch_name: Optional[pulumi.Input[str]] = None,
                 build_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_auto_build: Optional[pulumi.Input[bool]] = None,
                 enable_performance_mode: Optional[pulumi.Input[bool]] = None,
                 enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
                 environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchEnvironmentVariableArgs']]]]] = None,
                 pull_request_environment_name: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input['BranchStage']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchTagArgs']]]]] = None,
                 __props__=None):
        """Shared constructor body used by both public ``__init__`` overloads."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id means we are creating a new resource, so build the
            # property bag from the supplied arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BranchArgs.__new__(BranchArgs)
            if app_id is None and not opts.urn:
                raise TypeError("Missing required property 'app_id'")
            __props__.__dict__["app_id"] = app_id
            __props__.__dict__["basic_auth_config"] = basic_auth_config
            __props__.__dict__["branch_name"] = branch_name
            __props__.__dict__["build_spec"] = build_spec
            __props__.__dict__["description"] = description
            __props__.__dict__["enable_auto_build"] = enable_auto_build
            __props__.__dict__["enable_performance_mode"] = enable_performance_mode
            __props__.__dict__["enable_pull_request_preview"] = enable_pull_request_preview
            __props__.__dict__["environment_variables"] = environment_variables
            __props__.__dict__["pull_request_environment_name"] = pull_request_environment_name
            __props__.__dict__["stage"] = stage
            __props__.__dict__["tags"] = tags
            # 'arn' is output-only: it is never accepted as an input here.
            __props__.__dict__["arn"] = None
        super(Branch, __self__).__init__(
            'aws-native:amplify:Branch',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Branch':
        """
        Get an existing Branch resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Every property starts as None; constructing Branch with opts.id set
        # performs a lookup of the existing resource's state instead of a create.
        __props__ = BranchArgs.__new__(BranchArgs)
        __props__.__dict__["app_id"] = None
        __props__.__dict__["arn"] = None
        __props__.__dict__["basic_auth_config"] = None
        __props__.__dict__["branch_name"] = None
        __props__.__dict__["build_spec"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["enable_auto_build"] = None
        __props__.__dict__["enable_performance_mode"] = None
        __props__.__dict__["enable_pull_request_preview"] = None
        __props__.__dict__["environment_variables"] = None
        __props__.__dict__["pull_request_environment_name"] = None
        __props__.__dict__["stage"] = None
        __props__.__dict__["tags"] = None
        return Branch(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> pulumi.Output[str]:
        """Resolved output for wire property ``appId``."""
        return pulumi.get(self, "app_id")
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """Resolved ``arn`` output; read-only (never supplied as an input above)."""
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="basicAuthConfig")
    def basic_auth_config(self) -> pulumi.Output[Optional['outputs.BranchBasicAuthConfig']]:
        """Resolved output for wire property ``basicAuthConfig``."""
        return pulumi.get(self, "basic_auth_config")
    @property
    @pulumi.getter(name="branchName")
    def branch_name(self) -> pulumi.Output[str]:
        """Resolved output for wire property ``branchName``."""
        return pulumi.get(self, "branch_name")
    @property
    @pulumi.getter(name="buildSpec")
    def build_spec(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for wire property ``buildSpec``."""
        return pulumi.get(self, "build_spec")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """Resolved ``description`` output."""
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="enableAutoBuild")
    def enable_auto_build(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for wire property ``enableAutoBuild``."""
        return pulumi.get(self, "enable_auto_build")
    @property
    @pulumi.getter(name="enablePerformanceMode")
    def enable_performance_mode(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for wire property ``enablePerformanceMode``."""
        return pulumi.get(self, "enable_performance_mode")
    @property
    @pulumi.getter(name="enablePullRequestPreview")
    def enable_pull_request_preview(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for wire property ``enablePullRequestPreview``."""
        return pulumi.get(self, "enable_pull_request_preview")
    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> pulumi.Output[Optional[Sequence['outputs.BranchEnvironmentVariable']]]:
        """Resolved output for wire property ``environmentVariables``."""
        return pulumi.get(self, "environment_variables")
    @property
    @pulumi.getter(name="pullRequestEnvironmentName")
    def pull_request_environment_name(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for wire property ``pullRequestEnvironmentName``."""
        return pulumi.get(self, "pull_request_environment_name")
    @property
    @pulumi.getter
    def stage(self) -> pulumi.Output[Optional['BranchStage']]:
        """Resolved ``stage`` output as a ``BranchStage`` enum member."""
        return pulumi.get(self, "stage")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence['outputs.BranchTag']]]:
        """Resolved ``tags`` output."""
        return pulumi.get(self, "tags")
| <filename>sdk/python/pulumi_aws_native/amplify/branch.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BranchArgs', 'Branch']
@pulumi.input_type
class BranchArgs:
    """Input bundle for constructing a :class:`Branch` resource.

    Only ``app_id`` is required; every other argument is optional and simply
    omitted from the stored state when left as ``None``.
    """
    def __init__(__self__, *,
                 app_id: pulumi.Input[str],
                 basic_auth_config: Optional[pulumi.Input['BranchBasicAuthConfigArgs']] = None,
                 branch_name: Optional[pulumi.Input[str]] = None,
                 build_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_auto_build: Optional[pulumi.Input[bool]] = None,
                 enable_performance_mode: Optional[pulumi.Input[bool]] = None,
                 enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
                 environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input['BranchEnvironmentVariableArgs']]]] = None,
                 pull_request_environment_name: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input['BranchStage']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['BranchTagArgs']]]] = None):
        """
        The set of arguments for constructing a Branch resource.

        ``app_id`` is stored unconditionally; each optional argument is stored
        only when it is not ``None``.
        """
        pulumi.set(__self__, "app_id", app_id)
        if basic_auth_config is not None:
            pulumi.set(__self__, "basic_auth_config", basic_auth_config)
        if branch_name is not None:
            pulumi.set(__self__, "branch_name", branch_name)
        if build_spec is not None:
            pulumi.set(__self__, "build_spec", build_spec)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enable_auto_build is not None:
            pulumi.set(__self__, "enable_auto_build", enable_auto_build)
        if enable_performance_mode is not None:
            pulumi.set(__self__, "enable_performance_mode", enable_performance_mode)
        if enable_pull_request_preview is not None:
            pulumi.set(__self__, "enable_pull_request_preview", enable_pull_request_preview)
        if environment_variables is not None:
            pulumi.set(__self__, "environment_variables", environment_variables)
        if pull_request_environment_name is not None:
            pulumi.set(__self__, "pull_request_environment_name", pull_request_environment_name)
        if stage is not None:
            pulumi.set(__self__, "stage", stage)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> pulumi.Input[str]:
        """Wire property ``appId``; the only required input."""
        return pulumi.get(self, "app_id")
    @app_id.setter
    def app_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "app_id", value)
    @property
    @pulumi.getter(name="basicAuthConfig")
    def basic_auth_config(self) -> Optional[pulumi.Input['BranchBasicAuthConfigArgs']]:
        """Optional input mapped to wire name ``basicAuthConfig``."""
        return pulumi.get(self, "basic_auth_config")
    @basic_auth_config.setter
    def basic_auth_config(self, value: Optional[pulumi.Input['BranchBasicAuthConfigArgs']]):
        pulumi.set(self, "basic_auth_config", value)
    @property
    @pulumi.getter(name="branchName")
    def branch_name(self) -> Optional[pulumi.Input[str]]:
        """Optional input mapped to wire name ``branchName``."""
        return pulumi.get(self, "branch_name")
    @branch_name.setter
    def branch_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "branch_name", value)
    @property
    @pulumi.getter(name="buildSpec")
    def build_spec(self) -> Optional[pulumi.Input[str]]:
        """Optional input mapped to wire name ``buildSpec``."""
        return pulumi.get(self, "build_spec")
    @build_spec.setter
    def build_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "build_spec", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """Optional ``description`` input."""
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="enableAutoBuild")
    def enable_auto_build(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean input mapped to wire name ``enableAutoBuild``."""
        return pulumi.get(self, "enable_auto_build")
    @enable_auto_build.setter
    def enable_auto_build(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_auto_build", value)
    @property
    @pulumi.getter(name="enablePerformanceMode")
    def enable_performance_mode(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean input mapped to wire name ``enablePerformanceMode``."""
        return pulumi.get(self, "enable_performance_mode")
    @enable_performance_mode.setter
    def enable_performance_mode(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_performance_mode", value)
    @property
    @pulumi.getter(name="enablePullRequestPreview")
    def enable_pull_request_preview(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean input mapped to wire name ``enablePullRequestPreview``."""
        return pulumi.get(self, "enable_pull_request_preview")
    @enable_pull_request_preview.setter
    def enable_pull_request_preview(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_pull_request_preview", value)
    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BranchEnvironmentVariableArgs']]]]:
        """Optional list input mapped to wire name ``environmentVariables``."""
        return pulumi.get(self, "environment_variables")
    @environment_variables.setter
    def environment_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BranchEnvironmentVariableArgs']]]]):
        pulumi.set(self, "environment_variables", value)
    @property
    @pulumi.getter(name="pullRequestEnvironmentName")
    def pull_request_environment_name(self) -> Optional[pulumi.Input[str]]:
        """Optional input mapped to wire name ``pullRequestEnvironmentName``."""
        return pulumi.get(self, "pull_request_environment_name")
    @pull_request_environment_name.setter
    def pull_request_environment_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pull_request_environment_name", value)
    @property
    @pulumi.getter
    def stage(self) -> Optional[pulumi.Input['BranchStage']]:
        """Optional ``stage`` input; value is a ``BranchStage`` enum member."""
        return pulumi.get(self, "stage")
    @stage.setter
    def stage(self, value: Optional[pulumi.Input['BranchStage']]):
        pulumi.set(self, "stage", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BranchTagArgs']]]]:
        """Optional ``tags`` input: a sequence of ``BranchTagArgs``."""
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BranchTagArgs']]]]):
        pulumi.set(self, "tags", value)
class Branch(pulumi.CustomResource):
    """A branch within an Amplify app, registered as ``aws-native:amplify:Branch``
    (see the type token passed to ``super().__init__`` in ``_internal_init``)."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 app_id: Optional[pulumi.Input[str]] = None,
                 basic_auth_config: Optional[pulumi.Input[pulumi.InputType['BranchBasicAuthConfigArgs']]] = None,
                 branch_name: Optional[pulumi.Input[str]] = None,
                 build_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_auto_build: Optional[pulumi.Input[bool]] = None,
                 enable_performance_mode: Optional[pulumi.Input[bool]] = None,
                 enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
                 environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchEnvironmentVariableArgs']]]]] = None,
                 pull_request_environment_name: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input['BranchStage']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchTagArgs']]]]] = None,
                 __props__=None):
        """
        The AWS::Amplify::Branch resource creates a new branch within an app.

        Overload taking each resource property as an individual keyword
        argument. Only ``app_id`` is effectively required (enforced at
        runtime in ``_internal_init``).

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BranchArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The AWS::Amplify::Branch resource creates a new branch within an app.

        Overload taking a pre-built :class:`BranchArgs` bundle.

        :param str resource_name: The name of the resource.
        :param BranchArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed overloads above: detect whether
        # the caller supplied a BranchArgs bundle or loose keyword arguments
        # and funnel both shapes into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(BranchArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 app_id: Optional[pulumi.Input[str]] = None,
                 basic_auth_config: Optional[pulumi.Input[pulumi.InputType['BranchBasicAuthConfigArgs']]] = None,
                 branch_name: Optional[pulumi.Input[str]] = None,
                 build_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_auto_build: Optional[pulumi.Input[bool]] = None,
                 enable_performance_mode: Optional[pulumi.Input[bool]] = None,
                 enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
                 environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchEnvironmentVariableArgs']]]]] = None,
                 pull_request_environment_name: Optional[pulumi.Input[str]] = None,
                 stage: Optional[pulumi.Input['BranchStage']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BranchTagArgs']]]]] = None,
                 __props__=None):
        """Shared constructor body used by both public ``__init__`` overloads."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id means we are creating a new resource, so build the
            # property bag from the supplied arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BranchArgs.__new__(BranchArgs)
            if app_id is None and not opts.urn:
                raise TypeError("Missing required property 'app_id'")
            __props__.__dict__["app_id"] = app_id
            __props__.__dict__["basic_auth_config"] = basic_auth_config
            __props__.__dict__["branch_name"] = branch_name
            __props__.__dict__["build_spec"] = build_spec
            __props__.__dict__["description"] = description
            __props__.__dict__["enable_auto_build"] = enable_auto_build
            __props__.__dict__["enable_performance_mode"] = enable_performance_mode
            __props__.__dict__["enable_pull_request_preview"] = enable_pull_request_preview
            __props__.__dict__["environment_variables"] = environment_variables
            __props__.__dict__["pull_request_environment_name"] = pull_request_environment_name
            __props__.__dict__["stage"] = stage
            __props__.__dict__["tags"] = tags
            # 'arn' is output-only: it is never accepted as an input here.
            __props__.__dict__["arn"] = None
        super(Branch, __self__).__init__(
            'aws-native:amplify:Branch',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Branch':
        """
        Get an existing Branch resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Every property starts as None; constructing Branch with opts.id set
        # performs a lookup of the existing resource's state instead of a create.
        __props__ = BranchArgs.__new__(BranchArgs)
        __props__.__dict__["app_id"] = None
        __props__.__dict__["arn"] = None
        __props__.__dict__["basic_auth_config"] = None
        __props__.__dict__["branch_name"] = None
        __props__.__dict__["build_spec"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["enable_auto_build"] = None
        __props__.__dict__["enable_performance_mode"] = None
        __props__.__dict__["enable_pull_request_preview"] = None
        __props__.__dict__["environment_variables"] = None
        __props__.__dict__["pull_request_environment_name"] = None
        __props__.__dict__["stage"] = None
        __props__.__dict__["tags"] = None
        return Branch(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> pulumi.Output[str]:
        """Resolved output for wire property ``appId``."""
        return pulumi.get(self, "app_id")
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """Resolved ``arn`` output; read-only (never supplied as an input above)."""
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="basicAuthConfig")
    def basic_auth_config(self) -> pulumi.Output[Optional['outputs.BranchBasicAuthConfig']]:
        """Resolved output for wire property ``basicAuthConfig``."""
        return pulumi.get(self, "basic_auth_config")
    @property
    @pulumi.getter(name="branchName")
    def branch_name(self) -> pulumi.Output[str]:
        """Resolved output for wire property ``branchName``."""
        return pulumi.get(self, "branch_name")
    @property
    @pulumi.getter(name="buildSpec")
    def build_spec(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for wire property ``buildSpec``."""
        return pulumi.get(self, "build_spec")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """Resolved ``description`` output."""
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="enableAutoBuild")
    def enable_auto_build(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for wire property ``enableAutoBuild``."""
        return pulumi.get(self, "enable_auto_build")
    @property
    @pulumi.getter(name="enablePerformanceMode")
    def enable_performance_mode(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for wire property ``enablePerformanceMode``."""
        return pulumi.get(self, "enable_performance_mode")
    @property
    @pulumi.getter(name="enablePullRequestPreview")
    def enable_pull_request_preview(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for wire property ``enablePullRequestPreview``."""
        return pulumi.get(self, "enable_pull_request_preview")
    @property
    @pulumi.getter(name="environmentVariables")
    def environment_variables(self) -> pulumi.Output[Optional[Sequence['outputs.BranchEnvironmentVariable']]]:
        """Resolved output for wire property ``environmentVariables``."""
        return pulumi.get(self, "environment_variables")
    @property
    @pulumi.getter(name="pullRequestEnvironmentName")
    def pull_request_environment_name(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for wire property ``pullRequestEnvironmentName``."""
        return pulumi.get(self, "pull_request_environment_name")
    @property
    @pulumi.getter
    def stage(self) -> pulumi.Output[Optional['BranchStage']]:
        """Resolved ``stage`` output as a ``BranchStage`` enum member."""
        return pulumi.get(self, "stage")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence['outputs.BranchTag']]]:
        """Resolved ``tags`` output."""
        return pulumi.get(self, "tags")
| en | 0.782257 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Branch resource. The AWS::Amplify::Branch resource creates a new branch within an app. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. The AWS::Amplify::Branch resource creates a new branch within an app. :param str resource_name: The name of the resource. :param BranchArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing Branch resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. | 1.741463 | 2 |
Competitive Programming/Array/Chocolate Distribution Problem.py | shreejitverma/GeeksforGeeks | 2 | 6619458 | <filename>Competitive Programming/Array/Chocolate Distribution Problem.py<gh_stars>1-10
'''https://practice.geeksforgeeks.org/problems/chocolate-distribution-problem3825/1
Chocolate Distribution Problem
Easy Accuracy: 53.25% Submissions: 30711 Points: 2
Given an array A[ ] of positive integers of size N, where each value represents the number of chocolates in a packet. Each packet can have a variable number of chocolates. There are M students, the task is to distribute chocolate packets among M students such that :
1. Each student gets exactly one packet.
2. The difference between maximum number of chocolates given to a student and minimum number of chocolates given to a student is minimum.
Example 1:
Input:
N = 8, M = 5
A = {3, 4, 1, 9, 56, 7, 9, 12}
Output: 6
Explanation: The minimum difference between
maximum chocolates and minimum chocolates
is 9 - 3 = 6 by choosing following M packets :
{3, 4, 9, 7, 9}.
Example 2:
Input:
N = 7, M = 3
A = {7, 3, 2, 4, 9, 12, 56}
Output: 2
Explanation: The minimum difference between
maximum chocolates and minimum chocolates
is 4 - 2 = 2 by choosing following M packets :
{3, 2, 4}.
Your Task:
You don't need to take any input or print anything. Your task is to complete the function findMinDiff() which takes array A[ ], N and M as input parameters and returns the minimum possible difference between maximum number of chocolates given to a student and minimum number of chocolates given to a student.
Expected Time Complexity: O(N*Log(N))
Expected Auxiliary Space: O(1)
Constraints:
1 ≤ T ≤ 100
1 ≤ N ≤ 105
1 ≤ Ai ≤ 109
1 ≤ M ≤ N'''
# User function Template for python3
class Solution:
    def findMinDiff(self, A, N, M):
        """Return the smallest possible max-min spread when picking M packets.

        Sort the packet sizes; every optimal selection is then a contiguous
        window of M sorted values, so slide a window of width M and keep the
        smallest spread.

        :param A: list of packet sizes (left unmodified, unlike the original
                  which sorted the caller's list in place).
        :param N: number of packets, len(A).
        :param M: number of students (problem constraints give 1 <= M <= N).
        :return: minimum difference between the largest and smallest chosen
                 packet; 0 when M <= 0 or A is empty.
        """
        if M <= 0 or not A:
            return 0  # degenerate input: nothing to distribute
        packets = sorted(A)  # work on a copy; don't mutate the caller's list
        # Sliding window of width M over the sorted sizes; M == N naturally
        # yields the single window packets[N-1] - packets[0].
        return min(packets[i + M - 1] - packets[i] for i in range(N - M + 1))
# {
# Driver Code Starts
# Initial Template for Python 3

if __name__ == '__main__':
    # Read the number of test cases, then for each: N, the packet sizes, M.
    for _ in range(int(input())):
        n = int(input())
        packets = [int(token) for token in input().split()]
        m = int(input())
        print(Solution().findMinDiff(packets, n, m))
# } Driver Code Ends
| <filename>Competitive Programming/Array/Chocolate Distribution Problem.py<gh_stars>1-10
'''https://practice.geeksforgeeks.org/problems/chocolate-distribution-problem3825/1
Chocolate Distribution Problem
Easy Accuracy: 53.25% Submissions: 30711 Points: 2
Given an array A[ ] of positive integers of size N, where each value represents the number of chocolates in a packet. Each packet can have a variable number of chocolates. There are M students, the task is to distribute chocolate packets among M students such that :
1. Each student gets exactly one packet.
2. The difference between maximum number of chocolates given to a student and minimum number of chocolates given to a student is minimum.
Example 1:
Input:
N = 8, M = 5
A = {3, 4, 1, 9, 56, 7, 9, 12}
Output: 6
Explanation: The minimum difference between
maximum chocolates and minimum chocolates
is 9 - 3 = 6 by choosing following M packets :
{3, 4, 9, 7, 9}.
Example 2:
Input:
N = 7, M = 3
A = {7, 3, 2, 4, 9, 12, 56}
Output: 2
Explanation: The minimum difference between
maximum chocolates and minimum chocolates
is 4 - 2 = 2 by choosing following M packets :
{3, 2, 4}.
Your Task:
You don't need to take any input or print anything. Your task is to complete the function findMinDiff() which takes array A[ ], N and M as input parameters and returns the minimum possible difference between maximum number of chocolates given to a student and minimum number of chocolates given to a student.
Expected Time Complexity: O(N*Log(N))
Expected Auxiliary Space: O(1)
Constraints:
1 ≤ T ≤ 100
1 ≤ N ≤ 105
1 ≤ Ai ≤ 109
1 ≤ M ≤ N'''
# User function Template for python3
class Solution:
    def findMinDiff(self, A, N, M):
        """Return the smallest possible max-min spread when picking M packets.

        Sort the packet sizes; every optimal selection is then a contiguous
        window of M sorted values, so slide a window of width M and keep the
        smallest spread.

        :param A: list of packet sizes (left unmodified, unlike the original
                  which sorted the caller's list in place).
        :param N: number of packets, len(A).
        :param M: number of students (problem constraints give 1 <= M <= N).
        :return: minimum difference between the largest and smallest chosen
                 packet; 0 when M <= 0 or A is empty.
        """
        if M <= 0 or not A:
            return 0  # degenerate input: nothing to distribute
        packets = sorted(A)  # work on a copy; don't mutate the caller's list
        # Sliding window of width M over the sorted sizes; M == N naturally
        # yields the single window packets[N-1] - packets[0].
        return min(packets[i + M - 1] - packets[i] for i in range(N - M + 1))
# {
# Driver Code Starts
# Initial Template for Python 3

if __name__ == '__main__':
    # Read the number of test cases, then for each: N, the packet sizes, M.
    for _ in range(int(input())):
        n = int(input())
        packets = [int(token) for token in input().split()]
        m = int(input())
        print(Solution().findMinDiff(packets, n, m))
# } Driver Code Ends
| en | 0.853136 | ERROR: type should be string, got "https://practice.geeksforgeeks.org/problems/chocolate-distribution-problem3825/1 Chocolate Distribution Problem Easy Accuracy: 53.25% Submissions: 30711 Points: 2 Given an array A[ ] of positive integers of size N, where each value represents the number of chocolates in a packet. Each packet can have a variable number of chocolates. There are M students, the task is to distribute chocolate packets among M students such that : 1. Each student gets exactly one packet. 2. The difference between maximum number of chocolates given to a student and minimum number of chocolates given to a student is minimum. Example 1: Input: N = 8, M = 5 A = {3, 4, 1, 9, 56, 7, 9, 12} Output: 6 Explanation: The minimum difference between maximum chocolates and minimum chocolates is 9 - 3 = 6 by choosing following M packets : {3, 4, 9, 7, 9}. Example 2: Input: N = 7, M = 3 A = {7, 3, 2, 4, 9, 12, 56} Output: 2 Explanation: The minimum difference between maximum chocolates and minimum chocolates is 4 - 2 = 2 by choosing following M packets : {3, 2, 4}. Your Task: You don't need to take any input or print anything. Your task is to complete the function findMinDiff() which takes array A[ ], N and M as input parameters and returns the minimum possible difference between maximum number of chocolates given to a student and minimum number of chocolates given to a student. Expected Time Complexity: O(N*Log(N)) Expected Auxiliary Space: O(1) Constraints: 1 ≤ T ≤ 100 1 ≤ N ≤ 105 1 ≤ Ai ≤ 109 1 ≤ M ≤ N # User function Template for python3 # code here # sorting # a very large number # corner case # slidig window # { # Driver Code Starts # Initial Template for Python 3 # } Driver Code Ends" | 4.139753 | 4 |
app/views.py | alexherns/sciral-ocf-dev | 0 | 6619459 | # -*- coding: utf-8 -*-
from app import app, db, models
from flask import render_template, flash, redirect, session, url_for, request
from .forms import searchBox
from .models import Article
from .scripts import fetch_articles, query_local_database
import json
import urllib2
import pickle
from config import ALTMETRIC_KEY
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Render the landing page; on a valid search submission, redirect to search()."""
    form = searchBox()
    if not form.validate_on_submit():
        # GET request or failed validation: just show the page.
        return render_template('index.html', title='', form=form)
    term = form.query_term.data
    flash('Search for {0} was successfully accepted!'.format(term))
    return redirect(url_for('search', query_term=term))
@app.route('/results', methods=['GET', 'POST'])
def results():
    """Show a ranked article list: the stored default set or the last search's results.

    Reads the ``default_option`` query parameter; the string 'True' selects
    the default set from the database, anything else reloads the article list
    pickled by search(). Raises a 400 (KeyError) if the parameter is absent,
    matching the original behavior.
    """
    default_option = request.args['default_option']
    form = searchBox()
    if default_option == 'True':
        # '== True' is intentional here: SQLAlchemy overloads it to build the
        # filter expression (use .is_(True) if a linter complains).
        articles = models.Article.query.filter(
            models.Article.default_set == True).order_by(models.Article.score.desc()).all()
    else:
        # Fixed: open the dump inside a context manager so the file handle is
        # closed (the original leaked the object returned by open()).
        # NOTE(review): a pickle file in ./tmp is per-process state — it will
        # not be shared correctly across multiple workers; confirm deployment.
        with open('./tmp/pickle.dump', 'rb') as handle:
            articles = pickle.load(handle)
    if form.validate_on_submit():
        flash('Search for {0} was successfully accepted!'.format(
            form.query_term.data))
        return redirect(url_for('search', query_term=form.query_term.data))
    return render_template('results.html', title='', form=form, articles=articles)
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Run a local-database query and hand the matches off to the results view.

    On a hit, the article list is pickled to ./tmp/pickle.dump for results()
    to reload; on a miss, results() is told to fall back to the default set.
    """
    query_term = request.args['query_term']
    articles = query_local_database(query_term)
    if len(articles) == 0:
        # Fixed: the original chained a no-op .format(query_term) onto a
        # message that contains no placeholder; the dead call is dropped.
        flash('No results were obtained for your query. Returning a default set.')
        default_option = True
        return redirect(url_for('results', default_option=default_option))
    default_option = False
    # Fixed: write the dump inside a context manager so the file handle is
    # closed and flushed (the original leaked the object returned by open()).
    with open('./tmp/pickle.dump', 'wb') as handle:
        pickle.dump(articles, handle)
    return redirect(url_for('results', default_option=default_option))
| # -*- coding: utf-8 -*-
from app import app, db, models
from flask import render_template, flash, redirect, session, url_for, request
from .forms import searchBox
from .models import Article
from .scripts import fetch_articles, query_local_database
import json
import urllib2
import pickle
from config import ALTMETRIC_KEY
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Render the landing page; forward a submitted search to /search."""
    search_form = searchBox()
    if not search_form.validate_on_submit():
        # GET request (or invalid submission): just show the search page.
        return render_template('index.html', title='', form=search_form)
    query = search_form.query_term.data
    flash('Search for {0} was successfully accepted!'.format(query))
    return redirect(url_for('search', query_term=query))
@app.route('/results', methods=['GET', 'POST'])
def results():
    """Show a result set: the curated default articles or the pickled
    results of the most recent search.

    Expects a ``default_option`` query-string argument; the string 'True'
    selects the default set. A new search submitted from this page is
    redirected to /search.
    """
    default_option = request.args['default_option']
    form = searchBox()
    if default_option == 'True':
        articles = models.Article.query.filter(
            models.Article.default_set == True).order_by(models.Article.score.desc()).all()
    else:
        # Close the dump file deterministically instead of leaking the handle.
        with open('./tmp/pickle.dump', 'rb') as dump_file:
            articles = pickle.load(dump_file)
    if form.validate_on_submit():
        flash('Search for {0} was successfully accepted!'.format(
            form.query_term.data))
        return redirect(url_for('search', query_term=form.query_term.data))
    return render_template('results.html', title='', form=form, articles=articles)
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Query the local article database and stash the results for /results.

    Redirects to /results with default_option=True when nothing matched,
    otherwise pickles the result list for the results view to load.
    """
    query_term = request.args['query_term']
    articles = query_local_database(query_term)
    if not articles:
        # The original called .format(query_term) on a message with no
        # placeholder — a no-op; the message text is unchanged.
        flash('No results were obtained for your query. Returning a default set.')
        return redirect(url_for('results', default_option=True))
    # Close the dump file deterministically instead of leaking the handle.
    with open('./tmp/pickle.dump', 'wb') as dump_file:
        pickle.dump(articles, dump_file)
    return redirect(url_for('results', default_option=False))
| en | 0.448065 | # -*- coding: utf-8 -*- #session['query_term']= form.query_term.data | 2.388856 | 2 |
ws/broadcast-server.py | NormalVR/IXWebSocket | 319 | 6619460 | #!/usr/bin/env python
import os
import asyncio
import websockets
connections = set()
async def echo(websocket, path):
    """Fan each received message out to every other connected client.

    The connection is tracked in the module-level ``connections`` set for
    the lifetime of the handler and is always removed again, even if the
    receive loop raises.
    """
    connections.add(websocket)
    try:
        async for message in websocket:
            print(message)
            for ws in connections:
                if ws != websocket:
                    await ws.send(message)
    finally:
        # The original bare `except: raise` added nothing; try/finally alone
        # guarantees cleanup. discard() is safe even if the socket is absent.
        connections.discard(websocket)
# Start the websocket broadcast server on localhost:8080 and serve forever.
# NOTE(review): asyncio.get_event_loop() is deprecated for this use on
# Python 3.10+ — consider asyncio.run() if the runtime is upgraded.
asyncio.get_event_loop().run_until_complete(
    websockets.serve(echo, 'localhost', 8080))
asyncio.get_event_loop().run_forever()
| #!/usr/bin/env python
import os
import asyncio
import websockets
connections = set()
async def echo(websocket, path):
    """Fan each received message out to every other connected client.

    The connection is tracked in the module-level ``connections`` set for
    the lifetime of the handler and is always removed again, even if the
    receive loop raises.
    """
    connections.add(websocket)
    try:
        async for message in websocket:
            print(message)
            for ws in connections:
                if ws != websocket:
                    await ws.send(message)
    finally:
        # The original bare `except: raise` added nothing; try/finally alone
        # guarantees cleanup. discard() is safe even if the socket is absent.
        connections.discard(websocket)
# Start the websocket broadcast server on localhost:8080 and serve forever.
# NOTE(review): asyncio.get_event_loop() is deprecated for this use on
# Python 3.10+ — consider asyncio.run() if the runtime is upgraded.
asyncio.get_event_loop().run_until_complete(
    websockets.serve(echo, 'localhost', 8080))
asyncio.get_event_loop().run_forever()
| ru | 0.26433 | #!/usr/bin/env python | 2.718339 | 3 |
help/urls.py | matmaxgeds/somaliaims-demo | 0 | 6619461 | <gh_stars>0
from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^$', HelpPageView.as_view(), name='help'),
] | from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^$', HelpPageView.as_view(), name='help'),
] | none | 1 | 1.454335 | 1 | |
repository-miner/profile.py | INSO-TUWien/portfoliometrix | 0 | 6619462 | <gh_stars>0
import time
class ProfilingPhase:
    """Context manager measuring the wall-clock duration of one phase.

    After the ``with`` block exits, ``duration`` holds the elapsed seconds
    (``end - start``).
    """

    def __enter__(self):
        self.start = time.time()
        # Return the instance so `with profiler.start(...) as phase:` binds
        # the phase object (the original returned None here).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time.time()
        self.duration = self.end - self.start
class Profiler:
    """Collects per-phase timings and appends them as CSV rows to a file."""

    # Canonical phase names used as keys in ``phases``.
    CHECKOUT = 'checkout'
    ANALYSIS = 'analysis'
    STORAGE = 'storage'

    def __init__(self, file_name: str):
        """Create/truncate *file_name* and write the CSV header."""
        self.file_name = file_name
        self.phases = {}  # phase name -> ProfilingPhase
        with open(self.file_name, 'w+') as time_file:
            time_file.write('SEP=,\n')
            time_file.write(f'repository,commit,{",".join([Profiler.CHECKOUT, Profiler.ANALYSIS])}\n')

    def save(self, repository, snapshot):
        """Append one row with the durations of all recorded phases.

        NOTE(review): relies on the insertion order of ``phases`` matching
        the header columns — confirm callers always start checkout before
        analysis.
        """
        # Only the values are needed; iterating .items() for values alone
        # was an idiom smell (PERF102).
        durations = ",".join(str(phase.duration) for phase in self.phases.values())
        with open(self.file_name, 'a+') as time_file:
            time_file.write(f'{repository},{snapshot},{durations}\n')

    def save_storage(self, repository):
        """Append the duration of the storage phase as its own row."""
        with open(self.file_name, 'a+') as time_file:
            time_file.write(f'{repository}-storage,{self.phases[Profiler.STORAGE].duration}\n')

    def start(self, phase: str) -> "ProfilingPhase":
        """Register and return a fresh ProfilingPhase under *phase*.

        The annotation is a string so the class is importable even when
        ProfilingPhase is defined elsewhere in the module.
        """
        profiling_phase = ProfilingPhase()
        self.phases[phase] = profiling_phase
        return profiling_phase
| import time
class ProfilingPhase:
    """Context manager measuring the wall-clock duration of one phase.

    After the ``with`` block exits, ``duration`` holds the elapsed seconds
    (``end - start``).
    """

    def __enter__(self):
        self.start = time.time()
        # Return the instance so `with profiler.start(...) as phase:` binds
        # the phase object (the original returned None here).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end = time.time()
        self.duration = self.end - self.start
class Profiler:
    """Collects per-phase timings and appends them as CSV rows to a file."""

    # Canonical phase names used as keys in ``phases``.
    CHECKOUT = 'checkout'
    ANALYSIS = 'analysis'
    STORAGE = 'storage'

    def __init__(self, file_name: str):
        """Create/truncate *file_name* and write the CSV header."""
        self.file_name = file_name
        self.phases = {}  # phase name -> ProfilingPhase
        with open(self.file_name, 'w+') as time_file:
            time_file.write('SEP=,\n')
            time_file.write(f'repository,commit,{",".join([Profiler.CHECKOUT, Profiler.ANALYSIS])}\n')

    def save(self, repository, snapshot):
        """Append one row with the durations of all recorded phases.

        NOTE(review): relies on the insertion order of ``phases`` matching
        the header columns — confirm callers always start checkout before
        analysis.
        """
        # Only the values are needed; iterating .items() for values alone
        # was an idiom smell (PERF102).
        durations = ",".join(str(phase.duration) for phase in self.phases.values())
        with open(self.file_name, 'a+') as time_file:
            time_file.write(f'{repository},{snapshot},{durations}\n')

    def save_storage(self, repository):
        """Append the duration of the storage phase as its own row."""
        with open(self.file_name, 'a+') as time_file:
            time_file.write(f'{repository}-storage,{self.phases[Profiler.STORAGE].duration}\n')

    def start(self, phase: str) -> "ProfilingPhase":
        """Register and return a fresh ProfilingPhase under *phase*.

        The annotation is a string so the class is importable even when
        ProfilingPhase is defined elsewhere in the module.
        """
        profiling_phase = ProfilingPhase()
        self.phases[phase] = profiling_phase
        return profiling_phase
projects/forest_firefighters/controllers/mavic/mavic.py | cyberbotics/webots-projects | 0 | 6619463 | from controller import Robot, Keyboard
def clamp(value, value_min, value_max):
    """Constrain *value* to the closed interval [value_min, value_max]."""
    bounded_below = value if value > value_min else value_min
    return bounded_below if bounded_below < value_max else value_max
class Mavic (Robot):
    """Webots controller for a Mavic-style quadcopter, driven by keyboard.

    Attitude is stabilized with simple proportional terms on roll, pitch
    and altitude; arrow keys inject pitch/yaw disturbances, shift+up/down
    changes the target altitude, and 'D' accumulates a water-drop request
    that is published through the robot's custom data.
    """

    # Constants, empirically found.
    K_VERTICAL_THRUST = 68.5  # with this thrust, the drone lifts.
    K_VERTICAL_OFFSET = 0.6  # Vertical offset where the robot actually targets to stabilize itself.
    K_VERTICAL_P = 3.0  # P constant of the vertical PID.
    K_ROLL_P = 50.0  # P constant of the roll PID.
    K_PITCH_P = 30.0  # P constant of the pitch PID.
    target_altitude = 20  # initial altitude set-point [m]

    def __init__(self):
        """Enable sensors and the keyboard, and put motors in velocity mode."""
        Robot.__init__(self)
        self.timeStep = int(self.getBasicTimeStep())
        # keyboard
        self.keyboard = self.getKeyboard()
        self.keyboard.enable(10 * self.timeStep)
        self.water_to_drop = 0  # pending drop requests, flushed on key release
        # Get and enable devices.
        self.camera = self.getDevice("camera")
        self.camera.enable(self.timeStep)
        self.imu = self.getDevice("inertial unit")
        self.imu.enable(self.timeStep)
        self.gps = self.getDevice("gps")
        self.gps.enable(self.timeStep)
        self.gyro = self.getDevice("gyro")
        self.gyro.enable(self.timeStep)
        self.front_left_motor = self.getDevice("front left propeller")
        self.front_right_motor = self.getDevice("front right propeller")
        self.rear_left_motor = self.getDevice("rear left propeller")
        self.rear_right_motor = self.getDevice("rear right propeller")
        motors = [self.front_left_motor, self.front_right_motor, self.rear_left_motor, self.rear_right_motor]
        # Velocity control: the position target must be +inf for Webots motors.
        for motor in motors:
            motor.setPosition(float('inf'))
            motor.setVelocity(1)
        # Display manual control message.
        print("You can control the drone with your computer keyboard:")
        print("- 'D': drop water")
        print("- 'up': move forward.")
        print("- 'down': move backward.")
        print("- 'right': turn right.")
        print("- 'left': turn left.")
        print("- 'shift + up': increase the target altitude.")
        print("- 'shift + down': decrease the target altitude.")

    def run(self):
        """Main control loop: sense, read the keyboard, mix motor commands."""
        while self.step(self.timeStep) != -1:
            # Read sensors
            roll, pitch, _ = self.imu.getRollPitchYaw()
            _, _, altitude = self.gps.getValues()
            roll_acceleration, pitch_acceleration, _ = self.gyro.getValues()
            roll_disturbance = 0
            pitch_disturbance = 0
            yaw_disturbance = 0
            key = self.keyboard.getKey()
            # Drop the water from the drone
            if key == ord('D'):
                self.water_to_drop += 1
            elif self.water_to_drop > 0:
                # 'D' released: publish the accumulated count once, then reset.
                self.setCustomData(str(self.water_to_drop))
                self.water_to_drop = 0
            else:
                self.setCustomData(str(0))
            # Movement
            if key == Keyboard.LEFT:
                yaw_disturbance = 1.3
            elif key == Keyboard.RIGHT:
                yaw_disturbance = -1.3
            elif key == Keyboard.UP:
                pitch_disturbance = -2
            elif key == Keyboard.DOWN:
                pitch_disturbance = 2
            elif key == Keyboard.UP + Keyboard.SHIFT:
                self.target_altitude += 0.05
                print(f"target altitude: {self.target_altitude} [m]\n")
            elif key == Keyboard.DOWN + Keyboard.SHIFT:
                self.target_altitude -= 0.05
                print(f"target altitude: {self.target_altitude} [m]\n")
            # Proportional stabilization plus gyro damping and keyboard input.
            roll_input = self.K_ROLL_P * clamp(roll, -1, 1) + roll_acceleration + roll_disturbance
            pitch_input = self.K_PITCH_P * clamp(pitch, -1, 1) + pitch_acceleration + pitch_disturbance
            yaw_input = yaw_disturbance
            # Cubic response on the (clamped) altitude error for a soft approach.
            clamped_difference_altitude = clamp(self.target_altitude - altitude + self.K_VERTICAL_OFFSET, -1, 1)
            vertical_input = self.K_VERTICAL_P * pow(clamped_difference_altitude, 3.0)
            # Mix vertical/yaw/pitch/roll commands into the four propellers.
            front_left_motor_input = self.K_VERTICAL_THRUST + vertical_input - yaw_input + pitch_input - roll_input
            front_right_motor_input = self.K_VERTICAL_THRUST + vertical_input + yaw_input + pitch_input + roll_input
            rear_left_motor_input = self.K_VERTICAL_THRUST + vertical_input + yaw_input - pitch_input - roll_input
            rear_right_motor_input = self.K_VERTICAL_THRUST + vertical_input - yaw_input - pitch_input + roll_input
            self.front_left_motor.setVelocity(front_left_motor_input)
            self.front_right_motor.setVelocity(-front_right_motor_input)
            self.rear_left_motor.setVelocity(-rear_left_motor_input)
            self.rear_right_motor.setVelocity(rear_right_motor_input)
# Entry point: instantiate the controller and hand over to its control loop.
robot = Mavic()
robot.run()
| from controller import Robot, Keyboard
def clamp(value, value_min, value_max):
    """Constrain *value* to the closed interval [value_min, value_max]."""
    bounded_below = value if value > value_min else value_min
    return bounded_below if bounded_below < value_max else value_max
class Mavic (Robot):
    """Webots controller for a Mavic-style quadcopter, driven by keyboard.

    Attitude is stabilized with simple proportional terms on roll, pitch
    and altitude; arrow keys inject pitch/yaw disturbances, shift+up/down
    changes the target altitude, and 'D' accumulates a water-drop request
    that is published through the robot's custom data.
    """

    # Constants, empirically found.
    K_VERTICAL_THRUST = 68.5  # with this thrust, the drone lifts.
    K_VERTICAL_OFFSET = 0.6  # Vertical offset where the robot actually targets to stabilize itself.
    K_VERTICAL_P = 3.0  # P constant of the vertical PID.
    K_ROLL_P = 50.0  # P constant of the roll PID.
    K_PITCH_P = 30.0  # P constant of the pitch PID.
    target_altitude = 20  # initial altitude set-point [m]

    def __init__(self):
        """Enable sensors and the keyboard, and put motors in velocity mode."""
        Robot.__init__(self)
        self.timeStep = int(self.getBasicTimeStep())
        # keyboard
        self.keyboard = self.getKeyboard()
        self.keyboard.enable(10 * self.timeStep)
        self.water_to_drop = 0  # pending drop requests, flushed on key release
        # Get and enable devices.
        self.camera = self.getDevice("camera")
        self.camera.enable(self.timeStep)
        self.imu = self.getDevice("inertial unit")
        self.imu.enable(self.timeStep)
        self.gps = self.getDevice("gps")
        self.gps.enable(self.timeStep)
        self.gyro = self.getDevice("gyro")
        self.gyro.enable(self.timeStep)
        self.front_left_motor = self.getDevice("front left propeller")
        self.front_right_motor = self.getDevice("front right propeller")
        self.rear_left_motor = self.getDevice("rear left propeller")
        self.rear_right_motor = self.getDevice("rear right propeller")
        motors = [self.front_left_motor, self.front_right_motor, self.rear_left_motor, self.rear_right_motor]
        # Velocity control: the position target must be +inf for Webots motors.
        for motor in motors:
            motor.setPosition(float('inf'))
            motor.setVelocity(1)
        # Display manual control message.
        print("You can control the drone with your computer keyboard:")
        print("- 'D': drop water")
        print("- 'up': move forward.")
        print("- 'down': move backward.")
        print("- 'right': turn right.")
        print("- 'left': turn left.")
        print("- 'shift + up': increase the target altitude.")
        print("- 'shift + down': decrease the target altitude.")

    def run(self):
        """Main control loop: sense, read the keyboard, mix motor commands."""
        while self.step(self.timeStep) != -1:
            # Read sensors
            roll, pitch, _ = self.imu.getRollPitchYaw()
            _, _, altitude = self.gps.getValues()
            roll_acceleration, pitch_acceleration, _ = self.gyro.getValues()
            roll_disturbance = 0
            pitch_disturbance = 0
            yaw_disturbance = 0
            key = self.keyboard.getKey()
            # Drop the water from the drone
            if key == ord('D'):
                self.water_to_drop += 1
            elif self.water_to_drop > 0:
                # 'D' released: publish the accumulated count once, then reset.
                self.setCustomData(str(self.water_to_drop))
                self.water_to_drop = 0
            else:
                self.setCustomData(str(0))
            # Movement
            if key == Keyboard.LEFT:
                yaw_disturbance = 1.3
            elif key == Keyboard.RIGHT:
                yaw_disturbance = -1.3
            elif key == Keyboard.UP:
                pitch_disturbance = -2
            elif key == Keyboard.DOWN:
                pitch_disturbance = 2
            elif key == Keyboard.UP + Keyboard.SHIFT:
                self.target_altitude += 0.05
                print(f"target altitude: {self.target_altitude} [m]\n")
            elif key == Keyboard.DOWN + Keyboard.SHIFT:
                self.target_altitude -= 0.05
                print(f"target altitude: {self.target_altitude} [m]\n")
            # Proportional stabilization plus gyro damping and keyboard input.
            roll_input = self.K_ROLL_P * clamp(roll, -1, 1) + roll_acceleration + roll_disturbance
            pitch_input = self.K_PITCH_P * clamp(pitch, -1, 1) + pitch_acceleration + pitch_disturbance
            yaw_input = yaw_disturbance
            # Cubic response on the (clamped) altitude error for a soft approach.
            clamped_difference_altitude = clamp(self.target_altitude - altitude + self.K_VERTICAL_OFFSET, -1, 1)
            vertical_input = self.K_VERTICAL_P * pow(clamped_difference_altitude, 3.0)
            # Mix vertical/yaw/pitch/roll commands into the four propellers.
            front_left_motor_input = self.K_VERTICAL_THRUST + vertical_input - yaw_input + pitch_input - roll_input
            front_right_motor_input = self.K_VERTICAL_THRUST + vertical_input + yaw_input + pitch_input + roll_input
            rear_left_motor_input = self.K_VERTICAL_THRUST + vertical_input + yaw_input - pitch_input - roll_input
            rear_right_motor_input = self.K_VERTICAL_THRUST + vertical_input - yaw_input - pitch_input + roll_input
            self.front_left_motor.setVelocity(front_left_motor_input)
            self.front_right_motor.setVelocity(-front_right_motor_input)
            self.rear_left_motor.setVelocity(-rear_left_motor_input)
            self.rear_right_motor.setVelocity(rear_right_motor_input)
# Entry point: instantiate the controller and hand over to its control loop.
robot = Mavic()
robot.run()
| en | 0.851763 | # Constants, empirically found. # with this thrust, the drone lifts. # Vertical offset where the robot actually targets to stabilize itself. # P constant of the vertical PID. # P constant of the roll PID. # P constant of the pitch PID. # keyboard # Get and enable devices. # Display manual control message. # Read sensors # Drop the water from the drone # Movement | 3.487761 | 3 |
tests/unit/codecs/test_codecs.py | System73/tamarco | 9 | 6619464 | <reponame>System73/tamarco
import pytest
from tamarco.codecs.interface import CodecInterface
from tamarco.codecs.json import JsonCodec
from tamarco.codecs.pickle import PickleCodec
from tamarco.codecs.yaml import YamlCodec
@pytest.mark.parametrize("Codec", (YamlCodec, JsonCodec, PickleCodec, CodecInterface))
@pytest.mark.asyncio
async def test_codec(Codec):
    """Round-trip a codec-appropriate sample through encode/decode.

    CodecInterface is abstract and is expected to raise; every concrete
    codec must decode its own encoding back to the original value.
    """
    # The parametrized Codec is a *class*, so compare with identity —
    # the original isinstance(Codec, YamlCodec) was always False, the
    # branches never ran, and failures were silently swallowed.
    str_original = "test"
    if Codec is YamlCodec:
        str_original = "Node:0 " " Node:1"
    elif Codec is JsonCodec:
        str_original = "{'node1': {'node2': 'example node'}}"
    if Codec is CodecInterface:
        # The bare interface must refuse both operations.
        with pytest.raises(Exception):
            Codec.encode(str_original)
        with pytest.raises(Exception):
            Codec.decode(str_original)
        return
    obj_encode = Codec.encode(str_original)
    assert Codec.decode(obj_encode) == str_original
| import pytest
from tamarco.codecs.interface import CodecInterface
from tamarco.codecs.json import JsonCodec
from tamarco.codecs.pickle import PickleCodec
from tamarco.codecs.yaml import YamlCodec
@pytest.mark.parametrize("Codec", (YamlCodec, JsonCodec, PickleCodec, CodecInterface))
@pytest.mark.asyncio
async def test_codec(Codec):
    """Round-trip a codec-appropriate sample through encode/decode.

    CodecInterface is abstract and is expected to raise; every concrete
    codec must decode its own encoding back to the original value.
    """
    # The parametrized Codec is a *class*, so compare with identity —
    # the original isinstance(Codec, YamlCodec) was always False, the
    # branches never ran, and failures were silently swallowed.
    str_original = "test"
    if Codec is YamlCodec:
        str_original = "Node:0 " " Node:1"
    elif Codec is JsonCodec:
        str_original = "{'node1': {'node2': 'example node'}}"
    if Codec is CodecInterface:
        # The bare interface must refuse both operations.
        with pytest.raises(Exception):
            Codec.encode(str_original)
        with pytest.raises(Exception):
            Codec.decode(str_original)
        return
    obj_encode = Codec.encode(str_original)
    assert Codec.decode(obj_encode) == str_original
assert True | none | 1 | 2.225953 | 2 | |
lib/virtual_machine_translator/virtualMachine.py | DimitarYordanov17/jack-compiler | 5 | 6619465 | <gh_stars>1-10
# A virtual machine translator. Intermediate code, supplied by front-end compiler, to Hack machine language. @DimitarYordanov7
# To run: python3 virtualMachine.py {your .vm file} {yes/no, should distinct .asm files be kept} {yes/no, should bootstrap code be added}
from lib.virtual_machine_translator.virtualMachineLibrary import VirtualMachineLibrary
import os
import sys
class VirtualMachineTranslator:
    """
    Main class, capable of processing a full directory, with .vm files resulting in one .asm file
    """

    # Hack bootstrap: initialise the stack pointer to 256.
    BOOTSTRAP_CODE = ["@256", "D=A", "@SP", "M=D"]

    @staticmethod
    def translate(path, keep_disctint_files, add_bootstrap_code):
        """
        Translate a path - create out.asm, add? bootstrap code, add? translated Sys.vm, add remaining translated .vm files
        """
        vm_files = []
        for root, dirs, files in os.walk(path):
            for file_name in files:
                if ".vm" in file_name:
                    vm_files.append(file_name)
            break  # only scan the top-level directory
        with open("out.asm", "w") as output_file:
            if add_bootstrap_code:
                output_file.write("// bootstrap code \n")
                for instruction in VirtualMachineTranslator.BOOTSTRAP_CODE:
                    output_file.write(instruction + "\n")
            # Sys.vm must come first so Sys.init directly follows the bootstrap.
            if "Sys.vm" in vm_files:
                VirtualMachineTranslator.translate_file("Sys.vm")
                with open("Sys.asm", "r") as sys_file:
                    output_file.write(sys_file.read())
                vm_files.remove("Sys.vm")
                if not keep_disctint_files:
                    os.system("rm Sys.asm")
            for vm_file_name in vm_files:
                VirtualMachineTranslator.translate_file(vm_file_name)
                with open(vm_file_name.split(".")[0] + ".asm", "r") as vm_file:
                    output_file.write(vm_file.read())
        if not keep_disctint_files:
            for file_name in vm_files:
                asm_file_name = file_name.split(".")[0] + ".asm"
                os.system(f"rm {asm_file_name}")

    @staticmethod
    def translate_file(input_file_name):
        """
        Fully translate a file
        """
        output_file_name = input_file_name.split(".")[0] + ".asm"
        os.system(f"cp {input_file_name} {output_file_name}")
        VirtualMachineTranslator.clean(output_file_name)
        VirtualMachineTranslator.parse_file(output_file_name)

    @staticmethod
    def parse_file(input_file_name):
        """
        Parse every instruction and write the requested and further translated equivalent
        """
        with open(input_file_name, "r+") as input_file:
            last_function = ""
            instructions = input_file.readlines()
            input_file.seek(0)
            total_instructions = 0
            for line in instructions:
                instruction_structure = line.split()
                instruction = instruction_structure[0]
                bytecode_instruction = []
                if len(instruction_structure) == 1 and instruction != "return":  # Stack arithmetic
                    bytecode_instruction = VirtualMachineLibrary.get_arithmetic(instruction, last_function, input_file_name.split(".")[0], total_instructions)
                elif instruction in ["pop", "push"]:  # Memory access
                    bytecode_instruction = VirtualMachineLibrary.get_memory(line, input_file_name.split(".")[0])
                elif len(instruction_structure) == 2:  # Program flow
                    label = instruction_structure[1]
                    bytecode_instruction = VirtualMachineLibrary.get_program_flow(instruction, label, last_function)
                else:  # Function calling
                    if instruction == "function":
                        # BUG FIX: the original assigned the name to an unused
                        # variable (last_instruction), so labels were never
                        # scoped to the current function.
                        last_function = instruction_structure[1]
                    bytecode_instruction = VirtualMachineLibrary.get_function(instruction_structure, total_instructions, input_file_name.split(".")[0])
                input_file.write(f"// {line}")
                for instruction in bytecode_instruction:
                    total_instructions += 1
                    input_file.write(instruction + "\n")
            input_file.truncate()

    @staticmethod
    def clean(input_file):
        """
        Remove unnecessary whitespaces and comments
        """
        with open(input_file, "r+") as f:
            lines = f.readlines()
            f.seek(0)
            for line in lines:
                if line != "\n":
                    if "//" in line:
                        line_elements = line.lstrip().split("//")
                        if line_elements[0]:
                            f.write(line_elements[0].rstrip() + "\n")
                    else:
                        f.write(line)
            f.truncate()
| # A virtual machine translator. Intermediate code, supplied by front-end compiler, to Hack machine language. @DimitarYordanov7
# To run: python3 virtualMachine.py {your .vm file} {yes/no, should distinct .asm files be kept} {yes/no, should bootstrap code be added}
from lib.virtual_machine_translator.virtualMachineLibrary import VirtualMachineLibrary
import os
import sys
class VirtualMachineTranslator:
    """
    Main class, capable of processing a full directory, with .vm files resulting in one .asm file
    """

    # Hack bootstrap: initialise the stack pointer to 256.
    BOOTSTRAP_CODE = ["@256", "D=A", "@SP", "M=D"]

    @staticmethod
    def translate(path, keep_disctint_files, add_bootstrap_code):
        """
        Translate a path - create out.asm, add? bootstrap code, add? translated Sys.vm, add remaining translated .vm files
        """
        vm_files = []
        for root, dirs, files in os.walk(path):
            for file_name in files:
                if ".vm" in file_name:
                    vm_files.append(file_name)
            break  # only scan the top-level directory
        with open("out.asm", "w") as output_file:
            if add_bootstrap_code:
                output_file.write("// bootstrap code \n")
                for instruction in VirtualMachineTranslator.BOOTSTRAP_CODE:
                    output_file.write(instruction + "\n")
            # Sys.vm must come first so Sys.init directly follows the bootstrap.
            if "Sys.vm" in vm_files:
                VirtualMachineTranslator.translate_file("Sys.vm")
                with open("Sys.asm", "r") as sys_file:
                    output_file.write(sys_file.read())
                vm_files.remove("Sys.vm")
                if not keep_disctint_files:
                    os.system("rm Sys.asm")
            for vm_file_name in vm_files:
                VirtualMachineTranslator.translate_file(vm_file_name)
                with open(vm_file_name.split(".")[0] + ".asm", "r") as vm_file:
                    output_file.write(vm_file.read())
        if not keep_disctint_files:
            for file_name in vm_files:
                asm_file_name = file_name.split(".")[0] + ".asm"
                os.system(f"rm {asm_file_name}")

    @staticmethod
    def translate_file(input_file_name):
        """
        Fully translate a file
        """
        output_file_name = input_file_name.split(".")[0] + ".asm"
        os.system(f"cp {input_file_name} {output_file_name}")
        VirtualMachineTranslator.clean(output_file_name)
        VirtualMachineTranslator.parse_file(output_file_name)

    @staticmethod
    def parse_file(input_file_name):
        """
        Parse every instruction and write the requested and further translated equivalent
        """
        with open(input_file_name, "r+") as input_file:
            last_function = ""
            instructions = input_file.readlines()
            input_file.seek(0)
            total_instructions = 0
            for line in instructions:
                instruction_structure = line.split()
                instruction = instruction_structure[0]
                bytecode_instruction = []
                if len(instruction_structure) == 1 and instruction != "return":  # Stack arithmetic
                    bytecode_instruction = VirtualMachineLibrary.get_arithmetic(instruction, last_function, input_file_name.split(".")[0], total_instructions)
                elif instruction in ["pop", "push"]:  # Memory access
                    bytecode_instruction = VirtualMachineLibrary.get_memory(line, input_file_name.split(".")[0])
                elif len(instruction_structure) == 2:  # Program flow
                    label = instruction_structure[1]
                    bytecode_instruction = VirtualMachineLibrary.get_program_flow(instruction, label, last_function)
                else:  # Function calling
                    if instruction == "function":
                        # BUG FIX: the original assigned the name to an unused
                        # variable (last_instruction), so labels were never
                        # scoped to the current function.
                        last_function = instruction_structure[1]
                    bytecode_instruction = VirtualMachineLibrary.get_function(instruction_structure, total_instructions, input_file_name.split(".")[0])
                input_file.write(f"// {line}")
                for instruction in bytecode_instruction:
                    total_instructions += 1
                    input_file.write(instruction + "\n")
            input_file.truncate()

    @staticmethod
    def clean(input_file):
        """
        Remove unnecessary whitespaces and comments
        """
        with open(input_file, "r+") as f:
            lines = f.readlines()
            f.seek(0)
            for line in lines:
                if line != "\n":
                    if "//" in line:
                        line_elements = line.lstrip().split("//")
                        if line_elements[0]:
                            f.write(line_elements[0].rstrip() + "\n")
                    else:
                        f.write(line)
            f.truncate()
tests/test_exhaustive.py | falcon-computing/Merlin_DSE | 1 | 6619466 | """
The unit test module for exhaustive serach algorithm.
"""
from autodse import logger
from autodse.parameter import MerlinParameter
from autodse.explorer.exhaustive import ExhaustiveAlgorithm
from autodse.result import Result
LOG = logger.get_default_logger('UNIT-TEST', 'DEBUG')
def test_exhaustive():
    """Exhaustive search should enumerate 43 design points over 6 batches."""
    LOG.debug('=== Testing exhaustive search algorithm start ===')

    def make_param(name, option_expr, default, deps=None, child=None):
        # Local factory keeping the design-space description compact.
        param = MerlinParameter()
        param.name = name
        param.option_expr = option_expr
        param.default = default
        if deps is not None:
            param.deps = deps
        if child is not None:
            param.child = child
        return param

    space = {
        'A': make_param(
            'A', '[x for x in range(10) if x==0 or B!="flatten" and C!="flatten"]',
            0, deps=['B', 'C']),
        'B': make_param(
            'B', '[x for x in ["off", "", "flatten"] if x=="off" or C!="flatten"]',
            'off', deps=['C'], child=['A']),
        'C': make_param(
            'C', '[x for x in ["off", "", "flatten"]]',
            'off', child=['A', 'B']),
    }

    algo = ExhaustiveAlgorithm(space)
    gen = algo.gen()
    batch_results = [Result()] * 8
    iter_cnt = 0
    point_cnt = 0
    feedback = None  # the first send() into a generator must be None
    while True:
        try:
            points = gen.send(feedback)
        except StopIteration:
            break
        point_cnt += len(points)
        iter_cnt += 1
        feedback = batch_results
    assert point_cnt == 43 and iter_cnt == 6
    LOG.debug('=== Testing exhaustive search algorithm end ===')
| """
The unit test module for exhaustive serach algorithm.
"""
from autodse import logger
from autodse.parameter import MerlinParameter
from autodse.explorer.exhaustive import ExhaustiveAlgorithm
from autodse.result import Result
LOG = logger.get_default_logger('UNIT-TEST', 'DEBUG')
def test_exhaustive():
    """Exhaustive search should enumerate 43 design points over 6 batches."""
    LOG.debug('=== Testing exhaustive search algorithm start ===')

    def make_param(name, option_expr, default, deps=None, child=None):
        # Local factory keeping the design-space description compact.
        param = MerlinParameter()
        param.name = name
        param.option_expr = option_expr
        param.default = default
        if deps is not None:
            param.deps = deps
        if child is not None:
            param.child = child
        return param

    space = {
        'A': make_param(
            'A', '[x for x in range(10) if x==0 or B!="flatten" and C!="flatten"]',
            0, deps=['B', 'C']),
        'B': make_param(
            'B', '[x for x in ["off", "", "flatten"] if x=="off" or C!="flatten"]',
            'off', deps=['C'], child=['A']),
        'C': make_param(
            'C', '[x for x in ["off", "", "flatten"]]',
            'off', child=['A', 'B']),
    }

    algo = ExhaustiveAlgorithm(space)
    gen = algo.gen()
    batch_results = [Result()] * 8
    iter_cnt = 0
    point_cnt = 0
    feedback = None  # the first send() into a generator must be None
    while True:
        try:
            points = gen.send(feedback)
        except StopIteration:
            break
        point_cnt += len(points)
        iter_cnt += 1
        feedback = batch_results
    assert point_cnt == 43 and iter_cnt == 6
    LOG.debug('=== Testing exhaustive search algorithm end ===')
| en | 0.37103 | The unit test module for exhaustive serach algorithm. #pylint:disable=missing-docstring | 3.035417 | 3 |
pyPseudo/lexer/Lexer.py | johnyob/Pseudo | 1 | 6619467 | from pyPseudo.error.ScanError import ScanError
from pyPseudo.lexer.Token import Token
from pyPseudo.lexer.TokenType import TokenType, keywords
class Lexer:
    """Hand-written scanner turning pseudo-code source text into Tokens.

    Errors are collected as ScanError objects (see getErrors()) instead of
    aborting the scan.
    """

    def __init__(self, source, path):
        self._source = source
        self._path = path
        self._tokens = []
        self._errors = []
        self._start = 0    # index where the current lexeme begins
        self._current = 0  # index of the next character to consume
        self._line = 1     # 1-based line counter for diagnostics

    def scanTokens(self):
        """Scan the whole source; return the token list terminated by EOF."""
        while not self._isAtEnd():
            self._start = self._current
            self._scanToken()
        self._tokens.append(Token(TokenType.EOF, "", None, self._path, self._line))
        return self._tokens

    def getErrors(self):
        """Return the list of ScanErrors collected so far."""
        return self._errors

    def _case(self, character, comparableCharacter):
        """Readability helper for the dispatch chain in _scanToken."""
        return character == comparableCharacter

    def _scanToken(self):
        """Consume one character and emit the corresponding token, if any."""
        character = self._move()
        if self._case(character, "("):
            self._addToken(TokenType.LEFT_PAREN)
        elif self._case(character, ")"):
            self._addToken(TokenType.RIGHT_PAREN)
        elif self._case(character, "["):
            self._addToken(TokenType.LEFT_SQUARE)
        elif self._case(character, "]"):
            self._addToken(TokenType.RIGHT_SQUARE)
        elif self._case(character, "{"):
            self._addToken(TokenType.LEFT_BRACE)
        elif self._case(character, "}"):
            self._addToken(TokenType.RIGHT_BRACE)
        elif self._case(character, ","):
            self._addToken(TokenType.COMMA)
        elif self._case(character, "."):
            self._addToken(TokenType.DOT)
        elif self._case(character, "-"):
            self._addToken(TokenType.MINUS)
        elif self._case(character, "+"):
            self._addToken(TokenType.PLUS)
        elif self._case(character, ";"):
            # Semicolons are currently ignored by the grammar.
            #self._addToken(TokenType.SEMICOLON)
            pass
        elif self._case(character, "*"):
            self._addToken(TokenType.STAR)
        elif self._case(character, "<"):
            # '<-', '<=', '<>' or plain '<'.
            self._addToken(
                TokenType.LEFT_ARROW if self._match("-") else TokenType.LESS_EQUAL \
                if self._match("=") else TokenType.NOT_EQUAL if self._match(">") else \
                TokenType.LESS
            )
        elif self._case(character, ">"):
            self._addToken(
                TokenType.GREATER_EQUAL if self._match("=") else TokenType.GREATER
            )
        elif self._case(character, "="):
            self._addToken(TokenType.EQUAL)
        elif self._case(character, "/"):
            if self._match("/"):
                # '//' starts a line comment: skip to end of line.
                while self._peek() != "\n" and not self._isAtEnd():
                    self._move()
            else:
                self._addToken(TokenType.SLASH)
        elif self._case(character, " "):
            pass
        elif self._case(character, "\r"):
            pass
        elif self._case(character, "\t"):
            pass
        elif self._case(character, "\n"):
            self._line += 1
        elif self._case(character, "\""):
            self._string()
        else:
            if self._isDigit(character):
                self._number()
            elif self._isAlpha(character):
                self._identifier()
            else:
                # BUG FIX: _error() takes only the message (path and line are
                # instance state); the old 3-argument call raised a TypeError
                # whenever an unexpected character was encountered.
                self._error("Unexpected character")

    def _identifier(self):
        """Scan an identifier or keyword."""
        while not self._isAtEnd() and self._isAlphaNumeric(self._peek()):
            self._move()
        text = self._source[self._start : self._current]
        token = keywords.get(text, TokenType.IDENTIFIER)
        self._addToken(token)

    def _number(self):
        """Scan an (optionally fractional) number literal."""
        while not self._isAtEnd() and self._isDigit(self._peek()):
            self._move()
        if self._peek() == "." and self._isDigit(self._peekNext()):
            self._move()  # consume the '.'
            while self._isDigit(self._peek()):
                self._move()
        literal = float(self._source[self._start : self._current])
        self._addTokenLiteral(TokenType.NUMBER, literal)

    def _string(self):
        """Scan a double-quoted string literal (may span multiple lines)."""
        while self._peek() != "\"" and not self._isAtEnd():
            if self._peek() == "\n":
                self._line += 1
            self._move()
        if self._isAtEnd():
            self._error("Unterminated string")
            return
        self._move()  # consume the closing quote
        literal = self._source[self._start + 1 : self._current - 1]
        self._addTokenLiteral(TokenType.STRING, literal)

    def _match(self, expected):
        """Consume the next character only if it equals *expected*."""
        if self._isAtEnd():
            return False
        if self._source[self._current] != expected:
            return False
        self._current += 1
        return True

    def _peekNext(self):
        """Return the character after next without consuming (or NUL)."""
        if self._current + 1 >= len(self._source):
            return '\0'
        return self._source[self._current + 1]

    def _peek(self):
        """Return the next character without consuming it (or NUL at end)."""
        if self._isAtEnd():
            return '\0'
        return self._source[self._current]

    def _move(self):
        """Consume and return the next character."""
        self._current += 1
        return self._source[self._current - 1]

    def _addToken(self, type):
        self._addTokenLiteral(type, None)

    def _addTokenLiteral(self, type, literal):
        lexeme = self._source[self._start : self._current]
        self._tokens.append(Token(type, lexeme, literal, self._path, self._line))

    def _isDigit(self, character):
        return character.isdigit()

    def _isAlpha(self, character):
        return character.isalpha()

    def _isAlphaNumeric(self, character):
        return self._isDigit(character) or self._isAlpha(character)

    def _isAtEnd(self):
        return self._current >= len(self._source)

    def _error(self, message):
        """Record a ScanError at the current path/line."""
        self._errors.append(ScanError(self._path, self._line, message))
| from pyPseudo.error.ScanError import ScanError
from pyPseudo.lexer.Token import Token
from pyPseudo.lexer.TokenType import TokenType, keywords
class Lexer:
    """Hand-written scanner turning pseudo-code source text into Token objects.

    Scanning never aborts on bad input: lexical problems are recorded as
    ScanError objects (see getErrors()) and scanning continues, so all
    errors in a file are reported in one pass.
    """

    def __init__(self, source, path):
        self._source = source    # complete source text to scan
        self._path = path        # originating file path (for error reporting)
        self._tokens = []        # Token objects produced so far
        self._errors = []        # ScanError objects collected so far
        self._start = 0          # index of the first char of the current lexeme
        self._current = 0        # index of the next character to consume
        self._line = 1           # 1-based line number of the current position

    def scanTokens(self):
        """Scan the whole source and return the token list, EOF-terminated."""
        while not self._isAtEnd():
            self._start = self._current
            self._scanToken()
        self._tokens.append(Token(TokenType.EOF, "", None, self._path, self._line))
        return self._tokens

    def getErrors(self):
        """Return the ScanError objects collected during scanning."""
        return self._errors

    def _case(self, character, comparableCharacter):
        # Tiny helper so _scanToken reads like a switch over single characters.
        return character == comparableCharacter

    def _scanToken(self):
        """Consume one character and emit the token it introduces (if any)."""
        character = self._move()
        if self._case(character, "("):
            self._addToken(TokenType.LEFT_PAREN)
        elif self._case(character, ")"):
            self._addToken(TokenType.RIGHT_PAREN)
        elif self._case(character, "["):
            self._addToken(TokenType.LEFT_SQUARE)
        elif self._case(character, "]"):
            self._addToken(TokenType.RIGHT_SQUARE)
        elif self._case(character, "{"):
            self._addToken(TokenType.LEFT_BRACE)
        elif self._case(character, "}"):
            self._addToken(TokenType.RIGHT_BRACE)
        elif self._case(character, ","):
            self._addToken(TokenType.COMMA)
        elif self._case(character, "."):
            self._addToken(TokenType.DOT)
        elif self._case(character, "-"):
            self._addToken(TokenType.MINUS)
        elif self._case(character, "+"):
            self._addToken(TokenType.PLUS)
        elif self._case(character, ";"):
            # Semicolons are accepted and silently discarded.
            #self._addToken(TokenType.SEMICOLON)
            pass
        elif self._case(character, "*"):
            self._addToken(TokenType.STAR)
        elif self._case(character, "<"):
            # Order matters: each _match() consumes a character on success, so
            # the two-character operators must be tried before plain LESS.
            if self._match("-"):
                self._addToken(TokenType.LEFT_ARROW)
            elif self._match("="):
                self._addToken(TokenType.LESS_EQUAL)
            elif self._match(">"):
                self._addToken(TokenType.NOT_EQUAL)
            else:
                self._addToken(TokenType.LESS)
        elif self._case(character, ">"):
            self._addToken(
                TokenType.GREATER_EQUAL if self._match("=") else TokenType.GREATER
            )
        elif self._case(character, "="):
            self._addToken(TokenType.EQUAL)
        elif self._case(character, "/"):
            if self._match("/"):
                # '//' starts a line comment: skip to the end of the line.
                while self._peek() != "\n" and not self._isAtEnd():
                    self._move()
            else:
                self._addToken(TokenType.SLASH)
        elif self._case(character, " "):
            pass
        elif self._case(character, "\r"):
            pass
        elif self._case(character, "\t"):
            pass
        elif self._case(character, "\n"):
            self._line += 1
        elif self._case(character, "\""):
            self._string()
        else:
            if self._isDigit(character):
                self._number()
            elif self._isAlpha(character):
                self._identifier()
            else:
                # BUG FIX: _error() takes only the message -- it adds the path
                # and line itself.  The previous 3-argument call
                # _error(self._path, self._line, ...) raised TypeError instead
                # of recording the scan error.
                self._error("Unexpected character")

    def _identifier(self):
        """Scan an identifier or keyword starting at self._start."""
        while not self._isAtEnd() and self._isAlphaNumeric(self._peek()):
            self._move()
        text = self._source[self._start : self._current]
        # Keywords win over plain identifiers.
        token = keywords.get(text, TokenType.IDENTIFIER)
        self._addToken(token)

    def _number(self):
        """Scan an integer or decimal literal; the value is stored as float."""
        while not self._isAtEnd() and self._isDigit(self._peek()):
            self._move()
        # Consume a fractional part only when a digit follows the dot.
        if self._peek() == "." and self._isDigit(self._peekNext()):
            self._move()
            while self._isDigit(self._peek()):
                self._move()
        literal = float(self._source[self._start : self._current])
        self._addTokenLiteral(TokenType.NUMBER, literal)

    def _string(self):
        """Scan a double-quoted string literal (may span multiple lines)."""
        while self._peek() != "\"" and not self._isAtEnd():
            if self._peek() == "\n":
                self._line += 1
            self._move()
        if self._isAtEnd():
            self._error("Unterminated string")
            return
        self._move()  # consume the closing quote
        # The literal value excludes the surrounding quotes.
        literal = self._source[self._start + 1 : self._current - 1]
        self._addTokenLiteral(TokenType.STRING, literal)

    def _match(self, expected):
        """Consume the next character only if it equals *expected*."""
        if self._isAtEnd():
            return False
        if self._source[self._current] != expected:
            return False
        self._current += 1
        return True

    def _peekNext(self):
        # One character of lookahead past _peek(); NUL sentinel at the end.
        if self._current + 1 >= len(self._source):
            return '\0'
        return self._source[self._current + 1]

    def _peek(self):
        # Current unconsumed character without advancing; NUL sentinel at end.
        if self._isAtEnd():
            return '\0'
        return self._source[self._current]

    def _move(self):
        """Consume and return the current character."""
        self._current += 1
        return self._source[self._current - 1]

    def _addToken(self, type):
        # Convenience wrapper for tokens that carry no literal value.
        self._addTokenLiteral(type, None)

    def _addTokenLiteral(self, type, literal):
        """Append a Token built from the current lexeme span [_start, _current)."""
        lexeme = self._source[self._start : self._current]
        self._tokens.append(Token(type, lexeme, literal, self._path, self._line))

    def _isDigit(self, character):
        # True for decimal digit characters (Unicode-aware str.isdigit).
        return character.isdigit()

    def _isAlpha(self, character):
        # True for alphabetic characters (Unicode-aware str.isalpha).
        return character.isalpha()

    def _isAlphaNumeric(self, character):
        # A character usable inside an identifier: a letter or a digit.
        return self._isDigit(character) or self._isAlpha(character)

    def _isAtEnd(self):
        # True once every character of the source has been consumed.
        return self._current >= len(self._source)

    def _error(self, message):
        """Record a ScanError; scanning continues afterwards."""
        self._errors.append(ScanError(self._path, self._line, message))
| ja | 0.281177 | #self._addToken(TokenType.SEMICOLON) | 2.809718 | 3 |
findbps.py | jpaggi/findbps | 1 | 6619468 | <filename>findbps.py
from subprocess import Popen, PIPE
from pickle import dumps
from os import path
def findbps(reads, output, bowtie_options, motif, length, threshold, strand):
    """
    Split stranded RNA-seq reads at their most likely 5' splice site, map the
    fragments with bowtie, and report valid alignments in paired-end bed format.

    Input:
     reads: str of name of file where single-end, stranded
            RNA-seq reads in fastq format are located
     output:str of desired basename of output files
     bowtie_options: str of bowtie options you wish to
                     be used for alignment of reads after splitting.
                     See the bowtie manual.
                     Recommend "-y -p 2 -v 0 -X 5000 -m 1 <index>"
     motif: list of dictionaries representing 5'ss motif
            position weight matrix. Each dictionary has a
            key for each nucleotide, with a float of the
            probability as keys.
     length:int of the lowest acceptable number of bases
            used to align a fragment of a read.
     threshold: float of the lowest acceptable probability
                that a sequence would be sampled from the
                given matrix in order to attempt mapping.
                Recommend 0.0 unless many false positives
     strand:str either 'first' if reads are first-stranded
            or 'second' if reads are second-stranded
    Output:
     output + '.bed':
            A file in paired-end bed format with
            information about the reads with a valid
            alignment.
     output + '_no_alignment.fastq':
            Reads with no valid alignment in the
            paired-end tab-delimited format
            described in the bowtie manual split
            as they were attempted to be aligned.
    """
    #gets the name of the directory of this file (fp_checker.py / make_bed.py live here)
    directory = path.dirname(path.realpath(__file__))
    #make these arguments into strings so they can be passed to fp_checker.py
    # NOTE(review): pickle.dumps() returns bytes on Python 3, so this string
    # concatenation only works under Python 2 -- confirm the target interpreter.
    motif = '"' + dumps(motif) + '"'
    length = str(length)
    threshold = str(threshold)
    #this process splits each read at the most likely 5'SS based on the
    # given weight matrix and sends them to bowtie to be mapped
    # see fp_checker.py for further details
    # NOTE(review): shell=True with string-built commands; arguments are not
    # shell-escaped, so paths/options containing spaces or metacharacters break.
    fp_checker = Popen('python ' + directory + '/fp_checker.py ' +
                       motif +' '+ length +' '+ threshold +' '+ strand,
                       stdin = open(reads,'r'), stdout = PIPE, shell = True)
    #this process maps each split read to the given genome
    bowtie = Popen('bowtie --ff ' + bowtie_options + ' --12 - --un ' +
                   output+'_no_alignment.fastq',
                   stdin = fp_checker.stdout, stdout = PIPE, shell = True)
    # Closing our handle lets fp_checker receive SIGPIPE if bowtie exits early.
    fp_checker.stdout.close()
    #this process converts the bowtie output into a bed file
    # see make_bed.py for further details
    make_bed = Popen('python ' + directory + '/make_bed.py',
                     stdin = bowtie.stdout,
                     stdout = open(output + ".bed",'w'), shell = True)
    bowtie.stdout.close()
    # Only the final pipeline stage is waited on; upstream stages finish when
    # their pipes drain.
    make_bed.wait()
    return 0
if __name__ == '__main__':
    # CLI entry point:
    #   findbps.py <reads> <output> "<bowtie options>" "<motif>" <length> <threshold> <strand>
    from sys import argv
    from ast import literal_eval

    reads = argv[1]
    output = argv[2]
    bowtie_options = argv[3]
    # SECURITY FIX: the motif argument is a Python literal (list of dicts);
    # parse it with ast.literal_eval instead of eval() so arbitrary code
    # passed on the command line cannot be executed.
    motif = literal_eval(argv[4])
    length = int(argv[5])
    threshold = float(argv[6])
    strand = argv[7]
    findbps(reads, output, bowtie_options, motif, length, threshold, strand)
| <filename>findbps.py
from subprocess import Popen, PIPE
from pickle import dumps
from os import path
def findbps(reads, output, bowtie_options, motif, length, threshold, strand):
    """
    Split stranded RNA-seq reads at their most likely 5' splice site, map the
    fragments with bowtie, and report valid alignments in paired-end bed format.

    Input:
     reads: str of name of file where single-end, stranded
            RNA-seq reads in fastq format are located
     output:str of desired basename of output files
     bowtie_options: str of bowtie options you wish to
                     be used for alignment of reads after splitting.
                     See the bowtie manual.
                     Recommend "-y -p 2 -v 0 -X 5000 -m 1 <index>"
     motif: list of dictionaries representing 5'ss motif
            position weight matrix. Each dictionary has a
            key for each nucleotide, with a float of the
            probability as keys.
     length:int of the lowest acceptable number of bases
            used to align a fragment of a read.
     threshold: float of the lowest acceptable probability
                that a sequence would be sampled from the
                given matrix in order to attempt mapping.
                Recommend 0.0 unless many false positives
     strand:str either 'first' if reads are first-stranded
            or 'second' if reads are second-stranded
    Output:
     output + '.bed':
            A file in paired-end bed format with
            information about the reads with a valid
            alignment.
     output + '_no_alignment.fastq':
            Reads with no valid alignment in the
            paired-end tab-delimited format
            described in the bowtie manual split
            as they were attempted to be aligned.
    """
    #gets the name of the directory of this file (fp_checker.py / make_bed.py live here)
    directory = path.dirname(path.realpath(__file__))
    #make these arguments into strings so they can be passed to fp_checker.py
    # NOTE(review): pickle.dumps() returns bytes on Python 3, so this string
    # concatenation only works under Python 2 -- confirm the target interpreter.
    motif = '"' + dumps(motif) + '"'
    length = str(length)
    threshold = str(threshold)
    #this process splits each read at the most likely 5'SS based on the
    # given weight matrix and sends them to bowtie to be mapped
    # see fp_checker.py for further details
    # NOTE(review): shell=True with string-built commands; arguments are not
    # shell-escaped, so paths/options containing spaces or metacharacters break.
    fp_checker = Popen('python ' + directory + '/fp_checker.py ' +
                       motif +' '+ length +' '+ threshold +' '+ strand,
                       stdin = open(reads,'r'), stdout = PIPE, shell = True)
    #this process maps each split read to the given genome
    bowtie = Popen('bowtie --ff ' + bowtie_options + ' --12 - --un ' +
                   output+'_no_alignment.fastq',
                   stdin = fp_checker.stdout, stdout = PIPE, shell = True)
    # Closing our handle lets fp_checker receive SIGPIPE if bowtie exits early.
    fp_checker.stdout.close()
    #this process converts the bowtie output into a bed file
    # see make_bed.py for further details
    make_bed = Popen('python ' + directory + '/make_bed.py',
                     stdin = bowtie.stdout,
                     stdout = open(output + ".bed",'w'), shell = True)
    bowtie.stdout.close()
    # Only the final pipeline stage is waited on; upstream stages finish when
    # their pipes drain.
    make_bed.wait()
    return 0
if __name__ == '__main__':
    # CLI entry point:
    #   findbps.py <reads> <output> "<bowtie options>" "<motif>" <length> <threshold> <strand>
    from sys import argv
    from ast import literal_eval

    reads = argv[1]
    output = argv[2]
    bowtie_options = argv[3]
    # SECURITY FIX: the motif argument is a Python literal (list of dicts);
    # parse it with ast.literal_eval instead of eval() so arbitrary code
    # passed on the command line cannot be executed.
    motif = literal_eval(argv[4])
    length = int(argv[5])
    threshold = float(argv[6])
    strand = argv[7]
    findbps(reads, output, bowtie_options, motif, length, threshold, strand)
| en | 0.881454 | Input: reads: str of name of file where single-end, stranded RNA-seq reads in fastq format are located output:str of desired basename of output files bowtie_options: str of bowtie options you wish to be used for alignment of reads after splitting. See the bowtie manual. Recommend "-y -p 2 -v 0 -X 5000 -m 1 <index>" motif: list of dictionaries representing 5'ss motif position weight matrix. Each dictionary has a key for each nucleotide, with a float of the probability as keys. length:int of the lowest acceptable number of bases used to align a fragment of a read. threshold: float of the lowest acceptable probability that a sequence would be sampled from the given martrix in order to attempt mapping. Recommend 0.0 unless many false positives strand:str either 'first' if reads are first-stranded or 'second' if reads are second-stranded Output: output + '.bed': A file in paired-end bed format with information about the reads with a valid alignment. output + '_no_alignment.fastq': Reads with no valid alignment in the paired-end tab-delimited format described in the bowtie manual split as they were attempted to be aligned. #gets the name of the directory of this file #make these arguments into strings so they can be passed to fp_checker.py #this process splits each read at the most likely 5'SS based on the # given weight matrix and sends them to bowtie to be mapped # see fp_checker.py for further details #this process maps each split read to the given genome #this process converts the bowtie output into a bed file # see make_bed.py for further details | 2.80377 | 3 |
yamtbx/dataproc/xds/command_line/make_plot_to_compare_correctlp.py | 7l2icj/kamo_clone | 16 | 6619469 | <reponame>7l2icj/kamo_clone<gh_stars>10-100
#!/usr/bin/env yamtbx.python
"""
(c) RIKEN 2015. All rights reserved.
Author: <NAME>
This software is released under the new BSD License; see LICENSE.
"""
from yamtbx.dataproc.xds.correctlp import CorrectLp
import iotbx.phil
from collections import OrderedDict
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
master_params_str="""\
plot = *ios *rmeas *ccano *cmpl sigano cchalf red
.type = choice(multi=True)
.help = What to plot
output = "plot.pdf"
.type = path
rdataout = "for_R.dat"
.type = path
"""
def run(params, args):
    """Collect per-resolution-shell statistics from XDS CORRECT.LP files,
    plot them, and dump a long-format table for R (Python 2 syntax).

    params -- phil-extracted options (plot, output, rdataout)
    args   -- flat list of alternating values: lpfile1 label1 lpfile2 label2 ...
    """
    # Long-format data table consumed by the R snippet printed at the end.
    ofs = open(params.rdataout, "w")
    ofs.write("name s2max variable value\n")
    for_plot = OrderedDict()
    for p in params.plot:
        print "Preparing", p
        for_plot[p] = OrderedDict()
    # Maps the short CLI names to the column names used in CorrectLp tables.
    trans_table = dict(ios="i_over_sigma",
                       rmeas="r_meas",
                       ccano="cc_ano",
                       cmpl="cmpl",
                       sigano="sig_ano",
                       cchalf="cc_half",
                       red="redundancy")
    # args come in (lpfile, label) pairs.
    for lpfile, label in ((args[2*i],args[2*i+1]) for i in xrange((len(args))//2)):
        lp = CorrectLp(lpfile)
        print label, lpfile, lp.space_group.info(), "anomalous=%s"%lp.anomalous_flag
        ofs.write("# %s %s %s anomalous=%s\n" % (label, lpfile, lp.space_group.info(), lp.anomalous_flag))
        # Plot against 1/d^2; the last row (overall summary) is dropped.
        plot_x = map(lambda x:1/x**2, lp.table["all"]["dmin"][:-1])
        for p in params.plot:
            plot_y = lp.table["all"][trans_table[p]][:-1]
            for_plot[p][label] = plot_x, plot_y
            for px, py in zip(plot_x, plot_y):
                ofs.write("%s %.5f %s %f\n" % (label, px, trans_table[p], py))
    fig, ax = plt.subplots()
    #plt.title("Comparing xds results")
    # Tick labels show d-spacing (1/sqrt(s2)); s2 == 0 corresponds to infinity.
    s2_formatter = lambda x,pos: "inf" if x == 0 else "%.2f" % (1./math.sqrt(x))
    for i, p in enumerate(params.plot):
        # One stacked subplot per requested statistic.
        ax = plt.subplot(len(params.plot),1,i+1)
        for lab in for_plot[p]:
            plot_x, plot_y = for_plot[p][lab]
            ax.plot(plot_x, plot_y, label=lab, marker="o")
            ax.set_ylabel(trans_table[p])
            ax.xaxis.set_major_formatter(FuncFormatter(s2_formatter))
    plt.xlabel("Resolution [A]")
    #plt.legend()
    leg = plt.legend(loc='center left', bbox_to_anchor=(1,0.5), numpoints=1)
    fig.subplots_adjust(top=0.8)
    fig.savefig(params.output, bbox_extra_artists=(leg,), bbox_inches='tight')
    plt.show()
    # NOTE(review): ofs is never closed explicitly; data may remain buffered
    # until interpreter exit.
    print """
Instruction for R:
R
library(ggplot2)
d <- read.table("%s", h=T)
number_ticks <- function(n) {function(limits) pretty(limits, n)}
ggplot(d, aes(x=s2max, y=value, colour=factor(name))) + geom_point() + geom_line() + facet_grid(variable~., scale="free") + scale_x_continuous(label=function(x)sprintf("%%.2f", 1/sqrt(x)), breaks=number_ticks(10))
""" % params.rdataout
# run()
if __name__ == "__main__":
import sys
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
run(params, args)
| #!/usr/bin/env yamtbx.python
"""
(c) RIKEN 2015. All rights reserved.
Author: <NAME>
This software is released under the new BSD License; see LICENSE.
"""
from yamtbx.dataproc.xds.correctlp import CorrectLp
import iotbx.phil
from collections import OrderedDict
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
master_params_str="""\
plot = *ios *rmeas *ccano *cmpl sigano cchalf red
.type = choice(multi=True)
.help = What to plot
output = "plot.pdf"
.type = path
rdataout = "for_R.dat"
.type = path
"""
def run(params, args):
    """Collect per-resolution-shell statistics from XDS CORRECT.LP files,
    plot them, and dump a long-format table for R (Python 2 syntax).

    params -- phil-extracted options (plot, output, rdataout)
    args   -- flat list of alternating values: lpfile1 label1 lpfile2 label2 ...
    """
    # Long-format data table consumed by the R snippet printed at the end.
    ofs = open(params.rdataout, "w")
    ofs.write("name s2max variable value\n")
    for_plot = OrderedDict()
    for p in params.plot:
        print "Preparing", p
        for_plot[p] = OrderedDict()
    # Maps the short CLI names to the column names used in CorrectLp tables.
    trans_table = dict(ios="i_over_sigma",
                       rmeas="r_meas",
                       ccano="cc_ano",
                       cmpl="cmpl",
                       sigano="sig_ano",
                       cchalf="cc_half",
                       red="redundancy")
    # args come in (lpfile, label) pairs.
    for lpfile, label in ((args[2*i],args[2*i+1]) for i in xrange((len(args))//2)):
        lp = CorrectLp(lpfile)
        print label, lpfile, lp.space_group.info(), "anomalous=%s"%lp.anomalous_flag
        ofs.write("# %s %s %s anomalous=%s\n" % (label, lpfile, lp.space_group.info(), lp.anomalous_flag))
        # Plot against 1/d^2; the last row (overall summary) is dropped.
        plot_x = map(lambda x:1/x**2, lp.table["all"]["dmin"][:-1])
        for p in params.plot:
            plot_y = lp.table["all"][trans_table[p]][:-1]
            for_plot[p][label] = plot_x, plot_y
            for px, py in zip(plot_x, plot_y):
                ofs.write("%s %.5f %s %f\n" % (label, px, trans_table[p], py))
    fig, ax = plt.subplots()
    #plt.title("Comparing xds results")
    # Tick labels show d-spacing (1/sqrt(s2)); s2 == 0 corresponds to infinity.
    s2_formatter = lambda x,pos: "inf" if x == 0 else "%.2f" % (1./math.sqrt(x))
    for i, p in enumerate(params.plot):
        # One stacked subplot per requested statistic.
        ax = plt.subplot(len(params.plot),1,i+1)
        for lab in for_plot[p]:
            plot_x, plot_y = for_plot[p][lab]
            ax.plot(plot_x, plot_y, label=lab, marker="o")
            ax.set_ylabel(trans_table[p])
            ax.xaxis.set_major_formatter(FuncFormatter(s2_formatter))
    plt.xlabel("Resolution [A]")
    #plt.legend()
    leg = plt.legend(loc='center left', bbox_to_anchor=(1,0.5), numpoints=1)
    fig.subplots_adjust(top=0.8)
    fig.savefig(params.output, bbox_extra_artists=(leg,), bbox_inches='tight')
    plt.show()
    # NOTE(review): ofs is never closed explicitly; data may remain buffered
    # until interpreter exit.
    print """
Instruction for R:
R
library(ggplot2)
d <- read.table("%s", h=T)
number_ticks <- function(n) {function(limits) pretty(limits, n)}
ggplot(d, aes(x=s2max, y=value, colour=factor(name))) + geom_point() + geom_line() + facet_grid(variable~., scale="free") + scale_x_continuous(label=function(x)sprintf("%%.2f", 1/sqrt(x)), breaks=number_ticks(10))
""" % params.rdataout
# run()
if __name__ == "__main__":
import sys
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
run(params, args) | en | 0.428838 | #!/usr/bin/env yamtbx.python (c) RIKEN 2015. All rights reserved. Author: <NAME> This software is released under the new BSD License; see LICENSE. \ plot = *ios *rmeas *ccano *cmpl sigano cchalf red .type = choice(multi=True) .help = What to plot output = "plot.pdf" .type = path rdataout = "for_R.dat" .type = path #plt.title("Comapring xds results") #plt.legend() Instruction for R: R library(ggplot2) d <- read.table("%s", h=T) number_ticks <- function(n) {function(limits) pretty(limits, n)} ggplot(d, aes(x=s2max, y=value, colour=factor(name))) + geom_point() + geom_line() + facet_grid(variable~., scale="free") + scale_x_continuous(label=function(x)sprintf("%%.2f", 1/sqrt(x)), breaks=number_ticks(10)) # run() | 2.23471 | 2 |
traju/cli.py | vsheg/traju | 0 | 6619470 | """Console script for traju."""
from os import cpu_count
import sys
from argparse import ArgumentParser, Namespace
import logging
from pathlib import Path
from typing import *
from .helpers import *
logger = logging.getLogger(__name__)
def parse_args() -> Namespace:
    '''Parse command line arguments for the traju CLI.

    Returns the populated argparse.Namespace; no validation beyond what
    argparse itself performs happens here.
    '''
    parser = ArgumentParser(prog='traju', description='Proceed arguments')
    add_arg = parser.add_argument  # alias
    # File parameters
    add_arg(
        'path',
        help='path to directory or to trajectories',
        type=Path,
        nargs='*',
        default=Path(),
    )
    add_arg('--recursive', '-r', help='go through subfolders', action='store_true')
    add_arg(
        '--strict',
        help='stop when a problem occurs, else just warn',
        action='store_true',
    )
    # FIX: typos in user-visible help text ('extentions', 'sliently',
    # 'trajctory') corrected below.
    add_arg('--traj-exts', help='trajectory file extensions', type=str, default='nc')
    add_arg('--top-exts', help='topology file extensions', type=str, default='prmtop')
    add_arg(
        '-y',
        '--yes',
        help='run script silently without interactivity',
        action='store_true',
    )
    add_arg(
        '--summary',
        '-s',
        help='write summary file for joined trajectories',
        action='store_true',
    )
    # Computation parameters
    add_arg(
        '--max-procs',
        '-p',
        help='upper limit for number of using processes '
        '(note: the number of CPU cores is upper limit too)',
        type=int,
        default=16,
    )
    # Mutually exclusive output options
    save_group = parser.add_mutually_exclusive_group()
    save_group.add_argument(
        '--overwrite', '-o', help='overwrite original files', action='store_true'
    )
    save_group.add_argument(
        '--nearby',
        '-n',
        help='write new trajectory to the same folder as the original',
        action='store_true',
    )
    save_group.add_argument(
        '--join', '-j', help='join trajectories into one', action='store_true'
    )
    # cpptraj parameters
    add_arg('--prefix', help='add prefix to new trajectories', type=str, default='')
    add_arg('--postfix', help='add postfix to new trajectories', type=str, default='_u')
    # FIX: stray trailing commas after the next two calls removed -- they
    # silently built throwaway one-element tuples.
    add_arg(
        '--ext', '-e', help='extension for new trajectories', type=str, default='nc'
    )
    add_arg(
        '--align',
        '-a',
        help='align protein backbone to the first frame',
        action='store_true',
    )
    add_arg('--dehyd', '-d', help='remove water', action='store_true')
    # proceed arguments
    args = parser.parse_args()
    logger.debug('Arguments were parsed')
    return args
args = parse_args()
# All module-level flags below derive from the parsed CLI namespace.
PATHS: Sequence[Path] = vector_like(args.path)  # input paths (scalar normalised to a list)
logger.debug('Number of provided paths: %s', len(PATHS))
# Interface flags
SILENT: bool = not args.yes  # don't ask for user approval
if SILENT:
    logging.getLogger().setLevel(logging.WARN)  # quieten the root logger
STRICT: bool = args.strict  # abort when a problem occurs with any trajectory
# Computing parameters
MAX_PROCS: int = args.max_procs
# Saving flags (mutually exclusive on the CLI)
NEARBY: bool = args.nearby  # save output trajs next to the originals
JOIN: bool = args.join  # join input trajs into one
OVERWRITE: bool = args.overwrite  # replace original trajs with outputs
# File naming
TOP_EXTENTIONS: Iterable[str] = vector_like(args.top_exts)  # without preceding dot
TRAJ_EXTENTIONS: Iterable[str] = vector_like(args.traj_exts)  # without preceding dot
PREFIX: str = args.prefix
POSTFIX: str = args.postfix
TRAJ_OUT_EXT: str = args.ext
# cpptraj processing switches
DEHYDRATE: bool = args.dehyd
ALIGN: bool = args.align
def find_trajs(PATHS: Iterable) -> Sequence[Path]:
    '''Collect explicitly specified trajectory files and search the given
    directories for more, honouring the --recursive flag.'''
    # Split inputs into directories (to be searched) and files (taken as-is).
    dirs, trajs = apart(lambda path: path.is_file(), PATHS)
    trajs = list(trajs)  # to make sure
    if trajs:
        logger.info('%s traj(s) specified explicitly', len(trajs))
        if not SILENT:
            for traj in trajs:
                print(f'* {traj}')
    # find trajs in provided folders
    if dirs:
        add_trajs = []
        for dir_ in dirs:
            # BUG FIX: previously iterated `args.traj_exts`, which is a plain
            # string (default 'nc'); iterating it yields single characters
            # ('n', 'c') and globs for '*.n' / '*.c'.  TRAJ_EXTENTIONS is the
            # vector_like()-normalised list of extension strings.
            for ext in TRAJ_EXTENTIONS:
                for path in dir_.glob(('**/*.' if args.recursive else '*.') + ext):
                    if path.is_file():
                        add_trajs.append(path)
        logger.info(
            '%s traj(s) found in folder'
            + (' and subfolders recursively' if args.recursive else ''),
            len(add_trajs),
        )
        if not SILENT:
            for traj in add_trajs:
                print(f' * {traj}')
        trajs.extend(add_trajs)
    return trajs
TRAJS = find_trajs(PATHS)
def main():
    '''CLI entry point: run the prepared tasks and exit with status 0.'''
    # Imported lazily so module import (and e.g. --help) stays cheap; the
    # heavy task machinery is only loaded when the CLI actually runs.
    from .traju import TASKS, proceed_tasks
    proceed_tasks(TASKS)
    return 0
if __name__ == '__main__':
    sys.exit(main())  # pragma: no cover
| """Console script for traju."""
from os import cpu_count
import sys
from argparse import ArgumentParser, Namespace
import logging
from pathlib import Path
from typing import *
from .helpers import *
logger = logging.getLogger(__name__)
def parse_args() -> Namespace:
    '''Parse command line arguments for the traju CLI.

    Returns the populated argparse.Namespace; no validation beyond what
    argparse itself performs happens here.
    '''
    parser = ArgumentParser(prog='traju', description='Proceed arguments')
    add_arg = parser.add_argument  # alias
    # File parameters
    add_arg(
        'path',
        help='path to directory or to trajectories',
        type=Path,
        nargs='*',
        default=Path(),
    )
    add_arg('--recursive', '-r', help='go through subfolders', action='store_true')
    add_arg(
        '--strict',
        help='stop when a problem occurs, else just warn',
        action='store_true',
    )
    # FIX: typos in user-visible help text ('extentions', 'sliently',
    # 'trajctory') corrected below.
    add_arg('--traj-exts', help='trajectory file extensions', type=str, default='nc')
    add_arg('--top-exts', help='topology file extensions', type=str, default='prmtop')
    add_arg(
        '-y',
        '--yes',
        help='run script silently without interactivity',
        action='store_true',
    )
    add_arg(
        '--summary',
        '-s',
        help='write summary file for joined trajectories',
        action='store_true',
    )
    # Computation parameters
    add_arg(
        '--max-procs',
        '-p',
        help='upper limit for number of using processes '
        '(note: the number of CPU cores is upper limit too)',
        type=int,
        default=16,
    )
    # Mutually exclusive output options
    save_group = parser.add_mutually_exclusive_group()
    save_group.add_argument(
        '--overwrite', '-o', help='overwrite original files', action='store_true'
    )
    save_group.add_argument(
        '--nearby',
        '-n',
        help='write new trajectory to the same folder as the original',
        action='store_true',
    )
    save_group.add_argument(
        '--join', '-j', help='join trajectories into one', action='store_true'
    )
    # cpptraj parameters
    add_arg('--prefix', help='add prefix to new trajectories', type=str, default='')
    add_arg('--postfix', help='add postfix to new trajectories', type=str, default='_u')
    # FIX: stray trailing commas after the next two calls removed -- they
    # silently built throwaway one-element tuples.
    add_arg(
        '--ext', '-e', help='extension for new trajectories', type=str, default='nc'
    )
    add_arg(
        '--align',
        '-a',
        help='align protein backbone to the first frame',
        action='store_true',
    )
    add_arg('--dehyd', '-d', help='remove water', action='store_true')
    # proceed arguments
    args = parser.parse_args()
    logger.debug('Arguments were parsed')
    return args
args = parse_args()
# All module-level flags below derive from the parsed CLI namespace.
PATHS: Sequence[Path] = vector_like(args.path)  # input paths (scalar normalised to a list)
logger.debug('Number of provided paths: %s', len(PATHS))
# Interface flags
SILENT: bool = not args.yes  # don't ask for user approval
if SILENT:
    logging.getLogger().setLevel(logging.WARN)  # quieten the root logger
STRICT: bool = args.strict  # abort when a problem occurs with any trajectory
# Computing parameters
MAX_PROCS: int = args.max_procs
# Saving flags (mutually exclusive on the CLI)
NEARBY: bool = args.nearby  # save output trajs next to the originals
JOIN: bool = args.join  # join input trajs into one
OVERWRITE: bool = args.overwrite  # replace original trajs with outputs
# File naming
TOP_EXTENTIONS: Iterable[str] = vector_like(args.top_exts)  # without preceding dot
TRAJ_EXTENTIONS: Iterable[str] = vector_like(args.traj_exts)  # without preceding dot
PREFIX: str = args.prefix
POSTFIX: str = args.postfix
TRAJ_OUT_EXT: str = args.ext
# cpptraj processing switches
DEHYDRATE: bool = args.dehyd
ALIGN: bool = args.align
def find_trajs(PATHS: Iterable) -> Sequence[Path]:
    '''Collect explicitly specified trajectory files and search the given
    directories for more, honouring the --recursive flag.'''
    # Split inputs into directories (to be searched) and files (taken as-is).
    dirs, trajs = apart(lambda path: path.is_file(), PATHS)
    trajs = list(trajs)  # to make sure
    if trajs:
        logger.info('%s traj(s) specified explicitly', len(trajs))
        if not SILENT:
            for traj in trajs:
                print(f'* {traj}')
    # find trajs in provided folders
    if dirs:
        add_trajs = []
        for dir_ in dirs:
            # BUG FIX: previously iterated `args.traj_exts`, which is a plain
            # string (default 'nc'); iterating it yields single characters
            # ('n', 'c') and globs for '*.n' / '*.c'.  TRAJ_EXTENTIONS is the
            # vector_like()-normalised list of extension strings.
            for ext in TRAJ_EXTENTIONS:
                for path in dir_.glob(('**/*.' if args.recursive else '*.') + ext):
                    if path.is_file():
                        add_trajs.append(path)
        logger.info(
            '%s traj(s) found in folder'
            + (' and subfolders recursively' if args.recursive else ''),
            len(add_trajs),
        )
        if not SILENT:
            for traj in add_trajs:
                print(f' * {traj}')
        trajs.extend(add_trajs)
    return trajs
TRAJS = find_trajs(PATHS)
def main():
    '''CLI entry point: run the prepared tasks and exit with status 0.'''
    # Imported lazily so module import (and e.g. --help) stays cheap; the
    # heavy task machinery is only loaded when the CLI actually runs.
    from .traju import TASKS, proceed_tasks
    proceed_tasks(TASKS)
    return 0
if __name__ == '__main__':
    sys.exit(main())  # pragma: no cover
| en | 0.5855 | Console script for traju. Parse command line arguments. # alias # File parameters # Computation parameters # Mutually exclusive output options # cpptraj parameters # proceed arguments # # paths of trajectories # Interface flags # don't get user approval # change level of root logger # stop if something went wrong at least with one traj # Computing parameters # Saving flags # save out trajs in the same folder # join input trajs into one # replace original trajs with outs # File naming # without preceding dot # too # Collect specified trajs into list and find it in provided directories. # to make sure # find trajs in provided folders CLI entry point. # pragma: no cover | 2.531691 | 3 |
api/users/libraries/token_to_user.py | django-doctor/lite-api | 3 | 6619471 | from api.users.libraries.token import Token
def token_to_user_pk(token):
    """Decode *token* and return the user's primary key (the "id" claim)."""
    return Token.decode_to_json(token).get("id")
| from api.users.libraries.token import Token
def token_to_user_pk(token):
    """Decode *token* and return the user's primary key (the "id" claim)."""
    return Token.decode_to_json(token).get("id")
| none | 1 | 2.262139 | 2 | |
main.py | skorani/Preprocess-Emoji | 5 | 6619472 | #!/usr/bin/env pythoh
# -*- encoding: utf-8 -*-
import emojies
def main():
    """Prompt for a line of text and print it with all emoji removed."""
    raw = input("Enter your text:\n")
    cleaned = emojies.replace(raw)
    print(cleaned)


if __name__ == "__main__":
    main()
| #!/usr/bin/env pythoh
# -*- encoding: utf-8 -*-
import emojies
def main():
    """Prompt for a line of text and print it with all emoji removed."""
    raw = input("Enter your text:\n")
    cleaned = emojies.replace(raw)
    print(cleaned)


if __name__ == "__main__":
    main()
| en | 0.427458 | #!/usr/bin/env pythoh # -*- encoding: utf-8 -*- | 3.311102 | 3 |
data/base_dataset.py | linhlpv/pytorch-CycleGAN-and-pix2pix | 0 | 6619473 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
import albumentations as A
from albumentations.pytorch.transforms import ToTensor, ToTensorV2
import cv2
import torch
class BaseDataset(data.Dataset, ABC):
    """This class is an abstract base class (ABC) for datasets.

    To create a subclass, you need to implement the following four functions:
    -- <__init__>:                      initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>:                       return the size of dataset.
    -- <__getitem__>:                   get a data point.
    -- <modify_commandline_options>:    (optionally) add dataset-specific options and set default options.
    """
    def __init__(self, opt):
        """Initialize the class; save the options in the class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a
                subclass of BaseOptions
        """
        self.opt = opt  # experiment options, kept for subclasses
        self.root = opt.dataroot  # dataset root directory
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- True during training, False at test time; lets a
                subclass add phase-specific options

        Returns:
            the modified parser (unchanged here; subclasses may override).
        """
        return parser
    @abstractmethod
    def __len__(self):
        """Return the total number of images in the dataset."""
        return 0
    @abstractmethod
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns:
            a dictionary of data with their names; usually the data itself plus
            its metadata information.
        """
        pass
class Albumentations:
    """Adapter that applies an albumentations pipeline to a single image.

    Wraps a list of albumentations transforms so the composed pipeline can be
    used as a plain callable inside torchvision's transforms.Compose.
    """

    def __init__(self, augmentations):
        # Compose once; every call then runs the whole pipeline.
        self.augmentations = A.Compose(augmentations)

    def __call__(self, image):
        augmented = self.augmentations(image=image)
        return augmented['image']
def get_params(opt, size):
    """Draw random crop/flip parameters for one image.

    Parameters:
        opt  -- options object; uses opt.preprocess, opt.load_size, opt.crop_size
        size -- (width, height) of the original image

    Returns:
        dict with 'crop_pos' -- (x, y) top-left corner of a crop_size crop
        inside the image after the preprocess-implied resize -- and 'flip',
        a random horizontal-flip decision.
    """
    width, height = size
    new_w, new_h = width, height
    if opt.preprocess == 'resize_and_crop':
        new_w = new_h = opt.load_size
    elif opt.preprocess == 'scale_width_and_crop':
        new_w = opt.load_size
        new_h = opt.load_size * height // width

    # Clamp to zero so degenerate (too small) images still yield (0, 0).
    crop_x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
    crop_y = random.randint(0, np.maximum(0, new_h - opt.crop_size))

    return {'crop_pos': (crop_x, crop_y), 'flip': random.random() > 0.5}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    """Build the torchvision preprocessing pipeline described by opt.preprocess.

    Parameters:
        opt       -- options object; uses opt.preprocess, opt.load_size,
                     opt.crop_size and opt.no_flip
        params    -- optional dict from get_params(); when given, cropping and
                     flipping become deterministic instead of random
        grayscale -- convert to a single channel first
        method    -- PIL resampling filter used for resizing
        convert   -- append ToTensor + per-channel Normalize to [-1, 1]

    Returns:
        a transforms.Compose pipeline.
    """
    transform_list = []
    if grayscale:
        # BUG FIX: was `transforms0.Grayscale(1)` -- `transforms0` is an
        # undefined name, so grayscale=True always raised NameError.
        transform_list.append(transforms.Grayscale(1))
    if 'resize' in opt.preprocess:
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, method))
    elif 'scale_width' in opt.preprocess:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
    if 'crop' in opt.preprocess:
        if params is None:
            transform_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
    if opt.preprocess == 'none':
        # No geometric preprocessing: only snap sides to multiples of 4 so
        # downstream convolutional nets get valid sizes.
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
    if not opt.no_flip:
        if params is None:
            transform_list.append(transforms.RandomHorizontalFlip())
        elif params['flip']:
            transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
    if convert:
        transform_list += [transforms.ToTensor()]
        if grayscale:
            transform_list += [transforms.Normalize((0.5,), (0.5,))]
        else:
            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def get_transform_for_petct(opt, params=None, convert=True):
    """Build the grayscale PET/CT preprocessing pipeline.

    Geometric augmentations (resize / random crop / horizontal flip) run via
    albumentations, then the result is converted to a tensor and normalised
    to [-1, 1].  `params` and `convert` are accepted for signature
    compatibility with get_transform() but are currently unused.
    """
    aug_list = []
    if 'resize' in opt.preprocess:
        aug_list.append(A.Resize(opt.load_size, opt.load_size, interpolation=cv2.INTER_NEAREST))
    if 'crop' in opt.preprocess:
        aug_list.append(A.RandomCrop(opt.crop_size, opt.crop_size))
    if not opt.no_flip:
        aug_list.append(A.HorizontalFlip())
    return transforms.Compose([
        Albumentations(aug_list),
        transforms.ToTensor(),
        transforms.Normalize ((0.5,), (0.5,)),
    ])
def norm_SUV(image):
    """Min-max normalise an SUV image into [0, 1].

    Parameters:
        image -- array-like with .min()/.max() (e.g. a numpy array)

    Returns:
        the rescaled image; a constant image returns all zeros instead of the
        NaNs the previous 0/0 division produced.
    """
    lo = image.min()
    rng = image.max() - lo
    if rng == 0:
        # ROBUSTNESS FIX: constant image -> zero array (same shape), not NaN.
        return image - lo
    return (image - lo) / rng
def __make_power_2(img, base, method=Image.BICUBIC):
    """Resize *img* so width and height are multiples of *base*.

    Returns the image unchanged when it already complies; otherwise warns
    (once per process) and resizes with the given PIL resampling *method*.
    """
    ow, oh = img.size
    # Round each side to the nearest multiple of base.
    h = int(round(oh / base) * base)
    w = int(round(ow / base) * base)
    if h == oh and w == ow:
        return img
    __print_size_warning(ow, oh, w, h)
    return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
    """Resize *img* to width *target_size*, keeping aspect ratio but never
    letting the height drop below *crop_size* (so a later crop still fits)."""
    ow, oh = img.size
    if ow == target_size and oh >= crop_size:
        return img
    w = target_size
    h = int(max(target_size * oh / ow, crop_size))
    return img.resize((w, h), method)
def __crop(img, pos, size):
    """Crop a *size* x *size* window whose top-left corner is *pos*.

    The image is returned unchanged when neither side exceeds the window.
    """
    ow, oh = img.size
    x1, y1 = pos
    tw = th = size
    if (ow > tw or oh > th):
        return img.crop((x1, y1, x1 + tw, y1 + th))
    return img
def __flip(img, flip):
    """Mirror the image left-right when *flip* is truthy; otherwise return it."""
    if flip:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img
def __print_size_warning(ow, oh, w, h):
    """Print warning information about image size (only print once)."""
    # The flag lives on the function object, so the warning is emitted at most
    # once per process regardless of how many images are adjusted.
    if not hasattr(__print_size_warning, 'has_printed'):
        print("The image size needs to be a multiple of 4. "
              "The loaded image size was (%d, %d), so it was adjusted to "
              "(%d, %d). This adjustment will be done to all images "
              "whose sizes are not multiples of 4" % (ow, oh, w, h))
        __print_size_warning.has_printed = True
| """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
import albumentations as A
from albumentations.pytorch.transforms import ToTensor, ToTensorV2
import cv2
import torch
class BaseDataset(data.Dataset, ABC):
    """This class is an abstract base class (ABC) for datasets.

    To create a subclass, you need to implement the following four functions:
    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>: return the size of dataset.
    -- <__getitem__>: get a data point.
    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the class; save the options in the class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        self.opt = opt
        self.root = opt.dataroot

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def __len__(self):
        """Return the total number of images in the dataset."""
        return 0

    @abstractmethod
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
        """
        pass
class Albumentations:
    """Adapter letting an albumentations pipeline be used like a torchvision
    transform (a plain callable taking the image positionally)."""

    def __init__(self, augmentations):
        # Compose once at construction; albumentations expects a list.
        self.augmentations = A.Compose(augmentations)

    def __call__(self, image):
        # Albumentations takes named arguments and returns a dict of results;
        # only the transformed image is needed here.
        return self.augmentations(image=image)['image']
def get_params(opt, size):
    """Draw a random crop position and a random flip flag for one image.

    Parameters:
        opt  -- options object; reads opt.preprocess, opt.load_size, opt.crop_size
        size -- (width, height) of the source image

    Returns:
        dict with 'crop_pos' (x, y) and 'flip' (bool).
    """
    width, height = size
    if opt.preprocess == 'resize_and_crop':
        new_w = new_h = opt.load_size
    elif opt.preprocess == 'scale_width_and_crop':
        new_w = opt.load_size
        new_h = opt.load_size * height // width
    else:
        new_w, new_h = width, height
    max_x = np.maximum(0, new_w - opt.crop_size)
    max_y = np.maximum(0, new_h - opt.crop_size)
    crop_pos = (random.randint(0, max_x), random.randint(0, max_y))
    return {'crop_pos': crop_pos, 'flip': random.random() > 0.5}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    """Build the torchvision preprocessing pipeline selected by ``opt.preprocess``.

    Parameters:
        opt       -- options object; reads opt.preprocess, opt.load_size,
                     opt.crop_size and opt.no_flip
        params    -- optional dict from get_params(); when given, crop position
                     and flip are applied deterministically instead of randomly
        grayscale -- prepend Grayscale(1) and normalize with 1-channel stats
        method    -- PIL resampling filter for resize operations
        convert   -- append ToTensor + Normalize when True
    """
    transform_list = []
    if grayscale:
        # BUG FIX: was `transforms0.Grayscale(1)` -- `transforms0` is an
        # undefined name (NameError); the module imports
        # torchvision.transforms as `transforms`.
        transform_list.append(transforms.Grayscale(1))
    if 'resize' in opt.preprocess:
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, method))
    elif 'scale_width' in opt.preprocess:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
    if 'crop' in opt.preprocess:
        if params is None:
            transform_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
    if opt.preprocess == 'none':
        # Network downsampling requires sides to be multiples of 4.
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
    if not opt.no_flip:
        if params is None:
            transform_list.append(transforms.RandomHorizontalFlip())
        elif params['flip']:
            transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
    if convert:
        transform_list += [transforms.ToTensor()]
        if grayscale:
            transform_list += [transforms.Normalize((0.5,), (0.5,))]
        else:
            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def get_transform_for_petct(opt, params=None, convert=True):
    """Build the PET/CT preprocessing pipeline.

    Spatial augmentations (resize / random crop / horizontal flip) run through
    albumentations; torchvision then converts to a tensor and normalizes with
    single-channel statistics.

    NOTE(review): `params` and `convert` are currently unused -- kept for
    signature symmetry with get_transform(); confirm before removing.
    """
    aug = []
    if 'resize' in opt.preprocess:
        aug.append(A.Resize(opt.load_size, opt.load_size, interpolation=cv2.INTER_NEAREST))
    if 'crop' in opt.preprocess:
        aug.append(A.RandomCrop(opt.crop_size, opt.crop_size))
    if not opt.no_flip:
        aug.append(A.HorizontalFlip())
    pipeline = [
        Albumentations(aug),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ]
    return transforms.Compose(pipeline)
def norm_SUV(image):
    """Min-max normalize an SUV image into [0, 1].

    NOTE(review): divides by (max - min); a constant image would divide by
    zero -- confirm inputs always have dynamic range.
    """
    image = (image - image.min()) / (image.max() - image.min())
    return image
def __make_power_2(img, base, method=Image.BICUBIC):
    """Round the image dimensions to the nearest multiple of *base* and resize.

    Returns the original image unchanged when no adjustment is needed;
    otherwise warns once and resizes with the given PIL *method*.
    """
    width, height = img.size
    new_w = int(round(width / base) * base)
    new_h = int(round(height / base) * base)
    if (new_w, new_h) == (width, height):
        return img
    __print_size_warning(width, height, new_w, new_h)
    return img.resize((new_w, new_h), method)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
    """Scale *img* so its width equals *target_size*, preserving aspect ratio
    while keeping the height at least *crop_size*."""
    width, height = img.size
    if width == target_size and height >= crop_size:
        return img
    scaled_height = int(max(target_size * height / width, crop_size))
    return img.resize((target_size, scaled_height), method)
def __crop(img, pos, size):
    """Crop a *size* x *size* square whose top-left corner is *pos*; return the
    image untouched when it already fits within that window."""
    width, height = img.size
    left, top = pos
    if width > size or height > size:
        return img.crop((left, top, left + size, top + size))
    return img
def __flip(img, flip):
    """Mirror *img* horizontally when *flip* is truthy."""
    return img.transpose(Image.FLIP_LEFT_RIGHT) if flip else img
def __print_size_warning(ow, oh, w, h):
    """Warn (at most once per process) that an image was resized to a
    multiple of 4; the one-shot flag is stored on the function object."""
    if getattr(__print_size_warning, 'has_printed', False):
        return
    print("The image size needs to be a multiple of 4. "
          "The loaded image size was (%d, %d), so it was adjusted to "
          "(%d, %d). This adjustment will be done to all images "
          "whose sizes are not multiples of 4" % (ow, oh, w, h))
    __print_size_warning.has_printed = True
| en | 0.391322 | This module implements an abstract base class (ABC) 'BaseDataset' for datasets. It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. This class is an abstract base class (ABC) for datasets. To create a subclass, you need to implement the following four functions: -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). -- <__len__>: return the size of dataset. -- <__getitem__>: get a data point. -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. Initialize the class; save the options in the class Parameters: opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions Add new dataset-specific options, and rewrite default values for existing options. Parameters: parser -- original option parser is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. Returns: the modified parser. Return the total number of images in the dataset. Return a data point and its metadata information. Parameters: index - - a random integer for data indexing Returns: a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
# print('Before to tensor ', image.max(), image.min(), image.dtype) # if len(image.shape) == 2: # 2D image # return torch.from_numpy(image).unsqueeze(0).type(torch.float32) # elif len(image.shape) == 3: # return torch.from_numpy(image).permute(2, 0, 1).type(torch.float32) # transform_list += [transform.Lambda(lambda img: img.astype(torch.float32).unsqueeze(-1)) # osize = [opt.load_size, opt.load_size] # transform_list.append(transforms.Resize(osize, method)) # elif 'scale_width' in opt.preprocess: # transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) # print(params) # if params is None: # transform_list.append(transforms.RandomCrop(opt.crop_size)) # else: # transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) # if opt.preprocess == 'none': # transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) # if params is None: # transform_list.append(transforms.RandomHorizontalFlip()) # elif params['flip']: # transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) # if convert: # transform_list += [transforms.ToTensor()] # transform_list += [transforms.Normalize ((0.5,), (0.5,))] # Always gray image in petct project # print(transform_list) # transforms.Lambda(lambda img: img / img.max()), # neu chia max o day thi sai Print warning information about image size(only print once) | 3.200231 | 3 |
wishlist/application/webapi/wishlist/models.py | guiyllw/wishlist-luizalabs | 0 | 6619474 | <filename>wishlist/application/webapi/wishlist/models.py
from typing import List
from wishlist.application.webapi.common.models import SerializableModel
from wishlist.application.webapi.product.models import FullProduct
class AddProductsRequest(SerializableModel):
    """Request payload for adding products to a customer's wishlist."""
    # Identifier of the customer whose wishlist receives the products.
    customer_id: str
    # Identifiers of the products to add.
    product_ids: List[str]
class CustomerWishList(SerializableModel):
    """Wishlist holding product identifiers only (no resolved products)."""
    # Wishlist identifier.
    id: str
    # Owning customer's identifier.
    customer_id: str
    # Identifiers of the wished products.
    product_ids: List[str]
class FullCustomerWishList(SerializableModel):
    """Wishlist variant carrying fully-resolved product objects."""
    # Wishlist identifier.
    id: str
    # Owning customer's identifier.
    customer_id: str
    # Complete product payloads rather than bare ids.
    products: List[FullProduct]
| <filename>wishlist/application/webapi/wishlist/models.py
from typing import List
from wishlist.application.webapi.common.models import SerializableModel
from wishlist.application.webapi.product.models import FullProduct
class AddProductsRequest(SerializableModel):
    """Body of an add-products request: which products go to whose wishlist."""
    # Target customer's identifier.
    customer_id: str
    # Products (by id) to be added.
    product_ids: List[str]
class CustomerWishList(SerializableModel):
    """Customer wishlist referencing products by id."""
    # Wishlist identifier.
    id: str
    # Identifier of the customer who owns the wishlist.
    customer_id: str
    # Ids of the products on the wishlist.
    product_ids: List[str]
class FullCustomerWishList(SerializableModel):
    """Customer wishlist with each product expanded to a FullProduct."""
    # Wishlist identifier.
    id: str
    # Identifier of the customer who owns the wishlist.
    customer_id: str
    # Expanded product objects.
    products: List[FullProduct]
| none | 1 | 1.848344 | 2 | |
scripts/filter_task_log_gz.py | yannakopoulos/elk-admin | 0 | 6619475 | <filename>scripts/filter_task_log_gz.py<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function, division
import argparse
import gzip
import re
import json
# Parse the command line: -f points at the gzipped task log to inspect.
parser = argparse.ArgumentParser(description='Reads and parses task.log.gz.')
parser.add_argument('-f', action='store', dest='path',
                    type=str, help='Path to task.log.gz')
path = parser.parse_args().path

doc = {} # doc containing log data
doc['has_fatal_exception'] = False

# open gzip and extract fatal exception block
gzip_handler = gzip.open(path, 'rb')
try:
    recording = False
    for line in gzip_handler:
        # NOTE(review): under Python 3, gzip.open(..., 'rb') yields bytes, so
        # these `str in line` tests raise TypeError; given the __future__
        # import this script presumably targets Python 2 -- confirm before
        # porting.
        if 'Begin Fatal Exception' in line:
            doc['has_fatal_exception'] = True
            recording = True
            error_lines = []
        if recording:
            # Drop the first 8 characters of each recorded line (presumably a
            # fixed-width log prefix -- confirm against an actual task.log).
            error_lines.append(line[8:])
        if 'End Fatal Exception' in line:
            recording = False
finally:
    gzip_handler.close()

# task id
# Concatenate the two 4-digit path components into one integer id.
task_p = re.compile('\/(\d{4})\/(\d{4})\/')
doc['id'] = int("".join(task_p.search(path).groups()))

if doc['has_fatal_exception']:
    # compile full error message, if it exists
    doc['message'] = "".join(error_lines)
    doc['has_fatal_exception'] = True
    # exception category
    # First single-quoted token in the exception block.
    e_cat_p = re.compile('\'(.*)\'')
    doc['exception_category'] = \
        e_cat_p.search(doc['message']).group(1)
    # exception message
    # The line immediately following the "Exception Message:" header.
    e_mess_p = re.compile('Exception Message:\n(.*)')
    doc['exception_message'] = \
        e_mess_p.search(doc['message']).group(1)

# send json doc to logstash via stdout
print(json.dumps({'task_log_gz': doc}))
| <filename>scripts/filter_task_log_gz.py<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function, division
import argparse
import gzip
import re
import json
# Parse the command line: -f points at the gzipped task log to inspect.
parser = argparse.ArgumentParser(description='Reads and parses task.log.gz.')
parser.add_argument('-f', action='store', dest='path',
                    type=str, help='Path to task.log.gz')
path = parser.parse_args().path

doc = {} # doc containing log data
doc['has_fatal_exception'] = False
# BUG FIX: initialize so a truncated log (End without Begin) cannot raise
# NameError below.
error_lines = []

# open gzip and extract fatal exception block
gzip_handler = gzip.open(path, 'rb')
try:
    recording = False
    for raw_line in gzip_handler:
        # BUG FIX: under Python 3, gzip.open(..., 'rb') yields bytes, which
        # made the `str in line` checks raise TypeError. Decode defensively so
        # the script runs on both Python 2 and Python 3.
        line = raw_line.decode('utf-8', 'replace') if isinstance(raw_line, bytes) else raw_line
        if 'Begin Fatal Exception' in line:
            doc['has_fatal_exception'] = True
            recording = True
            error_lines = []
        if recording:
            # Drop the first 8 characters of each recorded line (presumably a
            # fixed-width log prefix -- confirm against an actual task.log).
            error_lines.append(line[8:])
        if 'End Fatal Exception' in line:
            recording = False
finally:
    gzip_handler.close()

# task id: concatenate the two 4-digit path components into one integer
task_p = re.compile('\/(\d{4})\/(\d{4})\/')
doc['id'] = int("".join(task_p.search(path).groups()))

if doc['has_fatal_exception']:
    # compile full error message (redundant flag re-assignment removed)
    doc['message'] = "".join(error_lines)
    # exception category: first single-quoted token in the block
    e_cat_p = re.compile('\'(.*)\'')
    doc['exception_category'] = \
        e_cat_p.search(doc['message']).group(1)
    # exception message: the line after the "Exception Message:" header
    e_mess_p = re.compile('Exception Message:\n(.*)')
    doc['exception_message'] = \
        e_mess_p.search(doc['message']).group(1)

# send json doc to logstash via stdout
print(json.dumps({'task_log_gz': doc}))
| en | 0.365057 | #!/usr/bin/env python # doc containing log data # open gzip and extract fatal exception block # task id # compile full error message, if it exists # exception category # exception message # send json doc to logstash via stdout | 2.394971 | 2 |
setup.py | bruceravel/xraylarch | 0 | 6619476 | #!/usr/bin/env python
from __future__ import print_function
# from distutils.core import setup
from setuptools import setup
import time
import os
import sys
import site
import shutil
from glob import glob
DEBUG = False
cmdline_args = sys.argv[1:]
required_modules = ['numpy', 'scipy', 'lmfit', 'h5py', 'sqlalchemy', 'six']
graphics_modules = ['matplotlib', 'wx', 'wxmplot', 'wxutils', 'yaml']
recommended_modules = {'basic analysis': required_modules,
'graphics and plotting': graphics_modules,
'xrd modules' : ('fabio','pyFAI'),
'color-enhanced error messages': ('termcolor', ),
'using the EPICS control system': ('epics', ),
'testing tools': ('nose', ),
}
# files that may be left from earlier installs) and should be removed
historical_cruft = []
modules_imported = {}
missing = []
deps_ok = False
if os.path.exists('.deps'):
try:
f = open('.deps', 'r').readlines()
deps_ok = int(f[0].strip()) == 1
except:
pass
if not deps_ok:
print( 'Checking dependencies....')
for desc, mods in recommended_modules.items():
for mod in mods:
if mod == 'wx':
try:
import wxversion
wxversion.ensureMinimal('2.9')
except:
pass
if mod not in modules_imported:
modules_imported[mod] = False
try:
x = __import__(mod)
modules_imported[mod] = True
except ImportError:
missing.append(' %s: needed for %s' % (mod, desc))
missing_reqs = []
for mod in modules_imported:
if mod in required_modules and not modules_imported[mod]:
missing_reqs.append(mod)
if len(missing_reqs) > 0:
print('== Cannot Install Larch: Required Modules are Missing ==')
isword = 'is'
if len(missing_reqs) > 1: isword = 'are'
print(' %s %s REQUIRED' % (' and '.join(missing_reqs), isword) )
print(' ')
print(' Please read INSTALL for further information.')
print(' ')
sys.exit()
deps_ok = len(missing) == 0
##
## For Travis-CI, need to write a local site config file
##
if os.environ.get('TRAVIS_CI_TEST', '0') == '1':
time.sleep(0.2)
from lib import version
# system-wide larchdir
larchdir = os.path.join(sys.exec_prefix, 'share', 'larch')
if DEBUG:
print("## Settings (Debug mode) ## ")
print(" larchdir: ", larchdir)
print(" sys.prefix: ", sys.prefix)
print(" sys.exec_prefix: ", sys.exec_prefix)
print(" cmdline_args: ", cmdline_args)
print("## ")
# construct list of files to install besides the normal python modules
# this includes the larch executable files, and all the larch modules
# and plugins
larchico_dir = os.path.join(larchdir, 'icons')
larchmod_dir = os.path.join(larchdir, 'modules')
sysbin_dir = 'Scripts'
scripts = glob('bin/*')
mac_apps = []
_scripts = []
for s in scripts:
if s.endswith('.app'):
mac_apps.append(s)
else:
_scripts.append(s)
scripts = _scripts
if os.name != 'nt':
_scripts = []
sysbin_dir = 'bin'
for s in scripts:
if not s.endswith('.bat'):
_scripts.append(s)
scripts = _scripts
data_files = [(sysbin_dir, scripts),
(larchico_dir, glob('icons/*.ic*')),
(larchmod_dir, glob('modules/*.lar') + glob('modules/*.py'))]
#dlls
dll_maindir = os.path.join(larchdir, 'dlls')
archs = []
if os.name == 'nt':
archs.extend(['win32', 'win64'])
else:
if sys.platform.lower().startswith('linux'):
archs.extend(['linux32', 'linux64'])
elif sys.platform.lower().startswith('darwin'):
archs.append('darwin')
for dx in archs:
dlldir = os.path.join(dll_maindir, dx)
dllfiles = glob('dlls/%s/*' % dx)
data_files.append((dlldir, dllfiles))
plugin_dir = os.path.join(larchdir, 'plugins')
pluginfiles = []
pluginpaths = []
for fname in glob('plugins/*'):
if os.path.isdir(fname):
pluginpaths.append(fname)
else:
pluginfiles.append(fname)
data_files.append((plugin_dir, pluginfiles))
for pdir in pluginpaths:
pfiles = []
filelist = []
for ext in ('py', 'txt', 'db', 'dat', 'rst', 'lar',
'dll', 'dylib', 'so'):
filelist.extend(glob('%s/*.%s' % (pdir, ext)))
for fname in filelist:
if os.path.isdir(fname):
print('Warning -- not walking subdirectories for Plugins!!')
else:
pfiles.append(fname)
data_files.append((os.path.join(larchdir, pdir), pfiles))
if (cmdline_args[0] == 'install' and
sys.platform == 'darwin' and
'Anaconda' in sys.version):
for fname in scripts:
fh = open(fname, 'r')
lines = fh.readlines()
fh.close()
line0 = lines[0].strip()
if not line0.startswith('#!/usr/bin/env pythonw'):
fh = open(fname, 'w')
fh.write('#!/usr/bin/env pythonw\n')
fh.write("".join(lines[1:]))
fh.close()
print("Rewrote ", fname)
# now we have all the data files, so we can run setup
setup(name = 'xraylarch',
version = version.__version__,
author = '<NAME> and the X-rayLarch Development Team',
author_email = '<EMAIL>',
url = 'http://xraypy.github.io/xraylarch/',
download_url = 'http://xraypy.github.io/xraylarch/',
install_requires = required_modules,
license = 'BSD',
description = 'Synchrotron X-ray data analysis in python',
package_dir = {'larch': 'lib'},
packages = ['larch', 'larch.utils', 'larch.wxlib',
'larch.fitting', 'larch.fitting.uncertainties'],
data_files = data_files,
platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'],
)
def remove_cruft(basedir, filelist):
    """Delete leftover files (and their .pyc/.pyo byproducts) from *basedir*.

    Deletion is best-effort: missing files and unlink failures are ignored.
    """
    def _unlink_quietly(target):
        full_path = os.path.join(basedir, target)
        if os.path.exists(full_path):
            try:
                os.unlink(full_path)
            except:
                pass

    for name in filelist:
        _unlink_quietly(name)
        if name.endswith('.py'):
            # Stale compiled byproducts of a removed .py file.
            for suffix in ('c', 'o'):
                _unlink_quietly(name + suffix)
if (cmdline_args[0] == 'install' and sys.platform == 'darwin' and
'Anaconda' in sys.version):
for fname in scripts:
fh = open(fname, 'r')
lines = fh.readlines()
fh.close()
line0 = lines[0].strip()
if line0.startswith('#!/usr/bin/env pythonw'):
fh = open(fname, 'w')
fh.write('#!/usr/bin/env python\n')
fh.write("".join(lines[1:]))
fh.close()
def fix_permissions(dirname, stat=None):
    """
    set permissions on a list of directories to match
    those of the HOME directory

    Walks *dirname* recursively and applies *stat*'s uid/gid and mode to every
    directory and file. A no-op when *stat* is None; individual chown/chmod
    failures are silently ignored.
    """
    if stat is None:
        return
    def set_perms(fname):
        # Best-effort: chown may require privileges; failures are swallowed.
        try:
            os.chown(fname, stat.st_uid, stat.st_gid)
            os.chmod(fname, stat.st_mode)
        except(AttributeError, OSError):
            pass
    for top, dirs, files in os.walk(dirname):
        set_perms(top)
        for d in dirs+files:
            set_perms(os.path.join(top, d))
if cmdline_args[0] == 'install':
remove_cruft(larchdir, historical_cruft)
if deps_ok and not os.path.exists('.deps'):
f = open('.deps', 'w')
f.write('1\n')
f.close()
if len(missing) > 0:
msg = """
#==============================================================#
#=== Warning: Some recommended Python Packages are missing:
%s
Some functionality will not work until these are installed.
See INSTALL for further information.
#==============================================================#"""
print(msg % '\n'.join(missing))
| #!/usr/bin/env python
from __future__ import print_function
# from distutils.core import setup
from setuptools import setup
import time
import os
import sys
import site
import shutil
from glob import glob
DEBUG = False
cmdline_args = sys.argv[1:]
required_modules = ['numpy', 'scipy', 'lmfit', 'h5py', 'sqlalchemy', 'six']
graphics_modules = ['matplotlib', 'wx', 'wxmplot', 'wxutils', 'yaml']
recommended_modules = {'basic analysis': required_modules,
'graphics and plotting': graphics_modules,
'xrd modules' : ('fabio','pyFAI'),
'color-enhanced error messages': ('termcolor', ),
'using the EPICS control system': ('epics', ),
'testing tools': ('nose', ),
}
# files that may be left from earlier installs) and should be removed
historical_cruft = []
modules_imported = {}
missing = []
deps_ok = False
if os.path.exists('.deps'):
try:
f = open('.deps', 'r').readlines()
deps_ok = int(f[0].strip()) == 1
except:
pass
if not deps_ok:
print( 'Checking dependencies....')
for desc, mods in recommended_modules.items():
for mod in mods:
if mod == 'wx':
try:
import wxversion
wxversion.ensureMinimal('2.9')
except:
pass
if mod not in modules_imported:
modules_imported[mod] = False
try:
x = __import__(mod)
modules_imported[mod] = True
except ImportError:
missing.append(' %s: needed for %s' % (mod, desc))
missing_reqs = []
for mod in modules_imported:
if mod in required_modules and not modules_imported[mod]:
missing_reqs.append(mod)
if len(missing_reqs) > 0:
print('== Cannot Install Larch: Required Modules are Missing ==')
isword = 'is'
if len(missing_reqs) > 1: isword = 'are'
print(' %s %s REQUIRED' % (' and '.join(missing_reqs), isword) )
print(' ')
print(' Please read INSTALL for further information.')
print(' ')
sys.exit()
deps_ok = len(missing) == 0
##
## For Travis-CI, need to write a local site config file
##
if os.environ.get('TRAVIS_CI_TEST', '0') == '1':
time.sleep(0.2)
from lib import version
# system-wide larchdir
larchdir = os.path.join(sys.exec_prefix, 'share', 'larch')
if DEBUG:
print("## Settings (Debug mode) ## ")
print(" larchdir: ", larchdir)
print(" sys.prefix: ", sys.prefix)
print(" sys.exec_prefix: ", sys.exec_prefix)
print(" cmdline_args: ", cmdline_args)
print("## ")
# construct list of files to install besides the normal python modules
# this includes the larch executable files, and all the larch modules
# and plugins
larchico_dir = os.path.join(larchdir, 'icons')
larchmod_dir = os.path.join(larchdir, 'modules')
sysbin_dir = 'Scripts'
scripts = glob('bin/*')
mac_apps = []
_scripts = []
for s in scripts:
if s.endswith('.app'):
mac_apps.append(s)
else:
_scripts.append(s)
scripts = _scripts
if os.name != 'nt':
_scripts = []
sysbin_dir = 'bin'
for s in scripts:
if not s.endswith('.bat'):
_scripts.append(s)
scripts = _scripts
data_files = [(sysbin_dir, scripts),
(larchico_dir, glob('icons/*.ic*')),
(larchmod_dir, glob('modules/*.lar') + glob('modules/*.py'))]
#dlls
dll_maindir = os.path.join(larchdir, 'dlls')
archs = []
if os.name == 'nt':
archs.extend(['win32', 'win64'])
else:
if sys.platform.lower().startswith('linux'):
archs.extend(['linux32', 'linux64'])
elif sys.platform.lower().startswith('darwin'):
archs.append('darwin')
for dx in archs:
dlldir = os.path.join(dll_maindir, dx)
dllfiles = glob('dlls/%s/*' % dx)
data_files.append((dlldir, dllfiles))
plugin_dir = os.path.join(larchdir, 'plugins')
pluginfiles = []
pluginpaths = []
for fname in glob('plugins/*'):
if os.path.isdir(fname):
pluginpaths.append(fname)
else:
pluginfiles.append(fname)
data_files.append((plugin_dir, pluginfiles))
for pdir in pluginpaths:
pfiles = []
filelist = []
for ext in ('py', 'txt', 'db', 'dat', 'rst', 'lar',
'dll', 'dylib', 'so'):
filelist.extend(glob('%s/*.%s' % (pdir, ext)))
for fname in filelist:
if os.path.isdir(fname):
print('Warning -- not walking subdirectories for Plugins!!')
else:
pfiles.append(fname)
data_files.append((os.path.join(larchdir, pdir), pfiles))
if (cmdline_args[0] == 'install' and
sys.platform == 'darwin' and
'Anaconda' in sys.version):
for fname in scripts:
fh = open(fname, 'r')
lines = fh.readlines()
fh.close()
line0 = lines[0].strip()
if not line0.startswith('#!/usr/bin/env pythonw'):
fh = open(fname, 'w')
fh.write('#!/usr/bin/env pythonw\n')
fh.write("".join(lines[1:]))
fh.close()
print("Rewrote ", fname)
# now we have all the data files, so we can run setup
setup(name = 'xraylarch',
version = version.__version__,
author = '<NAME> and the X-rayLarch Development Team',
author_email = '<EMAIL>',
url = 'http://xraypy.github.io/xraylarch/',
download_url = 'http://xraypy.github.io/xraylarch/',
install_requires = required_modules,
license = 'BSD',
description = 'Synchrotron X-ray data analysis in python',
package_dir = {'larch': 'lib'},
packages = ['larch', 'larch.utils', 'larch.wxlib',
'larch.fitting', 'larch.fitting.uncertainties'],
data_files = data_files,
platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering'],
)
def remove_cruft(basedir, filelist):
    """remove files from base directory

    Each name in *filelist* is deleted from *basedir* (errors ignored); for
    ``.py`` entries the stale ``.pyc``/``.pyo`` byproducts are removed too.
    """
    def remove_file(base, fname):
        # Best-effort delete: missing files and unlink errors are ignored.
        fullname = os.path.join(base, fname)
        if os.path.exists(fullname):
            try:
                os.unlink(fullname)
            except:
                pass
    for fname in filelist:
        remove_file(basedir, fname)
        if fname.endswith('.py'):
            remove_file(basedir, fname+'c')
            remove_file(basedir, fname+'o')
if (cmdline_args[0] == 'install' and sys.platform == 'darwin' and
'Anaconda' in sys.version):
for fname in scripts:
fh = open(fname, 'r')
lines = fh.readlines()
fh.close()
line0 = lines[0].strip()
if line0.startswith('#!/usr/bin/env pythonw'):
fh = open(fname, 'w')
fh.write('#!/usr/bin/env python\n')
fh.write("".join(lines[1:]))
fh.close()
def fix_permissions(dirname, stat=None):
    """Recursively apply *stat*'s owner/group/mode to *dirname* and its
    contents. A no-op when *stat* is None; per-entry failures are ignored."""
    if stat is None:
        return

    def _apply(path):
        try:
            os.chown(path, stat.st_uid, stat.st_gid)
            os.chmod(path, stat.st_mode)
        except (AttributeError, OSError):
            pass

    for root, subdirs, filenames in os.walk(dirname):
        _apply(root)
        for entry in subdirs + filenames:
            _apply(os.path.join(root, entry))
if cmdline_args[0] == 'install':
remove_cruft(larchdir, historical_cruft)
if deps_ok and not os.path.exists('.deps'):
f = open('.deps', 'w')
f.write('1\n')
f.close()
if len(missing) > 0:
msg = """
#==============================================================#
#=== Warning: Some recommended Python Packages are missing:
%s
Some functionality will not work until these are installed.
See INSTALL for further information.
#==============================================================#"""
print(msg % '\n'.join(missing))
| en | 0.716887 | #!/usr/bin/env python # from distutils.core import setup # files that may be left from earlier installs) and should be removed ## ## For Travis-CI, need to write a local site config file ## # system-wide larchdir # Settings (Debug mode) ## ") # ") # construct list of files to install besides the normal python modules # this includes the larch executable files, and all the larch modules # and plugins #dlls # now we have all the data files, so we can run setup remove files from base directory set permissions on a list of directories to match those of the HOME directory #==============================================================# #=== Warning: Some recommended Python Packages are missing: %s Some functionality will not work until these are installed. See INSTALL for further information. #==============================================================# | 2.03016 | 2 |
Progr. Lang/Python_vs_JavaScript.py | dimi-fn/Various-Data-Science-Scripts | 8 | 6619477 | <gh_stars>1-10
# comment
'''
multi-line comment
'''
# variable declaration
x = 10
# naming a variable
first_name = "Alex"
# constants
TAX_RATE = 22
# print
print(x, first_name)
print("x = {} and first name is: {}".format(x, first_name))
print("Type of x is: {}".format(type(x)))
# floor division
print(10//3)
# classes and methods
class Car:
    """Small demonstration class for Python class syntax."""

    def __init__(self, brand, colour):
        self.brand = brand
        self.colour = colour

    def print_output(self):
        """Print a one-line description of the car."""
        message = "Brand of car is {} and the colour is {}".format(self.brand, self.colour)
        print(message)
result= Car(brand="mercedes", colour="black")
result.print_output() | # comment
'''
multi-line comment
'''
# variable declaration
x = 10
# naming a variable
first_name = "Alex"
# constants
TAX_RATE = 22
# print
print(x, first_name)
print("x = {} and first name is: {}".format(x, first_name))
print("Type of x is: {}".format(type(x)))
# floor division
print(10//3)
# classes and methods
class Car:
    """Minimal example class used in the Python-vs-JavaScript comparison."""
    def __init__(self, brand, colour):
        self.brand = brand
        self.colour = colour
    # function method
    def print_output(self):
        """Print the car's brand and colour."""
        print ("Brand of car is {} and the colour is {}".format(self.brand, self.colour))
result= Car(brand="mercedes", colour="black")
result.print_output() | en | 0.616323 | # comment multi-line comment # variable declaration # naming a variable # constants # print # floor division # classes and methods # function method | 3.950111 | 4 |
COMP9021/Quiz/quiz_6.py | bezdomniy/unsw | 1 | 6619478 | # Defines two classes, Point() and Disk().
# The latter has an "area" attribute and three methods:
# - change_radius(r)
# - intersects(disk), that returns True or False depending on whether
# the disk provided as argument intersects the disk object.
# - absord(disk), that returns a new disk object that represents the smallest
# disk that contains both the disk provided as argument and the disk object.
#
# Written by *** and <NAME> for COMP9021
from math import pi, hypot, sqrt
class Point():
    """A point in the plane; coordinates default to the origin."""
    def __init__(self, x = 0, y = 0):
        self.x = x
        self.y = y

    def __repr__(self):
        return 'Point({:.2f}, {:.2f})'.format(self.x, self.y)


class Disk():
    """A disk given by a centre Point and a radius, with a cached `area`."""
    def __init__(self, **kwargs):
        self.radius = kwargs.pop('radius', 0)
        self.area = pi * self.radius ** 2
        self.point = kwargs.pop('centre', Point())

    def __repr__(self):
        return 'Disk(Point({:.2f}, {:.2f}), {:.2f})'.format(self.point.x, self.point.y, self.radius)

    def change_radius(self, r):
        """Set a new radius and refresh the cached area."""
        self.radius = r
        self.area = pi * r ** 2

    def intersects(self, disk):
        """Return True when the two disks overlap or touch."""
        centre_dist = sqrt((self.point.x - disk.point.x) ** 2 + (self.point.y - disk.point.y) ** 2)
        return centre_dist <= self.radius + disk.radius

    def absorb(self, disk):
        """Return the smallest disk containing both self and *disk*.

        When one disk already contains the other, that disk object itself is
        returned (no copy is made).
        """
        x_dist = self.point.x - disk.point.x
        y_dist = self.point.y - disk.point.y
        dist = sqrt(x_dist ** 2 + y_dist ** 2)
        # BUG FIX: use <= so coincident/tangent-inside disks short-circuit
        # here instead of dividing by dist == 0 (or recomputing) below.
        if min(self.radius, disk.radius) + dist <= max(self.radius, disk.radius):
            return self if self.radius > disk.radius else disk
        new_radius = 0.5 * (self.radius + disk.radius + dist)
        # BUG FIX: the offset from self's centre toward disk's centre must keep
        # its sign; the original abs(...) pushed the centre to the wrong side
        # whenever disk lay in the positive x/y direction. The unreachable
        # trailing `return Disk(centre=mid_point, ...)` (undefined name) was
        # removed.
        new_x = self.point.x - (new_radius - self.radius) * x_dist / dist
        new_y = self.point.y - (new_radius - self.radius) * y_dist / dist
        return Disk(centre=Point(new_x, new_y), radius=new_radius)
| # Defines two classes, Point() and Disk().
# The latter has an "area" attribute and three methods:
# - change_radius(r)
# - intersects(disk), that returns True or False depending on whether
# the disk provided as argument intersects the disk object.
# - absord(disk), that returns a new disk object that represents the smallest
# disk that contains both the disk provided as argument and the disk object.
#
# Written by *** and <NAME> for COMP9021
from math import pi, hypot, sqrt
class Point():
def __init__(self, x = 0, y = 0):
self.x = x
self.y = y
def __repr__(self):
return 'Point({:.2f}, {:.2f})'.format(self.x, self.y)
class Disk():
def __init__(self,**kwargs):
self.radius = kwargs.pop('radius',0)
self.area = pi * self.radius ** 2
self.point = kwargs.pop('centre',Point())
def __repr__(self):
return 'Disk(Point({:.2f}, {:.2f}), {:.2f})'.format(self.point.x, self.point.y, self.radius)
def change_radius(self,r):
self.radius = r
self.area = pi * r ** 2
def intersects(self,disk):
return (abs(self.point.x-disk.point.x)**2 + abs(self.point.y-disk.point.y)**2)**(1/2) <= self.radius+disk.radius
def absorb(self,disk):
x_dist=self.point.x-disk.point.x
y_dist=self.point.y-disk.point.y
dist=sqrt(x_dist**2 + y_dist**2)
if min(self.radius,disk.radius) + dist < max(self.radius,disk.radius):
if self.radius > disk.radius:
big_circle = self
else:
big_circle = disk
return big_circle
else:
new_radius = 0.5 * (self.radius+disk.radius+dist)
x = self.point.x-abs((new_radius - self.radius) * x_dist / dist)
y = self.point.y-abs((new_radius - self.radius) * y_dist / dist)
return Disk(centre=Point(x,y),radius=new_radius)
return Disk(centre = mid_point,radius = max(x_dist,y_dist))
| en | 0.773267 | # Defines two classes, Point() and Disk(). # The latter has an "area" attribute and three methods: # - change_radius(r) # - intersects(disk), that returns True or False depending on whether # the disk provided as argument intersects the disk object. # - absord(disk), that returns a new disk object that represents the smallest # disk that contains both the disk provided as argument and the disk object. # # Written by *** and <NAME> for COMP9021 | 4.064579 | 4 |
models/hydro_pump_fix.py | susundberg/python-freecad-3dparts | 0 | 6619479 | import supalib
EPS = 0.001
TOLE = 0.2
DIM=48
THICK=10
BASE=5
db = DIM+BASE
pipe_rad=11
bm = supalib.create_box( (DIM,DIM,THICK), place=(-DIM*0.5, -DIM*0.5, 0.0) )
bo = supalib.create_box( (db, db,THICK), place=( -db*0.5, -db*0.5, 0.0 ) )
bb = supalib.create_box( (db, BASE*0.5, pipe_rad + THICK ), place=( -db*0.5, db*0.5 - BASE*0.5 + EPS,0 ) )
fix = supalib.create_cut( bo, bm )
fix = supalib.create_union( (fix, bb) )
pipe_sz = 20
pipe_x = 10.6
pipe_z = THICK
def creta_pipe_re():
return supalib.create_cyl( radius=0.5*7.2, size_z = pipe_sz + TOLE, place=(0,0,-EPS) )
om = supalib.create_cyl( radius=0.5*7.2, size_z = pipe_sz )
os = supalib.create_cyl( radius=0.5*pipe_rad, size_z = pipe_sz )
def pipe_relocate( obj ):
obj = supalib.relocate( obj, rotate=(1,0,0,90))
obj = supalib.relocate( obj, place=(pipe_x,DIM*0.5 + pipe_sz,pipe_z))
return obj
pipe = supalib.create_cut( os, om)
pipe = pipe_relocate( pipe )
om = creta_pipe_re()
om = pipe_relocate( om )
fix = supalib.create_cut( fix, om )
fix = supalib.create_union( (fix, pipe ))
fix = supalib.create_chamfer( fix, (fix.Shape.Edges[23],), radius=2.0 )
fix.Label="hydro_pump_fix"
mesh = supalib.creta_mesh_from( fix, save_to="/home/pauli/", version=1 )
supalib.finish()
| import supalib
EPS = 0.001
TOLE = 0.2
DIM=48
THICK=10
BASE=5
db = DIM+BASE
pipe_rad=11
bm = supalib.create_box( (DIM,DIM,THICK), place=(-DIM*0.5, -DIM*0.5, 0.0) )
bo = supalib.create_box( (db, db,THICK), place=( -db*0.5, -db*0.5, 0.0 ) )
bb = supalib.create_box( (db, BASE*0.5, pipe_rad + THICK ), place=( -db*0.5, db*0.5 - BASE*0.5 + EPS,0 ) )
fix = supalib.create_cut( bo, bm )
fix = supalib.create_union( (fix, bb) )
pipe_sz = 20
pipe_x = 10.6
pipe_z = THICK
def creta_pipe_re():
return supalib.create_cyl( radius=0.5*7.2, size_z = pipe_sz + TOLE, place=(0,0,-EPS) )
om = supalib.create_cyl( radius=0.5*7.2, size_z = pipe_sz )
os = supalib.create_cyl( radius=0.5*pipe_rad, size_z = pipe_sz )
def pipe_relocate( obj ):
obj = supalib.relocate( obj, rotate=(1,0,0,90))
obj = supalib.relocate( obj, place=(pipe_x,DIM*0.5 + pipe_sz,pipe_z))
return obj
pipe = supalib.create_cut( os, om)
pipe = pipe_relocate( pipe )
om = creta_pipe_re()
om = pipe_relocate( om )
fix = supalib.create_cut( fix, om )
fix = supalib.create_union( (fix, pipe ))
fix = supalib.create_chamfer( fix, (fix.Shape.Edges[23],), radius=2.0 )
fix.Label="hydro_pump_fix"
mesh = supalib.creta_mesh_from( fix, save_to="/home/pauli/", version=1 )
supalib.finish()
| none | 1 | 1.8809 | 2 | |
modules/dropout.py | izhx/nmnlp | 2 | 6619480 | <reponame>izhx/nmnlp<filename>modules/dropout.py
""" Some drop out class.
"""
import torch
class WordDropout(torch.nn.Dropout):
""" mask whole -1 dim array. """
def forward(self, x: torch.Tensor): # pylint:disable=arguments-differ
if not self.training or self.p == 0:
return x
mask = torch.rand(*x.shape[:-1], 1, device=x.device) < self.p
return x.masked_fill_(mask, 0) if self.inplace else x.masked_fill(mask, 0)
class LockedDropout(torch.nn.Dropout):
""" batch dim share mask. """
def __init__(self, p: float = 0.5, inplace: bool = False):
super().__init__(p, inplace)
self.q = 1 - p # pylint:disable=invalid-name
def forward(self, x: torch.Tensor): # pylint:disable=arguments-differ
if not self.training or self.p == 0:
return x
mask = torch.rand(1, *x.shape[1:], device=x.device).bernoulli_(
p=self.q).div_(self.q).expand_as(x)
return x.mul_(mask) if self.inplace else x.mul(mask)
| """ Some drop out class.
"""
import torch
class WordDropout(torch.nn.Dropout):
""" mask whole -1 dim array. """
def forward(self, x: torch.Tensor): # pylint:disable=arguments-differ
if not self.training or self.p == 0:
return x
mask = torch.rand(*x.shape[:-1], 1, device=x.device) < self.p
return x.masked_fill_(mask, 0) if self.inplace else x.masked_fill(mask, 0)
class LockedDropout(torch.nn.Dropout):
""" batch dim share mask. """
def __init__(self, p: float = 0.5, inplace: bool = False):
super().__init__(p, inplace)
self.q = 1 - p # pylint:disable=invalid-name
def forward(self, x: torch.Tensor): # pylint:disable=arguments-differ
if not self.training or self.p == 0:
return x
mask = torch.rand(1, *x.shape[1:], device=x.device).bernoulli_(
p=self.q).div_(self.q).expand_as(x)
return x.mul_(mask) if self.inplace else x.mul(mask) | en | 0.298176 | Some drop out class. mask whole -1 dim array. # pylint:disable=arguments-differ batch dim share mask. # pylint:disable=invalid-name # pylint:disable=arguments-differ | 3.069249 | 3 |
main.py | lobo0616/bysj | 1 | 6619481 | <filename>main.py<gh_stars>1-10
import sys #sys.argv 是一个包含命令行参数的列表 sys.path 包含了一个 Python 解释器自动查找所需模块的路径的列表
import argparse #argparse 是python自带的命令行参数解析包,可以用来方便地读取命令行参数
import time
from utils import *
from my_timer import MyTimer
max_time_for_import_one_py = 3.0 # seconds
min_time_for_run_one_func = 0.1 # seconds, sometimes time_gap_sec*time_ratio (for gold.py) is too small
def evaluate_one_py(py_name, all_func_info, stu_name, gold_funcs, verbose):
if verbose > 0:
print('\nStart evaluating %s %s'%( py_name,stu_name), flush=True, file=sys.stderr)
try:
with MyTimer(max_time_for_import_one_py):
this_funcs = get_funcs_in_one_module(py_name, verbose) #将学生代码的函数提取出来
except Exception as e:
print_a_thing_verbose_1('import module %s timeout: %s %s' % (py_name, type(e).__name__, e), verbose)
total_score = 0.
func_scores = []
func_names = []
for (func_name, score, time_ratio, test_case_file_name) in all_func_info: #批阅的函数名 分数 测试用例文件 时间?
func_names.append(func_name)
if this_funcs is None: #判断是否有函数
func_scores.append(0.)
print_a_thing_verbose_1('module %s does not contain func: %s' % (py_name, func_name), verbose)
continue
correct_case_cnt = 0.
lines = get_all_lines(test_case_file_name) #读文件,测试用例文件
total_case_cnt = len(lines) #测试用例个数
gold_func = gold_funcs.get(func_name) #返回gold文件里的特定的函数名(要评分的那些函数)
assert gold_func is not None #检查函数名
if this_funcs.get(func_name) is None:
lines = [] #如果没有相符合的函数名 测试用例文件也没有相符合的
for i_input, one_input in enumerate(lines):#enumerate() 函数用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引序列,同时列出数据和数据下标 i是下标,one是数据
one_input_line = one_input.strip() #移除空格换行符
assert len(one_input_line) > 0 #检查长度
one_input = eval(one_input_line) #eval() 函数用来执行一个字符串表达式,并返回表达式的值(字符串转为列表)
one_input_for_sys = eval(one_input_line)
start_time = time.time()
gold_result = gold_func(*one_input) #将测试用例放入函数里执行?(数组第一个元素?)
end_time = time.time()
time_gap_sec = end_time - start_time #执行时间
try:
with MyTimer(max(time_gap_sec * time_ratio, min_time_for_run_one_func)):
result = this_funcs[func_name](*one_input_for_sys) #将测试用例放到学生函数里执行?
except Exception as e: #发生异常执行这一块
print_msg_verbose_2(py_name, func_name, i_input, '%s : %s' % (type(e).__name__, e), verbose)
continue
if gold_result is None:
print(*one_input, gold_result)
if result == gold_result: #判断学生结果和答案结果是否相等
correct_case_cnt += 1 #通过的正确的测试用例个数+1
print_msg_verbose_2(py_name, func_name, i_input, 'passed', verbose)
else:
print_msg_verbose_2(py_name, func_name, i_input, 'failed', verbose)
this_func_score = score * correct_case_cnt / total_case_cnt #分数就是通过的用例/总用例
func_scores.append(this_func_score) #函数的得分列表
total_score += this_func_score #总分数
print_func_score_verbose_1(py_name, stu_name, func_name, score, correct_case_cnt, total_case_cnt, verbose)
print_score_summary(py_name,stu_name, total_score, func_names, func_scores)
if __name__ == '__main__':
argparser = argparse.ArgumentParser() #创建解析器
argparser.add_argument('--prog_dir', default='examples/') #添加参数
argparser.add_argument('--gold_py', default='gold.py')
argparser.add_argument('--func_info_list', default='func_info_list.txt')
argparser.add_argument('--verbose', type=int, default=0)
argparser.add_argument('--student',default='student.csv') #新增文件 学号和姓名
args, extra_args = argparser.parse_known_args() #解析参数。
#当仅获取到基本设置时,如果运行命令中传入了之后才会获取到的其他配置,不会报错;而是将多出来的部分保存起来,留到后面使用
sys.path.insert(0, args.prog_dir) #新添加的目录会优先于其他目录被import检查
all_func_info = get_func_info(args.func_info_list) #该函数在utils.py中,获取要评阅的函数名及分数和各自的测试用例
#all_student_info =get_name_info(args.student) #获取学生学号 姓名
py_list = get_student_py_list(args.prog_dir) #该函数在utils.py中,返回没有.py后缀的学生文件列表
gold_py = remove_py_suffix(args.gold_py.lower()) #该函数在utils.py中,lower()大写转成小写,去掉参考答案文件后缀
gold_funcs = get_funcs_in_one_module(gold_py, args.verbose) #该函数在utils.py中,返回gold_py的函数
assert gold_funcs is not None #assert(断言)用于判断一个表达式,在表达式条件为 false 的时候触发异常。
with open('log.stdout-outputs', 'w') as f:
sys.stdout = f #sys.stdout的形式就是print的一种默认输出格式,等于print "%VALUE%"
for one_py in py_list: #学生代码文件夹
print("学号:", one_py)
stu_name = get_name_info(one_py,args.student)
print("姓名:", stu_name)
evaluate_one_py(one_py, all_func_info, stu_name, gold_funcs, args.verbose)
| <filename>main.py<gh_stars>1-10
import sys #sys.argv 是一个包含命令行参数的列表 sys.path 包含了一个 Python 解释器自动查找所需模块的路径的列表
import argparse #argparse 是python自带的命令行参数解析包,可以用来方便地读取命令行参数
import time
from utils import *
from my_timer import MyTimer
max_time_for_import_one_py = 3.0 # seconds
min_time_for_run_one_func = 0.1 # seconds, sometimes time_gap_sec*time_ratio (for gold.py) is too small
def evaluate_one_py(py_name, all_func_info, stu_name, gold_funcs, verbose):
if verbose > 0:
print('\nStart evaluating %s %s'%( py_name,stu_name), flush=True, file=sys.stderr)
try:
with MyTimer(max_time_for_import_one_py):
this_funcs = get_funcs_in_one_module(py_name, verbose) #将学生代码的函数提取出来
except Exception as e:
print_a_thing_verbose_1('import module %s timeout: %s %s' % (py_name, type(e).__name__, e), verbose)
total_score = 0.
func_scores = []
func_names = []
for (func_name, score, time_ratio, test_case_file_name) in all_func_info: #批阅的函数名 分数 测试用例文件 时间?
func_names.append(func_name)
if this_funcs is None: #判断是否有函数
func_scores.append(0.)
print_a_thing_verbose_1('module %s does not contain func: %s' % (py_name, func_name), verbose)
continue
correct_case_cnt = 0.
lines = get_all_lines(test_case_file_name) #读文件,测试用例文件
total_case_cnt = len(lines) #测试用例个数
gold_func = gold_funcs.get(func_name) #返回gold文件里的特定的函数名(要评分的那些函数)
assert gold_func is not None #检查函数名
if this_funcs.get(func_name) is None:
lines = [] #如果没有相符合的函数名 测试用例文件也没有相符合的
for i_input, one_input in enumerate(lines):#enumerate() 函数用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引序列,同时列出数据和数据下标 i是下标,one是数据
one_input_line = one_input.strip() #移除空格换行符
assert len(one_input_line) > 0 #检查长度
one_input = eval(one_input_line) #eval() 函数用来执行一个字符串表达式,并返回表达式的值(字符串转为列表)
one_input_for_sys = eval(one_input_line)
start_time = time.time()
gold_result = gold_func(*one_input) #将测试用例放入函数里执行?(数组第一个元素?)
end_time = time.time()
time_gap_sec = end_time - start_time #执行时间
try:
with MyTimer(max(time_gap_sec * time_ratio, min_time_for_run_one_func)):
result = this_funcs[func_name](*one_input_for_sys) #将测试用例放到学生函数里执行?
except Exception as e: #发生异常执行这一块
print_msg_verbose_2(py_name, func_name, i_input, '%s : %s' % (type(e).__name__, e), verbose)
continue
if gold_result is None:
print(*one_input, gold_result)
if result == gold_result: #判断学生结果和答案结果是否相等
correct_case_cnt += 1 #通过的正确的测试用例个数+1
print_msg_verbose_2(py_name, func_name, i_input, 'passed', verbose)
else:
print_msg_verbose_2(py_name, func_name, i_input, 'failed', verbose)
this_func_score = score * correct_case_cnt / total_case_cnt #分数就是通过的用例/总用例
func_scores.append(this_func_score) #函数的得分列表
total_score += this_func_score #总分数
print_func_score_verbose_1(py_name, stu_name, func_name, score, correct_case_cnt, total_case_cnt, verbose)
print_score_summary(py_name,stu_name, total_score, func_names, func_scores)
if __name__ == '__main__':
argparser = argparse.ArgumentParser() #创建解析器
argparser.add_argument('--prog_dir', default='examples/') #添加参数
argparser.add_argument('--gold_py', default='gold.py')
argparser.add_argument('--func_info_list', default='func_info_list.txt')
argparser.add_argument('--verbose', type=int, default=0)
argparser.add_argument('--student',default='student.csv') #新增文件 学号和姓名
args, extra_args = argparser.parse_known_args() #解析参数。
#当仅获取到基本设置时,如果运行命令中传入了之后才会获取到的其他配置,不会报错;而是将多出来的部分保存起来,留到后面使用
sys.path.insert(0, args.prog_dir) #新添加的目录会优先于其他目录被import检查
all_func_info = get_func_info(args.func_info_list) #该函数在utils.py中,获取要评阅的函数名及分数和各自的测试用例
#all_student_info =get_name_info(args.student) #获取学生学号 姓名
py_list = get_student_py_list(args.prog_dir) #该函数在utils.py中,返回没有.py后缀的学生文件列表
gold_py = remove_py_suffix(args.gold_py.lower()) #该函数在utils.py中,lower()大写转成小写,去掉参考答案文件后缀
gold_funcs = get_funcs_in_one_module(gold_py, args.verbose) #该函数在utils.py中,返回gold_py的函数
assert gold_funcs is not None #assert(断言)用于判断一个表达式,在表达式条件为 false 的时候触发异常。
with open('log.stdout-outputs', 'w') as f:
sys.stdout = f #sys.stdout的形式就是print的一种默认输出格式,等于print "%VALUE%"
for one_py in py_list: #学生代码文件夹
print("学号:", one_py)
stu_name = get_name_info(one_py,args.student)
print("姓名:", stu_name)
evaluate_one_py(one_py, all_func_info, stu_name, gold_funcs, args.verbose)
| zh | 0.986551 | #sys.argv 是一个包含命令行参数的列表 sys.path 包含了一个 Python 解释器自动查找所需模块的路径的列表 #argparse 是python自带的命令行参数解析包,可以用来方便地读取命令行参数 # seconds # seconds, sometimes time_gap_sec*time_ratio (for gold.py) is too small #将学生代码的函数提取出来 #批阅的函数名 分数 测试用例文件 时间? #判断是否有函数 #读文件,测试用例文件 #测试用例个数 #返回gold文件里的特定的函数名(要评分的那些函数) #检查函数名 #如果没有相符合的函数名 测试用例文件也没有相符合的 #enumerate() 函数用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引序列,同时列出数据和数据下标 i是下标,one是数据 #移除空格换行符 #检查长度 #eval() 函数用来执行一个字符串表达式,并返回表达式的值(字符串转为列表) #将测试用例放入函数里执行?(数组第一个元素?) #执行时间 #将测试用例放到学生函数里执行? #发生异常执行这一块 #判断学生结果和答案结果是否相等 #通过的正确的测试用例个数+1 #分数就是通过的用例/总用例 #函数的得分列表 #总分数 #创建解析器 #添加参数 #新增文件 学号和姓名 #解析参数。 #当仅获取到基本设置时,如果运行命令中传入了之后才会获取到的其他配置,不会报错;而是将多出来的部分保存起来,留到后面使用 #新添加的目录会优先于其他目录被import检查 #该函数在utils.py中,获取要评阅的函数名及分数和各自的测试用例 #all_student_info =get_name_info(args.student) #获取学生学号 姓名 #该函数在utils.py中,返回没有.py后缀的学生文件列表 #该函数在utils.py中,lower()大写转成小写,去掉参考答案文件后缀 #该函数在utils.py中,返回gold_py的函数 #assert(断言)用于判断一个表达式,在表达式条件为 false 的时候触发异常。 #sys.stdout的形式就是print的一种默认输出格式,等于print "%VALUE%" #学生代码文件夹 | 2.64305 | 3 |
tests/test_pddlstream.py | Learning-and-Intelligent-Systems/kitchen-worlds | 2 | 6619482 | <gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import os
import json
from os.path import join, abspath, dirname, isdir, isfile
from config import EXP_PATH
from pybullet_tools.pr2_utils import get_group_conf
from pybullet_tools.utils import disconnect, LockRenderer, has_gui, WorldSaver, wait_if_gui, \
SEPARATOR, get_aabb, wait_for_duration
from pybullet_tools.bullet_utils import summarize_facts, print_goal, nice
from pybullet_tools.pr2_agent import get_stream_info, post_process, move_cost_fn ## , get_stream_map
from pybullet_tools.logging import TXT_FILE
## custom stream_map
from pybullet_tools.pr2_streams import get_stable_gen, get_contain_gen, get_position_gen, \
Position, get_handle_grasp_gen, LinkPose, get_ik_ir_grasp_handle_gen, get_pull_drawer_handle_motion_gen, \
get_joint_position_test, get_marker_grasp_gen, get_bconf_in_region_test, get_pull_door_handle_motion_gen, \
get_bconf_in_region_gen, get_pose_in_region_gen, get_motion_wconf_gen, get_update_wconf_p_two_gen, \
get_marker_pose_gen, get_pull_marker_to_pose_motion_gen, get_pull_marker_to_bconf_motion_gen, \
get_pull_marker_random_motion_gen, get_ik_ungrasp_handle_gen, get_pose_in_region_test, \
get_cfree_btraj_pose_test, get_joint_position_open_gen, get_ik_ungrasp_mark_gen, \
sample_joint_position_open_list_gen, get_update_wconf_pst_gen, get_ik_ir_wconf_gen, \
get_update_wconf_p_gen, get_ik_ir_wconf_gen, get_pose_in_space_test, get_turn_knob_handle_motion_gen
from pybullet_tools.pr2_primitives import get_group_joints, Conf, get_base_custom_limits, Pose, Conf, \
get_ik_ir_gen, get_motion_gen, get_cfree_approach_pose_test, get_cfree_pose_pose_test, get_cfree_traj_pose_test, \
get_grasp_gen, Attach, Detach, Clean, Cook, control_commands, Command, \
get_gripper_joints, GripperCommand, State
from pddlstream.language.generator import from_gen_fn, from_list_fn, from_fn, fn_from_constant, empty_gen, from_test
from pddlstream.language.constants import Equal, AND, print_solution, PDDLProblem
from pddlstream.utils import read, INF, get_file_path, find_unique, Profiler, str_from_object
from pddlstream.algorithms.meta import solve, create_parser
from pybullet_planning.lisdf_tools.lisdf_loader import load_lisdf_pybullet
from pybullet_planning.lisdf_tools.lisdf_planning import pddl_to_init_goal, Problem
from world_builder.actions import apply_actions
DEFAULT_TEST = 'kitchen' ## 'blocks_pick' ##
def get_stream_map(p, c, l, t):
# p = problem
# c = collisions
# l = custom_limits
# t = teleport
stream_map = {
'sample-pose': from_gen_fn(get_stable_gen(p, collisions=c)),
'sample-pose-inside': from_gen_fn(get_contain_gen(p, collisions=c)), ##
'sample-grasp': from_list_fn(get_grasp_gen(p, collisions=True)),
'inverse-kinematics': from_gen_fn(get_ik_ir_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, max_attempts=60, verbose=False)),
'inverse-kinematics-wconf': from_gen_fn(get_ik_ir_wconf_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, max_attempts=60, verbose=False,
visualize=False)),
'plan-base-motion': from_fn(get_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'plan-base-motion-wconf': from_fn(get_motion_wconf_gen(p, collisions=c, teleport=t, custom_limits=l)),
'test-cfree-pose-pose': from_test(get_cfree_pose_pose_test(collisions=c)),
'test-cfree-approach-pose': from_test(get_cfree_approach_pose_test(p, collisions=c)),
'test-cfree-traj-pose': from_test(get_cfree_traj_pose_test(p.robot, collisions=c)),
'test-cfree-btraj-pose': from_test(get_cfree_btraj_pose_test(p.robot, collisions=c)),
# 'get-joint-position-open': from_fn(get_joint_position_open_gen(p)),
'get-joint-position-open': from_list_fn(sample_joint_position_open_list_gen(p)),
# 'sample-joint-position-open': from_fn(get_position_gen(p, collisions=c, extent='max')),
# 'sample-joint-position-closed': from_fn(get_position_gen(p, collisions=c, extent='min')),
# 'test-joint-position-open': from_test(get_joint_position_test(extent='max')),
# 'test-joint-position-closed': from_test(get_joint_position_test(extent='min')),
'sample-handle-grasp': from_list_fn(get_handle_grasp_gen(p, collisions=c)),
'inverse-kinematics-grasp-handle': from_gen_fn(
get_ik_ir_grasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, verbose=False, ACONF=True, WCONF=False)),
'inverse-kinematics-ungrasp-handle': from_gen_fn(
get_ik_ungrasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
verbose=False, WCONF=False)),
'inverse-kinematics-grasp-handle-wconf': from_gen_fn(
get_ik_ir_grasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, verbose=False, ACONF=True, WCONF=True)),
'inverse-kinematics-ungrasp-handle-wconf': from_gen_fn(
get_ik_ungrasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
verbose=False, WCONF=True)),
'plan-base-pull-drawer-handle': from_fn(
get_pull_drawer_handle_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'plan-base-pull-door-handle': from_fn(
get_pull_door_handle_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'plan-arm-turn-knob-handle': from_fn(
get_turn_knob_handle_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'sample-marker-grasp': from_list_fn(get_marker_grasp_gen(p, collisions=c)),
'inverse-kinematics-grasp-marker': from_gen_fn(
get_ik_ir_grasp_handle_gen(p, collisions=True, teleport=t, custom_limits=l,
learned=False, verbose=False)),
'inverse-kinematics-ungrasp-marker': from_fn(
get_ik_ungrasp_mark_gen(p, collisions=True, teleport=t, custom_limits=l)),
'plan-base-pull-marker-random': from_gen_fn(
get_pull_marker_random_motion_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False)),
'sample-marker-pose': from_list_fn(get_marker_pose_gen(p, collisions=c)),
'plan-base-pull-marker-to-bconf': from_fn(get_pull_marker_to_bconf_motion_gen(p, collisions=c, teleport=t)),
'plan-base-pull-marker-to-pose': from_fn(get_pull_marker_to_pose_motion_gen(p, collisions=c, teleport=t)),
'test-bconf-in-region': from_test(get_bconf_in_region_test(p.robot)),
'test-pose-in-region': from_test(get_pose_in_region_test()),
'test-pose-in-space': from_test(get_pose_in_space_test()), ##
# 'sample-bconf-in-region': from_gen_fn(get_bconf_in_region_gen(p, collisions=c, visualize=False)),
'sample-bconf-in-region': from_list_fn(get_bconf_in_region_gen(p, collisions=c, visualize=False)),
'sample-pose-in-region': from_list_fn(get_pose_in_region_gen(p, collisions=c, visualize=False)),
'update-wconf-p': from_fn(get_update_wconf_p_gen()),
'update-wconf-p-two': from_fn(get_update_wconf_p_two_gen()),
'update-wconf-pst': from_fn(get_update_wconf_pst_gen()),
'MoveCost': move_cost_fn,
# 'TrajPoseCollision': fn_from_constant(False),
# 'TrajArmCollision': fn_from_constant(False),
# 'TrajGraspCollision': fn_from_constant(False),
}
return stream_map
def pddlstream_from_dir(problem, exp_dir, collisions=True, teleport=False):
world = problem.world
domain_pddl = read(join(exp_dir, 'domain_full.pddl'))
stream_pddl = read(join(exp_dir, 'stream.pddl'))
planning_config = json.load(open(join(exp_dir, 'planning_config.json')))
constant_map = {}
init, goal = pddl_to_init_goal(exp_dir, world)
goal = [AND] + goal
problem.add_init(init)
base_limits = planning_config['base_limits']
custom_limits = get_base_custom_limits(world.robot, base_limits)
stream_map = get_stream_map(problem, collisions, custom_limits, teleport)
return PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
def init_experiment(exp_dir):
if isfile(TXT_FILE):
os.remove(TXT_FILE)
def get_args(exp_name):
parser = create_parser()
parser.add_argument('-test', type=str, default=exp_name, help='Name of the test case')
parser.add_argument('-cfree', action='store_true', help='Disables collisions during planning')
parser.add_argument('-enable', action='store_true', help='Enables rendering during planning')
parser.add_argument('-teleport', action='store_true', help='Teleports between configurations')
parser.add_argument('-simulate', action='store_true', help='Simulates the system')
args = parser.parse_args()
print('Arguments:', args)
return args
#####################################
def main(exp_name, verbose=True):
args = get_args(exp_name)
exp_dir = join(EXP_PATH, args.test)
world = load_lisdf_pybullet(exp_dir) ## join(exp_dir, 'scene.lisdf'))
saver = WorldSaver()
problem = Problem(world)
pddlstream_problem = pddlstream_from_dir(problem, exp_dir=exp_dir, collisions=not args.cfree,
teleport=args.teleport)
world.summarize_all_objects()
stream_info = get_stream_info(partial=False, defer=False)
_, _, _, stream_map, init, goal = pddlstream_problem
summarize_facts(init, world=world)
print_goal(goal)
print(SEPARATOR)
init_experiment(exp_dir)
with Profiler():
with LockRenderer(lock=not args.enable):
solution = solve(pddlstream_problem, algorithm=args.algorithm, unit_costs=args.unit,
stream_info=stream_info, success_cost=INF, verbose=True, debug=False)
saver.restore()
print_solution(solution)
plan, cost, evaluations = solution
if (plan is None) or not has_gui():
disconnect()
return
print(SEPARATOR)
with LockRenderer(lock=not args.enable):
commands = post_process(problem, plan)
problem.remove_gripper()
saver.restore()
saver.restore()
wait_if_gui('Execute?')
if args.simulate: ## real physics
control_commands(commands)
else:
# apply_commands(State(), commands, time_step=0.01)
apply_actions(problem, commands, time_step=0.01)
wait_if_gui('Finish?')
disconnect()
if __name__ == '__main__':
main(exp_name=DEFAULT_TEST)
| #!/usr/bin/env python
from __future__ import print_function
import os
import json
from os.path import join, abspath, dirname, isdir, isfile
from config import EXP_PATH
from pybullet_tools.pr2_utils import get_group_conf
from pybullet_tools.utils import disconnect, LockRenderer, has_gui, WorldSaver, wait_if_gui, \
SEPARATOR, get_aabb, wait_for_duration
from pybullet_tools.bullet_utils import summarize_facts, print_goal, nice
from pybullet_tools.pr2_agent import get_stream_info, post_process, move_cost_fn ## , get_stream_map
from pybullet_tools.logging import TXT_FILE
## custom stream_map
from pybullet_tools.pr2_streams import get_stable_gen, get_contain_gen, get_position_gen, \
Position, get_handle_grasp_gen, LinkPose, get_ik_ir_grasp_handle_gen, get_pull_drawer_handle_motion_gen, \
get_joint_position_test, get_marker_grasp_gen, get_bconf_in_region_test, get_pull_door_handle_motion_gen, \
get_bconf_in_region_gen, get_pose_in_region_gen, get_motion_wconf_gen, get_update_wconf_p_two_gen, \
get_marker_pose_gen, get_pull_marker_to_pose_motion_gen, get_pull_marker_to_bconf_motion_gen, \
get_pull_marker_random_motion_gen, get_ik_ungrasp_handle_gen, get_pose_in_region_test, \
get_cfree_btraj_pose_test, get_joint_position_open_gen, get_ik_ungrasp_mark_gen, \
sample_joint_position_open_list_gen, get_update_wconf_pst_gen, get_ik_ir_wconf_gen, \
get_update_wconf_p_gen, get_ik_ir_wconf_gen, get_pose_in_space_test, get_turn_knob_handle_motion_gen
from pybullet_tools.pr2_primitives import get_group_joints, Conf, get_base_custom_limits, Pose, Conf, \
get_ik_ir_gen, get_motion_gen, get_cfree_approach_pose_test, get_cfree_pose_pose_test, get_cfree_traj_pose_test, \
get_grasp_gen, Attach, Detach, Clean, Cook, control_commands, Command, \
get_gripper_joints, GripperCommand, State
from pddlstream.language.generator import from_gen_fn, from_list_fn, from_fn, fn_from_constant, empty_gen, from_test
from pddlstream.language.constants import Equal, AND, print_solution, PDDLProblem
from pddlstream.utils import read, INF, get_file_path, find_unique, Profiler, str_from_object
from pddlstream.algorithms.meta import solve, create_parser
from pybullet_planning.lisdf_tools.lisdf_loader import load_lisdf_pybullet
from pybullet_planning.lisdf_tools.lisdf_planning import pddl_to_init_goal, Problem
from world_builder.actions import apply_actions
DEFAULT_TEST = 'kitchen' ## 'blocks_pick' ##
def get_stream_map(p, c, l, t):
# p = problem
# c = collisions
# l = custom_limits
# t = teleport
stream_map = {
'sample-pose': from_gen_fn(get_stable_gen(p, collisions=c)),
'sample-pose-inside': from_gen_fn(get_contain_gen(p, collisions=c)), ##
'sample-grasp': from_list_fn(get_grasp_gen(p, collisions=True)),
'inverse-kinematics': from_gen_fn(get_ik_ir_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, max_attempts=60, verbose=False)),
'inverse-kinematics-wconf': from_gen_fn(get_ik_ir_wconf_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, max_attempts=60, verbose=False,
visualize=False)),
'plan-base-motion': from_fn(get_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'plan-base-motion-wconf': from_fn(get_motion_wconf_gen(p, collisions=c, teleport=t, custom_limits=l)),
'test-cfree-pose-pose': from_test(get_cfree_pose_pose_test(collisions=c)),
'test-cfree-approach-pose': from_test(get_cfree_approach_pose_test(p, collisions=c)),
'test-cfree-traj-pose': from_test(get_cfree_traj_pose_test(p.robot, collisions=c)),
'test-cfree-btraj-pose': from_test(get_cfree_btraj_pose_test(p.robot, collisions=c)),
# 'get-joint-position-open': from_fn(get_joint_position_open_gen(p)),
'get-joint-position-open': from_list_fn(sample_joint_position_open_list_gen(p)),
# 'sample-joint-position-open': from_fn(get_position_gen(p, collisions=c, extent='max')),
# 'sample-joint-position-closed': from_fn(get_position_gen(p, collisions=c, extent='min')),
# 'test-joint-position-open': from_test(get_joint_position_test(extent='max')),
# 'test-joint-position-closed': from_test(get_joint_position_test(extent='min')),
'sample-handle-grasp': from_list_fn(get_handle_grasp_gen(p, collisions=c)),
'inverse-kinematics-grasp-handle': from_gen_fn(
get_ik_ir_grasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, verbose=False, ACONF=True, WCONF=False)),
'inverse-kinematics-ungrasp-handle': from_gen_fn(
get_ik_ungrasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
verbose=False, WCONF=False)),
'inverse-kinematics-grasp-handle-wconf': from_gen_fn(
get_ik_ir_grasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False, verbose=False, ACONF=True, WCONF=True)),
'inverse-kinematics-ungrasp-handle-wconf': from_gen_fn(
get_ik_ungrasp_handle_gen(p, collisions=c, teleport=t, custom_limits=l,
verbose=False, WCONF=True)),
'plan-base-pull-drawer-handle': from_fn(
get_pull_drawer_handle_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'plan-base-pull-door-handle': from_fn(
get_pull_door_handle_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'plan-arm-turn-knob-handle': from_fn(
get_turn_knob_handle_motion_gen(p, collisions=c, teleport=t, custom_limits=l)),
'sample-marker-grasp': from_list_fn(get_marker_grasp_gen(p, collisions=c)),
'inverse-kinematics-grasp-marker': from_gen_fn(
get_ik_ir_grasp_handle_gen(p, collisions=True, teleport=t, custom_limits=l,
learned=False, verbose=False)),
'inverse-kinematics-ungrasp-marker': from_fn(
get_ik_ungrasp_mark_gen(p, collisions=True, teleport=t, custom_limits=l)),
'plan-base-pull-marker-random': from_gen_fn(
get_pull_marker_random_motion_gen(p, collisions=c, teleport=t, custom_limits=l,
learned=False)),
'sample-marker-pose': from_list_fn(get_marker_pose_gen(p, collisions=c)),
'plan-base-pull-marker-to-bconf': from_fn(get_pull_marker_to_bconf_motion_gen(p, collisions=c, teleport=t)),
'plan-base-pull-marker-to-pose': from_fn(get_pull_marker_to_pose_motion_gen(p, collisions=c, teleport=t)),
'test-bconf-in-region': from_test(get_bconf_in_region_test(p.robot)),
'test-pose-in-region': from_test(get_pose_in_region_test()),
'test-pose-in-space': from_test(get_pose_in_space_test()), ##
# 'sample-bconf-in-region': from_gen_fn(get_bconf_in_region_gen(p, collisions=c, visualize=False)),
'sample-bconf-in-region': from_list_fn(get_bconf_in_region_gen(p, collisions=c, visualize=False)),
'sample-pose-in-region': from_list_fn(get_pose_in_region_gen(p, collisions=c, visualize=False)),
'update-wconf-p': from_fn(get_update_wconf_p_gen()),
'update-wconf-p-two': from_fn(get_update_wconf_p_two_gen()),
'update-wconf-pst': from_fn(get_update_wconf_pst_gen()),
'MoveCost': move_cost_fn,
# 'TrajPoseCollision': fn_from_constant(False),
# 'TrajArmCollision': fn_from_constant(False),
# 'TrajGraspCollision': fn_from_constant(False),
}
return stream_map
def pddlstream_from_dir(problem, exp_dir, collisions=True, teleport=False):
world = problem.world
domain_pddl = read(join(exp_dir, 'domain_full.pddl'))
stream_pddl = read(join(exp_dir, 'stream.pddl'))
planning_config = json.load(open(join(exp_dir, 'planning_config.json')))
constant_map = {}
init, goal = pddl_to_init_goal(exp_dir, world)
goal = [AND] + goal
problem.add_init(init)
base_limits = planning_config['base_limits']
custom_limits = get_base_custom_limits(world.robot, base_limits)
stream_map = get_stream_map(problem, collisions, custom_limits, teleport)
return PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
def init_experiment(exp_dir):
if isfile(TXT_FILE):
os.remove(TXT_FILE)
def get_args(exp_name):
parser = create_parser()
parser.add_argument('-test', type=str, default=exp_name, help='Name of the test case')
parser.add_argument('-cfree', action='store_true', help='Disables collisions during planning')
parser.add_argument('-enable', action='store_true', help='Enables rendering during planning')
parser.add_argument('-teleport', action='store_true', help='Teleports between configurations')
parser.add_argument('-simulate', action='store_true', help='Simulates the system')
args = parser.parse_args()
print('Arguments:', args)
return args
#####################################
def main(exp_name, verbose=True):
args = get_args(exp_name)
exp_dir = join(EXP_PATH, args.test)
world = load_lisdf_pybullet(exp_dir) ## join(exp_dir, 'scene.lisdf'))
saver = WorldSaver()
problem = Problem(world)
pddlstream_problem = pddlstream_from_dir(problem, exp_dir=exp_dir, collisions=not args.cfree,
teleport=args.teleport)
world.summarize_all_objects()
stream_info = get_stream_info(partial=False, defer=False)
_, _, _, stream_map, init, goal = pddlstream_problem
summarize_facts(init, world=world)
print_goal(goal)
print(SEPARATOR)
init_experiment(exp_dir)
with Profiler():
with LockRenderer(lock=not args.enable):
solution = solve(pddlstream_problem, algorithm=args.algorithm, unit_costs=args.unit,
stream_info=stream_info, success_cost=INF, verbose=True, debug=False)
saver.restore()
print_solution(solution)
plan, cost, evaluations = solution
if (plan is None) or not has_gui():
disconnect()
return
print(SEPARATOR)
with LockRenderer(lock=not args.enable):
commands = post_process(problem, plan)
problem.remove_gripper()
saver.restore()
saver.restore()
wait_if_gui('Execute?')
if args.simulate: ## real physics
control_commands(commands)
else:
# apply_commands(State(), commands, time_step=0.01)
apply_actions(problem, commands, time_step=0.01)
wait_if_gui('Finish?')
disconnect()
if __name__ == '__main__':
main(exp_name=DEFAULT_TEST) | en | 0.235457 | #!/usr/bin/env python ## , get_stream_map ## custom stream_map ## 'blocks_pick' ## # p = problem # c = collisions # l = custom_limits # t = teleport ## # 'get-joint-position-open': from_fn(get_joint_position_open_gen(p)), # 'sample-joint-position-open': from_fn(get_position_gen(p, collisions=c, extent='max')), # 'sample-joint-position-closed': from_fn(get_position_gen(p, collisions=c, extent='min')), # 'test-joint-position-open': from_test(get_joint_position_test(extent='max')), # 'test-joint-position-closed': from_test(get_joint_position_test(extent='min')), ## # 'sample-bconf-in-region': from_gen_fn(get_bconf_in_region_gen(p, collisions=c, visualize=False)), # 'TrajPoseCollision': fn_from_constant(False), # 'TrajArmCollision': fn_from_constant(False), # 'TrajGraspCollision': fn_from_constant(False), ##################################### ## join(exp_dir, 'scene.lisdf')) ## real physics # apply_commands(State(), commands, time_step=0.01) | 1.467222 | 1 |
streamlit_ui/recommendation.py | Taher-Dohadwala/better-job-finder | 0 | 6619483 | """
This script contains the UI interface for viewing recommendation
"""
import random
import streamlit as st
from streamlit_ui.job_finder import search_result_block,search
from streamlit_ui.data_streamer import DataStreamer
import tensorflow as tf
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification
data_streamer = DataStreamer()
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
loaded_model = TFDistilBertForSequenceClassification.from_pretrained("models/recommendation")
def search_result_block(job_title,company,location_,date,apply,description,confidence):
"""This function contains a boilerplate job posting card layout """
# split into two columns
col1, col2 = st.beta_columns(2)
# left side contains job info
with col1:
st.text(job_title)
st.text(company)
st.text(location_)
st.text(date)
with col2:
# right slide contains apply link and label selection
link = f'[Apply]({apply})'
st.markdown(link, unsafe_allow_html=True)
# Display confidence
st.text(f"{confidence*100:.2f}% confidence")
# hides description until clicked on
with st.beta_expander("See Description"):
st.markdown(description)
# cache optimizes for same searches
@st.cache(show_spinner=False)
def search(position,location):
"""Takes a job position and location and returns aggregated job search results """
data_streamer.search(position,location)
job_titles,companies,locations,dates,applies,descriptions = data_streamer.get_data()
return job_titles,companies,locations,dates,applies,descriptions
@st.cache(show_spinner=False)
def make_prediction(example):
predict_input = tokenizer.encode(example,
truncation=True,
padding=True,
return_tensors="tf")
tf_output = loaded_model.predict(predict_input)[0]
tf_prediction = tf.nn.softmax(tf_output, axis=1).numpy()[0]
pred = tf.argmax(tf_prediction)
confidence = tf_prediction[pred]
return pred,confidence
def app():
# Title of the app
st.title('Search and view recommended jobs')
# top search bar
col1, col2 = st.beta_columns(2)
with col1:
position = st.text_input('Job Search', 'Data Science')
with col2:
location = st.text_input("Location","Chicago, IL")
# display loading status
results = st.beta_container()
with st.spinner("Finding Interesting Jobs..."):
# scrape job data from all data sources
job_titles,companies,locations,dates,applies,descriptions = search(position,location)
predictions = []
confidences = []
# display loading status
with st.spinner("Model Inference..."):
for description in descriptions:
pred,conf = make_prediction(description)
predictions.append(pred)
confidences.append(conf)
no_results = True
with results:
# display job info based on model recommendations only
st.text("Recommended Jobs Only:")
for i,(pred,conf,job_title,company,location_,date,apply,description) in enumerate(zip(predictions,confidences,job_titles,companies,locations,dates,applies,descriptions)):
if pred == 1 and conf > 0.5:
search_result_block(job_title,company,location_,date,apply,description,conf)
if no_results:
no_results = False
if no_results:
st.text("No matches with this search. Try another search")
if __name__ == '__main__':
app()
| """
This script contains the UI interface for viewing recommendation
"""
import random
import streamlit as st
from streamlit_ui.job_finder import search_result_block,search
from streamlit_ui.data_streamer import DataStreamer
import tensorflow as tf
from transformers import DistilBertTokenizerFast
from transformers import TFDistilBertForSequenceClassification
data_streamer = DataStreamer()
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
loaded_model = TFDistilBertForSequenceClassification.from_pretrained("models/recommendation")
def search_result_block(job_title,company,location_,date,apply,description,confidence):
"""This function contains a boilerplate job posting card layout """
# split into two columns
col1, col2 = st.beta_columns(2)
# left side contains job info
with col1:
st.text(job_title)
st.text(company)
st.text(location_)
st.text(date)
with col2:
# right slide contains apply link and label selection
link = f'[Apply]({apply})'
st.markdown(link, unsafe_allow_html=True)
# Display confidence
st.text(f"{confidence*100:.2f}% confidence")
# hides description until clicked on
with st.beta_expander("See Description"):
st.markdown(description)
# cache optimizes for same searches
@st.cache(show_spinner=False)
def search(position,location):
"""Takes a job position and location and returns aggregated job search results """
data_streamer.search(position,location)
job_titles,companies,locations,dates,applies,descriptions = data_streamer.get_data()
return job_titles,companies,locations,dates,applies,descriptions
@st.cache(show_spinner=False)
def make_prediction(example):
predict_input = tokenizer.encode(example,
truncation=True,
padding=True,
return_tensors="tf")
tf_output = loaded_model.predict(predict_input)[0]
tf_prediction = tf.nn.softmax(tf_output, axis=1).numpy()[0]
pred = tf.argmax(tf_prediction)
confidence = tf_prediction[pred]
return pred,confidence
def app():
# Title of the app
st.title('Search and view recommended jobs')
# top search bar
col1, col2 = st.beta_columns(2)
with col1:
position = st.text_input('Job Search', 'Data Science')
with col2:
location = st.text_input("Location","Chicago, IL")
# display loading status
results = st.beta_container()
with st.spinner("Finding Interesting Jobs..."):
# scrape job data from all data sources
job_titles,companies,locations,dates,applies,descriptions = search(position,location)
predictions = []
confidences = []
# display loading status
with st.spinner("Model Inference..."):
for description in descriptions:
pred,conf = make_prediction(description)
predictions.append(pred)
confidences.append(conf)
no_results = True
with results:
# display job info based on model recommendations only
st.text("Recommended Jobs Only:")
for i,(pred,conf,job_title,company,location_,date,apply,description) in enumerate(zip(predictions,confidences,job_titles,companies,locations,dates,applies,descriptions)):
if pred == 1 and conf > 0.5:
search_result_block(job_title,company,location_,date,apply,description,conf)
if no_results:
no_results = False
if no_results:
st.text("No matches with this search. Try another search")
if __name__ == '__main__':
app()
| en | 0.778518 | This script contains the UI interface for viewing recommendation This function contains a boilerplate job posting card layout # split into two columns # left side contains job info # right slide contains apply link and label selection # Display confidence # hides description until clicked on # cache optimizes for same searches Takes a job position and location and returns aggregated job search results # Title of the app # top search bar # display loading status # scrape job data from all data sources # display loading status # display job info based on model recommendations only | 2.742064 | 3 |
DRL/log_analysis/tracks/reinvent_base-1500-4-2019-10-01-224416.py | EXYNOS-999/AWS_JPL_DRL | 0 | 6619484 | array([[2.88738855, 0.72646774],
[3.16759122, 0.70478649],
[3.45517317, 0.69217863],
[3.75325158, 0.68581005],
[4.07281434, 0.68360819],
[4.50000223, 0.68376092],
[4.54999507, 0.68377879],
[5.11738115, 0.69080411],
[5.44798256, 0.7112322 ],
[5.71126558, 0.7422347 ],
[5.94137211, 0.78496462],
[6.1491271 , 0.84078035],
[6.33675893, 0.91066736],
[6.50351669, 0.99483994],
[6.64762588, 1.09336367],
[6.76714849, 1.20640158],
[6.85790417, 1.33508669],
[6.92193762, 1.47646609],
[6.96026824, 1.62797346],
[6.96689958, 1.7888072 ],
[6.92976742, 1.95515434],
[6.85379617, 2.11910271],
[6.72693273, 2.26841633],
[6.56582731, 2.3979065 ],
[6.38075512, 2.50632652],
[6.18037171, 2.5960265 ],
[5.97126499, 2.67207187],
[5.75829177, 2.74110301],
[5.5588064 , 2.81130664],
[5.36088415, 2.88623818],
[5.16456229, 2.96629375],
[4.96988832, 3.05190956],
[4.77697334, 3.14377629],
[4.58660766, 3.24539747],
[4.39799283, 3.35419739],
[4.21046443, 3.46760151],
[4.02347669, 3.58333046],
[3.8506858 , 3.68988272],
[3.6826464 , 3.79114179],
[3.51884306, 3.88569665],
[3.35641365, 3.97361826],
[3.19259098, 4.05426986],
[3.02554648, 4.12572184],
[2.85392239, 4.18548215],
[2.67754933, 4.23399905],
[2.49618509, 4.27140786],
[2.30880373, 4.29610891],
[2.11373905, 4.30523325],
[1.90856103, 4.29409449],
[1.68968426, 4.25390854],
[1.45387751, 4.16915111],
[1.21119005, 4.00653223],
[1.01922953, 3.74402202],
[0.92220549, 3.42050544],
[0.88926604, 3.10443889],
[0.89600747, 2.82076036],
[0.92404943, 2.56281185],
[0.96605253, 2.32460305],
[1.01802833, 2.11228544],
[1.08079017, 1.91512981],
[1.15513698, 1.73107571],
[1.24162317, 1.56014807],
[1.34112998, 1.40323884],
[1.45472589, 1.2610932 ],
[1.58653095, 1.13641183],
[1.74472608, 1.03228688],
[1.92655529, 0.94305481],
[2.13282228, 0.86779425],
[2.36411252, 0.80679887],
[2.61751276, 0.75992145],
[2.88738855, 0.72646774]]) | array([[2.88738855, 0.72646774],
[3.16759122, 0.70478649],
[3.45517317, 0.69217863],
[3.75325158, 0.68581005],
[4.07281434, 0.68360819],
[4.50000223, 0.68376092],
[4.54999507, 0.68377879],
[5.11738115, 0.69080411],
[5.44798256, 0.7112322 ],
[5.71126558, 0.7422347 ],
[5.94137211, 0.78496462],
[6.1491271 , 0.84078035],
[6.33675893, 0.91066736],
[6.50351669, 0.99483994],
[6.64762588, 1.09336367],
[6.76714849, 1.20640158],
[6.85790417, 1.33508669],
[6.92193762, 1.47646609],
[6.96026824, 1.62797346],
[6.96689958, 1.7888072 ],
[6.92976742, 1.95515434],
[6.85379617, 2.11910271],
[6.72693273, 2.26841633],
[6.56582731, 2.3979065 ],
[6.38075512, 2.50632652],
[6.18037171, 2.5960265 ],
[5.97126499, 2.67207187],
[5.75829177, 2.74110301],
[5.5588064 , 2.81130664],
[5.36088415, 2.88623818],
[5.16456229, 2.96629375],
[4.96988832, 3.05190956],
[4.77697334, 3.14377629],
[4.58660766, 3.24539747],
[4.39799283, 3.35419739],
[4.21046443, 3.46760151],
[4.02347669, 3.58333046],
[3.8506858 , 3.68988272],
[3.6826464 , 3.79114179],
[3.51884306, 3.88569665],
[3.35641365, 3.97361826],
[3.19259098, 4.05426986],
[3.02554648, 4.12572184],
[2.85392239, 4.18548215],
[2.67754933, 4.23399905],
[2.49618509, 4.27140786],
[2.30880373, 4.29610891],
[2.11373905, 4.30523325],
[1.90856103, 4.29409449],
[1.68968426, 4.25390854],
[1.45387751, 4.16915111],
[1.21119005, 4.00653223],
[1.01922953, 3.74402202],
[0.92220549, 3.42050544],
[0.88926604, 3.10443889],
[0.89600747, 2.82076036],
[0.92404943, 2.56281185],
[0.96605253, 2.32460305],
[1.01802833, 2.11228544],
[1.08079017, 1.91512981],
[1.15513698, 1.73107571],
[1.24162317, 1.56014807],
[1.34112998, 1.40323884],
[1.45472589, 1.2610932 ],
[1.58653095, 1.13641183],
[1.74472608, 1.03228688],
[1.92655529, 0.94305481],
[2.13282228, 0.86779425],
[2.36411252, 0.80679887],
[2.61751276, 0.75992145],
[2.88738855, 0.72646774]]) | none | 1 | 1.31331 | 1 | |
crea/__init__.py | creativechain/crea-python | 0 | 6619485 | # -*- coding: utf-8 -*-
from .crea import Crea
__version__ = '1.0.1'
| # -*- coding: utf-8 -*-
from .crea import Crea
__version__ = '1.0.1'
| en | 0.769321 | # -*- coding: utf-8 -*- | 0.929499 | 1 |
Uncuffed/network/PeerNetwork.py | WckdAwe/Uncuffed | 2 | 6619486 | import collections
import concurrent.futures
import json
import requests as requests
from typing import Set, Optional
from .Peer import Peer
from ..helpers.Storable import Storable
from ..helpers.paths import PATH_DATA
class PeerNetwork(Storable):
"""
Network of peers stored in each node.
"""
__instance = None
def __init__(self, peers: Set[Peer] = None):
""" Virtually private constructor. """
if PeerNetwork.__instance is not None:
raise Exception('This class is a singleton!')
else:
PeerNetwork.__instance = self
self._peers = peers or set()
self._my_peer: Optional[Peer] = None
@classmethod
def get_instance(cls):
if cls.__instance is None:
return cls.load_from_file()
else:
return cls.__instance
def get_peers(self, exclude_peer: Peer = None):
"""
:return: A copy of the peers set.
"""
peer_set = set(self._peers)
if exclude_peer is not None:
peer_set.remove(exclude_peer)
return peer_set
def register_peer(self, peer: Peer):
"""
Register peer if not already registered.
:param peer:
:return: If peer was registered or not.
"""
if peer in self._peers:
return False
self._peers.add(peer)
self.store_to_file()
return True
def unregister_peer(self, peer: Peer):
"""
Unregister peer if it is registered
:param peer:
:return: If peer was registered or not.
"""
if peer not in self._peers:
return False
self._peers.remove(peer)
self.store_to_file()
return True
@staticmethod
def post_json(peer: Peer, route, data):
"""
:param peer: The peer.
:param route: sub_url of peer to call.
:param data: json data to pass.
:return: None if failure, otherwise the response text.
"""
try:
url = peer.get_url() + route
response = requests.post(url=url, json=data)
if response.status_code == 200:
return response.text
return None
except Exception as e: # TODO
return None
def broadcast_json(self, caller: Peer, route: str, data):
"""
Broadcast a JSON Post to all peers.
:param caller: Peer initiating broadcast
:param route: sub_url of peer to call.
:param data: json data to pass.
:return: Tuple containing successfully sent and total peers.
"""
peers = self.get_peers(exclude_peer=caller)
total_peers = len(peers)
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [
executor.submit(self.post_json, peer, route, data) for peer in peers
]
total_sent = len(list(filter(lambda o: o is not None, [f.result() for f in futures])))
return total_sent, total_peers
@staticmethod
def get_storage_location() -> str:
return f'{PATH_DATA}/node_list.json'
@classmethod
def from_json(cls, data):
peers = set(map(Peer.from_string, data))
return cls(
peers=peers,
)
def to_json(self, **args) -> bytes:
return json.dumps(list(map(lambda o: str(o), self._peers)), **args).encode('utf-8')
def to_dict(self) -> dict:
return collections.OrderedDict({
'peers': map(lambda o: str(o), self._peers),
})
| import collections
import concurrent.futures
import json
import requests as requests
from typing import Set, Optional
from .Peer import Peer
from ..helpers.Storable import Storable
from ..helpers.paths import PATH_DATA
class PeerNetwork(Storable):
"""
Network of peers stored in each node.
"""
__instance = None
def __init__(self, peers: Set[Peer] = None):
""" Virtually private constructor. """
if PeerNetwork.__instance is not None:
raise Exception('This class is a singleton!')
else:
PeerNetwork.__instance = self
self._peers = peers or set()
self._my_peer: Optional[Peer] = None
@classmethod
def get_instance(cls):
if cls.__instance is None:
return cls.load_from_file()
else:
return cls.__instance
def get_peers(self, exclude_peer: Peer = None):
"""
:return: A copy of the peers set.
"""
peer_set = set(self._peers)
if exclude_peer is not None:
peer_set.remove(exclude_peer)
return peer_set
def register_peer(self, peer: Peer):
"""
Register peer if not already registered.
:param peer:
:return: If peer was registered or not.
"""
if peer in self._peers:
return False
self._peers.add(peer)
self.store_to_file()
return True
def unregister_peer(self, peer: Peer):
"""
Unregister peer if it is registered
:param peer:
:return: If peer was registered or not.
"""
if peer not in self._peers:
return False
self._peers.remove(peer)
self.store_to_file()
return True
@staticmethod
def post_json(peer: Peer, route, data):
"""
:param peer: The peer.
:param route: sub_url of peer to call.
:param data: json data to pass.
:return: None if failure, otherwise the response text.
"""
try:
url = peer.get_url() + route
response = requests.post(url=url, json=data)
if response.status_code == 200:
return response.text
return None
except Exception as e: # TODO
return None
def broadcast_json(self, caller: Peer, route: str, data):
"""
Broadcast a JSON Post to all peers.
:param caller: Peer initiating broadcast
:param route: sub_url of peer to call.
:param data: json data to pass.
:return: Tuple containing successfully sent and total peers.
"""
peers = self.get_peers(exclude_peer=caller)
total_peers = len(peers)
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [
executor.submit(self.post_json, peer, route, data) for peer in peers
]
total_sent = len(list(filter(lambda o: o is not None, [f.result() for f in futures])))
return total_sent, total_peers
@staticmethod
def get_storage_location() -> str:
return f'{PATH_DATA}/node_list.json'
@classmethod
def from_json(cls, data):
peers = set(map(Peer.from_string, data))
return cls(
peers=peers,
)
def to_json(self, **args) -> bytes:
return json.dumps(list(map(lambda o: str(o), self._peers)), **args).encode('utf-8')
def to_dict(self) -> dict:
return collections.OrderedDict({
'peers': map(lambda o: str(o), self._peers),
})
| en | 0.764435 | Network of peers stored in each node. Virtually private constructor. :return: A copy of the peers set. Register peer if not already registered. :param peer: :return: If peer was registered or not. Unregister peer if it is registered :param peer: :return: If peer was registered or not. :param peer: The peer. :param route: sub_url of peer to call. :param data: json data to pass. :return: None if failure, otherwise the response text. # TODO Broadcast a JSON Post to all peers. :param caller: Peer initiating broadcast :param route: sub_url of peer to call. :param data: json data to pass. :return: Tuple containing successfully sent and total peers. | 2.913266 | 3 |
tests/pydecompile-test/baselines/always_enums2.py | gengxf0505/pxt | 1 | 6619487 | #/ <reference path="./testBlocks/basic.ts" />
testNamespace.enumArgument(testNamespace.numberArgumentOutput(0)) | #/ <reference path="./testBlocks/basic.ts" />
testNamespace.enumArgument(testNamespace.numberArgumentOutput(0)) | en | 0.404047 | #/ <reference path="./testBlocks/basic.ts" /> | 1.089764 | 1 |
WirelessMonitoringModule/gr-radar/python/qa_signal_generator_sync_pulse_c.py | Aekai/Wi-Mind | 1 | 6619488 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Communications Engineering Lab, KIT.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import radar_swig as radar
class qa_signal_generator_sync_pulse_c (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
test_len = 30;
packet_len = 10;
pulse_send = (2,3,1)
pulse_wait = (1,2)
amplitude = 0.7
src = radar.signal_generator_sync_pulse_c(packet_len,pulse_send,pulse_wait,amplitude,"packet_len")
head = blocks.head(8,test_len)
snk = blocks.vector_sink_c()
self.tb.connect(src,head,snk)
self.tb.run ()
# create ref data
ref_data = [0]*packet_len
counter = 0
for k in range(pulse_wait[0]):
ref_data[counter+k] = complex(0,0)
counter = counter+pulse_wait[0]
for k in range(pulse_send[0]):
ref_data[counter+k] = complex(amplitude,0)
counter = counter+pulse_send[0]
for k in range(pulse_wait[1]):
ref_data[counter+k] = complex(0,0)
counter = counter+pulse_wait[1]
for k in range(pulse_send[1]):
ref_data[counter+k] = complex(amplitude,0)
counter = counter+pulse_send[1]
for k in range(pulse_send[2]):
ref_data[counter+k] = complex(amplitude,0)
# check data
data = snk.data()
data1 = data[0:packet_len] # first packet
data2 = data[0:packet_len] # second packet
self.assertComplexTuplesAlmostEqual(ref_data,data1,4) # check first packet
self.assertComplexTuplesAlmostEqual(ref_data,data2,4) # check second packet
if __name__ == '__main__':
gr_unittest.run(qa_signal_generator_sync_pulse_c)#, "qa_signal_generator_sync_pulse_c.xml")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Communications Engineering Lab, KIT.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import radar_swig as radar
class qa_signal_generator_sync_pulse_c (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
test_len = 30;
packet_len = 10;
pulse_send = (2,3,1)
pulse_wait = (1,2)
amplitude = 0.7
src = radar.signal_generator_sync_pulse_c(packet_len,pulse_send,pulse_wait,amplitude,"packet_len")
head = blocks.head(8,test_len)
snk = blocks.vector_sink_c()
self.tb.connect(src,head,snk)
self.tb.run ()
# create ref data
ref_data = [0]*packet_len
counter = 0
for k in range(pulse_wait[0]):
ref_data[counter+k] = complex(0,0)
counter = counter+pulse_wait[0]
for k in range(pulse_send[0]):
ref_data[counter+k] = complex(amplitude,0)
counter = counter+pulse_send[0]
for k in range(pulse_wait[1]):
ref_data[counter+k] = complex(0,0)
counter = counter+pulse_wait[1]
for k in range(pulse_send[1]):
ref_data[counter+k] = complex(amplitude,0)
counter = counter+pulse_send[1]
for k in range(pulse_send[2]):
ref_data[counter+k] = complex(amplitude,0)
# check data
data = snk.data()
data1 = data[0:packet_len] # first packet
data2 = data[0:packet_len] # second packet
self.assertComplexTuplesAlmostEqual(ref_data,data1,4) # check first packet
self.assertComplexTuplesAlmostEqual(ref_data,data2,4) # check second packet
if __name__ == '__main__':
gr_unittest.run(qa_signal_generator_sync_pulse_c)#, "qa_signal_generator_sync_pulse_c.xml") | en | 0.822846 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2014 Communications Engineering Lab, KIT. # # This is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # # set up fg # create ref data # check data # first packet # second packet # check first packet # check second packet #, "qa_signal_generator_sync_pulse_c.xml") | 2.410936 | 2 |
libraries/crossLanguageParser.py | jarble/EngScript | 8 | 6619489 | #This version is obsolete! Use polishnotation.py instead.
#Test everything in polyglotCodeGenerator.py
from listOfRegexes import *
from copy import copy, deepcopy
import random
from random import randint
from random import choice
from pyparsing import OneOrMore, nestedExpr
import numpy
import re;
from addParentheses import addParentheses
from removeParentheses import removeParentheses
def addOpeningAndClosingParentheses(theString):
if(theString.startswith("(") == False):
theString = theString + "("
if(theString.endswith(")") == False):
theString = theString + ")"
return theString
"function name: numberOfIndentations(theString)"
"requires functions: False"
"is defined: True"
"description: Get the number of indentations at the beginning of the string."
"function name: addInitialParentheses(theString,numberOfParentheses)"
"requires functions: numberOfIndentations(theString)"
"is defined: True"
"description: Add parentheses to beginning of the string after the indentation."
#print addInitialParentheses(" lol herp de derp", 4)
"function name: addFinalParentheses(theString,numberOfParentheses)"
"requires functions: False"
"is defined: True"
"description: Add parentheses to the end of the string."
"function name: addParentheses(theString)"
"requires functions: numberOfIndentations(theString), addFinalParentheses(theString,numberOfParentheses), addInitialParentheses(theString,numberOfParentheses)"
"is defined: True"
"description: Add parentheses to the string to match the indentation."
#print(addParentheses(
'''
while (i > 0)
(print 'hello')
(print 'hello')
while (i > 5)
print
'"world"'
'''
#))
"function name: evaluateMacro"
"requires functions: evaluateMacroWithSpecificString(inputString,variableNames,stringToReturn), addParentheses(theString)"
"is defined: False"
"description: Return the output of the macro."
"function name: splitMacroWithParentheses"
"requires functions: replaceParenthesesWithSymbols(theString), getExpressionsInParentheses(theString)"
"is defined: True"
"description: Split the macro with parentheses using a regular expression."
"function name: replaceParenthesesWithSymbols(theString)"
"requires functions: False"
"is defined: True"
"description: Replace the symbols inside nested parentheses with <<>>."
"function name: getExpressionsInParentheses(theString)"
"requires functions: False"
"is defined: True"
"description: Get an array of every substring in the input that is inside parentheses."
"function name: replaceMultipleStringsWithMultipleStrings"
"requires functions: False"
"is defined: True"
"description: Replace multiple strings in a string with multiple other strings."
functionChecker("crossLanguageParser.py", "evaluateMacro")
'''
http://stackoverflow.com/questions/18903923/how-to-split-a-string-in-python-without-redundant-output
Here's a demonstration of parameters being extracted from a macro.
Put ?: in front of every group, like this: (?:foo|bar|baz). Otherwise it will produce redundant results in the output.
'''
#An example of an array that defines a list of regular expressions to match a pattern:
patternDefiningArray = [
[
["theArray", '(rotated(?: by|))' "theDegrees", '(degrees)'],
["(rotation of)", "theArray", "by", "theDegrees", "degrees"]
],
["theArray", "theDegrees"],
["rotateArray(", "theArray", ", " "theDegrees", ")"]
]
def replaceMultipleStringsWithMultipleStrings(string, rep_dict):
pattern = re.compile("|".join([re.escape(k) for k in rep_dict.keys()]), re.M)
return pattern.sub(lambda x: rep_dict[x.group(0)], string)
#print(replaceMultipleStringsWithMultipleStrings("foo and bar are baz", {"foo":"1", "bar":"2", "baz":"3"}))
def lisp(x): #convert parse array back into symbols
newStr = ""
for current in x:
if(type(current) == str):
newStr += " " + current
else:
newStr += " " + lisp(current)
newStr = newStr[1:len(newStr)]
return "("+newStr+")"
def getExpressionsInParentheses(theString):
#print("Get the expressions for: " + theString)
theData = OneOrMore(nestedExpr()).parseString(theString)
theNewArr = []
for current in theData[0]:
if(type(current) != str):
theNewArr += [lisp(current)]
return theNewArr
def replaceParenthesesWithSymbols(theString):
#theString = addOpeningAndClosingParentheses(theString)
#print("The thing to replace with symbols is " + theString)
theData = OneOrMore(nestedExpr()).parseString(theString)
theNewString = ""
for current in theData[0]:
if(type(current) == str):
theNewString += " " + current
else:
theNewString += " <<>>"
theNewString = "(" + theNewString[1:len(theNewString)] + ")"
return theNewString
aStringToPrint = "(replace (foo) in bar with (substring from 2 to 3 in (a string called 'hello')))"
#print(replaceParenthesesWithSymbols(aStringToPrint))
#print(getExpressionsInParentheses(aStringToPrint))
def printMatches(stringToMatch):
    """Collect, for every regex in theArray that matches stringToMatch (after
    collapsing nested parentheses), its captured groups and the regex itself."""
    stringToMatch = replaceParenthesesWithSymbols(stringToMatch)
    toReturn = []
    # theArray: list of compiled regexes from listOfRegexes (star import).
    for candidate in theArray:
        matched = candidate.match(stringToMatch)
        if matched:
            toReturn.append({"splitString": matched.groups(),
                             "matchingRegex": candidate})
    return toReturn
def my_shuffle(array):
    """Shuffle array in place with random.shuffle and return it for chaining."""
    random.shuffle(array)
    return array
def getMatchingRegex(theString1):
    """Return the regex of the first match printMatches finds for theString1."""
    collapsed = replaceParenthesesWithSymbols(theString1)
    return printMatches(collapsed)[0]["matchingRegex"]
def splitMacroWithParentheses(theString):
    """Split theString with its matching regex's capture groups, then swap
    each '<<>>' placeholder back for the corresponding nested expression."""
    expressions = getExpressionsInParentheses(theString)
    collapsed = replaceParenthesesWithSymbols(theString)
    pieces = list(printMatches(collapsed)[0]["splitString"])
    replaced = 0
    for position, piece in enumerate(pieces):
        if piece == "<<>>":
            pieces[position] = expressions[replaced]
            replaced += 1
    return pieces
#splitMacroWithParentheses("(replace (substring of 'hello' between 2 and 3) in (bar is an integer) with (baz is not a string))")
#splitMacroWithParentheses("(substring of (gorp is really funny) between (3 is a magic (it's a number)) and (4 is an integer))")
def rangesOverlap(arr1, arr2):
    """Return True when the inclusive ranges arr1=[lo, hi] and arr2=[lo, hi]
    overlap, False otherwise.

    Bug fix: the original's second clause compared arr2[1] <= arr2[1]
    (always true), so the check degenerated to arr1[0] <= arr2[1] alone;
    it also fell through and returned None instead of False.
    """
    return arr1[0] <= arr2[1] and arr2[0] <= arr1[1]
def arrayDimensions(theArr):
    """Return the dimensions of (possibly nested) theArr as numpy's shape tuple."""
    asArray = numpy.asarray(theArr)
    return asArray.shape
#print(evaluateMacroWithSpecificString("(replace (substring of 'hello' between 2 and 3) in (bar is an integer) with (baz is not a string))"))
def sumOfAllNums(theNumArr):
    """Return the sum of the numbers in theNumArr (0 for an empty input).

    Bug fix: the original returned 'current' (the last element visited)
    instead of the accumulated total, and raised NameError on an empty list.
    """
    return sum(theNumArr)
def splitMacroWithWhitespace(theString):
    """Whitespace-tokenize theString (via pyparsing), swapping each '<<>>'
    placeholder back for its original nested expression."""
    expressions = getExpressionsInParentheses(theString)
    collapsed = replaceParenthesesWithSymbols(theString)
    tokens = OneOrMore(nestedExpr()).parseString(collapsed)[0]
    replaced = 0
    for position, token in enumerate(tokens):
        if token == '<<>>':
            tokens[position] = expressions[replaced]
            replaced += 1
    return tokens
def getNonSeparatorParts(theString):
    """Return the even-position tokens of the whitespace split, i.e. the
    operands sitting between the separator tokens."""
    tokens = splitMacroWithWhitespace(theString)
    return [token for position, token in enumerate(tokens) if position % 2 == 0]
def everyOtherIsTheSame(theSplitString):
    """True when every odd-indexed token equals theSplitString[1] (the
    separator) and no even-indexed token does; the length must be odd."""
    separator = theSplitString[1]
    if len(theSplitString) % 2 == 0:
        return False
    for position, token in enumerate(theSplitString):
        isSeparatorSlot = position % 2 == 1
        if isSeparatorSlot != (token == separator):
            return False
    return True
def isArithmeticOperator(theString):
    """Return True if theString is one of the recognized arithmetic,
    comparison or boolean operator tokens.

    Fix: the original list contained "-" twice; the duplicate is removed
    (membership behavior is unchanged) and a set gives O(1) lookup.
    """
    return theString in {"+", "-", "/", "^", "%", "&", "and", "or",
                         "<", ">", "<=", ">=", "==", "||"}
"function name: getDictionaryFromMacro(theVariables,theMacro,theResult)"
"requires functions: splitMacroWithParentheses, replaceParenthesesWithSymbols(theString)"
"is defined: True"
"description: Return the output of the macro. Replace the variables in stringToReturn with the parameters"
def remove_values_from_list(the_list, val):
    """Return a new list containing the elements of the_list that differ from val."""
    return list(filter(lambda element: element != val, the_list))
def removeEachValueFromList(the_list, values):
    """Strip every element equal to any entry of values out of the_list,
    returning the (possibly rebuilt) list."""
    result = the_list
    for unwanted in values:
        result = remove_values_from_list(result, unwanted)
    return result
def getDictionaryFromMacro(theVariables, theMacro, theResult):
    """Map each variable's token index in the split macro string to its token
    index in the split result string.

    theVariables is a comma-separated variable list, e.g. 'foo,bar'.

    Bug fix: the parenthesis alternatives are now raw strings — "\\(" written
    as a plain string literal is an invalid escape sequence (deprecated, and
    an error in future Python versions).
    """
    arrayOfVariables = theVariables.split(",")
    newArrayOfVariables = deepcopy(arrayOfVariables)
    newArrayOfVariables += [r"\(", r"\)"]
    theVariables = "(" + "|".join(newArrayOfVariables) + ")"
    # Split while KEEPING the separators (capturing group in re.split), then
    # drop empty strings and bare parentheses so .index() sees real tokens.
    theSplitMacro = re.split(theVariables, theMacro)
    theSplitMacro = filter(None, theSplitMacro)
    theSplitMacro = removeEachValueFromList(theSplitMacro, ["", ")", "("])
    theSplitResult = re.split(theVariables, theResult)
    theSplitResult = removeEachValueFromList(theSplitResult, ["", ")", "("])
    dictionaryToReturn = {}
    for current in arrayOfVariables:
        dictionaryToReturn[theSplitMacro.index(current)] = theSplitResult.index(current)
    return dictionaryToReturn
#print(getDictionaryFromMacro('foo,bar', '(foo equals equals bar)', '(foo == bar)'))
def is_prime(a):
    """Return True if a is a prime number.

    Bug fixes: 'xrange' does not exist in Python 3 (NameError), and the
    original returned True for 0 and 1, which are not prime. Trial division
    now stops at sqrt(a) instead of a.
    """
    if a < 2:
        return False
    return all(a % i for i in range(2, int(a ** 0.5) + 1))
#return am
#print(removeParentheses("(print (the type of foo))", "foo"))
#print(removeParentheses("((foo [bar]) = baz)","foo,bar"))
def getMacroParameters(inputString,stringThatMatchesRegex,variableNames,stringToReturn, returnParameters = False):
    """Match inputString against the macro pattern stringThatMatchesRegex and
    extract its parameters.

    Returns None when either string fails to match a known regex, or when the
    two strings match different regexes. With returnParameters=True, returns
    a dict mapping each variable name (from the comma-separated
    variableNames) to the corresponding piece of inputString; otherwise
    returns stringToReturn with those variables substituted.

    NOTE(review): near-duplicate of evaluateMacroWithSpecificString, but
    without that function's empty-variableNames shortcut and without
    recursively evaluating nested parenthesized arguments — confirm which of
    the two callers should use.
    """
    #print("Input string: " + inputString)
    #print("String that matches regex: " + stringThatMatchesRegex)
    #print("variable names: " + variableNames)
    #print("string to return: " + stringToReturn)
    #Return None if the input doesn't match a regex.
    if(printMatches(replaceParenthesesWithSymbols(inputString)) == []):
        return None
    if(printMatches(replaceParenthesesWithSymbols(stringThatMatchesRegex)) == []):
        #raise Exception(stringThatMatchesRegex + " does not match any regular expression.")
        return None
    # Parenthesized as (getMatchingRegex(...)) != getMatchingRegex(...): this
    # still parses as a single comparison, so both results are compared.
    if(getMatchingRegex(replaceParenthesesWithSymbols(stringThatMatchesRegex))) != getMatchingRegex(replaceParenthesesWithSymbols(inputString)):
        return None
    #print(replaceParenthesesWithSymbols(inputString))
    #print(getExpressionsInParentheses(inputString))
    theSplitInputString = splitMacroWithParentheses(inputString)
    theSplitParameterString = splitMacroWithParentheses(stringThatMatchesRegex)
    arrayOfVariables = variableNames.split(",")
    #print("theSplitInputString: " + str(theSplitInputString))
    #print("theSplitParameterString: " + str(theSplitParameterString))
    #print("theSplitStringToReturn: " + str(theSplitStringToReturn))
    #print("arrayOfVariables: " + str(arrayOfVariables))
    parameterInformationDictionary = {}
    #The location of each variable in theSplitInputString is the same as the location of each variable in the
    # parameter string, so pair them up positionally.
    for current in arrayOfVariables:
        for idx, current1 in enumerate(theSplitParameterString):
            #print(current + ", " + current1)
            if current1 in current:
                parameterInformationDictionary[current] = theSplitInputString[idx]
    #print("parameterInformationDictionary: " + str(parameterInformationDictionary))
    if(returnParameters == True):
        return parameterInformationDictionary
    else:
        return replaceMultipleStringsWithMultipleStrings(stringToReturn, parameterInformationDictionary)
def theThingsToEvaluate():
    """Return the base macro table consumed by evaluateMacro.

    Each entry is [list-of-input-patterns, comma-separated parameter names,
    output template string].
    """
    return [
        #removeParentheses("((foo [bar]) = baz)","foo,bar"),
        #removeParentheses("((foo[bar]) = baz)","foo,bar,baz"),
        [["(foo in bar is between goo and gar)", "(foo is between goo and gar in bar)"], "foo,bar,goo,gar", "(foo in bar is between goo and gar)"],
        [["(function named funcName that returns typeToReturn with parameters named paramNames with the parameter types paramTypes theBody)", "(public static typeToReturn funcName paramNames paramTypes theBody)"], "typeToReturn,funcName,paramNames,paramTypes,theBody", "function(parameterNames=paramNames, parameterTypes=paramTypes, isStatic=True, returnType='typeToReturn', functionName='funcName', body=theBody),"],
        [["(def funcName paramNames theBody)"], "funcName,paramNames,theBody", "function(parameterNames=paramNames, parameterTypes=paramNames, isStatic=True, returnType='void', functionName='funcName', body=theBody),"],
        [["(foo = bar)"], "foo,bar", "setVar(valueToGet=bar, valueToChange=foo)"],
        [["(not foo)"], "foo", "Not(foo)"],
        [["(convert foo from bar to baz)", "(convert foo to baz from bar)", "(foo converted from baz to bar)"], "foo,bar,baz", "convert foo from bar to baz"],
        [["(cond foo)"], "foo", "conditionalBlock(foo)"],
        [["(for theInitializer theCondition theIncrement theBody)"], "theInitializer,theCondition,theIncrement,theBody", "forLoop(body=theBody, initializer=theInitializer, condition=theCondition, increment=theIncrement)"],
        [["(foo ;)"], "foo", "seriesOfStatements([foo])"],
        [["([aVar])", "(aVar,)"], "aVar", "[aVar]"],
        [["(main foo)"], "foo", "main(foo)"],
        [["(convert foo from base bar to base baz)", "(convert foo to base baz from base bar)", "(foo converted to base baz from base bar)", "(foo converted from base bar to base baz)", "(foo in base baz instead of base bar)"], "foo,bar,baz", "(convert foo from base bar to base baz)"],
        [["(foo [ bar ])"], "foo,bar", "foo[bar]"],
        [["(switch foo)"], "foo", "Switch(foo, [])"],
        [["(if foo)"], "foo", "If(foo, [])"],
        [["(while foo)"], "foo", "While(foo, [])"],
        [["(module foo)"], "foo", "module([foo])"],
        [["(default foo)"], "foo", "default(foo)"],
        [["(foo{ bar })"], "foo,bar", "foo(bar)"],
        [["(type [ dimensions ] varName = initialValue)", "(type varName [ dimensions ] = initialValue)"], "type,varName,dimensions,initialValue", "typedimensions varName = initialValue"],
        [["(theArr[ indices ])"], "theArr,indices", "theArrindices"],
        [["(type foo = bar)"], "type,foo,bar", "initializeVar(variableName=foo, variableType=type, initialValue=bar, arrayDimensions=None)"],
        [["(switch condition insideBrackets afterBrackets)"], "condition,insideBrackets,afterBrackets", "switch(condition){insideBrackets} afterBrackets"],
        [["(if condition insideBrackets afterBrackets)"], "condition,insideBrackets,afterBrackets", "If(condition, [insideBrackets]), afterBrackets"],
        [["(foo is a prime number)"], "foo", "is_prime(foo)"],
        [["(the type of foo)"], "foo", "type(foo)"],
        [["(foo divided by bar)", "(the quotient of foo and bar)"], "foo,bar", "(foo/bar)"],
        [["(foo is between bar and baz)"], "foo,bar,baz", "((bar < foo) and (foo < baz))"],
        [["(foo contains bar)", "(bar is in foo)"], "foo,bar", "(bar in foo)"],
        [["(length of foo)"], "foo", "len(foo)"],
        [["(random number between foo and bar)"], "foo,bar", "randint(foo,bar)"],
        [["(print foo)"], "foo", "puts(foo)"],
        [["(foo to the power of bar)"], "foo,bar","(foo**bar)"],
        [["(foo and bar)"], "foo,bar","(foo and bar)"],
        [["(sum of each number in foo)"], "foo","sumOfAllNums(foo)"],
        [["(return foo)"], "foo","return foo"],
        [["(foo matches bar)"], "foo,bar","re.compile(bar).matches(foo)"],
        [["(foo equals bar)", "(foo and bar are equal)"], "foo,bar", "Equals(foo, bar)"],
        [["(foo if bar)", "(if bar foo)", "(if bar then foo)"], "foo,bar", "If(bar, foo)"],
        [["(while bar foo)", "(foo while bar)"], "foo,bar", "While(bar, foo)"],
        [["(foo is divisible by bar)", "(foo is a multiple of bar)"], "foo,bar", "(foo % bar == 0)"],
        [["(foo > bar)"], "foo,bar", "greaterThan(foo, bar)"],
        [["(foo is less than bar)"], "foo,bar", "lessThan(foo, bar)"],
        [["(foo overlaps with bar)"], "foo,bar", "rangesOverlap(foo, bar)"],
        [["(replace each foo in bar with baz)"], "foo,bar,baz", "bar.replace(foo,baz)"],
        [["(else foo)"], "foo", "Else([foo]),"],
        [["(case foo bar)"], "foo,bar", "case(foo, bar)"],
        [["(switch foo bar)"], "foo,bar", "switch(foo, bar)"],
        [["(elif foo bar)"], "foo,bar", "Elif(foo, bar)"],
        [["(elif foo bar baz)"], "foo,bar,baz", "Elif(foo, bar), baz"],
        [["(foo mod bar)"], "foo,bar", "mod(foo, bar)"],
        [["(class [[foo]] [[body]])"], "[[foo]],[[body]]", "getClass([[foo]], [[body]])"],
        [["(foo / bar)"], "foo,bar", "divide(foo, bar)"],
        [["(foo has the same meaning as bar)"], "foo,bar", "foo has the same meaning as bar"],
        [["(the value of foo)"], "foo", "foo"],
        [["(test the exec function)"], '', "exec(\"print('toPrint')\")"],
    ]
# Global macro table; filled lazily by evaluateMacro on its first call.
thingsToEvaluate = []
def evaluateMacro(stringToEvaluate, returnParameterNames=False):
    """Expand a macro string into its target-code representation.

    With returnParameterNames=True, returns a dict of the macro's positional
    parameters instead of the expansion. Raises when the input matches no
    known regex, or matches one whose output is undefined.

    Bug fix: the final branch was `print(x) + " matches..."`, which raised
    TypeError (None + str) whenever more than one regex matched; the
    concatenation is now inside the print call.

    SECURITY NOTE: strings beginning with "exec(" are executed verbatim —
    only ever feed this function trusted macro text.
    """
    if(stringToEvaluate.startswith("exec(")):
        exec(stringToEvaluate)
        return ""
    #print("Macro to evaluate: " + str(stringToEvaluate))
    if(returnParameterNames == False):
        #print(getParameterNames(stringToEvaluate))
        pass
    global thingsToEvaluate
    if (thingsToEvaluate == []):
        thingsToEvaluate = theThingsToEvaluate()
    #print(thingsToEvaluate)
    # NOTE(review): this derived-rules block runs on EVERY call, appending
    # duplicate entries to the global thingsToEvaluate each time — presumably
    # it belongs inside the empty-table guard above; left as-is to preserve
    # observable behavior.
    addThingsToEvaluate = [
        removeParentheses("(foo = (bar [ baz ]))"),
        [["(unless foo bar)", "(foo unless bar)"], "(if (not foo) then bar)"],
        [["(foo squared)"], "(foo to the power of 2)"],
        [["(foo cubed)"], "(foo to the power of 3)"],
        [["(square root of foo)"], "(foo to the power of -2)"],
        [["(foo += bar)"], "(foo = (foo + bar))"],
        [["(foo *= bar)"], "(foo = (foo * bar))"],
        [["(foo -= bar)"], "(foo = (foo - bar))"],
        [["(foo /= bar)"], "(foo = (foo / bar))"],
        [["(foo ++)"], "(foo += 1)"],
        [["(foo != bar)"], "(not (foo == bar))"],
        [["(foo = foo + bar)"], "(foo += bar)"],
        [["(foo = foo - bar)"], "(foo -= bar)"],
        [["(foo = foo * bar)"], "(foo *= bar)"],
        [["(foo if bar unless baz)"], "(foo if (bar and (not baz)))"],
        [["(print the type of foofoo)"], "(print (the type of foofoo)))"],
        [["(foo percent of bar)"], "(foo * 0.01 * bar)"],
    ]
    for current in addThingsToEvaluate:
        print(thingsToEvaluate[len(thingsToEvaluate)-1])
        thingsToEvaluate += [[current[0], getParameterNames(current[1]), evaluateMacro(current[1])]]
    stringToEvaluate = addParentheses(stringToEvaluate)
    #print("String to evaluate with added parentheses:\n" + stringToEvaluate)
    #stringToEvaluate = addOpeningAndClosingParentheses(stringToEvaluate)
    #print("Evaluate the macro " + stringToEvaluate)
    #print(splitMacroWithWhitespace(stringToEvaluate))
    theArr = printMatches(replaceParenthesesWithSymbols(stringToEvaluate))
    #theData = OneOrMore(nestedExpr()).parseString(stringToEvaluate)
    #print(theData)
    whitespaceSplitString = splitMacroWithWhitespace(stringToEvaluate)
    #print("The string split with whitespace is " + str(whitespaceSplitString))
    separatorCharacter = whitespaceSplitString[1]
    # Arithmetic / list / statement-sequence handling: tokens alternate
    # operand, separator, operand, ...
    if(everyOtherIsTheSame(whitespaceSplitString)):
        #print("Every other character in " + str(whitespaceSplitString) + " is " + str(separatorCharacter))
        nonSeparatorParts = getNonSeparatorParts(stringToEvaluate)
        if returnParameterNames == True:
            thingToReturn = {}
            for idx,current in enumerate(nonSeparatorParts):
                thingToReturn[idx] = current
            return thingToReturn
        #print("The non-separator parts are " + str(nonSeparatorParts))
        # Recursively expand each parenthesized operand first.
        for idx, current in enumerate(nonSeparatorParts):
            if(current.startswith("(")):
                print(current)
                nonSeparatorParts[idx] = evaluateMacro(current)
        if(separatorCharacter in ["+", "plus"]):
            return "add([" + ", ".join(nonSeparatorParts) + "])"
        elif(separatorCharacter in ["-", "minus"]):
            return "subtract([" + ", ".join(nonSeparatorParts) + "])"
        elif(separatorCharacter in ["*", "times"]):
            return "multiply([" + ", ".join(nonSeparatorParts) + "])"
        elif(separatorCharacter in ["||", "or", "|"]):
            return "Or([" + ", ".join(nonSeparatorParts) + "])"
        elif(separatorCharacter in ["and", "&&", "&"]):
            return "And([" + ", ".join(nonSeparatorParts) + "])"
        elif(separatorCharacter in [","]):
            return "[" + ", ".join(nonSeparatorParts) + "]"
        elif(separatorCharacter in [";"]):
            return "seriesOfStatements([" + ", ".join(nonSeparatorParts) + "])"
    #print("String to evaluate: " + stringToEvaluate)
    #print(getMatchingRegex(stringToEvaluate))
    #This code does not do
    #if returnParameterNames == False:
    #if(getMatchingRegex("(foo has the same meaning as bar)") == getMatchingRegex(stringToEvaluate)):
    #print("The input is a syntax definition: " + stringToEvaluate)
    #thingToChange = evaluateMacro(stringToEvaluate, returnParameterNames=True);
    #print(thingToChange)
    #print(thingToChange['foo'])
    #print(thingToChange['bar'])
    #thingsToEvaluate += [[thingToChange['foo'], getParameterNames(thingToChange['bar']), evaluateMacro(thingToChange['bar'])]]
    #print(thingsToEvaluate)
    #else:
    #print(str(whitespaceSplitString) + " is not an arithmetic expression.")
    # Table-driven expansion: try every known macro pattern against the input.
    for current in thingsToEvaluate:
        for currentInputString in current[0]:
            if returnParameterNames == True:
                theResult = evaluateMacroWithSpecificString(inputString=stringToEvaluate,stringThatMatchesRegex=currentInputString,variableNames=current[1],stringToReturn=current[2], returnParameters = True)
            else:
                theResult = evaluateMacroWithSpecificString(stringToEvaluate, currentInputString, current[1], current[2])
            if(theResult != None):
                if(type(theResult) == str and theResult.startswith("exec(")):
                    exec(theResult)
                    return ""
                else:
                    return theResult
    if(len(theArr) == 1):
        matchingRegex = theArr[0]["matchingRegex"]
        '''
        paramArray = splitMacroWithParentheses(stringToEvaluate)
        #Evaluate these before the macro has been evaluated:
        if matchingRegex == getMatchingRegex("(foo and bar are equal)"):
        return evaluateMacro("(" + paramArray[0] + " equals " + paramArray[2] + ")")
        elif matchingRegex == getMatchingRegex("(shuffle foo randomly)"):
        return evaluateMacro("(randomly shuffle " + paramArray[1] + ")")
        #Evaluate these after the macro has been evaluated
        for idx, current in enumerate(paramArray):
        if(current.startswith("(") and current.endswith(")")):
        paramArray[idx] = evaluateMacro(current)
        "(foo is divisible by bar)"
        "(foo % bar == 0)"
        "(randomly shuffle foo)"
        "my_shuffle(foo)"
        "(foo in reverse order)"
        "foo[::-1]"
        "(sort foo in alphabetical order)"
        "sorted(foo)"
        "(the type of foo)"
        "type(foo)"
        "(sort foo from largest to smallest)"
        "sorted(foo)"
        "(sort foo from smallest to largest)"
        "sorted(foo).reverse()"
        "(foo is an integer)"
        "(type(foo) == int)"
        "(all locations of foo in bar)"
        "[i for i, x in enumerate(bar) if x == foo]"
        "(pick random from foo)"
        "choice(foo)"
        "(dimensions of foo)"
        "arrayDimensions(foo)"
        "(print foo)"
        "(puts(foo))"
        "(foo from bar to baz)"
        "(substring of foo between bar and baz)"
        "(foo and bar)"
        "(foo and bar)"
        "(sum of each number in foo)"):
        "sumOfAllNums(foo)"
        "(return foo)"
        "Return(foo),"
        if(toReturn != ""
        #print(stringToEvaluate + " becomes (" + toReturn + ") which evaluates to " + str(eval(toReturn)))
        #stringToReturn = str(eval(toReturn))
        #print(stringToEvaluate + " becomes \n " + toReturn + "\n")
        return toReturn
        else:
        '''
        raise Exception(stringToEvaluate + " matches " + matchingRegex.pattern + ", but the output is not yet defined in evaluateMacro")
    elif(len(theArr) == 0):
        raise Exception(replaceParenthesesWithSymbols(stringToEvaluate) + " does not match any regular expression.")
    else:
        # Bug fix: concatenate INSIDE print (the original added a string to
        # print()'s None return value, raising TypeError here).
        print(stringToEvaluate + " matches more than one regular expression!")
def printMacroOutputs(theStrings):
    """Evaluate each macro string in theStrings and print it with its expansion."""
    for macroString in theStrings:
        print("To evaluate: " + macroString)
        print(str(macroString) + "\nbecomes\n " + str(evaluateMacro(macroString)) + "\n")
"function name: evaluateMacroWithSpecificString(inputString,variableNames,stringToReturn)"
"requires functions: splitMacroWithParentheses, replaceParenthesesWithSymbols(theString), replaceMultipleStringsWithMultipleStrings, getDictionaryFromMacro(theVariables,theMacro,theResult)"
"is defined: True"
"description: Return the output of the macro. Replace the variables in stringToReturn with the parameters"
def evaluateMacroWithSpecificString(inputString,stringThatMatchesRegex,variableNames,stringToReturn, returnParameters = False):
    """Expand inputString using one specific macro pattern.

    Returns None when either string fails to match a known regex or when the
    two match different regexes. With returnParameters=True, returns a dict
    mapping each variable name (from the comma-separated variableNames) to
    the matching piece of inputString; otherwise returns stringToReturn with
    those variables substituted (nested parenthesized arguments are first
    expanded recursively via evaluateMacro).
    """
    #print("Input string: " + inputString)
    #print("String that matches regex: " + stringThatMatchesRegex)
    #print("variable names: " + variableNames)
    #print("string to return: " + stringToReturn)
    #Return None if the input doesn't match a regex.
    if(printMatches(replaceParenthesesWithSymbols(inputString)) == []):
        return None
    if(printMatches(replaceParenthesesWithSymbols(stringThatMatchesRegex)) == []):
        #raise Exception(stringThatMatchesRegex + " does not match any regular expression.")
        return None
    # Parenthesized as (getMatchingRegex(...)) != getMatchingRegex(...): this
    # still parses as a single comparison, so both results are compared.
    if(getMatchingRegex(replaceParenthesesWithSymbols(stringThatMatchesRegex))) != getMatchingRegex(replaceParenthesesWithSymbols(inputString)):
        return None
    # Parameterless macro: the template is returned verbatim.
    if(variableNames == ""):
        return stringToReturn
    #print(replaceParenthesesWithSymbols(inputString))
    #print(getExpressionsInParentheses(inputString))
    theSplitInputString = splitMacroWithParentheses(inputString)
    theSplitParameterString = splitMacroWithParentheses(stringThatMatchesRegex)
    arrayOfVariables = variableNames.split(",")
    #print("theSplitInputString: " + str(theSplitInputString))
    #print("theSplitParameterString: " + str(theSplitParameterString))
    #print("theSplitStringToReturn: " + str(theSplitStringToReturn))
    #print("arrayOfVariables: " + str(arrayOfVariables))
    parameterInformationDictionary = {}
    #The location of each variable in theSplitInputString is the same as the location of each variable in the
    # parameter string, so pair them up positionally.
    if returnParameters == False:
        # Recursively expand every nested parenthesized argument first.
        for idx, current in enumerate(theSplitInputString):
            if current.startswith("(") and current.endswith(")"):
                #print("Thing to evaluate: " + current)
                theSplitInputString[idx] = evaluateMacro(current)
    for current in arrayOfVariables:
        for idx, current1 in enumerate(theSplitParameterString):
            #print(current + ", " + current1)
            if current1 in current:
                parameterInformationDictionary[current] = theSplitInputString[idx]
    #print("parameterInformationDictionary: " + str(parameterInformationDictionary))
    if(returnParameters == True):
        return parameterInformationDictionary
    else:
        return replaceMultipleStringsWithMultipleStrings(stringToReturn, parameterInformationDictionary)
'''
Return the string that is to be evaluated.
evaluateMacroWithSpecificString("(3 == (4+1))", ["(foo equals bar)"], "foo,bar", "(foo == bar)")
First, get a dictionary to represent the indices of all parameters in the macro and in the result string.
Then, get a list of all regular expressions that match the input string.
Ensure that each string in stringsThatMatchRegexes matches one regex, and each regex in stringsThatMatchRegexes matches one string.
'''
#print(evaluateMacroWithSpecificString("(3 == (4 plus 1))", "(foo equals bar)", "foo,bar", "(foo == bar)"))
#print(evaluateMacro("(5 is between (4 times 4) and 7)"))
#print(evaluateMacro("([1,2,3,4] contains 4)"))
def getParameterNames(theMacro):
    """Return a comma-separated, de-duplicated list of the parameter names
    that appear in theMacro, recursing into parenthesized sub-macros.

    Bug fix: dict.keys() returns a non-indexable view in Python 3; the
    original indexed it (thingToChange1[idx]) and raised TypeError. A stray
    debug print of the intermediate list was also removed.
    """
    parameterDict = evaluateMacro(theMacro, returnParameterNames=True)
    # The dict maps positions to names; collect the names in insertion order.
    names = [parameterDict[key] for key in parameterDict]
    for idx, name in enumerate(names):
        if name.startswith("("):
            # A nested macro: recurse to collect its own parameter names.
            names[idx] = getParameterNames(name)
    # Flatten the comma-separated pieces and de-duplicate (set ordering is
    # arbitrary, matching the original behavior).
    allNames = ",".join(names).split(",")
    return ",".join(set(allNames))
# Import-time smoke tests: exercise the macro engine and print the results.
# NOTE(review): these run (and print) on every import of this module.
print(getMatchingRegex("(print 1)"))
print(getMatchingRegex("(print the type of 1)"))
print(getParameterNames("(baz is between barf and frog)"))
print(evaluateMacro("(gorf cubed)"))
print(evaluateMacro("(gorf squared)"))
print(evaluateMacro("(test the exec function)"))
print(evaluateMacro("exec(\"print('derp')\")"))
| #This version is obsolete! Use polishnotation.py instead.
#Test everything in polyglotCodeGenerator.py
from listOfRegexes import *
from copy import copy, deepcopy
import random
from random import randint
from random import choice
from pyparsing import OneOrMore, nestedExpr
import numpy
import re;
from addParentheses import addParentheses
from removeParentheses import removeParentheses
def addOpeningAndClosingParentheses(theString):
    """Ensure theString starts with '(' and ends with ')'.

    Bug fix: when the opening parenthesis was missing, the original APPENDED
    '(' to the end of the string; it must be prepended.
    """
    if not theString.startswith("("):
        theString = "(" + theString
    if not theString.endswith(")"):
        theString = theString + ")"
    return theString
"function name: numberOfIndentations(theString)"
"requires functions: False"
"is defined: True"
"description: Get the number of indentations at the beginning of the string."
"function name: addInitialParentheses(theString,numberOfParentheses)"
"requires functions: numberOfIndentations(theString)"
"is defined: True"
"description: Add parentheses to beginning of the string after the indentation."
#print addInitialParentheses(" lol herp de derp", 4)
"function name: addFinalParentheses(theString,numberOfParentheses)"
"requires functions: False"
"is defined: True"
"description: Add parentheses to the end of the string."
"function name: addParentheses(theString)"
"requires functions: numberOfIndentations(theString), addFinalParentheses(theString,numberOfParentheses), addInitialParentheses(theString,numberOfParentheses)"
"is defined: True"
"description: Add parentheses to the string to match the indentation."
#print(addParentheses(
'''
while (i > 0)
(print 'hello')
(print 'hello')
while (i > 5)
print
'"world"'
'''
#))
"function name: evaluateMacro"
"requires functions: evaluateMacroWithSpecificString(inputString,variableNames,stringToReturn), addParentheses(theString)"
"is defined: False"
"description: Return the output of the macro."
"function name: splitMacroWithParentheses"
"requires functions: replaceParenthesesWithSymbols(theString), getExpressionsInParentheses(theString)"
"is defined: True"
"description: Split the macro with parentheses using a regular expression."
"function name: replaceParenthesesWithSymbols(theString)"
"requires functions: False"
"is defined: True"
"description: Replace the symbols inside nested parentheses with <<>>."
"function name: getExpressionsInParentheses(theString)"
"requires functions: False"
"is defined: True"
"description: Get an array of every substring in the input that is inside parentheses."
"function name: replaceMultipleStringsWithMultipleStrings"
"requires functions: False"
"is defined: True"
"description: Replace multiple strings in a string with multiple other strings."
# Self-check of evaluateMacro's declared dependencies (functionChecker is
# presumably provided by the star import above — TODO confirm its origin).
functionChecker("crossLanguageParser.py", "evaluateMacro")
'''
http://stackoverflow.com/questions/18903923/how-to-split-a-string-in-python-without-redundant-output
Here's a demonstration of parameters being extracted from a macro.
Put ?: in front of every group, like this: (?:foo|bar|baz). Otherwise it will produce redundant results in the output.
'''
# An example of an array that defines a list of regular expressions to match
# a pattern.
# NOTE(review): two pairs of adjacent string literals below (e.g.
# '(rotated(?: by|))' "theDegrees") concatenate implicitly into one string —
# presumably intentional for this example data, but confirm against the
# consumer's expected format.
patternDefiningArray = [
    [
        ["theArray", '(rotated(?: by|))' "theDegrees", '(degrees)'],
        ["(rotation of)", "theArray", "by", "theDegrees", "degrees"]
    ],
    ["theArray", "theDegrees"],
    ["rotateArray(", "theArray", ", " "theDegrees", ")"]
]
def replaceMultipleStringsWithMultipleStrings(string, rep_dict):
    """Replace every occurrence of each key of rep_dict in string with its value."""
    alternation = "|".join(map(re.escape, rep_dict))
    return re.sub(alternation, lambda match: rep_dict[match.group(0)], string, flags=re.M)
#print(replaceMultipleStringsWithMultipleStrings("foo and bar are baz", {"foo":"1", "bar":"2", "baz":"3"}))
def lisp(x):  # convert parse array back into symbols
    """Rebuild a parenthesized symbol string from a (possibly nested) parse list."""
    parts = [item if isinstance(item, str) else lisp(item) for item in x]
    return "(" + " ".join(parts) + ")"
def getExpressionsInParentheses(theString):
    """Return, as strings, every parenthesized sub-expression found at the
    top level of theString (nested expressions stay inside their parent)."""
    parsed = OneOrMore(nestedExpr()).parseString(theString)
    return [lisp(token) for token in parsed[0] if not isinstance(token, str)]
def replaceParenthesesWithSymbols(theString):
    """Rebuild theString with every top-level parenthesized sub-expression
    replaced by the placeholder '<<>>'."""
    parsed = OneOrMore(nestedExpr()).parseString(theString)
    tokens = [token if isinstance(token, str) else "<<>>" for token in parsed[0]]
    return "(" + " ".join(tokens) + ")"
# Sample nested-macro string for manually exercising the parenthesis helpers.
aStringToPrint = "(replace (foo) in bar with (substring from 2 to 3 in (a string called 'hello')))"
#print(replaceParenthesesWithSymbols(aStringToPrint))
#print(getExpressionsInParentheses(aStringToPrint))
def printMatches(stringToMatch):
    """Collect, for every regex in theArray that matches stringToMatch (after
    collapsing nested parentheses), its captured groups and the regex itself."""
    stringToMatch = replaceParenthesesWithSymbols(stringToMatch)
    toReturn = []
    # theArray: list of compiled regexes from listOfRegexes (star import).
    for candidate in theArray:
        matched = candidate.match(stringToMatch)
        if matched:
            toReturn.append({"splitString": matched.groups(),
                             "matchingRegex": candidate})
    return toReturn
def my_shuffle(array):
    """Shuffle array in place with random.shuffle and return it for chaining."""
    random.shuffle(array)
    return array
def getMatchingRegex(theString1):
    """Return the regex of the first match printMatches finds for theString1."""
    collapsed = replaceParenthesesWithSymbols(theString1)
    return printMatches(collapsed)[0]["matchingRegex"]
def splitMacroWithParentheses(theString):
    """Split theString with its matching regex's capture groups, then swap
    each '<<>>' placeholder back for the corresponding nested expression."""
    expressions = getExpressionsInParentheses(theString)
    collapsed = replaceParenthesesWithSymbols(theString)
    pieces = list(printMatches(collapsed)[0]["splitString"])
    replaced = 0
    for position, piece in enumerate(pieces):
        if piece == "<<>>":
            pieces[position] = expressions[replaced]
            replaced += 1
    return pieces
#splitMacroWithParentheses("(replace (substring of 'hello' between 2 and 3) in (bar is an integer) with (baz is not a string))")
#splitMacroWithParentheses("(substring of (gorp is really funny) between (3 is a magic (it's a number)) and (4 is an integer))")
def rangesOverlap(arr1, arr2):
    """Return True when the inclusive ranges arr1=[lo, hi] and arr2=[lo, hi]
    overlap, False otherwise.

    Bug fix: the original's second clause compared arr2[1] <= arr2[1]
    (always true), so the check degenerated to arr1[0] <= arr2[1] alone;
    it also fell through and returned None instead of False.
    """
    return arr1[0] <= arr2[1] and arr2[0] <= arr1[1]
def arrayDimensions(theArr):
    """Return the dimensions of (possibly nested) theArr as numpy's shape tuple."""
    asArray = numpy.asarray(theArr)
    return asArray.shape
#print(evaluateMacroWithSpecificString("(replace (substring of 'hello' between 2 and 3) in (bar is an integer) with (baz is not a string))"))
def sumOfAllNums(theNumArr):
    """Return the sum of the numbers in theNumArr (0 for an empty input).

    Bug fix: the original returned 'current' (the last element visited)
    instead of the accumulated total, and raised NameError on an empty list.
    """
    return sum(theNumArr)
def splitMacroWithWhitespace(theString):
    """Whitespace-tokenize theString (via pyparsing), swapping each '<<>>'
    placeholder back for its original nested expression."""
    expressions = getExpressionsInParentheses(theString)
    collapsed = replaceParenthesesWithSymbols(theString)
    tokens = OneOrMore(nestedExpr()).parseString(collapsed)[0]
    replaced = 0
    for position, token in enumerate(tokens):
        if token == '<<>>':
            tokens[position] = expressions[replaced]
            replaced += 1
    return tokens
def getNonSeparatorParts(theString):
    """Return the even-position tokens of the whitespace split, i.e. the
    operands sitting between the separator tokens."""
    tokens = splitMacroWithWhitespace(theString)
    return [token for position, token in enumerate(tokens) if position % 2 == 0]
def everyOtherIsTheSame(theSplitString):
    """True when every odd-indexed token equals theSplitString[1] (the
    separator) and no even-indexed token does; the length must be odd."""
    separator = theSplitString[1]
    if len(theSplitString) % 2 == 0:
        return False
    for position, token in enumerate(theSplitString):
        isSeparatorSlot = position % 2 == 1
        if isSeparatorSlot != (token == separator):
            return False
    return True
def isArithmeticOperator(theString):
    """Return True if theString is one of the recognized arithmetic,
    comparison or boolean operator tokens.

    Fix: the original list contained "-" twice; the duplicate is removed
    (membership behavior is unchanged) and a set gives O(1) lookup.
    """
    return theString in {"+", "-", "/", "^", "%", "&", "and", "or",
                         "<", ">", "<=", ">=", "==", "||"}
"function name: getDictionaryFromMacro(theVariables,theMacro,theResult)"
"requires functions: splitMacroWithParentheses, replaceParenthesesWithSymbols(theString)"
"is defined: True"
"description: Return the output of the macro. Replace the variables in stringToReturn with the parameters"
def remove_values_from_list(the_list, val):
    """Return a new list containing the elements of the_list that differ from val."""
    return list(filter(lambda element: element != val, the_list))
def removeEachValueFromList(the_list, values):
    """Strip every element equal to any entry of values out of the_list,
    returning the (possibly rebuilt) list."""
    result = the_list
    for unwanted in values:
        result = remove_values_from_list(result, unwanted)
    return result
def getDictionaryFromMacro(theVariables, theMacro, theResult):
    """Map each variable's token index in the split macro string to its token
    index in the split result string.

    theVariables is a comma-separated variable list, e.g. 'foo,bar'.

    Bug fix: the parenthesis alternatives are now raw strings — "\\(" written
    as a plain string literal is an invalid escape sequence (deprecated, and
    an error in future Python versions).
    """
    arrayOfVariables = theVariables.split(",")
    newArrayOfVariables = deepcopy(arrayOfVariables)
    newArrayOfVariables += [r"\(", r"\)"]
    theVariables = "(" + "|".join(newArrayOfVariables) + ")"
    # Split while KEEPING the separators (capturing group in re.split), then
    # drop empty strings and bare parentheses so .index() sees real tokens.
    theSplitMacro = re.split(theVariables, theMacro)
    theSplitMacro = filter(None, theSplitMacro)
    theSplitMacro = removeEachValueFromList(theSplitMacro, ["", ")", "("])
    theSplitResult = re.split(theVariables, theResult)
    theSplitResult = removeEachValueFromList(theSplitResult, ["", ")", "("])
    dictionaryToReturn = {}
    for current in arrayOfVariables:
        dictionaryToReturn[theSplitMacro.index(current)] = theSplitResult.index(current)
    return dictionaryToReturn
#print(getDictionaryFromMacro('foo,bar', '(foo equals equals bar)', '(foo == bar)'))
def is_prime(a):
    """Return True if a is a prime number.

    Bug fixes: 'xrange' does not exist in Python 3 (NameError), and the
    original returned True for 0 and 1, which are not prime. Trial division
    now stops at sqrt(a) instead of a.
    """
    if a < 2:
        return False
    return all(a % i for i in range(2, int(a ** 0.5) + 1))
#return am
#print(removeParentheses("(print (the type of foo))", "foo"))
#print(removeParentheses("((foo [bar]) = baz)","foo,bar"))
def getMacroParameters(inputString,stringThatMatchesRegex,variableNames,stringToReturn, returnParameters = False):
#print("Input string: " + inputString)
#print("String that matches regex: " + stringThatMatchesRegex)
#print("variable names: " + variableNames)
#print("string to return: " + stringToReturn)
#Return None if the input doesn't match a regex.
if(printMatches(replaceParenthesesWithSymbols(inputString)) == []):
return None
if(printMatches(replaceParenthesesWithSymbols(stringThatMatchesRegex)) == []):
#raise Exception(stringThatMatchesRegex + " does not match any regular expression.")
return None
if(getMatchingRegex(replaceParenthesesWithSymbols(stringThatMatchesRegex))) != getMatchingRegex(replaceParenthesesWithSymbols(inputString)):
return None
#print(replaceParenthesesWithSymbols(inputString))
#print(getExpressionsInParentheses(inputString))
theSplitInputString = splitMacroWithParentheses(inputString)
theSplitParameterString = splitMacroWithParentheses(stringThatMatchesRegex)
arrayOfVariables = variableNames.split(",")
#print("theSplitInputString: " + str(theSplitInputString))
#print("theSplitParameterString: " + str(theSplitParameterString))
#print("theSplitStringToReturn: " + str(theSplitStringToReturn))
#print("arrayOfVariables: " + str(arrayOfVariables))
parameterInformationDictionary = {}
#The location of each variable in theSplitInputString is the same as the location of each variable in the
for current in arrayOfVariables:
for idx, current1 in enumerate(theSplitParameterString):
#print(current + ", " + current1)
if current1 in current:
parameterInformationDictionary[current] = theSplitInputString[idx]
#print("parameterInformationDictionary: " + str(parameterInformationDictionary))
if(returnParameters == True):
return parameterInformationDictionary
else:
return replaceMultipleStringsWithMultipleStrings(stringToReturn, parameterInformationDictionary)
def theThingsToEvaluate():
return [
#removeParentheses("((foo [bar]) = baz)","foo,bar"),
#removeParentheses("((foo[bar]) = baz)","foo,bar,baz"),
[["(foo in bar is between goo and gar)", "(foo is between goo and gar in bar)"], "foo,bar,goo,gar", "(foo in bar is between goo and gar)"],
[["(function named funcName that returns typeToReturn with parameters named paramNames with the parameter types paramTypes theBody)", "(public static typeToReturn funcName paramNames paramTypes theBody)"], "typeToReturn,funcName,paramNames,paramTypes,theBody", "function(parameterNames=paramNames, parameterTypes=paramTypes, isStatic=True, returnType='typeToReturn', functionName='funcName', body=theBody),"],
[["(def funcName paramNames theBody)"], "funcName,paramNames,theBody", "function(parameterNames=paramNames, parameterTypes=paramNames, isStatic=True, returnType='void', functionName='funcName', body=theBody),"],
[["(foo = bar)"], "foo,bar", "setVar(valueToGet=bar, valueToChange=foo)"],
[["(not foo)"], "foo", "Not(foo)"],
[["(convert foo from bar to baz)", "(convert foo to baz from bar)", "(foo converted from baz to bar)"], "foo,bar,baz", "convert foo from bar to baz"],
[["(cond foo)"], "foo", "conditionalBlock(foo)"],
[["(for theInitializer theCondition theIncrement theBody)"], "theInitializer,theCondition,theIncrement,theBody", "forLoop(body=theBody, initializer=theInitializer, condition=theCondition, increment=theIncrement)"],
[["(foo ;)"], "foo", "seriesOfStatements([foo])"],
[["([aVar])", "(aVar,)"], "aVar", "[aVar]"],
[["(main foo)"], "foo", "main(foo)"],
[["(convert foo from base bar to base baz)", "(convert foo to base baz from base bar)", "(foo converted to base baz from base bar)", "(foo converted from base bar to base baz)", "(foo in base baz instead of base bar)"], "foo,bar,baz", "(convert foo from base bar to base baz)"],
[["(foo [ bar ])"], "foo,bar", "foo[bar]"],
[["(switch foo)"], "foo", "Switch(foo, [])"],
[["(if foo)"], "foo", "If(foo, [])"],
[["(while foo)"], "foo", "While(foo, [])"],
[["(module foo)"], "foo", "module([foo])"],
[["(default foo)"], "foo", "default(foo)"],
[["(foo{ bar })"], "foo,bar", "foo(bar)"],
[["(type [ dimensions ] varName = initialValue)", "(type varName [ dimensions ] = initialValue)"], "type,varName,dimensions,initialValue", "typedimensions varName = initialValue"],
[["(theArr[ indices ])"], "theArr,indices", "theArrindices"],
[["(type foo = bar)"], "type,foo,bar", "initializeVar(variableName=foo, variableType=type, initialValue=bar, arrayDimensions=None)"],
[["(switch condition insideBrackets afterBrackets)"], "condition,insideBrackets,afterBrackets", "switch(condition){insideBrackets} afterBrackets"],
[["(if condition insideBrackets afterBrackets)"], "condition,insideBrackets,afterBrackets", "If(condition, [insideBrackets]), afterBrackets"],
[["(foo is a prime number)"], "foo", "is_prime(foo)"],
[["(the type of foo)"], "foo", "type(foo)"],
[["(foo divided by bar)", "(the quotient of foo and bar)"], "foo,bar", "(foo/bar)"],
[["(foo is between bar and baz)"], "foo,bar,baz", "((bar < foo) and (foo < baz))"],
[["(foo contains bar)", "(bar is in foo)"], "foo,bar", "(bar in foo)"],
[["(length of foo)"], "foo", "len(foo)"],
[["(random number between foo and bar)"], "foo,bar", "randint(foo,bar)"],
[["(print foo)"], "foo", "puts(foo)"],
[["(foo to the power of bar)"], "foo,bar","(foo**bar)"],
[["(foo and bar)"], "foo,bar","(foo and bar)"],
[["(sum of each number in foo)"], "foo","sumOfAllNums(foo)"],
[["(return foo)"], "foo","return foo"],
[["(foo matches bar)"], "foo,bar","re.compile(bar).matches(foo)"],
[["(foo equals bar)", "(foo and bar are equal)"], "foo,bar", "Equals(foo, bar)"],
[["(foo if bar)", "(if bar foo)", "(if bar then foo)"], "foo,bar", "If(bar, foo)"],
[["(while bar foo)", "(foo while bar)"], "foo,bar", "While(bar, foo)"],
[["(foo is divisible by bar)", "(foo is a multiple of bar)"], "foo,bar", "(foo % bar == 0)"],
[["(foo > bar)"], "foo,bar", "greaterThan(foo, bar)"],
[["(foo is less than bar)"], "foo,bar", "lessThan(foo, bar)"],
[["(foo overlaps with bar)"], "foo,bar", "rangesOverlap(foo, bar)"],
[["(replace each foo in bar with baz)"], "foo,bar,baz", "bar.replace(foo,baz)"],
[["(else foo)"], "foo", "Else([foo]),"],
[["(case foo bar)"], "foo,bar", "case(foo, bar)"],
[["(switch foo bar)"], "foo,bar", "switch(foo, bar)"],
[["(elif foo bar)"], "foo,bar", "Elif(foo, bar)"],
[["(elif foo bar baz)"], "foo,bar,baz", "Elif(foo, bar), baz"],
[["(foo mod bar)"], "foo,bar", "mod(foo, bar)"],
[["(class [[foo]] [[body]])"], "[[foo]],[[body]]", "getClass([[foo]], [[body]])"],
[["(foo / bar)"], "foo,bar", "divide(foo, bar)"],
[["(foo has the same meaning as bar)"], "foo,bar", "foo has the same meaning as bar"],
[["(the value of foo)"], "foo", "foo"],
[["(test the exec function)"], '', "exec(\"print('toPrint')\")"],
]
thingsToEvaluate = []
def evaluateMacro(stringToEvaluate, returnParameterNames=False):
if(stringToEvaluate.startswith("exec(")):
exec(stringToEvaluate)
return ""
#print("Macro to evaluate: " + str(stringToEvaluate))
if(returnParameterNames == False):
#print(getParameterNames(stringToEvaluate))
pass
global thingsToEvaluate
if (thingsToEvaluate == []):
thingsToEvaluate = theThingsToEvaluate()
#print(thingsToEvaluate)
addThingsToEvaluate = [
removeParentheses("(foo = (bar [ baz ]))"),
[["(unless foo bar)", "(foo unless bar)"], "(if (not foo) then bar)"],
[["(foo squared)"], "(foo to the power of 2)"],
[["(foo cubed)"], "(foo to the power of 3)"],
[["(square root of foo)"], "(foo to the power of -2)"],
[["(foo += bar)"], "(foo = (foo + bar))"],
[["(foo *= bar)"], "(foo = (foo * bar))"],
[["(foo -= bar)"], "(foo = (foo - bar))"],
[["(foo /= bar)"], "(foo = (foo / bar))"],
[["(foo ++)"], "(foo += 1)"],
[["(foo != bar)"], "(not (foo == bar))"],
[["(foo = foo + bar)"], "(foo += bar)"],
[["(foo = foo - bar)"], "(foo -= bar)"],
[["(foo = foo * bar)"], "(foo *= bar)"],
[["(foo if bar unless baz)"], "(foo if (bar and (not baz)))"],
[["(print the type of foofoo)"], "(print (the type of foofoo)))"],
[["(foo percent of bar)"], "(foo * 0.01 * bar)"],
]
for current in addThingsToEvaluate:
print(thingsToEvaluate[len(thingsToEvaluate)-1])
thingsToEvaluate += [[current[0], getParameterNames(current[1]), evaluateMacro(current[1])]]
stringToEvaluate = addParentheses(stringToEvaluate)
#print("String to evaluate with added parentheses:\n" + stringToEvaluate)
#stringToEvaluate = addOpeningAndClosingParentheses(stringToEvaluate)
#print("Evaluate the macro " + stringToEvaluate)
#print(splitMacroWithWhitespace(stringToEvaluate))
theArr = printMatches(replaceParenthesesWithSymbols(stringToEvaluate));
#theData = OneOrMore(nestedExpr()).parseString(stringToEvaluate)
#print(theData)
whitespaceSplitString = splitMacroWithWhitespace(stringToEvaluate);
#print("The string split with whitespace is " + str(whitespaceSplitString))
separatorCharacter = whitespaceSplitString[1]
if(everyOtherIsTheSame(whitespaceSplitString)):
#print("Every other character in " + str(whitespaceSplitString) + " is " + str(separatorCharacter))
nonSeparatorParts = getNonSeparatorParts(stringToEvaluate)
if returnParameterNames == True:
thingToReturn = {}
for idx,current in enumerate(nonSeparatorParts):
thingToReturn[idx] = current
return thingToReturn
#print("The non-separator parts are " + str(nonSeparatorParts))
for idx, current in enumerate(nonSeparatorParts):
if(current.startswith("(")):
print(current)
nonSeparatorParts[idx] = evaluateMacro(current)
if(separatorCharacter in ["+", "plus"]):
return "add([" + ", ".join(nonSeparatorParts) + "])"
elif(separatorCharacter in ["-", "minus"]):
return "subtract([" + ", ".join(nonSeparatorParts) + "])"
elif(separatorCharacter in ["*", "times"]):
return "multiply([" + ", ".join(nonSeparatorParts) + "])"
elif(separatorCharacter in ["||", "or", "|"]):
return "Or([" + ", ".join(nonSeparatorParts) + "])"
elif(separatorCharacter in ["and", "&&", "&"]):
return "And([" + ", ".join(nonSeparatorParts) + "])"
elif(separatorCharacter in [","]):
return "[" + ", ".join(nonSeparatorParts) + "]"
elif(separatorCharacter in [";"]):
return "seriesOfStatements([" + ", ".join(nonSeparatorParts) + "])"
#print("String to evaluate: " + stringToEvaluate)
#print(getMatchingRegex(stringToEvaluate))
#This code does not do
#if returnParameterNames == False:
#if(getMatchingRegex("(foo has the same meaning as bar)") == getMatchingRegex(stringToEvaluate)):
#print("The input is a syntax definition: " + stringToEvaluate)
#thingToChange = evaluateMacro(stringToEvaluate, returnParameterNames=True);
#print(thingToChange)
#print(thingToChange['foo'])
#print(thingToChange['bar'])
#thingsToEvaluate += [[thingToChange['foo'], getParameterNames(thingToChange['bar']), evaluateMacro(thingToChange['bar'])]]
#print(thingsToEvaluate)
#else:
#print(str(whitespaceSplitString) + " is not an arithmetic expression.")
for current in thingsToEvaluate:
for currentInputString in current[0]:
if returnParameterNames == True:
theResult = evaluateMacroWithSpecificString(inputString=stringToEvaluate,stringThatMatchesRegex=currentInputString,variableNames=current[1],stringToReturn=current[2], returnParameters = True)
else:
theResult = evaluateMacroWithSpecificString(stringToEvaluate, currentInputString, current[1], current[2])
if(theResult != None):
if(type(theResult) == str and theResult.startswith("exec(")):
exec(theResult)
return ""
else:
return theResult
if(len(theArr) == 1):
matchingRegex = theArr[0]["matchingRegex"]
'''
paramArray = splitMacroWithParentheses(stringToEvaluate)
#Evaluate these before the macro has been evaluated:
if matchingRegex == getMatchingRegex("(foo and bar are equal)"):
return evaluateMacro("(" + paramArray[0] + " equals " + paramArray[2] + ")")
elif matchingRegex == getMatchingRegex("(shuffle foo randomly)"):
return evaluateMacro("(randomly shuffle " + paramArray[1] + ")")
#Evaluate these after the macro has been evaluated
for idx, current in enumerate(paramArray):
if(current.startswith("(") and current.endswith(")")):
paramArray[idx] = evaluateMacro(current)
"(foo is divisible by bar)"
"(foo % bar == 0)"
"(randomly shuffle foo)"
"my_shuffle(foo)"
"(foo in reverse order)"
"foo[::-1]"
"(sort foo in alphabetical order)"
"sorted(foo)"
"(the type of foo)"
"type(foo)"
"(sort foo from largest to smallest)"
"sorted(foo)"
"(sort foo from smallest to largest)"
"sorted(foo).reverse()"
"(foo is an integer)"
"(type(foo) == int)"
"(all locations of foo in bar)"
"[i for i, x in enumerate(bar) if x == foo]"
"(pick random from foo)"
"choice(foo)"
"(dimensions of foo)"
"arrayDimensions(foo)"
"(print foo)"
"(puts(foo))"
"(foo from bar to baz)"
"(substring of foo between bar and baz)"
"(foo and bar)"
"(foo and bar)"
"(sum of each number in foo)"):
"sumOfAllNums(foo)"
"(return foo)"
"Return(foo),"
if(toReturn != ""
#print(stringToEvaluate + " becomes (" + toReturn + ") which evaluates to " + str(eval(toReturn)))
#stringToReturn = str(eval(toReturn))
#print(stringToEvaluate + " becomes \n " + toReturn + "\n")
return toReturn
else:
'''
raise Exception(stringToEvaluate + " matches " + matchingRegex.pattern + ", but the output is not yet defined in evaluateMacro")
elif(len(theArr) == 0):
raise Exception(replaceParenthesesWithSymbols(stringToEvaluate) + " does not match any regular expression.")
else:
print(stringToEvaluate) + " matches more than one regular expression!"
def printMacroOutputs(theStrings):
for current in theStrings:
print("To evaluate: " + current)
print(str(current) + "\nbecomes\n " + str(evaluateMacro(current))+"\n")
"function name: evaluateMacroWithSpecificString(inputString,variableNames,stringToReturn)"
"requires functions: splitMacroWithParentheses, replaceParenthesesWithSymbols(theString), replaceMultipleStringsWithMultipleStrings, getDictionaryFromMacro(theVariables,theMacro,theResult)"
"is defined: True"
"description: Return the output of the macro. Replace the variables in stringToReturn with the parameters"
def evaluateMacroWithSpecificString(inputString,stringThatMatchesRegex,variableNames,stringToReturn, returnParameters = False):
#print("Input string: " + inputString)
#print("String that matches regex: " + stringThatMatchesRegex)
#print("variable names: " + variableNames)
#print("string to return: " + stringToReturn)
#Return None if the input doesn't match a regex.
if(printMatches(replaceParenthesesWithSymbols(inputString)) == []):
return None
if(printMatches(replaceParenthesesWithSymbols(stringThatMatchesRegex)) == []):
#raise Exception(stringThatMatchesRegex + " does not match any regular expression.")
return None
if(getMatchingRegex(replaceParenthesesWithSymbols(stringThatMatchesRegex))) != getMatchingRegex(replaceParenthesesWithSymbols(inputString)):
return None
if(variableNames == ""):
return stringToReturn
#print(replaceParenthesesWithSymbols(inputString))
#print(getExpressionsInParentheses(inputString))
theSplitInputString = splitMacroWithParentheses(inputString)
theSplitParameterString = splitMacroWithParentheses(stringThatMatchesRegex)
arrayOfVariables = variableNames.split(",")
#print("theSplitInputString: " + str(theSplitInputString))
#print("theSplitParameterString: " + str(theSplitParameterString))
#print("theSplitStringToReturn: " + str(theSplitStringToReturn))
#print("arrayOfVariables: " + str(arrayOfVariables))
parameterInformationDictionary = {}
#The location of each variable in theSplitInputString is the same as the location of each variable in the
if returnParameters == False:
for idx, current in enumerate(theSplitInputString):
if current.startswith("(") and current.endswith(")"):
#print("Thing to evaluate: " + current)
theSplitInputString[idx] = evaluateMacro(current)
for current in arrayOfVariables:
for idx, current1 in enumerate(theSplitParameterString):
#print(current + ", " + current1)
if current1 in current:
parameterInformationDictionary[current] = theSplitInputString[idx]
#print("parameterInformationDictionary: " + str(parameterInformationDictionary))
if(returnParameters == True):
return parameterInformationDictionary
else:
return replaceMultipleStringsWithMultipleStrings(stringToReturn, parameterInformationDictionary)
'''
Return the string that is to be evaluated.
evaluateMacroWithSpecificString("(3 == (4+1))", ["(foo equals bar)"], "foo,bar", "(foo == bar)")
First, get a dictionary to represent the indices of all parameters in the
Then, get a list of all regular expressions that match the input string.
Ensure that each string in stringsThatMatchRegexes matches one regex, and each regex in stringsThatMatchRegexes matches one string.
'''
#print(evaluateMacroWithSpecificString("(3 == (4 plus 1))", "(foo equals bar)", "foo,bar", "(foo == bar)"))
#print(evaluateMacro("(5 is between (4 times 4) and 7)"))
#print(evaluateMacro("([1,2,3,4] contains 4)"))
def getParameterNames(theMacro):
thingToChange = evaluateMacro(theMacro, returnParameterNames=True);
#print("ThingToChange is " + str(thingToChange))
thingToChange1 = thingToChange.keys()
thingToReturn = []
for idx,current in enumerate(thingToChange1):
thingToReturn += [thingToChange[thingToChange1[idx]]]
for idx, current in enumerate(thingToReturn):
if current.startswith("("):
thingToReturn[idx] = getParameterNames(current)
print(thingToReturn)
thingToReturn = ",".join(thingToReturn)
thingToReturn = thingToReturn.split(",")
return ",".join(list(set(thingToReturn)))
print(getMatchingRegex("(print 1)"))
print(getMatchingRegex("(print the type of 1)"))
print(getParameterNames("(baz is between barf and frog)"))
print(evaluateMacro("(gorf cubed)"))
print(evaluateMacro("(gorf squared)"))
print(evaluateMacro("(test the exec function)"))
print(evaluateMacro("exec(\"print('derp')\")"))
| en | 0.568698 | #This version is obsolete! Use polishnotation.py instead. #Test everything in polyglotCodeGenerator.py #print addInitialParentheses(" lol herp de derp", 4) #print(addParentheses( while (i > 0) (print 'hello') (print 'hello') while (i > 5) print '"world"' #)) http://stackoverflow.com/questions/18903923/how-to-split-a-string-in-python-without-redundant-output Here's a demonstration of parameters being extracted from a macro. Put ?: in front of every group, like this: (?:foo|bar|baz). Otherwise it will produce redundant results in the output. #An example of an array that defines a list of regular expressions to match a pattern: #print(replaceMultipleStringsWithMultipleStrings("foo and bar are baz", {"foo":"1", "bar":"2", "baz":"3"})) #convert parse array back into symbols #print("Get the expressions for: " + theString) #theString = addOpeningAndClosingParentheses(theString) #print("The thing to replace with symbols is " + theString) #print(replaceParenthesesWithSymbols(aStringToPrint)) #print(getExpressionsInParentheses(aStringToPrint)) #theArray is an array of regular expressions that is defined in listOfRegexes.py #theArgs = #if(toReturn == []): #raise Exception(stringToMatch + " does not match any regular expression.") #print(theString) #print(theExpressions) #splitMacroWithParentheses("(replace (substring of 'hello' between 2 and 3) in (bar is an integer) with (baz is not a string))") #splitMacroWithParentheses("(substring of (gorp is really funny) between (3 is a magic (it's a number)) and (4 is an integer))") #print(evaluateMacroWithSpecificString("(replace (substring of 'hello' between 2 and 3) in (bar is an integer) with (baz is not a string))")) #print(theString) #print(theExpressions) #print("The string here is " + current) #print("Replacing " + theSplitString[idx] + " with " + theExpressions[theCounter]) #how to get the index of a string in another string: string.index('stringToFind') #how to split a string without removing separators: 
http://stackoverflow.com/questions/2136556/in-python-how-do-i-split-a-string-and-keep-the-separators #print(theSplitMacro) #print(theSplitResult) #print(theSplitMacro.index("bar")) #print(arrayOfVariables) #print(getDictionaryFromMacro('foo,bar', '(foo equals equals bar)', '(foo == bar)')) #return am #print(removeParentheses("(print (the type of foo))", "foo")) #print(removeParentheses("((foo [bar]) = baz)","foo,bar")) #print("Input string: " + inputString) #print("String that matches regex: " + stringThatMatchesRegex) #print("variable names: " + variableNames) #print("string to return: " + stringToReturn) #Return None if the input doesn't match a regex. #raise Exception(stringThatMatchesRegex + " does not match any regular expression.") #print(replaceParenthesesWithSymbols(inputString)) #print(getExpressionsInParentheses(inputString)) #print("theSplitInputString: " + str(theSplitInputString)) #print("theSplitParameterString: " + str(theSplitParameterString)) #print("theSplitStringToReturn: " + str(theSplitStringToReturn)) #print("arrayOfVariables: " + str(arrayOfVariables)) #The location of each variable in theSplitInputString is the same as the location of each variable in the #print(current + ", " + current1) #print("parameterInformationDictionary: " + str(parameterInformationDictionary)) #removeParentheses("((foo [bar]) = baz)","foo,bar"), #removeParentheses("((foo[bar]) = baz)","foo,bar,baz"), #print("Macro to evaluate: " + str(stringToEvaluate)) #print(getParameterNames(stringToEvaluate)) #print(thingsToEvaluate) #print("String to evaluate with added parentheses:\n" + stringToEvaluate) #stringToEvaluate = addOpeningAndClosingParentheses(stringToEvaluate) #print("Evaluate the macro " + stringToEvaluate) #print(splitMacroWithWhitespace(stringToEvaluate)) #theData = OneOrMore(nestedExpr()).parseString(stringToEvaluate) #print(theData) #print("The string split with whitespace is " + str(whitespaceSplitString)) #print("Every other character in " + 
str(whitespaceSplitString) + " is " + str(separatorCharacter)) #print("The non-separator parts are " + str(nonSeparatorParts)) #print("String to evaluate: " + stringToEvaluate) #print(getMatchingRegex(stringToEvaluate)) #This code does not do #if returnParameterNames == False: #if(getMatchingRegex("(foo has the same meaning as bar)") == getMatchingRegex(stringToEvaluate)): #print("The input is a syntax definition: " + stringToEvaluate) #thingToChange = evaluateMacro(stringToEvaluate, returnParameterNames=True); #print(thingToChange) #print(thingToChange['foo']) #print(thingToChange['bar']) #thingsToEvaluate += [[thingToChange['foo'], getParameterNames(thingToChange['bar']), evaluateMacro(thingToChange['bar'])]] #print(thingsToEvaluate) #else: #print(str(whitespaceSplitString) + " is not an arithmetic expression.") paramArray = splitMacroWithParentheses(stringToEvaluate) #Evaluate these before the macro has been evaluated: if matchingRegex == getMatchingRegex("(foo and bar are equal)"): return evaluateMacro("(" + paramArray[0] + " equals " + paramArray[2] + ")") elif matchingRegex == getMatchingRegex("(shuffle foo randomly)"): return evaluateMacro("(randomly shuffle " + paramArray[1] + ")") #Evaluate these after the macro has been evaluated for idx, current in enumerate(paramArray): if(current.startswith("(") and current.endswith(")")): paramArray[idx] = evaluateMacro(current) "(foo is divisible by bar)" "(foo % bar == 0)" "(randomly shuffle foo)" "my_shuffle(foo)" "(foo in reverse order)" "foo[::-1]" "(sort foo in alphabetical order)" "sorted(foo)" "(the type of foo)" "type(foo)" "(sort foo from largest to smallest)" "sorted(foo)" "(sort foo from smallest to largest)" "sorted(foo).reverse()" "(foo is an integer)" "(type(foo) == int)" "(all locations of foo in bar)" "[i for i, x in enumerate(bar) if x == foo]" "(pick random from foo)" "choice(foo)" "(dimensions of foo)" "arrayDimensions(foo)" "(print foo)" "(puts(foo))" "(foo from bar to baz)" "(substring of foo 
between bar and baz)" "(foo and bar)" "(foo and bar)" "(sum of each number in foo)"): "sumOfAllNums(foo)" "(return foo)" "Return(foo)," if(toReturn != "" #print(stringToEvaluate + " becomes (" + toReturn + ") which evaluates to " + str(eval(toReturn))) #stringToReturn = str(eval(toReturn)) #print(stringToEvaluate + " becomes \n " + toReturn + "\n") return toReturn else: #print("Input string: " + inputString) #print("String that matches regex: " + stringThatMatchesRegex) #print("variable names: " + variableNames) #print("string to return: " + stringToReturn) #Return None if the input doesn't match a regex. #raise Exception(stringThatMatchesRegex + " does not match any regular expression.") #print(replaceParenthesesWithSymbols(inputString)) #print(getExpressionsInParentheses(inputString)) #print("theSplitInputString: " + str(theSplitInputString)) #print("theSplitParameterString: " + str(theSplitParameterString)) #print("theSplitStringToReturn: " + str(theSplitStringToReturn)) #print("arrayOfVariables: " + str(arrayOfVariables)) #The location of each variable in theSplitInputString is the same as the location of each variable in the #print("Thing to evaluate: " + current) #print(current + ", " + current1) #print("parameterInformationDictionary: " + str(parameterInformationDictionary)) Return the string that is to be evaluated. evaluateMacroWithSpecificString("(3 == (4+1))", ["(foo equals bar)"], "foo,bar", "(foo == bar)") First, get a dictionary to represent the indices of all parameters in the Then, get a list of all regular expressions that match the input string. Ensure that each string in stringsThatMatchRegexes matches one regex, and each regex in stringsThatMatchRegexes matches one string. 
#print(evaluateMacroWithSpecificString("(3 == (4 plus 1))", "(foo equals bar)", "foo,bar", "(foo == bar)")) #print(evaluateMacro("(5 is between (4 times 4) and 7)")) #print(evaluateMacro("([1,2,3,4] contains 4)")) #print("ThingToChange is " + str(thingToChange)) | 2.548693 | 3 |
corker/tests/test_controller.py | jd-boyd/corker | 0 | 6619490 | # pylint: disable=missing-docstring,no-member
from __future__ import absolute_import, print_function
from nose.tools import eq_
from corker.controller import BaseController, route
def test_route():
@route('bob')
def meth():
pass
eq_(meth._route, [(('bob',), {})])
def test_double_route():
@route('bob')
@route('fred')
def meth():
pass
eq_(meth._route, [(('fred',), {}), (('bob',), {})])
def test_config():
import webob
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
i = Index({}, bdb={'a': 1})
print(i.bdb)
eq_(i.bdb, {'a': 1})
| # pylint: disable=missing-docstring,no-member
from __future__ import absolute_import, print_function
from nose.tools import eq_
from corker.controller import BaseController, route
def test_route():
@route('bob')
def meth():
pass
eq_(meth._route, [(('bob',), {})])
def test_double_route():
@route('bob')
@route('fred')
def meth():
pass
eq_(meth._route, [(('fred',), {}), (('bob',), {})])
def test_config():
import webob
class Index(BaseController):
@route('')
def index(self):
return Response('Hi index!\n')
i = Index({}, bdb={'a': 1})
print(i.bdb)
eq_(i.bdb, {'a': 1})
| en | 0.690397 | # pylint: disable=missing-docstring,no-member | 2.248019 | 2 |
Search_3D/main_single_pcd_v2.py | akcalakcal/FCGF_submit | 1 | 6619491 | <reponame>akcalakcal/FCGF_submit<filename>Search_3D/main_single_pcd_v2.py<gh_stars>1-10
import open3d as o3d
import numpy as np
import sys
import math
import os
import copy
import tkinter.filedialog
from concurrent.futures import ThreadPoolExecutor
from lib.feature_extractor import FeatureExtractor
## Visualization is taken from "https://github.com/chrischoy/FCGF
from utils.visualization import get_colored_point_cloud_feature
from utils.pointcloud import make_open3d_point_cloud
## VLAD library is from "https://github.com/jorjasso/VLAD"
from VLADlib.VLAD import *
from VLADlib.Descriptors import *
import argparse
import glob
import cv2
import time
from tqdm import tqdm
import random
from pathlib import Path
def points_2_pointcloud(coords):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(coords)
#colors = [[0.5, 0.5, 0.5] for i in range(len(pcd.points))]
#pcd.colors = o3d.utility.Vector3dVector(colors)
return pcd
def visualize_point_cloud(pcd_list):
vis = o3d.visualization.Visualizer()
vis.create_window()
for pcd in pcd_list:
vis.add_geometry(pcd)
#ctr = vis.get_view_control()
#print("Field of view (before changing) %.2f" % ctr.get_field_of_view())
#ctr.change_field_of_view(step=fov_step)
#print("Field of view (after changing) %.2f" % ctr.get_field_of_view())
## TODO:
## 1-> json cmaera parameters change H,W
## 2-> add screen capture feature
#vis.get_render_option().load_from_json("./renderoption.json")
vis.run()
vis.destroy_window()
def convertMeshBox2LineBox(mesh_box, color_select):
points = np.array(mesh_box.vertices)
lines = [[0, 1], [0, 2], [1, 3], [2, 3],
[4, 5], [4, 6], [5, 7], [6, 7],
[0, 4], [1, 5], [2, 6], [3, 7], ]
##colors = [[1, 0, 0] for i in range(len(lines))]
colors = [color_select for i in range(len(lines))]
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(points),
lines=o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(colors)
return line_set
class Search3D:
def __init__(self, path_to_pcd, path_query_pcd, path_to_feat, isVisualizationON, input_type):
self.path_to_pcd = path_to_pcd # Path to point cloud file
self.path_to_feat = path_to_feat # Path to feature file
self.path_query_pcd = path_query_pcd
self.voxel_size = 0.025
self.read_inputs()
self.k = 4 #2 #16 # no. of visual words used for VisualDictionary Generation
self.sample_step_size = 10 #100 #300 #30 #100
self.leafSize = 40 # leafsize for "indexBallTree"
self.k_retrieve = 3 # number of retrieved box
self.color_dict={"black":[0,0,0], "blue":[0,0,1]}
self.isSearchAvaliable = True
self.visualization = isVisualizationON
self.input_type = input_type
self.pcd_apart = 10
self.BB_thresh = 0.55 #0.5
def read_inputs(self):
data_i = np.load(self.path_to_feat)
self.coord_i, self.points_i, self.feat_i = data_i['xyz'], data_i['points'], data_i['feature']
self.pcd_i = points_2_pointcloud(self.coord_i)
def computeVisualDictionary(self):
descriptors = self.feat_i
self.visualDictionary = kMeansDictionary(descriptors, self.k)
def extractBoxes_VLADdesc(self):
self.descriptorsVLAD=list()
self.idBox = list()
self.descriptorFCGF=list()
self.pointCoords=list()
self.meshBox=list()
## For each box in the point cloud, VLAD descriptors are computed.
for ind_p in list(range(0, self.coord_i.shape[0],self.sample_step_size)):
#for ind_p in list(range(0, 10000,self.sample_step_size)):
## Create mesh_box - experiment
## Box width, this value is computed considering the calibration of datasets in '3DMatch' repository
box_w = 0.2
## Creation of a box
mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w, height=box_w, depth=box_w)
mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
## Locate center of box to the origin
mat_trans = np.eye((4))
mat_trans[0, 3] = -box_w/2
mat_trans[1, 3] = -box_w/2
mat_trans[2, 3] = -box_w/2
mesh_box.transform(mat_trans)
## Locate center of box to the point location
mat_trans = np.eye((4))
mat_trans[0, 3] = self.coord_i[ind_p, 0]
mat_trans[1, 3] = self.coord_i[ind_p, 1]
mat_trans[2, 3] = self.coord_i[ind_p, 2]
mesh_box.transform(mat_trans)
## We store the all boxes in a list named "self.meshBox"
self.meshBox.append(mesh_box)
## Sampling Points in the Box:
thresh = math.sqrt(3)*box_w/2
q_point = self.coord_i[ind_p, :]
q_point_arr = np.tile(q_point, (self.coord_i.shape[0], 1))
dist_arr = q_point_arr - self.coord_i
dist = np.linalg.norm(dist_arr, axis=1)
box_p_ind = np.where(dist<=thresh)[0]
## Container for coordinates of points in the Box
box_p = self.coord_i[box_p_ind, :]
## Container for FCGF features of points in the Box
box_p_feat = self.feat_i[box_p_ind, :]
## Calling VLAD descriptor extractor
if box_p_feat is not None:
## Previously computed "self.visualDictionary" is used here
## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD)
v = VLAD(box_p_feat, self.visualDictionary)
self.descriptorsVLAD.append(v)
self.idBox.append(ind_p)
self.descriptorFCGF.append(box_p_feat)
self.pointCoords.append(box_p)
self.descriptorsVLAD = np.asarray(self.descriptorsVLAD)
self.No_box = len(self.idBox)
##
# With given bounding box, search boxes are being modified
##
def extractBoxes_VLADdesc_given_BB(self):
self.descriptorsVLAD=list()
self.idBox = list()
self.descriptorFCGF=list()
self.pointCoords=list()
self.meshBox=list()
## DEBUG
if self.input_type == 'mesh':
pcd_in = o3d.io.read_triangle_mesh(self.path_query_pcd)
pcd_in.compute_vertex_normals()
if self.input_type == 'pcd':
pcd_in = o3d.io.read_point_cloud(self.path_query_pcd)
dummy_box = pcd_in.get_axis_aligned_bounding_box()
box_scale = 1.2 # 0.5
box_w_max = dummy_box.max_bound
box_w_min = dummy_box.min_bound
box_w_x = box_scale * abs(box_w_max[0] - box_w_min[0])
box_w_y = box_scale * abs(box_w_max[1] - box_w_min[1])
box_w_z = box_scale * abs(box_w_max[2] - box_w_min[2])
## DEBUG
## For each box in the point cloud, VLAD descriptors are computed.
##for ind_p in list(range(0, self.coord_i.shape[0],self.sample_step_size)):
for ind_p in tqdm(range(0, self.coord_i.shape[0],self.sample_step_size)):
#for ind_p in list(range(0, 10000,self.sample_step_size)):
## Create mesh_box - experiment
## Box width, this value is computed considering the calibration of datasets in '3DMatch' repository
#box_w = 0.2
## DEBUG
## Creation of a box
mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z)
mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
## Locate center of box to the origin
mat_trans = np.eye((4))
mat_trans[0, 3] = -box_w_x / 2
mat_trans[1, 3] = -box_w_y / 2
mat_trans[2, 3] = -box_w_z / 2
mesh_box.transform(mat_trans)
## DEBUG
'''
## Creation of a box
mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w, height=box_w, depth=box_w)
mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
## Locate center of box to the origin
mat_trans = np.eye((4))
mat_trans[0, 3] = -box_w/2
mat_trans[1, 3] = -box_w/2
mat_trans[2, 3] = -box_w/2
mesh_box.transform(mat_trans)
'''
## Locate center of box to the point location
mat_trans = np.eye((4))
mat_trans[0, 3] = self.coord_i[ind_p, 0]
mat_trans[1, 3] = self.coord_i[ind_p, 1]
mat_trans[2, 3] = self.coord_i[ind_p, 2]
mesh_box.transform(mat_trans)
## We store the all boxes in a list named "self.meshBox"
self.meshBox.append(mesh_box)
## Sampling Points in the Box:
box_w = max(box_w_x, box_w_y, box_w_z)
thresh = math.sqrt(3)*box_w/2
q_point = self.coord_i[ind_p, :]
q_point_arr = np.tile(q_point, (self.coord_i.shape[0], 1))
dist_arr = q_point_arr - self.coord_i
dist = np.linalg.norm(dist_arr, axis=1)
box_p_ind = np.where(dist<=thresh)[0]
## Container for coordinates of points in the Box
box_p = self.coord_i[box_p_ind, :]
## Container for FCGF features of points in the Box
box_p_feat = self.feat_i[box_p_ind, :]
## Calling VLAD descriptor extractor
if box_p_feat is not None:
## Previously computed "self.visualDictionary" is used here
## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD)
v = VLAD(box_p_feat, self.visualDictionary)
self.descriptorsVLAD.append(v)
#self.idBox.append(ind_p)
#self.descriptorFCGF.append(box_p_feat)
#self.pointCoords.append(box_p)
self.descriptorsVLAD = np.asarray(self.descriptorsVLAD)
#self.No_box = len(self.idBox)
##
# With multi thread
##
def extractBoxes_VLADdesc_given_BB_multhread(self):
self.descriptorsVLAD = list()
self.meshBox = list()
if self.input_type == 'mesh':
pcd_in = o3d.io.read_triangle_mesh(self.path_query_pcd)
pcd_in.compute_vertex_normals()
if self.input_type == 'pcd':
pcd_in = o3d.io.read_point_cloud(self.path_query_pcd)
dummy_box = pcd_in.get_axis_aligned_bounding_box()
box_scale = 1.2 # 0.5
box_w_max = dummy_box.max_bound
box_w_min = dummy_box.min_bound
box_w_x = box_scale * abs(box_w_max[0] - box_w_min[0])
box_w_y = box_scale * abs(box_w_max[1] - box_w_min[1])
box_w_z = box_scale * abs(box_w_max[2] - box_w_min[2])
## Find FCGF features wihin the query bounding box
pcd_in_query = o3d.io.read_triangle_mesh(self.path_query_pcd)
box_p_query_ind = []
for p_q in pcd_in_query.vertices:
index_pos = np.where((self.coord_i[:, 0] == p_q[0]) & (self.coord_i[:, 1] == p_q[1]) & (self.coord_i[:, 2] == p_q[2]))
if index_pos[0]:
box_p_query_ind.append(index_pos[0])
## Container for FCGF features of points in the query Box
box_p_query_ind = np.array(box_p_query_ind)[:, 0]
box_p_query_feat = self.feat_i[box_p_query_ind, :]
box_p_query_feat_mean = np.mean(box_p_query_feat, axis=0)
box_p_query_feat = np.tile(box_p_query_feat_mean, (self.feat_i.shape[0], 1))
dist_feat_arr = box_p_query_feat - self.feat_i
dist_feat = np.linalg.norm(dist_feat_arr, axis=1)
min_dist_feat = np.min(dist_feat)
med_dist_feat = np.median(dist_feat)
max_dist_feat = np.max(dist_feat)
thresh_feat = self.BB_thresh #0.5 #0.2 * (med_dist_feat + min_dist_feat)
box_p_feat_ind = np.where(dist_feat <= thresh_feat)[0]
#for ind_p in tqdm(range(0, self.coord_i.shape[0], self.sample_step_size)):
for ind_p in tqdm(box_p_feat_ind):
## TODO: Outlier rejection
## Description: We want to compute vlad descriptors for only the 3D points of similar FCG features
## DEBUG
## DEBUG
## Create mesh_box - experiment
## Creation of a box
mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z)
mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
## Locate center of box to the origin
mat_trans = np.eye((4))
mat_trans[0, 3] = -box_w_x / 2
mat_trans[1, 3] = -box_w_y / 2
mat_trans[2, 3] = -box_w_z / 2
mesh_box.transform(mat_trans)
## Locate center of box to the point location
mat_trans = np.eye((4))
mat_trans[0, 3] = self.coord_i[ind_p, 0]
mat_trans[1, 3] = self.coord_i[ind_p, 1]
mat_trans[2, 3] = self.coord_i[ind_p, 2]
mesh_box.transform(mat_trans)
## We store the all boxes in a list named "self.meshBox"
self.meshBox.append(mesh_box)
## Sampling Points in the Box:
box_w = max(box_w_x, box_w_y, box_w_z)
thresh = math.sqrt(3) * box_w / 2
q_point = self.coord_i[ind_p, :]
q_point_arr = np.tile(q_point, (self.coord_i.shape[0], 1))
dist_arr = q_point_arr - self.coord_i
dist = np.linalg.norm(dist_arr, axis=1)
box_p_ind = np.where(dist <= thresh)[0]
## Container for coordinates of points in the Box
box_p = self.coord_i[box_p_ind, :]
## Container for FCGF features of points in the Box
box_p_feat = self.feat_i[box_p_ind, :]
## Calling VLAD descriptor extractor
if box_p_feat is not None:
## Previously computed "self.visualDictionary" is used here
## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD)
v = VLAD(box_p_feat, self.visualDictionary)
self.descriptorsVLAD.append(v)
# self.idBox.append(ind_p)
# self.descriptorFCGF.append(box_p_feat)
# self.pointCoords.append(box_p)
self.descriptorsVLAD = np.asarray(self.descriptorsVLAD)
# self.No_box = len(self.idBox)
def computeIndexBallTree(self):
self.tree = indexBallTree(self.descriptorsVLAD, self.leafSize)
##
# Inputs:
# boxId: Index of the Query Box
# k_NN: k Nearest Neighbor
##
    def query(self, boxId, k_NN):
        """Interactively retrieve the k_NN boxes most similar to box `boxId`
        and visualize query and matches side by side.

        Parameters:
            boxId (int): index into self.meshBox / self.descriptorFCGF of the
                query box; may be changed interactively by the user.
            k_NN (int): number of nearest-neighbor boxes to retrieve.

        Requires self.tree (ball tree), self.descriptorFCGF and self.meshBox
        to be populated beforehand.  Loops until the user answers 'N', or runs
        a single pass when visualization is off.
        """
        self.k_retrieve = k_NN
        ## Initialization - Computation of Colored Point Cloud Based on FCGF Features
        ## Duplication of "pcd_i" point cloud
        ## We show matched boxes on this point cloud
        #pcd_match = points_2_pointcloud(self.pcd_i.points)
        ## The scene is re-read from disk so pcd_match is an independent copy.
        if self.input_type == 'mesh':
            self.pcd_i = o3d.io.read_triangle_mesh(self.path_to_pcd)
            self.pcd_i.compute_vertex_normals()
            pcd_match = o3d.io.read_triangle_mesh(self.path_to_pcd)
            pcd_match.compute_vertex_normals()
        elif self.input_type == 'pcd':
            self.pcd_i = o3d.io.read_point_cloud(self.path_to_pcd)
            pcd_match = o3d.io.read_point_cloud(self.path_to_pcd)
        #o3d.visualization.draw_geometries([pcd_match])
        ## Translate pcd_match to the right so query and matches are shown
        ## side by side; the same matrix is reused to shift matched boxes.
        mat_trans = np.eye(4)
        mat_trans[0, 3] = 15.0 #3.0 # 4.0
        mat_trans[1, 3] = 0
        mat_trans[2, 3] = 0
        pcd_match.transform(mat_trans)
        ## We used point cloud coloring based on FCGF features
        ## This coloring is also used in FCGF paper
        ## (currently disabled: both branches use the plain geometry)
        if self.visualization:
            #spheres_i = get_colored_point_cloud_feature(self.pcd_i, self.feat_i, self.voxel_size)
            #spheres_match_i = get_colored_point_cloud_feature(pcd_match, self.feat_i, self.voxel_size)
            spheres_i = self.pcd_i
            spheres_match_i = pcd_match
        else:
            spheres_i = self.pcd_i
            spheres_match_i = pcd_match
        ## TODO: interactive box searching
        ## How many boxes we have.
        while(self.isSearchAvaliable):
            ## Fetching the feature vector of the box, which is previously computed
            queryBox_descriptor_FGCF = self.descriptorFCGF[boxId]
            v = VLAD(queryBox_descriptor_FGCF, self.visualDictionary)
            v = v.reshape(1, -1)
            # find the k most relevant boxes
            # using previously generated "balltree"
            dist, ind = self.tree.query(v, self.k_retrieve)
            ## Initialization of Visualization - Empty open3D Scene
            visual_list = []
            visual_list.append(spheres_i)
            visual_list.append(spheres_match_i)
            # Draw the box - Query
            mesh_box_vertices_query = self.meshBox[boxId]
            ## Query box is colored in black
            lines_set_query_box = convertMeshBox2LineBox(mesh_box_vertices_query, self.color_dict["black"])
            visual_list.append(lines_set_query_box)
            ## Iteration through nearest neighbor matches
            ## and draw each box on the (translated) point cloud
            for ind_match in ind[0]:
                ## Draw the box - Match (deep copy so self.meshBox stays untouched)
                mesh_box_vertices_match = copy.deepcopy(self.meshBox[ind_match])
                mesh_box_vertices_match.transform(mat_trans)
                ## Matched box is colored in blue
                lines_set_match_box = convertMeshBox2LineBox(mesh_box_vertices_match, self.color_dict["blue"])
                visual_list.append(lines_set_match_box)
            if self.visualization:
                visualize_point_cloud(visual_list)
                decision = input('Do you want to continue to searching another box? Y or N? \n')
                if decision.capitalize() == 'Y':
                    selected_boxId = input('Select boxId for another query search between 0 and {} \n'.format(self.No_box))
                    boxId = int(selected_boxId)
                    print('Another query search is started using boxId = {} \n'.format(boxId))
                elif decision.capitalize() == 'N':
                    self.isSearchAvaliable = False
                else:
                    ## Any other answer repeats the search with the same boxId.
                    print('Another query search is started using boxId = {} \n'.format(boxId))
            else:
                ## Without visualization, a single query pass is performed.
                self.isSearchAvaliable = False
    def query_given_BB(self, boxId, k_NN, feat_extractor):
        """Interactively retrieve the k_NN boxes most similar to the
        user-selected query bounding box and visualize them.

        The query descriptor is built from the FCGF features of the scene
        points whose coordinates exactly match the query geometry's vertices.
        The inner loop widens self.k_retrieve until k_NN non-overlapping
        (intersection-filtered) boxes are collected.  Loops until the user
        answers 'N'; on 'Y' the user crops a new region and the method
        recurses into itself.

        Parameters:
            boxId (int): only echoed in user prompts in this variant.
            k_NN (int): number of non-overlapping matches to display.
            feat_extractor: FCGF feature extractor; currently unused here
                (the extraction call is commented out).
        """
        self.k_retrieve = k_NN
        ## Initialization - Computation of Colored Point Cloud Based on FCGF Features
        ## Duplication of "pcd_i" point cloud
        ## We show matched boxes on this point cloud
        #pcd_match = points_2_pointcloud(self.pcd_i.points)
        ## The scene is re-read from disk so pcd_match is an independent copy.
        if self.input_type == 'mesh':
            self.pcd_i = o3d.io.read_triangle_mesh(self.path_to_pcd)
            self.pcd_i.compute_vertex_normals()
            pcd_match = o3d.io.read_triangle_mesh(self.path_to_pcd)
            pcd_match.compute_vertex_normals()
        elif self.input_type == 'pcd':
            self.pcd_i = o3d.io.read_point_cloud(self.path_to_pcd)
            pcd_match = o3d.io.read_point_cloud(self.path_to_pcd)
        #o3d.visualization.draw_geometries([pcd_match])
        ## Horizontal offset between the two displayed point clouds,
        ## derived from the scene's own extent along x.
        dummy_box = pcd_match.get_axis_aligned_bounding_box()
        box_w_max = dummy_box.max_bound
        box_w_min = dummy_box.min_bound
        self.pcd_apart = 1.5 * abs(box_w_max[0] - box_w_min[0])
        ## Translate pcd_match to the right for visualization; the same
        ## matrix is reused to shift matched boxes.
        mat_trans = np.eye(4)
        mat_trans[0, 3] = self.pcd_apart #3.5 #3.0 # 4.0
        mat_trans[1, 3] = 0
        mat_trans[2, 3] = 0
        pcd_match.transform(mat_trans)
        ## We used point cloud coloring based on FCGF features
        ## This coloring is also used in FCGF paper
        ## (currently disabled: both branches use the plain geometry)
        if self.visualization:
            #spheres_i = get_colored_point_cloud_feature(self.pcd_i, self.feat_i, self.voxel_size)
            #spheres_match_i = get_colored_point_cloud_feature(pcd_match, self.feat_i, self.voxel_size)
            spheres_i = self.pcd_i
            spheres_match_i = pcd_match
        else:
            spheres_i = self.pcd_i
            spheres_match_i = pcd_match
        ## TODO: interactive box searching
        ## How many boxes we have.
        while(self.isSearchAvaliable):
            ## TODO: extract FCGF feature for query point cloud here
            #target_folder_path = os.path.dirname(os.path.abspath(self.path_query_pcd))
            #file_path_query_feat_i = feat_extractor.extract(self.path_query_pcd, target_folder_path)
            #data_query_i = np.load(file_path_query_feat_i)
            #query_coord_i, query_points_i, query_feat_i = data_query_i['xyz'], data_query_i['points'], data_query_i['feature']
            #query_pcd_i = points_2_pointcloud(query_coord_i)
            ## Match query vertices to scene points by exact coordinate equality.
            pcd_in_query = o3d.io.read_triangle_mesh(self.path_query_pcd)
            BB_pcd_in_query = pcd_in_query.get_axis_aligned_bounding_box()
            box_p_query_ind = []
            for p_q in pcd_in_query.vertices:
                index_pos = np.where((self.coord_i[:,0] == p_q[0]) & (self.coord_i[:, 1] == p_q[1]) & (self.coord_i[:, 2] == p_q[2]))
                ## NOTE(review): truth-testing index_pos[0] raises ValueError
                ## when a vertex matches more than one row -- consider
                ## `index_pos[0].size` instead.
                if index_pos[0]:
                    box_p_query_ind.append(index_pos[0])
            ## Container for FCGF features of points in the query Box
            box_p_query_ind = np.array(box_p_query_ind)[:,0]
            box_p_query_feat = self.feat_i[box_p_query_ind, :]
            #
            ## The query descriptor is aggregated from the query-box features.
            queryBox_descriptor_FGCF = box_p_query_feat #self.descriptorFCGF[boxId]
            #queryBox_descriptor_FGCF = query_feat_i
            v = VLAD(queryBox_descriptor_FGCF, self.visualDictionary)
            v = v.reshape(1, -1)
            ## Widen k_retrieve until k_NN non-overlapping boxes survive filtering.
            search_continue = True
            while search_continue:
                # find the k most relevant boxes
                # using previously generated "balltree"
                dist, ind = self.tree.query(v, self.k_retrieve)
                ## Initialization of Visualization - Empty open3D Scene
                visual_list = []
                visual_list.append(spheres_i)
                visual_list.append(spheres_match_i)
                # Draw the box - Query
                visual_list.append(BB_pcd_in_query)
                ## Iteration through nearest neighbor matches
                ## and draw each box on the (translated) point cloud
                mesh_box_stack = []
                tmp_cnt = 0
                for ind_match in ind[0]:
                    # Init
                    IoU = False
                    ## Draw the box - Match (deep copy so self.meshBox stays untouched)
                    mesh_box_vertices_match = copy.deepcopy(self.meshBox[ind_match])
                    if tmp_cnt == 0:
                        ## The best match is always accepted.
                        mesh_box_stack.append(copy.deepcopy(mesh_box_vertices_match))
                        mesh_box_vertices_match.transform(mat_trans)
                        ## Matched box is colored in blue
                        lines_set_match_box = convertMeshBox2LineBox(mesh_box_vertices_match, self.color_dict["blue"])
                        visual_list.append(lines_set_match_box)
                    ## TODO: Compare matched mesh boxes wrt Intersection over Union (IoU)
                    if tmp_cnt > 0:
                        #IoU = mesh_box_stack[-1].is_intersecting(mesh_box_vertices_match)
                        ## Reject any box intersecting an already-accepted one.
                        for m_tmp in mesh_box_stack:
                            IoU_t = m_tmp.is_intersecting(mesh_box_vertices_match)
                            IoU = IoU or IoU_t
                        if not IoU:
                            mesh_box_stack.append(copy.deepcopy(mesh_box_vertices_match))
                            mesh_box_vertices_match.transform(mat_trans)
                            ## Matched box is colored in blue
                            lines_set_match_box = convertMeshBox2LineBox(mesh_box_vertices_match, self.color_dict["blue"])
                            visual_list.append(lines_set_match_box)
                            #visual_list_tmp = visual_list.copy()
                            #visual_list_tmp.append(lines_set_match_box)
                            #visualize_point_cloud(visual_list_tmp)
                    tmp_cnt = tmp_cnt + 1
                #print('len(visual_list) = ', len(visual_list))
                #print('self.k_retrieve = ', self.k_retrieve)
                #print('k_NN = ', k_NN)
                ## visual_list holds the two clouds + query BB + accepted boxes,
                ## so k_NN accepted boxes means len(visual_list) == k_NN + 3;
                ## the >= (k_NN + 2) test therefore accepts one box fewer.
                if len(visual_list) >= (k_NN + 2):
                    search_continue = False
                else:
                    self.k_retrieve = self.k_retrieve + 10
            if self.visualization:
                visualize_point_cloud(visual_list)
                decision = input('Do you want to continue to searching another box? Y or N? \n')
                if decision.capitalize() == 'Y':
                    #selected_boxId = input('Select boxId for another query search between 0 and {} \n'.format(self.No_box))
                    #boxId = int(selected_boxId)
                    ## Select A Bounding Box Again
                    if self.input_type == 'pcd':
                        # pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i)
                        pcd_in = o3d.io.read_point_cloud(self.path_to_pcd)
                        # pcd_in.compute_vertex_normals()
                        # o3d.visualization.draw_geometries([pcd_in])
                        demo_crop_geometry(pcd_in)
                    elif self.input_type == 'mesh':
                        pcd_in = o3d.io.read_triangle_mesh(self.path_to_pcd)
                        pcd_in.compute_vertex_normals()
                        demo_crop_geometry(pcd_in)
                    k_retrieve_new = input('Number of Search Results Shown \n')
                    self.k_retrieve = int(k_retrieve_new)
                    ## Extract VLAD descriptors given the newly selected BB.
                    ## NOTE(review): this recursion grows the call stack by one
                    ## frame per user round -- an outer loop would avoid that.
                    self.extractBoxes_VLADdesc_given_BB()
                    self.query_given_BB(boxId, k_NN, feat_extractor)
                    print('Another query search is started using boxId = {} \n'.format(boxId))
                elif decision.capitalize() == 'N':
                    self.isSearchAvaliable = False
                else:
                    ## Any other answer repeats the search.
                    print('Another query search is started using boxId = {} \n'.format(boxId))
            else:
                ## Without visualization, a single query pass is performed.
                self.isSearchAvaliable = False
def pick_points(pcd):
    """Open an Open3D editing window and let the user pick points in `pcd`.

    Blocks until the user closes the window, then returns the indices of the
    picked points (list of int).
    """
    print("")
    print(
        "1) Please pick at least three correspondences using [shift + left click]"
    )
    print("   Press [shift + right click] to undo point picking")
    ## BUGFIX: corrected typo/grammar in the user prompt
    ## ("Afther picking points, press q for close the window").
    print("2) After picking points, press Q to close the window")
    vis = o3d.visualization.VisualizerWithEditing()
    vis.create_window()
    vis.add_geometry(pcd)
    vis.run()  # user picks points
    vis.destroy_window()
    print("")
    return vis.get_picked_points()
def demo_crop_geometry(pcd):
    """Open the Open3D editor so the user can manually crop `pcd`.

    Prints the cropping instructions, then blocks until the editing window
    is closed.
    """
    instructions = (
        "Demo for manual geometry cropping",
        "1) Press 'Y' twice to align geometry with negative direction of y-axis",
        "2) Press 'K' to lock screen and to switch to selection mode",
        "3) Drag for rectangle selection,",
        "   or use ctrl + left click for polygon selection",
        "4) Press 'C' to get a selected geometry and to save it",
        "5) Press 'F' to switch to freeview mode",
    )
    for line in instructions:
        print(line)
    #pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    o3d.visualization.draw_geometries_with_editing([pcd])
def main(args):
    """Entry point: let the user select a region of a point cloud and search
    the cloud for similar regions.

    `args` is an argparse.Namespace providing path_feature,
    k_nearest_neighbor and visualization.  NOTE(review): path_pointcloud is
    ignored -- the point cloud is chosen via a tkinter file dialog instead,
    and several paths below are hard-coded to a developer machine.
    """
    ## Reading the arguments
    args = vars(args)
    #PATH_PCD = args["path_pointcloud"]
    #PATH_PCD = "/home/akin/workspace/All_Data/Indoor_Lidar_RGBD_Scan_Dataset/Apartment/Reconstruction/ours_apartment/apartment.ply"
    #PATH_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_7_frag.ply"
    #PATH_QUERY_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_7.ply"
    #PATH_PCD = "/home/akin/workspace/All_Data/Tanks_and_Templates/Caterpillar/GT/Caterpillar.ply"
    ## NOTE(review): hard-coded developer paths; PATH_PCD is immediately
    ## overridden by the file dialog below, PATH_QUERY_PCD is not.
    PATH_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_1.ply"
    PATH_QUERY_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_query.ply"
    ## User dialog for input file
    PATH_PCD = tkinter.filedialog.askopenfilename()
    ##
    ## Local configuration flags (not exposed on the command line).
    input_type = 'pcd' #'mesh'
    selection_tool = 1
    FCGF_vis = 0 #False
    PATH_FEATURE = args["path_feature"]
    k_NN = args["k_nearest_neighbor"]
    isVisualizationON = bool(args["visualization"])
    file_path_pcd_i = PATH_PCD
    file_path_query_pcd = PATH_QUERY_PCD
    file_path_feat_i = PATH_FEATURE
    ## TODO: EXP: Bounding box of the 3D geometry
    '''
    pcd_in = o3d.io.read_triangle_mesh(file_path_query_pcd)
    pcd_in.compute_vertex_normals()
    dummy_box = pcd_in.get_axis_aligned_bounding_box()
    #o3d.visualization.draw_geometries([dummy_box, pcd_in])
    ## DEBUG
    center_bb = dummy_box.get_center()
    box_w = 0.9
    box_scale = 1.2 #0.5
    box_w_max = dummy_box.max_bound
    box_w_min = dummy_box.min_bound
    box_w_x = box_scale*abs(box_w_max[0] - box_w_min[0])
    box_w_y = box_scale*abs(box_w_max[1] - box_w_min[1])
    box_w_z = box_scale*abs(box_w_max[2] - box_w_min[2])
    ## Creation of a box
    mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z)
    mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
    ## Locate center of box to the origin
    mat_trans = np.eye((4))
    mat_trans[0, 3] = -box_w_x / 2
    mat_trans[1, 3] = -box_w_y / 2
    mat_trans[2, 3] = -box_w_z / 2
    mesh_box.transform(mat_trans)
    ## Locate center of box to the point location
    mat_trans = np.eye((4))
    mat_trans[0, 3] = center_bb[0]
    mat_trans[1, 3] = center_bb[1]
    mat_trans[2, 3] = center_bb[2]
    mesh_box.transform(mat_trans)
    line_set_dummy = convertMeshBox2LineBox(mesh_box, [1,0,0])
    o3d.visualization.draw_geometries([dummy_box, pcd_in, line_set_dummy])
    ## DEBUG
    '''
    ##
    ## TODO: Add volume selection tool for the user here
    ## Let the user crop the query region out of the scene interactively.
    if selection_tool:
        if input_type == 'pcd':
            #pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i)
            pcd_in = o3d.io.read_point_cloud(file_path_pcd_i)
            #pcd_in.compute_vertex_normals()
            #o3d.visualization.draw_geometries([pcd_in])
            demo_crop_geometry(pcd_in)
        elif input_type == 'mesh':
            pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i)
            pcd_in.compute_vertex_normals()
            demo_crop_geometry(pcd_in)
    #sys.exit()
    ##
    ## TODO: Add feature extraction module here
    ## NOTE(review): model checkpoint path is hard-coded; file_path_feat_i
    ## from the command line is overwritten by the freshly extracted features.
    model_path = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/outputs_trained_models/checkpoint.pth"
    feat_extractor = FeatureExtractor(model_path)
    target_folder_path = os.path.dirname(os.path.abspath(file_path_pcd_i))
    file_path_feat_i = feat_extractor.extract(PATH_PCD, target_folder_path)
    ##
    ## TODO: Visualize input point cloud FCGF features
    ## Debug branch: render FCGF feature colors, save them, then exit.
    if FCGF_vis:
        data_i = np.load(file_path_feat_i)
        coord_i, points_i, feat_i = data_i['xyz'], data_i['points'], data_i['feature']
        pcd_i = o3d.io.read_point_cloud(file_path_pcd_i)
        pcd_match = points_2_pointcloud(coord_i)
        voxel_size = 0.05 #0.025
        pcd_in_FCGF = get_colored_point_cloud_feature(pcd_match, feat_i, voxel_size)
        file_name_FCGF_folder = os.path.dirname(os.path.abspath(file_path_query_pcd))
        file_name_FCGF_name = os.path.basename(file_path_query_pcd)
        file_name_FCGF_name = os.path.splitext(file_name_FCGF_name)[0]
        file_name_FCGF = file_name_FCGF_folder + "/" + file_name_FCGF_name + "_FCGF_" + str(voxel_size) + ".ply"
        o3d.io.write_triangle_mesh(file_name_FCGF, pcd_in_FCGF)
        o3d.visualization.draw_geometries([pcd_in_FCGF])
        sys.exit()
    '''
    ## TODO: Visualize input point cloud FCGF features - Query
    feat_extractor = FeatureExtractor(model_path)
    target_query_folder_path = os.path.dirname(os.path.abspath(file_path_query_pcd))
    file_path_query_feat_i = feat_extractor.extract(file_path_query_pcd, target_query_folder_path)
    data_i = np.load(file_path_query_feat_i)
    coord_i, points_i, feat_i = data_i['xyz'], data_i['points'], data_i['feature']
    pcd_i = o3d.io.read_point_cloud(file_path_query_pcd)
    pcd_match = points_2_pointcloud(coord_i)
    voxel_size = 0.025
    pcd_in_FCGF = get_colored_point_cloud_feature(pcd_match, feat_i, voxel_size)
    o3d.visualization.draw_geometries([pcd_in_FCGF])
    sys.exit()
    #
    '''
    ## Abort early when either required input file is missing.
    if os.path.isfile(file_path_pcd_i)==0 or os.path.isfile(file_path_feat_i)==0:
        print('ERROR - Missing Files - Check Files ')
        print('Point cloud file = ', file_path_pcd_i)
        print('Feature file = ', file_path_feat_i)
        sys.exit()
    ## Start timer (timing is only meaningful without interactive windows)
    if not isVisualizationON:
        start_time = time.time()
    ###
    # "Search_3D" Class Instance Generation
    ###
    s3d = Search3D(file_path_pcd_i, file_path_query_pcd, file_path_feat_i, isVisualizationON, input_type)
    ###
    # Visual Dictionary Generation
    ###
    print('Computing Visual Dictionary')
    s3d.computeVisualDictionary()
    ###
    # VLAD Decriptor Extractor
    ###
    print('Computing VLAD Descriptors')
    #s3d.extractBoxes_VLADdesc()
    #s3d.extractBoxes_VLADdesc_given_BB()
    s3d.extractBoxes_VLADdesc_given_BB_multhread()
    ###
    # IndexballTree Generation
    ###
    print('Generating of IndexBallTree')
    s3d.computeIndexBallTree()
    ###
    # Query Search
    ###
    print('Search Box Query in Point Cloud')
    boxId = 0
    #s3d.query(boxId, k_NN)
    s3d.query_given_BB(boxId, k_NN, feat_extractor)
    ## end timer
    if not isVisualizationON:
        execution_time = time.time() - start_time
        print('execution time = %.2f seconds' % execution_time)
if __name__ == "__main__":
    # execute only if run as a script
    parser = argparse.ArgumentParser(description='Search 3D Application')
    # Dataset setting
    parser.add_argument("-p", "--path_pointcloud", required=True,
                        help="Path of the point cloud file")
    parser.add_argument("-f", "--path_feature", required=True,
                        help="Path of the FCGF feature file associated the input point cloud")
    # BUGFIX: these two options declared defaults but were also marked
    # required=True, which made the defaults unreachable; they are optional now.
    parser.add_argument("-k", "--k_nearest_neighbor", default=3, type=int,
                        help="k nearest neighbor matches are computed in the program")
    parser.add_argument("-v", "--visualization", default=1, type=int,
                        help="Visualization Flag")
    args = parser.parse_args()
    print('Input Arguments:')
    for arg in vars(args):
        print("\t {} -> {}".format(arg, getattr(args, arg)))
    print('Search_3D is running')
    # BUGFIX: reuse the already-parsed namespace instead of parsing argv twice.
    main(args)
## TODO: add args.
import open3d as o3d
import numpy as np
import sys
import math
import os
import copy
import tkinter.filedialog
from concurrent.futures import ThreadPoolExecutor
from lib.feature_extractor import FeatureExtractor
## Visualization is taken from "https://github.com/chrischoy/FCGF
from utils.visualization import get_colored_point_cloud_feature
from utils.pointcloud import make_open3d_point_cloud
## VLAD library is from "https://github.com/jorjasso/VLAD"
from VLADlib.VLAD import *
from VLADlib.Descriptors import *
import argparse
import glob
import cv2
import time
from tqdm import tqdm
import random
from pathlib import Path
def points_2_pointcloud(coords):
    """Wrap an (N, 3) coordinate array into an Open3D PointCloud."""
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(coords)
    return cloud
def visualize_point_cloud(pcd_list):
    """Display every geometry in `pcd_list` in one Open3D window.

    Blocks until the user closes the window.
    """
    viewer = o3d.visualization.Visualizer()
    viewer.create_window()
    for geometry in pcd_list:
        viewer.add_geometry(geometry)
    ## TODO:
    ## 1-> json camera parameters change H,W
    ## 2-> add screen capture feature
    #viewer.get_render_option().load_from_json("./renderoption.json")
    viewer.run()
    viewer.destroy_window()
def convertMeshBox2LineBox(mesh_box, color_select):
    """Convert a box TriangleMesh into a wireframe LineSet.

    Every edge is painted with `color_select` (an [r, g, b] triple in 0..1).
    """
    corners = np.array(mesh_box.vertices)
    # The 12 edges of a box, as index pairs into its 8 corner vertices.
    edges = [[0, 1], [0, 2], [1, 3], [2, 3],
             [4, 5], [4, 6], [5, 7], [6, 7],
             [0, 4], [1, 5], [2, 6], [3, 7]]
    wire = o3d.geometry.LineSet(
        points=o3d.utility.Vector3dVector(corners),
        lines=o3d.utility.Vector2iVector(edges),
    )
    wire.colors = o3d.utility.Vector3dVector([color_select] * len(edges))
    return wire
class Search3D:
    def __init__(self, path_to_pcd, path_query_pcd, path_to_feat, isVisualizationON, input_type):
        """Configure the 3D search and load the FCGF feature file.

        Parameters:
            path_to_pcd (str): path to the scene point cloud / mesh file.
            path_query_pcd (str): path to the cropped query geometry file.
            path_to_feat (str): path to the .npz FCGF feature file
                (loaded immediately via read_inputs()).
            isVisualizationON (bool): enable interactive visualization.
            input_type (str): 'pcd' or 'mesh', how the geometry files are read.
        """
        self.path_to_pcd = path_to_pcd # Path to point cloud file
        self.path_to_feat = path_to_feat # Path to feature file
        self.path_query_pcd = path_query_pcd # Path to the query geometry file
        self.voxel_size = 0.025 # voxel size used for FCGF feature coloring
        self.read_inputs()
        self.k = 4 #2 #16 # no. of visual words used for VisualDictionary Generation
        self.sample_step_size = 10 #100 #300 #30 #100
        self.leafSize = 40 # leafsize for "indexBallTree"
        self.k_retrieve = 3 # number of retrieved box
        self.color_dict={"black":[0,0,0], "blue":[0,0,1]} # box wireframe colors
        self.isSearchAvaliable = True # interactive query loop keeps running while True
        self.visualization = isVisualizationON
        self.input_type = input_type
        self.pcd_apart = 10 # horizontal display offset; recomputed in query_given_BB
        self.BB_thresh = 0.55 #0.5 # FCGF feature-distance threshold for candidate boxes
def read_inputs(self):
data_i = np.load(self.path_to_feat)
self.coord_i, self.points_i, self.feat_i = data_i['xyz'], data_i['points'], data_i['feature']
self.pcd_i = points_2_pointcloud(self.coord_i)
def computeVisualDictionary(self):
descriptors = self.feat_i
self.visualDictionary = kMeansDictionary(descriptors, self.k)
def extractBoxes_VLADdesc(self):
self.descriptorsVLAD=list()
self.idBox = list()
self.descriptorFCGF=list()
self.pointCoords=list()
self.meshBox=list()
## For each box in the point cloud, VLAD descriptors are computed.
for ind_p in list(range(0, self.coord_i.shape[0],self.sample_step_size)):
#for ind_p in list(range(0, 10000,self.sample_step_size)):
## Create mesh_box - experiment
## Box width, this value is computed considering the calibration of datasets in '3DMatch' repository
box_w = 0.2
## Creation of a box
mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w, height=box_w, depth=box_w)
mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
## Locate center of box to the origin
mat_trans = np.eye((4))
mat_trans[0, 3] = -box_w/2
mat_trans[1, 3] = -box_w/2
mat_trans[2, 3] = -box_w/2
mesh_box.transform(mat_trans)
## Locate center of box to the point location
mat_trans = np.eye((4))
mat_trans[0, 3] = self.coord_i[ind_p, 0]
mat_trans[1, 3] = self.coord_i[ind_p, 1]
mat_trans[2, 3] = self.coord_i[ind_p, 2]
mesh_box.transform(mat_trans)
## We store the all boxes in a list named "self.meshBox"
self.meshBox.append(mesh_box)
## Sampling Points in the Box:
thresh = math.sqrt(3)*box_w/2
q_point = self.coord_i[ind_p, :]
q_point_arr = np.tile(q_point, (self.coord_i.shape[0], 1))
dist_arr = q_point_arr - self.coord_i
dist = np.linalg.norm(dist_arr, axis=1)
box_p_ind = np.where(dist<=thresh)[0]
## Container for coordinates of points in the Box
box_p = self.coord_i[box_p_ind, :]
## Container for FCGF features of points in the Box
box_p_feat = self.feat_i[box_p_ind, :]
## Calling VLAD descriptor extractor
if box_p_feat is not None:
## Previously computed "self.visualDictionary" is used here
## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD)
v = VLAD(box_p_feat, self.visualDictionary)
self.descriptorsVLAD.append(v)
self.idBox.append(ind_p)
self.descriptorFCGF.append(box_p_feat)
self.pointCoords.append(box_p)
self.descriptorsVLAD = np.asarray(self.descriptorsVLAD)
self.No_box = len(self.idBox)
##
# With given bounding box, search boxes are being modified
##
    def extractBoxes_VLADdesc_given_BB(self):
        """Compute one VLAD descriptor per candidate box, where the box size is
        taken from the axis-aligned bounding box of the query geometry.

        Boxes are centered on every `sample_step_size`-th point of the input
        cloud; the points inside each box's circumscribed sphere are gathered
        and their FCGF features aggregated into a VLAD descriptor using
        self.visualDictionary.

        Side effects: fills self.descriptorsVLAD (ndarray) and self.meshBox;
        resets self.idBox / self.descriptorFCGF / self.pointCoords without
        populating them (the appends are commented out).
        """
        self.descriptorsVLAD=list()
        self.idBox = list()
        self.descriptorFCGF=list()
        self.pointCoords=list()
        self.meshBox=list()
        ## The query geometry is loaded only to measure its bounding box.
        if self.input_type == 'mesh':
            pcd_in = o3d.io.read_triangle_mesh(self.path_query_pcd)
            pcd_in.compute_vertex_normals()
        if self.input_type == 'pcd':
            pcd_in = o3d.io.read_point_cloud(self.path_query_pcd)
        dummy_box = pcd_in.get_axis_aligned_bounding_box()
        box_scale = 1.2 # 0.5 -- enlarge the query BB slightly
        box_w_max = dummy_box.max_bound
        box_w_min = dummy_box.min_bound
        box_w_x = box_scale * abs(box_w_max[0] - box_w_min[0])
        box_w_y = box_scale * abs(box_w_max[1] - box_w_min[1])
        box_w_z = box_scale * abs(box_w_max[2] - box_w_min[2])
        ## For each box in the point cloud, VLAD descriptors are computed.
        ##for ind_p in list(range(0, self.coord_i.shape[0],self.sample_step_size)):
        for ind_p in tqdm(range(0, self.coord_i.shape[0],self.sample_step_size)):
        #for ind_p in list(range(0, 10000,self.sample_step_size)):
            ## Create mesh_box - experiment
            ## Box width, this value is computed considering the calibration of datasets in '3DMatch' repository
            #box_w = 0.2
            ## Creation of a box sized like the query bounding box
            mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z)
            mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
            ## Locate center of box to the origin
            mat_trans = np.eye((4))
            mat_trans[0, 3] = -box_w_x / 2
            mat_trans[1, 3] = -box_w_y / 2
            mat_trans[2, 3] = -box_w_z / 2
            mesh_box.transform(mat_trans)
            '''
            ## Creation of a box
            mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w, height=box_w, depth=box_w)
            mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
            ## Locate center of box to the origin
            mat_trans = np.eye((4))
            mat_trans[0, 3] = -box_w/2
            mat_trans[1, 3] = -box_w/2
            mat_trans[2, 3] = -box_w/2
            mesh_box.transform(mat_trans)
            '''
            ## Locate center of box to the point location
            mat_trans = np.eye((4))
            mat_trans[0, 3] = self.coord_i[ind_p, 0]
            mat_trans[1, 3] = self.coord_i[ind_p, 1]
            mat_trans[2, 3] = self.coord_i[ind_p, 2]
            mesh_box.transform(mat_trans)
            ## We store the all boxes in a list named "self.meshBox"
            self.meshBox.append(mesh_box)
            ## Sampling Points in the Box: threshold is the radius of the
            ## sphere circumscribing the (largest-extent) box.
            box_w = max(box_w_x, box_w_y, box_w_z)
            thresh = math.sqrt(3)*box_w/2
            q_point = self.coord_i[ind_p, :]
            q_point_arr = np.tile(q_point, (self.coord_i.shape[0], 1))
            dist_arr = q_point_arr - self.coord_i
            dist = np.linalg.norm(dist_arr, axis=1)
            box_p_ind = np.where(dist<=thresh)[0]
            ## Container for coordinates of points in the Box
            box_p = self.coord_i[box_p_ind, :]
            ## Container for FCGF features of points in the Box
            box_p_feat = self.feat_i[box_p_ind, :]
            ## Calling VLAD descriptor extractor
            ## NOTE(review): fancy indexing never returns None, so this guard
            ## is always true; an empty box would still reach VLAD() --
            ## consider checking box_p_feat.size instead.
            if box_p_feat is not None:
                ## Previously computed "self.visualDictionary" is used here
                ## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD)
                v = VLAD(box_p_feat, self.visualDictionary)
                self.descriptorsVLAD.append(v)
                #self.idBox.append(ind_p)
                #self.descriptorFCGF.append(box_p_feat)
                #self.pointCoords.append(box_p)
        self.descriptorsVLAD = np.asarray(self.descriptorsVLAD)
        #self.No_box = len(self.idBox)
##
# With multi thread
##
def extractBoxes_VLADdesc_given_BB_multhread(self):
    """Compute one VLAD descriptor per candidate box.

    Candidate box centers are the scene points whose FCGF feature lies
    within ``self.BB_thresh`` of the *mean* FCGF feature of the
    user-selected query bounding box (``self.path_query_pcd``).  The box
    extents are the query's axis-aligned bounding box scaled by a margin.

    Side effects: (re)populates ``self.meshBox`` (one Open3D box mesh per
    candidate) and ``self.descriptorsVLAD`` (stacked to an ndarray at the
    end).

    NOTE(review): despite the "_multhread" suffix this implementation is
    purely sequential.
    """
    self.descriptorsVLAD = list()
    self.meshBox = list()
    # Load the query geometry; meshes additionally need vertex normals for shading.
    if self.input_type == 'mesh':
        pcd_in = o3d.io.read_triangle_mesh(self.path_query_pcd)
        pcd_in.compute_vertex_normals()
    if self.input_type == 'pcd':
        pcd_in = o3d.io.read_point_cloud(self.path_query_pcd)
    # Search-box size: the query AABB scaled by a margin factor.
    dummy_box = pcd_in.get_axis_aligned_bounding_box()
    box_scale = 1.2  # 0.5
    box_w_max = dummy_box.max_bound
    box_w_min = dummy_box.min_bound
    box_w_x = box_scale * abs(box_w_max[0] - box_w_min[0])
    box_w_y = box_scale * abs(box_w_max[1] - box_w_min[1])
    box_w_z = box_scale * abs(box_w_max[2] - box_w_min[2])
    ## Find FCGF features within the query bounding box by exact coordinate match.
    # NOTE(review): reads the query as a triangle mesh even when
    # self.input_type == 'pcd' — confirm this is intended.
    pcd_in_query = o3d.io.read_triangle_mesh(self.path_query_pcd)
    box_p_query_ind = []
    for p_q in pcd_in_query.vertices:
        index_pos = np.where((self.coord_i[:, 0] == p_q[0]) & (self.coord_i[:, 1] == p_q[1]) & (self.coord_i[:, 2] == p_q[2]))
        # Fix: `if index_pos[0]:` truth-tested the index array itself, which is
        # False when the (single) matching index is 0 and raises ValueError when
        # several points match.  Test for a non-empty match instead.
        if index_pos[0].size > 0:
            box_p_query_ind.append(index_pos[0])
    ## Container for FCGF features of points in the query Box
    box_p_query_ind = np.array(box_p_query_ind)[:, 0]
    box_p_query_feat = self.feat_i[box_p_query_ind, :]
    # Distance of every scene feature to the mean query feature.
    box_p_query_feat_mean = np.mean(box_p_query_feat, axis=0)
    box_p_query_feat = np.tile(box_p_query_feat_mean, (self.feat_i.shape[0], 1))
    dist_feat_arr = box_p_query_feat - self.feat_i
    dist_feat = np.linalg.norm(dist_feat_arr, axis=1)
    min_dist_feat = np.min(dist_feat)
    med_dist_feat = np.median(dist_feat)
    max_dist_feat = np.max(dist_feat)
    thresh_feat = self.BB_thresh  # 0.5 # 0.2 * (med_dist_feat + min_dist_feat)
    box_p_feat_ind = np.where(dist_feat <= thresh_feat)[0]
    # for ind_p in tqdm(range(0, self.coord_i.shape[0], self.sample_step_size)):
    for ind_p in tqdm(box_p_feat_ind):
        ## Creation of a box centered on the candidate point.
        mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z)
        mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
        ## Move the box's corner-origin so its center sits at the origin ...
        mat_trans = np.eye((4))
        mat_trans[0, 3] = -box_w_x / 2
        mat_trans[1, 3] = -box_w_y / 2
        mat_trans[2, 3] = -box_w_z / 2
        mesh_box.transform(mat_trans)
        ## ... then translate the center onto the candidate point.
        mat_trans = np.eye((4))
        mat_trans[0, 3] = self.coord_i[ind_p, 0]
        mat_trans[1, 3] = self.coord_i[ind_p, 1]
        mat_trans[2, 3] = self.coord_i[ind_p, 2]
        mesh_box.transform(mat_trans)
        ## We store all boxes in "self.meshBox" for later visualization.
        self.meshBox.append(mesh_box)
        ## Sampling points in the box: keep points within the circumscribed
        ## sphere of the (cubified) box.
        box_w = max(box_w_x, box_w_y, box_w_z)
        thresh = math.sqrt(3) * box_w / 2
        q_point = self.coord_i[ind_p, :]
        q_point_arr = np.tile(q_point, (self.coord_i.shape[0], 1))
        dist_arr = q_point_arr - self.coord_i
        dist = np.linalg.norm(dist_arr, axis=1)
        box_p_ind = np.where(dist <= thresh)[0]
        ## Coordinates and FCGF features of the points inside the box.
        box_p = self.coord_i[box_p_ind, :]
        box_p_feat = self.feat_i[box_p_ind, :]
        ## Calling VLAD descriptor extractor.
        # Fix: the slice above is never None, so `is not None` was always True;
        # guard against an *empty* feature set instead (VLAD on zero rows
        # would fail).
        if box_p_feat.size > 0:
            ## Previously computed "self.visualDictionary" is used here.
            ## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD)
            v = VLAD(box_p_feat, self.visualDictionary)
            self.descriptorsVLAD.append(v)
        # self.idBox.append(ind_p)
        # self.descriptorFCGF.append(box_p_feat)
        # self.pointCoords.append(box_p)
    self.descriptorsVLAD = np.asarray(self.descriptorsVLAD)
    # self.No_box = len(self.idBox)
def computeIndexBallTree(self):
    """Build the ball-tree search index over the per-box VLAD descriptors."""
    descriptors = self.descriptorsVLAD
    leaf_size = self.leafSize
    self.tree = indexBallTree(descriptors, leaf_size)
##
# Inputs:
# boxId: Index of the Query Box
# k_NN: k Nearest Neighbor
##
def query(self, boxId, k_NN):
    """Interactively retrieve the k_NN boxes most similar to box ``boxId``.

    Repeats until the user answers 'N': computes the query box's VLAD
    descriptor, queries the ball tree, and draws the query box (black) on
    the original cloud plus the matches (blue) on a translated copy.

    NOTE(review): this indexes ``self.descriptorFCGF``, but the appends that
    populate it are commented out in the *_given_BB extractors — confirm a
    code path fills it before calling this method.
    """
    self.k_retrieve = k_NN
    ## Initialization - Computation of Colored Point Cloud Based on FCGF Features
    ## Duplication of "pcd_i" point cloud; matched boxes are shown on the copy.
    #pcd_match = points_2_pointcloud(self.pcd_i.points)
    ## DEBUG read mesh instead of point cloud
    if self.input_type == 'mesh':
        self.pcd_i = o3d.io.read_triangle_mesh(self.path_to_pcd)
        self.pcd_i.compute_vertex_normals()
        pcd_match = o3d.io.read_triangle_mesh(self.path_to_pcd)
        pcd_match.compute_vertex_normals()
    elif self.input_type == 'pcd':
        self.pcd_i = o3d.io.read_point_cloud(self.path_to_pcd)
        pcd_match = o3d.io.read_point_cloud(self.path_to_pcd)
    #o3d.visualization.draw_geometries([pcd_match])
    ## DEBUG
    ## Translate pcd_match to the right (x += 15) so both clouds are visible
    ## side by side in one window.
    mat_trans = np.eye(4)
    mat_trans[0, 3] = 15.0  #3.0 # 4.0
    mat_trans[1, 3] = 0
    mat_trans[2, 3] = 0
    pcd_match.transform(mat_trans)
    ## We used point cloud coloring based on FCGF features (as in the FCGF
    ## paper); the colored-spheres variant is currently disabled and the raw
    ## geometry is shown in both branches.
    if self.visualization:
        #spheres_i = get_colored_point_cloud_feature(self.pcd_i, self.feat_i, self.voxel_size)
        #spheres_match_i = get_colored_point_cloud_feature(pcd_match, self.feat_i, self.voxel_size)
        spheres_i = self.pcd_i
        spheres_match_i = pcd_match
    else:
        spheres_i = self.pcd_i
        spheres_match_i = pcd_match
    ## TODO: interactive box searching
    ## Loop until the user stops the search (or immediately once when
    ## visualization is off).
    while(self.isSearchAvaliable):
        ## Fetching the feature vector of the box, which is previously computed.
        queryBox_descriptor_FGCF = self.descriptorFCGF[boxId]
        v = VLAD(queryBox_descriptor_FGCF, self.visualDictionary)
        v = v.reshape(1, -1)  # ball tree expects a 2-D (1, dim) query
        # find the k most relevant boxes using the previously generated ball tree
        dist, ind = self.tree.query(v, self.k_retrieve)
        ## Initialization of visualization - list of geometries for one scene.
        visual_list = []
        visual_list.append(spheres_i)
        visual_list.append(spheres_match_i)
        # Draw the box - Query (colored black)
        mesh_box_vertices_query = self.meshBox[boxId]
        lines_set_query_box = convertMeshBox2LineBox(mesh_box_vertices_query, self.color_dict["black"])
        visual_list.append(lines_set_query_box)
        ## Iterate through nearest-neighbor matches and draw each (blue) box
        ## on the translated copy of the point cloud.
        for ind_match in ind[0]:
            ## Draw the box - Match (deep copy so the stored box is not moved)
            mesh_box_vertices_match = copy.deepcopy(self.meshBox[ind_match])
            mesh_box_vertices_match.transform(mat_trans)
            lines_set_match_box = convertMeshBox2LineBox(mesh_box_vertices_match, self.color_dict["blue"])
            visual_list.append(lines_set_match_box)
        if self.visualization:
            visualize_point_cloud(visual_list)
            # Console dialog: continue with a new box id, or stop.
            decision = input('Do you want to continue to searching another box? Y or N? \n')
            if decision.capitalize() == 'Y':
                selected_boxId = input('Select boxId for another query search between 0 and {} \n'.format(self.No_box))
                boxId = int(selected_boxId)
                print('Another query search is started using boxId = {} \n'.format(boxId))
            elif decision.capitalize() == 'N':
                self.isSearchAvaliable = False
            else:
                # Any other answer: repeat the search with the same box id.
                print('Another query search is started using boxId = {} \n'.format(boxId))
        else:
            self.isSearchAvaliable = False
def query_given_BB(self, boxId, k_NN, feat_extractor):
    """Interactively retrieve the boxes most similar to the user-cropped
    query bounding box (``self.path_query_pcd``).

    The query descriptor is the VLAD of the FCGF features of the scene
    points that coincide with the query geometry's vertices.  Matches are
    drawn (blue) on a copy of the cloud translated to the right; overlapping
    match boxes are suppressed, and ``self.k_retrieve`` grows until k_NN
    non-overlapping matches are shown.

    :param boxId: box index, only used in the console messages here.
    :param k_NN: number of non-overlapping matches to display.
    :param feat_extractor: FCGF feature extractor, forwarded on recursion.
    """
    self.k_retrieve = k_NN
    ## Duplication of "pcd_i" point cloud; matched boxes are shown on the copy.
    #pcd_match = points_2_pointcloud(self.pcd_i.points)
    if self.input_type == 'mesh':
        self.pcd_i = o3d.io.read_triangle_mesh(self.path_to_pcd)
        self.pcd_i.compute_vertex_normals()
        pcd_match = o3d.io.read_triangle_mesh(self.path_to_pcd)
        pcd_match.compute_vertex_normals()
    elif self.input_type == 'pcd':
        self.pcd_i = o3d.io.read_point_cloud(self.path_to_pcd)
        pcd_match = o3d.io.read_point_cloud(self.path_to_pcd)
    #o3d.visualization.draw_geometries([pcd_match])
    ## Horizontal separation between the two clouds: 1.5 x the cloud's x-extent.
    dummy_box = pcd_match.get_axis_aligned_bounding_box()
    box_w_max = dummy_box.max_bound
    box_w_min = dummy_box.min_bound
    self.pcd_apart = 1.5 * abs(box_w_max[0] - box_w_min[0])
    ## Translate pcd_match to the right for side-by-side visualization.
    mat_trans = np.eye(4)
    mat_trans[0, 3] = self.pcd_apart  #3.5 #3.0 # 4.0
    mat_trans[1, 3] = 0
    mat_trans[2, 3] = 0
    pcd_match.transform(mat_trans)
    ## FCGF-based coloring (as in the FCGF paper) is currently disabled;
    ## both branches show the raw geometry.
    if self.visualization:
        #spheres_i = get_colored_point_cloud_feature(self.pcd_i, self.feat_i, self.voxel_size)
        #spheres_match_i = get_colored_point_cloud_feature(pcd_match, self.feat_i, self.voxel_size)
        spheres_i = self.pcd_i
        spheres_match_i = pcd_match
    else:
        spheres_i = self.pcd_i
        spheres_match_i = pcd_match
    ## Loop until the user stops the search (or once when visualization is off).
    while(self.isSearchAvaliable):
        ## TODO: extract FCGF feature for query point cloud here
        #target_folder_path = os.path.dirname(os.path.abspath(self.path_query_pcd))
        #file_path_query_feat_i = feat_extractor.extract(self.path_query_pcd, target_folder_path)
        #data_query_i = np.load(file_path_query_feat_i)
        #query_coord_i, query_points_i, query_feat_i = data_query_i['xyz'], data_query_i['points'], data_query_i['feature']
        #query_pcd_i = points_2_pointcloud(query_coord_i)
        # Match query vertices to scene points by exact coordinate equality.
        pcd_in_query = o3d.io.read_triangle_mesh(self.path_query_pcd)
        BB_pcd_in_query = pcd_in_query.get_axis_aligned_bounding_box()
        box_p_query_ind = []
        for p_q in pcd_in_query.vertices:
            index_pos = np.where((self.coord_i[:, 0] == p_q[0]) & (self.coord_i[:, 1] == p_q[1]) & (self.coord_i[:, 2] == p_q[2]))
            # Fix: `if index_pos[0]:` truth-tested the index array itself,
            # which is False when the (single) matching index is 0 and raises
            # ValueError for multiple matches.  Test for a non-empty match.
            if index_pos[0].size > 0:
                box_p_query_ind.append(index_pos[0])
        ## FCGF features of the scene points inside the query box.
        box_p_query_ind = np.array(box_p_query_ind)[:, 0]
        box_p_query_feat = self.feat_i[box_p_query_ind, :]
        ## Fetching the feature vector of the (query) box.
        queryBox_descriptor_FGCF = box_p_query_feat  #self.descriptorFCGF[boxId]
        #queryBox_descriptor_FGCF = query_feat_i
        v = VLAD(queryBox_descriptor_FGCF, self.visualDictionary)
        v = v.reshape(1, -1)  # ball tree expects a 2-D (1, dim) query
        ## Grow k_retrieve until enough non-overlapping matches are collected.
        search_continue = True
        while search_continue:
            # find the k most relevant boxes using the previously generated ball tree
            dist, ind = self.tree.query(v, self.k_retrieve)
            ## Geometries to render for this round.
            visual_list = []
            visual_list.append(spheres_i)
            visual_list.append(spheres_match_i)
            # Draw the box - Query (its axis-aligned bounding box)
            visual_list.append(BB_pcd_in_query)
            ## Iterate through nearest-neighbor matches; keep a match only if
            ## its (untranslated) box does not intersect any previously kept one.
            mesh_box_stack = []
            tmp_cnt = 0
            for ind_match in ind[0]:
                # Init
                IoU = False
                ## Deep copy so the stored box is not moved by the transform below.
                mesh_box_vertices_match = copy.deepcopy(self.meshBox[ind_match])
                if tmp_cnt == 0:
                    # First match is always kept.
                    mesh_box_stack.append(copy.deepcopy(mesh_box_vertices_match))
                    mesh_box_vertices_match.transform(mat_trans)
                    lines_set_match_box = convertMeshBox2LineBox(mesh_box_vertices_match, self.color_dict["blue"])
                    visual_list.append(lines_set_match_box)
                ## Overlap suppression against every previously kept box.
                if tmp_cnt > 0:
                    #IoU = mesh_box_stack[-1].is_intersecting(mesh_box_vertices_match)
                    for m_tmp in mesh_box_stack:
                        IoU_t = m_tmp.is_intersecting(mesh_box_vertices_match)
                        IoU = IoU or IoU_t
                    if not IoU:
                        mesh_box_stack.append(copy.deepcopy(mesh_box_vertices_match))
                        mesh_box_vertices_match.transform(mat_trans)
                        lines_set_match_box = convertMeshBox2LineBox(mesh_box_vertices_match, self.color_dict["blue"])
                        visual_list.append(lines_set_match_box)
                        #visual_list_tmp = visual_list.copy()
                        #visual_list_tmp.append(lines_set_match_box)
                        #visualize_point_cloud(visual_list_tmp)
                tmp_cnt = tmp_cnt + 1
            #print('len(visual_list) = ', len(visual_list))
            #print('self.k_retrieve = ', self.k_retrieve)
            #print('k_NN = ', k_NN)
            # visual_list holds 3 base geometries + one per kept match.
            # NOTE(review): `k_NN + 2` therefore stops at k_NN - 1 kept
            # matches — confirm whether the base count of 3 was intended.
            if len(visual_list) >= (k_NN + 2):
                search_continue = False
            else:
                self.k_retrieve = self.k_retrieve + 10
        if self.visualization:
            visualize_point_cloud(visual_list)
            decision = input('Do you want to continue to searching another box? Y or N? \n')
            if decision.capitalize() == 'Y':
                #selected_boxId = input('Select boxId for another query search between 0 and {} \n'.format(self.No_box))
                #boxId = int(selected_boxId)
                ## Let the user crop a new query bounding box.
                if self.input_type == 'pcd':
                    # pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i)
                    pcd_in = o3d.io.read_point_cloud(self.path_to_pcd)
                    # pcd_in.compute_vertex_normals()
                    # o3d.visualization.draw_geometries([pcd_in])
                    demo_crop_geometry(pcd_in)
                elif self.input_type == 'mesh':
                    pcd_in = o3d.io.read_triangle_mesh(self.path_to_pcd)
                    pcd_in.compute_vertex_normals()
                    demo_crop_geometry(pcd_in)
                k_retrieve_new = input('Number of Search Results Shown \n')
                self.k_retrieve = int(k_retrieve_new)
                ## Extract VLAD descriptors for the newly selected BB, then recurse.
                # NOTE(review): the ball tree is NOT rebuilt here
                # (computeIndexBallTree is not called), so the recursive query
                # still searches the old index — confirm intent.
                self.extractBoxes_VLADdesc_given_BB()
                self.query_given_BB(boxId, k_NN, feat_extractor)
                print('Another query search is started using boxId = {} \n'.format(boxId))
            elif decision.capitalize() == 'N':
                self.isSearchAvaliable = False
            else:
                print('Another query search is started using boxId = {} \n'.format(boxId))
        else:
            self.isSearchAvaliable = False
def pick_points(pcd):
    """Open an interactive Open3D editing window on *pcd* and return the
    indices of the points the user picked (shift + left click)."""
    print("")
    print(
        "1) Please pick at least three correspondences using [shift + left click]"
    )
    print(" Press [shift + right click] to undo point picking")
    print("2) Afther picking points, press q for close the window")
    picker = o3d.visualization.VisualizerWithEditing()
    picker.create_window()
    picker.add_geometry(pcd)
    # Blocks until the user closes the window with 'q'.
    picker.run()
    picker.destroy_window()
    print("")
    return picker.get_picked_points()
def demo_crop_geometry(pcd):
    """Show Open3D's built-in editing window so the user can crop *pcd*
    manually and save the selection."""
    instructions = (
        "Demo for manual geometry cropping",
        "1) Press 'Y' twice to align geometry with negative direction of y-axis",
        "2) Press 'K' to lock screen and to switch to selection mode",
        "3) Drag for rectangle selection,",
        " or use ctrl + left click for polygon selection",
        "4) Press 'C' to get a selected geometry and to save it",
        "5) Press 'F' to switch to freeview mode",
    )
    for line in instructions:
        print(line)
    #pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd")
    o3d.visualization.draw_geometries_with_editing([pcd])
def main(args):
    """Run the 3D search pipeline: let the user pick/crop an input cloud,
    extract FCGF features, build the VLAD visual dictionary and ball-tree
    index, then start the interactive box query.

    :param args: argparse.Namespace with path_feature, k_nearest_neighbor
        and visualization (path_pointcloud is currently superseded by a
        file dialog — see NOTE below).
    """
    ## Reading the arguments
    args = vars(args)
    #PATH_PCD = args["path_pointcloud"]
    #PATH_PCD = "/home/akin/workspace/All_Data/Indoor_Lidar_RGBD_Scan_Dataset/Apartment/Reconstruction/ours_apartment/apartment.ply"
    #PATH_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_7_frag.ply"
    #PATH_QUERY_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_7.ply"
    #PATH_PCD = "/home/akin/workspace/All_Data/Tanks_and_Templates/Caterpillar/GT/Caterpillar.ply"
    # NOTE(review): these hard-coded developer paths are immediately
    # overridden below (PATH_PCD by the file dialog) — dead defaults.
    PATH_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_1.ply"
    PATH_QUERY_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_query.ply"
    ## User dialog for input file (replaces PATH_PCD and the -p argument).
    PATH_PCD = tkinter.filedialog.askopenfilename()
    ## Run-mode switches (hard-coded).
    input_type = 'pcd'  #'mesh'
    selection_tool = 1  # 1: let the user crop a query region first
    FCGF_vis = 0  #False  # 1: only visualize FCGF coloring, then exit
    PATH_FEATURE = args["path_feature"]
    k_NN = args["k_nearest_neighbor"]
    isVisualizationON = bool(args["visualization"])
    file_path_pcd_i = PATH_PCD
    file_path_query_pcd = PATH_QUERY_PCD
    # NOTE(review): overwritten further below by feat_extractor.extract(...),
    # so the -f argument value is effectively unused.
    file_path_feat_i = PATH_FEATURE
    ## TODO: EXP: Bounding box of the 3D geometry
    '''
    pcd_in = o3d.io.read_triangle_mesh(file_path_query_pcd)
    pcd_in.compute_vertex_normals()
    dummy_box = pcd_in.get_axis_aligned_bounding_box()
    #o3d.visualization.draw_geometries([dummy_box, pcd_in])
    ## DEBUG
    center_bb = dummy_box.get_center()
    box_w = 0.9
    box_scale = 1.2 #0.5
    box_w_max = dummy_box.max_bound
    box_w_min = dummy_box.min_bound
    box_w_x = box_scale*abs(box_w_max[0] - box_w_min[0])
    box_w_y = box_scale*abs(box_w_max[1] - box_w_min[1])
    box_w_z = box_scale*abs(box_w_max[2] - box_w_min[2])
    ## Creation of a box
    mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z)
    mesh_box.paint_uniform_color([0.9, 0.1, 0.1])
    ## Locate center of box to the origin
    mat_trans = np.eye((4))
    mat_trans[0, 3] = -box_w_x / 2
    mat_trans[1, 3] = -box_w_y / 2
    mat_trans[2, 3] = -box_w_z / 2
    mesh_box.transform(mat_trans)
    ## Locate center of box to the point location
    mat_trans = np.eye((4))
    mat_trans[0, 3] = center_bb[0]
    mat_trans[1, 3] = center_bb[1]
    mat_trans[2, 3] = center_bb[2]
    mesh_box.transform(mat_trans)
    line_set_dummy = convertMeshBox2LineBox(mesh_box, [1,0,0])
    o3d.visualization.draw_geometries([dummy_box, pcd_in, line_set_dummy])
    ## DEBUG
    '''
    ##
    ## Volume selection tool: let the user crop the query region interactively.
    if selection_tool:
        if input_type == 'pcd':
            #pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i)
            pcd_in = o3d.io.read_point_cloud(file_path_pcd_i)
            #pcd_in.compute_vertex_normals()
            #o3d.visualization.draw_geometries([pcd_in])
            demo_crop_geometry(pcd_in)
        elif input_type == 'mesh':
            pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i)
            pcd_in.compute_vertex_normals()
            demo_crop_geometry(pcd_in)
        #sys.exit()
    ##
    ## Feature extraction: compute FCGF features next to the input file.
    # NOTE(review): hard-coded checkpoint path — machine-specific.
    model_path = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/outputs_trained_models/checkpoint.pth"
    feat_extractor = FeatureExtractor(model_path)
    target_folder_path = os.path.dirname(os.path.abspath(file_path_pcd_i))
    file_path_feat_i = feat_extractor.extract(PATH_PCD, target_folder_path)
    ##
    ## Optional debug mode: visualize input point cloud FCGF features, then exit.
    if FCGF_vis:
        data_i = np.load(file_path_feat_i)
        coord_i, points_i, feat_i = data_i['xyz'], data_i['points'], data_i['feature']
        pcd_i = o3d.io.read_point_cloud(file_path_pcd_i)
        pcd_match = points_2_pointcloud(coord_i)
        voxel_size = 0.05  #0.025
        pcd_in_FCGF = get_colored_point_cloud_feature(pcd_match, feat_i, voxel_size)
        file_name_FCGF_folder = os.path.dirname(os.path.abspath(file_path_query_pcd))
        file_name_FCGF_name = os.path.basename(file_path_query_pcd)
        file_name_FCGF_name = os.path.splitext(file_name_FCGF_name)[0]
        file_name_FCGF = file_name_FCGF_folder + "/" + file_name_FCGF_name + "_FCGF_" + str(voxel_size) + ".ply"
        o3d.io.write_triangle_mesh(file_name_FCGF, pcd_in_FCGF)
        o3d.visualization.draw_geometries([pcd_in_FCGF])
        sys.exit()
    '''
    ## TODO: Visualize input point cloud FCGF features - Query
    feat_extractor = FeatureExtractor(model_path)
    target_query_folder_path = os.path.dirname(os.path.abspath(file_path_query_pcd))
    file_path_query_feat_i = feat_extractor.extract(file_path_query_pcd, target_query_folder_path)
    data_i = np.load(file_path_query_feat_i)
    coord_i, points_i, feat_i = data_i['xyz'], data_i['points'], data_i['feature']
    pcd_i = o3d.io.read_point_cloud(file_path_query_pcd)
    pcd_match = points_2_pointcloud(coord_i)
    voxel_size = 0.025
    pcd_in_FCGF = get_colored_point_cloud_feature(pcd_match, feat_i, voxel_size)
    o3d.visualization.draw_geometries([pcd_in_FCGF])
    sys.exit()
    #
    '''
    # Sanity check: both the cloud and the feature file must exist.
    if os.path.isfile(file_path_pcd_i)==0 or os.path.isfile(file_path_feat_i)==0:
        print('ERROR - Missing Files - Check Files ')
        print('Point cloud file = ', file_path_pcd_i)
        print('Feature file = ', file_path_feat_i)
        sys.exit()
    ## Start timer (only meaningful without the interactive visualization).
    if not isVisualizationON:
        start_time = time.time()
    ###
    # "Search_3D" Class Instance Generation
    ###
    s3d = Search3D(file_path_pcd_i, file_path_query_pcd, file_path_feat_i, isVisualizationON, input_type)
    ###
    # Visual Dictionary Generation
    ###
    print('Computing Visual Dictionary')
    s3d.computeVisualDictionary()
    ###
    # VLAD Descriptor Extraction
    ###
    print('Computing VLAD Descriptors')
    #s3d.extractBoxes_VLADdesc()
    #s3d.extractBoxes_VLADdesc_given_BB()
    s3d.extractBoxes_VLADdesc_given_BB_multhread()
    ###
    # IndexBallTree Generation
    ###
    print('Generating of IndexBallTree')
    s3d.computeIndexBallTree()
    ###
    # Query Search
    ###
    print('Search Box Query in Point Cloud')
    boxId = 0
    #s3d.query(boxId, k_NN)
    s3d.query_given_BB(boxId, k_NN, feat_extractor)
    ## End timer
    if not isVisualizationON:
        execution_time = time.time() - start_time
        print('execution time = %.2f seconds' % execution_time)
if __name__ == "__main__":
    # Execute only if run as a script: parse CLI arguments, echo them, run.
    parser = argparse.ArgumentParser(description='Search 3D Application')
    # Dataset setting
    parser.add_argument("-p", "--path_pointcloud", required=True,
                        help="Path of the point cloud file")
    parser.add_argument("-f", "--path_feature", required=True,
                        help="Path of the FCGF feature file associated the input point cloud")
    parser.add_argument("-k", "--k_nearest_neighbor", default=3, type=int, required=True,
                        help="k nearest neighbor matches are computed in the program")
    parser.add_argument("-v", "--visualization", default=1, type=int, required=True,
                        help="Visualization Flag")
    args = parser.parse_args()
    print('Input Arguments:')
    for arg in vars(args):
        print("\t {} -> {}".format(arg, getattr(args, arg)))
    print('Search_3D is running')
    # Fix: reuse the namespace parsed above instead of calling
    # parser.parse_args() a second time.
    main(args)
## TODO: add args. | en | 0.525093 | ## Visualization is taken from "https://github.com/chrischoy/FCGF ## VLAD library is from "https://github.com/jorjasso/VLAD" #colors = [[0.5, 0.5, 0.5] for i in range(len(pcd.points))] #pcd.colors = o3d.utility.Vector3dVector(colors) #ctr = vis.get_view_control() #print("Field of view (before changing) %.2f" % ctr.get_field_of_view()) #ctr.change_field_of_view(step=fov_step) #print("Field of view (after changing) %.2f" % ctr.get_field_of_view()) ## TODO: ## 1-> json cmaera parameters change H,W ## 2-> add screen capture feature #vis.get_render_option().load_from_json("./renderoption.json") ##colors = [[1, 0, 0] for i in range(len(lines))] # Path to point cloud file # Path to feature file #2 #16 # no. of visual words used for VisualDictionary Generation #100 #300 #30 #100 # leafsize for "indexBallTree" # number of retrieved box #0.5 ## For each box in the point cloud, VLAD descriptors are computed. #for ind_p in list(range(0, 10000,self.sample_step_size)): ## Create mesh_box - experiment ## Box width, this value is computed considering the calibration of datasets in '3DMatch' repository ## Creation of a box ## Locate center of box to the origin ## Locate center of box to the point location ## We store the all boxes in a list named "self.meshBox" ## Sampling Points in the Box: ## Container for coordinates of points in the Box ## Container for FCGF features of points in the Box ## Calling VLAD descriptor extractor ## Previously computed "self.visualDictionary" is used here ## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD) ## # With given bounding box, search boxes are being modified ## ## DEBUG # 0.5 ## DEBUG ## For each box in the point cloud, VLAD descriptors are computed. 
##for ind_p in list(range(0, self.coord_i.shape[0],self.sample_step_size)): #for ind_p in list(range(0, 10000,self.sample_step_size)): ## Create mesh_box - experiment ## Box width, this value is computed considering the calibration of datasets in '3DMatch' repository #box_w = 0.2 ## DEBUG ## Creation of a box ## Locate center of box to the origin ## DEBUG ## Creation of a box mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w, height=box_w, depth=box_w) mesh_box.paint_uniform_color([0.9, 0.1, 0.1]) ## Locate center of box to the origin mat_trans = np.eye((4)) mat_trans[0, 3] = -box_w/2 mat_trans[1, 3] = -box_w/2 mat_trans[2, 3] = -box_w/2 mesh_box.transform(mat_trans) ## Locate center of box to the point location ## We store the all boxes in a list named "self.meshBox" ## Sampling Points in the Box: ## Container for coordinates of points in the Box ## Container for FCGF features of points in the Box ## Calling VLAD descriptor extractor ## Previously computed "self.visualDictionary" is used here ## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD) #self.idBox.append(ind_p) #self.descriptorFCGF.append(box_p_feat) #self.pointCoords.append(box_p) #self.No_box = len(self.idBox) ## # With multi thread ## # 0.5 ## Find FCGF features wihin the query bounding box ## Container for FCGF features of points in the query Box #0.5 #0.2 * (med_dist_feat + min_dist_feat) #for ind_p in tqdm(range(0, self.coord_i.shape[0], self.sample_step_size)): ## TODO: Outlier rejection ## Description: We want to compute vlad descriptors for only the 3D points of similar FCG features ## DEBUG ## DEBUG ## Create mesh_box - experiment ## Creation of a box ## Locate center of box to the origin ## Locate center of box to the point location ## We store the all boxes in a list named "self.meshBox" ## Sampling Points in the Box: ## Container for coordinates of points in the Box ## Container for FCGF features of points in the Box ## Calling VLAD descriptor extractor ## 
Previously computed "self.visualDictionary" is used here ## VLAD function is from VLAD library (https://github.com/jorjasso/VLAD) # self.idBox.append(ind_p) # self.descriptorFCGF.append(box_p_feat) # self.pointCoords.append(box_p) # self.No_box = len(self.idBox) ## # Inputs: # boxId: Index of the Query Box # k_NN: k Nearest Neighbor ## ## Initialization - Computation of Colored Point Cloud Based on FCGF Features ## Duplication of "pcd_i"point cloud ## We show matched boxes on this point cloud #pcd_match = points_2_pointcloud(self.pcd_i.points) ## DEBUG read mesh instead of point cloud #o3d.visualization.draw_geometries([pcd_match]) ## DEBUG ## Translate pcd match to the right for visualization #3.0 # 4.0 ## We used point cloud coloring based on FCGF features ## This coloring is also used in FCGF paper #spheres_i = get_colored_point_cloud_feature(self.pcd_i, self.feat_i, self.voxel_size) #spheres_match_i = get_colored_point_cloud_feature(pcd_match, self.feat_i, self.voxel_size) ## TODO: interactive box searching ## How many boxes we have. 
## Fetching the feature vector of the box, which is previously computed # find the k most relevant images # using previously generated "balltree" ## Initialization of Visuzation - Empty open3D Scene # Draw the box - Query ## Matched box is colored in black ## Iteration through neaarest neighor matches ## and draw each box on the point cloud ## Draw the box - Match ## Matched box is colored in blue ## Initialization - Computation of Colored Point Cloud Based on FCGF Features ## Duplication of "pcd_i"point cloud ## We show matched boxes on this point cloud #pcd_match = points_2_pointcloud(self.pcd_i.points) ## DEBUG read mesh instead of point cloud #o3d.visualization.draw_geometries([pcd_match]) ## Distance between point clouds ## Distance between point clouds ## DEBUG ## Translate pcd match to the right for visualization #3.5 #3.0 # 4.0 ## We used point cloud coloring based on FCGF features ## This coloring is also used in FCGF paper #spheres_i = get_colored_point_cloud_feature(self.pcd_i, self.feat_i, self.voxel_size) #spheres_match_i = get_colored_point_cloud_feature(pcd_match, self.feat_i, self.voxel_size) ## TODO: interactive box searching ## How many boxes we have. 
## TODO: extract FCGF feature for query point cloud here #target_folder_path = os.path.dirname(os.path.abspath(self.path_query_pcd)) #file_path_query_feat_i = feat_extractor.extract(self.path_query_pcd, target_folder_path) #data_query_i = np.load(file_path_query_feat_i) #query_coord_i, query_points_i, query_feat_i = data_query_i['xyz'], data_query_i['points'], data_query_i['feature'] #query_pcd_i = points_2_pointcloud(query_coord_i) ## DEBUG ## Container for FCGF features of points in the query Box ## DEBUG # ## Fetching the feature vector of the box, which is previously computed #self.descriptorFCGF[boxId] #queryBox_descriptor_FGCF = query_feat_i ## DEBUG - kretrieve # find the k most relevant images # using previously generated "balltree" ## Initialization of Visuzation - Empty open3D Scene # Draw the box - Query ## Iteration through neaarest neighor matches ## and draw each box on the point cloud # Init ## Draw the box - Match ## Matched box is colored in blue ## TODO: Compare matched mesh boxes wrt Intersection over Union (IoU) #IoU = mesh_box_stack[-1].is_intersecting(mesh_box_vertices_match) ## Matched box is colored in blue #visual_list_tmp = visual_list.copy() #visual_list_tmp.append(lines_set_match_box) #visualize_point_cloud(visual_list_tmp) #print('len(visual_list) = ', len(visual_list)) #print('self.k_retrieve = ', self.k_retrieve) #print('k_NN = ', k_NN) ## DEBUG - kretrieve #selected_boxId = input('Select boxId for another query search between 0 and {} \n'.format(self.No_box)) #boxId = int(selected_boxId) ## Select A Bounding Box Again # pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i) # pcd_in.compute_vertex_normals() # o3d.visualization.draw_geometries([pcd_in]) ## DEBUG ## DEBUG ## Extract Vlad Descriptors given new =ly selected BB # user picks points #pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") ## Reading the arguments #PATH_PCD = args["path_pointcloud"] #PATH_PCD = 
"/home/akin/workspace/All_Data/Indoor_Lidar_RGBD_Scan_Dataset/Apartment/Reconstruction/ours_apartment/apartment.ply" #PATH_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_7_frag.ply" #PATH_QUERY_PCD = "/home/akin/workspace/workspace_applications/Deep_3D_Search/FCGF_submit/Search_3D/query_pcd/cropped_7.ply" #PATH_PCD = "/home/akin/workspace/All_Data/Tanks_and_Templates/Caterpillar/GT/Caterpillar.ply" ## User dialog for input file ## #'mesh' #False ## TODO: EXP: Bounding box of the 3D geometry pcd_in = o3d.io.read_triangle_mesh(file_path_query_pcd) pcd_in.compute_vertex_normals() dummy_box = pcd_in.get_axis_aligned_bounding_box() #o3d.visualization.draw_geometries([dummy_box, pcd_in]) ## DEBUG center_bb = dummy_box.get_center() box_w = 0.9 box_scale = 1.2 #0.5 box_w_max = dummy_box.max_bound box_w_min = dummy_box.min_bound box_w_x = box_scale*abs(box_w_max[0] - box_w_min[0]) box_w_y = box_scale*abs(box_w_max[1] - box_w_min[1]) box_w_z = box_scale*abs(box_w_max[2] - box_w_min[2]) ## Creation of a box mesh_box = o3d.geometry.TriangleMesh.create_box(width=box_w_x, height=box_w_y, depth=box_w_z) mesh_box.paint_uniform_color([0.9, 0.1, 0.1]) ## Locate center of box to the origin mat_trans = np.eye((4)) mat_trans[0, 3] = -box_w_x / 2 mat_trans[1, 3] = -box_w_y / 2 mat_trans[2, 3] = -box_w_z / 2 mesh_box.transform(mat_trans) ## Locate center of box to the point location mat_trans = np.eye((4)) mat_trans[0, 3] = center_bb[0] mat_trans[1, 3] = center_bb[1] mat_trans[2, 3] = center_bb[2] mesh_box.transform(mat_trans) line_set_dummy = convertMeshBox2LineBox(mesh_box, [1,0,0]) o3d.visualization.draw_geometries([dummy_box, pcd_in, line_set_dummy]) ## DEBUG ## ## TODO: Add volume selection tool for the user here #pcd_in = o3d.io.read_triangle_mesh(file_path_pcd_i) #pcd_in.compute_vertex_normals() #o3d.visualization.draw_geometries([pcd_in]) #sys.exit() ## ## TODO: Add feature extraction module here ## ## TODO: Visualize 
input point cloud FCGF features #0.025 ## TODO: Visualize input point cloud FCGF features - Query feat_extractor = FeatureExtractor(model_path) target_query_folder_path = os.path.dirname(os.path.abspath(file_path_query_pcd)) file_path_query_feat_i = feat_extractor.extract(file_path_query_pcd, target_query_folder_path) data_i = np.load(file_path_query_feat_i) coord_i, points_i, feat_i = data_i['xyz'], data_i['points'], data_i['feature'] pcd_i = o3d.io.read_point_cloud(file_path_query_pcd) pcd_match = points_2_pointcloud(coord_i) voxel_size = 0.025 pcd_in_FCGF = get_colored_point_cloud_feature(pcd_match, feat_i, voxel_size) o3d.visualization.draw_geometries([pcd_in_FCGF]) sys.exit() # ## Start timer ### # "Search_3D" Class Instance Generation ### ### # Visual Dictionary Generation ### ### # VLAD Decriptor Extractor ### #s3d.extractBoxes_VLADdesc() #s3d.extractBoxes_VLADdesc_given_BB() ### # IndexballTree Generation ### ### # Query Search ### #s3d.query(boxId, k_NN) ## end timer # execute only if run as a script # Dataset setting ## TODO: add args. | 2.317513 | 2 |
users/urls.py | HamidRezaSaad/ToDo | 0 | 6619492 | <filename>users/urls.py<gh_stars>0
from django.urls import path
from . import views
app_name = "users"
urlpatterns = [
path("signup/", views.signup, name="signup"),
path("login/", views.login_request, name="login"),
path("logout/", views.logout_request, name="logout"),
]
| <filename>users/urls.py<gh_stars>0
from django.urls import path
from . import views
app_name = "users"
urlpatterns = [
path("signup/", views.signup, name="signup"),
path("login/", views.login_request, name="login"),
path("logout/", views.logout_request, name="logout"),
]
| none | 1 | 1.954846 | 2 | |
yolox/models/yolo_pafpn.py | jie311/yolox_keypoint_segment | 16 | 6619493 | <reponame>jie311/yolox_keypoint_segment<filename>yolox/models/yolo_pafpn.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import torch
import torch.nn as nn
from .darknet import CSPDarknet
from .coatnet import coatnet_0, coatnet_2
from .network_blocks import BaseConv, CSPLayer, DWConv
class YOLOPAFPN(nn.Module):
"""
YOLOv3 model. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
img_channel=3,
depth=1.0,
width=1.0,
in_features=("dark3", "dark4", "dark5"),
in_channels=[256, 512, 1024],
depthwise=False,
act="silu",
backbone_name='CSPDarknet',
input_size=(320, 320)
):
super().__init__()
if backbone_name == 'CoAtNet':
self.backbone = coatnet_2(img_shape=input_size, img_channel=img_channel, dep_mul=depth,
wid_mul=width, out_features=in_features)
else:
self.backbone = CSPDarknet(img_channel, depth, width, depthwise=depthwise,
act=act, out_features=in_features)
self.in_features = in_features
self.in_channels = in_channels
Conv = DWConv if depthwise else BaseConv
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
self.lateral_conv0 = BaseConv(
int(in_channels[-1] * width), int(in_channels[-2] * width), 1, 1, act=act
)
self.C3_p4 = CSPLayer(
int(2 * in_channels[-2] * width),
int(in_channels[-2] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
) # cat
self.reduce_conv1 = BaseConv(
int(in_channels[-2] * width), int(in_channels[-3] * width), 1, 1, act=act
)
self.C3_p3 = CSPLayer(
int(2 * in_channels[-3] * width),
int(in_channels[-3] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
# bottom-up conv
self.bu_conv2 = Conv(
int(in_channels[-3] * width), int(in_channels[-3] * width), 3, 2, act=act
)
self.C3_n3 = CSPLayer(
int(2 * in_channels[-3] * width),
int(in_channels[-2] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
# bottom-up conv
self.bu_conv1 = Conv(
int(in_channels[-2] * width), int(in_channels[-2] * width), 3, 2, act=act
)
self.C3_n4 = CSPLayer(
int(2 * in_channels[-2] * width),
int(in_channels[-1] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
if len(self.in_channels) == 4:
self.reduce_conv2 = BaseConv(
int(in_channels[-3] * width), int(in_channels[-4] * width), 1, 1, act=act
)
self.C3_p2 = CSPLayer(
int(2 * in_channels[-4] * width),
int(in_channels[-4] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
self.bu_conv3 = Conv(
int(in_channels[-4] * width), int(in_channels[-4] * width), 3, 2, act=act
)
self.C3_n2 = CSPLayer(
int(2 * in_channels[-4] * width),
int(in_channels[-3] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
def forward(self, input):
"""
Args:
inputs: input images.
Returns:
Tuple[Tensor]: FPN feature.
"""
# backbone
out_features = self.backbone(input)
features = [out_features[f] for f in self.in_features]
if len(features) == 3:
[x2, x1, x0] = features # 尺寸从大到小
fpn_out0 = self.lateral_conv0(x0) # in:512,10,10 out:v,10,10
f_out0 = self.upsample(fpn_out0) # in:256,10,10 out:256,20,20
f_out0 = torch.cat([f_out0, x1], 1) # in:256,20,20 out:512,20,20
f_out0 = self.C3_p4(f_out0) # in:512,20,20 out:256,20,20
fpn_out1 = self.reduce_conv1(f_out0) # in:256,20,20 out:128,20,20
f_out1 = self.upsample(fpn_out1) # in:128,20,20 out:128,40,40
f_out1 = torch.cat([f_out1, x2], 1) # in::128,40,40 out:256,40,40
pan_out2 = self.C3_p3(f_out1) # in:256,40,40 out:128,40,40
p_out1 = self.bu_conv2(pan_out2) # in:128,40,40 out:128,20,20
p_out1 = torch.cat([p_out1, fpn_out1], 1) # int:128,20,20 out:256,20,20
pan_out1 = self.C3_n3(p_out1) # in:256,20,20 out:256,20,20
p_out0 = self.bu_conv1(pan_out1) # in:256,20,20 out:256,10,10
p_out0 = torch.cat([p_out0, fpn_out0], 1) # in:256,10,10 out:512,10,10
pan_out0 = self.C3_n4(p_out0) # in:512,10,10 out:512,10,10
outputs = (pan_out2, pan_out1, pan_out0)
else:
[x3, x2, x1, x0] = features # 尺寸从大到小
fpn_out0 = self.lateral_conv0(x0) # in:512,10,10 out:v,10,10
f_out0 = self.upsample(fpn_out0) # in:256,10,10 out:256,20,20
f_out0 = torch.cat([f_out0, x1], 1) # in:256,20,20 out:512,20,20
f_out0 = self.C3_p4(f_out0) # in:512,20,20 out:256,20,20
fpn_out1 = self.reduce_conv1(f_out0) # in:256,20,20 out:128,20,20
f_out1 = self.upsample(fpn_out1) # in:128,20,20 out:128,40,40
f_out1 = torch.cat([f_out1, x2], 1) # in::128,40,40 out:256,40,40
f_out1 = self.C3_p3(f_out1) # in:256,40,40 out:128,40,40
fpn_out2 = self.reduce_conv2(f_out1) # in:128,40,40 out:64,40,40
f_out2 = self.upsample(fpn_out2) # in:64,40,40 out:64,80,80
f_out2 = torch.cat([f_out2, x3], 1) # in::64,80,80 out:128,80,80
pan_out3 = self.C3_p2(f_out2) # in:128,80,80 out:64,80,80
p_out2 = self.bu_conv3(pan_out3) # in:64,80,80 out:64,40,40
p_out2 = torch.cat([p_out2, fpn_out2], 1) # int:64,40,40 out:128,40,40
pan_out2 = self.C3_n2(p_out2) # in:128,40,40 out:128,40,40
p_out1 = self.bu_conv2(pan_out2) # in:128,40,40 out:128,20,20
p_out1 = torch.cat([p_out1, fpn_out1], 1) # int:128,20,20 out:256,20,20
pan_out1 = self.C3_n3(p_out1) # in:256,20,20 out:256,20,20
p_out0 = self.bu_conv1(pan_out1) # in:256,20,20 out:256,10,10
p_out0 = torch.cat([p_out0, fpn_out0], 1) # in:256,10,10 out:512,10,10
pan_out0 = self.C3_n4(p_out0) # in:512,10,10 out:512,10,10
outputs = (pan_out3, pan_out2, pan_out1, pan_out0)
return outputs
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import torch
import torch.nn as nn
from .darknet import CSPDarknet
from .coatnet import coatnet_0, coatnet_2
from .network_blocks import BaseConv, CSPLayer, DWConv
class YOLOPAFPN(nn.Module):
"""
YOLOv3 model. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
img_channel=3,
depth=1.0,
width=1.0,
in_features=("dark3", "dark4", "dark5"),
in_channels=[256, 512, 1024],
depthwise=False,
act="silu",
backbone_name='CSPDarknet',
input_size=(320, 320)
):
super().__init__()
if backbone_name == 'CoAtNet':
self.backbone = coatnet_2(img_shape=input_size, img_channel=img_channel, dep_mul=depth,
wid_mul=width, out_features=in_features)
else:
self.backbone = CSPDarknet(img_channel, depth, width, depthwise=depthwise,
act=act, out_features=in_features)
self.in_features = in_features
self.in_channels = in_channels
Conv = DWConv if depthwise else BaseConv
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
self.lateral_conv0 = BaseConv(
int(in_channels[-1] * width), int(in_channels[-2] * width), 1, 1, act=act
)
self.C3_p4 = CSPLayer(
int(2 * in_channels[-2] * width),
int(in_channels[-2] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
) # cat
self.reduce_conv1 = BaseConv(
int(in_channels[-2] * width), int(in_channels[-3] * width), 1, 1, act=act
)
self.C3_p3 = CSPLayer(
int(2 * in_channels[-3] * width),
int(in_channels[-3] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
# bottom-up conv
self.bu_conv2 = Conv(
int(in_channels[-3] * width), int(in_channels[-3] * width), 3, 2, act=act
)
self.C3_n3 = CSPLayer(
int(2 * in_channels[-3] * width),
int(in_channels[-2] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
# bottom-up conv
self.bu_conv1 = Conv(
int(in_channels[-2] * width), int(in_channels[-2] * width), 3, 2, act=act
)
self.C3_n4 = CSPLayer(
int(2 * in_channels[-2] * width),
int(in_channels[-1] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
if len(self.in_channels) == 4:
self.reduce_conv2 = BaseConv(
int(in_channels[-3] * width), int(in_channels[-4] * width), 1, 1, act=act
)
self.C3_p2 = CSPLayer(
int(2 * in_channels[-4] * width),
int(in_channels[-4] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
self.bu_conv3 = Conv(
int(in_channels[-4] * width), int(in_channels[-4] * width), 3, 2, act=act
)
self.C3_n2 = CSPLayer(
int(2 * in_channels[-4] * width),
int(in_channels[-3] * width),
round(3 * depth),
False,
depthwise=depthwise,
act=act,
)
def forward(self, input):
"""
Args:
inputs: input images.
Returns:
Tuple[Tensor]: FPN feature.
"""
# backbone
out_features = self.backbone(input)
features = [out_features[f] for f in self.in_features]
if len(features) == 3:
[x2, x1, x0] = features # 尺寸从大到小
fpn_out0 = self.lateral_conv0(x0) # in:512,10,10 out:v,10,10
f_out0 = self.upsample(fpn_out0) # in:256,10,10 out:256,20,20
f_out0 = torch.cat([f_out0, x1], 1) # in:256,20,20 out:512,20,20
f_out0 = self.C3_p4(f_out0) # in:512,20,20 out:256,20,20
fpn_out1 = self.reduce_conv1(f_out0) # in:256,20,20 out:128,20,20
f_out1 = self.upsample(fpn_out1) # in:128,20,20 out:128,40,40
f_out1 = torch.cat([f_out1, x2], 1) # in::128,40,40 out:256,40,40
pan_out2 = self.C3_p3(f_out1) # in:256,40,40 out:128,40,40
p_out1 = self.bu_conv2(pan_out2) # in:128,40,40 out:128,20,20
p_out1 = torch.cat([p_out1, fpn_out1], 1) # int:128,20,20 out:256,20,20
pan_out1 = self.C3_n3(p_out1) # in:256,20,20 out:256,20,20
p_out0 = self.bu_conv1(pan_out1) # in:256,20,20 out:256,10,10
p_out0 = torch.cat([p_out0, fpn_out0], 1) # in:256,10,10 out:512,10,10
pan_out0 = self.C3_n4(p_out0) # in:512,10,10 out:512,10,10
outputs = (pan_out2, pan_out1, pan_out0)
else:
[x3, x2, x1, x0] = features # 尺寸从大到小
fpn_out0 = self.lateral_conv0(x0) # in:512,10,10 out:v,10,10
f_out0 = self.upsample(fpn_out0) # in:256,10,10 out:256,20,20
f_out0 = torch.cat([f_out0, x1], 1) # in:256,20,20 out:512,20,20
f_out0 = self.C3_p4(f_out0) # in:512,20,20 out:256,20,20
fpn_out1 = self.reduce_conv1(f_out0) # in:256,20,20 out:128,20,20
f_out1 = self.upsample(fpn_out1) # in:128,20,20 out:128,40,40
f_out1 = torch.cat([f_out1, x2], 1) # in::128,40,40 out:256,40,40
f_out1 = self.C3_p3(f_out1) # in:256,40,40 out:128,40,40
fpn_out2 = self.reduce_conv2(f_out1) # in:128,40,40 out:64,40,40
f_out2 = self.upsample(fpn_out2) # in:64,40,40 out:64,80,80
f_out2 = torch.cat([f_out2, x3], 1) # in::64,80,80 out:128,80,80
pan_out3 = self.C3_p2(f_out2) # in:128,80,80 out:64,80,80
p_out2 = self.bu_conv3(pan_out3) # in:64,80,80 out:64,40,40
p_out2 = torch.cat([p_out2, fpn_out2], 1) # int:64,40,40 out:128,40,40
pan_out2 = self.C3_n2(p_out2) # in:128,40,40 out:128,40,40
p_out1 = self.bu_conv2(pan_out2) # in:128,40,40 out:128,20,20
p_out1 = torch.cat([p_out1, fpn_out1], 1) # int:128,20,20 out:256,20,20
pan_out1 = self.C3_n3(p_out1) # in:256,20,20 out:256,20,20
p_out0 = self.bu_conv1(pan_out1) # in:256,20,20 out:256,10,10
p_out0 = torch.cat([p_out0, fpn_out0], 1) # in:256,10,10 out:512,10,10
pan_out0 = self.C3_n4(p_out0) # in:512,10,10 out:512,10,10
outputs = (pan_out3, pan_out2, pan_out1, pan_out0)
return outputs | en | 0.367084 | #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. YOLOv3 model. Darknet 53 is the default backbone of this model. # cat # bottom-up conv # bottom-up conv Args: inputs: input images. Returns: Tuple[Tensor]: FPN feature. # backbone # 尺寸从大到小 # in:512,10,10 out:v,10,10 # in:256,10,10 out:256,20,20 # in:256,20,20 out:512,20,20 # in:512,20,20 out:256,20,20 # in:256,20,20 out:128,20,20 # in:128,20,20 out:128,40,40 # in::128,40,40 out:256,40,40 # in:256,40,40 out:128,40,40 # in:128,40,40 out:128,20,20 # int:128,20,20 out:256,20,20 # in:256,20,20 out:256,20,20 # in:256,20,20 out:256,10,10 # in:256,10,10 out:512,10,10 # in:512,10,10 out:512,10,10 # 尺寸从大到小 # in:512,10,10 out:v,10,10 # in:256,10,10 out:256,20,20 # in:256,20,20 out:512,20,20 # in:512,20,20 out:256,20,20 # in:256,20,20 out:128,20,20 # in:128,20,20 out:128,40,40 # in::128,40,40 out:256,40,40 # in:256,40,40 out:128,40,40 # in:128,40,40 out:64,40,40 # in:64,40,40 out:64,80,80 # in::64,80,80 out:128,80,80 # in:128,80,80 out:64,80,80 # in:64,80,80 out:64,40,40 # int:64,40,40 out:128,40,40 # in:128,40,40 out:128,40,40 # in:128,40,40 out:128,20,20 # int:128,20,20 out:256,20,20 # in:256,20,20 out:256,20,20 # in:256,20,20 out:256,10,10 # in:256,10,10 out:512,10,10 # in:512,10,10 out:512,10,10 | 1.924374 | 2 |
Python_OS_Services/platform_module2.py | xanthium-enterprises/Python3-Tutorial | 0 | 6619494 | # Platform module for querying info about your system.
# www.xanthium.in
import platform
print('Generic platform services using platform module')
print('\nOS/Machine info\n')
#Cross platform
print('\n----------------------------------------------------')
print('\nCross platform OS ')
print('\n----------------------------------------------------')
print('Machine Type -> ' + platform.machine()) #returns the machine type Eg i386 or AMD64
print('Processor Name -> ' + platform.processor()) #returns the real processor name
print('OS name -> ' + platform.system()) #returns the OS name Eg Windows,Linux,Java,Darwin
print('OS Release No -> ' + platform.release()) #returns the OS release number
print('\nOS full name -> ' + platform.system() +'-' +platform.release())
print('\nNetwork Name -> ' + platform.node()) #returns the computers network name
#Python
print('\nPython Environment ')
print('\n----------------------------------------------------')
print('\nPython Implementation -> ' + platform.python_implementation()) # which type of Python implementation.Eg‘CPython’, ‘IronPython’, ‘Jython’, ‘PyPy’.print(platform.python_version())
print('Python version -> ' + platform.python_version()) #version of the installed python
#Returns a tuple (buildno, builddate) stating the Python build number and date as strings
BuildNumberTuple = platform.python_build()
print('Python build Number -> ' + BuildNumberTuple[0])
print('Python build Date -> ' + BuildNumberTuple[1])
print('Compiler used to build -> ' + platform.python_compiler()) #Returns a string identifying the compiler used for compiling Python
#windows specific
print('\n----------------------------------------------------')
print('\nWindows Specific')
print('\n----------------------------------------------------')
Win32_Version_Tuple = platform.win32_ver()
print('Windows OS Release -> ' + Win32_Version_Tuple[0])
print('Windows OS Version -> ' + Win32_Version_Tuple[1])
print('CSD level (service pack) -> ' + Win32_Version_Tuple[2])
print('Windows OS type -> ' + Win32_Version_Tuple[3])
print('Windows Current Edition -> ' + platform.win32_edition()) #string representing the current Windows edition
#CoreSingleLanguage -only 1 language ,other language packs need to be installed
print('Is Platform Windows IOT -> ' + str(platform.win32_is_iot())) # is platform a windows iot,return bool
| # Platform module for querying info about your system.
# www.xanthium.in
import platform
print('Generic platform services using platform module')
print('\nOS/Machine info\n')
#Cross platform
print('\n----------------------------------------------------')
print('\nCross platform OS ')
print('\n----------------------------------------------------')
print('Machine Type -> ' + platform.machine()) #returns the machine type Eg i386 or AMD64
print('Processor Name -> ' + platform.processor()) #returns the real processor name
print('OS name -> ' + platform.system()) #returns the OS name Eg Windows,Linux,Java,Darwin
print('OS Release No -> ' + platform.release()) #returns the OS release number
print('\nOS full name -> ' + platform.system() +'-' +platform.release())
print('\nNetwork Name -> ' + platform.node()) #returns the computers network name
#Python
print('\nPython Environment ')
print('\n----------------------------------------------------')
print('\nPython Implementation -> ' + platform.python_implementation()) # which type of Python implementation.Eg‘CPython’, ‘IronPython’, ‘Jython’, ‘PyPy’.print(platform.python_version())
print('Python version -> ' + platform.python_version()) #version of the installed python
#Returns a tuple (buildno, builddate) stating the Python build number and date as strings
BuildNumberTuple = platform.python_build()
print('Python build Number -> ' + BuildNumberTuple[0])
print('Python build Date -> ' + BuildNumberTuple[1])
print('Compiler used to build -> ' + platform.python_compiler()) #Returns a string identifying the compiler used for compiling Python
#windows specific
print('\n----------------------------------------------------')
print('\nWindows Specific')
print('\n----------------------------------------------------')
Win32_Version_Tuple = platform.win32_ver()
print('Windows OS Release -> ' + Win32_Version_Tuple[0])
print('Windows OS Version -> ' + Win32_Version_Tuple[1])
print('CSD level (service pack) -> ' + Win32_Version_Tuple[2])
print('Windows OS type -> ' + Win32_Version_Tuple[3])
print('Windows Current Edition -> ' + platform.win32_edition()) #string representing the current Windows edition
#CoreSingleLanguage -only 1 language ,other language packs need to be installed
print('Is Platform Windows IOT -> ' + str(platform.win32_is_iot())) # is platform a windows iot,return bool
| en | 0.614619 | # Platform module for querying info about your system. # www.xanthium.in #Cross platform #returns the machine type Eg i386 or AMD64 #returns the real processor name #returns the OS name Eg Windows,Linux,Java,Darwin #returns the OS release number #returns the computers network name #Python # which type of Python implementation.Eg‘CPython’, ‘IronPython’, ‘Jython’, ‘PyPy’.print(platform.python_version()) #version of the installed python #Returns a tuple (buildno, builddate) stating the Python build number and date as strings #Returns a string identifying the compiler used for compiling Python #windows specific #string representing the current Windows edition #CoreSingleLanguage -only 1 language ,other language packs need to be installed # is platform a windows iot,return bool | 2.858227 | 3 |
tuples/convert tuple into Dictionary.py | ZephyrAveryl777/Python-Programs | 6 | 6619495 | '''
You can use dict() method to convert
tuple into dictionary.
'''
test = (('America', 27), ('Canada', 25), ('Japan', 5), ('Italy', 0))
dict1 = dict(i for i in test)
dict2 = dict(map(reversed, test))
dict3 = dict(i[::1] for i in test)
print(f'\nElements of the tuple: {test}')
print(f'\nDictionary using dict_method: {dict1}')
print(f'\nDictionary using dict_map method: {dict2}')
print(f'\nDictionary using dict_item_iteration: {dict3}') | '''
You can use dict() method to convert
tuple into dictionary.
'''
test = (('America', 27), ('Canada', 25), ('Japan', 5), ('Italy', 0))
dict1 = dict(i for i in test)
dict2 = dict(map(reversed, test))
dict3 = dict(i[::1] for i in test)
print(f'\nElements of the tuple: {test}')
print(f'\nDictionary using dict_method: {dict1}')
print(f'\nDictionary using dict_map method: {dict2}')
print(f'\nDictionary using dict_item_iteration: {dict3}') | en | 0.820388 | You can use dict() method to convert
tuple into dictionary. | 4.179072 | 4 |
tests/test_grant_tagger.py | wellcometrust/nutrition-labels | 2 | 6619496 | <filename>tests/test_grant_tagger.py<gh_stars>1-10
import pytest
import pandas as pd
import numpy as np
from nutrition_labels.grant_tagger import GrantTagger
training_data = pd.DataFrame(
[
{
'text_field': 'Genetics grant to help medicine.',
'text_field_2': 'Genes linked to illnesses.',
'Label': 0,
'ID': 4
},
{
'text_field': 'The history of medicine.',
'text_field_2': 'Books about medicine and genes.',
'Label': 0,
'ID': 1
},
{
'text_field': 'Creating software tools to further technology.',
'text_field_2': 'Coding in Python.',
'Label': 1,
'ID': 2
},
{
'text_field': 'Technology tools will be created.',
'text_field_2': 'Python and other languages.',
'Label': 1,
'ID': 0
},
{
'text_field': 'In this grant we hope to create new software',
'text_field_2': 'Tools will be created.',
'Label': 1,
'ID': 3
},
{
'text_field': 'Software will be created.',
'text_field_2': 'Machine learning tools.',
'Label': 1,
'ID': 5
}
]
)
prediction_cols = ['text_field', 'text_field_2']
label_name = 'Label'
train_data_id = 'ID'
def test_fit_transform():
grant_tagger = GrantTagger(
prediction_cols=prediction_cols,
label_name=label_name,
)
X_train = training_data['text_field'].tolist()
y_train = training_data['Label'].tolist()
grant_tagger.fit(X_train, y_train)
X_vect = grant_tagger.transform(pd.DataFrame({'Grant texts': X_train}))
assert X_vect.shape[0] == 6
assert X_vect.shape == grant_tagger.X_train_vect.shape
def test_split_data():
grant_tagger = GrantTagger(
test_size=1/3,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data, train_data_id)
(_, y_train, train_ids) = train_data
(_, y_test, _) = test_data
assert train_ids == [2, 0, 5, 4]
assert len(y_train) == 4
assert len(y_test) == 2
def test_split_relevant_sample_ratio():
grant_tagger = GrantTagger(
relevant_sample_ratio=0.25,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
all_y = y_train + y_test
assert len(all_y) == 5
assert len([y for y in all_y if y==0]) == 1
grant_tagger = GrantTagger(
relevant_sample_ratio=0.5,
prediction_cols=prediction_cols,
label_name=label_name,
)
training_data_cp = training_data.copy()
training_data_cp['Label'] = [0, 0, 0, 0, 1, 1]
train_data, test_data, _ = grant_tagger.split_data(training_data_cp, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
assert len(y_train + y_test) == 3
grant_tagger = GrantTagger(
relevant_sample_ratio=1,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data_cp, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
assert len(y_train + y_test) == 4
grant_tagger = GrantTagger(
relevant_sample_ratio=2,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data_cp, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
assert len(y_train + y_test) == 6
def test_train_test_info():
grant_tagger = GrantTagger(
relevant_sample_ratio=1,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, unseen_data = grant_tagger.split_data(training_data, train_data_id)
(X_train, y_train, train_ids) = train_data
grant_tagger.fit(X_train, y_train)
grant_info = grant_tagger.train_test_info(train_ids, y_train, test_data, unseen_data)
training_data_truth_dict = dict(zip(training_data.ID, training_data.Label))
output_truth_dict = {k:v['Truth'] for k, v in grant_info.items()}
assert output_truth_dict == training_data_truth_dict
def test_apply_threshold():
y_predict = [0, 0, 0, 1, 1, 1]
pred_probs = np.array(
[
[0.8, 0.2],
[0.7, 0.3],
[0.6, 0.4],
[0.2, 0.8],
[0.3, 0.7],
[0.4, 0.6],
]
)
grant_tagger = GrantTagger(
threshold=0.7
)
y_predict_thresh = grant_tagger.apply_threshold(y_predict, pred_probs)
assert all([y1==y2 for y1, y2 in zip([0, 0, 0, 1, 1, 0], y_predict_thresh)])
| <filename>tests/test_grant_tagger.py<gh_stars>1-10
import pytest
import pandas as pd
import numpy as np
from nutrition_labels.grant_tagger import GrantTagger
training_data = pd.DataFrame(
[
{
'text_field': 'Genetics grant to help medicine.',
'text_field_2': 'Genes linked to illnesses.',
'Label': 0,
'ID': 4
},
{
'text_field': 'The history of medicine.',
'text_field_2': 'Books about medicine and genes.',
'Label': 0,
'ID': 1
},
{
'text_field': 'Creating software tools to further technology.',
'text_field_2': 'Coding in Python.',
'Label': 1,
'ID': 2
},
{
'text_field': 'Technology tools will be created.',
'text_field_2': 'Python and other languages.',
'Label': 1,
'ID': 0
},
{
'text_field': 'In this grant we hope to create new software',
'text_field_2': 'Tools will be created.',
'Label': 1,
'ID': 3
},
{
'text_field': 'Software will be created.',
'text_field_2': 'Machine learning tools.',
'Label': 1,
'ID': 5
}
]
)
prediction_cols = ['text_field', 'text_field_2']
label_name = 'Label'
train_data_id = 'ID'
def test_fit_transform():
grant_tagger = GrantTagger(
prediction_cols=prediction_cols,
label_name=label_name,
)
X_train = training_data['text_field'].tolist()
y_train = training_data['Label'].tolist()
grant_tagger.fit(X_train, y_train)
X_vect = grant_tagger.transform(pd.DataFrame({'Grant texts': X_train}))
assert X_vect.shape[0] == 6
assert X_vect.shape == grant_tagger.X_train_vect.shape
def test_split_data():
grant_tagger = GrantTagger(
test_size=1/3,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data, train_data_id)
(_, y_train, train_ids) = train_data
(_, y_test, _) = test_data
assert train_ids == [2, 0, 5, 4]
assert len(y_train) == 4
assert len(y_test) == 2
def test_split_relevant_sample_ratio():
grant_tagger = GrantTagger(
relevant_sample_ratio=0.25,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
all_y = y_train + y_test
assert len(all_y) == 5
assert len([y for y in all_y if y==0]) == 1
grant_tagger = GrantTagger(
relevant_sample_ratio=0.5,
prediction_cols=prediction_cols,
label_name=label_name,
)
training_data_cp = training_data.copy()
training_data_cp['Label'] = [0, 0, 0, 0, 1, 1]
train_data, test_data, _ = grant_tagger.split_data(training_data_cp, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
assert len(y_train + y_test) == 3
grant_tagger = GrantTagger(
relevant_sample_ratio=1,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data_cp, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
assert len(y_train + y_test) == 4
grant_tagger = GrantTagger(
relevant_sample_ratio=2,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, _ = grant_tagger.split_data(training_data_cp, train_data_id)
(_, y_train, _) = train_data
(_, y_test, _) = test_data
assert len(y_train + y_test) == 6
def test_train_test_info():
grant_tagger = GrantTagger(
relevant_sample_ratio=1,
prediction_cols=prediction_cols,
label_name=label_name,
)
train_data, test_data, unseen_data = grant_tagger.split_data(training_data, train_data_id)
(X_train, y_train, train_ids) = train_data
grant_tagger.fit(X_train, y_train)
grant_info = grant_tagger.train_test_info(train_ids, y_train, test_data, unseen_data)
training_data_truth_dict = dict(zip(training_data.ID, training_data.Label))
output_truth_dict = {k:v['Truth'] for k, v in grant_info.items()}
assert output_truth_dict == training_data_truth_dict
def test_apply_threshold():
y_predict = [0, 0, 0, 1, 1, 1]
pred_probs = np.array(
[
[0.8, 0.2],
[0.7, 0.3],
[0.6, 0.4],
[0.2, 0.8],
[0.3, 0.7],
[0.4, 0.6],
]
)
grant_tagger = GrantTagger(
threshold=0.7
)
y_predict_thresh = grant_tagger.apply_threshold(y_predict, pred_probs)
assert all([y1==y2 for y1, y2 in zip([0, 0, 0, 1, 1, 0], y_predict_thresh)])
| none | 1 | 2.548462 | 3 | |
moment/timelines.py | lordi/redis-moment | 41 | 6619497 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from . import conf
from .base import Base, BaseHour, BaseDay, BaseWeek, BaseMonth, BaseYear
from .collections import MixinSerializable
__all__ = ['TIMELINE_NAMESPACE', 'TIMELINE_ALIASES', 'Timeline',
'HourTimeline', 'DayTimeline', 'WeekTimeline',
'MonthTimeline', 'YearTimeline']
TIMELINE_NAMESPACE = 'tln'
def _totimerange(start_time, end_time):
if start_time is None:
start_time = '-inf'
if end_time is None:
end_time = '+inf'
return start_time, end_time
class Timeline(Base, MixinSerializable):
namespace = TIMELINE_NAMESPACE
key_format = '{self.name}'
clonable_attrs = ['serializer']
def __init__(self, name, client='default', serializer=None):
super(Timeline, self).__init__(name, client)
self.serializer = conf.get_serializer(serializer)
def encode(self, data, timestamp):
return {'d': data, 't': timestamp}
def decode(self, value):
return value.get('d'), value.get('t')
def add(self, *items, **kwargs):
"""
Add new item to `timeline`
Examples ::
tl = Timeline('events')
tl.add('event1', 'event2', timestamp=time.time())
"""
assert items, 'At least one item should be given.'
timestamp = kwargs.get('timestamp') or time.time()
args = []
for item in items:
args.append(timestamp)
args.append(self.dumps(self.encode(item, timestamp)))
self.client.zadd(self.key, *args)
return timestamp
def timerange(self, start_time=None, end_time=None, limit=None):
start_time, end_time = _totimerange(start_time, end_time)
offset = None if limit is None else 0
items = self.client.zrangebyscore(self.key, start_time,
end_time, offset, limit)
return [self.decode(self.loads(i)) for i in items]
def delete_timerange(self, start_time=None, end_time=None):
start_time, end_time = _totimerange(start_time, end_time)
return self.client.zremrangebyscore(self.key, start_time, end_time)
def count_timerange(self, start_time=None, end_time=None):
start_time, end_time = _totimerange(start_time, end_time)
return self.client.zcount(self.key, start_time, end_time)
def range(self, start=0, end=-1):
items = self.client.zrange(self.key, start, end)
return [self.decode(self.loads(i)) for i in items]
def delete_range(self, start=0, end=-1):
return self.client.zremrangebyrank(self.key, start, end)
def head(self, limit=1):
return self.range(0, limit - 1)
def tail(self, limit=1):
return self.range(-limit, -1)
def items(self):
return self.range()
def count(self):
return self.client.zcard(self.key)
def __len__(self):
return self.count()
class HourTimeline(BaseHour, Timeline):
pass
class DayTimeline(BaseDay, Timeline):
pass
class WeekTimeline(BaseWeek, Timeline):
pass
class MonthTimeline(BaseMonth, Timeline):
pass
class YearTimeline(BaseYear, Timeline):
pass
TIMELINE_ALIASES = {
'hour': HourTimeline,
'day': DayTimeline,
'week': WeekTimeline,
'month': MonthTimeline,
'year': YearTimeline,
}
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from . import conf
from .base import Base, BaseHour, BaseDay, BaseWeek, BaseMonth, BaseYear
from .collections import MixinSerializable
__all__ = ['TIMELINE_NAMESPACE', 'TIMELINE_ALIASES', 'Timeline',
'HourTimeline', 'DayTimeline', 'WeekTimeline',
'MonthTimeline', 'YearTimeline']
TIMELINE_NAMESPACE = 'tln'
def _totimerange(start_time, end_time):
if start_time is None:
start_time = '-inf'
if end_time is None:
end_time = '+inf'
return start_time, end_time
class Timeline(Base, MixinSerializable):
namespace = TIMELINE_NAMESPACE
key_format = '{self.name}'
clonable_attrs = ['serializer']
def __init__(self, name, client='default', serializer=None):
super(Timeline, self).__init__(name, client)
self.serializer = conf.get_serializer(serializer)
def encode(self, data, timestamp):
return {'d': data, 't': timestamp}
def decode(self, value):
return value.get('d'), value.get('t')
def add(self, *items, **kwargs):
"""
Add new item to `timeline`
Examples ::
tl = Timeline('events')
tl.add('event1', 'event2', timestamp=time.time())
"""
assert items, 'At least one item should be given.'
timestamp = kwargs.get('timestamp') or time.time()
args = []
for item in items:
args.append(timestamp)
args.append(self.dumps(self.encode(item, timestamp)))
self.client.zadd(self.key, *args)
return timestamp
def timerange(self, start_time=None, end_time=None, limit=None):
start_time, end_time = _totimerange(start_time, end_time)
offset = None if limit is None else 0
items = self.client.zrangebyscore(self.key, start_time,
end_time, offset, limit)
return [self.decode(self.loads(i)) for i in items]
def delete_timerange(self, start_time=None, end_time=None):
start_time, end_time = _totimerange(start_time, end_time)
return self.client.zremrangebyscore(self.key, start_time, end_time)
def count_timerange(self, start_time=None, end_time=None):
start_time, end_time = _totimerange(start_time, end_time)
return self.client.zcount(self.key, start_time, end_time)
def range(self, start=0, end=-1):
items = self.client.zrange(self.key, start, end)
return [self.decode(self.loads(i)) for i in items]
def delete_range(self, start=0, end=-1):
return self.client.zremrangebyrank(self.key, start, end)
def head(self, limit=1):
return self.range(0, limit - 1)
def tail(self, limit=1):
return self.range(-limit, -1)
def items(self):
return self.range()
def count(self):
return self.client.zcard(self.key)
def __len__(self):
return self.count()
#Period-scoped timelines: each combines Timeline with one of the Base* period mixins
class HourTimeline(BaseHour, Timeline):
pass
class DayTimeline(BaseDay, Timeline):
pass
class WeekTimeline(BaseWeek, Timeline):
pass
class MonthTimeline(BaseMonth, Timeline):
pass
class YearTimeline(BaseYear, Timeline):
pass
#Lookup table mapping period-name strings to their Timeline subclasses
TIMELINE_ALIASES = {
'hour': HourTimeline,
'day': DayTimeline,
'week': WeekTimeline,
'month': MonthTimeline,
'year': YearTimeline,
}
| en | 0.334777 | #!/usr/bin/env python # -*- coding: utf-8 -*- Add new item to `timeline` Examples :: tl = Timeline('events') tl.add('event1', 'event2', timestamp=time.time()) | 2.545951 | 3 |
journey11/src/main/simple/simpleworkrequest.py | parrisma/AI-intuition | 0 | 6619498 | from journey11.src.interface.srcsink import SrcSink
from journey11.src.interface.workrequest import WorkRequest
from journey11.src.lib.uniqueworkref import UniqueWorkRef
#Minimal WorkRequest implementation: pairs a unique work reference with the originating SrcSink
class SimpleWorkRequest(WorkRequest):
def __init__(self,
unique_work_ref: UniqueWorkRef,
originator: SrcSink):
self._work_ref = unique_work_ref
self._originator = originator
return
@property
def work_ref(self) -> UniqueWorkRef:
"""
The unique work reference of the work being requested
:return: The work reference
"""
return self._work_ref
@property
def originator(self) -> SrcSink:
"""
The origin the work request came from.
:return: The originating SrcSink
"""
return self._originator
def __str__(self):
"""
Render as string
:return: String rendering of class instance
"""
return "Work Request {} from SrcSink {}".format(self._work_ref.id, self._originator.name)
| from journey11.src.interface.srcsink import SrcSink
from journey11.src.interface.workrequest import WorkRequest
from journey11.src.lib.uniqueworkref import UniqueWorkRef
class SimpleWorkRequest(WorkRequest):
def __init__(self,
unique_work_ref: UniqueWorkRef,
originator: SrcSink):
self._work_ref = unique_work_ref
self._originator = originator
return
@property
def work_ref(self) -> UniqueWorkRef:
"""
The unique work reference of teh work being requested
:return: The work reference
"""
return self._work_ref
@property
def originator(self) -> SrcSink:
"""
The origin the work request came from.
:return: The task pool
"""
return self._originator
def __str__(self):
"""
Render as string
:return: String rendering of class instance
"""
return "Work Request {} from SrcSink {}".format(self._work_ref.id, self._originator.name)
| en | 0.832472 | The unique work reference of teh work being requested :return: The work reference The origin the work request came from. :return: The task pool Render as string :return: String rendering of class instance | 2.332488 | 2 |
helipad/agent.py | vishalbelsare/helipad | 16 | 6619499 | # ==========
# Basic extensible agent class
# Do not run this file; import model.py and run from your file.
# ==========
from random import choice, randint
from numpy import *
#Basic agent functions. This class should not be instantiated directly; instead it should be
#subclassed by a class corresponding to a primitive and registered with Helipad.addPrimitive().
#See below, the Agent() class for a minimal example.
class baseAgent:
#Fixed primitives (e.g. spatial patches) cannot reproduce or die; see the guards in reproduce()/die()
fixed = False
#==================
# UTILITY METHODS
#==================
#Set up base agent state: identity, goods stocks, network edges, and per-good demand/shortage tallies.
#`breed` groups agents within a primitive; `id` is unique per model; `model` is the owning Helipad model.
def __init__(self, breed, id, model):
self.breed = breed
self.id = int(id)
self.model = model
self.age = 0
self.dead = False
self.stocks = Stocks(breed, model.goods)
self.edges = {}
self.utils = 0
self.position = None #Overridden in spatial init
self.currentDemand = {g:0 for g in model.goods.keys()}
self.currentShortage = {g:0 for g in model.goods.keys()}
if hasattr(super(), 'runInit'): super().__init__() #For multi-level models
self.model.doHooks(['baseAgentInit', self.primitive+'Init'], [self, self.model])
#Called once per model stage; fires step hooks each stage and increments age on the final stage
def step(self, stage):
self.model.doHooks(['baseAgentStep', self.primitive+'Step'], [self, self.model, stage])
if hasattr(super(), 'runInit'): super().step(stage) #For multi-level models
if stage == self.model.stages: self.age += 1
#==================
# ECONOMIC METHODS
#==================

#Give amt1 of good 1, get amt2 of good 2
#Negative values of amt1 and amt2 allowed, which reverses the direction
def trade(self, partner, good1, amt1, good2, amt2):
    """Exchange amt1 of good1 (self -> partner) for amt2 of good2 (partner -> self).

    Amounts are clipped to the payer's stock, holding the implied price
    (amt1/amt2) constant; any unmet quantity is tallied as a shortage.
    Demand for each good is recorded on the receiving side.
    """
    self.model.doHooks('preTrade', [self, partner, good1, amt1, good2, amt2])
    if amt2 != 0: price = amt1 / amt2

    #Budget constraints. Hold price constant if hit
    if amt1 > self.stocks[good1]:
        self.currentShortage[good1] += amt1 - self.stocks[good1]
        amt1 = self.stocks[good1]
        if amt2 != 0: amt2 = amt1 / price
    elif -amt1 > partner.stocks[good1]:
        partner.currentShortage[good1] += -amt1 - partner.stocks[good1]
        amt1 = -partner.stocks[good1]
        if amt2 != 0: amt2 = amt1 / price

    if amt2 > partner.stocks[good2]:
        #BUG FIX: the good2 shortage was previously tallied with amt1 (a good1 quantity)
        partner.currentShortage[good2] += amt2 - partner.stocks[good2]
        amt2 = partner.stocks[good2]
        amt1 = price * amt2
    elif -amt2 > self.stocks[good2]:
        #BUG FIX: the good2 shortage was previously tallied with -amt1 (a good1 quantity)
        self.currentShortage[good2] += -amt2 - self.stocks[good2]
        amt2 = -self.stocks[good2]
        amt1 = price * amt2

    self.stocks[good1] -= amt1
    partner.stocks[good1] += amt1
    self.stocks[good2] += amt2
    partner.stocks[good2] -= amt2

    #Record demand on the receiving side of each good
    if amt1 > 0: partner.currentDemand[good1] += amt1
    else: self.currentDemand[good1] -= amt1
    if amt2 > 0: self.currentDemand[good2] += amt2
    else: partner.currentDemand[good2] -= amt2

    self.model.doHooks('postTrade', [self, partner, good1, amt1, good2, amt2])
#Price is per-unit
#Returns the quantity actually sold, Which is the same as quantity input unless there's a shortage
#Purchase q units of `good` from `partner`, paying p per unit in the model's money good
def buy(self, partner, good, q, p):
if self.model.moneyGood is None: raise RuntimeError('Buy function requires a monetary good to be specified')
qp = self.model.doHooks('buy', [self, partner, good, q, p]) #A 'buy' hook may override (q, p)
if qp is not None: q, p = qp
before = self.stocks[good]
self.trade(partner, self.model.moneyGood, p*q, good, q)
return self.stocks[good] - before
#Unilateral
#Transfer `amount` of the money good to `recipient` (negative reverses direction).
#Clipped to both parties' balances; a 'pay' hook may override the amount. Returns the amount paid.
def pay(self, recipient, amount):
if self.model.moneyGood is None: raise RuntimeError('Pay function requires a monetary good to be specified')
#Budget constraint and hooks
if amount > self.balance: amount = self.balance
if -amount > recipient.balance: amount = -recipient.balance
amount_ = self.model.doHooks('pay', [self, recipient, amount, self.model])
if amount_ is not None: amount = amount_
if amount != 0:
recipient.stocks[self.model.moneyGood] += amount
self.stocks[self.model.moneyGood] -= amount
return amount
#Current holdings of the money good; a 'checkBalance' hook may override the reported value
@property
def balance(self):
if self.model.moneyGood is None: raise RuntimeError('Balance checking requires a monetary good to be specified')
bal = self.stocks[self.model.moneyGood]
bal_ = self.model.doHooks('checkBalance', [self, bal, self.model])
if bal_ is not None: bal = bal_
return bal
#==================
# GENETIC METHODS
#==================
#Spawn a new agent of the same type and breed, inheriting and mutating attributes from the parents.
#NOTE(review): the mutable defaults (inherit=[], mutate={}, partners=[]) are only read here, so they
#are safe, but fragile if this method is ever changed to mutate them.
def reproduce(self, inherit=[], mutate={}, partners=[]):
if self.fixed: raise NotImplementedError('Fixed primitives cannot reproduce.')
#Assign the new agent the next free id across all primitives
maxid = 0
for a in self.model.allagents.values():
if a.id > maxid:
maxid = a.id
newagent = type(self)(self.breed, maxid+1, self.model)
#Values in the inherit list can either be a variable name (in which case the new agent inherits
#the mean of all of the values for the parents), or a tuple, the first element of which is a
#variable name, and the second is a string representing how to merge them. Possible values are
#'mean' (default for numeric values), 'first' (default for non-numeric values), 'last', 'gmean',
#'random', and 'sum'. The second value can also take a function, which receives a list of
#values from the parents and returns a value for the child.
parents = [self] + partners
for a in inherit:
stat = None
if isinstance(a, tuple): a, stat = a
v = [getattr(p,a) for p in parents if hasattr(p,a)] #List of values, filtering those without
if len(v)==0: continue
#Default statistic if unspecified. 'mean' for numbers, and 'first' for non-numbers.
if stat is None:
stat = 'mean' if isinstance(v[0], (int, float, complex)) and not isinstance(v[0], bool) else 'first'
if stat=='mean': n = mean(v)
elif stat=='sum': n = sum(v)
elif stat=='gmean': n = exp(log(v).sum()/len(v))
elif stat=='first': n = v[0]
elif stat=='last': n = v[len(v)-1]
elif stat=='rand' or stat=='random': n = choice(v)
elif stat=='max': n = max(v)
elif stat=='min': n = min(v)
elif callable(stat): n = stat(v)
else: raise ValueError("Invalid statistic in reproduction function.")
setattr(newagent, a, n)
#Mutate variables
#Values in the mutate dict can be either a function (which takes a value and returns a value),
# a number (a std dev by which to mutate the value), or a tuple, the first element of which
# is a std dev and the second of which is either 'log' or 'linear'
for k,v in mutate.items():
if callable(v): newval = v(getattr(newagent, k))
else:
if isinstance(v, tuple): v, scale = v
else: scale = 'linear'
#`random` here is numpy's random module via `from numpy import *` (only choice/randint come from stdlib random)
if scale=='log': newval = random.lognormal(log(getattr(newagent, k)), v)
else: newval = random.normal(getattr(newagent, k), v)
setattr(newagent, k, newval)
#Redundant: the constructor above already received maxid+1 and set self.id from it
newagent.id = maxid+1
for p in parents:
p.newEdge(newagent,'lineage', True) #Keep track of parent-child relationships
self.model.agents[self.primitive].append(newagent)
self.model.doHooks(['baseAgentReproduce', self.primitive+'Reproduce'], [parents, newagent, self.model])
return newagent
#Remove the agent from the model and sever all its edges. `updateGUI` is accepted but unused here.
def die(self, updateGUI=True):
if self.fixed: raise NotImplementedError('Fixed primitives cannot die.')
self.model.agents[self.primitive].remove(self)
for edge in self.alledges: edge.cut()
self.dead = True
self.model.doHooks(['baseAgentDie', self.primitive+'Die'], [self])
#The agent(s) this one was spawned from: None, a single agent, or a list for multiple parents
@property
def parent(self):
p = self.inbound('lineage', obj='agent')
if len(p)==0: return None
elif len(p)==1: return p[0]
else: return p
#Agents this one spawned, via outbound 'lineage' edges
@property
def children(self):
return [edge.partner(self) for edge in self.outbound('lineage')]
#==================
# NETWORK METHODS
#==================
#Create and return a new Edge connecting this agent to `partner`
def newEdge(self, partner, kind='edge', direction=None, weight=1):
return Edge(self, partner, kind, direction, weight)
#Edges of `kind` starting at this agent (or the partner agents, with obj='agent').
#kind=None scans all kinds; undirected=True also includes undirected edges.
def outbound(self, kind='edge', undirected=False, obj='edge'):
if obj not in ['agent', 'edge']: raise ValueError('Object must be specified either \'agent\' or \'edge\'.')
if kind is None: edges = self.alledges
else:
if not kind in self.edges: return []
edges = self.edges[kind]
ob = [edge for edge in edges if edge.startpoint == self or (undirected and not edge.directed)]
return ob if obj=='edge' else [e.partner(self) for e in ob]
#Edges of `kind` ending at this agent (or the partner agents, with obj='agent').
#kind=None scans all kinds; undirected=True also includes undirected edges.
def inbound(self, kind='edge', undirected=False, obj='edge'):
if obj not in ['agent', 'edge']: raise ValueError('Object must be specified either \'agent\' or \'edge\'.')
if kind is None: edges = self.alledges
else:
if not kind in self.edges: return []
edges = self.edges[kind]
ib = [edge for edge in edges if edge.endpoint == self or (undirected and not edge.directed)]
return ib if obj=='edge' else [e.partner(self) for e in ib]
#All edges of `kind` (or of any kind, if kind=None) connecting this agent with `partner`
def edgesWith(self, partner, kind='edge'):
if kind is not None:
if not kind in self.edges: return []
edges = self.edges[kind]
else: edges = self.alledges
return [edge for edge in edges if self in edge.vertices and partner in edge.vertices]
@property
def alledges(self):
    """Every edge attached to this agent, across all edge kinds."""
    return [edge for bucket in self.edges.values() for edge in bucket]
#==================
# OTHER METHODS
#==================
#In a multi-level model, allow the agent to move to a different deme/firm/etc
#Moves this agent from its current model's agent list to `dest`'s, rebinding self.model
def transfer(self, dest):
origin = self.model
dest.agents[self.primitive].append(self)
self.model = dest
origin.agents[self.primitive].remove(self)
self.model.doHooks(['baseAgentMove', self.primitive+'Move'], [self, origin, dest])
#The default agent class corresponding to the 'agent' primitive.
#Inherits all behavior from baseAgent unchanged.
class Agent(baseAgent):
pass
#For spatial models
#A grid cell. Fixed (cannot reproduce or die); exposes its grid neighbors and the agents on it.
class Patch(baseAgent):
fixed = True
#Agents connected by a 'space' edge — presumably the adjacent patches; confirm in the spatial setup code
@property
def neighbors(self):
return self.outbound('space', True, obj='agent')
#The four directional accessors return the adjacent patch, wrapping around the grid when the
#'wrap' model parameter is set, or None when at the border otherwise
@property
def up(self):
if self.y==0 and not self.model.param('wrap'): return None
return self.model.patches[self.x, self.y-1 if self.y > 0 else self.model.param('y')-1]
@property
def right(self):
if self.x>=self.model.param('x')-1 and not self.model.param('wrap'): return None
return self.model.patches[self.x+1 if self.x < self.model.param('x')-1 else 0, self.y]
@property
def down(self):
if self.y>=self.model.param('y')-1 and not self.model.param('wrap'): return None
return self.model.patches[self.x, self.y+1 if self.y < self.model.param('y')-1 else 0]
@property
def left(self):
if self.x==0 and not self.model.param('wrap'): return None
return self.model.patches[self.x-1 if self.x > 0 else self.model.param('x')-1, self.y]
#Yield every non-patch agent whose coordinates fall within this patch's unit cell
@property
def agentsOn(self):
for prim, lst in self.model.agents.items():
if prim=='patch': continue
yield from [a for a in lst if self.x-0.5<=a.x<self.x+0.5 and self.y-0.5<=a.y<self.y+0.5]
#Direction can take an Agent object (corresponding to the endpoint),
#an int (0 for undirected, >0 for agent1 to agent2, and <0 for agent2 to agent1),
#or a boolean (False for undirected, True for agent1 to agent2)
#An Edge links two agents under a `kind` label, optionally directed and weighted; it registers
#itself in both agents' edge dicts on construction.
class Edge:
def __init__(self, agent1, agent2, kind='edge', direction=None, weight=1):
self.active = True
self.kind = kind
self.vertices = (agent1, agent2)
self.weight = weight
self.directed = False
if direction is not None:
self.directed = True
if isinstance(direction, int):
if direction==0: self.directed = False
elif direction>0: self.startpoint, self.endpoint = (agent1, agent2)
elif direction<0: self.startpoint, self.endpoint = (agent2, agent1)
#NOTE(review): bool is a subclass of int, so this branch is unreachable; True/False are already
#handled by the int branch above (True>0 directed, False==0 undirected) with the same outcome
elif isinstance(direction, bool):
self.directed = direction
if direction: self.startpoint, self.endpoint = (agent1, agent2)
elif isinstance(direction, baseAgent):
if direction not in self.vertices: raise ValueError('Direction must select one of the agents as an endpoint.')
self.endpoint = direction
self.startpoint = agent1 if direction==agent2 else agent2
else: raise ValueError('Direction must be either int, bool, or agent.')
#Normalize: undirected edges carry no start/end points
if not self.directed:
self.endpoint, self.startpoint, self.directed = (None, None, False)
#Add object to each agent, and to the model
for agent in self.vertices:
if not kind in agent.edges: agent.edges[kind] = []
if not self in agent.edges[kind]: agent.edges[kind].append(self) #Don't add self-links twice
agent1.model.doHooks('edgeInit', [self, kind, agent1, agent2])
#Detach this edge from both agents' edge lists and mark it inactive
def cut(self):
for agent in self.vertices:
if self in agent.edges[self.kind]: agent.edges[self.kind].remove(self) #Remove from agents
self.active = False
self.vertices[0].model.doHooks('edgeCut', [self])
#Return the vertex on the other side of `agent`; raises ValueError if `agent` is not a vertex
def partner(self, agent):
if agent==self.vertices[0]: return self.vertices[1]
elif agent==self.vertices[1]: return self.vertices[0]
else: raise ValueError('Agent',agent.id,'is not connected to this edge.')
#Replace `oldagent` with `newagent` on this edge, updating both agents' edge lists
def reassign(self, oldagent, newagent):
self.vertices = (self.partner(oldagent), newagent)
oldagent.edges[self.kind].remove(self)
newagent.edges[self.kind].append(self)
newagent.model.doHooks('edgeReassign', [self, oldagent, newagent])
class Stocks:
    """Dict-like container for an agent's goods.

    Each good maps to a dict of properties; 'quantity' is the primary one.
    Indexing: s[good] -> quantity; s[good, prop] -> that property;
    s[good, True] -> the whole property dict; s[good, False] -> quantity.
    """
    def __init__(self, breed, goodslist):
        self.goods = {g:{} for g in goodslist}
        for good, ginfo in goodslist.items():
            for p, fn in ginfo.props.items():
                #An endowment may be a callable of breed, a (lo, hi) range, a constant, or None (=0)
                endow = fn(breed) if callable(fn) else fn
                if endow is None: self.goods[good][p] = 0
                elif isinstance(endow, (tuple, list)): self.goods[good][p] = randint(*endow)
                else: self.goods[good][p] = endow

    def __getitem__(self, key):
        if type(key) is str: return self.goods[key]['quantity']
        elif type(key) is tuple:
            if type(key[1]) is str: return self.goods[key[0]][key[1]]
            elif key[1]==True: return self.goods[key[0]]
            #BUG FIX: previously indexed self.goods with the whole tuple, raising KeyError
            elif key[1]==False: return self.goods[key[0]]['quantity']
        raise KeyError

    def __setitem__(self, key, val):
        if type(key) is str: self.goods[key]['quantity'] = val
        elif type(key) is tuple and type(key[1]) is str: self.goods[key[0]][key[1]] = val
        else: raise KeyError

    def __iter__(self): return iter({k: g['quantity'] for k,g in self.goods.items()})
    #NOTE(review): next() on a dict raises TypeError, so __next__ is broken; it appears unused
    #and is left as-is to avoid changing observable behavior
    def __next__(self): return next({k: g['quantity'] for k,g in self.goods.items()})
    def __len__(self): return len(self.goods)
    def keys(self): return self.goods.keys()
    def values(self): return [g['quantity'] for g in self.goods.values()]
    def items(self): return [(k, g['quantity']) for k,g in self.goods.items()]
# Basic extensible agent class
# Do not run this file; import model.py and run from your file.
# ==========
from random import choice, randint
from numpy import *
#Basic agent functions. This class should not be instantiated directly; instead it should be
#subclassed by a class corresponding to a primitive and registered with Helipad.addPrimitive().
#See below, the Agent() class for a minimal example.
class baseAgent:
fixed = False
#==================
# UTILITY METHODS
#==================
def __init__(self, breed, id, model):
self.breed = breed
self.id = int(id)
self.model = model
self.age = 0
self.dead = False
self.stocks = Stocks(breed, model.goods)
self.edges = {}
self.utils = 0
self.position = None #Overridden in spatial init
self.currentDemand = {g:0 for g in model.goods.keys()}
self.currentShortage = {g:0 for g in model.goods.keys()}
if hasattr(super(), 'runInit'): super().__init__() #For multi-level models
self.model.doHooks(['baseAgentInit', self.primitive+'Init'], [self, self.model])
def step(self, stage):
self.model.doHooks(['baseAgentStep', self.primitive+'Step'], [self, self.model, stage])
if hasattr(super(), 'runInit'): super().step(stage) #For multi-level models
if stage == self.model.stages: self.age += 1
#==================
# ECONOMIC METHODS
#==================
#Give amt1 of good 1, get amt2 of good 2
#Negative values of amt1 and amt2 allowed, which reverses the direction
def trade(self, partner, good1, amt1, good2, amt2):
self.model.doHooks('preTrade', [self, partner, good1, amt1, good2, amt2])
if amt2 != 0: price = amt1 / amt2
#Budget constraints. Hold price constant if hit
if amt1 > self.stocks[good1]:
self.currentShortage[good1] += amt1 - self.stocks[good1]
amt1 = self.stocks[good1]
if amt2 != 0: amt2 = amt1 / price
elif -amt1 > partner.stocks[good1]:
partner.currentShortage[good1] += -amt1 - partner.stocks[good1]
amt1 = -partner.stocks[good1]
if amt2 != 0: amt2 = amt1 / price
if amt2 > partner.stocks[good2]:
partner.currentShortage[good2] += amt1 - partner.stocks[good2]
amt2 = partner.stocks[good2]
amt1 = price * amt2
elif -amt2 > self.stocks[good2]:
self.currentShortage[good2] += -amt1 - self.stocks[good2]
amt2 = -self.stocks[good2]
amt1 = price * amt2
self.stocks[good1] -= amt1
partner.stocks[good1] += amt1
self.stocks[good2] += amt2
partner.stocks[good2] -= amt2
#Record demand
if amt1 > 0: partner.currentDemand[good1] += amt1
else: self.currentDemand[good1] -= amt1
if amt2 > 0: self.currentDemand[good2] += amt2
else: partner.currentDemand[good2] -= amt2
self.model.doHooks('postTrade', [self, partner, good1, amt1, good2, amt2])
#Price is per-unit
#Returns the quantity actually sold, Which is the same as quantity input unless there's a shortage
def buy(self, partner, good, q, p):
if self.model.moneyGood is None: raise RuntimeError('Buy function requires a monetary good to be specified')
qp = self.model.doHooks('buy', [self, partner, good, q, p])
if qp is not None: q, p = qp
before = self.stocks[good]
self.trade(partner, self.model.moneyGood, p*q, good, q)
return self.stocks[good] - before
#Unilateral
def pay(self, recipient, amount):
if self.model.moneyGood is None: raise RuntimeError('Pay function requires a monetary good to be specified')
#Budget constraint and hooks
if amount > self.balance: amount = self.balance
if -amount > recipient.balance: amount = -recipient.balance
amount_ = self.model.doHooks('pay', [self, recipient, amount, self.model])
if amount_ is not None: amount = amount_
if amount != 0:
recipient.stocks[self.model.moneyGood] += amount
self.stocks[self.model.moneyGood] -= amount
return amount
@property
def balance(self):
if self.model.moneyGood is None: raise RuntimeError('Balance checking requires a monetary good to be specified')
bal = self.stocks[self.model.moneyGood]
bal_ = self.model.doHooks('checkBalance', [self, bal, self.model])
if bal_ is not None: bal = bal_
return bal
#==================
# GENETIC METHODS
#==================
def reproduce(self, inherit=[], mutate={}, partners=[]):
if self.fixed: raise NotImplementedError('Fixed primitives cannot reproduce.')
maxid = 0
for a in self.model.allagents.values():
if a.id > maxid:
maxid = a.id
newagent = type(self)(self.breed, maxid+1, self.model)
#Values in the inherit list can either be a variable name (in which case the new agent inherits
#the mean of all of the values for the parents), or a tuple, the first element of which is a
#variable name, and the second is a string representing how to merge them. Possible values are
#'mean' (default for numeric values), 'first' (default for non-numeric values), 'last', 'gmean',
#'random', and 'sum'. The second value can also take a function, which receives a list of
#values from the parents and returns a value for the child.
parents = [self] + partners
for a in inherit:
stat = None
if isinstance(a, tuple): a, stat = a
v = [getattr(p,a) for p in parents if hasattr(p,a)] #List of values, filtering those without
if len(v)==0: continue
#Default statistic if unspecified. 'mean' for numbers, and 'first' for non-numbers.
if stat is None:
stat = 'mean' if isinstance(v[0], (int, float, complex)) and not isinstance(v[0], bool) else 'first'
if stat=='mean': n = mean(v)
elif stat=='sum': n = sum(v)
elif stat=='gmean': n = exp(log(v).sum()/len(v))
elif stat=='first': n = v[0]
elif stat=='last': n = v[len(v)-1]
elif stat=='rand' or stat=='random': n = choice(v)
elif stat=='max': n = max(v)
elif stat=='min': n = min(v)
elif callable(stat): n = stat(v)
else: raise ValueError("Invalid statistic in reproduction function.")
setattr(newagent, a, n)
#Mutate variables
#Values in the mutate dict can be either a function (which takes a value and returns a value),
# a number (a std dev by which to mutate the value), or a tuple, the first element of which
# is a std dev and the second of which is either 'log' or 'linear'
for k,v in mutate.items():
if callable(v): newval = v(getattr(newagent, k))
else:
if isinstance(v, tuple): v, scale = v
else: scale = 'linear'
if scale=='log': newval = random.lognormal(log(getattr(newagent, k)), v)
else: newval = random.normal(getattr(newagent, k), v)
setattr(newagent, k, newval)
newagent.id = maxid+1
for p in parents:
p.newEdge(newagent,'lineage', True) #Keep track of parent-child relationships
self.model.agents[self.primitive].append(newagent)
self.model.doHooks(['baseAgentReproduce', self.primitive+'Reproduce'], [parents, newagent, self.model])
return newagent
def die(self, updateGUI=True):
if self.fixed: raise NotImplementedError('Fixed primitives cannot die.')
self.model.agents[self.primitive].remove(self)
for edge in self.alledges: edge.cut()
self.dead = True
self.model.doHooks(['baseAgentDie', self.primitive+'Die'], [self])
@property
def parent(self):
p = self.inbound('lineage', obj='agent')
if len(p)==0: return None
elif len(p)==1: return p[0]
else: return p
@property
def children(self):
return [edge.partner(self) for edge in self.outbound('lineage')]
#==================
# NETWORK METHODS
#==================
def newEdge(self, partner, kind='edge', direction=None, weight=1):
return Edge(self, partner, kind, direction, weight)
def outbound(self, kind='edge', undirected=False, obj='edge'):
if obj not in ['agent', 'edge']: raise ValueError('Object must be specified either \'agent\' or \'edge\'.')
if kind is None: edges = self.alledges
else:
if not kind in self.edges: return []
edges = self.edges[kind]
ob = [edge for edge in edges if edge.startpoint == self or (undirected and not edge.directed)]
return ob if obj=='edge' else [e.partner(self) for e in ob]
def inbound(self, kind='edge', undirected=False, obj='edge'):
if obj not in ['agent', 'edge']: raise ValueError('Object must be specified either \'agent\' or \'edge\'.')
if kind is None: edges = self.alledges
else:
if not kind in self.edges: return []
edges = self.edges[kind]
ib = [edge for edge in edges if edge.endpoint == self or (undirected and not edge.directed)]
return ib if obj=='edge' else [e.partner(self) for e in ib]
def edgesWith(self, partner, kind='edge'):
if kind is not None:
if not kind in self.edges: return []
edges = self.edges[kind]
else: edges = self.alledges
return [edge for edge in edges if self in edge.vertices and partner in edge.vertices]
@property
def alledges(self):
edges = []
for e in self.edges.values(): edges += e
return edges
#==================
# OTHER METHODS
#==================
#In a multi-level model, allow the agent to move to a different deme/firm/etc
def transfer(self, dest):
origin = self.model
dest.agents[self.primitive].append(self)
self.model = dest
origin.agents[self.primitive].remove(self)
self.model.doHooks(['baseAgentMove', self.primitive+'Move'], [self, origin, dest])
#The default agent class corresponding to the 'agent' primitive.
class Agent(baseAgent):
pass
#For spatial models
class Patch(baseAgent):
fixed = True
@property
def neighbors(self):
return self.outbound('space', True, obj='agent')
@property
def up(self):
if self.y==0 and not self.model.param('wrap'): return None
return self.model.patches[self.x, self.y-1 if self.y > 0 else self.model.param('y')-1]
@property
def right(self):
if self.x>=self.model.param('x')-1 and not self.model.param('wrap'): return None
return self.model.patches[self.x+1 if self.x < self.model.param('x')-1 else 0, self.y]
@property
def down(self):
if self.y>=self.model.param('y')-1 and not self.model.param('wrap'): return None
return self.model.patches[self.x, self.y+1 if self.y < self.model.param('y')-1 else 0]
@property
def left(self):
if self.x==0 and not self.model.param('wrap'): return None
return self.model.patches[self.x-1 if self.x > 0 else self.model.param('x')-1, self.y]
@property
def agentsOn(self):
for prim, lst in self.model.agents.items():
if prim=='patch': continue
yield from [a for a in lst if self.x-0.5<=a.x<self.x+0.5 and self.y-0.5<=a.y<self.y+0.5]
#Direction can take an Agent object (corresponding to the endpoint),
#an int (0 for undirected, >0 for agent1 to agent2, and <0 for agent2 to agent1),
#or a boolean (False for undirected, True for agent1 to agent2)
class Edge:
def __init__(self, agent1, agent2, kind='edge', direction=None, weight=1):
self.active = True
self.kind = kind
self.vertices = (agent1, agent2)
self.weight = weight
self.directed = False
if direction is not None:
self.directed = True
if isinstance(direction, int):
if direction==0: self.directed = False
elif direction>0: self.startpoint, self.endpoint = (agent1, agent2)
elif direction<0: self.startpoint, self.endpoint = (agent2, agent1)
elif isinstance(direction, bool):
self.directed = direction
if direction: self.startpoint, self.endpoint = (agent1, agent2)
elif isinstance(direction, baseAgent):
if direction not in self.vertices: raise ValueError('Direction must select one of the agents as an endpoint.')
self.endpoint = direction
self.startpoint = agent1 if direction==agent2 else agent2
else: raise ValueError('Direction must be either int, bool, or agent.')
if not self.directed:
self.endpoint, self.startpoint, self.directed = (None, None, False)
#Add object to each agent, and to the model
for agent in self.vertices:
if not kind in agent.edges: agent.edges[kind] = []
if not self in agent.edges[kind]: agent.edges[kind].append(self) #Don't add self-links twice
agent1.model.doHooks('edgeInit', [self, kind, agent1, agent2])
def cut(self):
for agent in self.vertices:
if self in agent.edges[self.kind]: agent.edges[self.kind].remove(self) #Remove from agents
self.active = False
self.vertices[0].model.doHooks('edgeCut', [self])
def partner(self, agent):
if agent==self.vertices[0]: return self.vertices[1]
elif agent==self.vertices[1]: return self.vertices[0]
else: raise ValueError('Agent',agent.id,'is not connected to this edge.')
def reassign(self, oldagent, newagent):
self.vertices = (self.partner(oldagent), newagent)
oldagent.edges[self.kind].remove(self)
newagent.edges[self.kind].append(self)
newagent.model.doHooks('edgeReassign', [self, oldagent, newagent])
class Stocks:
def __init__(self, breed, goodslist):
self.goods = {g:{} for g in goodslist}
for good, ginfo in goodslist.items():
for p, fn in ginfo.props.items():
endow = fn(breed) if callable(fn) else fn
if endow is None: self.goods[good][p] = 0
elif isinstance(endow, tuple) or isinstance(endow, list): self.goods[good][p] = randint(*endow)
else: self.goods[good][p] = endow
def __getitem__(self, key):
if type(key) is str: return self.goods[key]['quantity']
elif type(key) is tuple:
if type(key[1]) is str: return self.goods[key[0]][key[1]]
elif key[1]==True: return self.goods[key[0]]
elif key[1]==False: return self.goods[key]['quantity']
raise KeyError
def __setitem__(self, key, val):
if type(key) is str: self.goods[key]['quantity'] = val
elif type(key) is tuple and type(key[1]) is str: self.goods[key[0]][key[1]] = val
else: raise KeyError
def __iter__(self): return iter({k: g['quantity'] for k,g in self.goods.items()})
def __next__(self): return next({k: g['quantity'] for k,g in self.goods.items()})
def __len__(self): return len(self.goods)
def keys(self): return self.goods.keys()
def values(self): return [g['quantity'] for g in self.goods.values()]
def items(self): return [(k, g['quantity']) for k,g in self.goods.items()] | en | 0.76428 | # ========== # Basic extensible agent class # Do not run this file; import model.py and run from your file. # ========== #Basic agent functions. This class should not be instantiated directly; instead it should be #subclassed by a class corresponding to a primitive and registered with Helipad.addPrimitive(). #See below, the Agent() class for a minimal example. #================== # UTILITY METHODS #================== #Overridden in spatial init #For multi-level models #For multi-level models #================== # ECONOMIC METHODS #================== #Give amt1 of good 1, get amt2 of good 2 #Negative values of amt1 and amt2 allowed, which reverses the direction #Budget constraints. Hold price constant if hit #Record demand #Price is per-unit #Returns the quantity actually sold, Which is the same as quantity input unless there's a shortage #Unilateral #Budget constraint and hooks #================== # GENETIC METHODS #================== #Values in the inherit list can either be a variable name (in which case the new agent inherits #the mean of all of the values for the parents), or a tuple, the first element of which is a #variable name, and the second is a string representing how to merge them. Possible values are #'mean' (default for numeric values), 'first' (default for non-numeric values), 'last', 'gmean', #'random', and 'sum'. The second value can also take a function, which receives a list of #values from the parents and returns a value for the child. #List of values, filtering those without #Default statistic if unspecified. 'mean' for numbers, and 'first' for non-numbers. 
#Mutate variables #Values in the mutate dict can be either a function (which takes a value and returns a value), # a number (a std dev by which to mutate the value), or a tuple, the first element of which # is a std dev and the second of which is either 'log' or 'linear' #Keep track of parent-child relationships #================== # NETWORK METHODS #================== #================== # OTHER METHODS #================== #In a multi-level model, allow the agent to move to a different deme/firm/etc #The default agent class corresponding to the 'agent' primitive. #For spatial models #Direction can take an Agent object (corresponding to the endpoint), #an int (0 for undirected, >0 for agent1 to agent2, and <0 for agent2 to agent1), #or a boolean (False for undirected, True for agent1 to agent2) #Add object to each agent, and to the model #Don't add self-links twice #Remove from agents | 2.830501 | 3 |
nds/aurich.py | risklayer/corona-landkreis-crawler | 12 | 6619500 | <gh_stars>10-100
#!/usr/bin/python3
from botbase import *
#Marker text locating the "Aktualisiert:" timestamp node on the overview page
_stand = re.compile(r"Aktualisiert:")
def aurich(sheets):
    """Scrape the Landkreis Aurich corona overview page and push the figures
    (total cases, day-over-day delta, recovered, deaths, quarantine) to sheet 3452."""
    soup = get_soup("https://www.landkreis-aurich.de/fileadmin/ftp-upload/Uebersicht.htm")
    date = soup.find(text=_stand)
    #if not today().strftime("%d.%m.%Y") in date: raise NotYetAvailableException("Aurich noch alt: " + date)
    date = check_date(date.split(" ",2)[1], "Aurich")
    args = dict()
    for row in soup.findAll("tr"):
        row = [x.get_text(" ") for x in row.findAll(["td","th"])]
        if len(row) < 2: continue
        if "Gesamtanzahl" in row[0]: args["c"] = force_int(row[1])
        #NOTE(review): the delta marker is matched in row[1] (the value cell), not row[0] — confirm against page layout
        #BUG FIX: regex is now a raw string; "\s" in a plain string is an invalid escape sequence
        if "zum Vortag" in row[1]: args["cc"] = force_int(re.search(r"([+-]?\s*[0-9.]+)", row[1]).group(1))
        if "Genesene" in row[0]: args["g"] = force_int(row[1])
        if "Verstorbene" in row[0]: args["d"] = force_int(row[1])
        if "Quarantäne" in row[0]: args["q"] = force_int(row[1])
    assert "c" in args and "d" in args and "g" in args
    update(sheets, 3452, **args, sig="Bot", ignore_delta=False)
    return True
schedule.append(Task(10, 2, 14, 35, 600, aurich, 3452))
if __name__ == '__main__': aurich(googlesheets())
| #!/usr/bin/python3
from botbase import *
_stand = re.compile(r"Aktualisiert:")
def aurich(sheets):
soup = get_soup("https://www.landkreis-aurich.de/fileadmin/ftp-upload/Uebersicht.htm")
date = soup.find(text=_stand)
#if not today().strftime("%d.%M.%Y") in date: raise NotYetAvailableException("Aurich noch alt: " + date)
date = check_date(date.split(" ",2)[1], "Aurich")
args = dict()
for row in soup.findAll("tr"):
row = [x.get_text(" ") for x in row.findAll(["td","th"])]
#print(row)
if len(row) < 2: continue
if "Gesamtanzahl" in row[0]: args["c"] = force_int(row[1])
if "zum Vortag" in row[1]: args["cc"] = force_int(re.search("([+-]?\s*[0-9.]+)", row[1]).group(1))
if "Genesene" in row[0]: args["g"] = force_int(row[1])
if "Verstorbene" in row[0]: args["d"] = force_int(row[1])
if "Quarantäne" in row[0]: args["q"] = force_int(row[1])
#print(args)
assert "c" in args and "d" in args and "g" in args
update(sheets, 3452, **args, sig="Bot", ignore_delta=False)
return True
schedule.append(Task(10, 2, 14, 35, 600, aurich, 3452))
if __name__ == '__main__': aurich(googlesheets()) | en | 0.167957 | #!/usr/bin/python3 #if not today().strftime("%d.%M.%Y") in date: raise NotYetAvailableException("Aurich noch alt: " + date) #print(row) #print(args) | 2.63894 | 3 |
decoder.py | nehal309/QA-Task | 0 | 6619501 | """Module for decoding."""
import os
import time
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
from dataset import ids_to_tokens
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('decode_dir', 'decoded',
'Path to store decoded outputs')
tf.app.flags.DEFINE_integer('max_decode_steps', 1000000,
'Number of decoding steps.')
tf.app.flags.DEFINE_integer('decode_batches_per_ckpt', 8000,
'Number of batches to decode before restoring next '
'checkpoint')
DECODE_LOOP_DELAY_SECS = 60
DECODE_IO_FLUSH_INTERVAL = 100
class DecodeIO(object):
"""Writes the decoded and references to RKV files for Rouge score.
See nlp/common/utils/internal/rkv_parser.py for detail about rkv file.
"""
def __init__(self, outdir):
self._cnt = 0
self._outdir = outdir
if not os.path.exists(self._outdir):
os.mkdir(self._outdir)
self._ref_file = None
self._decode_file = None
def write(self, reference, decode):
"""Writes the reference and decoded outputs to RKV files.
Args:
reference: The human (correct) result.
decode: The machine-generated result
"""
self._ref_file.write('output=%s\n' % reference)
self._decode_file.write('output=%s\n' % decode)
self._cnt += 1
if self._cnt % DECODE_IO_FLUSH_INTERVAL == 0:
self._ref_file.flush()
self._decode_file.flush()
def reset_files(self):
"""Resets the output files. Must be called once before write()."""
if self._ref_file: self._ref_file.close()
if self._decode_file: self._decode_file.close()
timestamp = int(time.time())
self._ref_file = open(
os.path.join(self._outdir, 'ref%d'%timestamp), 'w')
self._decode_file = open(
os.path.join(self._outdir, 'decode%d'%timestamp), 'w')
class Decoder(object):
"""Decoder."""
def __init__(self, model, batch_reader, params, vocab):
"""
Args:
model: The dynamic coattention model.
batch_reader: The batch data reader.
params: paramters.
vocab: Vocabulary
"""
self._model = model
self._model.build_graph()
self._batch_reader = batch_reader
self._params = params
self._vocab = vocab
self._saver = tf.train.Saver()
self._decode_io = DecodeIO(FLAGS.decode_dir)
def loop(self):
"""Decoding loop for long running process."""
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
step = 0
while step < FLAGS.max_decode_steps:
time.sleep(DECODE_LOOP_DELAY_SECS)
if not self._decode(self._saver, sess):
continue
step += 1
def _decode(self, saver, sess):
"""Restore a checkpoint and decode it.
Args:
saver: Tensorflow checkpoint saver.
sess: Tensorflow session.
Returns:
If success, returns true, otherwise, false.
"""
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
if not (ckpt_state and ckpt_state.model_checkpoint_path):
tf.logging.info('No model to decode yet at %s', FLAGS.log_root)
return False
tf.logging.info('checkpoint path %s', ckpt_state.model_checkpoint_path)
ckpt_path = os.path.join(
FLAGS.log_root, os.path.basename(ckpt_state.model_checkpoint_path))
tf.logging.info('renamed checkpoint path %s', ckpt_path)
saver.restore(sess, ckpt_path)
self._decode_io.reset_files()
for _ in range(FLAGS.decode_batches_per_ckpt):
(batch_context, batch_question, _,
origin_context, origin_question, _) = data_batcher.next()
guess = np.zeros((2, model._params.batch_size))
# model inference
(start, end) = model.infer(sess, batch_context, batch_question, guess)
self._decode_batch(
batch_context, start, end)
return True
def _decode_batch(self, batch_context, start, end):
"""Convert id to words and writing results.
Args:
batch_context: Batch of original context string.
start: The start word position output by machine.
end: The end word position output by machine.
"""
for i in range(self._params.batch_size):
c = list(map(lambda x: ids_to_tokens(x, self._vocab), batch_context[i]))
context = ' '.join(c)
answer = ' '.join(c[start[i]:end[i]+1])
tf.logging.info('context: %s', context)
tf.logging.info('answer: %s', answer)
self._decode_io.write(context.strip(), answer.strip())
| """Module for decoding."""
import os
import time
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
from dataset import ids_to_tokens
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('decode_dir', 'decoded',
'Path to store decoded outputs')
tf.app.flags.DEFINE_integer('max_decode_steps', 1000000,
'Number of decoding steps.')
tf.app.flags.DEFINE_integer('decode_batches_per_ckpt', 8000,
'Number of batches to decode before restoring next '
'checkpoint')
DECODE_LOOP_DELAY_SECS = 60
DECODE_IO_FLUSH_INTERVAL = 100
class DecodeIO(object):
"""Writes the decoded and references to RKV files for Rouge score.
See nlp/common/utils/internal/rkv_parser.py for detail about rkv file.
"""
def __init__(self, outdir):
self._cnt = 0
self._outdir = outdir
if not os.path.exists(self._outdir):
os.mkdir(self._outdir)
self._ref_file = None
self._decode_file = None
def write(self, reference, decode):
"""Writes the reference and decoded outputs to RKV files.
Args:
reference: The human (correct) result.
decode: The machine-generated result
"""
self._ref_file.write('output=%s\n' % reference)
self._decode_file.write('output=%s\n' % decode)
self._cnt += 1
if self._cnt % DECODE_IO_FLUSH_INTERVAL == 0:
self._ref_file.flush()
self._decode_file.flush()
def reset_files(self):
"""Resets the output files. Must be called once before write()."""
if self._ref_file: self._ref_file.close()
if self._decode_file: self._decode_file.close()
timestamp = int(time.time())
self._ref_file = open(
os.path.join(self._outdir, 'ref%d'%timestamp), 'w')
self._decode_file = open(
os.path.join(self._outdir, 'decode%d'%timestamp), 'w')
class Decoder(object):
"""Decoder."""
def __init__(self, model, batch_reader, params, vocab):
"""
Args:
model: The dynamic coattention model.
batch_reader: The batch data reader.
params: paramters.
vocab: Vocabulary
"""
self._model = model
self._model.build_graph()
self._batch_reader = batch_reader
self._params = params
self._vocab = vocab
self._saver = tf.train.Saver()
self._decode_io = DecodeIO(FLAGS.decode_dir)
def loop(self):
"""Decoding loop for long running process."""
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
step = 0
while step < FLAGS.max_decode_steps:
time.sleep(DECODE_LOOP_DELAY_SECS)
if not self._decode(self._saver, sess):
continue
step += 1
def _decode(self, saver, sess):
"""Restore a checkpoint and decode it.
Args:
saver: Tensorflow checkpoint saver.
sess: Tensorflow session.
Returns:
If success, returns true, otherwise, false.
"""
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
if not (ckpt_state and ckpt_state.model_checkpoint_path):
tf.logging.info('No model to decode yet at %s', FLAGS.log_root)
return False
tf.logging.info('checkpoint path %s', ckpt_state.model_checkpoint_path)
ckpt_path = os.path.join(
FLAGS.log_root, os.path.basename(ckpt_state.model_checkpoint_path))
tf.logging.info('renamed checkpoint path %s', ckpt_path)
saver.restore(sess, ckpt_path)
self._decode_io.reset_files()
for _ in range(FLAGS.decode_batches_per_ckpt):
(batch_context, batch_question, _,
origin_context, origin_question, _) = data_batcher.next()
guess = np.zeros((2, model._params.batch_size))
# model inference
(start, end) = model.infer(sess, batch_context, batch_question, guess)
self._decode_batch(
batch_context, start, end)
return True
def _decode_batch(self, batch_context, start, end):
"""Convert id to words and writing results.
Args:
batch_context: Batch of original context string.
start: The start word position output by machine.
end: The end word position output by machine.
"""
for i in range(self._params.batch_size):
c = list(map(lambda x: ids_to_tokens(x, self._vocab), batch_context[i]))
context = ' '.join(c)
answer = ' '.join(c[start[i]:end[i]+1])
tf.logging.info('context: %s', context)
tf.logging.info('answer: %s', answer)
self._decode_io.write(context.strip(), answer.strip())
| en | 0.683661 | Module for decoding. Writes the decoded and references to RKV files for Rouge score. See nlp/common/utils/internal/rkv_parser.py for detail about rkv file. Writes the reference and decoded outputs to RKV files. Args: reference: The human (correct) result. decode: The machine-generated result Resets the output files. Must be called once before write(). Decoder. Args: model: The dynamic coattention model. batch_reader: The batch data reader. params: paramters. vocab: Vocabulary Decoding loop for long running process. Restore a checkpoint and decode it. Args: saver: Tensorflow checkpoint saver. sess: Tensorflow session. Returns: If success, returns true, otherwise, false. # model inference Convert id to words and writing results. Args: batch_context: Batch of original context string. start: The start word position output by machine. end: The end word position output by machine. | 2.55601 | 3 |
job-template/job/tf_distributed_train/tfjob_launcher.py | jollyshuai/cube-studio | 1 | 6619502 |
import datetime
import json
import subprocess
import time
import uuid
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from job.pkgs.constants import NodeAffnity, JOB_DEF_NAMESPACE, WORKER_DEF_RESOURCE_LIMITS, DEF_IMAGE_PULL_SECRETS, \
ComputeResource, PodAffnity
from job.pkgs.context import JobComponentRunner, KFJobContext
from job.pkgs.k8s.tfjob import TFJob
from job.pkgs.utils import parse_timedelta
TRAINER_TYPE_PLAIN = "plain"
TRAINER_TYPE_RUNNER = "runner"
TRAINER_SPECS = {
TRAINER_TYPE_PLAIN: {
"image": "ai.tencentmusic.com/tme-public/tf2.3_plain_train:latest",
"cmd": ["python3", "-m", "job.tf_plain_train.plain_train"]
},
TRAINER_TYPE_RUNNER: {
"image": "ai.tencentmusic.com/tme-public/tf2.3_keras_train:latest",
"cmd": ["python3", "-m", "job.tf_keras_train.runner_train"]
}
}
class TFJobLauncher(JobComponentRunner):
def job_func(self, jc_entry):
job = jc_entry.job
job_name = job.get('name')
job_namespace = job.get('namespace') or jc_entry.context.namespace or JOB_DEF_NAMESPACE
num_workers = int(job.get('num_workers', 1))
num_pss = int(job.get('num_pss', 0))
node_affin = job.get("node_affin")
pod_affin = job.get("pod_affin")
node_selector = job.get("node_selector", {}) or jc_entry.context.parsed_node_selector()
resources = job.get("resources")
if not isinstance(resources, dict) or 'limits' not in resources:
print("user specified resource {} not valid".format(resources))
resources = jc_entry.context.parsed_resource_spec()
if resources:
print("will use resource spec from tfjob for workers: {}".format(resources))
else:
resources = WORKER_DEF_RESOURCE_LIMITS
ps_resources = job.get("ps_resources")
chief_resources = job.get("chief_resources")
if (ComputeResource.P_GPU in resources['limits'] or ComputeResource.V_GPU_CORE in resources['limits']) \
and not node_affin:
node_affin = NodeAffnity.ONLY_GPU
print("auto set node_affin={}".format(node_affin))
if node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU] and 'cpu' in node_selector:
node_selector.pop('cpu', None)
print("auto poped up 'cpu' in node selector: {}".format(node_selector))
if node_affin in [NodeAffnity.ONLY_CPU, NodeAffnity.PREF_CPU] and 'gpu' in node_selector:
node_selector.pop('gpu', None)
print("auto poped up 'gpu' in node selector: {}".format(node_selector))
restart_policy = job.get("restart_policy", '').strip()
if restart_policy and restart_policy not in ['OnFailure', 'Always', 'ExitCode', 'Never']:
print("WARNING: unrecognized 'restart_policy' '{}', reset to 'Never'".format(restart_policy))
restart_policy = 'Never'
backoff_limits = job.get("backoff_limits", num_workers)
if backoff_limits < 0:
print("WARNING: 'backoff_limits' should be >=0, got {}, defaults to 1".format(backoff_limits))
backoff_limits = 1
job_timeout = parse_timedelta(job.get('timeout', '365d'))
job_polling_interval = parse_timedelta(job.get('polling_interval', '30s'))
trainer_type = job.get("trainer", TRAINER_TYPE_RUNNER).strip().lower()
trainer_spec = TRAINER_SPECS.get(trainer_type)
if not trainer_spec:
raise NotImplementedError("unsupported trainer type '{}', supported are {}"
.format(trainer_type, TRAINER_SPECS.keys()))
print("use trainer '{}', spec={}, num_workers={}, num_pss={}"
.format(trainer_type, trainer_spec, num_workers, num_pss))
driver_job_detail = job.get('job_detail')
driver_args = [
"--job", json.dumps(driver_job_detail),
"--pack-path", jc_entry.pack_path,
"--upstream-output-file", jc_entry.upstream_output_file,
"--export-path", jc_entry.export_path,
"--pipeline-id", jc_entry.pipeline_id,
"--run-id", jc_entry.run_id,
"--creator", jc_entry.creator,
"--output-file", jc_entry.output_file or self.output_file
]
driver_mounts = jc_entry.context.parsed_volumn_mounts() or []
job_labels = {
"run-rtx": jc_entry.runner,
"upload-rtx": jc_entry.creator,
"pipeline-id": jc_entry.pipeline_id,
"run-id": jc_entry.run_id,
"workflow-name": jc_entry.pipeline_name,
'task-id': jc_entry.task_id,
'task-name': jc_entry.task_name
}
user_envs = job.get("envs")
driver_envs = jc_entry.context.to_k8s_env_list()
if isinstance(user_envs, dict):
for k, v in user_envs.items():
driver_envs.append({"name": str(k), "value": str(v)})
if 'profile_batch' in driver_job_detail.get('train_args', {}).get('tensorboard', {}) and \
node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
privileged = True
print("job use gpu and tf profiler, set privileged=True")
else:
privileged = False
self.launch_tfjob(job_name, job_namespace, num_workers, num_pss, trainer_spec.get("image"),
trainer_spec.get("cmd"), driver_args, driver_envs, driver_mounts, resources,
restart_policy, node_affin, pod_affin, job_labels, backoff_limits, job_timeout,
job_polling_interval, False, node_selector, privileged, jc_entry.creator,
ps_resources, chief_resources)
@classmethod
def default_job_name(cls):
import re
ctx = KFJobContext.get_context()
p_name = str(ctx.pipeline_name) or ''
p_name = re.sub(r'[^-a-z0-9]', '-', p_name)
jid = str(uuid.uuid4()).replace('-', '')
return "-".join(["tfjob", p_name, jid])[:54]
# return "tfjob-" + str(uuid.uuid1())
@classmethod
def launch_tfjob(cls, name, namespace, num_workers, num_pss, driver_image, driver_cmd,
driver_args, driver_envs, driver_mounts, resources=None, restart_policy=None,
node_affin=None, pod_affin=None, job_labels={}, backoff_limits=3, job_timeout=None,
job_polling_interval=None, delete_after_finish=False, node_selector={}, privileged=False,
creator='', ps_resources=None, chief_resources=None):
subprocess.check_call("echo '10.101.140.98 cls-g9v4gmm0.ccs.tencent-cloud.com' >> /etc/hosts", shell=True)
k8s_config.load_incluster_config()
k8s_api_client = k8s_client.ApiClient()
tfjob = TFJob("v1", k8s_api_client)
job_name = name.strip() if name and name.strip() else cls.default_job_name()
if node_affin == NodeAffnity.PREF_GPU:
node_affin = NodeAffnity.ONLY_GPU
print("WARING: 'node_affin' set to 'pref_gpu', changed it to 'only_gpu' to avoid heterogeneity")
if node_affin == NodeAffnity.PREF_CPU:
node_affin = NodeAffnity.ONLY_CPU
print("WARING: 'node_affin' set to 'pref_cpu', changed it to 'only_cpu' to avoid heterogeneity")
if not pod_affin and node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
pod_affin = PodAffnity.CONCENT
print("auto set pod_affin to {}".format(pod_affin))
st = time.perf_counter()
print('begin create new tfjob %s' % job_name)
tfjob.create(job_name, namespace, num_workers, num_pss, driver_image, driver_cmd, driver_args,
driver_envs, driver_mounts, resources, restart_policy, DEF_IMAGE_PULL_SECRETS,
node_affin, pod_affin, job_labels, backoff_limits, node_selector, privileged, creator,
ps_resources, chief_resources)
job_timeout = job_timeout if job_timeout else datetime.timedelta(days=365)
job_polling_inteval = job_polling_interval if job_polling_interval else datetime.timedelta(seconds=30)
condition = tfjob.wait_for_condition(namespace, job_name, ["Succeeded", "Failed"], job_timeout,
job_polling_inteval, trace_worker_log=True)
print("TFJob '{}' finished in condition '{}', cost {}s".format(job_name, condition, time.perf_counter() - st))
if condition != 'Succeeded':
raise RuntimeError("TFJob '{}' in namespace '{}' failed, num_workers={}, driver_args={}"
.format(job_name, namespace, num_workers, driver_args))
if delete_after_finish:
print("will delete tfjob '{}' in '{}'".format(job_name, namespace))
tfjob.delete(name=job_name, namespace=namespace)
print("deleted tfjob '{}' in '{}'".format(job_name, namespace))
if __name__ == "__main__":
runner = TFJobLauncher("TFJob launcher for train component")
runner.run()
|
import datetime
import json
import subprocess
import time
import uuid
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from job.pkgs.constants import NodeAffnity, JOB_DEF_NAMESPACE, WORKER_DEF_RESOURCE_LIMITS, DEF_IMAGE_PULL_SECRETS, \
ComputeResource, PodAffnity
from job.pkgs.context import JobComponentRunner, KFJobContext
from job.pkgs.k8s.tfjob import TFJob
from job.pkgs.utils import parse_timedelta
TRAINER_TYPE_PLAIN = "plain"
TRAINER_TYPE_RUNNER = "runner"
TRAINER_SPECS = {
TRAINER_TYPE_PLAIN: {
"image": "ai.tencentmusic.com/tme-public/tf2.3_plain_train:latest",
"cmd": ["python3", "-m", "job.tf_plain_train.plain_train"]
},
TRAINER_TYPE_RUNNER: {
"image": "ai.tencentmusic.com/tme-public/tf2.3_keras_train:latest",
"cmd": ["python3", "-m", "job.tf_keras_train.runner_train"]
}
}
class TFJobLauncher(JobComponentRunner):
def job_func(self, jc_entry):
job = jc_entry.job
job_name = job.get('name')
job_namespace = job.get('namespace') or jc_entry.context.namespace or JOB_DEF_NAMESPACE
num_workers = int(job.get('num_workers', 1))
num_pss = int(job.get('num_pss', 0))
node_affin = job.get("node_affin")
pod_affin = job.get("pod_affin")
node_selector = job.get("node_selector", {}) or jc_entry.context.parsed_node_selector()
resources = job.get("resources")
if not isinstance(resources, dict) or 'limits' not in resources:
print("user specified resource {} not valid".format(resources))
resources = jc_entry.context.parsed_resource_spec()
if resources:
print("will use resource spec from tfjob for workers: {}".format(resources))
else:
resources = WORKER_DEF_RESOURCE_LIMITS
ps_resources = job.get("ps_resources")
chief_resources = job.get("chief_resources")
if (ComputeResource.P_GPU in resources['limits'] or ComputeResource.V_GPU_CORE in resources['limits']) \
and not node_affin:
node_affin = NodeAffnity.ONLY_GPU
print("auto set node_affin={}".format(node_affin))
if node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU] and 'cpu' in node_selector:
node_selector.pop('cpu', None)
print("auto poped up 'cpu' in node selector: {}".format(node_selector))
if node_affin in [NodeAffnity.ONLY_CPU, NodeAffnity.PREF_CPU] and 'gpu' in node_selector:
node_selector.pop('gpu', None)
print("auto poped up 'gpu' in node selector: {}".format(node_selector))
restart_policy = job.get("restart_policy", '').strip()
if restart_policy and restart_policy not in ['OnFailure', 'Always', 'ExitCode', 'Never']:
print("WARNING: unrecognized 'restart_policy' '{}', reset to 'Never'".format(restart_policy))
restart_policy = 'Never'
backoff_limits = job.get("backoff_limits", num_workers)
if backoff_limits < 0:
print("WARNING: 'backoff_limits' should be >=0, got {}, defaults to 1".format(backoff_limits))
backoff_limits = 1
job_timeout = parse_timedelta(job.get('timeout', '365d'))
job_polling_interval = parse_timedelta(job.get('polling_interval', '30s'))
trainer_type = job.get("trainer", TRAINER_TYPE_RUNNER).strip().lower()
trainer_spec = TRAINER_SPECS.get(trainer_type)
if not trainer_spec:
raise NotImplementedError("unsupported trainer type '{}', supported are {}"
.format(trainer_type, TRAINER_SPECS.keys()))
print("use trainer '{}', spec={}, num_workers={}, num_pss={}"
.format(trainer_type, trainer_spec, num_workers, num_pss))
driver_job_detail = job.get('job_detail')
driver_args = [
"--job", json.dumps(driver_job_detail),
"--pack-path", jc_entry.pack_path,
"--upstream-output-file", jc_entry.upstream_output_file,
"--export-path", jc_entry.export_path,
"--pipeline-id", jc_entry.pipeline_id,
"--run-id", jc_entry.run_id,
"--creator", jc_entry.creator,
"--output-file", jc_entry.output_file or self.output_file
]
driver_mounts = jc_entry.context.parsed_volumn_mounts() or []
job_labels = {
"run-rtx": jc_entry.runner,
"upload-rtx": jc_entry.creator,
"pipeline-id": jc_entry.pipeline_id,
"run-id": jc_entry.run_id,
"workflow-name": jc_entry.pipeline_name,
'task-id': jc_entry.task_id,
'task-name': jc_entry.task_name
}
user_envs = job.get("envs")
driver_envs = jc_entry.context.to_k8s_env_list()
if isinstance(user_envs, dict):
for k, v in user_envs.items():
driver_envs.append({"name": str(k), "value": str(v)})
if 'profile_batch' in driver_job_detail.get('train_args', {}).get('tensorboard', {}) and \
node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
privileged = True
print("job use gpu and tf profiler, set privileged=True")
else:
privileged = False
self.launch_tfjob(job_name, job_namespace, num_workers, num_pss, trainer_spec.get("image"),
trainer_spec.get("cmd"), driver_args, driver_envs, driver_mounts, resources,
restart_policy, node_affin, pod_affin, job_labels, backoff_limits, job_timeout,
job_polling_interval, False, node_selector, privileged, jc_entry.creator,
ps_resources, chief_resources)
@classmethod
def default_job_name(cls):
import re
ctx = KFJobContext.get_context()
p_name = str(ctx.pipeline_name) or ''
p_name = re.sub(r'[^-a-z0-9]', '-', p_name)
jid = str(uuid.uuid4()).replace('-', '')
return "-".join(["tfjob", p_name, jid])[:54]
# return "tfjob-" + str(uuid.uuid1())
@classmethod
def launch_tfjob(cls, name, namespace, num_workers, num_pss, driver_image, driver_cmd,
driver_args, driver_envs, driver_mounts, resources=None, restart_policy=None,
node_affin=None, pod_affin=None, job_labels={}, backoff_limits=3, job_timeout=None,
job_polling_interval=None, delete_after_finish=False, node_selector={}, privileged=False,
creator='', ps_resources=None, chief_resources=None):
subprocess.check_call("echo '10.101.140.98 cls-g9v4gmm0.ccs.tencent-cloud.com' >> /etc/hosts", shell=True)
k8s_config.load_incluster_config()
k8s_api_client = k8s_client.ApiClient()
tfjob = TFJob("v1", k8s_api_client)
job_name = name.strip() if name and name.strip() else cls.default_job_name()
if node_affin == NodeAffnity.PREF_GPU:
node_affin = NodeAffnity.ONLY_GPU
print("WARING: 'node_affin' set to 'pref_gpu', changed it to 'only_gpu' to avoid heterogeneity")
if node_affin == NodeAffnity.PREF_CPU:
node_affin = NodeAffnity.ONLY_CPU
print("WARING: 'node_affin' set to 'pref_cpu', changed it to 'only_cpu' to avoid heterogeneity")
if not pod_affin and node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
pod_affin = PodAffnity.CONCENT
print("auto set pod_affin to {}".format(pod_affin))
st = time.perf_counter()
print('begin create new tfjob %s' % job_name)
tfjob.create(job_name, namespace, num_workers, num_pss, driver_image, driver_cmd, driver_args,
driver_envs, driver_mounts, resources, restart_policy, DEF_IMAGE_PULL_SECRETS,
node_affin, pod_affin, job_labels, backoff_limits, node_selector, privileged, creator,
ps_resources, chief_resources)
job_timeout = job_timeout if job_timeout else datetime.timedelta(days=365)
job_polling_inteval = job_polling_interval if job_polling_interval else datetime.timedelta(seconds=30)
condition = tfjob.wait_for_condition(namespace, job_name, ["Succeeded", "Failed"], job_timeout,
job_polling_inteval, trace_worker_log=True)
print("TFJob '{}' finished in condition '{}', cost {}s".format(job_name, condition, time.perf_counter() - st))
if condition != 'Succeeded':
raise RuntimeError("TFJob '{}' in namespace '{}' failed, num_workers={}, driver_args={}"
.format(job_name, namespace, num_workers, driver_args))
if delete_after_finish:
print("will delete tfjob '{}' in '{}'".format(job_name, namespace))
tfjob.delete(name=job_name, namespace=namespace)
print("deleted tfjob '{}' in '{}'".format(job_name, namespace))
if __name__ == "__main__":
runner = TFJobLauncher("TFJob launcher for train component")
runner.run()
| en | 0.266503 | # return "tfjob-" + str(uuid.uuid1()) | 1.963061 | 2 |
openerp/addons/base_import/__init__.py | ntiufalara/openerp7 | 3 | 6619503 | import controllers
import models
import test_models
| import controllers
import models
import test_models
| none | 1 | 1.049423 | 1 | |
crawlers/parlens/spiders/rs_current_members.py | factly/parlens-crawlers | 0 | 6619504 | <gh_stars>0
# -*- coding: utf-8 -*-
import scrapy
from parlens.items import RSMembers
import datetime
import re
class RSCurrentMembersSpider(scrapy.Spider):
name = 'rs_current_members'
start_urls = ['https://rajyasabha.nic.in/rsnew/member_site/MemlistElDate.aspx']
error = open("./logs/errors.log","a+")
error.write("\n\n\n######## <NAME> Current Members Crawler "+str(datetime.datetime.now())+" ###########\n" )
custom_settings = {
"ITEM_PIPELINES": {
'parlens.pipelines.rsmembers.DuplicateCleaner': 5, # remove already existing member based on RSID
'parlens.pipelines.members.NameCleaner': 10, # seprate name and prefix
'parlens.pipelines.members.EducationCleaner': 20, # clean education field and assign value
'parlens.pipelines.members.MaritalCleaner': 30, # clean marital field and assign appropriate value
'parlens.pipelines.members.ProfessionCleaner': 50, # clean profession
'parlens.pipelines.rsmembers.DOBCleaner': 60, # convert dob into timestamp
'parlens.pipelines.rsmembers.ChildrenCleaner': 70, # clean sons and daughters field
'parlens.pipelines.rsmembers.GeoTermCleaner': 80, # convert geography field into GID
'parlens.pipelines.rsmembers.PartyTermCleaner': 90, # convert party field into PID
'parlens.pipelines.rsmembers.TermConstructor': 100, # Construct term object and remove party and geography field
}
}
def parse(self, response):
all_rows = response.css("#ctl00_ContentPlaceHolder1_GridView2").css("tr")
list_req_params = {
"__EVENTARGUMENT":"",
"__EVENTTARGET":"ctl00$ContentPlaceHolder1$GridView2$ctl05$lkb",
"__LASTFOCUS":"",
"__VIEWSTATE":response.css("input#__VIEWSTATE::attr(value)").extract_first(),
"__VIEWSTATEGENERATOR":"5E964A8E",
"ctl00$ContentPlaceHolder1$TextBox2":"",
"ctl00$ContentPlaceHolder1$search_name":"",
"domains":"rajyasabha.nic.in",
"q":"",
"sitesearch":"rajyasabha.nic.in",
"ctl00$ContentPlaceHolder1$RadioButtonList1": "Al"
}
for row in all_rows[1:]:
member_link = row.css("td > font > a::attr(id)").extract_first().replace("_","$")
member_term_from = row.css("td")[4].css("::text").extract_first()
member_term_to = row.css("td")[5].css("::text").extract_first()
list_req_params["__EVENTTARGET"] = member_link
yield scrapy.FormRequest(
url = "https://rajyasabha.nic.in/rsnew/member_site/MemlistElDate.aspx",
formdata=list_req_params,
callback=self.parse_profile,
dont_filter=True,
meta={
'term_from': member_term_from,
'term_to': member_term_to
},
method="POST"
)
def parse_profile(self, response):
RSID = response.css("img#ctl00_ContentPlaceHolder1_GridView1_ctl02_Image1").css("::attr(src)").extract_first().split("/")[1].replace("P", "").replace(".jpg", "")
name = response.css("span#ctl00_ContentPlaceHolder1_GridView1_ctl02_Label3").css("::text").extract_first().strip()
geography = response.css("table#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1").css("tr")[0].css("td")[1].css("::text").extract_first().strip()
party = response.css("table#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1").css("tr")[1].css("td")[1].css("::text").extract_first().strip()
dob = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label14").css("::text").extract_first().strip()
birth_place = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label15").css("::text").extract_first().strip()
marital_status = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label1").css("::text").extract_first().strip()
children = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label2").css("::text").extract_first().strip()
education = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label16").css("::text").extract_first().strip()
profession = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label17").css("::text").extract_first().strip()
phoneRaw = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label18").css("::text").extract_first().strip()
phone = re.findall("[0-9]{11}|[0-9]{10}", phoneRaw)
emailRaw = response.css("img#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1_Image21").css("::attr(src)").extract_first().split("=")[1].lower().replace(" ", "").replace("email", "").replace(":", "")
email = emailRaw.split(";") if emailRaw != "" else list()
yield RSMembers(
RSID = int(RSID),
name = name,
term = {
'geography': geography,
'party': party,
'from': response.meta['term_from'],
'to': response.meta['term_to'],
'house': 2,
'session': None
},
dob = dob if dob != '-' else None,
birth_place = birth_place if birth_place != '-' else None,
marital_status = marital_status if marital_status != '-' else None,
children = children if children != '-' else None,
education = education if education != '' else None,
profession = profession if profession != '' else None,
phone = phone if len(phone) != 0 else [],
email = email
) | # -*- coding: utf-8 -*-
import scrapy
from parlens.items import RSMembers
import datetime
import re
class RSCurrentMembersSpider(scrapy.Spider):
    """Crawl the list of current (sitting) Rajya Sabha members and their profiles.

    Starts from the member-list-by-election-date page, posts back for each
    member row, and parses the resulting profile pages into RSMembers items.
    """
    name = 'rs_current_members'
    # Landing page that lists all sitting members with their election dates.
    start_urls = ['https://rajyasabha.nic.in/rsnew/member_site/MemlistElDate.aspx']
    # NOTE(review): this file handle is opened at class-definition (import)
    # time and is never closed in the visible code; presumably it is shared
    # by error handlers elsewhere in the spider — confirm before refactoring.
    error = open("./logs/errors.log","a+")
    error.write("\n\n\n######## <NAME> Current Members Crawler "+str(datetime.datetime.now())+" ###########\n" )
    # Per-spider pipeline chain (lower number = earlier stage); each stage
    # cleans or normalizes one field of the scraped member record.
    custom_settings = {
        "ITEM_PIPELINES": {
            'parlens.pipelines.rsmembers.DuplicateCleaner': 5, # remove already existing member based on RSID
            'parlens.pipelines.members.NameCleaner': 10, # seprate name and prefix
            'parlens.pipelines.members.EducationCleaner': 20, # clean education field and assign value
            'parlens.pipelines.members.MaritalCleaner': 30, # clean marital field and assign appropriate value
            'parlens.pipelines.members.ProfessionCleaner': 50, # clean profession
            'parlens.pipelines.rsmembers.DOBCleaner': 60, # convert dob into timestamp
            'parlens.pipelines.rsmembers.ChildrenCleaner': 70, # clean sons and daughters field
            'parlens.pipelines.rsmembers.GeoTermCleaner': 80, # convert geography field into GID
            'parlens.pipelines.rsmembers.PartyTermCleaner': 90, # convert party field into PID
            'parlens.pipelines.rsmembers.TermConstructor': 100, # Construct term object and remove party and geography field
        }
    }
    def parse(self, response):
        """Parse the member-list page and request each member's profile.

        The Rajya Sabha site is an ASP.NET WebForms app, so member links
        are __EVENTTARGET postbacks rather than plain hrefs: we replay the
        form with the link's control id and carry the member's term dates
        along in the request meta for parse_profile.
        """
        all_rows = response.css("#ctl00_ContentPlaceHolder1_GridView2").css("tr")
        # Base postback payload; __EVENTTARGET is overwritten per member.
        list_req_params = {
            "__EVENTARGUMENT":"",
            "__EVENTTARGET":"ctl00$ContentPlaceHolder1$GridView2$ctl05$lkb",
            "__LASTFOCUS":"",
            "__VIEWSTATE":response.css("input#__VIEWSTATE::attr(value)").extract_first(),
            "__VIEWSTATEGENERATOR":"5E964A8E",
            "ctl00$ContentPlaceHolder1$TextBox2":"",
            "ctl00$ContentPlaceHolder1$search_name":"",
            "domains":"rajyasabha.nic.in",
            "q":"",
            "sitesearch":"rajyasabha.nic.in",
            "ctl00$ContentPlaceHolder1$RadioButtonList1": "Al"
        }
        # Skip the header row; every remaining row is one member.
        for row in all_rows[1:]:
            # ASP.NET control ids use '_' in HTML but '$' in postbacks.
            member_link = row.css("td > font > a::attr(id)").extract_first().replace("_","$")
            member_term_from = row.css("td")[4].css("::text").extract_first()
            member_term_to = row.css("td")[5].css("::text").extract_first()
            list_req_params["__EVENTTARGET"] = member_link
            yield scrapy.FormRequest(
                url = "https://rajyasabha.nic.in/rsnew/member_site/MemlistElDate.aspx",
                formdata=list_req_params,
                callback=self.parse_profile,
                dont_filter=True,
                meta={
                    'term_from': member_term_from,
                    'term_to': member_term_to
                },
                method="POST"
            )
def parse_profile(self, response):
RSID = response.css("img#ctl00_ContentPlaceHolder1_GridView1_ctl02_Image1").css("::attr(src)").extract_first().split("/")[1].replace("P", "").replace(".jpg", "")
name = response.css("span#ctl00_ContentPlaceHolder1_GridView1_ctl02_Label3").css("::text").extract_first().strip()
geography = response.css("table#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1").css("tr")[0].css("td")[1].css("::text").extract_first().strip()
party = response.css("table#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1").css("tr")[1].css("td")[1].css("::text").extract_first().strip()
dob = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label14").css("::text").extract_first().strip()
birth_place = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label15").css("::text").extract_first().strip()
marital_status = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label1").css("::text").extract_first().strip()
children = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label2").css("::text").extract_first().strip()
education = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label16").css("::text").extract_first().strip()
profession = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label17").css("::text").extract_first().strip()
phoneRaw = response.css("span#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label18").css("::text").extract_first().strip()
phone = re.findall("[0-9]{11}|[0-9]{10}", phoneRaw)
emailRaw = response.css("img#ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1_Image21").css("::attr(src)").extract_first().split("=")[1].lower().replace(" ", "").replace("email", "").replace(":", "")
email = emailRaw.split(";") if emailRaw != "" else list()
yield RSMembers(
RSID = int(RSID),
name = name,
term = {
'geography': geography,
'party': party,
'from': response.meta['term_from'],
'to': response.meta['term_to'],
'house': 2,
'session': None
},
dob = dob if dob != '-' else None,
birth_place = birth_place if birth_place != '-' else None,
marital_status = marital_status if marital_status != '-' else None,
children = children if children != '-' else None,
education = education if education != '' else None,
profession = profession if profession != '' else None,
phone = phone if len(phone) != 0 else [],
email = email
) | en | 0.349556 | # -*- coding: utf-8 -*- ######## <NAME> Current Members Crawler "+str(datetime.datetime.now())+" ###########\n" ) # remove already existing member based on RSID # seprate name and prefix # clean education field and assign value # clean marital field and assign appropriate value # clean profession # convert dob into timestamp # clean sons and daughters field # convert geography field into GID # convert party field into PID # Construct term object and remove party and geography field #__VIEWSTATE::attr(value)").extract_first(), #ctl00_ContentPlaceHolder1_GridView1_ctl02_Image1").css("::attr(src)").extract_first().split("/")[1].replace("P", "").replace(".jpg", "") #ctl00_ContentPlaceHolder1_GridView1_ctl02_Label3").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1").css("tr")[0].css("td")[1].css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1").css("tr")[1].css("td")[1].css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label14").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label15").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label1").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label2").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label16").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label17").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel2_DetailsView2_Label18").css("::text").extract_first().strip() #ctl00_ContentPlaceHolder1_TabContainer1_TabPanel1_DetailsView1_Image21").css("::attr(src)").extract_first().split("=")[1].lower().replace(" ", "").replace("email", 
"").replace(":", "") | 2.464612 | 2 |
utils/__init__.py | davzarov/fictional-disco | 0 | 6619505 | import os
import shutil
from pathlib import Path
from typing import Generator, List, Tuple
def open_file(f: Path) -> List[str]:
    """Read a raw ``.txt`` cookie file and return one cookie per line.

    Args:
        f: Path to the cookie file; must exist and have a ``.txt`` suffix.

    Returns:
        The file content split on ``"\\n"`` (a trailing empty string is
        included when the file ends with a newline, as before).

    Raises:
        FileNotFoundError: If *f* does not exist.
        TypeError: If *f* does not have a ``.txt`` suffix.
    """
    if not f.exists():
        # Bug fix: the original raised FileExistsError, which signals the
        # opposite condition ("file already exists"). FileNotFoundError is
        # the correct exception and remains an OSError subclass, so broad
        # OSError handlers still work.
        raise FileNotFoundError("File doesn't exist.")
    if f.suffix != '.txt':
        raise TypeError("File must be of type .txt")
    return f.read_text().split("\n")
def list_files(dir: Path) -> Generator[Path, None, None]:
    """Yield every regular file found directly inside *dir*.

    Subdirectories and entries that vanished since listing are skipped.
    """
    for entry in dir.iterdir():
        if not entry.exists():
            continue
        if entry.is_file():
            yield entry
def make_directory(dir: Path) -> Tuple[bool, Path]:
    """Create *dir* (with parents) when missing.

    Returns:
        ``(created, dir)`` where *created* is True only when this call
        actually made the directory (a success message is printed then).
    """
    if dir.exists():
        return False, dir
    try:
        dir.mkdir(parents=True)
    except FileExistsError:
        # Another process created it between the check and mkdir;
        # report "not created by us", same as the original.
        return False, dir
    print(f"[Created]: {dir.name} directory.")
    return True, dir
def is_empty(dir: Path) -> bool:
    """Return True when *dir* contains no entries at all."""
    for _ in dir.iterdir():
        return False
    return True
def make_file(dir: Path, f: Path):
    """Create (if needed) and return ``<dir>/<stem>_cookies<suffix>``.

    The output name reuses the stem and suffix of *f*; an existing file
    is left untouched.
    """
    target_name = f.stem + "_cookies" + f.suffix
    target = dir / target_name
    if not target.exists():
        target.touch()
    return target
def move_file(o: Path, d: Path) -> None:
    """Move a file from *o* (origin) to *d* (destination).

    os.fspath() turns the Path objects into plain strings, which keeps
    shutil.move happy on older Python versions that did not accept
    path-like arguments everywhere.
    """
    shutil.move(os.fspath(o), os.fspath(d))
def copy_file(o: Path, d: Path) -> None:
    """Copy a file from *o* (origin) to *d* (destination).

    Note: the original docstring said "moves", but shutil.copy copies —
    the origin file is left in place.
    """
    shutil.copy(os.fspath(o), os.fspath(d))
def remove_file(f: Path) -> None:
    """Delete the file (or symlink) at *f*.

    Raises FileNotFoundError when *f* does not exist (Path.unlink default).
    """
    f.unlink()
| import os
import shutil
from pathlib import Path
from typing import Generator, List, Tuple
def open_file(f: Path) -> List[str]:
"""opens raw file and returns cookies"""
if not f.exists():
raise FileExistsError("File doesn't exist.")
if f.suffix != '.txt':
raise TypeError("File must be of type .txt")
cookies = f.read_text().split("\n")
return cookies
def list_files(dir: Path) -> Generator[Path, None, None]:
"""list files in a directory"""
for f in dir.iterdir():
if f.exists() and f.is_file():
yield f
def make_directory(dir: Path) -> Tuple[bool, Path]:
"""creates directory in the desired path"""
created = False
if not dir.exists():
try:
dir.mkdir(parents=True)
created = True
except FileExistsError:
pass
else:
print(f"[Created]: {dir.name} directory.")
return created, dir
def is_empty(dir: Path) -> bool:
"""checks if directory is empty"""
return not any(dir.iterdir())
def make_file(dir: Path, f: Path):
"""outputs a file in a dir using the current file features"""
output = dir / (f"{f.stem}_cookies{f.suffix}")
if not output.exists():
output.touch()
return output
def move_file(o: Path, d: Path) -> None:
"""moves file from o (origin) to d (destination)"""
shutil.move(os.fspath(o), os.fspath(d))
def copy_file(o: Path, d: Path) -> None:
"""moves file from o (origin) to d (destination)"""
shutil.copy(os.fspath(o), os.fspath(d))
def remove_file(f: Path) -> None:
"""removes file or link passed"""
f.unlink()
| en | 0.819113 | opens raw file and returns cookies list files in a directory creates directory in the desired path checks if directory is empty outputs a file in a dir using the current file features moves file from o (origin) to d (destination) moves file from o (origin) to d (destination) removes file or link passed | 3.146244 | 3 |
TensorMonitor/tensor_manager.py | octaviaguo/Tensorflow-Visualizing | 15 | 6619506 | <reponame>octaviaguo/Tensorflow-Visualizing<filename>TensorMonitor/tensor_manager.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
import fnmatch
from control_panel import ControlPanel
import time
if sys.version_info[0] >= 3:
from functools import reduce
class TensorMonitor(object):
    """Class-level (singleton-style) tensor monitor for TensorFlow runs.

    All state lives on the class itself; callers only use the public
    classmethods AddUserList() (register tensors to watch) and Beat()
    (run once per training step). On the first Beat() an interactive
    ControlPanel is created; each Beat() evaluates the watched tensors
    in the given session and pushes the values to the panel.
    """
    # Tensor-selection modes offered in the control panel UI.
    filter_types = [
        'USER_LIST',
        'TRAINABLE_VARIABLES',
        'ACTIVATIONS',
        'GLOBAL_VARIABLES',
        'ALL_OPS']
    user_tensor_list = []   # tensors registered via AddUserList()
    control_panel = None    # created lazily on the first Beat()
    class Tensor:
        """Lightweight record of (name, shape, op) for one tensor."""
        name = None
        shape = None
        op = None
        filter_str = None
        def __init__(cls, name, shape, op):
            # NOTE(review): the first parameter is conventionally named
            # ``self``; it is called ``cls`` here but acts as an instance.
            cls.name = name
            cls.shape = shape
            cls.op = op
    @classmethod
    def __update_tensor_list(cls, session):
        # Rebuild cls.tensor_list according to the currently selected
        # filter type (and, for ALL_OPS, the substring filter).
        cls.tensor_list = []
        cls.tensor_list_1 = []
        if cls.filter_type == 'USER_LIST':
            for t in cls.user_tensor_list:
                cls.tensor_list.append(t)
        elif cls.filter_type == 'TRAINABLE_VARIABLES':
            tensors = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            for t in tensors:
                #print(t.op.type)
                cls.tensor_list.append(cls.Tensor(t.name, t.get_shape(), t))
        elif cls.filter_type == 'GLOBAL_VARIABLES':
            tensors = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            for t in tensors:
                cls.tensor_list.append(cls.Tensor(t.name, t.get_shape(), t))
        elif cls.filter_type == 'ACTIVATIONS':
            # Heuristic: pick ops whose type is a known activation.
            for t in tf.get_default_graph().get_operations():
                try:
                    tensor = t.values()[0]
                    #print(tensor.op.type)
                    if tensor.op.type in ('Relu', 'Softplus', 'Relu6', 'Tanh'):
                        cls.tensor_list.append(cls.Tensor(t.name, tensor.get_shape(), tensor))
                except:
                    continue
        else:
            # 'ALL_OPS': substring match on the op name; an empty filter
            # string matches every op in the graph.
            for t in session.graph.get_operations():
                if cls.filter_str != '' and cls.filter_str in t.name:
                    try:
                        tensor = t.values()[0]
                        shape = tensor.get_shape()
                        if len(shape) > 0 or True:
                            cls.tensor_list.append(cls.Tensor(t.name, shape, tensor))
                    except:
                        continue
                if cls.filter_str == '':
                    try:
                        tensor = t.values()[0]
                        shape = tensor.get_shape()
                        if len(shape) > 0 or True:
                            cls.tensor_list.append(cls.Tensor(t.name, shape, tensor))
                    except:
                        continue
    @classmethod
    def __loading(cls, session):
        # Build the preliminary list shown before any filter is chosen:
        # user-registered tensors first, then every op in the graph that
        # yields a tensor (ops without outputs are skipped via except).
        cls.prelimi_wt = []
        for t in cls.user_tensor_list:
            cls.prelimi_wt.append(t)
        for t in session.graph.get_operations():
            try:
                tensor = t.values()[0]
                shape = tensor.get_shape()
                if len(shape) > 0 or True:
                    cls.prelimi_wt.append(cls.Tensor(t.name, shape, tensor))
            except:
                continue
    @classmethod
    def __init(cls, session, input_list):
        # One-time setup performed by the first Beat(): snapshot the
        # graph and create the ControlPanel UI.
        cls.__loading(session)
        cls.filter_type = None
        cls.filter_str = ""
        cls.control_panel = ControlPanel(
            filter_type_list=cls.filter_types,
            input_list=input_list,
            loaded_list=cls.prelimi_wt
        )
    @classmethod
    def __check(cls, session):
        # Refresh the tensor list when the user changed the filter type
        # in the UI, or changed the substring while in ALL_OPS mode.
        (filter_type, filter_str) = cls.control_panel.get_filter_type()
        if (cls.filter_type != filter_type) or \
            (cls.filter_type=='ALL_OPS' and cls.filter_str != filter_str):
            cls.filter_type = filter_type
            cls.filter_str = filter_str
            cls.__update_tensor_list(session)
            cls.control_panel.update_tensor_list(tensor_list=[(t.name, t.shape, t.op) for t in cls.tensor_list])
    @classmethod
    def AddUserList(cls, **args):
        """Register tensors to watch by keyword: AddUserList(name=tensor)."""
        #print(args.keys())
        for name in args.keys():
            tensor = cls.Tensor(name, args[name].shape, args[name])
            #tensor = cls.Tensor(args[name].name, args[name].shape, args[name])
            cls.user_tensor_list.append(tensor)
    @classmethod
    def Beat(cls, session, **args):
        """Run one monitoring step inside the training loop.

        Evaluates every watched tensor (with a feed_dict taken from the
        panel's input object or from **args by input name), pushes the
        results to the panel, and services pause/step UI state.

        Returns 'quit' when the user closed the panel, otherwise the
        panel's current console command.
        """
        if cls.control_panel is None:
            cls.__init(session, args.keys())
        tensor_watch_dict = cls.control_panel.get_tensor_watch_list()
        #print(tensor_watch_dict.keys(),len(tensor_watch_dict.keys()))
        for input_item in tensor_watch_dict.keys():
            tensor_list = tensor_watch_dict[input_item]
            try:
                if input_item.input_obj is not None:
                    feed_dict = input_item.input_obj.prepare_input()
                elif input_item.name in args.keys():
                    feed_dict = args[input_item.name]
                else:
                    feed_dict = None
                # t is (name, shape, op, data_source); run all ops in one
                # session.run call, then hand each result to its sink.
                ops = [t[2] for t in tensor_list]
                r = session.run(ops, feed_dict=feed_dict)
                #print(r)
                for i,t in enumerate(tensor_list):
                    data_source = t[3]
                    data_source.set_data(r[i])
            except Exception as e:
                print('session run error:')
                print(e)
        cls.__check(session)
        quit = not cls.control_panel.beat(True)
        # While paused, keep servicing the UI until the user steps,
        # resumes, or quits.
        while cls.control_panel.is_pause():
            if cls.control_panel.is_step():
                break
            cls.__check(session)
            quit = not cls.control_panel.beat(False)
            time.sleep(0.1)
            if quit:
                break
        if quit:
            return 'quit'
        else:
            return cls.control_panel.get_console_command()
| import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
import fnmatch
from control_panel import ControlPanel
import time
if sys.version_info[0] >= 3:
from functools import reduce
class TensorMonitor(object):
filter_types = [
'USER_LIST',
'TRAINABLE_VARIABLES',
'ACTIVATIONS',
'GLOBAL_VARIABLES',
'ALL_OPS']
user_tensor_list = []
control_panel = None
class Tensor:
name = None
shape = None
op = None
filter_str = None
def __init__(cls, name, shape, op):
cls.name = name
cls.shape = shape
cls.op = op
@classmethod
def __update_tensor_list(cls, session):
cls.tensor_list = []
cls.tensor_list_1 = []
if cls.filter_type == 'USER_LIST':
for t in cls.user_tensor_list:
cls.tensor_list.append(t)
elif cls.filter_type == 'TRAINABLE_VARIABLES':
tensors = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for t in tensors:
#print(t.op.type)
cls.tensor_list.append(cls.Tensor(t.name, t.get_shape(), t))
elif cls.filter_type == 'GLOBAL_VARIABLES':
tensors = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for t in tensors:
cls.tensor_list.append(cls.Tensor(t.name, t.get_shape(), t))
elif cls.filter_type == 'ACTIVATIONS':
for t in tf.get_default_graph().get_operations():
try:
tensor = t.values()[0]
#print(tensor.op.type)
if tensor.op.type in ('Relu', 'Softplus', 'Relu6', 'Tanh'):
cls.tensor_list.append(cls.Tensor(t.name, tensor.get_shape(), tensor))
except:
continue
else:
for t in session.graph.get_operations():
if cls.filter_str != '' and cls.filter_str in t.name:
try:
tensor = t.values()[0]
shape = tensor.get_shape()
if len(shape) > 0 or True:
cls.tensor_list.append(cls.Tensor(t.name, shape, tensor))
except:
continue
if cls.filter_str == '':
try:
tensor = t.values()[0]
shape = tensor.get_shape()
if len(shape) > 0 or True:
cls.tensor_list.append(cls.Tensor(t.name, shape, tensor))
except:
continue
@classmethod
def __loading(cls, session):
cls.prelimi_wt = []
for t in cls.user_tensor_list:
cls.prelimi_wt.append(t)
for t in session.graph.get_operations():
try:
tensor = t.values()[0]
shape = tensor.get_shape()
if len(shape) > 0 or True:
cls.prelimi_wt.append(cls.Tensor(t.name, shape, tensor))
except:
continue
@classmethod
def __init(cls, session, input_list):
cls.__loading(session)
cls.filter_type = None
cls.filter_str = ""
cls.control_panel = ControlPanel(
filter_type_list=cls.filter_types,
input_list=input_list,
loaded_list=cls.prelimi_wt
)
@classmethod
def __check(cls, session):
(filter_type, filter_str) = cls.control_panel.get_filter_type()
if (cls.filter_type != filter_type) or \
(cls.filter_type=='ALL_OPS' and cls.filter_str != filter_str):
cls.filter_type = filter_type
cls.filter_str = filter_str
cls.__update_tensor_list(session)
cls.control_panel.update_tensor_list(tensor_list=[(t.name, t.shape, t.op) for t in cls.tensor_list])
@classmethod
def AddUserList(cls, **args):
#print(args.keys())
for name in args.keys():
tensor = cls.Tensor(name, args[name].shape, args[name])
#tensor = cls.Tensor(args[name].name, args[name].shape, args[name])
cls.user_tensor_list.append(tensor)
@classmethod
def Beat(cls, session, **args):
if cls.control_panel is None:
cls.__init(session, args.keys())
tensor_watch_dict = cls.control_panel.get_tensor_watch_list()
#print(tensor_watch_dict.keys(),len(tensor_watch_dict.keys()))
for input_item in tensor_watch_dict.keys():
tensor_list = tensor_watch_dict[input_item]
try:
if input_item.input_obj is not None:
feed_dict = input_item.input_obj.prepare_input()
elif input_item.name in args.keys():
feed_dict = args[input_item.name]
else:
feed_dict = None
ops = [t[2] for t in tensor_list]
r = session.run(ops, feed_dict=feed_dict)
#print(r)
for i,t in enumerate(tensor_list):
data_source = t[3]
data_source.set_data(r[i])
except Exception as e:
print('session run error:')
print(e)
cls.__check(session)
quit = not cls.control_panel.beat(True)
while cls.control_panel.is_pause():
if cls.control_panel.is_step():
break
cls.__check(session)
quit = not cls.control_panel.beat(False)
time.sleep(0.1)
if quit:
break
if quit:
return 'quit'
else:
return cls.control_panel.get_console_command() | en | 0.11012 | #print(t.op.type) #print(tensor.op.type) #print(args.keys()) #tensor = cls.Tensor(args[name].name, args[name].shape, args[name]) #print(tensor_watch_dict.keys(),len(tensor_watch_dict.keys())) #print(r) | 2.342595 | 2 |
exercicios/ex068.py | mrcbnu/python-_exercicios | 0 | 6619507 | <gh_stars>0
###########################################
#              EXERCISE 067               #
###########################################
'''Even-or-odd game ("par ou impar") against the computer.
The loop repeats while the player keeps winning and stops as soon
as the player loses, then reports the number of consecutive wins
achieved.'''
from random import randint
# ANSI escape sequences used to color terminal output
# (the *_in variants are inverse/reverse-video versions).
branco = '\033[1;30m'
branco_in = '\033[1;7;30m'
vermelho = '\033[1;31m'
vermelho_in = '\033[1;7;31m'
verde = '\033[1;32m'
verde_in = '\033[1;7;32m'
amarelo = '\033[1;33m'
amarelo_in = '\033[1;7;33m'
azul = '\033[1;34m'
roxo = '\033[1;35m'
azulc = '\033[1;36m'
azulc_in = '\033[1;7;36m'
cinza = '\033[1;37m'
fimdacor = '\033[m'
# Banner.
print(f'{branco}-=' * 25, f'{fimdacor}')
print('{}{:^50}{}'.format(azulc,'VAMOS BRINCAR DE PAR OU ÍMPAR',fimdacor))
print(f'{branco}-=' * 25, f'{fimdacor}')
vitoria = 0  # consecutive-win counter
while True:
    cpu = randint(0,10)  # computer's pick, 0..10 inclusive
    chute = int(input(f'{roxo}DIGA UM NUMERO ENTRE 0 E 10: {fimdacor}'))
    parouimpar = str(input(f'{vermelho}PAR OU IMPAR [P/I]? {fimdacor}')).upper()[0].strip()
    # Re-prompt until the player answers P (even) or I (odd).
    while parouimpar not in 'PI':
        parouimpar = str(input(f'{vermelho}PAR OU IMPAR [P/I]? {fimdacor}')).upper()[0].strip()
    soma = cpu + chute
    if soma % 2 == 0:
        resultado = 'PAR'
    else:
        resultado = 'IMPAR'
    print(f'O COMPUTADOR ESCOLHEU{branco} {cpu}{fimdacor} e VOCÊ ESCOLHEU {branco}{chute} {fimdacor}= {branco}{soma}{fimdacor}')
    print('{}{:^50}{}'.format(azulc_in,resultado,fimdacor))
    # The player wins when the guessed parity matches the sum's parity.
    if resultado == 'PAR' and parouimpar == 'P' or resultado == 'IMPAR' and parouimpar == 'I':
        print('{}{:^50}{}'.format(amarelo_in,' V O C Ê V E N C E U ! ! !',fimdacor))
        print(f'{branco}-=' * 25, f'{fimdacor}')
        print('{}{:^50}{}'.format(azulc, 'VAMOS JOGAR NOVAMENTE', fimdacor))
        print(f'{branco}-=' * 25, f'{fimdacor}')
        vitoria += 1
    else:
        print('{}{:^50}{}'.format(vermelho_in,' V O C Ê P E R D E U ! ! !',fimdacor))
        print('-' * 50)
        break
# Final scoreboard.
print('{}{:^50}{}'.format(verde_in,'G A M E O V E R ! ! !',fimdacor))
print('{}{:^50}{}'.format(branco_in,vitoria,fimdacor))
print('{}{:^50}{}'.format(verde_in,'VITÓRIAS',fimdacor))
| ###########################################
# EXERCICIO 067 #
###########################################
'''FAÇA UM PROGRAMA QUE JOGUE PAR OU IMPAR
COM O COMPUTADOR. O JOGO SERÁ INTERROMPIDO
QUANDO O JOGADOR PERDER, MOSTRANDO O TOTAL
DE VITORIAS CONSECUTIVAS QUE ELE CONQUISTOU
NO FINAL DO JOGO'''
from random import randint
branco = '\033[1;30m'
branco_in = '\033[1;7;30m'
vermelho = '\033[1;31m'
vermelho_in = '\033[1;7;31m'
verde = '\033[1;32m'
verde_in = '\033[1;7;32m'
amarelo = '\033[1;33m'
amarelo_in = '\033[1;7;33m'
azul = '\033[1;34m'
roxo = '\033[1;35m'
azulc = '\033[1;36m'
azulc_in = '\033[1;7;36m'
cinza = '\033[1;37m'
fimdacor = '\033[m'
print(f'{branco}-=' * 25, f'{fimdacor}')
print('{}{:^50}{}'.format(azulc,'VAMOS BRINCAR DE PAR OU ÍMPAR',fimdacor))
print(f'{branco}-=' * 25, f'{fimdacor}')
vitoria = 0
while True:
cpu = randint(0,10)
chute = int(input(f'{roxo}DIGA UM NUMERO ENTRE 0 E 10: {fimdacor}'))
parouimpar = str(input(f'{vermelho}PAR OU IMPAR [P/I]? {fimdacor}')).upper()[0].strip()
while parouimpar not in 'PI':
parouimpar = str(input(f'{vermelho}PAR OU IMPAR [P/I]? {fimdacor}')).upper()[0].strip()
soma = cpu + chute
if soma % 2 == 0:
resultado = 'PAR'
else:
resultado = 'IMPAR'
print(f'O COMPUTADOR ESCOLHEU{branco} {cpu}{fimdacor} e VOCÊ ESCOLHEU {branco}{chute} {fimdacor}= {branco}{soma}{fimdacor}')
print('{}{:^50}{}'.format(azulc_in,resultado,fimdacor))
if resultado == 'PAR' and parouimpar == 'P' or resultado == 'IMPAR' and parouimpar == 'I':
print('{}{:^50}{}'.format(amarelo_in,' V O C Ê V E N C E U ! ! !',fimdacor))
print(f'{branco}-=' * 25, f'{fimdacor}')
print('{}{:^50}{}'.format(azulc, 'VAMOS JOGAR NOVAMENTE', fimdacor))
print(f'{branco}-=' * 25, f'{fimdacor}')
vitoria += 1
else:
print('{}{:^50}{}'.format(vermelho_in,' V O C Ê P E R D E U ! ! !',fimdacor))
print('-' * 50)
break
print('{}{:^50}{}'.format(verde_in,'G A M E O V E R ! ! !',fimdacor))
print('{}{:^50}{}'.format(branco_in,vitoria,fimdacor))
print('{}{:^50}{}'.format(verde_in,'VITÓRIAS',fimdacor)) | es | 0.251348 | ########################################### # EXERCICIO 067 # ########################################### FAÇA UM PROGRAMA QUE JOGUE PAR OU IMPAR COM O COMPUTADOR. O JOGO SERÁ INTERROMPIDO QUANDO O JOGADOR PERDER, MOSTRANDO O TOTAL DE VITORIAS CONSECUTIVAS QUE ELE CONQUISTOU NO FINAL DO JOGO | 3.587471 | 4 |
order/models.py | ajpocus/pizzeria | 0 | 6619508 | import decimal
from decimal import Decimal
from django.db import models
quant = Decimal('0.01')
class Flavor(models.Model):
    """A crust/bread flavor with a base price (default 3.99)."""
    name = models.CharField(max_length=24)
    base_price = models.DecimalField(max_digits=4,
                                     decimal_places=2,
                                     default=3.99)
    def __unicode__(self):
        return self.name
class Size(models.Model):
    """A pizza size; its base_price seeds the pizza price calculation."""
    name = models.CharField(max_length=24)
    base_price = models.DecimalField(max_digits=4,
                                     decimal_places=2,
                                     default=0.00)
    def __unicode__(self):
        return self.name
class Topping(models.Model):
    """A pizza topping; each one adds base_price (default 0.99)."""
    name = models.CharField(max_length=24)
    base_price = models.DecimalField(max_digits=4,
                                     decimal_places=2,
                                     default=0.99)
    def __unicode__(self):
        return self.name
class Pizza(models.Model):
    """A pizza whose price is derived from its size and toppings.

    NOTE(review): ``null=True`` on a ManyToManyField has no effect in
    Django (the through rows simply don't exist) — left as-is to avoid
    a schema change.
    """
    size = models.ForeignKey(Size, null=True)
    toppings = models.ManyToManyField(Topping, null=True)
    crust = models.ForeignKey(Flavor, null=True)
    base_price = models.DecimalField(max_digits=4,
                                     decimal_places=2,
                                     default=5.00)
    def save(self, *args, **kwargs):
        # First save (row doesn't exist yet): just persist so the pizza
        # gets a primary key — M2M toppings can only be attached after
        # that, so the price cannot be computed here.
        if not Pizza.objects.filter(id=self.id):
            super(Pizza, self).save(*args, **kwargs)
        else:
            # Subsequent saves: recompute price = size base + toppings,
            # quantized to two decimal places.
            price = Decimal('0.00')
            if self.size:
                price = self.size.base_price
            for topping in self.toppings.all():
                if topping.base_price:
                    price = price + topping.base_price
            self.base_price = decimal.Decimal(str(price)).quantize(quant)
            super(Pizza, self).save(*args, **kwargs)
    def __unicode__(self):
        # Human-readable label: "<Size> Pizza, <topping>, <topping>, ..."
        if self.size.name:
            name = self.size.name + " Pizza"
        else:
            name = "Pizza"
        for topping in self.toppings.all():
            if topping.name:
                name = name + ", " + topping.name
        return name
class Bread(models.Model):
    """A bread order item whose price tracks its flavor's base price."""
    flavor = models.ForeignKey(Flavor)
    base_price = models.DecimalField(max_digits=4,
                                     decimal_places=2,
                                     default=4.00)
    def save(self, *args, **kwargs):
        # Keep the stored price in sync with the flavor's current base
        # price, quantized to two decimal places.
        self.base_price = Decimal(self.flavor.base_price).quantize(quant)
        super(Bread, self).save(*args, **kwargs)
    def __unicode__(self):
        # Bug fix: the original returned ``self.type``, but Bread has no
        # ``type`` field, so __unicode__ always raised AttributeError.
        # Return the flavor name, matching the naming style of the other
        # models in this file.
        return self.flavor.name
class Customer(models.Model):
    """A customer identified by name and phone number."""
    name = models.CharField(max_length=64)
    # Stored as text to allow formatting/extensions (e.g. "+1 ...").
    number = models.CharField(max_length=20)
    def __unicode__(self):
        return self.name
class Order(models.Model):
    """A customer order of pizzas and breads with computed totals.

    subtotal/tax/total are recomputed on every save after the first
    (M2M items can only be attached once the order has a primary key).
    """
    customer = models.ForeignKey(Customer)
    date = models.DateField()
    pizzas = models.ManyToManyField(Pizza, blank=True)
    breads = models.ManyToManyField(Bread, blank=True)
    is_made = models.BooleanField(default=False)
    subtotal = models.DecimalField(max_digits=6,
                                   decimal_places=2,
                                   default=0.00)
    tax = models.DecimalField(max_digits=6,
                              decimal_places=2,
                              default=0.00)
    total = models.DecimalField(max_digits=6,
                                decimal_places=2,
                                default=0.00)
    def save(self, *args, **kwargs):
        # First save: persist to obtain a primary key; no totals yet.
        if not Order.objects.filter(id=self.id):
            super(Order, self).save(*args, **kwargs)
        else:
            # Banker's rounding for all quantize() calls below.
            decimal.getcontext().rounding = decimal.ROUND_HALF_EVEN
            self.subtotal = Decimal('0.00')
            # NOTE(review): Pizza.save() already folds topping prices
            # into pizza.base_price, so adding toppings again here looks
            # like a double count — confirm intent before changing.
            for pizza in self.pizzas.all():
                self.subtotal += pizza.base_price
                for topping in pizza.toppings.all():
                    self.subtotal += topping.base_price
            for bread in self.breads.all():
                self.subtotal += bread.base_price
            # Flat 6% tax rate.
            self.tax = Decimal('0.06') * self.subtotal
            self.total = self.subtotal + self.tax
            self.subtotal = self.subtotal.quantize(quant)
            self.tax = self.tax.quantize(quant)
            self.total = self.total.quantize(quant)
            super(Order, self).save(*args, **kwargs)
    def __unicode__(self):
        return str(self.id)
| import decimal
from decimal import Decimal
from django.db import models
quant = Decimal('0.01')
class Flavor(models.Model):
name = models.CharField(max_length=24)
base_price = models.DecimalField(max_digits=4,
decimal_places=2,
default=3.99)
def __unicode__(self):
return self.name
class Size(models.Model):
name = models.CharField(max_length=24)
base_price = models.DecimalField(max_digits=4,
decimal_places=2,
default=0.00)
def __unicode__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=24)
base_price = models.DecimalField(max_digits=4,
decimal_places=2,
default=0.99)
def __unicode__(self):
return self.name
class Pizza(models.Model):
size = models.ForeignKey(Size, null=True)
toppings = models.ManyToManyField(Topping, null=True)
crust = models.ForeignKey(Flavor, null=True)
base_price = models.DecimalField(max_digits=4,
decimal_places=2,
default=5.00)
def save(self, *args, **kwargs):
if not Pizza.objects.filter(id=self.id):
super(Pizza, self).save(*args, **kwargs)
else:
price = Decimal('0.00')
if self.size:
price = self.size.base_price
for topping in self.toppings.all():
if topping.base_price:
price = price + topping.base_price
self.base_price = decimal.Decimal(str(price)).quantize(quant)
super(Pizza, self).save(*args, **kwargs)
def __unicode__(self):
if self.size.name:
name = self.size.name + " Pizza"
else:
name = "Pizza"
for topping in self.toppings.all():
if topping.name:
name = name + ", " + topping.name
return name
class Bread(models.Model):
flavor = models.ForeignKey(Flavor)
base_price = models.DecimalField(max_digits=4,
decimal_places=2,
default=4.00)
def save(self, *args, **kwargs):
self.base_price = Decimal(self.flavor.base_price).quantize(quant)
super(Bread, self).save(*args, **kwargs)
def __unicode__(self):
return self.type
class Customer(models.Model):
name = models.CharField(max_length=64)
number = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Order(models.Model):
customer = models.ForeignKey(Customer)
date = models.DateField()
pizzas = models.ManyToManyField(Pizza, blank=True)
breads = models.ManyToManyField(Bread, blank=True)
is_made = models.BooleanField(default=False)
subtotal = models.DecimalField(max_digits=6,
decimal_places=2,
default=0.00)
tax = models.DecimalField(max_digits=6,
decimal_places=2,
default=0.00)
total = models.DecimalField(max_digits=6,
decimal_places=2,
default=0.00)
def save(self, *args, **kwargs):
if not Order.objects.filter(id=self.id):
super(Order, self).save(*args, **kwargs)
else:
decimal.getcontext().rounding = decimal.ROUND_HALF_EVEN
self.subtotal = Decimal('0.00')
for pizza in self.pizzas.all():
self.subtotal += pizza.base_price
for topping in pizza.toppings.all():
self.subtotal += topping.base_price
for bread in self.breads.all():
self.subtotal += bread.base_price
self.tax = Decimal('0.06') * self.subtotal
self.total = self.subtotal + self.tax
self.subtotal = self.subtotal.quantize(quant)
self.tax = self.tax.quantize(quant)
self.total = self.total.quantize(quant)
super(Order, self).save(*args, **kwargs)
def __unicode__(self):
return str(self.id)
| none | 1 | 2.532495 | 3 | |
src/CINcalc.py | zhangyafeng1/CINcalc | 0 | 6619509 | <reponame>zhangyafeng1/CINcalc
#!/usr/bin/env python
import sys
from rblib import cnvproc
def parsecnvfile(cnvfile, hchr, ploidy):
    """Parse a CNV segment file into per-sample segment lists.

    Expected tab-separated columns per line (lines starting with ``#``
    are skipped)::

        #Sample(tissue/cell)  Patient  chrom  start  end  probes  log2ratio

    Args:
        cnvfile: path to the CNV segment file.
        hchr: dict whose keys are the valid chromosome names (used for
            validation only).
        ploidy: species ploidy used to convert log2 ratios to absolute
            copy number.

    Returns:
        dict mapping sample name -> list of
        ``[chrom, start, end, cn, cn, cn]`` segments, where
        ``cn = 2**log2ratio * ploidy``.
    """
    h = {}
    # 'with' guarantees the handle is closed even if a line is malformed.
    with open(cnvfile, "r") as f:
        for line in f:
            if line.startswith("#"):
                continue
            sn, pn, chrom, start, end, nums, logratio = line.rstrip("\n").split("\t")
            assert chrom in hchr
            start = int(start)
            end = int(end)
            # The original computed this identical value twice (as
            # ``ratio`` and ``fploidy``); compute it once and keep the
            # triple-copy segment layout that downstream code expects.
            copynum = 2 ** float(logratio) * ploidy
            h.setdefault(sn, []).append([chrom, start, end, copynum, copynum, copynum])
    return h
def parsechrfile(chrfile):
    """Read a chromosome-size file (``chrom<TAB>length`` per line).

    Returns:
        ``(sizes, total)`` where *sizes* maps chromosome name to length
        (float) and *total* is the running sum of every length read.
    """
    sizes = {}
    total = 0.0
    handle = open(chrfile, "r")
    for record in handle:
        fields = record.rstrip("\n").split("\t")
        length = float(fields[1])
        sizes[fields[0]] = length
        total += length
    handle.close()
    return sizes, total
def runscript(cnvfile,chrfile,ploidy,upcut,lowcut):
    """Compute weighted ploidy and wGII for every sample in the CNV file.

    Writes a header plus one tab-separated line per sample to stdout:
    sample name, weighted ploidy, weighted genome instability index.

    Args:
        cnvfile: CNV segment file (see parsecnvfile for the format).
        chrfile: chromosome size file (chrom<TAB>length per line).
        ploidy: species ploidy (baseline copy number).
        upcut: log2-ratio cutoff above which a segment counts as a gain.
        lowcut: log2-ratio cutoff below which a segment counts as a loss.

    Returns:
        0 on success.
    """
    hchr,totlen = parsechrfile(chrfile)
    hsn = parsecnvfile(cnvfile,hchr,ploidy)
    sys.stdout.write("#SN\twploidy\twgii\n")
    for sn in hsn:
        segments = hsn[sn]
        cin_ins = cnvproc.CNVproc(segments,totseglen=totlen)
        wploidy = cin_ins.wploidy()
        #print 2**upcut * ploidy,2**lowcut * ploidy
        # Convert the log2-ratio cutoffs into absolute copy-number
        # thresholds before handing them to the wGII calculation.
        wgii = cin_ins.wgii(tgain=2**upcut * ploidy,tloss=2**lowcut * ploidy,chrlen=hchr,numchrs=len(hchr))
        sys.stdout.write("%s\t%.5f\t%.5f\n"%(sn,wploidy,wgii))
    return 0
from optparse import OptionParser,OptionGroup
import time
import os
def checkfile(fns):
    """Return 0 when every path in *fns* is an existing regular file, else 2."""
    all_present = all(os.path.isfile(fn) for fn in fns)
    return 0 if all_present else 2
def __main():
    """Parse command-line options and run the CIN calculation.

    Usage: ``%prog [options] CNVfile``.

    Returns:
        0 on success, -1 when no CNV file argument was given,
        2 when an input file is missing.
    """
    usage = "usage: %prog CNVfile"
    description = "Contact: <NAME> <<EMAIL>>"
    parser = OptionParser(usage,version="%prog 1.0.1",description = description)
    Required_group = OptionGroup(parser,'Required Options')
    Required_group.add_option('-r',dest='chromfile',help="chromosome size file",metavar='FILE',type='string',default=None)
    Required_group.add_option('-p',dest='ploidy',help="Species ploidy [2]",metavar='INT',type='int',default=2.0)
    Other_group = OptionGroup(parser,'Threshold Options')
    Other_group.add_option('-l',dest='low',help="Low cutoff for loss fragments",metavar='FLOAT',type='float',default = -0.4)
    Other_group.add_option('-u',dest='up',help="Up cutoff for gain fragments",metavar='FLOAT',type='float',default = 0.4)
    parser.add_option_group(Required_group)
    parser.add_option_group(Other_group)
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1
    chrfile = str(options.chromfile)
    ploidy = float(options.ploidy)
    lowcut = float(options.low)
    upcut = float(options.up)
    cnvfile = args[0]
    # Both the CNV file and the chromosome-size file must exist.
    for fn in [cnvfile,chrfile]:
        if not os.path.isfile(fn):
            sys.stderr.write("file '%s' not found!"%fn)
            return 2
    ret = runscript(cnvfile,chrfile,ploidy,upcut,lowcut)
    return ret
if __name__ == "__main__":
start_time = time.time()
ret = __main()
cost_time = time.time()-start_time
if ret: sys.stderr.write("[ERROR] Task interrupt, Code: %d\n"%ret)
else: sys.stderr.write("[INFO] Time consumed: %.2fs, Code: %d\n"%(cost_time,ret))
exit(ret)
| #!/usr/bin/env python
import sys
from rblib import cnvproc
def parsecnvfile(cnvfile,hchr,ploidy):
"""
#Sample(tissue/cell) Patient chrom start end probes log2ratio
TCGA-OR-A5LA-01A TCGA-OR-A5LA chr1 3301765 5128894 1270 -0.2177
TCGA-OR-A5LA-01A TCGA-OR-A5LA chr1 5132643 47878718 22491 -0.179
TCGA-OR-A5LA-01A TCGA-OR-A5LA chr1 161959375 247650984 54615 -0.2341
"""
h = {}
f = open(cnvfile,"r")
for line in f:
if line.startswith("#"):continue
sn,pn,chrom,start,end,nums,logratio = line.rstrip("\n").split("\t")
assert chrom in hchr
if sn not in h: h[sn] = []
start = int(start); end = int(end)
ratio = 2**float(logratio) * ploidy
fploidy = 2**float(logratio) * ploidy
h[sn].append([chrom,start,end,ratio,ratio,fploidy])
f.close()
return h
def parsechrfile(chrfile):
h = {}
f = open(chrfile,"r")
totlen = 0.0
for line in f:
arr = line.rstrip("\n").split("\t")
tmplen = float(arr[1])
h[arr[0]] = tmplen
totlen += tmplen
f.close()
return h,totlen
def runscript(cnvfile,chrfile,ploidy,upcut,lowcut):
hchr,totlen = parsechrfile(chrfile)
hsn = parsecnvfile(cnvfile,hchr,ploidy)
sys.stdout.write("#SN\twploidy\twgii\n")
for sn in hsn:
segments = hsn[sn]
cin_ins = cnvproc.CNVproc(segments,totseglen=totlen)
wploidy = cin_ins.wploidy()
#print 2**upcut * ploidy,2**lowcut * ploidy
wgii = cin_ins.wgii(tgain=2**upcut * ploidy,tloss=2**lowcut * ploidy,chrlen=hchr,numchrs=len(hchr))
sys.stdout.write("%s\t%.5f\t%.5f\n"%(sn,wploidy,wgii))
return 0
from optparse import OptionParser,OptionGroup
import time
import os
def checkfile(fns):
for fn in fns:
if not os.path.isfile(fn): return 2
return 0
def __main():
usage = "usage: %prog CNVfile"
description = "Contact: <NAME> <<EMAIL>>"
parser = OptionParser(usage,version="%prog 1.0.1",description = description)
Required_group = OptionGroup(parser,'Required Options')
Required_group.add_option('-r',dest='chromfile',help="chromosome size file",metavar='FILE',type='string',default=None)
Required_group.add_option('-p',dest='ploidy',help="Species ploidy [2]",metavar='INT',type='int',default=2.0)
Other_group = OptionGroup(parser,'Threshold Options')
Other_group.add_option('-l',dest='low',help="Low cutoff for loss fragments",metavar='FLOAT',type='float',default = -0.4)
Other_group.add_option('-u',dest='up',help="Up cutoff for gain fragments",metavar='FLOAT',type='float',default = 0.4)
parser.add_option_group(Required_group)
parser.add_option_group(Other_group)
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
return -1
chrfile = str(options.chromfile)
ploidy = float(options.ploidy)
lowcut = float(options.low)
upcut = float(options.up)
cnvfile = args[0]
for fn in [cnvfile,chrfile]:
if not os.path.isfile(fn):
sys.stderr.write("file '%s' not found!"%fn)
return 2
ret = runscript(cnvfile,chrfile,ploidy,upcut,lowcut)
return ret
if __name__ == "__main__":
start_time = time.time()
ret = __main()
cost_time = time.time()-start_time
if ret: sys.stderr.write("[ERROR] Task interrupt, Code: %d\n"%ret)
else: sys.stderr.write("[INFO] Time consumed: %.2fs, Code: %d\n"%(cost_time,ret))
exit(ret) | en | 0.319963 | #!/usr/bin/env python #Sample(tissue/cell) Patient chrom start end probes log2ratio TCGA-OR-A5LA-01A TCGA-OR-A5LA chr1 3301765 5128894 1270 -0.2177 TCGA-OR-A5LA-01A TCGA-OR-A5LA chr1 5132643 47878718 22491 -0.179 TCGA-OR-A5LA-01A TCGA-OR-A5LA chr1 161959375 247650984 54615 -0.2341 #print 2**upcut * ploidy,2**lowcut * ploidy | 2.33015 | 2 |
testing/unit/test_domain.py | Dragoncall/GPflowOpt | 258 | 6619510 | import gpflowopt
import numpy as np
from ..utility import GPflowOptTestCase
class TestContinuousParameter(GPflowOptTestCase):
def test_simple(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
self.assertTrue(np.allclose(p._range, [0,1]), msg="Internal storage of object incorrect")
self.assertEqual(p.lower, 0, msg="Lower should equal 0")
self.assertEqual(p.upper, 1, msg="Upper should equal 1")
self.assertEqual(p.size, 1, msg="Size of parameter should equal 1")
p.upper = 2
self.assertEqual(p.upper, 2, msg="After assignment, upper should equal 2")
p.lower = 1
self.assertEqual(p.lower, 1, msg="After assignment, lower should equal 2")
p = np.sum([gpflowopt.domain.ContinuousParameter("x1", 0, 1)])
self.assertTrue(p.size == 1, msg="Construction of domain by list using sum failed")
def test_equality(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
pne = gpflowopt.domain.ContinuousParameter("x1", 0, 2)
self.assertNotEqual(p, pne, msg="Should not be equal (invalid upper)")
pne = gpflowopt.domain.ContinuousParameter("x1", -1, 1)
self.assertNotEqual(p, pne, msg="Should not be equal (invalid lower)")
pne = gpflowopt.domain.ContinuousParameter("x1", -1, 2)
self.assertNotEqual(p, pne, msg="Should not be equal (invalid lower/upper)")
p.lower = -1
p.upper = 2
self.assertEqual(p, pne, msg="Should be equal after adjusting bounds")
def test_indexing(self):
p = np.sum([gpflowopt.domain.ContinuousParameter("x1", 0, 1),
gpflowopt.domain.ContinuousParameter("x2", 0, 1),
gpflowopt.domain.ContinuousParameter("x3", 0, 1),
gpflowopt.domain.ContinuousParameter("x4", 0, 1)])
subdomain = p[['x4', 'x1', 2]]
self.assertTrue(subdomain.size == 3, msg="Subdomain should have size 3")
self.assertTrue(subdomain[0].label == 'x4', msg="Subdomain's first parameter should be 'x4'")
self.assertTrue(subdomain[1].label == 'x1', msg="Subdomain's second parameter should be 'x1'")
self.assertTrue(subdomain[2].label == 'x3', msg="Subdomain's third parameter should be 'x3'")
def test_containment(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
self.assertIn(0, p, msg="Point is within domain")
self.assertIn(0.5, p, msg="Point is within domain")
self.assertIn(1, p, msg="Point is within domain")
self.assertNotIn(1.1, p, msg="Point is not within domain")
self.assertNotIn(-0.5, p, msg="Point is not within domain")
def test_value(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
self.assertTupleEqual(p.value.shape, (1,), msg="Default value has incorrect shape.")
self.assertTrue(np.allclose(p.value, 0.5), msg="Parameter has incorrect default value")
p.value = 0.8
self.assertTrue(np.allclose(p.value, 0.8), msg="Parameter has incorrect value after update")
p.value = [0.6, 0.8]
self.assertTupleEqual(p.value.shape, (2,), msg="Default value has incorrect shape.")
np.testing.assert_allclose(p.value, np.array([0.6, 0.8]), err_msg="Parameter has incorrect value after update")
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1, 0.2)
self.assertTupleEqual(p.value.shape, (1,), msg="Default value has incorrect shape.")
self.assertTrue(np.allclose(p.value, 0.2), msg="Parameter has incorrect initialized value")
class TestHypercubeDomain(GPflowOptTestCase):
def setUp(self):
self.domain = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 4)])
def test_object_integrity(self):
self.assertEqual(len(self.domain._parameters), 3)
def test_simple(self):
self.assertEqual(self.domain.size, 3, msg="Size of domain should equal 3")
self.assertTrue(np.allclose(self.domain.lower, -1.0), msg="Lower of domain should equal -1 for all parameters")
self.assertTrue(np.allclose(self.domain.upper, 1.0), msg="Lower of domain should equal 1 for all parameters")
def test_equality(self):
dne = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] +
[gpflowopt.domain.ContinuousParameter("x3", -3, 1)])
self.assertNotEqual(self.domain, dne, msg="One lower bound mismatch, should not be equal.")
dne = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] +
[gpflowopt.domain.ContinuousParameter("x3", -1, 2)])
self.assertNotEqual(self.domain, dne, msg="One upper bound mismatch, should not be equal.")
dne = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)])
self.assertNotEqual(self.domain, dne, msg="Size mismatch")
de = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 4)])
self.assertEqual(self.domain, de, msg="No mismatches, should be equal")
def test_parenting(self):
for p in self.domain:
self.assertEqual(id(p._parent), id(self.domain), "Misspecified parent link detected")
def test_access(self):
for i in range(self.domain.size):
self.assertEqual(self.domain[i].label, "x{0}".format(i+1), "Accessing parameters, encountering "
"incorrect labels")
self.domain[2].lower = -2
de = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] +
[gpflowopt.domain.ContinuousParameter("x3", -2, 1)])
self.assertEqual(self.domain, de, msg="No mismatches, should be equal")
def test_containment(self):
A = np.random.rand(50,3)*2-1
self.assertTrue(A in self.domain, msg="Generated random points within domain")
A = np.vstack((A, np.array([-2, -2, -2])))
self.assertFalse(A in self.domain, msg="One of the points was not in the domain")
A = np.random.rand(50,4)*2-1
self.assertFalse(A in self.domain, msg="Generated random points have different dimensionality")
def test_value(self):
self.assertTupleEqual(self.domain.value.shape, (1, 3), msg="Default value has incorrect shape.")
np.testing.assert_allclose(self.domain.value, np.array([[0, 0, 0]]), err_msg="Parameter has incorrect initial value")
A = np.random.rand(10, 3) * 2 - 1
self.domain.value = A
self.assertTupleEqual(self.domain.value.shape, (10, 3), msg="Assigned value has incorrect shape.")
np.testing.assert_allclose(self.domain.value, A, err_msg="Parameter has incorrect value after assignment")
def test_transformation(self):
X = np.random.rand(50,3)*2-1
target = gpflowopt.domain.UnitCube(3)
transform = self.domain >> target
self.assertTrue(np.allclose(transform.forward(X), (X + 1) / 2), msg="Transformation to [0,1] incorrect")
self.assertTrue(np.allclose(transform.backward(transform.forward(X)), X),
msg="Transforming back and forth yields different result")
inv_transform = target >> self.domain
self.assertTrue(np.allclose(transform.backward(transform.forward(X)),
inv_transform.forward(transform.forward(X))),
msg="Inverse transform yields different results")
self.assertTrue(np.allclose((~transform).A.value, inv_transform.A.value))
self.assertTrue(np.allclose((~transform).b.value, inv_transform.b.value))
def test_unitcube(self):
domain = gpflowopt.domain.UnitCube(3)
self.assertTrue(np.allclose(domain.lower, 0))
self.assertTrue(np.allclose(domain.upper, 1))
self.assertEqual(domain.size, 3)
| import gpflowopt
import numpy as np
from ..utility import GPflowOptTestCase
class TestContinuousParameter(GPflowOptTestCase):
def test_simple(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
self.assertTrue(np.allclose(p._range, [0,1]), msg="Internal storage of object incorrect")
self.assertEqual(p.lower, 0, msg="Lower should equal 0")
self.assertEqual(p.upper, 1, msg="Upper should equal 1")
self.assertEqual(p.size, 1, msg="Size of parameter should equal 1")
p.upper = 2
self.assertEqual(p.upper, 2, msg="After assignment, upper should equal 2")
p.lower = 1
self.assertEqual(p.lower, 1, msg="After assignment, lower should equal 2")
p = np.sum([gpflowopt.domain.ContinuousParameter("x1", 0, 1)])
self.assertTrue(p.size == 1, msg="Construction of domain by list using sum failed")
def test_equality(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
pne = gpflowopt.domain.ContinuousParameter("x1", 0, 2)
self.assertNotEqual(p, pne, msg="Should not be equal (invalid upper)")
pne = gpflowopt.domain.ContinuousParameter("x1", -1, 1)
self.assertNotEqual(p, pne, msg="Should not be equal (invalid lower)")
pne = gpflowopt.domain.ContinuousParameter("x1", -1, 2)
self.assertNotEqual(p, pne, msg="Should not be equal (invalid lower/upper)")
p.lower = -1
p.upper = 2
self.assertEqual(p, pne, msg="Should be equal after adjusting bounds")
def test_indexing(self):
p = np.sum([gpflowopt.domain.ContinuousParameter("x1", 0, 1),
gpflowopt.domain.ContinuousParameter("x2", 0, 1),
gpflowopt.domain.ContinuousParameter("x3", 0, 1),
gpflowopt.domain.ContinuousParameter("x4", 0, 1)])
subdomain = p[['x4', 'x1', 2]]
self.assertTrue(subdomain.size == 3, msg="Subdomain should have size 3")
self.assertTrue(subdomain[0].label == 'x4', msg="Subdomain's first parameter should be 'x4'")
self.assertTrue(subdomain[1].label == 'x1', msg="Subdomain's second parameter should be 'x1'")
self.assertTrue(subdomain[2].label == 'x3', msg="Subdomain's third parameter should be 'x3'")
def test_containment(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
self.assertIn(0, p, msg="Point is within domain")
self.assertIn(0.5, p, msg="Point is within domain")
self.assertIn(1, p, msg="Point is within domain")
self.assertNotIn(1.1, p, msg="Point is not within domain")
self.assertNotIn(-0.5, p, msg="Point is not within domain")
def test_value(self):
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1)
self.assertTupleEqual(p.value.shape, (1,), msg="Default value has incorrect shape.")
self.assertTrue(np.allclose(p.value, 0.5), msg="Parameter has incorrect default value")
p.value = 0.8
self.assertTrue(np.allclose(p.value, 0.8), msg="Parameter has incorrect value after update")
p.value = [0.6, 0.8]
self.assertTupleEqual(p.value.shape, (2,), msg="Default value has incorrect shape.")
np.testing.assert_allclose(p.value, np.array([0.6, 0.8]), err_msg="Parameter has incorrect value after update")
p = gpflowopt.domain.ContinuousParameter("x1", 0, 1, 0.2)
self.assertTupleEqual(p.value.shape, (1,), msg="Default value has incorrect shape.")
self.assertTrue(np.allclose(p.value, 0.2), msg="Parameter has incorrect initialized value")
class TestHypercubeDomain(GPflowOptTestCase):
def setUp(self):
self.domain = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 4)])
def test_object_integrity(self):
self.assertEqual(len(self.domain._parameters), 3)
def test_simple(self):
self.assertEqual(self.domain.size, 3, msg="Size of domain should equal 3")
self.assertTrue(np.allclose(self.domain.lower, -1.0), msg="Lower of domain should equal -1 for all parameters")
self.assertTrue(np.allclose(self.domain.upper, 1.0), msg="Lower of domain should equal 1 for all parameters")
def test_equality(self):
dne = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] +
[gpflowopt.domain.ContinuousParameter("x3", -3, 1)])
self.assertNotEqual(self.domain, dne, msg="One lower bound mismatch, should not be equal.")
dne = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] +
[gpflowopt.domain.ContinuousParameter("x3", -1, 2)])
self.assertNotEqual(self.domain, dne, msg="One upper bound mismatch, should not be equal.")
dne = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)])
self.assertNotEqual(self.domain, dne, msg="Size mismatch")
de = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 4)])
self.assertEqual(self.domain, de, msg="No mismatches, should be equal")
def test_parenting(self):
for p in self.domain:
self.assertEqual(id(p._parent), id(self.domain), "Misspecified parent link detected")
def test_access(self):
for i in range(self.domain.size):
self.assertEqual(self.domain[i].label, "x{0}".format(i+1), "Accessing parameters, encountering "
"incorrect labels")
self.domain[2].lower = -2
de = np.sum([gpflowopt.domain.ContinuousParameter("x{0}".format(i), -1, 1) for i in range(1, 3)] +
[gpflowopt.domain.ContinuousParameter("x3", -2, 1)])
self.assertEqual(self.domain, de, msg="No mismatches, should be equal")
def test_containment(self):
A = np.random.rand(50,3)*2-1
self.assertTrue(A in self.domain, msg="Generated random points within domain")
A = np.vstack((A, np.array([-2, -2, -2])))
self.assertFalse(A in self.domain, msg="One of the points was not in the domain")
A = np.random.rand(50,4)*2-1
self.assertFalse(A in self.domain, msg="Generated random points have different dimensionality")
def test_value(self):
self.assertTupleEqual(self.domain.value.shape, (1, 3), msg="Default value has incorrect shape.")
np.testing.assert_allclose(self.domain.value, np.array([[0, 0, 0]]), err_msg="Parameter has incorrect initial value")
A = np.random.rand(10, 3) * 2 - 1
self.domain.value = A
self.assertTupleEqual(self.domain.value.shape, (10, 3), msg="Assigned value has incorrect shape.")
np.testing.assert_allclose(self.domain.value, A, err_msg="Parameter has incorrect value after assignment")
def test_transformation(self):
X = np.random.rand(50,3)*2-1
target = gpflowopt.domain.UnitCube(3)
transform = self.domain >> target
self.assertTrue(np.allclose(transform.forward(X), (X + 1) / 2), msg="Transformation to [0,1] incorrect")
self.assertTrue(np.allclose(transform.backward(transform.forward(X)), X),
msg="Transforming back and forth yields different result")
inv_transform = target >> self.domain
self.assertTrue(np.allclose(transform.backward(transform.forward(X)),
inv_transform.forward(transform.forward(X))),
msg="Inverse transform yields different results")
self.assertTrue(np.allclose((~transform).A.value, inv_transform.A.value))
self.assertTrue(np.allclose((~transform).b.value, inv_transform.b.value))
def test_unitcube(self):
domain = gpflowopt.domain.UnitCube(3)
self.assertTrue(np.allclose(domain.lower, 0))
self.assertTrue(np.allclose(domain.upper, 1))
self.assertEqual(domain.size, 3)
| none | 1 | 2.872879 | 3 | |
bare_python/s08_itertools.py | AndreiHondrari/python_exploration | 3 | 6619511 | #!python3
# type: ignore
from itertools import (
count, dropwhile, groupby, filterfalse, islice, starmap, tee, takewhile,
cycle, zip_longest, product, permutations, combinations,
combinations_with_replacement
)
print("### count")
c = count(step=5)
print(next(c))
print(next(c))
print(next(c))
print(next(c))
print(next(c))
print("### cycle")
cy = cycle([2, 5])
print(next(cy))
print(next(cy))
print("repeat from cycle saved from this point")
print(next(cy)) # -> repeat from cycle saved from this point
print(next(cy))
print("### dropwhile")
myitbl = [1, 2, 11, 3, 4]
def fp(x: int) -> bool:
return x < 10
dw = dropwhile(fp, myitbl)
for x in dw:
print(x) # -> drops 1 and 2 (all elements before 11)
print("### group by")
for y in groupby([3, 3, 3, 2, 5, 5, 6, 6, 6, 8]):
print(y)
print(y[0])
# x[1] -> this is a itertools._grouper object
# (it's an iterator having the complete set)
for i in y[1]:
print("-> " + str(i))
print("### filterfalse")
fi = filterfalse(lambda x: x > 3, [1, 3, 4, 10, 1, 2, 8, 1, 4])
print(list(fi))
print("### islice")
si = islice('ABCDEFGHIJKLMNOPQRSTUVXYZ', 2, 10, 3)
print(list(si))
print("### map")
mapped = map(
lambda x, y, z: x+y+z, (1, 2, 3, 4), (10, 20, 30), (100, 200, 300, 400)
)
print(list(mapped))
print("### starmap")
def totalsum(*args):
return sum(args)
smp = starmap(totalsum, [[1, 2, 3], [10, 10, 10, 1000, 9]])
print(next(smp))
print(next(smp))
print("### takewhile")
tw = takewhile(lambda x: x < 4, [1, 2, 3, 2, 1, 3, 10, 3, 2, 1, 2, 3, 2, 1])
print(list(tw))
print("### tee")
# splits iterator into n
te = tee([3, 4], 3)
for z in te:
print(list(z))
print("### zip")
zi = zip([1, 2, 3], [10, 20, 30, 40], [333, 444, 555])
print(list(zi))
print("### zip_longest")
zi = zip_longest(
[1, 2, 3], [10, 20, 30, 40, 50, 60, 70], [333, 444, 555],
fillvalue=9999999
)
print(list(zi))
print("### product")
print(list(product([1, 2], [3, 4], [5])))
print("### permutations")
print(list(permutations('ABC', 3)))
print("### combinations")
print(list(combinations('ABC', 2)))
print(list(combinations('ABC', 3)))
print("### combinations_with_replacement")
print(list(combinations_with_replacement('ABC', 3)))
| #!python3
# type: ignore
from itertools import (
count, dropwhile, groupby, filterfalse, islice, starmap, tee, takewhile,
cycle, zip_longest, product, permutations, combinations,
combinations_with_replacement
)
print("### count")
c = count(step=5)
print(next(c))
print(next(c))
print(next(c))
print(next(c))
print(next(c))
print("### cycle")
cy = cycle([2, 5])
print(next(cy))
print(next(cy))
print("repeat from cycle saved from this point")
print(next(cy)) # -> repeat from cycle saved from this point
print(next(cy))
print("### dropwhile")
myitbl = [1, 2, 11, 3, 4]
def fp(x: int) -> bool:
return x < 10
dw = dropwhile(fp, myitbl)
for x in dw:
print(x) # -> drops 1 and 2 (all elements before 11)
print("### group by")
for y in groupby([3, 3, 3, 2, 5, 5, 6, 6, 6, 8]):
print(y)
print(y[0])
# x[1] -> this is a itertools._grouper object
# (it's an iterator having the complete set)
for i in y[1]:
print("-> " + str(i))
print("### filterfalse")
fi = filterfalse(lambda x: x > 3, [1, 3, 4, 10, 1, 2, 8, 1, 4])
print(list(fi))
print("### islice")
si = islice('ABCDEFGHIJKLMNOPQRSTUVXYZ', 2, 10, 3)
print(list(si))
print("### map")
mapped = map(
lambda x, y, z: x+y+z, (1, 2, 3, 4), (10, 20, 30), (100, 200, 300, 400)
)
print(list(mapped))
print("### starmap")
def totalsum(*args):
return sum(args)
smp = starmap(totalsum, [[1, 2, 3], [10, 10, 10, 1000, 9]])
print(next(smp))
print(next(smp))
print("### takewhile")
tw = takewhile(lambda x: x < 4, [1, 2, 3, 2, 1, 3, 10, 3, 2, 1, 2, 3, 2, 1])
print(list(tw))
print("### tee")
# splits iterator into n
te = tee([3, 4], 3)
for z in te:
print(list(z))
print("### zip")
zi = zip([1, 2, 3], [10, 20, 30, 40], [333, 444, 555])
print(list(zi))
print("### zip_longest")
zi = zip_longest(
[1, 2, 3], [10, 20, 30, 40, 50, 60, 70], [333, 444, 555],
fillvalue=9999999
)
print(list(zi))
print("### product")
print(list(product([1, 2], [3, 4], [5])))
print("### permutations")
print(list(permutations('ABC', 3)))
print("### combinations")
print(list(combinations('ABC', 2)))
print(list(combinations('ABC', 3)))
print("### combinations_with_replacement")
print(list(combinations_with_replacement('ABC', 3)))
| en | 0.311121 | #!python3 # type: ignore ## count") ## cycle") # -> repeat from cycle saved from this point ## dropwhile") # -> drops 1 and 2 (all elements before 11) ## group by") # x[1] -> this is a itertools._grouper object # (it's an iterator having the complete set) ## filterfalse") ## islice") ## map") ## starmap") ## takewhile") ## tee") # splits iterator into n ## zip") ## zip_longest") ## product") ## permutations") ## combinations") ## combinations_with_replacement") | 3.384251 | 3 |
newsfeedsystem/tags/views.py | bakowroc/newsfeed-system | 0 | 6619512 | from rest_framework.generics import(
CreateAPIView,
ListAPIView,
RetrieveAPIView,
DestroyAPIView,
UpdateAPIView,
)
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly
)
from tags.models import Tag
from tags.api.serializers import (
TagSerializer,
TagDetailSerializer,
TagCreateSerializer,
)
class TagCreate(CreateAPIView):
queryset = Tag.objects.all()
serializer_class = TagCreateSerializer
permission_classes = [IsAdminUser]
class TagDetail(RetrieveAPIView):
queryset = Tag.objects.all()
serializer_class = TagDetailSerializer
permission_classes = [AllowAny]
class TagDestroy(DestroyAPIView):
queryset = Tag.objects.all()
serializer_class = TagDetailSerializer
class TagList(ListAPIView):
queryset = Tag.objects.all()
serializer_class = TagSerializer
permission_classes = [AllowAny]
class TagUpdate(UpdateAPIView):
queryset = Tag.objects.all()
serializer_class = TagDetailSerializer
permission_classes = [IsAdminUser]
| from rest_framework.generics import(
CreateAPIView,
ListAPIView,
RetrieveAPIView,
DestroyAPIView,
UpdateAPIView,
)
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly
)
from tags.models import Tag
from tags.api.serializers import (
TagSerializer,
TagDetailSerializer,
TagCreateSerializer,
)
class TagCreate(CreateAPIView):
queryset = Tag.objects.all()
serializer_class = TagCreateSerializer
permission_classes = [IsAdminUser]
class TagDetail(RetrieveAPIView):
queryset = Tag.objects.all()
serializer_class = TagDetailSerializer
permission_classes = [AllowAny]
class TagDestroy(DestroyAPIView):
queryset = Tag.objects.all()
serializer_class = TagDetailSerializer
class TagList(ListAPIView):
queryset = Tag.objects.all()
serializer_class = TagSerializer
permission_classes = [AllowAny]
class TagUpdate(UpdateAPIView):
queryset = Tag.objects.all()
serializer_class = TagDetailSerializer
permission_classes = [IsAdminUser]
| none | 1 | 1.968215 | 2 | |
bflib/characters/races/base.py | ChrisLR/BasicDungeonRL | 3 | 6619513 | <filename>bflib/characters/races/base.py
import abc
from datetime import timedelta
from bflib import languages, restrictions, units
from bflib.characters import specialabilities, savingthrows
from bflib.keywords.items import WearLocation, WieldLocation
class Race(object):
__metaclass__ = abc.ABCMeta
name = ""
average_height = units.Feet(0)
average_weight = units.Pound(0)
average_lifespan = timedelta(0)
restriction_set = restrictions.RestrictionSet()
racial_class = None
racial_language = languages.Common
size = None
special_ability_set = specialabilities.SpecialAbilitySet()
saving_throw_set = savingthrows.SavingThrowSet()
wear_locations = (
WearLocation.Head,
WearLocation.Face,
WearLocation.Neck,
WearLocation.Torso,
WearLocation.Arms,
WearLocation.Arms,
WearLocation.Hands,
WearLocation.Hands,
WearLocation.Rings,
WearLocation.Rings,
WearLocation.Legs,
WearLocation.Legs,
WearLocation.Feet,
WearLocation.Feet,
WearLocation.Bandolier,
WearLocation.Back,
WearLocation.Belt,
WearLocation.Waist,
)
wield_locations = (
WieldLocation.LeftHand,
WieldLocation.RightHand,
)
| <filename>bflib/characters/races/base.py
import abc
from datetime import timedelta
from bflib import languages, restrictions, units
from bflib.characters import specialabilities, savingthrows
from bflib.keywords.items import WearLocation, WieldLocation
class Race(object):
__metaclass__ = abc.ABCMeta
name = ""
average_height = units.Feet(0)
average_weight = units.Pound(0)
average_lifespan = timedelta(0)
restriction_set = restrictions.RestrictionSet()
racial_class = None
racial_language = languages.Common
size = None
special_ability_set = specialabilities.SpecialAbilitySet()
saving_throw_set = savingthrows.SavingThrowSet()
wear_locations = (
WearLocation.Head,
WearLocation.Face,
WearLocation.Neck,
WearLocation.Torso,
WearLocation.Arms,
WearLocation.Arms,
WearLocation.Hands,
WearLocation.Hands,
WearLocation.Rings,
WearLocation.Rings,
WearLocation.Legs,
WearLocation.Legs,
WearLocation.Feet,
WearLocation.Feet,
WearLocation.Bandolier,
WearLocation.Back,
WearLocation.Belt,
WearLocation.Waist,
)
wield_locations = (
WieldLocation.LeftHand,
WieldLocation.RightHand,
)
| none | 1 | 2.634381 | 3 | |
tests/test_jamendo.py | 9seconds/rymtracks | 1 | 6619514 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for Jamendo service.
"""
if __name__ == "__main__":
from mixins import FetchMixin
else:
from .mixins import FetchMixin
from unittest import TestCase, main
from six import u
##############################################################################
class JamendoCase(FetchMixin, TestCase):
"""
Jamendo test case.
"""
URL = "http://www.jamendo.com/en/list/a128698/sue-o-de-dahlia"
DATA = (
(u("Entre Tu Y Yo Track"), "4:03"),
(u("Creo En Ti"), "3:18"),
(u("De Que Vale"), "3:33"),
(u("Inyecci\xf3n De Vida"), "3:45"),
(u("Marioneta"), "3:28"),
(u("Quedate"), "3:24"),
(u("Una Oportunidad"), "3:50"),
(u("Encanto Natural"), "3:42"),
(u("Marioneta Acustico"), "3:39"),
(u("Entre Tu Y Yo Acustico"), "3:29")
)
##############################################################################
if __name__ == "__main__":
main(verbosity=2)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for Jamendo service.
"""
if __name__ == "__main__":
from mixins import FetchMixin
else:
from .mixins import FetchMixin
from unittest import TestCase, main
from six import u
##############################################################################
class JamendoCase(FetchMixin, TestCase):
"""
Jamendo test case.
"""
URL = "http://www.jamendo.com/en/list/a128698/sue-o-de-dahlia"
DATA = (
(u("Entre Tu Y Yo Track"), "4:03"),
(u("Creo En Ti"), "3:18"),
(u("De Que Vale"), "3:33"),
(u("Inyecci\xf3n De Vida"), "3:45"),
(u("Marioneta"), "3:28"),
(u("Quedate"), "3:24"),
(u("Una Oportunidad"), "3:50"),
(u("Encanto Natural"), "3:42"),
(u("Marioneta Acustico"), "3:39"),
(u("Entre Tu Y Yo Acustico"), "3:29")
)
##############################################################################
if __name__ == "__main__":
main(verbosity=2)
| de | 0.661562 | #!/usr/bin/env python # -*- coding: utf-8 -*- Tests for Jamendo service. ############################################################################## Jamendo test case. ############################################################################## | 2.325879 | 2 |
apps/projects/templatetags/project_tags.py | jfterpstra/onepercentclub-site | 7 | 6619515 | <filename>apps/projects/templatetags/project_tags.py
from django import template
register = template.Library()
@register.assignment_tag
def get_project(project_id):
from apps.projects.models import Project
return Project.objects.get(pk=int(project_id))
| <filename>apps/projects/templatetags/project_tags.py
from django import template
register = template.Library()
@register.assignment_tag
def get_project(project_id):
from apps.projects.models import Project
return Project.objects.get(pk=int(project_id))
| none | 1 | 1.692001 | 2 | |
odmlui/editor_tab.py | mpsonntag/odml-ui | 3 | 6619516 | import os.path
import pygtkcompat
import odml
import odml.validation
from odml.tools.parser_utils import InvalidVersionException
from odml.tools.converters.version_converter import VersionConverter
import gtk
from .command_manager import CommandManager
from .helpers import uri_to_path, get_parser_for_uri, get_extension, \
get_parser_for_file_type, handle_section_import
from .message_dialog import ErrorDialog
from .treemodel import event
from .validation_window import ValidationWindow
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
class EditorTab(object):
"""
Represents a Document Object in the Editor
"""
file_uri = None
edited = 0
def __init__(self, window, cmdm=None):
if cmdm is None:
cmdm = CommandManager()
cmdm.enable_undo = self.enable_undo
cmdm.enable_redo = self.enable_redo
cmdm.error_func = window.command_error
self.command_manager = cmdm
self.document = None
self.window = window
self._clones = [self]
def new(self, doc=None):
"""
initialize a new document
"""
if doc is None:
doc = odml.Document()
sec = odml.Section(name="Default Section")
doc.append(sec)
self.window.registry.add(doc)
self.document = doc
self.file_uri = None
def load(self, uri):
self.file_uri = uri
file_path = uri_to_path(uri)
parser = get_parser_for_uri(file_path)
try:
self.document = odml.load(file_path, parser)
except InvalidVersionException as inver:
_, curr_file = os.path.split(file_path)
err_header = "Cannot open file '%s'." % curr_file
err_msg = ("You are trying to open an odML file of an outdated format. "
"\n\nUse 'File .. import' to convert and open files of "
"a previous odML format.")
ErrorDialog(self.window, err_header, err_msg)
self.window.set_welcome()
return False
except Exception as exc:
ErrorDialog(self.window, "Error parsing '%s'" % file_path, str(exc))
self.window.set_welcome()
return False
self.document.finalize()
# Make sure all Properties within all sections are properly
# initialized with the "pseudo_values" attribute.
for sec in self.document.sections:
handle_section_import(sec)
self.window.registry.add(self.document)
self.window._info_bar.show_info("Loading of %s done!" % (os.path.basename(file_path)))
return True
def convert(self, uri):
"""
Convert a previous odML version to the current one. If the file can be
successfully converted, it is saved with the old filename and the
postfix '_converted' in the xml format and immediately loaded into a new tab.
:param uri: uri of the conversion candidate file.
:return: True if loading worked, False if any conversion or loading errors occur.
"""
file_path = uri_to_path(uri)
parser = get_parser_for_uri(file_path)
vconv = VersionConverter(file_path)
# Currently we can only convert to xml out of the box,
# so don't bother about the extension.
file_name = os.path.basename(file_path)
new_file_name = "%s_converted.xml" % os.path.splitext(file_name)[0]
new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
try:
vconv.write_to_file(new_file_path, parser)
except Exception as err:
err_header = "Error converting file '%s'." % file_name
ErrorDialog(self.window, err_header, str(err))
return False
# When we have written, we can load!
return self.load(new_file_path)
def reset(self):
# initialize the edit stack position
self.edited = 0
self.command_manager.reset()
self.enable_undo(enable=False)
self.enable_redo(enable=False)
@property
def is_modified(self):
return self.edited != len(self.command_manager)
    def save_if_changed(self):
        """
        if the document was modified, ask the user if he or she wants to save the document
        returns false if the user cancelled the action
        """
        if not self.is_modified:
            return True
        msg = "%s has been modified. Do you want to save your changes?" % (
            self.file_uri if self.file_uri is not None else "The document")
        dialog = gtk.MessageDialog(transient_for=self.window,
                                   modal=True,
                                   message_type=gtk.MESSAGE_INFO,
                                   buttons=gtk.BUTTONS_YES_NO,
                                   text=msg)
        dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        dialog.set_title("Save changes?")
        dialog.set_default_response(gtk.RESPONSE_CANCEL)
        response = dialog.run()
        dialog.destroy()
        # Cancel aborts the pending action; "No" discards the changes.
        if response == gtk.RESPONSE_CANCEL:
            return False
        if response == gtk.RESPONSE_NO:
            return True
        return self.window.save(None)
    def save(self, uri, file_type=None):
        """
        Validate and write the current document to *uri*.

        :param uri: target uri; the canonical extension is appended when it
            does not match the chosen parser.
        :param file_type: optional explicit format overriding the extension.
        :return: True on success, None when validation or writing failed.
        """
        # Mandatory document validation before save to avoid
        # not being able to open an invalid document.
        self.remove_validation()
        validation = odml.validation.Validation(self.document)
        self.document.validation_result = validation
        for err in self.document.validation_result.errors:
            if err.is_error:
                self.window._info_bar.show_info(
                    "Invalid document. Please fix errors (red) before saving.")
                self.validate()
                return
        self.document.clean()
        parser = None
        if file_type:
            parser = get_parser_for_file_type(file_type)
        if not parser:
            parser = get_parser_for_uri(uri)
        file_path = uri_to_path(uri)
        ext = get_extension(file_path)
        if ext != parser:
            file_path += ".%s" % parser.lower()
        try:
            odml.save(self.document, file_path, parser)
        except Exception as exc:
            self.window._info_bar.show_info("Save failed: %s" % exc)
            return
        # undo the clean
        self.document.finalize()
        # Finalize also removes all pseudo_values for any unchanged terminology
        # entries, rendering these Properties unmodifiable. Re-initialize
        # the pseudo_values for these Properties.
        for sec in self.document.sections:
            handle_section_import(sec)
        self.window._info_bar.show_info("%s was saved" % (os.path.basename(file_path)))
        # Remember the stack length so is_modified reports False again.
        self.edited = len(self.command_manager)
        return True
    def enable_undo(self, enable=True):
        # Propagate undo availability to every tab sharing this document.
        for tab in self._clones:
            tab._enable_undo(enable)
    def _enable_undo(self, enable):
        # Only the currently visible tab updates the window-level undo action.
        if self.window.current_tab is self:
            self.window.enable_undo(enable)
    def enable_redo(self, enable=True):
        # Propagate redo availability to every tab sharing this document.
        for tab in self._clones:
            tab._enable_redo(enable)
    def _enable_redo(self, enable=True):
        # Only the currently visible tab updates the window-level redo action.
        if self.window.current_tab is self:
            self.window.enable_redo(enable)
    def clone(self, klass=None):
        """Create another tab sharing this tab's document and command stack."""
        if klass is None:
            klass = self.__class__
        ntab = klass(self.window, self.command_manager)
        self._clones.append(ntab)
        # All clones share one list object so membership changes reach every tab.
        ntab._clones = self._clones
        ntab.file_uri = self.file_uri
        ntab.document = self.document
        return ntab
    def validate(self):
        """check the document for errors"""
        self.remove_validation()
        validation = odml.validation.Validation(self.document)
        self.document.validation_result = validation
        if len(validation.errors) > 0:
            # Notify the gui about affected objects and show the error window.
            self.update_validation_error_objects(validation.errors)
            ValidationWindow(self).show()
        else:
            self.window._info_bar.show_info("The document is valid. No errors found.")
            self.remove_validation()
    def update_validation_error_objects(self, errors):
        """
        send out a change event for all error-affected objects
        so that the gui can refresh these
        """
        for err in errors:
            # Fake a "set _error" change so tree views re-render the object.
            change_event = event.ChangeContext(('_error', True))
            change_event.post_change = True
            change_event.action = "set"
            change_event.pass_on(err.obj)
    def remove_validation(self):
        """remove any dangling validation references"""
        if not hasattr(self.document, "validation_result"):
            return
        errors = self.document.validation_result.errors
        del self.document.validation_result
        # Refresh the formerly flagged objects so error markers disappear.
        self.update_validation_error_objects(errors)
    def get_name(self):
        """return the filename of this tab's document"""
        # str() guards against file_uri being None for unsaved documents.
        return os.path.basename(str(self.file_uri))
    def update_label(self):
        """update the tab label with the current filename"""
        # NOTE(review): self.label is presumably assigned by the containing
        # notebook widget; it is not set anywhere in this class -- confirm.
        self.label.set_text(self.get_name())
    def close(self):
        """
        any cleanup?
        """
        # Detach this tab from the shared clone list; the document stays
        # alive as long as other clones still reference it.
        self._clones.remove(self)
| import os.path
import pygtkcompat
import odml
import odml.validation
from odml.tools.parser_utils import InvalidVersionException
from odml.tools.converters.version_converter import VersionConverter
import gtk
from .command_manager import CommandManager
from .helpers import uri_to_path, get_parser_for_uri, get_extension, \
get_parser_for_file_type, handle_section_import
from .message_dialog import ErrorDialog
from .treemodel import event
from .validation_window import ValidationWindow
# Map the legacy PyGTK-style "gtk" API onto GTK 3 before any gtk usage below.
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
class EditorTab(object):
    """
    Represents a Document Object in the Editor

    A tab owns (or shares, see clone()) an odml document, the window it is
    displayed in, and the undo/redo command stack.
    """
    # uri of the file backing this tab; None for unsaved documents
    file_uri = None
    # command stack length at the last save; compared in is_modified
    edited = 0
    def __init__(self, window, cmdm=None):
        # Reuse the command manager when cloning; create a fresh one otherwise.
        if cmdm is None:
            cmdm = CommandManager()
            cmdm.enable_undo = self.enable_undo
            cmdm.enable_redo = self.enable_redo
            cmdm.error_func = window.command_error
        self.command_manager = cmdm
        self.document = None
        self.window = window
        # List of tabs sharing this document; starts with just this tab.
        self._clones = [self]
    def new(self, doc=None):
        """
        initialize a new document

        When no document is given, a fresh one with a single default
        section is created and registered with the window.
        """
        if doc is None:
            doc = odml.Document()
            sec = odml.Section(name="Default Section")
            doc.append(sec)
        self.window.registry.add(doc)
        self.document = doc
        self.file_uri = None
    def load(self, uri):
        """
        Load an odml document from *uri* into this tab.

        :return: True on success, False when parsing fails (an error dialog
            is shown and the welcome screen restored).
        """
        self.file_uri = uri
        file_path = uri_to_path(uri)
        parser = get_parser_for_uri(file_path)
        try:
            self.document = odml.load(file_path, parser)
        except InvalidVersionException as inver:
            # Outdated odml format: point the user at the import/convert path.
            _, curr_file = os.path.split(file_path)
            err_header = "Cannot open file '%s'." % curr_file
            err_msg = ("You are trying to open an odML file of an outdated format. "
                       "\n\nUse 'File .. import' to convert and open files of "
                       "a previous odML format.")
            ErrorDialog(self.window, err_header, err_msg)
            self.window.set_welcome()
            return False
        except Exception as exc:
            ErrorDialog(self.window, "Error parsing '%s'" % file_path, str(exc))
            self.window.set_welcome()
            return False
        self.document.finalize()
        # Make sure all Properties within all sections are properly
        # initialized with the "pseudo_values" attribute.
        for sec in self.document.sections:
            handle_section_import(sec)
        self.window.registry.add(self.document)
        self.window._info_bar.show_info("Loading of %s done!" % (os.path.basename(file_path)))
        return True
    def convert(self, uri):
        """
        Convert a previous odML version to the current one. If the file can be
        successfully converted, it is saved with the old filename and the
        postfix '_converted' in the xml format and immediately loaded into a new tab.

        :param uri: uri of the conversion candidate file.
        :return: True if loading worked, False if any conversion or loading errors occur.
        """
        file_path = uri_to_path(uri)
        parser = get_parser_for_uri(file_path)
        vconv = VersionConverter(file_path)
        # Currently we can only convert to xml out of the box,
        # so don't bother about the extension.
        file_name = os.path.basename(file_path)
        new_file_name = "%s_converted.xml" % os.path.splitext(file_name)[0]
        new_file_path = os.path.join(os.path.dirname(file_path), new_file_name)
        try:
            vconv.write_to_file(new_file_path, parser)
        except Exception as err:
            # Broad catch is deliberate: conversion failures are surfaced
            # to the user in a dialog instead of crashing the GUI.
            err_header = "Error converting file '%s'." % file_name
            ErrorDialog(self.window, err_header, str(err))
            return False
        # When we have written, we can load!
        return self.load(new_file_path)
    def reset(self):
        """Reset the undo/redo machinery and mark the document as unmodified."""
        # initialize the edit stack position
        self.edited = 0
        self.command_manager.reset()
        self.enable_undo(enable=False)
        self.enable_redo(enable=False)
    @property
    def is_modified(self):
        # Modified means commands were executed after the last save;
        # self.edited caches the command stack length at save time.
        return self.edited != len(self.command_manager)
    def save_if_changed(self):
        """
        if the document was modified, ask the user if he or she wants to save the document
        returns false if the user cancelled the action
        """
        if not self.is_modified:
            return True
        msg = "%s has been modified. Do you want to save your changes?" % (
            self.file_uri if self.file_uri is not None else "The document")
        dialog = gtk.MessageDialog(transient_for=self.window,
                                   modal=True,
                                   message_type=gtk.MESSAGE_INFO,
                                   buttons=gtk.BUTTONS_YES_NO,
                                   text=msg)
        dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        dialog.set_title("Save changes?")
        dialog.set_default_response(gtk.RESPONSE_CANCEL)
        response = dialog.run()
        dialog.destroy()
        # Cancel aborts the pending action; "No" discards the changes.
        if response == gtk.RESPONSE_CANCEL:
            return False
        if response == gtk.RESPONSE_NO:
            return True
        return self.window.save(None)
    def save(self, uri, file_type=None):
        """
        Validate and write the current document to *uri*.

        :param uri: target uri; the canonical extension is appended when it
            does not match the chosen parser.
        :param file_type: optional explicit format overriding the extension.
        :return: True on success, None when validation or writing failed.
        """
        # Mandatory document validation before save to avoid
        # not being able to open an invalid document.
        self.remove_validation()
        validation = odml.validation.Validation(self.document)
        self.document.validation_result = validation
        for err in self.document.validation_result.errors:
            if err.is_error:
                self.window._info_bar.show_info(
                    "Invalid document. Please fix errors (red) before saving.")
                self.validate()
                return
        self.document.clean()
        parser = None
        if file_type:
            parser = get_parser_for_file_type(file_type)
        if not parser:
            parser = get_parser_for_uri(uri)
        file_path = uri_to_path(uri)
        ext = get_extension(file_path)
        if ext != parser:
            file_path += ".%s" % parser.lower()
        try:
            odml.save(self.document, file_path, parser)
        except Exception as exc:
            self.window._info_bar.show_info("Save failed: %s" % exc)
            return
        # undo the clean
        self.document.finalize()
        # Finalize also removes all pseudo_values for any unchanged terminology
        # entries, rendering these Properties unmodifiable. Re-initialize
        # the pseudo_values for these Properties.
        for sec in self.document.sections:
            handle_section_import(sec)
        self.window._info_bar.show_info("%s was saved" % (os.path.basename(file_path)))
        # Remember the stack length so is_modified reports False again.
        self.edited = len(self.command_manager)
        return True
    def enable_undo(self, enable=True):
        # Propagate undo availability to every tab sharing this document.
        for tab in self._clones:
            tab._enable_undo(enable)
    def _enable_undo(self, enable):
        # Only the currently visible tab updates the window-level undo action.
        if self.window.current_tab is self:
            self.window.enable_undo(enable)
    def enable_redo(self, enable=True):
        # Propagate redo availability to every tab sharing this document.
        for tab in self._clones:
            tab._enable_redo(enable)
    def _enable_redo(self, enable=True):
        # Only the currently visible tab updates the window-level redo action.
        if self.window.current_tab is self:
            self.window.enable_redo(enable)
    def clone(self, klass=None):
        """Create another tab sharing this tab's document and command stack."""
        if klass is None:
            klass = self.__class__
        ntab = klass(self.window, self.command_manager)
        self._clones.append(ntab)
        # All clones share one list object so membership changes reach every tab.
        ntab._clones = self._clones
        ntab.file_uri = self.file_uri
        ntab.document = self.document
        return ntab
    def validate(self):
        """check the document for errors"""
        self.remove_validation()
        validation = odml.validation.Validation(self.document)
        self.document.validation_result = validation
        if len(validation.errors) > 0:
            # Notify the gui about affected objects and show the error window.
            self.update_validation_error_objects(validation.errors)
            ValidationWindow(self).show()
        else:
            self.window._info_bar.show_info("The document is valid. No errors found.")
            self.remove_validation()
    def update_validation_error_objects(self, errors):
        """
        send out a change event for all error-affected objects
        so that the gui can refresh these
        """
        for err in errors:
            # Fake a "set _error" change so tree views re-render the object.
            change_event = event.ChangeContext(('_error', True))
            change_event.post_change = True
            change_event.action = "set"
            change_event.pass_on(err.obj)
    def remove_validation(self):
        """remove any dangling validation references"""
        if not hasattr(self.document, "validation_result"):
            return
        errors = self.document.validation_result.errors
        del self.document.validation_result
        # Refresh the formerly flagged objects so error markers disappear.
        self.update_validation_error_objects(errors)
    def get_name(self):
        """return the filename of this tab's document"""
        # str() guards against file_uri being None for unsaved documents.
        return os.path.basename(str(self.file_uri))
    def update_label(self):
        """update the tab label with the current filename"""
        # NOTE(review): self.label is presumably assigned by the containing
        # notebook widget; it is not set anywhere in this class -- confirm.
        self.label.set_text(self.get_name())
    def close(self):
        """
        any cleanup?
        """
        # Detach this tab from the shared clone list; the document stays
        # alive as long as other clones still reference it.
        self._clones.remove(self)
| en | 0.774626 | Represents a Document Object in the Editor initialize a new document # Make sure all Properties within all sections are properly # initialized with the "pseudo_values" attribute. Convert a previous odML version to the current one. If the file can be successfully converted, it is saved with the old filename and the postfix '_converted' in the xml format and immediately loaded into a new tab. :param uri: uri of the conversion candidate file. :return: True if loading worked, False if any conversion or loading errors occur. # Currently we can only convert to xml out of the box, # so don't bother about the extension. # When we have written, we can load! # initialize the edit stack position if the document was modified, ask the user if he or she wants to save the document returns false if the user cancelled the action # Mandatory document validation before save to avoid # not being able to open an invalid document. # undo the clean # Finalize also removes all pseudo_values for any unchanged terminology # entries, rendering these Properties unmodifiable. Re-initialize # the pseudo_values for these Properties. check the document for errors send out a change event for all error-affected objects so that the gui can refresh these remove any dangling validation references return the filename of this tab's document update the tab label with the current filename any cleanup? | 2.370804 | 2 |
flask_display.py | mm698657/robinbot | 0 | 6619517 | #!/usr/bin/python3
#from flask import Flask,render_template
from flask import Flask, request, render_template
import time
import sqlite3
# Flask application object; the routes below are registered against it.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Trivial landing route, usable as a liveness check."""
    greeting = 'Hello, World!'
    return greeting
@app.route('/positions')
def display_table():
    """Render every recorded position, ordered by symbol.

    Fix: the original never closed the sqlite connection, leaking one
    connection per request; the try/finally guarantees it is released.
    """
    con = sqlite3.connect('/home/ec2-user/robinbot/buys.db')
    try:
        cur = con.cursor()
        cur.execute('select * from buys order by symbol')
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("display.html", rows=rows)
@app.route('/profit')
def display_foo():
    """Render per-symbol profit percentages plus an aggregate TOTAL row.

    The TOTAL row comes from the ``total_profit`` view (created once via
    IF NOT EXISTS) and excludes the SPY benchmark position.

    Fix: the original never closed the sqlite connection, leaking one
    connection per request; the try/finally guarantees it is released.
    """
    con = sqlite3.connect('/home/ec2-user/robinbot/buys.db')
    try:
        cur = con.cursor()
        cur.execute('select symbol, buy_price, current_price, bot_recommends, buy_date, round(((current_price - buy_price) / buy_price) * 100, 2 ) as profit from buys order by profit')
        rows = cur.fetchall()
        cur.execute('create view IF NOT EXISTS total_profit AS select "TOTAL" as symbol ,round(sum(buy_price), 2) as buy_price, round(sum(current_price), 2) as current_price, "TRUE" as bot_recommends, "NULL" as buy_date, round(((sum(current_price) - sum(buy_price))/sum(buy_price)) * 100, 2) as profit from buys WHERE symbol != "SPY"')
        cur.execute('select * from total_profit')
        rows.append(cur.fetchall()[0])
    finally:
        con.close()
    return render_template("display_profit.html", rows=rows)
@app.route('/current_buys')
def display_buys():
    """Render only the positions the bot currently recommends holding.

    Fix: the original never closed the sqlite connection, leaking one
    connection per request; the try/finally guarantees it is released.
    """
    con = sqlite3.connect('/home/ec2-user/robinbot/buys.db')
    try:
        cur = con.cursor()
        cur.execute('select symbol, buy_price, current_price, bot_recommends, buy_date, round(((current_price - buy_price) / buy_price) * 100, 0) as profit from buys where bot_recommends == "True" order by profit')
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("display_profit.html", rows=rows)
| #!/usr/bin/python3
#from flask import Flask,render_template
from flask import Flask, request, render_template
import time
import sqlite3
# Flask application object; the routes below are registered against it.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Trivial landing route, usable as a liveness check."""
    greeting = 'Hello, World!'
    return greeting
@app.route('/positions')
def display_table():
    """Render every recorded position, ordered by symbol.

    Fix: the original never closed the sqlite connection, leaking one
    connection per request; the try/finally guarantees it is released.
    """
    con = sqlite3.connect('/home/ec2-user/robinbot/buys.db')
    try:
        cur = con.cursor()
        cur.execute('select * from buys order by symbol')
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("display.html", rows=rows)
@app.route('/profit')
def display_foo():
    """Render per-symbol profit percentages plus an aggregate TOTAL row.

    The TOTAL row comes from the ``total_profit`` view (created once via
    IF NOT EXISTS) and excludes the SPY benchmark position.

    Fix: the original never closed the sqlite connection, leaking one
    connection per request; the try/finally guarantees it is released.
    """
    con = sqlite3.connect('/home/ec2-user/robinbot/buys.db')
    try:
        cur = con.cursor()
        cur.execute('select symbol, buy_price, current_price, bot_recommends, buy_date, round(((current_price - buy_price) / buy_price) * 100, 2 ) as profit from buys order by profit')
        rows = cur.fetchall()
        cur.execute('create view IF NOT EXISTS total_profit AS select "TOTAL" as symbol ,round(sum(buy_price), 2) as buy_price, round(sum(current_price), 2) as current_price, "TRUE" as bot_recommends, "NULL" as buy_date, round(((sum(current_price) - sum(buy_price))/sum(buy_price)) * 100, 2) as profit from buys WHERE symbol != "SPY"')
        cur.execute('select * from total_profit')
        rows.append(cur.fetchall()[0])
    finally:
        con.close()
    return render_template("display_profit.html", rows=rows)
@app.route('/current_buys')
def display_buys():
    """Render only the positions the bot currently recommends holding.

    Fix: the original never closed the sqlite connection, leaking one
    connection per request; the try/finally guarantees it is released.
    """
    con = sqlite3.connect('/home/ec2-user/robinbot/buys.db')
    try:
        cur = con.cursor()
        cur.execute('select symbol, buy_price, current_price, bot_recommends, buy_date, round(((current_price - buy_price) / buy_price) * 100, 0) as profit from buys where bot_recommends == "True" order by profit')
        rows = cur.fetchall()
    finally:
        con.close()
    return render_template("display_profit.html", rows=rows)
| en | 0.143899 | #!/usr/bin/python3 #from flask import Flask,render_template | 2.701312 | 3 |
test/distributions/test_distributions.py | SamuelMarks/botorch | 0 | 6619518 | <reponame>SamuelMarks/botorch
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Probability Distributions.
This is adapted from https://github.com/probtorch/pytorch/pull/143.
TODO: replace with PyTorch version once the PR is up and landed.
"""
import random
import unittest
from collections import namedtuple
from itertools import product
from numbers import Number
import torch
from botorch.distributions import Kumaraswamy
from botorch.utils.testing import BotorchTestCase
from torch._six import inf, string_classes
from torch.autograd import grad
from torch.distributions import Distribution, Independent
from torch.distributions.constraints import Constraint, is_dependent
# Seed applied in TestCase.setUp so sampled EXAMPLES stay deterministic.
SEED = 1234
# Pairs a distribution class with a list of valid constructor kwargs.
Example = namedtuple("Example", ["Dist", "params"])
EXAMPLES = [
    Example(
        Kumaraswamy,
        [
            # avoid extreme parameters
            {
                "concentration1": 0.5 + 3 * torch.rand(2, 3).requires_grad_(),
                "concentration0": 0.5 + 3 * torch.rand(2, 3).requires_grad_(),
            },
            {
                "concentration1": 0.5 + 3 * torch.rand(4).requires_grad_(),
                "concentration0": 0.5 + 3 * torch.rand(4).requires_grad_(),
            },
        ],
    ),
]
def set_rng_seed(seed):
    """Seed both the torch and stdlib RNGs so sampling is reproducible."""
    for seeder in (torch.manual_seed, random.seed):
        seeder(seed)
def is_iterable(obj):
    """Return True when *obj* supports iteration, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def iter_indices(tensor):
    """Return an iterable over all indices of *tensor*.

    Scalars yield nothing, 1-d tensors yield plain ints, higher-rank
    tensors yield index tuples (row-major order).
    """
    ndim = tensor.dim()
    if ndim == 0:
        return range(0)
    if ndim == 1:
        return range(tensor.size(0))
    return product(*(range(extent) for extent in tensor.size()))
class TestCase(unittest.TestCase):
    """unittest.TestCase variant with a tensor-aware, approximate assertEqual."""
    # default absolute tolerance used when no explicit precision is given
    precision = 1e-5
    def setUp(self):
        set_rng_seed(SEED)
    def assertEqual(self, x, y, prec=None, message="", allow_inf=False):
        """Recursively compare x and y up to *prec* absolute tolerance."""
        # Support the assertEqual(x, y, "message") calling convention.
        if isinstance(prec, str) and message == "":
            message = prec
            prec = None
        if prec is None:
            prec = self.precision
        # Tensor vs. scalar: unwrap the tensor and recurse.
        if isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(x.item(), y, prec, message, allow_inf)
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(x, y.item(), prec, message, allow_inf)
        elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
            def assertTensorsEqual(a, b):
                # Shapes must match exactly; values match within prec.
                super(TestCase, self).assertEqual(a.size(), b.size(), message)
                if a.numel() > 0:
                    b = b.type_as(a)
                    b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
                    # check that NaNs are in the same locations
                    nan_mask = a != a
                    self.assertTrue(torch.equal(nan_mask, b != b), message)
                    diff = a - b
                    diff[nan_mask] = 0
                    # TODO: implement abs on CharTensor
                    if diff.is_signed() and "CharTensor" not in diff.type():
                        diff = diff.abs()
                    max_err = diff.max()
                    self.assertLessEqual(max_err, prec, message)
            super(TestCase, self).assertEqual(x.is_sparse, y.is_sparse, message)
            if x.is_sparse:
                # NOTE(review): safeCoalesce is not defined in this class; the
                # sparse branch appears unused by these tests -- confirm
                # before relying on it.
                x = self.safeCoalesce(x)
                y = self.safeCoalesce(y)
                assertTensorsEqual(x._indices(), y._indices())
                assertTensorsEqual(x._values(), y._values())
            else:
                assertTensorsEqual(x, y)
        elif isinstance(x, string_classes) and isinstance(y, string_classes):
            super(TestCase, self).assertEqual(x, y, message)
        elif type(x) == set and type(y) == set:
            super(TestCase, self).assertEqual(x, y, message)
        elif is_iterable(x) and is_iterable(y):
            # Generic iterables: compare lengths, then elements pairwise.
            super(TestCase, self).assertEqual(len(x), len(y), message)
            for x_, y_ in zip(x, y):
                self.assertEqual(x_, y_, prec, message)
        elif isinstance(x, bool) and isinstance(y, bool):
            super(TestCase, self).assertEqual(x, y, message)
        elif isinstance(x, Number) and isinstance(y, Number):
            # Infinite values only compare equal when explicitly allowed.
            if abs(x) == inf or abs(y) == inf:
                if allow_inf:
                    super(TestCase, self).assertEqual(x, y, message)
                else:
                    self.fail(
                        "Expected finite numeric values - x={}, y={}".format(x, y)
                    )
                return
            super(TestCase, self).assertLessEqual(abs(x - y), prec, message)
        else:
            super(TestCase, self).assertEqual(x, y, message)
class TestKumaraswamy(BotorchTestCase, TestCase):
    """Distribution-contract tests for the Kumaraswamy distribution."""
    def test_kumaraswamy_shape(self):
        """Sample shapes must follow batch/event shape broadcasting rules."""
        concentration1 = torch.randn(2, 3).abs().requires_grad_(True)
        concentration0 = torch.randn(2, 3).abs().requires_grad_(True)
        concentration1_1d = torch.randn(1).abs().requires_grad_(True)
        concentration0_1d = torch.randn(1).abs().requires_grad_(True)
        self.assertEqual(
            Kumaraswamy(concentration1, concentration0).sample().size(), (2, 3)
        )
        self.assertEqual(
            Kumaraswamy(concentration1, concentration0).sample((5,)).size(), (5, 2, 3)
        )
        self.assertEqual(
            Kumaraswamy(concentration1_1d, concentration0_1d).sample().size(), (1,)
        )
        self.assertEqual(
            Kumaraswamy(concentration1_1d, concentration0_1d).sample((1,)).size(),
            (1, 1),
        )
        self.assertEqual(Kumaraswamy(1.0, 1.0).sample().size(), ())
        self.assertEqual(Kumaraswamy(1.0, 1.0).sample((1,)).size(), (1,))
    # Kumaraswamy distribution is not implemented in SciPy
    # Hence these tests are explicit
    def test_kumaraswamy_mean_variance(self):
        """Monte-Carlo check of the analytic mean and variance."""
        c1_1 = torch.randn(2, 3).abs().requires_grad_(True)
        c0_1 = torch.randn(2, 3).abs().requires_grad_(True)
        c1_2 = torch.randn(4).abs().requires_grad_(True)
        c0_2 = torch.randn(4).abs().requires_grad_(True)
        cases = [(c1_1, c0_1), (c1_2, c0_2)]
        for i, (a, b) in enumerate(cases):
            m = Kumaraswamy(a, b)
            # Large sample so the MC estimate is within the 0.01 tolerance.
            samples = m.sample((60000,))
            expected = samples.mean(0)
            actual = m.mean
            error = (expected - actual).abs()
            # error[error == error] drops NaN entries before taking the max.
            max_error = max(error[error == error])
            self.assertLess(
                max_error,
                0.01,
                "Kumaraswamy example {}/{}, incorrect .mean".format(i + 1, len(cases)),
            )
            expected = samples.var(0)
            actual = m.variance
            error = (expected - actual).abs()
            max_error = max(error[error == error])
            self.assertLess(
                max_error,
                0.01,
                "Kumaraswamy example {}/{}, incorrect .variance".format(
                    i + 1, len(cases)
                ),
            )
    def test_valid_parameter_broadcasting(self):
        """Broadcastable parameter combinations yield the expected batch shape."""
        valid_examples = [
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]), concentration0=1.0
                ),
                (2,),
            ),
            (
                Kumaraswamy(concentration1=1, concentration0=torch.tensor([1.0, 1.0])),
                (2,),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]),
                    concentration0=torch.tensor([1.0]),
                ),
                (2,),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]),
                    concentration0=torch.tensor([[1.0], [1.0]]),
                ),
                (2, 2),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]),
                    concentration0=torch.tensor([[1.0]]),
                ),
                (1, 2),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0]),
                    concentration0=torch.tensor([[1.0]]),
                ),
                (1, 1),
            ),
        ]
        for dist, expected_size in valid_examples:
            dist_sample_size = dist.sample().size()
            self.assertEqual(
                dist_sample_size,
                expected_size,
                "actual size: {} != expected size: {}".format(
                    dist_sample_size, expected_size
                ),
            )
    def test_invalid_parameter_broadcasting(self):
        # invalid broadcasting cases; should throw error
        # example type (distribution class, distribution params)
        invalid_examples = [
            (
                Kumaraswamy,
                {
                    "concentration1": torch.tensor([[1, 1]]),
                    "concentration0": torch.tensor([1, 1, 1, 1]),
                },
            ),
            (
                Kumaraswamy,
                {
                    "concentration1": torch.tensor([[[1, 1, 1], [1, 1, 1]]]),
                    "concentration0": torch.tensor([1, 1]),
                },
            ),
        ]
        for dist, kwargs in invalid_examples:
            self.assertRaises(RuntimeError, dist, **kwargs)
    def _check_enumerate_support(self, dist, examples):
        # Helper: verify enumerate_support with and without expansion.
        for params, expected in examples:
            params = {k: torch.tensor(v) for k, v in params.items()}
            expected = torch.tensor(expected)
            d = dist(**params)
            actual = d.enumerate_support(expand=False)
            self.assertEqual(actual, expected)
            actual = d.enumerate_support(expand=True)
            expected_with_expand = expected.expand(
                (-1,) + d.batch_shape + d.event_shape
            )
            self.assertEqual(actual, expected_with_expand)
    def test_repr(self):
        for Dist, params in EXAMPLES:
            for param in params:
                dist = Dist(**param)
                self.assertTrue(repr(dist).startswith(dist.__class__.__name__))
    def test_sample_detached(self):
        """sample() must not propagate gradients from the parameters."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                variable_params = [
                    p for p in param.values() if getattr(p, "requires_grad", False)
                ]
                if not variable_params:
                    continue
                dist = Dist(**param)
                sample = dist.sample()
                self.assertFalse(
                    sample.requires_grad,
                    msg="{} example {}/{}, .sample() is not detached".format(
                        Dist.__name__, i + 1, len(params)
                    ),
                )
    def test_rsample_requires_grad(self):
        """rsample() must keep the graph to differentiable parameters."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                if not any(getattr(p, "requires_grad", False) for p in param.values()):
                    continue
                dist = Dist(**param)
                if not dist.has_rsample:
                    continue
                sample = dist.rsample()
                self.assertTrue(
                    sample.requires_grad,
                    msg="{} example {}/{}, .rsample() does not require grad".format(
                        Dist.__name__, i + 1, len(params)
                    ),
                )
    def test_enumerate_support_type(self):
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                try:
                    self.assertIsInstance(
                        dist.sample(),
                        type(dist.enumerate_support()),
                        msg=(
                            "{} example {}/{}, return type mismatch between "
                            + "sample and enumerate_support."
                        ).format(Dist.__name__, i + 1, len(params)),
                    )
                except NotImplementedError:
                    pass
    def test_distribution_expand(self):
        """expand() must preserve class, log_prob and batch/event shapes."""
        shapes = [torch.Size(), torch.Size((2,)), torch.Size((2, 1))]
        for Dist, params in EXAMPLES:
            for param in params:
                for shape in shapes:
                    d = Dist(**param)
                    expanded_shape = shape + d.batch_shape
                    original_shape = d.batch_shape + d.event_shape
                    expected_shape = shape + original_shape
                    expanded = d.expand(batch_shape=list(expanded_shape))
                    sample = expanded.sample()
                    actual_shape = expanded.sample().shape
                    self.assertEqual(expanded.__class__, d.__class__)
                    self.assertEqual(d.sample().shape, original_shape)
                    self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
                    self.assertEqual(actual_shape, expected_shape)
                    self.assertEqual(expanded.batch_shape, expanded_shape)
                    try:
                        self.assertEqual(
                            expanded.mean,
                            d.mean.expand(expanded_shape + d.event_shape),
                            allow_inf=True,
                        )
                        self.assertEqual(
                            expanded.variance,
                            d.variance.expand(expanded_shape + d.event_shape),
                            allow_inf=True,
                        )
                    except NotImplementedError:
                        pass
    def test_distribution_subclass_expand(self):
        """expand() on a subclass must return an instance of the subclass."""
        expand_by = torch.Size((2,))
        for Dist, params in EXAMPLES:
            class SubClass(Dist):
                pass
            for param in params:
                d = SubClass(**param)
                expanded_shape = expand_by + d.batch_shape
                original_shape = d.batch_shape + d.event_shape
                expected_shape = expand_by + original_shape
                expanded = d.expand(batch_shape=expanded_shape)
                sample = expanded.sample()
                actual_shape = expanded.sample().shape
                self.assertEqual(expanded.__class__, d.__class__)
                self.assertEqual(d.sample().shape, original_shape)
                self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
                self.assertEqual(actual_shape, expected_shape)
    def test_independent_shape(self):
        """Wrapping in Independent must reinterpret batch dims as event dims."""
        for Dist, params in EXAMPLES:
            for param in params:
                base_dist = Dist(**param)
                x = base_dist.sample()
                base_log_prob_shape = base_dist.log_prob(x).shape
                for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):
                    indep_dist = Independent(base_dist, reinterpreted_batch_ndims)
                    indep_log_prob_shape = base_log_prob_shape[
                        : len(base_log_prob_shape) - reinterpreted_batch_ndims
                    ]
                    self.assertEqual(indep_dist.log_prob(x).shape, indep_log_prob_shape)
                    self.assertEqual(
                        indep_dist.sample().shape, base_dist.sample().shape
                    )
                    self.assertEqual(indep_dist.has_rsample, base_dist.has_rsample)
                    if indep_dist.has_rsample:
                        self.assertEqual(
                            indep_dist.sample().shape, base_dist.sample().shape
                        )
                    try:
                        self.assertEqual(
                            indep_dist.enumerate_support().shape,
                            base_dist.enumerate_support().shape,
                        )
                        self.assertEqual(indep_dist.mean.shape, base_dist.mean.shape)
                    except NotImplementedError:
                        pass
                    try:
                        self.assertEqual(
                            indep_dist.variance.shape, base_dist.variance.shape
                        )
                    except NotImplementedError:
                        pass
                    try:
                        self.assertEqual(
                            indep_dist.entropy().shape, indep_log_prob_shape
                        )
                    except NotImplementedError:
                        pass
    def test_independent_expand(self):
        for Dist, params in EXAMPLES:
            for param in params:
                base_dist = Dist(**param)
                for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):
                    for s in [torch.Size(), torch.Size((2,)), torch.Size((2, 3))]:
                        indep_dist = Independent(base_dist, reinterpreted_batch_ndims)
                        expanded_shape = s + indep_dist.batch_shape
                        expanded = indep_dist.expand(expanded_shape)
                        expanded_sample = expanded.sample()
                        expected_shape = expanded_shape + indep_dist.event_shape
                        self.assertEqual(expanded_sample.shape, expected_shape)
                        self.assertEqual(
                            expanded.log_prob(expanded_sample),
                            indep_dist.log_prob(expanded_sample),
                        )
                        self.assertEqual(expanded.event_shape, indep_dist.event_shape)
                        self.assertEqual(expanded.batch_shape, expanded_shape)
    def test_cdf_icdf_inverse(self):
        # Tests the invertibility property on the distributions
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                samples = dist.sample(sample_shape=(20,))
                try:
                    cdf = dist.cdf(samples)
                    actual = dist.icdf(cdf)
                except NotImplementedError:
                    continue
                # Relative error with a small absolute floor to avoid
                # division by zero for samples near 0.
                rel_error = torch.abs(actual - samples) / (1e-10 + torch.abs(samples))
                self.assertLess(
                    rel_error.max(),
                    1e-4,
                    msg="\n".join(
                        [
                            "{} example {}/{}, icdf(cdf(x)) != x".format(
                                Dist.__name__, i + 1, len(params)
                            ),
                            "x = {}".format(samples),
                            "cdf(x) = {}".format(cdf),
                            "icdf(cdf(x)) = {}".format(actual),
                        ]
                    ),
                )
    def test_cdf_log_prob(self):
        # Tests if the differentiation of the CDF gives the PDF at a given value
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                samples = dist.sample().clone().detach()
                if samples.dtype.is_floating_point:
                    samples.requires_grad_()
                try:
                    cdfs = dist.cdf(samples)
                    pdfs = dist.log_prob(samples).exp()
                except NotImplementedError:
                    continue
                cdfs_derivative = grad(cdfs.sum(), [samples])[
                    0
                ]  # this should not be wrapped in torch.abs()
                self.assertEqual(
                    cdfs_derivative,
                    pdfs,
                    prec=0.2,
                    message="\n".join(
                        [
                            "{} example {}/{}, d(cdf)/dx != pdf(x)".format(
                                Dist.__name__, i + 1, len(params)
                            ),
                            "x = {}".format(samples),
                            "cdf = {}".format(cdfs),
                            "pdf = {}".format(pdfs),
                            "grad(cdf) = {}".format(cdfs_derivative),
                        ]
                    ),
                )
    def test_entropy_monte_carlo(self):
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                # use double precision for better numerical stability
                dist = Dist(**{k: v.double() for k, v in param.items()})
                try:
                    actual = dist.entropy()
                except NotImplementedError:
                    continue
                # use a lot of samples for better MC approximation
                x = dist.sample(sample_shape=(120000,))
                expected = -dist.log_prob(
                    x.clamp_max(1 - 2 * torch.finfo(x.dtype).eps)
                ).mean(0)
                # Entries where the MC estimate diverges are not comparable.
                ignore = expected == inf
                expected[ignore] = actual[ignore]
                self.assertEqual(
                    actual,
                    expected,
                    prec=0.2,
                    message="\n".join(
                        [
                            "{} example {}/{}, incorrect .entropy().".format(
                                Dist.__name__, i + 1, len(params)
                            ),
                            "Expected (monte carlo) {}".format(expected),
                            "Actual (analytic) {}".format(actual),
                            "max error = {}".format(torch.abs(actual - expected).max()),
                        ]
                    ),
                )
    def test_params_contains(self):
        """Every declared parameter must satisfy its arg_constraints entry."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                for name, value in param.items():
                    if isinstance(value, Number):
                        value = torch.tensor([value])
                    try:
                        constraint = dist.arg_constraints[name]
                    except KeyError:
                        continue  # ignore optional parameters
                    if is_dependent(constraint):
                        continue
                    message = "{} example {}/{} parameter {} = {}".format(
                        Dist.__name__, i + 1, len(params), name, value
                    )
                    self.assertTrue(constraint.check(value).all(), msg=message)
    def test_support_contains(self):
        """Every drawn sample must lie inside the declared support."""
        for Dist, params in EXAMPLES:
            self.assertIsInstance(Dist.support, Constraint)
            for i, param in enumerate(params):
                dist = Dist(**param)
                value = dist.sample()
                constraint = dist.support
                message = "{} example {}/{} sample = {}".format(
                    Dist.__name__, i + 1, len(params), value
                )
                self.assertTrue(constraint.check(value).all(), msg=message)
class TestDistributionShapes(BotorchTestCase, TestCase):
    """Shape checks for scalar-parameter Kumaraswamy and entropy shapes."""
    def setUp(self):
        super().setUp()
        self.scalar_sample = 1
        self.tensor_sample_1 = torch.ones(3, 2)
        self.tensor_sample_2 = torch.ones(3, 2, 3)
        # Enable argument validation so bad parameters fail loudly in tests.
        Distribution.set_default_validate_args(True)
    def tearDown(self):
        super().tearDown()
        # Restore the global default so other test modules are unaffected.
        Distribution.set_default_validate_args(False)
    def test_kumaraswamy_shape_scalar_params(self):
        """Scalar parameters produce empty batch/event shapes."""
        kumaraswamy = Kumaraswamy(1, 1)
        self.assertEqual(kumaraswamy._batch_shape, torch.Size())
        self.assertEqual(kumaraswamy._event_shape, torch.Size())
        self.assertEqual(kumaraswamy.sample().size(), torch.Size())
        self.assertEqual(kumaraswamy.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertEqual(
            kumaraswamy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))
        )
        self.assertEqual(
            kumaraswamy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3))
        )
    def test_entropy_shape(self):
        """entropy() must return the distribution's batch shape."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(validate_args=False, **param)
                try:
                    actual_shape = dist.entropy().size()
                    expected_shape = (
                        dist.batch_shape if dist.batch_shape else torch.Size()
                    )
                    message = (
                        f"{Dist.__name__} example {i + 1}/{len(params)}, "
                        f"shape mismatch. expected {expected_shape}, "
                        f"actual {actual_shape}"
                    )
                    self.assertEqual(actual_shape, expected_shape, message=message)
                except NotImplementedError:
                    continue
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Probability Distributions.
This is adapted from https://github.com/probtorch/pytorch/pull/143.
TODO: replace with PyTorch version once the PR is up and landed.
"""
import random
import unittest
from collections import namedtuple
from itertools import product
from numbers import Number
import torch
from botorch.distributions import Kumaraswamy
from botorch.utils.testing import BotorchTestCase
from torch._six import inf, string_classes
from torch.autograd import grad
from torch.distributions import Distribution, Independent
from torch.distributions.constraints import Constraint, is_dependent
# Seed applied in TestCase.setUp so randomized tests are reproducible.
SEED = 1234
# Pairs a distribution class with a list of example parameterizations.
Example = namedtuple("Example", ["Dist", "params"])
# Example parameterizations exercised by the generic distribution tests below.
EXAMPLES = [
    Example(
        Kumaraswamy,
        [
            # avoid extreme parameters
            {
                "concentration1": 0.5 + 3 * torch.rand(2, 3).requires_grad_(),
                "concentration0": 0.5 + 3 * torch.rand(2, 3).requires_grad_(),
            },
            {
                "concentration1": 0.5 + 3 * torch.rand(4).requires_grad_(),
                "concentration0": 0.5 + 3 * torch.rand(4).requires_grad_(),
            },
        ],
    ),
]
def set_rng_seed(seed):
    """Seed both the stdlib and the torch RNGs for reproducible tests."""
    random.seed(seed)
    torch.manual_seed(seed)
def is_iterable(obj):
    """Return True if ``obj`` supports iteration (EAFP via ``iter``)."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def iter_indices(tensor):
    """Return an iterable over all element indices of ``tensor``.

    Yields nothing for 0-d tensors, a flat ``range`` for 1-d tensors,
    and index tuples (the cartesian product of per-dimension ranges)
    for higher-dimensional tensors.
    """
    ndim = tensor.dim()
    if ndim == 0:
        return range(0)
    if ndim == 1:
        return range(tensor.size(0))
    per_dim_ranges = [range(extent) for extent in tensor.size()]
    return product(*per_dim_ranges)
class TestCase(unittest.TestCase):
    """Base test case with a recursive, tolerance-aware ``assertEqual``.

    Adapted from the PyTorch test harness: compares tensors, numbers,
    strings, sets and generic iterables, allowing an absolute tolerance
    ``prec`` for numeric comparisons.
    """
    # Default absolute tolerance used when ``prec`` is not supplied.
    precision = 1e-5
    def setUp(self):
        # Reseed both RNGs so every test starts from the same state.
        set_rng_seed(SEED)
    def assertEqual(self, x, y, prec=None, message="", allow_inf=False):
        """Assert ``x == y`` up to absolute tolerance ``prec``.

        Dispatches on the types of ``x`` and ``y``: tensor/number pairs
        are unwrapped, tensors are compared elementwise within ``prec``,
        and iterables are compared recursively. If ``allow_inf`` is
        False, comparing infinite numbers fails the test.
        """
        # Support the positional-call pattern assertEqual(x, y, "message").
        if isinstance(prec, str) and message == "":
            message = prec
            prec = None
        if prec is None:
            prec = self.precision
        if isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(x.item(), y, prec, message, allow_inf)
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(x, y.item(), prec, message, allow_inf)
        elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
            def assertTensorsEqual(a, b):
                # Shapes must match exactly; values only within ``prec``.
                super(TestCase, self).assertEqual(a.size(), b.size(), message)
                if a.numel() > 0:
                    b = b.type_as(a)
                    b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
                    # check that NaNs are in the same locations
                    nan_mask = a != a
                    self.assertTrue(torch.equal(nan_mask, b != b), message)
                    diff = a - b
                    diff[nan_mask] = 0
                    # TODO: implement abs on CharTensor
                    if diff.is_signed() and "CharTensor" not in diff.type():
                        diff = diff.abs()
                    max_err = diff.max()
                    self.assertLessEqual(max_err, prec, message)
            super(TestCase, self).assertEqual(x.is_sparse, y.is_sparse, message)
            if x.is_sparse:
                # NOTE(review): safeCoalesce is not defined in this file;
                # presumably supplied by a mixin — confirm before relying on
                # the sparse-comparison path.
                x = self.safeCoalesce(x)
                y = self.safeCoalesce(y)
                assertTensorsEqual(x._indices(), y._indices())
                assertTensorsEqual(x._values(), y._values())
            else:
                assertTensorsEqual(x, y)
        elif isinstance(x, string_classes) and isinstance(y, string_classes):
            super(TestCase, self).assertEqual(x, y, message)
        elif type(x) == set and type(y) == set:
            super(TestCase, self).assertEqual(x, y, message)
        elif is_iterable(x) and is_iterable(y):
            # Same length, then elementwise recursive comparison.
            super(TestCase, self).assertEqual(len(x), len(y), message)
            for x_, y_ in zip(x, y):
                self.assertEqual(x_, y_, prec, message)
        elif isinstance(x, bool) and isinstance(y, bool):
            super(TestCase, self).assertEqual(x, y, message)
        elif isinstance(x, Number) and isinstance(y, Number):
            if abs(x) == inf or abs(y) == inf:
                if allow_inf:
                    super(TestCase, self).assertEqual(x, y, message)
                else:
                    self.fail(
                        "Expected finite numeric values - x={}, y={}".format(x, y)
                    )
                return
            super(TestCase, self).assertLessEqual(abs(x - y), prec, message)
        else:
            super(TestCase, self).assertEqual(x, y, message)
class TestKumaraswamy(BotorchTestCase, TestCase):
    """Tests for the Kumaraswamy distribution and for the generic
    distribution contract (sampling, expansion, cdf/icdf, entropy)
    exercised over EXAMPLES."""
    def test_kumaraswamy_shape(self):
        """Sample shapes for batched, 1-d and scalar parameterizations."""
        concentration1 = torch.randn(2, 3).abs().requires_grad_(True)
        concentration0 = torch.randn(2, 3).abs().requires_grad_(True)
        concentration1_1d = torch.randn(1).abs().requires_grad_(True)
        concentration0_1d = torch.randn(1).abs().requires_grad_(True)
        self.assertEqual(
            Kumaraswamy(concentration1, concentration0).sample().size(), (2, 3)
        )
        self.assertEqual(
            Kumaraswamy(concentration1, concentration0).sample((5,)).size(), (5, 2, 3)
        )
        self.assertEqual(
            Kumaraswamy(concentration1_1d, concentration0_1d).sample().size(), (1,)
        )
        self.assertEqual(
            Kumaraswamy(concentration1_1d, concentration0_1d).sample((1,)).size(),
            (1, 1),
        )
        self.assertEqual(Kumaraswamy(1.0, 1.0).sample().size(), ())
        self.assertEqual(Kumaraswamy(1.0, 1.0).sample((1,)).size(), (1,))
    # Kumaraswamy distribution is not implemented in SciPy
    # Hence these tests are explicit
    def test_kumaraswamy_mean_variance(self):
        """Compare analytic mean/variance against Monte Carlo estimates."""
        c1_1 = torch.randn(2, 3).abs().requires_grad_(True)
        c0_1 = torch.randn(2, 3).abs().requires_grad_(True)
        c1_2 = torch.randn(4).abs().requires_grad_(True)
        c0_2 = torch.randn(4).abs().requires_grad_(True)
        cases = [(c1_1, c0_1), (c1_2, c0_2)]
        for i, (a, b) in enumerate(cases):
            m = Kumaraswamy(a, b)
            samples = m.sample((60000,))
            expected = samples.mean(0)
            actual = m.mean
            error = (expected - actual).abs()
            # error == error filters out NaN entries before taking the max
            max_error = max(error[error == error])
            self.assertLess(
                max_error,
                0.01,
                "Kumaraswamy example {}/{}, incorrect .mean".format(i + 1, len(cases)),
            )
            expected = samples.var(0)
            actual = m.variance
            error = (expected - actual).abs()
            max_error = max(error[error == error])
            self.assertLess(
                max_error,
                0.01,
                "Kumaraswamy example {}/{}, incorrect .variance".format(
                    i + 1, len(cases)
                ),
            )
    def test_valid_parameter_broadcasting(self):
        """Broadcasting of concentration parameters yields expected batch shapes."""
        valid_examples = [
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]), concentration0=1.0
                ),
                (2,),
            ),
            (
                Kumaraswamy(concentration1=1, concentration0=torch.tensor([1.0, 1.0])),
                (2,),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]),
                    concentration0=torch.tensor([1.0]),
                ),
                (2,),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]),
                    concentration0=torch.tensor([[1.0], [1.0]]),
                ),
                (2, 2),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0, 1.0]),
                    concentration0=torch.tensor([[1.0]]),
                ),
                (1, 2),
            ),
            (
                Kumaraswamy(
                    concentration1=torch.tensor([1.0]),
                    concentration0=torch.tensor([[1.0]]),
                ),
                (1, 1),
            ),
        ]
        for dist, expected_size in valid_examples:
            dist_sample_size = dist.sample().size()
            self.assertEqual(
                dist_sample_size,
                expected_size,
                "actual size: {} != expected size: {}".format(
                    dist_sample_size, expected_size
                ),
            )
    def test_invalid_parameter_broadcasting(self):
        # invalid broadcasting cases; should throw error
        # example type (distribution class, distribution params)
        invalid_examples = [
            (
                Kumaraswamy,
                {
                    "concentration1": torch.tensor([[1, 1]]),
                    "concentration0": torch.tensor([1, 1, 1, 1]),
                },
            ),
            (
                Kumaraswamy,
                {
                    "concentration1": torch.tensor([[[1, 1, 1], [1, 1, 1]]]),
                    "concentration0": torch.tensor([1, 1]),
                },
            ),
        ]
        for dist, kwargs in invalid_examples:
            self.assertRaises(RuntimeError, dist, **kwargs)
    def _check_enumerate_support(self, dist, examples):
        """Helper: check enumerate_support with and without batch expansion."""
        for params, expected in examples:
            params = {k: torch.tensor(v) for k, v in params.items()}
            expected = torch.tensor(expected)
            d = dist(**params)
            actual = d.enumerate_support(expand=False)
            self.assertEqual(actual, expected)
            actual = d.enumerate_support(expand=True)
            expected_with_expand = expected.expand(
                (-1,) + d.batch_shape + d.event_shape
            )
            self.assertEqual(actual, expected_with_expand)
    def test_repr(self):
        """repr() of every example distribution starts with its class name."""
        for Dist, params in EXAMPLES:
            for param in params:
                dist = Dist(**param)
                self.assertTrue(repr(dist).startswith(dist.__class__.__name__))
    def test_sample_detached(self):
        """sample() must not propagate gradients from the parameters."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                variable_params = [
                    p for p in param.values() if getattr(p, "requires_grad", False)
                ]
                if not variable_params:
                    continue
                dist = Dist(**param)
                sample = dist.sample()
                self.assertFalse(
                    sample.requires_grad,
                    msg="{} example {}/{}, .sample() is not detached".format(
                        Dist.__name__, i + 1, len(params)
                    ),
                )
    def test_rsample_requires_grad(self):
        """rsample() must propagate gradients from differentiable parameters."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                if not any(getattr(p, "requires_grad", False) for p in param.values()):
                    continue
                dist = Dist(**param)
                if not dist.has_rsample:
                    continue
                sample = dist.rsample()
                self.assertTrue(
                    sample.requires_grad,
                    msg="{} example {}/{}, .rsample() does not require grad".format(
                        Dist.__name__, i + 1, len(params)
                    ),
                )
    def test_enumerate_support_type(self):
        """enumerate_support() and sample() must return the same type."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                try:
                    self.assertIsInstance(
                        dist.sample(),
                        type(dist.enumerate_support()),
                        msg=(
                            "{} example {}/{}, return type mismatch between "
                            + "sample and enumerate_support."
                        ).format(Dist.__name__, i + 1, len(params)),
                    )
                except NotImplementedError:
                    pass
    def test_distribution_expand(self):
        """expand() must preserve class, log_prob and produce expanded shapes."""
        shapes = [torch.Size(), torch.Size((2,)), torch.Size((2, 1))]
        for Dist, params in EXAMPLES:
            for param in params:
                for shape in shapes:
                    d = Dist(**param)
                    expanded_shape = shape + d.batch_shape
                    original_shape = d.batch_shape + d.event_shape
                    expected_shape = shape + original_shape
                    expanded = d.expand(batch_shape=list(expanded_shape))
                    sample = expanded.sample()
                    actual_shape = expanded.sample().shape
                    self.assertEqual(expanded.__class__, d.__class__)
                    self.assertEqual(d.sample().shape, original_shape)
                    self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
                    self.assertEqual(actual_shape, expected_shape)
                    self.assertEqual(expanded.batch_shape, expanded_shape)
                    try:
                        # allow_inf: mean/variance may legitimately be infinite
                        self.assertEqual(
                            expanded.mean,
                            d.mean.expand(expanded_shape + d.event_shape),
                            allow_inf=True,
                        )
                        self.assertEqual(
                            expanded.variance,
                            d.variance.expand(expanded_shape + d.event_shape),
                            allow_inf=True,
                        )
                    except NotImplementedError:
                        pass
    def test_distribution_subclass_expand(self):
        """expand() on a subclass must return an instance of that subclass."""
        expand_by = torch.Size((2,))
        for Dist, params in EXAMPLES:
            class SubClass(Dist):
                pass
            for param in params:
                d = SubClass(**param)
                expanded_shape = expand_by + d.batch_shape
                original_shape = d.batch_shape + d.event_shape
                expected_shape = expand_by + original_shape
                expanded = d.expand(batch_shape=expanded_shape)
                sample = expanded.sample()
                actual_shape = expanded.sample().shape
                self.assertEqual(expanded.__class__, d.__class__)
                self.assertEqual(d.sample().shape, original_shape)
                self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
                self.assertEqual(actual_shape, expected_shape)
    def test_independent_shape(self):
        """Independent wrapper: shapes for log_prob, sampling and moments."""
        for Dist, params in EXAMPLES:
            for param in params:
                base_dist = Dist(**param)
                x = base_dist.sample()
                base_log_prob_shape = base_dist.log_prob(x).shape
                for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):
                    indep_dist = Independent(base_dist, reinterpreted_batch_ndims)
                    # reinterpreting batch dims as event dims trims log_prob shape
                    indep_log_prob_shape = base_log_prob_shape[
                        : len(base_log_prob_shape) - reinterpreted_batch_ndims
                    ]
                    self.assertEqual(indep_dist.log_prob(x).shape, indep_log_prob_shape)
                    self.assertEqual(
                        indep_dist.sample().shape, base_dist.sample().shape
                    )
                    self.assertEqual(indep_dist.has_rsample, base_dist.has_rsample)
                    if indep_dist.has_rsample:
                        self.assertEqual(
                            indep_dist.sample().shape, base_dist.sample().shape
                        )
                    try:
                        self.assertEqual(
                            indep_dist.enumerate_support().shape,
                            base_dist.enumerate_support().shape,
                        )
                        self.assertEqual(indep_dist.mean.shape, base_dist.mean.shape)
                    except NotImplementedError:
                        pass
                    try:
                        self.assertEqual(
                            indep_dist.variance.shape, base_dist.variance.shape
                        )
                    except NotImplementedError:
                        pass
                    try:
                        self.assertEqual(
                            indep_dist.entropy().shape, indep_log_prob_shape
                        )
                    except NotImplementedError:
                        pass
    def test_independent_expand(self):
        """expand() of an Independent distribution preserves shapes and log_prob."""
        for Dist, params in EXAMPLES:
            for param in params:
                base_dist = Dist(**param)
                for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):
                    for s in [torch.Size(), torch.Size((2,)), torch.Size((2, 3))]:
                        indep_dist = Independent(base_dist, reinterpreted_batch_ndims)
                        expanded_shape = s + indep_dist.batch_shape
                        expanded = indep_dist.expand(expanded_shape)
                        expanded_sample = expanded.sample()
                        expected_shape = expanded_shape + indep_dist.event_shape
                        self.assertEqual(expanded_sample.shape, expected_shape)
                        self.assertEqual(
                            expanded.log_prob(expanded_sample),
                            indep_dist.log_prob(expanded_sample),
                        )
                        self.assertEqual(expanded.event_shape, indep_dist.event_shape)
                        self.assertEqual(expanded.batch_shape, expanded_shape)
    def test_cdf_icdf_inverse(self):
        # Tests the invertibility property on the distributions
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                samples = dist.sample(sample_shape=(20,))
                try:
                    cdf = dist.cdf(samples)
                    actual = dist.icdf(cdf)
                except NotImplementedError:
                    continue
                # relative error with a small floor to avoid division by zero
                rel_error = torch.abs(actual - samples) / (1e-10 + torch.abs(samples))
                self.assertLess(
                    rel_error.max(),
                    1e-4,
                    msg="\n".join(
                        [
                            "{} example {}/{}, icdf(cdf(x)) != x".format(
                                Dist.__name__, i + 1, len(params)
                            ),
                            "x = {}".format(samples),
                            "cdf(x) = {}".format(cdf),
                            "icdf(cdf(x)) = {}".format(actual),
                        ]
                    ),
                )
    def test_cdf_log_prob(self):
        # Tests if the differentiation of the CDF gives the PDF at a given value
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                samples = dist.sample().clone().detach()
                if samples.dtype.is_floating_point:
                    samples.requires_grad_()
                try:
                    cdfs = dist.cdf(samples)
                    pdfs = dist.log_prob(samples).exp()
                except NotImplementedError:
                    continue
                cdfs_derivative = grad(cdfs.sum(), [samples])[
                    0
                ]  # this should not be wrapped in torch.abs()
                self.assertEqual(
                    cdfs_derivative,
                    pdfs,
                    prec=0.2,
                    message="\n".join(
                        [
                            "{} example {}/{}, d(cdf)/dx != pdf(x)".format(
                                Dist.__name__, i + 1, len(params)
                            ),
                            "x = {}".format(samples),
                            "cdf = {}".format(cdfs),
                            "pdf = {}".format(pdfs),
                            "grad(cdf) = {}".format(cdfs_derivative),
                        ]
                    ),
                )
    def test_entropy_monte_carlo(self):
        """Compare analytic entropy against a Monte Carlo estimate."""
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                # use double precision for better numerical stability
                dist = Dist(**{k: v.double() for k, v in param.items()})
                try:
                    actual = dist.entropy()
                except NotImplementedError:
                    continue
                # use a lot of samples for better MC approximation
                x = dist.sample(sample_shape=(120000,))
                # clamp samples just below 1; log_prob at the boundary of the
                # unit interval can be infinite (handled via `ignore` below)
                expected = -dist.log_prob(
                    x.clamp_max(1 - 2 * torch.finfo(x.dtype).eps)
                ).mean(0)
                ignore = expected == inf
                expected[ignore] = actual[ignore]
                self.assertEqual(
                    actual,
                    expected,
                    prec=0.2,
                    message="\n".join(
                        [
                            "{} example {}/{}, incorrect .entropy().".format(
                                Dist.__name__, i + 1, len(params)
                            ),
                            "Expected (monte carlo) {}".format(expected),
                            "Actual (analytic) {}".format(actual),
                            "max error = {}".format(torch.abs(actual - expected).max()),
                        ]
                    ),
                )
    def test_params_contains(self):
        """Check that every example parameter satisfies its declared constraint."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                for name, value in param.items():
                    if isinstance(value, Number):
                        value = torch.tensor([value])
                    try:
                        constraint = dist.arg_constraints[name]
                    except KeyError:
                        continue  # ignore optional parameters
                    if is_dependent(constraint):
                        continue
                    message = "{} example {}/{} parameter {} = {}".format(
                        Dist.__name__, i + 1, len(params), name, value
                    )
                    self.assertTrue(constraint.check(value).all(), msg=message)
    def test_support_contains(self):
        """Check that samples drawn from each example lie in the support."""
        for Dist, params in EXAMPLES:
            self.assertIsInstance(Dist.support, Constraint)
            for i, param in enumerate(params):
                dist = Dist(**param)
                value = dist.sample()
                constraint = dist.support
                message = "{} example {}/{} sample = {}".format(
                    Dist.__name__, i + 1, len(params), value
                )
                self.assertTrue(constraint.check(value).all(), msg=message)
class TestDistributionShapes(BotorchTestCase, TestCase):
    """Shape checks for distribution samples, log_prob and entropy."""
    def setUp(self):
        super().setUp()
        # fixture samples reused by the log_prob shape checks below
        self.scalar_sample = 1
        self.tensor_sample_1 = torch.ones(3, 2)
        self.tensor_sample_2 = torch.ones(3, 2, 3)
        # validate distribution arguments while these tests run
        Distribution.set_default_validate_args(True)
    def tearDown(self):
        super().tearDown()
        # restore the global default so other test modules are unaffected
        Distribution.set_default_validate_args(False)
    def test_kumaraswamy_shape_scalar_params(self):
        """Scalar-parameter Kumaraswamy has empty batch and event shapes."""
        kumaraswamy = Kumaraswamy(1, 1)
        self.assertEqual(kumaraswamy._batch_shape, torch.Size())
        self.assertEqual(kumaraswamy._event_shape, torch.Size())
        self.assertEqual(kumaraswamy.sample().size(), torch.Size())
        self.assertEqual(kumaraswamy.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertEqual(
            kumaraswamy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2))
        )
        self.assertEqual(
            kumaraswamy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3))
        )
    def test_entropy_shape(self):
        """entropy() must have the distribution's batch shape."""
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(validate_args=False, **param)
                try:
                    actual_shape = dist.entropy().size()
                    expected_shape = (
                        dist.batch_shape if dist.batch_shape else torch.Size()
                    )
                    message = (
                        f"{Dist.__name__} example {i + 1}/{len(params)}, "
                        f"shape mismatch. expected {expected_shape}, "
                        f"actual {actual_shape}"
                    )
                    self.assertEqual(actual_shape, expected_shape, message=message)
                except NotImplementedError:
                    continue
utils/prepare_dictionary.py | gbouritsas/PnC | 19 | 6619519 | import os
import types
import pickle
from utils.misc import isnotebook
if isnotebook():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
import numpy as np
import networkx as nx
import graph_tool as gt
import graph_tool.stats as gt_stats
import graph_tool.topology as gt_topology
import graph_tool.clustering as gt_clustering
import graph_tool.generation as gt_generation
from utils.conversions import convert_to_gt
def unique_non_isomorphic(H_set):
H_unique = []
for H in H_set:
found = False
for H_saved in H_unique:
if H.num_vertices() != H_saved.num_vertices() or H.num_edges() != H_saved.num_edges():
# avoid running isomorphism routine if num vertices/num edges is different
continue
iso = True if H.num_edges() == 0 and H.num_vertices() == 1 else \
gt_topology.isomorphism(H, H_saved)
if iso:
found = True
break
if not found:
H_unique.append(H)
return H_unique
def get_motifs(k_min, k_max, graphs_ptg, directed=False):
#n_shuffles = 100
motif_num_vertices_list = list(range(k_min, k_max+1))
H_dictionary = []
counts = []
# add single nodes and single edges
H_dictionary += [gt_generation.complete_graph(1), gt_generation.complete_graph(2)]
counts += [0,0]
for i in tqdm(range(len(graphs_ptg))):
G_edge_index = graphs_ptg[i].edge_index.transpose(1,0).tolist()
G_gt = gt.Graph(directed=directed)
G_gt.add_edge_list(G_edge_index)
gt_stats.remove_self_loops(G_gt)
gt_stats.remove_parallel_edges(G_gt)
for motif_num_vertices in motif_num_vertices_list:
motifs_k, counts_k = gt_clustering.motifs(G_gt, motif_num_vertices)
for motif, count in zip(motifs_k, counts_k):
found=False
for H_index, H in enumerate(H_dictionary):
if H.num_vertices() != motif.num_vertices() or H.num_edges() != motif.num_edges():
# avoid running isomorphism routine if num vertices/num edges is different
continue
iso = True if H.num_edges() == 0 and H.num_vertices()==1 else \
gt_topology.isomorphism(H, motif)
if iso:
counts[H_index] += count
found = True
break
if not found:
H_dictionary.append(motif)
counts += [count]
counts = np.array(counts)
H_dictionary = list(np.array(H_dictionary)[np.argsort(-counts)])
counts = counts[np.argsort(-counts)]
counts = counts/counts.sum()
return H_dictionary, counts
def get_custom_edge_list(ks, substructure_type=None, filename=None):
'''
Instantiates a list of `edge_list`s representing substructures
of type `substructure_type` with sizes specified by `ks`.
'''
if substructure_type is None and filename is None:
raise ValueError('You must specify either a type or a filename where to read substructures from.')
edge_lists = []
for k in ks:
if substructure_type is not None:
graphs_nx = getattr(nx, substructure_type)(k)
else:
graphs_nx = nx.read_graph6(os.path.join(filename, 'graph{}c.g6'.format(k)))
if isinstance(graphs_nx, list) or isinstance(graphs_nx, types.GeneratorType):
edge_lists += [list(graph_nx.edges) for graph_nx in graphs_nx]
else:
edge_lists.append(list(graphs_nx.edges))
return edge_lists
def prepare_dictionary(args, path=None, graphs_ptg=None, split_folder=None):
###### choose the substructures: usually loaded from networkx,
###### except for 'all_simple_graphs' where they need to be precomputed,
###### or when a custom edge list is provided in the input by the user
H_set_gt = []
edge_lists_all = []
for i, atom_type in enumerate(args['atom_types']):
if atom_type in ['cycle_graph',
'path_graph',
'complete_graph',
'binomial_tree',
'star_graph',
'nonisomorphic_trees']:
k_min = 2 if atom_type == 'star_graph' else 1
k_max = args['k'][i]
edge_lists = get_custom_edge_list(list(range(k_min, k_max + 1)), substructure_type=atom_type)
elif atom_type in ['cycle_graph_chosen_k',
'path_graph_chosen_k',
'complete_graph_chosen_k',
'binomial_tree_chosen_k',
'star_graph_chosen_k',
'nonisomorphic_trees_chosen_k']:
edge_lists = get_custom_edge_list([args['k'][i]], substructure_type=atom_type.replace('_chosen_k',''))
elif atom_type == 'all_simple_graphs':
k_min = 2
k_max = args['k'][i]
filename = os.path.join(args['root_folder'], 'all_simple_graphs')
edge_lists = get_custom_edge_list(list(range(k_min, k_max + 1)), filename=filename)
elif atom_type == 'all_simple_graphs_chosen_k':
filename = os.path.join(args['root_folder'], 'all_simple_graphs')
edge_lists = get_custom_edge_list([args['k'][i]], filename=filename)
elif atom_type == 'diamond_graph':
graph_nx = nx.diamond_graph()
edge_lists = [list(graph_nx.edges)]
elif atom_type == 'custom':
assert args['custom_edge_lists'] is not None, "Custom edge lists must be provided."
edge_lists = args['custom_edge_lists']
elif atom_type == 'motifs':
k_min = 3
k_max = args['k'][i]
# data_folder = os.path.join(path, 'processed', 'dictionaries')
data_folder = os.path.join(path, 'processed', 'dictionaries', split_folder)
motif_file = os.path.join(data_folder, 'motifs' + '_' + str(k_max) + '.pkl')
if os.path.exists(motif_file):
with open(motif_file, 'rb') as f:
H_set_gt, counts = pickle.load(f)
else:
H_set_gt, counts = get_motifs(k_min, k_max, graphs_ptg, directed=args['directed'])
if not os.path.exists(data_folder):
os.makedirs(data_folder)
with open(motif_file, 'wb') as f:
pickle.dump((H_set_gt, counts), f)
else:
raise NotImplementedError("Atom {} is not currently supported.".format(atom_type))
if atom_type != 'motifs':
edge_lists_all += edge_lists
# convert to graph tool. Only necessary for subgraph isomorphism
if len(edge_lists_all)!=0:
H_set_gt += convert_to_gt(edge_lists_all, directed=args['directed'])
H_set_gt = unique_non_isomorphic(H_set_gt)
return H_set_gt
| import os
import types
import pickle
from utils.misc import isnotebook
if isnotebook():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
import numpy as np
import networkx as nx
import graph_tool as gt
import graph_tool.stats as gt_stats
import graph_tool.topology as gt_topology
import graph_tool.clustering as gt_clustering
import graph_tool.generation as gt_generation
from utils.conversions import convert_to_gt
def unique_non_isomorphic(H_set):
H_unique = []
for H in H_set:
found = False
for H_saved in H_unique:
if H.num_vertices() != H_saved.num_vertices() or H.num_edges() != H_saved.num_edges():
# avoid running isomorphism routine if num vertices/num edges is different
continue
iso = True if H.num_edges() == 0 and H.num_vertices() == 1 else \
gt_topology.isomorphism(H, H_saved)
if iso:
found = True
break
if not found:
H_unique.append(H)
return H_unique
def get_motifs(k_min, k_max, graphs_ptg, directed=False):
#n_shuffles = 100
motif_num_vertices_list = list(range(k_min, k_max+1))
H_dictionary = []
counts = []
# add single nodes and single edges
H_dictionary += [gt_generation.complete_graph(1), gt_generation.complete_graph(2)]
counts += [0,0]
for i in tqdm(range(len(graphs_ptg))):
G_edge_index = graphs_ptg[i].edge_index.transpose(1,0).tolist()
G_gt = gt.Graph(directed=directed)
G_gt.add_edge_list(G_edge_index)
gt_stats.remove_self_loops(G_gt)
gt_stats.remove_parallel_edges(G_gt)
for motif_num_vertices in motif_num_vertices_list:
motifs_k, counts_k = gt_clustering.motifs(G_gt, motif_num_vertices)
for motif, count in zip(motifs_k, counts_k):
found=False
for H_index, H in enumerate(H_dictionary):
if H.num_vertices() != motif.num_vertices() or H.num_edges() != motif.num_edges():
# avoid running isomorphism routine if num vertices/num edges is different
continue
iso = True if H.num_edges() == 0 and H.num_vertices()==1 else \
gt_topology.isomorphism(H, motif)
if iso:
counts[H_index] += count
found = True
break
if not found:
H_dictionary.append(motif)
counts += [count]
counts = np.array(counts)
H_dictionary = list(np.array(H_dictionary)[np.argsort(-counts)])
counts = counts[np.argsort(-counts)]
counts = counts/counts.sum()
return H_dictionary, counts
def get_custom_edge_list(ks, substructure_type=None, filename=None):
'''
Instantiates a list of `edge_list`s representing substructures
of type `substructure_type` with sizes specified by `ks`.
'''
if substructure_type is None and filename is None:
raise ValueError('You must specify either a type or a filename where to read substructures from.')
edge_lists = []
for k in ks:
if substructure_type is not None:
graphs_nx = getattr(nx, substructure_type)(k)
else:
graphs_nx = nx.read_graph6(os.path.join(filename, 'graph{}c.g6'.format(k)))
if isinstance(graphs_nx, list) or isinstance(graphs_nx, types.GeneratorType):
edge_lists += [list(graph_nx.edges) for graph_nx in graphs_nx]
else:
edge_lists.append(list(graphs_nx.edges))
return edge_lists
def prepare_dictionary(args, path=None, graphs_ptg=None, split_folder=None):
###### choose the substructures: usually loaded from networkx,
###### except for 'all_simple_graphs' where they need to be precomputed,
###### or when a custom edge list is provided in the input by the user
H_set_gt = []
edge_lists_all = []
for i, atom_type in enumerate(args['atom_types']):
if atom_type in ['cycle_graph',
'path_graph',
'complete_graph',
'binomial_tree',
'star_graph',
'nonisomorphic_trees']:
k_min = 2 if atom_type == 'star_graph' else 1
k_max = args['k'][i]
edge_lists = get_custom_edge_list(list(range(k_min, k_max + 1)), substructure_type=atom_type)
elif atom_type in ['cycle_graph_chosen_k',
'path_graph_chosen_k',
'complete_graph_chosen_k',
'binomial_tree_chosen_k',
'star_graph_chosen_k',
'nonisomorphic_trees_chosen_k']:
edge_lists = get_custom_edge_list([args['k'][i]], substructure_type=atom_type.replace('_chosen_k',''))
elif atom_type == 'all_simple_graphs':
k_min = 2
k_max = args['k'][i]
filename = os.path.join(args['root_folder'], 'all_simple_graphs')
edge_lists = get_custom_edge_list(list(range(k_min, k_max + 1)), filename=filename)
elif atom_type == 'all_simple_graphs_chosen_k':
filename = os.path.join(args['root_folder'], 'all_simple_graphs')
edge_lists = get_custom_edge_list([args['k'][i]], filename=filename)
elif atom_type == 'diamond_graph':
graph_nx = nx.diamond_graph()
edge_lists = [list(graph_nx.edges)]
elif atom_type == 'custom':
assert args['custom_edge_lists'] is not None, "Custom edge lists must be provided."
edge_lists = args['custom_edge_lists']
elif atom_type == 'motifs':
k_min = 3
k_max = args['k'][i]
# data_folder = os.path.join(path, 'processed', 'dictionaries')
data_folder = os.path.join(path, 'processed', 'dictionaries', split_folder)
motif_file = os.path.join(data_folder, 'motifs' + '_' + str(k_max) + '.pkl')
if os.path.exists(motif_file):
with open(motif_file, 'rb') as f:
H_set_gt, counts = pickle.load(f)
else:
H_set_gt, counts = get_motifs(k_min, k_max, graphs_ptg, directed=args['directed'])
if not os.path.exists(data_folder):
os.makedirs(data_folder)
with open(motif_file, 'wb') as f:
pickle.dump((H_set_gt, counts), f)
else:
raise NotImplementedError("Atom {} is not currently supported.".format(atom_type))
if atom_type != 'motifs':
edge_lists_all += edge_lists
# convert to graph tool. Only necessary for subgraph isomorphism
if len(edge_lists_all)!=0:
H_set_gt += convert_to_gt(edge_lists_all, directed=args['directed'])
H_set_gt = unique_non_isomorphic(H_set_gt)
return H_set_gt
| en | 0.732513 | # avoid running isomorphism routine if num vertices/num edges is different #n_shuffles = 100 # add single nodes and single edges # avoid running isomorphism routine if num vertices/num edges is different Instantiates a list of `edge_list`s representing substructures of type `substructure_type` with sizes specified by `ks`. ###### choose the substructures: usually loaded from networkx, ###### except for 'all_simple_graphs' where they need to be precomputed, ###### or when a custom edge list is provided in the input by the user # data_folder = os.path.join(path, 'processed', 'dictionaries') # convert to graph tool. Only necessary for subgraph isomorphism | 2.2385 | 2 |
src/cool/errors.py | C0NKER/cool-compiler-2020 | 0 | 6619520 | class COOLError:
def __init__(self, name, pos, body):
self.name = name
self.pos = pos
self.body = body
def __str__(self):
return '(%d, %d) - %s: %s' % (self.pos[0], self.pos[1], self.name, self.body)
__repr__ = __str__
class CompilerError(COOLError):
def __init__(self, pos, body):
super().__init__('CompilerError', pos, body)
class LexicographicError(COOLError):
def __init__(self, pos, body):
super().__init__('LexicographicError', pos, body)
class SyntacticError(COOLError):
def __init__(self, pos, body):
super().__init__('SyntacticError', pos, body)
class NamexError(COOLError):
def __init__(self, pos, body):
super().__init__('NameError', pos, body)
class TypexError(COOLError):
def __init__(self, pos, body):
super().__init__('TypeError', pos, body)
class AttributexError(COOLError):
def __init__(self, pos, body):
super().__init__('AttributeError', pos, body)
class SemanticError(COOLError):
def __init__(self, pos, body):
super().__init__('SemanticError', pos, body) | class COOLError:
    def __init__(self, name, pos, body):
        # name: error category label; pos: (line, column); body: description.
        self.name = name
        self.pos = pos
        self.body = body

    def __str__(self):
        # Render as "(line, column) - Kind: message".
        return '(%d, %d) - %s: %s' % (self.pos[0], self.pos[1], self.name, self.body)

    __repr__ = __str__  # same text for repr() and str()


class CompilerError(COOLError):
    # General driver failure.
    def __init__(self, pos, body):
        super().__init__('CompilerError', pos, body)


class LexicographicError(COOLError):
    # Tokenizer-stage error.
    def __init__(self, pos, body):
        super().__init__('LexicographicError', pos, body)


class SyntacticError(COOLError):
    # Parser-stage error.
    def __init__(self, pos, body):
        super().__init__('SyntacticError', pos, body)


class NamexError(COOLError):
    # Reported as 'NameError'; class name avoids shadowing the builtin.
    def __init__(self, pos, body):
        super().__init__('NameError', pos, body)


class TypexError(COOLError):
    # Reported as 'TypeError'; class name avoids shadowing the builtin.
    def __init__(self, pos, body):
        super().__init__('TypeError', pos, body)


class AttributexError(COOLError):
    # Reported as 'AttributeError'; class name avoids shadowing the builtin.
    def __init__(self, pos, body):
        super().__init__('AttributeError', pos, body)
class SemanticError(COOLError):
def __init__(self, pos, body):
super().__init__('SemanticError', pos, body) | none | 1 | 3.113982 | 3 | |
exercise_2017/12th_week/blackjack/cards.py | Taewan-P/python_study | 0 | 6619521 | <gh_stars>0
# Playing Cards
import random


class Card(object):
    """A single playing card with a suit, a rank and a face-up flag."""

    SUITS = ["Diamond", "Heart", "Spade", "Clover"]
    RANKS = ["A", "2", "3", "4", "5", "6", "7",
             "8", "9", "10", "J", "Q", "K"]

    def __init__(self, suit, rank, face_up=True):
        # Only accept known suit/rank combinations; an invalid pair prints an
        # error and leaves the card without attributes (behavior kept for
        # backward compatibility).
        if suit in Card.SUITS and rank in Card.RANKS:
            self.suit = suit
            self.rank = rank
            self.face_up = face_up
        else:
            print("Error: Not a right suit or rank")

    def __str__(self):
        # A face-down card hides its identity.
        if self.face_up:
            return self.suit + "." + self.rank
        else:
            return "XXX"

    def flip(self):
        """Toggle the card between face up and face down."""
        self.face_up = not self.face_up


class Hand(object):
    """An ordered collection of cards."""

    def __init__(self):
        self.cards = []

    def __str__(self):
        # Trailing space after each card matches the historical output.
        if not self.cards:
            return "empty"
        return "".join(str(card) + " " for card in self.cards)

    def clear(self):
        """Drop every card in the hand."""
        self.cards = []

    def add(self, card):
        """Append *card* to this hand."""
        self.cards.append(card)

    def give(self, card, hand):
        """Move *card* from this hand to *hand*."""
        self.cards.remove(card)
        hand.add(card)


class Deck(Hand):
    """A dealing deck that replenishes itself with a fresh shuffle when empty."""

    def fresh_deck(self):
        """Rebuild a full 52-card, face-down, shuffled deck."""
        self.cards = [Card(s, r, False) for s in Card.SUITS for r in Card.RANKS]
        random.shuffle(self.cards)

    def deal(self, hand, how_many=1, open=False):
        """Deal *how_many* cards from the top of the deck into *hand*.

        Bug fix: the empty-deck check now runs inside the loop, so the deck
        also refills when it runs out in the middle of a multi-card deal
        (previously this raised IndexError).  ``open=True`` flips each dealt
        card face up.  (The parameter name shadows the builtin ``open``;
        kept for API compatibility.)
        """
        for _ in range(how_many):
            if not self.cards:
                self.fresh_deck()
            card = self.cards[0]
            if open:
                card.flip()
            self.give(card, hand)
| # Playing Cards
class Card(object):
    """A playing card with a suit, a rank and a face-up flag."""

    SUITS = ["Diamond", "Heart", "Spade", "Clover"]
    RANKS = ["A", "2", "3", "4", "5", "6", "7",
             "8", "9", "10", "J", "Q", "K"]

    def __init__(self, suit, rank, face_up=True):
        # Invalid suit/rank pairs print an error and leave the card
        # without attributes.
        if suit in Card.SUITS and rank in Card.RANKS:
            self.suit = suit
            self.rank = rank
            self.face_up = face_up
        else:
            print("Error: Not a right suit or rank")

    def __str__(self):
        # Face-down cards render as "XXX".
        if self.face_up:
            return self.suit + "." + self.rank
        else:
            return "XXX"

    def flip(self):
        # Toggle face up / face down.
        self.face_up = not self.face_up


class Hand(object):
    """An ordered collection of cards."""

    def __init__(self):
        self.cards = []

    def __str__(self):
        if len(self.cards) == 0:
            show = "empty"
        else:
            show = ""
            for card in self.cards:
                show += str(card) + " "
        return show

    def clear(self):
        self.cards = []

    def add(self, card):
        self.cards.append(card)

    def give(self, card, hand):
        # Move a card from this hand into another hand.
        self.cards.remove(card)
        hand.add(card)


class Deck(Hand):
    """A dealing deck built from a full, shuffled 52-card set."""

    def fresh_deck(self):
        self.cards = []
        for s in Card.SUITS:
            for r in Card.RANKS:
                self.cards.append(Card(s, r, False))
        import random
        random.shuffle(self.cards)

    def deal(self, hand, how_many=1, open=False):
        # NOTE(review): the empty check runs only once, so a multi-card
        # deal that exhausts the deck mid-way raises IndexError -- confirm
        # whether callers can request more cards than remain.
        if self.cards == []:
            self.fresh_deck()
        for _ in range(how_many):
            card = self.cards[0]
            if open :
                card.flip()
self.give(card,hand) | en | 0.839428 | # Playing Cards | 3.78264 | 4 |
random/collectionGame.py | Dmendoza3/Phyton | 0 | 6619522 | <gh_stars>0
import random

rarities = ['common', 'uncommon', 'rare', '']
collection = {}       # cards-per-rarity count -> list of generated card names
collectionRate = {}   # inclusive (low, high) 1-100 roll range -> count key
collected = []


def generateCollection(num, nRarities=3, rate=0.75):
    """Generate *num* card names split over *nRarities* rarity tiers.

    Each tier receives *rate* of the still-unassigned cards; the last tier
    absorbs the remainder.  Also fills ``collectionRate`` with the inclusive
    1-100 roll ranges used by :func:`getCard`.
    NOTE: tiers are keyed by their card count, so two tiers with the same
    count would collide; fine for the default parameters (75/18/7).
    """
    cardsLeft = num
    for r in range(nRarities):
        if r < nRarities - 1:
            nPerRarity = int(cardsLeft * rate)
        else:
            nPerRarity = cardsLeft  # last tier takes everything left
        collection[(nPerRarity)] = []
        for n in range(nPerRarity):
            collection[nPerRarity].append('name' + str(nPerRarity) + str(random.randint(100, 999)))
        cardsLeft -= nPerRarity
    prevN = 0
    for x in collection:
        collectionRate[(prevN + 1, prevN + x)] = x
        prevN += x


def getCard():
    """Roll 1-100 and return a random card from the matching rarity tier."""
    rand = random.randint(1, 100)
    for x in collectionRate:
        # Bug fix: the upper bound is inclusive.  The original tested
        # ``rand in range(x[0], x[1])``, which excluded x[1] and made the
        # top roll of every tier (75, 93, 100) silently return None.
        if x[0] <= rand <= x[1]:
            print(collectionRate[x])
            return random.choice(collection[collectionRate[x]])


generateCollection(100)
print(collection)
for x in range(10):
    print(getCard())
| import random
rarities = ['common', 'uncommon', 'rare', '']
collection = {}       # cards-per-rarity count -> list of generated card names
collectionRate = {}   # (low, high) 1-100 roll range -> count key
collected = []


def generateCollection(num, nRarities=3, rate=0.75):
    """Split *num* generated card names over *nRarities* rarity tiers."""
    cardsLeft = num
    for r in range(nRarities):
        if r < nRarities - 1:
            nPerRarity = int(cardsLeft * rate)
        else:
            nPerRarity = cardsLeft  # last tier takes the remainder
        collection[(nPerRarity)] = []
        for n in range(nPerRarity):
            collection[nPerRarity].append('name' + str(nPerRarity) + str(random.randint(100, 999)))
        cardsLeft -= nPerRarity
    prevN = 0
    for x in collection:
        # Build the cumulative roll ranges for getCard().
        collectionRate[(prevN + 1, prevN + x)] = x
        prevN += x


def getCard():
    """Roll 1-100 and return a card from the matching rarity tier."""
    rand = random.randint(1, 100)
    for x in collectionRate:
        # NOTE(review): range(x[0], x[1]) excludes x[1], so a roll equal to
        # a tier's upper bound (e.g. 100) falls through and returns None.
        if rand in range(x[0], x[1]):
            print(collectionRate[x])
            return random.choice(collection[collectionRate[x]])


generateCollection(100)
print(collection)
for x in range(10):
print(getCard()) | none | 1 | 3.613224 | 4 | |
gpa/forms.py | Don-Joel/MyDash | 0 | 6619523 | from django import forms
from .models import Class
class ClassModelForm(forms.ModelForm):
    """ModelForm for creating/editing a ``Class`` (course) record."""

    class Meta:
        model = Class
        # Fields exposed on the form, in display order.
        fields =[
            'name',
            'year',
            'semester',
            'grade',
            'numeric_grade',
            'credit_hours',
        ]
        # Override the auto-generated label for the numeric grade input.
        labels= {
            "numeric_grade" : "Course GPA (0.0 - 4.0)",
} | from django import forms
from .models import Class
class ClassModelForm(forms.ModelForm):
    """ModelForm for creating/editing a ``Class`` (course) record."""

    class Meta:
        model = Class
        # Fields exposed on the form, in display order.
        fields =[
            'name',
            'year',
            'semester',
            'grade',
            'numeric_grade',
            'credit_hours',
        ]
        # Custom label for the numeric grade input.
        labels= {
            "numeric_grade" : "Course GPA (0.0 - 4.0)",
} | none | 1 | 2.66983 | 3 | |
jetengine/query/not_equal.py | kpdemetriou/jetengine | 5 | 6619524 | from jetengine.query.base import QueryOperator
class NotEqualQueryOperator(QueryOperator):
    """Build a MongoDB ``$ne`` comparison.

    Matches every document whose *field_name* holds a value different from
    the supplied one.  For more information on `$ne` go to
    http://docs.mongodb.org/manual/reference/operator/query/ne/.

    Usage:

    .. testsetup:: ne_query_operator

        from datetime import datetime

        import tornado.ioloop

        from jetengine import *

    .. testcode:: ne_query_operator

        class User(Document):
            email = StringField()

        query = Q(email__ne="<EMAIL>")

        query_result = query.to_query(User)

        print(query_result)

    The resulting query is:

    .. testoutput:: ne_query_operator

        {'email': {'$ne': '<EMAIL>'}}

    """

    def to_query(self, field_name, value):
        """Return the ``{field: {"$ne": value}}`` query fragment."""
        comparison = {"$ne": value}
        return {field_name: comparison}
| from jetengine.query.base import QueryOperator
class NotEqualQueryOperator(QueryOperator):
    """
    Query operator used to return all documents that have the specified field with a value that's not equal to the specified value.

    For more information on `$ne` go to http://docs.mongodb.org/manual/reference/operator/query/ne/.

    Usage:

    .. testsetup:: ne_query_operator

        from datetime import datetime

        import tornado.ioloop

        from jetengine import *

    .. testcode:: ne_query_operator

        class User(Document):
            email = StringField()

        query = Q(email__ne="<EMAIL>")

        query_result = query.to_query(User)

        print(query_result)

    The resulting query is:

    .. testoutput:: ne_query_operator

        {'email': {'$ne': '<EMAIL>'}}

    """

    def to_query(self, field_name, value):
        """Return the ``{field: {"$ne": value}}`` query fragment."""
        return {field_name: {"$ne": value}}
| en | 0.51783 | Query operator used to return all documents that have the specified field with a value that's not equal to the specified value. For more information on `$ne` go to http://docs.mongodb.org/manual/reference/operator/query/ne/. Usage: .. testsetup:: ne_query_operator from datetime import datetime import tornado.ioloop from jetengine import * .. testcode:: ne_query_operator class User(Document): email = StringField() query = Q(email__ne="<EMAIL>") query_result = query.to_query(User) print(query_result) The resulting query is: .. testoutput:: ne_query_operator {'email': {'$ne': '<EMAIL>'}} | 3.262378 | 3 |
delivery_proj/users/api/views.py | unkn1w/Delivery | 0 | 6619525 | <filename>delivery_proj/users/api/views.py
from django.contrib.auth import get_user_model
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet

# NOTE(review): RestaurantCourierOnly is imported but unused in this module --
# confirm whether other modules re-import it from here before removing.
from ..permissions import BuyerOnly, RestaurantOnly, RestaurantCourierOnly, CourierOnly
from .serializers import (
    UserSerializer,
    CreateUserSerializer,
    RestaurantSerializer,
    ViewRestaurantSerializer,
    CourierSerializer,
    BuyerSerializer,
)
from ..models import Restaurant, Courier, Buyer

User = get_user_model()


class UserViewSet(viewsets.ModelViewSet):
    """Full CRUD on user accounts."""

    queryset = User.objects.all()

    def get_serializer_class(self):
        # Registration payloads differ from read/update payloads.
        if self.action == "create":
            return CreateUserSerializer
        return UserSerializer


class RestaurantViewSet(ReadOnlyModelViewSet):
    """Read-only restaurant listing for buyers (and admins)."""

    serializer_class = ViewRestaurantSerializer
    permission_classes = [IsAuthenticated & (BuyerOnly | IsAdminUser)]

    def get_queryset(self):
        # Prefetch dishes to avoid N+1 queries when serializing menus.
        return Restaurant.objects.prefetch_related("dishes").all()


class CreateRestaurantViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    GenericViewSet,
):
    """Create/retrieve/update/delete for restaurant owners (no list action)."""

    serializer_class = RestaurantSerializer
    permission_classes = [IsAuthenticated & (RestaurantOnly | IsAdminUser)]

    def get_queryset(self):
        return Restaurant.objects.prefetch_related("dishes").all()


class CourierViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    GenericViewSet,
):
    """Courier profile management (no list action)."""

    queryset = Courier.objects.all()
    serializer_class = CourierSerializer
    permission_classes = [IsAuthenticated & (CourierOnly | IsAdminUser)]


class BuyerViewSet(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    GenericViewSet,
):
    """Buyer profile management (no list action)."""

    queryset = Buyer.objects.all()
    serializer_class = BuyerSerializer
    permission_classes = [IsAuthenticated & (BuyerOnly | IsAdminUser)]
| <filename>delivery_proj/users/api/views.py
from django.contrib.auth import get_user_model
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet

# NOTE(review): RestaurantCourierOnly is imported but unused in this module.
from ..permissions import BuyerOnly, RestaurantOnly, RestaurantCourierOnly, CourierOnly
from .serializers import (
    UserSerializer,
    CreateUserSerializer,
    RestaurantSerializer,
    ViewRestaurantSerializer,
    CourierSerializer,
    BuyerSerializer,
)
from ..models import Restaurant, Courier, Buyer

User = get_user_model()


class UserViewSet(viewsets.ModelViewSet):
    """Full CRUD on user accounts."""

    queryset = User.objects.all()

    def get_serializer_class(self):
        # Registration uses a dedicated serializer.
        if self.action == "create":
            return CreateUserSerializer
        return UserSerializer


class RestaurantViewSet(ReadOnlyModelViewSet):
    """Read-only restaurant listing for buyers (and admins)."""

    serializer_class = ViewRestaurantSerializer
    permission_classes = [IsAuthenticated & (BuyerOnly | IsAdminUser)]

    def get_queryset(self):
        # Prefetch dishes to avoid N+1 queries when serializing menus.
        return Restaurant.objects.prefetch_related("dishes").all()


class CreateRestaurantViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    GenericViewSet,
):
    """Create/retrieve/update/delete for restaurant owners (no list action)."""

    serializer_class = RestaurantSerializer
    permission_classes = [IsAuthenticated & (RestaurantOnly | IsAdminUser)]

    def get_queryset(self):
        return Restaurant.objects.prefetch_related("dishes").all()


class CourierViewSet(
    mixins.CreateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    GenericViewSet,
):
    """Courier profile management (no list action)."""

    queryset = Courier.objects.all()
    serializer_class = CourierSerializer
    permission_classes = [IsAuthenticated & (CourierOnly | IsAdminUser)]


class BuyerViewSet(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    GenericViewSet,
):
    """Buyer profile management (no list action)."""

    queryset = Buyer.objects.all()
    serializer_class = BuyerSerializer
    permission_classes = [IsAuthenticated & (BuyerOnly | IsAdminUser)]
| none | 1 | 2.021469 | 2 | |
tests/test_grids.py | vyahello/search-words-puzzle | 1 | 6619526 | """
A test suite contains a set of test cases for the puzzle
grids interfaces.
"""
import pytest

from puzzle.grids import Content, GridContent, Grid, RandomWordsGrid
from puzzle.properties import Coordinate, LetterCoordinates, GridSize

pytestmark = pytest.mark.unittest

# Dimensions used for the shared random-grid fixture below.
_grid_height: int = 15
_grid_width: int = 15


@pytest.fixture()
def random_words_grid() -> Grid:
    """Return a grid of random letters.

    Build a grid of letters when entering into the context manager.
    """
    with RandomWordsGrid(
        grid_size=GridSize(_grid_height, _grid_width)
    ) as grid:  # type: Grid
        yield grid


@pytest.mark.parametrize(
    'content, letter_to_coordinates',
    (
        pytest.param(
            GridContent(rows=['a']),
            {'a': [Coordinate(x_axis=0, y_axis=0)]},
            id='a',
        ),
        pytest.param(
            GridContent(rows=['a', 'b']),
            {
                'a': [Coordinate(x_axis=0, y_axis=0)],
                'b': [Coordinate(x_axis=1, y_axis=0)],
            },
            id='a\nb',
        ),
        pytest.param(
            GridContent(rows=['aa', 'bb', 'cc']),
            {
                'a': [
                    Coordinate(x_axis=0, y_axis=0),
                    Coordinate(x_axis=0, y_axis=1),
                ],
                'b': [
                    Coordinate(x_axis=1, y_axis=0),
                    Coordinate(x_axis=1, y_axis=1),
                ],
                'c': [
                    Coordinate(x_axis=2, y_axis=0),
                    Coordinate(x_axis=2, y_axis=1),
                ],
            },
            id='aa\nbb\ncc',
        ),
    ),
)
def test_grid_content_to_coordinates(
    content: Content, letter_to_coordinates: LetterCoordinates
) -> None:
    """Test the location (coordinates) of the content of random letters.

    Every letter in a grid is able to contain multiple coordinates.
    """
    # NOTE(review): "expected" here is the computed value, so the labels in
    # the assertion message are swapped -- confirm before relying on them.
    expected = content.to_coordinates()
    assert expected == letter_to_coordinates, (
        f'Expected letter coordinates: {expected} != '
        f'Actual letter coordinates: {letter_to_coordinates}'
    )


@pytest.mark.parametrize(
    'content, result',
    (
        pytest.param(GridContent(rows=['a']), 'a', id='a'),
        pytest.param(GridContent(rows=['a', 'b']), 'a\nb', id='a\nb'),
        pytest.param(
            GridContent(rows=['aa', 'bb', 'cc']), 'aa\nbb\ncc', id='aa\nbb\ncc'
        ),
    ),
)
def test_valid_grid_content(content: Content, result: str) -> None:
    """Test the random grid of letters is properly generated (not empty)."""
    assert result == str(
        content
    ), f'Expected content: {result} != Actual content: {content}'


@pytest.mark.parametrize(
    'grid_size',
    (
        GridSize(0, 0),
        GridSize(1, 0),
        GridSize(0, 1),
        GridSize(-1, 0),
        GridSize(0, -1),
        GridSize(1, -1),
        GridSize(-1, 1),
    ),
)
def test_invalid_grid_size(grid_size: GridSize) -> None:
    """Test combinations of invalid (zero or negative) grid sizes.

    ValueError should be raised in case of invalid grid size.
    """
    with pytest.raises(ValueError):
        with RandomWordsGrid(grid_size) as grid:  # type: Grid
            str(grid.content)


def test_valid_grid_size(random_words_grid: Grid) -> None:
    """Test grid generates a content with the expected height and width."""
    content = str(random_words_grid.content).split()
    assert len(content) == _grid_height, (
        f'Expected grid height: {_grid_height} != '
        f'Actual grid height: {len(content)}'
    )
    assert len(content[0]) == _grid_width, (
        f'Expected grid width: {_grid_width} != '
        f'Actual grid width: {len(content[0])}'
    )


def test_invalid_grid_content() -> None:
    """Test the random grid of letters is invalid (empty).

    ValueError exception should be raised in case of empty grid rows.
    """
    with pytest.raises(ValueError):
        str(GridContent(rows=[]))


def test_grid_content_is_generated(random_words_grid: Grid) -> None:
    """Test the grid is able to generate a content of random letters."""
    # NOTE(review): Content.__class__ is the metaclass of Content, not the
    # Content type itself; the failure message may be misleading.
    assert isinstance(random_words_grid.content, Content), (
        f'Random grid content should be "{Content.__class__}" '
        f'data type but got "{random_words_grid.content.__class__}" type'
    )
    assert str(random_words_grid.content), (
        'The grid content is not generated: '
        f'got "{random_words_grid.content}" content'
    )


def test_grid_properties(random_words_grid: Grid) -> None:
    """Test grid contains proper attributes (height and width)."""
    height = random_words_grid.height
    width = random_words_grid.width
    assert _grid_height == height, (
        f'Expected grid height property: {_grid_height} '
        f'!= Actual grid height property: {height}'
    )
    assert _grid_width == width, (
        f'Expected grid width property: {_grid_width} '
        f'!= Actual grid width property: {width}'
    )


def test_empty_grid(random_words_grid: Grid) -> None:
    """Test grid is able to be refreshed (got empty).

    ValueError should be raised when generating empty grid content.
    """
    random_words_grid.refresh()
    with pytest.raises(ValueError):
        str(random_words_grid.content)
| """
A test suite contains a set of test cases for the puzzle
grids interfaces.
"""
import pytest

from puzzle.grids import Content, GridContent, Grid, RandomWordsGrid
from puzzle.properties import Coordinate, LetterCoordinates, GridSize

pytestmark = pytest.mark.unittest

# Dimensions used for the shared random-grid fixture below.
_grid_height: int = 15
_grid_width: int = 15


@pytest.fixture()
def random_words_grid() -> Grid:
    """Return a grid of random letters.

    Build a grid of letters when entering into the context manager.
    """
    with RandomWordsGrid(
        grid_size=GridSize(_grid_height, _grid_width)
    ) as grid:  # type: Grid
        yield grid


@pytest.mark.parametrize(
    'content, letter_to_coordinates',
    (
        pytest.param(
            GridContent(rows=['a']),
            {'a': [Coordinate(x_axis=0, y_axis=0)]},
            id='a',
        ),
        pytest.param(
            GridContent(rows=['a', 'b']),
            {
                'a': [Coordinate(x_axis=0, y_axis=0)],
                'b': [Coordinate(x_axis=1, y_axis=0)],
            },
            id='a\nb',
        ),
        pytest.param(
            GridContent(rows=['aa', 'bb', 'cc']),
            {
                'a': [
                    Coordinate(x_axis=0, y_axis=0),
                    Coordinate(x_axis=0, y_axis=1),
                ],
                'b': [
                    Coordinate(x_axis=1, y_axis=0),
                    Coordinate(x_axis=1, y_axis=1),
                ],
                'c': [
                    Coordinate(x_axis=2, y_axis=0),
                    Coordinate(x_axis=2, y_axis=1),
                ],
            },
            id='aa\nbb\ncc',
        ),
    ),
)
def test_grid_content_to_coordinates(
    content: Content, letter_to_coordinates: LetterCoordinates
) -> None:
    """Test the location (coordinates) of the content of random letters.

    Every letter in a grid is able to contain multiple coordinates.
    """
    expected = content.to_coordinates()
    assert expected == letter_to_coordinates, (
        f'Expected letter coordinates: {expected} != '
        f'Actual letter coordinates: {letter_to_coordinates}'
    )


@pytest.mark.parametrize(
    'content, result',
    (
        pytest.param(GridContent(rows=['a']), 'a', id='a'),
        pytest.param(GridContent(rows=['a', 'b']), 'a\nb', id='a\nb'),
        pytest.param(
            GridContent(rows=['aa', 'bb', 'cc']), 'aa\nbb\ncc', id='aa\nbb\ncc'
        ),
    ),
)
def test_valid_grid_content(content: Content, result: str) -> None:
    """Test the random grid of letters is properly generated (not empty)."""
    assert result == str(
        content
    ), f'Expected content: {result} != Actual content: {content}'


@pytest.mark.parametrize(
    'grid_size',
    (
        GridSize(0, 0),
        GridSize(1, 0),
        GridSize(0, 1),
        GridSize(-1, 0),
        GridSize(0, -1),
        GridSize(1, -1),
        GridSize(-1, 1),
    ),
)
def test_invalid_grid_size(grid_size: GridSize) -> None:
    """Test combinations of invalid (zero or negative) grid sizes.

    ValueError should be raised in case of invalid grid size.
    """
    with pytest.raises(ValueError):
        with RandomWordsGrid(grid_size) as grid:  # type: Grid
            str(grid.content)


def test_valid_grid_size(random_words_grid: Grid) -> None:
    """Test grid generates a content with the expected height and width."""
    content = str(random_words_grid.content).split()
    assert len(content) == _grid_height, (
        f'Expected grid height: {_grid_height} != '
        f'Actual grid height: {len(content)}'
    )
    assert len(content[0]) == _grid_width, (
        f'Expected grid width: {_grid_width} != '
        f'Actual grid width: {len(content[0])}'
    )


def test_invalid_grid_content() -> None:
    """Test the random grid of letters is invalid (empty).

    ValueError exception should be raised in case of empty grid rows.
    """
    with pytest.raises(ValueError):
        str(GridContent(rows=[]))


def test_grid_content_is_generated(random_words_grid: Grid) -> None:
    """Test the grid is able to generate a content of random letters."""
    assert isinstance(random_words_grid.content, Content), (
        f'Random grid content should be "{Content.__class__}" '
        f'data type but got "{random_words_grid.content.__class__}" type'
    )
    assert str(random_words_grid.content), (
        'The grid content is not generated: '
        f'got "{random_words_grid.content}" content'
    )


def test_grid_properties(random_words_grid: Grid) -> None:
    """Test grid contains proper attributes (height and width)."""
    height = random_words_grid.height
    width = random_words_grid.width
    assert _grid_height == height, (
        f'Expected grid height property: {_grid_height} '
        f'!= Actual grid height property: {height}'
    )
    assert _grid_width == width, (
        f'Expected grid width property: {_grid_width} '
        f'!= Actual grid width property: {width}'
    )


def test_empty_grid(random_words_grid: Grid) -> None:
    """Test grid is able to be refreshed (got empty).

    ValueError should be raised when generating empty grid content.
    """
    random_words_grid.refresh()
    with pytest.raises(ValueError):
        str(random_words_grid.content)
| en | 0.778866 | A test suite contains a set of test cases for the puzzle grids interfaces. Return a grid of random letters. Build a grid of letters when entering into the context manager. # type: Grid Test the location (coordinates) of the content of random letters. Every letter in a grid is able to contain multiple coordinates. Test the random grid of letters is properly generated (not empty). The the combination of invalid grid size. ValueError should be raised in case of invalid grid size. # type: Grid Test grid generates a content with the expected height and width. Test the random grid of letters is invalid (empty). ValueError exception should be raised in case of empty grid rows. Test the grid is able to generate a content of random letters. Test grid contains proper attributes (height and width). Test grid is able to be refreshed (got empty). ValueError should be raised when generating empty grid content. | 2.85304 | 3 |
app/tests/conftests.py | jaisenbe58r/SaasFastApi | 42 | 6619527 | # From @euri10 -- https://gitter.im/tiangolo/fastapi?at=5cd915ed56271260f95275ac
import asyncio
from unittest import TestCase

import pytest
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database, database_exists, drop_database
from starlette.config import environ
from starlette.testclient import TestClient

# This sets `os.environ`, but provides some additional protection.
# If we placed it below the application import, it would raise an error
# informing us that 'TESTING' had already been read from the environment.
environ["TESTING"] = "True"
environ["EMAILS_ENABLED"] = "False"

from app.main import app  # isort:skip
from app.database import engine, Base, DBSession


class TestBase(TestCase):
    """Base class for API tests: fresh schema plus an HTTP test client."""

    def setUp(self):
        # Rebuild the whole schema before every test for full isolation.
        self.db_session = DBSession()
        self.connection = engine.connect()
        # # Configure Search DDL triggers.
        Base.metadata.drop_all(self.connection)
        Base.metadata.create_all(self.connection)
        self.client = TestClient(app)

    def tearDown(self):
        # Discard any uncommitted changes left by the test.
        self.db_session.rollback()
        self.db_session.close()

    def create_system_admin(self, *args, **kwargs):
        """Create and return an active system-admin account.

        NOTE(review): *args/**kwargs are accepted but never used -- confirm
        whether they were meant to be forwarded to ``create_account``.
        """
        from app.controllers.account import create_account
        from app.schemas.account import AccountCreate

        return create_account(
            self.db_session,
            first_name="Admin",
            last_name="Istrator",
            email="<EMAIL>",
            password="<PASSWORD>",
            is_system_admin=True,
            is_active=True,
            send_registration_email=False,
        )

    def auth_headers(self, email="<EMAIL>", password="<PASSWORD>"):
        """Log in via /auth/token and return a Bearer authorization header."""
        payload = {"username": email, "password": password}
        resp = self.client.post("/auth/token", data=payload)
        return {"Authorization": "Bearer " + resp.json().get("access_token")}
# From @euri10 -- https://gitter.im/tiangolo/fastapi?at=5cd915ed56271260f95275ac
import asyncio
from unittest import TestCase

import pytest
from sqlalchemy import create_engine
from sqlalchemy_utils import create_database, database_exists, drop_database
from starlette.config import environ
from starlette.testclient import TestClient

# This sets `os.environ`, but provides some additional protection.
# If we placed it below the application import, it would raise an error
# informing us that 'TESTING' had already been read from the environment.
environ["TESTING"] = "True"
environ["EMAILS_ENABLED"] = "False"

from app.main import app  # isort:skip
from app.database import engine, Base, DBSession


class TestBase(TestCase):
    """Base class for API tests: fresh schema plus an HTTP test client."""

    def setUp(self):
        # Rebuild the whole schema before every test for full isolation.
        self.db_session = DBSession()
        self.connection = engine.connect()
        # # Configure Search DDL triggers.
        Base.metadata.drop_all(self.connection)
        Base.metadata.create_all(self.connection)
        self.client = TestClient(app)

    def tearDown(self):
        self.db_session.rollback()
        self.db_session.close()

    def create_system_admin(self, *args, **kwargs):
        """Create and return an active system-admin account."""
        from app.controllers.account import create_account
        from app.schemas.account import AccountCreate

        return create_account(
            self.db_session,
            first_name="Admin",
            last_name="Istrator",
            email="<EMAIL>",
            password="<PASSWORD>",
            is_system_admin=True,
            is_active=True,
            send_registration_email=False,
        )

    def auth_headers(self, email="<EMAIL>", password="<PASSWORD>"):
        """Log in via /auth/token and return a Bearer authorization header."""
        payload = {"username": email, "password": password}
        resp = self.client.post("/auth/token", data=payload)
        return {"Authorization": "Bearer " + resp.json().get("access_token")}
| en | 0.818108 | # From @euri10 -- https://gitter.im/tiangolo/fastapi?at=5cd915ed56271260f95275ac # This sets `os.environ`, but provides some additional protection. # If we placed it below the application import, it would raise an error # informing us that 'TESTING' had already been read from the environment. # isort:skip # # Configure Search DDL triggers. | 1.998039 | 2 |
keep_backend/tests/test_openrosa/test_json_to_xls.py | 9929105/KEEP | 0 | 6619528 | from django.test import TestCase
from openrosa.xform_reader import XFormReader
from openrosa.json_xls_convert import jsonXlsConvert
# Tutorial xform with basic data/survey types
XFORM_FILE = '../_data/test_docs/tutorial.xml'
class JSONToXLSTests( TestCase ):
    """End-to-end check that a parsed XForm survives JSON -> XLS conversion."""

    def test_conversion( self ):
        """Parse the tutorial XForm and write its children out as XLS."""
        reader = XFormReader( XFORM_FILE )
        # Renamed local from ``json`` to avoid shadowing the stdlib module.
        form_dict = reader.to_json_dict()
        converter = jsonXlsConvert( '/tmp/test.xls' )
        converter.writeToXls( form_dict.get( 'children' ) )
| from django.test import TestCase
from openrosa.xform_reader import XFormReader
from openrosa.json_xls_convert import jsonXlsConvert
# Tutorial xform with basic data/survey types
XFORM_FILE = '../_data/test_docs/tutorial.xml'
class JSONToXLSTests( TestCase ):
    """End-to-end check that a parsed XForm survives JSON -> XLS conversion."""

    def test_conversion( self ):
        reader = XFormReader( XFORM_FILE )
        # NOTE(review): local name ``json`` shadows the stdlib module name.
        json = reader.to_json_dict()
        converter = jsonXlsConvert( '/tmp/test.xls' )
        converter.writeToXls( json.get( 'children' ) )
| en | 0.797735 | # Tutorial xform with basic data/survey types | 2.179541 | 2 |
cve-manager/cve_manager/handler/task_handler/callback/repo_set.py | seandong37tt4qu/jeszhengq | 0 | 6619529 | <reponame>seandong37tt4qu/jeszhengq<gh_stars>0
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description: callback function of the repo setting task.
"""
from cve_manager.handler.task_handler.callback import TaskCallback
from cve_manager.conf.constant import REPO_STATUS, ANSIBLE_TASK_STATUS
class RepoSetCallback(TaskCallback):
"""
Callback function for repo setting.
"""
def v2_runner_on_unreachable(self, result):
host_name, task_name = self._record_info(result, ANSIBLE_TASK_STATUS.UNREACHABLE)
self.save_to_db(task_name, host_name, REPO_STATUS.FAIL)
def v2_runner_on_ok(self, result):
host_name, task_name = self._record_info(result, ANSIBLE_TASK_STATUS.SUCCEED)
self.save_to_db(task_name, host_name, REPO_STATUS.SUCCEED)
def v2_runner_on_failed(self, result, ignore_errors=False):
host_name, task_name = self._record_info(result, ANSIBLE_TASK_STATUS.FAIL)
self.save_to_db(task_name, host_name, REPO_STATUS.FAIL)
def save_to_db(self, task_name, host_name, status):
"""
When it's a check task, save the check result to member variable.
Otherwise update the status of the host to database.
Args:
task_name (str): task name in playbook.
host_name (str)
status (str)
"""
# it means it's a task for setting repo.
if task_name == 'set repo':
self.result[host_name][task_name]['status'] = status
host_id = self.task_info[host_name]['host_id']
self.proxy.set_repo_status(self.task_id, [host_id], status)
if status == REPO_STATUS.SUCCEED:
self.proxy.set_host_repo(self.task_info[host_name]['repo_name'], [host_id])
elif task_name.startswith('check'):
self.check_result[host_name][task_name] = self.result[host_name].pop(
task_name)
| #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description: callback function of the repo setting task.
"""
from cve_manager.handler.task_handler.callback import TaskCallback
from cve_manager.conf.constant import REPO_STATUS, ANSIBLE_TASK_STATUS
class RepoSetCallback(TaskCallback):
"""
Callback function for repo setting.
"""
def v2_runner_on_unreachable(self, result):
host_name, task_name = self._record_info(result, ANSIBLE_TASK_STATUS.UNREACHABLE)
self.save_to_db(task_name, host_name, REPO_STATUS.FAIL)
def v2_runner_on_ok(self, result):
host_name, task_name = self._record_info(result, ANSIBLE_TASK_STATUS.SUCCEED)
self.save_to_db(task_name, host_name, REPO_STATUS.SUCCEED)
def v2_runner_on_failed(self, result, ignore_errors=False):
host_name, task_name = self._record_info(result, ANSIBLE_TASK_STATUS.FAIL)
self.save_to_db(task_name, host_name, REPO_STATUS.FAIL)
def save_to_db(self, task_name, host_name, status):
"""
When it's a check task, save the check result to member variable.
Otherwise update the status of the host to database.
Args:
task_name (str): task name in playbook.
host_name (str)
status (str)
"""
# it means it's a task for setting repo.
if task_name == 'set repo':
self.result[host_name][task_name]['status'] = status
host_id = self.task_info[host_name]['host_id']
self.proxy.set_repo_status(self.task_id, [host_id], status)
if status == REPO_STATUS.SUCCEED:
self.proxy.set_host_repo(self.task_info[host_name]['repo_name'], [host_id])
elif task_name.startswith('check'):
self.check_result[host_name][task_name] = self.result[host_name].pop(
task_name) | en | 0.608502 | #!/usr/bin/python3 # ****************************************************************************** # Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. # licensed under the Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. # You may obtain a copy of Mulan PSL v2 at: # http://license.coscl.org.cn/MulanPSL2 # THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR # PURPOSE. # See the Mulan PSL v2 for more details. # ******************************************************************************/ Time: Author: Description: callback function of the repo setting task. Callback function for repo setting. When it's a check task, save the check result to member variable. Otherwise update the status of the host to database. Args: task_name (str): task name in playbook. host_name (str) status (str) # it means it's a task for setting repo. | 1.745364 | 2 |
src/python/hotpot/config/config.py | Tsinglung-Tseng/hotpot | 0 | 6619530 | <filename>src/python/hotpot/config/config.py<gh_stars>0
class Config:
    """Filesystem paths used by the training/evaluation scripts."""

    # HDF5 dataset with the analytical phantom sinograms.
    AnalyticalPhantom = '/home/qinglong/node3share/analytical_phantom_sinogram.h5'
    # TensorBoard log directory for the 2x-downsampling run.
    SummaryDir = '/home/qinglong/node3share/remote_drssrn/tensorboard_log/2xDown_new_1'
| <filename>src/python/hotpot/config/config.py<gh_stars>0
class Config:
    """Filesystem paths used by the training/evaluation scripts."""

    # HDF5 dataset with the analytical phantom sinograms.
    AnalyticalPhantom = '/home/qinglong/node3share/analytical_phantom_sinogram.h5'
    # TensorBoard log directory for the 2x-downsampling run.
    SummaryDir = '/home/qinglong/node3share/remote_drssrn/tensorboard_log/2xDown_new_1'
| none | 1 | 1.12507 | 1 | |
src/ttkbootstrap/cookbook/dials_and_meters.py | jongbatax/ttkbootstrap | 0 | 6619531 | """
Author: <NAME>
Modified: 2021-05-09
"""
from ttkbootstrap import Style
from ttkbootstrap.widgets import Meter
# Create the root window styled with the "cosmo" ttkbootstrap theme.
style = Style('cosmo')
root = style.master
root.title('ttkbootstrap')
# Semicircular gauge; interactive=True lets the user drag the value.
m1 = Meter(metersize=180, padding=20, amountused=25, metertype='semi', labeltext='miles per hour', interactive=True)
m1.grid(row=0, column=0)
# Full-circle striped gauge with a custom total (2600) and a "gb" suffix.
m2 = Meter(metersize=180, padding=20, amountused=1800, amounttotal=2600, labeltext='storage used', textappend='gb',
           meterstyle='info.TMeter', stripethickness=10, interactive=True)
m2.grid(row=0, column=1)
# Thin-stripe percentage gauge using the "success" (green) style.
m3 = Meter(metersize=180, padding=20, stripethickness=2, amountused=40, labeltext='project capacity', textappend='%',
           meterstyle='success.TMeter', interactive=True)
m3.grid(row=1, column=0)
# Half-arc thermometer-style gauge (arcrange/arcoffset draw the lower half),
# with a narrow wedge indicator instead of a filled arc.
m4 = Meter(metersize=180, padding=20, amounttotal=280, arcrange=180, arcoffset=-180, amountused=75, textappend='°',
           labeltext='heat temperature', wedgesize=5, meterstyle='danger.TMeter', interactive=True)
m4.grid(row=1, column=1)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
Author: <NAME>
Modified: 2021-05-09
"""
from ttkbootstrap import Style
from ttkbootstrap.widgets import Meter
style = Style('cosmo')
root = style.master
root.title('ttkbootstrap')
m1 = Meter(metersize=180, padding=20, amountused=25, metertype='semi', labeltext='miles per hour', interactive=True)
m1.grid(row=0, column=0)
m2 = Meter(metersize=180, padding=20, amountused=1800, amounttotal=2600, labeltext='storage used', textappend='gb',
meterstyle='info.TMeter', stripethickness=10, interactive=True)
m2.grid(row=0, column=1)
m3 = Meter(metersize=180, padding=20, stripethickness=2, amountused=40, labeltext='project capacity', textappend='%',
meterstyle='success.TMeter', interactive=True)
m3.grid(row=1, column=0)
m4 = Meter(metersize=180, padding=20, amounttotal=280, arcrange=180, arcoffset=-180, amountused=75, textappend='°',
labeltext='heat temperature', wedgesize=5, meterstyle='danger.TMeter', interactive=True)
m4.grid(row=1, column=1)
root.mainloop() | en | 0.734642 | Author: <NAME> Modified: 2021-05-09 | 2.481383 | 2 |
Snippets/word_count.py | ColinShark/Pyrogram-Snippets | 59 | 6619532 | # Iterates through a chat's history and counts how many each words was said.
# This counts everything that gets seperated by Pythons ".split()".
from pyrogram import Client
chat = "pyrogramlounge"
limit = 2000
# Limit is for how many messages you want to look through
app = Client("my_account")
class custom(dict):
    """Dict that reads as 0 for absent keys, so ``d[k] += 1`` needs no seeding."""

    def __missing__(self, absent_key):
        # Invoked by dict.__getitem__ on a lookup miss; return a default
        # without inserting the key (insertion only happens on assignment).
        return 0
with app:
    # Per-word frequency table; `custom` defaults missing keys to 0.
    words = custom()
    # Progress message that gets edited in place while history is scanned.
    progress = app.send_message(chat, "`processed 0 messages...`")
    total = 0
    for msg in app.iter_history(chat, limit):
        total += 1
        # Throttle progress edits to one per 200 messages.
        if total % 200 == 0:
            progress.edit_text(f"`processed {total} messages...`")
        # Count whitespace-separated tokens, case-insensitively.
        if msg.text:
            for word in msg.text.split():
                words[word.lower()] += 1
        # Media captions are tokenised and counted the same way.
        if msg.caption:
            for word in msg.caption.split():
                words[word.lower()] += 1
    # Words ordered by descending frequency.
    freq = sorted(words, key=words.get, reverse=True)
    out = "Word Counter\n"
    # Render the top 50 words. NOTE(review): raises IndexError if fewer than
    # 50 distinct words were seen -- confirm target chats are large enough.
    for i in range(50):
        out += f"{i+1}. {words[freq[i]]}: {freq[i]}\n"
    progress.edit_text(out, parse_mode=None)
| # Iterates through a chat's history and counts how many each words was said.
# This counts everything that gets seperated by Pythons ".split()".
from pyrogram import Client
chat = "pyrogramlounge"
limit = 2000
# Limit is for how many messages you want to look through
app = Client("my_account")
class custom(dict):
def __missing__(self, key):
return 0
with app:
words = custom()
progress = app.send_message(chat, "`processed 0 messages...`")
total = 0
for msg in app.iter_history(chat, limit):
total += 1
if total % 200 == 0:
progress.edit_text(f"`processed {total} messages...`")
if msg.text:
for word in msg.text.split():
words[word.lower()] += 1
if msg.caption:
for word in msg.caption.split():
words[word.lower()] += 1
freq = sorted(words, key=words.get, reverse=True)
out = "Word Counter\n"
for i in range(50):
out += f"{i+1}. {words[freq[i]]}: {freq[i]}\n"
progress.edit_text(out, parse_mode=None)
| en | 0.982797 | # Iterates through a chat's history and counts how many each words was said. # This counts everything that gets seperated by Pythons ".split()". # Limit is for how many messages you want to look through | 2.978032 | 3 |
test/script.py | HannesEberhard/symbolic4 | 0 | 6619533 | <filename>test/script.py
import os
import datetime
# Lines in test_data.txt starting with '#' are skipped; if any line starts
# with '!', only '!'-prefixed lines are run ("exclusive" mode).
comment_character = '#'
exclusive_character = '!';
exclusive = False
test_data = []
log_string = ""
# Refresh the repo and build the test binary from all C sources.
os.system("git pull")
os.system("gcc -o test ../src/*.c test.c -lm")
contents = open('test_data.txt', 'r').read()#.replace('(', '\(').replace(')', '\)')
if exclusive_character in contents:
    exclusive = True
# Each runnable line has the form "input|expected", split on '|'.
for content in contents.split('\n'):
    if content != "" and content[0] != comment_character and (not exclusive or content[0] == exclusive_character) and '|' in content:
        content = content.split('|')
        # Run the test binary under valgrind (mode 0), capturing the memcheck
        # report to a file, then read it back for the combined log.
        os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no --log-file="valgrind_memcheck_log.txt" ./test 0 "' + content[0] + '" "' + content[1] + '"')
        with open('valgrind_memcheck_log.txt','r') as f:
            valgrind_memcheck_log = f.read()
        # Record [input, expected, plain test output (mode 1), memcheck log].
        test_data.append([content[0].replace('\(', '(').replace('\)', ')'),
                          content[1].replace('\(', '(').replace('\)', ')'),
                          os.popen('./test 1 "' + content[0] + '" "' + content[1] + '"').read(),
                          valgrind_memcheck_log])
        # test_string = os.popen('./test 1 ' + content[0] + ' ' + content[1]).read()
        # memcheck_string = os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read()
        # print(content[0]);
        # print(content[1]);
        # print(os.popen('./test 1 ' + content[0] + ' ' + content[1]).read());
        # print(os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read())
        # os.popen('valgrind --tool=massif --massif-out-file=./massif.out ./test 0 ' + content[0] + ' ' + content[1])
        # os.popen('ms_print ./massif.out')
        '''os.popen('valgrind --tool=massif --massif-out-file=./massif.out ./test 0 ' + content[0] + ' ' + content[1])
test_data.append([content[0],
content[1],
os.popen('./test 1 ' + content[0] + ' ' + content[1]).read(),
os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read(),
os.popen('ms_print ./massif.out').read()])
print(test_data);
for data in test_data:
log_string += data[2] + '\n'
log_string += '\n\n\nValgrind Logs:\n\n'
for data in test_data:
log_string += data[0] + '\n'
log_string += data[2] + '\n\n\n'
log_string += data[3] + '\n\n\n\n\n'
'''
# Assemble the combined log: test outputs first, then memcheck reports.
log_string += "------------------------------\n          Test results\n------------------------------\n"
for data in test_data:
    log_string += data[2] + '\n'
log_string += '\n'
log_string += "------------------------------\n          Valgrind-Memcheck results\n------------------------------\n"
for data in test_data:
    log_string += data[3] + '\n'
log_string += '\n'
# NOTE(review): the Massif section header is written but no massif data is
# collected (the massif code above is commented out) -- confirm intent.
log_string += "------------------------------\n          Valgrind-Massif results\n------------------------------\n"
log_file = open("log.txt", "w")
log_file.write(log_string)
log_file.close()
# Commit and push the refreshed log with a timestamped message.
os.system("git add .")
os.system("git commit -m \"test " + datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S") + "\"")
os.system("git push origin master")
| <filename>test/script.py
import os
import datetime
comment_character = '#'
exclusive_character = '!';
exclusive = False
test_data = []
log_string = ""
os.system("git pull")
os.system("gcc -o test ../src/*.c test.c -lm")
contents = open('test_data.txt', 'r').read()#.replace('(', '\(').replace(')', '\)')
if exclusive_character in contents:
exclusive = True
for content in contents.split('\n'):
if content != "" and content[0] != comment_character and (not exclusive or content[0] == exclusive_character) and '|' in content:
content = content.split('|')
os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no --log-file="valgrind_memcheck_log.txt" ./test 0 "' + content[0] + '" "' + content[1] + '"')
with open('valgrind_memcheck_log.txt','r') as f:
valgrind_memcheck_log = f.read()
test_data.append([content[0].replace('\(', '(').replace('\)', ')'),
content[1].replace('\(', '(').replace('\)', ')'),
os.popen('./test 1 "' + content[0] + '" "' + content[1] + '"').read(),
valgrind_memcheck_log])
# test_string = os.popen('./test 1 ' + content[0] + ' ' + content[1]).read()
# memcheck_string = os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read()
# print(content[0]);
# print(content[1]);
# print(os.popen('./test 1 ' + content[0] + ' ' + content[1]).read());
# print(os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read())
# os.popen('valgrind --tool=massif --massif-out-file=./massif.out ./test 0 ' + content[0] + ' ' + content[1])
# os.popen('ms_print ./massif.out')
'''os.popen('valgrind --tool=massif --massif-out-file=./massif.out ./test 0 ' + content[0] + ' ' + content[1])
test_data.append([content[0],
content[1],
os.popen('./test 1 ' + content[0] + ' ' + content[1]).read(),
os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read(),
os.popen('ms_print ./massif.out').read()])
print(test_data);
for data in test_data:
log_string += data[2] + '\n'
log_string += '\n\n\nValgrind Logs:\n\n'
for data in test_data:
log_string += data[0] + '\n'
log_string += data[2] + '\n\n\n'
log_string += data[3] + '\n\n\n\n\n'
'''
log_string += "------------------------------\n Test results\n------------------------------\n"
for data in test_data:
log_string += data[2] + '\n'
log_string += '\n'
log_string += "------------------------------\n Valgrind-Memcheck results\n------------------------------\n"
for data in test_data:
log_string += data[3] + '\n'
log_string += '\n'
log_string += "------------------------------\n Valgrind-Massif results\n------------------------------\n"
log_file = open("log.txt", "w")
log_file.write(log_string)
log_file.close()
os.system("git add .")
os.system("git commit -m \"test " + datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S") + "\"")
os.system("git push origin master")
| en | 0.058475 | #.replace('(', '\(').replace(')', '\)') # test_string = os.popen('./test 1 ' + content[0] + ' ' + content[1]).read() # memcheck_string = os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read() # print(content[0]); # print(content[1]); # print(os.popen('./test 1 ' + content[0] + ' ' + content[1]).read()); # print(os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read()) # os.popen('valgrind --tool=massif --massif-out-file=./massif.out ./test 0 ' + content[0] + ' ' + content[1]) # os.popen('ms_print ./massif.out') os.popen('valgrind --tool=massif --massif-out-file=./massif.out ./test 0 ' + content[0] + ' ' + content[1]) test_data.append([content[0], content[1], os.popen('./test 1 ' + content[0] + ' ' + content[1]).read(), os.popen('valgrind --leak-check=full --show-reachable=yes --undef-value-errors=no ./test 0 ' + content[0] + ' ' + content[1]).read(), os.popen('ms_print ./massif.out').read()]) print(test_data); for data in test_data: log_string += data[2] + '\n' log_string += '\n\n\nValgrind Logs:\n\n' for data in test_data: log_string += data[0] + '\n' log_string += data[2] + '\n\n\n' log_string += data[3] + '\n\n\n\n\n' | 2.606106 | 3 |
caesar-cipher/caesar_cipher.py | izabela-am/Cryptography-Algorithms | 0 | 6619534 | <reponame>izabela-am/Cryptography-Algorithms
import sys
from string import ascii_lowercase as lower_case_letters
# Read the whole input file and normalise to lower case; the cipher only
# shifts lower-case letters, everything else passes through unchanged.
file = open(sys.argv[1], 'r').read().lower() # File to be read by the program
key = int(sys.argv[2]) # Shift key
operation = sys.argv[3] # operation: encrypt or decrypt
final_message = ''
for letter in file:
    if letter in lower_case_letters:
        letter_index = lower_case_letters.find(letter)
        # Shift forward to encrypt, backward to decrypt; % 26 wraps the
        # index around the alphabet in either direction.
        if operation == 'encrypt':
            letter_index = (letter_index + key) % 26
        elif operation == 'decrypt':
            letter_index = (letter_index - key) % 26
        final_message += lower_case_letters[letter_index]
    else:
        # Non-alphabetic characters (digits, punctuation, whitespace)
        # are copied through untouched.
        final_message += letter
print(final_message,) | import sys
from string import ascii_lowercase as lower_case_letters
file = open(sys.argv[1], 'r').read().lower() # File to be read by the program
key = int(sys.argv[2]) # Shift key
operation = sys.argv[3] # operation: encrypt or decrypt
final_message = ''
for letter in file:
if letter in lower_case_letters:
letter_index = lower_case_letters.find(letter)
if operation == 'encrypt':
letter_index = (letter_index + key) % 26
elif operation == 'decrypt':
letter_index = (letter_index - key) % 26
final_message += lower_case_letters[letter_index]
else:
final_message += letter
print(final_message,) | en | 0.906737 | # File to be read by the program # Shift key # operation: encrypt or decrypt | 3.97445 | 4 |
blog/forms.py | boost-entropy-repos-org/ojas | 0 | 6619535 | from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
    """ModelForm for posting a blog comment: optional name plus a body."""
    # Commenter's display name; required=False allows anonymous comments.
    name = forms.CharField(label='Your name', max_length=100, required=False, widget=forms.TextInput(attrs={
        'placeholder': 'Optional',
    }))
    # Comment text, capped at 500 characters, rendered as a 3-row textarea.
    body = forms.CharField(label='Join the discussion', max_length=500, widget=forms.Textarea(attrs={
        'placeholder': '500 characters max',
        'rows': '3',
    }))
    class Meta:
        # Only these two fields are user-editable; remaining Comment model
        # fields are presumably filled in by the view -- confirm at call site.
        model = Comment
        fields = ('name', 'body')
from .models import Comment
class CommentForm(forms.ModelForm):
name = forms.CharField(label='Your name', max_length=100, required=False, widget=forms.TextInput(attrs={
'placeholder': 'Optional',
}))
body = forms.CharField(label='Join the discussion', max_length=500, widget=forms.Textarea(attrs={
'placeholder': '500 characters max',
'rows': '3',
}))
class Meta:
model = Comment
fields = ('name', 'body') | none | 1 | 2.421299 | 2 | |
LZW/utils.py | picorro/InfoTeorija | 0 | 6619536 | import math
chunk_size = 1024
def number_of_bits(k, max_size):
    """Bit width for codes up to ``max_size + k``: floor(log2(max_size + k))."""
    width = math.log2(k + max_size)
    return int(width)
def yield_bytes_from_stream(stream):
    """Yield successive chunks (of at most ``chunk_size`` bytes) from *stream*."""
    block = stream.read(chunk_size)
    # read() returns an empty (falsy) chunk at end of stream.
    while block:
        yield block
        block = stream.read(chunk_size)
def yield_from_file(path_to_file):
    """Open *path_to_file* in binary mode and yield chunks of ``chunk_size`` bytes."""
    with open(path_to_file, "rb") as stream:
        block = stream.read(chunk_size)
        # An empty bytes object signals end of file.
        while block:
            yield block
            block = stream.read(chunk_size)
| import math
chunk_size = 1024
def number_of_bits(k, max_size):
return int(math.log2(max_size + k))
def yield_bytes_from_stream(stream):
"""Returns a chunk of bytes from a stream"""
while True:
chunk = stream.read(chunk_size)
if chunk:
yield chunk
else:
break
def yield_from_file(path_to_file):
"""Returns a chunk of bytes from a file"""
with open(path_to_file, "rb") as stream:
while True:
data = stream.read(chunk_size)
if data:
yield data
else:
break
| en | 0.8999 | Returns a chunk of bytes from a stream Returns a chunk of bytes from a file | 3.652426 | 4 |
euler-021.py | spacether/euler-python | 0 | 6619537 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10,000
"""
def divisors(big_num):
    """Return the proper divisors of big_num in increasing order.

    Proper divisors are every d < big_num with big_num % d == 0; only
    candidates up to big_num // 2 need checking, since no proper divisor
    can exceed half of big_num.
    """
    found = []
    # Bug fix: '/' produced a float, and range() rejects floats on Python 3
    # (this file's shebang is python3). Floor division keeps it an int.
    upper_bound = big_num // 2
    for divisor in range(1, upper_bound + 1):
        if big_num % divisor == 0:
            found.append(divisor)
    return found
def calc_divisor_sum(big_num):
    """Return d(big_num): the sum of the proper divisors of big_num."""
    return sum(divisors(big_num))
# Cache of d(n): divisor_sums[n] == sum of proper divisors of n.
divisor_sums = {}
amicable_numbers_sum = 0
for num in range(2,10000):
    divisor_sum = calc_divisor_sum(num)
    divisor_sums[num] = divisor_sum
    # (a, b) is amicable when d(a) == b, d(b) == a and a != b. Numbers are
    # visited in increasing order, so a pair is detected when its larger
    # member is reached (the smaller one is already in the cache).
    if (divisor_sum in divisor_sums
        and divisor_sums[divisor_sum] == num
        and num != divisor_sum):
        print('amicable pair %s %s' % (num, divisor_sum))
        amicable_numbers_sum += divisor_sum + num
print('=amicable_numbers_sum%s' % amicable_numbers_sum)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10,000
"""
def divisors(big_num):
    """Return the proper divisors of big_num in increasing order.

    Proper divisors are every d < big_num with big_num % d == 0; only
    candidates up to big_num // 2 need checking, since no proper divisor
    can exceed half of big_num.
    """
    found = []
    # Bug fix: '/' produced a float, and range() rejects floats on Python 3
    # (this file's shebang is python3). Floor division keeps it an int.
    upper_bound = big_num // 2
    for divisor in range(1, upper_bound + 1):
        if big_num % divisor == 0:
            found.append(divisor)
    return found
def calc_divisor_sum(big_num):
return sum(divisors(big_num))
divisor_sums = {}
amicable_numbers_sum = 0
for num in range(2,10000):
divisor_sum = calc_divisor_sum(num)
divisor_sums[num] = divisor_sum
if (divisor_sum in divisor_sums
and divisor_sums[divisor_sum] == num
and num != divisor_sum):
print('amicable pair %s %s' % (num, divisor_sum))
amicable_numbers_sum += divisor_sum + num
print('=amicable_numbers_sum%s' % amicable_numbers_sum)
| en | 0.876076 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n). If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers. For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220. Evaluate the sum of all the amicable numbers under 10,000 | 4.023695 | 4 |
engine.py | xinming-wei/cocoon | 0 | 6619538 | <gh_stars>0
import subprocess
import os
import util
import ops.cds.syn as syn
import ops.cds.floorplan as fp
import ops.cds.pdn as pdn
import ops.cds.place as place
import ops.cds.cts as cts
import ops.cds.route as route
import ops.cds.drc as drc
import time
def run(design, flow, flow_name):
    """Generate and execute a physical-design flow for *design*.

    Emits per-step TCL/ys scripts and a Makefile under the design's run
    directory, drives them via ``make``, and optionally performs iterative
    feedback tuning (IFT) rounds that re-run synthesis using the critical
    path parsed from the previous timing report.

    Args:
        design: design object providing ``top_name``, ``rtl_input`` and the
            path accessors used via ``util``.
        flow: flow object providing ``ops`` (list of (op_name, suffix)
            pairs), per-step parameter dicts, tool paths/versions, and
            ``verbose`` / ``n_iter_IFT`` settings.
        flow_name: label used only in log messages and the failure assert.
    """
    begin_t = time.time()
    design_name = design.top_name
    # Recreate the run/script/object/report directories from scratch.
    run_path = util.getRunPath(design)
    # print(run_path)
    os.system("mkdir -p %s && rm -rf %s*" % (run_path, run_path))
    make_file = open(os.path.join(run_path, "Makefile"), "w")
    tcl_path = util.getScriptPath(design)
    os.system("mkdir -p %s && rm -rf %s*" % (tcl_path, tcl_path))
    # flow.tcl sources the per-step Innovus scripts in order.
    overall_tcl = open(os.path.join(tcl_path, "flow.tcl"), 'w', encoding='utf-8')
    obj_path = util.getObjPath(design)
    os.system("mkdir -p %s && rm -rf %s*" % (obj_path, obj_path))
    os.system(f"cp {design.rtl_input} {obj_path}")
    rpt_path = util.getRptPath(design)
    os.system("mkdir -p %s && rm -rf %s*" % (rpt_path, rpt_path))
    # For each configured op: generate its script and register it either in
    # the Makefile (synthesis) or in flow.tcl (Innovus back-end steps).
    # NOTE(review): script_path and tmp_op_syn are only bound inside the
    # synthesis branches; a flow without a synthesis op would hit a
    # NameError below -- confirm every flow starts with GenusSynth/yosys.
    for x in flow.ops:
        if x[0] == "GenusSynth":
            script_path = "../scripts/"
            tmp_op_syn = syn.GenusSynth(design)
            tmp_op_syn.config(design_name + "_" + x[1])
            # verbose: tool output to the terminal; otherwise redirect to a log.
            output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'GenusSynth.log')}\n"
            make_file.write("all:\n")
            make_file.write("\tgenus -legacy_ui -batch -files " + script_path + design_name + "_" + x[1] + ".tcl" + output)
        if x[0] == "yosys":
            script_path = "../scripts/"
            tmp_op_syn = syn.YosysSynth(design)
            tmp_op_syn.config(design_name + "_" + x[1], flow)
            make_file.write("all:\n")
            yosys_path = os.path.join(flow.yosys_bin_path, "yosys")
            save_log = f" | tee -a {os.path.join(rpt_path, 'yosys.log')}\n" if flow.verbose else f" > {os.path.join(rpt_path, 'YosysSynth.log')}\n"
            make_file.write(f"\t{yosys_path} " + script_path + design_name + "_" + x[1] + ".ys" + save_log)
        if x[0] == "InnovusFloorplan":
            tmp_op_fp = fp.InnovusFloorplan(design)
            for key, val in flow.params_fp.items():
                tmp_op_fp.setParams(key, val)
            tmp_op_fp.config(design, design_name + "_" + x[1])
            overall_tcl.write('source %s%s_to_floorplan.tcl\n'%(tcl_path, design_name))
        if x[0] == "InnovusPDN":
            tmp_op_pdn = pdn.InnovusPDN(design)
            tmp_op_pdn.config(design, design_name + "_" + x[1])
            overall_tcl.write('source %s%s_to_pdn.tcl\n'%(tcl_path, design_name))
        if x[0] == "InnovusPlace":
            tmp_op_pdn = place.InnovusPlace(design)
            tmp_op_pdn.params['cadence_version'] = flow.cadence_version
            tmp_op_pdn.config(design, design_name + "_" + x[1])
            overall_tcl.write('source %s%s_to_place.tcl\n'%(tcl_path, design_name))
        if x[0] == "DREAMPlace":
            # DREAMPlace generates a JSON config; it is invoked from the
            # Makefile below, not from flow.tcl.
            tmp_op_place = place.DREAMPlace(design)
            tmp_op_place.config(design, design_name + "_" + x[1])
        if x[0] == "InnovusCTS":
            tmp_op_cts = cts.InnovusCTS(design)
            tmp_op_cts.config(design, design_name + "_" + x[1])
            overall_tcl.write('source %s%s_to_cts.tcl\n'%(tcl_path, design_name))
        if x[0] == "InnovusRoute":
            tmp_op_route = route.InnovusRoute(design)
            for key, val in flow.params_route.items():
                tmp_op_route.setParams(key, val)
            tmp_op_route.paramsExtern['cadence_version'] = flow.cadence_version
            tmp_op_route.config(design, design_name + "_" + x[1])
            overall_tcl.write('source %s%s_to_route.tcl\n'%(tcl_path, design_name))
            # overall_tcl.write('set dbgLefDefOutVersion 5.8\ndefOut -floorplan -netlist -routing %s.def\n'%(design_name))
        if x[0] == "InnovusDRC":
            tmp_op_drc = drc.InnovusDRC(design)
            tmp_op_drc.config(design, design_name + "_" + x[1])
            overall_tcl.write('source %s%s_to_drc.tcl\n'%(tcl_path, design_name))
    # Back-end invocation: either one Innovus run over flow.tcl, or a
    # floorplan -> DREAMPlace -> route pipeline split across three commands.
    if flow.flow['placement'] == "innovus":
        output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'innovus.log')}\n"
        make_file.write("\tinnovus -batch -files " + script_path + "flow.tcl" + output)
    elif flow.flow['placement'] == "dreamplace":
        output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'innovus_fp.log')}\n"
        make_file.write("\tinnovus -batch -files " + script_path + ("%s_to_floorplan.tcl" % design_name) + output)
        save_log = f" | tee -a {os.path.join(rpt_path, 'dreamplace.log')}\n" if flow.verbose else f" > {os.path.join(rpt_path, 'dreamplace.log')}\n"
        make_file.write("\tpython %s %s" % (flow.dreamplace_bin_path, script_path + "%s_to_place.json" % design_name) + save_log)
        output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'innovus_route.log')}\n"
        make_file.write("\tinnovus -batch -files " + script_path + ("%s_to_route.tcl" % design_name) + output)
    make_file.close()
    overall_tcl.close()
    run_path = util.getRunPath(design)
    print("Current working directory: %s" % run_path)
    proc_make = subprocess.Popen('make', cwd=run_path) # Start a child process
    proc_make.wait() # Wait until the process finishes
    assert proc_make.poll() == 0, "The flow [%s] failed and the process finished abnormally" % flow_name
    print("The basic flow has finished successfully!")
    print(f"Design is saved to {run_path}{design.top_name}\n\n")
    # Iterative Feedback Tuning
    if flow.n_iter_IFT > 0:
        for i in range(flow.n_iter_IFT):
            print("========== Start of the IFT iteration %d ==========\n" % (i+1))
            # Feed the previous run's critical path back into synthesis,
            # regenerate the synth script, and re-run the whole Makefile.
            rpt_path = tmp_op_syn.getRptTiming()
            critical_path = util.parseTimingRpt(rpt_path)
            tmp_op_syn = syn.GenusSynth(design, critical_path)
            tmp_op_syn.config(design_name + "_" + "to_synth")
            proc_make = subprocess.Popen('make', cwd=run_path) # Start a child process
            proc_make.wait() # Wait until the process finishes
            assert proc_make.poll() == 0, "The flow failed and the process finished abnormally"
            print(f"========== Finish IFT round [{i+1}] ==========\n\n")
    end_t = time.time()
    print("*************** Flow [{}] finishes in {:.1f} seconds ***************\n\n".format(flow_name, end_t - begin_t))
| import subprocess
import os
import util
import ops.cds.syn as syn
import ops.cds.floorplan as fp
import ops.cds.pdn as pdn
import ops.cds.place as place
import ops.cds.cts as cts
import ops.cds.route as route
import ops.cds.drc as drc
import time
def run(design, flow, flow_name):
begin_t = time.time()
design_name = design.top_name
run_path = util.getRunPath(design)
# print(run_path)
os.system("mkdir -p %s && rm -rf %s*" % (run_path, run_path))
make_file = open(os.path.join(run_path, "Makefile"), "w")
tcl_path = util.getScriptPath(design)
os.system("mkdir -p %s && rm -rf %s*" % (tcl_path, tcl_path))
overall_tcl = open(os.path.join(tcl_path, "flow.tcl"), 'w', encoding='utf-8')
obj_path = util.getObjPath(design)
os.system("mkdir -p %s && rm -rf %s*" % (obj_path, obj_path))
os.system(f"cp {design.rtl_input} {obj_path}")
rpt_path = util.getRptPath(design)
os.system("mkdir -p %s && rm -rf %s*" % (rpt_path, rpt_path))
for x in flow.ops:
if x[0] == "GenusSynth":
script_path = "../scripts/"
tmp_op_syn = syn.GenusSynth(design)
tmp_op_syn.config(design_name + "_" + x[1])
output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'GenusSynth.log')}\n"
make_file.write("all:\n")
make_file.write("\tgenus -legacy_ui -batch -files " + script_path + design_name + "_" + x[1] + ".tcl" + output)
if x[0] == "yosys":
script_path = "../scripts/"
tmp_op_syn = syn.YosysSynth(design)
tmp_op_syn.config(design_name + "_" + x[1], flow)
make_file.write("all:\n")
yosys_path = os.path.join(flow.yosys_bin_path, "yosys")
save_log = f" | tee -a {os.path.join(rpt_path, 'yosys.log')}\n" if flow.verbose else f" > {os.path.join(rpt_path, 'YosysSynth.log')}\n"
make_file.write(f"\t{yosys_path} " + script_path + design_name + "_" + x[1] + ".ys" + save_log)
if x[0] == "InnovusFloorplan":
tmp_op_fp = fp.InnovusFloorplan(design)
for key, val in flow.params_fp.items():
tmp_op_fp.setParams(key, val)
tmp_op_fp.config(design, design_name + "_" + x[1])
overall_tcl.write('source %s%s_to_floorplan.tcl\n'%(tcl_path, design_name))
if x[0] == "InnovusPDN":
tmp_op_pdn = pdn.InnovusPDN(design)
tmp_op_pdn.config(design, design_name + "_" + x[1])
overall_tcl.write('source %s%s_to_pdn.tcl\n'%(tcl_path, design_name))
if x[0] == "InnovusPlace":
tmp_op_pdn = place.InnovusPlace(design)
tmp_op_pdn.params['cadence_version'] = flow.cadence_version
tmp_op_pdn.config(design, design_name + "_" + x[1])
overall_tcl.write('source %s%s_to_place.tcl\n'%(tcl_path, design_name))
if x[0] == "DREAMPlace":
tmp_op_place = place.DREAMPlace(design)
tmp_op_place.config(design, design_name + "_" + x[1])
if x[0] == "InnovusCTS":
tmp_op_cts = cts.InnovusCTS(design)
tmp_op_cts.config(design, design_name + "_" + x[1])
overall_tcl.write('source %s%s_to_cts.tcl\n'%(tcl_path, design_name))
if x[0] == "InnovusRoute":
tmp_op_route = route.InnovusRoute(design)
for key, val in flow.params_route.items():
tmp_op_route.setParams(key, val)
tmp_op_route.paramsExtern['cadence_version'] = flow.cadence_version
tmp_op_route.config(design, design_name + "_" + x[1])
overall_tcl.write('source %s%s_to_route.tcl\n'%(tcl_path, design_name))
# overall_tcl.write('set dbgLefDefOutVersion 5.8\ndefOut -floorplan -netlist -routing %s.def\n'%(design_name))
if x[0] == "InnovusDRC":
tmp_op_drc = drc.InnovusDRC(design)
tmp_op_drc.config(design, design_name + "_" + x[1])
overall_tcl.write('source %s%s_to_drc.tcl\n'%(tcl_path, design_name))
if flow.flow['placement'] == "innovus":
output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'innovus.log')}\n"
make_file.write("\tinnovus -batch -files " + script_path + "flow.tcl" + output)
elif flow.flow['placement'] == "dreamplace":
output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'innovus_fp.log')}\n"
make_file.write("\tinnovus -batch -files " + script_path + ("%s_to_floorplan.tcl" % design_name) + output)
save_log = f" | tee -a {os.path.join(rpt_path, 'dreamplace.log')}\n" if flow.verbose else f" > {os.path.join(rpt_path, 'dreamplace.log')}\n"
make_file.write("\tpython %s %s" % (flow.dreamplace_bin_path, script_path + "%s_to_place.json" % design_name) + save_log)
output = "\n" if flow.verbose else f" > {os.path.join(rpt_path, 'innovus_route.log')}\n"
make_file.write("\tinnovus -batch -files " + script_path + ("%s_to_route.tcl" % design_name) + output)
make_file.close()
overall_tcl.close()
run_path = util.getRunPath(design)
print("Current working directory: %s" % run_path)
proc_make = subprocess.Popen('make', cwd=run_path) # Start a child process
proc_make.wait() # Wait until the process finishes
assert proc_make.poll() == 0, "The flow [%s] failed and the process finished abnormally" % flow_name
print("The basic flow has finished successfully!")
print(f"Design is saved to {run_path}{design.top_name}\n\n")
# Iterative Feedback Tuning
if flow.n_iter_IFT > 0:
for i in range(flow.n_iter_IFT):
print("========== Start of the IFT iteration %d ==========\n" % (i+1))
rpt_path = tmp_op_syn.getRptTiming()
critical_path = util.parseTimingRpt(rpt_path)
tmp_op_syn = syn.GenusSynth(design, critical_path)
tmp_op_syn.config(design_name + "_" + "to_synth")
proc_make = subprocess.Popen('make', cwd=run_path) # Start a child process
proc_make.wait() # Wait until the process finishes
assert proc_make.poll() == 0, "The flow failed and the process finished abnormally"
print(f"========== Finish IFT round [{i+1}] ==========\n\n")
end_t = time.time()
print("*************** Flow [{}] finishes in {:.1f} seconds ***************\n\n".format(flow_name, end_t - begin_t)) | en | 0.724348 | # print(run_path) # overall_tcl.write('set dbgLefDefOutVersion 5.8\ndefOut -floorplan -netlist -routing %s.def\n'%(design_name)) # Start a child process # Wait until the process finishes # Iterative Feedback Tuning # Start a child process # Wait until the process finishes | 2.04644 | 2 |
WebMirror/management/rss_parser_funcs/feed_parse_extractOyasumiReads.py | fake-name/ReadableWebProxy | 193 | 6619539 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractOyasumiReads.py
def extractOyasumiReads(item):
    """Map an Oyasumi Reads feed item to a release message.

    Returns None for previews or titles with no parseable volume/chapter,
    a release message for a recognised series tag, and False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if 'preview' in title.lower() or not (vol or chp or frag):
        return None
    # Tag -> canonical series name; insertion order sets match priority.
    series_by_tag = {
        'ISEKAIJIN NO TEBIKISHO': 'Isekaijin no Tebikisho',
        'OTOTSUKAI WA SHI TO ODORU': 'Ototsukai wa Shi to Odoru',
    }
    for tag, series in series_by_tag.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix)
    return False
def extractOyasumiReads(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'ISEKAIJIN NO TEBIKISHO' in item['tags']:
return buildReleaseMessageWithType(item, 'Isekaijin no Tebikisho', vol, chp, frag=frag, postfix=postfix)
if 'OTOTSUKAI WA SHI TO ODORU' in item['tags']:
return buildReleaseMessageWithType(item, 'Ototsukai wa Shi to Odoru', vol, chp, frag=frag, postfix=postfix)
return False | none | 1 | 2.281334 | 2 | |
problems/lian-xu-zi-shu-zu-de-zui-da-he-lcof/solution.py | MleMoe/LeetCode-1 | 2 | 6619540 | <reponame>MleMoe/LeetCode-1
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Maximum contiguous subarray sum (Kadane's algorithm).

        The prefix is folded into ``nums`` in place: after the loop each
        slot holds the best sum of a subarray ending at that index, so the
        answer is simply ``max(nums)``. Mutates the input, as before.
        """
        for idx in range(1, len(nums)):
            # Carry the previous running sum only when it helps (> 0);
            # equivalent to nums[idx] += max(nums[idx - 1], 0).
            if nums[idx - 1] > 0:
                nums[idx] += nums[idx - 1]
        return max(nums)
if __name__ == '__main__':
    # Smoke test: the classic LeetCode example, whose answer is 6 ([4,-1,2,1]).
    test_cases = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
    for case in test_cases:
        ans = Solution().maxSubArray(case)
        print(ans)
| from typing import List
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
for i in range(1, len(nums)):
nums[i] += max(nums[i - 1], 0)
return max(nums)
if __name__ == '__main__':
test_cases = [[-2, 1, -3, 4, -1, 2, 1, -5, 4]]
for case in test_cases:
ans = Solution().maxSubArray(case)
print(ans) | none | 1 | 3.544874 | 4 | |
azext/batch/operations/__init__.py | jdmartinez36/azure-batch-cli-extensions | 11 | 6619541 | <reponame>jdmartinez36/azure-batch-cli-extensions<gh_stars>10-100
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from .pool_operations import ExtendedPoolOperations
from .job_operations import ExtendedJobOperations
from .file_operations import ExtendedFileOperations
__all__ = [
'ExtendedPoolOperations',
'ExtendedJobOperations',
'ExtendedFileOperations',
]
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from .pool_operations import ExtendedPoolOperations
from .job_operations import ExtendedJobOperations
from .file_operations import ExtendedFileOperations
__all__ = [
'ExtendedPoolOperations',
'ExtendedJobOperations',
'ExtendedFileOperations',
] | en | 0.42147 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- | 1.463393 | 1 |
71classes.py | Roshan-Sen/Homework | 0 | 6619542 | import sys
import mcb185 as mcb
import argparse
"""
Using classes to build a library of found
orfs from an fa file. Using
chromosome 1 of A. thaliana.
I wanted to see how many found
open reading frames were longer
than a certain threshold.
Object use is unnecessary,
but I wanted to try using them.
"""
gcode = {
'AAA' : 'K', 'AAC' : 'N', 'AAG' : 'K', 'AAT' : 'N',
'ACA' : 'T', 'ACC' : 'T', 'ACG' : 'T', 'ACT' : 'T',
'AGA' : 'R', 'AGC' : 'S', 'AGG' : 'R', 'AGT' : 'S',
'ATA' : 'I', 'ATC' : 'I', 'ATG' : 'M', 'ATT' : 'I',
'CAA' : 'Q', 'CAC' : 'H', 'CAG' : 'Q', 'CAT' : 'H',
'CCA' : 'P', 'CCC' : 'P', 'CCG' : 'P', 'CCT' : 'P',
'CGA' : 'R', 'CGC' : 'R', 'CGG' : 'R', 'CGT' : 'R',
'CTA' : 'L', 'CTC' : 'L', 'CTG' : 'L', 'CTT' : 'L',
'GAA' : 'E', 'GAC' : 'D', 'GAG' : 'E', 'GAT' : 'D',
'GCA' : 'A', 'GCC' : 'A', 'GCG' : 'A', 'GCT' : 'A',
'GGA' : 'G', 'GGC' : 'G', 'GGG' : 'G', 'GGT' : 'G',
'GTA' : 'V', 'GTC' : 'V', 'GTG' : 'V', 'GTT' : 'V',
'TAA' : '*', 'TAC' : 'Y', 'TAG' : '*', 'TAT' : 'Y',
'TCA' : 'S', 'TCC' : 'S', 'TCG' : 'S', 'TCT' : 'S',
'TGA' : '*', 'TGC' : 'C', 'TGG' : 'W', 'TGT' : 'C',
'TTA' : 'L', 'TTC' : 'F', 'TTG' : 'L', 'TTT' : 'F',
}
stopcodons = [
'TAA', 'TAG', 'TGA'
]
parser = argparse.ArgumentParser(description = 'Finds possible open reading frames in a genome or chromosome sequence and tells user how many have a length in amino acids greater than their specified threshold')
# arguments
parser.add_argument('fasta', type=str,
metavar='<str>', help='required fasta file')
parser.add_argument('--threshold', required=False, type=int, default = 40,
metavar='<int>', help='integer threshold, default = 40')
# finalization
arg = parser.parse_args()
class Gene:
	"""One candidate open reading frame (ATG..stop) and its translation."""
	genesequence = ""  # class-level default; overwritten per instance
	def __init__(self, genesequence):
		# Raw nucleotide sequence of the ORF (string over A/C/G/T).
		self.genesequence = genesequence
	def translation(self):
		"""Return the amino-acid sequence encoded by this gene.

		Codons absent from the genetic-code table (e.g. partial codons
		or codons with ambiguous bases) translate to 'X'.
		"""
		# dict.get() replaces the old per-codon "not in list(gcode.keys())"
		# scan (O(64) each) with an O(1) lookup, and "".join() avoids
		# quadratic string concatenation on long ORFs.
		return "".join(
			gcode.get(self.genesequence[i:i + 3], "X")
			for i in range(0, len(self.genesequence), 3))
def buildgenearray(dna):
	"""Scan the three forward reading frames of dna for candidate ORFs.

	An ORF begins at an ATG codon and runs through the first in-frame
	stop codon (inclusive).  ORFs that reach the end of the sequence
	without hitting a stop codon are discarded.
	"""
	candidates = []
	for frame in range(3):
		current = None  # ORF currently being extended, or None
		for start in range(frame, len(dna) - 2, 3):
			codon = dna[start:start + 3]
			if current is not None:
				current += codon
				if codon in stopcodons:
					candidates.append(current)
					current = None
			elif codon == "ATG":
				current = codon
	# Every candidate was closed by a stop codon before being collected,
	# so the whole list consists of valid, stop-terminated ORFs.
	return candidates
def buildgeneobjectarray(genes):
	"""Wrap each ORF nucleotide string in a Gene object."""
	return [Gene(orf) for orf in genes]
# argparse has already validated the command line above (and supports the
# optional --threshold flag), so the old len(sys.argv) != 2 check is gone:
# it wrongly rejected valid invocations such as "prog file.fa --threshold 50".
sequences = list(mcb.read_fasta(arg.fasta))
chromosome = sequences[0][1]  # first record; assumes (name, sequence) pairs -- TODO confirm against mcb185.read_fasta
genes = buildgenearray(chromosome)
geneobjectarray = buildgeneobjectarray(genes)
# Count ORFs whose translated length exceeds the user's threshold.
count = sum(1 for gene in geneobjectarray
            if len(gene.translation()) > arg.threshold)
print("Out of the " + str(len(genes)) + " found orfs, " + str(count) + " orfs had a length greater than the threshold of " + str(arg.threshold) + " amino acids.")
| import sys
import mcb185 as mcb
import argparse
"""
Using classes to build a library of found
orfs from an fa file. Using
chromosome 1 of A. thaliana.
I wanted to see how many found
open reading frames were longer
than a certain threshold.
Object use is unnecessary,
but I wanted to try using them.
"""
gcode = {
'AAA' : 'K', 'AAC' : 'N', 'AAG' : 'K', 'AAT' : 'N',
'ACA' : 'T', 'ACC' : 'T', 'ACG' : 'T', 'ACT' : 'T',
'AGA' : 'R', 'AGC' : 'S', 'AGG' : 'R', 'AGT' : 'S',
'ATA' : 'I', 'ATC' : 'I', 'ATG' : 'M', 'ATT' : 'I',
'CAA' : 'Q', 'CAC' : 'H', 'CAG' : 'Q', 'CAT' : 'H',
'CCA' : 'P', 'CCC' : 'P', 'CCG' : 'P', 'CCT' : 'P',
'CGA' : 'R', 'CGC' : 'R', 'CGG' : 'R', 'CGT' : 'R',
'CTA' : 'L', 'CTC' : 'L', 'CTG' : 'L', 'CTT' : 'L',
'GAA' : 'E', 'GAC' : 'D', 'GAG' : 'E', 'GAT' : 'D',
'GCA' : 'A', 'GCC' : 'A', 'GCG' : 'A', 'GCT' : 'A',
'GGA' : 'G', 'GGC' : 'G', 'GGG' : 'G', 'GGT' : 'G',
'GTA' : 'V', 'GTC' : 'V', 'GTG' : 'V', 'GTT' : 'V',
'TAA' : '*', 'TAC' : 'Y', 'TAG' : '*', 'TAT' : 'Y',
'TCA' : 'S', 'TCC' : 'S', 'TCG' : 'S', 'TCT' : 'S',
'TGA' : '*', 'TGC' : 'C', 'TGG' : 'W', 'TGT' : 'C',
'TTA' : 'L', 'TTC' : 'F', 'TTG' : 'L', 'TTT' : 'F',
}
stopcodons = [
'TAA', 'TAG', 'TGA'
]
parser = argparse.ArgumentParser(description = 'Finds possible open reading frames in a genome or chromosome sequence and tells user how many have a length in amino acids greater than their specified threshold')
# arguments
parser.add_argument('fasta', type=str,
metavar='<str>', help='required fasta file')
parser.add_argument('--threshold', required=False, type=int, default = 40,
metavar='<int>', help='integer threshold, default = 40')
# finalization
arg = parser.parse_args()
class Gene:
	"""One candidate open reading frame (ATG..stop) and its translation."""
	genesequence = ""  # class-level default; overwritten per instance
	def __init__(self, genesequence):
		# Raw nucleotide sequence of the ORF (string over A/C/G/T).
		self.genesequence = genesequence
	def translation(self):
		"""Return the amino-acid sequence encoded by this gene.

		Codons absent from the genetic-code table (e.g. partial codons
		or codons with ambiguous bases) translate to 'X'.
		"""
		# dict.get() replaces the old per-codon "not in list(gcode.keys())"
		# scan (O(64) each) with an O(1) lookup, and "".join() avoids
		# quadratic string concatenation on long ORFs.
		return "".join(
			gcode.get(self.genesequence[i:i + 3], "X")
			for i in range(0, len(self.genesequence), 3))
def buildgenearray(dna):
	"""Scan the three forward reading frames of dna for candidate ORFs.

	An ORF begins at an ATG codon and runs through the first in-frame
	stop codon (inclusive).  ORFs that reach the end of the sequence
	without hitting a stop codon are discarded.
	"""
	candidates = []
	for frame in range(3):
		current = None  # ORF currently being extended, or None
		for start in range(frame, len(dna) - 2, 3):
			codon = dna[start:start + 3]
			if current is not None:
				current += codon
				if codon in stopcodons:
					candidates.append(current)
					current = None
			elif codon == "ATG":
				current = codon
	# Every candidate was closed by a stop codon before being collected,
	# so the whole list consists of valid, stop-terminated ORFs.
	return candidates
def buildgeneobjectarray(genes):
	"""Wrap each ORF nucleotide string in a Gene object."""
	return [Gene(orf) for orf in genes]
# argparse has already validated the command line above (and supports the
# optional --threshold flag), so the old len(sys.argv) != 2 check is gone:
# it wrongly rejected valid invocations such as "prog file.fa --threshold 50".
sequences = list(mcb.read_fasta(arg.fasta))
chromosome = sequences[0][1]  # first record; assumes (name, sequence) pairs -- TODO confirm against mcb185.read_fasta
genes = buildgenearray(chromosome)
geneobjectarray = buildgeneobjectarray(genes)
# Count ORFs whose translated length exceeds the user's threshold.
count = sum(1 for gene in geneobjectarray
            if len(gene.translation()) > arg.threshold)
print("Out of the " + str(len(genes)) + " found orfs, " + str(count) + " orfs had a length greater than the threshold of " + str(arg.threshold) + " amino acids.")
| en | 0.949612 | Using classes to build a library of found orfs from an fa file. Using chromosome 1 of A. thaliana. I wanted to see how many found open reading frames were longer than a certain threshold. Object use is unnecessary, but I wanted to try using them. # arguments # finalization | 2.711533 | 3 |
src/AssistantPi.py | creekhead/RPI_google_asst | 0 | 6619543 |
import os.path
activate_this = os.path.join(os.path.dirname(__file__), '../env/bin/activate_this.py')
with open(activate_this) as f:
exec(f.read(), {'__file__': activate_this})
import examples.voice.assistant_library_with_local_commands_demo as assistant
assistant.main()
|
import os.path
activate_this = os.path.join(os.path.dirname(__file__), '../env/bin/activate_this.py')
with open(activate_this) as f:
exec(f.read(), {'__file__': activate_this})
import examples.voice.assistant_library_with_local_commands_demo as assistant
assistant.main()
| none | 1 | 2.077788 | 2 | |
pyc64/cputools.py | hodgeswt/pyc64 | 74 | 6619544 | <gh_stars>10-100
"""
6502/6510 CPU utilities, requires the py65 library
http://py65.readthedocs.io
Written by <NAME> (<EMAIL>)
License: MIT open-source.
"""
import time
import py65.monitor
import py65.devices.mpu6502 as mpu6502
class Monitor(py65.monitor.Monitor):
    """cpu/mem monitor that accepts external memory.

    Newer py65 releases take the memory and the putc/getc observer
    addresses as Monitor.__init__ keywords; older releases do not and
    raise TypeError on them, in which case the attributes are patched in
    by hand before delegating to the old-style constructor.
    """
    def __init__(self, memory, stdout=None, stdin=None):
        try:
            # New-style py65: pass memory and observer addresses directly.
            super().__init__(stdout=stdout, stdin=stdin, memory=memory, putc_addr=None, getc_addr=None)
            self.__workaround = False
        except TypeError:
            # workaround for older version of py65: set the attributes the
            # base class would otherwise have received as keywords.
            self.memory = memory
            super().__init__(stdout=stdout, stdin=stdin)
            self.putc_addr = None
            self.getc_addr = None
    def _install_mpu_observers(self, getc_addr, putc_addr):
        # only called as workaround in case of older py65 version:
        # point the MPU at our externally supplied memory.
        self._mpu.memory = self.memory
class CPU(mpu6502.MPU):
    """6502 CPU wrapper with a convenience run loop and end-of-program detection."""
    def run(self, pc=None, microsleep=None, loop_detect_delay=0.5):
        """Execute instructions until the program 'ends'.

        The program is considered finished when it returns to the sentinel
        address pushed on the stack below, or when it enters a JMP-to-self
        infinite loop.  Reaching a BRK opcode raises InterruptedError.

        Args:
          pc: start address; if None, execution continues at the current pc.
          microsleep: optional callable invoked every 5000 instructions,
            giving the host a chance to do other work.
          loop_detect_delay: seconds to sleep once a JMP-to-self is seen.
        """
        end_address = 0xffff
        self.sp = 0xf2
        self.stPushWord(end_address - 1)  # push a sentinel return address
        if pc is not None:
            self.pc = pc
        stopcodes = {0x00}  # BRK
        instructions = 0
        start_time = time.perf_counter()
        while True:
            # 0x4c is the JMP absolute opcode; operand == pc means JMP-to-self.
            if self.memory[self.pc] == 0x4c and self.WordAt(self.pc + 1) == self.pc:
                # JMP to itself, instead of looping forever we also consider this a program end
                end_time = time.perf_counter()
                time.sleep(loop_detect_delay)
                print(self.name + " CPU simulator: infinite jmp loop detected at ${:04x}, considered as end-of-program.".format(self.pc))
                self.stPopWord()  # pop the sentinel return address
                break
            self.step()
            instructions += 1
            if microsleep and instructions % 5000 == 0:
                microsleep()
            if self.pc == end_address:
                # when this address is reached, we consider it the end of the program
                end_time = time.perf_counter()
                break
            if self.memory[self.pc] in stopcodes:
                end_time = time.perf_counter()
                raise InterruptedError("brk instruction at ${:04x}".format(self.pc))
        duration = end_time - start_time
        mips = instructions / duration / 1e6
        # NOTE(review): 0.44 is presumably the ~0.44 mips of the real
        # ~1 MHz machine being simulated -- TODO confirm.
        print(self.name + " CPU simulator: {:d} instructions in {:.3f} seconds = {:.3f} mips (~{:.3f} times realtime)"
              .format(instructions, duration, mips, mips/0.44))
if __name__ == "__main__":
try:
from .memory import ScreenAndMemory
except (SystemError, ImportError):
from pyc64.memory import ScreenAndMemory
screen = ScreenAndMemory()
screen.clear()
screen.memory[0xc000:0xc00b] = [0xa9, 0x44, 0x8d, 0x00, 0x04, 0xa9, 0x01, 0x8d, 0x00, 0xd8, 0x60]
cpu = CPU(screen.memory)
assert screen.memory[0x0400] == 0x20
assert screen.memory[0xd800] == 14
cpu.run(pc=0xc000)
assert screen.memory[0x0400] == 0x44
assert screen.memory[0xd800] == 1
program = open("drive8/gary2.prg", "rb").read()
address = program[0] + 256*program[1]
for _ in range(200):
cpu.reset()
screen.memory[address:address+len(program)-2] = program[2:]
cpu.run(pc=2061, loop_detect_delay=0)
assert screen.memory[0x0400] != 0x44
assert screen.memory[0xd800] != 1
assert screen.memory[53280] == 0
assert screen.memory[53281] == 0
| """
6502/6510 CPU utilities, requires the py65 library
http://py65.readthedocs.io
Written by <NAME> (<EMAIL>)
License: MIT open-source.
"""
import time
import py65.monitor
import py65.devices.mpu6502 as mpu6502
class Monitor(py65.monitor.Monitor):
"""cpu/mem monitor that accepts external memory"""
def __init__(self, memory, stdout=None, stdin=None):
try:
super().__init__(stdout=stdout, stdin=stdin, memory=memory, putc_addr=None, getc_addr=None)
self.__workaround = False
except TypeError:
# workaround for older version of py65
self.memory = memory
super().__init__(stdout=stdout, stdin=stdin)
self.putc_addr = None
self.getc_addr = None
def _install_mpu_observers(self, getc_addr, putc_addr):
# only called as workaround in case of older py65 version
self._mpu.memory = self.memory
class CPU(mpu6502.MPU):
def run(self, pc=None, microsleep=None, loop_detect_delay=0.5):
end_address = 0xffff
self.sp = 0xf2
self.stPushWord(end_address - 1) # push a sentinel return address
if pc is not None:
self.pc = pc
stopcodes = {0x00} # BRK
instructions = 0
start_time = time.perf_counter()
while True:
if self.memory[self.pc] == 0x4c and self.WordAt(self.pc + 1) == self.pc:
# JMP to itself, instead of looping forever we also consider this a program end
end_time = time.perf_counter()
time.sleep(loop_detect_delay)
print(self.name + " CPU simulator: infinite jmp loop detected at ${:04x}, considered as end-of-program.".format(self.pc))
self.stPopWord() # pop the sentinel return address
break
self.step()
instructions += 1
if microsleep and instructions % 5000 == 0:
microsleep()
if self.pc == end_address:
# when this address is reached, we consider it the end of the program
end_time = time.perf_counter()
break
if self.memory[self.pc] in stopcodes:
end_time = time.perf_counter()
raise InterruptedError("brk instruction at ${:04x}".format(self.pc))
duration = end_time - start_time
mips = instructions / duration / 1e6
print(self.name + " CPU simulator: {:d} instructions in {:.3f} seconds = {:.3f} mips (~{:.3f} times realtime)"
.format(instructions, duration, mips, mips/0.44))
if __name__ == "__main__":
try:
from .memory import ScreenAndMemory
except (SystemError, ImportError):
from pyc64.memory import ScreenAndMemory
screen = ScreenAndMemory()
screen.clear()
screen.memory[0xc000:0xc00b] = [0xa9, 0x44, 0x8d, 0x00, 0x04, 0xa9, 0x01, 0x8d, 0x00, 0xd8, 0x60]
cpu = CPU(screen.memory)
assert screen.memory[0x0400] == 0x20
assert screen.memory[0xd800] == 14
cpu.run(pc=0xc000)
assert screen.memory[0x0400] == 0x44
assert screen.memory[0xd800] == 1
program = open("drive8/gary2.prg", "rb").read()
address = program[0] + 256*program[1]
for _ in range(200):
cpu.reset()
screen.memory[address:address+len(program)-2] = program[2:]
cpu.run(pc=2061, loop_detect_delay=0)
assert screen.memory[0x0400] != 0x44
assert screen.memory[0xd800] != 1
assert screen.memory[53280] == 0
assert screen.memory[53281] == 0 | en | 0.875136 | 6502/6510 CPU utilities, requires the py65 library http://py65.readthedocs.io Written by <NAME> (<EMAIL>) License: MIT open-source. cpu/mem monitor that accepts external memory # workaround for older version of py65 # only called as workaround in case of older py65 version # push a sentinel return address # BRK # JMP to itself, instead of looping forever we also consider this a program end # pop the sentinel return address # when this address is reached, we consider it the end of the program | 3.064373 | 3 |
diagui/diagui_test.py | DentonGentry/gfiber-catawampus | 2 | 6619545 | <filename>diagui/diagui_test.py<gh_stars>1-10
"""Unit Tests for diagui.py implementation."""
__author__ = '<EMAIL> (<NAME>)'
import ast
import json
import os
import google3
import diagui.main
import tornado.httpclient
import tr.mainloop
import tr.helpers
import dm_root
import dm.fakewifi
import dm.host
from tr.wvtest import unittest
class AsynchFetch(object):
  """Creates instance of client object, makes asynchronous calls to server."""
  def __init__(self, url_temp):
    # Kick off the GET immediately; HandleRequest fires on completion.
    self.http_client = tornado.httpclient.AsyncHTTPClient()
    self.resp = None
    self.http_client.fetch(url_temp, method='GET', callback=self.HandleRequest)
  def HandleRequest(self, response):
    # Tornado completion callback: stash the response for Wait().
    self.resp = response
  def Wait(self, loop):
    # Spin the catawampus main loop until the response has arrived.
    while not self.resp:
      loop.RunOnce()
class FakeHostsList(dm.host.CATA181HOSTS):
  """Test double exposing `count` fake hosts keyed '1'..str(count)."""
  def __init__(self, count=1):
    def _make_host():
      host = tr.core.Extensible(dm.host.CATA181HOST)()
      host.X_CATAWAMPUS_ORG_ClientIdentification = (
          dm.host.ClientIdentification())
      return host
    self._hosts = {str(n): _make_host() for n in range(1, count + 1)}
  @property
  def HostList(self):
    """Dict mapping host index (as a string) to the fake host object."""
    return self._hosts
class DiaguiTest(unittest.TestCase):
  """Tests whether 2 clients receive the same data from the server.
  Also checks if both receive updates.
  """
  def setUp(self):
    # Point the diagui at a checked-in fake active-WAN file and reset the
    # checksum/url state used by the tests below.
    self.save_activewan = diagui.main.ACTIVEWAN
    diagui.main.ACTIVEWAN = 'testdata/activewan'
    self.checksum = '0'
    self.url_string = 'http://localhost:8880/content.json?checksum='
  def tearDown(self):
    # Restore the module-level path patched in setUp.
    diagui.main.ACTIVEWAN = self.save_activewan
  def testUpdateDict(self):
    """Two clients see identical payloads, and both observe data updates."""
    # First snapshot of device data, one "key value" pair per line.
    test_data = """acs OK (May 21 2013 18:58:41+700)
softversion 1.16a
uptime 76:28:39
serialnumber 123456789
temperature 54 C
fiberjack Up
wanmac 1a:2b:3c:4d:5e:6f
wanip 192.168.127.12
lanip 192.168.1.1
subnetmask 255.255.255.0
dhcpstart 192.168.3.11
dhcpend 192.168.1.254
wiredlan 6a:5b:4c:3d:2e:1f Up
wireddevices Living Room (TV box, 6a:5b:4c:3d:2e:1f)
ssid24 AllenFamilyNetwork
ssid5 (same)
wpa2 (configured)
wirelesslan 3a:1b:4c:1d:5e:9f Up
wirelessdevices Dad\'s Phone (6a:5b:4c:3d:2e:1f)
upnp O
portforwarding 80-80: Dad\'s Computer (6a:5b:4c:3d:2e:1f)
dmzdevice Wireless Device (1) (6a:5b:4c:3d:2e:1f)
dyndns DynDNS
username allenfamily
domain home.allenfamily.com"""
    url_temp = self.url_string + self.checksum
    app = diagui.main.MainApplication(None, None, run_diagui=True)
    app.listen(8880)
    app.diagui.data = dict(line.decode('utf-8').strip().split(None, 1)
                           for line in test_data.split('\n'))
    app.diagui.UpdateCheckSum()
    # Two concurrent clients fetch with the stale checksum '0'.
    response1 = AsynchFetch(url_temp)
    response2 = AsynchFetch(url_temp)
    main_loop = tr.mainloop.MainLoop()
    response1.Wait(main_loop)
    response2.Wait(main_loop)
    # Both clients must receive the same, non-empty payload.
    self.assertEqual(response1.resp.body,
                     response2.resp.body)
    self.assertNotEqual(response1.resp.body, None)
    # Remember the server's current checksum for the update round below.
    self.checksum = ast.literal_eval(response1.resp.body).get(
        'checksum')
    # Second snapshot: softversion and serialnumber changed.
    test_data = """acs OK (May 21 2013 18:58:41+700)
softversion 2.16a
uptime 76:28:39
serialnumber 987654321
temperature 54 C
fiberjack Up
wanmac 1a:2b:3c:4d:5e:6f
wanip 192.168.127.12
lanip 192.168.1.1
subnetmask 255.255.255.0
dhcpstart 192.168.3.11
dhcpend 192.168.1.254
wiredlan 6a:5b:4c:3d:2e:1f Up
wireddevices Living Room (TV box, 6a:5b:4c:3d:2e:1f)
ssid24 AllenFamilyNetwork
ssid5 (same)
wpa2 (configured)
wirelesslan 3a:1b:4c:1d:5e:9f Up
wirelessdevices Dad\'s Phone (6a:5b:4c:3d:2e:1f)
upnp O
portforwarding 80-80: Dad\'s Computer (6a:5b:4c:3d:2e:1f)
dmzdevice Wireless Device (1) (6a:5b:4c:3d:2e:1f)
dyndns DynDNS
username allenfamily
domain home.allenfamily.com"""
    app.diagui.data = dict(line.decode('utf-8').strip().split(None, 1)
                           for line in test_data.split('\n'))
    app.diagui.UpdateCheckSum()
    # Clients polling with the old checksum must see the updated payload.
    url_temp = self.url_string + self.checksum
    response1_new = AsynchFetch(url_temp)
    response2_new = AsynchFetch(url_temp)
    response1_new.Wait(main_loop)
    response2_new.Wait(main_loop)
    self.assertEqual(response1_new.resp.body,
                     response2_new.resp.body)
    self.assertNotEqual(response1_new.resp.body, None)
    # The updated payload must differ from the original one.
    self.assertNotEqual(response1.resp.body,
                        response1_new.resp.body)
  def testOnuStats(self):
    """ONU stat files are merged into diagui data and served as JSON."""
    app = diagui.main.MainApplication(None, None, run_diagui=True)
    app.listen(8880)
    main_loop = tr.mainloop.MainLoop()
    # First stats file: has connection state but no serial number.
    diagui.main.ONU_STAT_FILE = 'testdata/onu_stats1.json'
    app.diagui.UpdateOnuStats()
    self.assertTrue('onu_wan_connected' in app.diagui.data)
    self.assertFalse('onu_serial' in app.diagui.data)
    self.checksum = '0'
    url_temp = self.url_string + self.checksum
    response = AsynchFetch(url_temp)
    response.Wait(main_loop)
    self.assertNotEqual(response.resp.body, None)
    jsdata = json.loads(response.resp.body)
    self.assertTrue(jsdata['onu_wan_connected'])
    # Second stats file adds ACS contact info and the serial number.
    diagui.main.ONU_STAT_FILE = 'testdata/onu_stats2.json'
    app.diagui.UpdateOnuStats()
    response = AsynchFetch(url_temp)
    response.Wait(main_loop)
    jsdata = json.loads(response.resp.body)
    self.assertTrue(jsdata['onu_wan_connected'])
    self.assertTrue(jsdata['onu_acs_contacted'])
    self.assertEqual(jsdata['onu_acs_contact_time'], 100000)
    self.assertEqual(jsdata['onu_serial'], '12345')
  def testNoOnuStats(self):
    """A missing ONU stats file must be tolerated, not raise."""
    app = diagui.main.MainApplication(None, None, run_diagui=True)
    diagui.main.ONU_STAT_FILE = '/no/such/file'
    app.diagui.UpdateOnuStats()
    # just checking whether there is an exception
class TechuiTest(unittest.TestCase):
  """Tests the data gathering functions for the TechUI.

  Covers JSON serving with checksum-based long polling, the data-dict
  setters, JSON loading, and the MoCA/wifi collection helpers.
  (Deprecated assertEquals calls replaced with assertEqual.)
  """
  def testMainApp(self):
    """techui.json serving: initial fetch, updates, checksum rotation."""
    url = 'http://localhost:8880/techui.json?checksum=0'
    app = diagui.main.MainApplication(None, None, run_diagui=True,
                                      run_techui=True)
    fake_data = {'moca_bitloading': {},
                 'ip_addr': {'ec:88:92:91:3d:67': '172.16.31.10',
                             'aa:aa:aa:aa:aa:aa': '123.456.78.90'},
                 'wifi_signal_strength': {},
                 'softversion': 'gfrg200-46-pre0-39-g056a912-th',
                 'serialnumber': 'G0123456789',
                 'other_aps': {'f4:f5:e8:80:58:d7': -67.0},
                 'host_names': {'ec:88:92:91:3d:67': 'android',
                                'aa:aa:aa:aa:aa:aa': 'GFiberTV'},
                 'moca_corrected_codewords': {},
                 'moca_uncorrected_codewords': {},
                 'moca_signal_strength': {},
                 'self_signals': {'f4:f5:e8:83:01:94': -25},
                 'moca_nbas': {},
                 'checksum': 0}
    app.techui.data = fake_data
    app.listen(8880)
    main_loop = tr.mainloop.MainLoop()
    # Stale checksum '0' -> server answers immediately with current data.
    response1 = AsynchFetch(url)
    response1.Wait(main_loop)
    result1 = json.loads(response1.resp.body)
    self.assertNotEqual(result1, None)
    self.assertEqual(result1, fake_data)
    # Send another request, update the data, and call callbacks.
    # Should update the checksum.
    result1_checksum = result1['checksum']
    response2 = AsynchFetch(url)
    app.techui.data['other_aps'] = {'f4:f5:e8:80:58:d7': -50.0}
    app.techui.NotifyUpdatedDict()
    response2.Wait(main_loop)
    result2 = json.loads(response2.resp.body)
    # Set fake data to expected output and compare.
    fake_data['other_aps'] = {'f4:f5:e8:80:58:d7': -50.0}
    fake_data['checksum'] = app.techui.data['checksum']
    result2_checksum = result2['checksum']
    self.assertNotEqual(result2, None)
    self.assertEqual(result2, fake_data)
    self.assertNotEqual(result1_checksum, result2_checksum)
    # The 'GFiberTV' host name marks a TV box; its IP must be reported.
    self.assertEqual(app.techui.FindTVBoxes(), ['123.456.78.90'])
    # Update the url to have the new checksum, update data, and check for
    # correct response.
    url = 'http://localhost:8880/techui.json?checksum=' + result2_checksum
    response3 = AsynchFetch(url)
    app.techui.data['other_aps'] = {'f4:f5:e8:80:58:d7': -40.0}
    app.techui.NotifyUpdatedDict()
    response3.Wait(main_loop)
    result3 = json.loads(response3.resp.body)
    # Set fake data to expected output and compare.
    fake_data['other_aps'] = {'f4:f5:e8:80:58:d7': -40.0}
    fake_data['checksum'] = app.techui.data['checksum']
    result3_checksum = result3['checksum']
    self.assertNotEqual(result3, None)
    self.assertEqual(result3, fake_data)
    self.assertNotEqual(result2_checksum, result3_checksum)
  def testSetTechUIDict(self):
    """SetTechUIDict stores (and replaces) a named sub-dictionary."""
    techui = diagui.main.TechUI(None)
    techui.SetTechUIDict('fake', {})
    self.assertEqual(techui.data['fake'], {})
    test_dict = {'11:22:33:44:55:66': 1, '11:22:33:44:55:67': 2}
    techui.SetTechUIDict('fake', test_dict)
    self.assertEqual(techui.data['fake'], test_dict)
  def testLoadJson(self):
    """LoadJson returns {} for a missing file, parsed content otherwise."""
    dne = '/tmp/does_not_exist'
    try:
      os.remove(dne)
    except OSError:
      pass
    result = diagui.main.LoadJson(dne)
    self.assertEqual(result, {})
    jsonfile = '/tmp/json'
    test_dict = {'11:22:33:44:55:66': 1, '11:22:33:44:55:67': 2}
    tr.helpers.WriteFileAtomic(jsonfile, json.dumps(test_dict))
    result = diagui.main.LoadJson(jsonfile)
    self.assertEqual(result, test_dict)
    try:
      os.remove(jsonfile)
    except OSError:
      pass
  def testUpdateMocaDict(self):
    """UpdateMocaDict mirrors per-device MoCA stats from the data model."""
    techui = diagui.main.TechUI(None)
    techui.root = dm_root.DeviceModelRoot(None, 'fakecpe', None)
    interface_list = techui.root.Device.MoCA.InterfaceList
    # Recompute the expected values directly from the fake data model.
    snr = {}
    bitloading = {}
    corrected_cw = {}
    uncorrected_cw = {}
    nbas = {}
    for unused_i, inter in interface_list.iteritems():
      for unused_j, dev in inter.AssociatedDeviceList.iteritems():
        snr[dev.MACAddress] = dev.X_CATAWAMPUS_ORG_RxSNR_dB
        bitloading[dev.MACAddress] = dev.X_CATAWAMPUS_ORG_RxBitloading
        nbas[dev.MACAddress] = dev.X_CATAWAMPUS_ORG_RxNBAS
        corrected = (dev.X_CATAWAMPUS_ORG_RxPrimaryCwCorrected +
                     dev.X_CATAWAMPUS_ORG_RxSecondaryCwCorrected)
        uncorrected = (dev.X_CATAWAMPUS_ORG_RxPrimaryCwUncorrected +
                       dev.X_CATAWAMPUS_ORG_RxSecondaryCwUncorrected)
        no_errors = (dev.X_CATAWAMPUS_ORG_RxPrimaryCwNoErrors +
                     dev.X_CATAWAMPUS_ORG_RxSecondaryCwNoErrors)
        total = corrected + uncorrected + no_errors
        if total > 0:
          # Codeword counters are reported as fractions of the total.
          corrected_cw[dev.MACAddress] = corrected/total
          uncorrected_cw[dev.MACAddress] = uncorrected/total
        else:
          corrected_cw[dev.MACAddress] = 0
          uncorrected_cw[dev.MACAddress] = 0
    techui.UpdateMocaDict()
    self.assertEqual(snr, techui.data['moca_signal_strength'])
    self.assertEqual(bitloading, techui.data['moca_bitloading'])
    self.assertEqual(corrected_cw,
                     techui.data['moca_corrected_codewords'])
    self.assertEqual(uncorrected_cw,
                     techui.data['moca_uncorrected_codewords'])
    self.assertEqual(nbas, techui.data['moca_nbas'])
  def testUpdateWifiDict(self):
    """Signals from all WLAN configurations are merged into one dict."""
    techui = diagui.main.TechUI(None)
    wlan0 = dm.fakewifi.FakeWifiWlanConfiguration()
    wlan1 = dm.fakewifi.FakeWifiWlanConfiguration()
    techui.root = dm_root.DeviceModelRoot(None, 'fakecpe', None)
    lans = techui.root.InternetGatewayDevice.LANDeviceList
    lans['1'].WLANConfigurationList = {
        '1': wlan0,
        '2': wlan1,
    }
    wlan0.signals = {'11:22:33:44:55:66': -66}
    wlan1.signals = {'66:55:44:33:22:11': -11}
    techui.UpdateWifiDict()
    self.assertEqual(
        techui.data['wifi_signal_strength'],
        {'66:55:44:33:22:11': -11, '11:22:33:44:55:66': -66})
  def testNoSignals(self):
    """WLAN entries without a .signals attribute are skipped, not fatal."""
    techui = diagui.main.TechUI(None)
    wlan0 = dm.fakewifi.FakeWifiWlanConfiguration()
    wlan1 = object()
    techui.root = dm_root.DeviceModelRoot(None, 'fakecpe', None)
    lans = techui.root.InternetGatewayDevice.LANDeviceList
    lans['1'].WLANConfigurationList = {
        '1': wlan0,
        '2': wlan1,
    }
    wlan0.signals = {'11:22:33:44:55:66': -66}
    techui.UpdateWifiDict()
    self.assertEqual(
        techui.data['wifi_signal_strength'],
        {'11:22:33:44:55:66': -66})
class LicenseuiTest(unittest.TestCase):
  """Make sure server can retrieve encrypted license file."""
  def testLicenseExists(self):
    # Serve only the license UI, then fetch the bundled license archive.
    app = diagui.main.MainApplication(None, None, run_licenseui=True)
    app.listen(8880)
    main_loop = tr.mainloop.MainLoop()
    fetcher = AsynchFetch('http://localhost:8880/license/LICENSES.zip')
    fetcher.Wait(main_loop)
    self.assertNotEqual(fetcher.resp.body, None)
if __name__ == '__main__':
  unittest.main()  # discover and run all test cases in this module
| <filename>diagui/diagui_test.py<gh_stars>1-10
"""Unit Tests for diagui.py implementation."""
__author__ = '<EMAIL> (<NAME>)'
import ast
import json
import os
import google3
import diagui.main
import tornado.httpclient
import tr.mainloop
import tr.helpers
import dm_root
import dm.fakewifi
import dm.host
from tr.wvtest import unittest
class AsynchFetch(object):
  """Creates instance of client object, makes asynchronous calls to server."""
  def __init__(self, url_temp):
    # Kick off the GET immediately; HandleRequest fires on completion.
    self.http_client = tornado.httpclient.AsyncHTTPClient()
    self.resp = None
    self.http_client.fetch(url_temp, method='GET', callback=self.HandleRequest)
  def HandleRequest(self, response):
    # Tornado completion callback: stash the response for Wait().
    self.resp = response
  def Wait(self, loop):
    # Spin the catawampus main loop until the response has arrived.
    while not self.resp:
      loop.RunOnce()
class FakeHostsList(dm.host.CATA181HOSTS):
  """Test double exposing `count` fake hosts keyed '1'..str(count)."""
  def __init__(self, count=1):
    def _make_host():
      host = tr.core.Extensible(dm.host.CATA181HOST)()
      host.X_CATAWAMPUS_ORG_ClientIdentification = (
          dm.host.ClientIdentification())
      return host
    self._hosts = {str(n): _make_host() for n in range(1, count + 1)}
  @property
  def HostList(self):
    """Dict mapping host index (as a string) to the fake host object."""
    return self._hosts
class DiaguiTest(unittest.TestCase):
"""Tests whether 2 clients receive the same data from the server.
Also checks if both receive updates.
"""
def setUp(self):
self.save_activewan = diagui.main.ACTIVEWAN
diagui.main.ACTIVEWAN = 'testdata/activewan'
self.checksum = '0'
self.url_string = 'http://localhost:8880/content.json?checksum='
def tearDown(self):
diagui.main.ACTIVEWAN = self.save_activewan
def testUpdateDict(self):
test_data = """acs OK (May 21 2013 18:58:41+700)
softversion 1.16a
uptime 76:28:39
serialnumber 123456789
temperature 54 C
fiberjack Up
wanmac 1a:2b:3c:4d:5e:6f
wanip 192.168.127.12
lanip 192.168.1.1
subnetmask 255.255.255.0
dhcpstart 192.168.3.11
dhcpend 192.168.1.254
wiredlan 6a:5b:4c:3d:2e:1f Up
wireddevices Living Room (TV box, 6a:5b:4c:3d:2e:1f)
ssid24 AllenFamilyNetwork
ssid5 (same)
wpa2 (configured)
wirelesslan 3a:1b:4c:1d:5e:9f Up
wirelessdevices Dad\'s Phone (6a:5b:4c:3d:2e:1f)
upnp O
portforwarding 80-80: Dad\'s Computer (6a:5b:4c:3d:2e:1f)
dmzdevice Wireless Device (1) (6a:5b:4c:3d:2e:1f)
dyndns DynDNS
username allenfamily
domain home.allenfamily.com"""
url_temp = self.url_string + self.checksum
app = diagui.main.MainApplication(None, None, run_diagui=True)
app.listen(8880)
app.diagui.data = dict(line.decode('utf-8').strip().split(None, 1)
for line in test_data.split('\n'))
app.diagui.UpdateCheckSum()
response1 = AsynchFetch(url_temp)
response2 = AsynchFetch(url_temp)
main_loop = tr.mainloop.MainLoop()
response1.Wait(main_loop)
response2.Wait(main_loop)
self.assertEqual(response1.resp.body,
response2.resp.body)
self.assertNotEqual(response1.resp.body, None)
self.checksum = ast.literal_eval(response1.resp.body).get(
'checksum')
test_data = """acs OK (May 21 2013 18:58:41+700)
softversion 2.16a
uptime 76:28:39
serialnumber 987654321
temperature 54 C
fiberjack Up
wanmac 1a:2b:3c:4d:5e:6f
wanip 192.168.127.12
lanip 192.168.1.1
subnetmask 255.255.255.0
dhcpstart 192.168.3.11
dhcpend 192.168.1.254
wiredlan 6a:5b:4c:3d:2e:1f Up
wireddevices Living Room (TV box, 6a:5b:4c:3d:2e:1f)
ssid24 AllenFamilyNetwork
ssid5 (same)
wpa2 (configured)
wirelesslan 3a:1b:4c:1d:5e:9f Up
wirelessdevices Dad\'s Phone (6a:5b:4c:3d:2e:1f)
upnp O
portforwarding 80-80: Dad\'s Computer (6a:5b:4c:3d:2e:1f)
dmzdevice Wireless Device (1) (6a:5b:4c:3d:2e:1f)
dyndns DynDNS
username allenfamily
domain home.allenfamily.com"""
app.diagui.data = dict(line.decode('utf-8').strip().split(None, 1)
for line in test_data.split('\n'))
app.diagui.UpdateCheckSum()
url_temp = self.url_string + self.checksum
response1_new = AsynchFetch(url_temp)
response2_new = AsynchFetch(url_temp)
response1_new.Wait(main_loop)
response2_new.Wait(main_loop)
self.assertEqual(response1_new.resp.body,
response2_new.resp.body)
self.assertNotEqual(response1_new.resp.body, None)
self.assertNotEqual(response1.resp.body,
response1_new.resp.body)
def testOnuStats(self):
app = diagui.main.MainApplication(None, None, run_diagui=True)
app.listen(8880)
main_loop = tr.mainloop.MainLoop()
diagui.main.ONU_STAT_FILE = 'testdata/onu_stats1.json'
app.diagui.UpdateOnuStats()
self.assertTrue('onu_wan_connected' in app.diagui.data)
self.assertFalse('onu_serial' in app.diagui.data)
self.checksum = '0'
url_temp = self.url_string + self.checksum
response = AsynchFetch(url_temp)
response.Wait(main_loop)
self.assertNotEqual(response.resp.body, None)
jsdata = json.loads(response.resp.body)
self.assertTrue(jsdata['onu_wan_connected'])
diagui.main.ONU_STAT_FILE = 'testdata/onu_stats2.json'
app.diagui.UpdateOnuStats()
response = AsynchFetch(url_temp)
response.Wait(main_loop)
jsdata = json.loads(response.resp.body)
self.assertTrue(jsdata['onu_wan_connected'])
self.assertTrue(jsdata['onu_acs_contacted'])
self.assertEqual(jsdata['onu_acs_contact_time'], 100000)
self.assertEqual(jsdata['onu_serial'], '12345')
def testNoOnuStats(self):
app = diagui.main.MainApplication(None, None, run_diagui=True)
diagui.main.ONU_STAT_FILE = '/no/such/file'
app.diagui.UpdateOnuStats()
# just checking whether there is an exception
class TechuiTest(unittest.TestCase):
"""Tests the data gathering functions for the TechUI."""
def testMainApp(self):
url = 'http://localhost:8880/techui.json?checksum=0'
app = diagui.main.MainApplication(None, None, run_diagui=True,
run_techui=True)
fake_data = {'moca_bitloading': {},
'ip_addr': {'ec:88:92:91:3d:67': '172.16.31.10',
'aa:aa:aa:aa:aa:aa': '123.456.78.90'},
'wifi_signal_strength': {},
'softversion': 'gfrg200-46-pre0-39-g056a912-th',
'serialnumber': 'G0123456789',
'other_aps': {'f4:f5:e8:80:58:d7': -67.0},
'host_names': {'ec:88:92:91:3d:67': 'android',
'aa:aa:aa:aa:aa:aa': 'GFiberTV'},
'moca_corrected_codewords': {},
'moca_uncorrected_codewords': {},
'moca_signal_strength': {},
'self_signals': {'f4:f5:e8:83:01:94': -25},
'moca_nbas': {},
'checksum': 0}
app.techui.data = fake_data
app.listen(8880)
main_loop = tr.mainloop.MainLoop()
response1 = AsynchFetch(url)
response1.Wait(main_loop)
result1 = json.loads(response1.resp.body)
self.assertNotEqual(result1, None)
self.assertEqual(result1, fake_data)
# Send another request, update the data, and call callbacks.
# Should update the checksum.
result1_checksum = result1['checksum']
response2 = AsynchFetch(url)
app.techui.data['other_aps'] = {'f4:f5:e8:80:58:d7': -50.0}
app.techui.NotifyUpdatedDict()
response2.Wait(main_loop)
result2 = json.loads(response2.resp.body)
# Set fake data to expected output and compare.
fake_data['other_aps'] = {'f4:f5:e8:80:58:d7': -50.0}
fake_data['checksum'] = app.techui.data['checksum']
result2_checksum = result2['checksum']
self.assertNotEqual(result2, None)
self.assertEqual(result2, fake_data)
self.assertNotEqual(result1_checksum, result2_checksum)
self.assertEqual(app.techui.FindTVBoxes(), ['123.456.78.90'])
# Update the url to have the new checksum, update data, and check for
# correct response.
url = 'http://localhost:8880/techui.json?checksum=' + result2_checksum
response3 = AsynchFetch(url)
app.techui.data['other_aps'] = {'f4:f5:e8:80:58:d7': -40.0}
app.techui.NotifyUpdatedDict()
response3.Wait(main_loop)
result3 = json.loads(response3.resp.body)
# Set fake data to expected output and compare.
fake_data['other_aps'] = {'f4:f5:e8:80:58:d7': -40.0}
fake_data['checksum'] = app.techui.data['checksum']
result3_checksum = result3['checksum']
self.assertNotEqual(result3, None)
self.assertEqual(result3, fake_data)
self.assertNotEqual(result2_checksum, result3_checksum)
def testSetTechUIDict(self):
    """SetTechUIDict stores the given dict under the given key in techui.data."""
    techui = diagui.main.TechUI(None)
    # An empty dict must be stored as-is, not dropped.
    techui.SetTechUIDict('fake', {})
    self.assertEqual(techui.data['fake'], {})
    # A second call with the same key replaces the previous value.
    test_dict = {'11:22:33:44:55:66': 1, '11:22:33:44:55:67': 2}
    techui.SetTechUIDict('fake', test_dict)
    self.assertEqual(techui.data['fake'], test_dict)
def testLoadJson(self):
    """LoadJson returns {} for a missing file and the parsed dict otherwise."""
    dne = '/tmp/does_not_exist'
    # Ensure the file really is absent before exercising the miss path.
    try:
        os.remove(dne)
    except OSError:
        pass
    result = diagui.main.LoadJson(dne)
    self.assertEqual(result, {})
    jsonfile = '/tmp/json'
    test_dict = {'11:22:33:44:55:66': 1, '11:22:33:44:55:67': 2}
    tr.helpers.WriteFileAtomic(jsonfile, json.dumps(test_dict))
    result = diagui.main.LoadJson(jsonfile)
    self.assertEqual(result, test_dict)
    # Clean up the temp file; ignore failure if it is already gone.
    try:
        os.remove(jsonfile)
    except OSError:
        pass
def testUpdateMocaDict(self):
    """UpdateMocaDict must mirror the per-device MoCA stats of the data model.

    Recomputes the expected SNR/bitloading/NBAS/codeword-rate dicts directly
    from the fake device model, then checks that UpdateMocaDict() populated
    techui.data with the same values.
    """
    techui = diagui.main.TechUI(None)
    techui.root = dm_root.DeviceModelRoot(None, 'fakecpe', None)
    interface_list = techui.root.Device.MoCA.InterfaceList
    snr = {}
    bitloading = {}
    corrected_cw = {}
    uncorrected_cw = {}
    nbas = {}
    # Python 2 code: dict.iteritems(); '/' truncates for int operands.
    for unused_i, inter in interface_list.iteritems():
        for unused_j, dev in inter.AssociatedDeviceList.iteritems():
            snr[dev.MACAddress] = dev.X_CATAWAMPUS_ORG_RxSNR_dB
            bitloading[dev.MACAddress] = dev.X_CATAWAMPUS_ORG_RxBitloading
            nbas[dev.MACAddress] = dev.X_CATAWAMPUS_ORG_RxNBAS
            corrected = (dev.X_CATAWAMPUS_ORG_RxPrimaryCwCorrected +
                         dev.X_CATAWAMPUS_ORG_RxSecondaryCwCorrected)
            uncorrected = (dev.X_CATAWAMPUS_ORG_RxPrimaryCwUncorrected +
                           dev.X_CATAWAMPUS_ORG_RxSecondaryCwUncorrected)
            no_errors = (dev.X_CATAWAMPUS_ORG_RxPrimaryCwNoErrors +
                         dev.X_CATAWAMPUS_ORG_RxSecondaryCwNoErrors)
            total = corrected + uncorrected + no_errors
            if total > 0:
                # Codeword rates as a fraction of all codewords seen.
                corrected_cw[dev.MACAddress] = corrected / total
                uncorrected_cw[dev.MACAddress] = uncorrected / total
            else:
                # No codewords observed: report 0 instead of dividing by zero.
                corrected_cw[dev.MACAddress] = 0
                uncorrected_cw[dev.MACAddress] = 0
    techui.UpdateMocaDict()
    self.assertEqual(snr, techui.data['moca_signal_strength'])
    self.assertEqual(bitloading, techui.data['moca_bitloading'])
    self.assertEqual(corrected_cw,
                     techui.data['moca_corrected_codewords'])
    self.assertEqual(uncorrected_cw,
                     techui.data['moca_uncorrected_codewords'])
    self.assertEqual(nbas, techui.data['moca_nbas'])
def testUpdateWifiDict(self):
    """UpdateWifiDict merges signal strengths from every WLAN configuration."""
    techui = diagui.main.TechUI(None)
    wlan0 = dm.fakewifi.FakeWifiWlanConfiguration()
    wlan1 = dm.fakewifi.FakeWifiWlanConfiguration()
    techui.root = dm_root.DeviceModelRoot(None, 'fakecpe', None)
    lans = techui.root.InternetGatewayDevice.LANDeviceList
    lans['1'].WLANConfigurationList = {
        '1': wlan0,
        '2': wlan1,
    }
    wlan0.signals = {'11:22:33:44:55:66': -66}
    wlan1.signals = {'66:55:44:33:22:11': -11}
    techui.UpdateWifiDict()
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(
        techui.data['wifi_signal_strength'],
        {'66:55:44:33:22:11': -11, '11:22:33:44:55:66': -66})
def testNoSignals(self):
    """WLAN entries without a .signals attribute are skipped, not fatal."""
    techui = diagui.main.TechUI(None)
    wlan0 = dm.fakewifi.FakeWifiWlanConfiguration()
    # A bare object() has no .signals; UpdateWifiDict must tolerate it.
    wlan1 = object()
    techui.root = dm_root.DeviceModelRoot(None, 'fakecpe', None)
    lans = techui.root.InternetGatewayDevice.LANDeviceList
    lans['1'].WLANConfigurationList = {
        '1': wlan0,
        '2': wlan1,
    }
    wlan0.signals = {'11:22:33:44:55:66': -66}
    techui.UpdateWifiDict()
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(
        techui.data['wifi_signal_strength'],
        {'11:22:33:44:55:66': -66})
class LicenseuiTest(unittest.TestCase):
    """Make sure server can retrieve encrypted license file."""

    def testLicenseExists(self):
        """The license UI must serve a non-empty body for LICENSES.zip."""
        app = diagui.main.MainApplication(None, None, run_licenseui=True)
        app.listen(8880)
        main_loop = tr.mainloop.MainLoop()
        response = AsynchFetch('http://localhost:8880/license/LICENSES.zip')
        response.Wait(main_loop)
        # Only checks that *some* body came back; content is not validated.
        self.assertNotEqual(response.resp.body, None)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| en | 0.618284 | Unit Tests for diagui.py implementation. Creates instance of client object, makes asynchronous calls to server. Tests whether 2 clients receive the same data from the server. Also checks if both receive updates. acs OK (May 21 2013 18:58:41+700) softversion 1.16a uptime 76:28:39 serialnumber 123456789 temperature 54 C fiberjack Up wanmac 1a:2b:3c:4d:5e:6f wanip 192.168.127.12 lanip 192.168.1.1 subnetmask 255.255.255.0 dhcpstart 192.168.3.11 dhcpend 192.168.1.254 wiredlan 6a:5b:4c:3d:2e:1f Up wireddevices Living Room (TV box, 6a:5b:4c:3d:2e:1f) ssid24 AllenFamilyNetwork ssid5 (same) wpa2 (configured) wirelesslan 3a:1b:4c:1d:5e:9f Up wirelessdevices Dad\'s Phone (6a:5b:4c:3d:2e:1f) upnp O portforwarding 80-80: Dad\'s Computer (6a:5b:4c:3d:2e:1f) dmzdevice Wireless Device (1) (6a:5b:4c:3d:2e:1f) dyndns DynDNS username allenfamily domain home.allenfamily.com acs OK (May 21 2013 18:58:41+700) softversion 2.16a uptime 76:28:39 serialnumber 987654321 temperature 54 C fiberjack Up wanmac 1a:2b:3c:4d:5e:6f wanip 192.168.127.12 lanip 192.168.1.1 subnetmask 255.255.255.0 dhcpstart 192.168.3.11 dhcpend 192.168.1.254 wiredlan 6a:5b:4c:3d:2e:1f Up wireddevices Living Room (TV box, 6a:5b:4c:3d:2e:1f) ssid24 AllenFamilyNetwork ssid5 (same) wpa2 (configured) wirelesslan 3a:1b:4c:1d:5e:9f Up wirelessdevices Dad\'s Phone (6a:5b:4c:3d:2e:1f) upnp O portforwarding 80-80: Dad\'s Computer (6a:5b:4c:3d:2e:1f) dmzdevice Wireless Device (1) (6a:5b:4c:3d:2e:1f) dyndns DynDNS username allenfamily domain home.allenfamily.com # just checking whether there is an exception Tests the data gathering functions for the TechUI. # Send another request, update the data, and call callbacks. # Should update the checksum. # Set fake data to expected output and compare. # Update the url to have the new checksum, update data, and check for # correct response. # Set fake data to expected output and compare. Make sure server can retrieve encrypted license file. | 2.431191 | 2 |
src/genie/libs/parser/iosxe/tests/ShowPolicyMap/cli/equal/golden_output7_expected.py | balmasea/genieparser | 204 | 6619546 | <gh_stars>100-1000
# Leaf record pulled out for readability: the parsed attributes of the
# 'class-default' class under policy-map 'parent'.
_CLASS_DEFAULT = {
    "average_rate_traffic_shaping": True,
    "cir_bps": 10000000,
    "service_policy": "child",
}

# Golden expected parser output for this test case: policy-map 'parent'
# carries a single 'class-default' entry with a 10,000,000 bps CIR and an
# attached service-policy 'child'.
expected_output = {
    "policy_map": {
        "parent": {
            "class": {
                "class-default": _CLASS_DEFAULT,
            },
        },
    },
}
| expected_output = {
"policy_map": {
"parent": {
"class": {
"class-default": {
"average_rate_traffic_shaping": True,
"cir_bps": 10000000,
"service_policy": "child",
}
}
}
}
} | none | 1 | 1.316282 | 1 | |
proxypooler/ext.py | arrti/proxypooler | 0 | 6619547 | from functools import partial
import msgpack
from proxypooler import config
from proxypooler.task_logger import log
from proxypooler.utils import LoggerAsync, MQueue
from proxypooler.db import RedisClient
# Module-level singletons shared across the proxypooler package.
# NOTE: these are created at import time, so importing this module opens the
# Redis connection and constructs both message-queue endpoints.
conn = RedisClient()
serial = msgpack.packb  # use MessagePack as serializer
# Matching deserializer: decode strings as UTF-8 and return msgpack arrays
# as tuples (use_list=False) instead of lists.
deserial = partial(msgpack.unpackb, encoding='utf-8', use_list=False)
# Async loggers for the main project and the server component respectively.
logger = LoggerAsync(config.project, log)
server_logger = LoggerAsync(config.project_srv, log)
# Publisher and subscriber endpoints of the validator queue; both sides are
# bound to the same exchange and queue names.
validator_pub_queue = MQueue('pub', config.mq_url,
'proxypooler_validator_exchange', 'proxypooler_validator_queue')
validator_sub_queue = MQueue('sub', config.mq_url,
'proxypooler_validator_exchange', 'proxypooler_validator_queue')
| from functools import partial
import msgpack
from proxypooler import config
from proxypooler.task_logger import log
from proxypooler.utils import LoggerAsync, MQueue
from proxypooler.db import RedisClient
conn = RedisClient()
serial = msgpack.packb # use MessagePack as serializer
deserial = partial(msgpack.unpackb, encoding='utf-8', use_list=False)
logger = LoggerAsync(config.project, log)
server_logger = LoggerAsync(config.project_srv, log)
validator_pub_queue = MQueue('pub', config.mq_url,
'proxypooler_validator_exchange', 'proxypooler_validator_queue')
validator_sub_queue = MQueue('sub', config.mq_url,
'proxypooler_validator_exchange', 'proxypooler_validator_queue')
| en | 0.897541 | # use MessagePack as serializer | 2.056088 | 2 |
class_Game.py | Jobkanis/Battleport | 0 | 6619548 | <reponame>Jobkanis/Battleport<filename>class_Game.py<gh_stars>0
import random
import math
import time
import copy
import pygame
import class_Player
import class_Boats
import class_Positions
import class_Visual
import class_Menu
import database
class Game:
    """Top-level game state: players, board positions, visuals and turn loop."""

    def __init__(self, gameDisplay, clock, width, height):
        # Creating classes / default state.  The "Empty*" members are sentinel
        # placeholders that setupgame() replaces with real sentinel objects.
        self.Sound_enabled = True
        self.Music_enabled = True
        self.Players = []
        self.Positions = []
        self.EmptyBoat = NotImplemented
        self.EmptyPosition = NotImplemented
        self.EmptyPlayer = NotImplemented
        self.Player1 = NotImplemented
        self.Player2 = NotImplemented
        self.Visual = class_Visual.Visual(self, gameDisplay, clock, width, height)
        self.Database = database.Database()

    def setupgame(self, player1name, player2name):
        """Build sentinels, board, players and boats, run the game, return the winner."""
        if self.Music_enabled:  # idiomatic truth test instead of '== True'
            pygame.mixer.music.load("sound/bgm_ingame.wav")
            pygame.mixer.music.set_volume(1)
            pygame.mixer.music.play(-1)  # -1 loops the background music forever

        # ----- Sentinel ("empty") objects used in place of None -----------
        self.EmptyPlayer = class_Player.Player(self, "empty")
        self.Players.append(self.EmptyPlayer)
        self.EmptyPosition = class_Positions.Position(self, -1, -1)
        self.Positions.append(self.EmptyPosition)
        self.EmptyBoat = class_Boats.Boat(self, self.EmptyPlayer, "empty")
        self.EmptyPlayer.Boats.append(self.EmptyBoat)

        # ----- Board and sound effects ------------------------------------
        self.CreatePositions()  # Create all positions
        self.att_sound = pygame.mixer.Sound('ship_att.wav')
        self.sink_sound = pygame.mixer.Sound('ship_dead.wav')
        # NOTE(review): goal_sound reuses 'ship_dead.wav' -- confirm intended.
        self.goal_sound = pygame.mixer.Sound('ship_dead.wav')
        self.move_sound = pygame.mixer.Sound('ship_move.wav')
        self.ship_select_sound = pygame.mixer.Sound('ship_select.wav')
        self.game_won = pygame.mixer.Sound('game_won.wav')
        self.game_over = pygame.mixer.Sound('game_over.wav')

        # ----- Players ----------------------------------------------------
        self.Player1 = class_Player.Player(self, player1name)
        self.Players.append(self.Player1)
        self.Player2 = class_Player.Player(self, player2name)
        self.Players.append(self.Player2)
        self.Winner = self.EmptyPlayer

        # Each player places their boats in turn.
        self.Player_Playing = self.Player1
        self.Visual.show_nextturn(self.Player_Playing)
        self.Player1.CreateBoats()
        self.Player_Playing = self.Player2
        self.Visual.show_nextturn(self.Player_Playing)
        self.Player2.CreateBoats()

        self.Play()
        return self.Winner

    def Play(self):
        """Main loop: alternate turns until Winner is set, then show the result."""
        # Start on Player2 so the first NextPlayer() call hands the turn to Player1.
        self.Player_Playing = self.Player2
        while self.Winner == self.EmptyPlayer:
            self.Visual.drawscreen()
            time.sleep(1)
            self.Player_Playing = self.NextPlayer()
            self.Visual.show_nextturn(self.Player_Playing)
            self.Player_Playing.PlayTurn()
        # Game over: draw the final board, play the jingle, show the winner.
        self.Visual.drawscreen()
        time.sleep(1)
        if self.Sound_enabled:
            self.game_over.play()
        self.Visual.DrawWinnerScreen()

    # ------------------- useable game functions ---------------------------

    def GetPosition(self, x, y):
        """Return the Position at (x, y), or EmptyPosition if none matches."""
        for Pos in self.Positions:
            if Pos.X == x and Pos.Y == y:
                return Pos
        return self.EmptyPosition

    def GetBoat(self, x, y):
        """Return the boat at (x, y), or EmptyBoat if the tile is free.

        FIXME: GetBoatPositions() is not defined in this module, so the
        first loop raises NameError if this method is ever called --
        probably meant self.GetAllBoatPositions([...]).
        """
        for LocalBoats in GetBoatPositions(self):  # FIXME: undefined name
            if LocalBoats.X == x and LocalBoats.Y == y:
                return LocalBoats
        for LocalPlayers in self.Players:
            for boat in LocalPlayers.Boats:
                if boat.X == x and boat.Y == y:
                    return boat
        return self.EmptyBoat

    # ------------------- specific game functions --------------------------

    def NextPlayer(self):
        """Return the player who is NOT the current self.Player_Playing."""
        if self.Player_Playing == self.Player1:
            return self.Player2
        return self.Player1

    def CreatePositions(self):
        """Fill self.Positions with the 20x20 grid of board positions."""
        print("Creating positions")
        for y in range(0, 20):
            for x in range(0, 20):
                self.Positions.append(class_Positions.Position(self, x, y))

    def GetAllBoatPositions(self, exception):  # exception is a list
        """Return both players' boat positions, excluding *exception*."""
        BoatPositions = []
        BoatPositions += self.Player1.GetPlayerBoatPositions(exception)
        BoatPositions += self.Player2.GetPlayerBoatPositions(exception)
        return BoatPositions

    def ToughUpdateBoats(self):
        """Rebuild every Position's .Boat link from both players' boat layouts."""
        positions = self.Positions
        Player1Boats = self.Player1.Boats
        Player2Boats = self.Player2.Boats
        for localpositions in positions:
            # Reset the tile, then re-link any boat occupying it.
            localpositions.Boat = self.EmptyBoat
            for p1boats in Player1Boats:
                allboatpositions = p1boats.GetLocalBoatsPositions(True, -1, -1, "inactive")
                for p1allboats in allboatpositions:
                    if p1allboats.X == localpositions.X and p1allboats.Y == localpositions.Y:
                        localpositions.Boat = p1boats
            for p2boats in Player2Boats:
                allboatpositions = p2boats.GetLocalBoatsPositions(True, -1, -1, "inactive")
                for p2allboats in allboatpositions:
                    if p2allboats.X == localpositions.X and p2allboats.Y == localpositions.Y:
                        localpositions.Boat = p2boats
| import random
import math
import time
import copy
import pygame
import class_Player
import class_Boats
import class_Positions
import class_Visual
import class_Menu
import database
class Game:
def __init__(self, gameDisplay, clock, width, height):
#creating classes
self.Sound_enabled = True
self.Music_enabled = True
self.Players = []
self.Positions = []
self.EmptyBoat = NotImplemented
self.EmptyPosition = NotImplemented
self.EmptyPlayer = NotImplemented
self.Player1 = NotImplemented
self.Player2 = NotImplemented
self.Visual = class_Visual.Visual(self, gameDisplay, clock, width, height)
self.Database = database.Database()
def setupgame(self, player1name, player2name):
if self.Music_enabled == True:
pygame.mixer.music.load("sound/bgm_ingame.wav")
pygame.mixer.music.set_volume(1)
pygame.mixer.music.play(-1)
######### Empty Variables ###########
########## Empty Classes ##########
self.EmptyPlayer = class_Player.Player(self, "empty")
self.Players.append(self.EmptyPlayer)
self.EmptyPosition = class_Positions.Position(self, -1, -1)
self.Positions.append(self.EmptyPosition)
self.EmptyBoat = class_Boats.Boat(self, self.EmptyPlayer, "empty")
self.EmptyPlayer.Boats.append(self.EmptyBoat)
################ Players ###################
self.CreatePositions() #Create all positions
self.att_sound = pygame.mixer.Sound('ship_att.wav')
self.sink_sound = pygame.mixer.Sound('ship_dead.wav')
self.goal_sound = pygame.mixer.Sound('ship_dead.wav')
self.move_sound = pygame.mixer.Sound('ship_move.wav')
self.ship_select_sound = pygame.mixer.Sound('ship_select.wav')
self.game_won = pygame.mixer.Sound('game_won.wav')
self.game_over = pygame.mixer.Sound('game_over.wav')
self.Player1 = class_Player.Player(self, player1name)
self.Players.append(self.Player1)
self.Player2 = class_Player.Player(self, player2name)
self.Players.append(self.Player2)
self.Winner = self.EmptyPlayer
self.Player_Playing = self.Player1
self.Visual.show_nextturn(self.Player_Playing)
self.Player1.CreateBoats()
self.Player_Playing = self.Player2
self.Visual.show_nextturn(self.Player_Playing)
self.Player2.CreateBoats()
self.Play()
return self.Winner
#sounds
def Play(self):
self.Player_Playing = self.Player2
while self.Winner == self.EmptyPlayer:
self.Visual.drawscreen()
time.sleep(1)
self.Player_Playing = self.NextPlayer()
self.Visual.show_nextturn(self.Player_Playing)
self.Player_Playing.PlayTurn()
self.Visual.drawscreen()
time.sleep(1)
if self.Sound_enabled:
self.game_over.play()
self.Visual.DrawWinnerScreen()
############# USEABLE GAME FUNCTIONS #############
def GetPosition(self, x, y):
for Pos in self.Positions:
if Pos.X == x and Pos.Y == y:
return Pos
return self.EmptyPosition
def GetBoat(self, x, y):
for LocalBoats in GetBoatPositions(self):
if LocalBoats.X == x and LocalBoats.Y == y:
return LocalBoats
for LocalPlayers in self.Players:
for boat in LocalPlayers.Boats:
if boat.X == x and boat.Y == y:
return boat
else: return self.EmptyBoat
############### SPECIFIC GAME FUNCTIONS ###################
def NextPlayer(self):
if self.Player_Playing == self.Player1:
return self.Player2
else:
return self.Player1
def CreatePositions(self):
print("Creating positions")
for y in range (0,20):
for x in range (0,20):
LocalPosition = class_Positions.Position(self, x, y)
self.Positions.append(LocalPosition)
def GetAllBoatPositions(self, exception): #exception is list
BoatPositions = []
BoatPositions += self.Player1.GetPlayerBoatPositions(exception) #exception
BoatPositions += self.Player2.GetPlayerBoatPositions(exception) #exception
return BoatPositions
def ToughUpdateBoats(self):
positions = self.Positions
Player1Boats = self.Player1.Boats
Player2Boats = self.Player2.Boats
for localpositions in positions:
localpositions.Boat = self.EmptyBoat
for p1boats in Player1Boats:
allboatpositions = p1boats.GetLocalBoatsPositions(True, -1, -1, "inactive")
for p1allboats in allboatpositions:
if p1allboats.X == localpositions.X and p1allboats.Y == localpositions.Y:
localpositions.Boat = p1boats
for p2boats in Player2Boats:
allboatpositions = p2boats.GetLocalBoatsPositions(True, -1, -1, "inactive")
for p2allboats in allboatpositions:
if p2allboats.X == localpositions.X and p2allboats.Y == localpositions.Y:
localpositions.Boat = p2boats | de | 0.483697 | #creating classes ######### Empty Variables ########### ########## Empty Classes ########## ################ Players ################### #Create all positions #sounds ############# USEABLE GAME FUNCTIONS ############# ############### SPECIFIC GAME FUNCTIONS ################### #exception is list #exception #exception | 3.033071 | 3 |
xmnlp/sentiment/__init__.py | ai4dev/xmnlp | 0 | 6619549 | <gh_stars>0
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------#
# author: <NAME> #
# email: <EMAIL> #
# -------------------------------------------#
from __future__ import absolute_import, unicode_literals
import sys
from xmnlp.config import path as C_PATH
from . import sentiment
# Python 2 compatibility: force UTF-8 as the default string encoding.
# reload() is a builtin only on Python 2; this branch is skipped on Python 3.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf8')

# Lazily-created module-level Sentiment model; populated by loader()/load().
model = None
def loader():
    """Load the default sentiment model on first use (lazy singleton).

    Does nothing if the module-level model is already populated.
    """
    global model
    if model is None:
        print("(Lazy Load) Loading model...")
        model = sentiment.Sentiment()
        model.load(C_PATH.sentiment['model']['sentiment'])
def predict(text, stopword=None):
    """Predict the sentiment of *text*, lazily loading the default model.

    Args:
        text: input text to score.
        stopword: optional stopword collection forwarded to the model.

    Returns:
        Whatever the underlying Sentiment model's predict() returns.
    """
    loader()
    return model.predict(text, stopword=stopword)
def load(path):
    """Replace the module-level model with one loaded from *path*."""
    global model
    model = sentiment.Sentiment()
    model.load(path)
| # !/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------#
# author: <NAME> #
# email: <EMAIL> #
# -------------------------------------------#
from __future__ import absolute_import, unicode_literals
import sys
from xmnlp.config import path as C_PATH
from . import sentiment
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf8')
model = None
def loader():
"""load model"""
global model
if model is None:
print("(Lazy Load) Loading model...")
model = sentiment.Sentiment()
model.load(C_PATH.sentiment['model']['sentiment'])
def predict(text, stopword=None):
"""predict sentiment"""
loader()
return model.predict(text, stopword=stopword)
def load(path):
"""load model from path"""
global model
model = sentiment.Sentiment()
model.load(path) | en | 0.365461 | # !/usr/bin/env python # -*- coding: utf-8 -*- # -------------------------------------------# # author: <NAME> # # email: <EMAIL> # # -------------------------------------------# load model predict sentiment load model from path | 2.457381 | 2 |
app/lti_app/tests/citation_checker/fixtures.py | oss6/scriba | 0 | 6619550 | import pytest
from lti_app.core.citation_checker import Checker as CitationChecker
@pytest.fixture
def make_citation_checker():
    """Factory fixture: returns a callable building a CitationChecker.

    Tests call the returned function with (text, reference) to get a
    freshly constructed checker per invocation.
    """
    def _make_citation_checker(text, reference):
        return CitationChecker(text, reference)
    return _make_citation_checker
| import pytest
from lti_app.core.citation_checker import Checker as CitationChecker
@pytest.fixture
def make_citation_checker():
def _make_citation_checker(text, reference):
return CitationChecker(text, reference)
return _make_citation_checker
| none | 1 | 1.688839 | 2 |