const WOQLTableConfig = require("./tableConfig");
const UTILS = require('../utils');
const WOQLRule = require('../woqlRule');
const WOQLResult = require('../woqlResult');
/**
* @file WOQL Table
* @license Apache Version 2
*/
function WOQLTable(client, config){
this.client = client;
this.config = (config ? config : new WOQLTableConfig());
return this;
}
WOQLTable.prototype.options = function(config){
this.config = config;
return this;
}
WOQLTable.prototype.setResult = function(res){
this.result = res;
return this;
}
WOQLTable.prototype.count = function(){
return this.result.count();
}
WOQLTable.prototype.first = function(){
return this.result.first();
}
WOQLTable.prototype.prev = function(){
return this.result.prev();
}
WOQLTable.prototype.next = function(){
return this.result.next();
}
WOQLTable.prototype.canAdvancePage = function(){
return (this.result.count() == this.result.query.getLimit());
}
WOQLTable.prototype.canChangePage = function(){
return this.canAdvancePage() || this.canRetreatPage();
}
WOQLTable.prototype.canRetreatPage = function(){
return (this.result.query.getPage() > 1);
}
WOQLTable.prototype.getPageSize = function(){
return this.result.query.getLimit() ;
}
WOQLTable.prototype.setPage = function(l){
return this.result.query.setPage(l);
}
WOQLTable.prototype.getPage = function(){
return this.result.query.getPage();
}
WOQLTable.prototype.setPageSize = function(l){
return this.update(this.result.query.setPageSize(l));
}
WOQLTable.prototype.nextPage = function(){
return this.update(this.result.query.nextPage());
}
WOQLTable.prototype.firstPage = function(){
return this.update(this.result.query.firstPage());
}
WOQLTable.prototype.previousPage = function(){
return this.update(this.result.query.previousPage());
}
WOQLTable.prototype.getColumnsToRender = function(){
if(this.hasColumnOrder()){
var cols = this.getColumnOrder();
}
else if(this.result.query.hasSelect()){
var cols = this.result.query.getSelectVariables();
}
else {
var cols = this.result.getVariableList();
}
var self = this;
return (cols ? cols.filter(col => !self.hidden(col)) : []);
}
WOQLTable.prototype.getColumnHeaderContents = function(colid){
colid = UTILS.addNamespaceToVariable(colid);
let hr = new WOQLRule().matchColumn(this.config.rules, colid, "header");
if(hr && hr.length){
let h = hr[hr.length-1].rule.header;
if(typeof h == "string"){
return document.createTextNode(h);
}
else if(typeof h == "function"){
return h(colid);
}
else return h;
}
var clab = UTILS.labelFromURL(colid);
return document.createTextNode(clab);
}
WOQLTable.prototype.hidden = function(col){
let colid = UTILS.addNamespaceToVariable(col);
let matched_rules = new WOQLRule().matchColumn(this.config.rules, colid, "hidden");
if(matched_rules.length){
return matched_rules[matched_rules.length-1].rule.hidden;
}
return false;
}
/**
* Called when you want to change the query associated with the table.
*/
WOQLTable.prototype.update = function(nquery){
return nquery.execute(this.client).then((results) => {
var nresult = new WOQLResult(results, nquery);
this.setResult(nresult);
if(this.notify) this.notify(nresult);
return nresult;
});
}
WOQLTable.prototype.hasDefinedEvent = function(row, key, scope, action, rownum){
if(scope == "row"){
var matched_rules = new WOQLRule().matchRow(this.config.rules, row, this.result.cursor, action);
}
else {
var matched_rules = new WOQLRule().matchCell(this.config.rules, row, key, this.result.cursor, action);
}
if(matched_rules && matched_rules.length) return true;
return false;
}
WOQLTable.prototype.getDefinedEvent = function(row, key, scope, action, rownum){
if(scope == "row"){
var matched_rules = new WOQLRule().matchRow(this.config.rules, row, this.result.cursor, action);
}
else {
var matched_rules = new WOQLRule().matchCell(this.config.rules, row, key, this.result.cursor, action);
}
if(matched_rules && matched_rules.length) {
var l = (matched_rules.length - 1);
return matched_rules[l].rule[action];
}
}
WOQLTable.prototype.getRowClick = function(row){
let re = this.getDefinedEvent(row, false, "row", "click");
return re;
}
WOQLTable.prototype.getCellClick = function(row, col){
let cc = this.getDefinedEvent(row, col, "column", "click");
return cc;
}
WOQLTable.prototype.getRowHover = function(row){
return this.getDefinedEvent(row, false, "row", "hover");
}
WOQLTable.prototype.getCellHover = function(row, key){
return this.getDefinedEvent(row, key, "column", "hover");
}
WOQLTable.prototype.getColumnOrder = function(){
return this.config.column_order();
}
WOQLTable.prototype.hasColumnOrder = WOQLTable.prototype.getColumnOrder;
WOQLTable.prototype.hasCellClick = WOQLTable.prototype.getCellClick;
WOQLTable.prototype.hasRowClick = WOQLTable.prototype.getRowClick;
WOQLTable.prototype.hasCellHover = WOQLTable.prototype.getCellHover;
WOQLTable.prototype.hasRowHover = WOQLTable.prototype.getRowHover;
WOQLTable.prototype.getRenderer = function(key, row, rownum){
var rend = this.getSpecificRender(key, row);
if(rend) return rend;
let renderer = this.getDefinedEvent(row, key, "column", "renderer", rownum);
let args = this.getDefinedEvent(row, key, "column", "args", rownum);
if(!renderer){
let r = this.getRendererForDatatype(row[key]);
renderer = r.name;
if(!args) args = r.args;
}
if(renderer){
return this.datatypes.createRenderer(renderer, args);
}
}
WOQLTable.prototype.renderValue = function(renderer, val, key, row){
if(val && val['@type']){
renderer.type = val['@type'];
var dv = new DataValue(val['@value'], val['@type'], key, row);
}
else if(val && val['@language']){
renderer.type = "xsd:string";
var dv = new DataValue(val['@value'], renderer.type, key, row);
}
else if(val && typeof val == "string"){
renderer.type = "id";
var dv = new DataValue(val, "id", key, row);
}
if(dv) return renderer.renderValue(dv);
return "";
}
function DataValue(val, type, key, row){
this.datavalue = (val == "unknown" ? "" : val);
this.datatype = type;
this.key = key;
this.row = row;
}
DataValue.prototype.value = function(nvalue){
if(nvalue) {
this.datavalue = nvalue;
return this;
}
return this.datavalue;
}
WOQLTable.prototype.getRendererForDatatype = function(val){
if(val && val['@type']){
return this.datatypes.getRenderer(val['@type'], val['@value']);
}
else if(val && val['@language']){
return this.datatypes.getRenderer("xsd:string", val['@value']);
}
else if(val && typeof val == "string"){
return this.datatypes.getRenderer("id", val);
}
return false;
}
WOQLTable.prototype.getSpecificRender = function(key, row){
let rend = this.getDefinedEvent(row, key, "column", "render");
return rend;
}
module.exports = WOQLTable;
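/*
 * Minimal usage sketch (hedged): `client` is assumed to be a connected
 * WOQLClient and `query` a WOQL query object exposing the methods used
 * above (execute, getLimit, nextPage, ...); neither is defined in this file.
 *
 *   const table = new WOQLTable(client);
 *   query.execute(client).then((results) => {
 *       table.setResult(new WOQLResult(results, query));
 *       if (table.canAdvancePage()) table.nextPage(); // re-runs the query
 *   });
 */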
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.expand_dims": {
"dim": "axis",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
}
# Mapping from function to the new name of the function
self.symbol_renames = renames_v2.renames
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames, use the OLD function name.
# These renames happen after the arguments have been processed.
self.symbol_renames.update({
"tf.contrib.data.AUTOTUNE": "tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter": "tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook": "tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset": "tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional": "tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset": "tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer": "tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset": "tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator": "tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter": "tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape": "tf.data.experimental.assert_element_shape",
"tf.contrib.data.batch_and_drop_remainder": "tf.compat.v1.contrib.data.batch_and_drop_remainder",
"tf.contrib.data.bucket_by_sequence_length": "tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets": "tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device": "tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch": "tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset": "tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional": "tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element": "tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer": "tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window": "tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors": "tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats": "tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset": "tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset": "tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator": "tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch": "tf.data.experimental.map_and_batch",
"tf.contrib.data.padded_batch_and_drop_remainder": "tf.compat.v1.contrib.data.padded_batch_and_drop_remainder",
"tf.contrib.data.parallel_interleave": "tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset": "tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device": "tf.data.experimental.prefetch_to_device",
"tf.contrib.data.read_batch_features": "tf.compat.v1.contrib.data.read_batch_features",
"tf.contrib.data.reduce_dataset": "tf.compat.v1.contrib.data.reduce_dataset",
"tf.contrib.data.rejection_resample": "tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets": "tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan": "tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator": "tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat": "tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.sliding_window_batch": "tf.compat.v1.contrib.data.sliding_window_batch",
"tf.contrib.data.sloppy_interleave": "tf.compat.v1.contrib.data.sloppy_interleave",
"tf.contrib.data.unbatch": "tf.data.experimental.unbatch",
"tf.contrib.data.unique": "tf.data.experimental.unique",
"tf.quantize_v2": "tf.quantization.quantize",
"tf.sparse_concat": "tf.sparse.concat",
"tf.sparse_split": "tf.sparse.split",
"tf.multinomial": "tf.random.categorical",
"tf.random.multinomial": "tf.random.categorical",
"tf.load_file_system_library": "tf.load_library",
})
# pylint: enable=line-too-long
# For custom behavior, or if an auto-generated rename in renames_v2.py
# is incorrect, add the op name here to exclude it from the renames.
excluded_renames = [
]
# Variables that should be changed to functions.
self.change_to_function = {}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
# IMPORTANT: order here should correspond to OLD argument order.
# We just prepend "arg_name=" to all arguments in function calls.
self.function_reorders = {
"tf.argmax": ["input", "axis", "name", "dimension", "output_type"],
"tf.argmin": ["input", "axis", "name", "dimension", "output_type"],
"tf.boolean_mask": ["tensor", "mask", "name", "axis"],
"tf.convert_to_tensor": ["value", "dtype", "name", "preferred_dtype"],
"tf.nn.convolution": [
"input", "filter", "padding", "strides", "dilation_rate", "name",
"data_format"],
"tf.nn.crelu": ["features", "name", "axis"],
"tf.nn.pool": [
"input", "window_shape", "pooling_type", "padding", "dilation_rate",
"strides", "name", "data_format"
],
"tf.nn.depthwise_conv2d": [
"input", "filter", "strides", "padding", "rate", "name",
"data_format"
],
"tf.multinomial": [
"logits", "num_samples", "seed", "name", "output_dtype"
],
"tf.random.multinomial": [
"logits", "num_samples", "seed", "name", "output_dtype"
],
"tf.pad": ["tensor", "paddings", "mode", "name", "constant_values"],
"tf.quantize_v2": [
"input", "min_range", "max_range", "T", "mode", "name",
"round_mode"
],
"tf.shape": ["input", "name", "out_type"],
"tf.size": ["input", "name", "out_type"],
"tf.sparse.concat": [
"axis", "sp_inputs", "name", "expand_nonconcat_dim", "concat_dim"
],
"tf.random.poisson": ["lam", "shape", "dtype", "seed", "name"],
"tf.sparse.segment_mean": [
"data", "indices", "segment_ids", "name", "num_segments"
],
"tf.sparse.segment_sqrt_n": [
"data", "indices", "segment_ids", "name", "num_segments"
],
"tf.sparse.segment_sum": [
"data", "indices", "segment_ids", "name", "num_segments"
],
"tf.strings.length": ["input", "name", "unit"],
}
# Specially handled functions.
self.function_handle = {}
decay_function_comment = (
"ERROR: <function name> has been changed to return a callable instead "
"of a tensor when graph building, but its functionality remains "
"unchanged during eager execution (returns a callable like "
"before). The converter cannot detect and fix this reliably, so "
"you need to inspect this usage manually.\n"
)
# TODO(b/118888586): add default value change to update script.
default_loss_reduction_changed = (
"WARNING: default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE.\n"
)
assert_return_type_comment = (
"WARNING: assert_* functions have been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
)
assert_rank_comment = (
"WARNING: assert_rank_* functions have been changed to return None, and"
" the data and summarize arguments have been removed."
)
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
self.function_warnings = {
"tf.assert_greater": assert_return_type_comment,
"tf.assert_equal": assert_return_type_comment,
"tf.assert_less": assert_return_type_comment,
"tf.assert_rank": assert_rank_comment,
"tf.debugging.assert_equal": assert_return_type_comment,
"tf.debugging.assert_greater": assert_return_type_comment,
"tf.debugging.assert_greater_equal": assert_return_type_comment,
"tf.debugging.assert_integer": assert_return_type_comment,
"tf.debugging.assert_less": assert_return_type_comment,
"tf.debugging.assert_less_equal": assert_return_type_comment,
"tf.debugging.assert_near": assert_return_type_comment,
"tf.debugging.assert_negative": assert_return_type_comment,
"tf.debugging.assert_non_negative": assert_return_type_comment,
"tf.debugging.assert_non_positive": assert_return_type_comment,
"tf.debugging.assert_none_equal": assert_return_type_comment,
"tf.debugging.assert_positive": assert_return_type_comment,
"tf.debugging.assert_rank": assert_rank_comment,
"tf.debugging.assert_rank_at_least": assert_rank_comment,
"tf.debugging.assert_rank_in": assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.estimator.LinearClassifier":
default_loss_reduction_changed,
"tf.estimator.LinearRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedClassifier":
default_loss_reduction_changed,
"tf.estimator.DNNLinearCombinedRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNRegressor":
default_loss_reduction_changed,
"tf.estimator.DNNClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineClassifier":
default_loss_reduction_changed,
"tf.estimator.BaselineRegressor":
default_loss_reduction_changed,
"tf.nn.conv1d":
"WARNING: use_cudnn_on_gpu argument has been removed and \"value\" was "
"renamed to \"input\"",
"tf.nn.conv2d":
"WARNING: use_cudnn_on_gpu argument has been removed and \"filter\" "
"was renamed to \"filters\"",
"tf.nn.conv2d_backprop_filter":
"WARNING: use_cudnn_on_gpu argument has been removed",
"tf.nn.conv2d_backprop_input":
"WARNING: use_cudnn_on_gpu argument has been removed and \"filter\" "
"was renamed to \"filters\"",
}
# Right now we can't have both a rename and a warning.
self.symbol_renames = {
name: new_name
for name, new_name in self.symbol_renames.items()
if name not in self.function_warnings and name not in excluded_renames
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 2.0
Simple usage:
tf_convert_v2.py --infile foo.py --outfile bar.py
tf_convert_v2.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
action="store_true",
default=False)
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
report_text = None
report_filename = args.report_filename
files_processed = 0
errors = []
if args.input_file:
if not args.output_file:
raise ValueError(
"--outfile=<output file> argument is required when converting a "
"single file.")
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
if not args.output_tree:
raise ValueError(
"--outtree=<output directory> argument is required when converting a "
"file tree.")
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
with open(report_filename, "w") as report_file:
report_file.write(report_text)
print("TensorFlow 2.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
|
import { v4 as uuid } from "uuid";
import {
UPDATE_AUTH,
SET_EXPIRED_SESSION,
REMOVE_AUTH,
UPDATE_ANILIST,
} from "../actions";
// make UUIDs noticeable
const genUUID = () => {
let id = uuid();
const parts = id.split("-");
parts[1] = "NANI";
return parts.join("-");
};
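// e.g. "9f1c2d3e-NANI-4a5b-8c6d-0e1f2a3b4c5d": the second group of an
// otherwise normal v4 UUID is replaced so the id is easy to spot in logs.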
const initialState = {
token: "",
expires: 8640000000000000,
username: "",
guest: true,
premium: false,
anilist: { username: "", token: "" },
expiredSession: "",
uuid: genUUID(),
};
export default function Auth(state = initialState, action) {
switch (action.type) {
case UPDATE_AUTH:
return {
...state,
...action.payload,
};
case UPDATE_ANILIST:
return {
...state,
anilist: action.payload,
};
case REMOVE_AUTH:
return {
...initialState,
expiredSession: state.expiredSession,
uuid: state.uuid,
anilist: state.anilist,
};
case SET_EXPIRED_SESSION:
return {
...state,
expiredSession: action.payload,
};
default:
return state;
}
}
|
# coding: utf-8
"""
anchore_engine.services.policy_engine
This is a policy evaluation service. It receives push-events from external systems for data updates, and provides an API for requesting image policy checks
OpenAPI spec version: 1.0.0
Contact: zach@anchore.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class FeedUpdateNotification(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'event_timestamp': 'datetime',
'feed_name': 'str',
'feed_group': 'str',
'data': 'list[object]'
}
attribute_map = {
'event_timestamp': 'event_timestamp',
'feed_name': 'feed_name',
'feed_group': 'feed_group',
'data': 'data'
}
def __init__(self, event_timestamp=None, feed_name=None, feed_group=None, data=None):
"""
FeedUpdateNotification - a model defined in Swagger
"""
self._event_timestamp = None
self._feed_name = None
self._feed_group = None
self._data = None
if event_timestamp is not None:
self.event_timestamp = event_timestamp
if feed_name is not None:
self.feed_name = feed_name
if feed_group is not None:
self.feed_group = feed_group
if data is not None:
self.data = data
@property
def event_timestamp(self):
"""
Gets the event_timestamp of this FeedUpdateNotification.
The time of the external event. Should be set to when the event occurred, rather than the delivery time
:return: The event_timestamp of this FeedUpdateNotification.
:rtype: datetime
"""
return self._event_timestamp
@event_timestamp.setter
def event_timestamp(self, event_timestamp):
"""
Sets the event_timestamp of this FeedUpdateNotification.
The time of the external event. Should be set to when the event occurred, rather than the delivery time
:param event_timestamp: The event_timestamp of this FeedUpdateNotification.
:type: datetime
"""
self._event_timestamp = event_timestamp
@property
def feed_name(self):
"""
Gets the feed_name of this FeedUpdateNotification.
:return: The feed_name of this FeedUpdateNotification.
:rtype: str
"""
return self._feed_name
@feed_name.setter
def feed_name(self, feed_name):
"""
Sets the feed_name of this FeedUpdateNotification.
:param feed_name: The feed_name of this FeedUpdateNotification.
:type: str
"""
self._feed_name = feed_name
@property
def feed_group(self):
"""
Gets the feed_group of this FeedUpdateNotification.
:return: The feed_group of this FeedUpdateNotification.
:rtype: str
"""
return self._feed_group
@feed_group.setter
def feed_group(self, feed_group):
"""
Sets the feed_group of this FeedUpdateNotification.
:param feed_group: The feed_group of this FeedUpdateNotification.
:type: str
"""
self._feed_group = feed_group
@property
def data(self):
"""
Gets the data of this FeedUpdateNotification.
:return: The data of this FeedUpdateNotification.
:rtype: list[object]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this FeedUpdateNotification.
:param data: The data of this FeedUpdateNotification.
:type: list[object]
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, FeedUpdateNotification):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
/**
* Add collapsible boxes to our editor screens.
*/
postboxes.add_postbox_toggles(pagenow);
/**
* The rest of our customizations.
*/
(function($) {
if ('edit' === getParameterByName('action')) {
// Store our original slug on page load for edit checking.
var original_slug = $('#name').val();
}
// Switch to newly selected post type or taxonomy automatically.
$('#post_type').on('change',function(){
$('#cptui_select_post_type').submit();
});
$('#taxonomy').on('change',function(){
$( '#cptui_select_taxonomy' ).submit();
});
// Confirm our deletions
$('#cpt_submit_delete').on('click',function() {
if ( confirm( cptui_type_data.confirm ) ) {
return true;
}
return false;
});
// Toggles help/support accordions.
$('#support .question').each(function() {
var tis = $(this), state = false, answer = tis.next('div').slideUp();
tis.on('click keydown',function(e) {
// Helps with accessibility and keyboard navigation.
if(e.type==='keydown' && e.keyCode!==32 && e.keyCode!==13) {
return;
}
e.preventDefault();
state = !state;
answer.slideToggle(state);
tis.toggleClass('active',state);
tis.attr('aria-expanded', state.toString() );
tis.focus();
});
});
// Switch spaces for underscores on our slug fields.
$('#name').on('keyup',function(e){
var value, original_value;
value = original_value = $(this).val();
if ( e.keyCode !== 9 && e.keyCode !== 37 && e.keyCode !== 38 && e.keyCode !== 39 && e.keyCode !== 40 ) {
value = value.replace(/ /g, "_");
value = value.toLowerCase();
value = replaceDiacritics(value);
value = transliterate(value);
value = replaceSpecialCharacters(value);
if ( value !== original_value ) {
$(this).val(value);
}
}
//Displays a message if slug changes.
if(undefined != original_slug) {
var $slugchanged = $('#slugchanged');
if(value != original_slug) {
$slugchanged.removeClass('hidemessage');
} else {
$slugchanged.addClass('hidemessage');
}
}
});
// Replace diacritic characters with latin characters.
function replaceDiacritics(s) {
var diacritics = [
/[\300-\306]/g, /[\340-\346]/g, // A, a
/[\310-\313]/g, /[\350-\353]/g, // E, e
/[\314-\317]/g, /[\354-\357]/g, // I, i
/[\322-\330]/g, /[\362-\370]/g, // O, o
/[\331-\334]/g, /[\371-\374]/g, // U, u
/[\321]/g, /[\361]/g, // N, n
/[\307]/g, /[\347]/g // C, c
];
var chars = ['A', 'a', 'E', 'e', 'I', 'i', 'O', 'o', 'U', 'u', 'N', 'n', 'C', 'c'];
for (var i = 0; i < diacritics.length; i++) {
s = s.replace(diacritics[i], chars[i]);
}
return s;
}
function replaceSpecialCharacters(s) {
s = s.replace(/[^a-z0-9\s]/gi, '_');
return s;
}
var cyrillic = {
"Ё": "YO", "Й": "I", "Ц": "TS", "У": "U", "К": "K", "Е": "E", "Н": "N", "Г": "G", "Ш": "SH", "Щ": "SCH", "З": "Z", "Х": "H", "Ъ": "'",
"ё": "yo", "й": "i", "ц": "ts", "у": "u", "к": "k", "е": "e", "н": "n", "г": "g", "ш": "sh", "щ": "sch", "з": "z", "х": "h", "ъ": "'",
"Ф": "F", "Ы": "I", "В": "V", "А": "A", "П": "P", "Р": "R", "О": "O", "Л": "L", "Д": "D", "Ж": "ZH", "Э": "E",
"ф": "f", "ы": "i", "в": "v", "а": "a", "п": "p", "р": "r", "о": "o", "л": "l", "д": "d", "ж": "zh", "э": "e",
"Я": "Ya", "Ч": "CH", "С": "S", "М": "M", "И": "I", "Т": "T", "Ь": "'", "Б": "B", "Ю": "YU",
"я": "ya", "ч": "ch", "с": "s", "м": "m", "и": "i", "т": "t", "ь": "'", "б": "b", "ю": "yu"
};
function transliterate(word) {
return word.split('').map(function (char) {
return cyrillic[char] || char;
}).join("");
}
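// e.g. transliterate('тип') === 'tip'; characters not in the map pass through.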
if ( undefined != wp.media ) {
var _custom_media = true,
_orig_send_attachment = wp.media.editor.send.attachment;
}
function getParameterByName(name, url) {
if (!url) url = window.location.href;
name = name.replace(/[\[\]]/g, "\\$&");
var regex = new RegExp("[?&]" + name + "(=([^&#]*)|&|#|$)"),
results = regex.exec(url);
if (!results) return null;
if (!results[2]) return '';
return decodeURIComponent(results[2].replace(/\+/g, " "));
}
$('#cptui_choose_icon').on('click',function(e){
e.preventDefault();
var button = $(this);
var id = jQuery('#menu_icon').attr('id');
_custom_media = true;
wp.media.editor.send.attachment = function (props, attachment) {
if (_custom_media) {
$("#" + id).val(attachment.url);
} else {
return _orig_send_attachment.apply(this, [props, attachment]);
}
};
wp.media.editor.open(button);
return false;
});
$('#togglelabels').on('click',function(e){
e.preventDefault();
$('#labels_expand').toggleClass('toggledclosed');
});
$('#togglesettings').on('click',function(e) {
e.preventDefault();
$('#settings_expand').toggleClass('toggledclosed');
});
$('#labels_expand,#settings_expand').on('focus',function(e) {
if ( $(this).hasClass('toggledclosed') ) {
$(this).toggleClass('toggledclosed');
}
});
$('#labels_expand legend,#settings_expand legend').on('click',function(e){
$(this).parent().toggleClass('toggledclosed');
});
$('.cptui-help').on('click',function(e){
e.preventDefault();
});
$('.cptui-taxonomy-submit').on('click',function(e){
if ( $('.cptui-table :checkbox:checked').length == 0 ) {
e.preventDefault();
alert( cptui_tax_data.no_associated_type );
}
});
})(jQuery);
|
"""客户端上传客户端号和分数(注意:并不会上传排名,客户端无法上传排名),同一个客户端可以多次上传分数,取最新的一次分数"""
def uploading():
"""客户端上传客户编号和分数"""
client_no = input("请输入您的客户编号:")
score = int(input("请输入您的分数:"))
return client_no, score
uploading()
# 创建客户表,存储客户编号,分数和排名
class Client(BaseModel):
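# The source is truncated here. A minimal sketch of the model it appears to
# describe, assuming peewee-style fields (BaseModel and the field names are
# assumptions, not part of the original file):
#
#     client_no = CharField(unique=True)  # client number
#     score = IntegerField()              # most recent uploaded score
#     rank = IntegerField(null=True)      # computed server-side, never uploaded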
|
# Copyright (c) 2020, Michael Boyle
# See LICENSE file for details:
# <https://github.com/moble/quaternionic/blob/master/LICENSE>
"""Essential functions for quaternion algebra.
These functions form the basic algebraic behavior of quaternions — addition,
multiplication, exp, log, etc.
Each function takes one or two array-like inputs — depending on whether it is
unary or binary — as the first parameter or two, and another array-like object
for output as the final parameter. Even for functions that return a single
float or bool, the output must be array-like so that it can be modified inside
the function. These functions are JIT-compiled by numba's `guvectorize`
function, meaning that they can also act on arbitrary arrays just like standard
numpy arrays, as long as the final dimension of any quaternion-valued input has
size 4 to represent the components of the quaternion.
These functions are generic, meaning that they can be used without the
`quaternionic.array` object. However, these functions are implemented as the
"ufunc"s of those arrays, meaning that we can perform algebra directly on them
in natural ways — as in `q1+q2`, `q1*q2`, etc. — and using the standard numpy
functions — as in `np.exp(q)`, `np.log(q)`, etc.
For this purpose, we implement as many of [the standard
ufuncs](https://numpy.org/doc/stable/reference/ufuncs.html) as make sense for
quaternions. For the most part, this means ignoring operations related to
integers, remainders, ordering, or trigonometric functions. The bit-twiddling
functions are re-interpreted as they usually are in geometric algebra to denote
geometric operations.
All functions in this module are magically compiled to ufuncs in `__init__.py`
and attached to the `algebra_ufuncs` object.
"""
import numpy as np
from . import float64, boolean
from .utilities import attach_typelist_and_signature
_quaternion_resolution = 10 * np.finfo(float).resolution
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def add(q1, q2, qout):
"""Add two quaternions q1+q2"""
qout[0] = q1[0] + q2[0]
qout[1] = q1[1] + q2[1]
qout[2] = q1[2] + q2[2]
qout[3] = q1[3] + q2[3]
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def subtract(q1, q2, qout):
"""Subtract quaternion q1-q2"""
qout[0] = q1[0] - q2[0]
qout[1] = q1[1] - q2[1]
qout[2] = q1[2] - q2[2]
qout[3] = q1[3] - q2[3]
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def multiply(q1, q2, qout):
"""Multiply quaternions q1*q2"""
a = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
b = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]
c = q1[0]*q2[2] - q1[1]*q2[3] + q1[2]*q2[0] + q1[3]*q2[1]
d = q1[0]*q2[3] + q1[1]*q2[2] - q1[2]*q2[1] + q1[3]*q2[0]
qout[0] = a
qout[1] = b
qout[2] = c
qout[3] = d
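# Worked check of the product above (components ordered w, x, y, z): for
# q1 = i = (0, 1, 0, 0) and q2 = j = (0, 0, 1, 0), only the q1[1]*q2[2] term
# in d survives, giving (0, 0, 0, 1) = k, i.e. i*j = k as expected.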
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def divide(q1, q2, qout):
"""Divide quaternions q1/q2 = q1 * q2.inverse"""
q2norm = q2[0]**2 + q2[1]**2 + q2[2]**2 + q2[3]**2
a = (+q1[0]*q2[0] + q1[1]*q2[1] + q1[2]*q2[2] + q1[3]*q2[3]) / q2norm
b = (-q1[0]*q2[1] + q1[1]*q2[0] - q1[2]*q2[3] + q1[3]*q2[2]) / q2norm
c = (-q1[0]*q2[2] + q1[1]*q2[3] + q1[2]*q2[0] - q1[3]*q2[1]) / q2norm
d = (-q1[0]*q2[3] - q1[1]*q2[2] + q1[2]*q2[1] + q1[3]*q2[0]) / q2norm
qout[0] = a
qout[1] = b
qout[2] = c
qout[3] = d
true_divide = divide
@attach_typelist_and_signature([(float64, float64[:], float64[:])], '(),(n)->(n)')
def multiply_scalar(s, q, qout):
"""Multiply scalar by quaternion s*q"""
qout[0] = s * q[0]
qout[1] = s * q[1]
qout[2] = s * q[2]
qout[3] = s * q[3]
@attach_typelist_and_signature([(float64, float64[:], float64[:])], '(),(n)->(n)')
def divide_scalar(s, q, qout):
"""Divide scalar by quaternion s/q = s * q.inverse"""
qnorm = q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2
qout[0] = s * q[0] / qnorm
qout[1] = -s * q[1] / qnorm
qout[2] = -s * q[2] / qnorm
qout[3] = -s * q[3] / qnorm
true_divide_scalar = divide_scalar
@attach_typelist_and_signature([(float64[:], float64, float64[:])], '(n),()->(n)')
def scalar_multiply(q, s, qout):
"""Multiply quaternion by scalar q*s"""
qout[0] = q[0] * s
qout[1] = q[1] * s
qout[2] = q[2] * s
qout[3] = q[3] * s
@attach_typelist_and_signature([(float64[:], float64, float64[:])], '(n),()->(n)')
def scalar_divide(q, s, qout):
"""Divide quaternion by scalar q/s"""
qout[0] = q[0] / s
qout[1] = q[1] / s
qout[2] = q[2] / s
qout[3] = q[3] / s
scalar_true_divide = scalar_divide
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def negative(q, qout):
"""Return negative quaternion -q"""
qout[0] = -q[0]
qout[1] = -q[1]
qout[2] = -q[2]
qout[3] = -q[3]
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def positive(q, qout):
"""Return input quaternion q"""
qout[0] = q[0]
qout[1] = q[1]
qout[2] = q[2]
qout[3] = q[3]
@attach_typelist_and_signature([(float64[:], float64, float64[:])], '(n),()->(n)')
def float_power(q, s, qout):
"""Raise quaternion to scalar power exp(log(q)*s)"""
b = np.sqrt(q[1]**2 + q[2]**2 + q[3]**2)
if np.abs(b) <= _quaternion_resolution * np.abs(q[0]):
if q[0] < 0.0:
if np.abs(q[0] + 1) > _quaternion_resolution:
qout[0] = np.log(-q[0])
qout[1] = np.pi
qout[2] = 0.0
qout[3] = 0.0
else:
qout[0] = 0.0
qout[1] = np.pi
qout[2] = 0.0
qout[3] = 0.0
else:
qout[0] = np.log(q[0])
qout[1] = 0.0
qout[2] = 0.0
qout[3] = 0.0
else:
v = np.arctan2(b, q[0])
f = v / b
qout[0] = np.log(q[0] * q[0] + b * b) / 2.0
qout[1] = f * q[1]
qout[2] = f * q[2]
qout[3] = f * q[3]
qout *= s
vnorm = np.sqrt(qout[1]**2 + qout[2]**2 + qout[3]**2)
if vnorm > _quaternion_resolution:
e = np.exp(qout[0])
qout[0] = e * np.cos(vnorm)
qout[1:] *= e * np.sin(vnorm) / vnorm
else:
qout[0] = np.exp(qout[0])
qout[1] = 0.0
qout[2] = 0.0
qout[3] = 0.0
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->()')
def absolute(q, qout):
"""Return absolute value of quaternion |q|"""
qout[0] = np.sqrt(q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def conj(q, qout):
"""Return quaternion-conjugate of quaternion q̄"""
qout[0] = +q[0]
qout[1] = -q[1]
qout[2] = -q[2]
qout[3] = -q[3]
conjugate = conj
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def exp(q, qout):
"""Return exponential of input quaternion exp(q)"""
vnorm = np.sqrt(q[1]**2 + q[2]**2 + q[3]**2)
if vnorm > _quaternion_resolution:
s = np.sin(vnorm) / vnorm
e = np.exp(q[0])
qout[0] = e * np.cos(vnorm)
qout[1] = e * s * q[1]
qout[2] = e * s * q[2]
qout[3] = e * s * q[3]
else:
qout[0] = np.exp(q[0])
qout[1] = 0.0
qout[2] = 0.0
qout[3] = 0.0
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def log(q, qout):
"""Return logarithm of input quaternion log(q)"""
b = np.sqrt(q[1]**2 + q[2]**2 + q[3]**2)
if b <= _quaternion_resolution * np.abs(q[0]):
if q[0] < 0.0:
if np.abs(q[0] + 1) > _quaternion_resolution:
qout[0] = np.log(-q[0])
qout[1] = np.pi
qout[2] = 0.0
qout[3] = 0.0
else:
qout[0] = 0.0
qout[1] = np.pi
qout[2] = 0.0
qout[3] = 0.0
else:
qout[0] = np.log(q[0])
qout[1] = 0.0
qout[2] = 0.0
qout[3] = 0.0
else:
v = np.arctan2(b, q[0])
f = v / b
qout[0] = np.log(q[0] * q[0] + b * b) / 2.0
qout[1] = f * q[1]
qout[2] = f * q[2]
qout[3] = f * q[3]
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def sqrt(q, qout):
"""Return square-root of input quaternion √q.
The general formula whenever the denominator is nonzero is
```
√q = (|q| + q) / √(2|q| + 2q.w)
```
This can be proven by expanding `q` as `q.w + q.vec` and multiplying the
expression above by itself.
When the denominator is zero, the quaternion is a pure-real negative number.
It is not clear what the appropriate square-root is in this case (because the
quaternions come with infinitely many elements that square to -1), so we
arbitrarily choose the result to be proportional to the `x` quaternion.
"""
# √Q = (a + Q) / √(2*a + 2*Q[0])
a = np.sqrt(q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)
if np.abs(a + q[0]) < _quaternion_resolution * a:
qout[0] = 0.0
qout[1] = np.sqrt(a)
qout[2] = 0.0
qout[3] = 0.0
else:
c = np.sqrt(0.5 / (a + q[0]))
qout[0] = (a + q[0]) * c
qout[1] = q[1] * c
qout[2] = q[2] * c
qout[3] = q[3] * c
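# Worked example of the general branch: for q = 4k = (0, 0, 0, 4) we get
# a = 4 and c = sqrt(0.5/4) ≈ 0.35355, so √q ≈ (1.41421, 0, 0, 1.41421);
# applying `square` below to that result returns (0, 0, 0, 4) up to rounding.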
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def square(q, qout):
"""Return square of quaternion q*q"""
a = q[0]**2 - q[1]**2 - q[2]**2 - q[3]**2
b = 2*q[0]*q[1]
c = 2*q[0]*q[2]
d = 2*q[0]*q[3]
qout[0] = a
qout[1] = b
qout[2] = c
qout[3] = d
@attach_typelist_and_signature([(float64[:], float64[:])], '(n)->(n)')
def reciprocal(q, qout):
"""Return reciprocal (inverse) of quaternion q.inverse"""
norm = q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2
qout[0] = q[0] / norm
qout[1] = -q[1] / norm
qout[2] = -q[2] / norm
qout[3] = -q[3] / norm
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def bitwise_or(q1, q2, qout):
"""Return scalar product of quaternions q1|q2.
If we denote by `⟨a⟩ₛ` the grade-s component of the general multivector `a`,
we can express this product as
```
a | b = Σₛ,ᵣ ⟨⟨a⟩ₛ ⟨b⟩ᵣ⟩₀
```
Note that this is different from the "Hestenes dot" product where the sum
runs over s≠0 and r≠0; that is the product returned by `galgebra` using
this operator.
"""
qout[0] = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
qout[1] = 0.0
qout[2] = 0.0
qout[3] = 0.0
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def bitwise_xor(q1, q2, qout):
"""Return outer product of quaternions q1^q2.
This is the generalized outer product of geometric algebra. If we denote
by `⟨a⟩ₛ` the grade-s component of the general multivector `a`, we can
express this product as
```
a ^ b = Σₛ,ᵣ ⟨⟨a⟩ₛ ⟨b⟩ᵣ⟩ₛ₊ᵣ
```
Note that the result may seem surprising because we sometimes think of the
imaginary part of a quaternion as a vector; in the geometric algebra it is a
bivector, and the outer product of two bivectors is grade 4, which vanishes
in three dimensions, so no product of the two imaginary parts appears here.
"""
a = q1[0]*q2[0]
b = q1[0]*q2[1] + q1[1]*q2[0]
c = q1[0]*q2[2] + q1[2]*q2[0]
d = q1[0]*q2[3] + q1[3]*q2[0]
qout[0] = a
qout[1] = b
qout[2] = c
qout[3] = d
invert = conj # reversion (= conjugate for quaternion algebra)
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def left_shift(q1, q2, qout):
"""Return left-contraction of quaternions q1<<q2 = q1⌋q1.
For all quaternions `a`, `b`, `c`, we have
```
(a ^ b) * c = a * (b ⌋ c)
```
"""
a = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
b = q1[0]*q2[1]
c = q1[0]*q2[2]
d = q1[0]*q2[3]
qout[0] = a
qout[1] = b
qout[2] = c
qout[3] = d
@attach_typelist_and_signature([(float64[:], float64[:], float64[:])], '(n),(n)->(n)')
def right_shift(q1, q2, qout):
"""Return right-contraction of quaternions q1>>q2 = q1⌊q2.
For all quaternions `a`, `b`, `c`, we have
```
c * (b ^ a) = (c ⌊ b) * a
```
"""
a = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
b = q1[1]*q2[0]
c = q1[2]*q2[0]
d = q1[3]*q2[0]
qout[0] = a
qout[1] = b
qout[2] = c
qout[3] = d
@attach_typelist_and_signature([(float64[:], float64[:], boolean[:])], '(n),(n)->()')
def not_equal(q1, q2, bout):
bout[0] = np.any(q1[:] != q2[:])
@attach_typelist_and_signature([(float64[:], float64[:], boolean[:])], '(n),(n)->()')
def equal(q1, q2, bout):
bout[0] = np.all(q1[:] == q2[:])
@attach_typelist_and_signature([(float64[:], float64[:], boolean[:])], '(n),(n)->()')
def logical_and(q1, q2, bout):
bout[0] = np.any(q1[:]) and np.any(q2[:])
@attach_typelist_and_signature([(float64[:], float64[:], boolean[:])], '(n),(n)->()')
def logical_or(q1, q2, bout):
bout[0] = np.any(q1[:]) or np.any(q2[:])
@attach_typelist_and_signature([(float64[:], boolean[:])], '(n)->()')
def isfinite(qin, bout):
bout[0] = np.isfinite(qin[0]) and np.isfinite(qin[1]) and np.isfinite(qin[2]) and np.isfinite(qin[3])
@attach_typelist_and_signature([(float64[:], boolean[:])], '(n)->()')
def isinf(qin, bout):
bout[0] = np.isinf(qin[0]) or np.isinf(qin[1]) or np.isinf(qin[2]) or np.isinf(qin[3])
@attach_typelist_and_signature([(float64[:], boolean[:])], '(n)->()')
def isnan(qin, bout):
bout[0] = np.isnan(qin[0]) or np.isnan(qin[1]) or np.isnan(qin[2]) or np.isnan(qin[3])
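# Sketch of direct use (hedged: once `attach_typelist_and_signature` has fed
# these functions through numba's guvectorize in __init__.py, they behave like
# numpy gufuncs; the raw Python functions can also be called with an explicit
# output array, as here):
#
#   import numpy as np
#   q = np.array([1.0, 2.0, 3.0, 4.0])
#   out = np.empty(4)
#   multiply(q, q, out)   # same result as square(q, out)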
|
/* eslint import/no-extraneous-dependencies: 0, no-console: 0 */
import express from 'express';
import webpack from 'webpack';
const { webpackPort, webpackHost } = require('../config/env');
const webpackConfig = require('../config/webpack-dev.config');
const compiler = webpack(webpackConfig);
const serverOptions = {
contentBase: `http://${webpackHost}:${webpackPort}`,
quiet: true,
noInfo: true,
hot: true,
inline: true,
lazy: false,
publicPath: webpackConfig.output.publicPath,
headers: { 'Access-Control-Allow-Origin': '*' },
stats: { colors: true },
watchOptions: {
aggregateTimeout: 300,
poll: true
}
};
const app = express();
app.use(require('webpack-dev-middleware')(compiler, serverOptions));
app.use(require('webpack-hot-middleware')(compiler));
app.listen(webpackPort, (err) => {
if (err) {
console.error(err);
} else {
console.info(`Webpack development server listening on port ${webpackPort}`);
}
});
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef __cplusplus
# error This header can only be compiled as C++.
#endif
#ifndef __INCLUDED_PROTOCOL_H__
#define __INCLUDED_PROTOCOL_H__
#include "serialize.h"
#include "netbase.h"
#include <string>
#include "uint256.h"
extern bool fTestNet;
static inline unsigned short GetDefaultPort(const bool testnet = fTestNet)
{
return testnet ? 38696 : 18696;
}
extern unsigned char pchMessageStart[4];
/** Message header.
* (4) message start.
* (12) command.
* (4) size.
* (4) checksum.
*/
class CMessageHeader
{
public:
CMessageHeader();
CMessageHeader(const char* pszCommand, unsigned int nMessageSizeIn);
std::string GetCommand() const;
bool IsValid() const;
IMPLEMENT_SERIALIZE
(
READWRITE(FLATDATA(pchMessageStart));
READWRITE(FLATDATA(pchCommand));
READWRITE(nMessageSize);
READWRITE(nChecksum);
)
// TODO: make private (improves encapsulation)
public:
enum {
MESSAGE_START_SIZE=sizeof(::pchMessageStart),
COMMAND_SIZE=12,
MESSAGE_SIZE_SIZE=sizeof(int),
CHECKSUM_SIZE=sizeof(int),
MESSAGE_SIZE_OFFSET=MESSAGE_START_SIZE+COMMAND_SIZE,
CHECKSUM_OFFSET=MESSAGE_SIZE_OFFSET+MESSAGE_SIZE_SIZE,
HEADER_SIZE=MESSAGE_START_SIZE+COMMAND_SIZE+MESSAGE_SIZE_SIZE+CHECKSUM_SIZE
};
char pchMessageStart[MESSAGE_START_SIZE];
char pchCommand[COMMAND_SIZE];
unsigned int nMessageSize;
unsigned int nChecksum;
};
/** nServices flags */
enum
{
NODE_NETWORK = (1 << 0),
NODE_BLOOM = (1 << 1),
};
/** A CService with information about it as peer */
class CAddress : public CService
{
public:
CAddress();
explicit CAddress(CService ipIn, uint64 nServicesIn=NODE_NETWORK);
void Init();
IMPLEMENT_SERIALIZE
(
CAddress* pthis = const_cast<CAddress*>(this);
CService* pip = (CService*)pthis;
if (fRead)
pthis->Init();
if (nType & SER_DISK)
READWRITE(nVersion);
if ((nType & SER_DISK) ||
(nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH)))
READWRITE(nTime);
READWRITE(nServices);
READWRITE(*pip);
)
void print() const;
// TODO: make private (improves encapsulation)
public:
uint64 nServices;
// disk and network only
unsigned int nTime;
// memory only
int64 nLastTry;
};
/** inv message data */
class CInv
{
public:
CInv();
CInv(int typeIn, const uint256& hashIn);
CInv(const std::string& strType, const uint256& hashIn);
IMPLEMENT_SERIALIZE
(
READWRITE(type);
READWRITE(hash);
)
friend bool operator<(const CInv& a, const CInv& b);
bool IsKnownType() const;
const char* GetCommand() const;
std::string ToString() const;
void print() const;
// TODO: make private (improves encapsulation)
public:
int type;
uint256 hash;
};
enum
{
MSG_TX = 1,
MSG_BLOCK,
// Nodes may always request a MSG_FILTERED_BLOCK in a getdata, however,
// MSG_FILTERED_BLOCK should not appear in any invs except as a part of getdata.
MSG_FILTERED_BLOCK,
};
#endif // __INCLUDED_PROTOCOL_H__
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import concurrent.futures as futures
from typing import Any, Callable, Coroutine, Dict, Type
from urllib.parse import urlparse
from ....utils import implements, classproperty
from .base import Channel, ChannelType, Server, Client
from .core import register_client, register_server
from .errors import ChannelClosed
DEFAULT_DUMMY_ADDRESS = 'dummy://0'
class DummyChannel(Channel):
"""
Channel for communications in same process.
"""
__slots__ = '_in_queue', '_out_queue', '_closed'
name = 'dummy'
def __init__(self,
in_queue: asyncio.Queue,
out_queue: asyncio.Queue,
local_address: str = None,
dest_address: str = None,
compression=None):
super().__init__(local_address=local_address,
dest_address=dest_address,
compression=compression)
self._in_queue = in_queue
self._out_queue = out_queue
self._closed = asyncio.Event()
@property
@implements(Channel.type)
def type(self) -> ChannelType:
return ChannelType.local
@implements(Channel.send)
async def send(self, message: Any):
if self._closed.is_set(): # pragma: no cover
raise ChannelClosed('Channel already closed, cannot send message')
# put message directly into queue
await self._out_queue.put(message)
@implements(Channel.recv)
async def recv(self):
if self._closed.is_set(): # pragma: no cover
raise ChannelClosed('Channel already closed, cannot recv message')
try:
return await self._in_queue.get()
except RuntimeError:
if self._closed.is_set():
pass
@implements(Channel.close)
async def close(self):
self._closed.set()
@property
@implements(Channel.closed)
def closed(self) -> bool:
return self._closed.is_set()
@register_server
class DummyServer(Server):
__slots__ = '_closed',
_address_to_instances: Dict[str, "DummyServer"] = dict()
scheme = 'dummy'
def __init__(self,
address: str,
channel_handler: Callable[[Channel], Coroutine] = None):
super().__init__(address, channel_handler)
self._closed = asyncio.Event()
@classmethod
def get_instance(cls, address: str):
return cls._address_to_instances[address]
@classproperty
@implements(Server.client_type)
def client_type(self) -> Type["Client"]:
return DummyClient
@property
@implements(Server.channel_type)
def channel_type(self) -> ChannelType:
return ChannelType.local
@staticmethod
@implements(Server.create)
async def create(config: Dict) -> "DummyServer":
config = config.copy()
address = config.pop('address', DEFAULT_DUMMY_ADDRESS)
handle_channel = config.pop('handle_channel')
if urlparse(address).scheme != DummyServer.scheme: # pragma: no cover
raise ValueError(f'Address for DummyServer '
f'should start with "dummy://", '
f'got {address}')
if config: # pragma: no cover
raise TypeError(f'Creating DummyServer got unexpected '
f'arguments: {",".join(config)}')
try:
return DummyServer.get_instance(address)
except KeyError:
server = DummyServer(address, handle_channel)
DummyServer._address_to_instances[address] = server
return server
@implements(Server.start)
async def start(self):
# nothing needs to do for dummy server
pass
@implements(Server.join)
async def join(self, timeout=None):
wait_coro = self._closed.wait()
try:
await asyncio.wait_for(wait_coro, timeout=timeout)
except (futures.TimeoutError, asyncio.TimeoutError):
pass
@implements(Server.on_connected)
async def on_connected(self, *args, **kwargs):
channel = args[0]
assert isinstance(channel, DummyChannel)
if kwargs: # pragma: no cover
raise TypeError(f'{type(self).__name__} got unexpected '
f'arguments: {",".join(kwargs)}')
await self.channel_handler(channel)
@implements(Server.stop)
async def stop(self):
self._closed.set()
del DummyServer._address_to_instances[self.address]
@property
@implements(Server.stopped)
def stopped(self) -> bool:
return self._closed.is_set()
@register_client
class DummyClient(Client):
__slots__ = '_task',
scheme = DummyServer.scheme
def __init__(self,
local_address: str,
dest_address: str,
channel: Channel):
super().__init__(local_address,
dest_address,
channel)
self._task = None
@staticmethod
@implements(Client.connect)
async def connect(dest_address: str,
local_address: str = None,
**kwargs) -> "Client":
if urlparse(dest_address).scheme != DummyServer.scheme: # pragma: no cover
raise ValueError(f'Destination address should start with "dummy://" '
f'for DummyClient, got {dest_address}')
server = DummyServer.get_instance(dest_address)
if server is None: # pragma: no cover
raise RuntimeError('DummyServer needs to be created '
'first before DummyClient')
q1, q2 = asyncio.Queue(), asyncio.Queue()
client_channel = DummyChannel(q1, q2)
server_channel = DummyChannel(q2, q1)
conn_coro = server.on_connected(server_channel)
task = asyncio.create_task(conn_coro)
client = DummyClient(local_address, dest_address, client_channel)
client._task = task
return client
@implements(Client.close)
async def close(self):
await super().close()
self._task.cancel()
self._task = None
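# Minimal usage sketch (hedged: `handle_channel` is a hypothetical echo
# handler, and we assume the base Client exposes its channel as `.channel`;
# the config keys follow DummyServer.create above):
#
#   async def handle_channel(channel):
#       await channel.send(await channel.recv())
#
#   async def main():
#       server = await DummyServer.create(
#           {'address': 'dummy://0', 'handle_channel': handle_channel})
#       await server.start()
#       client = await DummyClient.connect('dummy://0')
#       await client.channel.send('ping')
#       print(await client.channel.recv())  # -> 'ping'
#
#   asyncio.run(main())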
|
const flickr = { flickr: { width: 1536, height: 1792, paths: [{ d: 'M1248 128q119 0 203.5 84.5t84.5 203.5v960q0 119-84.5 203.5t-203.5 84.5h-960q-119 0-203.5-84.5t-84.5-203.5v-960q0-119 84.5-203.5t203.5-84.5h960zM698 896q0-88-62-150t-150-62-150 62-62 150 62 150 150 62 150-62 62-150zM1262 896q0-88-62-150t-150-62-150 62-62 150 62 150 150 62 150-62 62-150z' }] } };
export default flickr;
|
from typing import Union
from jsonrpcclient.requests import Request
class Engine:
def __init__(self, client):
self.client = client
def get_coinbase(self):
payload = Request("engine_getCoinbase")
response = self.client.send(payload)
return response.data.result
def get_block_reward(self, block_number: Union[int, None]):
payload = Request("engine_getBlockReward", block_number)
response = self.client.send(payload)
return response.data.result
def get_recommended_confirmation(self):
payload = Request("engine_getRecommendedConfirmation")
response = self.client.send(payload)
return response.data.result
def get_custom_action_data(
self, handler_id: int, data_bytes: str, block_number: Union[int, None]
):
payload = Request(
"engine_getCustomActionData", handler_id, data_bytes, block_number,
)
response = self.client.send(payload)
return response.data.result
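# Usage sketch (hedged: `client` is any JSON-RPC client whose send() returns
# a jsonrpcclient response object; none is constructed in this file):
#
#   engine = Engine(client)
#   engine.get_block_reward(12345)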
|
const { Client } = require("@elastic/elasticsearch");
const client = new Client({ node: "http://localhost:9200" });
async function run() {
// Let's start by indexing some data
await client.index({
index: "game-of-thrones",
body: {
character: "Ned Stark",
quote: "Winter is coming.",
},
});
await client.index({
index: "game-of-thrones",
body: {
character: "Daenerys Targaryen",
quote: "I am the mother of dragons.",
},
});
await client.index({
index: "game-of-thrones",
// here we are forcing an index refresh,
// otherwise we would not get any results
// in the subsequent search
refresh: true,
body: {
character: "Tyrion Lannister",
quote: "A mind needs books like a sword needs a whetstone.",
},
});
// Let's search!
const { body } = await client.search({
index: "game-of-thrones",
body: {
query: {
match: {
quote: "winter",
},
},
},
});
console.log(body.hits.hits);
}
run().catch(console.log);
|
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from decouple import config, Csv
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'j(up0zin%yzi(%ig0=mxzh+r$ufw023s@etkk7x8owu21f+bj-'
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
DEBUG = config('DEBUG', default=False, cast=bool)
# ALLOWED_HOSTS = []
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
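# With python-decouple these values come from a `.env` file (or from real
# environment variables). An illustrative `.env`, values being placeholders:
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,localhost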
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize', # <- here
'widget_tweaks',
# 'boards.templatetags.form_tags',
'boards', # forum boards
'accounts', # user accounts
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': dj_database_url.config(
# default=config('DATABASE_URL')
# )
# }
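# If the dj_database_url block above is enabled, DATABASE_URL in .env follows
# the usual URL scheme, e.g. (placeholder values):
#
#   DATABASE_URL=postgres://user:password@localhost:5432/mydb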
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGOUT_REDIRECT_URL = 'home'
LOGIN_REDIRECT_URL = 'home'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_URL = 'login'
|
r"""
Solve S-unit equation x + y = 1
Inspired by the work of Tzanakis--de Weger, Baker--Wustholz, and Smart, we use the LLL methods available in Sage to implement an algorithm that returns all `S`-unit solutions to the equation `x + y = 1`.
REFERENCES:
- [MR2016]_
- [Sma1995]_
- [Sma1998]_
- [Yu2007]_
- [AKMRVW]_
AUTHORS:
- Alejandra Alvarado, Angelos Koutsianas, Beth Malmskog, Christopher Rasmussen, David Roe, Christelle Vincent, Mckenzie West (2018-04-25 to 2018-11-09): original version
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import solve_S_unit_equation, eq_up_to_order
sage: K.<xi> = NumberField(x^2+x+1)
sage: S = K.primes_above(3)
sage: expected = [((0, 1), (4, 0), xi + 2, -xi - 1),
....: ((1, -1), (0, -1), 1/3*xi + 2/3, -1/3*xi + 1/3),
....: ((1, 0), (5, 0), xi + 1, -xi),
....: ((2, 0), (5, 1), xi, -xi + 1)]
sage: sols = solve_S_unit_equation(K, S, 200)
sage: eq_up_to_order(sols, expected)
True
.. TODO::
- Use Cython to improve timings on the sieve
"""
# ****************************************************************************
# Copyright (C) 2020 Alejandra Alvarado <aalvarado2 at eiu.edu>
# Angelos Koutsianas <koutsis.jr at gmail.com>
# Beth Malmskog <beth.malmskog at gmail.com>
# Christopher Rasmussen <crasmussen at wesleyan.edu>
# Christelle Vincent <christelle.vincent at uvm.edu>
# Mckenzie West <westmr at uwec.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.rings.all import Infinity
from sage.symbolic.ring import SR
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
from sage.rings.real_mpfr import RealField, RR
from sage.rings.complex_mpfr import ComplexField
from sage.functions.log import exp
from sage.rings.rational_field import QQ
from sage.rings.number_field.number_field import is_real_place, refine_embedding
from sage.rings.number_field.unit_group import UnitGroup
from sage.rings.finite_rings.integer_mod_ring import Integers
from sage.rings.finite_rings.integer_mod import mod
from sage.rings.padics.factory import Qp
from sage.combinat.combination import Combinations
from sage.misc.all import prod
from sage.arith.all import factorial
from sage.matrix.constructor import matrix, identity_matrix, vector, block_matrix, zero_matrix
from sage.modules.free_module_element import zero_vector
from itertools import combinations_with_replacement
from sage.arith.all import gcd, lcm, CRT
from copy import copy
import itertools
def column_Log(SUK, iota, U, prec=106):
r"""
Return the log vector of ``iota``; i.e., the logs of all the valuations.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``iota`` -- an element of ``K``
- ``U`` -- a list of places (finite or infinite) of ``K``
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The log vector as a list of real numbers
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import column_Log
sage: K.<xi> = NumberField(x^3-3)
sage: S = tuple(K.primes_above(3))
sage: SUK = UnitGroup(K, S=S)
sage: phi_complex = K.places()[1]
sage: v_fin = S[0]
sage: U = [phi_complex, v_fin]
sage: column_Log(SUK, xi^2, U) # abs tol 1e-29
[1.464816384890812968648768625966, -2.197224577336219382790490473845]
REFERENCES:
- [Sma1995]_ p. 823
"""
R = RealField(prec)
return [ R(SUK.number_field().abs_val(v, iota, prec)).log() for v in U]
def c3_func(SUK, prec=106):
r"""
Return the constant `c_3` from [AKMRVW]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The constant ``c3``, as a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import c3_func
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: c3_func(SUK) # abs tol 1e-29
0.4257859134798034746197327286726
.. NOTE::
The numerator should be as close to 1 as possible, especially as the rank of the `S`-units grows large
REFERENCES:
- [AKMRVW]_ arXiv:1903.00977
"""
R = RealField(prec)
all_places = list(SUK.primes()) + SUK.number_field().places(prec)
Possible_U = Combinations(all_places, SUK.rank())
c1 = R(1) # guarantees final c1 >= 1
for U in Possible_U:
# first, build the matrix C_{i,U}
columns_of_C = []
for unit in SUK.fundamental_units():
columns_of_C.append(column_Log(SUK, unit, U, prec))
C = matrix(SUK.rank(), SUK.rank(), columns_of_C)
# Is it invertible?
if abs(C.determinant()) > 10**(-10):
poss_c1 = C.inverse().apply_map(abs).norm(Infinity)
c1 = R(max(poss_c1, c1))
return R(0.9999999) / (c1*SUK.rank())
def c4_func(SUK, v, A, prec=106):
r"""
Return the constant `c_4` from Smart's TCDF paper, [Sma1995]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- a place of ``K``, finite (a fractional ideal) or infinite (element of ``SUK.number_field().places(prec)``)
- ``A`` -- the set of the products of the coefficients of the `S`-unit equation with each root of unity of ``K``
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The constant ``c4``, as a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import c4_func
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: phi_real = K.places()[0]
sage: phi_complex = K.places()[1]
sage: v_fin = tuple(K.primes_above(3))[0]
sage: A = K.roots_of_unity()
sage: c4_func(SUK,phi_real,A)
1.000000000000000000000000000000
sage: c4_func(SUK,phi_complex,A)
1.000000000000000000000000000000
sage: c4_func(SUK,v_fin,A)
1.000000000000000000000000000000
REFERENCES:
- [Sma1995]_ p. 824
"""
return max(SUK.number_field().abs_val(v, alpha, prec) for alpha in A)
def beta_k(betas_and_ns):
r"""
Return a pair `[\beta_k, |\beta_k|_v]`, where `\beta_k` has the smallest nonzero valuation in absolute value of the list ``betas_and_ns``.
INPUT:
- ``betas_and_ns`` -- a list of pairs ``[beta, val_v(beta)]``, where ``beta`` is an element of ``SUK.fundamental_units()`` and ``val_v(beta)`` is its valuation at the place `v`
OUTPUT:
The pair ``[beta_k, v(beta_k)]``, where ``beta_k`` is an element of ``K`` and ``val_v(beta_k)`` is an integer
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import beta_k
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: v_fin = tuple(K.primes_above(3))[0]
sage: betas = [ [beta, beta.valuation(v_fin)] for beta in SUK.fundamental_units() ]
sage: beta_k(betas)
[xi, 1]
REFERENCES:
- [Sma1995]_ pp. 824-825
"""
for pair in betas_and_ns:
if abs( pair[1] ) != 0:
good_pair = pair
break
for pair in betas_and_ns:
if ( abs(pair[1]) != 0 and abs(pair[1]) < abs(good_pair[1]) ):
good_pair = pair
return good_pair
def mus(SUK, v):
r"""
Return a list `[\mu]`, for `\mu` defined in [AKMRVW]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- a finite place of ``K``
OUTPUT:
A list ``[mus]`` where each ``mu`` is an element of ``K``
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import mus
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: v_fin = tuple(K.primes_above(3))[0]
sage: mus(SUK, v_fin)
[xi^2 - 2]
REFERENCES:
- [AKMRVW]_
"""
betas = SUK.fundamental_units()
beta_and_ns = [[beta,beta.valuation(v)] for beta in betas]
if all(pair[1]==0 for pair in beta_and_ns):
return betas
else:
good_pair = beta_k(beta_and_ns)
temp = [(beta[0]**good_pair[1])*(good_pair[0]**(-beta[1])) for beta in beta_and_ns]
temp.remove(1)
return temp
def possible_mu0s(SUK, v):
r"""
Return a list `[\mu_0]` of all possible `\mu_0` values defined in [AKMRVW]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- a finite place of ``K``
OUTPUT:
A list ``[mu0s]`` where each ``mu0`` is an element of ``K``
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import possible_mu0s
sage: K.<xi> = NumberField(x^3-3)
sage: S = tuple(K.primes_above(3))
sage: SUK = UnitGroup(K, S=S)
sage: v_fin = S[0]
sage: possible_mu0s(SUK,v_fin)
[-1, 1]
.. NOTE::
`n_0` is the valuation of the coefficient `\alpha_d` of the `S`-unit equation such that `|\alpha_d \tau_d|_v = 1`
We have set `n_0 = 0` here since the coefficients are roots of unity
`\alpha_0` is not defined in the paper; we set it to be 1
REFERENCES:
- [AKMRVW]_
- [Sma1995]_ pp. 824-825, but we modify the definition of ``sigma`` (``sigma_tilde``) to make it easier to code
"""
beta_and_ns = [[beta,beta.valuation(v)] for beta in SUK.fundamental_units()]
betak, nk = beta_k(beta_and_ns)
ns = [beta[1] for beta in beta_and_ns if beta[0] != betak]
betas = [beta[0] for beta in beta_and_ns if beta[0] != betak]
mu0s = []
for rs in combinations_with_replacement(range(abs(nk)), len(betas)):
# n_0 = valuation_v of one of the coefficients of the equation = 0 for x + y = 1 p. 824
n_rs = zip(ns, rs)
sigma_tilde = -(sum([n_r[0]*n_r[1] for n_r in n_rs]))
if sigma_tilde % nk == 0:
beta_rs = zip(betas, rs)
temp_prod = prod([beta_r[0]**beta_r[1] for beta_r in beta_rs]) * betak**(sigma_tilde/nk)
for alpha0 in SUK.roots_of_unity():
if alpha0*temp_prod not in mu0s:
mu0s.append(alpha0*temp_prod)
return mu0s
def Yu_a1_kappa1_c1(p, dK, ep):
r"""
Compute the constants a(1), kappa1, and c(1) of [Yu2007]_.
INPUT:
- ``p`` -- a rational prime number
- ``dK`` -- the absolute degree of some number field `K`
- ``ep`` -- the absolute ramification index of some prime `\mathfrak{p}` of `K` lying above `p`
OUTPUT:
The constants a(1), kappa1, and c(1).
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import Yu_a1_kappa1_c1
sage: Yu_a1_kappa1_c1(5, 10, 3)
(16, 20, 319)
REFERENCES:
- [Yu2007]_
"""
# For readability, we compute a(1) and kappa1 first.
if p == 2:
a1 = 32
kappa1 = 40
elif p == 3:
a1 = 16
kappa1 = 20
else:
if ep >= 2:
a1 = 16
kappa1 = 20
else:
a1 = 8*(p-1)/(p-2)
kappa1 = 10
# Next we compute c(1), which has more cases to consider.
if p == 2:
c1 = 160
elif p == 3:
if dK == 1:
c1 = 537
else:
c1 = 759
elif p == 5:
if ep == 1:
c1 = 1473
else:
c1 = 319
elif p%4 == 1:
if ep == 1:
c1 = 1473
else:
c1 = 1502
else:
# p > 5 and p % 4 == 3
if ep == 1:
if dK == 1:
c1 = 1288
else:
c1 = 1282
else:
c1 = 2190
return a1, kappa1, c1
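# A worked instance of the case analysis above, derived directly from the
# branches of Yu_a1_kappa1_c1 (not an additional value from [Yu2007]_):
# for p = 7, dK = 1, ep = 1 we fall into the p % 4 == 3, ep == 1, dK == 1 case,
# so Yu_a1_kappa1_c1(7, 1, 1) returns (a1, kappa1, c1) = (8*6/5, 10, 1288).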
def Yu_condition_115(K, v):
r"""
Return ``True`` or ``False``, according to whether the number field ``K`` and the finite place ``v`` satisfy condition (1.15) of [Yu2007]_.
INPUT:
- ``K`` -- a number field
- ``v`` -- a finite place of ``K``
OUTPUT:
``True`` if (1.15) is satisfied, otherwise ``False``.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import Yu_condition_115
sage: K.<a> = NumberField(x^2 + 5)
sage: v2 = K.primes_above(2)[0]
sage: v11 = K.primes_above(11)[0]
sage: Yu_condition_115(K, v2)
False
sage: Yu_condition_115(K, v11)
True
REFERENCES:
- [Yu2007]_ p. 188
"""
p = v.smallest_integer()
f = v.residue_class_degree()
w = K.number_of_roots_of_unity()
# Determine q.
if p == 2:
q = 3
else:
q = 2
# Check the condition.
if q == 2:
if p**f % 4 == 1:
return True
if w%4 == 0:
return True
else:
if w%3 == 0:
return True
return False
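# A short check of the doctest values above, traced through the code:
# for v11 over K = Q(a) with a^2 = -5, the prime p = 11 is inert, so f = 2 and
# p**f = 121 == 1 (mod 4), hence the q == 2 branch returns True; for v2,
# p = 2 forces q = 3, and w = 2 (the only roots of unity are +-1), so
# w % 3 != 0 and the function returns False.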
def Yu_modified_height(mu, n, v, prec=106):
r"""
Return the value of `h^{(n)}(\mu)` as appearing in equation (1.21) of [Yu2007]_.
INPUT:
- ``mu`` -- an element of a field K
- ``n`` -- number of mu_j to be considered in Yu's Theorem.
- ``v`` -- a place of K
- ``prec`` -- the precision of the real field
OUTPUT:
The value `h_p(mu)`.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 5)
sage: v11 = K.primes_above(11)[0]
sage: from sage.rings.number_field.S_unit_solver import Yu_modified_height
sage: Yu_modified_height(a, 3, v11)
0.8047189562170501873003796666131
If mu is a root of unity, the output is not zero. ::
sage: Yu_modified_height(-1, 3, v11)
0.03425564675426243634374205111379
REFERENCES:
- [Yu2007]_ p. 192
"""
R = RealField(prec)
K = v.number_field()
dK = K.degree()
p = v.smallest_integer()
ep = v.ramification_index()
fp = v.residue_class_degree()
a1, kappa1, c1 = Yu_a1_kappa1_c1(p, dK, ep)
h0 = mu.global_height(prec)
h1 = R( fp * R(p).log() / (kappa1 * (n + 4) * dK) )
if h0 > h1:
return h0
else:
return h1
def Omega_prime(dK, v, mu_list, prec=106):
r"""
Return the constant Omega' appearing in [AKMRVW]_.
INPUT:
- ``dK`` -- the degree of a number field `K`
- ``v`` -- a finite place of `K`
- ``mu_list`` -- a list of nonzero elements of `K`. It is assumed that the sublist mu[1:] is multiplicatively independent.
- ``prec`` -- the precision of the real field
OUTPUT:
The constant `Omega'`.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import mus, Omega_prime
sage: K.<a> = NumberField(x^3 - 3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(6)))
sage: v = K.primes_above(3)[0]
sage: mu_list = [-1] + mus(SUK, v)
sage: dK = K.degree()
sage: Omega_prime(dK, v, mu_list)
0.000487349679922696
REFERENCES:
- [AKMRVW]_ arXiv:1903.00977
"""
R = RealField(prec)
omega_prime = R(1)
for mu in mu_list[1:]:
omega_prime *= mu.global_height()
n = len(mu_list)
omega_prime *= Yu_modified_height(mu_list[0], n, v, prec)
return omega_prime
def Yu_C1_star(n, v, prec=106):
r"""
Return the constant C_1^* appearing in [Yu2007]_ (1.23).
INPUT:
- ``n`` -- the number of generators of a multiplicative subgroup of a field `K`
- ``v`` -- a finite place of `K` (a fractional ideal)
- ``prec`` -- the precision of the real field
OUTPUT:
The constant `C1_star` as a real number.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 5)
sage: v11 = K.primes_above(11)[0]
sage: from sage.rings.number_field.S_unit_solver import Yu_C1_star
sage: Yu_C1_star(1,v11)
2.154667761574516556114215527020e6
REFERENCES:
- [Yu2007]_ pp. 189, 193
"""
R = RealField(prec)
K = v.number_field()
dK = K.absolute_degree()
p = v.smallest_integer()
ep = v.ramification_index()
fp = v.residue_class_degree()
if p == 2:
q = 3
else:
q = 2
w = K.number_of_roots_of_unity()
u = ZZ(w).valuation(q)
a_paren_1, kappa1, c_paren_1 = Yu_a1_kappa1_c1(p, dK, ep)
C1 = R(1)
C1 *= c_paren_1
C1 *= a_paren_1**n
C1 *= (n**n * (n+1)**(n+1))/factorial(n)
C1 *= p**fp/(q**u)
C1 *= ( dK / (fp * R(p).log()) )**(n+2)
C1 *= R(max(dK, exp(1))).log()
C1 *= max( R(exp(4)*(n+1)*dK).log(), ep, fp * R(p).log() )
C1_star = R((n+1) * C1)
return C1_star
def Yu_bound(SUK, v, prec=106):
r"""
Return `c8` such that `c8 >= exp(2)/\log(2)` and `ord_p (\Theta - 1) < c8 \log B`,
where `\Theta = \prod_{j=1}^n \alpha_j^{b_j}` and `B \geq \max_j |b_j|` and `B \geq 3`.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- a finite place of `K` (a fractional ideal)
- ``prec`` -- the precision of the real field
OUTPUT:
The constant `c8` as a real number.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import Yu_bound
sage: K.<a> = NumberField(x^2 + 11)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(6)))
sage: v = K.primes_above(3)[0]
sage: Yu_bound(SUK, v)
9.03984381033128e9
REFERENCES:
- [Sma1995]_ p. 825
- [Yu2007]_ pp. 189--193, esp. Theorem 1
- [AKMRVW]_ arXiv:1903.00977
"""
# We are using Theorem 1 of "p-adic logarithmic forms and group varieties III" by Kunrui Yu.
# We require the assumption of (1.18): B \geq max {|b_1|,...,|b_n|,3}
# To be sure that the Lemma of Petho-de Weger is applicable in a later function, we always return a value >= exp(2)/log(2).
R = RealField(prec)
p = v.smallest_integer()
K = SUK.number_field()
dK = K.absolute_degree()
mu_free_gens = mus(SUK, v)
poss_mu0 = possible_mu0s(SUK, v)
n = 1 + len(mu_free_gens)
if Yu_condition_115(K,v):
largest_Omega_prime = R(0)
for mu0 in poss_mu0:
current_Omega_prime = Omega_prime(dK, v, [mu0] + mu_free_gens[:], prec)
largest_Omega_prime = max( current_Omega_prime, largest_Omega_prime )
C1star = Yu_C1_star(n, v, prec)
return max( exp(R(2))/R(2).log(), largest_Omega_prime * C1star)
else:
# K and v don't satisfy the theorem hypotheses, and we must move to a quadratic extension L.
# For justification of this next bound, see [AKMRVW].
x = SR.var('x')
if p == 2:
L_over_K = K.extension(x**2 + x + 1, 'xi0')
else:
L_over_K = K.extension(x**2 + 1, 'xi0')
# pick any prime vL over v
vL_0 = L_over_K.primes_above(v)[0]
e_vL_v = vL_0.relative_ramification_index()
# build absolute versions of L and vL
L = L_over_K.absolute_field('xi_L')
vL_gens = tuple( [L(z) for z in vL_0.gens()] )
vL = L.fractional_ideal( vL_gens )
dL = L.degree()
largest_Omega_prime = R(0)
for mu0 in poss_mu0:
current_Omega_prime = Omega_prime(dL, vL, [mu0] + mu_free_gens[:], prec)
largest_Omega_prime = max( current_Omega_prime, largest_Omega_prime )
C1star = Yu_C1_star(n, vL, prec)
return max(exp(R(2))/R(2).log(), e_vL_v * largest_Omega_prime * C1star)
def K0_func(SUK, A, prec=106):
r"""
Return the constant `K_0` from [AKMRVW]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``A`` -- the set of the products of the coefficients of the `S`-unit equation with each root of unity of ``K``
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The constant ``K0``, a real number.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import K0_func
sage: K.<a> = NumberField(x^2 + 11)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(6)))
sage: v = K.primes_above(3)[0]
sage: K0_func(SUK, K.roots_of_unity())
8.84763586062272e12
REFERENCES:
- [Sma1995]_ p. 824
- [AKMRVW]_ arXiv:1903.00977
"""
R = RealField(prec)
K0 = R(1)
c3 = c3_func(SUK, prec)
for v_l in SUK.primes():
e_l = v_l.residue_class_degree()
Norm_v_l = v_l.absolute_norm()
c5_l = c3/(e_l * R(Norm_v_l).log())
c8_l = Yu_bound(SUK, v_l, prec)
K0_l = (2 * c8_l)/(e_l * c5_l) * R(c8_l / (e_l * c5_l)).log()
K0 = max(K0, K0_l)
return K0
def c11_func(SUK, v, A, prec=106):
r"""
Return the constant `c_{11}` from Smart's TCDF paper, [Sma1995]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- a place of ``K``, finite (a fractional ideal) or infinite (element of ``SUK.number_field().places(prec)``)
- ``A`` -- the set of the products of the coefficients of the `S`-unit equation with each root of unity of ``K``
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The constant ``c11``, a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import c11_func
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: phi_real = K.places()[0]
sage: phi_complex = K.places()[1]
sage: A = K.roots_of_unity()
sage: c11_func(SUK, phi_real, A) # abs tol 1e-29
3.255848343572896153455615423662
sage: c11_func(SUK, phi_complex, A) # abs tol 1e-29
6.511696687145792306911230847323
REFERENCES:
- [Sma1995]_ p. 825
"""
R = RealField(prec)
if is_real_place(v):
return R(4*c4_func(SUK, v, A, prec)).log() / c3_func(SUK, prec)
else:
return 2*R(4*(c4_func(SUK, v, A, prec)).sqrt()).log() / c3_func(SUK, prec)
def c13_func(SUK, v, prec=106):
r"""
Return the constant `c_{13}` from Smart's TCDF paper, [Sma1995]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- an infinite place of ``K`` (element of ``SUK.number_field().places(prec)``)
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The constant ``c13``, as a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import c13_func
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: phi_real = K.places()[0]
sage: phi_complex = K.places()[1]
sage: c13_func(SUK, phi_real) # abs tol 1e-29
0.4257859134798034746197327286726
sage: c13_func(SUK, phi_complex) # abs tol 1e-29
0.2128929567399017373098663643363
It is an error to input a finite place. ::
sage: phi_finite = K.primes_above(3)[0]
sage: c13_func(SUK, phi_finite)
Traceback (most recent call last):
...
TypeError: Place must be infinite
REFERENCES:
- [Sma1995]_ p. 825
"""
try:
v.codomain()
except AttributeError:
raise TypeError('Place must be infinite')
if is_real_place(v):
return c3_func(SUK, prec)
else:
return c3_func(SUK, prec)/2
def K1_func(SUK, v, A, prec=106):
r"""
Return the constant `K_1` from Smart's TCDF paper, [Sma1995]_.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``v`` -- an infinite place of ``K`` (element of ``SUK.number_field().places(prec)``)
- ``A`` -- a list of all products of each potential ``a``, ``b`` in the `S`-unit equation ``ax + by + 1 = 0`` with each root of unity of ``K``
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
The constant ``K1``, a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import K1_func
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(3)))
sage: phi_real = K.places()[0]
sage: phi_complex = K.places()[1]
sage: A = K.roots_of_unity()
sage: K1_func(SUK, phi_real, A)
4.483038368145048508970350163578e16
sage: K1_func(SUK, phi_complex, A)
2.073346189067285101984136298965e17
REFERENCES:
- [Sma1995]_ p. 825
"""
R = RealField(prec)
# [Sma1995]_ p. 825
if is_real_place(v):
c11 = R(4*c4_func(SUK, v, A, prec)).log() / c3_func(SUK, prec)
else:
c11 = 2*( R(4*(c4_func(SUK,v, A, prec)).sqrt()).log() ) / c3_func(SUK, prec)
# [Sma1995]_ p. 825
if is_real_place(v):
c12 = R(2 * c4_func(SUK, v, A, prec))
else:
c12 = R(2 * c4_func(SUK, v, A, prec).sqrt())
# [Sma1998]_ p. 225, Theorem A.1
d = SUK.number_field().degree()
t = SUK.rank()
Baker_C = R(18 * factorial(t+2) * (t+1)**(t+2) * (32*d)**(t+3) * R(2*(t+1) * d).log())
def hprime(SUK, alpha, v):
# [Sma1998]_ p. 225
return R(max(alpha.global_height(), 1/SUK.number_field().degree(), abs( v(alpha).log() ) / SUK.number_field().degree()))
# [Sma1995]_ p. 825 and [Sma1998]_ p. 225, Theorem A.1
c14 = Baker_C * prod([hprime(SUK, alpha, v) for alpha in SUK.gens_values()])
# [Sma1995]_ p. 825
c13 = c13_func(SUK,v,prec)
w = len(SUK.roots_of_unity())
c15 = (2/c13)*(c12.log()+c14*(((t+1)*w*c14/c13).log()))
return max([c11, c15])
def minimal_vector(A, y, prec=106):
r"""
INPUT:
- ``A`` -- a square `n` by `n` non-singular integer matrix whose rows generate a lattice `\mathcal L`
- ``y`` -- a row (1 by `n`) vector with integer coordinates
- ``prec`` -- the precision of the real field (default: 106)
OUTPUT:
A lower bound for the square of
.. MATH::
\ell(\mathcal L, \vec y) =
\begin{cases}
\displaystyle\min_{\vec x \in \mathcal L} \Vert \vec x - \vec y \Vert, & \vec y \notin \mathcal L, \\
\displaystyle\min_{0 \neq \vec x \in \mathcal L} \Vert \vec x \Vert, & \vec y \in \mathcal L.
\end{cases}
ALGORITHM:
The algorithm is based on V.9 and V.10 of [Sma1998]_
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import minimal_vector
sage: B = matrix(ZZ, 2, [1,1,1,0])
sage: y = vector(ZZ, [2,1])
sage: minimal_vector(B, y)
1/2
::
sage: B = random_matrix(ZZ, 3)
sage: while not B.determinant():
....: B = random_matrix(ZZ, 3)
sage: B # random
[-2 -1 -1]
[ 1 1 -2]
[ 6 1 -1]
sage: y = vector([1, 2, 100])
sage: minimal_vector(B, y) # random
15/28
"""
if A.is_singular():
raise ValueError('The matrix A is singular')
R = RealField(prec)
n = len(y)
c1 = 2**(n-1)
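# c1 comes from the standard LLL guarantee: for an LLL-reduced basis,
# ||b_1||^2 <= 2^(n-1) * lambda_1(L)^2, so dividing ||b_1||^2 by c1 below
# yields a lower bound on the square of the shortest-vector norm (or distance).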
ALLL = A.LLL()
ALLLinv = ALLL.inverse()
ybrace = [ abs(R(a-a.round())) for a in y * ALLLinv if (a-a.round()) != 0]
if len(ybrace) == 0:
return (ALLL.rows()[0].norm())**2 / c1
else:
sigma = ybrace[len(ybrace)-1]
return ((ALLL.rows()[0].norm())**2 * sigma) / c1
def reduction_step_complex_case(place, B0, list_of_gens, torsion_gen, c13):
r"""
INPUT:
- ``place`` -- (ring morphism) an infinite place of a number field `K`
- ``B0`` -- the initial bound
- ``list_of_gens`` -- a set of generators of the free part of the group
- ``torsion_gen`` -- an element of the torsion part of the group
- ``c13`` -- a positive real number
OUTPUT:
A tuple consisting of:
1. a new upper bound, an integer
2. a boolean value, ``True`` if we have to increase precision, otherwise ``False``
.. NOTE::
The constant ``c13`` is defined in Section 5 of [AKMRVW]_.
This function handles both real and non-real infinite places.
REFERENCES:
See [Sma1998]_, [AKMRVW]_.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import reduction_step_complex_case
sage: K.<a> = NumberField([x^3-2])
sage: SK = sum([K.primes_above(p) for p in [2,3,5]],[])
sage: G = [g for g in K.S_unit_group(S=SK).gens_values() if g.multiplicative_order()==Infinity]
sage: p1 = K.places(prec=100)[1]
sage: reduction_step_complex_case(p1, 10^5, G, -1, 2)
(18, False)
"""
prec = place.codomain().precision()
R = RealField(prec)
CF = ComplexField(prec)
n = len(list_of_gens)
w = torsion_gen.multiplicative_order()
real_part_log_gens = [ R(CF(place(g).log()).real_part()) for g in list_of_gens]
imag_part_log_gens = [ R(CF(place(g).log()).imag_part()) for g in list_of_gens]
real_part_log_gens += [R(0)]
imag_part_log_gens += [2*R.pi()/w]
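# The torsion generator contributes a purely imaginary logarithm, a multiple
# of 2*pi*i/w; this is why the torsion entry appended above has real part 0
# and imaginary part 2*pi/w.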
abs_log_parts = [abs(part) for part in real_part_log_gens]+[abs(part) for part in imag_part_log_gens]
max_part_log = max(abs_log_parts)
npi = []
# we collect the list of indices of log(g) which are not pure imaginary
# if this list is empty, we have to take a special case
for i in range(len(real_part_log_gens)):
lg = real_part_log_gens[i]
if abs(lg) > 2**(-place.codomain().precision()):
npi.append(i)
# someday make this a separate function
if not npi:
# this is the pure imaginary case: every log(g) is purely imaginary
C = ZZ(1)
S = n * B0**2
T = (n+w+n*w)*B0 / 2
finish = False
while not finish:
A = identity_matrix(ZZ, n+1)
A[n] = vector([(g * C).round() for g in imag_part_log_gens])
if A.is_singular():
C = ZZ(2*C)
else:
# We have to work with rows because of the .LLL() function
A = A.transpose()
# Note that l is a lower bound on the square of the magnitude of the shortest non-zero vector in the lattice generated by A
l = minimal_vector(A, zero_vector(ZZ,n+1))
# Checking hypotheses of Lemma 5.3 in our paper:
if l <= T**2+S:
C = ZZ(2*C)
# Need to check precision: it must be at least two more than the number of digits in the largest entry of A to ensure true rounding
if prec < R(C*max_part_log).log()/R(2).log()+3:
return 0, True
else:
# Need to check precision: it must be at least two more than the number of digits in the largest entry of A to ensure true rounding
if prec < R(C*max_part_log).log()/R(2).log()+3:
return 0, True
else:
Bnew = ((R(C * 2).log() - ((l-S).sqrt()-T).log()) / c13).round()
finish = True
return max(4,w,Bnew), False
elif is_real_place(place):
# this is the case when we are working with a real embedding, we get savings here
C = R(1)
S = (n-1) * B0**2
w = place.domain().number_of_roots_of_unity()
T = (n*B0+1)/R(2)
finish = False
while not finish:
A = copy(identity_matrix(ZZ, n+1))
# We redefine the imaginary parts in case any generator was negative
new_imag_part_log_gens = [0 for i in imag_part_log_gens[:-1]]+[imag_part_log_gens[-1]]
A[n-1] = vector([(g*C).round() for g in real_part_log_gens])
A[n] = vector([(g*C).round() for g in new_imag_part_log_gens])
if A.is_singular():
C *= 2
else:
# We apply Lemma 5.3 from [AKMRVW]
A = A.transpose()
l = minimal_vector(A, zero_vector(ZZ,n+1))
# Note that l is a lower bound on the square of the magnitude of the shortest non-zero vector in the lattice generated by A
# Checking the hypothesis of Lemma 5.3 in [AKMRVW]
if l <= T**2 + S:
C *= 2
# Need to check precision: it must be at least two more than the number of digits in the largest entry of A to ensure true rounding
if prec < R(C*max_part_log).log()/R(2).log()+3:
return 0, True
else:
# Need to check precision: it must be at least two more than the number of digits in the largest entry of A to ensure true rounding
if prec < R(C*max_part_log).log()/R(2).log()+3:
return 0, True
else:
Bnew = ((R(C * 2).log() - ((l-S).sqrt()-T).log()) / c13).round()
finish = True
return max(4,w,Bnew), False
else:
# the case when the real part is not 0 for all log(a_i), see Lemma 5.2 in [AKMRVW]
C = R(1)
S = (n-1) * B0**2
w = place.domain().number_of_roots_of_unity()
T = (n+w+n*w)*B0/R(2).sqrt()
finish = False
# we reorder the generators so that the real part of the last non-torsion generator is not 0:
if n-1 not in npi:
new_last_gen_index = npi[0]
old_last_gen_real = real_part_log_gens[n-1]
old_last_gen_imag = imag_part_log_gens[n-1]
real_part_log_gens[n-1] = real_part_log_gens[new_last_gen_index]
imag_part_log_gens[n-1] = imag_part_log_gens[new_last_gen_index]
real_part_log_gens[new_last_gen_index] = old_last_gen_real
imag_part_log_gens[new_last_gen_index] = old_last_gen_imag
while not finish:
A = copy(identity_matrix(ZZ, n+1))
A[n-1] = vector([(g*C).round() for g in real_part_log_gens])
A[n] = vector([(g*C).round() for g in imag_part_log_gens])
if A.is_singular():
C *= 2
else:
# We apply Lemma 5.2 from [AKMRVW]
A = A.transpose()
l = minimal_vector(A, zero_vector(ZZ,n+1))
# Note that l is a lower bound on the square of the magnitude of the shortest non-zero vector in the lattice generated by A
# Checking the hypothesis of Lemma 5.2 in [AKMRVW]
if l <= T**2 + S:
C *= 2
# Need to check precision: it must be at least two more than the number of digits in the largest entry of A to ensure true rounding
if prec < R(C*max_part_log).log()/R(2).log()+3:
return 0, True
else:
# Need to check precision: it must be at least two more than the number of digits in the largest entry of A to ensure true rounding
if prec < R(C*max_part_log).log()/R(2).log()+3:
return 0, True
else:
Bnew = ((R(C * 2).log() - ((l-S).sqrt()-T).log()) / c13).round()
finish = True
return max(4,w,Bnew), False
def cx_LLL_bound(SUK, A, prec=106):
r"""
Return the maximum of all of the `K_1`'s as they are LLL-optimized for each infinite place `v`.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``A`` -- a list of all products of each potential ``a``, ``b`` in the `S`-unit equation ``ax + by + 1 = 0`` with each root of unity of ``K``
- ``prec`` -- precision of real field (default: 106)
OUTPUT:
A bound for the exponents at the infinite place, as a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import cx_LLL_bound
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K,S=tuple(K.primes_above(3)))
sage: A = K.roots_of_unity()
sage: cx_LLL_bound(SUK,A) # long time
35
"""
cx_LLL = 0
# initialize the bound with a trivial guess; as we iterate over the places of the number field, we replace it with the largest complex LLL bound found so far
for v in SUK.number_field().places(prec=prec):
prec_v = prec
c13_LLL = c13_func(SUK, v, prec_v)
cx_bound = K1_func(SUK, v, A, prec_v)
# cx_bound is the LLL bound according to this place; it will be replaced as LLL gives us smaller bounds
new_bound, inc_prec = reduction_step_complex_case(v, cx_bound, SUK.fundamental_units(), SUK.zeta(), c13_LLL)
while inc_prec:
v = refine_embedding(v)
c13_LLL = c13_func(SUK, v, prec_v)
cx_bound = K1_func(SUK, v, A, prec_v)
new_bound, inc_prec = reduction_step_complex_case(v, cx_bound, SUK.fundamental_units(), SUK.zeta(), c13_LLL)
counter = 0
while abs(cx_bound - new_bound) > .5*cx_bound and counter < 15:
# The counter guards against a loop that fails to converge
# Repeat complex LLL until we get essentially no change from it
cx_bound = min(cx_bound, new_bound)
new_bound, inc_prec = reduction_step_complex_case(v, cx_bound, SUK.fundamental_units(), SUK.zeta(), c13_LLL)
while inc_prec:
v = refine_embedding(v)
c13_LLL = c13_func(SUK, v, prec_v)
new_bound, inc_prec = reduction_step_complex_case(v, cx_bound, SUK.fundamental_units(), SUK.zeta(), c13_LLL)
counter += 1
cx_bound = min(cx_bound, new_bound)
# for this place the complex LLL bound is cx_bound
cx_LLL = max(cx_bound, cx_LLL)
# compare this value with the complex LLL bounds found for the previous places; if it is bigger, replace that bound
return cx_LLL
def log_p(a, prime, prec):
r"""
INPUT:
- ``a`` -- an element of a number field `K`
- ``prime`` -- a prime ideal of the number field `K`
- ``prec`` -- a positive integer
OUTPUT:
An element of `K` which is congruent to the ``prime``-adic logarithm of ``a`` with respect to ``prime`` modulo ``p^prec``, where ``p`` is the rational prime below ``prime``
.. NOTE::
Here we take into account the other primes in `K` above `p` in order to get coefficients with small values
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import log_p
sage: K.<a> = NumberField(x^2+14)
sage: p1 = K.primes_above(3)[0]
sage: p1
Fractional ideal (3, a + 1)
sage: log_p(a+2, p1, 20)
8255385638/3*a + 15567609440/3
::
sage: K.<a> = NumberField(x^4+14)
sage: p1 = K.primes_above(5)[0]
sage: p1
Fractional ideal (5, a + 1)
sage: log_p(1/(a^2-4), p1, 30)
-42392683853751591352946/25*a^3 - 113099841599709611260219/25*a^2 -
8496494127064033599196/5*a - 18774052619501226990432/25
"""
if a == 0:
raise ValueError('a is the zero element')
if a.valuation(prime) != 0:
raise ValueError('The valuation of a with respect to prime is not zero')
K = prime.ring()
p = prime.smallest_integer()
# In order to get an approximation with small coefficients we have to take into account the other primes above p
# with negative valuation. For example, say prime2 is another (principal ideal) prime above p, and a=(unit)(prime2)^(-k) for some unit and k
# a positive integer, and let tilde(a):=a(prime2)^k. Then log_p(a)=log_p(tilde(a))-k(log_p(prime2)), where the series representations
# of these two logs will have smaller coefficients.
primes = [(-(a.valuation(pr)),pr) for pr in K.primes_above(p) if a.valuation(pr) < 0]
local_terms = []
for (val, pr) in primes:
# for each pair in primes we find an element of K divisible only by pr and by no other prime above p, then raise it to the correct exponent
if pr.is_principal():
local_terms.append(pr.gens_reduced()[0]**val)
else:
local_terms.append(pr.gens()[1]**val)
return log_p_series_part(a*prod(local_terms), prime, prec) - sum([log_p_series_part(b, prime, prec) for b in local_terms])
def log_p_series_part(a, prime, prec):
r"""
INPUT:
- ``a`` -- an element of a number field `K`
- ``prime`` -- a prime ideal of the number field `K`
- ``prec`` -- a positive integer
OUTPUT:
The ``prime``-adic logarithm of ``a``, to accuracy ``p^prec``, where ``p`` is the rational prime below ``prime``
ALGORITHM:
The algorithm is based on the algorithm on page 30 of [Sma1998]_
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import log_p_series_part
sage: K.<a> = NumberField(x^2-5)
sage: p1 = K.primes_above(3)[0]
sage: p1
Fractional ideal (3)
sage: log_p_series_part(a^2-a+1, p1, 30)
120042736778562*a + 263389019530092
::
sage: K.<a> = NumberField(x^4+14)
sage: p1 = K.primes_above(5)[0]
sage: p1
Fractional ideal (5, a + 1)
sage: log_p_series_part(1/(a^2-4), p1, 30)
5628940883264585369224688048459896543498793204839654215019548600621221950915106576555819252366183605504671859902129729380543157757424169844382836287443485157589362653561119898762509175000557196963413830027960725069496503331353532893643983455103456070939403472988282153160667807627271637196608813155377280943180966078/1846595723557147156151786152499366687569722744011302407020455809280594038056223852568951718462474153951672335866715654153523843955513167531739386582686114545823305161128297234887329119860255600972561534713008376312342295724191173957260256352612807316114669486939448006523889489471912384033203125*a^2 + 2351432413692022254066438266577100183514828004415905040437326602004946930635942233146528817325416948515797296867947688356616798913401046136899081536181084767344346480810627200495531180794326634382675252631839139904967037478184840941275812058242995052383261849064340050686841429735092777331963400618255005895650200107/1846595723557147156151786152499366687569722744011302407020455809280594038056223852568951718462474153951672335866715654153523843955513167531739386582686114545823305161128297234887329119860255600972561534713008376312342295724191173957260256352612807316114669486939448006523889489471912384033203125
"""
if a.valuation(prime) != 0:
raise ValueError('The valuation of a with respect to prime is not zero')
K = prime.ring()
g = K.gen()
p = prime.smallest_integer()
f = prime.residue_class_degree()
e = prime.absolute_ramification_index()
q = p**f - 1
R = RealField(prec)
divisor = q.divisors()
order = min(d for d in divisor if (a**d - 1).valuation(prime) > 0)
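# Here order is the multiplicative order of a in the residue field, which
# divides q = p^f - 1; hence gamma = a**order is congruent to 1 modulo prime.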
gamma = a**order
t = 0
while (gamma-1).valuation(prime) <= e:
t += 1
gamma = gamma**p
prec += t
# since later we divide by p^t, we must increase the precision by t at this point.
m = (gamma-1).valuation(prime) / e
n = Integer(1)
step = 10 ** (R(prec).log()/R(10).log()).floor()
while n < (R(n).log()/R(p).log() + prec)/m:
n += step
# We could use a smaller step size to find the actual smallest such n; however, this seems to run faster.
w = (R(prec).log()/R(p).log()).floor()
gamma = sum([ZZ(gi % (p**(prec+w))) * g**i
if gi.valuation(p) >= 0 else
ZZ((gi * p**(-gi.valuation(p))) % (p**(prec+w-gi.valuation(p)))) * p**(gi.valuation(p)) * g**i
for i,gi in enumerate(gamma) if gi != 0])
beta = 0
delta = 1 - gamma
for i in range(1, n+1):
beta -= delta / i
delta *= (1 - gamma)
delta = sum([ZZ(di % (p**(prec+w))) * g**b
if di.valuation(p) >= 0 else
ZZ((di * p**(-di.valuation(p))) % (p**(prec + w - di.valuation(p)))) * p**(di.valuation(p)) * g**b
for b,di in enumerate(delta) if di != 0])
beta = beta / (order * p**t)
# we try to make the coefficients small
logp = 0
for i,b in enumerate(beta.list()):
val = b.valuation(p)
if val < 0:
t = b * p**(-val)
t = ZZ(mod(t, p**(prec-val)))
t = t * p**val
else:
t = ZZ(mod(b, p**prec))
logp = logp + t * g**i
return logp
def defining_polynomial_for_Kp(prime, prec=106):
r"""
INPUT:
- ``prime`` -- a prime ideal of a number field `K`
- ``prec`` -- a positive natural number (default: 106)
OUTPUT:
A polynomial with integer coefficients that is equivalent ``mod p^prec`` to a defining polynomial for the completion of `K` associated to the specified prime.
.. NOTE::
`K` has to be an absolute extension
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import defining_polynomial_for_Kp
sage: K.<a> = QuadraticField(2)
sage: p2 = K.prime_above(7); p2
Fractional ideal (-2*a + 1)
sage: defining_polynomial_for_Kp(p2, 10)
x + 266983762
::
sage: K.<a> = QuadraticField(-6)
sage: p2 = K.prime_above(2); p2
Fractional ideal (2, a)
sage: defining_polynomial_for_Kp(p2, 100)
x^2 + 6
sage: p5 = K.prime_above(5); p5
Fractional ideal (5, a + 2)
sage: defining_polynomial_for_Kp(p5, 100)
x + 3408332191958133385114942613351834100964285496304040728906961917542037
"""
K = prime.ring()
if not K.is_absolute():
raise ValueError('The number field is not an absolute extension')
theta = K.gen()
f = K.defining_polynomial()
p = prime.smallest_integer()
e = prime.absolute_ramification_index()
N = prec
while True:
RQp = Qp(p, prec=N, type='capped-rel', print_mode='series')
# We factor f in Integers(p**(precision)) using the factorization in Qp
factors = f.change_ring(RQp).factor()
# We are going to find which factor of f is related to the prime ideal 'prime'
L = [g.change_ring(ZZ) for g, _ in factors]
A = [g for g in L if (g(theta)).valuation(prime) >= e*N/2]
# We narrow down the list until only one value remains
if len(A) == 1:
return A[0].change_ring(Integers(p**prec)).change_ring(ZZ)
else:
N += 1
def embedding_to_Kp(a, prime, prec):
r"""
INPUT:
- ``a`` -- an element of a number field `K`
- ``prime`` -- a prime ideal of `K`
- ``prec`` -- a positive natural number
OUTPUT:
An element of `K` that is equivalent to ``a`` modulo ``p^(prec)``, in which the generator of `K` appears with exponent less than `e \cdot f`, where ``p`` is the rational prime below ``prime`` and `e, f` are the ramification index and residue degree, respectively.
.. NOTE::
`K` has to be an absolute number field
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import embedding_to_Kp
sage: K.<a> = QuadraticField(17)
sage: p = K.prime_above(13); p
Fractional ideal (-a + 2)
sage: embedding_to_Kp(a-3, p, 15)
-20542890112375827
::
sage: K.<a> = NumberField(x^4-2)
sage: p = K.prime_above(7); p
Fractional ideal (-a^2 + a - 1)
sage: embedding_to_Kp(a^3-3, p, 15)
-1261985118949117459462968282807202378
"""
K = prime.ring()
if not K.is_absolute():
raise ValueError('K has to be an absolute extension')
g = defining_polynomial_for_Kp(prime, prec).change_ring(QQ)
gen = K.gen()
f = K(a).lift()
return K( sum([b*gen**j for j,b in enumerate(f.mod(g))]) )
def p_adic_LLL_bound_one_prime(prime, B0, M, M_logp, m0, c3, prec=106):
r"""
INPUT:
- ``prime`` -- a prime ideal of a number field `K`
- ``B0`` -- the initial bound
- ``M`` -- a list of elements of `K`, the `\mu_i`'s from Lemma IX.3 of [Sma1998]_
- ``M_logp`` -- the p-adic logarithm of elements in `M`
- ``m0`` -- an element of `K`, this is `\mu_0` from Lemma IX.3 of [Sma1998]_
- ``c3`` -- a positive real constant
- ``prec`` -- the precision of the calculations (default: 106), i.e., values are known to O(p^prec)
OUTPUT:
A pair consisting of:
1. a new upper bound, an integer
2. a boolean value, ``True`` if we have to increase precision, otherwise ``False``
.. NOTE::
The constant `c_5` is the constant `c_5` on page 89 of [Sma1998]_, which is equal to the constant `c_{10}` on page 139 of [Sma1995]_.
In this function, the `c_i` constants are in line with [Sma1998]_, but generally differ from the constants in [Sma1995]_ and other parts of this code.
EXAMPLES:
This example indicates a case where we must increase precision::
sage: from sage.rings.number_field.S_unit_solver import p_adic_LLL_bound_one_prime
sage: prec = 50
sage: K.<a> = NumberField(x^3-3)
sage: S = tuple(K.primes_above(3))
sage: SUK = UnitGroup(K, S=S)
sage: v = S[0]
sage: A = SUK.roots_of_unity()
sage: K0_old = 9.4755766731093e17
sage: Mus = [a^2 - 2]
sage: Log_p_Mus = [185056824593551109742400*a^2 + 1389583284398773572269676*a + 717897987691852588770249]
sage: mu0 = K(-1)
sage: c3_value = 0.42578591347980
sage: m0_Kv_new, increase_precision = p_adic_LLL_bound_one_prime(v, K0_old, Mus, Log_p_Mus, mu0, c3_value, prec)
sage: m0_Kv_new
0
sage: increase_precision
True
And now we increase the precision to make it all work::
sage: prec = 106
sage: K0_old = 9.475576673109275443280257946930e17
sage: Log_p_Mus = [1029563604390986737334686387890424583658678662701816*a^2 + 661450700156368458475507052066889190195530948403866*a]
sage: c3_value = 0.4257859134798034746197327286726
sage: m0_Kv_new, increase_precision = p_adic_LLL_bound_one_prime(v, K0_old, Mus, Log_p_Mus, mu0, c3_value, prec)
sage: m0_Kv_new
476
sage: increase_precision
False
"""
if any(g.valuation(prime) != 0 for g in M+[m0]):
raise ValueError('There is an element with non zero valuation')
K = prime.ring()
w = K.number_of_roots_of_unity()
p = prime.smallest_integer()
f = prime.residue_class_degree()
e = prime.absolute_ramification_index()
R = RealField(prec)
c5 = c3 / (f*e*R(p).log())
theta = K.gen()
# if M is empty then it is easy to give an upper bound
if len(M) == 0:
if m0 != 1:
return max(4,w, R(max(R(p).log()*f*(m0-1).valuation(prime)/c3, 0)).floor()), False
else:
return 0, False
# we evaluate the p-adic logarithm of m0 and embed it in the completion of K with respect to prime
m0_logp = log_p(m0, prime, prec)
m0_logp = embedding_to_Kp(m0_logp, prime, prec)
n = len(M_logp)
# Below we implement paragraph VI.4.2 of [Sma1998], pages 89-93
# we evaluate the p-adic valuation of the discriminant of theta
Theta = [theta**i for i in range(K.absolute_degree())]
ordp_Disc = (K.disc(Theta)).valuation(p)
# We evaluate Lambda
c8 = min(min(a.valuation(p) for a in g) for g in M_logp)
lam = p**c8
# we apply lemma VI.5 of [Sma1998] page 90
# c6 is 0 here because we seek to solve the equation x+y=1, so our set A
# is contained in the roots of unity of K
# In one very extreme case (p = 2 and all other constants as small as possible),
# low_bound = 1/c5 is not quite enough to give strict inequality. So we add 1 to be safe.
low_bound = (1/c5).round() + 1
for a in m0_logp:
if a != 0 and c8 > a.valuation(p):
B1 = (c8 + ordp_Disc/2) / c5
if B1 > low_bound:
return max(4,w,RR(B1).floor()), False
else:
return max(4,w,low_bound), False
c8 = min([a.valuation(p) for a in m0_logp] + [c8])
B = [g/lam for g in M_logp]
b0 = m0_logp / lam
c9 = c8 + ordp_Disc/2
# We evaluate 'u' and we construct the matrix A
m = e * f
u = 1
while True:
if prec <= u + c8:
return 0, True
# We construct the matrix A as a block matrix
A11 = identity_matrix(ZZ, n)
A12 = zero_matrix(ZZ, n, m)
A21 = zero_matrix(ZZ, n, m)
A22 = p**u * identity_matrix(ZZ, m)
for i,b in enumerate(B):
A21[i] = vector([mod(b[j],p**u) for j in range(m)])
A = block_matrix( [[A11,A12], [A21.transpose(),A22]] )
y = zero_vector(ZZ, n+m)
for i in range(m):
y[i+n] = -mod(b0[i], p**u)
# This refers to c10 from Smart
c10squared = minimal_vector(A.transpose(), y)
if c10squared > n * B0**2:
B2 = (u+c9) / c5
if B2 > low_bound:
return max(4,w,R(B2).floor()),False
else:
return max(4,w,low_bound),False
else:
u += 1
def p_adic_LLL_bound(SUK, A, prec=106):
r"""
Return the maximum of all of the `K_0`'s as they are LLL-optimized for each finite place `v`.
INPUT:
- ``SUK`` -- a group of `S`-units
- ``A`` -- a list of all products of each potential ``a``, ``b`` in the `S`-unit equation ``ax + by + 1 = 0`` with each root of unity of ``K``
- ``prec``-- precision for p-adic LLL calculations (default: 106)
OUTPUT:
A bound for the maximum of the exponents in the case that the extremal place is finite (see [Sma1995]_), as a real number
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import p_adic_LLL_bound
sage: K.<xi> = NumberField(x^3-3)
sage: SUK = UnitGroup(K,S=tuple(K.primes_above(3)))
sage: A = SUK.roots_of_unity()
sage: prec = 100
sage: p_adic_LLL_bound(SUK,A, prec)
89
"""
S = SUK.primes()
K0_old = K0_func(SUK, A, prec)
LLL_K0_by_finite_place = []
for i,v in enumerate(S):
# Kv_old = K0_by_finite_place[0]
Mus0 = possible_mu0s(SUK, v)
Mus = mus(SUK, v)
Log_p_Mus = [log_p(a, v, prec) for a in Mus]
local_prec = prec
val = 0
for m0 in Mus0:
m0_Kv_old = K0_old
m0_Kv_new, increase_precision = p_adic_LLL_bound_one_prime(v, m0_Kv_old, Mus, Log_p_Mus, m0, c3_func(SUK, local_prec), local_prec)
while increase_precision:
local_prec *= 2
Log_p_Mus = [log_p(a, v, local_prec) for a in Mus]
Log_p_Mus = [embedding_to_Kp(a, v, local_prec) for a in Log_p_Mus]
m0_Kv_new, increase_precision = p_adic_LLL_bound_one_prime(v, m0_Kv_old, Mus, Log_p_Mus, m0, c3_func(SUK, local_prec), local_prec)
while m0_Kv_new < m0_Kv_old:
m0_Kv_old = m0_Kv_new
m0_Kv_new, increase_precision = p_adic_LLL_bound_one_prime(v, m0_Kv_old, Mus, Log_p_Mus, m0, c3_func(SUK,local_prec), local_prec)
while increase_precision:
local_prec *= 2
Log_p_Mus = [log_p(a, v, local_prec) for a in Mus]
Log_p_Mus = [embedding_to_Kp(a, v, local_prec) for a in Log_p_Mus]
m0_Kv_new,increase_precision = p_adic_LLL_bound_one_prime(v, m0_Kv_old, Mus, Log_p_Mus, m0, c3_func(SUK, local_prec), local_prec)
if m0_Kv_old > val:
val = m0_Kv_old
LLL_K0_by_finite_place.append(val)
return max(LLL_K0_by_finite_place)
def split_primes_large_lcm(SUK, bound):
r"""
Return a list ``L`` of rational primes `q` which split completely in `K` and which have desirable properties (see NOTE).
INPUT:
- ``SUK`` -- the `S`-unit group of an absolute number field `K`.
- ``bound`` -- a positive integer
OUTPUT:
A list `L` of rational primes `q`, with the following properties:
- each prime `q` in `L` splits completely in `K`
- if `Q` is a prime in `S` and `q` is the rational
prime below `Q`, then `q` is **not** in `L`
- the value ``lcm { q-1 : q in L }`` is greater than or equal to ``2*bound + 1``.
.. NOTE::
- A series of compatible exponent vectors for the primes in `L` will
lift to **at most** one integer exponent vector whose entries
`a_i` satisfy `|a_i|` is less than or equal to ``bound``.
- The ordering of this set is not very intelligent for the purposes
of the later sieving processes.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import split_primes_large_lcm
sage: K.<xi> = NumberField(x^3 - 3*x + 1)
sage: S = K.primes_above(3)
sage: SUK = UnitGroup(K,S=tuple(S))
sage: split_primes_large_lcm(SUK, 200)
[17, 19, 37, 53]
With a tiny bound, Sage may ask you to increase the bound.
::
sage: from sage.rings.number_field.S_unit_solver import split_primes_large_lcm
sage: K.<xi> = NumberField(x^2 + 163)
sage: SUK = UnitGroup(K, S=tuple(K.primes_above(23)))
sage: split_primes_large_lcm(SUK, 8)
Traceback (most recent call last):
...
ValueError: Not enough split primes found. Increase bound.
"""
K = SUK.number_field()
# we recover the rational primes below S:
S0 = set(prime_ideal.smallest_integer() for prime_ideal in SUK.primes())
split_prime_list = K.completely_split_primes(4*bound + 4)
lcm_list = []
L = 1
while L < 2*bound + 1:
if split_prime_list == []:
# Need More Primes!
raise ValueError('Not enough split primes found. Increase bound.')
q = split_prime_list.pop(0)
# only use q if it is *not* below a prime in S -- that is,
# only if q does *not* appear in S0.
if q not in S0:
L = lcm(L, q-1)
lcm_list.append(q)
return lcm_list
def sieve_ordering(SUK, q):
r"""
Return ordered data for running the sieve on the primes in ``SUK`` over the rational prime ``q``.
INPUT:
- ``SUK`` -- the `S`-unit group of a number field `K`
- ``q`` -- a rational prime number which splits completely in `K`
OUTPUT:
A list of tuples, ``[ideals_over_q, residue_fields, rho_images, product_rho_orders]``, where
1. ``ideals_over_q`` is a list of the `d = [K:\mathbb{Q}]` ideals in `K` over `q`
2. ``residue_fields[i]`` is the residue field of ``ideals_over_q[i]``
3. ``rho_images[i]`` is a list of the reductions of the generators of the `S`-unit group, modulo ``ideals_over_q[i]``
4. ``product_rho_orders[i]`` is the product of the multiplicative orders of the elements in ``rho_images[i]``
.. NOTE::
- The list ``ideals_over_q`` is sorted so that the product of orders is smallest for ``ideals_over_q[0]``, as this will make the later sieving steps more efficient.
- The primes of ``S`` must not lie over ``q``.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import sieve_ordering
sage: K.<xi> = NumberField(x^3 - 3*x + 1)
sage: SUK = K.S_unit_group(S=3)
sage: sieve_data = list(sieve_ordering(SUK, 19))
sage: sieve_data[0]
(Fractional ideal (xi - 3),
Fractional ideal (-2*xi^2 + 3),
Fractional ideal (2*xi + 1))
sage: sieve_data[1]
(Residue field of Fractional ideal (xi - 3),
Residue field of Fractional ideal (-2*xi^2 + 3),
Residue field of Fractional ideal (2*xi + 1))
sage: sieve_data[2]
([18, 7, 16, 4], [18, 9, 12, 8], [18, 3, 10, 10])
sage: sieve_data[3]
(486, 648, 11664)
"""
K = SUK.number_field()
rho = SUK.gens_values()
d = K.absolute_degree()
primes_over_q = K.primes_above(q)
# q must split completely.
if len(primes_over_q) != d:
raise ValueError('The prime q is not completely split.')
for P in SUK.primes():
if P in primes_over_q:
raise ValueError('There is a prime in S over q.')
q_data = []
for Qi in primes_over_q:
resfield = Qi.residue_field()
rho_mod_Qi = [resfield(rho_j) for rho_j in rho]
orderprod = prod(rho_ij.multiplicative_order() for rho_ij in rho_mod_Qi)
q_data.append([Qi, resfield, rho_mod_Qi, orderprod])
q_data.sort(key=lambda X: [X[3],X[0],X[1],X[2]])
# zip() turns a list of n lists of length m into m tuples of length n
return zip(*q_data)
def clean_rfv_dict(rfv_dictionary):
r"""
Given a residue field vector dictionary, remove the impossible keys and entries.
INPUT:
- ``rfv_dictionary`` -- a dictionary whose keys are exponent vectors and whose values are residue field vectors
OUTPUT:
None. It removes impossible keys from the input dictionary.
.. NOTE::
- The keys of a residue field vector dictionary are exponent vectors modulo ``(q-1)`` for some prime ``q``.
- The values are residue field vectors. It is known that the entries of a residue field vector
which comes from a solution to the S-unit equation cannot have 1 in any entry.
EXAMPLES:
In this example, we use a truncated list generated when solving the `S`-unit equation in the case that `K` is defined by the
polynomial `x^2+x+1` and `S` consists of the primes above 3::
sage: from sage.rings.number_field.S_unit_solver import clean_rfv_dict
sage: rfv_dict = {(1, 3): [3, 2], (3, 0): [6, 6], (5, 4): [3, 6], (2, 1): [4, 6], (5, 1): [3, 1], (2, 5): [1, 5], (0, 3): [1, 6]}
sage: len(rfv_dict)
7
sage: clean_rfv_dict(rfv_dict)
sage: len(rfv_dict)
4
sage: rfv_dict
{(1, 3): [3, 2], (2, 1): [4, 6], (3, 0): [6, 6], (5, 4): [3, 6]}
"""
for a, val in list(rfv_dictionary.items()):
if 1 in val:
rfv_dictionary.pop(a)
def construct_rfv_to_ev(rfv_dictionary, q, d, verbose=False):
r"""
Return a reverse lookup dictionary, to find the exponent vectors associated to a given residue field vector.
INPUT:
- ``rfv_dictionary`` -- a dictionary whose keys are exponent vectors and whose values are the associated residue field vectors
- ``q`` -- a prime (assumed to split completely in the relevant number field)
- ``d`` -- the number of primes in `K` above the rational prime ``q``
- ``verbose`` -- a boolean flag to indicate more detailed output is desired (default: False)
OUTPUT:
A dictionary ``P`` whose keys are residue field vectors and whose values are lists of all exponent vectors
which correspond to the given residue field vector.
.. NOTE::
- For example, if ``rfv_dictionary[ e0 ] = r0``, then ``P[ r0 ]`` is a list which contains ``e0``.
- During construction, some residue field vectors can be eliminated, as they cannot come from
solutions to the `S`-unit equation. Such vectors are dropped from the keys of the dictionary ``P``.
EXAMPLES:
In this example, we use a truncated list generated when solving the `S`-unit equation in the case that `K` is defined by the
polynomial `x^2+x+1` and `S` consists of the primes above 3::
sage: from sage.rings.number_field.S_unit_solver import construct_rfv_to_ev
sage: rfv_dict = {(1, 3): [3, 2], (3, 0): [6, 6], (5, 4): [3, 6], (2, 1): [4, 6], (4, 0): [4, 2], (1, 2): [5, 6]}
sage: construct_rfv_to_ev(rfv_dict,7,2,False)
{(3, 2): [(1, 3)], (4, 2): [(4, 0)], (4, 6): [(2, 1)], (5, 6): [(1, 2)]}
"""
# The keys in P are just the possible first entries of a residue field vector.
# The values (all empty lists now) will be added in the next step.
P = {(v,) : [] for v in range(2, q)}
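# Entries 0 and 1 never occur in a residue field vector coming from a
# solution: an S-unit is nonzero modulo a prime over q (q lies below no prime
# of S), and a residue of 1 is ruled out (see clean_rfv_dict), so only the
# residues 2, ..., q-1 appear as keys.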
# Step 1. Populate the empty lists in P[(v,)].
# Loop through the keys in rfv_dictionary. For each, look at the output rf_vector.
# Find the key in P which matches the first entry of the rf_vector.
# Dump the **rest** of the rf_vector into a pair [exp_vec, rf_vec[1:]],
# and append this pair into the dictionary P at the key (rf_vec[0], ).
# Now, P[(v,)] = [ [a_0, e_0], [a_1, e_1], ...]
#
# The relationship between v, a_i, and e_i is as follows:
#
# a_i is an exponent vector, whose associated residue field vector is the
# concatenation of v with e_i.
for exponent_vector in rfv_dictionary:
residue_field_vector = rfv_dictionary[exponent_vector]
rf_vector_start = (residue_field_vector[0], )
rf_vector_end = residue_field_vector[1:]
P[rf_vector_start].append([exponent_vector, rf_vector_end])
if verbose:
print("Populated P. Currently it has ", len(P), "keys.")
# Step 2: We build a new dictionary, P_new, from P.
#
# This is a step that will be repeated, once for each of the d primes over q.
#
# P is a dictionary whose keys are tuples of length m, representing the beginning of known residue field vectors.
#
# For any such beginning `s`,
#
# P[s] = [ [a_0, e_0], [a_1, e_1], ...]
#
# where for any exponent vector a_i, the associated residue field vector is the concatenation s + e_i.
#
# The dictionary P_new is constructed from the dictionary P. The new keys will be tuples of length m + 1.
#
# During the construction, we look for impossible entries for S-unit solutions, and drop them from the dictionary as needed.
for j in range(d-1):
if verbose:
print("Constructing ", j, " th place of the residue field vectors, out of ", d-1, " total.")
P_new = {}
garbage = {}
# we loop over each key of P.
for rf_vector_start in P:
# each key of P provides q-2 possible keys for P_new, which we introduce and assign an empty list.
for w in range(2, q):
new_rf_vector_start = tuple(list(rf_vector_start) + [w])
P_new[new_rf_vector_start] = []
# we populate P_new[ new_rf_vector_start ] using P[rf_vector_start]
for exponent_vector, rf_vector_end in P[rf_vector_start]:
new_rf_vector_end = rf_vector_end[1:]
w = rf_vector_end[0]
new_rf_vector_start = tuple(list(rf_vector_start) + [w])
P_new[new_rf_vector_start].append([exponent_vector, new_rf_vector_end])
if verbose:
print("P_new is populated with ", len(P_new), " keys.")
# we now loop over the keys of P_new, looking for incompatible entries.
for rf_vector_start in P_new:
# the final entry of rf_vector_start or rf_vector_complement_start must be < (q+3)/2.
# No loss to insist that it is rf_vector_start.
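# (If v + v' = q + 1, then min(v, v') <= (q+1)/2 < (q+3)/2, so at least one
# member of each complementary pair passes this test and every pair is visited.)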
if rf_vector_start[-1] < (q+3)/2:
# we find the complement to rf_vector_start:
rf_vector_complement_start = tuple([ q+1-j for j in rf_vector_start])
if P_new[ rf_vector_start ] == [] or P_new[rf_vector_complement_start] == []:
# these can't be solutions. Mark them for deletion.
garbage[rf_vector_start] = True
garbage[rf_vector_complement_start] = True
# garbage removal
for rf_vector_start in garbage:
P_new.pop(rf_vector_start, 0)
if verbose:
print("After removing incompatible entries, P_new is down to ", len(P_new), " keys.")
# Time to move on to the next dictionary.
P = P_new.copy()
# Now, we just clean up P.
for residue_field_vector in P:
# at this instant, P[ residue_field_vector ] is a list of pairs: [ [a0,e0], ... ]
# We only care about the exponent vectors a0,...
P[residue_field_vector] = [a[0] for a in P[residue_field_vector]]
if verbose:
print("Returning dictionary P with ", len(P), " keys.")
return P.copy()
def construct_comp_exp_vec(rfv_to_ev_dict, q):
r"""
Constructs a dictionary associating complement vectors to residue field vectors.
INPUT:
- ``rfv_to_ev_dict`` -- a dictionary whose keys are residue field vectors and whose values are lists of exponent vectors with the associated residue field vector.
- ``q`` -- the characteristic of the residue field
OUTPUT:
A dictionary whose typical key is an exponent vector ``a``, and whose associated value is a list of complementary exponent vectors to ``a``.
EXAMPLES:
In this example, we use the list generated when solving the `S`-unit equation in the case that `K` is defined by the
polynomial `x^2+x+1` and `S` consists of the primes above 3
::
sage: from sage.rings.number_field.S_unit_solver import construct_comp_exp_vec
sage: rfv_to_ev_dict = {(6, 6): [(3, 0)], (5, 6): [(1, 2)], (5, 4): [(5, 3)], (6, 2): [(5, 5)], (2, 5): [(0, 1)], (5, 5): [(3, 4)], (4, 4): [(0, 2)], (6, 3): [(1, 4)], (3, 6): [(5, 4)], (2, 2): [(0, 4)], (3, 5): [(1, 0)], (6, 4): [(1, 1)], (3, 2): [(1, 3)], (2, 6): [(4, 5)], (4, 5): [(4, 3)], (2, 3): [(2, 3)], (4, 2): [(4, 0)], (6, 5): [(5, 2)], (3, 3): [(3, 2)], (5, 3): [(5, 0)], (4, 6): [(2, 1)], (3, 4): [(3, 5)], (4, 3): [(0, 5)], (5, 2): [(3, 1)], (2, 4): [(2, 0)]}
sage: construct_comp_exp_vec(rfv_to_ev_dict, 7)
{(0, 1): [(1, 4)],
(0, 2): [(0, 2)],
(0, 4): [(3, 0)],
(0, 5): [(4, 3)],
(1, 0): [(5, 0)],
(1, 1): [(2, 0)],
(1, 2): [(1, 3)],
(1, 3): [(1, 2)],
(1, 4): [(0, 1)],
(2, 0): [(1, 1)],
(2, 1): [(4, 0)],
(2, 3): [(5, 2)],
(3, 0): [(0, 4)],
(3, 1): [(5, 4)],
(3, 2): [(3, 4)],
(3, 4): [(3, 2)],
(3, 5): [(5, 3)],
(4, 0): [(2, 1)],
(4, 3): [(0, 5)],
(4, 5): [(5, 5)],
(5, 0): [(1, 0)],
(5, 2): [(2, 3)],
(5, 3): [(3, 5)],
(5, 4): [(3, 1)],
(5, 5): [(4, 5)]}
"""
comp_exp_vec_dict = {}
for residue_field_vector in rfv_to_ev_dict:
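# If x + y = 1 in GF(q) with both residues stored as integers in 2..q-1,
# the stored values must sum to q + 1; hence the componentwise complement.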
rf_vector_complement = tuple([q + 1 - j for j in residue_field_vector])
exponent_vector_list = rfv_to_ev_dict[ residue_field_vector ][:]
exponent_vector_complement_list = rfv_to_ev_dict[rf_vector_complement][:]
for exponent_vector in exponent_vector_list:
comp_exp_vec_dict[exponent_vector] = exponent_vector_complement_list
return comp_exp_vec_dict
def drop_vector(ev, p, q, complement_ev_dict):
r"""
Determines if the exponent vector, ``ev``, may be removed from the complement dictionary during construction.
This will occur if ``ev`` is not compatible with an exponent vector mod ``q-1``.
INPUT:
- ``ev`` -- an exponent vector modulo ``p - 1``
- ``p`` -- the prime such that ev is an exponent vector modulo ``p-1``
- ``q`` -- a prime, distinct from ``p``, that is a key in the ``complement_ev_dict``
- ``complement_ev_dict`` -- a dictionary of dictionaries, whose keys are primes
``complement_ev_dict[q]`` is a dictionary whose keys are exponent vectors modulo ``q-1``
and whose values are lists of complementary exponent vectors modulo ``q-1``
OUTPUT:
Returns ``True`` if ``ev`` may be dropped from the complement exponent vector dictionary, and ``False`` if not.
.. NOTE::
- If ``ev`` is not compatible with any of the vectors modulo ``q-1``, then it can no longer correspond to a solution
of the `S`-unit equation. It returns ``True`` to indicate that it should be removed.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import drop_vector
sage: drop_vector((1, 2, 5), 7, 11, {11: {(1, 1, 3): [(1, 1, 3),(2, 3, 4)]}})
True
::
sage: P={3: {(1, 0, 0): [(1, 0, 0), (0, 1, 0)], (0, 1, 0): [(1, 0, 0), (0, 1, 0)]}, 7: {(0, 3, 4): [(0, 1, 2), (0, 3, 4), (0, 5, 0)], (1, 2, 4): [(1, 0, 4), (1, 4, 2), (1, 2, 0)], (0, 1, 2): [(0, 1, 2), (0, 3, 4), (0, 5, 0)], (0, 5, 4): [(1, 0, 0), (1, 4, 4), (1, 2, 2)], (1, 4, 2): [(1, 2, 4), (1, 4, 0), (1, 0, 2)], (1, 0, 4): [(1, 2, 4), (1, 4, 0), (1, 0, 2)], (0, 3, 2): [(1, 0, 0), (1, 4, 4), (1, 2, 2)], (1, 0, 0): [(0, 5, 4), (0, 3, 2), (0, 1, 0)], (1, 2, 0): [(1, 2, 4), (1, 4, 0), (1, 0, 2)], (0, 1, 0): [(1, 0, 0), (1, 4, 4), (1, 2, 2)], (0, 5, 0): [(0, 1, 2), (0, 3, 4), (0, 5, 0)], (1, 2, 2): [(0, 5, 4), (0, 3, 2), (0, 1, 0)], (1, 4, 0): [(1, 0, 4), (1, 4, 2), (1, 2, 0)], (1, 0, 2): [(1, 0, 4), (1, 4, 2), (1, 2, 0)], (1, 4, 4): [(0, 5, 4), (0, 3, 2), (0, 1, 0)]}}
sage: drop_vector((0,1,0),3,7,P)
False
"""
# returns True if it is OK to drop exp_vec given the current comp_exp_vec dictionary associated to some q.
# returns False otherwise
# loop over the possible compatible vectors in the other modulus
g = gcd(p-1, q-1)
for compatible_exp_vec in compatible_vectors(ev, p-1, q-1, g):
# do they appear in the other dictionary?
if compatible_exp_vec in complement_ev_dict[q]:
# OK, but the complements need to be compatible, too!
ev_complement_list = complement_ev_dict[p][ev]
for ev_comp in ev_complement_list:
for compatible_cv in compatible_vectors(ev_comp, p-1, q-1, g):
if compatible_cv in complement_ev_dict[q][compatible_exp_vec]:
return False
return True
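# In short: ev survives only if some vector compatible with it occurs as a key
# mod q-1 and, in addition, one of ev's complements is compatible with one of
# that key's complements; otherwise no common integer lift can give a solution.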
def construct_complement_dictionaries(split_primes_list, SUK, verbose=False):
r"""
A function to construct the complement exponent vector dictionaries.
INPUT:
- ``split_primes_list`` -- a list of rational primes which split completely in the number field `K`
- ``SUK`` -- the `S`-unit group for a number field `K`
- ``verbose`` -- a boolean to provide additional feedback (default: False)
OUTPUT:
A dictionary of dictionaries. The keys coincide with the primes in ``split_primes_list``.
For each ``q``, ``comp_exp_vec[q]`` is a dictionary whose keys are exponent vectors modulo ``q-1``,
and whose values are lists of exponent vectors modulo ``q-1``.
If ``w`` is an exponent vector in ``comp_exp_vec[q][v]``, then the residue field vectors modulo ``q`` for
``v`` and ``w`` sum to ``[1,1,...,1]``.
.. NOTE::
- The data of ``comp_exp_vec`` will later be lifted to `\mathbb{Z}` to look for true `S`-Unit equation solutions.
- During construction, the various dictionaries are compared to each other several times to
eliminate as many mod `q` solutions as possible.
- The authors acknowledge a helpful discussion with Norman Danner which helped formulate this code.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import construct_complement_dictionaries
sage: f = x^2 + 5
sage: H = 10
sage: K.<xi> = NumberField(f)
sage: SUK = K.S_unit_group(S=K.primes_above(H))
sage: split_primes_list = [3, 7]
sage: actual = construct_complement_dictionaries(split_primes_list, SUK)
sage: expected = {3: {(0, 1, 0): [(1, 0, 0), (0, 1, 0)],
....: (1, 0, 0): [(1, 0, 0), (0, 1, 0)]},
....: 7: {(0, 1, 0): [(1, 0, 0), (1, 4, 4), (1, 2, 2)],
....: (0, 1, 2): [(0, 1, 2), (0, 3, 4), (0, 5, 0)],
....: (0, 3, 2): [(1, 0, 0), (1, 4, 4), (1, 2, 2)],
....: (0, 3, 4): [(0, 1, 2), (0, 3, 4), (0, 5, 0)],
....: (0, 5, 0): [(0, 1, 2), (0, 3, 4), (0, 5, 0)],
....: (0, 5, 4): [(1, 0, 0), (1, 4, 4), (1, 2, 2)],
....: (1, 0, 0): [(0, 5, 4), (0, 3, 2), (0, 1, 0)],
....: (1, 0, 2): [(1, 0, 4), (1, 4, 2), (1, 2, 0)],
....: (1, 0, 4): [(1, 2, 4), (1, 4, 0), (1, 0, 2)],
....: (1, 2, 0): [(1, 2, 4), (1, 4, 0), (1, 0, 2)],
....: (1, 2, 2): [(0, 5, 4), (0, 3, 2), (0, 1, 0)],
....: (1, 2, 4): [(1, 0, 4), (1, 4, 2), (1, 2, 0)],
....: (1, 4, 0): [(1, 0, 4), (1, 4, 2), (1, 2, 0)],
....: (1, 4, 2): [(1, 2, 4), (1, 4, 0), (1, 0, 2)],
....: (1, 4, 4): [(0, 5, 4), (0, 3, 2), (0, 1, 0)]}}
sage: all(set(actual[p][vec]) == set(expected[p][vec]) for p in [3,7] for vec in expected[p])
True
"""
# We initialize some dictionaries.
rho = SUK.gens_values()
rho_length = len(rho)
rho_images_dict = {}
rho_orders_dict = {}
K = SUK.number_field()
for q in split_primes_list:
ideals_over_q, residue_fields, rho_images, product_rho_orders = sieve_ordering(SUK, q)
rho_images_dict[q] = rho_images
rho_orders_dict[q] = product_rho_orders
nK = K.absolute_degree()
w0 = rho[0].multiplicative_order()
# We build a dictionary of dictionaries.
# rfv_to_ev[q] is the 'mod q' residue field vector to exponent vector dictionary.
rfv_to_ev = {}
# We build a second dictionary of dictionaries.
# comp_exp_vec[q] is the dictionary mod q which assigns to each exponent vector
# a list of 'complementary' exponent vectors.
comp_exp_vec = {}
q0 = split_primes_list[0]
if verbose:
print("Using the following primes: ", split_primes_list)
for q in split_primes_list:
rho_images = rho_images_dict[q]
if verbose:
print("q = ", q)
def epsilon_q(a, i):
# a is an exponent vector
# i is an index for one of the primes over q
# returns the value of rho_j^a_j inside the
# residue field of Qi. (Necessarily isomorphic to F_q.)
# rho_images[i][j] == rho[j] modulo Q[i]
eps_value = rho_images[i][0]**a[0]
for j in range(1, rho_length):
eps_value *= rho_images[i][j]**a[j]
return eps_value
if verbose:
print("The evaluation function epsilon has been defined using rho_images = ", rho_images)
# Now, we run through the vectors in the iterator, but only keep the ones
# which are compatible with the previously constructed dictionaries. That is,
# in order to keep an exp_vec mod q, there must exist a compatible exp_vec mod p
# in the keys of the rfv_to_ev[p] dictionary for each completely split prime
# p appearing prior to q in split_primes_list.
if q == q0:
# for the first prime, there is no filtering possible, and we just build the exponent vector
# iterator.
# This should consist of all vectors (a0,...,a_{t-1}), where
# a0 is in the range 0 .. w_0 - 1 and
# aj is in the range 0 .. q - 2 (for j > 0)
ranges = [range(w0)] + [range(q-1) for _ in range(rho_length-1)]
ev_iterator = itertools.product(*ranges)
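# Example shape (cf. the x^2 + x + 1 doctests in this module, where the
# torsion generator has order w0 = 6): with two generators and q = 7, the
# iterator runs over the 36 vectors (a0, a1) with a0 in 0..5 and a1 in 0..5.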
# With the iterator built, we construct the exponent vector to residue field dictionary.
ev_to_rfv_dict = {ev : [epsilon_q(ev, i) for i in range(nK)] for ev in ev_iterator}
if verbose:
print("The residue field dictionary currently has ", len(ev_to_rfv_dict), " exponent vector keys.")
else:
ev_to_rfv_dict = {}
# We use compatibility requirements to keep the size of the dictionary down.
# Later on, we'll compare all dictionaries pairwise. But for now, we just
# check against the first.
# That is, rather than loop over every possible exponent vector mod q-1,
# we only consider those evs which are compatible with the mod q0 - 1 vectors.
# Loop over exponent vectors modulo q0 - 1
g = gcd(q0-1, q-1)
for exp_vec_mod_q0 in comp_exp_vec[q0]:
# Loop only over exponent vectors modulo q-1 which are compatible with exp_vec_mod_q0
for exp_vec in compatible_vectors(exp_vec_mod_q0, q0-1, q-1, g):
# fill the dictionary with the residue field vectors using the evaluation function.
ev_to_rfv_dict[exp_vec] = [epsilon_q(exp_vec, i) for i in range(nK)]
if verbose:
print("The residue field dictionary currently has ", len(ev_to_rfv_dict), " exponent vector keys.")
# At this point, we now have a dictionary ev_to_rfv_dict, which attaches
# to each exponent vector a 'residue field vector,' which is a tuple of the
# nK values epsilon_q(a,0),...,epsilon_q(a,nK-1).
clean_rfv_dict( ev_to_rfv_dict )
if verbose:
print("clean_rfv_dict executed.")
print("The residue field dictionary currently has ", len(ev_to_rfv_dict), " exponent vector keys.")
# We essentially construct an inverse dictionary: one whose keys are residue field vectors,
# and whose values are the exponent vectors that yield each key
rfv_to_ev[q] = construct_rfv_to_ev(ev_to_rfv_dict, q, nK, verbose=verbose)
if verbose:
print("construct_rfv_to_ev executed.")
print("The rfv_to_ev dictionary currently has ", len(rfv_to_ev[q]), "rfv keys.")
comp_exp_vec[q] = construct_comp_exp_vec(rfv_to_ev[q], q)
if verbose:
print("construct_comp_exp_vec executed.")
print("Size of comp_exp_vec[q]: ", len(comp_exp_vec[q]))
# Now that we have a new dictionary, we compare all the dictionaries pairwise,
# looking for opportunities to remove 'impossible' solutions.
for p in comp_exp_vec.keys():
if p == q:
continue
if verbose:
print("Comparing dictionaries for p = ", p, "and q = ", q, ".")
old_size_p = len(comp_exp_vec[p])
if verbose:
print("Size of comp_exp_vec[p] is: ", old_size_p, ".")
cv_size = ((q-1)/gcd(p-1, q-1)) ** (rho_length - 1)
print("Length of compatible_vectors: ", cv_size, ".")
print("Product: ", old_size_p*cv_size)
for exp_vec in list(comp_exp_vec[p]):
if drop_vector(exp_vec, p, q, comp_exp_vec):
comp_exp_vec[p].pop(exp_vec)
if verbose:
print("Shrunk dictionary p from ", old_size_p, " to ", len(comp_exp_vec[p]))
# Now, repeat, but swap p and q.
old_size_q = len(comp_exp_vec[q])
if verbose:
print("Size of comp_exp_vec[q] is: ", old_size_q, ".")
cv_size = ((p-1)/gcd(p-1, q-1)) ** (rho_length - 1)
print("Length of compatible_vectors: ", cv_size, ".")
print("Product: ", old_size_q * cv_size)
for exp_vec in list(comp_exp_vec[q]):
if drop_vector(exp_vec, q, p, comp_exp_vec):
comp_exp_vec[q].pop(exp_vec)
if verbose:
print("Shrunk dictionary q from ", old_size_q, " to ", len(comp_exp_vec[q]))
return comp_exp_vec
def compatible_vectors_check(a0, a1, g, l):
r"""
Given exponent vectors with respect to two moduli, determines if they are compatible.
INPUT:
- ``a0`` -- an exponent vector modulo ``m0``
- ``a1`` -- an exponent vector modulo ``m1`` (must have the same length as ``a0``)
- ``g`` -- the gcd of ``m0`` and ``m1``
- ``l`` -- the length of ``a0`` and of ``a1``
OUTPUT:
``True`` if there is an integer exponent vector `a` satisfying
.. MATH::
\begin{aligned}
a[0] &= a_0[0] = a_1[0],\\
a[1:] &\equiv a_0[1:] \pmod{m_0},\\
a[1:] &\equiv a_1[1:] \pmod{m_1},
\end{aligned}
and ``False`` otherwise.
.. NOTE::
- Exponent vectors must agree exactly in the first coordinate.
- If exponent vectors are different lengths, an error is raised.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import compatible_vectors_check
sage: a0 = (3, 1, 8, 11)
sage: a1 = (3, 5, 6, 13)
sage: a2 = (5, 5, 6, 13)
sage: compatible_vectors_check(a0, a1, gcd(12, 22), 4r)
True
sage: compatible_vectors_check(a0, a2, gcd(12, 22), 4r)
False
"""
# exponent vectors must agree exactly in the 0th coordinate.
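# Worked example (values from the doctest above): a0 = (3, 1, 8, 11) mod 12,
# a1 = (3, 5, 6, 13) mod 22, g = gcd(12, 22) = 2. The 0th entries match and
# the remaining differences 1-5, 8-6, 11-13 are all divisible by 2, so the
# vectors are compatible and the function returns True.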
return a0[0] == a1[0] and all((x0 - x1) % g == 0 for x0,x1 in zip(itertools.islice(a0, 1, l), itertools.islice(a1, 1, l)))
def compatible_vectors(a, m0, m1, g):
r"""
Given an exponent vector ``a`` modulo ``m0``, returns an iterator over the exponent vectors for the modulus ``m1``, such that a lift to the lcm modulus exists.
INPUT:
- ``a`` -- an exponent vector for the modulus ``m0``
- ``m0`` -- a positive integer (specifying the modulus for ``a``)
- ``m1`` -- a positive integer (specifying the alternate modulus)
- ``g`` -- the gcd of m0 and m1
OUTPUT:
A list of exponent vectors modulo ``m1`` which are compatible with ``a``.
.. NOTE::
- Exponent vectors must agree exactly in the 0th position in order to be compatible.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import compatible_vectors
sage: a = (3, 1, 8, 1)
sage: list(compatible_vectors(a, 18, 12, gcd(18,12)))
[(3, 1, 2, 1),
(3, 1, 2, 7),
(3, 1, 8, 1),
(3, 1, 8, 7),
(3, 7, 2, 1),
(3, 7, 2, 7),
(3, 7, 8, 1),
(3, 7, 8, 7)]
The order of the moduli matters. ::
sage: len(list(compatible_vectors(a, 18, 12, gcd(18,12))))
8
sage: len(list(compatible_vectors(a, 12, 18, gcd(18,12))))
27
"""
# recall that the 0th entry must be an exact match.
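# Each of the len(a) - 1 trailing coordinates ranges over the m1/g residues
# fixed mod g, so the iterator yields (m1/g)**(len(a)-1) vectors; for the
# doctests above, (12/6)**3 == 8 and (18/6)**3 == 27, matching the counts.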
ranges = [[a[0]]] + [range(a[i]%g, (a[i]%g) + m1, g) for i in range(1, len(a))]
return itertools.product(*ranges)
def compatible_systems(split_prime_list, complement_exp_vec_dict):
r"""
Given dictionaries of complement exponent vectors for various primes that split in K, compute all possible compatible systems.
INPUT:
- ``split_prime_list`` -- a list of rational primes that split completely in `K`
- ``complement_exp_vec_dict`` -- a dictionary of dictionaries. The keys are primes from ``split_prime_list``.
OUTPUT:
A list of compatible systems of exponent vectors.
.. NOTE::
- For any ``q`` in ``split_prime_list``, ``complement_exp_vec_dict[q]`` is a dictionary whose keys are exponent vectors modulo ``q-1``
and whose values are lists of exponent vectors modulo ``q-1`` which are complementary to the key.
- an item in system_list has the form ``[ [v0, w0], [v1, w1], ..., [vk, wk] ]``, where::
- ``qj = split_prime_list[j]``
- ``vj`` and ``wj`` are complementary exponent vectors modulo ``qj - 1``
- the pairs are all simultaneously compatible.
- Let ``H = lcm( qj - 1 : qj in split_primes_list )``. Then for any compatible system, there is at most one pair of integer
exponent vectors ``[v, w]`` such that::
- every entry of ``v`` and ``w`` is bounded in absolute value by ``H``
- for any ``qj``, ``v`` and ``vj`` agree modulo ``(qj - 1)``
- for any ``qj``, ``w`` and ``wj`` agree modulo ``(qj - 1)``
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import compatible_systems
sage: split_primes_list = [3, 7]
sage: checking_dict = {3: {(0, 1, 0): [(1, 0, 0)]}, 7: {(0, 1, 0): [(1, 0, 0)]}}
sage: compatible_systems(split_primes_list, checking_dict)
[[[(0, 1, 0), (1, 0, 0)], [(0, 1, 0), (1, 0, 0)]]]
"""
S0 = split_prime_list
system_list = []
if len(S0) == 1:
q = S0[0]
for exponent_vector in complement_exp_vec_dict[q]:
for complementary_vector in complement_exp_vec_dict[q][exponent_vector]:
pair = [[exponent_vector, complementary_vector]]
system_list.append(pair)
elif len(S0) > 1:
S1 = S0[:-1]
old_systems = compatible_systems(S1, complement_exp_vec_dict)
q = S0[-1]
gcds = [gcd(q-1, qj-1) for qj in S1]
for exp_vec in complement_exp_vec_dict[q]:
l = len(exp_vec)
for comp_vec in complement_exp_vec_dict[q][exp_vec]:
for old_system in old_systems:
if all((compatible_vectors_check(exp_vec, exp_vec_qj, g, l) and
compatible_vectors_check(comp_vec, comp_vec_qj, g, l))
for g, (exp_vec_qj, comp_vec_qj) in zip(gcds, old_system)):
# build the new system and append it to the list.
new_system = old_system + [[exp_vec, comp_vec]]
system_list.append(new_system)
return system_list
def compatible_system_lift(compatible_system, split_primes_list):
r"""
Given a compatible system of exponent vectors and complementary exponent vectors, return a lift to the integers.
INPUT:
- ``compatible_system`` -- a list of pairs ``[ [v0, w0], [v1, w1], .., [vk, wk] ]``
where [vi, wi] is a pair of complementary exponent vectors modulo ``qi - 1``, and all pairs are compatible.
- ``split_primes_list`` -- a list of primes ``[ q0, q1, .., qk ]``
OUTPUT:
A pair of vectors ``[v, w]`` satisfying:
1. ``v[0] == vi[0]`` for all ``i``
2. ``w[0] == wi[0]`` for all ``i``
3. ``v[j] == vi[j]`` modulo ``qi - 1`` for all ``i`` and all ``j > 0``
4. ``w[j] == wi[j]`` modulo ``qi - 1`` for all ``i`` and all ``j > 0``
5. every entry of ``v`` and ``w`` is bounded by ``L/2`` in absolute value, where ``L`` is the least common multiple of ``{qi - 1 : qi in split_primes_list }``
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import compatible_system_lift
sage: split_primes_list = [3, 7]
sage: comp_sys = [[(0, 1, 0), (0, 1, 0)], [(0, 3, 4), (0, 1, 2)]]
sage: compatible_system_lift(comp_sys, split_primes_list)
[(0, 3, -2), (0, 1, 2)]
"""
if len(split_primes_list) != len(compatible_system):
raise ValueError("The number of primes does not match the length of the given exponent vectors.")
# the first entries are already determined.
exponent_vector_lift = [ZZ(compatible_system[0][0][0])]
complement_vector_lift = [ZZ(compatible_system[0][1][0])]
# fill in exponent_vector_lift
moduli_list = [q-1 for q in split_primes_list]
L = lcm(moduli_list)
t = len(compatible_system[0][0])
for i in range(1,t):
exp_coord_residues = [pair[0][i] for pair in compatible_system]
comp_coord_residues = [pair[1][i] for pair in compatible_system]
ev_lift_coordinate = CRT(exp_coord_residues, moduli_list)
cv_lift_coordinate = CRT(comp_coord_residues, moduli_list)
# these values lie in the range [0, L-1], so we must shift them if they are bigger than L/2.
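# Worked example (from the doctest above): moduli [2, 6], L = 6. Coordinate 1
# lifts to CRT([1, 3], [2, 6]) = 3, which is not above L/2 = 3 and is kept;
# coordinate 2 lifts to CRT([0, 4], [2, 6]) = 4 > 3 and is shifted to
# 4 - 6 = -2, giving the lift (0, 3, -2).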
if ev_lift_coordinate > L/2:
ev_lift_coordinate -= L
if cv_lift_coordinate > L/2:
cv_lift_coordinate -= L
exponent_vector_lift.append(ev_lift_coordinate)
complement_vector_lift.append(cv_lift_coordinate)
return [tuple(exponent_vector_lift), tuple(complement_vector_lift)]
def solutions_from_systems(SUK, bound, cs_list, split_primes_list):
r"""
Lifts compatible systems to the integers and returns the S-unit equation solutions the lifts yield.
INPUT:
- ``SUK`` -- the group of `S`-units where we search for solutions
- ``bound`` -- a bound for the absolute value of the entries of all lifts
- ``cs_list`` -- a list of compatible systems of exponent vectors modulo `q-1` for
various primes `q`
- ``split_primes_list`` -- a list of primes giving the moduli of the exponent vectors in ``cs_list``
OUTPUT:
A list of solutions to the `S`-unit equation. Each solution is a tuple containing:
1. an exponent vector over the integers, ``ev``
2. an exponent vector over the integers, ``cv``
3. the S-unit corresponding to ``ev``, ``iota_exp``
4. the S-unit corresponding to ``cv``, ``iota_comp``
.. NOTE::
- Every entry of ``ev`` is less than or equal to bound in absolute value
- every entry of ``cv`` is less than or equal to bound in absolute value
- ``iota_exp + iota_comp == 1``
EXAMPLES:
Given a single compatible system, a solution can be found. ::
sage: from sage.rings.number_field.S_unit_solver import solutions_from_systems
sage: K.<xi> = NumberField(x^2-15)
sage: SUK = K.S_unit_group(S=K.primes_above(2))
sage: split_primes_list = [7, 17]
sage: a_compatible_system = [[[(0, 0, 5), (0, 0, 5)], [(0, 0, 15), (0, 0, 15)]]]
sage: solutions_from_systems( SUK, 20, a_compatible_system, split_primes_list )
[((0, 0, -1), (0, 0, -1), 1/2, 1/2)]
"""
solutions = []
for system in cs_list:
ev, cv = compatible_system_lift(system, split_primes_list)
if all(abs(x) <= bound for x in ev[1:] + cv[1:]):
# the entries are all below the bound, so there is nothing left to do
# except construct the elements and see if they are solutions to
# the S-unit equation
iota_exp = SUK.exp( ev )
iota_comp = SUK.exp( cv )
if iota_exp + iota_comp == 1:
sol = ( ev, cv, iota_exp, iota_comp )
solutions.append( sol )
return solutions
def clean_sfs(sfs_list):
r"""
Given a list of S-unit equation solutions, remove trivial redundancies.
INPUT:
- ``sfs_list`` -- a list of solutions to the S-unit equation
OUTPUT:
A list of solutions to the S-unit equation
.. NOTE::
The function looks for cases where ``x + y = 1`` and ``y + x = 1`` appear
as separate solutions, and removes one.
EXAMPLES:
The function is not dependent on the number field and removes redundancies in any list. ::
sage: from sage.rings.number_field.S_unit_solver import clean_sfs
sage: sols = [((1, 0, 0), (0, 0, 1), -1, 2), ((0, 0, 1), (1, 0, 0), 2, -1)]
sage: clean_sfs( sols )
[((1, 0, 0), (0, 0, 1), -1, 2)]
"""
# given the output from solutions_from_systems,
# look for trivial redundancies: swapping exp_vec, comp_vec, particularly.
new_sfs = []
for entry in sfs_list:
swapped_entry = (entry[1], entry[0], entry[3], entry[2])
if entry not in new_sfs and swapped_entry not in new_sfs:
new_sfs.append(entry)
return new_sfs
def sieve_below_bound(K, S, bound=10, bump=10, split_primes_list=[], verbose=False):
r"""
Return all solutions to the S-unit equation ``x + y = 1`` over K with exponents below the given bound.
INPUT:
- ``K`` -- a number field (an absolute extension of the rationals)
- ``S`` -- a list of finite primes of ``K``
- ``bound`` -- a positive integer upper bound for exponents; solutions with exponents having absolute value below this bound will be found (default: 10)
- ``bump`` -- a positive integer by which the minimum LCM will be increased if not enough split primes are found in the sieving step (default: 10)
- ``split_primes_list`` -- a list of rational primes that split completely in the extension K/Q, used for sieving. For a complete list of solutions, the lcm of {p_i - 1} over the primes p_i should be greater than ``bound`` (default: [])
- ``verbose`` -- an optional parameter allowing the user to print information during the sieving process (default: False)
OUTPUT:
A list of tuples ``[( A_1, B_1, x_1, y_1), (A_2, B_2, x_2, y_2), ... ( A_n, B_n, x_n, y_n)]`` such that:
1. The first two entries are tuples ``A_i = (a_0, a_1, ... , a_t)`` and ``B_i = (b_0, b_1, ... , b_t)`` of exponents.
2. The last two entries are ``S``-units ``x_i`` and ``y_i`` in ``K`` with ``x_i + y_i = 1``.
3. If the default generators for the ``S``-units of ``K`` are ``(rho_0, rho_1, ... , rho_t)``, then these satisfy ``x_i = \prod(rho_i)^(a_i)`` and ``y_i = \prod(rho_i)^(b_i)``.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import sieve_below_bound, eq_up_to_order
sage: K.<xi> = NumberField(x^2+x+1)
sage: SUK = UnitGroup(K,S=tuple(K.primes_above(3)))
sage: S = SUK.primes()
sage: sols = sieve_below_bound(K, S, 10)
sage: expected = [
....: ((1, -1), (0, -1), 1/3*xi + 2/3, -1/3*xi + 1/3),
....: ((0, 1), (4, 0), xi + 2, -xi - 1),
....: ((2, 0), (5, 1), xi, -xi + 1),
....: ((1, 0), (5, 0), xi + 1, -xi)]
sage: eq_up_to_order(sols, expected)
True
"""
if not K.is_absolute():
raise ValueError("K must be an absolute extension.")
SUK = UnitGroup(K, S=tuple(S))
initial_bound = bound
while not split_primes_list:
try:
split_primes_list = split_primes_large_lcm(SUK, initial_bound)
except ValueError:
initial_bound += bump
print("Couldn't find enough split primes. Bumping to ", initial_bound)
complement_exp_vec_dict = construct_complement_dictionaries(split_primes_list, SUK, verbose=verbose)
cs_list = compatible_systems(split_primes_list, complement_exp_vec_dict)
sfs_list = solutions_from_systems(SUK, bound, cs_list, split_primes_list)
S_unit_solutions = clean_sfs(sfs_list)
return S_unit_solutions
def solve_S_unit_equation(K, S, prec=106, include_exponents=True, include_bound=False, proof=None, verbose=False):
r"""
Return all solutions to the S-unit equation ``x + y = 1`` over K.
INPUT:
- ``K`` -- a number field (an absolute extension of the rationals)
- ``S`` -- a list of finite primes of ``K``
- ``prec`` -- precision used for computations in real, complex, and p-adic fields (default: 106)
- ``include_exponents`` -- whether to include the exponent vectors in the returned value (default: True).
- ``include_bound`` -- whether to return the final computed bound (default: False)
- ``proof`` -- a proof flag passed through to the `S`-unit group computation (default: None, i.e. use the Sage default)
- ``verbose`` -- whether to print information during the sieving step (default: False)
OUTPUT:
A list of tuples ``[( A_1, B_1, x_1, y_1), (A_2, B_2, x_2, y_2), ... ( A_n, B_n, x_n, y_n)]`` such that:
1. The first two entries are tuples ``A_i = (a_0, a_1, ... , a_t)`` and ``B_i = (b_0, b_1, ... , b_t)`` of exponents. These will be omitted if ``include_exponents`` is ``False``.
2. The last two entries are ``S``-units ``x_i`` and ``y_i`` in ``K`` with ``x_i + y_i = 1``.
3. If the default generators for the ``S``-units of ``K`` are ``(rho_0, rho_1, ... , rho_t)``, then these satisfy ``x_i = \prod(rho_i)^(a_i)`` and ``y_i = \prod(rho_i)^(b_i)``.
If ``include_bound``, will return a pair ``(sols, bound)`` where ``sols`` is as above and ``bound`` is the bound used for the entries in the exponent vectors.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import solve_S_unit_equation, eq_up_to_order
sage: K.<xi> = NumberField(x^2+x+1)
sage: S = K.primes_above(3)
sage: sols = solve_S_unit_equation(K, S, 200)
sage: expected = [
....: ((0, 1), (4, 0), xi + 2, -xi - 1),
....: ((1, -1), (0, -1), 1/3*xi + 2/3, -1/3*xi + 1/3),
....: ((1, 0), (5, 0), xi + 1, -xi),
....: ((2, 0), (5, 1), xi, -xi + 1)]
sage: eq_up_to_order(sols, expected)
True
In order to see the bound as well use the optional parameter ``include_bound``::
sage: solutions, bound = solve_S_unit_equation(K, S, 100, include_bound=True)
sage: bound
7
You can omit the exponent vectors::
sage: sols = solve_S_unit_equation(K, S, 200, include_exponents=False)
sage: expected = [(xi + 2, -xi - 1), (1/3*xi + 2/3, -1/3*xi + 1/3), (-xi, xi + 1), (-xi + 1, xi)]
sage: set(frozenset(a) for a in sols) == set(frozenset(b) for b in expected)
True
It is an error to use values in S that are not primes in K::
sage: solve_S_unit_equation(K, [3], 200)
Traceback (most recent call last):
...
ValueError: S must consist only of prime ideals, or a single element from which a prime ideal can be constructed.
We check the case that the rank is 0::
sage: K.<xi> = NumberField(x^2+x+1)
sage: solve_S_unit_equation(K, [])
[((1,), (5,), xi + 1, -xi)]
"""
# Checks to make sure inputs are legal
# K must be an absolute extension:
if not K.is_absolute():
raise ValueError("K must be an absolute extension.")
# S must be a finite set of primes
try:
SUK = UnitGroup(K, proof=proof, S=tuple(S))
except Exception:
raise ValueError("S must consist only of prime ideals, or a single element from which a prime ideal can be constructed.")
# Gather the roots of unity of the number field
A = K.roots_of_unity()
w = K.number_of_roots_of_unity()
if SUK.rank() == 0:
# Since the rank is 0, K is imaginary quadratic and S is empty
# Only possibilities are combinations of roots of unity
# and this can only occur when there are 6 roots of unity, when
# (1+sqrt(-3))/2 + (1-sqrt(-3))/2 = 1 is the unique solution.
if len(A) == 6:
S_unit_solutions = [((ZZ(1),), (ZZ(5),), A[0], A[-2])]
else:
S_unit_solutions = []
else:
# First find a bound using the LLL reduction method
# That bound must exceed both 4 and w. (See [AKMRVW].)
all_LLL_bounds = [4, w]
all_LLL_bounds += [cx_LLL_bound(SUK, A, prec)]
if S:
# only need p-adic bound when S nonempty
all_LLL_bounds.append(p_adic_LLL_bound(SUK, A, prec))
# Take the largest of all of the bounds we found
final_LLL_bound = max(all_LLL_bounds)
if verbose:
print("The LLL bound is: ", final_LLL_bound)
# Use the sieve to find all solutions below the bound
S_unit_solutions = sieve_below_bound(K, list(S), final_LLL_bound, verbose=verbose)
if not include_exponents:
S_unit_solutions = [sol[2:] for sol in S_unit_solutions]
if include_bound:
return S_unit_solutions, final_LLL_bound
else:
return S_unit_solutions
def eq_up_to_order(A, B):
"""
If A and B are lists of four-tuples ``[a0,a1,a2,a3]`` and ``[b0,b1,b2,b3]``,
checks that there is some reordering so that either ``ai=bi`` for all ``i`` or
``a0==b1``, ``a1==b0``, ``a2==b3``, ``a3==b2``.
The entries must be hashable.
EXAMPLES::
sage: from sage.rings.number_field.S_unit_solver import eq_up_to_order
sage: L = [(1,2,3,4),(5,6,7,8)]
sage: L1 = [L[1],L[0]]
sage: L2 = [(2,1,4,3),(6,5,8,7)]
sage: eq_up_to_order(L, L1)
True
sage: eq_up_to_order(L, L2)
True
sage: eq_up_to_order(L, [(1,2,4,3),(5,6,8,7)])
False
"""
# Not the most efficient approach, but the solution lists are short in practice.
Adup = set(A + [(a[1],a[0],a[3],a[2]) for a in A])
Bdup = set(B + [(b[1],b[0],b[3],b[2]) for b in B])
return Adup == Bdup
|
import React from 'react';
import Text from '../../components/Text';
import Link from '../../components/Link';
import { List, ListItem } from '../../components/List';
const list = [
{ kind: 'simple', heading: 'Simple lists' },
{ kind: 'unordered', heading: 'Bullet lists' },
{ kind: 'ordered', heading: 'Ordered lists' },
];
const Normaltext = [
{ kind: 'p', text: 'Body copy, paragraph', styling: '16px, Regular' },
{ kind: 'p', text: 'Label (form)', styling: '14px, SemiBold' },
{ kind: 'code', text: 'Code', styling: 'Monospace, 16px, Regular' },
{ kind: 'sup', text: 'Sup', styling: '11px, Regular' },
{ kind: 'p', text: 'Inline highlight', styling: '14px, SemiBold' },
];
const colors = [
{ color: '#031C2D', text: '#text-01' },
{ color: '#5A6872', text: '#text-02' },
{ color: '#DFE6EB', text: '#text-03' },
];
const headingText = [
{ kind: 'h1', text: 'Heading 1', styling: '29px, SemiBold' },
{ kind: 'h2', text: 'Heading 2', styling: '25px, SemiBold' },
{ kind: 'h3', text: 'Heading 3', styling: '22px, SemiBold' },
{ kind: 'h4', text: 'Heading 4', styling: '20px, SemiBold' },
{ kind: 'h5', text: 'Heading 5', styling: '18px, SemiBold' },
{ kind: 'h6', text: 'Heading 6', styling: '16px, SemiBold' },
];
const emphasisText = [
{ kind: 'strong', text: 'Bold', styling: '16px, Bold' },
{ kind: 'bold', text: 'SemiBold', styling: '16px, SemiBold' },
{ kind: 'i', text: 'Italic', styling: '16px, Italic' },
];
const Typography = () => {
return (
<>
<h3>Headings</h3>
<p>Typographic scale when using headings in text component</p>
<div className="wfp--card-box">
{headingText.map((e) => (
<div key={e.kind} style={{ paddingLeft: '0.5rem' }}>
<Text kind={e.kind}>{e.text}</Text>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
{e.styling}
</div>
</div>
))}
</div>
<h3>Text</h3>
<p>Typographic scale for normal text</p>
<div className="wfp--card-box">
{Normaltext.map((e) => (
<div key={e.text} style={{ paddingLeft: '0.5rem' }}>
<Text kind={e.kind}>{e.text}</Text>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
{e.styling}
</div>
</div>
))}
</div>
<h3>Page title</h3>
<p>Typographic scale for page title and overline</p>
<div className="wfp--card-box">
<div style={{ paddingLeft: '0.5rem' }}>
<Text kind="title">Title</Text>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
58px, Light
</div>
<Text kind="subtitle">Overline</Text>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
14px, SemiBold
</div>
</div>
</div>
<h3>Links</h3>
<Text kind="p">Typographic scale for links</Text>
<div className="wfp--card-box">
<div style={{ paddingLeft: '0.5rem' }}>
<Text kind="a">Link</Text>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
16px, Regular
</div>
<Link className="wfp--link--hover">Link</Link>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
16px, Regular
</div>
</div>
</div>
<h3>Emphasis</h3>
<p>Typographic scale to put emphasis on text</p>
<div className="wfp--card-box">
{emphasisText.map((e) => (
<div key={e.kind} style={{ paddingLeft: '0.5rem' }}>
<Text kind={e.kind}>{e.text}</Text>
<div style={{ color: '#A9A9A9', marginBottom: '2rem' }}>
{e.styling}
</div>
</div>
))}
</div>
<h3>Lists</h3>
<p>
Typographic scale for lists of items in the different variations:
ordered and unordered
</p>
<div className="wfp--card-box">
{list.map((e) => (
<div key={e.kind} style={{ paddingLeft: '0.5rem' }}>
<Text kind="h4">{e.heading}</Text>
<List kind={e.kind}>
<ListItem>List item 1</ListItem>
<ListItem>List item 2</ListItem>
<ListItem>List item 3</ListItem>
</List>
</div>
))}
</div>
<h3>Colors</h3>
<p>Color accents to use on normal text</p>
<div className="wfp--card-box">
{colors.map((e) => (
<div key={e.text} style={{ display: 'flex', paddingLeft: '0.5rem' }}>
<div>
<Text>{e.text}</Text>
<div style={{ color: '#A9A9A9', marginBottom: '3rem' }}>
16px, {e.color}
</div>
</div>
<div
style={{
backgroundColor: e.color,
width: '50px',
height: '50px',
borderRadius: '50%',
marginLeft: '1.5rem',
}}
></div>
</div>
))}
</div>
</>
);
};
export default Typography;
|
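// Fallback for environments without a native Object.assign: shallow-copies
// the own enumerable keys of each source onto the target and returns it.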
export default Object.assign ||
function(target, ...sources) {
sources.forEach((source) => {
Object.keys(source).forEach((key) => (target[key] = source[key]));
});
return target;
};
|
import discord
COR = 0x690FC3
msg_id = None
msg_user = None
client = discord.Client()
@client.event
async def on_ready():
print('BOT ONLINE !')
print(client.user.name)
print(client.user.id)
print('------Vagner Tutorial----')
print('------LabNegro-------')
print('-----Custom Emoji Role Tutorial-----')
"""
link do emojis dos elos do league of legends para download .
https://drive.google.com/drive/folders/1dFHnj0RWG23iR2JfoEr56RdUs9flN-Lf?usp=sharing
Lembrado que o bot tem que estar no servidor com esse emojis.
Como pegar o id do emoji?
1° adcione no servidor
2° vá no discord e digite \ e selecione o emoji . Ai ele irá retornár um o id exemplo : <:support:439639384418025475>
3° agora so trocar os emojis no codigo pelos do seu servidor.
LEMBRANDO
-OS NOMES DO CARGOS TEM QUE SER INDÊNTICO AO DO SERVIDOR
-E PRA ADD CUSTO REACTION NA MESSAGEM NÃO PODE TER OS < >
"""
@client.event
async def on_message(message):
if message.content.lower().startswith("py_lol"):
embedlol = discord.Embed(
title='Escolha Seu Elo e Lane',
color=COR,
description='\n'
'\n')
embedlol.set_thumbnail(url='https://i.imgur.com/Mn08hTd.png')
embedlol.add_field(name='Unranked', value='<:unraked:439639400666759180>', inline=True)
embedlol.add_field(name='Top', value='<:top:439639384573214742>', inline=True)
embedlol.add_field(name='Bronze', value='<:bronze:439639385017942036>', inline=True)
embedlol.add_field(name='Jungle', value='<:jungle:439639384036474881>', inline=True)
embedlol.add_field(name='Prata', value='<:prata:439639397001068544>', inline=True)
embedlol.add_field(name='Mid', value='<:mid:439639384128618506>', inline=True)
embedlol.add_field(name='Ouro', value='<:ouro:439639401685843987>', inline=True)
embedlol.add_field(name='Adc', value='<:adc:439639377212080129>', inline=True)
embedlol.add_field(name='Platina', value='<:platina:439639389900111872>', inline=True)
embedlol.add_field(name='Suporte', value='<:support:439639384418025475>', inline=True)
embedlol.add_field(name='Diamante', value='<:diamante:439639397273436160>', inline=True)
botmsg = await client.send_message(message.channel, embed=embedlol)
await client.add_reaction(botmsg, ":unraked:439639400666759180")
await client.add_reaction(botmsg, ":bronze:439639385017942036")
await client.add_reaction(botmsg, ":prata:439639397001068544")
await client.add_reaction(botmsg, "::ouro:439639401685843987")
await client.add_reaction(botmsg, ":platina:439639389900111872")
await client.add_reaction(botmsg, ":diamante:439639397273436160")
await client.add_reaction(botmsg, ":top:439639384573214742")
await client.add_reaction(botmsg, ":jungle:439639384036474881")
await client.add_reaction(botmsg, ":mid:439639384128618506")
await client.add_reaction(botmsg, ":adc:439639377212080129")
await client.add_reaction(botmsg, ":support:439639384418025475")
global msg_id
msg_id = botmsg.id
global msg_user
msg_user = message.author
# Maps custom emoji IDs to the corresponding role names on the server.
EMOJI_ROLE_MAP = {
"439639400666759180": "● Unranked",
"439639385017942036": "● Bronze",
"439639397001068544": "● Prata",
"439639401685843987": "● Gold",
"439639389900111872": "● Platina",
"439639397273436160": "● Diamante",
"439639384573214742": "● Top",
"439639384036474881": "● Jg",
"439639384128618506": "● Mid",
"439639377212080129": "● Adc",
"439639384418025475": "● Suporte",
}
@client.event
async def on_reaction_add(reaction, user):
msg = reaction.message
if reaction.custom_emoji and reaction.emoji.id in EMOJI_ROLE_MAP:
role = discord.utils.find(lambda r: r.name == EMOJI_ROLE_MAP[reaction.emoji.id], msg.server.roles)
await client.add_roles(user, role)
@client.event
async def on_reaction_remove(reaction, user):
msg = reaction.message
if reaction.custom_emoji and reaction.emoji.id in EMOJI_ROLE_MAP:
role = discord.utils.find(lambda r: r.name == EMOJI_ROLE_MAP[reaction.emoji.id], msg.server.roles)
await client.remove_roles(user, role)
client.run('seu_token_aqui')
|
/**
* Created by dengchongjing on 2017/5/11.
*/
import * as types from '../types'
import { getTopic } from '@/api'
const state = {
article: {}
}
const actions = {
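// Fetch the topic with the given id from the API and commit it to the store.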
getTopic ({commit}, id) {
getTopic(id).then(data => { commit(types.UPDATE_TOPIC_DATA, data.data) })
}
}
const mutations = {
// Update the current article
[types.UPDATE_TOPIC_DATA] (state, article) {
state.article = article
}
}
export default {
state,
mutations,
actions
}
|
/*
* Copyright (C) 2017, Axis Communications AB, LUND, SWEDEN
*/
/*
* Simple example application that demonstrates how axoverlay is used.
*/
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <glib.h>
#include <glib-unix.h>
#include <cairo/cairo.h>
#include <axoverlay.h>
#define OVERLAY_WIDTH 144
#define OVERLAY_HEIGHT 144
#define PIE_RADIUS 64
#define ANIMATION_FPS 10
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
static gint animation_timer = -1;
static gdouble pie_angle = 0.0;
static gint overlay_id = -1;
static gboolean
signal_handler(gpointer data)
{
GMainLoop *main_loop = (GMainLoop *) data;
g_main_loop_quit(main_loop);
return G_SOURCE_REMOVE;
}
static gboolean
update_overlay_cb(gpointer data)
{
GError *error = NULL;
/* Update the angle of the pie shape */
pie_angle += 5.0;
if (pie_angle >= 360.0) {
pie_angle -= 360.0;
}
/* Request a redraw of the overlay */
axoverlay_redraw(&error);
if (error != NULL) {
/*
* If redraw fails then it is likely due to that overlayd has
* crashed. Don't exit instead wait for overlayd to restart and
* for axoverlay to restore the connection.
*/
printf("Failed to redraw overlay (%d): %s\n", error->code, error->message);
g_error_free(error);
}
return G_SOURCE_CONTINUE;
}
static void
render_overlay_cb(gpointer render_context, gint id,
struct axoverlay_stream_data *stream,
enum axoverlay_position_type postype, gfloat overlay_x,
gfloat overlay_y, gint overlay_width, gint overlay_height,
gpointer user_data)
{
cairo_t *cr = render_context;
gdouble x = OVERLAY_WIDTH/2;
gdouble y = OVERLAY_HEIGHT/2;
gdouble radius = PIE_RADIUS;
gdouble angle1 = 0;
gdouble angle2 = pie_angle*(M_PI/180.0);
/* Clear background */
cairo_set_source_rgba(cr, 0.0, 0.0, 0.0, 0.0);
cairo_set_operator(cr, CAIRO_OPERATOR_SOURCE);
cairo_rectangle(cr, 0, 0, OVERLAY_WIDTH, OVERLAY_HEIGHT);
cairo_fill(cr);
/* Draw a filled pie shape */
cairo_set_source_rgba(cr, 1.0, 0.0, 0.0, 1.0);
cairo_set_operator(cr, CAIRO_OPERATOR_SOURCE);
cairo_set_line_width(cr, 5.0);
cairo_arc(cr, x, y, radius, angle1, angle2);
cairo_line_to(cr, x, y);
cairo_line_to(cr, x+PIE_RADIUS, y);
cairo_fill(cr);
}
int
main(int argc, char **argv)
{
GError *error = NULL;
GMainLoop *main_loop = NULL;
/* Create a glib main loop */
main_loop = g_main_loop_new(NULL, FALSE);
g_unix_signal_add(SIGINT, signal_handler, main_loop);
g_unix_signal_add(SIGTERM, signal_handler, main_loop);
if(!axoverlay_is_backend_supported(AXOVERLAY_CAIRO_IMAGE_BACKEND)) {
printf("AXOVERLAY_CAIRO_IMAGE_BACKEND is not supported");
return 1;
}
/* Initialize the library */
struct axoverlay_settings settings;
axoverlay_init_axoverlay_settings(&settings);
settings.render_callback = render_overlay_cb;
settings.adjustment_callback = NULL;
settings.select_callback = NULL;
settings.backend = AXOVERLAY_CAIRO_IMAGE_BACKEND;
axoverlay_init(&settings, &error);
if (error != NULL) {
printf("Failed to initialize axoverlay: %s", error->message);
g_error_free(error);
return 1;
}
/* Create an overlay */
struct axoverlay_overlay_data data;
axoverlay_init_overlay_data(&data);
data.postype = AXOVERLAY_CUSTOM_NORMALIZED;
data.anchor_point = AXOVERLAY_ANCHOR_CENTER;
data.x = 0.0;
data.y = 0.0;
data.width = OVERLAY_WIDTH;
data.height = OVERLAY_HEIGHT;
data.colorspace = AXOVERLAY_COLORSPACE_ARGB32;
overlay_id = axoverlay_create_overlay(&data, NULL, &error);
if (error != NULL) {
printf("Failed to create first overlay: %s", error->message);
g_error_free(error);
return 1;
}
/* Draw overlays */
axoverlay_redraw(&error);
if (error != NULL) {
printf("Failed to draw overlays: %s", error->message);
/* A GError out-parameter must be cleared before it can be reused */
g_clear_error(&error);
axoverlay_destroy_overlay(overlay_id, &error);
g_clear_error(&error);
axoverlay_cleanup();
return 1;
}
/* Start animation timer */
animation_timer = g_timeout_add(1000/ANIMATION_FPS, update_overlay_cb, NULL);
/* Enter main loop */
g_main_loop_run(main_loop);
/* Destroy the overlay */
axoverlay_destroy_overlay(overlay_id, &error);
if (error != NULL) {
printf("Failed to destroy first overlay: %s", error->message);
g_error_free(error);
return 1;
}
/* Release library resources */
axoverlay_cleanup();
/* Release the animation timer */
g_source_remove(animation_timer);
/* Release main loop */
g_main_loop_unref(main_loop);
return 0;
}
|
/******************************************************************************
Copyright (c) 2000 Microsoft Corporation
Module Name:
ProjectConstants.h
Abstract:
This file contains constants common to the whole project.
Revision History:
Davide Massarenti (Dmassare) 03/20/2000
created
******************************************************************************/
#if !defined(__INCLUDED___PCH___PROJECTCONSTANTS_H___)
#define __INCLUDED___PCH___PROJECTCONSTANTS_H___
#ifndef DEBUG
#undef NOJETBLUECOM
#define NOJETBLUECOM
#endif
////////////////////////////////////////////////////////////////////////////////
#define HC_ROOT L"%WINDIR%\\PCHealth"
#define HC_ROOT_HELPSVC HC_ROOT L"\\HelpCtr"
#define HC_ROOT_HELPSVC_BINARIES HC_ROOT_HELPSVC L"\\Binaries"
#define HC_ROOT_HELPSVC_CONFIG HC_ROOT_HELPSVC L"\\Config"
#define HC_ROOT_HELPSVC_BATCH HC_ROOT_HELPSVC L"\\Batch"
#define HC_ROOT_HELPSVC_DATACOLL HC_ROOT_HELPSVC L"\\DataColl"
#define HC_ROOT_HELPSVC_LOGS HC_ROOT_HELPSVC L"\\Logs"
#define HC_ROOT_HELPSVC_TEMP HC_ROOT_HELPSVC L"\\Temp"
#define HC_ROOT_HELPSVC_OFFLINECACHE HC_ROOT_HELPSVC L"\\OfflineCache"
#define HC_ROOT_HELPSVC_PKGSTORE HC_ROOT_HELPSVC L"\\PackageStore"
#define HC_HELPSET_ROOT HC_ROOT_HELPSVC L"\\"
#define HC_HELPSET_SUB_INSTALLEDSKUS L"InstalledSKUs"
#define HC_HELPSET_SUB_DATABASE L"Database"
#define HC_HELPSET_SUB_INDEX L"Indices"
#define HC_HELPSET_SUB_SYSTEM L"System"
#define HC_HELPSET_SUB_SYSTEM_OEM L"System_OEM"
#define HC_HELPSET_SUB_VENDORS L"Vendors"
#define HC_HELPSET_SUB_HELPFILES L"HelpFiles"
#define HC_HELPSET_SUBSUB_DATAARCHIVE L"pchdata.cab"
#define HC_HELPSET_SUBSUB_DATABASEFILE L"hcdata.edb"
#define HC_HELPSET_SUBSUB_INDEXFILE L"merged.hhk"
#define HC_HELPSVC_HELPFILES_DEFAULT L"%WINDIR%\\Help"
// This is relative to CSIDL_LOCAL_APPDATA (i.e: C:\Documents and Settings\<username>\Local Settings\Application Data)
#define HC_ROOT_HELPCTR L"Microsoft\\HelpCtr"
#define HC_REGISTRY_BASE L"SOFTWARE\\Microsoft\\PCHealth"
#define HC_REGISTRY_HELPSVC HC_REGISTRY_BASE L"\\HelpSvc"
#define HC_REGISTRY_HELPHOST HC_REGISTRY_BASE L"\\HelpHost"
#define HC_REGISTRY_HELPCTR HC_REGISTRY_BASE L"\\HelpCtr"
#define HC_REGISTRY_PCHSVC HC_REGISTRY_BASE L"\\PchSvc"
#define HC_REGISTRY_HELPCTR_USER HC_REGISTRY_HELPCTR L"\\UserSettings"
#define HC_REGISTRY_HELPCTR_IE HC_REGISTRY_HELPCTR L"\\IESettings"
////////////////////////////////////////
#define HC_HELPSVC_STORE_TRUSTEDCONTENTS HC_ROOT_HELPSVC_CONFIG L"\\Cntstore.bin"
#define HC_HELPSVC_STORE_CHANNELS HC_ROOT_HELPSVC_CONFIG L"\\SAFStore.xml"
#define HC_HELPSVC_STORE_INCIDENTITEMS HC_ROOT_HELPSVC_CONFIG L"\\incstore.bin"
#define HC_HELPSVC_STORE_SKUS HC_ROOT_HELPSVC_PKGSTORE L"\\SkuStore.bin"
#define HC_HCUPDATE_LOGNAME HC_ROOT_HELPSVC_LOGS L"\\hcupdate.log"
#define HC_HCUPDATE_STORE_PACKAGES HC_ROOT_HELPSVC_PKGSTORE L"\\pchver.xml"
#define HC_HCUPDATE_STORE_SE HC_ROOT_HELPSVC_CONFIG L"\\sereg.xml"
#define HC_SEMGR_LOGNAME HC_ROOT_HELPSVC_LOGS L"\\semgr.log"
////////////////////////////////////////
// OLD
#define HC_HELPSVC_STORE_USERS HC_ROOT_HELPSVC_CONFIG L"\\UsersStore.cxml"
////////////////////////////////////////
#define HC_HELPSVC_NAME L"helpsvc"
#define HC_MICROSOFT_DN L"CN=Microsoft Corporation,L=Redmond,S=Washington,C=US"
////////////////////////////////////////
#define HC_TIMEOUT_NETWORKALIVE 3000
#define HC_TIMEOUT_DESTINATIONREACHABLE 3000
#define HC_TIMEOUT_CONNECTIONCHECK 15000
#define HC_TIMEOUT_LINKCHECKER_FOREGROUND 15000
#define HC_TIMEOUT_LINKCHECKER_BACKGROUND 25000
////////////////////////////////////////
#endif // !defined(__INCLUDED___PCH___PROJECTCONSTANTS_H___)
|
import re
from mat.utils.utils import Utils, Issue
class Issue(Issue):
TITLE = 'SSL Pinning Check'
DESCRIPTION = 'Checks if SSL Pinning is not implemented'
ID = 'ssl-pinning'
ISSUE_TITLE = 'Application Does Not Implement SSL Pinning'
FINDINGS = 'The Team found the application did not implement SSL Pinning'
def dependencies(self):
return self.ANALYSIS.UTILS.check_dependencies(['static'])
def run(self):
files = Utils.grep(r'X509TrustManager|getAcceptedIssuers|checkClientTrusted|checkServerTrusted', self.ANALYSIS.LOCAL_SMALI + '*')
if not files:
self.REPORT = True
self.FINDINGS = 'No evidence of TrustManager being used was found.'
self.DETAILS = ''
for f in files:
with open(f, 'r') as d:
smali = d.read()
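# The first pattern flags checkServerTrusted() bodies that fall straight
# through to return-void (i.e. every certificate chain is accepted); the
# second flags getAcceptedIssuers() bodies that just return null.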
if re.search(r'.method.*checkServerTrusted(.*\n)*?[ \t]*\.prologue\n(([\t ]*(\.line.*)?)\n)*[ \t]*return-void', smali):
self.REPORT = True
self.DETAILS += '\n* {file}:\n\n<code>\n{method}</code>\n'.format(file=f.replace(self.ANALYSIS.LOCAL_SMALI, 'smali'), method=self.ANALYSIS.UTILS.get_smali_method('checkServerTrusted', f))
if re.search(r'.method.*getAcceptedIssuers(.*\n)*?[ \t]*\.prologue\n(([\t ]*(\.line.*)?)\n)*[ \t]*const\/4 v0, 0x0\n[ \n\t]*return-object v0', smali):
self.REPORT = True
self.DETAILS += '\n* {file}:\n\n<code>\n{method}</code>\n'.format(file=f.replace(self.ANALYSIS.LOCAL_SMALI, 'smali'), method=self.ANALYSIS.UTILS.get_smali_method('getAcceptedIssuers', f))
|
from typing import Optional
import aiohttp
from apple.rpc.full_node_rpc_client import FullNodeRpcClient
from apple.util.byte_types import hexstr_to_bytes
from apple.util.config import load_config
from apple.util.default_root import DEFAULT_ROOT_PATH
from apple.util.ints import uint16
from apple.util.misc import format_bytes
async def netstorge_async(rpc_port: Optional[int], delta_block_height: str, start: str) -> None:
"""
Calculates the estimated space on the network given two block header hashes.
"""
client = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
if delta_block_height:
if start == "":
blockchain_state = await client.get_blockchain_state()
if blockchain_state["peak"] is None:
print("No blocks in blockchain")
client.close()
await client.await_closed()
return None
newer_block_height = blockchain_state["peak"].height
else:
newer_block = await client.get_block_record(hexstr_to_bytes(start))
if newer_block is None:
print("Block header hash", start, "not found.")
client.close()
await client.await_closed()
return None
else:
print("newer_height", newer_block.height)
newer_block_height = newer_block.height
newer_block_header = await client.get_block_record_by_height(newer_block_height)
older_block_height = max(0, newer_block_height - int(delta_block_height))
older_block_header = await client.get_block_record_by_height(older_block_height)
network_space_bytes_estimate = await client.get_network_space(
newer_block_header.header_hash, older_block_header.header_hash
)
print(
"Older Block\n"
f"Block Height: {older_block_header.height}\n"
f"Weight: {older_block_header.weight}\n"
f"VDF Iterations: {older_block_header.total_iters}\n"
f"Header Hash: 0x{older_block_header.header_hash}\n"
)
print(
"Newer Block\n"
f"Block Height: {newer_block_header.height}\n"
f"Weight: {newer_block_header.weight}\n"
f"VDF Iterations: {newer_block_header.total_iters}\n"
f"Header Hash: 0x{newer_block_header.header_hash}\n"
)
print(format_bytes(network_space_bytes_estimate))
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node rpc is running at {rpc_port}")
else:
print(f"Exception {e}")
# Close the RPC client even on failure, guarding against exceptions raised
# before the client was created.
if client is not None:
client.close()
await client.await_closed()
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import textwrap
from sqlalchemy import DDL
from indico.core import signals
@signals.db_schema_created.connect_via('categories')
def _create_check_consistency_deleted(sender, connection, **kwargs):
sql = textwrap.dedent("""
CREATE FUNCTION categories.check_consistency_deleted() RETURNS trigger AS
$BODY$
DECLARE
rows int;
BEGIN
CREATE TEMP TABLE IF NOT EXISTS _categories_consistency_deleted_checked (dummy bool) ON COMMIT DROP;
IF EXISTS (SELECT 1 FROM _categories_consistency_deleted_checked) THEN
RETURN NULL;
ELSE
INSERT INTO _categories_consistency_deleted_checked VALUES (true);
END IF;
-- use dynamic sql to prevent pg from preparing the statement with a crappy query plan
EXECUTE $$
WITH RECURSIVE chains(id, path, is_deleted) AS (
SELECT id, ARRAY[id], is_deleted
FROM categories.categories
WHERE parent_id IS NULL
UNION ALL
SELECT cat.id, chains.path || cat.id, chains.is_deleted OR cat.is_deleted
FROM categories.categories cat, chains
WHERE cat.parent_id = chains.id
)
SELECT 1
FROM events.events e
JOIN chains ON (chains.id = e.category_id)
WHERE NOT e.is_deleted AND chains.is_deleted;
$$;
GET DIAGNOSTICS rows = ROW_COUNT;
IF rows != 0 THEN
RAISE EXCEPTION SQLSTATE 'INDX1' USING
MESSAGE = 'Categories inconsistent',
DETAIL = 'Event inside deleted category';
END IF;
EXECUTE $$
SELECT 1
FROM categories.categories cat
JOIN categories.categories parent ON (parent.id = cat.parent_id)
WHERE NOT cat.is_deleted AND parent.is_deleted;
$$;
GET DIAGNOSTICS rows = ROW_COUNT;
IF rows != 0 THEN
RAISE EXCEPTION SQLSTATE 'INDX1' USING
MESSAGE = 'Categories inconsistent',
DETAIL = 'Subcategory inside deleted category';
END IF;
RETURN NULL;
END;
$BODY$
LANGUAGE plpgsql
""")
DDL(sql).execute(connection)
@signals.db_schema_created.connect_via('categories')
def _create_check_cycles(sender, connection, **kwargs):
sql = textwrap.dedent("""
CREATE FUNCTION categories.check_cycles() RETURNS trigger AS
$BODY$
DECLARE
rows int;
BEGIN
-- use dynamic sql to prevent pg from preparing the statement with a crappy query plan
EXECUTE $$
WITH RECURSIVE chains(id, path, is_cycle) AS (
SELECT id, ARRAY[id], false
FROM categories.categories
UNION ALL
SELECT cat.id, chains.path || cat.id, cat.id = ANY(chains.path)
FROM categories.categories cat, chains
WHERE cat.parent_id = chains.id AND NOT chains.is_cycle
)
SELECT 1 FROM chains WHERE is_cycle;
$$;
GET DIAGNOSTICS rows = ROW_COUNT;
IF rows != 0 THEN
RAISE EXCEPTION SQLSTATE 'INDX2' USING
MESSAGE = 'Categories inconsistent',
DETAIL = 'Cycle detected';
END IF;
RETURN NULL;
END;
$BODY$
LANGUAGE plpgsql
""")
DDL(sql).execute(connection)
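# A hedged sketch (assumed, not taken from Indico itself) of how trigger
# functions like the two above are typically attached in PostgreSQL: as
# deferred constraint triggers, so the consistency checks run once per
# transaction at commit time rather than per statement.
#
#     CREATE CONSTRAINT TRIGGER consistency_deleted
#     AFTER INSERT OR UPDATE ON categories.categories
#     DEFERRABLE INITIALLY DEFERRED
#     FOR EACH ROW
#     EXECUTE PROCEDURE categories.check_consistency_deleted();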
|
import * as React from 'react';
import createSvgIcon from './utils/createSvgIcon';
import { jsx as _jsx } from "react/jsx-runtime";
export default createSvgIcon( /*#__PURE__*/_jsx("path", {
d: "M20 10h-3V8.86c1.72-.45 3-2 3-3.86h-3V4c0-.55-.45-1-1-1H8c-.55 0-1 .45-1 1v1H4c0 1.86 1.28 3.41 3 3.86V10H4c0 1.86 1.28 3.41 3 3.86V15H4c0 1.86 1.28 3.41 3 3.86V20c0 .55.45 1 1 1h8c.55 0 1-.45 1-1v-1.14c1.72-.45 3-2 3-3.86h-3v-1.14c1.72-.45 3-2 3-3.86zm-8 9c-1.11 0-2-.9-2-2s.89-2 2-2c1.1 0 2 .9 2 2s-.89 2-2 2zm0-5c-1.11 0-2-.9-2-2s.89-2 2-2c1.1 0 2 .9 2 2s-.89 2-2 2zm0-5c-1.11 0-2-.9-2-2 0-1.11.89-2 2-2 1.1 0 2 .89 2 2 0 1.1-.89 2-2 2z"
}), 'Traffic');
|
function accepts(file, acceptedFiles) {
if (file && acceptedFiles) {
const acceptedFilesArray = Array.isArray(acceptedFiles)
? acceptedFiles
: acceptedFiles.split(",");
const fileName = file.name || "";
const mimeType = file.type || "";
const baseMimeType = mimeType.replace(/\/.*$/, "");
return acceptedFilesArray.some((type) => {
const validType = type.trim();
if (validType.charAt(0) === ".") {
return fileName.toLowerCase().endsWith(validType.toLowerCase());
} else if (validType.endsWith("/*")) {
        // This is something like an image/* MIME type, e.g. "image/png"
return baseMimeType === validType.replace(/\/.*$/, "");
}
return mimeType === validType;
});
}
return true;
}
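// A hedged usage sketch with hypothetical inputs; `accepts` mirrors the HTML
// accept-attribute matching rules (extension, wildcard MIME, exact MIME):
//   const png = { name: "photo.PNG", type: "image/png" };
//   accepts(png, ".png,application/pdf"); // true  (case-insensitive extension match)
//   accepts(png, "image/*");              // true  (base MIME type match)
//   accepts(png, "text/plain");           // false (exact MIME mismatch)
//   accepts(png, null);                   // true  (no restriction given)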
function isImage(file) {
  // Treat anything with an image/* MIME type as an image
  return file.type.split("/")[0] === "image";
}
function convertBytesToMbsOrKbs(filesize) {
let size = "";
  // Decimal (SI) units, not binary: 1 megabyte is treated as 1,000,000 bytes here
if (filesize >= 1000000) {
size = filesize / 1000000 + " megabytes";
} else if (filesize >= 1000) {
size = filesize / 1000 + " kilobytes";
} else {
size = filesize + " bytes";
}
return size;
}
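// A quick sketch of the decimal thresholds above (hypothetical inputs):
//   convertBytesToMbsOrKbs(1500000); // "1.5 megabytes"
//   convertBytesToMbsOrKbs(2048);    // "2.048 kilobytes"
//   convertBytesToMbsOrKbs(512);     // "512 bytes"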
async function createFileFromUrl(url) {
const response = await fetch(url);
const data = await response.blob();
const metadata = { type: data.type };
const filename = url.replace(/\?.+/, "").split("/").pop();
const ext = data.type.split("/").pop();
return new File([data], `${filename}.${ext}`, metadata);
}
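// A hedged usage sketch (hypothetical URL). Note the MIME subtype is appended
// as an extension even when the URL already carries one, so "cat.jpg" served
// as image/jpeg comes back named "cat.jpg.jpeg":
//   const file = await createFileFromUrl("https://example.com/cat.jpg");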
export { accepts, isImage, convertBytesToMbsOrKbs, createFileFromUrl };
|
import os
import argparse
from ConfigParser import SafeConfigParser
import sys
import logging
#sys.path.append('/home/zhengc/NRC-LIMS-dataDownloader')
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from nrc_ngs_dl.lims_database import LimsDatabase
from nrc_ngs_dl.web_parser import WebParser
def set_up_logging():
logger = logging.getLogger('nrc_ngs_dl')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('information.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('***********test_database**************')
def parse_input_args(argv):
input_parser = argparse.ArgumentParser()
input_parser.add_argument('-c', dest='config_file')
args = input_parser.parse_args(argv)
return args
def main():
    # get settings from the config file (e.g. config.ini.sample)
set_up_logging()
logger= logging.getLogger('nrc_ngs_dl.lims_downloader')
config_parser = SafeConfigParser()
try:
args = parse_input_args(sys.argv[1:])
except:
logger.info('Wrong command line args')
sys.exit(1)
if not args.config_file:
logger.info('Missing the configuration file')
        logger.info('Usage: python lims_downloader.py -c /path/to/configuration.file')
sys.exit(1)
config_file = args.config_file
    try:
        # open() only verifies that the file exists and is readable
        with open(config_file):
            config_parser.read(config_file)
    except IOError:
        logger.info('Cannot open file: %s' % config_file)
        sys.exit(1)
try:
logger.info('Get settings ...')
DB_NAME = config_parser.get('sqlite_database', 'name')
USERNAME = config_parser.get('nrc_lims','username')
PASSWORD = config_parser.get('nrc_lims','password')
LOGIN_URL = config_parser.get('nrc_lims','login_url')
RUNLIST_URL = config_parser.get('nrc_lims','runlist_url')
DESTINATION_FOLDER = config_parser.get('output','path')
TABLE_RUN_LIST = config_parser.get('run_list_setting','table')
COLUMN_RUN_LINK = config_parser.get('run_list_setting','column_link')
COLUMN_RUN_STATUS = config_parser.get('run_list_setting','column_status')
TABLE_FILE_LIST = config_parser.get('file_list_setting','table')
COLUMN_FILE_LINK = config_parser.get('file_list_setting','column_link')
COLUMN_LANE = config_parser.get('file_list_setting','column_lane')
except:
logger.info('Cannot get the configuration settings' )
sys.exit(1)
    if not os.path.exists(DESTINATION_FOLDER):
        logger.info('DESTINATION_FOLDER does not exist or is not accessible')
sys.exit(1)
#connect to database if the database exist
#otherwise create tables for this database
#if os.path.isfile(DB_NAME):
# os.remove(DB_NAME)
lims_database = LimsDatabase(DB_NAME)
if lims_database is None:
logger.info('Cannot access the database')
sys.exit(1)
#login to LIMS webpage
try:
        logger.info('Logging in to the LIMS web page ...')
web_parser = WebParser(LOGIN_URL,RUNLIST_URL,USERNAME,PASSWORD)
except:
logger.info('Cannot access the web page')
sys.exit(1)
#get a list of all the completed sequence runs
#information for each run : url_for_the_run, run_name, plate_name,
#Plateform, Operator, Creation Date, Description, status
try:
logger.info('Getting run list ...')
run_list = web_parser.get_runlist(TABLE_RUN_LIST, COLUMN_RUN_LINK, COLUMN_RUN_STATUS)
except:
logger.info('Cannot get the list of sequence runs')
sys.exit(1)
    #For each sequence run in the list:
    #1. check whether it is new data or re-processed data
    #2. new data: download it and insert its information into the database tables
    #3. re-processed data: delete the old records first, then download and insert as new
for a_run in run_list:
run_url = a_run
run_info = web_parser.get_runinfo(run_url)
lane_info = web_parser.get_laneinfo(run_url,TABLE_FILE_LIST, COLUMN_LANE,COLUMN_FILE_LINK)
for a_lane in lane_info:
case = lims_database.check_new_run(run_info,a_lane)
            if case == 3:
logger.info('Deleting records in database for re-processed data (run_name %s, lane_index %s)' % (run_info['run_name'],a_lane[0]))
lims_database.delete_old_run(run_info, a_lane)
if case != 1:
logger.info('downloading new/re-processed data (run_name %s, lane_index %s)' % (run_info['run_name'],a_lane[0]))
file_info = web_parser.get_fileinfo(run_url,a_lane,TABLE_FILE_LIST)
rowid = lims_database.insert_run_info(run_info)
lims_database.insert_lane_info(rowid,run_url,a_lane)
lims_database.insert_file_info(rowid,file_info)
lims_database.disconnect()
if __name__ == '__main__':
main()
|
from . import problem, user, submit
|
from contextlib import suppress
import awxkit.exceptions as exc
from awxkit.api.pages import base, WorkflowJobTemplate, UnifiedJobTemplate, JobTemplate
from awxkit.api.mixins import HasCreate, DSAdapter
from awxkit.api.resources import resources
from awxkit.utils import update_payload, PseudoNamespace, random_title
from . import page
class WorkflowJobTemplateNode(HasCreate, base.Base):
dependencies = [WorkflowJobTemplate, UnifiedJobTemplate]
NATURAL_KEY = ('workflow_job_template', 'identifier')
def payload(self, workflow_job_template, unified_job_template, **kwargs):
if not unified_job_template:
# May pass "None" to explicitly create an approval node
payload = PseudoNamespace(workflow_job_template=workflow_job_template.id)
else:
payload = PseudoNamespace(workflow_job_template=workflow_job_template.id, unified_job_template=unified_job_template.id)
optional_fields = (
'diff_mode',
'extra_data',
'limit',
'scm_branch',
'job_tags',
'job_type',
'skip_tags',
'verbosity',
'identifier',
'all_parents_must_converge',
)
update_payload(payload, optional_fields, kwargs)
if 'inventory' in kwargs:
payload['inventory'] = kwargs['inventory'].id
return payload
def create_payload(self, workflow_job_template=WorkflowJobTemplate, unified_job_template=JobTemplate, **kwargs):
if not unified_job_template:
self.create_and_update_dependencies(workflow_job_template)
payload = self.payload(workflow_job_template=self.ds.workflow_job_template, unified_job_template=None, **kwargs)
else:
self.create_and_update_dependencies(workflow_job_template, unified_job_template)
payload = self.payload(workflow_job_template=self.ds.workflow_job_template, unified_job_template=self.ds.unified_job_template, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def create(self, workflow_job_template=WorkflowJobTemplate, unified_job_template=JobTemplate, **kwargs):
payload = self.create_payload(workflow_job_template=workflow_job_template, unified_job_template=unified_job_template, **kwargs)
return self.update_identity(WorkflowJobTemplateNodes(self.connection).post(payload))
def _add_node(self, endpoint, unified_job_template, **kwargs):
node = endpoint.post(dict(unified_job_template=unified_job_template.id, **kwargs))
node.create_and_update_dependencies(self.ds.workflow_job_template, unified_job_template)
return node
def add_always_node(self, unified_job_template, **kwargs):
return self._add_node(self.related.always_nodes, unified_job_template, **kwargs)
def add_failure_node(self, unified_job_template, **kwargs):
return self._add_node(self.related.failure_nodes, unified_job_template, **kwargs)
def add_success_node(self, unified_job_template, **kwargs):
return self._add_node(self.related.success_nodes, unified_job_template, **kwargs)
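    # A hedged usage sketch (hypothetical objects): _add_node returns the newly
    # created node page, so workflow branches can be chained off each other.
    #   root = WorkflowJobTemplateNode(connection).create()
    #   child = root.add_success_node(some_job_template)
    #   child.add_failure_node(cleanup_job_template)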
def add_credential(self, credential):
with suppress(exc.NoContent):
self.related.credentials.post(dict(id=credential.id, associate=True))
def remove_credential(self, credential):
with suppress(exc.NoContent):
self.related.credentials.post(dict(id=credential.id, disassociate=True))
def remove_all_credentials(self):
for cred in self.related.credentials.get().results:
with suppress(exc.NoContent):
self.related.credentials.post(dict(id=cred.id, disassociate=True))
def make_approval_node(self, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = 'approval node {}'.format(random_title())
self.related.create_approval_template.post(kwargs)
return self.get()
def get_job_node(self, workflow_job):
candidates = workflow_job.get_related('workflow_nodes', identifier=self.identifier)
return candidates.results.pop()
page.register_page(
[resources.workflow_job_template_node, (resources.workflow_job_template_nodes, 'post'), (resources.workflow_job_template_workflow_nodes, 'post')],
WorkflowJobTemplateNode,
)
class WorkflowJobTemplateNodes(page.PageList, WorkflowJobTemplateNode):
pass
page.register_page(
[
resources.workflow_job_template_nodes,
resources.workflow_job_template_workflow_nodes,
resources.workflow_job_template_node_always_nodes,
resources.workflow_job_template_node_failure_nodes,
resources.workflow_job_template_node_success_nodes,
],
WorkflowJobTemplateNodes,
)
|
/*global window, console, document */
/*
* Speaker represents the volume icon that will be shown in the mediaPlayer, for example.
* It manages the volume level of the media tag given in the constructor.
* Every Speaker is a View.
* Ex.: var speaker = Speaker({elementID: element, media: mediaTag, id: id});
*/
var Erizo = Erizo || {};
Erizo.Speaker = function (spec) {
"use strict";
var that = Erizo.View({}),
show,
mute,
unmute,
lastVolume = 50;
// Variables
// DOM element in which the Speaker will be appended
that.elementID = spec.elementID;
// media tag
that.media = spec.media;
// Speaker id
that.id = spec.id;
// MediaStream
that.stream = spec.stream;
// Container
that.div = document.createElement('div');
that.div.setAttribute('style', 'width: 40%; height: 100%; max-width: 32px; position: absolute; right: 0;z-index:0;');
// Volume icon
that.icon = document.createElement('img');
that.icon.setAttribute('id', 'volume_' + that.id);
that.icon.setAttribute('src', that.url + '/assets/sound48.png');
that.icon.setAttribute('style', 'width: 80%; height: 100%; position: absolute;');
that.div.appendChild(that.icon);
if (!that.stream.local) {
// Volume bar
that.picker = document.createElement('input');
that.picker.setAttribute('id', 'picker_' + that.id);
that.picker.type = "range";
that.picker.min = 0;
that.picker.max = 100;
that.picker.step = 10;
that.picker.value = lastVolume;
that.picker.setAttribute("orient", "vertical"); // FireFox supports range sliders as of version 23
that.div.appendChild(that.picker);
that.media.volume = that.picker.value / 100;
that.media.muted = false;
that.picker.oninput = function (evt) {
if (that.picker.value > 0) {
that.media.muted = false;
that.icon.setAttribute('src', that.url + '/assets/sound48.png');
} else {
that.media.muted = true;
that.icon.setAttribute('src', that.url + '/assets/mute48.png');
}
that.media.volume = that.picker.value / 100;
};
// Private functions
show = function (displaying) {
            that.picker.setAttribute('style', 'background: transparent; width: 32px; height: 100px; position: absolute; bottom: 90%; right: 0px; z-index: 1; -webkit-appearance: slider-vertical; display: ' + displaying);
};
mute = function () {
that.icon.setAttribute('src', that.url + '/assets/mute48.png');
lastVolume = that.picker.value;
that.picker.value = 0;
that.media.volume = 0;
that.media.muted = true;
};
unmute = function () {
that.icon.setAttribute('src', that.url + '/assets/sound48.png');
that.picker.value = lastVolume;
that.media.volume = that.picker.value / 100;
that.media.muted = false;
};
that.icon.onclick = function (evt) {
if (that.media.muted) {
unmute();
} else {
mute();
}
        };
// Public functions
that.div.onmouseover = function (evt) {
show('block');
};
that.div.onmouseout = function (evt) {
show('none');
};
show('none');
} else {
mute = function () {
that.media.muted = true;
that.icon.setAttribute('src', that.url + '/assets/mute48.png');
that.stream.stream.getAudioTracks()[0].enabled = false;
};
unmute = function () {
that.media.muted = false;
that.icon.setAttribute('src', that.url + '/assets/sound48.png');
that.stream.stream.getAudioTracks()[0].enabled = true;
};
that.icon.onclick = function (evt) {
if (that.media.muted) {
unmute();
} else {
mute();
}
        };
}
document.getElementById(that.elementID).appendChild(that.div);
return that;
};
|
module.exports = function(grunt) {
grunt.initConfig({
exec: {
build: {
cmd: 'jekyll build'
},
serve: {
cmd: "jekyll serve --watch --baseurl=''"
},
deploy: {
cmd: 'echo This is hosted on GitHub Pages. Push to deploy.'
}
}
});
grunt.loadNpmTasks('grunt-exec');
grunt.registerTask('default', [ 'exec:serve' ]);
grunt.registerTask('serve', [ 'exec:serve' ]);
grunt.registerTask('deploy', [ 'exec:deploy' ]);
};
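// Assumed usage (not stated in the file itself): plain `grunt` or `grunt serve`
// starts the Jekyll dev server via grunt-exec; `grunt exec:build` runs a
// one-off static build.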
|
/* istanbul instrument in package npmdoc_eslint */
/*jslint
bitwise: true,
browser: true,
maxerr: 8,
maxlen: 96,
node: true,
nomen: true,
regexp: true,
stupid: true
*/
(function () {
'use strict';
var local;
// run shared js-env code - pre-init
(function () {
// init local
local = {};
// init modeJs
local.modeJs = (function () {
try {
return typeof navigator.userAgent === 'string' &&
typeof document.querySelector('body') === 'object' &&
typeof XMLHttpRequest.prototype.open === 'function' &&
'browser';
} catch (errorCaughtBrowser) {
return module.exports &&
typeof process.versions.node === 'string' &&
typeof require('http').createServer === 'function' &&
'node';
}
}());
// init global
local.global = local.modeJs === 'browser'
? window
: global;
switch (local.modeJs) {
// re-init local from window.local
case 'browser':
local = local.global.utility2.objectSetDefault(
local.global.utility2_rollup || local.global.local,
local.global.utility2
);
break;
// re-init local from example.js
case 'node':
local = (local.global.utility2_rollup || require('utility2'))
.requireExampleJsFromReadme();
break;
}
// export local
local.global.local = local;
}());
// run shared js-env code - function
(function () {
return;
}());
switch (local.modeJs) {
// run browser js-env code - function
case 'browser':
break;
// run node js-env code - function
case 'node':
break;
}
// run shared js-env code - post-init
(function () {
return;
}());
switch (local.modeJs) {
// run browser js-env code - post-init
case 'browser':
local.testCase_browser_nullCase = local.testCase_browser_nullCase || function (
options,
onError
) {
/*
             * this function will test the browser's null-case handling-behavior
*/
onError(null, options);
};
// run tests
local.nop(local.modeTest &&
document.querySelector('#testRunButton1') &&
document.querySelector('#testRunButton1').click());
break;
// run node js-env code - post-init
/* istanbul ignore next */
case 'node':
local.testCase_buildApidoc_default = local.testCase_buildApidoc_default || function (
options,
onError
) {
/*
             * this function will test buildApidoc's default handling-behavior
*/
options = { modulePathList: module.paths };
local.buildApidoc(options, onError);
};
local.testCase_buildApp_default = local.testCase_buildApp_default || function (
options,
onError
) {
/*
             * this function will test buildApp's default handling-behavior
*/
local.testCase_buildReadme_default(options, local.onErrorThrow);
local.testCase_buildLib_default(options, local.onErrorThrow);
local.testCase_buildTest_default(options, local.onErrorThrow);
local.testCase_buildCustomOrg_default(options, local.onErrorThrow);
options = [];
local.buildApp(options, onError);
};
local.testCase_buildCustomOrg_default = local.testCase_buildCustomOrg_default ||
function (options, onError) {
/*
* this function will test buildCustomOrg's default handling-behavior
*/
options = {};
local.buildCustomOrg(options, onError);
};
local.testCase_buildLib_default = local.testCase_buildLib_default || function (
options,
onError
) {
/*
* this function will test buildLib's default handling-behavior
*/
options = {};
local.buildLib(options, onError);
};
local.testCase_buildReadme_default = local.testCase_buildReadme_default || function (
options,
onError
) {
/*
             * this function will test buildReadme's default handling-behavior
*/
options = {};
local.buildReadme(options, onError);
};
local.testCase_buildTest_default = local.testCase_buildTest_default || function (
options,
onError
) {
/*
* this function will test buildTest's default handling-behavior
*/
options = {};
local.buildTest(options, onError);
};
local.testCase_webpage_default = local.testCase_webpage_default || function (
options,
onError
) {
/*
* this function will test webpage's default handling-behavior
*/
options = { modeCoverageMerge: true, url: local.serverLocalHost + '?modeTest=1' };
local.browserTest(options, onError);
};
// run test-server
local.testRunServer(local);
break;
}
}());
|
import React, { useState } from "react";
import "./main.css";
import axios from "axios";
import { useHistory } from "react-router-dom";
import { setCountry } from "../../actions/country";
import { setRegion } from "../../actions/region";
import { connect } from "react-redux";
const Main = ({ setCountry, setRegion }) => {
const [state, setstate] = useState({
country: "",
continent: "",
region: "",
world: 0,
n: 0,
flag: false,
});
let { country, continent, region, n, flag, world } = state;
const history = useHistory();
const url = "http://localhost:8080/population";
let config = {
headers: {
"Access-Control-Allow-Origin": "*",
},
params: {
country: country,
continent: continent,
region: region,
top: n,
},
};
if (!flag)
axios
.get(url + "/world")
.then((res) => setstate({ ...state, world: res.data, flag: true }));
return (
<div className="main">
<div>
        <h5>World Population: {world}</h5>
</div>
<div
className="card-panel"
onClick={(e) =>
axios.get(url + "/continents").then((res) => {
setRegion(res.data);
history.push("/region");
})
}
>
<h5>Continents Populations</h5>
</div>
<div
className="card-panel"
onClick={(e) =>
axios.get(url + "/regions").then((res) => {
setRegion(res.data);
history.push("/region");
})
}
>
<h5>Regions Populations</h5>
</div>
<div
onClick={(e) => {
axios.get(url + "/country/all").then((res) => {
setCountry(res.data);
history.push("/country");
});
}}
className="card-panel"
>
<h5>World Countries</h5>
</div>
<div
onClick={(e) => {
if (continent !== "")
axios.get(`${url}/country/continent`, config).then((res) => {
setCountry(res.data);
history.push("/country");
});
}}
className="card-panel"
>
<h5>Countries in a Continent</h5>
<div class="input-field">
<input
value={continent}
onChange={(e) => setstate({ ...state, continent: e.target.value })}
placeholder="Continent"
type="text"
class="validate"
/>
</div>
</div>
<div
onClick={(e) => {
if (region !== "")
axios.get(`${url}/country/region`, config).then((res) => {
setCountry(res.data);
history.push("/country");
});
}}
className="card-panel"
>
<h5>Countries in a Region</h5>
<input
value={region}
onChange={(e) => setstate({ ...state, region: e.target.value })}
placeholder="Region"
type="text"
class="validate"
/>
</div>
<div
onClick={(e) => {
if (n !== 0)
axios.get(`${url}/country/top`, config).then((res) => {
setCountry(res.data);
history.push("/country");
});
}}
className="card-panel"
>
<h5>Top n Countries in the world</h5>
<input
value={n}
onChange={(e) => setstate({ ...state, n: e.target.value })}
placeholder="Top"
type="number"
class="validate"
/>
</div>
<div
onClick={(e) => {
if (n !== 0)
axios.get(`${url}/country/continent/top`, config).then((res) => {
setCountry(res.data);
history.push("/country");
});
}}
className="card-panel"
>
<h5>Top n Countries in the Continent</h5>
<input
value={continent}
onChange={(e) => setstate({ ...state, continent: e.target.value })}
placeholder="Continent"
type="text"
class="validate"
/>
<input
value={n}
onChange={(e) => setstate({ ...state, n: e.target.value })}
placeholder="Top"
type="number"
class="validate"
/>
</div>
<div
onClick={(e) => {
if (region !== "")
axios.get(`${url}/country/region/top`, config).then((res) => {
setCountry(res.data);
history.push("/country");
});
}}
className="card-panel"
>
<h5>Top n Countries in the Region</h5>
<input
value={region}
onChange={(e) => setstate({ ...state, region: e.target.value })}
placeholder="Region"
type="text"
class="validate"
/>
<input
value={n}
onChange={(e) => setstate({ ...state, n: e.target.value })}
placeholder="Top"
type="number"
class="validate"
/>
</div>
</div>
);
};
export default connect(null, { setCountry, setRegion })(Main);
|
/**
* @file Make component support data operation like Vue for ant mini program
* @author sparklewhy@gmail.com
*/
'use strict';
import observable, {setObservableContext} from '../base';
import {observableArray, overrideArrayMethods} from '../array';
import {component as antApi, array as antArray} from './array';
setObservableContext('props', true);
let componentExtension = observable.component;
let rawCreated = componentExtension.created;
componentExtension.created = function () {
if (this.$rawComputed) {
        // fix ant reference bug: mutating `this.data.xx` during page onload
        // is not allowed, otherwise it corrupts the page's initial data state
        // the next time the page loads.
        // So, create a shallow copy of data here.
this.data = Object.assign({}, this.data);
}
rawCreated.call(this);
};
Object.assign(componentExtension.methods, antApi);
let arrApis = Object.assign({}, observableArray, antArray);
overrideArrayMethods(arrApis, true);
overrideArrayMethods(arrApis, false);
/**
* View update hook
*
* @private
* @param {Object} prevProps the previous property data before update
*/
observable.component.didUpdate = function (prevProps) {
let propObserver = this.__propsObserver;
if (!propObserver) {
return;
}
let currProps = this.props;
    // refresh the cached props data: the props object is replaced whenever a
    // prop changes, so the observer's cached reference would otherwise keep
    // pointing at the stale, pre-update props
propObserver.rawData = currProps;
Object.keys(prevProps).forEach(k => {
let newVal = currProps[k];
let oldVal = prevProps[k];
if (newVal !== oldVal) {
propObserver.firePropValueChange(k, newVal, oldVal);
}
});
};
export default observable;
|
const path = require("path");
const { DefinePlugin } = require("webpack");
const { CleanWebpackPlugin } = require("clean-webpack-plugin");
const isProd = process.env.NODE_ENV !== "development";
module.exports = {
mode: isProd ? "production" : "development",
devtool: isProd ? "source-map" : "eval-source-map",
entry: {
"my-lib": path.resolve(__dirname, "src/my-lib.ts")
},
output: {
filename: "[name].js",
path: path.resolve(__dirname, "dist"),
library: "MyLib",
libraryTarget: "umd",
libraryTarget: "window"
},
resolve: {
extensions: [".ts", ".js"]
},
module: {
rules: [
{
test: /\.ts$/,
exclude: [/node_modules/],
use: ["babel-loader", "ts-loader", "eslint-loader"]
},
{
test: /\.js$/,
exclude: [/node_modules/],
use: ["babel-loader", "eslint-loader"]
}
]
},
plugins: [
new DefinePlugin({
__VERSION__: require("./package.json").version
}),
new CleanWebpackPlugin()
]
};
|
"""
Copyright (c) 2012-2014, Austin Benson and David Gleich
All rights reserved.
This file is part of MRTSQR and is under the BSD 2-Clause License,
which can be found in the LICENSE file in the root directory, or at
http://opensource.org/licenses/BSD-2-Clause
"""
"""
Mapper and reducer implementations for Direct TSQR.
Use the script run_dirtsqr.py to run Direct TSQR.
"""
import sys
import time
import struct
import uuid
import cPickle as pickle
import gc
import numpy
import numpy.linalg
import util
import mrmc
import dumbo
import dumbo.backends.common
from dumbo import opt
@opt("getpath", "yes")
class DirTSQRMap1(mrmc.MatrixHandler):
"""
Input: <key, value> pairs representing <row id, row> in the matrix A
Output:
1. R matrix: <mapper id, row>
2. Q matrix: <mapper id, row + [row_id]>
"""
def __init__(self):
mrmc.MatrixHandler.__init__(self)
self.keys = []
self.data = []
self.mapper_id = uuid.uuid1().hex
def collect(self,key,value):
        if self.ncols is None:
self.ncols = len(value)
print >>sys.stderr, "Matrix size: %i columns"%(self.ncols)
else:
assert(len(value) == self.ncols)
self.keys.append(key)
self.data.append(value)
self.nrows += 1
# write status updates so Hadoop doesn't complain
if self.nrows%50000 == 0:
self.counters['rows processed'] += 50000
def close(self):
self.counters['rows processed'] += self.nrows % 50000
# if no data was passed to this task, we just return
if len(self.data) == 0:
return
QR = numpy.linalg.qr(numpy.array(self.data))
yield ("R_%s" % str(self.mapper_id), self.mapper_id), QR[1].tolist()
flat_Q = [entry for row in QR[0] for entry in row]
val1 = pickle.dumps(self.keys)
val2 = struct.pack('d'*len(flat_Q), *flat_Q)
val = ''.join([str(len(val1)) + '_', val1, val2])
yield ("Q_%s" % str(self.mapper_id), self.mapper_id), val
def __call__(self,data):
self.collect_data(data)
for key,val in self.close():
yield key, val
@opt("getpath", "yes")
class DirTSQRRed2(dumbo.backends.common.MapRedBase):
"""
Takes all of the intermediate Rs
Computes [R_1, ..., R_n] = Q2R_{final}
Output:
1. R_final: R in A = QR with key-value pairs <i, row>
2. Q2: <mapper_id, row>
    where Q2 is a list of key-value pairs.
    Each key corresponds to a mapper id from stage 1, and that key's value is
    the Q2 matrix corresponding to that mapper_id
"""
def __init__(self, compute_svd=False):
self.R_data = {}
self.key_order = []
self.Q2 = None
self.compute_svd = compute_svd
def collect(self, key, value):
assert(key not in self.R_data)
data = []
for row in value:
data.append([float(val) for val in row])
self.R_data[key] = data
def close_R(self):
data = []
for key in self.R_data:
data += self.R_data[key]
self.key_order.append(key)
A = numpy.array(data)
QR = numpy.linalg.qr(A)
self.Q2 = QR[0].tolist()
self.R_final = QR[1].tolist()
for i, row in enumerate(self.R_final):
yield ("R_final", i), row
if self.compute_svd:
U, S, Vt = numpy.linalg.svd(self.R_final)
S = numpy.diag(S)
for i, row in enumerate(U):
yield ("U", i), row
for i, row in enumerate(S):
yield ("Sigma", i), row
for i, row in enumerate(Vt):
yield ("Vt", i), row
def close_Q(self):
num_rows = len(self.Q2)
rows_to_read = num_rows / len(self.key_order)
ind = 0
key_ind = 0
local_Q = []
for row in self.Q2:
local_Q.append(row)
ind += 1
if (ind == rows_to_read):
flat_Q = [entry for row in local_Q for entry in row]
yield ("Q2", self.key_order[key_ind]), flat_Q
key_ind += 1
local_Q = []
ind = 0
def __call__(self,data):
for key,values in data:
for value in values:
self.collect(key, value)
for key, val in self.close_R():
yield key, val
for key, val in self.close_Q():
yield key, val
class DirTSQRMap3(dumbo.backends.common.MapRedBase):
"""
input: Q1 as <mapper_id, [row] + [row_id]>
input: Q2 comes attached as a text file, which is then parsed on the fly
output: Q as <row_id, row>
"""
def __init__(self,ncols,q2path='q2.txt',upath=None):
# TODO implement this
self.Q1_data = {}
self.row_keys = {}
self.Q2_data = {}
self.ncols = ncols
self.q2path = q2path
self.u_data = None
if upath is not None:
self.u_data = []
for row in util.parse_matrix_txt(upath):
self.u_data.append(row)
self.u_data = numpy.mat(self.u_data)
def parse_q2(self):
try:
f = open(self.q2path, 'r')
except:
            # the file may have been shipped alongside the script without its
            # directory prefix, so retry with the bare filename
f = open(self.q2path.split('/')[-1], 'r')
for line in f:
if len(line) > 5:
ind1 = line.find('(')
ind2 = line.rfind(')')
key = line[ind1+1:ind2]
# lazy parsing: we only need the keys that we have
if key not in self.Q1_data:
continue
line = line[ind2+3:]
line = line.lstrip('[').rstrip().rstrip(']')
line = line.split(',')
line = [float(v) for v in line]
line = numpy.array(line)
mat = numpy.reshape(line, (self.ncols, self.ncols))
self.Q2_data[key] = mat
f.close()
def collect(self, key, keys, value):
self.Q1_data[key] = (keys, value)
def close(self):
# parse the q2 file we were given
self.parse_q2()
for key in self.Q1_data:
assert(key in self.Q2_data)
keys, Q1 = self.Q1_data[key]
Q2 = self.Q2_data[key]
if self.u_data is not None:
Q2 = Q2 * self.u_data
Q_out = Q1 * Q2
for i, row in enumerate(Q_out.getA()):
yield keys[i], struct.pack('d' * len(row), *row)
def __call__(self, data):
for key, val in data:
ind = val.find('_')
val1_len = int(val[:ind])
keys = val[ind + 1:ind + 1 + val1_len]
matrix = val[ind +1 +val1_len:]
keys = pickle.loads(keys)
num_entries = len(matrix) / 8
if num_entries % self.ncols != 0:
raise mrmc.DataFormatException(
'Length of value (%d) did not match number of columns (%d)' % (
num_entries, self.ncols))
mat = struct.unpack('d' * num_entries, matrix)
mat = numpy.mat(mat)
mat = numpy.reshape(mat, (num_entries / self.ncols , self.ncols))
self.collect(key, keys, mat)
for key, val in self.close():
yield key, val
"""
The classes RLabeller, QGrouperMap, QGrouperReduce, and DirTSQRRed3 are
used for recursive Direct TSQR.
"""
class RLabeller(dumbo.backends.common.MapRedBase):
def __init__(self):
self.data = []
def close(self):
for pair in self.data:
yield pair[0], pair[1]
def __call__(self, data):
for key, value in data:
for i, row in enumerate(value):
new_key = str(key) + '_' + str(i)
row = [float(val) for val in row]
row = struct.pack('d' * len(row), *row)
self.data.append((new_key, row))
for key, val in self.close():
yield key, val
class QGrouperMap(dumbo.backends.common.MapRedBase):
def __init__(self):
self.data = []
def close(self):
for pair in self.data:
yield pair[0], pair[1]
def __call__(self, data):
for key, value in data:
new_key, num = key.split('_')
val = pickle.dumps((value, int(num)))
self.data.append((new_key, val))
for key, val in self.close():
yield key, val
class QGrouperReduce(dumbo.backends.common.MapRedBase):
def __init__(self, ncols):
self.ncols = ncols
self.data = {}
def close(self):
for key in self.data:
assert(None not in self.data[key])
local_Q = self.data[key]
flat_Q = [entry for row in local_Q for entry in row]
val = 'Q2' + '_' + struct.pack('d' * (self.ncols ** 2), *flat_Q)
yield key, val
def collect(self, key, value, num):
assert(num < self.ncols)
if key not in self.data:
self.data[key] = self.ncols * [None]
row = struct.unpack('d' * self.ncols, value)
self.data[key][num] = row
def __call__(self, data):
for key, values in data:
for value in values:
val, num = pickle.loads(value)
self.collect(key, val, num)
for key, val in self.close():
yield key, val
class DirTSQRRed3(dumbo.backends.common.MapRedBase):
def __init__(self, ncols):
self.ncols = ncols
self.Q1_data = None
self.Q2_data = None
def collect(self, key, keys, value):
self.Q1_data = (keys, value)
def collect_Q2(self, key, value):
value = numpy.array(struct.unpack('d' * (self.ncols ** 2), value))
self.Q2_data = numpy.reshape(value, (self.ncols, self.ncols))
def flush(self):
keys, Q1 = self.Q1_data
Q2 = self.Q2_data
Q_out = Q1 * Q2
self.Q1_data = None
self.Q2_data = None
Q1 = None
Q2 = None
gc.collect()
for i, row in enumerate(Q_out.getA()):
yield keys[i], struct.pack('d' * len(row), *row)
def __call__(self, data):
for key, values in data:
for val in values:
ind = val.find('_')
if val[:ind] == 'Q2':
mat = val[ind+1:]
self.collect_Q2(key, mat)
else:
val1_len = int(val[:ind])
keys = val[ind + 1:ind + 1 + val1_len]
matrix = val[ind +1 +val1_len:]
keys = pickle.loads(keys)
num_entries = len(matrix) / 8
assert (num_entries % self.ncols == 0)
mat = struct.unpack('d' * num_entries, matrix)
mat = numpy.mat(mat)
mat = numpy.reshape(mat, (num_entries / self.ncols , self.ncols))
self.collect(key, keys, mat)
for k, v in self.flush():
yield k, v
|
import {
primaryColor,
dangerColor,
successColor,
grayColor,
defaultFont
} from "./material-dashboard-react";
const customInputStyle = {
disabled: {
"&:before": {
backgroundColor: "transparent !important"
}
},
underline: {
"&:hover:not($disabled):before,&:before": {
borderColor: grayColor[4] + " !important",
borderWidth: "1px !important"
},
"&:after": {
borderColor: primaryColor[0]
}
},
underlineError: {
"&:after": {
borderColor: dangerColor[0]
}
},
underlineSuccess: {
"&:after": {
borderColor: successColor[0]
}
},
labelRoot: {
...defaultFont,
color: grayColor[3] + " !important",
fontWeight: "400",
fontSize: "14px",
lineHeight: "1.42857",
letterSpacing: "unset"
},
labelRootError: {
color: dangerColor[0]
},
labelRootSuccess: {
color: successColor[0]
},
feedback: {
position: "absolute",
top: "18px",
right: "0",
zIndex: "2",
display: "block",
width: "24px",
height: "24px",
textAlign: "center",
pointerEvents: "none"
},
marginTop: {
marginTop: "16px"
},
formControl: {
paddingBottom: "10px",
margin: "27px 0 0 0",
position: "relative",
verticalAlign: "unset"
}
};
export default customInputStyle;
|
// For discussion and comments, see: http://remysharp.com/2009/01/07/html5-enabling-script/
/*@cc_on'abbr article aside audio canvas details figcaption figure footer header hgroup mark menu meter nav output progress section summary time video'.replace(/\w+/g,function(n){document.createElement(n)})@*/
var addEvent = (function () {
if (document.addEventListener) {
return function (el, type, fn) {
if (el && el.nodeName || el === window) {
el.addEventListener(type, fn, false);
} else if (el && el.length) {
for (var i = 0; i < el.length; i++) {
addEvent(el[i], type, fn);
}
}
};
} else {
return function (el, type, fn) {
if (el && el.nodeName || el === window) {
el.attachEvent('on' + type, function () { return fn.call(el, window.event); });
} else if (el && el.length) {
for (var i = 0; i < el.length; i++) {
addEvent(el[i], type, fn);
}
}
};
}
})();
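// A hedged usage sketch: addEvent accepts a single element (or window) as well
// as any array-like collection of elements.
//   addEvent(window, 'load', function () { /* ... */ });
//   addEvent(document.getElementsByTagName('a'), 'click', handler);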
(function () {
var pre = document.createElement('pre');
pre.id = "view-source"
// private scope to avoid conflicts with demos
addEvent(window, 'click', function (event) {
if (event.target.hash == '#view-source') {
// event.preventDefault();
if (!document.getElementById('view-source')) {
// pre.innerHTML = ('<!DOCTYPE html>\n<html>\n' + document.documentElement.innerHTML + '\n</html>').replace(/[<>]/g, function (m) { return {'<':'<','>':'>'}[m]});
var xhr = new XMLHttpRequest();
// original source - rather than rendered source
xhr.onreadystatechange = function () {
if (this.readyState == 4 && this.status == 200) {
pre.innerHTML = this.responseText.replace(/[<>]/g, function (m) { return {'<':'<','>':'>'}[m]});
prettyPrint();
}
};
document.body.appendChild(pre);
      // async GET; the onreadystatechange handler above fills in the <pre> when the source arrives
xhr.open("GET", window.location, true);
xhr.send();
}
document.body.className = 'view-source';
var sourceTimer = setInterval(function () {
if (window.location.hash != '#view-source') {
clearInterval(sourceTimer);
document.body.className = '';
}
}, 200);
}
});
})();
|
import logging
import os
import random as rd
import click
import mne
import pandas as pd
import numpy as np
from scipy.signal import welch
import matplotlib.pyplot as plt
from config import CHANNEL_NAMES, DATA_ROOT, PROCESSED_ROOT, RAW_ROOT
from data.utils import df_from_fif, df_from_tdt, get_sampling_frequency
from data.preprocess import preprocess_raw_mne_file
def plot_psd(X, label, Fs, NFFT, color=None):
noverlap = int(NFFT * 0.8)
freqs, psd = welch(X, fs=Fs, window='hann', nperseg=NFFT,
noverlap=noverlap)
# print(len(freqs))
# print(len(psd))
    f = freqs[freqs > 0]
    psd = psd[freqs > 0]
plt.plot(np.log10(f), 10 * np.log10(psd.ravel()), label=label,
color=color)
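# A hedged usage sketch with synthetic data: a 10 Hz sine sampled at 250 Hz
# should appear as a peak at log10(10) = 1 on the x-axis.
#   t = np.arange(0, 20, 1 / 250)
#   plot_psd(np.sin(2 * np.pi * 10 * t), label='10 Hz sine', Fs=250, NFFT=2048)
#   plt.show()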
def interactive_plot_freq(input_file, kind, apply_proj=False):
"""Create an interactive figure visualizing all channels from a file."""
df = df_from_fif(input_file) if kind=='PROCESSED' else \
df_from_tdt(input_file)
if kind == 'RAW':
sfreq = get_sampling_frequency(input_file)
else:
sfreq = 250
info = mne.create_info(ch_names=CHANNEL_NAMES, sfreq=sfreq, ch_types='eeg')
data = mne.io.RawArray(np.transpose(df.values), info)
processed = preprocess_raw_mne_file(data, apply_proj)
    logging.info(f'Plotting frequency components of {input_file} of '
                 f'kind={kind}, sfreq={sfreq}...')
# data.plot_psd()
# processed.plot_psd()
# add some plotting parameter
    # decim_fit = 100  # we learn a purely spatial model, we don't need all samples
# decim_show = 10 # we can make plotting faster
n_fft = 2 ** 13 # let's use long windows to see low frequencies
for i, channel in enumerate(CHANNEL_NAMES):
plt.figure(figsize=(9, 6))
values = df[channel].values
logging.info(f'Plotting file {input_file}...')
plot_psd(values, Fs=sfreq, NFFT=n_fft, label='EEG', color='black')
if kind == 'RAW':
plot_psd(processed[i][0][0], Fs=sfreq, NFFT=n_fft,
label='EEG-processed', color='orange')
plt.legend()
plt.xticks(np.log10([0.1, 1, 10, 100]), [0.1, 1, 10, 100])
plt.xlim(np.log10([0.1, 300]))
plt.xlabel('log10(frequency) [Hz]')
plt.ylabel('Power Spectral Density [dB]')
plt.grid()
plt.show()
def examine_all_freq(kind='RAW', proj=False):
input_folder = PROCESSED_ROOT if kind=='PROCESSED' else RAW_ROOT
to_examine = os.listdir(input_folder)
rd.shuffle(to_examine)
for file_name in to_examine:
interactive_plot_freq(os.path.join(input_folder, file_name), kind, proj)
@click.command()
@click.option('--kind', type=str, default='RAW')
@click.option('--proj', type=bool, default=False)
def main(kind, proj):
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
    logger.info(f'Plotting PSD of EEG signals of kind {kind}...')
examine_all_freq(kind, proj)
if __name__ == '__main__':
main()
|
'''
LPD-Net Model: FN-SF-VLAD
Feature Network + FN-Parallel structure (P) + Series-FC structure (SF)
# Thanks to Mikaela Angelina Uy, modified from PointNetVLAD
author: suo_ivy
created: 10/26/18
'''
import os
import sys
import tensorflow as tf
#Taken from Charles Qi's pointnet code
MODELS_DIR = os.path.dirname(__file__)
sys.path.append(MODELS_DIR)
sys.path.append(os.path.join(MODELS_DIR, '../utils'))
import tf_util
#from transform_nets import input_transform_net, feature_transform_net, neural_feature_net
#Adopted from Antoine Meich
import loupe as lp
def placeholder_inputs(batch_num_queries, num_pointclouds_per_query, num_point, input_dim=13):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_num_queries, num_pointclouds_per_query, num_point, input_dim))
return pointclouds_pl
#Adopted from the original pointnet code
def forward(point_cloud, is_training, bn_decay=None, params=None):
# Network: DGCNN
# INPUT: batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X input_dim
# OUTPUT: batch_num_queries X num_pointclouds_per_query X output_dim
batch_num_queries = point_cloud.get_shape()[0].value
num_pointclouds_per_query = point_cloud.get_shape()[1].value
num_points = point_cloud.get_shape()[2].value
CLUSTER_SIZE = params["CLUSTER_SIZE"] # default: 64
OUTPUT_DIM = params["FEATURE_OUTPUT_DIM"] # default: 256
k = params["KNN"] # default: 20
INPUT_DIM = params["INPUT_DIM"] # default: 13
point_cloud = tf.reshape(point_cloud, [batch_num_queries*num_pointclouds_per_query, num_points, INPUT_DIM])
# BxNxC
#if INPUT_DIM != 13:
# print("input dimension must be 13!!!")
# exit()
#pc, feature_cloud = tf.split(point_cloud, [3, 10], 2) # BxNx3 BxNx10
#with tf.variable_scope('single') as sc:
# pc = tf.expand_dims(point_cloud, -1) # BxNxC -> BxNxCx1
# pfea = tf_util.conv2d(pc, 16, [1, INPUT_DIM],
# padding='VALID', stride=[1,1],
# bn=True, is_training=is_training,
# scope='conv0_a', bn_decay=bn_decay) # BxNx1x16
# pfea = tf_util.conv2d(pfea, 32, [1, 1],
# padding='VALID', stride=[1,1],
# bn=True, is_training=is_training,
# scope='conv0_b', bn_decay=bn_decay) # BxNx1x32
# pfea = tf.squeeze(pfea, [2])
# DGCNN index
with tf.variable_scope('fastdgcnn') as sc:
dpist = tf_util.pairwise_distance_mask(point_cloud, k=k)
# BxNxN
# -------------------------------------------------------------------------
x = tf_util.conv1d(point_cloud, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay) # BxNx64
x1 = tf.matmul(dpist, x) # BxNxN X BxNx64 -> BxNx64
x1 = x1 / float(k)
t1 = x1 - x
t1 = tf_util.conv1d(t1, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv1_a', bn_decay=bn_decay) # BxNx64
t1 = tf_util.conv1d(t1, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv1_b', bn_decay=bn_decay) # BxNx64
x1 = t1 + x1
# -------------------------------------------------------------------------
x = tf_util.conv1d(x1, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay) # BxNx64
x2 = tf.matmul(dpist, x) # BxNxN X BxNx64 -> BxNx64
x2 = x2 / float(k)
t2 = x2 - x
t2 = tf_util.conv1d(t2, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv2_a', bn_decay=bn_decay) # BxNx64
t2 = tf_util.conv1d(t2, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv2_b', bn_decay=bn_decay) # BxNx64
x2 = t2 + x2
# -------------------------------------------------------------------------
x = tf_util.conv1d(x2, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay) # BxNx64
x3 = tf.matmul(dpist, x) # BxNxN X BxNx64 -> BxNx64
x3 = x3 / float(k)
t3 = x3 - x
t3 = tf_util.conv1d(t3, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv3_a', bn_decay=bn_decay) # BxNx64
t3 = tf_util.conv1d(t3, 64, 1,
bn=True, padding='VALID', stride=1,
is_training=is_training,
scope='conv3_b', bn_decay=bn_decay) # BxNx64
x3 = t3 + x3
# -------------------------------------------------------------------------
x = tf_util.conv1d(x3, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay) # BxNx64
x4 = tf.matmul(dpist, x) # BxNxN X BxNx64 -> BxNx64
x4 = x4 / float(k)
t4 = x4 - x
t4 = tf_util.conv1d(t4, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv4_a', bn_decay=bn_decay) # BxNx64
t4 = tf_util.conv1d(t4, 64, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv4_b', bn_decay=bn_decay) # BxNx64
x4 = t4 + x4
# -------------------------------------------------------------------------
x = tf.concat([x1, x2, x3, x4], axis=-1) # BxNx64 * 4 -> BxNx256
x = tf_util.conv1d(x, 1024, 1,
padding='VALID', stride=1,
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay) # BxNx1024
#x = tf.expand_dims(x, axis=2) # BxNx1024 -> BxNx1x1024
with tf.variable_scope('VLAD') as sc:
NetVLAD = lp.G_VLAD(feature_size=1024, max_samples=num_points, cluster_size=CLUSTER_SIZE,
output_dim=OUTPUT_DIM, groups=params["GROUPS"], gating=True, add_batch_norm=True,
is_training=is_training)
net = tf.reshape(x, [-1, 1024])
net = tf.nn.l2_normalize(net, 1)
output = NetVLAD.forward(net)
print(output)
#normalize to have norm 1
output = tf.nn.l2_normalize(output,1)
#output = tf.reshape(output,[batch_num_queries,num_pointclouds_per_query,OUTPUT_DIM])
output = tf.reshape(output,[batch_num_queries,num_pointclouds_per_query,OUTPUT_DIM], name="last_output")
#return output
return tf.nn.l2_normalize(tf.reshape(x, [-1, 1024]), 1), output
def best_pos_distance(query, pos_vecs):
with tf.name_scope('best_pos_distance') as scope:
#batch = query.get_shape()[0]
num_pos = pos_vecs.get_shape()[1]
query_copies = tf.tile(query, [1,int(num_pos),1]) #shape num_pos x output_dim
best_pos=tf.reduce_min(tf.reduce_sum(tf.squared_difference(pos_vecs,query_copies),2),1)
#best_pos=tf.reduce_max(tf.reduce_sum(tf.squared_difference(pos_vecs,query_copies),2),1)
return best_pos
##########Losses for PointNetVLAD###########
#Returns the average loss across the query tuples in a batch; each tuple's loss is the sum of hinge losses of the definite negatives against the best (closest) positive
def triplet_loss(q_vec, pos_vecs, neg_vecs, margin):
# ''', end_points, reg_weight=0.001):
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
query_copies = tf.tile(q_vec, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
m=tf.fill([int(batch), int(num_neg)],margin)
triplet_loss=tf.reduce_mean(tf.reduce_sum(tf.maximum(tf.add(m,tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,query_copies),2))), tf.zeros([int(batch), int(num_neg)])),1))
return triplet_loss
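#In math form, with d the squared Euclidean distance, p* the closest positive,
#and n_j the definite negatives, the loss above is
#   L = mean_batch( sum_j max(m + d(q, p*) - d(q, n_j), 0) )
#The lazy variant below replaces sum_j with max_j.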
#Lazy variant
def lazy_triplet_loss(q_vec, pos_vecs, neg_vecs, margin):
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
query_copies = tf.tile(q_vec, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
m=tf.fill([int(batch), int(num_neg)],margin)
triplet_loss=tf.reduce_mean(tf.reduce_max(tf.maximum(tf.add(m,tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,query_copies),2))), tf.zeros([int(batch), int(num_neg)])),1))
return triplet_loss
def softmargin_loss(q_vec, pos_vecs, neg_vecs):
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
query_copies = tf.tile(q_vec, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
soft_loss=tf.reduce_mean(tf.reduce_sum(tf.log(tf.exp(tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,query_copies),2)))+1.0),1))
    return soft_loss
def lazy_softmargin_loss(q_vec, pos_vecs, neg_vecs):
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
query_copies = tf.tile(q_vec, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
soft_loss=tf.reduce_mean(tf.reduce_max(tf.log(tf.exp(tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,query_copies),2)))+1.0),1))
return soft_loss
def quadruplet_loss_sm(q_vec, pos_vecs, neg_vecs, other_neg, m2):
soft_loss= softmargin_loss(q_vec, pos_vecs, neg_vecs)
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
other_neg_copies = tf.tile(other_neg, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
m2=tf.fill([int(batch), int(num_neg)],m2)
second_loss=tf.reduce_mean(tf.reduce_sum(tf.maximum(tf.add(m2,tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,other_neg_copies),2))), tf.zeros([int(batch), int(num_neg)])),1))
total_loss= soft_loss+second_loss
return total_loss
def lazy_quadruplet_loss_sm(q_vec, pos_vecs, neg_vecs, other_neg, m2):
soft_loss= lazy_softmargin_loss(q_vec, pos_vecs, neg_vecs)
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
other_neg_copies = tf.tile(other_neg, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
m2=tf.fill([int(batch), int(num_neg)],m2)
second_loss=tf.reduce_mean(tf.reduce_max(tf.maximum(tf.add(m2,tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,other_neg_copies),2))), tf.zeros([int(batch), int(num_neg)])),1))
total_loss= soft_loss+second_loss
return total_loss
def quadruplet_loss(q_vec, pos_vecs, neg_vecs, other_neg, m1, m2):
trip_loss= triplet_loss(q_vec, pos_vecs, neg_vecs, m1)
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
other_neg_copies = tf.tile(other_neg, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
m2=tf.fill([int(batch), int(num_neg)],m2)
second_loss=tf.reduce_mean(tf.reduce_sum(tf.maximum(tf.add(m2,tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,other_neg_copies),2))), tf.zeros([int(batch), int(num_neg)])),1))
total_loss= trip_loss+second_loss
return total_loss
def lazy_quadruplet_loss(q_vec, pos_vecs, neg_vecs, other_neg, m1, m2):
trip_loss= lazy_triplet_loss(q_vec, pos_vecs, neg_vecs, m1)
best_pos=best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
other_neg_copies = tf.tile(other_neg, [1, int(num_neg),1])
best_pos=tf.tile(tf.reshape(best_pos,(-1,1)),[1, int(num_neg)])
m2=tf.fill([int(batch), int(num_neg)],m2)
second_loss=tf.reduce_mean(tf.reduce_max(tf.maximum(tf.add(m2,tf.subtract(best_pos,tf.reduce_sum(tf.squared_difference(neg_vecs,other_neg_copies),2))), tf.zeros([int(batch), int(num_neg)])),1))
total_loss= trip_loss+second_loss
return total_loss
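#The quadruplet variants add a second hinge term that also pushes the negatives
#away from an extra "other negative" sample o:
#   L = L_triplet + mean_batch( sum_j (or max_j) max(m2 + d(q, p*) - d(o, n_j), 0) )
#which discourages negatives from clustering around any anchor, not just q.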
|
from django.contrib.auth.models import User
from rest_framework import serializers
from mainstore.models import Catalog, Product
# hyperlinkedmodelserializer
#view_name='api:product-detail'
class UserSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='api:user-detail')
products = serializers.HyperlinkedRelatedField(many=True, view_name='api:product-detail', read_only=True)
class Meta:
model = User
fields = ('url', 'id', 'username', 'products')
class CatalogSerializer(serializers.HyperlinkedModelSerializer):
products = serializers.HyperlinkedRelatedField(many=True, view_name='api:product-detail', read_only=True)
url = serializers.HyperlinkedIdentityField(view_name='api:catalog-detail')
class Meta:
model = Catalog
fields = ('url', 'id', 'name', 'slug', 'products')
# look
class ProductSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='api:product-detail')
catalog_id = serializers.HyperlinkedRelatedField(queryset=Catalog.objects.all(), view_name='api:catalog-detail',
read_only=False)
catalog = serializers.HyperlinkedRelatedField(view_name='api:catalog-detail', read_only=True)
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Product
fields = (
'url', 'id', 'name', 'price', 'slug', 'catalog_id', 'catalog', 'quantity', 'available', 'is_new', 'color',
'size',
'owner')
def create(self, validated_data):
"""
Create and return a new 'Product' instance, given the validated data.
"""
# get id only
catalog_id = validated_data.pop('catalog_id').id
        print('catalog id is:', catalog_id)
# if 'catalog' in validated_data.keys():
# catalog_data = validated_data.pop('catalog')
# print("catalog_id :",catalog_id)
# catalog=CatalogSerializer.create(CatalogSerializer(),validated_data=catalog_data)
#
# product=Product.objects.create(catalog=catalog,catalog_id=catalog_id,**validated_data)
# else:
product = Product.objects.create(catalog_id=catalog_id, **validated_data)
return product
def update(self, instance, validated_data):
"""
Update and return an existing 'Product' instance, given the validated data
"""
        try:
            catalog_id = validated_data.pop('catalog_id').id
        except KeyError:
            # no catalog_id in the update payload; let the caller handle it
            raise
instance.name = validated_data.get('name', instance.name)
instance.price = validated_data.get('price', instance.price)
instance.image = validated_data.get('image', instance.image)
instance.slug = validated_data.get('slug', instance.slug)
instance.color = validated_data.get('color', instance.color)
instance.size = validated_data.get('size', instance.size)
instance.available = validated_data.get('available', instance.available)
instance.is_new = validated_data.get('is_new', instance.is_new)
instance.quantity = validated_data.get('quantity', instance.quantity)
instance.catalog_id = validated_data.get('catalog_id', instance.catalog_id)
if str(catalog_id):
pass
# catalog=Catalog.objects.get(pk=catalog_id)
# print("catalog for updating:",catalog)
# catalog.url=catalog.get('url',catalog)
# catalog.save()
instance.save()
return instance
|
var fs = require('fs')
var path = require('path')
var mkdirp = require('mkdirp')
var mr = require('npm-registry-mock')
var osenv = require('osenv')
var rimraf = require('rimraf')
var test = require('tap').test
var common = require('../common-tap')
var server
var pkg = path.resolve(__dirname, 'prune')
var cache = path.resolve(pkg, 'cache')
var json = {
name: 'prune-with-only-dev-deps',
description: 'fixture',
version: '0.0.1',
main: 'index.js',
devDependencies: {
'test-package-with-one-dep': '0.0.0',
'test-package': '0.0.0'
}
}
var EXEC_OPTS = {
cwd: pkg,
npm_config_depth: 'Infinity'
}
test('setup', function (t) {
cleanup()
mkdirp.sync(cache)
fs.writeFileSync(
path.join(pkg, 'package.json'),
JSON.stringify(json, null, 2)
)
mr({ port: common.port }, function (er, s) {
server = s
t.end()
})
})
test('npm install', function (t) {
common.npm([
'install',
'--cache', cache,
'--registry', common.registry,
'--loglevel', 'silent',
'--production', 'false'
], EXEC_OPTS, function (err, code, stdout, stderr) {
t.ifErr(err, 'install finished successfully')
t.notOk(code, 'exit ok')
t.notOk(stderr, 'Should not get data on stderr: ' + stderr)
t.end()
})
})
function readdir (dir) {
try {
return fs.readdirSync(dir)
} catch (ex) {
if (ex.code === 'ENOENT') return []
throw ex
}
}
test('verify installs', function (t) {
var dirs = readdir(pkg + '/node_modules').sort()
t.same(dirs, [ 'test-package', 'test-package-with-one-dep' ].sort())
t.end()
})
test('npm prune', function (t) {
common.npm([
'prune',
'--loglevel', 'silent',
'--production', 'false'
], EXEC_OPTS, function (err, code, stdout, stderr) {
t.ifErr(err, 'prune finished successfully')
t.notOk(code, 'exit ok')
t.notOk(stderr, 'Should not get data on stderr: ' + stderr)
t.end()
})
})
test('verify installs', function (t) {
var dirs = readdir(pkg + '/node_modules').sort()
t.same(dirs, [ 'test-package', 'test-package-with-one-dep' ])
t.end()
})
test('npm prune', function (t) {
common.npm([
'prune',
'--loglevel', 'silent',
'--production',
'--json'
], EXEC_OPTS, function (err, code, stdout, stderr) {
t.ifErr(err, 'prune finished successfully')
t.notOk(code, 'exit ok')
t.like(JSON.parse(stdout), {removed: [{name: 'test-package'}, {name: 'test-package-with-one-dep'}]})
t.end()
})
})
test('verify installs', function (t) {
var dirs = readdir(pkg + '/node_modules').sort()
t.same(dirs, [])
t.end()
})
test('cleanup', function (t) {
server.close()
cleanup()
t.pass('cleaned up')
t.end()
})
function cleanup () {
process.chdir(osenv.tmpdir())
rimraf.sync(pkg)
}
|
const s3 = require("../config/aws");
module.exports = key => {
var params = {
Bucket: process.env.ResumeBucketName,
Key: key
};
  s3.deleteObject(params, (err, data) => {
    if (err) {
      console.log(err);
      return; // don't report success when the delete failed
    }
    console.log(`Resume ${key} deleted`);
  });
};
|
/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/file.h>
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int fd;
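/* Thread body: requests a shared flock on the same open file description
   that already holds the exclusive lock taken in do_test.  This should
   succeed as a non-blocking lock conversion; the thread then releases the
   mutex so the main thread's second pthread_mutex_lock can return.  */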
static void *
tf (void *arg)
{
if (flock (fd, LOCK_SH | LOCK_NB) != 0)
{
puts ("second flock failed");
exit (1);
}
pthread_mutex_unlock (&lock);
return NULL;
}
static int
do_test (void)
{
char tmp[] = "/tmp/tst-flock1-XXXXXX";
fd = mkstemp (tmp);
if (fd == -1)
{
puts ("mkstemp failed");
exit (1);
}
unlink (tmp);
write (fd, "foobar xyzzy", 12);
if (flock (fd, LOCK_EX | LOCK_NB) != 0)
{
puts ("first flock failed");
exit (1);
}
pthread_mutex_lock (&lock);
pthread_t th;
if (pthread_create (&th, NULL, tf, NULL) != 0)
{
puts ("pthread_create failed");
exit (1);
}
pthread_mutex_lock (&lock);
void *result;
if (pthread_join (th, &result) != 0)
{
puts ("pthread_join failed");
exit (1);
}
close (fd);
return result != NULL;
}
#define TEST_FUNCTION do_test ()
#include "../test-skeleton.c"
|
'''
filename = urls.py
author = LJH
date = 2019/08/19
'''
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
from django.views.static import serve
from games.settings import MEDIA_ROOT
urlpatterns = [
    # Profile page
    url(r'^profile/$', views.ProfileView.as_view(), name='profile'),
    # Serve media files (images)
    url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
    # Change password
    url(r'^change_passwd/$', views.ChangePasswdView.as_view(), name='change_passwd'),
    # Password-recovery email route
    url(r'^password/forget/$', views.PasswordForget.as_view(), name="password_forget"),
    # Reset password
    url(r'^password/reset/(\w+)/$', views.PasswordReset.as_view(), name="password_reset"),
    # Contact me
    url(r'^password/callme$', views.Callme, name="callme"),
]
|
import os
import yaml
import hashlib
import importlib
import sys
import argparse
from time import sleep
from pathlib import Path
from pymongo import MongoClient
from feed import mainCaller
from database_connector import mongo_update
from front_end_scorer import pre_scorer
try:
p = Path(__file__).parents[1]
sys.path.append(str(p))
except IndexError:
print("[!] Error: Use full path of the file")
sys.exit(1)
def check():
print("[+] Parsing settings.yaml to get feeds")
fil = os.environ.get("CONFIG_FILE", default="settings.yaml")
db = os.environ.get("MONGO_DB", default="misp_feed")
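    # NOTE: hard-coded credentials are suitable for local development only;
    # in production the connection string should come from configuration.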
client = MongoClient(
"mongodb://root:password@localhost:27017/?authSource=admin"
)
all_dbs = client.list_database_names()
if db in all_dbs:
database = client[db]
        filename = str(Path(__file__).parents[1]) + "/feed_ingestor/" + fil
old_file = database.file.find_one({"filename": filename})
if str(old_file["hash"]) != str(
hashlib.md5(open(str(filename), "rb").read()).hexdigest()
):
feed_updater = mainCaller()
feed_list, event_list = feed_updater.update_misp(
filename=str(filename),
include_event_tags=False
)
            # If this call errors out it should simply be skipped (a try/except
            # would do); it runs again the next minute, by which time the feed
            # should be indexed in MISP as an event with all attributes and cached.
feed_list, event_list = feed_updater.update_file(
filename=str(filename)
)
            # If no feed_list/event_list is produced (i.e. the call above errored
            # out), this update is not performed.
mongo_update.update_feed(
feed_list=feed_list, event_list=event_list
)
            # Skipped likewise when the call errors out, i.e. when the event does
            # not yet have all its attributes.
old_file["hash"] = str(
hashlib.md5(open(str(filename), "rb").read()).hexdigest()
)
database.file.replace_one(
filter={"_id": old_file["_id"]}, replacement=old_file
)
else:
feed_updater = mainCaller()
            feed_list, event_list = feed_updater.update_file(
                filename=str(filename), include_event_tags=False
            )
mongo_update.update_feed(feed_list=feed_list, event_list=event_list)
old_file["hash"] = str(
hashlib.md5(open(str(filename), "rb").read()).hexdigest()
)
database.file.replace_one(
filter={"_id": old_file["_id"]}, replacement=old_file
)
else:
database = client[db]
        filename = str(Path(__file__).parents[1]) + "/feed_ingestor/" + fil
database.file.insert_one(
{
"filename": filename,
"hash": str(hashlib.md5(open(str(filename), "rb").read()).hexdigest()),
}
)
feed_updater = mainCaller()
feed_list, event_list = feed_updater.update_misp(filename=str(filename),include_event_tags=False)
mongo_update.update_feed(feed_list=feed_list, event_list=event_list)
feed_list = feed_updater.update_file(filename=str(filename))
sleep(240)
ip_list = feed_updater.update_attributes()
mongo_update.add_ip(
ip_list
)
    # Pull all IPs and their properties into the current MongoDB collection.
    print("[+] Scoring engine started")
    # Score the freshly ingested attributes (mirrors add_ip below; the original
    # printed the message without invoking the scorer).
    pre_scorer.score_attributes()
    print("[+] Loading complete")
def add_ip():
print("[+] IP address being added to db")
ip_updater = mainCaller()
ip_list = ip_updater.update_attributes()
mongo_update.add_ip(ip_list)
print("[+] Scoring engine started")
pre_scorer.score_attributes()
print("[+] Loading Complete")
if __name__=='__main__':
arg = argparse.ArgumentParser(description='IP reputation program')
    arg.add_argument('-s', const='start', required=False, help='Required only for the first run', nargs='?')
config = open(f"{p}/config.yaml")
parsed_yaml_file = yaml.load(config, Loader=yaml.FullLoader)
os.environ['MISP_URL'] = parsed_yaml_file['credentials']['MISP_URL']
os.environ['MISP_KEY'] = parsed_yaml_file['credentials']['MISP_KEY']
config.close()
args = arg.parse_args()
    if args.s is not None:
check()
else:
add_ip()
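# Usage sketch (the script name is an assumption for illustration): run once
# with -s to do the initial feed ingestion, then schedule plain runs (e.g.
# from cron every minute) to refresh attributes and rescore:
#   python feed_checker.py -s   # first run: parse feeds, cache events
#   python feed_checker.py      # subsequent runs: refresh IPs and rescore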
|
import asyncio
import copy
import json
import logging
import ssl
import time
from decimal import Decimal
from typing import Any, Dict, List, Optional
import aiohttp
from hummingbot.client.config.fee_overrides_config_map import fee_overrides_config_map
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.settings import GATEAWAY_CA_CERT_PATH, GATEAWAY_CLIENT_CERT_PATH, GATEAWAY_CLIENT_KEY_PATH
from hummingbot.connector.connector_base import ConnectorBase
from hummingbot.connector.connector.balancer.balancer_in_flight_order import BalancerInFlightOrder
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee, TokenAmount
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
BuyOrderCreatedEvent,
MarketEvent,
MarketOrderFailureEvent,
OrderFilledEvent,
OrderType,
SellOrderCompletedEvent,
SellOrderCreatedEvent,
TradeType
)
from hummingbot.core.utils import async_ttl_cache
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.core.utils.ethereum import check_transaction_exceptions, fetch_trading_pairs
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.logger import HummingbotLogger
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
s_logger = None
s_decimal_0 = Decimal("0")
s_decimal_NaN = Decimal("nan")
logging.basicConfig(level=METRICS_LOG_LEVEL)
class BalancerConnector(ConnectorBase):
"""
BalancerConnector connects with balancer gateway APIs and provides pricing, user account tracking and trading
functionality.
"""
API_CALL_TIMEOUT = 10.0
POLL_INTERVAL = 1.0
UPDATE_BALANCE_INTERVAL = 30.0
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
def __init__(self,
trading_pairs: List[str],
wallet_private_key: str,
ethereum_rpc_url: str,
trading_required: bool = True
):
"""
:param trading_pairs: a list of trading pairs
:param wallet_private_key: a private key for eth wallet
        :param ethereum_rpc_url: the Ethereum node RPC URL (usually an Infura endpoint)
:param trading_required: Whether actual trading is needed.
"""
super().__init__()
self._trading_pairs = trading_pairs
self._tokens = set()
for trading_pair in trading_pairs:
self._tokens.update(set(trading_pair.split("-")))
self._wallet_private_key = wallet_private_key
self._ethereum_rpc_url = ethereum_rpc_url
self._trading_required = trading_required
self._ev_loop = asyncio.get_event_loop()
self._shared_client = None
self._last_poll_timestamp = 0.0
self._last_balance_poll_timestamp = time.time()
self._last_est_gas_cost_reported = 0
self._in_flight_orders = {}
self._allowances = {}
self._status_polling_task = None
self._auto_approve_task = None
self._initiate_pool_task = None
self._initiate_pool_status = None
self._real_time_balance_update = False
self._poll_notifier = None
@property
def name(self):
return "balancer"
@staticmethod
async def fetch_trading_pairs() -> List[str]:
return await fetch_trading_pairs()
@property
def limit_orders(self) -> List[LimitOrder]:
return [
in_flight_order.to_limit_order()
for in_flight_order in self._in_flight_orders.values()
]
async def initiate_pool(self) -> str:
"""
Initiate connector and cache pools
"""
try:
self.logger().info(f"Initializing Balancer connector and caching pools for {self._trading_pairs}.")
resp = await self._api_request("get", "eth/balancer/start",
{"pairs": json.dumps(self._trading_pairs)})
            # resp["success"] should be a boolean; bool(str(...)) is truthy even
            # for "False", so test the value directly.
            status = bool(resp["success"])
            if status:
                self._initiate_pool_status = status
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(
f"Error initializing {self._trading_pairs} swap pools",
exc_info=True,
app_warning_msg=str(e)
)
async def auto_approve(self):
"""
Automatically approves Balancer contract as a spender for token in trading pairs.
It first checks if there are any already approved amount (allowance)
"""
self.logger().info("Checking for allowances...")
self._allowances = await self.get_allowances()
for token, amount in self._allowances.items():
if amount <= s_decimal_0:
amount_approved = await self.approve_balancer_spender(token)
if amount_approved > 0:
self._allowances[token] = amount_approved
await asyncio.sleep(2)
else:
break
async def approve_balancer_spender(self, token_symbol: str) -> Decimal:
"""
Approves Balancer contract as a spender for a token.
:param token_symbol: token to approve.
"""
resp = await self._api_request("post",
"eth/approve",
{"token": token_symbol,
"connector": self.name})
amount_approved = Decimal(str(resp["amount"]))
if amount_approved > 0:
self.logger().info(f"Approved Balancer spender contract for {token_symbol}.")
else:
self.logger().info(f"Balancer spender contract approval failed on {token_symbol}.")
return amount_approved
async def get_allowances(self) -> Dict[str, Decimal]:
"""
Retrieves allowances for token in trading_pairs
:return: A dictionary of token and its allowance (how much Balancer can spend).
"""
ret_val = {}
resp = await self._api_request("post", "eth/allowances",
{"tokenList": "[" + (",".join(['"' + t + '"' for t in self._tokens])) + "]",
"connector": self.name})
for token, amount in resp["approvals"].items():
ret_val[token] = Decimal(str(amount))
return ret_val
@async_ttl_cache(ttl=5, maxsize=10)
async def get_quote_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Optional[Decimal]:
"""
Retrieves a quote price.
:param trading_pair: The market trading pair
:param is_buy: True for an intention to buy, False for an intention to sell
:param amount: The amount required (in base token unit)
:return: The quote price.
"""
        side = "buy" if is_buy else "sell"
        try:
            base, quote = trading_pair.split("-")
resp = await self._api_request("post",
"eth/balancer/price",
{"base": base,
"quote": quote,
"amount": amount,
"side": side.upper()})
required_items = ["price", "gasLimit", "gasPrice", "gasCost"]
if any(item not in resp.keys() for item in required_items):
if "info" in resp.keys():
self.logger().info(f"Unable to get price. {resp['info']}")
else:
self.logger().info(f"Missing data from price result. Incomplete return result for ({resp.keys()})")
else:
gas_limit = resp["gasLimit"]
gas_price = resp["gasPrice"]
gas_cost = resp["gasCost"]
price = resp["price"]
account_standing = {
"allowances": self._allowances,
"balances": self._account_balances,
"base": base,
"quote": quote,
"amount": amount,
"side": side,
"gas_limit": gas_limit,
"gas_price": gas_price,
"gas_cost": gas_cost,
"price": price,
"swaps": len(resp["swaps"])
}
exceptions = check_transaction_exceptions(account_standing)
                    for index, warning in enumerate(exceptions, start=1):
                        self.logger().info(f"Warning! [{index}/{len(exceptions)}] {side} order - {warning}")
if price is not None and len(exceptions) == 0:
fee_overrides_config_map["balancer_maker_fixed_fees"].value = [
TokenAmount("ETH", Decimal(str(gas_cost)))
]
fee_overrides_config_map["balancer_taker_fixed_fees"].value = [
TokenAmount("ETH", Decimal(str(gas_cost)))
]
return Decimal(str(price))
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(
f"Error getting quote price for {trading_pair} {side} order for {amount} amount.",
exc_info=True,
app_warning_msg=str(e)
)
async def get_order_price(self, trading_pair: str, is_buy: bool, amount: Decimal) -> Decimal:
"""
This is simply the quote price
"""
return await self.get_quote_price(trading_pair, is_buy, amount)
def buy(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:
"""
Buys an amount of base token for a given price (or cheaper).
:param trading_pair: The market trading pair
:param amount: The order amount (in base token unit)
:param order_type: Any order type is fine, not needed for this.
:param price: The maximum price for the order.
:return: A newly created order id (internal).
"""
return self.place_order(True, trading_pair, amount, price)
def sell(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:
"""
Sells an amount of base token for a given price (or at a higher price).
:param trading_pair: The market trading pair
:param amount: The order amount (in base token unit)
:param order_type: Any order type is fine, not needed for this.
:param price: The minimum price for the order.
:return: A newly created order id (internal).
"""
return self.place_order(False, trading_pair, amount, price)
def place_order(self, is_buy: bool, trading_pair: str, amount: Decimal, price: Decimal) -> str:
"""
Places an order.
:param is_buy: True for buy order
:param trading_pair: The market trading pair
:param amount: The order amount (in base token unit)
:param price: The minimum price for the order.
:return: A newly created order id (internal).
"""
side = TradeType.BUY if is_buy else TradeType.SELL
order_id = f"{side.name.lower()}-{trading_pair}-{get_tracking_nonce()}"
safe_ensure_future(self._create_order(side, order_id, trading_pair, amount, price))
return order_id
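    # Usage sketch (pair and prices are assumed values for illustration):
    #   order_id = connector.buy("WETH-DAI", Decimal("1.5"), OrderType.LIMIT, Decimal("1900"))
    #   -> returns an internal id like "buy-WETH-DAI-<nonce>" while the order
    #      is submitted asynchronously via _create_order below.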
async def _create_order(self,
trade_type: TradeType,
order_id: str,
trading_pair: str,
amount: Decimal,
price: Decimal):
"""
Calls buy or sell API end point to place an order, starts tracking the order and triggers relevant order events.
:param trade_type: BUY or SELL
:param order_id: Internal order id (also called client_order_id)
:param trading_pair: The market to place order
:param amount: The order amount (in base token value)
:param price: The order price
"""
amount = self.quantize_order_amount(trading_pair, amount)
price = self.quantize_order_price(trading_pair, price)
base, quote = trading_pair.split("-")
api_params = {"base": base,
"quote": quote,
"side": trade_type.name.upper(),
"amount": str(amount),
"limitPrice": str(price),
}
try:
order_result = await self._api_request("post", "eth/balancer/trade", api_params)
hash = order_result.get("txHash")
gas_price = order_result.get("gasPrice")
gas_limit = order_result.get("gasLimit")
gas_cost = order_result.get("gasCost")
self.start_tracking_order(order_id, None, trading_pair, trade_type, price, amount, gas_price)
tracked_order = self._in_flight_orders.get(order_id)
# update onchain balance
await self._update_balances()
if tracked_order is not None:
self.logger().info(f"Created {trade_type.name} order {order_id} txHash: {hash} "
f"for {amount} {trading_pair}. Estimated Gas Cost: {gas_cost} ETH "
f" (gas limit: {gas_limit}, gas price: {gas_price})")
tracked_order.update_exchange_order_id(hash)
tracked_order.gas_price = gas_price
if hash is not None:
tracked_order.fee_asset = "ETH"
tracked_order.executed_amount_base = amount
tracked_order.executed_amount_quote = amount * price
event_tag = MarketEvent.BuyOrderCreated if trade_type is TradeType.BUY else MarketEvent.SellOrderCreated
event_class = BuyOrderCreatedEvent if trade_type is TradeType.BUY else SellOrderCreatedEvent
self.trigger_event(event_tag,
event_class(
self.current_timestamp,
OrderType.LIMIT,
trading_pair,
amount,
price,
order_id,
tracked_order.creation_timestamp,
hash))
else:
self.trigger_event(MarketEvent.OrderFailure,
MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))
except asyncio.CancelledError:
raise
except Exception as e:
self.stop_tracking_order(order_id)
self.logger().network(
f"Error submitting {trade_type.name} order to Balancer for "
f"{amount} {trading_pair} "
f"{price}.",
exc_info=True,
app_warning_msg=str(e)
)
self.trigger_event(MarketEvent.OrderFailure,
MarketOrderFailureEvent(self.current_timestamp, order_id, OrderType.LIMIT))
def start_tracking_order(self,
order_id: str,
exchange_order_id: str,
trading_pair: str,
trade_type: TradeType,
price: Decimal,
amount: Decimal,
gas_price: Decimal):
"""
Starts tracking an order by simply adding it into _in_flight_orders dictionary.
"""
self._in_flight_orders[order_id] = BalancerInFlightOrder(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
order_type=OrderType.LIMIT,
trade_type=trade_type,
price=price,
amount=amount,
gas_price=gas_price,
creation_timestamp=self.current_timestamp
)
def stop_tracking_order(self, order_id: str):
"""
Stops tracking an order by simply removing it from _in_flight_orders dictionary.
"""
if order_id in self._in_flight_orders:
del self._in_flight_orders[order_id]
async def _update_order_status(self):
"""
Calls REST API to get status update for each in-flight order.
"""
if len(self._in_flight_orders) > 0:
tracked_orders = list(self._in_flight_orders.values())
tasks = []
for tracked_order in tracked_orders:
order_id = await tracked_order.get_exchange_order_id()
tasks.append(self._api_request("post",
"eth/poll",
{"txHash": order_id}))
update_results = await safe_gather(*tasks, return_exceptions=True)
            self.logger().info(f"Polling for order status updates of {len(tasks)} orders.")
            for update_result in update_results:
if isinstance(update_result, Exception):
raise update_result
if "txHash" not in update_result:
self.logger().info(f"_update_order_status txHash not in resp: {update_result}")
continue
if update_result["confirmed"] is True:
if update_result["receipt"]["status"] == 1:
gas_used = update_result["receipt"]["gasUsed"]
gas_price = tracked_order.gas_price
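                        # Convert the fee to ETH, assuming gas_price is reported in gwei.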
fee = Decimal(str(gas_used)) * Decimal(str(gas_price)) / Decimal(str(1e9))
self.trigger_event(
MarketEvent.OrderFilled,
OrderFilledEvent(
self.current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
Decimal(str(tracked_order.price)),
Decimal(str(tracked_order.amount)),
AddedToCostTradeFee(
flat_fees=[TokenAmount(tracked_order.fee_asset, Decimal(str(fee)))]
),
exchange_trade_id=order_id
)
)
tracked_order.last_state = "FILLED"
self.logger().info(f"The {tracked_order.trade_type.name} order "
f"{tracked_order.client_order_id} has completed "
f"according to order status API.")
event_tag = MarketEvent.BuyOrderCompleted if tracked_order.trade_type is TradeType.BUY \
else MarketEvent.SellOrderCompleted
event_class = BuyOrderCompletedEvent if tracked_order.trade_type is TradeType.BUY \
else SellOrderCompletedEvent
self.trigger_event(event_tag,
event_class(self.current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.fee_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
float(fee),
tracked_order.order_type))
self.stop_tracking_order(tracked_order.client_order_id)
else:
self.logger().info(
f"The market order {tracked_order.client_order_id} has failed according to order status API. ")
self.trigger_event(MarketEvent.OrderFailure,
MarketOrderFailureEvent(
self.current_timestamp,
tracked_order.client_order_id,
tracked_order.order_type
))
self.stop_tracking_order(tracked_order.client_order_id)
def get_taker_order_type(self):
return OrderType.LIMIT
def get_order_price_quantum(self, trading_pair: str, price: Decimal) -> Decimal:
return Decimal("1e-15")
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal) -> Decimal:
return Decimal("1e-15")
@property
def ready(self):
return all(self.status_dict.values())
def has_allowances(self) -> bool:
"""
Checks if all tokens have allowance (an amount approved)
"""
return len(self._allowances.values()) == len(self._tokens) and \
all(amount > s_decimal_0 for amount in self._allowances.values())
@property
def status_dict(self) -> Dict[str, bool]:
return {
"account_balance": len(self._account_balances) > 0 if self._trading_required else True,
"allowances": self.has_allowances() if self._trading_required else True
}
async def start_network(self):
if self._trading_required:
self._status_polling_task = safe_ensure_future(self._status_polling_loop())
self._initiate_pool_task = safe_ensure_future(self.initiate_pool())
self._auto_approve_task = safe_ensure_future(self.auto_approve())
async def stop_network(self):
if self._status_polling_task is not None:
self._status_polling_task.cancel()
self._status_polling_task = None
if self._auto_approve_task is not None:
self._auto_approve_task.cancel()
self._auto_approve_task = None
if self._initiate_pool_task is not None:
self._initiate_pool_task.cancel()
self._initiate_pool_task = None
async def check_network(self) -> NetworkStatus:
try:
response = await self._api_request("get", "api")
if response["status"] != "ok":
raise Exception(f"Error connecting to Gateway API. HTTP status is {response.status}.")
except asyncio.CancelledError:
raise
except Exception:
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
def tick(self, timestamp: float):
"""
Is called automatically by the clock for each clock's tick (1 second by default).
It checks if status polling task is due for execution.
"""
if time.time() - self._last_poll_timestamp > self.POLL_INTERVAL:
if self._poll_notifier is not None and not self._poll_notifier.is_set():
self._poll_notifier.set()
async def _status_polling_loop(self):
while True:
try:
self._poll_notifier = asyncio.Event()
await self._poll_notifier.wait()
await safe_gather(
                    self._update_balances(on_interval=True),
self._update_order_status(),
)
self._last_poll_timestamp = self.current_timestamp
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().error(str(e), exc_info=True)
self.logger().network("Unexpected error while fetching account updates.",
exc_info=True,
app_warning_msg="Could not fetch balances from Gateway API.")
await asyncio.sleep(0.5)
    async def _update_balances(self, on_interval=False):
"""
Calls Eth API to update total and available balances.
"""
last_tick = self._last_balance_poll_timestamp
current_tick = self.current_timestamp
if not on_interval or (current_tick - last_tick) > self.UPDATE_BALANCE_INTERVAL:
self._last_balance_poll_timestamp = current_tick
local_asset_names = set(self._account_balances.keys())
remote_asset_names = set()
resp_json = await self._api_request("post",
"eth/balances",
{"tokenList": "[" + (",".join(['"' + t + '"' for t in self._tokens])) + "]"})
for token, bal in resp_json["balances"].items():
self._account_available_balances[token] = Decimal(str(bal))
self._account_balances[token] = Decimal(str(bal))
remote_asset_names.add(token)
asset_names_to_remove = local_asset_names.difference(remote_asset_names)
for asset_name in asset_names_to_remove:
del self._account_available_balances[asset_name]
del self._account_balances[asset_name]
self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}
self._in_flight_orders_snapshot_timestamp = self.current_timestamp
async def _http_client(self) -> aiohttp.ClientSession:
"""
:returns Shared client session instance
"""
if self._shared_client is None:
ssl_ctx = ssl.create_default_context(cafile=GATEAWAY_CA_CERT_PATH)
ssl_ctx.load_cert_chain(GATEAWAY_CLIENT_CERT_PATH, GATEAWAY_CLIENT_KEY_PATH)
conn = aiohttp.TCPConnector(ssl_context=ssl_ctx)
self._shared_client = aiohttp.ClientSession(connector=conn)
return self._shared_client
async def _api_request(self,
method: str,
path_url: str,
                           params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""
Sends an aiohttp request and waits for a response.
:param method: The HTTP method, e.g. get or post
:param path_url: The path url or the API end point
:param params: A dictionary of required params for the end point
:returns A response in json format.
"""
base_url = f"https://{global_config_map['gateway_api_host'].value}:" \
f"{global_config_map['gateway_api_port'].value}"
url = f"{base_url}/{path_url}"
client = await self._http_client()
if method == "get":
if len(params) > 0:
response = await client.get(url, params=params)
else:
response = await client.get(url)
elif method == "post":
params["privateKey"] = self._wallet_private_key
if params["privateKey"][:2] != "0x":
params["privateKey"] = "0x" + params["privateKey"]
response = await client.post(url, data=params)
parsed_response = json.loads(await response.text())
if response.status != 200:
err_msg = ""
if "error" in parsed_response:
err_msg = f" Message: {parsed_response['error']}"
raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}.{err_msg}")
if "error" in parsed_response:
raise Exception(f"Error: {parsed_response['error']} {parsed_response['message']}")
return parsed_response
async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
return []
@property
def in_flight_orders(self) -> Dict[str, BalancerInFlightOrder]:
return self._in_flight_orders
|