var SignupWindow = function(_containingTab) {
//UI STUFF
var suWin = Ti.UI.createWindow({
backgroundColor:'transparent',
backgroundImage: '/images/grain.png',
title: "Signup",
barColor: '#6d0a0c',
layout: 'vertical'
});
var email = Ti.UI.createTextField({
hintText:'Email',
autocorrect:false,
autocapitalization:Ti.UI.TEXT_AUTOCAPITALIZATION_NONE,
top:5,
width:'90%',
height:40,
font: {
fontWeight: 'normal',
fontSize: '17'
},
textAlign: 'center',
color:'#333',
backgroundColor: '#ddd',
borderRadius:3,
paddingLeft:2,
paddingRight:2
});
var username = Ti.UI.createTextField({
hintText:'Username',
autocorrect:false,
autocapitalization:Ti.UI.TEXT_AUTOCAPITALIZATION_NONE,
top:5,
width:'90%',
height:40,
font: {
fontWeight: 'normal',
fontSize: '17'
},
textAlign: 'center',
color:'#333',
backgroundColor: '#ddd',
borderRadius:3,
paddingLeft:2,
paddingRight:2
});
var password = Ti.UI.createTextField({
hintText:'Password',
passwordMask:true,
autocorrect:false,
autocapitalization:Ti.UI.TEXT_AUTOCAPITALIZATION_NONE,
top:5,
width:'90%',
height:40,
font: {
fontWeight: 'normal',
fontSize: '17'
},
textAlign: 'center',
color:'#333',
backgroundColor: '#ddd',
borderRadius:3,
paddingLeft:2,
paddingRight:2
});
var confirm = Ti.UI.createTextField({
hintText:'Confirm Password',
passwordMask:true,
autocorrect:false,
autocapitalization:Ti.UI.TEXT_AUTOCAPITALIZATION_NONE,
top:5,
width:'90%',
height:40,
font: {
fontWeight: 'normal',
fontSize: '17'
},
textAlign: 'center',
color:'#333',
backgroundColor: '#ddd',
borderRadius:3,
paddingLeft:2,
paddingRight:2
});
var signupButton = Ti.UI.createButton({
title:'Signup',
top:15,
width: 200,
height: Ti.UI.SIZE
});
var fbConnectButton = Ti.UI.createButton({
title:'Connect with Facebook',
top:5,
width:200,
height:20,
visible:true
});
//ADDING UI COMPONENTS TO WINDOW
suWin.add(email);
suWin.add(username);
suWin.add(password);
suWin.add(confirm);
suWin.add(signupButton);
suWin.add(fbConnectButton);
//CALLBACK FUNCTIONS
function cb() {
// `acs` is the app's ACS (Appcelerator Cloud Services) helper, assumed to be loaded elsewhere
if(acs.isLoggedIn()===true) {
alert(L("Successfully signed up."));
} else {
alert(L("Something went wrong during the signup process."));
}
}
//REGISTERING EVENTS
signupButton.addEventListener('click',function() {
if(password.value === confirm.value)
acs.createUser(email.value,username.value,password.value,Ti.Platform.macaddress,cb);
else alert(L("Passwords do not match. Try again."));
});
fbConnectButton.addEventListener('click', function() {
if(Ti.Facebook.loggedIn) {
Ti.Facebook.logout(); //logout from fb
}
Ti.Facebook.authorize();
});
Ti.include('helpers/facebookAuthenListeners.js'); //fb authen functionality
Ti.Facebook.addEventListener('login', function(e) {
if (e.success) {
alert('SignupWindow.js FB login event cb');
var PlaceholderWindow = require('ui/common/PlaceholderWindow');
var placeholderwin = new PlaceholderWindow();
_containingTab.open(placeholderwin);
}
});
return suWin;
};
module.exports = SignupWindow;
|
from django.db import models
from initiatives.models import ORMInitiative
# Create your models here.
class Email(models.Model):
email = models.EmailField(primary_key=True)
token = models.CharField(max_length=1024)
expiry = models.DateTimeField(auto_now_add=False)
class Role(models.Model):
id = models.IntegerField(primary_key=True)
roles = (
(1,'Student'),(2,'Admin'), (3,'Internal team'), (4,'Teacher') , (5,'Others'),
)
type = models.CharField(max_length=30,choices=roles)
def __str__(self):
return self.type
class Department(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=40)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Skill(models.Model):
skill_id = models.IntegerField(primary_key=True)
skill_name = models.CharField(max_length=50)
def __str__(self):
return self.skill_name
class ORMUser(models.Model):
id = models.IntegerField(primary_key=True)
first_name = models.CharField(max_length = 40)
last_name = models.CharField(max_length = 40)
college_code = models.IntegerField()
email = models.OneToOneField(Email,on_delete=models.CASCADE)
email_verified = models.BooleanField()
role = models.ForeignKey(Role,on_delete=models.CASCADE)  # ForeignKey, not OneToOne: many users can share a role
disable_account = models.BooleanField()
department = models.ForeignKey(Department,on_delete=models.CASCADE)
skill = models.ManyToManyField(Skill)
user_image = models.ImageField(upload_to=None)
username = models.CharField(max_length=100)
password = models.CharField(max_length=32)
last_login = models.DateTimeField()
login_count = models.PositiveIntegerField()
initiative_associated = models.ManyToManyField(ORMInitiative)
class ORMProject(models.Model):
proj_id = models.IntegerField()
proj_name = models.CharField(max_length=50)
proj_description = models.TextField()
technology = models.CharField(max_length=200)
user_id = models.ForeignKey(ORMUser,on_delete = models.CASCADE)
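As a quick orientation to the relations these models wire together (a one-to-one Email per user, a many-to-many Skill set, and foreign keys elsewhere), here is a minimal Django-shell sketch; every value below is invented purely for illustration:

# Hypothetical walkthrough of the relations above (all values invented).
from datetime import datetime, timezone

email = Email.objects.create(email="jane@example.edu", token="tok123",
                             expiry=datetime(2030, 1, 1, tzinfo=timezone.utc))
role = Role.objects.create(id=1, type="Student")
dept = Department.objects.create(id=10, name="Computer Science")
skill = Skill.objects.create(skill_id=1, skill_name="Python")

user = ORMUser.objects.create(
    id=1, first_name="Jane", last_name="Doe", college_code=42,
    email=email, email_verified=True, role=role, disable_account=False,
    department=dept, user_image="",  # empty placeholder path
    username="jane", password="<hashed>",
    last_login=datetime.now(timezone.utc), login_count=1,
)
user.skill.add(skill)  # many-to-many links are attached after the row exists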
|
(function () {
/* Imports */
var Meteor = Package.meteor.Meteor;
/* Exports */
if (typeof Package === 'undefined') Package = {};
Package['mrt:jquery-hotkeys'] = {};
})();
//# sourceMappingURL=mrt_jquery-hotkeys.js.map
|
(function (AJS, $, JIRA) {
//AJS.namespace("JIRA.Admin.CustomFields.UserPickerFilter");
/**
* A consumer of the SelectorPanel, used from the config page (a WebWork action).
*
* Input data is obtained from the WebWork action via data embedded in the generated HTML.
* Configured filter data is sent back to the server via form submission to the same action.
*/
JIRA.Admin.CustomFields.UserPickerFilter.Config = {
/**
* store the user filter json into the hidden field for form action.
*/
_storeUserFilterJson : function (userFilter) {
$('#filter-data-hidden').val(JSON.stringify(userFilter))
},
/**
* Update the CSS class of the button panel depending on whether the filter is enabled:
* a larger top margin is wanted when the filter is disabled.
* @param userFilterEnabled whether the user filter is enabled
* @private
*/
_adjustButtonPanelPosition : function(userFilterEnabled) {
var filterButtonPanel = $("#filter-button-panel"),
add = userFilterEnabled ? "enabled" : "disabled",
remove = userFilterEnabled ? "disabled" : "enabled";
filterButtonPanel.addClass("filter-" + add);
filterButtonPanel.removeClass("filter-" + remove);
},
/**
* This method performs initialization when the selector panel is loaded from user picker config page.
* A different initialization method is required when loading from the quick Create Field dialog.
*/
initializeFromConfigPage : function () {
var $data = $('#data-for-template');
var groups = $data.data('groupsJson') || [],
projectRoles = $data.data('projectRolesJson') || [],
userFilter = $data.data('userFilterJson') || { enabled: false};
$data.remove(); // don't need the data in html any more
var instance = this;
var selectorPanel = JIRA.Admin.CustomFields.UserPickerFilter.SelectorPanel;
selectorPanel.initialize($('#filter-selector-panel'), userFilter, groups, projectRoles);
this._adjustButtonPanelPosition(userFilter.enabled);
// set up a hook to store the JSON string into the hidden field before form submit
$('#filter-submit').click(function() {
instance._storeUserFilterJson(selectorPanel.getUserFilter());
});
selectorPanel.getFilterCheckbox().change(function() {
// adjust the top margin of button panels
instance._adjustButtonPanelPosition(selectorPanel.isUserFilterEnabled());
});
}
};
// render the filter selector panel
AJS.$(function () {
JIRA.Admin.CustomFields.UserPickerFilter.Config.initializeFromConfigPage();
});
})(AJS, AJS.$, JIRA);
|
"""
This sample file demonstrates how to upload a list of EnergyPlus models into one simulation job.
Known issue: the tracking will skip the first model, so users might see a model missing in the tracking information.
e.g. upload 2 models and run them in parallel,
but the tracking info shows:
Total progress 0%, success: 0, failure: 0, running: 1, queue: 0
This is a known issue and we are working on a fix.
"""
import BuildSimHubAPI as bsh_api
import BuildSimHubAPI.postprocess as pp
# project_key can be found in every project (click the information icon next to project name)
project_key = "9d6b010f-de6f-4f18-9030-411467288b11"
file_dir_1 = '/Users/weilixu/Desktop/data/schedule/5ZoneTDV.idf'
file_dir_2 = '/Users/weilixu/Desktop/data/schedule/5ZoneTDV.idf'  # the same model twice, to demonstrate a parallel run
# initialize the client
bsh = bsh_api.BuildSimHubAPIClient()
"""
The most straightforward way to do simulation
"""
new_sj_run = bsh.new_simulation_job(project_key)
results = new_sj_run.run([file_dir_1, file_dir_2], add_files='/Users/weilixu/Desktop/data/schedule/csv', track=True)
|
class MainMenuButtonEffect : ScriptedWidgetEventHandler
{
reference float speed;
reference float amount;
protected float m_textProportion;
protected float m_textProportion2;
protected ButtonWidget m_root;
protected ref AnimatorTimer m_anim;
// -----------------------------------------------------------
void MainMenuButtonEffect()
{
if (GetGame()) GetGame().GetUpdateQueue(CALL_CATEGORY_GUI).Insert(this.Update);
m_anim = new AnimatorTimer();
}
// -----------------------------------------------------------
void ~MainMenuButtonEffect()
{
if (GetGame()) GetGame().GetUpdateQueue(CALL_CATEGORY_GUI).Remove(this.Update);
}
// -----------------------------------------------------------
void OnWidgetScriptInit(ButtonWidget w)
{
m_root = w;
m_root.SetHandler(this);
}
// -----------------------------------------------------------
protected void Update(float tDelta)
{
m_anim.Tick(tDelta);
float p = amount * m_anim.GetValue();
//m_root.SetTextProportion( m_textProportion + (p * 0.5) );
m_root.SetTextOffset( p * 4, 0 );
float c = 1.0 - m_anim.GetValue();
m_root.SetTextColor(ARGBF(1, 1, c, c));
}
// -----------------------------------------------------------
override bool OnFocus(Widget w, int x, int y)
{
//if ( !m_anim.IsRunning() ) m_textProportion = m_root.GetTextProportion();
if ( !m_anim.IsRunning() )
{
m_root.GetPos( m_textProportion, m_textProportion2 );
}
m_anim.Animate(1.0, speed);
return false;
}
// -----------------------------------------------------------
override bool OnFocusLost(Widget w, int x, int y)
{
m_anim.Animate(0.0, speed);
return false;
}
};
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _vueTypes = require('../_util/vue-types');
var _vueTypes2 = _interopRequireDefault(_vueTypes);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
var triggerType = _vueTypes2['default'].oneOf(['hover', 'focus', 'click', 'contextmenu']);
exports['default'] = function () {
return {
trigger: _vueTypes2['default'].oneOfType([triggerType, _vueTypes2['default'].arrayOf(triggerType)]).def('hover'),
visible: _vueTypes2['default'].bool,
defaultVisible: _vueTypes2['default'].bool,
placement: _vueTypes2['default'].oneOf(['top', 'left', 'right', 'bottom', 'topLeft', 'topRight', 'bottomLeft', 'bottomRight', 'leftTop', 'leftBottom', 'rightTop', 'rightBottom']).def('top'),
transitionName: _vueTypes2['default'].string.def('zoom-big-fast'),
// onVisibleChange: PropTypes.func,
overlayStyle: _vueTypes2['default'].object.def({}),
overlayClassName: _vueTypes2['default'].string,
prefixCls: _vueTypes2['default'].string.def('ant-tooltip'),
mouseEnterDelay: _vueTypes2['default'].number.def(0.1),
mouseLeaveDelay: _vueTypes2['default'].number.def(0.1),
getPopupContainer: _vueTypes2['default'].func,
arrowPointAtCenter: _vueTypes2['default'].bool.def(false),
autoAdjustOverflow: _vueTypes2['default'].oneOfType([_vueTypes2['default'].bool, _vueTypes2['default'].object]).def(true),
destroyTooltipOnHide: _vueTypes2['default'].bool.def(false),
align: _vueTypes2['default'].object.def({})
};
};
|
/**
* \file
* A hash table which uses the values themselves as nodes.
*
* Author:
* Mark Probst (mark.probst@gmail.com)
*
* (C) 2007 Novell, Inc.
*
*/
#ifndef __MONO_UTILS_MONO_INTERNAL_HASH__
#define __MONO_UTILS_MONO_INTERNAL_HASH__
/* A MonoInternalHashTable is a hash table that does not allocate hash
nodes. It can be used if the following conditions are fulfilled:
* The key is contained (directly or indirectly) in the value.
* Each value is in at most one internal hash table at the same
time.
The value data structure must then be extended to contain a
pointer, used by the internal hash table to chain values in the
same bucket.
Apart from the hash function, two other functions must be provided,
namely for extracting the key out of a value, and for getting the
next value pointer. The latter must actually return a pointer to
the next value pointer, because the internal hash table must be
able to modify it.
See the class_cache internal hash table in MonoImage for an
example.
*/
typedef struct _MonoInternalHashTable MonoInternalHashTable;
typedef gpointer (*MonoInternalHashKeyExtractFunc) (gpointer value);
typedef gpointer* (*MonoInternalHashNextValueFunc) (gpointer value);
struct _MonoInternalHashTable
{
GHashFunc hash_func;
MonoInternalHashKeyExtractFunc key_extract;
MonoInternalHashNextValueFunc next_value;
gint size;
gint num_entries;
gpointer *table;
};
void
mono_internal_hash_table_init (MonoInternalHashTable *table,
GHashFunc hash_func,
MonoInternalHashKeyExtractFunc key_extract,
MonoInternalHashNextValueFunc next_value);
void
mono_internal_hash_table_destroy (MonoInternalHashTable *table);
gpointer
mono_internal_hash_table_lookup (MonoInternalHashTable *table, gpointer key);
/* mono_internal_hash_table_insert requires that there is no entry for
key in the hash table. If you want to change the value for a key
already in the hash table, remove it first and then insert the new
one.
The key pointer is actually only passed here to check a debugging
assertion and to make the API look more familiar. */
void
mono_internal_hash_table_insert (MonoInternalHashTable *table,
gpointer key, gpointer value);
void
mono_internal_hash_table_remove (MonoInternalHashTable *table, gpointer key);
#endif
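The contract spelled out in the comments above (the key lives inside the value, the value carries the chaining pointer, the table allocates no nodes) can be made concrete with a short sketch; Python is used here only for brevity, and none of these names are Mono API:

# Conceptual sketch of an intrusive hash table: values embed their own key and
# "next" link, so the table itself never allocates nodes (illustrative only).
class Value:
    def __init__(self, key, payload):
        self.key = key          # the key is contained in the value
        self.payload = payload
        self.next = None        # chaining pointer owned by the hash table

class IntrusiveHashTable:
    def __init__(self, size=16):
        self.buckets = [None] * size

    def _index(self, key):
        return hash(key) % len(self.buckets)

    def insert(self, value):
        # mirrors mono_internal_hash_table_insert: the key must not be present yet
        assert self.lookup(value.key) is None
        i = self._index(value.key)
        value.next = self.buckets[i]
        self.buckets[i] = value

    def lookup(self, key):
        node = self.buckets[self._index(key)]
        while node is not None:
            if node.key == key:
                return node
            node = node.next
        return None

    def remove(self, key):
        # walk the chain through the embedded next pointers and unlink in place
        i = self._index(key)
        prev, node = None, self.buckets[i]
        while node is not None:
            if node.key == key:
                if prev is None:
                    self.buckets[i] = node.next
                else:
                    prev.next = node.next
                node.next = None
                return
            prev, node = node, node.next

The C version goes one step further: next_value returns a pointer to the next-value pointer, so the table can rewrite links directly without tracking a prev variable.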
|
from coco_style_annotation_creator import test_human2coco_format as h2c
from detect2.tools import myinfer
import make_crop_and_mask_w_mask_nms as mcm
from global_local_parsing import gl_eval as gleval
import parsing_fusion as pfs
import argparse
import os
import time
from detectron2.config import get_cfg
from detectron2.engine.defaults import DefaultPredictor
import glob
import tqdm
import cv2
from detectron2.data.detection_utils import read_image
def h2c_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",
type=str,
default='test',
help="name of dataset for register")
parser.add_argument("--root_dir", type=str, default='./mhp_parsing/data/')
# for instance detection
parser.add_argument(
"--config_file",
type=str,
# default='./mhp_parsing/detect2/configs/Misc/demo.yaml',
default=
'./mhp_parsing/detect2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x_copy.yaml',
help='detectron2 config file')
parser.add_argument(
"--resume",
action="store_true",
help="whether to attempt to resume from the checkpoint directory",
)
parser.add_argument("--eval-only",
action="store_true",
help="perform evaluation only")
return parser.parse_args()
def setup(args, cfg_modify=None):
"""
Create configs and perform basic setups.
"""
if cfg_modify is None:
cfg_modify = []
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(cfg_modify)
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.6
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6
cfg.freeze()
return cfg
def main():
count = []
time0 = time.time()
args1 = h2c_arguments()
dataname = args1.dataset
# no-label test imgs -> coco Dataset
data_dir = os.path.join(args1.root_dir, dataname)
src_dir = os.path.join(data_dir, 'src_imgs')
# anno_dir = os.path.join(data_dir, 'src_annos')
# if not os.path.exists(anno_dir):
# os.mkdir(anno_dir)
# h2c.tococo(src_dir, anno_dir, dataname)
time1 = time.time()
count.append(time1 - time0)
# get the instance result
# instance_det_out = os.path.join(data_dir, 'instance_detection')
# anno_file = os.path.join(anno_dir, dataname + '.json')
# if not os.path.exists(instance_det_out):
# os.mkdir(instance_det_out)
# cfg_modify = [
# 'MODEL.WEIGHTS',
# # '/home/qiu/Downloads/models/detectron2/detectron2_maskrcnn_cihp_finetune.pth',
# '/home/qiu/Downloads/models/detectron2/detectron2_mask_rcnn_fpn_3x_pre.pkl',
# ]
# myinfer.infer(args1, cfg_modify, src_dir, anno_file)
cfg = setup(args1)
demo = DefaultPredictor(cfg)
imgnames = os.listdir(src_dir)
inputs = []
for name in imgnames:
if name.endswith(('.jpg', '.png')):
inputs.append(os.path.join(src_dir, name))
if len(inputs) == 1:
inputs = glob.glob(os.path.expanduser(inputs[0]))
assert inputs, "The input path(s) were not found"
detect_list = []
for path in tqdm.tqdm(inputs):
# read with cv2, then convert to 3-channel grayscale (the PIL-style read_image variant is kept below for reference)
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# img=read_image(path, 'BGR')
h, w, _ = img.shape
predictions = demo(img)
predictions['img_path'] = path
detect_list.append(predictions)
time2 = time.time()
count.append(time2 - time1)
# crop the instances
# file_list = mcm.crop(src_dir, data_dir, anno_file, instance_det_out)
file_list = mcm.crop(src_dir, data_dir, detect_list)
time3 = time.time()
count.append(time3 - time2)
# use the same pretrained checkpoint for local and global parsing
schp_ckpt = '/home/qiu/Projects/Self-Correction-Human-Parsing/deploy/pascal_abn_checkpoint.pth'
file_list = gleval.glparsing(data_dir, ['crop_pic', 'src_imgs'],
schp_ckpt,
log_dir=data_dir,
file_list=file_list)
time4 = time.time()
count.append(time4 - time3)
# fuse result
mask_dir = os.path.join(data_dir, 'crop_mask')
save_dir = os.path.join(data_dir, 'fusion_result')
pfs.gl_fuse(
file_list,
mask_dir,
save_dir,
data_dir,
)
time5 = time.time()
count.append(time5 - time4)
count.append(time5 - time0)
print(count)
if __name__ == '__main__':
# start=time.time()
main()
# print(time.time()-start)
|
import cv2
import math
import numpy as np
from scipy.spatial import distance as dist
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
def calculateDistance(ex, ey, ew, eh):
d = math.sqrt((ex - ew)**2 + (ey - eh)**2)
return d
# print calculateDistance(x1, y1, x2, y2)
# use a Haar cascade for face detection
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# read the input face image
image = cv2.imread(r"C:\Users\Sundar Gopal\PycharmProjects\images11MPAYP5.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
centers=[]
faces = face_cascade.detectMultiScale(gray,1.3,5)
# loop over the detected faces
for (x,y,w,h) in faces:
#create two Regions of Interest on face.
roi_gray = gray[y:y+h, x:x+w]
roi_color = image[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
# store the coordinates of the eyes in the 'centers' array
for (ex,ey,ew,eh) in eyes:
centers.append((x+int(ex+0.5*ew), y+int(ey+0.5*eh)))
# Point2f eye1, eye2;
# double res = cv::norm(eye1-eye2);
#creates rectangle with 'colour'
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
overlay_img = np.ones(image.shape,np.uint8)*255
# Create a mask and generate its inverse.
gray_glasses = cv2.cvtColor(overlay_img, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(gray_glasses, 110, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
temp = cv2.bitwise_and(image, image, mask=mask)
temp2 = cv2.bitwise_and(overlay_img, overlay_img, mask=mask_inv)
final_img = cv2.add(temp, temp2)
# imS = cv2.resize(final_img, (1366, 768))
# print calculateDistance(ex,ey,ew,eh)
cv2.imshow('Final Result', final_img)
cv2.waitKey()
cv2.destroyAllWindows()
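Note that overlay_img above is a plain white canvas, so the mask round-trip leaves the photo essentially unchanged; the compositing pattern it sets up is easier to see on synthetic data. A self-contained toy version (arrays and values invented for illustration):

# Toy illustration of the mask / inverse-mask compositing used above:
# cv2.add of the two masked halves pastes the dark parts of an overlay
# onto a background image.
import cv2
import numpy as np

bg = np.full((4, 4, 3), 200, np.uint8)   # stand-in for the photo
fg = np.full((4, 4, 3), 255, np.uint8)   # overlay: white = "transparent"
fg[1:3, 1:3] = 0                         # a dark 2x2 patch to paste

gray = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 110, 255, cv2.THRESH_BINARY)  # 255 where overlay is white
mask_inv = cv2.bitwise_not(mask)

kept_bg = cv2.bitwise_and(bg, bg, mask=mask)      # background where overlay is transparent
kept_fg = cv2.bitwise_and(fg, fg, mask=mask_inv)  # overlay where it is opaque
out = cv2.add(kept_bg, kept_fg)
print(out[:, :, 0])  # 200 everywhere except 0 in the pasted 2x2 patch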
|
// @flow
import React from 'react';
import { create } from 'react-test-renderer';
import { FixtureContainer } from '../../FixtureContainer';
it('renders string element type', () => {
expect(
create(
<FixtureContainer>
<div>Hello world!</div>
</FixtureContainer>
).toJSON()
).toEqual({ type: 'div', props: {}, children: ['Hello world!'] });
});
|
// Return the position of an element in an array (or array-like object)
function arrayIndexOf(arrayToSearch, item) {
if (Array.prototype.indexOf) {
return arrayToSearch.indexOf(item);
} else {
for (var i = 0; i < arrayToSearch.length; i++) {
if (arrayToSearch[i] === item) return i;
}
return -1;
}
}
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .migrate_sql_server_sql_mi_task_output import MigrateSqlServerSqlMITaskOutput
class MigrateSqlServerSqlMITaskOutputDatabaseLevel(MigrateSqlServerSqlMITaskOutput):
"""MigrateSqlServerSqlMITaskOutputDatabaseLevel.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Result identifier
:vartype id: str
:param result_type: Constant filled by server.
:type result_type: str
:ivar database_name: Name of the database
:vartype database_name: str
:ivar size_mb: Size of the database in megabytes
:vartype size_mb: float
:ivar state: Current state of migration. Possible values include: 'None',
'InProgress', 'Failed', 'Warning', 'Completed', 'Skipped', 'Stopped'
:vartype state: str or ~azure.mgmt.datamigration.models.MigrationState
:ivar stage: Current stage of migration. Possible values include: 'None',
'Initialize', 'Backup', 'FileCopy', 'Restore', 'Completed'
:vartype stage: str or
~azure.mgmt.datamigration.models.DatabaseMigrationStage
:ivar started_on: Migration start time
:vartype started_on: datetime
:ivar ended_on: Migration end time
:vartype ended_on: datetime
:ivar message: Migration progress message
:vartype message: str
:ivar exceptions_and_warnings: Migration exceptions and warnings
:vartype exceptions_and_warnings:
list[~azure.mgmt.datamigration.models.ReportableException]
"""
_validation = {
'id': {'readonly': True},
'result_type': {'required': True},
'database_name': {'readonly': True},
'size_mb': {'readonly': True},
'state': {'readonly': True},
'stage': {'readonly': True},
'started_on': {'readonly': True},
'ended_on': {'readonly': True},
'message': {'readonly': True},
'exceptions_and_warnings': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'result_type': {'key': 'resultType', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'size_mb': {'key': 'sizeMB', 'type': 'float'},
'state': {'key': 'state', 'type': 'str'},
'stage': {'key': 'stage', 'type': 'str'},
'started_on': {'key': 'startedOn', 'type': 'iso-8601'},
'ended_on': {'key': 'endedOn', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'exceptions_and_warnings': {'key': 'exceptionsAndWarnings', 'type': '[ReportableException]'},
}
def __init__(self):
super(MigrateSqlServerSqlMITaskOutputDatabaseLevel, self).__init__()
self.database_name = None
self.size_mb = None
self.state = None
self.stage = None
self.started_on = None
self.ended_on = None
self.message = None
self.exceptions_and_warnings = None
self.result_type = 'DatabaseLevelOutput'
|
var callbackArguments = [];
var base_0 = [618,-1,1.7976931348623157e+308,843,0,126,893]
var r_0= undefined
try {
r_0 = base_0.reduceRight()
}
catch(e) {
r_0= "Error"
}
function serialize(array){
return array.map(function(a){
if (a === null || a == undefined) return a;
var name = a.constructor.name;
if (name==='Object' || name=='Boolean'|| name=='Array'||name=='Number'||name=='String')
return JSON.stringify(a);
return name;
});
}
setTimeout(function(){
require("fs").writeFileSync("./experiments/reduceRight/reduceRightRandom/test809.json",JSON.stringify({"baseObjects":serialize([base_0]),"returnObjects":serialize([r_0]),"callbackArgs":callbackArguments}))
},300)
|
/*
Module: history.js
Description: Gets and sets users reading history
*/
import fastdom from 'fastdom';
import $ from 'lib/$';
import { storage } from '@guardian/libs';
import { getPath } from 'lib/url';
import isObject from 'lodash/isObject';
import {getCookie} from "lib/cookies";
import { ARTICLES_VIEWED_OPT_OUT_COOKIE } from "common/modules/commercial/user-features";
const editions = ['uk', 'us', 'au'];
const editionalised = [
'business',
'commentisfree',
'culture',
'environment',
'media',
'money',
'sport',
'technology',
];
const pageMeta = [
{
tid: 'section',
tname: 'sectionName',
},
{
tid: 'keywordIds',
tname: 'keywords',
},
{
tid: 'seriesId',
tname: 'series',
},
{
tid: 'authorIds',
tname: 'author',
},
];
const buckets = [
{
type: 'content',
indexInRecord: 1,
},
{
type: 'front',
indexInRecord: 2,
},
];
const getMondayFromDate = (date) => {
const day = date.getDay() || 7;
// Do not set date to Monday if it is already Monday
if (day !== 1) {
date.setHours(-24 * (day - 1));
}
return Math.floor(date.getTime() / 86400000);
};
const summaryPeriodDays = 90;
const forgetUniquesAfter = 10;
const historySize = 50;
const storageKeyHistory = 'gu.history';
const storageKeySummary = 'gu.history.summary';
const storageKeyDailyArticleCount = 'gu.history.dailyArticleCount'; // Array containing an article count for each day
const storageKeyWeeklyArticleCount = 'gu.history.weeklyArticleCount';
const today = Math.floor(Date.now() / 86400000); // days since the Unix epoch (86400000 ms = 1 day)
const startOfThisWeek = getMondayFromDate(new Date());
let historyCache;
let summaryCache;
let popularFilteredCache;
let topNavItemsCache;
let inMegaNav = false;
const saveHistory = (history) => {
historyCache = history;
storage.local.set(storageKeyHistory, history);
};
const saveSummary = (summary) => {
summaryCache = summary;
storage.local.set(storageKeySummary, summary);
};
const getHistory = () => {
historyCache = historyCache || storage.local.get(storageKeyHistory) || [];
return historyCache;
};
const getSummary = () => {
if (!summaryCache) {
summaryCache = storage.local.get(storageKeySummary);
if (
!isObject(summaryCache) ||
!isObject(summaryCache.tags) ||
typeof summaryCache.periodEnd !== 'number'
) {
summaryCache = {
periodEnd: today,
tags: {},
showInMegaNav: true,
};
}
}
return summaryCache;
};
const seriesSummary = () => {
const views = item => item.reduce((acc, val) => acc + val[1], 0);
const summaryTags = getSummary().tags;
const seriesTags = Object.keys(summaryTags).reduce((acc, val) => {
if (val.includes('series')) {
acc[val] = summaryTags[val];
}
return acc;
}, {});
const seriesTagsSummary = Object.keys(seriesTags).reduce((acc, val) => {
const tag = seriesTags[val];
acc[val] = views(tag[1]) + views(tag[2]);
return acc;
}, {});
return seriesTagsSummary;
};
const mostViewedSeries = () => {
const summary = seriesSummary();
return Object.keys(summary).reduce(
(topSeries, currentSeries) =>
summary[topSeries] > summary[currentSeries]
? topSeries
: currentSeries,
''
);
};
const deleteFromSummary = (tag) => {
const summary = getSummary();
delete summary.tags[tag];
saveSummary(summary);
};
const isRevisit = (pageId) => {
const visited = getHistory().find(page => page[0] === pageId);
return !!(visited && visited[1] > 1);
};
const pruneSummary = (summary, newToday = today) => {
const updateBy = newToday - summary.periodEnd;
if (updateBy !== 0) {
summary.periodEnd = newToday;
Object.keys(summary.tags).forEach(tid => {
const record = summary.tags[tid];
const result = buckets.map(bucket => {
if (record[bucket.indexInRecord]) {
const visits = record[bucket.indexInRecord]
.map(day => {
const newAge = day[0] + updateBy;
return newAge < summaryPeriodDays && newAge >= 0
? [newAge, day[1]]
: false;
})
.filter(Boolean);
return visits.length > 1 ||
(visits.length === 1 &&
visits[0][0] < forgetUniquesAfter)
? visits
: [];
}
return [];
});
if (result.some(r => r.length)) {
summary.tags[tid] = [record[0]].concat(result);
} else {
delete summary.tags[tid];
}
});
}
return summary;
};
const tally = (
visits,
weight = 1,
minimum = 1
) => {
let totalVisits = 0;
const result = visits.reduce((t, day) => {
const dayOffset = day[0];
const dayVisits = day[1];
totalVisits += dayVisits;
return t + weight * (9 + dayVisits) * (summaryPeriodDays - dayOffset);
}, 0);
return totalVisits < minimum ? 0 : result;
};
const getPopular = (opts) => {
const tags = getSummary().tags;
let tids = Object.keys(tags);
const op = Object.assign(
{},
{
number: 100,
weights: {},
thresholds: {},
},
opts
);
if (op.whitelist) {
tids = tids.filter(tid => op.whitelist.includes(tid));
}
if (op.blacklist) {
tids = tids.filter(tid => !op.blacklist.includes(tid));
}
return tids
.map(tid => {
const record = tags[tid];
const rank = buckets.reduce(
(r, bucket) =>
r +
tally(
record[bucket.indexInRecord],
op.weights[bucket.type],
op.thresholds[bucket.type]
),
0
);
return {
idAndName: [tid, record[0]],
rank,
};
})
.filter(Boolean)
.sort((a, b) => a.rank - b.rank)
.slice(-op.number)
.map(tid => tid.idAndName)
.reverse();
};
const getContributors = () => {
const contributors = [];
const tags = getSummary().tags;
Object.keys(tags).forEach(tagId => {
if (tagId.startsWith('profile/')) {
contributors.push(tags[tagId]);
}
});
return contributors;
};
const collapsePath = (path) => {
const isEditionalisedRx = new RegExp(
`^(${editions.join('|')})/(${editionalised.join('|')})$`
);
const stripEditionRx = new RegExp(`^(${editions.join('|')})/`);
if (path) {
let newPath = path.replace(/^\/|\/$/g, '');
if (newPath.match(isEditionalisedRx)) {
newPath = newPath.replace(stripEditionRx, '');
}
const newPathSplit = newPath.split('/');
if (newPathSplit.length === 2 && newPathSplit[0] === newPathSplit[1]) {
newPath = [newPathSplit[0]].join('/');
}
return newPath;
}
return '';
};
const getTopNavItems = () => {
topNavItemsCache =
topNavItemsCache ||
$('.js-navigation-header .js-top-navigation a').map(item =>
collapsePath(getPath($(item).attr('href')))
);
return topNavItemsCache;
};
const getPopularFiltered = (opts) => {
const flush = opts && opts.flush;
popularFilteredCache =
(!flush && popularFilteredCache) ||
getPopular({
blacklist: getTopNavItems(),
number: 10,
weights: {
content: 1,
front: 5,
},
thresholds: {
content: 5,
front: 1,
},
});
return popularFilteredCache;
};
const firstCsv = (str) => (str || '').split(',')[0];
const reset = () => {
historyCache = undefined;
summaryCache = undefined;
storage.local.remove(storageKeyHistory);
storage.local.remove(storageKeySummary);
storage.local.remove(storageKeyDailyArticleCount);
};
const logHistory = (pageConfig) => {
const { pageId } = pageConfig;
let history;
let foundCount = 0;
if (!pageConfig.isFront) {
history = getHistory().filter(item => {
const isArr = Array.isArray(item);
const found = isArr && item[0] === pageId;
foundCount = found ? item[1] : foundCount;
return isArr && !found;
});
history.unshift([pageId, foundCount + 1]);
saveHistory(history.slice(0, historySize));
}
};
const logSummary = (pageConfig, mockToday) => {
const summary = pruneSummary(getSummary(), mockToday);
const page = collapsePath(pageConfig.pageId);
let isFront = false;
const meta = pageMeta.reduceRight((tagMeta, tag) => {
const tid = collapsePath(firstCsv(pageConfig[tag.tid]));
const tname = tid && firstCsv(pageConfig[tag.tname]);
if (tid && tname) {
tagMeta[tid] = tname;
}
isFront = isFront || tid === page;
return tagMeta;
}, {});
Object.keys(meta).forEach(tid => {
const tname = meta[tid];
const record = summary.tags[tid] || [];
buckets.forEach(bucket => {
record[bucket.indexInRecord] = record[bucket.indexInRecord] || [];
});
record[0] = tname;
const visits = record[isFront ? 2 : 1];
const todaysVisits = visits.find(day => day[0] === 0);
if (todaysVisits) {
todaysVisits[1] += 1;
} else {
visits.unshift([0, 1]);
}
summary.tags[tid] = record;
});
saveSummary(summary);
};
const getMegaNav = () => $('.js-global-navigation');
const removeFromMegaNav = () => {
getMegaNav().each(megaNav => {
fastdom.mutate(() => {
$('.js-global-navigation__section--history', megaNav).remove();
});
});
inMegaNav = false;
};
const tagHtml = (
tag,
index
) => `<li class="inline-list__item">
<a href="/${
tag[0]
}" class="button button--small button--tag button--secondary" data-link-name="${index +
1} | ${tag[1]}">${tag[1]}</a>
</li>`;
const showInMegaNav = () => {
let tagsHTML;
if (getSummary().showInMegaNav === false) {
return;
}
if (inMegaNav) {
removeFromMegaNav();
}
const tags = getPopularFiltered();
if (tags.length) {
tagsHTML = `<li class="global-navigation__section js-global-navigation__section--history" data-link-name="shortcuts">
<span class="global-navigation__title global-navigation__title--history">recently visited</span>
<ul class="global-navigation__children global-navigation__children--history">
${tags.map(tagHtml).join('')}
<a class="button button--small button--tag button--tertiary" href="/preferences" data-link-name="edit">edit these</a>
</ul>
</li>`;
fastdom.mutate(() => {
getMegaNav().prepend(tagsHTML);
});
inMegaNav = true;
}
};
const showInMegaNavEnabled = () =>
getSummary().showInMegaNav !== false;
const showInMegaNavEnable = (bool) => {
const summary = getSummary();
summary.showInMegaNav = bool;
if (summary.showInMegaNav) {
showInMegaNav();
} else {
removeFromMegaNav();
}
saveSummary(summary);
};
const incrementDailyArticleCount = (pageConfig) => {
if (!pageConfig.isFront && !getCookie(ARTICLES_VIEWED_OPT_OUT_COOKIE.name)) {
const dailyCount = storage.local.get(storageKeyDailyArticleCount) || [];
if (dailyCount[0] && dailyCount[0].day && dailyCount[0].day === today) {
dailyCount[0].count += 1;
} else {
// New day
dailyCount.unshift({ day: today, count: 1 });
// Remove any days older than 60
const cutOff = today - 60;
const firstOldDayIndex = dailyCount.findIndex(
c => c.day && c.day < cutOff
);
if (firstOldDayIndex > 0) {
dailyCount.splice(firstOldDayIndex);
}
}
storage.local.set(storageKeyDailyArticleCount, dailyCount);
}
};
const incrementWeeklyArticleCount = (pageConfig) => {
if (!pageConfig.isFront && !getCookie(ARTICLES_VIEWED_OPT_OUT_COOKIE.name)) {
const weeklyArticleCount =
storage.local.get(storageKeyWeeklyArticleCount) || [];
if (
weeklyArticleCount[0] &&
weeklyArticleCount[0].week &&
weeklyArticleCount[0].week === startOfThisWeek
) {
weeklyArticleCount[0].count += 1;
} else {
// New week
weeklyArticleCount.unshift({
week: startOfThisWeek,
count: 1,
});
// Remove any weeks older than a year
const cutOff = startOfThisWeek - 365;
const firstOldWeekIndex = weeklyArticleCount.findIndex(
c => c.week && c.week < cutOff
);
if (firstOldWeekIndex > 0) {
weeklyArticleCount.splice(firstOldWeekIndex);
}
}
storage.local.set(storageKeyWeeklyArticleCount, weeklyArticleCount);
}
};
const getArticleViewCountForDays = (days) => {
const dailyCount = storage.local.get(storageKeyDailyArticleCount) || [];
const cutOff = today - days;
const firstOldDayIndex = dailyCount.findIndex(
c => c.day && c.day <= cutOff
);
const dailyCountWindow =
firstOldDayIndex >= 0
? dailyCount.slice(0, firstOldDayIndex)
: dailyCount;
return dailyCountWindow.reduce((acc, current) => current.count + acc, 0);
};
const getArticleViewCountForWeeks = (weeks) => {
const weeklyCount = storage.local.get(storageKeyWeeklyArticleCount) || [];
const cutOff = startOfThisWeek - weeks * 7;
const firstOldWeekIndex = weeklyCount.findIndex(
c => c.week && c.week <= cutOff
);
const weeklyCountWindow =
firstOldWeekIndex >= 0
? weeklyCount.slice(0, firstOldWeekIndex)
: weeklyCount;
return weeklyCountWindow.reduce((acc, current) => current.count + acc, 0);
};
export {
logHistory,
logSummary,
showInMegaNav,
showInMegaNavEnable,
showInMegaNavEnabled,
getPopular,
getPopularFiltered,
getContributors,
deleteFromSummary,
isRevisit,
reset,
seriesSummary,
mostViewedSeries,
incrementDailyArticleCount,
incrementWeeklyArticleCount,
getArticleViewCountForDays,
getArticleViewCountForWeeks,
getMondayFromDate,
storageKeyDailyArticleCount,
storageKeyWeeklyArticleCount,
};
export const _ = {
getSummary,
getHistory,
pruneSummary,
collapsePath,
};
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVpnServerConfigurationResult',
'AwaitableGetVpnServerConfigurationResult',
'get_vpn_server_configuration',
]
@pulumi.output_type
class GetVpnServerConfigurationResult:
"""
VpnServerConfiguration Resource.
"""
def __init__(__self__, aad_authentication_parameters=None, etag=None, id=None, location=None, name=None, p2_s_vpn_gateways=None, provisioning_state=None, radius_client_root_certificates=None, radius_server_address=None, radius_server_root_certificates=None, radius_server_secret=None, tags=None, type=None, vpn_authentication_types=None, vpn_client_ipsec_policies=None, vpn_client_revoked_certificates=None, vpn_client_root_certificates=None, vpn_protocols=None):
if aad_authentication_parameters and not isinstance(aad_authentication_parameters, dict):
raise TypeError("Expected argument 'aad_authentication_parameters' to be a dict")
pulumi.set(__self__, "aad_authentication_parameters", aad_authentication_parameters)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if p2_s_vpn_gateways and not isinstance(p2_s_vpn_gateways, list):
raise TypeError("Expected argument 'p2_s_vpn_gateways' to be a list")
pulumi.set(__self__, "p2_s_vpn_gateways", p2_s_vpn_gateways)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if radius_client_root_certificates and not isinstance(radius_client_root_certificates, list):
raise TypeError("Expected argument 'radius_client_root_certificates' to be a list")
pulumi.set(__self__, "radius_client_root_certificates", radius_client_root_certificates)
if radius_server_address and not isinstance(radius_server_address, str):
raise TypeError("Expected argument 'radius_server_address' to be a str")
pulumi.set(__self__, "radius_server_address", radius_server_address)
if radius_server_root_certificates and not isinstance(radius_server_root_certificates, list):
raise TypeError("Expected argument 'radius_server_root_certificates' to be a list")
pulumi.set(__self__, "radius_server_root_certificates", radius_server_root_certificates)
if radius_server_secret and not isinstance(radius_server_secret, str):
raise TypeError("Expected argument 'radius_server_secret' to be a str")
pulumi.set(__self__, "radius_server_secret", radius_server_secret)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if vpn_authentication_types and not isinstance(vpn_authentication_types, list):
raise TypeError("Expected argument 'vpn_authentication_types' to be a list")
pulumi.set(__self__, "vpn_authentication_types", vpn_authentication_types)
if vpn_client_ipsec_policies and not isinstance(vpn_client_ipsec_policies, list):
raise TypeError("Expected argument 'vpn_client_ipsec_policies' to be a list")
pulumi.set(__self__, "vpn_client_ipsec_policies", vpn_client_ipsec_policies)
if vpn_client_revoked_certificates and not isinstance(vpn_client_revoked_certificates, list):
raise TypeError("Expected argument 'vpn_client_revoked_certificates' to be a list")
pulumi.set(__self__, "vpn_client_revoked_certificates", vpn_client_revoked_certificates)
if vpn_client_root_certificates and not isinstance(vpn_client_root_certificates, list):
raise TypeError("Expected argument 'vpn_client_root_certificates' to be a list")
pulumi.set(__self__, "vpn_client_root_certificates", vpn_client_root_certificates)
if vpn_protocols and not isinstance(vpn_protocols, list):
raise TypeError("Expected argument 'vpn_protocols' to be a list")
pulumi.set(__self__, "vpn_protocols", vpn_protocols)
@property
@pulumi.getter(name="aadAuthenticationParameters")
def aad_authentication_parameters(self) -> Optional['outputs.AadAuthenticationParametersResponse']:
"""
The set of aad vpn authentication parameters.
"""
return pulumi.get(self, "aad_authentication_parameters")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="p2SVpnGateways")
def p2_s_vpn_gateways(self) -> Sequence['outputs.P2SVpnGatewayResponse']:
"""
List of references to P2SVpnGateways.
"""
return pulumi.get(self, "p2_s_vpn_gateways")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the VpnServerConfiguration resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="radiusClientRootCertificates")
def radius_client_root_certificates(self) -> Optional[Sequence['outputs.VpnServerConfigRadiusClientRootCertificateResponse']]:
"""
Radius client root certificate of VpnServerConfiguration.
"""
return pulumi.get(self, "radius_client_root_certificates")
@property
@pulumi.getter(name="radiusServerAddress")
def radius_server_address(self) -> Optional[str]:
"""
The radius server address property of the VpnServerConfiguration resource for point to site client connection.
"""
return pulumi.get(self, "radius_server_address")
@property
@pulumi.getter(name="radiusServerRootCertificates")
def radius_server_root_certificates(self) -> Optional[Sequence['outputs.VpnServerConfigRadiusServerRootCertificateResponse']]:
"""
Radius Server root certificate of VpnServerConfiguration.
"""
return pulumi.get(self, "radius_server_root_certificates")
@property
@pulumi.getter(name="radiusServerSecret")
def radius_server_secret(self) -> Optional[str]:
"""
The radius secret property of the VpnServerConfiguration resource for point to site client connection.
"""
return pulumi.get(self, "radius_server_secret")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vpnAuthenticationTypes")
def vpn_authentication_types(self) -> Optional[Sequence[str]]:
"""
VPN authentication types for the VpnServerConfiguration.
"""
return pulumi.get(self, "vpn_authentication_types")
@property
@pulumi.getter(name="vpnClientIpsecPolicies")
def vpn_client_ipsec_policies(self) -> Optional[Sequence['outputs.IpsecPolicyResponse']]:
"""
VpnClientIpsecPolicies for VpnServerConfiguration.
"""
return pulumi.get(self, "vpn_client_ipsec_policies")
@property
@pulumi.getter(name="vpnClientRevokedCertificates")
def vpn_client_revoked_certificates(self) -> Optional[Sequence['outputs.VpnServerConfigVpnClientRevokedCertificateResponse']]:
"""
VPN client revoked certificate of VpnServerConfiguration.
"""
return pulumi.get(self, "vpn_client_revoked_certificates")
@property
@pulumi.getter(name="vpnClientRootCertificates")
def vpn_client_root_certificates(self) -> Optional[Sequence['outputs.VpnServerConfigVpnClientRootCertificateResponse']]:
"""
VPN client root certificate of VpnServerConfiguration.
"""
return pulumi.get(self, "vpn_client_root_certificates")
@property
@pulumi.getter(name="vpnProtocols")
def vpn_protocols(self) -> Optional[Sequence[str]]:
"""
VPN protocols for the VpnServerConfiguration.
"""
return pulumi.get(self, "vpn_protocols")
class AwaitableGetVpnServerConfigurationResult(GetVpnServerConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVpnServerConfigurationResult(
aad_authentication_parameters=self.aad_authentication_parameters,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
p2_s_vpn_gateways=self.p2_s_vpn_gateways,
provisioning_state=self.provisioning_state,
radius_client_root_certificates=self.radius_client_root_certificates,
radius_server_address=self.radius_server_address,
radius_server_root_certificates=self.radius_server_root_certificates,
radius_server_secret=self.radius_server_secret,
tags=self.tags,
type=self.type,
vpn_authentication_types=self.vpn_authentication_types,
vpn_client_ipsec_policies=self.vpn_client_ipsec_policies,
vpn_client_revoked_certificates=self.vpn_client_revoked_certificates,
vpn_client_root_certificates=self.vpn_client_root_certificates,
vpn_protocols=self.vpn_protocols)
def get_vpn_server_configuration(resource_group_name: Optional[str] = None,
vpn_server_configuration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVpnServerConfigurationResult:
"""
VpnServerConfiguration Resource.
:param str resource_group_name: The resource group name of the VpnServerConfiguration.
:param str vpn_server_configuration_name: The name of the VpnServerConfiguration being retrieved.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['vpnServerConfigurationName'] = vpn_server_configuration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20191201:getVpnServerConfiguration', __args__, opts=opts, typ=GetVpnServerConfigurationResult).value
return AwaitableGetVpnServerConfigurationResult(
aad_authentication_parameters=__ret__.aad_authentication_parameters,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
p2_s_vpn_gateways=__ret__.p2_s_vpn_gateways,
provisioning_state=__ret__.provisioning_state,
radius_client_root_certificates=__ret__.radius_client_root_certificates,
radius_server_address=__ret__.radius_server_address,
radius_server_root_certificates=__ret__.radius_server_root_certificates,
radius_server_secret=__ret__.radius_server_secret,
tags=__ret__.tags,
type=__ret__.type,
vpn_authentication_types=__ret__.vpn_authentication_types,
vpn_client_ipsec_policies=__ret__.vpn_client_ipsec_policies,
vpn_client_revoked_certificates=__ret__.vpn_client_revoked_certificates,
vpn_client_root_certificates=__ret__.vpn_client_root_certificates,
vpn_protocols=__ret__.vpn_protocols)
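A minimal usage sketch for the generated helper above; the resource group and configuration names are placeholders, not real resources:

# Hypothetical invocation of get_vpn_server_configuration (placeholder names).
config = get_vpn_server_configuration(
    resource_group_name="example-rg",
    vpn_server_configuration_name="example-vpn-config")
# All readonly fields are populated by the service once the invoke resolves.
pulumi.export("vpnConfigState", config.provisioning_state)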
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'gtest',
'type': 'static_library',
'sources': [
'gtest/include/gtest/gtest-death-test.h',
'gtest/include/gtest/gtest-message.h',
'gtest/include/gtest/gtest-param-test.h',
'gtest/include/gtest/gtest-printers.h',
'gtest/include/gtest/gtest-spi.h',
'gtest/include/gtest/gtest-test-part.h',
'gtest/include/gtest/gtest-typed-test.h',
'gtest/include/gtest/gtest.h',
'gtest/include/gtest/gtest_pred_impl.h',
'gtest/include/gtest/internal/gtest-death-test-internal.h',
'gtest/include/gtest/internal/gtest-filepath.h',
'gtest/include/gtest/internal/gtest-internal.h',
'gtest/include/gtest/internal/gtest-linked_ptr.h',
'gtest/include/gtest/internal/gtest-param-util-generated.h',
'gtest/include/gtest/internal/gtest-param-util.h',
'gtest/include/gtest/internal/gtest-port.h',
'gtest/include/gtest/internal/gtest-string.h',
'gtest/include/gtest/internal/gtest-tuple.h',
'gtest/include/gtest/internal/gtest-type-util.h',
'gtest/src/gtest-all.cc',
'gtest/src/gtest-death-test.cc',
'gtest/src/gtest-filepath.cc',
'gtest/src/gtest-internal-inl.h',
'gtest/src/gtest-port.cc',
'gtest/src/gtest-printers.cc',
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
'multiprocess_func_list.cc',
'multiprocess_func_list.h',
'platform_test.h',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
'gtest',
'gtest/include',
],
'dependencies': [
'gtest_prod',
],
'conditions': [
['OS == "mac" or OS == "ios"', {
'sources': [
'gtest_mac.h',
'gtest_mac.mm',
'platform_test_mac.mm'
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
}],
['OS == "ios"', {
'dependencies' : [
'<(DEPTH)/testing/iossim/iossim.gyp:iossim',
],
'direct_dependent_settings': {
'target_conditions': [
# Turn all tests into bundles on iOS because that's the only
# type of executable supported for iOS.
['_type=="executable"', {
'variables': {
# Use a variable so the path gets fixed up so it is always
# correct when INFOPLIST_FILE finally gets set.
'ios_unittest_info_plist_path':
'<(DEPTH)/testing/gtest_ios/unittest-Info.plist',
},
'mac_bundle': 1,
'xcode_settings': {
'BUNDLE_ID_TEST_NAME':
'>!(echo ">(_target_name)" | sed -e "s/_//g")',
'INFOPLIST_FILE': '>(ios_unittest_info_plist_path)',
},
'mac_bundle_resources': [
'<(ios_unittest_info_plist_path)',
],
'mac_bundle_resources!': [
'<(ios_unittest_info_plist_path)',
],
}],
],
},
}],
['os_posix == 1', {
'defines': [
# gtest isn't able to figure out when RTTI is disabled for gcc
# versions older than 4.3.2, and assumes it's enabled. Our Mac
# and Linux builds disable RTTI, and cannot guarantee that the
# compiler will be 4.3.2. or newer. The Mac, for example, uses
# 4.2.1 as that is the latest available on that platform. gtest
# must be instructed that RTTI is disabled here, and for any
# direct dependents that might include gtest headers.
'GTEST_HAS_RTTI=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_RTTI=0',
],
},
}],
['OS=="android" and android_app_abi=="x86"', {
'defines': [
'GTEST_HAS_CLONE=0',
],
'direct_dependent_settings': {
'defines': [
'GTEST_HAS_CLONE=0',
],
},
}],
['OS=="android"', {
# We want gtest features that use tr1::tuple, but we currently
# don't support the variadic templates used by libstdc++'s
# implementation. gtest supports this scenario by providing its
# own implementation but we must opt in to it.
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
],
'direct_dependent_settings': {
'defines': [
'GTEST_USE_OWN_TR1_TUPLE=1',
],
},
}],
],
'direct_dependent_settings': {
'defines': [
'UNIT_TEST',
],
'include_dirs': [
'gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
'test': 1,
'conditions': [
['OS=="mac"', {
'run_as': {
'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
},
}],
['OS=="ios"', {
'variables': {
# Use a variable so the path gets fixed up so it is always
# correct when the action finally gets used.
'ios_run_unittest_script_path':
'<(DEPTH)/testing/gtest_ios/run-unittest.sh',
},
'run_as': {
'action': ['>(ios_run_unittest_script_path)'],
},
}],
['OS=="win"', {
'run_as': {
'action': ['$(TargetPath)', '--gtest_print_time'],
},
}],
],
}],
],
'msvs_disabled_warnings': [4800],
},
},
{
'target_name': 'gtest_main',
'type': 'static_library',
'dependencies': [
'gtest',
],
'sources': [
'gtest/src/gtest_main.cc',
],
},
{
'target_name': 'gtest_prod',
'toolsets': ['host', 'target'],
'type': 'none',
'sources': [
'gtest/include/gtest/gtest_prod.h',
],
},
],
}
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import List, Optional, TypedDict
from .snowflake import Snowflake
from .user import PartialUser
class TeamMember(TypedDict):
user: PartialUser
membership_state: int
permissions: List[str]
team_id: Snowflake
class Team(TypedDict):
id: Snowflake
name: str
owner_id: Snowflake
members: List[TeamMember]
icon: Optional[str]
|
"""Copy a skeleton directory structure keeping only [trajectory-NEB-results.txt] and endpoint .gins.
Needed as PCs appeared to die copying, zipping, or unzipping large directories.
"""
import os
import shutil
def Copy(sFromDirPath,sToDirPath):
for sEntity in os.listdir(sFromDirPath):
sEntityPath= sFromDirPath+sEntity
if os.path.isdir(sEntityPath):
if sEntity=="channel" or sEntity=="pointer" or sEntity=="travel" or sEntity=="zeo++": continue
sNewDirPath= sToDirPath+sEntity+"/"
Copy(sEntityPath+"/",sNewDirPath)
elif sEntity=="trajectory-NEB-results.txt" or sEntity[-4:]==".gin":
os.makedirs(sToDirPath,exist_ok=True)
shutil.copy(sEntityPath,sToDirPath)
Copy("211220-remainder/","211220-small/")
|
'use strict';
var util = require('./util');
var buildOptions = require('./util').buildOptions;
var XmlNode = require('./xmlNode');
var TagType = { OPENING: 1, CLOSING: 2, SELF: 3, CDATA: 4 };
let regx =
'<((!\\[CDATA\\[([\\s\\S]*?)(]]>))|(([\\w:\\-._]*:)?([\\w:\\-._]+))([^>]*)>|((\\/)(([\\w:\\-._]*:)?([\\w:\\-._]+))\\s*>))([^<]*)';
// var tagsRegx = new RegExp("<(\\/?[\\w:\\-\._]+)([^>]*)>(\\s*"+cdataRegx+")*([^<]+)?","g");
// var tagsRegx = new RegExp("<(\\/?)((\\w*:)?([\\w:\\-\._]+))([^>]*)>([^<]*)("+cdataRegx+"([^<]*))*([^<]+)?","g");
var defaultOptions = {
attributeNamePrefix: '@_',
attrNodeName: false,
textNodeName: '#text',
ignoreAttributes: true,
ignoreNameSpace: false,
allowBooleanAttributes: false, // a tag can have attributes without any value
// ignoreRootElement : false,
parseNodeValue: true,
parseAttributeValue: false,
arrayMode: false,
trimValues: true, // Trim string values of tag and attributes
cdataTagName: false,
cdataPositionChar: '\\c',
localeRange: '',
tagValueProcessor: function (a) {
return a;
},
attrValueProcessor: function (a) {
return a;
},
stopNodes: []
// decodeStrict: false,
};
exports.defaultOptions = defaultOptions;
var props = [
'attributeNamePrefix',
'attrNodeName',
'textNodeName',
'ignoreAttributes',
'ignoreNameSpace',
'allowBooleanAttributes',
'parseNodeValue',
'parseAttributeValue',
'arrayMode',
'trimValues',
'cdataTagName',
'cdataPositionChar',
'localeRange',
'tagValueProcessor',
'attrValueProcessor',
'parseTrueNumberOnly',
'stopNodes'
];
exports.props = props;
var getTraversalObj = function (xmlData, options) {
options = buildOptions(options, defaultOptions, props);
// xmlData = xmlData.replace(/\r?\n/g, " ");//make it single line
xmlData = xmlData.replace(/<!--[\s\S]*?-->/g, ''); // Remove comments
var xmlObj = new XmlNode('!xml');
let currentNode = xmlObj;
regx = regx.replace(/\[\\w/g, '[' + options.localeRange + '\\w');
var tagsRegx = new RegExp(regx, 'g');
let tag = tagsRegx.exec(xmlData);
let nextTag = tagsRegx.exec(xmlData);
while (tag) {
var tagType = checkForTagType(tag);
if (tagType === TagType.CLOSING) {
// add parsed data to parent node
if (currentNode.parent && tag[14]) {
currentNode.parent.val = util.getValue(currentNode.parent.val) + '' + processTagValue(tag[14], options);
}
if (options.stopNodes.length && options.stopNodes.includes(currentNode.tagname)) {
currentNode.child = [];
if (currentNode.attrsMap === undefined) { currentNode.attrsMap = {}; }
currentNode.val = xmlData.substr(currentNode.startIndex + 1, tag.index - currentNode.startIndex - 1);
}
currentNode = currentNode.parent;
} else if (tagType === TagType.CDATA) {
if (options.cdataTagName) {
// add cdata node
var childNodeCdata = new XmlNode(options.cdataTagName, currentNode, tag[3]);
childNodeCdata.attrsMap = buildAttributesMap(tag[8], options);
currentNode.addChild(childNodeCdata);
// for backtracking
currentNode.val = util.getValue(currentNode.val) + options.cdataPositionChar;
// add rest value to parent node
if (tag[14]) {
currentNode.val += processTagValue(tag[14], options);
}
} else {
currentNode.val = (currentNode.val || '') + (tag[3] || '') + processTagValue(tag[14], options);
}
} else if (tagType === TagType.SELF) {
if (currentNode && tag[14]) {
currentNode.val = util.getValue(currentNode.val) + '' + processTagValue(tag[14], options);
}
var childNodeSelf = new XmlNode(options.ignoreNameSpace ? tag[7] : tag[5], currentNode, '');
if (tag[8] && tag[8].length > 0) {
tag[8] = tag[8].substr(0, tag[8].length - 1);
}
childNodeSelf.attrsMap = buildAttributesMap(tag[8], options);
currentNode.addChild(childNodeSelf);
} else {
// TagType.OPENING
var childNode = new XmlNode(
options.ignoreNameSpace ? tag[7] : tag[5],
currentNode,
processTagValue(tag[14], options)
);
if (options.stopNodes.length && options.stopNodes.includes(childNode.tagname)) {
childNode.startIndex = tag.index + tag[1].length;
}
childNode.attrsMap = buildAttributesMap(tag[8], options);
currentNode.addChild(childNode);
currentNode = childNode;
}
tag = nextTag;
nextTag = tagsRegx.exec(xmlData);
}
return xmlObj;
};
function processTagValue(val, options) {
if (val) {
if (options.trimValues) {
val = val.trim();
}
val = options.tagValueProcessor(val);
val = parseValue(val, options.parseNodeValue, options.parseTrueNumberOnly);
}
return val;
}
function checkForTagType(match) {
if (match[4] === ']]>') {
return TagType.CDATA;
} else if (match[10] === '/') {
return TagType.CLOSING;
} else if (typeof match[8] !== 'undefined' && match[8].substr(match[8].length - 1) === '/') {
return TagType.SELF;
}
return TagType.OPENING;
}
function resolveNameSpace(tagname, options) {
if (options.ignoreNameSpace) {
var tags = tagname.split(':');
var prefix = tagname.charAt(0) === '/' ? '/' : '';
if (tags[0] === 'xmlns') {
return '';
}
if (tags.length === 2) {
tagname = prefix + tags[1];
}
}
return tagname;
}
function parseValue(val, shouldParse, parseTrueNumberOnly) {
if (shouldParse && typeof val === 'string') {
let parsed;
if (val.trim() === '' || isNaN(val)) {
parsed = val === 'true' ? true : val === 'false' ? false : val;
    } else {
      // Number() handles plain integers, floats, and hexadecimal ('0x...') strings alike.
      parsed = Number(val);
    }
if (parseTrueNumberOnly) {
parsed = String(parsed) === val ? parsed : val;
}
}
return parsed;
}
if (util.isExist(val)) {
return val;
}
return '';
}
// TODO: change regex to capture NS
// var attrsRegx = new RegExp("([\\w\\-\\.\\:]+)\\s*=\\s*(['\"])((.|\n)*?)\\2","gm");
var attrsRegx = new RegExp('([^\\s=]+)\\s*(=\\s*([\'"])(.*?)\\3)?', 'g');
function buildAttributesMap(attrStr, options) {
if (!options.ignoreAttributes && typeof attrStr === 'string') {
attrStr = attrStr.replace(/\r?\n/g, ' ');
// attrStr = attrStr || attrStr.trim();
var matches = util.getAllMatches(attrStr, attrsRegx);
var len = matches.length; // don't make it inline
var attrs = {};
for (let i = 0; i < len; i++) {
var attrName = resolveNameSpace(matches[i][1], options);
if (attrName.length) {
if (matches[i][4] !== undefined) {
if (options.trimValues) {
matches[i][4] = matches[i][4].trim();
}
matches[i][4] = options.attrValueProcessor(matches[i][4]);
attrs[options.attributeNamePrefix + attrName] = parseValue(
matches[i][4],
options.parseAttributeValue,
options.parseTrueNumberOnly
);
} else if (options.allowBooleanAttributes) {
attrs[options.attributeNamePrefix + attrName] = true;
}
}
}
if (!Object.keys(attrs).length) {
return;
}
if (options.attrNodeName) {
var attrCollection = {};
attrCollection[options.attrNodeName] = attrs;
return attrCollection;
}
return attrs;
}
}
exports.getTraversalObj = getTraversalObj;
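// Minimal usage sketch (comment-only; the XmlNode fields `tagname`, `child`,
// `val`, and `attrsMap` are inferred from the code above):
// var root = exports.getTraversalObj('<a x="1">hi</a>', { ignoreAttributes: false });
// root.tagname === '!xml'
// root.child[0].tagname === 'a'
// root.child[0].val === 'hi'
// root.child[0].attrsMap['@_x'] === '1'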
|
//// Core modules
const fs = require('fs')
//// External modules
const express = require('express')
const fileUpload = require('express-fileupload')
const flash = require('kisapmata')
const phAddress = require('ph-address')
const lodash = require('lodash')
const moment = require('moment')
const qr = require('qr-image')
//// Modules
const db = require('../../db');
const middlewares = require('./api-middlewares');
const s3 = require('../../aws-s3');
// Router
let router = express.Router()
router.use('/api', middlewares.getAuthApp)
router.get('/api/status', async (req, res, next) => {
try {
res.send('Api running...')
} catch (err) {
next(err);
}
});
router.use(require('./application'));
module.exports = router;
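// Usage sketch (hypothetical wiring and file path; the router answers
// GET /api/status plus whatever ./application mounts):
// const app = require('express')();
// app.use(require('./api-routes'));
// app.listen(3000);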
|
from back.aleph.salidas import add_all, add_result
from back.aleph.memento import ConcreteMemento, Memento
from back.aleph.auxiliar import (
encolar,
getIndexPositions,
encargoDaemon,
check_daemons,
freeDaemon,
contPrioridad
)
class QManager:
def __init__(self):
self._state = None
        self.queue_high = []  # TODO: replace with something simpler
self.queue_medium = []
self.queue_low = []
        # Counters for the service policy
self.cont_prioridad_alta = 0
self.cont_prioridad_media = 0
self.cont_prioridad_baja = 0
self.politica = "HIGH"
self.status_daemons = [True, True, True]
def store(self, nodo_info, event, tipo_daemon):
add_result(nodo_info, event.parametros['id_copy'], "#QManager#", "qmanager")
        # add_result(nodo_info, event.parametros['id_copy'], f'The priority is: {event.prioridad}', "qmanager")
elementos = {
'tipo_daemon': tipo_daemon,
'nodo_objetivo': event.nodo_objetivo,
'source': event.source,
'operacion': event.operacion,
'parametros': event.parametros,
'id_daemon_objetivo': event.target_element_id
}
encolar(self, elementos, event.prioridad)
add_result(nodo_info, event.parametros['id_copy'], f"Encola a deamon tipo:{tipo_daemon} Prioridad:{event.prioridad}", "qmanager")
def daemon_do(self, nodo_info, id_copy=None):
if True in self.status_daemons:
despachado = False
while (self.queue_high or self.queue_medium or self.queue_low) and not despachado:
free_daemons = getIndexPositions(self.status_daemons, True)
if self.politica == "HIGH":
if self.queue_high:
iterar_daemon(self, nodo_info, self.queue_high, free_daemons, "HIGH", id_copy)
despachado = True
                else:  # Nothing in the high-priority queue, so switch the policy
                    add_result(nodo_info, id_copy,
                               "Nothing in the high-priority queue; switching policy to medium")
self.politica = "MEDIUM"
if self.politica == "MEDIUM":
if self.queue_medium:
iterar_daemon(self, nodo_info, self.queue_medium, free_daemons, "MEDIUM", id_copy)
despachado = True
                else:
                    add_result(nodo_info, id_copy,
                               "Nothing in the medium-priority queue; switching policy to low")
self.politica = "LOW"
if self.politica == "LOW":
if self.queue_low:
iterar_daemon(self, nodo_info, self.queue_low, free_daemons, "LOW", id_copy)
despachado = True
                else:
                    add_result(nodo_info, id_copy,
                               "Nothing in the low-priority queue; switching policy to high")
self.politica = "HIGH"
            # else:
            #     # print("No pending tasks")
            #     add_all(nodo_info, f'No pending tasks: {self.politica}')
else:
add_all(nodo_info, "No hay demonios disponibles")
def save(self) -> ConcreteMemento:
self._state = "state de qmanager"
return ConcreteMemento({
'queue_high': self.queue_high,
'queue_medium': self.queue_medium,
'queue_low': self.queue_low,
'contador_prioridad_alta': self.cont_prioridad_alta,
'contador_prioridad_media': self.cont_prioridad_media,
'contador_prioridad_baja': self.cont_prioridad_baja,
'politica': self.politica,
'status_daemons': self.status_daemons
})
# return ConcreteMemento(self._state)
def restore(self, memento: Memento):
self._state = memento.get_state()
self.queue_high = self._state['queue_high']
self.queue_medium = self._state['queue_medium']
self.queue_low = self._state['queue_low']
self.cont_prioridad_alta = self._state['contador_prioridad_alta']
self.cont_prioridad_media = self._state['contador_prioridad_media']
self.cont_prioridad_baja = self._state['contador_prioridad_baja']
self.politica = self._state['politica']
self.status_daemons = self._state['status_daemons']
def iterar_daemon(self, nodo_info, queue: list, free_daemons: list, prioridad: str, id_copy: int):
for iterador in range(len(queue)):
        # Inspect the next element in the queue
tipo_daemon = queue[iterador]['tipo_daemon']
if tipo_daemon == 1 and 1 in free_daemons:
if queue[iterador]['id_daemon_objetivo'] is not None:
                # This means the insert was made by a daemon targeting itself.
index_daemon = queue[iterador]['id_daemon_objetivo']
if nodo_info.t1_daemons[index_daemon].status == "FREE":
encargoDaemon(self, nodo_info, prioridad, index_daemon, id_copy)
break
                # else:  # The daemon is not available; move on to the next queue element
                #     print(
                #         f"T1 daemon {queue[iterador]['id_daemon_objetivo']} is not available, clock: {nodo_info.clock}")
                #     continue
else:
get_free_daemon = freeDaemon(nodo_info.t1_daemons)
if get_free_daemon != -1:
                    # add_result(nodo_info, id_copy, f'Work sent to T1Daemon: {get_free_daemon}', "qmanager")
encargoDaemon(self, nodo_info, prioridad, get_free_daemon, id_copy)
                    nodo_info.t1_daemons[get_free_daemon].status = "BUSY"  # To avoid errors
                    # Check whether others are still free besides this one; set the flag to False if none are
check_daemons(self, nodo_info, 1)
break
                # else:  # No daemons available
self.status_daemons[0] = False
add_result(nodo_info, id_copy, f'{free_daemons}', "qmanager")
add_result(nodo_info, id_copy, "Ya no hay T1Daemons", "qmanager")
continue
elif tipo_daemon == 2 and 2 in free_daemons:
if queue[iterador]['id_daemon_objetivo'] is not None:
                # This means the insert was made by a daemon targeting itself.
index_daemon = queue[iterador]['id_daemon_objetivo']
if nodo_info.t2_daemons[index_daemon].status == "FREE":
encargoDaemon(self, nodo_info, prioridad, index_daemon, id_copy)
break
                # else:  # The daemon is not available; move on to the next queue element
                #     print(f"T2 daemon {queue[iterador]['id_daemon_objetivo']} is not available")
                #     continue
else:
get_free_daemon = freeDaemon(nodo_info.t2_daemons)
if get_free_daemon != -1:
                    # add_result(nodo_info, id_copy, f'Work sent to T2Daemon: {get_free_daemon}', "qmanager")
encargoDaemon(self, nodo_info, prioridad, get_free_daemon, id_copy)
nodo_info.t2_daemons[get_free_daemon].status = "BUSY"
check_daemons(self, nodo_info, 2)
break
self.status_daemons[1] = False
add_result(nodo_info, id_copy, "Ya no hay T2Daemons", "qmanager")
continue
        elif tipo_daemon == 3:
            # The type-3 daemon is always available
            get_free_daemon = freeDaemon(nodo_info.t3_daemons)  # There is only one type-3 daemon
            # add_result(nodo_info, id_copy, f'Work sent to T3Daemon: {get_free_daemon}', "qmanager")
encargoDaemon(self, nodo_info, prioridad, get_free_daemon, id_copy)
break
contPrioridad(self, prioridad)
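# Minimal save/restore sketch (comment-only; assumes ConcreteMemento.get_state()
# returns the dict passed to its constructor, which is what restore() above expects):
#
#   qm = QManager()
#   qm.politica = "LOW"
#   snapshot = qm.save()
#   qm.politica = "HIGH"
#   qm.restore(snapshot)
#   assert qm.politica == "LOW"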
|
import {
Toast,
WARNING,
ERROR,
SENTRY
} from '@/modules/toast/handler/handlerToast';
import Vue from 'vue';
const WalletErrorHandler = (errors, warnings) => {
const errorValues = Object.keys(errors);
const warningValues = Object.keys(warnings);
return err => {
const foundError = errorValues.find(item => {
const message =
err && err.message
          ? err.message.hasOwnProperty('message')
? err.message.message
: err.message
: err;
if (!message) return false;
return message.includes(item);
});
    const foundWarning = warningValues.find(item => {
      // Guard against errors without a message, which would otherwise throw here.
      return err && err.message ? String(err.message).includes(item) : false;
    });
if (foundError) {
Toast(Vue.$i18n.t(errors[foundError]), {}, ERROR);
} else if (foundWarning) {
Toast(Vue.$i18n.t(warnings[foundWarning]), {}, WARNING);
} else {
Toast(err, {}, SENTRY);
}
};
};
export default WalletErrorHandler;
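// Usage sketch (hypothetical maps; keys are error-message substrings, values
// are i18n translation keys):
// const handleSendError = WalletErrorHandler(
//   { 'insufficient funds': 'errors.wallet.insufficientFunds' },
//   { 'replacement transaction underpriced': 'warnings.wallet.underpriced' }
// );
// sendTransaction().catch(handleSendError);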
|
from InquirerPy.utils import color_print
import os, ctypes, colorama
from src.startup import Startup
from src.config.constants import Constants
from src.utility_functions import ErrorHandling
colorama.init(autoreset=True)
if __name__ == "__main__":
print()
color_print([("Tomato", f""" _ _ __ __ _____ ____ __ _ _ ____ _ _ ___ ____ ____
( \/ )/__\ ( ) ( _ )( _ \ /__\ ( \( )(_ _)___( \/ )/ __)(_ _)( _ \\
\ //(__)\ )(__ )(_)( ) / /(__)\ ) ( )( (___)\ / \__ \ )( ) /
\/(__)(__)(____)(_____)(_)\_)(__)(__)(_)\_) (__) (__) (___/ (__) (_)\_) """), ("White", f"{Constants.VERSION_VSTR}\n\n"), ("White", "Original code by colinhartigan.\n")])
try:
ctypes.windll.kernel32.SetConsoleTitleW(f"{Constants.PROGRAM_NAME} {Constants.VERSION_VSTR}") # Set window title
user32 = ctypes.WinDLL('user32')
kernel32 = ctypes.WinDLL('kernel32')
window = kernel32.GetConsoleWindow()
kernel32.SetConsoleMode(kernel32.GetStdHandle(-10), (0x4|0x80|0x20|0x2|0x10|0x1|0x00|0x100)) # Disable inputs to console
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7) # Allow ANSI sequences
Startup()
except Exception:
ErrorHandling.handle_error()
os._exit(1)
|
import opensearchpy
import requests
from lxml import etree
import click
import glob
from opensearchpy import OpenSearch
from opensearchpy.helpers import bulk
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.basicConfig(format='%(levelname)s:%(message)s')
# NOTE: this is not a complete list of fields. If you wish to add more, put in the appropriate XPath expression.
#TODO: is there a way to do this using XPath/XSL Functions so that we don't have to maintain a big list?
mappings = [
"productId/text()", "productId",
"sku/text()", "sku",
"name/text()", "name",
"type/text()", "type",
"startDate/text()", "startDate",
"active/text()", "active",
"regularPrice/text()", "regularPrice",
"salePrice/text()", "salePrice",
"onSale/text()", "onSale",
"digital/text()", "digital",
"frequentlyPurchasedWith/*/text()", "frequentlyPurchasedWith",# Note the match all here to get the subfields
"accessories/*/text()", "accessories",# Note the match all here to get the subfields
"relatedProducts/*/text()", "relatedProducts",# Note the match all here to get the subfields
"crossSell/text()", "crossSell",
"salesRankShortTerm/text()", "salesRankShortTerm",
"salesRankMediumTerm/text()", "salesRankMediumTerm",
"salesRankLongTerm/text()", "salesRankLongTerm",
"bestSellingRank/text()", "bestSellingRank",
"url/text()", "url",
"categoryPath/*/name/text()", "categoryPath", # Note the match all here to get the subfields
"categoryPath/*/id/text()", "categoryPathIds", # Note the match all here to get the subfields
"categoryPath/category[last()]/id/text()", "categoryLeaf",
"count(categoryPath/*/name)", "categoryPathCount",
"customerReviewCount/text()", "customerReviewCount",
"customerReviewAverage/text()", "customerReviewAverage",
"inStoreAvailability/text()", "inStoreAvailability",
"onlineAvailability/text()", "onlineAvailability",
"releaseDate/text()", "releaseDate",
"shippingCost/text()", "shippingCost",
"shortDescription/text()", "shortDescription",
"shortDescriptionHtml/text()", "shortDescriptionHtml",
"class/text()", "class",
"classId/text()", "classId",
"subclass/text()", "subclass",
"subclassId/text()", "subclassId",
"department/text()", "department",
"departmentId/text()", "departmentId",
"bestBuyItemId/text()", "bestBuyItemId",
"description/text()", "description",
"manufacturer/text()", "manufacturer",
"modelNumber/text()", "modelNumber",
"image/text()", "image",
"condition/text()", "condition",
"inStorePickup/text()", "inStorePickup",
"homeDelivery/text()", "homeDelivery",
"quantityLimit/text()", "quantityLimit",
"color/text()", "color",
"depth/text()", "depth",
"height/text()", "height",
"weight/text()", "weight",
"shippingWeight/text()", "shippingWeight",
"width/text()", "width",
"longDescription/text()", "longDescription",
"longDescriptionHtml/text()", "longDescriptionHtml",
"features/*/text()", "features" # Note the match all here to get the subfields
]
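# Example of how one pair above is applied in main(): lxml's Element.xpath()
# returns a list for node-set expressions, so most doc fields are list-valued:
#   child.xpath("name/text()")                -> ['Some Product Name'] -> doc['name']
# while count() expressions return a float:
#   child.xpath("count(categoryPath/*/name)") -> 3.0 -> doc['categoryPathCount']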
def get_opensearch():
host = 'localhost'
port = 9200
auth = ('admin', 'admin')
client = OpenSearch(
hosts=[{'host': host, 'port': port}],
http_compress=True, # enables gzip compression for request bodies
http_auth=auth,
# client_cert = client_cert_path,
# client_key = client_key_path,
use_ssl=True,
verify_certs=False,
ssl_assert_hostname=False,
ssl_show_warn=False,
#ca_certs=ca_certs_path
)
return client
@click.command()
@click.option('--source_dir', '-s', help='XML files source directory')
def main(source_dir):
index_name = 'bbuy_products'
client = get_opensearch()
files = glob.glob(source_dir + "/*.xml")
docs_indexed = 0
for file in files:
logger.info(f'Processing file : {file}')
tree = etree.parse(file)
root = tree.getroot()
children = root.findall("./product")
docs = []
for child in children:
doc = {}
for idx in range(0, len(mappings), 2):
xpath_expr = mappings[idx]
key = mappings[idx + 1]
doc[key] = child.xpath(xpath_expr)
#print(doc)
            if 'productId' not in doc or len(doc['productId']) == 0:
continue
docs.append({'_index': index_name, '_id':doc['sku'][0], '_source' : doc})
#docs.append({'_index': index_name, '_source': doc})
docs_indexed += 1
if docs_indexed % 200 == 0:
bulk(client, docs, request_timeout=60)
logger.info(f'{docs_indexed} documents indexed')
docs = []
if len(docs) > 0:
bulk(client, docs, request_timeout=60)
logger.info(f'{docs_indexed} documents indexed')
logger.info(f'Done. Total docs: {docs_indexed}')
if __name__ == "__main__":
main()
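# Usage sketch (file name and path are illustrative; assumes a local OpenSearch
# on port 9200 with the admin/admin credentials from get_opensearch above):
#   python index_products.py --source_dir /path/to/product_xml_files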
|
/*-
* Copyright (c) 1980, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)param.c 8.3 (Berkeley) 8/20/94
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_param.h"
#include "opt_msgbuf.h"
#include "opt_maxusers.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/msgbuf.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
/*
* System parameter formulae.
*/
#ifndef HZ
# if defined(__mips__) || defined(__arm__)
# define HZ 100
# else
# define HZ 1000
# endif
# ifndef HZ_VM
# define HZ_VM 100
# endif
#else
# ifndef HZ_VM
# define HZ_VM HZ
# endif
#endif
#define NPROC (20 + 16 * maxusers)
#ifndef NBUF
#define NBUF 0
#endif
#ifndef MAXFILES
#define MAXFILES (40 + 32 * maxusers)
#endif
static int sysctl_kern_vm_guest(SYSCTL_HANDLER_ARGS);
int hz; /* system clock's frequency */
int tick; /* usec per tick (1000000 / hz) */
struct bintime tick_bt; /* bintime per tick (1s / hz) */
sbintime_t tick_sbt;
int maxusers; /* base tunable */
int maxproc; /* maximum # of processes */
int maxprocperuid; /* max # of procs per user */
int maxfiles; /* sys. wide open files limit */
int maxfilesperproc; /* per-proc open files limit */
int msgbufsize; /* size of kernel message buffer */
int nbuf;
int bio_transient_maxcnt;
int ngroups_max; /* max # groups per process */
int nswbuf;
pid_t pid_max = PID_MAX;
long maxswzone; /* max swmeta KVA storage */
long maxbcache; /* max buffer cache KVA storage */
long maxpipekva; /* Limit on pipe KVA */
int vm_guest = VM_GUEST_NO; /* Running as virtual machine guest? */
u_long maxtsiz; /* max text size */
u_long dfldsiz; /* initial data size limit */
u_long maxdsiz; /* max data size */
u_long dflssiz; /* initial stack size limit */
u_long maxssiz; /* max stack size */
u_long sgrowsiz; /* amount to grow stack */
SYSCTL_INT(_kern, OID_AUTO, hz, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &hz, 0,
"Number of clock ticks per second");
SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &nbuf, 0,
"Number of buffers in the buffer cache");
SYSCTL_INT(_kern, OID_AUTO, nswbuf, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &nswbuf, 0,
"Number of swap buffers");
SYSCTL_INT(_kern, OID_AUTO, msgbufsize, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &msgbufsize, 0,
"Size of the kernel message buffer");
SYSCTL_LONG(_kern, OID_AUTO, maxswzone, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxswzone, 0,
"Maximum memory for swap metadata");
SYSCTL_LONG(_kern, OID_AUTO, maxbcache, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxbcache, 0,
"Maximum value of vfs.maxbufspace");
SYSCTL_INT(_kern, OID_AUTO, bio_transient_maxcnt, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&bio_transient_maxcnt, 0,
"Maximum number of transient BIOs mappings");
SYSCTL_ULONG(_kern, OID_AUTO, maxtsiz, CTLFLAG_RWTUN | CTLFLAG_NOFETCH, &maxtsiz, 0,
"Maximum text size");
SYSCTL_ULONG(_kern, OID_AUTO, dfldsiz, CTLFLAG_RWTUN | CTLFLAG_NOFETCH, &dfldsiz, 0,
"Initial data size limit");
SYSCTL_ULONG(_kern, OID_AUTO, maxdsiz, CTLFLAG_RWTUN | CTLFLAG_NOFETCH, &maxdsiz, 0,
"Maximum data size");
SYSCTL_ULONG(_kern, OID_AUTO, dflssiz, CTLFLAG_RWTUN | CTLFLAG_NOFETCH, &dflssiz, 0,
"Initial stack size limit");
SYSCTL_ULONG(_kern, OID_AUTO, maxssiz, CTLFLAG_RWTUN | CTLFLAG_NOFETCH, &maxssiz, 0,
"Maximum stack size");
SYSCTL_ULONG(_kern, OID_AUTO, sgrowsiz, CTLFLAG_RWTUN | CTLFLAG_NOFETCH, &sgrowsiz, 0,
"Amount to grow stack on a stack fault");
SYSCTL_PROC(_kern, OID_AUTO, vm_guest, CTLFLAG_RD | CTLTYPE_STRING,
NULL, 0, sysctl_kern_vm_guest, "A",
"Virtual machine guest detected?");
/*
* The elements of this array are ordered based upon the values of the
* corresponding enum VM_GUEST members.
*/
static const char *const vm_guest_sysctl_names[] = {
"none",
"generic",
"xen",
"hv",
"vmware",
NULL
};
CTASSERT(nitems(vm_guest_sysctl_names) - 1 == VM_LAST);
/*
* Boot time overrides that are not scaled against main memory
*/
void
init_param1(void)
{
#if !defined(__mips__) && !defined(__arm64__) && !defined(__sparc64__)
TUNABLE_INT_FETCH("kern.kstack_pages", &kstack_pages);
#endif
hz = -1;
TUNABLE_INT_FETCH("kern.hz", &hz);
if (hz == -1)
hz = vm_guest > VM_GUEST_NO ? HZ_VM : HZ;
tick = 1000000 / hz;
tick_sbt = SBT_1S / hz;
tick_bt = sbttobt(tick_sbt);
#ifdef VM_SWZONE_SIZE_MAX
maxswzone = VM_SWZONE_SIZE_MAX;
#endif
TUNABLE_LONG_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
maxbcache = VM_BCACHE_SIZE_MAX;
#endif
TUNABLE_LONG_FETCH("kern.maxbcache", &maxbcache);
msgbufsize = MSGBUF_SIZE;
TUNABLE_INT_FETCH("kern.msgbufsize", &msgbufsize);
maxtsiz = MAXTSIZ;
TUNABLE_ULONG_FETCH("kern.maxtsiz", &maxtsiz);
dfldsiz = DFLDSIZ;
TUNABLE_ULONG_FETCH("kern.dfldsiz", &dfldsiz);
maxdsiz = MAXDSIZ;
TUNABLE_ULONG_FETCH("kern.maxdsiz", &maxdsiz);
dflssiz = DFLSSIZ;
TUNABLE_ULONG_FETCH("kern.dflssiz", &dflssiz);
maxssiz = MAXSSIZ;
TUNABLE_ULONG_FETCH("kern.maxssiz", &maxssiz);
sgrowsiz = SGROWSIZ;
TUNABLE_ULONG_FETCH("kern.sgrowsiz", &sgrowsiz);
/*
* Let the administrator set {NGROUPS_MAX}, but disallow values
* less than NGROUPS_MAX which would violate POSIX.1-2008 or
* greater than INT_MAX-1 which would result in overflow.
*/
ngroups_max = NGROUPS_MAX;
TUNABLE_INT_FETCH("kern.ngroups", &ngroups_max);
if (ngroups_max < NGROUPS_MAX)
ngroups_max = NGROUPS_MAX;
/*
* Only allow to lower the maximal pid.
* Prevent setting up a non-bootable system if pid_max is too low.
*/
TUNABLE_INT_FETCH("kern.pid_max", &pid_max);
if (pid_max > PID_MAX)
pid_max = PID_MAX;
else if (pid_max < 300)
pid_max = 300;
TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", &unmapped_buf_allowed);
}
/*
* Boot time overrides that are scaled against main memory
*/
void
init_param2(long physpages)
{
/* Base parameters */
maxusers = MAXUSERS;
TUNABLE_INT_FETCH("kern.maxusers", &maxusers);
if (maxusers == 0) {
maxusers = physpages / (2 * 1024 * 1024 / PAGE_SIZE);
if (maxusers < 32)
maxusers = 32;
#ifdef VM_MAX_AUTOTUNE_MAXUSERS
if (maxusers > VM_MAX_AUTOTUNE_MAXUSERS)
maxusers = VM_MAX_AUTOTUNE_MAXUSERS;
#endif
/*
	 * Scale back the rate at which maxusers grows once
	 * we pass 384.
*/
if (maxusers > 384)
maxusers = 384 + ((maxusers - 384) / 8);
}
/*
* The following can be overridden after boot via sysctl. Note:
	 * unless overridden, these macros are ultimately based on maxusers.
* Limit maxproc so that kmap entries cannot be exhausted by
* processes.
*/
maxproc = NPROC;
TUNABLE_INT_FETCH("kern.maxproc", &maxproc);
if (maxproc > (physpages / 12))
maxproc = physpages / 12;
if (maxproc > pid_max)
maxproc = pid_max;
maxprocperuid = (maxproc * 9) / 10;
/*
	 * The default limit for maxfiles is 1/8 of the number of
	 * physical pages, but not less than MAXFILES (which scales
	 * with maxusers).  At most it can be 1/4 the number of physical pages.
*/
maxfiles = imax(MAXFILES, physpages / 8);
TUNABLE_INT_FETCH("kern.maxfiles", &maxfiles);
if (maxfiles > (physpages / 4))
maxfiles = physpages / 4;
maxfilesperproc = (maxfiles / 10) * 9;
TUNABLE_INT_FETCH("kern.maxfilesperproc", &maxfilesperproc);
/*
* Cannot be changed after boot.
*/
nbuf = NBUF;
TUNABLE_INT_FETCH("kern.nbuf", &nbuf);
TUNABLE_INT_FETCH("kern.bio_transient_maxcnt", &bio_transient_maxcnt);
/*
* The default for maxpipekva is min(1/64 of the kernel address space,
* max(1/64 of main memory, 512KB)). See sys_pipe.c for more details.
*/
maxpipekva = (physpages / 64) * PAGE_SIZE;
TUNABLE_LONG_FETCH("kern.ipc.maxpipekva", &maxpipekva);
if (maxpipekva < 512 * 1024)
maxpipekva = 512 * 1024;
if (maxpipekva > (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 64)
maxpipekva = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
64;
}
/*
* Sysctl stringifying handler for kern.vm_guest.
*/
static int
sysctl_kern_vm_guest(SYSCTL_HANDLER_ARGS)
{
return (SYSCTL_OUT_STR(req, vm_guest_sysctl_names[vm_guest]));
}
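/*
 * Example (from userland): `sysctl kern.vm_guest` prints one of the strings
 * in vm_guest_sysctl_names[] above, e.g. "none" on bare metal or "vmware"
 * when running under VMware.
 */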
|
--- qcommon/qcommon.h.orig 2006-12-31 19:01:34.000000000 +0200
+++ qcommon/qcommon.h
@@ -50,6 +50,8 @@ Foundation, Inc., 59 Temple Place - Suit
#if defined __FreeBSD__
#define BUILDSTRING "FreeBSD"
+#elif defined __DragonFly__
+#define BUILDSTRING "DragonFly"
#else
#define BUILDSTRING "Linux"
#endif
|
import _plotly_utils.basevalidators
class Yperiod0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="yperiod0", parent_name="box", **kwargs):
super(Yperiod0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
// flow-typed signature: 7bf53f2d625773fe86b682233ab197c8
// flow-typed version: <<STUB>>/pull-glob_v^1.0.6/flow_v0.51.0
/**
* This is an autogenerated libdef stub for:
*
* 'pull-glob'
*
* Fill this stub out by replacing all the `any` types.
*
* Once filled out, we encourage you to share your work with the
* community by sending a pull request to:
* https://github.com/flowtype/flow-typed
*/
declare module 'pull-glob' {
declare module.exports: any;
}
/**
* We include stubs for each file inside this npm package in case you need to
* require those files directly. Feel free to delete any files that aren't
* needed.
*/
declare module 'pull-glob/bin' {
declare module.exports: any;
}
declare module 'pull-glob/example' {
declare module.exports: any;
}
// Filename aliases
declare module 'pull-glob/bin.js' {
declare module.exports: $Exports<'pull-glob/bin'>;
}
declare module 'pull-glob/example.js' {
declare module.exports: $Exports<'pull-glob/example'>;
}
declare module 'pull-glob/index' {
declare module.exports: $Exports<'pull-glob'>;
}
declare module 'pull-glob/index.js' {
declare module.exports: $Exports<'pull-glob'>;
}
|
/*
* Copyright 2014-2016 Nippon Telegraph and Telephone Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __LAGOPUS_TYPES_H__
#define __LAGOPUS_TYPES_H__
#include <stdint.h>
/**
* @file lagopus_types.h
*/
/**
* @details The result type.
*/
typedef int64_t lagopus_result_t;
/**
* @details The flat nano second expression of the time, mainly
* acquired by \b clock_gettime(). For arithmetic operations, the sign
* extension is needed.
*/
typedef int64_t lagopus_chrono_t;
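/*
 * Example: flattening a struct timespec from clock_gettime() into a
 * lagopus_chrono_t as described above (a sketch, not part of this header):
 *
 *   lagopus_chrono_t now =
 *       (lagopus_chrono_t)ts.tv_sec * 1000LL * 1000LL * 1000LL +
 *       (lagopus_chrono_t)ts.tv_nsec;
 */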
#endif /* ! __LAGOPUS_TYPES_H__ */
|
/**
* @module modifiers/snapEdges
*
* @description
* This module allows snapping of the edges of targets during resize
* interactions.
*
* @example
* interact(target).resizable({
* snapEdges: {
* targets: [interact.snappers.grid({ x: 100, y: 50 })],
* },
* });
*
* interact(target).resizable({
* snapEdges: {
* targets: [
* interact.snappers.grid({
* top: 50,
* left: 50,
* bottom: 100,
* right: 100,
* }),
* ],
* },
* });
*/
import clone from '@interactjs/utils/clone';
import extend from '@interactjs/utils/extend';
import snapSize from './size';
function install (scope) {
const {
defaults,
} = scope;
defaults.perAction.snapEdges = snapEdges.defaults;
}
function start (arg) {
const edges = arg.interaction.prepared.edges;
if (!edges) { return null; }
arg.state.targetFields = arg.state.targetFields || [
[edges.left ? 'left' : 'right', edges.top ? 'top' : 'bottom'],
];
return snapSize.start(arg);
}
function set (arg) {
return snapSize.set(arg);
}
function modifyCoords (arg) {
snapSize.modifyCoords(arg);
}
const snapEdges = {
install,
start,
set,
modifyCoords,
defaults: extend(clone(snapSize.defaults), {
offset: { x: 0, y: 0 },
}),
};
export default snapEdges;
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.mockTreeGridProps = exports.EMPTY_VIEW_TEXT_CONTENT = void 0;
var _react = _interopRequireDefault(require("react"));
var _fastCheck = _interopRequireDefault(require("fast-check"));
var _function = require("fp-ts/lib/function");
var Tree = _interopRequireWildcard(require("fp-ts/lib/Tree"));
var _fpTsImports = require("../../../../sharedHelpers/fp-ts-imports");
var _exports = require("../../../../exports");
var Treex = _interopRequireWildcard(require("../../../../sharedHelpers/fp-ts-ext/Tree"));
var _Tweet = require("./Tweet.mock");
function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function () { return cache; }; return cache; }
function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { default: obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj.default = obj; if (cache) { cache.set(obj, newObj); } return newObj; }
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
const EMPTY_VIEW_TEXT_CONTENT = 'You can render anything here, a loading indicator, error message, etc.';
exports.EMPTY_VIEW_TEXT_CONTENT = EMPTY_VIEW_TEXT_CONTENT;
const mockTreeGridProps = {
ariaLabel: {
tag: 'label',
value: 'Tweets'
},
content: (0, _function.pipe)(_fastCheck.default.sample(_Tweet.arbitraryTweetThread, 50), _fpTsImports.A.mapWithIndex((ix, tree) => {
/** Guarantee the first tree in the forest has at least one child node */
return ix === 0 ? { ...tree,
forest: (0, _function.pipe)(tree.forest, _fpTsImports.A.cons(Tree.make(_fastCheck.default.sample(_Tweet.arbitraryTweet, 1)[0])))
} : tree;
}), Treex.mapForest(tweet => ({
key: tweet.id,
value: tweet
})), _fpTsImports.E.right),
expandCollapse: {
tag: 'Uncontrolled'
},
columns: _fpTsImports.NEA.cons({
id: 'authorId',
renderHeader: () => /*#__PURE__*/_react.default.createElement(_exports.Text, null, "Author Id"),
renderCell: node => node.value.authorId
}, [{
id: 'createdAt',
renderHeader: () => /*#__PURE__*/_react.default.createElement(_exports.Text, null, "Date"),
renderCell: node => node.value.createdAt.toLocaleString()
}, {
id: 'content',
renderHeader: () => /*#__PURE__*/_react.default.createElement(_exports.Text, null, "Content"),
renderCell: node => node.value.content
}])
};
exports.mockTreeGridProps = mockTreeGridProps;
|
#!/usr/bin/env python3
if __name__ == "__main__":
for word in [input() for _ in range(int(input()))]:
print(len([i for i in range(len(word) - 1) if word[i] == word[i + 1]]))
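# Example: for the input "3\naab\nabc\naaa" this prints 1, 0, 2, i.e. the count
# of adjacent equal-character pairs in each word.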
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import unittest
from rmgpy.molecule.molecule import Molecule
from rmgpy.molecule.resonance import generate_resonance_structures
from rmgpy.molecule.filtration import get_octet_deviation_list, get_octet_deviation, filter_structures, charge_filtration, get_charge_span_list
################################################################################
class FiltrationTest(unittest.TestCase):
    def test_basic_filtration(self):
"""Test that structures with higher octet deviation get filtered out"""
adj1 = """
multiplicity 2
1 N u0 p1 c0 {2,D} {3,S}
2 O u0 p2 c0 {1,D}
3 O u1 p2 c0 {1,S}
"""
adj2 = """
multiplicity 2
1 N u1 p1 c0 {2,S} {3,S}
2 O u0 p2 c+1 {1,S}
3 O u0 p3 c-1 {1,S}
"""
adj3 = """
multiplicity 2
1 O u1 p2 c0 {3,S}
2 O u0 p3 c-1 {3,S}
3 N u0 p1 c+1 {1,S} {2,S}
"""
mol1 = Molecule().fromAdjacencyList(adj1)
mol2 = Molecule().fromAdjacencyList(adj2)
mol3 = Molecule().fromAdjacencyList(adj3)
mol_list = [mol1,mol2,mol3]
octet_deviation_list = get_octet_deviation_list(mol_list)
filtered_list = filter_structures(mol_list)
self.assertEqual(octet_deviation_list,[1, 3, 3])
self.assertEqual(len(filtered_list), 1)
self.assertTrue(all([atom.charge == 0 for atom in filtered_list[0].vertices]))
    def test_penalty_for_O4tc(self):
"""Test that an O4tc atomType with octet 8 gets penalized in the electronegativity heuristic"""
adj = """
1 S u0 p1 c0 {2,S} {3,T}
2 O u0 p3 c-1 {1,S}
3 O u0 p1 c+1 {1,T}
"""
mol = Molecule().fromAdjacencyList(adj)
octet_deviation = get_octet_deviation(mol)
self.assertEqual(octet_deviation, 0)
self.assertEqual(mol.vertices[2].atomType.label, 'O4tc')
mol_list = generate_resonance_structures(mol)
self.assertEqual(len(mol_list), 2)
for mol in mol_list:
if mol.reactive:
for atom in mol.vertices:
self.assertTrue(atom.charge == 0)
    def test_penalty_birads_replacing_lone_pairs(self):
"""Test that birads on `S u2 p0` are penalized"""
adj = """
multiplicity 3
1 S u2 p0 c0 {2,D} {3,D}
2 O u0 p2 c0 {1,D}
3 O u0 p2 c0 {1,D}
"""
mol = Molecule().fromAdjacencyList(adj)
mol.update()
mol_list = generate_resonance_structures(mol, keep_isomorphic=False, filter_structures=True)
for mol in mol_list:
if mol.reactive:
for atom in mol.vertices:
if atom.isSulfur():
                        self.assertNotEqual(atom.radicalElectrons, 2)
self.assertEqual(len(mol_list), 3)
self.assertEqual(sum([1 for mol in mol_list if mol.reactive]), 2)
    def test_penalty_for_s_triple_s(self):
"""Test that an S#S substructure in a molecule gets penalized in the octet deviation score"""
adj = """
1 C u0 p0 c0 {3,S} {5,S} {6,S} {7,S}
2 C u0 p0 c0 {4,S} {8,S} {9,S} {10,S}
3 S u0 p0 c0 {1,S} {4,T} {11,D}
4 S u0 p1 c0 {2,S} {3,T}
5 H u0 p0 c0 {1,S}
6 H u0 p0 c0 {1,S}
7 H u0 p0 c0 {1,S}
8 H u0 p0 c0 {2,S}
9 H u0 p0 c0 {2,S}
10 H u0 p0 c0 {2,S}
11 O u0 p2 c0 {3,D}
"""
mol = Molecule().fromAdjacencyList(adj)
octet_deviation = get_octet_deviation(mol)
self.assertEqual(octet_deviation, 3.0)
    def test_radical_site(self):
"""Test that a charged molecule isn't filtered if it introduces new radical site"""
adj1 = """
multiplicity 2
1 O u1 p2 c0 {3,S}
2 O u0 p2 c0 {3,D}
3 N u0 p1 c0 {1,S} {2,D}
"""
adj2 = """
multiplicity 2
1 O u0 p3 c-1 {3,S}
2 O u0 p2 c0 {3,D}
3 N u1 p0 c+1 {1,S} {2,D}
"""
adj3 = """
multiplicity 2
1 O u1 p2 c0 {3,S}
2 O u0 p3 c-1 {3,S}
3 N u0 p1 c+1 {1,S} {2,S}
"""
mol_list = [Molecule().fromAdjacencyList(adj1),
Molecule().fromAdjacencyList(adj2),
Molecule().fromAdjacencyList(adj3)]
for mol in mol_list:
mol.update() # the charge_filtration uses the atom.sortingLabel attribute
filtered_list = charge_filtration(mol_list, get_charge_span_list(mol_list))
self.assertEqual(len(filtered_list), 2)
self.assertTrue(any([mol.getChargeSpan() == 1 for mol in filtered_list]))
for mol in filtered_list:
if mol.getChargeSpan() == 1:
for atom in mol.vertices:
if atom.charge == -1:
self.assertTrue(atom.isOxygen())
if atom.charge == 1:
self.assertTrue(atom.isNitrogen())
    def test_electronegativity(self):
"""Test that structures with charge separation are only kept if they obey the electronegativity rule
(If a structure must have charge separation, negative charges will be assigned to more electronegative atoms,
whereas positive charges will be assigned to less electronegative atoms)
In this test, only the three structures with no charge separation and the structure where both partial charges
are on the nitrogen atoms should be kept."""
adj1 = """
multiplicity 2
1 N u0 p1 c0 {2,S} {3,D}
2 N u1 p1 c0 {1,S} {4,S}
3 S u0 p1 c0 {1,D} {5,D}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {3,D}
"""
adj2 = """
multiplicity 2
1 N u0 p1 c0 {2,D} {3,S}
2 N u0 p1 c0 {1,D} {4,S}
3 S u1 p1 c0 {1,S} {5,D}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {3,D}
"""
adj3 = """
multiplicity 2
1 N u0 p1 c0 {2,D} {3,S}
2 N u0 p1 c0 {1,D} {4,S}
3 S u0 p2 c0 {1,S} {5,S}
4 H u0 p0 c0 {2,S}
5 O u1 p2 c0 {3,S}
"""
adj4 = """
multiplicity 2
1 N u1 p0 c+1 {2,S} {3,D}
2 N u0 p2 c-1 {1,S} {4,S}
3 S u0 p1 c0 {1,D} {5,D}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {3,D}
"""
adj5 = """
multiplicity 2
1 N u1 p0 c+1 {2,D} {3,S}
2 N u0 p1 c0 {1,D} {4,S}
3 S u0 p2 c-1 {1,S} {5,D}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {3,D}
"""
adj6 = """
multiplicity 2
1 N u1 p1 c0 {2,S} {3,S}
2 N u0 p2 c-1 {1,S} {4,S}
3 S u0 p1 c+1 {1,S} {5,D}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {3,D}
"""
adj7 = """
multiplicity 2
1 N u1 p0 c+1 {2,D} {3,S}
2 N u0 p1 c0 {1,D} {4,S}
3 S u0 p2 c0 {1,S} {5,S}
4 H u0 p0 c0 {2,S}
5 O u0 p3 c-1 {3,S}
"""
mol_list = [Molecule().fromAdjacencyList(adj1),
Molecule().fromAdjacencyList(adj2),
Molecule().fromAdjacencyList(adj3),
Molecule().fromAdjacencyList(adj4),
Molecule().fromAdjacencyList(adj5),
Molecule().fromAdjacencyList(adj6),
Molecule().fromAdjacencyList(adj7)]
for mol in mol_list:
mol.update() # the charge_filtration uses the atom.sortingLabel attribute
filtered_list = charge_filtration(mol_list, get_charge_span_list(mol_list))
self.assertEqual(len(filtered_list), 4)
self.assertTrue(any([mol.getChargeSpan() == 1 for mol in filtered_list]))
for mol in filtered_list:
if mol.getChargeSpan() == 1:
for atom in mol.vertices:
if abs(atom.charge) == 1:
self.assertTrue(atom.isNitrogen())
|
(function(root, factory) {
if (typeof define === 'function' && define.amd) {
// AMD.
define(['expect.js', process.cwd()+'/src/index'], factory);
} else if (typeof module === 'object' && module.exports) {
// CommonJS-like environments that support module.exports, like Node.
factory(require('expect.js'), require(process.cwd()+'/src/index'));
} else {
// Browser globals (root is window)
factory(root.expect, root.ApacheFineract);
}
}(this, function(expect, ApacheFineract) {
'use strict';
var instance;
beforeEach(function() {
instance = new ApacheFineract.InteropQuoteResponseData();
});
var getProperty = function(object, getter, property) {
// Use getter method if present; otherwise, get the property directly.
if (typeof object[getter] === 'function')
return object[getter]();
else
return object[property];
}
var setProperty = function(object, setter, property, value) {
// Use setter method if present; otherwise, set the property directly.
if (typeof object[setter] === 'function')
object[setter](value);
else
object[property] = value;
}
describe('InteropQuoteResponseData', function() {
it('should create an instance of InteropQuoteResponseData', function() {
// uncomment below and update the code to test InteropQuoteResponseData
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be.a(ApacheFineract.InteropQuoteResponseData);
});
it('should have the property officeId (base name: "officeId")', function() {
// uncomment below and update the code to test the property officeId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property groupId (base name: "groupId")', function() {
// uncomment below and update the code to test the property groupId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property clientId (base name: "clientId")', function() {
// uncomment below and update the code to test the property clientId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property loanId (base name: "loanId")', function() {
// uncomment below and update the code to test the property loanId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property savingsId (base name: "savingsId")', function() {
// uncomment below and update the code to test the property savingsId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property subResourceId (base name: "subResourceId")', function() {
// uncomment below and update the code to test the property subResourceId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property transactionId (base name: "transactionId")', function() {
// uncomment below and update the code to test the property transactionId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property changes (base name: "changes")', function() {
// uncomment below and update the code to test the property changes
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property productId (base name: "productId")', function() {
// uncomment below and update the code to test the property productId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property gsimId (base name: "gsimId")', function() {
// uncomment below and update the code to test the property gsimId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property glimId (base name: "glimId")', function() {
// uncomment below and update the code to test the property glimId
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property rollbackTransaction (base name: "rollbackTransaction")', function() {
// uncomment below and update the code to test the property rollbackTransaction
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property transactionCode (base name: "transactionCode")', function() {
// uncomment below and update the code to test the property transactionCode
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property state (base name: "state")', function() {
// uncomment below and update the code to test the property state
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property expiration (base name: "expiration")', function() {
// uncomment below and update the code to test the property expiration
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property extensionList (base name: "extensionList")', function() {
// uncomment below and update the code to test the property extensionList
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property quoteCode (base name: "quoteCode")', function() {
// uncomment below and update the code to test the property quoteCode
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property fspFee (base name: "fspFee")', function() {
// uncomment below and update the code to test the property fspFee
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
it('should have the property fspCommission (base name: "fspCommission")', function() {
// uncomment below and update the code to test the property fspCommission
//var instane = new ApacheFineract.InteropQuoteResponseData();
//expect(instance).to.be();
});
});
}));
|
var arrayLikeToArray = require("./arrayLikeToArray.js");
function _unsupportedIterableToArray(o, minLen) {
if (!o) return;
if (typeof o === "string") return arrayLikeToArray(o, minLen);
var n = Object.prototype.toString.call(o).slice(8, -1);
if (n === "Object" && o.constructor) n = o.constructor.name;
if (n === "Map" || n === "Set") return Array.from(o);
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return arrayLikeToArray(o, minLen);
}
module.exports = _unsupportedIterableToArray;
module.exports["default"] = module.exports, module.exports.__esModule = true;
|
const util = require('./util');
const should = require('should');
describe('the util function', () => {
  it('adds A + B.', () => {
const a = 1;
const b = 2;
const result = util(a)(b);
result.should.be.equal(3);
});
});
|
/**
***********************************************************************************************
* @file Haply_Arduino_Control.h
* @author Steve Ding, Colin Gallacher
* @version V0.1.0
* @date 27-February-2017
* @brief constants and helper functions for actuator control
***********************************************************************************************
* @attention
*
*
***********************************************************************************************
*/
/* Data length definitons **********************************************************************/
/* maximum number of actuators available on board for control */
#define TOTAL_ACTUATORS 4
/* number of control parameters per actuator */
#define ACTUATOR_PARAMETERS 2
/* Encoder pin definitions *********************************************************************/
#define ENCPIN1_1 28 // J2
#define ENCPIN1_2 29
#define ENCPIN2_1 24 // J3
#define ENCPIN2_2 25
#define ENCPIN3_1 36 // J4
#define ENCPIN3_2 37
#define ENCPIN4_1 32 // J5
#define ENCPIN4_2 33
/* PWM definitions block ***********************************************************************/
#define PWMPIN1 9
#define DIRPIN1 26
#define PWMPIN2 8
#define DIRPIN2 22
#define PWMPIN3 6
#define DIRPIN3 34
#define PWMPIN4 7
#define DIRPIN4 30
#define PWMFREQ 40000
#define GAIN 44.86
#define VMAX 5.5
/* Actuator struct definitions *****************************************************************/
typedef struct motor{
float Enc_offset;
float Enc_resolution;
int pwmPin;
int dirPin;
Encoder *Enc;
}actuator;
/* Receive function definitions ****************************************************************/
byte command_instructions(byte control, int *number, byte active[]);
byte receive_parameters(byte a1[], byte a2[], byte a3[], byte a4[], int number, byte actuators[]);
byte receive_torques(float *t1, float *t2, float *t3, float *t4, int number, byte actuators[]);
/* Send function definitions *******************************************************************/
void send_reply(byte device_address, byte motors_active[]);
void send_encoders_data(float a1, float a2, float a3, float a4, int number, byte device_address, byte actuators[]);
/* Device control function definitions *********************************************************/
byte setup_actuators(actuator *m1, actuator *m2, actuator *m3, actuator *m4, int number, byte active[]);
void initialize_actuator(actuator *mtr, byte parameters[], int pwm, int dir, int enc1, int enc2);
byte write_torques(actuator *m1, actuator *m2, actuator *m3, actuator *m4, int number, byte actuators[]);
void create_torque(actuator *mtr, float torque);
void read_encoders(actuator *m1, actuator *m2, actuator *m3, actuator *m4, int number, byte address, byte actuators[]);
float read_encoder(actuator *mtr);
/* Helper function definitions *****************************************************************/
void FloatToBytes(float val, byte segments[]);
float BytesToFloat(byte segments[]);
void ArrayCopy(byte src[], int src_index, byte dest[], int dest_index, int len );
/* Receive functions ***************************************************************************/
/**
* Decipher command instructions
*
* @note updates the active-motor flags and the active-motor count
* @param control: input header byte to be parsed
* @param number: output pointer, receives the number of active motors
* @param active: active motors array indicator
* @return command code indicating communication type
*/
byte command_instructions(byte control, int *number, byte active[]){
int j = 0;
for(int i = 0; i < TOTAL_ACTUATORS; i++){
active[i] = control &0x01;
control = control >> 1;
if(active[i] > 0){
j++;
}
}
*number = j;
return control;
}
/**
* Parses and receives initial encoder values
*
* @note Will log active actuators for use
* @param a1: encoder 1 setup value
* @param a2: encoder 2 setup value
* @param a3: encoder 3 setup value
* @param a4: encoder 4 setup value
* @param number: number of motors active
* @param actuators: active actuator positions
* @return device_address
*/
byte receive_parameters(byte a1[], byte a2[], byte a3[], byte a4[], int number, byte actuators[]){
/* Determine incoming setup parameters datalength */
int data_length = number * 4 * ACTUATOR_PARAMETERS + 1;
/* Incoming parameters array */
byte actuator_parameters[data_length];
SerialUSB.readBytes(actuator_parameters, data_length);
int j = 1;
/* Cycle through all possible actuators and activate relevant actuators */
for(int i = 0; i < TOTAL_ACTUATORS; i++){
if(actuators[i] > 0){
switch (i){
case 0:
ArrayCopy(actuator_parameters, j, a1, 0, 4*ACTUATOR_PARAMETERS);
j = j + 4 * ACTUATOR_PARAMETERS;
break;
case 1:
ArrayCopy(actuator_parameters, j, a2, 0, 4*ACTUATOR_PARAMETERS);
j = j + 4 * ACTUATOR_PARAMETERS;
break;
case 2:
ArrayCopy(actuator_parameters, j, a3, 0, 4*ACTUATOR_PARAMETERS);
j = j + 4 * ACTUATOR_PARAMETERS;
break;
case 3:
ArrayCopy(actuator_parameters, j, a4, 0, 4*ACTUATOR_PARAMETERS);
break;
}
}
}
return actuator_parameters[0];
}
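/*
 * Example packet size for two active actuators (number = 2) with
 * ACTUATOR_PARAMETERS = 2: data_length = 2 * 4 * 2 + 1 = 17 bytes, i.e. one
 * address byte followed by two 4-byte floats (offset and resolution) per
 * active actuator, matching the layout parsed above.
 */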
/**
* Parses and receives torque values from the simulation
*
* @note Will log active actuators for use
* @param t1: torque 1 value to be extracted
* @param t2: torque 2 value to be extracted
* @param t3: torque 3 value to be extracted
* @param t4: torque 4 value to be extracted
* @param number: number of motors active
* @param actuators: active actuator positions
* @return device address
*/
byte receive_torques(float *t1, float *t2, float *t3, float *t4, int number, byte actuators[]){
int data_length = number * 4 + 1;
byte segments[4];
byte torque_values[data_length];
SerialUSB.readBytes(torque_values, data_length);
int j = 1;
for(int i = 0; i < TOTAL_ACTUATORS; i++){
if(actuators[i] > 0){
switch(i){
case 0:
ArrayCopy(torque_values, j, segments, 0, 4);
*t1 = BytesToFloat(segments);
j = j + 4;
break;
case 1:
ArrayCopy(torque_values, j, segments, 0, 4);
*t2 = BytesToFloat(segments);
j = j + 4;
break;
case 2:
ArrayCopy(torque_values, j, segments, 0, 4);
*t3 = BytesToFloat(segments);
j = j + 4;
break;
case 3:
ArrayCopy(torque_values, j, segments, 0, 4);
*t4 = BytesToFloat(segments);
break;
}
}
}
return torque_values[0];
}
/* Send functions ******************************************************************************/
/**
* Formats and sends the verification reply after setup
*
* @note response to setup command
* @param device_address: address of device that setup board
* @param motors_active: per-motor active-state flags (4 bytes)
* @return None
*/
void send_reply(byte device_address, byte motors_active[]){
byte outData[5];
outData[0] = device_address;
ArrayCopy(motors_active, 0, outData, 1, 4);
SerialUSB.write(outData, 5);
}
/**
* Formats and sends encoder values over Serial
*
* @note Will only send encoder values of active actuators
* @param a1: encoder 1 value
* @param a2: encoder 2 value
* @param a3: encoder 3 value
* @param a4: encoder 4 value
* @param number: number of motors active
* @param actuators: active actuator positions
* @param device_address: address of device that requested data
* @return None
*/
void send_encoders_data(float a1, float a2, float a3, float a4, int number, byte device_address, byte actuators[]){
byte segments[4];
byte outData[number*4+1];
outData[0] = device_address;
int j = 1;
for(int i = 0; i < TOTAL_ACTUATORS; i++){
if(actuators[i] > 0){
switch(i){
case 0:
FloatToBytes(a1, segments);
ArrayCopy(segments, 0, outData, j, 4);
j = j + 4;
break;
case 1:
FloatToBytes(a2, segments);
ArrayCopy(segments, 0, outData, j, 4);
j = j + 4;
break;
case 2:
FloatToBytes(a3, segments);
ArrayCopy(segments, 0, outData, j, 4);
j = j + 4;
break;
case 3:
FloatToBytes(a4, segments);
ArrayCopy(segments, 0, outData, j, 4);
break;
}
}
}
SerialUSB.write(outData, number*4+1);
}
/* Device control functions ********************************************************************/
/**
* Sets up actuators that are to be used based on initial activation command
*
* @note Function calls subsequent functions which individually sets up each actuator
* @param m1: actuator1 struct
* @param m2: actuator2 struct
* @param m3: actuator3 struct
* @param m4: actuator4 struct
* @param number: number of motors active
* @param actuators: positions of motors active
* @return device_address: address of device sending setup data
*/
byte setup_actuators(actuator *m1, actuator *m2, actuator *m3, actuator *m4, int number, byte actuators[]){
/* address for device */
byte address;
/* set pwm resolution to 12-bits */
pwm_set_resolution(12);
/* declare parameter array for each actuator */
byte a1[4*ACTUATOR_PARAMETERS];
byte a2[4*ACTUATOR_PARAMETERS];
byte a3[4*ACTUATOR_PARAMETERS];
byte a4[4*ACTUATOR_PARAMETERS];
address = receive_parameters(a1, a2, a3, a4, number, actuators);
for(int i = 0; i < TOTAL_ACTUATORS; i++){
if(actuators[i] > 0){
switch(i){
case 0:
initialize_actuator(m1, a1, PWMPIN3, DIRPIN3, ENCPIN3_1, ENCPIN3_2);
break;
case 1:
initialize_actuator(m2, a2, PWMPIN2, DIRPIN2, ENCPIN2_1, ENCPIN2_2);
break;
case 2:
initialize_actuator(m3, a3, PWMPIN1, DIRPIN1, ENCPIN1_1, ENCPIN1_2);
break;
case 3:
initialize_actuator(m4, a4, PWMPIN4, DIRPIN4, ENCPIN4_1, ENCPIN4_2);
break;
}
}
}
return address;
}
/**
* Initialize an actuator and a corresponding Encoder for use
*
 * @note Currently a prototype
* @param *mtr: pointer to actuator struct for parameters access
* @param parameters[]: actuator input parameters for one actuator
* @param pwm: pin for PWM control
* @param dir: pin for direction control
* @return none
*/
void initialize_actuator(actuator *mtr, byte parameters[], int pwm, int dir, int enc1, int enc2){
int i = 0;
byte actuator_value[4];
mtr->pwmPin = pwm;
mtr->dirPin = dir;
pinMode(pwm, OUTPUT);
pinMode(dir, OUTPUT);
pwm_setup(pwm, PWMFREQ, 1);
mtr->Enc = new Encoder(enc1, enc2);
ArrayCopy(parameters, i, actuator_value, 0, 4);
mtr->Enc_offset = BytesToFloat(actuator_value);
i = i + 4;
ArrayCopy(parameters, i, actuator_value, 0, 4);
mtr->Enc_resolution = BytesToFloat(actuator_value);
mtr->Enc->write(mtr->Enc_offset * mtr->Enc_resolution / 360);
}
/**
* Write torque to be generated to motors
*
* @note Will only write to active actuators
* @param m1: actuator1 struct
* @param m2: actuator2 struct
* @param m3: actuator3 struct
* @param m4: actuator4 struct
* @param number: number of motors active
* @param actuators: positions of motors active
* @return device_address: address of device sending torque request
*/
byte write_torques(actuator *m1, actuator *m2, actuator *m3, actuator *m4, int number, byte actuators[]){
byte address;
float torque1, torque2, torque3, torque4;
address = receive_torques(&torque1, &torque2, &torque3, &torque4, number, actuators);
for(int i = 0; i < TOTAL_ACTUATORS; i++){
if(actuators[i] > 0){
switch(i){
case 0:
create_torque(m1, torque1);
break;
case 1:
create_torque(m2, torque2);
break;
case 2:
create_torque(m3, torque3);
break;
case 3:
create_torque(m4, torque4);
break;
}
}
}
return address;
}
/**
 * Generates a torque at the stated actuator
 *
 * @note Currently a prototype
* @param *mtr: pointer to actuator struct for parameters access
* @param torque: torque to be created
* @return None
*/
void create_torque(actuator *mtr, float torque){
int duty;
if(torque <= 0){
digitalWrite(mtr->dirPin, HIGH);
}
else{
digitalWrite(mtr->dirPin, LOW);
}
torque = abs(torque);
if(torque > 0.123){ //Nm
torque = 0.123;
}
duty = 4095 * torque /0.123;
pwm_write_duty(mtr->pwmPin, duty);
}
/**
* Determine current angle seen by encoders and send data
*
* @note Will only read active actuator encoder values
 * @param m1: actuator1 struct
 * @param m2: actuator2 struct
 * @param m3: actuator3 struct
 * @param m4: actuator4 struct
 * @param number: number of motors active
 * @param address: address of device that requested data
 * @param actuators: positions of motors active
* @return None
*/
void read_encoders(actuator *m1, actuator *m2, actuator *m3, actuator *m4, int number, byte address, byte actuators[]){
  float encoder1 = 0.0, encoder2 = 0.0, encoder3 = 0.0, encoder4 = 0.0;
for(int i = 0; i < TOTAL_ACTUATORS; i++){
if(actuators[i] > 0){
switch(i){
case 0:
encoder1 = read_encoder(m1);
break;
case 1:
encoder2 = read_encoder(m2);
break;
case 2:
encoder3 = read_encoder(m3);
break;
case 3:
encoder4 = read_encoder(m4);
break;
}
}
}
send_encoders_data(encoder1, encoder2, encoder3, encoder4, number, address, actuators);
}
/**
 * Reads an individual encoder and converts the raw count to degrees
 *
 * @note Currently a prototype
 * @param *mtr: pointer to actuator struct for parameters access
* @return th_degrees: angle detected by encoder
*/
float read_encoder(actuator *mtr){
float th_degrees;
th_degrees = 360.0 * mtr->Enc->read()/mtr->Enc_resolution;
return th_degrees;
}
/* Helper functions ****************************************************************************/
/**
* Union definition for floating point and integer representation conversion
*/
typedef union{
long val_l;
float val_f;
} ufloat;
/**
* Translates a 32-bit floating point into an array of four bytes
*
* @note None
* @param val: 32-bit floating point
* @param segments: array of four bytes
* @return None
*/
void FloatToBytes(float val, byte segments[]){
ufloat temp;
temp.val_f = val;
segments[3] = (byte)((temp.val_l >> 24) & 0xff);
segments[2] = (byte)((temp.val_l >> 16) & 0xff);
segments[1] = (byte)((temp.val_l >> 8) & 0xff);
segments[0] = (byte)((temp.val_l) & 0xff);
}
/**
* Translates an array of four bytes into a floating point
*
* @note None
* @param segment: the input array of four bytes
* @return Translated 32-bit floating point
*/
float BytesToFloat(byte segments[]){
  ufloat temp;
  temp.val_l = 0; /* must be cleared first: OR-ing into an uninitialized long is undefined */
  temp.val_l = (temp.val_l | (segments[3] & 0xff)) << 8;
  temp.val_l = (temp.val_l | (segments[2] & 0xff)) << 8;
  temp.val_l = (temp.val_l | (segments[1] & 0xff)) << 8;
  temp.val_l = (temp.val_l | (segments[0] & 0xff));
  return temp.val_f;
}
/**
* Copies elements from one array to another
*
* @note None
* @param src: The source array to be copied from
* @param src_index: The starting index of the source array
* @param dest: The destination array to be copied to
* @param dest_index: The starting index of the destination array
* @param len: Number of elements to be copied
* @return None
*/
void ArrayCopy(byte src[], int src_index, byte dest[], int dest_index, int len ){
for(int i = 0; i < len; i++){
dest[dest_index + i] = src[src_index + i];
}
}
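/**
 * Round-trip sanity check for the float/byte helpers above
 *
 * @note A debugging sketch only, not part of the original firmware; the
 *       union round trip is bit-exact, so exact float comparison is safe here
 * @return None
 */
void check_float_roundtrip(){
  byte segments[4];
  float in = 3.14159f;
  FloatToBytes(in, segments);
  if(BytesToFloat(segments) == in){
    SerialUSB.println("float round-trip OK");
  }
  else{
    SerialUSB.println("float round-trip FAILED");
  }
}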
|
# Sample of de-serialization
import pickle
with open('./data/serialization', 'rb') as source_file:
L = pickle.load(source_file)
print(L)
print('load done.')
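# Companion sketch (an assumption, mirroring the sample above): how the
# './data/serialization' file could have been produced in the first place.
import pickle
with open('./data/serialization', 'wb') as dest_file:
    pickle.dump(['any', 'picklable', 'object'], dest_file)
print('dump done.')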
|
import base64
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Decode(Instruccion):
    def __init__(self, valor, tipo, linea, columna):
        Instruccion.__init__(self, tipo, linea, columna)
        self.valor = valor
    def ejecutar(self, tabla, arbol):
        super().ejecutar(tabla, arbol)
        print("DECODE")
        # str.decode('base64') only exists in Python 2; use the base64 module
        decoded = base64.b64decode(self.valor).decode('utf-8', 'strict')
        print(decoded)
        return decoded
instruccion = Decode("aG9sYSBtdW5kbw==", None, 1, 2)  # base64 of "hola mundo"
instruccion.ejecutar(None, None)
|
const assert = require('chai').assert;
const functionsToTest = require('./04.FunctionsToTest');
const sum = functionsToTest.sum;
const isSymmetric = functionsToTest.isSymmetric;
describe('SumTests', function () {
it('should Sum Correct', function () {
let arr = [1,2,3,4,5];
let result = sum(arr);
assert.equal(result,15);
});
it('empty Arr Should Sum 0', function () {
assert.equal(sum([]),0);
});
it('Work only with arrays', function () {
let callSum = () => sum(null);
let callSumWithNum = () => sum(10);
assert.throws(sum);
assert.throws(callSum);
assert.throws(callSumWithNum);
});
});
describe('SymmetryTests', function () {
it('should return false if input not arr', function () {
let result = isSymmetric('asdff');
assert.equal(result,false);
assert.equal(isSymmetric(1),false);
assert.equal(isSymmetric(undefined),false);
});
it('should work fine with symmetric arrays', function () {
let result = isSymmetric([1,2,3,2,1]);
assert.equal(result,true);
});
it('should return false with string in arr', function () {
let result = isSymmetric([1,2,3,2,'test']);
assert.equal(false,result);
});
it("should return true if empty arr", () => {
let expectedResult = isSymmetric([]);
assert.isTrue(expectedResult);
});
it("should return false if no array isnt symmetric", () => {
assert.isFalse(isSymmetric([1,2,3,4,5]));
});
});
|
'use strict';
function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }
var React = _interopDefault(require('react'));
var _extends = Object.assign || function (target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i];
for (var key in source) {
if (Object.prototype.hasOwnProperty.call(source, key)) {
target[key] = source[key];
}
}
}
return target;
};
var objectWithoutProperties = function (obj, keys) {
var target = {};
for (var i in obj) {
if (keys.indexOf(i) >= 0) continue;
if (!Object.prototype.hasOwnProperty.call(obj, i)) continue;
target[i] = obj[i];
}
return target;
};
var StopCircleIcon = function StopCircleIcon(_ref) {
var _ref$color = _ref.color,
color = _ref$color === undefined ? 'currentColor' : _ref$color,
_ref$size = _ref.size,
size = _ref$size === undefined ? 24 : _ref$size,
children = _ref.children,
props = objectWithoutProperties(_ref, ['color', 'size', 'children']);
var className = 'mdi-icon ' + (props.className || '');
return React.createElement(
'svg',
_extends({}, props, { className: className, width: size, height: size, fill: color, viewBox: '0 0 24 24' }),
React.createElement('path', { d: 'M12,2A10,10 0 0,0 2,12A10,10 0 0,0 12,22A10,10 0 0,0 22,12A10,10 0 0,0 12,2M9,9H15V15H9' })
);
};
var StopCircleIcon$1 = React.memo ? React.memo(StopCircleIcon) : StopCircleIcon;
module.exports = StopCircleIcon$1;
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an experiment of the synthesis of a basket call with mc methods.
Consider we are selling an European basket call option which in general cannot
be priced or hedged analytically and cannot be replicated statically even
under a simple Black-Scholes model for the dynamics of the underlyings.
For simplicity, bid-ask spread and fees are not taken into account.
We simulate correlated stock price variations under the historical probability
and hedge at each time period with a delta computed by a Monte Carlo method
(path-wise differentiation) under the risk neutral probability.
Tensorflow's automated differentiation capabilities make such a Monte Carlo
simulation simple to differentiate to extract the delta and easy to accelerate
on GPU/TPU.
See http://www.cmap.polytechnique.fr/~touzi/Poly-MAP552.pdf p99 7.4.2.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import scipy.linalg
import tensorflow.compat.v1 as tf
from simulation_research.tf_risk import controllers
from simulation_research.tf_risk import monte_carlo_manager
from simulation_research.tf_risk.dynamics import gbm_log_euler_step_nd
from simulation_research.tf_risk.payoffs import basket_call_payoff
flags.DEFINE_integer("num_dims", 100,
"Number of assets in basket option.")
flags.DEFINE_integer("num_batch_samples", 1000,
"Number of Monte Carlo per batch.")
flags.DEFINE_integer("num_batches", 10,
"Number of Monte Carlo batches.")
flags.DEFINE_float("initial_price", 100.0, "Initial price.")
flags.DEFINE_float("drift", 0.0, "Stock drift.")
flags.DEFINE_float("volatility", 0.2,
"Volatilty of each stock (assumed equal across stock).")
flags.DEFINE_float("correlation", 0.5,
"Pairwise correlation between stocks "
"(assumed equal across pairs).")
flags.DEFINE_float("strike", 100.0, "Basket strike.")
flags.DEFINE_float("maturity", 1.0, "European option maturity (in years).")
flags.DEFINE_float("delta_t_historical", 0.1,
"Time step of historical stock price simulation.")
flags.DEFINE_float("delta_t_monte_carlo", 0.1,
"Time step of monte carlo risk neutral price simulation.")
FLAGS = flags.FLAGS
def hist_log_euler_step(hist_state, hist_drift, hist_vol_matrix, dt):
"""Simulation of price movements under the historical probability."""
num_dims = hist_state.shape[0]
hist_dw_t = np.random.normal(size=[num_dims]) * np.sqrt(dt)
return np.maximum(hist_state * (
1.0 + hist_drift * dt + np.matmul(hist_dw_t, hist_vol_matrix)), 1e-7)
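# The step above is the Euler-Maruyama discretisation of a correlated
# geometric Brownian motion:
#   S_{t+dt} = S_t * (1 + mu * dt + dW_t . (sigma * L)),  dW_t ~ N(0, dt * I),
# where L = sqrtm(correlation_matrix); the np.maximum clips the result away
# from zero so simulated prices stay strictly positive.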
def main(_):
num_dims = FLAGS.num_dims
num_batches = FLAGS.num_batches
hist_drift = FLAGS.drift
hist_vol = FLAGS.volatility
hist_cor = FLAGS.correlation
hist_cor_matrix = (hist_cor * np.ones((num_dims, num_dims))
+ (1.0 - hist_cor) * np.eye(num_dims))
hist_price = FLAGS.initial_price * np.ones(num_dims)
hist_vol_matrix = hist_vol * np.real(scipy.linalg.sqrtm(hist_cor_matrix))
hist_dt = FLAGS.delta_t_historical
sim_dt = FLAGS.delta_t_monte_carlo
strike = FLAGS.strike
maturity = FLAGS.maturity
# Placeholders for tensorflow-based simulator's arguments.
sim_price = tf.placeholder(shape=[num_dims], dtype=tf.float32)
sim_drift = tf.placeholder(shape=(), dtype=tf.float32)
sim_vol_matrix = tf.constant(hist_vol_matrix, dtype=tf.float32)
sim_maturity = tf.placeholder(shape=(), dtype=tf.float32)
# Transition operation between t and t + dt with price in log scale.
def _dynamics_op(log_s, t, dt):
return gbm_log_euler_step_nd(
log_s, sim_drift, sim_vol_matrix, t, dt)
# Terminal payoff function (with price in log scale).
def _payoff_fn(log_s):
return basket_call_payoff(tf.exp(log_s), strike)
# Call's price and delta estimates (sensitivity to current underlying price).
# Monte Carlo estimation under the risk neutral probability is used.
# The reason why we employ the risk neutral probability is that the position
# is hedged each day depending on the value of the underlying.
# See http://www.cmap.polytechnique.fr/~touzi/Poly-MAP552.pdf for a complete
# explanation.
price_estimate, _, _ = monte_carlo_manager.non_callable_price_mc(
initial_state=tf.log(sim_price),
dynamics_op=_dynamics_op,
payoff_fn=_payoff_fn,
maturity=sim_maturity,
num_samples=FLAGS.num_batch_samples,
dt=sim_dt)
delta_estimate = monte_carlo_manager.sensitivity_autodiff(
price_estimate, sim_price)
# Start the hedging experiment.
session = tf.Session()
hist_price_profile = []
cash_profile = []
underlying_profile = []
wall_times = []
t = 0
cash_owned = 0.0
underlying_owned = np.zeros(num_dims)
while t <= maturity:
# Each day, a new stock price is observed.
cash_eval = 0.0
delta_eval = 0.0
for _ in range(num_batches):
if t == 0.0:
        # The first day a derivative price is computed to decide how much cash
# is initially needed to replicate the derivative's payoff at maturity.
cash_eval_batch = controllers.price_derivative(
price_estimate,
session,
params={
sim_drift: 0.0,
sim_price: hist_price,
sim_maturity: maturity - t
})
# Each day the delta of the derivative is computed to decide how many
# shares of the underlying should be owned to replicate the derivative's
# payoff at maturity.
start_time = time.time()
delta_eval_batch = controllers.hedge_derivative(
delta_estimate,
session,
params={
sim_drift: 0.0,
sim_price: hist_price,
sim_maturity: maturity - t
})
wall_times.append(time.time() - start_time)
delta_eval += delta_eval_batch / num_batches
cash_eval += cash_eval_batch / num_batches
if t == 0.0:
logging.info("Initial price estimate: %.2f", cash_eval)
# Self-financing portfolio dynamics, held cash is used to buy the underlying
# or increases when the underlying is sold.
if t == 0.0:
cash_owned = cash_eval - np.sum(delta_eval * hist_price)
underlying_owned = delta_eval
else:
cash_owned -= np.sum((delta_eval - underlying_owned) * hist_price)
underlying_owned = delta_eval
logging.info("Cash at t=%.2f: %.2f", t, cash_owned)
logging.info("Mean delta at t=%.2f: %.4f", t, np.mean(delta_eval))
logging.info("Mean underlying at t=%.2f: %.2f ", t, np.mean(hist_price))
hist_price_profile.append(hist_price)
cash_profile.append(cash_owned)
underlying_profile.append(underlying_owned)
# Simulation of price movements under the historical probability (i.e. what
# is actually happening in the stock market).
hist_price = hist_log_euler_step(
hist_price, hist_drift, hist_vol_matrix, hist_dt)
t += hist_dt
session.close()
# At maturity, the value of the replicating portfolio should be exactly
# the opposite of the payoff of the option being sold.
# The reason why the match is not exact here is two-fold: we only hedge once
# a day and we use noisy Monte Carlo estimates to do so.
underlying_owned_value = np.sum(underlying_owned * hist_price)
profit = np.sum(underlying_owned * hist_price) + cash_owned
loss = (np.mean(hist_price) - strike) * (np.mean(hist_price) > strike)
logging.info("Cash owned at maturity %.3f.", cash_owned)
logging.info("Value of underlying owned at maturity %.3f.",
underlying_owned_value)
logging.info("Profit (value held) = %.3f.", profit)
logging.info("Loss (payoff sold, 0 if price is below strike) = %.3f.", loss)
logging.info("PnL (should be close to 0) = %.3f.", profit - loss)
if __name__ == "__main__":
app.run(main)
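# A standalone numpy sketch (an assumption, separate from the TF graph above)
# of the path-wise differentiation mentioned in the module docstring: for a
# vanilla call under GBM with zero rates, d(payoff)/dS0 along one path is
# 1{S_T > K} * S_T / S_0, so averaging over paths gives the Monte Carlo delta.
def pathwise_call_delta(s0, strike, vol, maturity,
                        num_samples=100000, seed=0):
  rng = np.random.RandomState(seed)
  z = rng.standard_normal(num_samples)
  s_t = s0 * np.exp(-0.5 * vol ** 2 * maturity + vol * np.sqrt(maturity) * z)
  return np.mean((s_t > strike) * s_t / s0)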
|
# --------------- COLLECTS DATA ON EIGENFACE BY RUNNING IT ON A GIVEN IMAGE AND SAVING DATA TO A TEXT FILE -------------
# ------------------------------ SAVES THE DATA IN 3 TEXT FILES & PLOTS THE DATA --------------------------------------
# ------------------------------------ BY LAHIRU DINALANKARA - AKA SPIKE ----------------------------------------------
import os # importing the OS for path
import cv2 # importing the OpenCV library
import numpy as np # importing Numpy library
from PIL import Image # importing Image library
import matplotlib.pyplot as plt
import NameFind
face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
path = 'dataSet' # path to the photos
img = cv2.imread('Me4.jpg')
def getImageWithID (path):
imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
FaceList = []
IDs = []
for imagePath in imagePaths:
faceImage = Image.open(imagePath).convert('L') # Open image and convert to gray
print(str((faceImage.size)))
faceImage = faceImage.resize((110, 110)) # resize the image so the EIGEN recogniser can be trained
faceNP = np.array(faceImage, 'uint8') # convert the image to Numpy array
print(str((faceNP.shape)))
ID = int(os.path.split(imagePath)[-1].split('.')[1]) # Get the ID of the array
FaceList.append(faceNP) # Append the Numpy Array to the list
IDs.append(ID) # Append the ID to the IDs list
return np.array(IDs), FaceList # The IDs are converted in to a Numpy array
IDs, FaceList = getImageWithID(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Convert the Camera to gray
faces = face_cascade.detectMultiScale(gray, 1.3, 4) # Detect the faces and store the positions
Info = open("SaveData/EIGEN_TEST_DATA.txt", "w+")
face_number = 1
for (x, y, w, h) in faces:
Face = cv2.resize((gray[y: y+h, x: x+w]), (110, 110))
Lev = 1
eigen_ID = []
eigen_conf = []
for _ in range(200):
recog = cv2.face.createEigenFaceRecognizer(Lev) # creating EIGEN FACE RECOGNISER
print('TRAINING FOR ' + str(Lev) + ' COMPONENTS')
        recog.train(FaceList, IDs)  # The recogniser is trained using the images
print('EIGEN FACE RECOGNISER TRAINED')
ID, conf = recog.predict(Face)
eigen_ID.append(ID)
eigen_conf.append(conf)
Info.write(str(ID) + "," + str(conf) + "\n")
print('FOR ' + str(Lev) + ' COMPONENTS ID: ' + str(ID) + ' CONFIDENT: ' + str(conf))
Lev = Lev + 1
# ---------------------------------------- 1ST PLOT -----------------------------------------------------
fig = plt.gcf()
fig.canvas.set_window_title('RESULTS FOR FACE ' + str(face_number))
plt.subplot(2, 1, 1)
plt.plot(eigen_ID)
plt.title('ID against Number of Components', fontsize=10)
plt.axis([0, Lev, 0, 25])
plt.ylabel('ID', fontsize=8)
plt.xlabel('Number of Components', fontsize=8)
p2 = plt.subplot(2, 1, 2)
plt.plot(eigen_conf, 'red')
plt.title('Confidence against Number of Components', fontsize=10)
p2.set_xlim(xmin=0)
p2.set_xlim(xmax=Lev)
plt.ylabel('Confidence', fontsize=8)
plt.xlabel('Number of Components', fontsize=8)
plt.tight_layout()
print(' SHOW RESULTS FOR FACE ' + str(face_number))
NameFind.tell_time_passed() # TIME PASSED
cv2.imshow('FACE' + str(face_number), Face)
plt.show()
face_number = face_number + 1
Info.close()
cv2.destroyAllWindows()
|
// Hello World client
// Connects REQ socket to tcp://localhost:5559
// Sends "Hello" to server, expects "World" back
#include "zhelpers.h"
int main (void)
{
void *context = zmq_ctx_new ();
// Socket to talk to server
void *requester = zmq_socket (context, ZMQ_REQ);
zmq_connect (requester, "tcp://localhost:5559");
int request_nbr;
for (request_nbr = 0; request_nbr != 10; request_nbr++) {
printf ("send hello\n");
s_send (requester, "Hello");
char *string = s_recv (requester);
printf ("Received reply %d [%s]\n", request_nbr, string);
free (string);
}
zmq_close (requester);
zmq_ctx_destroy (context);
return 0;
}
|
from utils import *
def merge_cmd(args):
if len(args.graphs)<2:
logging.fatal("Specify multiple gfa files to merge them.")
return
G=nx.DiGraph()
for graph in args.graphs:
logging.info("Adding %s ..." %graph)
read_gfa(graph,None,"",G,remap=True)
    if args.outprefix is not None:
write_gfa(G,"",outputfile=args.outprefix+".gfa")
else:
write_gfa(G,"",outputfile="_".join([os.path.basename(f)[:os.path.basename(f).rfind('.')] for f in args.graphs])+".gfa")
|
import React from 'react'
import GraphiQL from 'graphiql'
import { buildClientSchema, introspectionQuery, isType, GraphQLObjectType } from 'graphql'
const { POSTGRAPHQL_CONFIG } = window
/**
* The standard GraphiQL interface wrapped with some PostGraphQL extensions.
* Including a JWT setter and live schema udpate capabilities.
*/
class PostGraphiQL extends React.Component {
state = {
// Our GraphQL schema which GraphiQL will use to do its intelligence
// stuffs.
schema: null,
}
componentDidMount() {
// Update the schema for the first time. Log an error if we fail.
this.updateSchema()
.catch(error => console.error(error)) // tslint:disable-line no-console
// If we were given a `streamUrl`, we want to construct an `EventSource`
// and add listeners.
if (POSTGRAPHQL_CONFIG.streamUrl) {
      // Starts listening to the event stream at the `streamUrl`.
const eventSource = new EventSource(POSTGRAPHQL_CONFIG.streamUrl)
// When we get a change notification, we want to update our schema.
eventSource.addEventListener('changed', () => {
this.updateSchema()
.then(() => console.log('PostGraphQL: Schema updated')) // tslint:disable-line no-console
.catch(error => console.error(error)) // tslint:disable-line no-console
}, false)
// Add event listeners that just log things in the console.
eventSource.addEventListener('open', () => console.log('PostGraphQL: Listening for server sent events'), false) // tslint:disable-line no-console
eventSource.addEventListener('error', () => console.log('PostGraphQL: Failed to connect to server'), false) // tslint:disable-line no-console
// Store our event source so we can unsubscribe later.
this._eventSource = eventSource
}
}
componentWillUnmount() {
// Close out our event source so we get no more events.
this._eventSource.close()
this._eventSource = null
}
/**
   * Executes a GraphQL query with some extra information beyond the standard
* parameters. Namely a JWT which may be added as an `Authorization` header.
*/
async executeQuery (graphQLParams, { jwtToken } = {}) {
const response = await fetch(POSTGRAPHQL_CONFIG.graphqlUrl, {
method: 'POST',
headers: Object.assign({
'Accept': 'application/json',
'Content-Type': 'application/json',
}, jwtToken ? {
'Authorization': `Bearer ${jwtToken}`,
} : {}),
credentials: 'same-origin',
body: JSON.stringify(graphQLParams),
})
const result = await response.json()
return result
}
/**
   * When we receive an event signaling a change for the schema, we must rerun
* our introspection query and notify the user of the results.
*/
// TODO: Send the introspection query results in the server sent event?
async updateSchema () {
// Don’t allow users to see a schema while we update.
this.setState({ schema: undefined })
// Fetch the schema using our introspection query and report once that has
// finished.
const { data } = await this.executeQuery({ query: introspectionQuery })
// Use the data we got back from GraphQL to build a client schema (a
// schema without resolvers).
const schema = buildClientSchema(data)
// Update our component with the new schema.
this.setState({ schema })
// Do some hacky stuff to GraphiQL.
this._updateGraphiQLDocExplorerNavStack(schema)
}
/**
* Updates the GraphiQL documentation explorer’s navigation stack. This
* depends on private API. By default the GraphiQL navigation stack uses
* objects from a GraphQL schema. Therefore if the schema is updated, the
* old objects will still be in the navigation stack. This is bad for us
* because we want to reflect the new schema information! So, we manually
* update the navigation stack with this function.
*
* I’m sorry Lee Byron.
*/
// TODO: Submit a PR which adds this as a non-hack.
_updateGraphiQLDocExplorerNavStack (nextSchema) {
// Get the documentation explorer component from GraphiQL. Unfortunately
// for them this looks like public API. Muwahahahaha.
const { docExplorerComponent } = this.graphiql
const { navStack } = docExplorerComponent.state
    // If one type/field isn't found this will be set to false and the
// `navStack` will just reset itself.
let allOk = true
// Ok, so if you look at GraphiQL source code, the `navStack` is made up of
// objects that are either types or fields. Let’s use that to search in
// our new schema for matching (updated) types and fields.
const nextNavStack = navStack.map((navStackItem, i) => {
// If we are not ok, abort!
if (!allOk)
return null
// Get the definition from the nav stack item.
const typeOrField = navStackItem.def
// If there is no type or field then this is likely the root schema view,
// or a search. If this is the case then just return that nav stack item!
if (!typeOrField) {
return navStackItem
}
// If this is a type, let’s do some shenanigans...
else if (isType(typeOrField)) {
// Let’s see if we can get a type with the same name.
const nextType = nextSchema.getType(typeOrField.name)
// If there is no type with this name (it was removed), we are not ok
// so set `allOk` to false and return undefined.
if (!nextType) {
allOk = false
return null
}
// If there is a type with the same name, let’s return it! This is the
// new type with all our new information.
return { ...navStackItem, def: nextType }
}
// If you thought this function was already pretty bad, it’s about to get
// worse. We want to update the information for an object field.
else {
// Ok, so since this is an object field, we will assume that the last
// element in our stack was an object type.
const nextLastType = nextSchema.getType(navStack[i - 1] ? navStack[i - 1].name : null)
// If there is no type for the last type in the nav stack’s name.
// Panic!
if (!nextLastType) {
allOk = false
return null
}
// If the last type is not an object type. Panic!
if (!(nextLastType instanceof GraphQLObjectType)) {
allOk = false
return null
}
// Next we will see if the new field exists in the last object type.
const nextField = nextLastType.getFields()[typeOrField.name]
// If not, Panic!
if (!nextField) {
allOk = false
return null
}
// Otherwise we hope very much that it is correct.
return { ...navStackItem, def: nextField }
}
})
// This is very hacky but works. React is cool.
this.graphiql.docExplorerComponent.setState({
// If we are not ok, just reset the `navStack` with an empty array.
// Otherwise use our new stack.
navStack: allOk ? nextNavStack : [],
})
}
render () {
const { schema } = this.state
return (
<GraphiQL
ref={ref => (this.graphiql = ref)}
schema={schema}
fetcher={params => this.executeQuery(params)}
/>
)
}
}
export default PostGraphiQL
|
(function () {
'use strict';
angular
.module('core')
.service('utility', utility);
function utility($q, $timeout) {
/**
* sets an index
* @param obj the object from which we want to set the index
* @param is string representation of the index
* @param value
* @returns {*}
*/
function setIndex(obj, is, value) {
if (typeof is == 'string') {
return setIndex(obj, is.split('.'), value);
}
else if (is.length == 1 && angular.isDefined(value)) {
return obj[is[0]] = value;
}
else if (is.length == 0) {
return obj;
}
else {
if (!obj[is[0]]) {
obj[is[0]] = {};
}
return setIndex(obj[is[0]], is.slice(1), value);
}
}
/**
* gets an index
* @param obj the object from which we want to retrieve the index value
* @param is string representation of the index
* @param value
* @returns {*}
*/
function getIndex(obj, is, value) {
//handle array
if(angular.isObject(obj) && angular.isArray(is) && !angular.isUndefined(obj[is[0]]) && angular.isArray(obj[is[0]])) {
var result = '';
var isNow = angular.copy(is);
isNow.splice(0,1);
var str = isNow.join('.');
angular.forEach(obj[is[0]], function(each) {
result = getIndex(each, str) + ', ' + result;
});
return result;
}
if (typeof is == 'string') {
return getIndex(obj, is.split('.'), value);
}
else if (is.length == 1 && angular.isDefined(value)) {
return obj[is[0]] = value;
}
else if (is.length == 0) {
return obj;
}
else if (!obj) {
return null;
}
else {
return getIndex(obj[is[0]], is.slice(1), value);
}
}
/**
* debounce a function
* @param func
* @param wait
* @param immediate
* @returns {Function}
*/
function debounce(func, wait, immediate) {
var timeout;
var deferred = $q.defer();
return function() {
var context = this, args = arguments;
var later = function() {
timeout = null;
if(!immediate) {
deferred.resolve(func.apply(context, args));
deferred = $q.defer();
}
};
var callNow = immediate && !timeout;
if ( timeout ) {
$timeout.cancel(timeout);
}
timeout = $timeout(later, wait);
if (callNow) {
deferred.resolve(func.apply(context,args));
deferred = $q.defer();
}
return deferred.promise;
};
}
return {
setIndex: setIndex,
getIndex: getIndex,
debounce: debounce
};
}
})();
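// Usage sketch (assuming the service is injected as `utility`):
//   var obj = {};
//   utility.setIndex(obj, 'a.b.c', 1); // obj is now { a: { b: { c: 1 } } }
//   utility.getIndex(obj, 'a.b.c');    // -> 1
//   var lazySave = utility.debounce(save, 300); // returns a promise resolving
//                                               // with save()'s return value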
|
// copied from https://github.com/xpl/crx-hotreload and modified for this project.
const filesInDirectory = dir =>
new Promise(resolve =>
dir.createReader().readEntries(entries =>
Promise.all(
entries
.filter(e => e.name[0] !== ".")
.map(
e =>
e.isDirectory
? filesInDirectory(e)
: new Promise(resolve => e.file(resolve))
)
)
.then(files => [].concat(...files))
.then(resolve)
)
);
const timestampForFilesInDirectory = dir =>
filesInDirectory(dir).then(files =>
files.map(f => f.name + f.lastModifiedDate).join()
);
const watchChanges = (dir, lastTimestamp) => {
timestampForFilesInDirectory(dir).then(timestamp => {
if (!lastTimestamp || lastTimestamp === timestamp) {
setTimeout(() => watchChanges(dir, timestamp), 1000); // retry after 1s
} else {
chrome.runtime.reload();
}
});
};
// NOTICE: getPackageDirectoryEntry is only supported in chrome
chrome.runtime.getPackageDirectoryEntry(dir => watchChanges(dir));
|
x, y = map(int, input().split())
s = [input() for _ in range(x)]
f = True
for i in range(x):
    z = input()
    for j in range(y):
        # each character of the original row must appear doubled in the new row
        if z[2*j] != z[2*j+1] or z[2*j] != s[i][j]:
            f = False
print("Eyfa" if f else "Not Eyfa")
|
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras import regularizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Embedding, Input, Dense, LSTM, Bidirectional, TimeDistributed, Dropout
from keras.models import Model
import matplotlib.pyplot as plt
from nltk import tokenize
from attention_with_context import AttentionWithContext
import re
import time
from pyspark import SparkContext, SparkConf
from elephas.spark_model import SparkModel
class HAN(object):
"""
HAN model is implemented here.
"""
def __init__(self, text, labels, pretrained_embedded_vector_path, max_features, max_senten_len, max_senten_num, embedding_size, num_categories=None, validation_split=0.2, verbose=0):
"""Initialize the HAN module
Keyword arguments:
text -- list of the articles for training.
labels -- labels corresponding the given `text`.
pretrained_embedded_vector_path -- path of any pretrained vector
        max_features -- maximum number of features the embedding matrix can have. For more, check out https://keras.io/layers/embeddings/
        max_senten_len -- maximum sentence length. It is recommended not to use the maximum one but the one that covers the 0.95 quantile of the data.
        max_senten_num -- maximum number of sentences. It is recommended not to use the maximum one but the one that covers the 0.95 quantile of the data.
embedding_size -- size of the embedding vector
num_categories -- total number of categories.
validation_split -- train-test split.
verbose -- how much you want to see.
"""
try:
self.verbose = verbose
self.max_features = max_features
self.max_senten_len = max_senten_len
self.max_senten_num = max_senten_num
self.embed_size = embedding_size
self.validation_split = validation_split
self.embedded_dir = pretrained_embedded_vector_path
self.text = pd.Series(text)
self.categories = pd.Series(labels)
self.classes = self.categories.unique().tolist()
# Initialize default hyperparameters
# You can change it using `set_hyperparameters` function
self.hyperparameters = {
'l2_regulizer': None,
'dropout_regulizer' : None,
'rnn' : LSTM,
'rnn_units' : 150,
'dense_units': 200,
'activation' : 'softmax',
'optimizer' : 'adam',
'metrics' : ['acc'],
'loss': 'categorical_crossentropy'
}
if num_categories is not None:
assert (num_categories == len(self.classes))
assert (self.text.shape[0] == self.categories.shape[0])
self.data, self.labels = self.preprocessing()
self.x_train, self.y_train, self.x_val, self.y_val = self.split_dataset()
self.embedding_index = self.add_glove_model()
self.set_model()
except AssertionError:
print('Input and label data must be of same size')
# Implement this after you have seen all the different kinds of errors
# try:
# conf = SparkConf().setAppName('HANMusicClassifier').setMaster('')
# self.sc = SparkContext(conf=conf)
# except Error:
conf = SparkConf().setAppName('HANMusicClassifier')
self.sc = SparkContext(conf=conf)
def set_hyperparameters(self, tweaked_instances):
"""Set hyperparameters of HAN model.
        Keyword arguments:
tweaked_instances -- dictionary of all those keys you want to change
"""
for key, value in tweaked_instances.items():
if key in self.hyperparameters:
self.hyperparameters[key] = value
else:
raise KeyError(key + ' does not exist in hyperparameters')
self.set_model()
def show_hyperparameters(self):
"""To check the values of all the current hyperparameters
"""
print('Hyperparameter\tCorresponding Value')
for key, value in self.hyperparameters.items():
print(key, '\t\t', value)
def clean_string(self, string):
"""
Tokenization/string cleaning for dataset
Every dataset is lower cased except
"""
string = re.sub(r"\\", "", string)
string = re.sub(r"\'", "", string)
string = re.sub(r"\"", "", string)
return string.strip().lower()
def add_dataset(self, text, labels):
try:
self.text = pd.concat([self.text, pd.Series(text)])
self.categories = pd.concat([self.categories, pd.Series(labels)])
            assert (len(self.classes) == len(self.categories.unique().tolist()))
except AssertionError:
print("New class cannot be added in this manner")
def preprocessing(self):
"""Preprocessing of the text to make it more resonant for training
"""
paras = []
labels = []
texts = []
for idx in range(self.text.shape[0]):
text = self.clean_string(self.text[idx])
texts.append(text)
sentences = tokenize.sent_tokenize(text)
paras.append(sentences)
tokenizer = Tokenizer(num_words=self.max_features, oov_token=True)
tokenizer.fit_on_texts(texts)
data = np.zeros((len(texts), self.max_senten_num,
self.max_senten_len), dtype='int32')
for i, sentences in enumerate(paras):
for j, sent in enumerate(sentences):
if j < self.max_senten_num:
wordTokens = text_to_word_sequence(sent)
k = 0
for _, word in enumerate(wordTokens):
if k < self.max_senten_len and word in tokenizer.word_index and tokenizer.word_index[word] < self.max_features:
data[i, j, k] = tokenizer.word_index[word]
k = k+1
self.word_index = tokenizer.word_index
if self.verbose == 1:
print('Total %s unique tokens.' % len(self.word_index))
labels = pd.get_dummies(self.categories)
if self.verbose == 1:
print('Shape of data tensor:', data.shape)
print('Shape of labels tensor:', labels.shape)
assert (len(self.classes) == labels.shape[1])
assert (data.shape[0] == labels.shape[0])
return data, labels
def split_dataset(self):
indices = np.arange(self.data.shape[0])
np.random.shuffle(indices)
self.data = self.data[indices]
self.labels = self.labels.iloc[indices]
nb_validation_samples = int(self.validation_split * self.data.shape[0])
x_train = self.data[:-nb_validation_samples]
y_train = self.labels[:-nb_validation_samples]
x_val = self.data[-nb_validation_samples:]
y_val = self.labels[-nb_validation_samples:]
if self.verbose == 1:
            print('Number of positive and negative reviews in training and validation set')
print(y_train.columns.tolist())
print(y_train.sum(axis=0).tolist())
print(y_val.sum(axis=0).tolist())
return x_train, y_train, x_val, y_val
def get_model(self):
"""
Returns the HAN model so that it can be used as a part of pipeline
"""
return self.model
def add_glove_model(self):
"""
Read and save Pretrained Embedding model
"""
embeddings_index = {}
try:
f = open(self.embedded_dir)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
assert (coefs.shape[0] == self.embed_size)
embeddings_index[word] = coefs
f.close()
except OSError:
            print('Embedded file not found')
exit()
except AssertionError:
print("Embedding vector size does not match with given embedded size")
return embeddings_index
def get_embedding_matrix(self):
"""
Returns Embedding matrix
"""
embedding_matrix = np.random.random((len(self.word_index) + 1, self.embed_size))
absent_words = 0
for word, i in self.word_index.items():
embedding_vector = self.embedding_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
else:
absent_words += 1
if self.verbose == 1:
print('Total absent words are', absent_words, 'which is', "%0.2f" %
(absent_words * 100 / len(self.word_index)), '% of total words')
return embedding_matrix
def get_embedding_layer(self):
"""
Returns Embedding layer
"""
embedding_matrix = self.get_embedding_matrix()
return Embedding(len(self.word_index) + 1, self.embed_size, weights=[embedding_matrix], input_length=self.max_senten_len, trainable=False)
def set_model(self):
"""
Set the HAN model according to the given hyperparameters
"""
if self.hyperparameters['l2_regulizer'] is None:
kernel_regularizer = None
else:
kernel_regularizer = regularizers.l2(self.hyperparameters['l2_regulizer'])
if self.hyperparameters['dropout_regulizer'] is None:
dropout_regularizer = 1
else:
dropout_regularizer = self.hyperparameters['dropout_regulizer']
word_input = Input(shape=(self.max_senten_len,), dtype='float32')
word_sequences = self.get_embedding_layer()(word_input)
word_lstm = Bidirectional(
self.hyperparameters['rnn'](self.hyperparameters['rnn_units'], return_sequences=True, kernel_regularizer=kernel_regularizer))(word_sequences)
word_dense = TimeDistributed(
Dense(self.hyperparameters['dense_units'], kernel_regularizer=kernel_regularizer))(word_lstm)
word_att = AttentionWithContext()(word_dense)
wordEncoder = Model(word_input, word_att)
sent_input = Input(shape=(self.max_senten_num, self.max_senten_len), dtype='float32')
sent_encoder = TimeDistributed(wordEncoder)(sent_input)
sent_lstm = Bidirectional(self.hyperparameters['rnn'](
self.hyperparameters['rnn_units'], return_sequences=True, kernel_regularizer=kernel_regularizer))(sent_encoder)
sent_dense = TimeDistributed(
Dense(self.hyperparameters['dense_units'], kernel_regularizer=kernel_regularizer))(sent_lstm)
sent_att = Dropout(dropout_regularizer)(
AttentionWithContext()(sent_dense))
        preds = Dense(len(self.classes), activation=self.hyperparameters['activation'])(sent_att)
self.model = Model(sent_input, preds)
self.model.compile(
loss=self.hyperparameters['loss'], optimizer=self.hyperparameters['optimizer'], metrics=self.hyperparameters['metrics'])
self.spark_model = SparkModel(self.model, frequency='epoch', mode='asynchronous')
# Currently cannot plot learning curve
def train_model(self, rdd, epochs, batch_size, verbose=1, validation_split=0.1):
"""Training the model
rdd -- The actual data
epochs -- Total number of epochs
batch_size -- size of a batch
verbose -- Whether or not we want verbose feedback
validation_split -- What percentage of the data from the rdd is actually used as a validation set
"""
        self.spark_model.fit(rdd, epochs=epochs, batch_size=batch_size, verbose=verbose, validation_split=validation_split)
def predict(self, rdd):
        return self.spark_model.predict(rdd)
def plot_results(self):
"""
Plotting learning curve of last trained model.
"""
# summarize history for accuracy
plt.subplot(211)
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(self.history.history['val_loss'])
plt.plot(self.history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
time.sleep(10)
plt.close()
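# Hypothetical usage sketch (the corpus, file names and sizes below are
# assumptions for illustration, not values from this project):
#
#   han = HAN(text=songs, labels=genres,
#             pretrained_embedded_vector_path='glove.6B.100d.txt',
#             max_features=20000, max_senten_len=40, max_senten_num=15,
#             embedding_size=100, verbose=1)
#   han.set_hyperparameters({'rnn_units': 100})
#   # elephas expects training data as an RDD of (features, label) pairs,
#   # e.g. built with elephas.utils.rdd_utils.to_simple_rdd
#   rdd = to_simple_rdd(han.sc, han.x_train, han.y_train.values)
#   han.train_model(rdd, epochs=5, batch_size=32)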
|
"""
# lex-re
Converter e associar expressões regulares da teoria de compiladores com
expressões regulares escritas em linguagem de programação
Criar e manipular expressões regulares em Python ou outra linguagem de programação.
Vamos testar esta habilidade traduzindo as regras para símbolos terminais de
Rust em expressões regulares de Python. Esta habilidade verifica os tipos
numéricos em
* Inteiros: https://doc.rust-lang.org/reference/tokens.html#integer-literals
* Floats: https://doc.rust-lang.org/reference/tokens.html#floating-point-literals
* Comentários no formato C, tanto no estilo // até o fim da linha
quanto no estilo /* bloco */. O Rust possui regras mais sofisticadas, mas vamos
ignorá-las na atividade.
* Identificadores: https://doc.rust-lang.org/reference/identifiers.html
(mas a última é trivial, porque a referência já fornece a expressão regular).
Quem optar por implementar as regras de string e raw string ganha também a
habilidade opcional re-adv*, mas isto é testado pelo arquivo re_adv_V1. Se não
estiver interessado(a) nesta competência, implemente strings como sequências de
letras e espaços entre aspas.
"""
import pytest
import lark
@pytest.mark.parametrize("grp", "ID INT BIN_INT OCT_INT HEX_INT FLOAT".split())
def test_exemplos_positivos(grp, mod, data):
for ex in sorted(data(grp), key=len):
typ = None
if grp.endswith("INT"):
typ = int
if grp.endswith("FLOAT"):
typ = float
check_valid_token(ex, mod, grp, typ=typ)
def test_comentários(mod, data):
grp = "COMMENT"
for ex in sorted(data(grp), key=len):
print(f"Testando: {ex!r} ({grp})")
seq = mod.lex_list(ex)
if seq:
raise AssertionError(f"erro: esperava comentário, obteve sequência {seq}")
@pytest.mark.parametrize("grp", "ID INT BIN_INT OCT_INT HEX_INT FLOAT COMMENT".split())
def test_exemplos_negativos(grp, mod, data):
for ex in sorted(data(grp + "_bad"), key=len):
print(f"Testando: {ex!r} ({grp})")
try:
seq = mod.lex_list(ex)
except lark.LarkError:
continue
if grp == "COMMENT" and not seq:
raise AssertionError(f"aceitou elemento: {ex}")
elif len(seq) == 1 and seq[0].type == grp and seq[0] == ex:
raise AssertionError(f"aceitou elemento: {seq}")
def check_valid_token(ex, mod, grp, typ=None):
print(f"Testando: {ex!r} ({grp})")
seq = mod.lex_list(ex)
try:
[tk] = seq
except ValueError:
raise AssertionError(f"erro: esperava token único, obteve sequência {seq}")
if typ is not None:
val = mod.transform(tk)
        assert isinstance(
            val, typ
        ), f"wrong type {tk} ({tk.type}): expected {typ}, got {type(val)}"
return seq
|
import nextConnect from "next-connect";
import middleware from "../../../../../lib/db";
const handler = nextConnect();
handler.use(middleware);
handler.get(async (req, res) => {
const id = parseInt(req.query.id);
  const updateView = req.query.updateview !== "false"; // any value other than "false" (or no flag) bumps views
// prettier-ignore
let post = await req.db
.collection("post")
.find({ id: id })
.project({ upvoter: 0, downvoter: 0, _id: 0 })
.toArray();
if (post.length == 0) {
res.status(404).json({
message: "Post not found",
});
} else {
if (updateView) await req.db.collection("post").updateOne({ id: id }, { $inc: { views: 1 } });
res.status(200).json(post);
}
});
export default handler;
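// Usage sketch (route shape is an assumption based on this file's location):
//   GET /api/.../42                   -> returns the post and increments views
//   GET /api/.../42?updateview=false  -> returns the post, views untouched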
|
/*
* Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#ifndef ALEXA_CLIENT_SDK_AVSCOMMON_SDKINTERFACES_INCLUDE_AVSCOMMON_SDKINTERFACES_AUDIO_AUDIOFACTORYINTERFACE_H_
#define ALEXA_CLIENT_SDK_AVSCOMMON_SDKINTERFACES_INCLUDE_AVSCOMMON_SDKINTERFACES_AUDIO_AUDIOFACTORYINTERFACE_H_
#include <memory>
#include "AVSCommon/SDKInterfaces/Audio/AlertsAudioFactoryInterface.h"
#include "AVSCommon/SDKInterfaces/Audio/CommunicationsAudioFactoryInterface.h"
#include "AVSCommon/SDKInterfaces/Audio/NotificationsAudioFactoryInterface.h"
#include "AVSCommon/SDKInterfaces/Audio/SystemSoundAudioFactoryInterface.h"
namespace alexaClientSDK {
namespace avsCommon {
namespace sdkInterfaces {
namespace audio {
/**
* This is the interface that distributes interfaces for various audio stream providers.
*/
class AudioFactoryInterface {
public:
virtual ~AudioFactoryInterface() = default;
/**
* This shares a factory that produces audio streams for the alerts components.
*/
virtual std::shared_ptr<AlertsAudioFactoryInterface> alerts() const = 0;
/**
* This shares a factory that produces audio streams for the notifications components.
*/
virtual std::shared_ptr<NotificationsAudioFactoryInterface> notifications() const = 0;
/**
* This shares a factory that produces audio streams for the communications components.
*/
virtual std::shared_ptr<CommunicationsAudioFactoryInterface> communications() const = 0;
/**
* This shares a factory that produces audio streams for the system sound components.
*/
virtual std::shared_ptr<SystemSoundAudioFactoryInterface> systemSounds() const = 0;
};
} // namespace audio
} // namespace sdkInterfaces
} // namespace avsCommon
} // namespace alexaClientSDK
#endif // ALEXA_CLIENT_SDK_AVSCOMMON_SDKINTERFACES_INCLUDE_AVSCOMMON_SDKINTERFACES_AUDIO_AUDIOFACTORYINTERFACE_H_
|
from argparse import ArgumentParser
from sys import stdin
from mlmorph import Generator, Analyser, check_foreign_word
def main():
"""Invoke a simple CLI analyser or generator."""
a = ArgumentParser()
a.add_argument('-i', '--input', metavar="INFILE", type=open,
dest="infile", help="source of analysis data")
a.add_argument('-a', '--analyse', action='store_true',
help="Analyse the input file strings")
a.add_argument('-g', '--generate', action='store_true',
help="Generate the input file strings")
a.add_argument('-f', '--foreign', action='store_true',
help="Check if the word is foreign word or not")
a.add_argument('-v', '--verbose', action='store_true',
help="print verbosely while processing")
options = a.parse_args()
if not options.infile:
options.infile = stdin
if options.verbose:
print("reading from", options.infile.name)
analyser = Analyser()
generator = Generator()
for line in options.infile:
line = line.strip()
        if not line:
continue
if options.analyse:
anals = analyser.analyse(line, True)
if not anals:
print(line, "\t?")
for anal in anals:
print(line, "\t", anal[0], "\t", anal[1])
if options.generate:
gens = generator.generate(line, True)
if not gens:
print(line, "\t?")
for gen in gens:
print(line, "\t", gen[0], "\t", gen[1])
if options.foreign:
is_foreign = check_foreign_word(line)
print(line, "\t", is_foreign)
print()
exit(0)
if __name__ == "__main__":
main()
|
import React from "react"
import cx from "classnames"
import { reduxForm } from "redux-form"
import { connect } from "react-redux"
import { ReduxFormCheckboxField } from "../../../forms/checkbox-field"
import { ReduxFormSelectField } from "../../../forms/select-field"
import { ReduxFormInputField } from "../../../forms/input-field"
import { validate } from "../resources-form-approved-validation"
import { getProjectDetail } from "../../projects-selectors"
import config from "../../../../config"
const goBack = () => window.history.back()
let ApprovedResourceRequestFormImpl = ({
handleSubmit,
onFormSubmit,
usersEmails
}) => (
<div className="row">
<div className="col-9">
<div className="card">
<div className="card-header"></div>
<div className="card-body">
<form
onSubmit={handleSubmit(onFormSubmit)}
className={cx({
"resource-approved-form": true,
})}>
<ReduxFormInputField
name="PrimaryInvestigator"
type="email"
label="Primary Investigator Email"
cy="resource-approved-form-pi"
placeholder="Please enter an email of primary investigator..."
/>
<ReduxFormSelectField
name="ProjectContactEmail"
label="Project Contact Person (her/his email)"
displayNames={usersEmails}
values={usersEmails}
cy="resource-approved-form-contact"
placeholder="Please select user as a main contact person for this resource request..."
/>
<ReduxFormSelectField
name="HPCProvider"
label="HPC Center"
displayNames={config.hpcProviders}
values={config.hpcProviders}
cy="resource-approved-form-hpcprovider"
placeholder="Please select a HPC providing center..."
/>
<ReduxFormInputField
name="AssociatedHPCProject"
type="text"
label="Existing HPC Project"
cy="resource-approved-form-hpcproject"
placeholder="Please enter an ID of existing HPC project which the request will be associated with..."
/>
<ReduxFormCheckboxField
name="TermsConsent"
label="I declare that the information provided by me is correct, that I have read the contents of the Contract on the use of high performance cluster and agree to its terms."
required={true}
cy="resource-approved-form-termsconsent"
/>
<button
className="btn btn-success btn-simple text-nowrap mr-1"
type="submit"
cy="resource-approved-form-btn-submit">
<span className="white d-inline-flex mx-1">
<i className="tim-icons icon-simple-add"></i>
</span>{" "}
Request
</button>
<button
type="button"
className="btn btn-info btn-simple text-nowrap ml-1"
onClick={goBack}>
<span className="white d-inline-flex mx-1">
<i className="tim-icons icon-simple-remove"></i>
</span>{" "}
Cancel
</button>
</form>
</div>
</div>
</div>
</div>
)
const mapStateToProps = state => ({
project: getProjectDetail(state),
})
ApprovedResourceRequestFormImpl = connect(
mapStateToProps,
null
)(ApprovedResourceRequestFormImpl)
export const ApprovedResourceRequestForm = reduxForm({ validate: validate })(
ApprovedResourceRequestFormImpl
)
|
'use strict'
var fs = require('graceful-fs')
var path = require('path')
var mkdirp = require('mkdirp')
var requireInject = require('require-inject')
var test = require('tap').test
var common = require('../common-tap.js')
var pkg = common.pkg
var json = {
name: 'gist-short-shortcut',
version: '0.0.0'
}
test('setup', function (t) {
setup()
t.end()
})
test('gist-shortcut', function (t) {
var cloneUrls = [
['git://gist.github.com/deadbeef.git', 'GitHub gist shortcuts try git URLs first'],
['https://gist.github.com/deadbeef.git', 'GitHub gist shortcuts try HTTPS URLs second'],
['ssh://git@gist.github.com/deadbeef.git', 'GitHub gist shortcuts try SSH third']
]
var npm = requireInject.installGlobally('../../lib/npm.js', {
'child_process': {
'execFile': function (cmd, args, options, cb) {
process.nextTick(function () {
if (args.indexOf('clone') === -1) return cb(null, '', '')
var cloneUrl = cloneUrls.shift()
if (cloneUrl) {
t.is(args[args.length - 2], cloneUrl[0], cloneUrl[1])
} else {
t.fail('too many attempts to clone')
}
cb(new Error('execFile mock fails on purpose'))
})
}
}
})
var opts = {
cache: common.cache,
prefix: pkg,
registry: common.registry,
loglevel: 'silent'
}
npm.load(opts, function (er) {
t.ifError(er, 'npm loaded without error')
npm.commands.install(['gist:deadbeef'], function (er, result) {
t.ok(er, 'mocked install failed as expected')
t.end()
})
})
})
function setup () {
mkdirp.sync(pkg)
fs.writeFileSync(
path.join(pkg, 'package.json'),
JSON.stringify(json, null, 2)
)
process.chdir(pkg)
}
|
const server = require("../../server");
const { get, post } = server.router;
const { render } = server.reply;
server(
get("/", (ctx) => render("index.hbs")),
get("/:id", (ctx) => render("page.hbs", { page: ctx.params.id }))
);
|
''' URLS worker: Tries to extract URL from strings output '''
import re
import pprint
class URLS(object):
''' This worker looks for url patterns in strings output '''
dependencies = ['strings']
def __init__(self):
''' Initialize the URL worker '''
self.url_match = re.compile(r'http[s]?://[^\s<>"]+|www\.[^\s<>"]+', re.MULTILINE)
def execute(self, input_data):
''' Execute the URL worker '''
string_output = input_data['strings']['string_list']
flatten = ' '.join(string_output)
urls = self.url_match.findall(flatten)
return {'url_list': urls}
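# For example (an illustration, matching the regex above):
#   URLS().execute({'strings': {'string_list': ['see http://example.com now']}})
#   -> {'url_list': ['http://example.com']}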
# Unit test: Create the class, the proper input and run the execute() method for a test
def test():
''' url.py: Unit test'''
# This worker test requires a local server running
import zerorpc
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect("tcp://127.0.0.1:4242")
# Generate input for the worker
import os
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../data/pe/bad/505804ec7c7212a52ec85e075b91ed84')
md5 = workbench.store_sample(open(data_path, 'rb').read(), 'bad_pe', 'exe')
input_data = workbench.work_request('strings', md5)
# Execute the worker (unit test)
worker = URLS()
output = worker.execute(input_data)
    print('\n<<< Unit Test >>>')
pprint.pprint(output)
# Execute the worker (server test)
output = workbench.work_request('url', md5)
    print('\n<<< Server Test >>>')
pprint.pprint(output)
if __name__ == "__main__":
test()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 13:58:21 2013
@author: edouard.duchesnay@cea.fr
@author: benoit.da_mota@inria.fr
"""
from epac.workflow.pipeline import Pipe
from epac.workflow.splitters import Perms, Methods, CV
from epac.workflow.splitters import CVBestSearchRefit
from epac.workflow.splitters import CVBestSearchRefitParallel
from epac.workflow.splitters import ColumnSplitter, RowSplitter, CRSplitter
from epac.workflow.base import BaseNode, key_pop, key_split
from epac.configuration import conf, debug
from epac.map_reduce.results import ResultSet, Result
from epac.utils import train_test_merge, train_test_split, dict_diff
from epac.utils import range_log2, export_csv, export_resultset_csv, \
export_leaves_csv
from epac.stores import StoreFs, StoreMem
from epac.map_reduce.mappers import MapperSubtrees
from epac.map_reduce.engine import SomaWorkflowEngine, LocalEngine
from epac.map_reduce.reducers import ClassificationReport, PvalPerms
__version__ = '0.10-git'
from .sklearn_plugins import *
__all__ = ['BaseNode',
'Pipe',
'Perms',
'Methods',
'CV',
'CVBestSearchRefit',
'CVBestSearchRefitParallel',
'ColumnSplitter', 'RowSplitter', 'CRSplitter',
'ClassificationReport', 'PvalPerms',
'Result',
'ResultSet',
'sklearn_plugins',
'conf',
'debug',
'train_test_split',
'train_test_merge',
'key_pop',
'key_split',
'dict_diff',
'export_csv',
'export_resultset_csv',
'export_leaves_csv',
'StoreFs',
'StoreMem',
'range_log2',
'MapperSubtrees',
'SomaWorkflowEngine',
'LocalEngine'
]
|
const Discord = require("discord.js")
module.exports = {
name: 'help',
aliases: ["h"],
run: (bot, message, args) => {
const LogChannel = message.client.channels.cache.get("700438892888719501");
message.delete()
.catch(err => {
console.log(err)
});
const embed = new Discord.MessageEmbed()
.setTitle('**Commands Available for KFC Bucket Boi**')
.setColor(0x36cbf5)
.addField('__Current Server__', message.guild.name)
.setThumbnail(message.guild.iconURL({ dynamic: true }))
.setAuthor('Creator: King Of Karma#0069', 'https://karmakingdom.weebly.com/uploads/1/3/1/7/131732357/published/pfp.png?1587066957')
            .setDescription(`**Home Server:** https://discord.gg/nQRC3SRUse \n **Invite at:** https://invite.bucketbot.dev \n Use the prefix \`k!\` at the start of a command, e.g. "k!ping"`,)
.addField('**[📚] __Info Commands__**', '```These commands give you some info```')
.addField('help', '- Brings up this menu in your DMs \n**alias: k!h**', true)
.addField('info', '- Info about the bot \n**aliases: k!botinfo, k!bot**', true)
.addField('invite', '- A link to invite me to your servers! \n**aliases: k!iv, k!ivt, k!invt**', true)
.addField('support', '- You can make me post the link to my support/home server!', true)
.addField('membercount ', '- Displays the amount of members/humans/bots in the server \n**aliases: k!members, k!countmembers**', true)
.addField('servers ', '- Displays how many servers I am in \n**alias: k!guilds**', true)
.addField('vote', '- Obtain a link to vote for me at top.gg \n**aliases: k!upvote, k!discord**')
.addField('\u200b', '\u200b')
            .addField('**[🔨] __Utility Commands__**', '```These commands are useful in certain situations```')
.addField('whois', "- Displays a user's userinfo, roles, perms, etc \n**aliases k!userinfo, k!whois**", true)
.addField('ping', "- Bot's latency", true)
.addField('avatar ', "- Displays a user's display avatar (pfp) \n**alias: k!av**", true)
.addField('say', "- I will say what you want!\n**alias: k!s**", true)
.addField('embed ', "- I will say what you want but as an embed! \n**alias: k!eb**", true)
.addField('\u200b', '\u200b')
.setFooter(`Use the Prefix \`k!\` at the start of the command to use the command eg "k!ping"`)
message.author.send(embed)
.catch(err => {
message.reply(`I was unable to DM you ${message.author}.\nMake sure your DMs are enabled!`)
console.log(`Error, \"Help cmd\" Failed Reason: ${err} \nMessageAuthor : ${message.author.tag}\nGuild : ${message.guild.name}`)
const ErrorEmbed = new Discord.MessageEmbed()
.setAuthor(message.author.tag, message.author.displayAvatarURL(({ dynamic: true })))
.setColor("0xFF0000")
.setDescription(`\`\`\`Error, \"Help cmd\" Failed Reason: ${err} \nMessageAuthor : ${message.author.tag}\nGuild : ${message.guild.name}\`\`\``)
.setThumbnail(message.guild.iconURL({ dynamic: true }))
.setFooter(`This bot was brought to you by King Of Karma#0069`, `https://media.discordapp.net/attachments/697238236896165921/700081276912402512/pfp.png?width=481&height=481`)
LogChannel.send(ErrorEmbed)
return
});
const embed2 = new Discord.MessageEmbed()
embed2
.setColor(0x36cbf5)
.setFooter(`This bot was brought to you by King Of Karma#0069`, `https://media.discordapp.net/attachments/697238236896165921/700081276912402512/pfp.png?width=481&height=481`)
.addField('**[🎉] __Fun Commands__**', '```These commands are here for your entertainment```')
.addField('shuffle ', "- Plays the guessing game where you choose 1, 2 or 3\n**aliases: k!shufflehat, k!sh**", true)
// .addField('mc ', "- Displays the information on the offical Karma Kraft Minecraft server!\n**alias: k!server**", true)
.addField('cool ', "- Learn how to be cool! 😎", true)
.addField('kids ', "- Displays how many kids the second argument will have (e.g. k!kids Kaine) \n**aliases: k!kid, k!children**", true)
.addField('kk ', "- That's racist", true)
.addField('8ball', "- Get your fortune \n**aliases: k!eightball k!fortune**", true)
.addField('dadjoke', "- Yes this is a command with over 30 responses \n**alias: k!dad**", true)
.addField('comment', "- Sends your message as a Youtube comment \n **alias: k!cm**")
.addField('\u200b', '\u200b')
.addField('**[📷] __Image Commands__**', '```These are the commands that display images```')
.addField('emote', '- Will display an emote of your choice \n **aliases: k!emotes, k!emoji**')
.addField('image', '- It will send whatever you ask it to send (e.g k!image pillows) \n**aliases: k!pic, k!picture, k!img**', true)
.addField('karma', "- I'll send you a media file from **King of Karma's** personal folder (;\n**alias: k!kaine**", true)
.addField('meme', "- I'll give you a meme from a few select subreddits \n**alias: k!memes**", true)
.addField('wholesome', "- I'll give you a meme from a wholesome subreddit \n**alias: k!cute**", true)
.addField('\u200b', '\u200b')
.addField('**[👇] __Interaction Commands__**', '```These commands are used for interacting with other users```')
.addField('hug', 'Hug a user of your choice!', true)
.addField('kiss', 'Kiss someone <a:kaineflushedeyes:708477282079211570>', true)
.addField('bucket', 'Give someone a wonderful hat! my favourite kind! \n**alias: k!hat**', true)
.addField('rep', 'Allows you to add rep to any user \n**aliases: k!addrep, k!plusrep**', true)
.addField('viewrep', 'Allows you to see how much rep a user has \n**alias: k!repview**', true)
.addField('pp', 'Find a user\'s pp size \n**alias: k!size**', true)
.addField('\u200b', '\u200b')
// .addField('**[⏳] __Weekly Commands__**', '```These commands will send something new every week \n alias: k!week```')
// .addField('weekly meme', '- This command will respond with a new meme of the week every week',true)
message.author.send(embed2);
const embed3 = new Discord.MessageEmbed();
embed3
.setColor(0x36cbf5)
.addField('**[🌠] __Staff Commands__**', '```These commands are to be used by staff of a server```')
.addField('warn', '- Warns a member with a reason', true)
.addField('kick', '- Kicks a member with a reason', true)
.addField('ban', '- Bans a member with a reason', true)
.addField('modlogs', '- Grabs the logs of a user \n**alias: k!modlog**', true)
.addField('purge', '- Mass deletes a select number of msgs \n**aliases: k!prune, k!delete**', true)
.addField('toggle', '- Toggles modules within the bot.\n**alias: k!tg**', true)
.addField('removelogs', '- This removes all cases/logs of the mentioned user. \n**alias: k!rmlogs**', true)
.addField('\u200b', '\u200b')
.addField('**[👑] __XP Commands__**', '```These commands are to be used by all members of a server```')
.addField('xp', '- Shows how much xp a user has\n**alias: k!exp**', true)
.addField('leaderboard', '- Shows the current leaderboard of the server\n**aliases: k!lb and k!top**\n**example: k!leaderboard 10**', true)
.addField('global-leaderboard', "- Shows the current global leaderboard \n **aliases: k!glb and k!gtop**\n **example: k!gtop 10**", true)
// currency
.addField('**[👑] __Currency/Work Commands__**', '```These commands are to be used by all members of a server```')
.addField('bal', '- Shows your current amount of Chicken Nuggies<:chickennuggie:706268265424355399>', true)
.addField('buy', '- Buys an item from the server\'s shop', true)
.addField('inventory', '- Displays your inventory (only your own, for now)', true)
.addField('item', '- An admin-only command to add and remove items from the server\'s shop (max 5 items for now)')
.addField('shop', "- Displays the server's shop, and creates one if it doesn't exist", true)
.addField('work', '- The base command of the work/currency system', true)
.setFooter(`Use the Prefix \`k!\` at the start of the command to use the command eg "k!ping"`)
message.author.send(embed3);
message.reply("Check Dms, make sure they're enabled so i can send you them!");
}
}
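
// A minimal sketch of how a command module like this one is typically wired
// up in a discord.js v12 bot (illustrative only; the real entry point, prefix
// handling and command loader live elsewhere):
//
//   const help = require('./commands/help'); // hypothetical path
//   bot.on('message', message => {
//     if (message.author.bot || !message.content.startsWith('k!')) return;
//     const [name, ...args] = message.content.slice(2).trim().split(/ +/);
//     if (name === help.name || help.aliases.includes(name)) {
//       help.run(bot, message, args);
//     }
//   });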
|
/* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
* Copyright (c) 2007-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
* \file rendservice.c
* \brief The hidden-service side of rendezvous functionality.
**/
#define RENDSERVICE_PRIVATE
#include "or.h"
#include "circpathbias.h"
#include "circuitbuild.h"
#include "circuitlist.h"
#include "circuituse.h"
#include "config.h"
#include "control.h"
#include "directory.h"
#include "main.h"
#include "networkstatus.h"
#include "nodelist.h"
#include "rendclient.h"
#include "rendcommon.h"
#include "rendservice.h"
#include "router.h"
#include "relay.h"
#include "rephist.h"
#include "replaycache.h"
#include "routerlist.h"
#include "routerparse.h"
#include "routerset.h"
struct rend_service_t;
static origin_circuit_t *find_intro_circuit(rend_intro_point_t *intro,
const char *pk_digest);
static rend_intro_point_t *find_intro_point(origin_circuit_t *circ);
static rend_intro_point_t *find_expiring_intro_point(
struct rend_service_t *service, origin_circuit_t *circ);
static extend_info_t *find_rp_for_intro(
const rend_intro_cell_t *intro,
char **err_msg_out);
static int intro_point_accepted_intro_count(rend_intro_point_t *intro);
static int intro_point_should_expire_now(rend_intro_point_t *intro,
time_t now);
static int rend_service_derive_key_digests(struct rend_service_t *s);
static int rend_service_load_keys(struct rend_service_t *s);
static int rend_service_load_auth_keys(struct rend_service_t *s,
const char *hfname);
static struct rend_service_t *rend_service_get_by_pk_digest(
const char* digest);
static struct rend_service_t *rend_service_get_by_service_id(const char *id);
static const char *rend_service_escaped_dir(
const struct rend_service_t *s);
static ssize_t rend_service_parse_intro_for_v0_or_v1(
rend_intro_cell_t *intro,
const uint8_t *buf,
size_t plaintext_len,
char **err_msg_out);
static ssize_t rend_service_parse_intro_for_v2(
rend_intro_cell_t *intro,
const uint8_t *buf,
size_t plaintext_len,
char **err_msg_out);
static ssize_t rend_service_parse_intro_for_v3(
rend_intro_cell_t *intro,
const uint8_t *buf,
size_t plaintext_len,
char **err_msg_out);
/** Represents the mapping from a virtual port of a rendezvous service to
* a real port on some IP.
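 *
 * For example, a service might map virtual port 80 to 127.0.0.1:8080, or
 * to an AF_UNIX socket path (illustrative).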
*/
struct rend_service_port_config_s {
/* The incoming HS virtual port we're mapping */
uint16_t virtual_port;
/* Is this an AF_UNIX port? */
unsigned int is_unix_addr:1;
/* The outgoing TCP port to use, if !is_unix_addr */
uint16_t real_port;
/* The outgoing IPv4 or IPv6 address to use, if !is_unix_addr */
tor_addr_t real_addr;
/* The socket path to connect to, if is_unix_addr */
char unix_addr[FLEXIBLE_ARRAY_MEMBER];
};
/** Try to maintain this many intro points per service by default. */
#define NUM_INTRO_POINTS_DEFAULT 3
/** Maximum number of intro points per service. */
#define NUM_INTRO_POINTS_MAX 10
/** Number of extra intro points we launch if our set of intro nodes is
* empty. See proposal 155, section 4. */
#define NUM_INTRO_POINTS_EXTRA 2
/** If we can't build our intro circuits, don't retry for this long. */
#define INTRO_CIRC_RETRY_PERIOD (60*5)
/** Don't try to build more than this many circuits before giving up
* for a while.*/
#define MAX_INTRO_CIRCS_PER_PERIOD 10
/** How many times will a hidden service operator attempt to connect to
* a requested rendezvous point before giving up? */
#define MAX_REND_FAILURES 1
/** How many seconds should we spend trying to connect to a requested
* rendezvous point before giving up? */
#define MAX_REND_TIMEOUT 30
/** Represents a single hidden service running at this OP. */
typedef struct rend_service_t {
/* Fields specified in config file */
char *directory; /**< where in the filesystem it stores it. Will be NULL if
* this service is ephemeral. */
int dir_group_readable; /**< if 1, allow group read
permissions on directory */
smartlist_t *ports; /**< List of rend_service_port_config_t */
rend_auth_type_t auth_type; /**< Client authorization type or 0 if no client
* authorization is performed. */
smartlist_t *clients; /**< List of rend_authorized_client_t's of
* clients that may access our service. Can be NULL
* if no client authorization is performed. */
/* Other fields */
crypto_pk_t *private_key; /**< Permanent hidden-service key. */
char service_id[REND_SERVICE_ID_LEN_BASE32+1]; /**< Onion address without
* '.onion' */
char pk_digest[DIGEST_LEN]; /**< Hash of permanent hidden-service key. */
smartlist_t *intro_nodes; /**< List of rend_intro_point_t's we have,
* or are trying to establish. */
/** List of rend_intro_point_t that are expiring. They are removed once
* the new descriptor is successfully uploaded. A node in this list CAN
* NOT appear in the intro_nodes list. */
smartlist_t *expiring_nodes;
time_t intro_period_started; /**< Start of the current period to build
* introduction points. */
int n_intro_circuits_launched; /**< Count of intro circuits we have
* established in this period. */
unsigned int n_intro_points_wanted; /**< Number of intro points this
* service wants to have open. */
rend_service_descriptor_t *desc; /**< Current hidden service descriptor. */
time_t desc_is_dirty; /**< Time at which changes to the hidden service
* descriptor content occurred, or 0 if it's
* up-to-date. */
time_t next_upload_time; /**< Scheduled next hidden service descriptor
* upload time. */
/** Replay cache for Diffie-Hellman values of INTRODUCE2 cells, to
* detect repeats. Clients may send INTRODUCE1 cells for the same
* rendezvous point through two or more different introduction points;
* when they do, this keeps us from launching multiple simultaneous attempts
* to connect to the same rend point. */
replaycache_t *accepted_intro_dh_parts;
/** If true, we don't close circuits for making requests to unsupported
* ports. */
int allow_unknown_ports;
/** The maximum number of simultaneous streams-per-circuit that are allowed
* to be established, or 0 if no limit is set.
*/
int max_streams_per_circuit;
/** If true, we close circuits that exceed the max_streams_per_circuit
* limit. */
int max_streams_close_circuit;
} rend_service_t;
/** Returns an escaped string representation of the service, <b>s</b>.
*/
static const char *
rend_service_escaped_dir(const struct rend_service_t *s)
{
return (s->directory) ? escaped(s->directory) : "[EPHEMERAL]";
}
/** A list of rend_service_t's for services run on this OP.
*/
static smartlist_t *rend_service_list = NULL;
/** Return the number of rendezvous services we have configured. */
int
num_rend_services(void)
{
if (!rend_service_list)
return 0;
return smartlist_len(rend_service_list);
}
/** Helper: free storage held by a single service authorized client entry. */
static void
rend_authorized_client_free(rend_authorized_client_t *client)
{
if (!client)
return;
if (client->client_key)
crypto_pk_free(client->client_key);
memwipe(client->client_name, 0, strlen(client->client_name));
tor_free(client->client_name);
memwipe(client->descriptor_cookie, 0, sizeof(client->descriptor_cookie));
tor_free(client);
}
/** Helper for strmap_free. */
static void
rend_authorized_client_strmap_item_free(void *authorized_client)
{
rend_authorized_client_free(authorized_client);
}
/** Release the storage held by <b>service</b>.
*/
static void
rend_service_free(rend_service_t *service)
{
if (!service)
return;
tor_free(service->directory);
SMARTLIST_FOREACH(service->ports, rend_service_port_config_t*, p,
rend_service_port_config_free(p));
smartlist_free(service->ports);
if (service->private_key)
crypto_pk_free(service->private_key);
if (service->intro_nodes) {
SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro,
rend_intro_point_free(intro););
smartlist_free(service->intro_nodes);
}
if (service->expiring_nodes) {
SMARTLIST_FOREACH(service->expiring_nodes, rend_intro_point_t *, intro,
rend_intro_point_free(intro););
smartlist_free(service->expiring_nodes);
}
rend_service_descriptor_free(service->desc);
if (service->clients) {
SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, c,
rend_authorized_client_free(c););
smartlist_free(service->clients);
}
if (service->accepted_intro_dh_parts) {
replaycache_free(service->accepted_intro_dh_parts);
}
tor_free(service);
}
/** Release all the storage held in rend_service_list.
*/
void
rend_service_free_all(void)
{
if (!rend_service_list)
return;
SMARTLIST_FOREACH(rend_service_list, rend_service_t*, ptr,
rend_service_free(ptr));
smartlist_free(rend_service_list);
rend_service_list = NULL;
}
/** Validate <b>service</b> and add it to rend_service_list if possible.
* Return 0 on success. On failure, free <b>service</b> and return -1.
*/
static int
rend_add_service(rend_service_t *service)
{
int i;
rend_service_port_config_t *p;
service->intro_nodes = smartlist_new();
service->expiring_nodes = smartlist_new();
if (service->max_streams_per_circuit < 0) {
log_warn(LD_CONFIG, "Hidden service (%s) configured with negative max "
"streams per circuit; ignoring.",
rend_service_escaped_dir(service));
rend_service_free(service);
return -1;
}
if (service->max_streams_close_circuit < 0 ||
service->max_streams_close_circuit > 1) {
log_warn(LD_CONFIG, "Hidden service (%s) configured with invalid "
"max streams handling; ignoring.",
rend_service_escaped_dir(service));
rend_service_free(service);
return -1;
}
if (service->auth_type != REND_NO_AUTH &&
smartlist_len(service->clients) == 0) {
log_warn(LD_CONFIG, "Hidden service (%s) with client authorization but no "
"clients; ignoring.",
rend_service_escaped_dir(service));
rend_service_free(service);
return -1;
}
if (!smartlist_len(service->ports)) {
log_warn(LD_CONFIG, "Hidden service (%s) with no ports configured; "
"ignoring.",
rend_service_escaped_dir(service));
rend_service_free(service);
return -1;
} else {
int dupe = 0;
/* XXX This duplicate check has two problems:
*
* a) It's O(n^2), but the same comment from the bottom of
* rend_config_services() should apply.
*
* b) We only compare directory paths as strings, so we can't
* detect two distinct paths that specify the same directory
* (which can arise from symlinks, case-insensitivity, bind
* mounts, etc.).
*
* It also can't detect that two separate Tor instances are trying
* to use the same HiddenServiceDir; for that, we would need a
* lock file. But this is enough to detect a simple mistake that
* at least one person has actually made.
*/
if (service->directory != NULL) { /* Skip dupe for ephemeral services. */
SMARTLIST_FOREACH(rend_service_list, rend_service_t*, ptr,
dupe = dupe ||
!strcmp(ptr->directory, service->directory));
if (dupe) {
log_warn(LD_REND, "Another hidden service is already configured for "
"directory %s, ignoring.",
rend_service_escaped_dir(service));
rend_service_free(service);
return -1;
}
}
smartlist_add(rend_service_list, service);
log_debug(LD_REND,"Configuring service with directory \"%s\"",
service->directory);
for (i = 0; i < smartlist_len(service->ports); ++i) {
p = smartlist_get(service->ports, i);
if (!(p->is_unix_addr)) {
log_debug(LD_REND,
"Service maps port %d to %s",
p->virtual_port,
fmt_addrport(&p->real_addr, p->real_port));
} else {
#ifdef HAVE_SYS_UN_H
log_debug(LD_REND,
"Service maps port %d to socket at \"%s\"",
p->virtual_port, p->unix_addr);
#else
log_debug(LD_REND,
"Service maps port %d to an AF_UNIX socket, but we "
"have no AF_UNIX support on this platform. This is "
"probably a bug.",
p->virtual_port);
#endif /* defined(HAVE_SYS_UN_H) */
}
}
return 0;
}
/* NOTREACHED */
}
/** Return a new rend_service_port_config_t with its path set to
* <b>socket_path</b> or empty if <b>socket_path</b> is NULL */
static rend_service_port_config_t *
rend_service_port_config_new(const char *socket_path)
{
if (!socket_path)
return tor_malloc_zero(sizeof(rend_service_port_config_t) + 1);
const size_t pathlen = strlen(socket_path) + 1;
rend_service_port_config_t *conf =
tor_malloc_zero(sizeof(rend_service_port_config_t) + pathlen);
memcpy(conf->unix_addr, socket_path, pathlen);
conf->is_unix_addr = 1;
return conf;
}
/** Parses a real-port to virtual-port mapping separated by the provided
* separator and returns a new rend_service_port_config_t, or NULL and an
* optional error string on failure.
*
* The format is: VirtualPort SEP (IP|RealPort|IP:RealPort|'socket':path)?
*
* IP defaults to 127.0.0.1; RealPort defaults to VirtualPort.
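 *
 * Illustrative inputs with SEP == " " (torrc HiddenServicePort syntax):
 *   "80 127.0.0.1:8080"        maps virtual port 80 to 127.0.0.1:8080
 *   "22"                       maps virtual port 22 to 127.0.0.1:22
 *   "80 unix:/path/to/socket"  maps virtual port 80 to an AF_UNIX socket
 *     (this form is the one handled via config_parse_unix_port() below)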
*/
rend_service_port_config_t *
rend_service_parse_port_config(const char *string, const char *sep,
char **err_msg_out)
{
smartlist_t *sl;
int virtport;
int realport = 0;
uint16_t p;
tor_addr_t addr;
const char *addrport;
rend_service_port_config_t *result = NULL;
unsigned int is_unix_addr = 0;
char *socket_path = NULL;
char *err_msg = NULL;
sl = smartlist_new();
smartlist_split_string(sl, string, sep,
SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
if (smartlist_len(sl) < 1 || smartlist_len(sl) > 2) {
if (err_msg_out)
err_msg = tor_strdup("Bad syntax in hidden service port configuration.");
goto err;
}
virtport = (int)tor_parse_long(smartlist_get(sl,0), 10, 1, 65535, NULL,NULL);
if (!virtport) {
if (err_msg_out)
tor_asprintf(&err_msg, "Missing or invalid port %s in hidden service "
"port configuration", escaped(smartlist_get(sl,0)));
goto err;
}
if (smartlist_len(sl) == 1) {
/* No addr:port part; use default. */
realport = virtport;
tor_addr_from_ipv4h(&addr, 0x7F000001u); /* 127.0.0.1 */
} else {
int ret;
addrport = smartlist_get(sl,1);
ret = config_parse_unix_port(addrport, &socket_path);
if (ret < 0 && ret != -ENOENT) {
if (ret == -EINVAL)
if (err_msg_out)
err_msg = tor_strdup("Empty socket path in hidden service port "
"configuration.");
goto err;
}
if (socket_path) {
is_unix_addr = 1;
} else if (strchr(addrport, ':') || strchr(addrport, '.')) {
/* else try it as an IP:port pair if it has a : or . in it */
if (tor_addr_port_lookup(addrport, &addr, &p)<0) {
if (err_msg_out)
err_msg = tor_strdup("Unparseable address in hidden service port "
"configuration.");
goto err;
}
realport = p?p:virtport;
} else {
/* No addr:port, no addr -- must be port. */
realport = (int)tor_parse_long(addrport, 10, 1, 65535, NULL, NULL);
if (!realport) {
if (err_msg_out)
tor_asprintf(&err_msg, "Unparseable or out-of-range port %s in "
"hidden service port configuration.",
escaped(addrport));
goto err;
}
tor_addr_from_ipv4h(&addr, 0x7F000001u); /* Default to 127.0.0.1 */
}
}
/* Allow room for unix_addr */
result = rend_service_port_config_new(socket_path);
result->virtual_port = virtport;
result->is_unix_addr = is_unix_addr;
if (!is_unix_addr) {
result->real_port = realport;
tor_addr_copy(&result->real_addr, &addr);
result->unix_addr[0] = '\0';
}
err:
if (err_msg_out) *err_msg_out = err_msg;
SMARTLIST_FOREACH(sl, char *, c, tor_free(c));
smartlist_free(sl);
if (socket_path) tor_free(socket_path);
return result;
}
/** Release all storage held in a rend_service_port_config_t. */
void
rend_service_port_config_free(rend_service_port_config_t *p)
{
tor_free(p);
}
/** Set up rend_service_list, based on the values of HiddenServiceDir and
* HiddenServicePort in <b>options</b>. Return 0 on success and -1 on
* failure. (If <b>validate_only</b> is set, parse, warn and return as
* normal, but don't actually change the configured services.)
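 *
 * A typical torrc fragment consumed here looks like (illustrative):
 *   HiddenServiceDir /var/lib/tor/hidden_service/
 *   HiddenServicePort 80 127.0.0.1:8080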
*/
MOCK_IMPL(int,
rend_config_services,(const or_options_t *options, int validate_only))
{
config_line_t *line;
rend_service_t *service = NULL;
rend_service_port_config_t *portcfg;
smartlist_t *old_service_list = NULL;
int ok = 0;
if (!validate_only) {
old_service_list = rend_service_list;
rend_service_list = smartlist_new();
}
for (line = options->RendConfigLines; line; line = line->next) {
if (!strcasecmp(line->key, "HiddenServiceDir")) {
if (service) { /* register the one we just finished parsing */
if (validate_only)
rend_service_free(service);
else
rend_add_service(service);
}
service = tor_malloc_zero(sizeof(rend_service_t));
service->directory = tor_strdup(line->value);
service->ports = smartlist_new();
service->intro_period_started = time(NULL);
service->n_intro_points_wanted = NUM_INTRO_POINTS_DEFAULT;
continue;
}
if (!service) {
log_warn(LD_CONFIG, "%s with no preceding HiddenServiceDir directive",
line->key);
rend_service_free(service);
return -1;
}
if (!strcasecmp(line->key, "HiddenServicePort")) {
char *err_msg = NULL;
portcfg = rend_service_parse_port_config(line->value, " ", &err_msg);
if (!portcfg) {
if (err_msg)
log_warn(LD_CONFIG, "%s", err_msg);
tor_free(err_msg);
rend_service_free(service);
return -1;
}
tor_assert(!err_msg);
smartlist_add(service->ports, portcfg);
} else if (!strcasecmp(line->key, "HiddenServiceAllowUnknownPorts")) {
service->allow_unknown_ports = (int)tor_parse_long(line->value,
10, 0, 1, &ok, NULL);
if (!ok) {
log_warn(LD_CONFIG,
"HiddenServiceAllowUnknownPorts should be 0 or 1, not %s",
line->value);
rend_service_free(service);
return -1;
}
log_info(LD_CONFIG,
"HiddenServiceAllowUnknownPorts=%d for %s",
(int)service->allow_unknown_ports, service->directory);
} else if (!strcasecmp(line->key,
"HiddenServiceDirGroupReadable")) {
service->dir_group_readable = (int)tor_parse_long(line->value,
10, 0, 1, &ok, NULL);
if (!ok) {
log_warn(LD_CONFIG,
"HiddenServiceDirGroupReadable should be 0 or 1, not %s",
line->value);
rend_service_free(service);
return -1;
}
log_info(LD_CONFIG,
"HiddenServiceDirGroupReadable=%d for %s",
service->dir_group_readable, service->directory);
} else if (!strcasecmp(line->key, "HiddenServiceMaxStreams")) {
service->max_streams_per_circuit = (int)tor_parse_long(line->value,
10, 0, 65535, &ok, NULL);
if (!ok) {
log_warn(LD_CONFIG,
"HiddenServiceMaxStreams should be between 0 and %d, not %s",
65535, line->value);
rend_service_free(service);
return -1;
}
log_info(LD_CONFIG,
"HiddenServiceMaxStreams=%d for %s",
service->max_streams_per_circuit, service->directory);
} else if (!strcasecmp(line->key, "HiddenServiceMaxStreamsCloseCircuit")) {
service->max_streams_close_circuit = (int)tor_parse_long(line->value,
10, 0, 1, &ok, NULL);
if (!ok) {
log_warn(LD_CONFIG,
"HiddenServiceMaxStreamsCloseCircuit should be 0 or 1, "
"not %s",
line->value);
rend_service_free(service);
return -1;
}
log_info(LD_CONFIG,
"HiddenServiceMaxStreamsCloseCircuit=%d for %s",
(int)service->max_streams_close_circuit, service->directory);
} else if (!strcasecmp(line->key, "HiddenServiceNumIntroductionPoints")) {
service->n_intro_points_wanted =
(unsigned int) tor_parse_long(line->value, 10,
NUM_INTRO_POINTS_DEFAULT,
NUM_INTRO_POINTS_MAX, &ok, NULL);
if (!ok) {
log_warn(LD_CONFIG,
"HiddenServiceNumIntroductionPoints "
"should be between %d and %d, not %s",
NUM_INTRO_POINTS_DEFAULT, NUM_INTRO_POINTS_MAX,
line->value);
rend_service_free(service);
return -1;
}
log_info(LD_CONFIG, "HiddenServiceNumIntroductionPoints=%d for %s",
service->n_intro_points_wanted, service->directory);
} else if (!strcasecmp(line->key, "HiddenServiceAuthorizeClient")) {
/* Parse auth type and comma-separated list of client names and add a
* rend_authorized_client_t for each client to the service's list
* of authorized clients. */
smartlist_t *type_names_split, *clients;
const char *authname;
int num_clients;
if (service->auth_type != REND_NO_AUTH) {
log_warn(LD_CONFIG, "Got multiple HiddenServiceAuthorizeClient "
"lines for a single service.");
rend_service_free(service);
return -1;
}
type_names_split = smartlist_new();
smartlist_split_string(type_names_split, line->value, " ", 0, 2);
if (smartlist_len(type_names_split) < 1) {
log_warn(LD_BUG, "HiddenServiceAuthorizeClient has no value. This "
"should have been prevented when parsing the "
"configuration.");
smartlist_free(type_names_split);
rend_service_free(service);
return -1;
}
authname = smartlist_get(type_names_split, 0);
if (!strcasecmp(authname, "basic")) {
service->auth_type = REND_BASIC_AUTH;
} else if (!strcasecmp(authname, "stealth")) {
service->auth_type = REND_STEALTH_AUTH;
} else {
log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains "
"unrecognized auth-type '%s'. Only 'basic' or 'stealth' "
"are recognized.",
(char *) smartlist_get(type_names_split, 0));
SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp));
smartlist_free(type_names_split);
rend_service_free(service);
return -1;
}
service->clients = smartlist_new();
if (smartlist_len(type_names_split) < 2) {
log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains "
"auth-type '%s', but no client names.",
service->auth_type == REND_BASIC_AUTH ? "basic" : "stealth");
SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp));
smartlist_free(type_names_split);
continue;
}
clients = smartlist_new();
smartlist_split_string(clients, smartlist_get(type_names_split, 1),
",", SPLIT_SKIP_SPACE, 0);
SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp));
smartlist_free(type_names_split);
/* Remove duplicate client names. */
num_clients = smartlist_len(clients);
smartlist_sort_strings(clients);
smartlist_uniq_strings(clients);
if (smartlist_len(clients) < num_clients) {
log_info(LD_CONFIG, "HiddenServiceAuthorizeClient contains %d "
"duplicate client name(s); removing.",
num_clients - smartlist_len(clients));
num_clients = smartlist_len(clients);
}
SMARTLIST_FOREACH_BEGIN(clients, const char *, client_name)
{
rend_authorized_client_t *client;
size_t len = strlen(client_name);
if (len < 1 || len > REND_CLIENTNAME_MAX_LEN) {
log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains an "
"illegal client name: '%s'. Length must be "
"between 1 and %d characters.",
client_name, REND_CLIENTNAME_MAX_LEN);
SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp));
smartlist_free(clients);
rend_service_free(service);
return -1;
}
if (strspn(client_name, REND_LEGAL_CLIENTNAME_CHARACTERS) != len) {
log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains an "
"illegal client name: '%s'. Valid "
"characters are [A-Za-z0-9+_-].",
client_name);
SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp));
smartlist_free(clients);
rend_service_free(service);
return -1;
}
client = tor_malloc_zero(sizeof(rend_authorized_client_t));
client->client_name = tor_strdup(client_name);
smartlist_add(service->clients, client);
log_debug(LD_REND, "Adding client name '%s'", client_name);
}
SMARTLIST_FOREACH_END(client_name);
SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp));
smartlist_free(clients);
/* Ensure maximum number of clients. */
if ((service->auth_type == REND_BASIC_AUTH &&
smartlist_len(service->clients) > 512) ||
(service->auth_type == REND_STEALTH_AUTH &&
smartlist_len(service->clients) > 16)) {
log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains %d "
"client authorization entries, but only a "
"maximum of %d entries is allowed for "
"authorization type '%s'.",
smartlist_len(service->clients),
service->auth_type == REND_BASIC_AUTH ? 512 : 16,
service->auth_type == REND_BASIC_AUTH ? "basic" : "stealth");
rend_service_free(service);
return -1;
}
} else {
tor_assert(!strcasecmp(line->key, "HiddenServiceVersion"));
if (strcmp(line->value, "2")) {
log_warn(LD_CONFIG,
"The only supported HiddenServiceVersion is 2.");
rend_service_free(service);
return -1;
}
}
}
if (service) {
cpd_check_t check_opts = CPD_CHECK_MODE_ONLY|CPD_CHECK;
if (service->dir_group_readable) {
check_opts |= CPD_GROUP_READ;
}
if (check_private_dir(service->directory, check_opts, options->User) < 0) {
rend_service_free(service);
return -1;
}
if (validate_only) {
rend_service_free(service);
} else {
rend_add_service(service);
}
}
/* If this is a reload and there were hidden services configured before,
* keep the introduction points that are still needed and close the
* other ones. */
if (old_service_list && !validate_only) {
smartlist_t *surviving_services = smartlist_new();
/* Preserve the existing ephemeral services.
*
* This is the ephemeral service equivalent of the "Copy introduction
* points to new services" block, except there's no copy required since
* the service structure isn't regenerated.
*
* After this is done, all ephemeral services will be:
* * Removed from old_service_list, so the equivalent non-ephemeral code
* will not attempt to preserve them.
* * Added to the new rend_service_list (that previously only had the
* services listed in the configuration).
* * Added to surviving_services, which is the list of services that
* will NOT have their intro point closed.
*/
SMARTLIST_FOREACH(old_service_list, rend_service_t *, old, {
if (!old->directory) {
SMARTLIST_DEL_CURRENT(old_service_list, old);
smartlist_add(surviving_services, old);
smartlist_add(rend_service_list, old);
}
});
/* Copy introduction points to new services. */
/* XXXX This is O(n^2), but it's only called on reconfigure, so it's
* probably ok? */
SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, new) {
SMARTLIST_FOREACH_BEGIN(old_service_list, rend_service_t *, old) {
if (new->directory && old->directory &&
!strcmp(old->directory, new->directory)) {
smartlist_add_all(new->intro_nodes, old->intro_nodes);
smartlist_clear(old->intro_nodes);
smartlist_add_all(new->expiring_nodes, old->expiring_nodes);
smartlist_clear(old->expiring_nodes);
smartlist_add(surviving_services, old);
break;
}
} SMARTLIST_FOREACH_END(old);
} SMARTLIST_FOREACH_END(new);
/* Close introduction circuits of services we don't serve anymore. */
/* XXXX it would be nicer if we had a nicer abstraction to use here,
* so we could just iterate over the list of services to close, but
* once again, this isn't critical-path code. */
SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
if (!circ->marked_for_close &&
circ->state == CIRCUIT_STATE_OPEN &&
(circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) {
origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ);
int keep_it = 0;
tor_assert(oc->rend_data);
SMARTLIST_FOREACH(surviving_services, rend_service_t *, ptr, {
if (tor_memeq(ptr->pk_digest, oc->rend_data->rend_pk_digest,
DIGEST_LEN)) {
keep_it = 1;
break;
}
});
if (keep_it)
continue;
log_info(LD_REND, "Closing intro point %s for service %s.",
safe_str_client(extend_info_describe(
oc->build_state->chosen_exit)),
oc->rend_data->onion_address);
circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
/* XXXX Is there another reason we should use here? */
}
}
SMARTLIST_FOREACH_END(circ);
smartlist_free(surviving_services);
SMARTLIST_FOREACH(old_service_list, rend_service_t *, ptr,
rend_service_free(ptr));
smartlist_free(old_service_list);
}
return 0;
}
/** Add the ephemeral service <b>pk</b>/<b>ports</b> if possible, with
* <b>max_streams_per_circuit</b> streams allowed per rendezvous circuit,
* and circuit closure on max streams being exceeded set by
* <b>max_streams_close_circuit</b>.
*
 * Regardless of success/failure, callers should not touch pk/ports after
* calling this routine, and may assume that correct cleanup has been done
* on failure.
*
* Return an appropriate rend_service_add_ephemeral_status_t.
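 *
 * This is the backend for ephemeral services created via the control port,
 * e.g. a controller request like "ADD_ONION NEW:BEST Port=80,127.0.0.1:8080"
 * (illustrative; see control-spec.txt for the exact syntax).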
*/
rend_service_add_ephemeral_status_t
rend_service_add_ephemeral(crypto_pk_t *pk,
smartlist_t *ports,
int max_streams_per_circuit,
int max_streams_close_circuit,
char **service_id_out)
{
*service_id_out = NULL;
/* Allocate the service structure, and initialize the key, and key derived
* parameters.
*/
rend_service_t *s = tor_malloc_zero(sizeof(rend_service_t));
s->directory = NULL; /* This indicates the service is ephemeral. */
s->private_key = pk;
s->auth_type = REND_NO_AUTH;
s->ports = ports;
s->intro_period_started = time(NULL);
s->n_intro_points_wanted = NUM_INTRO_POINTS_DEFAULT;
s->max_streams_per_circuit = max_streams_per_circuit;
s->max_streams_close_circuit = max_streams_close_circuit;
if (rend_service_derive_key_digests(s) < 0) {
rend_service_free(s);
return RSAE_BADPRIVKEY;
}
if (!s->ports || smartlist_len(s->ports) == 0) {
log_warn(LD_CONFIG, "At least one VIRTPORT/TARGET must be specified.");
rend_service_free(s);
return RSAE_BADVIRTPORT;
}
/* Enforcing pk/id uniqueness should be done by rend_service_load_keys(), but
* it's not, see #14828.
*/
if (rend_service_get_by_pk_digest(s->pk_digest)) {
log_warn(LD_CONFIG, "Onion Service private key collides with an "
"existing service.");
rend_service_free(s);
return RSAE_ADDREXISTS;
}
if (rend_service_get_by_service_id(s->service_id)) {
log_warn(LD_CONFIG, "Onion Service id collides with an existing service.");
rend_service_free(s);
return RSAE_ADDREXISTS;
}
/* Initialize the service. */
if (rend_add_service(s)) {
return RSAE_INTERNAL;
}
*service_id_out = tor_strdup(s->service_id);
log_debug(LD_CONFIG, "Added ephemeral Onion Service: %s", s->service_id);
return RSAE_OKAY;
}
/** Remove the ephemeral service <b>service_id</b> if possible. Returns 0 on
* success, and -1 on failure.
*/
int
rend_service_del_ephemeral(const char *service_id)
{
rend_service_t *s;
if (!rend_valid_service_id(service_id)) {
log_warn(LD_CONFIG, "Requested malformed Onion Service id for removal.");
return -1;
}
if ((s = rend_service_get_by_service_id(service_id)) == NULL) {
log_warn(LD_CONFIG, "Requested non-existent Onion Service id for "
"removal.");
return -1;
}
if (s->directory) {
log_warn(LD_CONFIG, "Requested non-ephemeral Onion Service for removal.");
return -1;
}
/* Kill the intro point circuit for the Onion Service, and remove it from
* the list. Closing existing connections is the application's problem.
*
* XXX: As with the comment in rend_config_services(), a nice abstraction
* would be ideal here, but for now just duplicate the code.
*/
SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
if (!circ->marked_for_close &&
circ->state == CIRCUIT_STATE_OPEN &&
(circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) {
origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ);
tor_assert(oc->rend_data);
if (!tor_memeq(s->pk_digest, oc->rend_data->rend_pk_digest, DIGEST_LEN))
continue;
log_debug(LD_REND, "Closing intro point %s for service %s.",
safe_str_client(extend_info_describe(
oc->build_state->chosen_exit)),
oc->rend_data->onion_address);
circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED);
}
} SMARTLIST_FOREACH_END(circ);
smartlist_remove(rend_service_list, s);
rend_service_free(s);
log_debug(LD_CONFIG, "Removed ephemeral Onion Service: %s", service_id);
return 0;
}
/** Replace the old value of <b>service</b>-\>desc with one that reflects
* the other fields in service.
*/
static void
rend_service_update_descriptor(rend_service_t *service)
{
rend_service_descriptor_t *d;
origin_circuit_t *circ;
int i;
rend_service_descriptor_free(service->desc);
service->desc = NULL;
d = service->desc = tor_malloc_zero(sizeof(rend_service_descriptor_t));
d->pk = crypto_pk_dup_key(service->private_key);
d->timestamp = time(NULL);
d->timestamp -= d->timestamp % 3600; /* Round down to nearest hour */
d->intro_nodes = smartlist_new();
/* Support intro protocols 2 and 3. */
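  /* (a bitmask with bits 2 and 3 set: (1<<2)+(1<<3) == 0x0c) */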
d->protocols = (1 << 2) + (1 << 3);
for (i = 0; i < smartlist_len(service->intro_nodes); ++i) {
rend_intro_point_t *intro_svc = smartlist_get(service->intro_nodes, i);
rend_intro_point_t *intro_desc;
/* This intro point won't be listed in the descriptor... */
intro_svc->listed_in_last_desc = 0;
circ = find_intro_circuit(intro_svc, service->pk_digest);
if (!circ || circ->base_.purpose != CIRCUIT_PURPOSE_S_INTRO) {
/* This intro point's circuit isn't finished yet. Don't list it. */
continue;
}
/* ...unless this intro point is listed in the descriptor. */
intro_svc->listed_in_last_desc = 1;
/* We have an entirely established intro circuit. Publish it in
* our descriptor. */
intro_desc = tor_malloc_zero(sizeof(rend_intro_point_t));
intro_desc->extend_info = extend_info_dup(intro_svc->extend_info);
if (intro_svc->intro_key)
intro_desc->intro_key = crypto_pk_dup_key(intro_svc->intro_key);
smartlist_add(d->intro_nodes, intro_desc);
if (intro_svc->time_published == -1) {
/* We are publishing this intro point in a descriptor for the
* first time -- note the current time in the service's copy of
* the intro point. */
intro_svc->time_published = time(NULL);
}
}
}
/** Load and/or generate private keys for all hidden services, possibly
* including keys for client authorization. Return 0 on success, -1 on
* failure. */
MOCK_IMPL(int,
rend_service_load_all_keys, (void))
{
SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, s) {
if (s->private_key)
continue;
log_info(LD_REND, "Loading hidden-service keys from \"%s\"",
s->directory);
if (rend_service_load_keys(s) < 0)
return -1;
} SMARTLIST_FOREACH_END(s);
return 0;
}
/** Add to <b>lst</b> every filename used by <b>s</b>. */
static void
rend_service_add_filenames_to_list(smartlist_t *lst, const rend_service_t *s)
{
tor_assert(lst);
tor_assert(s);
tor_assert(s->directory);
smartlist_add_asprintf(lst, "%s"PATH_SEPARATOR"private_key",
s->directory);
smartlist_add_asprintf(lst, "%s"PATH_SEPARATOR"hostname",
s->directory);
smartlist_add_asprintf(lst, "%s"PATH_SEPARATOR"client_keys",
s->directory);
}
/** Add to <b>open_lst</b> every filename used by a configured hidden service,
* and to <b>stat_lst</b> every directory used by a configured hidden
* service */
void
rend_services_add_filenames_to_lists(smartlist_t *open_lst,
smartlist_t *stat_lst)
{
if (!rend_service_list)
return;
SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, s) {
if (s->directory) {
rend_service_add_filenames_to_list(open_lst, s);
smartlist_add(stat_lst, tor_strdup(s->directory));
}
} SMARTLIST_FOREACH_END(s);
}
/** Derive all rend_service_t internal material based on the service's key.
 * Returns 0 on success, -1 on failure.
*/
static int
rend_service_derive_key_digests(struct rend_service_t *s)
{
if (rend_get_service_id(s->private_key, s->service_id)<0) {
log_warn(LD_BUG, "Internal error: couldn't encode service ID.");
return -1;
}
if (crypto_pk_get_digest(s->private_key, s->pk_digest)<0) {
log_warn(LD_BUG, "Couldn't compute hash of public key.");
return -1;
}
return 0;
}
/** Load and/or generate private keys for the hidden service <b>s</b>,
* possibly including keys for client authorization. Return 0 on success, -1
* on failure. */
static int
rend_service_load_keys(rend_service_t *s)
{
char fname[512];
char buf[128];
cpd_check_t check_opts = CPD_CREATE;
if (s->dir_group_readable) {
check_opts |= CPD_GROUP_READ;
}
/* Check/create directory */
if (check_private_dir(s->directory, check_opts, get_options()->User) < 0) {
return -1;
}
#ifndef _WIN32
if (s->dir_group_readable) {
/* Only new dirs created get new opts, also enforce group read. */
if (chmod(s->directory, 0750)) {
log_warn(LD_FS,"Unable to make %s group-readable.", s->directory);
}
}
#endif
/* Load key */
if (strlcpy(fname,s->directory,sizeof(fname)) >= sizeof(fname) ||
strlcat(fname,PATH_SEPARATOR"private_key",sizeof(fname))
>= sizeof(fname)) {
log_warn(LD_CONFIG, "Directory name too long to store key file: \"%s\".",
s->directory);
return -1;
}
s->private_key = init_key_from_file(fname, 1, LOG_ERR, 0);
if (!s->private_key)
return -1;
if (rend_service_derive_key_digests(s) < 0)
return -1;
/* Create service file */
if (strlcpy(fname,s->directory,sizeof(fname)) >= sizeof(fname) ||
strlcat(fname,PATH_SEPARATOR"hostname",sizeof(fname))
>= sizeof(fname)) {
log_warn(LD_CONFIG, "Directory name too long to store hostname file:"
" \"%s\".", s->directory);
return -1;
}
tor_snprintf(buf, sizeof(buf),"%s.onion\n", s->service_id);
if (write_str_to_file(fname,buf,0)<0) {
log_warn(LD_CONFIG, "Could not write onion address to hostname file.");
memwipe(buf, 0, sizeof(buf));
return -1;
}
#ifndef _WIN32
if (s->dir_group_readable) {
/* Also verify hostname file created with group read. */
if (chmod(fname, 0640))
log_warn(LD_FS,"Unable to make hidden hostname file %s group-readable.",
fname);
}
#endif
memwipe(buf, 0, sizeof(buf));
/* If client authorization is configured, load or generate keys. */
if (s->auth_type != REND_NO_AUTH) {
if (rend_service_load_auth_keys(s, fname) < 0)
return -1;
}
return 0;
}
/** Load and/or generate client authorization keys for the hidden service
* <b>s</b>, which stores its hostname in <b>hfname</b>. Return 0 on success,
* -1 on failure. */
static int
rend_service_load_auth_keys(rend_service_t *s, const char *hfname)
{
int r = 0;
char cfname[512];
char *client_keys_str = NULL;
strmap_t *parsed_clients = strmap_new();
FILE *cfile, *hfile;
open_file_t *open_cfile = NULL, *open_hfile = NULL;
char extended_desc_cookie[REND_DESC_COOKIE_LEN+1];
char desc_cook_out[3*REND_DESC_COOKIE_LEN_BASE64+1];
char service_id[16+1];
char buf[1500];
/* Load client keys and descriptor cookies, if available. */
if (tor_snprintf(cfname, sizeof(cfname), "%s"PATH_SEPARATOR"client_keys",
s->directory)<0) {
log_warn(LD_CONFIG, "Directory name too long to store client keys "
"file: \"%s\".", s->directory);
goto err;
}
client_keys_str = read_file_to_str(cfname, RFTS_IGNORE_MISSING, NULL);
if (client_keys_str) {
if (rend_parse_client_keys(parsed_clients, client_keys_str) < 0) {
log_warn(LD_CONFIG, "Previously stored client_keys file could not "
"be parsed.");
goto err;
} else {
log_info(LD_CONFIG, "Parsed %d previously stored client entries.",
strmap_size(parsed_clients));
}
}
/* Prepare client_keys and hostname files. */
if (!(cfile = start_writing_to_stdio_file(cfname,
OPEN_FLAGS_REPLACE | O_TEXT,
0600, &open_cfile))) {
log_warn(LD_CONFIG, "Could not open client_keys file %s",
escaped(cfname));
goto err;
}
if (!(hfile = start_writing_to_stdio_file(hfname,
OPEN_FLAGS_REPLACE | O_TEXT,
0600, &open_hfile))) {
log_warn(LD_CONFIG, "Could not open hostname file %s", escaped(hfname));
goto err;
}
/* Either use loaded keys for configured clients or generate new
* ones if a client is new. */
SMARTLIST_FOREACH_BEGIN(s->clients, rend_authorized_client_t *, client) {
rend_authorized_client_t *parsed =
strmap_get(parsed_clients, client->client_name);
int written;
size_t len;
/* Copy descriptor cookie from parsed entry or create new one. */
if (parsed) {
memcpy(client->descriptor_cookie, parsed->descriptor_cookie,
REND_DESC_COOKIE_LEN);
} else {
crypto_rand(client->descriptor_cookie, REND_DESC_COOKIE_LEN);
}
if (base64_encode(desc_cook_out, 3*REND_DESC_COOKIE_LEN_BASE64+1,
client->descriptor_cookie,
REND_DESC_COOKIE_LEN, 0) < 0) {
log_warn(LD_BUG, "Could not base64-encode descriptor cookie.");
goto err;
}
/* Copy client key from parsed entry or create new one if required. */
if (parsed && parsed->client_key) {
client->client_key = crypto_pk_dup_key(parsed->client_key);
} else if (s->auth_type == REND_STEALTH_AUTH) {
/* Create private key for client. */
crypto_pk_t *prkey = NULL;
if (!(prkey = crypto_pk_new())) {
log_warn(LD_BUG,"Error constructing client key");
goto err;
}
if (crypto_pk_generate_key(prkey)) {
log_warn(LD_BUG,"Error generating client key");
crypto_pk_free(prkey);
goto err;
}
if (crypto_pk_check_key(prkey) <= 0) {
log_warn(LD_BUG,"Generated client key seems invalid");
crypto_pk_free(prkey);
goto err;
}
client->client_key = prkey;
}
/* Add entry to client_keys file. */
written = tor_snprintf(buf, sizeof(buf),
"client-name %s\ndescriptor-cookie %s\n",
client->client_name, desc_cook_out);
if (written < 0) {
log_warn(LD_BUG, "Could not write client entry.");
goto err;
}
if (client->client_key) {
char *client_key_out = NULL;
if (crypto_pk_write_private_key_to_string(client->client_key,
&client_key_out, &len) != 0) {
log_warn(LD_BUG, "Internal error: "
"crypto_pk_write_private_key_to_string() failed.");
goto err;
}
if (rend_get_service_id(client->client_key, service_id)<0) {
log_warn(LD_BUG, "Internal error: couldn't encode service ID.");
/*
* len is string length, not buffer length, but last byte is NUL
* anyway.
*/
memwipe(client_key_out, 0, len);
tor_free(client_key_out);
goto err;
}
written = tor_snprintf(buf + written, sizeof(buf) - written,
"client-key\n%s", client_key_out);
memwipe(client_key_out, 0, len);
tor_free(client_key_out);
if (written < 0) {
log_warn(LD_BUG, "Could not write client entry.");
goto err;
}
}
if (fputs(buf, cfile) < 0) {
log_warn(LD_FS, "Could not append client entry to file: %s",
strerror(errno));
goto err;
}
/* Add line to hostname file. */
if (s->auth_type == REND_BASIC_AUTH) {
/* Remove == signs (newline has been removed above). */
desc_cook_out[strlen(desc_cook_out)-2] = '\0';
tor_snprintf(buf, sizeof(buf),"%s.onion %s # client: %s\n",
s->service_id, desc_cook_out, client->client_name);
} else {
memcpy(extended_desc_cookie, client->descriptor_cookie,
REND_DESC_COOKIE_LEN);
extended_desc_cookie[REND_DESC_COOKIE_LEN] =
((int)s->auth_type - 1) << 4;
if (base64_encode(desc_cook_out, 3*REND_DESC_COOKIE_LEN_BASE64+1,
extended_desc_cookie,
REND_DESC_COOKIE_LEN+1, 0) < 0) {
log_warn(LD_BUG, "Could not base64-encode descriptor cookie.");
goto err;
}
desc_cook_out[strlen(desc_cook_out)-2] = '\0'; /* Remove A=. */
tor_snprintf(buf, sizeof(buf),"%s.onion %s # client: %s\n",
service_id, desc_cook_out, client->client_name);
}
if (fputs(buf, hfile)<0) {
log_warn(LD_FS, "Could not append host entry to file: %s",
strerror(errno));
goto err;
}
} SMARTLIST_FOREACH_END(client);
finish_writing_to_file(open_cfile);
finish_writing_to_file(open_hfile);
goto done;
err:
r = -1;
if (open_cfile)
abort_writing_to_file(open_cfile);
if (open_hfile)
abort_writing_to_file(open_hfile);
done:
if (client_keys_str) {
memwipe(client_keys_str, 0, strlen(client_keys_str));
tor_free(client_keys_str);
}
strmap_free(parsed_clients, rend_authorized_client_strmap_item_free);
memwipe(cfname, 0, sizeof(cfname));
/* Clear stack buffers that held key-derived material. */
memwipe(buf, 0, sizeof(buf));
memwipe(desc_cook_out, 0, sizeof(desc_cook_out));
memwipe(service_id, 0, sizeof(service_id));
memwipe(extended_desc_cookie, 0, sizeof(extended_desc_cookie));
return r;
}
/** Return the service whose public key has a digest of <b>digest</b>, or
* NULL if no such service exists.
*/
static rend_service_t *
rend_service_get_by_pk_digest(const char* digest)
{
SMARTLIST_FOREACH(rend_service_list, rend_service_t*, s,
if (tor_memeq(s->pk_digest,digest,DIGEST_LEN))
return s);
return NULL;
}
/** Return the service whose service id is <b>id</b>, or NULL if no such
* service exists.
*/
static struct rend_service_t *
rend_service_get_by_service_id(const char *id)
{
tor_assert(strlen(id) == REND_SERVICE_ID_LEN_BASE32);
SMARTLIST_FOREACH(rend_service_list, rend_service_t*, s, {
if (tor_memeq(s->service_id, id, REND_SERVICE_ID_LEN_BASE32))
return s;
});
return NULL;
}
/** Return 1 if any virtual port in <b>service</b> wants a circuit
* to have good uptime. Else return 0.
*/
static int
rend_service_requires_uptime(rend_service_t *service)
{
int i;
rend_service_port_config_t *p;
for (i=0; i < smartlist_len(service->ports); ++i) {
p = smartlist_get(service->ports, i);
if (smartlist_contains_int_as_string(get_options()->LongLivedPorts,
p->virtual_port))
return 1;
}
return 0;
}
/** Check client authorization of a given <b>descriptor_cookie</b> of
* length <b>cookie_len</b> for <b>service</b>. Return 1 for success
* and 0 for failure. */
static int
rend_check_authorization(rend_service_t *service,
const char *descriptor_cookie,
size_t cookie_len)
{
rend_authorized_client_t *auth_client = NULL;
tor_assert(service);
tor_assert(descriptor_cookie);
if (!service->clients) {
log_warn(LD_BUG, "Can't check authorization for a service that has no "
"authorized clients configured.");
return 0;
}
if (cookie_len != REND_DESC_COOKIE_LEN) {
log_info(LD_REND, "Descriptor cookie is %lu bytes, but we expected "
"%lu bytes. Dropping cell.",
(unsigned long)cookie_len, (unsigned long)REND_DESC_COOKIE_LEN);
return 0;
}
/* Look up client authorization by descriptor cookie. */
SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, client, {
if (tor_memeq(client->descriptor_cookie, descriptor_cookie,
REND_DESC_COOKIE_LEN)) {
auth_client = client;
break;
}
});
if (!auth_client) {
char descriptor_cookie_base64[3*REND_DESC_COOKIE_LEN_BASE64];
base64_encode(descriptor_cookie_base64, sizeof(descriptor_cookie_base64),
descriptor_cookie, REND_DESC_COOKIE_LEN, 0);
log_info(LD_REND, "No authorization found for descriptor cookie '%s'! "
"Dropping cell!",
descriptor_cookie_base64);
return 0;
}
/* Allow the request. */
log_info(LD_REND, "Client %s authorized for service %s.",
auth_client->client_name, service->service_id);
return 1;
}
/******
* Handle cells
******/
/** Respond to an INTRODUCE2 cell by launching a circuit to the chosen
* rendezvous point.
*/
int
rend_service_receive_introduction(origin_circuit_t *circuit,
const uint8_t *request,
size_t request_len)
{
/* Global status stuff */
int status = 0, result;
const or_options_t *options = get_options();
char *err_msg = NULL;
const char *stage_descr = NULL;
int reason = END_CIRC_REASON_TORPROTOCOL;
/* Service/circuit/key stuff we can learn before parsing */
char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
rend_service_t *service = NULL;
rend_intro_point_t *intro_point = NULL;
crypto_pk_t *intro_key = NULL;
/* Parsed cell */
rend_intro_cell_t *parsed_req = NULL;
/* Rendezvous point */
extend_info_t *rp = NULL;
/* XXX not handled yet */
char buf[RELAY_PAYLOAD_SIZE];
char keys[DIGEST_LEN+CPATH_KEY_MATERIAL_LEN]; /* Holds KH, Df, Db, Kf, Kb */
int i;
crypto_dh_t *dh = NULL;
origin_circuit_t *launched = NULL;
crypt_path_t *cpath = NULL;
char hexcookie[9];
int circ_needs_uptime;
time_t now = time(NULL);
time_t elapsed;
int replay;
/* Do some initial validation and logging before we parse the cell */
if (circuit->base_.purpose != CIRCUIT_PURPOSE_S_INTRO) {
log_warn(LD_PROTOCOL,
"Got an INTRODUCE2 over a non-introduction circuit %u.",
(unsigned) circuit->base_.n_circ_id);
goto err;
}
#ifndef NON_ANONYMOUS_MODE_ENABLED
tor_assert(!(circuit->build_state->onehop_tunnel));
#endif
tor_assert(circuit->rend_data);
/* We'll use this in a bazillion log messages */
base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1,
circuit->rend_data->rend_pk_digest, REND_SERVICE_ID_LEN);
/* look up service depending on circuit. */
service =
rend_service_get_by_pk_digest(circuit->rend_data->rend_pk_digest);
if (!service) {
log_warn(LD_BUG,
"Internal error: Got an INTRODUCE2 cell on an intro "
"circ for an unrecognized service %s.",
escaped(serviceid));
goto err;
}
intro_point = find_intro_point(circuit);
if (intro_point == NULL) {
intro_point = find_expiring_intro_point(service, circuit);
if (intro_point == NULL) {
log_warn(LD_BUG,
"Internal error: Got an INTRODUCE2 cell on an "
"intro circ (for service %s) with no corresponding "
"rend_intro_point_t.",
escaped(serviceid));
goto err;
}
}
log_info(LD_REND, "Received INTRODUCE2 cell for service %s on circ %u.",
escaped(serviceid), (unsigned)circuit->base_.n_circ_id);
/* use intro key instead of service key. */
intro_key = circuit->intro_key;
tor_free(err_msg);
stage_descr = NULL;
stage_descr = "early parsing";
/* Early parsing pass (get pk, ciphertext); type 2 is INTRODUCE2 */
parsed_req =
rend_service_begin_parse_intro(request, request_len, 2, &err_msg);
if (!parsed_req) {
goto log_error;
} else if (err_msg) {
log_info(LD_REND, "%s on circ %u.", err_msg,
(unsigned)circuit->base_.n_circ_id);
tor_free(err_msg);
}
/* make sure service replay caches are present */
if (!service->accepted_intro_dh_parts) {
service->accepted_intro_dh_parts =
replaycache_new(REND_REPLAY_TIME_INTERVAL,
REND_REPLAY_TIME_INTERVAL);
}
if (!intro_point->accepted_intro_rsa_parts) {
intro_point->accepted_intro_rsa_parts = replaycache_new(0, 0);
}
/* check for replay of PK-encrypted portion. */
replay = replaycache_add_test_and_elapsed(
intro_point->accepted_intro_rsa_parts,
parsed_req->ciphertext, parsed_req->ciphertext_len,
&elapsed);
if (replay) {
log_warn(LD_REND,
"Possible replay detected! We received an "
"INTRODUCE2 cell with same PK-encrypted part %d "
"seconds ago. Dropping cell.",
(int)elapsed);
goto err;
}
stage_descr = "decryption";
/* Now try to decrypt it */
result = rend_service_decrypt_intro(parsed_req, intro_key, &err_msg);
if (result < 0) {
goto log_error;
} else if (err_msg) {
log_info(LD_REND, "%s on circ %u.", err_msg,
(unsigned)circuit->base_.n_circ_id);
tor_free(err_msg);
}
stage_descr = "late parsing";
/* Parse the plaintext */
result = rend_service_parse_intro_plaintext(parsed_req, &err_msg);
if (result < 0) {
goto log_error;
} else if (err_msg) {
log_info(LD_REND, "%s on circ %u.", err_msg,
(unsigned)circuit->base_.n_circ_id);
tor_free(err_msg);
}
stage_descr = "late validation";
/* Validate the parsed plaintext parts */
result = rend_service_validate_intro_late(parsed_req, &err_msg);
if (result < 0) {
goto log_error;
} else if (err_msg) {
log_info(LD_REND, "%s on circ %u.", err_msg,
(unsigned)circuit->base_.n_circ_id);
tor_free(err_msg);
}
stage_descr = NULL;
/* Increment INTRODUCE2 counter */
++(intro_point->accepted_introduce2_count);
/* Find the rendezvous point */
rp = find_rp_for_intro(parsed_req, &err_msg);
if (!rp)
goto log_error;
/* Check if we'd refuse to talk to this router */
if (options->StrictNodes &&
routerset_contains_extendinfo(options->ExcludeNodes, rp)) {
log_warn(LD_REND, "Client asked to rendezvous at a relay that we "
"exclude, and StrictNodes is set. Refusing service.");
reason = END_CIRC_REASON_INTERNAL; /* XXX might leak why we refused */
goto err;
}
base16_encode(hexcookie, 9, (const char *)(parsed_req->rc), 4);
/* Check whether there is a past request with the same Diffie-Hellman,
* part 1. */
replay = replaycache_add_test_and_elapsed(
service->accepted_intro_dh_parts,
parsed_req->dh, DH_KEY_LEN,
&elapsed);
if (replay) {
/* A Tor client will send a new INTRODUCE1 cell with the same rend
* cookie and DH public key as its previous one if its intro circ
 * times out while in state CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT.
* If we received the first INTRODUCE1 cell (the intro-point relay
* converts it into an INTRODUCE2 cell), we are already trying to
* connect to that rend point (and may have already succeeded);
* drop this cell. */
log_info(LD_REND, "We received an "
"INTRODUCE2 cell with same first part of "
"Diffie-Hellman handshake %d seconds ago. Dropping "
"cell.",
(int) elapsed);
goto err;
}
/* If the service performs client authorization, check included auth data. */
if (service->clients) {
if (parsed_req->version == 3 && parsed_req->u.v3.auth_len > 0) {
if (rend_check_authorization(service,
(const char*)parsed_req->u.v3.auth_data,
parsed_req->u.v3.auth_len)) {
log_info(LD_REND, "Authorization data in INTRODUCE2 cell are valid.");
} else {
log_info(LD_REND, "The authorization data that are contained in "
"the INTRODUCE2 cell are invalid. Dropping cell.");
reason = END_CIRC_REASON_CONNECTFAILED;
goto err;
}
} else {
log_info(LD_REND, "INTRODUCE2 cell does not contain authentication "
"data, but we require client authorization. Dropping cell.");
reason = END_CIRC_REASON_CONNECTFAILED;
goto err;
}
}
/* Try DH handshake... */
dh = crypto_dh_new(DH_TYPE_REND);
if (!dh || crypto_dh_generate_public(dh)<0) {
log_warn(LD_BUG,"Internal error: couldn't build DH state "
"or generate public key.");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
if (crypto_dh_compute_secret(LOG_PROTOCOL_WARN, dh,
(char *)(parsed_req->dh),
DH_KEY_LEN, keys,
DIGEST_LEN+CPATH_KEY_MATERIAL_LEN)<0) {
log_warn(LD_BUG, "Internal error: couldn't complete DH handshake");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
circ_needs_uptime = rend_service_requires_uptime(service);
/* help predict this next time */
rep_hist_note_used_internal(now, circ_needs_uptime, 1);
/* Launch a circuit to Alice's chosen rendezvous point. */
for (i=0;i<MAX_REND_FAILURES;i++) {
int flags = CIRCLAUNCH_NEED_CAPACITY | CIRCLAUNCH_IS_INTERNAL;
if (circ_needs_uptime) flags |= CIRCLAUNCH_NEED_UPTIME;
launched = circuit_launch_by_extend_info(
CIRCUIT_PURPOSE_S_CONNECT_REND, rp, flags);
if (launched)
break;
}
if (!launched) { /* give up */
log_warn(LD_REND, "Giving up launching first hop of circuit to rendezvous "
"point %s for service %s.",
safe_str_client(extend_info_describe(rp)),
serviceid);
reason = END_CIRC_REASON_CONNECTFAILED;
goto err;
}
log_info(LD_REND,
"Accepted intro; launching circuit to %s "
"(cookie %s) for service %s.",
safe_str_client(extend_info_describe(rp)),
hexcookie, serviceid);
tor_assert(launched->build_state);
/* Fill in the circuit's state. */
launched->rend_data =
rend_data_service_create(service->service_id,
circuit->rend_data->rend_pk_digest,
parsed_req->rc, service->auth_type);
launched->build_state->service_pending_final_cpath_ref =
tor_malloc_zero(sizeof(crypt_path_reference_t));
launched->build_state->service_pending_final_cpath_ref->refcount = 1;
launched->build_state->service_pending_final_cpath_ref->cpath = cpath =
tor_malloc_zero(sizeof(crypt_path_t));
cpath->magic = CRYPT_PATH_MAGIC;
launched->build_state->expiry_time = now + MAX_REND_TIMEOUT;
cpath->rend_dh_handshake_state = dh;
dh = NULL;
if (circuit_init_cpath_crypto(cpath,keys+DIGEST_LEN,1)<0)
goto err;
memcpy(cpath->rend_circ_nonce, keys, DIGEST_LEN);
goto done;
log_error:
if (!err_msg) {
if (stage_descr) {
tor_asprintf(&err_msg,
"unknown %s error for INTRODUCE2", stage_descr);
} else {
err_msg = tor_strdup("unknown error for INTRODUCE2");
}
}
log_warn(LD_REND, "%s on circ %u", err_msg,
(unsigned)circuit->base_.n_circ_id);
err:
status = -1;
if (dh) crypto_dh_free(dh);
if (launched) {
circuit_mark_for_close(TO_CIRCUIT(launched), reason);
}
tor_free(err_msg);
done:
memwipe(keys, 0, sizeof(keys));
memwipe(buf, 0, sizeof(buf));
memwipe(serviceid, 0, sizeof(serviceid));
memwipe(hexcookie, 0, sizeof(hexcookie));
/* Free the parsed cell */
rend_service_free_intro(parsed_req);
/* Free rp */
extend_info_free(rp);
return status;
}
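/*
* Overview of the INTRODUCE2 handling above (a summary of this function's
* stages, for orientation; see each helper for details):
*
* 1. rend_service_decrypt_intro() -- decrypt with the intro point key
* 2. rend_service_parse_intro_plaintext() -- parse the decrypted body
* 3. rend_service_validate_intro_late() -- post-parse validity checks
* 4. find_rp_for_intro() -- resolve the rendezvous point
* 5. replay-cache, ExcludeNodes and client-authorization checks,
* then the DH handshake
* 6. circuit_launch_by_extend_info() -- connect to the rend point
*/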
/** Given a parsed and decrypted INTRODUCE2, find the rendezvous point or
* return NULL and an error string if we can't. Return a newly allocated
* extend_info_t* for the rendezvous point. */
static extend_info_t *
find_rp_for_intro(const rend_intro_cell_t *intro,
char **err_msg_out)
{
extend_info_t *rp = NULL;
char *err_msg = NULL;
const char *rp_nickname = NULL;
const node_t *node = NULL;
if (!intro) {
if (err_msg_out)
err_msg = tor_strdup("Bad parameters to find_rp_for_intro()");
goto err;
}
if (intro->version == 0 || intro->version == 1) {
rp_nickname = (const char *)(intro->u.v0_v1.rp);
node = node_get_by_nickname(rp_nickname, 0);
if (!node) {
if (err_msg_out) {
tor_asprintf(&err_msg,
"Couldn't find router %s named in INTRODUCE2 cell",
escaped_safe_str_client(rp_nickname));
}
goto err;
}
rp = extend_info_from_node(node, 0);
if (!rp) {
if (err_msg_out) {
tor_asprintf(&err_msg,
"Could build extend_info_t for router %s named "
"in INTRODUCE2 cell",
escaped_safe_str_client(rp_nickname));
}
goto err;
}
} else if (intro->version == 2) {
rp = extend_info_dup(intro->u.v2.extend_info);
} else if (intro->version == 3) {
rp = extend_info_dup(intro->u.v3.extend_info);
} else {
if (err_msg_out) {
tor_asprintf(&err_msg,
"Unknown version %d in INTRODUCE2 cell",
(int)(intro->version));
}
goto err;
}
goto done;
err:
if (err_msg_out) *err_msg_out = err_msg;
else tor_free(err_msg);
done:
return rp;
}
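/*
* How the rendezvous point is encoded per cell version, as handled in
* find_rp_for_intro() above:
*
* v0: nul-padded nickname, resolved via node_get_by_nickname()
* v1: nickname or hex digest, resolved the same way
* v2/v3: a full extend_info_t (address, port, identity digest, onion key)
* carried in the cell itself, so no consensus lookup is needed
*/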
/** Free a parsed INTRODUCE1 or INTRODUCE2 cell that was allocated by
* rend_service_parse_intro().
*/
void
rend_service_free_intro(rend_intro_cell_t *request)
{
if (!request) {
return;
}
/* Free ciphertext */
tor_free(request->ciphertext);
request->ciphertext_len = 0;
/* Have plaintext? */
if (request->plaintext) {
/* Zero it out just to be safe */
memwipe(request->plaintext, 0, request->plaintext_len);
tor_free(request->plaintext);
request->plaintext_len = 0;
}
/* Have parsed plaintext? */
if (request->parsed) {
switch (request->version) {
case 0:
case 1:
/*
* Nothing more to do; these formats have no further pointers
* in them.
*/
break;
case 2:
extend_info_free(request->u.v2.extend_info);
request->u.v2.extend_info = NULL;
break;
case 3:
if (request->u.v3.auth_data) {
memwipe(request->u.v3.auth_data, 0, request->u.v3.auth_len);
tor_free(request->u.v3.auth_data);
}
extend_info_free(request->u.v3.extend_info);
request->u.v3.extend_info = NULL;
break;
default:
log_info(LD_BUG,
"rend_service_free_intro() saw unknown protocol "
"version %d.",
request->version);
}
}
/* Zero it out to make sure sensitive stuff doesn't hang around in memory */
memwipe(request, 0, sizeof(*request));
tor_free(request);
}
/** Parse an INTRODUCE1 or INTRODUCE2 cell into a newly allocated
* rend_intro_cell_t structure. Free it with rend_service_free_intro()
* when finished. The type parameter should be 1 or 2 to indicate whether
* this is INTRODUCE1 or INTRODUCE2. This parses only the non-encrypted
* parts; after this, call rend_service_decrypt_intro() with a key, then
* rend_service_parse_intro_plaintext() to finish parsing. The optional
* err_msg_out parameter is set to a string suitable for log output
* if parsing fails. This function does some validation, but only
* that which depends solely on the contents of the cell and the
* key; it can be unit-tested. Further validation is done in
* rend_service_validate_intro().
*/
rend_intro_cell_t *
rend_service_begin_parse_intro(const uint8_t *request,
size_t request_len,
uint8_t type,
char **err_msg_out)
{
rend_intro_cell_t *rv = NULL;
char *err_msg = NULL;
if (!request || request_len <= 0) goto err;
if (!(type == 1 || type == 2)) goto err;
/* First, check that the cell is long enough to be a sensible INTRODUCE */
/* min key length plus digest length plus nickname length */
if (request_len <
(DIGEST_LEN + REND_COOKIE_LEN + (MAX_NICKNAME_LEN + 1) +
DH_KEY_LEN + 42)) {
if (err_msg_out) {
tor_asprintf(&err_msg,
"got a truncated INTRODUCE%d cell",
(int)type);
}
goto err;
}
/* Allocate a new parsed cell structure */
rv = tor_malloc_zero(sizeof(*rv));
/* Set the type */
rv->type = type;
/* Copy in the ID */
memcpy(rv->pk, request, DIGEST_LEN);
/* Copy in the ciphertext */
rv->ciphertext = tor_malloc(request_len - DIGEST_LEN);
memcpy(rv->ciphertext, request + DIGEST_LEN, request_len - DIGEST_LEN);
rv->ciphertext_len = request_len - DIGEST_LEN;
goto done;
err:
rend_service_free_intro(rv);
rv = NULL;
if (err_msg_out && !err_msg) {
tor_asprintf(&err_msg,
"unknown INTRODUCE%d error",
(int)type);
}
done:
if (err_msg_out) *err_msg_out = err_msg;
else tor_free(err_msg);
return rv;
}
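/*
* Illustrative call sequence for the parsers above (a sketch, not code
* that is compiled here; error handling is elided and "intro_key" stands
* for the intro point key assumed to be already looked up for this
* circuit):
*
* char *err_msg = NULL;
* rend_intro_cell_t *cell =
* rend_service_begin_parse_intro(request, request_len, 2, &err_msg);
* if (cell &&
* rend_service_decrypt_intro(cell, intro_key, &err_msg) == 0 &&
* rend_service_parse_intro_plaintext(cell, &err_msg) == 0) {
* ... use cell->rc, cell->dh and the version-specific fields ...
* }
* tor_free(err_msg);
* rend_service_free_intro(cell);
*/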
/** Parse the version-specific parts of a v0 or v1 INTRODUCE1 or INTRODUCE2
* cell
*/
static ssize_t
rend_service_parse_intro_for_v0_or_v1(
rend_intro_cell_t *intro,
const uint8_t *buf,
size_t plaintext_len,
char **err_msg_out)
{
const char *rp_nickname, *endptr;
size_t nickname_field_len, ver_specific_len;
if (intro->version == 1) {
ver_specific_len = MAX_HEX_NICKNAME_LEN + 2;
rp_nickname = ((const char *)buf) + 1;
nickname_field_len = MAX_HEX_NICKNAME_LEN + 1;
} else if (intro->version == 0) {
ver_specific_len = MAX_NICKNAME_LEN + 1;
rp_nickname = (const char *)buf;
nickname_field_len = MAX_NICKNAME_LEN + 1;
} else {
if (err_msg_out)
tor_asprintf(err_msg_out,
"rend_service_parse_intro_for_v0_or_v1() called with "
"bad version %d on INTRODUCE%d cell (this is a bug)",
intro->version,
(int)(intro->type));
goto err;
}
if (plaintext_len < ver_specific_len) {
if (err_msg_out)
tor_asprintf(err_msg_out,
"short plaintext of encrypted part in v1 INTRODUCE%d "
"cell (%lu bytes, needed %lu)",
(int)(intro->type),
(unsigned long)plaintext_len,
(unsigned long)ver_specific_len);
goto err;
}
endptr = memchr(rp_nickname, 0, nickname_field_len);
if (!endptr || endptr == rp_nickname) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"couldn't find a nul-padded nickname in "
"INTRODUCE%d cell",
(int)(intro->type));
}
goto err;
}
if ((intro->version == 0 &&
!is_legal_nickname(rp_nickname)) ||
(intro->version == 1 &&
!is_legal_nickname_or_hexdigest(rp_nickname))) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"bad nickname in INTRODUCE%d cell",
(int)(intro->type));
}
goto err;
}
memcpy(intro->u.v0_v1.rp, rp_nickname, endptr - rp_nickname + 1);
return ver_specific_len;
err:
return -1;
}
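/*
* Plaintext layout consumed by rend_service_parse_intro_for_v0_or_v1(),
* derived from the field widths used above:
*
* v0: [ nickname, nul-padded to MAX_NICKNAME_LEN + 1 octets ]
* v1: [ version (1 octet) ]
* [ nickname or hex digest, nul-padded to MAX_HEX_NICKNAME_LEN + 1 ]
*
* The version-invariant tail (rendezvous cookie and DH key) follows this
* part and is copied out later by rend_service_parse_intro_plaintext().
*/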
/** Parse the version-specific parts of a v2 INTRODUCE1 or INTRODUCE2 cell
*/
static ssize_t
rend_service_parse_intro_for_v2(
rend_intro_cell_t *intro,
const uint8_t *buf,
size_t plaintext_len,
char **err_msg_out)
{
unsigned int klen;
extend_info_t *extend_info = NULL;
ssize_t ver_specific_len;
/*
* We accept version 3 too so that the v3 parser can call this with
* an adjusted buffer for the latter part of a v3 cell, which is
* identical to a v2 cell.
*/
if (!(intro->version == 2 ||
intro->version == 3)) {
if (err_msg_out)
tor_asprintf(err_msg_out,
"rend_service_parse_intro_for_v2() called with "
"bad version %d on INTRODUCE%d cell (this is a bug)",
intro->version,
(int)(intro->type));
goto err;
}
/* 7 == version, IP and port, DIGEST_LEN == id, 2 == key length */
if (plaintext_len < 7 + DIGEST_LEN + 2) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"truncated plaintext of encrypted parted of "
"version %d INTRODUCE%d cell",
intro->version,
(int)(intro->type));
}
goto err;
}
extend_info = tor_malloc_zero(sizeof(extend_info_t));
tor_addr_from_ipv4n(&extend_info->addr, get_uint32(buf + 1));
extend_info->port = ntohs(get_uint16(buf + 5));
memcpy(extend_info->identity_digest, buf + 7, DIGEST_LEN);
extend_info->nickname[0] = '$';
base16_encode(extend_info->nickname + 1, sizeof(extend_info->nickname) - 1,
extend_info->identity_digest, DIGEST_LEN);
klen = ntohs(get_uint16(buf + 7 + DIGEST_LEN));
/* 7 == version, IP and port, DIGEST_LEN == id, 2 == key length */
if (plaintext_len < 7 + DIGEST_LEN + 2 + klen) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"truncated plaintext of encrypted parted of "
"version %d INTRODUCE%d cell",
intro->version,
(int)(intro->type));
}
goto err;
}
extend_info->onion_key =
crypto_pk_asn1_decode((const char *)(buf + 7 + DIGEST_LEN + 2), klen);
if (!extend_info->onion_key) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"error decoding onion key in version %d "
"INTRODUCE%d cell",
intro->version,
(intro->type));
}
goto err;
}
if (128 != crypto_pk_keysize(extend_info->onion_key)) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"invalid onion key size in version %d INTRODUCE%d cell",
intro->version,
(intro->type));
}
goto err;
}
ver_specific_len = 7+DIGEST_LEN+2+klen;
if (intro->version == 2) intro->u.v2.extend_info = extend_info;
else intro->u.v3.extend_info = extend_info;
return ver_specific_len;
err:
extend_info_free(extend_info);
return -1;
}
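/*
* Wire layout of the v2 version-specific part, matching the offsets read
* above:
*
* octet 0 version
* octets 1..4 rendezvous point IPv4 address (network order)
* octets 5..6 OR port (network order)
* octets 7..7+DIGEST_LEN-1 identity digest
* next 2 octets klen, length of the onion key
* next klen octets ASN.1-encoded onion key (must decode to a
* 128-byte, i.e. 1024-bit, key)
*
* Hence ver_specific_len == 7 + DIGEST_LEN + 2 + klen.
*/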
/** Parse the version-specific parts of a v3 INTRODUCE1 or INTRODUCE2 cell
*/
static ssize_t
rend_service_parse_intro_for_v3(
rend_intro_cell_t *intro,
const uint8_t *buf,
size_t plaintext_len,
char **err_msg_out)
{
ssize_t adjust, v2_ver_specific_len, ts_offset;
/* This should only be called on v3 cells */
if (intro->version != 3) {
if (err_msg_out)
tor_asprintf(err_msg_out,
"rend_service_parse_intro_for_v3() called with "
"bad version %d on INTRODUCE%d cell (this is a bug)",
intro->version,
(int)(intro->type));
goto err;
}
/*
* Check that we have at least enough to get auth_len:
*
* 1 octet for version, 1 for auth_type, 2 for auth_len
*/
if (plaintext_len < 4) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"truncated plaintext of encrypted parted of "
"version %d INTRODUCE%d cell",
intro->version,
(int)(intro->type));
}
goto err;
}
/*
* The rend_client_send_introduction() function over in rendclient.c is
* broken (i.e., fails to match the spec) in such a way that we can't
* change it without breaking the protocol. Specifically, it doesn't
* emit auth_len when auth-type is REND_NO_AUTH, so everything is off
* by two bytes after that. Calculate ts_offset and do everything from
* the timestamp onward relative to that, to work around this brain damage.
*/
intro->u.v3.auth_type = buf[1];
if (intro->u.v3.auth_type != REND_NO_AUTH) {
intro->u.v3.auth_len = ntohs(get_uint16(buf + 2));
ts_offset = 4 + intro->u.v3.auth_len;
} else {
intro->u.v3.auth_len = 0;
ts_offset = 2;
}
/* Check that auth len makes sense for this auth type */
if (intro->u.v3.auth_type == REND_BASIC_AUTH ||
intro->u.v3.auth_type == REND_STEALTH_AUTH) {
if (intro->u.v3.auth_len != REND_DESC_COOKIE_LEN) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"wrong auth data size %d for INTRODUCE%d cell, "
"should be %d",
(int)(intro->u.v3.auth_len),
(int)(intro->type),
REND_DESC_COOKIE_LEN);
}
goto err;
}
}
/* Check that we actually have everything up through the timestamp */
if (plaintext_len < (size_t)(ts_offset)+4) {
if (err_msg_out) {
tor_asprintf(err_msg_out,
"truncated plaintext of encrypted parted of "
"version %d INTRODUCE%d cell",
intro->version,
(int)(intro->type));
}
goto err;
}
if (intro->u.v3.auth_type != REND_NO_AUTH &&
intro->u.v3.auth_len > 0) {
/* Okay, we can go ahead and copy auth_data */
intro->u.v3.auth_data = tor_malloc(intro->u.v3.auth_len);
/*
* We know we had an auth_len field in this case, so 4 is
* always right.
*/
memcpy(intro->u.v3.auth_data, buf + 4, intro->u.v3.auth_len);
}
/*
* From here on, the format is as in v2, so we call the v2 parser with
* adjusted buffer and length. We are 4 + ts_offset octets in, but the
* v2 parser expects to skip over a version byte at the start, so we
* adjust by 3 + ts_offset.
*/
adjust = 3 + ts_offset;
v2_ver_specific_len =
rend_service_parse_intro_for_v2(intro,
buf + adjust, plaintext_len - adjust,
err_msg_out);
/* Success in v2 parser */
if (v2_ver_specific_len >= 0) return v2_ver_specific_len + adjust;
/* Failure in v2 parser; it will have provided an err_msg */
else return v2_ver_specific_len;
err:
return -1;
}
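/*
* Wire layout of the v3 version-specific part, including the auth_len
* quirk handled above:
*
* octet 0 version (3)
* octet 1 auth_type
* if auth_type != REND_NO_AUTH:
* octets 2..3 auth_len
* octets 4..4+auth_len-1 auth_data => ts_offset = 4 + auth_len
* else:
* (no auth_len field at all) => ts_offset = 2
* octets ts_offset..ts_offset+3 timestamp
*
* From the timestamp on, the data matches the v2 format starting at the
* v2 IP-address field, so the v2 parser is re-entered at buf + 3 +
* ts_offset: its unread "version" octet then falls on the last timestamp
* octet, and its IP-address read starts right after the timestamp.
*/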
/** Table of parser functions for version-specific parts of an INTRODUCE2
* cell.
*/
static ssize_t
(*intro_version_handlers[])(
rend_intro_cell_t *,
const uint8_t *,
size_t,
char **) =
{ rend_service_parse_intro_for_v0_or_v1,
rend_service_parse_intro_for_v0_or_v1,
rend_service_parse_intro_for_v2,
rend_service_parse_intro_for_v3 };
/** Decrypt the encrypted part of an INTRODUCE1 or INTRODUCE2 cell,
* return 0 if successful, or < 0 and write an error message to
* *err_msg_out if provided.
*/
int
rend_service_decrypt_intro(
rend_intro_cell_t *intro,
crypto_pk_t *key,
char **err_msg_out)
{
char *err_msg = NULL;
uint8_t key_digest[DIGEST_LEN];
char service_id[REND_SERVICE_ID_LEN_BASE32+1];
ssize_t key_len;
uint8_t buf[RELAY_PAYLOAD_SIZE];
int result, status = -1;
if (!intro || !key) {
if (err_msg_out) {
err_msg =
tor_strdup("rend_service_decrypt_intro() called with bad "
"parameters");
}
status = -2;
goto err;
}
/* Make sure we have ciphertext */
if (!(intro->ciphertext) || intro->ciphertext_len <= 0) {
if (err_msg_out) {
tor_asprintf(&err_msg,
"rend_intro_cell_t was missing ciphertext for "
"INTRODUCE%d cell",
(int)(intro->type));
}
status = -3;
goto err;
}
/* Check that this cell actually matches this service key */
/* first DIGEST_LEN bytes of request is intro or service pk digest */
crypto_pk_get_digest(key, (char *)key_digest);
if (tor_memneq(key_digest, intro->pk, DIGEST_LEN)) {
if (err_msg_out) {
base32_encode(service_id, REND_SERVICE_ID_LEN_BASE32 + 1,
(char*)(intro->pk), REND_SERVICE_ID_LEN);
tor_asprintf(&err_msg,
"got an INTRODUCE%d cell for the wrong service (%s)",
(int)(intro->type),
escaped(service_id));
}
status = -4;
goto err;
}
/* Make sure the encrypted part is long enough to decrypt */
key_len = crypto_pk_keysize(key);
if (intro->ciphertext_len < key_len) {
if (err_msg_out) {
tor_asprintf(&err_msg,
"got an INTRODUCE%d cell with a truncated PK-encrypted "
"part",
(int)(intro->type));
}
status = -5;
goto err;
}
/* Decrypt the encrypted part */
note_crypto_pk_op(REND_SERVER);
result =
crypto_pk_private_hybrid_decrypt(
key, (char *)buf, sizeof(buf),
(const char *)(intro->ciphertext), intro->ciphertext_len,
PK_PKCS1_OAEP_PADDING, 1);
if (result < 0) {
if (err_msg_out) {
tor_asprintf(&err_msg,
"couldn't decrypt INTRODUCE%d cell",
(int)(intro->type));
}
status = -6;
goto err;
}
intro->plaintext_len = result;
intro->plaintext = tor_malloc(intro->plaintext_len);
memcpy(intro->plaintext, buf, intro->plaintext_len);
status = 0;
goto done;
err:
if (err_msg_out && !err_msg) {
tor_asprintf(&err_msg,
"unknown INTRODUCE%d error decrypting encrypted part",
intro ? (int)(intro->type) : -1);
}
done:
if (err_msg_out) *err_msg_out = err_msg;
else tor_free(err_msg);
/* clean up potentially sensitive material */
memwipe(buf, 0, sizeof(buf));
memwipe(key_digest, 0, sizeof(key_digest));
memwipe(service_id, 0, sizeof(service_id));
return status;
}
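/*
* Status codes returned by rend_service_decrypt_intro() above:
*
* 0 success; plaintext copied into intro->plaintext
* -2 bad parameters (NULL intro or key)
* -3 the cell carried no ciphertext
* -4 the cell was encrypted to a different service key
* -5 ciphertext shorter than one PK-encrypted block
* -6 hybrid decryption failed
*/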
/** Parse the plaintext of the encrypted part of an INTRODUCE1 or
* INTRODUCE2 cell, return 0 if successful, or < 0 and write an error
* message to *err_msg_out if provided.
*/
int
rend_service_parse_intro_plaintext(
rend_intro_cell_t *intro,
char **err_msg_out)
{
char *err_msg = NULL;
ssize_t ver_specific_len, ver_invariant_len;
uint8_t version;
int status = -1;
if (!intro) {
if (err_msg_out) {
err_msg =
tor_strdup("rend_service_parse_intro_plaintext() called with NULL "
"rend_intro_cell_t");
}
status = -2;
goto err;
}
/* Check that we have plaintext */
if (!(intro->plaintext) || intro->plaintext_len <= 0) {
if (err_msg_out) {
err_msg = tor_strdup("rend_intro_cell_t was missing plaintext");
}
status = -3;
goto err;
}
/* In all formats except v0, the first byte is a version number */
version = intro->plaintext[0];
/* v0 has no version byte (stupid...), so handle it as a fallback */
if (version > 3) version = 0;
/* Copy the version into the parsed cell structure */
intro->version = version;
/* Call the version-specific parser from the table */
ver_specific_len =
intro_version_handlers[version](intro,
intro->plaintext, intro->plaintext_len,
&err_msg);
if (ver_specific_len < 0) {
status = -4;
goto err;
}
/* The rendezvous cookie and Diffie-Hellman stuff are version-invariant
* and at the end of the plaintext of the encrypted part of the cell.
*/
ver_invariant_len = intro->plaintext_len - ver_specific_len;
if (ver_invariant_len < REND_COOKIE_LEN + DH_KEY_LEN) {
tor_asprintf(&err_msg,
"decrypted plaintext of INTRODUCE%d cell was truncated (%ld bytes)",
(int)(intro->type),
(long)(intro->plaintext_len));
status = -5;
goto err;
} else if (ver_invariant_len > REND_COOKIE_LEN + DH_KEY_LEN) {
tor_asprintf(&err_msg,
"decrypted plaintext of INTRODUCE%d cell was too long (%ld bytes)",
(int)(intro->type),
(long)(intro->plaintext_len));
status = -6;
goto err;
} else {
memcpy(intro->rc,
intro->plaintext + ver_specific_len,
REND_COOKIE_LEN);
memcpy(intro->dh,
intro->plaintext + ver_specific_len + REND_COOKIE_LEN,
DH_KEY_LEN);
}
/* Flag it as being fully parsed */
intro->parsed = 1;
status = 0;
goto done;
err:
if (err_msg_out && !err_msg) {
tor_asprintf(&err_msg,
"unknown INTRODUCE%d error parsing encrypted part",
intro ? (int)(intro->type) : -1);
}
done:
if (err_msg_out) *err_msg_out = err_msg;
else tor_free(err_msg);
return status;
}
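/*
* Length invariant enforced above: after the version-specific parser
* consumes ver_specific_len octets, exactly REND_COOKIE_LEN + DH_KEY_LEN
* octets must remain, i.e.
*
* plaintext_len == ver_specific_len + REND_COOKIE_LEN + DH_KEY_LEN
*
* A shorter plaintext fails with status -5, a longer one with -6.
*/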
/** Do validity checks on a parsed intro cell after decryption; some of
* these are not done in rend_service_parse_intro_plaintext() itself because
* they depend on a lot of other state and would make it hard to unit test.
* Returns >= 0 if successful or < 0 if the intro cell is invalid, and
* optionally writes out an error message for logging. If an err_msg
* pointer is provided, it is the caller's responsibility to free any
* provided message.
*/
int
rend_service_validate_intro_late(const rend_intro_cell_t *intro,
char **err_msg_out)
{
int status = 0;
if (!intro) {
if (err_msg_out)
*err_msg_out =
tor_strdup("NULL intro cell passed to "
"rend_service_validate_intro_late()");
status = -1;
goto err;
}
if (intro->version == 3 && intro->parsed) {
if (!(intro->u.v3.auth_type == REND_NO_AUTH ||
intro->u.v3.auth_type == REND_BASIC_AUTH ||
intro->u.v3.auth_type == REND_STEALTH_AUTH)) {
/* This is an informative message, not an error, as in the old code */
if (err_msg_out)
tor_asprintf(err_msg_out,
"unknown authorization type %d",
intro->u.v3.auth_type);
}
}
err:
return status;
}
/** Called when we fail building a rendezvous circuit at some point other
* than the last hop: launches a new circuit to the same rendezvous point.
*/
void
rend_service_relaunch_rendezvous(origin_circuit_t *oldcirc)
{
origin_circuit_t *newcirc;
cpath_build_state_t *newstate, *oldstate;
tor_assert(oldcirc->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND);
/* Don't relaunch the same rend circ twice. */
if (oldcirc->hs_service_side_rend_circ_has_been_relaunched) {
log_info(LD_REND, "Rendezvous circuit to %s has already been relaunched; "
"not relaunching it again.",
oldcirc->build_state ?
safe_str(extend_info_describe(oldcirc->build_state->chosen_exit))
: "*unknown*");
return;
}
oldcirc->hs_service_side_rend_circ_has_been_relaunched = 1;
if (!oldcirc->build_state ||
oldcirc->build_state->failure_count > MAX_REND_FAILURES ||
oldcirc->build_state->expiry_time < time(NULL)) {
log_info(LD_REND,
"Attempt to build circuit to %s for rendezvous has failed "
"too many times or expired; giving up.",
oldcirc->build_state ?
safe_str(extend_info_describe(oldcirc->build_state->chosen_exit))
: "*unknown*");
return;
}
oldstate = oldcirc->build_state;
tor_assert(oldstate);
if (oldstate->service_pending_final_cpath_ref == NULL) {
log_info(LD_REND,"Skipping relaunch of circ that failed on its first hop. "
"Initiator will retry.");
return;
}
log_info(LD_REND,"Reattempting rendezvous circuit to '%s'",
safe_str(extend_info_describe(oldstate->chosen_exit)));
newcirc = circuit_launch_by_extend_info(CIRCUIT_PURPOSE_S_CONNECT_REND,
oldstate->chosen_exit,
CIRCLAUNCH_NEED_CAPACITY|CIRCLAUNCH_IS_INTERNAL);
if (!newcirc) {
log_warn(LD_REND,"Couldn't relaunch rendezvous circuit to '%s'.",
safe_str(extend_info_describe(oldstate->chosen_exit)));
return;
}
newstate = newcirc->build_state;
tor_assert(newstate);
newstate->failure_count = oldstate->failure_count+1;
newstate->expiry_time = oldstate->expiry_time;
newstate->service_pending_final_cpath_ref =
oldstate->service_pending_final_cpath_ref;
++(newstate->service_pending_final_cpath_ref->refcount);
newcirc->rend_data = rend_data_dup(oldcirc->rend_data);
}
/** Launch a circuit to serve as an introduction point for the service
* <b>service</b> at the introduction point <b>intro</b>.
*/
static int
rend_service_launch_establish_intro(rend_service_t *service,
rend_intro_point_t *intro)
{
origin_circuit_t *launched;
log_info(LD_REND,
"Launching circuit to introduction point %s for service %s",
safe_str_client(extend_info_describe(intro->extend_info)),
service->service_id);
rep_hist_note_used_internal(time(NULL), 1, 0);
++service->n_intro_circuits_launched;
launched = circuit_launch_by_extend_info(CIRCUIT_PURPOSE_S_ESTABLISH_INTRO,
intro->extend_info,
CIRCLAUNCH_NEED_UPTIME|CIRCLAUNCH_IS_INTERNAL);
if (!launched) {
log_info(LD_REND,
"Can't launch circuit to establish introduction at %s.",
safe_str_client(extend_info_describe(intro->extend_info)));
return -1;
}
/* We must have the same exit node even if cannibalized. */
tor_assert(tor_memeq(intro->extend_info->identity_digest,
launched->build_state->chosen_exit->identity_digest,
DIGEST_LEN));
launched->rend_data = rend_data_service_create(service->service_id,
service->pk_digest, NULL,
service->auth_type);
launched->intro_key = crypto_pk_dup_key(intro->intro_key);
if (launched->base_.state == CIRCUIT_STATE_OPEN)
rend_service_intro_has_opened(launched);
return 0;
}
/** Return the number of introduction points that are established for the
* given service. */
static unsigned int
count_established_intro_points(const rend_service_t *service)
{
unsigned int num = 0;
SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro,
num += intro->circuit_established
);
return num;
}
/** Return the number of introduction points that are or are being
* established for the given service. This function iterates over all
* circuits and counts those that are linked to the service and are waiting
* for the intro point to respond. */
static unsigned int
count_intro_point_circuits(const rend_service_t *service)
{
unsigned int num_ipos = 0;
SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
if (!circ->marked_for_close &&
circ->state == CIRCUIT_STATE_OPEN &&
(circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) {
origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ);
if (oc->rend_data &&
!rend_cmp_service_ids(service->service_id,
oc->rend_data->onion_address))
num_ipos++;
}
}
SMARTLIST_FOREACH_END(circ);
return num_ipos;
}
/** Called when we're done building a circuit to an introduction point:
* sends a RELAY_ESTABLISH_INTRO cell.
*/
void
rend_service_intro_has_opened(origin_circuit_t *circuit)
{
rend_service_t *service;
size_t len;
int r;
char buf[RELAY_PAYLOAD_SIZE];
char auth[DIGEST_LEN + 9];
char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
int reason = END_CIRC_REASON_TORPROTOCOL;
crypto_pk_t *intro_key;
tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO);
#ifndef NON_ANONYMOUS_MODE_ENABLED
tor_assert(!(circuit->build_state->onehop_tunnel));
#endif
tor_assert(circuit->cpath);
tor_assert(circuit->rend_data);
base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1,
circuit->rend_data->rend_pk_digest, REND_SERVICE_ID_LEN);
service = rend_service_get_by_pk_digest(
circuit->rend_data->rend_pk_digest);
if (!service) {
log_warn(LD_REND, "Unrecognized service ID %s on introduction circuit %u.",
serviceid, (unsigned)circuit->base_.n_circ_id);
reason = END_CIRC_REASON_NOSUCHSERVICE;
goto err;
}
/* If we already have enough introduction circuits for this service,
* redefine this one as a general circuit or close it, depending.
* Subtract the number of expiring nodes here since their circuits are
* still open. */
if ((count_intro_point_circuits(service) -
smartlist_len(service->expiring_nodes)) >
service->n_intro_points_wanted) {
const or_options_t *options = get_options();
/* Remove the intro point associated with this circuit; it's being
* repurposed or closed, so clean up its memory. */
rend_intro_point_t *intro = find_intro_point(circuit);
if (intro != NULL) {
smartlist_remove(service->intro_nodes, intro);
rend_intro_point_free(intro);
}
if (options->ExcludeNodes) {
/* XXXX in some future version, we can test whether the transition is
allowed or not given the actual nodes in the circuit. But for now, in
this case, we might as well close the thing. */
log_info(LD_CIRC|LD_REND, "We have just finished an introduction "
"circuit, but we already have enough. Closing it.");
reason = END_CIRC_REASON_NONE;
goto err;
} else {
tor_assert(circuit->build_state->is_internal);
log_info(LD_CIRC|LD_REND, "We have just finished an introduction "
"circuit, but we already have enough. Redefining purpose to "
"general; leaving as internal.");
circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_C_GENERAL);
{
rend_data_t *rend_data = circuit->rend_data;
circuit->rend_data = NULL;
rend_data_free(rend_data);
}
{
crypto_pk_t *intro_key = circuit->intro_key;
circuit->intro_key = NULL;
crypto_pk_free(intro_key);
}
circuit_has_opened(circuit);
goto done;
}
}
log_info(LD_REND,
"Established circuit %u as introduction point for service %s",
(unsigned)circuit->base_.n_circ_id, serviceid);
/* Use the intro key instead of the service key in ESTABLISH_INTRO. */
intro_key = circuit->intro_key;
/* Build the payload for a RELAY_ESTABLISH_INTRO cell. */
r = crypto_pk_asn1_encode(intro_key, buf+2,
RELAY_PAYLOAD_SIZE-2);
if (r < 0) {
log_warn(LD_BUG, "Internal error; failed to establish intro point.");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
len = r;
set_uint16(buf, htons((uint16_t)len));
len += 2;
memcpy(auth, circuit->cpath->prev->rend_circ_nonce, DIGEST_LEN);
memcpy(auth+DIGEST_LEN, "INTRODUCE", 9);
if (crypto_digest(buf+len, auth, DIGEST_LEN+9))
goto err;
len += DIGEST_LEN; /* crypto_digest() wrote DIGEST_LEN (20) bytes */
note_crypto_pk_op(REND_SERVER);
r = crypto_pk_private_sign_digest(intro_key, buf+len, sizeof(buf)-len,
buf, len);
if (r<0) {
log_warn(LD_BUG, "Internal error: couldn't sign introduction request.");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
len += r;
if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit),
RELAY_COMMAND_ESTABLISH_INTRO,
buf, len, circuit->cpath->prev)<0) {
log_info(LD_GENERAL,
"Couldn't send introduction request for service %s on circuit %u",
serviceid, (unsigned)circuit->base_.n_circ_id);
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
/* We've attempted to use this circuit */
pathbias_count_use_attempt(circuit);
goto done;
err:
circuit_mark_for_close(TO_CIRCUIT(circuit), reason);
done:
memwipe(buf, 0, sizeof(buf));
memwipe(auth, 0, sizeof(auth));
memwipe(serviceid, 0, sizeof(serviceid));
return;
}
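/*
* RELAY_ESTABLISH_INTRO payload assembled above:
*
* octets 0..1 KL, length of the ASN.1-encoded intro key
* octets 2..2+KL-1 PK, the ASN.1-encoded intro key
* next DIGEST_LEN octets HASH = H(rend_circ_nonce | "INTRODUCE")
* remainder signature over everything above, made with
* the intro key
*/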
/** Called when we get an INTRO_ESTABLISHED cell; mark the circuit as a
* live introduction point, and note that the service descriptor is
* now out-of-date. */
int
rend_service_intro_established(origin_circuit_t *circuit,
const uint8_t *request,
size_t request_len)
{
rend_service_t *service;
rend_intro_point_t *intro;
char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
(void) request;
(void) request_len;
if (circuit->base_.purpose != CIRCUIT_PURPOSE_S_ESTABLISH_INTRO) {
log_warn(LD_PROTOCOL,
"received INTRO_ESTABLISHED cell on non-intro circuit.");
goto err;
}
tor_assert(circuit->rend_data);
service = rend_service_get_by_pk_digest(
circuit->rend_data->rend_pk_digest);
if (!service) {
log_warn(LD_REND, "Unknown service on introduction circuit %u.",
(unsigned)circuit->base_.n_circ_id);
goto err;
}
/* Encode the service ID up front so that the error path below can log
* it safely. */
base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32 + 1,
circuit->rend_data->rend_pk_digest, REND_SERVICE_ID_LEN);
/* We've just successfully established an intro circuit to one of our
* introduction points; account for it. */
intro = find_intro_point(circuit);
if (intro == NULL) {
log_warn(LD_REND,
"Introduction circuit established without a rend_intro_point_t "
"object for service %s on circuit %u",
safe_str_client(serviceid), (unsigned)circuit->base_.n_circ_id);
goto err;
}
intro->circuit_established = 1;
/* We might not have every introduction point ready, but at this point we
* know that the descriptor needs to be uploaded. */
service->desc_is_dirty = time(NULL);
circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_INTRO);
log_info(LD_REND,
"Received INTRO_ESTABLISHED cell on circuit %u for service %s",
(unsigned)circuit->base_.n_circ_id, serviceid);
/* Getting a valid INTRO_ESTABLISHED means we've successfully
* used the circ */
pathbias_mark_use_success(circuit);
return 0;
err:
circuit_mark_for_close(TO_CIRCUIT(circuit), END_CIRC_REASON_TORPROTOCOL);
return -1;
}
/** Called once a circuit to a rendezvous point is established: sends a
* RELAY_COMMAND_RENDEZVOUS1 cell.
*/
void
rend_service_rendezvous_has_opened(origin_circuit_t *circuit)
{
rend_service_t *service;
char buf[RELAY_PAYLOAD_SIZE];
crypt_path_t *hop;
char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
char hexcookie[9];
int reason;
tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND);
tor_assert(circuit->cpath);
tor_assert(circuit->build_state);
#ifndef NON_ANONYMOUS_MODE_ENABLED
tor_assert(!(circuit->build_state->onehop_tunnel));
#endif
tor_assert(circuit->rend_data);
/* Declare the circuit dirty to avoid reuse, and for path-bias */
if (!circuit->base_.timestamp_dirty)
circuit->base_.timestamp_dirty = time(NULL);
/* This may be redundant */
pathbias_count_use_attempt(circuit);
hop = circuit->build_state->service_pending_final_cpath_ref->cpath;
base16_encode(hexcookie, 9, circuit->rend_data->rend_cookie, 4);
base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1,
circuit->rend_data->rend_pk_digest, REND_SERVICE_ID_LEN);
log_info(LD_REND,
"Done building circuit %u to rendezvous with "
"cookie %s for service %s",
(unsigned)circuit->base_.n_circ_id, hexcookie, serviceid);
/* Clear the 'in-progress HS circ has timed out' flag for
* consistency with what happens on the client side; this line has
* no effect on Tor's behaviour. */
circuit->hs_circ_has_timed_out = 0;
/* If hop is NULL, another rend circ has already connected to this
* rend point. Close this circ. */
if (hop == NULL) {
log_info(LD_REND, "Another rend circ has already reached this rend point; "
"closing this rend circ.");
reason = END_CIRC_REASON_NONE;
goto err;
}
/* Remove our final cpath element from the reference, so that no
* other circuit will try to use it. Store it in
* pending_final_cpath for now to ensure that it will be freed if
* our rendezvous attempt fails. */
circuit->build_state->pending_final_cpath = hop;
circuit->build_state->service_pending_final_cpath_ref->cpath = NULL;
service = rend_service_get_by_pk_digest(
circuit->rend_data->rend_pk_digest);
if (!service) {
log_warn(LD_GENERAL, "Internal error: unrecognized service ID on "
"rendezvous circuit.");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
/* All we need to do is send a RELAY_RENDEZVOUS1 cell... */
memcpy(buf, circuit->rend_data->rend_cookie, REND_COOKIE_LEN);
if (crypto_dh_get_public(hop->rend_dh_handshake_state,
buf+REND_COOKIE_LEN, DH_KEY_LEN)<0) {
log_warn(LD_GENERAL,"Couldn't get DH public key.");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
memcpy(buf+REND_COOKIE_LEN+DH_KEY_LEN, hop->rend_circ_nonce,
DIGEST_LEN);
/* Send the cell */
if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit),
RELAY_COMMAND_RENDEZVOUS1,
buf, REND_COOKIE_LEN+DH_KEY_LEN+DIGEST_LEN,
circuit->cpath->prev)<0) {
log_warn(LD_GENERAL, "Couldn't send RENDEZVOUS1 cell.");
reason = END_CIRC_REASON_INTERNAL;
goto err;
}
crypto_dh_free(hop->rend_dh_handshake_state);
hop->rend_dh_handshake_state = NULL;
/* Append the cpath entry. */
hop->state = CPATH_STATE_OPEN;
/* Set the windows to their defaults; these are the windows
* that Bob thinks Alice has.
*/
hop->package_window = circuit_initial_package_window();
hop->deliver_window = CIRCWINDOW_START;
onion_append_to_cpath(&circuit->cpath, hop);
circuit->build_state->pending_final_cpath = NULL; /* prevent double-free */
/* Change the circuit purpose. */
circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_REND_JOINED);
goto done;
err:
circuit_mark_for_close(TO_CIRCUIT(circuit), reason);
done:
memwipe(buf, 0, sizeof(buf));
memwipe(serviceid, 0, sizeof(serviceid));
memwipe(hexcookie, 0, sizeof(hexcookie));
return;
}
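/*
* RELAY_COMMAND_RENDEZVOUS1 payload sent above:
*
* octets 0..REND_COOKIE_LEN-1 rendezvous cookie from the INTRODUCE2 cell
* next DH_KEY_LEN octets g^y, our half of the DH handshake
* next DIGEST_LEN octets KH, the handshake digest
* (hop->rend_circ_nonce)
*/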
/*
* Manage introduction points
*/
/** Return the (possibly non-open) introduction circuit ending at
* <b>intro</b> for the service whose public key is <b>pk_digest</b>.
* Return NULL if no such circuit is found.
*/
static origin_circuit_t *
find_intro_circuit(rend_intro_point_t *intro, const char *pk_digest)
{
origin_circuit_t *circ = NULL;
tor_assert(intro);
while ((circ = circuit_get_next_by_pk_and_purpose(circ,pk_digest,
CIRCUIT_PURPOSE_S_INTRO))) {
if (tor_memeq(circ->build_state->chosen_exit->identity_digest,
intro->extend_info->identity_digest, DIGEST_LEN) &&
circ->rend_data) {
return circ;
}
}
circ = NULL;
while ((circ = circuit_get_next_by_pk_and_purpose(circ,pk_digest,
CIRCUIT_PURPOSE_S_ESTABLISH_INTRO))) {
if (tor_memeq(circ->build_state->chosen_exit->identity_digest,
intro->extend_info->identity_digest, DIGEST_LEN) &&
circ->rend_data) {
return circ;
}
}
return NULL;
}
/** Return the introduction point in <b>service</b> that corresponds to the
* circuit <b>circ</b>, or NULL if not found. */
static rend_intro_point_t *
find_expiring_intro_point(rend_service_t *service, origin_circuit_t *circ)
{
tor_assert(service);
tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_INTRO);
SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro_point,
if (crypto_pk_eq_keys(intro_point->intro_key, circ->intro_key)) {
return intro_point;
});
return NULL;
}
/** Return a pointer to the rend_intro_point_t corresponding to the
* service-side introduction circuit <b>circ</b>. */
static rend_intro_point_t *
find_intro_point(origin_circuit_t *circ)
{
const char *serviceid;
rend_service_t *service = NULL;
tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_INTRO);
tor_assert(circ->rend_data);
serviceid = circ->rend_data->onion_address;
SMARTLIST_FOREACH(rend_service_list, rend_service_t *, s,
if (tor_memeq(s->service_id, serviceid, REND_SERVICE_ID_LEN_BASE32)) {
service = s;
break;
});
if (service == NULL) return NULL;
SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro_point,
if (crypto_pk_eq_keys(intro_point->intro_key, circ->intro_key)) {
return intro_point;
});
return NULL;
}
/** Upload the rend_encoded_v2_service_descriptor_t's in <b>descs</b>
* associated with the rend_service_descriptor_t <b>renddesc</b> to
* the responsible hidden service directories OR the hidden service
* directories specified by <b>hs_dirs</b>; <b>service_id</b> and
* <b>seconds_valid</b> are only passed for logging purposes.
*/
void
directory_post_to_hs_dir(rend_service_descriptor_t *renddesc,
smartlist_t *descs, smartlist_t *hs_dirs,
const char *service_id, int seconds_valid)
{
int i, j, failed_upload = 0;
smartlist_t *responsible_dirs = smartlist_new();
smartlist_t *successful_uploads = smartlist_new();
routerstatus_t *hs_dir;
for (i = 0; i < smartlist_len(descs); i++) {
rend_encoded_v2_service_descriptor_t *desc = smartlist_get(descs, i);
/* If any HSDirs are specified, they should be used instead of
* the responsible directories */
if (hs_dirs && smartlist_len(hs_dirs) > 0) {
smartlist_add_all(responsible_dirs, hs_dirs);
} else {
/* Determine responsible dirs. */
if (hid_serv_get_responsible_directories(responsible_dirs,
desc->desc_id) < 0) {
log_warn(LD_REND, "Could not determine the responsible hidden service "
"directories to post descriptors to.");
control_event_hs_descriptor_upload(service_id,
"UNKNOWN",
"UNKNOWN");
goto done;
}
}
for (j = 0; j < smartlist_len(responsible_dirs); j++) {
char desc_id_base32[REND_DESC_ID_V2_LEN_BASE32 + 1];
char *hs_dir_ip;
const node_t *node;
rend_data_t *rend_data;
hs_dir = smartlist_get(responsible_dirs, j);
if (smartlist_contains_digest(renddesc->successful_uploads,
hs_dir->identity_digest))
/* Don't upload descriptor if we succeeded in doing so last time. */
continue;
node = node_get_by_id(hs_dir->identity_digest);
if (!node || !node_has_descriptor(node)) {
log_info(LD_REND, "Not launching upload for for v2 descriptor to "
"hidden service directory %s; we don't have its "
"router descriptor. Queuing for later upload.",
safe_str_client(routerstatus_describe(hs_dir)));
failed_upload = -1;
continue;
}
/* Send publish request. */
/* We need the service ID to identify which service did the upload
* request. Lookup is made in rend_service_desc_has_uploaded(). */
rend_data = rend_data_client_create(service_id, desc->desc_id, NULL,
REND_NO_AUTH);
directory_initiate_command_routerstatus_rend(hs_dir,
DIR_PURPOSE_UPLOAD_RENDDESC_V2,
ROUTER_PURPOSE_GENERAL,
DIRIND_ANONYMOUS, NULL,
desc->desc_str,
strlen(desc->desc_str),
0, rend_data);
rend_data_free(rend_data);
base32_encode(desc_id_base32, sizeof(desc_id_base32),
desc->desc_id, DIGEST_LEN);
hs_dir_ip = tor_dup_ip(hs_dir->addr);
log_info(LD_REND, "Launching upload for v2 descriptor for "
"service '%s' with descriptor ID '%s' with validity "
"of %d seconds to hidden service directory '%s' on "
"%s:%d.",
safe_str_client(service_id),
safe_str_client(desc_id_base32),
seconds_valid,
hs_dir->nickname,
hs_dir_ip,
hs_dir->or_port);
control_event_hs_descriptor_upload(service_id,
hs_dir->identity_digest,
desc_id_base32);
tor_free(hs_dir_ip);
/* Remember successful upload to this router for next time. */
if (!smartlist_contains_digest(successful_uploads,
hs_dir->identity_digest))
smartlist_add(successful_uploads, hs_dir->identity_digest);
}
smartlist_clear(responsible_dirs);
}
if (!failed_upload) {
if (renddesc->successful_uploads) {
SMARTLIST_FOREACH(renddesc->successful_uploads, char *, c, tor_free(c););
smartlist_free(renddesc->successful_uploads);
renddesc->successful_uploads = NULL;
}
renddesc->all_uploads_performed = 1;
} else {
/* Remember which routers worked this time, so that we don't upload the
* descriptor to them again. */
if (!renddesc->successful_uploads)
renddesc->successful_uploads = smartlist_new();
SMARTLIST_FOREACH(successful_uploads, const char *, c, {
if (!smartlist_contains_digest(renddesc->successful_uploads, c)) {
char *hsdir_id = tor_memdup(c, DIGEST_LEN);
smartlist_add(renddesc->successful_uploads, hsdir_id);
}
});
}
done:
smartlist_free(responsible_dirs);
smartlist_free(successful_uploads);
}
/** Encode and sign an up-to-date service descriptor for <b>service</b>,
* and upload it/them to the responsible hidden service directories.
*/
static void
upload_service_descriptor(rend_service_t *service)
{
time_t now = time(NULL);
int rendpostperiod;
char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
int uploaded = 0;
rendpostperiod = get_options()->RendPostPeriod;
/* Upload descriptor? */
if (get_options()->PublishHidServDescriptors) {
networkstatus_t *c = networkstatus_get_latest_consensus();
if (c && smartlist_len(c->routerstatus_list) > 0) {
int seconds_valid, i, j, num_descs;
smartlist_t *descs = smartlist_new();
smartlist_t *client_cookies = smartlist_new();
/* Either upload a single descriptor (including replicas) or one
* descriptor for each authorized client in case of authorization
* type 'stealth'. */
num_descs = service->auth_type == REND_STEALTH_AUTH ?
smartlist_len(service->clients) : 1;
for (j = 0; j < num_descs; j++) {
crypto_pk_t *client_key = NULL;
rend_authorized_client_t *client = NULL;
smartlist_clear(client_cookies);
switch (service->auth_type) {
case REND_NO_AUTH:
/* Do nothing here. */
break;
case REND_BASIC_AUTH:
SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *,
cl, smartlist_add(client_cookies, cl->descriptor_cookie));
break;
case REND_STEALTH_AUTH:
client = smartlist_get(service->clients, j);
client_key = client->client_key;
smartlist_add(client_cookies, client->descriptor_cookie);
break;
}
/* Encode the current descriptor. */
seconds_valid = rend_encode_v2_descriptors(descs, service->desc,
now, 0,
service->auth_type,
client_key,
client_cookies);
if (seconds_valid < 0) {
log_warn(LD_BUG, "Internal error: couldn't encode service "
"descriptor; not uploading.");
smartlist_free(descs);
smartlist_free(client_cookies);
return;
}
/* Post the current descriptors to the hidden service directories. */
rend_get_service_id(service->desc->pk, serviceid);
log_info(LD_REND, "Launching upload for hidden service %s",
serviceid);
directory_post_to_hs_dir(service->desc, descs, NULL, serviceid,
seconds_valid);
/* Free memory for descriptors. */
for (i = 0; i < smartlist_len(descs); i++)
rend_encoded_v2_service_descriptor_free(smartlist_get(descs, i));
smartlist_clear(descs);
/* Update next upload time. */
if (seconds_valid - REND_TIME_PERIOD_OVERLAPPING_V2_DESCS
> rendpostperiod)
service->next_upload_time = now + rendpostperiod;
else if (seconds_valid < REND_TIME_PERIOD_OVERLAPPING_V2_DESCS)
service->next_upload_time = now + seconds_valid + 1;
else
service->next_upload_time = now + seconds_valid -
REND_TIME_PERIOD_OVERLAPPING_V2_DESCS + 1;
/* Post also the next descriptors, if necessary. */
if (seconds_valid < REND_TIME_PERIOD_OVERLAPPING_V2_DESCS) {
seconds_valid = rend_encode_v2_descriptors(descs, service->desc,
now, 1,
service->auth_type,
client_key,
client_cookies);
if (seconds_valid < 0) {
log_warn(LD_BUG, "Internal error: couldn't encode service "
"descriptor; not uploading.");
smartlist_free(descs);
smartlist_free(client_cookies);
return;
}
directory_post_to_hs_dir(service->desc, descs, NULL, serviceid,
seconds_valid);
/* Free memory for descriptors. */
for (i = 0; i < smartlist_len(descs); i++)
rend_encoded_v2_service_descriptor_free(smartlist_get(descs, i));
smartlist_clear(descs);
}
}
smartlist_free(descs);
smartlist_free(client_cookies);
uploaded = 1;
log_info(LD_REND, "Successfully uploaded v2 rend descriptors!");
}
}
/* If not uploaded, try again in one minute. */
if (!uploaded)
service->next_upload_time = now + 60;
/* Unmark dirty flag of this service. */
service->desc_is_dirty = 0;
}
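/*
* Next-upload scheduling used above, with T = seconds_valid,
* P = RendPostPeriod and O = REND_TIME_PERIOD_OVERLAPPING_V2_DESCS:
*
* T - O > P => next upload in P seconds (the regular period)
* T < O => next upload in T + 1 seconds, and descriptors for the
* next time period are posted immediately as well
* otherwise => next upload in T - O + 1 seconds
*
* If nothing was uploaded at all, retry in 60 seconds.
*/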
/** Return the number of INTRODUCE2 cells this hidden service has received
* from this intro point. */
static int
intro_point_accepted_intro_count(rend_intro_point_t *intro)
{
return intro->accepted_introduce2_count;
}
/** Return non-zero iff <b>intro</b> should 'expire' now (i.e. we
* should stop publishing it in new descriptors and eventually close
* it). */
static int
intro_point_should_expire_now(rend_intro_point_t *intro,
time_t now)
{
tor_assert(intro != NULL);
if (intro->time_published == -1) {
/* Don't expire an intro point if we haven't even published it yet. */
return 0;
}
if (intro_point_accepted_intro_count(intro) >=
intro->max_introductions) {
/* This intro point has been used too many times. Expire it now. */
return 1;
}
if (intro->time_to_expire == -1) {
/* This intro point has been published, but we haven't picked an
* expiration time for it. Pick one now. */
int intro_point_lifetime_seconds =
crypto_rand_int_range(INTRO_POINT_LIFETIME_MIN_SECONDS,
INTRO_POINT_LIFETIME_MAX_SECONDS);
/* Start the expiration timer now, rather than when the intro
* point was first published. There shouldn't be much of a time
* difference. */
intro->time_to_expire = now + intro_point_lifetime_seconds;
return 0;
}
/* This intro point has a time to expire set already. Use it. */
return (now >= intro->time_to_expire);
}
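/*
* Expiry policy implemented above: an intro point expires once it has
* either served max_introductions INTRODUCE2 cells, or outlived a
* lifetime drawn uniformly from [INTRO_POINT_LIFETIME_MIN_SECONDS,
* INTRO_POINT_LIFETIME_MAX_SECONDS]; the lifetime clock starts the first
* time this check runs after the intro point has been published.
*/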
/** Iterate over intro points in the given service and remove the invalid
* ones. For an intro point object to be considered invalid, the circuit
* _and_ node need to have disappeared.
*
* If the intro point should expire, it's placed into the expiring_nodes
* list of the service and removed from the active intro nodes list.
*
* If <b>exclude_nodes</b> is not NULL, add the valid nodes to it.
*
* If <b>retry_nodes</b> is not NULL, add the valid node to it if the
* circuit disappeared but the node is still in the consensus. */
static void
remove_invalid_intro_points(rend_service_t *service,
smartlist_t *exclude_nodes,
smartlist_t *retry_nodes, time_t now)
{
tor_assert(service);
SMARTLIST_FOREACH_BEGIN(service->intro_nodes, rend_intro_point_t *,
intro) {
/* Find the introduction point node object. */
const node_t *node =
node_get_by_id(intro->extend_info->identity_digest);
/* Find the intro circuit, this might be NULL. */
origin_circuit_t *intro_circ =
find_intro_circuit(intro, service->pk_digest);
/* Add the valid node to the exclusion list so we don't try to establish
* an introduction point to it again. */
if (node && exclude_nodes) {
smartlist_add(exclude_nodes, (void*) node);
}
/* First, make sure we still have a valid circuit for this intro point.
* If we don't, we'll give up on it and make a new one. */
if (intro_circ == NULL) {
log_info(LD_REND, "Attempting to retry on %s as intro point for %s"
" (circuit disappeared).",
safe_str_client(extend_info_describe(intro->extend_info)),
safe_str_client(service->service_id));
/* We've lost the circuit for this intro point, flag it so it can be
* accounted for when considering uploading a descriptor. */
intro->circuit_established = 0;
/* If the node is gone or we've reached our maximum circuit creation
* retry count, clean up everything; we'll find a new one. */
if (node == NULL ||
intro->circuit_retries >= MAX_INTRO_POINT_CIRCUIT_RETRIES) {
rend_intro_point_free(intro);
SMARTLIST_DEL_CURRENT(service->intro_nodes, intro);
/* We've just killed the intro point, nothing left to do. */
continue;
}
/* The intro point is still alive so let's try to use it again because
* we have a published descriptor containing it. Keep the intro point
* in the intro_nodes list because it's still valid, we are rebuilding
* a circuit to it. */
if (retry_nodes) {
smartlist_add(retry_nodes, intro);
}
}
/* else, the circuit is valid so in both cases, node being alive or not,
* we leave the circuit and intro point object as is. Closing the
* circuit here would leak new consensus timing and freeing the intro
* point object would make the intro circuit unusable. */
/* Now, check if intro point should expire. If it does, queue it so
* it can be cleaned up once it has been replaced properly. */
if (intro_point_should_expire_now(intro, now)) {
log_info(LD_REND, "Expiring %s as intro point for %s.",
safe_str_client(extend_info_describe(intro->extend_info)),
safe_str_client(service->service_id));
smartlist_add(service->expiring_nodes, intro);
SMARTLIST_DEL_CURRENT(service->intro_nodes, intro);
/* The intro point has expired and we need a new one, so don't consider
* it a valid established intro point anymore. */
intro->circuit_established = 0;
}
} SMARTLIST_FOREACH_END(intro);
}
/** A new descriptor has been successfully uploaded for the given
* <b>rend_data</b>. Remove and free the expiring nodes from the associated
* service. */
void
rend_service_desc_has_uploaded(const rend_data_t *rend_data)
{
rend_service_t *service;
tor_assert(rend_data);
service = rend_service_get_by_service_id(rend_data->onion_address);
if (service == NULL) {
log_warn(LD_REND, "Service %s not found after descriptor upload",
safe_str_client(rend_data->onion_address));
return;
}
SMARTLIST_FOREACH_BEGIN(service->expiring_nodes, rend_intro_point_t *,
intro) {
origin_circuit_t *intro_circ =
find_intro_circuit(intro, service->pk_digest);
if (intro_circ != NULL) {
circuit_mark_for_close(TO_CIRCUIT(intro_circ),
END_CIRC_REASON_FINISHED);
}
SMARTLIST_DEL_CURRENT(service->expiring_nodes, intro);
rend_intro_point_free(intro);
} SMARTLIST_FOREACH_END(intro);
}
/** For every service, check how many intro points it currently has, and:
* - Invalidate introduction points based on specific criteria, see
* remove_invalid_intro_points comments.
* - Pick new intro points as necessary.
* - Launch circuits to any new intro points.
*
* This is called once a second by the main loop.
*/
void
rend_consider_services_intro_points(void)
{
int i;
time_t now;
const or_options_t *options = get_options();
/* List of nodes we need to _exclude_ when choosing a new node to
* establish an intro point to. */
smartlist_t *exclude_nodes;
/* List of intro points we need to retry building a circuit to, because
* the node is still valid but the circuit died. */
smartlist_t *retry_nodes;
if (!have_completed_a_circuit())
return;
exclude_nodes = smartlist_new();
retry_nodes = smartlist_new();
now = time(NULL);
SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, service) {
int r;
/* Number of intro points we want to open and add to the intro nodes
* list of the service. */
unsigned int n_intro_points_to_open;
/* Use an unsigned len for comparisons, else gcc is not happy about
* mismatched signed/unsigned comparison. */
unsigned int intro_nodes_len;
/* Different services are allowed to have the same introduction point as
* long as they are on different circuits, which is why we clear this list. */
smartlist_clear(exclude_nodes);
smartlist_clear(retry_nodes);
/* This retry period is important here so we don't stress circuit
* creation. */
if (now > service->intro_period_started + INTRO_CIRC_RETRY_PERIOD) {
/* One period has elapsed; we can try building circuits again. */
service->intro_period_started = now;
service->n_intro_circuits_launched = 0;
} else if (service->n_intro_circuits_launched >=
MAX_INTRO_CIRCS_PER_PERIOD) {
/* We have failed too many times in this period; wait for the next
* one before we try again. */
continue;
}
/* Clean up the invalid intro points and, where applicable, save the node
* objects in the exclude_nodes and retry_nodes lists. */
remove_invalid_intro_points(service, exclude_nodes, retry_nodes, now);
/* Try to rebuild circuits to the nodes we want to retry. */
SMARTLIST_FOREACH_BEGIN(retry_nodes, rend_intro_point_t *, intro) {
r = rend_service_launch_establish_intro(service, intro);
if (r < 0) {
log_warn(LD_REND, "Error launching circuit to node %s for service %s.",
safe_str_client(extend_info_describe(intro->extend_info)),
safe_str_client(service->service_id));
/* Unable to launch a circuit to that intro point, remove it from
* the valid list so we can create a new one. */
smartlist_remove(service->intro_nodes, intro);
rend_intro_point_free(intro);
continue;
}
intro->circuit_retries++;
} SMARTLIST_FOREACH_END(intro);
/* Avoid a mismatched signed comparison below. */
intro_nodes_len = (unsigned int) smartlist_len(service->intro_nodes);
/* Quiescent state: no nodes are expiring and we have at least the wanted
* number of intro nodes for this service. Proceed to the next service.
* We could have more than wanted because we launch extra preemptive
* circuits if our intro nodes list is empty. */
if (smartlist_len(service->expiring_nodes) == 0 &&
intro_nodes_len >= service->n_intro_points_wanted) {
continue;
}
/* Number of intro points we want to open: the wanted amount minus the
* current number of valid nodes. */
n_intro_points_to_open = service->n_intro_points_wanted - intro_nodes_len;
if (intro_nodes_len == 0) {
/* We want to end up with n_intro_points_wanted intro points, but if
* we have no intro points at all (chances are they all cycled or we
* are starting up), we launch NUM_INTRO_POINTS_EXTRA extra circuits
* and use the first n_intro_points_wanted that complete. See proposal
* #155, section 4 for the rationale of this which is purely for
* performance.
*
* The ones after the first n_intro_points_to_open will be converted
* to 'general' internal circuits in rend_service_intro_has_opened(),
* and then we'll drop them from the list of intro points. */
n_intro_points_to_open += NUM_INTRO_POINTS_EXTRA;
}
for (i = 0; i < (int) n_intro_points_to_open; i++) {
const node_t *node;
rend_intro_point_t *intro;
router_crn_flags_t flags = CRN_NEED_UPTIME|CRN_NEED_DESC;
if (get_options()->AllowInvalid_ & ALLOW_INVALID_INTRODUCTION)
flags |= CRN_ALLOW_INVALID;
node = router_choose_random_node(exclude_nodes,
options->ExcludeNodes, flags);
if (!node) {
log_warn(LD_REND,
"We only have %d introduction points established for %s; "
"wanted %u.",
smartlist_len(service->intro_nodes),
safe_str_client(service->service_id),
n_intro_points_to_open);
break;
}
/* Add the chosen node to the exclusion list so we don't pick it again
* in the next iteration. */
smartlist_add(exclude_nodes, (void*)node);
intro = tor_malloc_zero(sizeof(rend_intro_point_t));
intro->extend_info = extend_info_from_node(node, 0);
intro->intro_key = crypto_pk_new();
const int fail = crypto_pk_generate_key(intro->intro_key);
tor_assert(!fail);
intro->time_published = -1;
intro->time_to_expire = -1;
intro->max_introductions =
crypto_rand_int_range(INTRO_POINT_MIN_LIFETIME_INTRODUCTIONS,
INTRO_POINT_MAX_LIFETIME_INTRODUCTIONS);
smartlist_add(service->intro_nodes, intro);
log_info(LD_REND, "Picked router %s as an intro point for %s.",
safe_str_client(node_describe(node)),
safe_str_client(service->service_id));
/* Establish new introduction circuit to our chosen intro point. */
r = rend_service_launch_establish_intro(service, intro);
if (r < 0) {
log_warn(LD_REND, "Error launching circuit to node %s for service %s.",
safe_str_client(extend_info_describe(intro->extend_info)),
safe_str_client(service->service_id));
/* This function will be called again by the main loop, so this intro
* point without an intro circuit will be retried or removed after
* a maximum number of attempts. */
}
}
} SMARTLIST_FOREACH_END(service);
smartlist_free(exclude_nodes);
smartlist_free(retry_nodes);
}
#define MIN_REND_INITIAL_POST_DELAY (30)
#define MIN_REND_INITIAL_POST_DELAY_TESTING (5)
/** Regenerate and upload rendezvous service descriptors for all
* services, if necessary. If the descriptor has been dirty enough
* for long enough, definitely upload; else only upload when the
* periodic timeout has expired.
*
* For the first upload, pick a random time between now and two periods
* from now, and pick it independently for each service.
*/
void
rend_consider_services_upload(time_t now)
{
int i;
rend_service_t *service;
int rendpostperiod = get_options()->RendPostPeriod;
int rendinitialpostdelay = (get_options()->TestingTorNetwork ?
MIN_REND_INITIAL_POST_DELAY_TESTING :
MIN_REND_INITIAL_POST_DELAY);
if (!get_options()->PublishHidServDescriptors)
return;
for (i=0; i < smartlist_len(rend_service_list); ++i) {
service = smartlist_get(rend_service_list, i);
if (!service->next_upload_time) { /* never been uploaded yet */
/* The fixed lower bound of rendinitialpostdelay seconds ensures that
* the descriptor is stable before being published. See comment below. */
service->next_upload_time =
now + rendinitialpostdelay + crypto_rand_int(2*rendpostperiod);
}
/* Have all the introduction points been established? */
unsigned int intro_points_ready =
count_established_intro_points(service) >=
service->n_intro_points_wanted;
if (intro_points_ready &&
(service->next_upload_time < now ||
(service->desc_is_dirty &&
service->desc_is_dirty < now-rendinitialpostdelay))) {
      /* If it's time, or if the directory servers hold an outdated service
       * descriptor and ours has been stable for rendinitialpostdelay seconds,
       * upload a new one of each format. */
rend_service_update_descriptor(service);
upload_service_descriptor(service);
}
}
}
/** True if the list of available router descriptors might have changed so
* that we should have a look whether we can republish previously failed
* rendezvous service descriptors. */
static int consider_republishing_rend_descriptors = 1;
/** Called when our internal view of the directory has changed, so that we
* might have router descriptors of hidden service directories available that
* we did not have before. */
void
rend_hsdir_routers_changed(void)
{
consider_republishing_rend_descriptors = 1;
}
/** Consider republication of v2 rendezvous service descriptors that failed
* previously, but without regenerating descriptor contents.
*/
void
rend_consider_descriptor_republication(void)
{
int i;
rend_service_t *service;
if (!consider_republishing_rend_descriptors)
return;
consider_republishing_rend_descriptors = 0;
if (!get_options()->PublishHidServDescriptors)
return;
for (i=0; i < smartlist_len(rend_service_list); ++i) {
service = smartlist_get(rend_service_list, i);
if (service->desc && !service->desc->all_uploads_performed) {
/* If we failed in uploading a descriptor last time, try again *without*
* updating the descriptor's contents. */
upload_service_descriptor(service);
}
}
}
/** Log the status of introduction points for all rendezvous services
* at log severity <b>severity</b>.
*/
void
rend_service_dump_stats(int severity)
{
int i,j;
rend_service_t *service;
rend_intro_point_t *intro;
const char *safe_name;
origin_circuit_t *circ;
for (i=0; i < smartlist_len(rend_service_list); ++i) {
service = smartlist_get(rend_service_list, i);
tor_log(severity, LD_GENERAL, "Service configured in \"%s\":",
service->directory);
for (j=0; j < smartlist_len(service->intro_nodes); ++j) {
intro = smartlist_get(service->intro_nodes, j);
safe_name = safe_str_client(intro->extend_info->nickname);
circ = find_intro_circuit(intro, service->pk_digest);
if (!circ) {
tor_log(severity, LD_GENERAL, " Intro point %d at %s: no circuit",
j, safe_name);
continue;
}
tor_log(severity, LD_GENERAL, " Intro point %d at %s: circuit is %s",
j, safe_name, circuit_state_to_string(circ->base_.state));
}
}
}
#ifdef HAVE_SYS_UN_H
/** Given <b>ports</b>, a smartlist containing rend_service_port_config_t,
 * add the given <b>p</b>, an AF_UNIX port, to the list. Return 0 on success;
 * else return -ENOSYS if AF_UNIX is not supported (see the function in the
 * #else branch below). */
static int
add_unix_port(smartlist_t *ports, rend_service_port_config_t *p)
{
tor_assert(ports);
tor_assert(p);
tor_assert(p->is_unix_addr);
smartlist_add(ports, p);
return 0;
}
/** Given <b>conn</b>, set it up to use the port values in <b>p</b>. Return 0
 * on success; else return -ENOSYS if AF_UNIX is not supported (see the
 * function in the #else branch below). */
static int
set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p)
{
tor_assert(conn);
tor_assert(p);
tor_assert(p->is_unix_addr);
conn->base_.socket_family = AF_UNIX;
tor_addr_make_unspec(&conn->base_.addr);
conn->base_.port = 1;
conn->base_.address = tor_strdup(p->unix_addr);
return 0;
}
#else /* !defined(HAVE_SYS_UN_H) */
static int
set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p)
{
(void) conn;
(void) p;
return -ENOSYS;
}
static int
add_unix_port(smartlist_t *ports, rend_service_port_config_t *p)
{
(void) ports;
(void) p;
return -ENOSYS;
}
#endif /* HAVE_SYS_UN_H */
/** Given <b>conn</b>, a rendezvous exit stream, look up the hidden service for
* 'circ', and look up the port and address based on conn-\>port.
* Assign the actual conn-\>addr and conn-\>port. Return -2 on failure
* for which the circuit should be closed, -1 on other failure,
* or 0 for success.
*/
int
rend_service_set_connection_addr_port(edge_connection_t *conn,
origin_circuit_t *circ)
{
rend_service_t *service;
char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
smartlist_t *matching_ports;
rend_service_port_config_t *chosen_port;
unsigned int warn_once = 0;
tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_S_REND_JOINED);
tor_assert(circ->rend_data);
log_debug(LD_REND,"beginning to hunt for addr/port");
base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1,
circ->rend_data->rend_pk_digest, REND_SERVICE_ID_LEN);
service = rend_service_get_by_pk_digest(
circ->rend_data->rend_pk_digest);
if (!service) {
log_warn(LD_REND, "Couldn't find any service associated with pk %s on "
"rendezvous circuit %u; closing.",
serviceid, (unsigned)circ->base_.n_circ_id);
return -2;
}
if (service->max_streams_per_circuit > 0) {
/* Enforce the streams-per-circuit limit, and refuse to provide a
* mapping if this circuit will exceed the limit. */
#define MAX_STREAM_WARN_INTERVAL 600
static struct ratelim_t stream_ratelim =
RATELIM_INIT(MAX_STREAM_WARN_INTERVAL);
if (circ->rend_data->nr_streams >= service->max_streams_per_circuit) {
log_fn_ratelim(&stream_ratelim, LOG_WARN, LD_REND,
"Maximum streams per circuit limit reached on rendezvous "
"circuit %u; %s. Circuit has %d out of %d streams.",
(unsigned)circ->base_.n_circ_id,
service->max_streams_close_circuit ?
"closing circuit" :
"ignoring open stream request",
circ->rend_data->nr_streams,
service->max_streams_per_circuit);
return service->max_streams_close_circuit ? -2 : -1;
}
}
matching_ports = smartlist_new();
SMARTLIST_FOREACH(service->ports, rend_service_port_config_t *, p,
{
if (conn->base_.port != p->virtual_port) {
continue;
}
if (!(p->is_unix_addr)) {
smartlist_add(matching_ports, p);
} else {
if (add_unix_port(matching_ports, p)) {
if (!warn_once) {
/* Unix port not supported so warn only once. */
log_warn(LD_REND,
"Saw AF_UNIX virtual port mapping for port %d on service "
"%s, which is unsupported on this platform. Ignoring it.",
conn->base_.port, serviceid);
}
warn_once++;
}
}
});
chosen_port = smartlist_choose(matching_ports);
smartlist_free(matching_ports);
if (chosen_port) {
if (!(chosen_port->is_unix_addr)) {
/* Get a non-AF_UNIX connection ready for connection_exit_connect() */
tor_addr_copy(&conn->base_.addr, &chosen_port->real_addr);
conn->base_.port = chosen_port->real_port;
} else {
if (set_unix_port(conn, chosen_port)) {
      /* It should be impossible to get here: it would mean we managed to
       * add a Unix port without AF_UNIX support. */
tor_assert(0);
}
}
return 0;
}
log_info(LD_REND,
"No virtual port mapping exists for port %d on service %s",
conn->base_.port, serviceid);
if (service->allow_unknown_ports)
return -1;
else
return -2;
}
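/* Illustrative torrc mapping that the function above resolves at connection
 * time (example values only, not taken from this file):
 *
 *   HiddenServiceDir /var/lib/tor/hidden_service/
 *   HiddenServicePort 80 127.0.0.1:8080
 *   HiddenServicePort 22 unix:/var/run/ssh.sock
 *
 * A stream to virtual port 80 of the onion address is rewritten to
 * 127.0.0.1:8080; the AF_UNIX target for port 22 is only usable where
 * HAVE_SYS_UN_H is defined, as handled by add_unix_port()/set_unix_port()
 * above. */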
|
/* eslint-disable react/prop-types */
/* eslint-disable no-unused-vars */
/* eslint-disable prefer-const */
/* eslint-disable react/prefer-stateless-function */
import React from 'react';
import {
FaTwitter, FaFacebookF, FaPinterestP, FaAngleLeft, FaAngleRight,
} from 'react-icons/fa';
import { v4 as uuidv4 } from 'uuid';
import { Link } from 'react-router-dom';
import '../styles/items.css';
class Items extends React.Component {
constructor(props) {
super(props);
this.state = {
data: {},
threeIndex: [],
workingdata: [],
      colorArray: ['wheat', 'wheat', 'rgb(59, 59, 92)', 'rgb(23, 23, 119)', 'rgb(59, 59, 119)', 'wheat'],
};
this.handleChangeLeft = this.handleChangeLeft.bind(this);
this.handleChangeRight = this.handleChangeRight.bind(this);
this.backColor = this.backColor.bind(this);
}
componentDidMount() {
const { response } = this.props;
const { threeIndex } = this.state;
let middle = Math.floor(response.length / 2);
const first = middle;
middle += 1;
const last = middle + 1;
threeIndex.push(first);
threeIndex.push(middle);
threeIndex.push(last);
    const fill = [];
    response.forEach((value, index) => {
      threeIndex.forEach(val => {
        if (index + 1 === val) {
          fill.push(value);
        }
      });
    });
    // Set state once, after the visible three-item window has been collected.
    this.setState({
      data: response,
      workingdata: fill,
    });
}
  handleChangeLeft() {
    const { threeIndex, data, workingdata } = this.state;
    if (threeIndex[0] >= 2) {
      threeIndex[0] -= 1;
      threeIndex[1] -= 1;
      threeIndex[2] -= 1;
    }
    data.forEach((value, index) => {
      threeIndex.forEach((val, ind) => {
        if (index + 1 === val) {
          workingdata[ind] = value;
        }
      });
    });
    this.setState({
      threeIndex,
      workingdata,
    });
  }
  handleChangeRight() {
    const { threeIndex, data, workingdata } = this.state;
    const limit = data.length;
    if (threeIndex[0] <= limit - 3) {
      threeIndex[0] += 1;
      threeIndex[1] += 1;
      threeIndex[2] += 1;
    }
    data.forEach((value, index) => {
      threeIndex.forEach((val, ind) => {
        if (index + 1 === val) {
          workingdata[ind] = value;
        }
      });
    });
    this.setState({
      threeIndex,
      workingdata,
    });
  }
backColor(index) {
const { colorArray } = this.state;
return colorArray[index - 1];
}
render() {
const { workingdata, threeIndex } = this.state;
const myitems = Object.keys(workingdata);
const display = workingdata.length !== 0
? (myitems.map((post, i) => (
<div key={uuidv4()} className="utopian-items">
<div className="itemContainer">
<div className="backgroundc" style={{ backgroundColor: `${this.backColor(threeIndex[0] + i)}` }} />
<Link to={`/model/${workingdata[i].id}`}>
<img alt="img" className="itemsImg" src={`https://res.cloudinary.com/dhxgtfnci/image/upload//hospital/tesla${threeIndex[0] + i}.webp`} />
</Link>
</div>
<p className="teslanames">
{workingdata[i].name}
</p>
<div className="dots">............</div>
<p className="itemdescrption">{workingdata[i].description}</p>
<div className="footer">
<FaTwitter className="icons" />
<FaFacebookF className="icons" />
<FaPinterestP className="icons" />
</div>
</div>
))) : (<div>Loading</div>);
return (
<div className="itemsContainer">
<button
type="button"
className="leftbutton"
onClick={this.handleChangeLeft}
>
<FaAngleLeft />
</button>
{display}
<button
type="button"
className="rightbutton"
onClick={this.handleChangeRight}
>
<FaAngleRight />
</button>
</div>
);
}
}
export default Items;
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from PySide2 import QtCore
from GridCal.Engine.Devices.wire import Wire
from GridCal.Engine.Devices.tower import Tower, WireInTower
"""
Equations source:
a) ATP-EMTP theory book
Typical values of earth resistivity:
10 Ω·m - swampy ground
100 Ω·m - average damp earth
1000 Ω·m - dry earth
"""
class WiresTable(QtCore.QAbstractTableModel):
def __init__(self, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.header = ['Name', 'R (Ohm/km)', 'GMR (m)']
self.index_prop = {0: 'name', 1: 'r', 2: 'gmr'}
self.converter = {0: str, 1: float, 2: float}
self.editable = [True, True, True]
self.wires = list()
def add(self, wire: Wire):
"""
Add wire
:param wire:
:return:
"""
row = len(self.wires)
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.wires.append(wire)
self.endInsertRows()
def delete(self, index):
"""
Delete wire
:param index:
:return:
"""
        # Remove the row actually being deleted, not the last one.
        self.beginRemoveRows(QtCore.QModelIndex(), index, index)
self.wires.pop(index)
self.endRemoveRows()
def is_used(self, name):
"""
checks if the name is used
"""
n = len(self.wires)
for i in range(n-1, -1, -1):
if self.wires[i].name == name:
return True
return False
def flags(self, index):
if self.editable[index.column()]:
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
else:
return QtCore.Qt.ItemIsEnabled
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self.wires)
def columnCount(self, parent=QtCore.QModelIndex()):
return len(self.header)
def parent(self, index=None):
return QtCore.QModelIndex()
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
val = getattr(self.wires[index.row()], self.index_prop[index.column()])
return str(val)
return None
def headerData(self, p_int, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self.header[p_int]
def setData(self, index, value, role=QtCore.Qt.DisplayRole):
"""
Set data by simple editor (whatever text)
:param index:
:param value:
:param role:
"""
if self.editable[index.column()]:
wire = self.wires[index.row()]
attr = self.index_prop[index.column()]
            if attr == 'name':  # reject duplicate names
if self.is_used(value):
pass
else:
setattr(wire, attr, self.converter[index.column()](value))
else:
setattr(wire, attr, self.converter[index.column()](value))
return True
class WiresCollection(QtCore.QAbstractTableModel):
def __init__(self, parent=None, wires_in_tower=()):
"""
:param parent:
:param wires_in_tower:
"""
QtCore.QAbstractTableModel.__init__(self, parent)
self.header = ['name', 'X (m)', 'Y (m)', 'phase']
self.index_prop = {0: 'name', 1: 'xpos', 2: 'ypos', 3: 'phase'}
self.converter = {0: str, 1: float, 2: float, 3: float}
self.editable = [False, True, True, True]
self.wires_in_tower = list(wires_in_tower)
def add(self, wire: WireInTower):
"""
Add wire
:param wire:
:return:
"""
row = len(self.wires_in_tower)
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.wires_in_tower.append(wire)
self.endInsertRows()
def delete(self, index):
"""
Delete wire
:param index: index of the wire
:return:
"""
        # Remove the row actually being deleted, not the last one.
        self.beginRemoveRows(QtCore.QModelIndex(), index, index)
self.wires_in_tower.pop(index)
self.endRemoveRows()
def is_used(self, name):
"""
checks if the name is used
"""
n = len(self.wires_in_tower)
for i in range(n-1, -1, -1):
if self.wires_in_tower[i].name == name:
return True
return False
def flags(self, index):
if self.editable[index.column()]:
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
else:
return QtCore.Qt.ItemIsEnabled
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self.wires_in_tower)
def columnCount(self, parent=QtCore.QModelIndex()):
return len(self.header)
def parent(self, index=None):
return QtCore.QModelIndex()
def data(self, index, role=QtCore.Qt.DisplayRole):
if index.isValid():
if role == QtCore.Qt.DisplayRole:
val = getattr(self.wires_in_tower[index.row()], self.index_prop[index.column()])
return str(val)
return None
def headerData(self, p_int, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self.header[p_int]
def setData(self, index, value, role=QtCore.Qt.DisplayRole):
"""
Set data by simple editor (whatever text)
:param index:
:param value:
:param role:
"""
if self.editable[index.column()]:
wire = self.wires_in_tower[index.row()]
attr = self.index_prop[index.column()]
            if attr == 'name':  # reject duplicate names
if self.is_used(value):
pass
else:
setattr(wire, attr, self.converter[index.column()](value))
else:
setattr(wire, attr, self.converter[index.column()](value))
return True
class TowerModel(QtCore.QAbstractTableModel):
def __init__(self, parent=None, edit_callback=None, tower: Tower=None):
"""
:param parent:
:param edit_callback:
:param tower:
"""
QtCore.QAbstractTableModel.__init__(self)
if tower is None:
self.tower = Tower()
else:
self.tower = tower
# other properties
self.edit_callback = edit_callback
def __str__(self):
return self.tower.name
def add(self, wire: WireInTower):
"""
Add wire
:param wire:
:return:
"""
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self.tower.wires_in_tower.append(wire)
self.endInsertRows()
def delete(self, index):
"""
Delete wire
:param index:
:return:
"""
        # Remove the row actually being deleted, not the last one.
        self.beginRemoveRows(QtCore.QModelIndex(), index, index)
self.tower.wires_in_tower.pop(index)
self.endRemoveRows()
def delete_by_name(self, wire: Wire):
"""
Delete wire by name
:param wire: Wire object
"""
n = len(self.tower.wires_in_tower)
for i in range(n-1, -1, -1):
if self.tower.wires_in_tower[i].name == wire.name:
self.delete(i)
def is_used(self, wire: Wire):
"""
:param wire:
:return:
"""
n = len(self.tower.wires_in_tower)
for i in range(n-1, -1, -1):
if self.tower.wires_in_tower[i].name == wire.name:
return True
def flags(self, index):
"""
:param index:
:return:
"""
if self.tower.editable_wire[index.column()]:
return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
else:
return QtCore.Qt.ItemIsEnabled
def rowCount(self, parent=None):
"""
:param parent:
:return:
"""
return len(self.tower.wires_in_tower)
def columnCount(self, parent=None):
"""
:param parent:
:return:
"""
return len(self.tower.header)
def data(self, index, role=QtCore.Qt.DisplayRole):
"""
:param index:
:param role:
:return:
"""
if index.isValid():
if role == QtCore.Qt.DisplayRole:
val = getattr(self.tower.wires_in_tower[index.row()], self.tower.index_prop[index.column()])
return str(val)
return None
def headerData(self, p_int, orientation, role):
"""
:param p_int:
:param orientation:
:param role:
:return:
"""
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self.tower.header[p_int]
def setData(self, index, value, role=QtCore.Qt.DisplayRole):
"""
Set data by simple editor (whatever text)
:param index:
:param value:
:param role:
"""
if self.tower.editable_wire[index.column()]:
wire = self.tower.wires_in_tower[index.row()]
attr = self.tower.index_prop[index.column()]
try:
val = self.tower.converter[index.column()](value)
            except (ValueError, TypeError):
                val = 0
# correct the phase to the correct range
if attr == 'phase':
if val < 0 or val > 3:
val = 0
setattr(wire, attr, val)
if self.edit_callback is not None:
self.edit_callback()
return True
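# Usage sketch for the models above (constructor arguments are hypothetical;
# see the Wire and Tower classes for the real signatures):
#
#   table = WiresTable()
#   table.add(Wire(name='ACSR 170', gmr=0.0045, r=0.21))
#   model = TowerModel(tower=Tower(), edit_callback=None)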
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2018-2021 The EMRALS Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import EMRALSTestFramework
class IncludeConfTest(EMRALSTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
# Create additional config files
# - tmpdir/node0/relative.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
# - tmpdir/node0/relative2.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "emrals.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
# Commented out as long as we ignore invalid arguments in configuration files
#with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# f.write("foo=bar\n")
#self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Invalid configuration value foo")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "emrals.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
|
import variable from "./../variables/platform";
export default (variables = variable) => {
const platform = variables.platform;
const cardItemTheme = {
"NativeBase.Left": {
"NativeBase.Body": {
"NativeBase.Text": {
".note": {
color: variables.listNoteColor,
fontWeight: "400",
marginRight: 20,
},
},
flex: 1,
marginLeft: 10,
alignItems: null,
},
"NativeBase.Icon": {
fontSize: variables.iconFontSize,
},
"NativeBase.IconNB": {
fontSize: variables.iconFontSize,
},
"NativeBase.Text": {
marginLeft: 10,
alignSelf: "center",
},
"NativeBase.Button": {
".transparent": {
"NativeBase.Text": {
fontSize: variables.DefaultFontSize - 4,
color: variables.sTabBarActiveTextColor,
},
"NativeBase.Icon": {
fontSize: variables.iconFontSize - 10,
color: variables.sTabBarActiveTextColor,
marginHorizontal: null,
},
"NativeBase.IconNB": {
fontSize: variables.iconFontSize - 10,
color: variables.sTabBarActiveTextColor,
},
paddingVertical: null,
paddingHorizontal: null,
paddingRight: variables.listItemPadding + 5,
},
},
flex: 1,
flexDirection: "row",
alignItems: "center",
},
".content": {
"NativeBase.Text": {
color: platform === "ios" ? "#555" : "#222",
fontSize: variables.DefaultFontSize - 3,
},
},
".cardBody": {
"NativeBase.Text": {
marginTop: 5,
},
},
"NativeBase.Body": {
"NativeBase.Text": {
".note": {
color: variables.listNoteColor,
fontWeight: "200",
marginRight: 20,
},
lineHeight: 20,
fontSize: variables.DefaultFontSize - 2,
},
"NativeBase.Button": {
".transparent": {
"NativeBase.Text": {
fontSize: variables.DefaultFontSize - 4,
color: variables.sTabBarActiveTextColor,
},
"NativeBase.Icon": {
fontSize: variables.iconFontSize - 10,
color: variables.sTabBarActiveTextColor,
marginHorizontal: null,
},
"NativeBase.IconNB": {
fontSize: variables.iconFontSize - 10,
color: variables.sTabBarActiveTextColor,
},
paddingVertical: null,
paddingHorizontal: null,
paddingRight: variables.listItemPadding + 5,
alignSelf: "stretch",
},
},
flex: 1,
alignSelf: "stretch",
alignItems: "flex-start",
},
"NativeBase.Right": {
"NativeBase.Badge": {
alignSelf: null,
},
"NativeBase.Button": {
".transparent": {
"NativeBase.Text": {
fontSize: variables.DefaultFontSize - 4,
color: variables.sTabBarActiveTextColor,
},
"NativeBase.Icon": {
fontSize: variables.iconFontSize - 10,
color: variables.sTabBarActiveTextColor,
marginHorizontal: null,
},
"NativeBase.IconNB": {
fontSize: variables.iconFontSize - 10,
color: variables.sTabBarActiveTextColor,
},
paddingVertical: null,
paddingHorizontal: null,
},
alignSelf: null,
},
"NativeBase.Icon": {
alignSelf: null,
fontSize: variables.iconFontSize - 8,
color: variables.cardBorderColor,
},
"NativeBase.IconNB": {
alignSelf: null,
fontSize: variables.iconFontSize - 8,
color: variables.cardBorderColor,
},
"NativeBase.Text": {
fontSize: variables.DefaultFontSize - 2,
alignSelf: null,
},
"NativeBase.Thumbnail": {
alignSelf: null,
},
"NativeBase.Image": {
alignSelf: null,
},
"NativeBase.Radio": {
alignSelf: null,
},
"NativeBase.Checkbox": {
alignSelf: null,
},
"NativeBase.Switch": {
alignSelf: null,
},
flex: 0.8,
},
".header": {
"NativeBase.Text": {
fontSize: 16,
fontWeight: platform === "ios" ? "500" : undefined,
},
".bordered": {
"NativeBase.Text": {
color: variables.sTabBarActiveTextColor,
fontWeight: platform === "ios" ? "500" : undefined,
},
borderBottomWidth: platform === "ios" ? variables.borderWidth : null,
},
borderBottomWidth: null,
paddingVertical: variables.listItemPadding + 5,
},
".footer": {
"NativeBase.Text": {
fontSize: 16,
fontWeight: platform === "ios" ? "500" : undefined,
},
".bordered": {
"NativeBase.Text": {
color: variables.activeTab,
fontWeight: "500",
},
borderTopWidth: platform === "ios" ? variables.borderWidth : null,
},
borderBottomWidth: null,
},
"NativeBase.Text": {
".note": {
color: variables.listNoteColor,
fontWeight: "200",
},
},
"NativeBase.Icon": {
width: variables.iconFontSize + 5,
fontSize: variables.iconFontSize - 2,
},
"NativeBase.IconNB": {
width: variables.iconFontSize + 5,
fontSize: variables.iconFontSize - 2,
},
".bordered": {
borderBottomWidth: variables.borderWidth,
borderColor: variables.cardBorderColor,
},
padding: variables.listItemPadding + 5,
paddingVertical: variables.listItemPadding,
backgroundColor: variables.cardDefaultBg,
};
return cardItemTheme;
};
|
/* $FreeBSD: src/sys/kern/sysv_sem.c,v 1.69 2004/03/17 09:37:13 cperciva Exp $ */
/*
* Implementation of SVID semaphores
*
* Author: Daniel Boulet
* Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>
*
* This software is provided ``AS IS'' without any warranties of any kind.
*/
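/*
 * Usage sketch of the SVID semaphore API implemented below (a minimal,
 * illustrative example; error handling omitted and not compiled in):
 */
#if 0
static void
example_sem_usage(void)
{
	struct sembuf op;
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	op.sem_num = 0;		/* first (and only) semaphore in the set */
	op.sem_op = 1;		/* V operation: release */
	op.sem_flg = SEM_UNDO;	/* roll back automatically on process exit */
	semop(semid, &op, 1);

	op.sem_op = -1;		/* P operation: acquire */
	semop(semid, &op, 1);

	semctl(semid, 0, IPC_RMID);
}
#endif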
#include "namespace.h"
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <err.h>
#include <pthread.h>
#include <string.h>
#include <stdarg.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/sem.h>
#include "un-namespace.h"
#include "sysvipc_lock.h"
#include "sysvipc_ipc.h"
#include "sysvipc_shm.h"
#include "sysvipc_sem.h"
#include "sysvipc_hash.h"
#define SYSV_MUTEX_LOCK(x) if (__isthreaded) _pthread_mutex_lock(x)
#define SYSV_MUTEX_UNLOCK(x) if (__isthreaded) _pthread_mutex_unlock(x)
#define SYSV_MUTEX_DESTROY(x) if (__isthreaded) _pthread_mutex_destroy(x)
extern struct hashtable *shmaddrs;
extern struct hashtable *shmres;
extern pthread_mutex_t lock_resources;
struct sem_undo *undos = NULL;
pthread_mutex_t lock_undo = PTHREAD_MUTEX_INITIALIZER;
static int semundo_clear(int, int);
static int
put_shmdata(int id) {
struct shm_data *data;
int ret = -1;
SYSV_MUTEX_LOCK(&lock_resources);
data = _hash_lookup(shmres, id);
if (!data) {
sysv_print_err("something wrong put_shmdata\n");
goto done; /* It should not reach here. */
}
data->used--;
if (data->used == 0 && data->removed) {
sysv_print("really remove the sem\n");
SYSV_MUTEX_UNLOCK(&lock_resources);
/* OBS: Even if the shmctl fails (the thread doesn't
* have IPC_M permissions), all structures associated
* with it will be removed in the current process.*/
sysvipc_shmdt(data->internal);
semundo_clear(id, -1);
if (data->removed == SEG_ALREADY_REMOVED)
			return 1; /* The semaphore was removed by another
				   * process, so there is nothing else we
				   * must do. */
/* Else inform the daemon that the segment is removed. */
return (sysvipc_shmctl(id, IPC_RMID, NULL));
}
ret = 0;
done:
SYSV_MUTEX_UNLOCK(&lock_resources);
return (ret);
}
static struct semid_pool*
get_semaptr(int semid, int to_remove, int shm_access) {
struct semid_pool *semaptr;
struct shm_data *shmdata = get_shmdata(semid, to_remove, shm_access);
if (!shmdata) {
/* Error is set in get_shmdata. */
return (NULL);
}
semaptr = (struct semid_pool *)shmdata->internal;
if (!semaptr) {
put_shmdata(semid);
errno = EINVAL;
return (NULL);
}
return (semaptr);
}
static int
sema_exist(int semid, struct semid_pool *semaptr) {
/* Was it removed? */
if (semaptr->gen == -1 ||
semaptr->ds.sem_perm.seq != IPCID_TO_SEQ(semid))
return (0);
return (1);
}
/* This is the function called when the semaphore
 * is discovered as removed. It marks the process's
 * internal data and munmaps the segment. */
static void
mark_for_removal(int shmid) {
sysv_print("Mark that the segment was removed\n");
get_shmdata(shmid, SEG_ALREADY_REMOVED, 0);
/* Setting SEG_ALREADY_REMOVED parameter, when put_shmdata
* is called, the internal resources will be freed.
*/
/* Decrement the "usage" field. */
put_shmdata(shmid);
}
static int
try_rwlock_rdlock(int semid, struct semid_pool *semaptr) {
sysv_print(" before rd lock id = %d %x\n", semid, semaptr);
#ifdef SYSV_RWLOCK
sysv_rwlock_rdlock(&semaptr->rwlock);
sysv_print("rd lock id = %d\n", semid);
#else
sysv_mutex_lock(&semaptr->mutex);
sysv_print("lock id = %d\n", semid);
#endif
if (!sema_exist(semid, semaptr)) {
errno = EINVAL;
sysv_print("error sema %d doesn't exist\n", semid);
#ifdef SYSV_RWLOCK
sysv_rwlock_unlock(&semaptr->rwlock);
#else
sysv_mutex_unlock(&semaptr->mutex);
#endif
/* Internal resources must be freed. */
mark_for_removal(semid);
return (-1);
}
return (0);
}
static int
try_rwlock_wrlock(int semid, struct semid_pool *semaptr) {
#ifdef SYSV_RWLOCK
sysv_print("before wrlock id = %d %x\n", semid, semaptr);
sysv_rwlock_wrlock(&semaptr->rwlock);
#else
sysv_print("before lock id = %d %x\n", semid, semaptr);
sysv_mutex_lock(&semaptr->mutex);
#endif
sysv_print("lock id = %d\n", semid);
if (!sema_exist(semid, semaptr)) {
errno = EINVAL;
sysv_print("error sema %d doesn't exist\n", semid);
#ifdef SYSV_RWLOCK
sysv_rwlock_unlock(&semaptr->rwlock);
#else
sysv_mutex_unlock(&semaptr->mutex);
#endif
/* Internal resources must be freed. */
mark_for_removal(semid);
return (-1);
}
return (0);
}
static int
rwlock_unlock(int semid, struct semid_pool *semaptr) {
sysv_print("unlock id = %d %x\n", semid, semaptr);
if (!sema_exist(semid, semaptr)) {
/* Internal resources must be freed. */
mark_for_removal(semid);
errno = EINVAL;
return (-1);
}
#ifdef SYSV_RWLOCK
sysv_rwlock_unlock(&semaptr->rwlock);
#else
sysv_mutex_unlock(&semaptr->mutex);
#endif
return (0);
}
int
sysvipc_semget(key_t key, int nsems, int semflg) {
int semid;
void *shmaddr;
//int shm_access;
int size = sizeof(struct semid_pool) + nsems * sizeof(struct sem);
//TODO resources limits
sysv_print("handle semget\n");
semid = _shmget(key, size, semflg, SEMGET);
if (semid == -1) {
/* errno already set. */
goto done;
}
/* If the semaphore is in process of being removed there are two cases:
* - the daemon knows that and it will handle this situation.
* - one of the threads from this address space remove it and the daemon
* wasn't announced yet; in this scenario, the semaphore is marked
* using "removed" field of shm_data and future calls will return
* EIDRM error.
*/
#if 0
/* Set access type. */
shm_access = semflg & (IPC_W | IPC_R);
if(set_shmdata_access(semid, shm_access) != 0) {
/* errno already set. */
goto done;
}
#endif
shmaddr = sysvipc_shmat(semid, NULL, 0);
if (!shmaddr) {
		/* Remove the failed segment before invalidating the id we return. */
		sysvipc_shmctl(semid, IPC_RMID, NULL);
		semid = -1;
goto done;
}
//TODO more semaphores in a single file
done:
sysv_print("end handle semget %d\n", semid);
return (semid);
}
static int
semundo_clear(int semid, int semnum)
{
struct undo *sunptr;
int i;
sysv_print("semundo clear\n");
SYSV_MUTEX_LOCK(&lock_undo);
if (!undos)
goto done;
sunptr = &undos->un_ent[0];
i = 0;
while (i < undos->un_cnt) {
if (sunptr->un_id == semid) {
if (semnum == -1 || sunptr->un_num == semnum) {
undos->un_cnt--;
if (i < undos->un_cnt) {
undos->un_ent[i] =
undos->un_ent[undos->un_cnt];
continue;
}
}
if (semnum != -1)
break;
}
++i;
++sunptr;
}
	//TODO: shrink memory if needed; not sure it is necessary
done:
SYSV_MUTEX_UNLOCK(&lock_undo);
sysv_print("end semundo clear\n");
return (0);
}
int
sysvipc___semctl(int semid, int semnum , int cmd, union semun *arg)
{
int i, error;
struct semid_pool *semaptr = NULL;
struct sem *semptr = NULL;
struct shmid_ds shmds;
int shm_access = 0;
/*if (!jail_sysvipc_allowed && cred->cr_prison != NULL)
return (ENOSYS);
*/
sysv_print("semctl cmd = %d\n", cmd);
error = 0;
switch (cmd) {
case IPC_SET: /* Originally was IPC_M but this is checked
by daemon. */
case SETVAL:
case SETALL:
shm_access = IPC_W;
break;
case IPC_STAT:
case GETNCNT:
case GETPID:
case GETVAL:
case GETALL:
case GETZCNT:
shm_access = IPC_R;
break;
default:
break;
}
semaptr = get_semaptr(semid, cmd==IPC_RMID, shm_access);
if (!semaptr) {
/* errno already set. */
return (-1);
}
switch (cmd) {
case IPC_RMID:
/* Mark that the segment is removed. This is done in
* get_semaptr call in order to announce other processes.
* It will be actually removed after put_shmdata call and
* not other thread from this address space use shm_data
* structure.
*/
break;
case IPC_SET:
if (!arg->buf) {
error = EFAULT;
break;
}
		memset(&shmds, 0, sizeof(shmds));
memcpy(&shmds.shm_perm, &arg->buf->sem_perm,
sizeof(struct ipc_perm));
error = sysvipc_shmctl(semid, cmd, &shmds);
/* OBS: didn't update ctime and mode as in kernel implementation
* it is done. Those fields are already updated for shmid_ds
* struct when calling shmctl
*/
break;
case IPC_STAT:
if (!arg->buf) {
error = EFAULT;
break;
}
error = sysvipc_shmctl(semid, cmd, &shmds);
if (error)
break;
memcpy(&arg->buf->sem_perm, &shmds.shm_perm,
sizeof(struct ipc_perm));
arg->buf->sem_nsems = (shmds.shm_segsz - sizeof(struct semid_pool)) /
sizeof(struct sem);
arg->buf->sem_ctime = shmds.shm_ctime;
/* otime is semaphore specific so read it from
* semaptr
*/
error = try_rwlock_rdlock(semid, semaptr);
if (error)
break;
arg->buf->sem_otime = semaptr->ds.sem_otime;
rwlock_unlock(semid, semaptr);
break;
case GETNCNT:
if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
errno = EINVAL;
break;
}
error = try_rwlock_rdlock(semid, semaptr);
if (error)
break;
error = semaptr->ds.sem_base[semnum].semncnt;
rwlock_unlock(semid, semaptr);
break;
case GETPID:
if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
errno = EINVAL;
break;
}
error = try_rwlock_rdlock(semid, semaptr);
if (error)
break;
error = semaptr->ds.sem_base[semnum].sempid;
rwlock_unlock(semid, semaptr);
break;
case GETVAL:
if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
errno = EINVAL;
break;
}
error = try_rwlock_rdlock(semid, semaptr);
if (error)
break;
error = semaptr->ds.sem_base[semnum].semval;
rwlock_unlock(semid, semaptr);
break;
case GETALL:
if (!arg->array) {
error = EFAULT;
break;
}
error = try_rwlock_rdlock(semid, semaptr);
if (error)
break;
for (i = 0; i < semaptr->ds.sem_nsems; i++) {
arg->array[i] = semaptr->ds.sem_base[i].semval;
}
rwlock_unlock(semid, semaptr);
break;
case GETZCNT:
if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
errno = EINVAL;
break;
}
error = try_rwlock_rdlock(semid, semaptr);
if (error)
break;
error = semaptr->ds.sem_base[semnum].semzcnt;
rwlock_unlock(semid, semaptr);
break;
case SETVAL:
if (semnum < 0 || semnum >= semaptr->ds.sem_nsems) {
errno = EINVAL;
break;
}
error = try_rwlock_wrlock(semid, semaptr);
if (error)
break;
semptr = &semaptr->ds.sem_base[semnum];
semptr->semval = arg->val;
semundo_clear(semid, semnum);
if (semptr->semzcnt || semptr->semncnt)
umtx_wakeup((int *)&semptr->semval, 0);
rwlock_unlock(semid, semaptr);
break;
case SETALL:
if (!arg->array) {
error = EFAULT;
break;
}
error = try_rwlock_wrlock(semid, semaptr);
if (error)
break;
for (i = 0; i < semaptr->ds.sem_nsems; i++) {
semptr = &semaptr->ds.sem_base[i];
semptr->semval = arg->array[i];
if (semptr->semzcnt || semptr->semncnt)
umtx_wakeup((int *)&semptr->semval, 0);
}
semundo_clear(semid, -1);
rwlock_unlock(semid, semaptr);
break;
default:
errno = EINVAL;
break;
}
put_shmdata(semid);
sysv_print("end semctl\n");
return (error);
}
/*
* Adjust a particular entry for a particular proc
*/
static int
semundo_adjust(int semid, int semnum, int adjval)
{
struct undo *sunptr;
int i;
int error = 0;
size_t size;
int undoid;
void *addr;
struct shm_data *data;
sysv_print("semundo adjust\n");
if (!adjval)
goto done;
SYSV_MUTEX_LOCK(&lock_undo);
if (!undos) {
sysv_print("get undo segment\n");
undoid = _shmget(IPC_PRIVATE, PAGE_SIZE, IPC_CREAT | IPC_EXCL | 0600,
UNDOGET);
if (undoid == -1) {
sysv_print_err("no undo segment\n");
return (-1);
}
addr = sysvipc_shmat(undoid, NULL, 0);
if (!addr) {
sysv_print_err("can not map undo segment\n");
sysvipc_shmctl(undoid, IPC_RMID, NULL);
return (-1);
}
undos = (struct sem_undo *)addr;
undos->un_pages = 1;
undos->un_cnt = 0;
}
/*
* Look for the requested entry and adjust it (delete if adjval becomes
* 0).
*/
sunptr = &undos->un_ent[0];
for (i = 0; i < undos->un_cnt; i++, sunptr++) {
if (sunptr->un_id != semid && sunptr->un_num != semnum)
continue;
sunptr->un_adjval += adjval;
if (sunptr->un_adjval == 0) {
undos->un_cnt--;
if (i < undos->un_cnt)
undos->un_ent[i] = undos->un_ent[undos->un_cnt];
}
goto done;
}
/* Didn't find the right entry - create it */
size = sizeof(struct sem_undo) + (undos->un_cnt + 1) *
sizeof(struct sem_undo);
if (size > (unsigned int)(undos->un_pages * PAGE_SIZE)) {
sysv_print("need more undo space\n");
sysvipc_shmdt(undos);
undos->un_pages++;
SYSV_MUTEX_LOCK(&lock_resources);
data = _hash_lookup(shmaddrs, (u_long)undos);
SYSV_MUTEX_UNLOCK(&lock_resources);
/* It is not necessary any lock on "size" because it is used
* only by shmat and shmdt.
* shmat for undoid is called only from this function and it
* is protected by undo_lock.
* shmdt for undoid is not called anywhere because the segment
* is destroyed by the daemon when the client dies.
*/
data->size = undos->un_pages * PAGE_SIZE;
undos = sysvipc_shmat(data->shmid, NULL, 0);
}
sunptr = &undos->un_ent[undos->un_cnt];
undos->un_cnt++;
sunptr->un_adjval = adjval;
sunptr->un_id = semid;
sunptr->un_num = semnum;
	//if (suptr->un_cnt == seminfo.semume) TODO: move this into the daemon
	/*} else {
		error = EINVAL; // handled via notification
	}*/
done:
SYSV_MUTEX_UNLOCK(&lock_undo);
sysv_print("semundo adjust end\n");
return (error);
}
int sysvipc_semop (int semid, struct sembuf *sops, unsigned nsops) {
struct semid_pool *semaptr = NULL, *auxsemaptr = NULL;
struct sembuf *sopptr;
struct sem *semptr = NULL;
struct sem *xsemptr = NULL;
int eval = 0;
int i, j;
int do_undos;
int val_to_sleep;
sysv_print("[client %d] call to semop(%d, %u)\n",
getpid(), semid, nsops);
//TODO
/*if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
return (ENOSYS);
*/
semaptr = get_semaptr(semid, 0, IPC_W);
if (!semaptr) {
errno = EINVAL;
return (-1);
}
#ifdef SYSV_SEMS
if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
sysv_print("sema removed\n");
errno = EIDRM;
goto done2;
}
if (nsops > MAX_SOPS) {
sysv_print("too many sops (max=%d, nsops=%u)\n",
getpid(), MAX_SOPS, nsops);
eval = E2BIG;
goto done;
}
/*
* Loop trying to satisfy the vector of requests.
* If we reach a point where we must wait, any requests already
* performed are rolled back and we go to sleep until some other
* process wakes us up. At this point, we start all over again.
*
* This ensures that from the perspective of other tasks, a set
* of requests is atomic (never partially satisfied).
*/
do_undos = 0;
for (;;) {
semptr = NULL;
for (i = 0; i < (int)nsops; i++) {
sopptr = &sops[i];
if (sopptr->sem_num >= semaptr->ds.sem_nsems) {
eval = EFBIG;
goto done;
}
semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
sysv_mutex_lock(&semptr->sem_mutex);
#endif
sysv_print("semop: sem[%d]=%d : op=%d, flag=%s\n",
sopptr->sem_num, semptr->semval, sopptr->sem_op,
(sopptr->sem_flg & IPC_NOWAIT) ? "nowait" : "wait");
if (sopptr->sem_op < 0) {
if (semptr->semval + sopptr->sem_op < 0) {
sysv_print("semop: can't do it now\n");
break;
} else {
semptr->semval += sopptr->sem_op;
if (semptr->semval == 0 &&
semptr->semzcnt > 0)
umtx_wakeup((int *)&semptr->semval, 0);
}
if (sopptr->sem_flg & SEM_UNDO)
do_undos = 1;
} else if (sopptr->sem_op == 0) {
if (semptr->semval > 0) {
sysv_print("semop: not zero now\n");
break;
}
} else {
semptr->semval += sopptr->sem_op;
if (sopptr->sem_flg & SEM_UNDO)
do_undos = 1;
if (semptr->semncnt > 0)
umtx_wakeup((int *)&semptr->semval, 0);
}
#ifdef SYSV_SEMS
sysv_mutex_unlock(&semptr->sem_mutex);
#endif
}
/*
* Did we get through the entire vector?
*/
if (i >= (int)nsops)
goto donex;
if (sopptr->sem_op == 0)
semptr->semzcnt++;
else
semptr->semncnt++;
/*
		 * Get interlock value before releasing sem_mutex.
*
* XXX horrible hack until we get a umtx_sleep16() (and a umtx_sleep64())
* system call.
*/
val_to_sleep = *(int *)&semptr->semval;
#ifdef SYSV_SEMS
sysv_mutex_unlock(&semptr->sem_mutex);
#endif
/*
* Rollback the semaphores we had acquired.
*/
sysv_print("semop: rollback 0 through %d\n", i-1);
for (j = 0; j < i; j++) {
xsemptr = &semaptr->ds.sem_base[sops[j].sem_num];
#ifdef SYSV_SEMS
sysv_mutex_lock(&xsemptr->sem_mutex);
#endif
xsemptr->semval -= sops[j].sem_op;
if (xsemptr->semval == 0 && xsemptr->semzcnt > 0)
umtx_wakeup((int *)&xsemptr->semval, 0);
if (xsemptr->semval <= 0 && xsemptr->semncnt > 0)
umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
}
/*
* If the request that we couldn't satisfy has the
* NOWAIT flag set then return with EAGAIN.
*/
if (sopptr->sem_flg & IPC_NOWAIT) {
eval = EAGAIN;
goto done;
}
/*
* Release semaptr->lock while sleeping, allowing other
* semops (like SETVAL, SETALL, etc), which require an
* exclusive lock and might wake us up.
*
* Reload and recheck the validity of semaptr on return.
* Note that semptr itself might have changed too, but
* we've already interlocked for semptr and that is what
* will be woken up if it wakes up the tsleep on a MP
* race.
*
*/
sysv_print("semop: good night!\n");
rwlock_unlock(semid, semaptr);
put_shmdata(semid);
/* We don't sleep more than SYSV_TIMEOUT because we could
* go to sleep after another process calls wakeup and remain
* blocked.
*/
eval = umtx_sleep((int *)&semptr->semval, val_to_sleep, SYSV_TIMEOUT);
/* return code is checked below, after sem[nz]cnt-- */
/*
* Make sure that the semaphore still exists
*/
/* Check if another thread didn't remove the semaphore. */
auxsemaptr = get_semaptr(semid, 0, IPC_W); /* Redundant access check. */
if (!auxsemaptr) {
errno = EIDRM;
return (-1);
}
if (auxsemaptr != semaptr) {
errno = EIDRM;
goto done;
}
/* Check if another process didn't remove the semaphore. */
#ifdef SYSV_SEMS
if (try_rwlock_rdlock(semid, semaptr) == -1) {
#else
if (try_rwlock_wrlock(semid, semaptr) == -1) {
#endif
errno = EIDRM;
goto done;
}
sysv_print("semop: good morning (eval=%d)!\n", eval);
/* The semaphore is still alive. Readjust the count of
* waiting processes.
*/
semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
sysv_mutex_lock(&semptr->sem_mutex);
#endif
if (sopptr->sem_op == 0)
semptr->semzcnt--;
else
semptr->semncnt--;
#ifdef SYSV_SEMS
sysv_mutex_unlock(&semptr->sem_mutex);
#endif
/*
* Is it really morning, or was our sleep interrupted?
* (Delayed check of tsleep() return code because we
* need to decrement sem[nz]cnt either way.)
*
* Always retry on EBUSY
*/
if (eval == EAGAIN) {
eval = EINTR;
goto done;
}
sysv_print("semop: good morning!\n");
/* RETRY LOOP */
}
donex:
/*
* Process any SEM_UNDO requests.
*/
if (do_undos) {
for (i = 0; i < (int)nsops; i++) {
/*
* We only need to deal with SEM_UNDO's for non-zero
* op's.
*/
int adjval;
if ((sops[i].sem_flg & SEM_UNDO) == 0)
continue;
adjval = sops[i].sem_op;
if (adjval == 0)
continue;
eval = semundo_adjust(semid, sops[i].sem_num, -adjval);
if (eval == 0)
continue;
/*
* Oh-Oh! We ran out of either sem_undo's or undo's.
* Rollback the adjustments to this point and then
* rollback the semaphore ups and down so we can return
* with an error with all structures restored. We
* rollback the undo's in the exact reverse order that
* we applied them. This guarantees that we won't run
* out of space as we roll things back out.
*/
for (j = i - 1; j >= 0; j--) {
if ((sops[j].sem_flg & SEM_UNDO) == 0)
continue;
adjval = sops[j].sem_op;
if (adjval == 0)
continue;
if (semundo_adjust(semid, sops[j].sem_num,
adjval) != 0)
sysv_print("semop - can't undo undos");
}
for (j = 0; j < (int)nsops; j++) {
xsemptr = &semaptr->ds.sem_base[
sops[j].sem_num];
#ifdef SYSV_SEMS
				sysv_mutex_lock(&xsemptr->sem_mutex); /* lock the entry being rolled back */
#endif
xsemptr->semval -= sops[j].sem_op;
if (xsemptr->semval == 0 &&
xsemptr->semzcnt > 0)
umtx_wakeup((int *)&xsemptr->semval, 0);
if (xsemptr->semval <= 0 &&
xsemptr->semncnt > 0)
umtx_wakeup((int *)&xsemptr->semval, 0); //?!
#ifdef SYSV_SEMS
				sysv_mutex_unlock(&xsemptr->sem_mutex);
#endif
}
sysv_print("eval = %d from semundo_adjust\n", eval);
goto done;
}
}
/* Set sempid field for each semaphore. */
for (i = 0; i < (int)nsops; i++) {
sopptr = &sops[i];
semptr = &semaptr->ds.sem_base[sopptr->sem_num];
#ifdef SYSV_SEMS
sysv_mutex_lock(&semptr->sem_mutex);
#endif
semptr->sempid = getpid();
#ifdef SYSV_SEMS
sysv_mutex_unlock(&semptr->sem_mutex);
#endif
}
sysv_print("semop: done\n");
semaptr->ds.sem_otime = time(NULL);
done:
rwlock_unlock(semid, semaptr);
done2:
put_shmdata(semid);
return (eval);
}
|
"""
# -- --------------------------------------------------------------------------------------------------- -- #
# -- project: A SHORT DESCRIPTION OF THE PROJECT -- #
# -- script: data.py : python script for data collection -- #
# -- author: YOUR GITHUB USER NAME -- #
# -- license: GPL-3.0 License -- #
# -- repository: YOUR REPOSITORY URL -- #
# -- --------------------------------------------------------------------------------------------------- -- #
"""
dict_test = {'key_a': 'a', 'key_b': 'b'}
|
"""myfunction.py"""
import requests
def myfunction(numbers):
"""Adds up all ints in numbers and returns the result"""
return sum(numbers)
def awesomefunction():
"""Returns stuff!"""
r = requests.get('http://localhost/', json={})
return r.json()
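# Usage sketch (awesomefunction assumes a JSON API listening on localhost):
#
#   >>> myfunction([1, 2, 3])
#   6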
|
import React from 'react'
import PropTypes from 'prop-types'
/*
* Error Boundary for when components throw errors because of invalid props.
* TODO: Dispatch error message for visual feedback.
*/
class ErrorBoundary extends React.Component {
static propTypes = {
children: PropTypes.any,
}
state = { hasError: false }
static getDerivedStateFromError () {
return { hasError: true }
}
/* We need to continually reset the error state for each prop change on the child */
UNSAFE_componentWillReceiveProps () {
this.setState({ hasError: false })
}
render () {
return this.state.hasError ? null : this.props.children
}
}
export default ErrorBoundary
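/*
 * Usage sketch (hypothetical parent component, not part of this module):
 *
 *   <ErrorBoundary>
 *     <Chart data={points} />
 *   </ErrorBoundary>
 *
 * If Chart throws while rendering, the boundary renders nothing; the error
 * state is cleared again on the next prop change.
 */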
|
import React from "react";
import Demo1 from "./Demo1";
import Demo2 from "./Demo2";
import Demo3 from "./Demo3";
import Demo4 from "./Demo4";
import Demo5 from "./Demo5";
const Rules = () => {
return (
<div>
<div className="rule-concept">
<p className="rule-text">
The goal of the game is for the fox to collect all the gems without
straying off the path too many times. The two players cooperatively
move the fox back and forth along the path.
</p>
</div>
<div className="rule-concept">
<p className="rule-text">
When the fox lands on a space with a gem it picks up that gem.
</p>
        {/* Something about Demo1 causes the text to only be allowed a tiny amount of space */}
<div className="demo-larger">
<Demo1 />
</div>
</div>
<div className="rule-concept">
<p className="rule-text">
The fox moves in the direction of the player who won the most recent
trick. The distance the fox moves is the total number of paw prints on
both of the cards played in the most recent trick.
</p>
<div className="demo-larger">
<Demo2 />
</div>
</div>
<div className="rule-concept">
<p className="rule-text">
The following player in a trick must play a card matching the suit of
the leading card if they can. Otherwise they may play any card they
like.
</p>
<div className="demo-larger">
<Demo3 />
</div>
</div>
<div className="rule-concept">
<div className="rule-text">
<p>If the fox moves beyond the path:</p>
<ul>
<li>
The end of the path where the fox stepped off will be shortened by
one space
</li>
<li>
Any gems that had been on the removed part of the path will be
shifted to the new end of the path.
</li>
<li>The fox will be moved to the new end of the path.</li>
</ul>
<p>
If the fox strays from the path more than three times the game is
over.
</p>
</div>
<div className="demo-larger">
<Demo4 />
</div>
</div>
<div className="rule-concept">
<div className="rule-text">
<p>How the winner of a trick is determined:</p>
<ul>
<li>
If the two cards played are of the same suit, the card with the
higher number wins.
</li>
<li>
If the two cards are of different suits and one of the played
suits matches that of the decree card, then that card wins.
</li>
<li>
If the two cards are of different suits and neither of the played
suits matches that of the decree card, then the card of the
leading player wins.
</li>
</ul>
</div>
<div className="demo-larger">
<Demo5 />
</div>
</div>
</div>
);
};
export default Rules;
|
import discord
from discord.ext import commands
import ast
import operator

# Whitelisted arithmetic operators for the calc command. Passing raw user
# input to eval() would allow arbitrary code execution.
_OPS = {ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul,
        ast.Div: operator.truediv, ast.Pow: operator.pow, ast.USub: operator.neg}

def _safe_eval(node):
    """Evaluate an AST restricted to numbers and whitelisted operators."""
    if isinstance(node, ast.Expression):
        node = node.body
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
        return _OPS[type(node.op)](_safe_eval(node.left), _safe_eval(node.right))
    if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
        return _OPS[type(node.op)](_safe_eval(node.operand))
    raise ValueError("unsupported expression")
class Mod(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._last_member = None
    @commands.command()
    async def calc(self, ctx, *, expr):
        # Evaluate arithmetic safely instead of eval()-ing user input.
        try:
            result = _safe_eval(ast.parse(expr, mode="eval"))
        except (ValueError, SyntaxError, ZeroDivisionError):
            await ctx.message.channel.send("Invalid expression.")
            return
        await ctx.message.channel.send(result)
@commands.command()
async def avatar(self, ctx, member: discord.Member=None):
if not member:
member = ctx.message.author
await ctx.message.channel.send(member.avatar_url)
def setup(client):
client.add_cog(Mod(client))
|
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
/**
* @template T
*/
class ArrayQueue {
/**
* @param {Iterable<T>=} items The initial elements.
*/
constructor(items) {
/** @private @type {T[]} */
this._list = items ? Array.from(items) : [];
/** @private @type {T[]} */
this._listReversed = [];
}
/**
* Returns the number of elements in this queue.
* @returns {number} The number of elements in this queue.
*/
get length() {
return this._list.length + this._listReversed.length;
}
/**
* Empties the queue.
*/
clear() {
this._list.length = 0;
this._listReversed.length = 0;
}
/**
* Appends the specified element to this queue.
* @param {T} item The element to add.
* @returns {void}
*/
enqueue(item) {
this._list.push(item);
}
/**
* Retrieves and removes the head of this queue.
	 * @returns {T | undefined} The head of this queue, or `undefined` if this queue is empty.
*/
dequeue() {
if (this._listReversed.length === 0) {
if (this._list.length === 0) return undefined;
if (this._list.length === 1) return this._list.pop();
if (this._list.length < 16) return this._list.shift();
const temp = this._listReversed;
this._listReversed = this._list;
this._listReversed.reverse();
this._list = temp;
}
return this._listReversed.pop();
}
/**
* Finds and removes an item
* @param {T} item the item
* @returns {void}
*/
delete(item) {
const i = this._list.indexOf(item);
if (i >= 0) {
this._list.splice(i, 1);
} else {
const i = this._listReversed.indexOf(item);
if (i >= 0) this._listReversed.splice(i, 1);
}
}
[Symbol.iterator]() {
let i = -1;
let reversed = false;
return {
next: () => {
if (!reversed) {
i++;
if (i < this._list.length) {
return {
done: false,
value: this._list[i],
};
}
reversed = true;
i = this._listReversed.length;
}
i--;
if (i < 0) {
return {
done: true,
value: undefined,
};
}
return {
done: false,
value: this._listReversed[i],
};
},
};
}
}
module.exports = ArrayQueue;
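// Usage sketch (illustrative): the _list/_listReversed pair gives amortized
// O(1) dequeue by reversing the backlog at most once per element, instead of
// paying Array#shift's O(n) on every call for long queues.
//
//   const queue = new ArrayQueue([1, 2, 3]);
//   queue.enqueue(4);
//   queue.dequeue(); // -> 1
//   queue.dequeue(); // -> 2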
|
/*
* This header is generated by classdump-dyld 1.5
* on Tuesday, November 10, 2020 at 10:18:40 PM Mountain Standard Time
* Operating System: Version 14.2 (Build 18K57)
* Image Source: /System/Library/PrivateFrameworks/VideoSubscriberAccountUI.framework/VideoSubscriberAccountUI
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos. Updated by Kevin Bradley.
*/
#import <UIKitCore/UIViewController.h>
@protocol VSSupportedAppsViewControllerDelegate;
@class VSOptional, VSAuditToken, NSOperationQueue, NSArray;
@interface VSSupportedAppsViewController : UIViewController {
VSOptional* _identityProvider;
VSAuditToken* _auditToken;
id<VSSupportedAppsViewControllerDelegate> _delegate;
NSOperationQueue* _privateQueue;
NSArray* _apps;
}
@property (nonatomic,retain) NSOperationQueue * privateQueue; //@synthesize privateQueue=_privateQueue - In the implementation block
@property (nonatomic,copy) NSArray * apps; //@synthesize apps=_apps - In the implementation block
@property (nonatomic,retain) VSOptional * identityProvider; //@synthesize identityProvider=_identityProvider - In the implementation block
@property (nonatomic,copy) VSAuditToken * auditToken; //@synthesize auditToken=_auditToken - In the implementation block
@property (assign,nonatomic,__weak) id<VSSupportedAppsViewControllerDelegate> delegate; //@synthesize delegate=_delegate - In the implementation block
-(id<VSSupportedAppsViewControllerDelegate>)delegate;
-(void)setDelegate:(id<VSSupportedAppsViewControllerDelegate>)arg1 ;
-(VSAuditToken *)auditToken;
-(NSArray *)apps;
-(void)setApps:(NSArray *)arg1 ;
-(void)setAuditToken:(VSAuditToken *)arg1 ;
-(void)viewDidLoad;
-(id)initWithNibName:(id)arg1 bundle:(id)arg2 ;
-(void)_didFinish;
-(NSOperationQueue *)privateQueue;
-(void)setPrivateQueue:(NSOperationQueue *)arg1 ;
-(VSOptional *)identityProvider;
-(void)setIdentityProvider:(VSOptional *)arg1 ;
-(void)_presentError:(id)arg1 ;
-(void)beginLoadingApps;
-(void)_doneButtonPresed:(id)arg1 ;
-(void)_finishLoadingApps:(id)arg1 ;
@end
|
/*****************************************************************************\
* ANALYSIS PERFORMANCE TOOLS *
* Extrae *
* Instrumentation package for parallel applications *
*****************************************************************************
* ___ This library is free software; you can redistribute it and/or *
* / __ modify it under the terms of the GNU LGPL as published *
* / / _____ by the Free Software Foundation; either version 2.1 *
* / / / \ of the License, or (at your option) any later version. *
* ( ( ( B S C ) *
* \ \ \_____/ This library is distributed in hope that it will be *
* \ \__ useful but WITHOUT ANY WARRANTY; without even the *
* \___ implied warranty of MERCHANTABILITY or FITNESS FOR A *
* PARTICULAR PURPOSE. See the GNU LGPL for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this library; if not, write to the Free Software Foundation, *
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
 * The GNU Lesser General Public License is contained in the file COPYING.  *
* --------- *
* Barcelona Supercomputing Center - Centro Nacional de Supercomputacion *
\*****************************************************************************/
#include "common.h"
#ifdef HAVE_STDIO_H
# include <stdio.h>
#endif
#ifdef HAVE_STDLIB_H
# include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
# include <string.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
# include <fcntl.h>
#endif
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#include "extrae_user_events.h"
#include "extrae-cmd.h"
#include "extrae-cmd-init.h"
static unsigned _TASKID = 0;
static unsigned _NTHREADS = 1;
static unsigned _NTASKS = 1;
static unsigned CMD_INIT_TASKID (void)
{
return _TASKID;
}
static unsigned CMD_INIT_NUMTASKS (void)
{
return _NTASKS;
}
static unsigned CMD_INIT_NUMTHREADS (void)
{
return _NTHREADS;
}
static void Extrae_CMD_Init_dump_info (void)
{
pid_t p = getpid();
char HOST[1024];
if (0 == gethostname (HOST, sizeof(HOST)))
{
char TMPFILE[2048];
int fd;
sprintf (TMPFILE, EXTRAE_CMD_FILE_PREFIX"%s", HOST);
fd = creat (TMPFILE, S_IRUSR|S_IWUSR);
if (fd >= 0)
{
char buffer[1024];
sprintf (buffer, "%u\n%u\n%u\n", p, _TASKID, _NTHREADS);
if (write (fd, buffer, strlen(buffer)) != (ssize_t) strlen(buffer))
fprintf (stderr, CMD_INIT " Error! Failed to write on temporary file\n");
close (fd);
}
else
fprintf (stderr, CMD_INIT " Error! Failed to create temporary file\n");
}
}
int Extrae_CMD_Init (int i, int argc, char *argv[])
{
int taskid, nthreads;
char *endptr;
if (argc-i < 2)
{
fprintf (stderr, CMD_INIT" command requires 2 parameters TASKID and Number of Threads/Slots\n");
return 0;
}
taskid = strtol (argv[i], &endptr, 10);
if (endptr == &argv[i][strlen(argv[i])])
{
if (taskid < 0)
{
fprintf (stderr, CMD_INIT" command cannot handle negative TASKID\n");
return 0;
}
else
_TASKID = taskid;
}
nthreads = strtol (argv[i+1], &endptr, 10);
if (endptr == &argv[i+1][strlen(argv[i+1])])
{
if (nthreads < 0)
{
fprintf (stderr, CMD_INIT" command cannot handle negative Number of Threads/Slots\n");
return 0;
}
else
_NTHREADS = nthreads;
}
Extrae_set_taskid_function (CMD_INIT_TASKID);
Extrae_set_numthreads_function (CMD_INIT_NUMTHREADS);
_NTASKS = _TASKID+1;
Extrae_set_numtasks_function (CMD_INIT_NUMTASKS);
putenv ("EXTRAE_ON=1");
Extrae_init();
Extrae_CMD_Init_dump_info();
Extrae_fini();
return 2;
}
|
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in os.walk(dir):
for fname in fnames:
# if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
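# Usage sketch (hypothetical directory; transform from torchvision): since
# make_dataset() keeps every file under root, point this at a folder that
# only contains images, or restore the is_image_file() check above.
#
#   from torchvision import transforms
#   dataset = ImageFolder('/path/to/images',
#                         transform=transforms.ToTensor(),
#                         return_paths=True)
#   img, path = dataset[0]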
|
"""
Example script for data preparation.
If you use this code, please cite the following paper.
Seungbo Ha and Ilwoo Lyu
SPHARM-Net: Spherical Harmonics-based Convolution for Cortical Parcellation.
IEEE Transactions on Medical Imaging. 2022
Copyright 2022 Ilwoo Lyu
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import os
import argparse
import numpy as np
from joblib import Parallel, delayed
from spharmnet.lib.sphere import TriangleSearch
from spharmnet.lib.io import read_feature, read_mesh, read_annotation
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--sphere",
type=str,
default="./sphere/ico6.vtk",
help="Reference sphere mesh for re-tessellation (vtk or FreeSurfer format)",
)
parser.add_argument("--data-dir", type=str, help="Path to FreeSurfer home (default: $SUBJECTS_DIR)")
parser.add_argument("--out-dir", type=str, default="./dataset", help="Path to re-tessellated data (output)")
parser.add_argument("--native-sphere", type=str, default="sphere", help="Native sphere mesh (sphere, sphere.reg, etc.)")
parser.add_argument("--hemi", type=str, nargs="+", choices=["lh", "rh"], help="Hemisphere for data generation", required=True)
parser.add_argument("--in-ch", type=str, default=["curv", "sulc", "inflated.H"], nargs="+", help="List of geometry")
parser.add_argument("--annot", type=str, default="aparc", help="Manual labels (e.g. aparc for ?h.aparc.annot)")
parser.add_argument("--threads", type=int, default=1, help="# of CPU threads for parallel data generation")
args = parser.parse_args()
return args
def gen_data(data_dir, out_dir, subj_name, hemi, native_sphere, in_ch, ico_v, annot_file):
print("Processing {}...".format(subj_name))
surf_dir = os.path.join(os.path.join(data_dir, subj_name), "surf")
label_dir = os.path.join(os.path.join(data_dir, subj_name), "label")
feat_out_dir = os.path.join(out_dir, "features")
label_out_dir = os.path.join(out_dir, "labels")
csv_out_dir = os.path.join(out_dir, "label_csv")
for this_hemi in hemi:
native_v, native_f = read_mesh(os.path.join(surf_dir, this_hemi + "." + native_sphere))
native_mesh = TriangleSearch(native_v, native_f)
triangle_idx, bary_coeff = native_mesh.query(ico_v)
# Generating features
for feat_name in in_ch:
feat_path = os.path.join(surf_dir, this_hemi + "." + feat_name)
feat, _ = read_feature(feat_path)
feat_remesh = np.multiply(feat[native_f[triangle_idx]], bary_coeff).sum(-1)
with open(
os.path.join(feat_out_dir, "{}.{}.aug0.{}.dat".format(subj_name, this_hemi, feat_name)), "wb"
) as f:
f.write(feat_remesh)
# Generating labels
num_vert = native_v.shape[0]
label_arr = np.zeros(num_vert, dtype=np.int16)
annot = os.path.join(label_dir, this_hemi + "." + annot_file + ".annot")
        vertices, label, structure_ls, structureID_ls = read_annotation(annot)
        # If 'label' contains an unidentified label, map those elements to 0
label = [structureID_ls.index(l) if l in structureID_ls else 0 for l in label]
label_arr[vertices] = label
label_remesh = label_arr[native_f[triangle_idx, np.argmax(bary_coeff, axis=1)]]
with open(os.path.join(label_out_dir, "{}.{}.aug0.label.dat".format(subj_name, this_hemi)), "wb") as f:
f.write(label_remesh)
# write csv file
with open(os.path.join(csv_out_dir, "{}.{}.csv".format(subj_name, this_hemi)), "w") as f:
f.write("label,ID\n")
            for id, roi in enumerate(structure_ls):
f.write("{},{}\n".format(roi, id))
def main(args):
feat_out_dir = os.path.join(args.out_dir, "features")
label_out_dir = os.path.join(args.out_dir, "labels")
csv_out_dir = os.path.join(args.out_dir, "label_csv")
if not os.path.exists(feat_out_dir):
os.makedirs(feat_out_dir)
if not os.path.exists(label_out_dir):
os.makedirs(label_out_dir)
if not os.path.exists(csv_out_dir):
os.makedirs(csv_out_dir)
data_dir = os.environ.get("SUBJECTS_DIR") if args.data_dir is None else args.data_dir
print("Subject dir: {}".format(data_dir))
subj_name_ls = sorted(next(os.walk(data_dir))[1])
ico_v, _ = read_mesh(args.sphere)
Parallel(n_jobs=args.threads)(
delayed(gen_data)(
data_dir=data_dir,
out_dir=args.out_dir,
subj_name=subj_name,
hemi=args.hemi,
native_sphere=args.native_sphere,
in_ch=args.in_ch,
ico_v=ico_v,
annot_file=args.annot,
)
for subj_name in subj_name_ls
)
if __name__ == "__main__":
args = get_args()
main(args)
|
'use strict';
module.exports = {
port: 443,
db: process.env.MONGOHQ_URL || process.env.MONGOLAB_URI || 'mongodb://localhost/surveyapp14',
assets: {
lib: {
css: [
'public/lib/bootstrap/dist/css/bootstrap.min.css',
'public/lib/bootstrap/dist/css/bootstrap-theme.min.css',
],
js: [
'public/lib/angular/angular.min.js',
'public/lib/angular-resource/angular-resource.min.js',
'public/lib/angular-animate/angular-animate.min.js',
'public/lib/angular-ui-router/release/angular-ui-router.min.js',
'public/lib/angular-ui-utils/ui-utils.min.js',
'public/lib/angular-bootstrap/ui-bootstrap-tpls.min.js'
]
},
css: 'public/dist/application.min.css',
js: 'public/dist/application.min.js'
},
facebook: {
clientID: process.env.FACEBOOK_ID || 'APP_ID',
clientSecret: process.env.FACEBOOK_SECRET || 'APP_SECRET',
callbackURL: 'https://localhost:443/auth/facebook/callback'
},
twitter: {
clientID: process.env.TWITTER_KEY || 'CONSUMER_KEY',
clientSecret: process.env.TWITTER_SECRET || 'CONSUMER_SECRET',
callbackURL: 'https://localhost:443/auth/twitter/callback'
},
google: {
clientID: process.env.GOOGLE_ID || 'APP_ID',
clientSecret: process.env.GOOGLE_SECRET || 'APP_SECRET',
callbackURL: 'https://localhost:443/auth/google/callback'
},
linkedin: {
clientID: process.env.LINKEDIN_ID || 'APP_ID',
clientSecret: process.env.LINKEDIN_SECRET || 'APP_SECRET',
callbackURL: 'https://localhost:443/auth/linkedin/callback'
},
github: {
clientID: process.env.GITHUB_ID || 'APP_ID',
clientSecret: process.env.GITHUB_SECRET || 'APP_SECRET',
callbackURL: 'https://localhost:443/auth/github/callback'
},
mailer: {
from: process.env.MAILER_FROM || 'MAILER_FROM',
options: {
service: process.env.MAILER_SERVICE_PROVIDER || 'MAILER_SERVICE_PROVIDER',
auth: {
user: process.env.MAILER_EMAIL_ID || 'MAILER_EMAIL_ID',
pass: process.env.MAILER_PASSWORD || 'MAILER_PASSWORD'
}
}
}
};
|
#ifndef GOL_RENDERER
#define GOL_RENDERER
#include <SDL.h>
#include <vector>
#include "TextureWrapper.h"
namespace visual{
enum textureID{
ButtonLoad = 0,
ButtonSave = 1,
ButtonPause = 2,
};
class Renderer{
SDL_Renderer* sdlRenderer;
//no time to create texture sheet in gimp
TextureWrapper loadTexture;
TextureWrapper saveTexture;
TextureWrapper pauseTexture;
public:
void startRendering();
void swapBuffers();
void renderRectangle(int x, int y, int w, int h);
		void renderTexture(textureID textureID, int x, int y, int w, int h);
public:
Renderer(SDL_Renderer* _sdlRenderer);
};
}
#endif
|
# Standard library imports
import inspect
import io
import os
import sys
from json import loads
# Bokeh imports
import bokeh.models as models
from bokeh.core.json_encoder import serialize_json
from bokeh.model import Model
dest_dir = sys.argv[1]
classes = [member for name, member in inspect.getmembers(models) if inspect.isclass(member)]
model_class = next(klass for klass in classes if klass.__name__ == 'Model')
widget_class = next(klass for klass in classes if klass.__name__ == 'Widget')
# getclasstree returns a list which contains [(class, parentClass), [(subClassOfClass, class), ...]]
# where the subclass list is omitted if there are no subclasses.
# If you pass unique=True, mixins will be registered as leaves, so don't use
# unique=True; expect duplicates in the result of leaves().
all_tree = inspect.getclasstree(classes, unique=False)
def leaves(tree, underneath):
if len(tree) == 0:
return []
elif len(tree) > 1 and isinstance(tree[1], list):
subs = tree[1]
if underneath is None or tree[0][0] != underneath:
return leaves(subs, underneath) + leaves(tree[2:], underneath)
else:
# underneath=None to return all leaves from here out
return leaves(subs, underneath=None)
else:
leaf = tree[0]
tail = tree[1:]
if leaf[0] == underneath:
return [leaf]
elif underneath is not None:
return leaves(tail, underneath)
else:
return [leaf] + leaves(tail, underneath)
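# Tiny illustration of the tree shape leaves() walks (hypothetical classes):
#   class A: pass
#   class B(A): pass
#   inspect.getclasstree([A, B]) == [(object, ()), [(A, (object,)), [(B, (A,))]]]
#   leaves(that_tree, underneath=A) == [(B, (A,))]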
all_json = {}
for leaf in leaves(all_tree, model_class):
klass = leaf[0]
vm_name = klass.__view_model__
if vm_name in all_json:
continue
defaults = {}
instance = klass()
props_with_values = instance.query_properties_with_values(lambda prop: prop.readonly or prop.serialized)
for name, default in props_with_values.items():
if isinstance(default, Model):
ref = default.ref
raw_attrs = default._to_json_like(include_defaults=True)
attrs = loads(serialize_json(raw_attrs))
ref['attributes'] = attrs
del ref['id'] # there's no way the ID will match bokehjs
default = ref
elif isinstance(default, float) and default == float('inf'):
default = None
defaults[name] = default
all_json[vm_name] = defaults
widgets_json = {}
for leaf_widget in leaves(all_tree, widget_class):
klass = leaf_widget[0]
vm_name = klass.__view_model__
if vm_name not in widgets_json:
widgets_json[vm_name] = all_json[vm_name]
del all_json[vm_name]
def output_defaults_module(filename, defaults):
dest = os.path.join(dest_dir, ".generated_defaults", filename)
try:
os.makedirs(os.path.dirname(dest))
except OSError:
pass
output = serialize_json(defaults, indent=2)
with io.open(dest, "w", encoding="utf-8") as f:
f.write(output)
print("Wrote %s with %d model classes" % (filename, len(defaults)))
output_defaults_module('models_defaults.json', all_json)
output_defaults_module('widgets_defaults.json', widgets_json)
|
# Copyright (c) 2003-2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
from impacket import ImpactPacket, ImpactDecoder, structure
O_ETH = 0
O_IP = 1
O_ARP = 1
O_UDP = 2
O_TCP = 2
O_ICMP = 2
O_UDP_DATA = 3
O_ICMP_DATA = 3
MAGIC = "\xD4\xC3\xB2\xA1"
class PCapFileHeader(structure.Structure):
structure = (
('magic', MAGIC),
('versionMajor', '<H=2'),
('versionMinor', '<H=4'),
('GMT2localCorrection', '<l=0'),
('timeAccuracy', '<L=0'),
('maxLength', '<L=0xffff'),
('linkType', '<L=1'),
('packets','*:=[]'),
)
class PCapFilePacket(structure.Structure):
structure = (
('tsec', '<L=0'),
('tmsec', '<L=0'),
('savedLength', '<L-data'),
('realLength', '<L-data'),
('data',':'),
)
def __init__(self, *args, **kargs):
structure.Structure.__init__(self, *args, **kargs)
self['data'] = ''
class PcapFile:
def __init__(self, fileName=None, mode='rb'):
        if fileName is not None:
self.file = open(fileName, mode)
self.hdr = None
self.wroteHeader = False
def reset(self):
self.hdr = None
self.file.seek(0)
def close(self):
self.file.close()
def fileno(self):
return self.file.fileno()
def setFile(self, file):
self.file = file
def setSnapLen(self, snapLen):
self.createHeaderOnce()
self.hdr['maxLength'] = snapLen
def getSnapLen(self):
self.readHeaderOnce()
return self.hdr['maxLength']
def setLinkType(self, linkType):
self.createHeaderOnce()
self.hdr['linkType'] = linkType
def getLinkType(self):
self.readHeaderOnce()
return self.hdr['linkType']
def readHeaderOnce(self):
if self.hdr is None:
self.hdr = PCapFileHeader.fromFile(self.file)
def createHeaderOnce(self):
if self.hdr is None:
self.hdr = PCapFileHeader()
def writeHeaderOnce(self):
if not self.wroteHeader:
self.wroteHeader = True
self.file.seek(0)
self.createHeaderOnce()
self.file.write(str(self.hdr))
def read(self):
self.readHeaderOnce()
try:
pkt = PCapFilePacket.fromFile(self.file)
pkt['data'] = self.file.read(pkt['savedLength'])
return pkt
except:
return None
def write(self, pkt):
self.writeHeaderOnce()
self.file.write(str(pkt))
def packets(self):
self.reset()
while 1:
answer = self.read()
if answer is None: break
yield answer
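# Usage sketch (hypothetical file name; 'data' must hold raw link-layer
# bytes as a str in this Python 2 era code):
#   out = PcapFile('capture.pcap', 'wb')
#   pkt = PCapFilePacket()
#   pkt['data'] = raw_ethernet_frame
#   out.write(pkt)
#   out.close()
#   for pkt in PcapFile('capture.pcap').packets():
#       print pkt['savedLength']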
def process(onion):
# for dhcp we only want UDP packets
if len(onion) <= O_UDP: return
if onion[O_UDP].protocol != ImpactPacket.UDP.protocol:
return
# we only want UDP port 67
if ((onion[O_UDP].get_uh_dport() != 67) and
(onion[O_UDP].get_uh_sport() != 67)): return
# we've got a dhcp packet
def main():
import sys
f_in = open(sys.argv[1],'rb')
try:
f_out = open(sys.argv[2],'wb')
f_out.write(str(PCapFileHeader()))
except:
f_out = None
hdr = PCapFileHeader()
hdr.fromString(f_in.read(len(hdr)))
# hdr.dump()
decoder = ImpactDecoder.EthDecoder()
while 1:
pkt = PCapFilePacket()
try:
pkt.fromString(f_in.read(len(pkt)))
except:
break
pkt['data'] = f_in.read(pkt['savedLength'])
hdr['packets'].append(pkt)
p = pkt['data']
        # decode the whole captured payload; indexing single bytes (p[1]/p[0])
        # as in the original cannot yield a valid Ethernet frame
        in_onion = [decoder.decode(p)]
try:
while 1: in_onion.append(in_onion[-1].child())
except:
pass
process(in_onion)
pkt.dump()
# print "%r" % str(pkt)
if f_out:
            pkt_out = PCapFilePacket()
            # use the decoded outermost layer; 'eth' was never defined here
            pkt_out['data'] = str(in_onion[0].get_packet())
# pkt_out.dump()
f_out.write(str(pkt_out))
if __name__ == '__main__':
main()
|
import aioftp
from aiohttp import web
import asyncio
from utils import parse_headers
from errors import FtpProxyError, ServerUnreachable, MissingMandatoryQueryParameter
class AioftpError(FtpProxyError):
def __init__(self, ftp_error):
super().__init__('\n'.join([info.strip() for info in ftp_error.info]))
FTP_TIMEOUT = 5
async def ping(request):
"""test FTP connection by sending a minimal LS command
returns "pong" on success
"""
host, port, login, password = parse_headers(request)
try:
async with aioftp.ClientSession(host, port, login, password, socket_timeout=FTP_TIMEOUT, path_timeout=FTP_TIMEOUT) as client:
async for _ in client.list('/'): # noqa
                # Iterate at most once; only issuing the LIST command matters, not its results
break
return web.json_response({'success': True})
except (OSError, asyncio.TimeoutError, TimeoutError):
raise ServerUnreachable
except aioftp.errors.StatusCodeError as ftp_error:
raise AioftpError(ftp_error)
async def ls(request):
"""ftp LS command
Optional query params:
path: directory to list (defaults to "/")
recursive: recurse down folders (defaults to "false")
"""
host, port, login, password = parse_headers(request)
root_path = request.query.get('path', '/')
recursive = request.query.get('recursive', 'false') == 'true'
extension = request.query.get('extension')
files = []
try:
async with aioftp.ClientSession(host, port, login, password, socket_timeout=FTP_TIMEOUT, path_timeout=FTP_TIMEOUT) as client:
async for path, info in client.list(root_path, recursive=recursive):
if extension is None or path.suffix == extension:
files.append(str(path))
except (OSError, asyncio.TimeoutError, TimeoutError):
raise ServerUnreachable
except aioftp.errors.StatusCodeError as ftp_error:
raise AioftpError(ftp_error)
return web.json_response(files)
async def download(request):
host, port, login, password = parse_headers(request)
path = request.query.get('path')
if not path:
raise MissingMandatoryQueryParameter('path')
try:
async with aioftp.ClientSession(host, port, login, password, socket_timeout=FTP_TIMEOUT, path_timeout=FTP_TIMEOUT) as client:
ftp_stream = await client.download_stream(path)
response = web.StreamResponse()
response.content_type = 'application/octet-stream'
await response.prepare(request)
async for chunk in ftp_stream.iter_by_block():
await response.write(chunk)
return response
except (OSError, asyncio.TimeoutError, TimeoutError):
raise ServerUnreachable
except aioftp.errors.StatusCodeError as ftp_error:
raise AioftpError(ftp_error)
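# Minimal wiring sketch for the handlers above (the route paths here are
# assumptions, not taken from the real application):
def make_app():
    app = web.Application()
    app.router.add_get('/ping', ping)
    app.router.add_get('/ls', ls)
    app.router.add_get('/download', download)
    return app


if __name__ == '__main__':
    web.run_app(make_app())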
|
# coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ScheduledReportFileType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
PDF = "PDF"
EXCEL = "Excel"
HTML = "Html"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""ScheduledReportFileType - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ScheduledReportFileType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ScheduledReportFileType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import glob
import sys
import traceback
from os.path import basename, join
import numpy as np
import pandas
from hic import *
from redun import Dir, task
from scipy import stats
from insitro_core.utils.cloud.bucket_utils import *
from insitro_core.utils.storage import *
@task()
def compute_powerlaw_fit_from_hic(
hicdir: str,
output_dir: str,
tmpdir: str,
hic_type: str = "juicebox",
resolution: int = 5000,
minwindow: int = 5000,
    maxwindow: int = 1000000,
chromosomes: str = "all",
):
"""
    Params:
    hicdir: Directory containing observed HiC KR-normalized matrices. File naming and structure should be: hicDir/chr*/chr*.KRobserved
    hic_type: Format of hic files
    resolution: For Juicebox: resolution of the hic dataset (in bp). For bedpe: distances will be binned to this resolution for the powerlaw fit
    minwindow: Minimum distance between bins to include in the powerlaw fit (bp). Recommended to be >= resolution to avoid using the diagonal of the HiC matrix
    maxwindow: Maximum distance between bins to include in the powerlaw fit (bp)
    chromosomes: Comma-delimited list of chromosomes to use for the fit. Defaults to chr[1..22],chrX
"""
makedirs(output_dir)
HiC = load_hic_for_powerlaw(
tmpdir, chromosomes, hic_type, hicdir, resolution, minwindow, maxwindow
)
# Run
slope, intercept, hic_mean_var = do_powerlaw_fit(HiC)
# Store outputs in outputdir
res = pandas.DataFrame(
{
"resolution": [resolution],
"maxWindow": [maxwindow],
"minWindow": [minwindow],
"pl_gamma": [slope],
"pl_scale": [intercept],
}
)
res.to_csv(join(tmpdir, "hic.powerlaw.txt"), sep="\t", index=False, header=True)
hic_mean_var.to_csv(
join(tmpdir, "hic.mean_var.txt"), sep="\t", index=True, header=True
)
upload_file(join(tmpdir, "hic.powerlaw.txt"), output_dir)
upload_file(join(tmpdir, "hic.mean_var.txt"), output_dir)
def load_hic_for_powerlaw(
tmpdir, chromosomes, hic_type, hicdir, resolution, minwindow, maxwindow
):
if chromosomes == "all":
chromosomes = ["chr" + str(x) for x in list(range(1, 23))] + ["chrX"]
else:
chromosomes = chromosomes.split(",")
all_data_list = []
for chrom in chromosomes:
try:
if hic_type == "juicebox":
hic_file, hic_norm_file, hic_is_vc = get_hic_file(
chrom, hicdir, allow_vc=False
)
print("Working on {}".format(hic_file))
this_data = load_hic(
tmpdir=tmpdir,
hic_file=hic_file,
hic_norm_file=hic_norm_file,
hic_is_vc=hic_is_vc,
hic_type="juicebox",
hic_resolution=resolution,
tss_hic_contribution=100,
window=maxwindow,
min_window=minwindow,
gamma=np.nan,
interpolate_nan=False,
)
this_data["dist_for_fit"] = (
abs(this_data["bin1"] - this_data["bin2"]) * resolution
)
all_data_list.append(this_data)
elif hic_type == "bedpe":
hic_file = get_hic_file(chrom, hicdir, hic_type="bedpe")
print("Working on {}".format(hic_file))
this_data = load_hic(
hic_file=hic_file,
tmpdir=tmpdir,
hic_type="bedpe",
hic_norm_file=None,
hic_is_vc=None,
hic_resolution=None,
tss_hic_contribution=None,
window=None,
min_window=None,
gamma=None,
)
# Compute distance in bins as with juicebox data.
                # This is needed in order to maintain consistency, but is probably slightly less accurate.
# Binning also reduces noise level.
rawdist = abs(
(this_data["x2"] + this_data["x1"]) / 2
- (this_data["y2"] + this_data["y1"]) / 2
)
this_data["dist_for_fit"] = (rawdist // resolution) * resolution
this_data = this_data.loc[
np.logical_and(
this_data["dist_for_fit"] >= minwindow,
this_data["dist_for_fit"] <= maxwindow,
)
]
all_data_list.append(this_data)
else:
error("invalid --hic_type")
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
    all_data = pandas.concat(all_data_list)
return all_data
def do_powerlaw_fit(HiC):
print("Running regression")
# TO DO:
# Print out mean/var plot of powerlaw relationship
HiC_summary = HiC.groupby("dist_for_fit").agg({"hic_contact": "sum"})
HiC_summary["hic_contact"] = (
HiC_summary.hic_contact / HiC_summary.hic_contact.sum()
    ) # technically this normalization should be over the entire genome (not just to maxWindow); it will only affect the intercept though.
res = stats.linregress(
np.log(HiC_summary.index), np.log(HiC_summary["hic_contact"])
)
hic_mean_var = HiC.groupby("dist_for_fit").agg({"hic_contact": ["mean", "var"]})
hic_mean_var.columns = ["mean", "var"]
return res.slope, res.intercept, hic_mean_var
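# Worked sketch of the fit above: if contact frequency follows a power law
# C(d) = k * d**gamma, then log C = gamma * log d + log k, so a linear
# regression on log-log pairs recovers gamma (slope) and log k (intercept).
def _powerlaw_fit_demo():
    dists = np.array([1e4, 2e4, 4e4, 8e4])
    contacts = 0.5 * dists ** -1.0  # synthetic data: gamma = -1, k = 0.5
    res = stats.linregress(np.log(dists), np.log(contacts))
    return res.slope, res.intercept  # ~(-1.0, log(0.5))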
|
/* eslint-disable linebreak-style */
const algosdk = require('algosdk')
const { exit } = require('process')
const readline = require('readline')
const PricecasterLib = require('../lib/pricecaster')
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout
})
function ask (questionText) {
return new Promise((resolve) => {
rl.question(questionText, input => resolve(input))
})
}
let globalMnemo = ''
function signCallback (sender, tx) {
const txSigned = tx.signTxn(algosdk.mnemonicToSecretKey(globalMnemo).sk)
return txSigned
}
async function startOp (algodClient, symbol, vaddr, fromAddress) {
const pclib = new PricecasterLib.PricecasterLib(algodClient)
console.log('Creating new app...')
const txId = await pclib.createApp(fromAddress, vaddr, symbol, signCallback)
console.log('txId: ' + txId)
const txResponse = await pclib.waitForTransactionResponse(txId)
const appId = pclib.appIdFromCreateAppResponse(txResponse)
console.log('Deployment App Id: %d', appId)
}
(async () => {
console.log('\nPricekeeper Deployment Tool -- (c)2021-22 Randlabs, Inc.')
console.log('----------------------------------------------------------\n')
if (process.argv.length !== 6) {
console.log('Usage: deploy <symbol> <vaddr> <from> <network>\n')
console.log('where:\n')
  console.log('symbol The supported symbol for this priceKeeper (e.g. BTC/USD)')
console.log('vaddr The validator address')
console.log('from Deployer account')
  console.log('network testnet, betanet or mainnet')
exit(0)
}
const symbol = process.argv[2]
const vaddr = process.argv[3]
const fromAddress = process.argv[4]
const network = process.argv[5]
const config = { server: '', apiToken: '', port: '' }
if (network === 'betanet') {
config.server = 'https://api.betanet.algoexplorer.io'
} else if (network === 'mainnet') {
config.server = 'https://api.algoexplorer.io'
} else if (network === 'testnet') {
config.server = 'https://api.testnet.algoexplorer.io'
} else {
console.error('Invalid network: ' + network)
exit(1)
}
if (!algosdk.isValidAddress(vaddr)) {
console.error('Invalid validator address: ' + vaddr)
exit(1)
}
if (!algosdk.isValidAddress(fromAddress)) {
console.error('Invalid deployer address: ' + fromAddress)
exit(1)
}
const algodClient = new algosdk.Algodv2(config.apiToken, config.server, config.port)
console.log('Parameters for deployment: ')
console.log('symbol: ' + symbol)
console.log('Validator addr: ' + vaddr)
console.log('From: ' + fromAddress)
console.log('Network: ' + network)
const answer = await ask('\nEnter YES to confirm parameters, anything else to abort. ')
if (answer !== 'YES') {
console.warn('Aborted by user.')
exit(1)
}
globalMnemo = await ask('\nEnter mnemonic for sender account.\nBE SURE TO DO THIS FROM A SECURED SYSTEM\n')
await startOp(algodClient, symbol, vaddr, fromAddress)
console.log('Bye.')
exit(0)
})()
|
# Replace this file with the Solution for Project Euler Problem #058
# Project Euler Problem: #058
# Repository Maintainer: https://www.github.com/theSwapnilSaste
# File Creation Date : 14th March 2020
# Solution Author : **** Insert Your Name/Github Handle Here ***
# Solution added on : **** Insert Date here ****
# Problem Status : Complete/Incomplete/Need Improvement etc.
# Space for Notes
# .
# .
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # succeeds on Python 2
except NameError:
    basestring = str  # Python 3, where basestring was removed
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.precise import Precise
class kraken(Exchange):
def describe(self):
return self.deep_extend(super(kraken, self).describe(), {
'id': 'kraken',
'name': 'Kraken',
'countries': ['US'],
'version': '0',
'rateLimit': 3000,
'certified': False,
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': None,
'createDepositAddress': True,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchPremiumIndexOHLCV': False,
'fetchLedger': True,
'fetchLedgerEntry': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': 'emulated',
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchWithdrawals': True,
'setMarginMode': False, # Kraken only supports cross margin
'withdraw': True,
},
'marketsByAltname': {},
'timeframes': {
'1m': 1,
'5m': 5,
'15m': 15,
'30m': 30,
'1h': 60,
'4h': 240,
'1d': 1440,
'1w': 10080,
'2w': 21600,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/76173629-fc67fb00-61b1-11ea-84fe-f2de582f58a3.jpg',
'api': {
'public': 'https://api.kraken.com',
'private': 'https://api.kraken.com',
'zendesk': 'https://kraken.zendesk.com/api/v2/help_center/en-us/articles', # use the public zendesk api to receive article bodies and bypass new anti-spam protections
},
'www': 'https://www.kraken.com',
'doc': 'https://www.kraken.com/features/api',
'fees': 'https://www.kraken.com/en-us/features/fee-schedule',
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.26 / 100,
'maker': 0.16 / 100,
'tiers': {
'taker': [
[0, 0.0026],
[50000, 0.0024],
[100000, 0.0022],
[250000, 0.0020],
[500000, 0.0018],
[1000000, 0.0016],
[2500000, 0.0014],
[5000000, 0.0012],
                            [10000000, 0.0010],
],
'maker': [
[0, 0.0016],
[50000, 0.0014],
[100000, 0.0012],
[250000, 0.0010],
[500000, 0.0008],
[1000000, 0.0006],
[2500000, 0.0004],
[5000000, 0.0002],
[10000000, 0.0],
],
},
},
                # this is a bad way of hardcoding fees that change on a daily basis
                # hardcoding is now considered obsolete, we will remove all of it eventually
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'ETH': 0.005,
'XRP': 0.02,
'XLM': 0.00002,
'LTC': 0.02,
'DOGE': 2,
'ZEC': 0.00010,
'ICN': 0.02,
'REP': 0.01,
'ETC': 0.005,
'MLN': 0.003,
'XMR': 0.05,
'DASH': 0.005,
'GNO': 0.01,
'EOS': 0.5,
'BCH': 0.001,
'XTZ': 0.05,
'USD': 5, # if domestic wire
'EUR': 5, # if domestic wire
'CAD': 10, # CAD EFT Withdrawal
'JPY': 300, # if domestic wire
},
'deposit': {
'BTC': 0,
'ETH': 0,
'XRP': 0,
'XLM': 0,
'LTC': 0,
'DOGE': 0,
'ZEC': 0,
'ICN': 0,
'REP': 0,
'ETC': 0,
'MLN': 0,
'XMR': 0,
'DASH': 0,
'GNO': 0,
'EOS': 0,
'BCH': 0,
'XTZ': 0.05,
'USD': 5, # if domestic wire
'EUR': 0, # free deposit if EUR SEPA Deposit
'CAD': 5, # if domestic wire
'JPY': 0, # Domestic Deposit(Free, ¥5,000 deposit minimum)
},
},
},
'handleContentTypeApplicationZip': True,
'api': {
'zendesk': {
'get': [
# we should really refrain from putting fixed fee numbers and stop hardcoding
# we will be using their web APIs to scrape all numbers from these articles
'360000292886', # -What-are-the-deposit-fees-
'201893608', # -What-are-the-withdrawal-fees-
],
},
'public': {
'get': [
'Assets',
'AssetPairs',
'Depth',
'OHLC',
'Spread',
'Ticker',
'Time',
'Trades',
],
},
'private': {
'post': {
'AddOrder': 0,
'AddExport': 1,
'Balance': 1,
'CancelAll': 1,
'CancelOrder': 0,
'ClosedOrders': 2,
'DepositAddresses': 1,
'DepositMethods': 1,
'DepositStatus': 1,
'ExportStatus': 1,
'GetWebSocketsToken': 1,
'Ledgers': 2,
'OpenOrders': 1,
'OpenPositions': 1,
'QueryLedgers': 1,
'QueryOrders': 1,
'QueryTrades': 1,
'RetrieveExport': 1,
'RemoveExport': 1,
'TradeBalance': 1,
'TradesHistory': 2,
'TradeVolume': 1,
'Withdraw': 1,
'WithdrawCancel': 1,
'WithdrawInfo': 1,
'WithdrawStatus': 1,
},
},
},
'commonCurrencies': {
'XBT': 'BTC',
'XBT.M': 'BTC.M', # https://support.kraken.com/hc/en-us/articles/360039879471-What-is-Asset-S-and-Asset-M-
'XDG': 'DOGE',
'REPV2': 'REP',
'REP': 'REPV1',
},
'options': {
'delistedMarketsById': {},
# cannot withdraw/deposit these
'inactiveCurrencies': ['CAD', 'USD', 'JPY', 'GBP'],
'networks': {
'ETH': 'ERC20',
'TRX': 'TRC20',
},
'depositMethods': {
'1INCH': '1inch(1INCH)',
'AAVE': 'Aave',
'ADA': 'ADA',
'ALGO': 'Algorand',
'ANKR': 'ANKR(ANKR)',
'ANT': 'Aragon(ANT)',
'ATOM': 'Cosmos',
'AXS': 'Axie Infinity Shards(AXS)',
'BADGER': 'Bager DAO(BADGER)',
'BAL': 'Balancer(BAL)',
'BAND': 'Band Protocol(BAND)',
'BAT': 'BAT',
'BCH': 'Bitcoin Cash',
'BNC': 'Bifrost(BNC)',
'BNT': 'Bancor(BNT)',
'BTC': 'Bitcoin',
'CHZ': 'Chiliz(CHZ)',
'COMP': 'Compound(COMP)',
'CQT': '\tCovalent Query Token(CQT)',
'CRV': 'Curve DAO Token(CRV)',
'CTSI': 'Cartesi(CTSI)',
'DAI': 'Dai',
'DASH': 'Dash',
'DOGE': 'Dogecoin',
'DOT': 'Polkadot',
'DYDX': 'dYdX(DYDX)',
'ENJ': 'Enjin Coin(ENJ)',
'EOS': 'EOS',
'ETC': 'Ether Classic(Hex)',
'ETH': 'Ether(Hex)',
'EWT': 'Energy Web Token',
'FEE': 'Kraken Fee Credit',
'FIL': 'Filecoin',
'FLOW': 'Flow',
'GHST': 'Aavegotchi(GHST)',
'GNO': 'GNO',
'GRT': 'GRT',
'ICX': 'Icon',
'INJ': 'Injective Protocol(INJ)',
'KAR': 'Karura(KAR)',
'KAVA': 'Kava',
'KEEP': 'Keep Token(KEEP)',
'KNC': 'Kyber Network(KNC)',
'KSM': 'Kusama',
'LINK': 'Link',
'LPT': 'Livepeer Token(LPT)',
'LRC': 'Loopring(LRC)',
'LSK': 'Lisk',
'LTC': 'Litecoin',
'MANA': 'MANA',
'MATIC': 'Polygon(MATIC)',
'MINA': 'Mina', # inspected from webui
'MIR': 'Mirror Protocol(MIR)',
'MKR': 'Maker(MKR)',
'MLN': 'MLN',
'MOVR': 'Moonriver(MOVR)',
'NANO': 'NANO',
'OCEAN': 'OCEAN',
'OGN': 'Origin Protocol(OGN)',
'OMG': 'OMG',
'OXT': 'Orchid(OXT)',
'OXY': 'Oxygen(OXY)',
'PAXG': 'PAX(Gold)',
'PERP': 'Perpetual Protocol(PERP)',
'PHA': 'Phala(PHA)',
'QTUM': 'QTUM',
'RARI': 'Rarible(RARI)',
'RAY': 'Raydium(RAY)',
'REN': 'Ren Protocol(REN)',
'REP': 'REPv2',
'REPV1': 'REP',
'SAND': 'The Sandbox(SAND)',
'SC': 'Siacoin',
'SDN': 'Shiden(SDN)',
'SOL': 'Solana', # their deposit method api doesn't work for SOL - was guessed
'SNX': 'Synthetix Network(SNX)',
'SRM': 'Serum', # inspected from webui
'STORJ': 'Storj(STORJ)',
'SUSHI': 'Sushiswap(SUSHI)',
'TBTC': 'tBTC',
'TRX': 'Tron',
'UNI': 'UNI',
'USDC': 'USDC',
'USDT': 'Tether USD(ERC20)',
'USDT-TRC20': 'Tether USD(TRC20)',
'WAVES': 'Waves',
'WBTC': 'Wrapped Bitcoin(WBTC)',
'XLM': 'Stellar XLM',
'XMR': 'Monero',
'XRP': 'Ripple XRP',
'XTZ': 'XTZ',
'YFI': 'YFI',
'ZEC': 'Zcash(Transparent)',
'ZRX': '0x(ZRX)',
},
},
'exceptions': {
'EQuery:Invalid asset pair': BadSymbol, # {"error":["EQuery:Invalid asset pair"]}
'EAPI:Invalid key': AuthenticationError,
'EFunding:Unknown withdraw key': InvalidAddress, # {"error":["EFunding:Unknown withdraw key"]}
'EFunding:Invalid amount': InsufficientFunds,
'EService:Unavailable': ExchangeNotAvailable,
'EDatabase:Internal error': ExchangeNotAvailable,
'EService:Busy': ExchangeNotAvailable,
'EQuery:Unknown asset': BadSymbol, # {"error":["EQuery:Unknown asset"]}
'EAPI:Rate limit exceeded': DDoSProtection,
'EOrder:Rate limit exceeded': DDoSProtection,
'EGeneral:Internal error': ExchangeNotAvailable,
'EGeneral:Temporary lockout': DDoSProtection,
'EGeneral:Permission denied': PermissionDenied,
'EOrder:Unknown order': InvalidOrder,
'EOrder:Order minimum not met': InvalidOrder,
'EGeneral:Invalid arguments': BadRequest,
'ESession:Invalid session': AuthenticationError,
'EAPI:Invalid nonce': InvalidNonce,
'EFunding:No funding method': BadRequest, # {"error":"EFunding:No funding method"}
'EFunding:Unknown asset': BadSymbol, # {"error":["EFunding:Unknown asset"]}
},
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
async def fetch_markets(self, params={}):
response = await self.publicGetAssetPairs(params)
#
# {
# "error":[],
# "result":{
# "ADAETH":{
# "altname":"ADAETH",
# "wsname":"ADA\/ETH",
# "aclass_base":"currency",
# "base":"ADA",
# "aclass_quote":"currency",
# "quote":"XETH",
# "lot":"unit",
# "pair_decimals":7,
# "lot_decimals":8,
# "lot_multiplier":1,
# "leverage_buy":[],
# "leverage_sell":[],
# "fees":[
# [0,0.26],
# [50000,0.24],
# [100000,0.22],
# [250000,0.2],
# [500000,0.18],
# [1000000,0.16],
# [2500000,0.14],
# [5000000,0.12],
# [10000000,0.1]
# ],
# "fees_maker":[
# [0,0.16],
# [50000,0.14],
# [100000,0.12],
# [250000,0.1],
# [500000,0.08],
# [1000000,0.06],
# [2500000,0.04],
# [5000000,0.02],
# [10000000,0]
# ],
# "fee_volume_currency":"ZUSD",
# "margin_call":80,
# "margin_stop":40,
# "ordermin": "1"
# },
# }
# }
#
markets = self.safe_value(response, 'result', {})
keys = list(markets.keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = markets[id]
baseId = self.safe_string(market, 'base')
quoteId = self.safe_string(market, 'quote')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
darkpool = id.find('.d') >= 0
altname = self.safe_string(market, 'altname')
symbol = altname if darkpool else (base + '/' + quote)
makerFees = self.safe_value(market, 'fees_maker', [])
firstMakerFee = self.safe_value(makerFees, 0, [])
firstMakerFeeRate = self.safe_number(firstMakerFee, 1)
maker = None
if firstMakerFeeRate is not None:
maker = float(firstMakerFeeRate) / 100
takerFees = self.safe_value(market, 'fees', [])
firstTakerFee = self.safe_value(takerFees, 0, [])
firstTakerFeeRate = self.safe_number(firstTakerFee, 1)
taker = None
if firstTakerFeeRate is not None:
taker = float(firstTakerFeeRate) / 100
precision = {
'amount': self.safe_integer(market, 'lot_decimals'),
'price': self.safe_integer(market, 'pair_decimals'),
}
minAmount = self.safe_number(market, 'ordermin')
leverageBuy = self.safe_value(market, 'leverage_buy', [])
leverageBuyLength = len(leverageBuy)
maxLeverage = self.safe_value(leverageBuy, leverageBuyLength - 1, 1)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'darkpool': darkpool,
'info': market,
'altname': market['altname'],
'maker': maker,
'taker': taker,
'type': 'spot',
'spot': True,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
'leverage': {
'max': maxLeverage,
},
},
})
result = self.append_inactive_markets(result)
self.marketsByAltname = self.index_by(result, 'altname')
return result
def safe_currency(self, currencyId, currency=None):
if currencyId is not None:
if len(currencyId) > 3:
if (currencyId.find('X') == 0) or (currencyId.find('Z') == 0):
if currencyId.find('.') > 0:
return super(kraken, self).safe_currency(currencyId, currency)
else:
currencyId = currencyId[1:]
return super(kraken, self).safe_currency(currencyId, currency)
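    # Example of the prefix stripping above: 'XXBT' -> 'XBT', which
    # commonCurrencies maps to 'BTC', while 'XBT.M' (it contains a '.') is
    # passed through unmodified and resolves to 'BTC.M'.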
def append_inactive_markets(self, result):
# result should be an array to append to
precision = {'amount': 8, 'price': 8}
costLimits = {'min': 0, 'max': None}
priceLimits = {'min': math.pow(10, -precision['price']), 'max': None}
amountLimits = {'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount'])}
limits = {'amount': amountLimits, 'price': priceLimits, 'cost': costLimits}
defaults = {
'darkpool': False,
'info': None,
'maker': None,
'taker': None,
'active': False,
'precision': precision,
'limits': limits,
}
markets = [
# {'id': 'XXLMZEUR', 'symbol': 'XLM/EUR', 'base': 'XLM', 'quote': 'EUR', 'altname': 'XLMEUR'},
]
for i in range(0, len(markets)):
result.append(self.extend(defaults, markets[i]))
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetAssets(params)
#
# {
# "error": [],
# "result": {
# "ADA": {"aclass": "currency", "altname": "ADA", "decimals": 8, "display_decimals": 6},
# "BCH": {"aclass": "currency", "altname": "BCH", "decimals": 10, "display_decimals": 5},
# ...
# },
# }
#
currencies = self.safe_value(response, 'result')
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
# todo: will need to rethink the fees
# see: https://support.kraken.com/hc/en-us/articles/201893608-What-are-the-withdrawal-fees-
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.safe_currency_code(self.safe_string(currency, 'altname'))
precision = self.safe_integer(currency, 'decimals')
# assumes all currencies are active except those listed above
active = not self.in_array(code, self.options['inactiveCurrencies'])
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privatePostTradeVolume(params)
tradedVolume = self.safe_number(response['result'], 'volume')
tiers = self.fees['trading']['tiers']
taker = tiers['taker'][1]
maker = tiers['maker'][1]
for i in range(0, len(tiers['taker'])):
if tradedVolume >= tiers['taker'][i][0]:
taker = tiers['taker'][i][1]
for i in range(0, len(tiers['maker'])):
if tradedVolume >= tiers['maker'][i][0]:
maker = tiers['maker'][i][1]
return {
'info': response,
'maker': maker,
'taker': taker,
}
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
price = self.safe_number(bidask, priceKey)
amount = self.safe_number(bidask, amountKey)
timestamp = self.safe_integer(bidask, 2)
return [price, amount, timestamp]
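    # e.g. a raw Kraken book level ["0.023480", "4.000", 1586321307] parses
    # to [0.02348, 4.0, 1586321307] (price, amount, book-entry timestamp)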
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
if market['darkpool']:
raise ExchangeError(self.id + ' does not provide an order book for darkpool symbol ' + symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['count'] = limit # 100
response = await self.publicGetDepth(self.extend(request, params))
#
# {
# "error":[],
# "result":{
# "XETHXXBT":{
# "asks":[
# ["0.023480","4.000",1586321307],
# ["0.023490","50.095",1586321306],
# ["0.023500","28.535",1586321302],
# ],
# "bids":[
# ["0.023470","59.580",1586321307],
# ["0.023460","20.000",1586321301],
# ["0.023440","67.832",1586321306],
# ]
# }
# }
# }
#
result = self.safe_value(response, 'result', {})
orderbook = self.safe_value(result, market['id'])
# sometimes kraken returns wsname instead of market id
# https://github.com/ccxt/ccxt/issues/8662
marketInfo = self.safe_value(market, 'info', {})
wsName = self.safe_value(marketInfo, 'wsname')
if wsName is not None:
orderbook = self.safe_value(result, wsName, orderbook)
return self.parse_order_book(orderbook, symbol)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
baseVolume = float(ticker['v'][1])
vwap = float(ticker['p'][1])
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = float(ticker['c'][0])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['h'][1]),
'low': float(ticker['l'][1]),
'bid': float(ticker['b'][0]),
'bidVolume': None,
'ask': float(ticker['a'][0]),
'askVolume': None,
'vwap': vwap,
'open': self.safe_number(ticker, 'o'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
symbols = self.symbols if (symbols is None) else symbols
marketIds = []
for i in range(0, len(symbols)):
symbol = symbols[i]
market = self.markets[symbol]
if market['active'] and not market['darkpool']:
marketIds.append(market['id'])
request = {
'pair': ','.join(marketIds),
}
response = await self.publicGetTicker(self.extend(request, params))
tickers = response['result']
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
darkpool = symbol.find('.d') >= 0
if darkpool:
raise ExchangeError(self.id + ' does not provide a ticker for darkpool symbol ' + symbol)
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.publicGetTicker(self.extend(request, params))
ticker = response['result'][market['id']]
return self.parse_ticker(ticker, market)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591475640,
# "0.02500",
# "0.02500",
# "0.02500",
# "0.02500",
# "0.02500",
# "9.12201000",
# 5
# ]
#
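        # fields are [time, open, high, low, close, vwap, volume, count];
        # vwap (index 5) and the trade count (index 7) are dropped below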
return [
self.safe_timestamp(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 6),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['since'] = int((since - 1) / 1000)
response = await self.publicGetOHLC(self.extend(request, params))
#
# {
# "error":[],
# "result":{
# "XETHXXBT":[
# [1591475580,"0.02499","0.02499","0.02499","0.02499","0.00000","0.00000000",0],
# [1591475640,"0.02500","0.02500","0.02500","0.02500","0.02500","9.12201000",5],
# [1591475700,"0.02499","0.02499","0.02499","0.02499","0.02499","1.28681415",2],
# [1591475760,"0.02499","0.02499","0.02499","0.02499","0.02499","0.08800000",1],
# ],
# "last":1591517580
# }
# }
result = self.safe_value(response, 'result', {})
ohlcvs = self.safe_value(result, market['id'], [])
return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'trade': 'trade',
'withdrawal': 'transaction',
'deposit': 'transaction',
'transfer': 'transfer',
'margin': 'margin',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# 'LTFK7F-N2CUX-PNY4SX': {
# refid: "TSJTGT-DT7WN-GPPQMJ",
# time: 1520102320.555,
# type: "trade",
# aclass: "currency",
# asset: "XETH",
# amount: "0.1087194600",
# fee: "0.0000000000",
# balance: "0.2855851000"
# },
# ...
# }
#
id = self.safe_string(item, 'id')
direction = None
account = None
referenceId = self.safe_string(item, 'refid')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'asset'), currency)
amount = self.safe_number(item, 'amount')
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
time = self.safe_number(item, 'time')
timestamp = None
if time is not None:
timestamp = int(time * 1000)
fee = {
'cost': self.safe_number(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_number(item, 'balance')
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/features/api#get-ledgers-info
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['start'] = int(since / 1000)
response = await self.privatePostLedgers(self.extend(request, params))
# { error: [],
# result: {ledger: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N",
# time: 1520103488.314,
# type: "withdrawal",
# aclass: "currency",
# asset: "XETH",
# amount: "-0.2805800000",
# fee: "0.0050000000",
# balance: "0.0000051000" },
result = self.safe_value(response, 'result', {})
ledger = self.safe_value(result, 'ledger', {})
keys = list(ledger.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = ledger[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items, currency, since, limit)
async def fetch_ledger_entries_by_ids(self, ids, code=None, params={}):
# https://www.kraken.com/features/api#query-ledgers
await self.load_markets()
ids = ','.join(ids)
request = self.extend({
'id': ids,
}, params)
response = await self.privatePostQueryLedgers(request)
# { error: [],
# result: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N",
# time: 1520103488.314,
# type: "withdrawal",
# aclass: "currency",
# asset: "XETH",
# amount: "-0.2805800000",
# fee: "0.0050000000",
# balance: "0.0000051000" }}}
result = response['result']
keys = list(result.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = result[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items)
async def fetch_ledger_entry(self, id, code=None, params={}):
items = await self.fetch_ledger_entries_by_ids([id], code, params)
return items[0]
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# "0.032310", # price
# "4.28169434", # amount
# 1541390792.763, # timestamp
# "s", # sell or buy
# "l", # limit or market
# ""
# ]
#
# fetchOrderTrades(private)
#
# {
# id: 'TIMIRG-WUNNE-RRJ6GT', # injected from outside
# ordertxid: 'OQRPN2-LRHFY-HIFA7D',
# postxid: 'TKH2SE-M7IF5-CFI7LT',
# pair: 'USDCUSDT',
# time: 1586340086.457,
# type: 'sell',
# ordertype: 'market',
# price: '0.99860000',
# cost: '22.16892001',
# fee: '0.04433784',
# vol: '22.20000000',
# margin: '0.00000000',
# misc: ''
# }
#
timestamp = None
side = None
type = None
priceString = None
amountString = None
id = None
orderId = None
fee = None
symbol = None
if isinstance(trade, list):
timestamp = self.safe_timestamp(trade, 2)
side = 'sell' if (trade[3] == 's') else 'buy'
type = 'limit' if (trade[4] == 'l') else 'market'
priceString = self.safe_string(trade, 0)
amountString = self.safe_string(trade, 1)
tradeLength = len(trade)
if tradeLength > 6:
id = self.safe_string(trade, 6) # artificially added as per #1794
elif isinstance(trade, basestring):
id = trade
elif 'ordertxid' in trade:
marketId = self.safe_string(trade, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
# delisted market ids go here
market = self.get_delisted_market_by_id(marketId)
orderId = self.safe_string(trade, 'ordertxid')
id = self.safe_string_2(trade, 'id', 'postxid')
timestamp = self.safe_timestamp(trade, 'time')
side = self.safe_string(trade, 'type')
type = self.safe_string(trade, 'ordertype')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'vol')
if 'fee' in trade:
currency = None
if market is not None:
currency = market['quote']
fee = {
'cost': self.safe_number(trade, 'fee'),
'currency': currency,
}
if market is not None:
symbol = market['symbol']
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
return {
'id': id,
'order': orderId,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
id = market['id']
request = {
'pair': id,
}
# https://support.kraken.com/hc/en-us/articles/218198197-How-to-pull-all-trade-data-using-the-Kraken-REST-API
# https://github.com/ccxt/ccxt/issues/5677
if since is not None:
            # php does not format it properly, therefore we use string
            # concatenation here; Kraken expects the value in nanoseconds
            request['since'] = str(since) + '000000'  # ms -> ns
# https://github.com/ccxt/ccxt/issues/5698
if limit is not None and limit != 1000:
fetchTradesWarning = self.safe_value(self.options, 'fetchTradesWarning', True)
if fetchTradesWarning:
                raise ExchangeError(self.id + ' fetchTrades() cannot serve ' + str(limit) + " trades without breaking the pagination, see https://github.com/ccxt/ccxt/issues/5698 for more details. Set exchange.options['fetchTradesWarning'] to acknowledge this warning and silence it.")
response = await self.publicGetTrades(self.extend(request, params))
#
# {
# "error": [],
# "result": {
# "XETHXXBT": [
# ["0.032310","4.28169434",1541390792.763,"s","l",""]
# ],
# "last": "1541439421200678657"
# }
# }
#
result = response['result']
trades = result[id]
# trades is a sorted array: last(most recent trade) goes last
length = len(trades)
if length <= 0:
return []
lastTrade = trades[length - 1]
lastTradeId = self.safe_string(result, 'last')
lastTrade.append(lastTradeId)
return self.parse_trades(trades, market, since, limit)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostBalance(params)
#
# {
# "error":[],
# "result":{
# "ZUSD":"58.8649",
# "KFEE":"4399.43",
# "XXBT":"0.0000034506",
# }
# }
#
balances = self.safe_value(response, 'result', {})
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balances, currencyId)
result[code] = account
return self.parse_balance(result)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'ordertype': type,
'volume': self.amount_to_precision(symbol, amount),
}
clientOrderId = self.safe_string_2(params, 'userref', 'clientOrderId')
params = self.omit(params, ['userref', 'clientOrderId'])
if clientOrderId is not None:
request['userref'] = clientOrderId
#
# market
# limit(price = limit price)
# stop-loss(price = stop loss price)
# take-profit(price = take profit price)
# stop-loss-limit(price = stop loss trigger price, price2 = triggered limit price)
# take-profit-limit(price = take profit trigger price, price2 = triggered limit price)
# settle-position
#
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
elif (type == 'stop-loss') or (type == 'take-profit'):
stopPrice = self.safe_number_2(params, 'price', 'stopPrice', price)
if stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a price/stopPrice parameter for a ' + type + ' order')
else:
request['price'] = self.price_to_precision(symbol, stopPrice)
elif (type == 'stop-loss-limit') or (type == 'take-profit-limit'):
stopPrice = self.safe_number_2(params, 'price', 'stopPrice')
limitPrice = self.safe_number(params, 'price2')
stopPriceDefined = (stopPrice is not None)
limitPriceDefined = (limitPrice is not None)
if stopPriceDefined and limitPriceDefined:
request['price'] = self.price_to_precision(symbol, stopPrice)
request['price2'] = self.price_to_precision(symbol, limitPrice)
elif (price is None) or (not(stopPriceDefined or limitPriceDefined)):
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument and/or price/stopPrice/price2 parameters for a ' + type + ' order')
else:
if stopPriceDefined:
request['price'] = self.price_to_precision(symbol, stopPrice)
request['price2'] = self.price_to_precision(symbol, price)
elif limitPriceDefined:
request['price'] = self.price_to_precision(symbol, price)
request['price2'] = self.price_to_precision(symbol, limitPrice)
params = self.omit(params, ['price', 'stopPrice', 'price2'])
response = await self.privatePostAddOrder(self.extend(request, params))
#
# {
# error: [],
# result: {
# descr: {order: 'buy 0.02100000 ETHUSDT @ limit 330.00'},
# txid: ['OEKVV2-IH52O-TPL6GZ']
# }
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result)
def find_market_by_altname_or_id(self, id):
if id in self.marketsByAltname:
return self.marketsByAltname[id]
elif id in self.markets_by_id:
return self.markets_by_id[id]
return None
def get_delisted_market_by_id(self, id):
if id is None:
return id
market = self.safe_value(self.options['delistedMarketsById'], id)
if market is not None:
return market
baseIdStart = 0
baseIdEnd = 3
quoteIdStart = 3
quoteIdEnd = 6
if len(id) == 8:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 8
elif len(id) == 7:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 7
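# e.g. an 8-character id like 'XXBTZUSD' splits into baseId 'XXBT' and
# quoteId 'ZUSD'; the default 6-character split is 3/3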
baseId = id[baseIdStart:baseIdEnd]
quoteId = id[quoteIdStart:quoteIdEnd]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
self.options['delistedMarketsById'][id] = market
return market
def parse_order_status(self, status):
statuses = {
'pending': 'open', # order pending book entry
'open': 'open',
'closed': 'closed',
'canceled': 'canceled',
'expired': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# descr: {order: 'buy 0.02100000 ETHUSDT @ limit 330.00'},
# txid: ['OEKVV2-IH52O-TPL6GZ']
# }
#
description = self.safe_value(order, 'descr', {})
orderDescription = self.safe_string(description, 'order')
side = None
type = None
marketId = None
price = None
amount = None
if orderDescription is not None:
parts = orderDescription.split(' ')
side = self.safe_string(parts, 0)
amount = self.safe_number(parts, 1)
marketId = self.safe_string(parts, 2)
type = self.safe_string(parts, 4)
price = self.safe_number(parts, 5)
side = self.safe_string(description, 'type', side)
type = self.safe_string(description, 'ordertype', type)
marketId = self.safe_string(description, 'pair', marketId)
foundMarket = self.find_market_by_altname_or_id(marketId)
symbol = None
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
# delisted market ids go here
market = self.get_delisted_market_by_id(marketId)
timestamp = self.safe_timestamp(order, 'opentm')
amount = self.safe_number(order, 'vol', amount)
filled = self.safe_number(order, 'vol_exec')
fee = None
cost = self.safe_number(order, 'cost')
price = self.safe_number(description, 'price', price)
if (price is None) or (price == 0.0):
price = self.safe_number(description, 'price2')
if (price is None) or (price == 0.0):
price = self.safe_number(order, 'price', price)
average = self.safe_number(order, 'price')
if market is not None:
symbol = market['symbol']
if 'fee' in order:
flags = order['oflags']
feeCost = self.safe_number(order, 'fee')
fee = {
'cost': feeCost,
'rate': None,
}
if flags.find('fciq') >= 0:
fee['currency'] = market['quote']
elif flags.find('fcib') >= 0:
fee['currency'] = market['base']
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
if id is None:
txid = self.safe_value(order, 'txid')
id = self.safe_string(txid, 0)
clientOrderId = self.safe_string(order, 'userref')
rawTrades = self.safe_value(order, 'trades')
trades = None
if rawTrades is not None:
trades = self.parse_trades(rawTrades, market, None, None, {'order': id})
stopPrice = self.safe_number(order, 'stopprice')
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'cost': cost,
'amount': amount,
'filled': filled,
'average': average,
'remaining': None,
'fee': fee,
'trades': trades,
})
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
clientOrderId = self.safe_value_2(params, 'userref', 'clientOrderId')
request = {
'trades': True, # whether or not to include trades in output (optional, default False)
# 'txid': id, # do not comma separate a list of ids - use fetchOrdersByIds instead
# 'userref': 'optional', # restrict results to given user reference id (optional)
}
query = params
if clientOrderId is not None:
request['userref'] = clientOrderId
query = self.omit(params, ['userref', 'clientOrderId'])
else:
request['txid'] = id
response = await self.privatePostQueryOrders(self.extend(request, query))
#
# {
# "error":[],
# "result":{
# "OTLAS3-RRHUF-NDWH5A":{
# "refid":null,
# "userref":null,
# "status":"closed",
# "reason":null,
# "opentm":1586822919.3342,
# "closetm":1586822919.365,
# "starttm":0,
# "expiretm":0,
# "descr":{
# "pair":"XBTUSDT",
# "type":"sell",
# "ordertype":"market",
# "price":"0",
# "price2":"0",
# "leverage":"none",
# "order":"sell 0.21804000 XBTUSDT @ market",
# "close":""
# },
# "vol":"0.21804000",
# "vol_exec":"0.21804000",
# "cost":"1493.9",
# "fee":"3.8",
# "price":"6851.5",
# "stopprice":"0.00000",
# "limitprice":"0.00000",
# "misc":"",
# "oflags":"fciq",
# "trades":["TT5UC3-GOIRW-6AZZ6R"]
# }
# }
# }
#
result = self.safe_value(response, 'result', [])
if id not in result:
raise OrderNotFound(self.id + ' fetchOrder() could not find order id ' + id)
order = self.parse_order(self.extend({'id': id}, result[id]))
return self.extend({'info': response}, order)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
orderTrades = self.safe_value(params, 'trades')
tradeIds = []
if orderTrades is None:
raise ArgumentsRequired(self.id + " fetchOrderTrades() requires a unified order structure in the params argument or a 'trades' param(an array of trade id strings)")
else:
for i in range(0, len(orderTrades)):
orderTrade = orderTrades[i]
if isinstance(orderTrade, basestring):
tradeIds.append(orderTrade)
else:
tradeIds.append(orderTrade['id'])
await self.load_markets()
options = self.safe_value(self.options, 'fetchOrderTrades', {})
batchSize = self.safe_integer(options, 'batchSize', 20)
numTradeIds = len(tradeIds)
numBatches = int(numTradeIds / batchSize)
numBatches = self.sum(numBatches, 1)
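# e.g. 45 trade ids with the default batchSize of 20 yield 3 batches of 20, 20 and 5
# (note: an exact multiple of batchSize leaves the final batch empty)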
result = []
for j in range(0, numBatches):
requestIds = []
for k in range(0, batchSize):
index = self.sum(j * batchSize, k)
if index < numTradeIds:
requestIds.append(tradeIds[index])
request = {
'txid': ','.join(requestIds),
}
response = await self.privatePostQueryTrades(request)
#
# {
# error: [],
# result: {
# 'TIMIRG-WUNNE-RRJ6GT': {
# ordertxid: 'OQRPN2-LRHFY-HIFA7D',
# postxid: 'TKH2SE-M7IF5-CFI7LT',
# pair: 'USDCUSDT',
# time: 1586340086.457,
# type: 'sell',
# ordertype: 'market',
# price: '0.99860000',
# cost: '22.16892001',
# fee: '0.04433784',
# vol: '22.20000000',
# margin: '0.00000000',
# misc: ''
# }
# }
# }
#
rawTrades = self.safe_value(response, 'result')
ids = list(rawTrades.keys())
for i in range(0, len(ids)):
rawTrades[ids[i]]['id'] = ids[i]
trades = self.parse_trades(rawTrades, None, since, limit)
tradesFilteredBySymbol = self.filter_by_symbol(trades, symbol)
result = self.array_concat(result, tradesFilteredBySymbol)
return result
async def fetch_orders_by_ids(self, ids, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostQueryOrders(self.extend({
'trades': True, # whether or not to include trades in output(optional, default False)
'txid': ','.join(ids), # comma delimited list of transaction ids to query info about(20 maximum)
}, params))
result = self.safe_value(response, 'result', {})
orders = []
orderIds = list(result.keys())
for i in range(0, len(orderIds)):
id = orderIds[i]
item = result[id]
order = self.parse_order(self.extend({'id': id}, item))
orders.append(order)
return orders
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'type': 'all', # any position, closed position, closing position, no position
# 'trades': False, # whether or not to include trades related to position in output
# 'start': 1234567890, # starting unix timestamp or trade tx id of results (exclusive)
# 'end': 1234567890, # ending unix timestamp or trade tx id of results (inclusive)
# 'ofs': 0, # result offset
}
if since is not None:
request['start'] = int(since / 1000)
response = await self.privatePostTradesHistory(self.extend(request, params))
#
# {
# "error": [],
# "result": {
# "trades": {
# "GJ3NYQ-XJRTF-THZABF": {
# "ordertxid": "TKH2SE-ZIF5E-CFI7LT",
# "postxid": "OEN3VX-M7IF5-JNBJAM",
# "pair": "XICNXETH",
# "time": 1527213229.4491,
# "type": "sell",
# "ordertype": "limit",
# "price": "0.001612",
# "cost": "0.025792",
# "fee": "0.000026",
# "vol": "16.00000000",
# "margin": "0.000000",
# "misc": ""
# },
# ...
# },
# "count": 9760,
# },
# }
#
trades = response['result']['trades']
ids = list(trades.keys())
for i in range(0, len(ids)):
trades[ids[i]]['id'] = ids[i]
market = None
if symbol is not None:
market = self.market(symbol)
return self.parse_trades(trades, market, since, limit)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
response = None
clientOrderId = self.safe_value_2(params, 'userref', 'clientOrderId')
try:
response = await self.privatePostCancelOrder(self.extend({
'txid': clientOrderId or id,
}, params))
except Exception as e:
if self.last_http_response:
if self.last_http_response.find('EOrder:Unknown order') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() error ' + self.last_http_response)
raise e
return response
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
return await self.privatePostCancelAll(params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
query = params
clientOrderId = self.safe_value_2(params, 'userref', 'clientOrderId')
if clientOrderId is not None:
request['userref'] = clientOrderId
query = self.omit(params, ['userref', 'clientOrderId'])
response = await self.privatePostOpenOrders(self.extend(request, query))
market = None
if symbol is not None:
market = self.market(symbol)
result = self.safe_value(response, 'result', {})
orders = self.safe_value(result, 'open', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
query = params
clientOrderId = self.safe_value_2(params, 'userref', 'clientOrderId')
if clientOrderId is not None:
request['userref'] = clientOrderId
query = self.omit(params, ['userref', 'clientOrderId'])
response = await self.privatePostClosedOrders(self.extend(request, query))
#
# {
# "error":[],
# "result":{
# "closed":{
# "OETZYO-UL524-QJMXCT":{
# "refid":null,
# "userref":null,
# "status":"canceled",
# "reason":"User requested",
# "opentm":1601489313.3898,
# "closetm":1601489346.5507,
# "starttm":0,
# "expiretm":0,
# "descr":{
# "pair":"ETHUSDT",
# "type":"buy",
# "ordertype":"limit",
# "price":"330.00",
# "price2":"0",
# "leverage":"none",
# "order":"buy 0.02100000 ETHUSDT @ limit 330.00",
# "close":""
# },
# "vol":"0.02100000",
# "vol_exec":"0.00000000",
# "cost":"0.00000",
# "fee":"0.00000",
# "price":"0.00000",
# "stopprice":"0.00000",
# "limitprice":"0.00000",
# "misc":"",
# "oflags":"fciq"
# },
# },
# "count":16
# }
# }
#
market = None
if symbol is not None:
market = self.market(symbol)
result = self.safe_value(response, 'result', {})
orders = self.safe_value(result, 'closed', [])
return self.parse_orders(orders, market, since, limit)
def parse_transaction_status(self, status):
# IFEX transaction states
statuses = {
'Initial': 'pending',
'Pending': 'pending',
'Success': 'ok',
'Settled': 'pending',
'Failure': 'failed',
'Partial': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {method: "Ether(Hex)",
# aclass: "currency",
# asset: "XETH",
# refid: "Q2CANKL-LBFVEE-U4Y2WQ",
# txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…",
# info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290",
# amount: "7.9999257900",
# fee: "0.0000000000",
# time: 1529223212,
# status: "Success" }
#
# fetchWithdrawals
#
# {method: "Ether",
# aclass: "currency",
# asset: "XETH",
# refid: "A2BF34S-O7LBNQ-UE4Y4O",
# txid: "0x288b83c6b0904d8400ef44e1c9e2187b5c8f7ea3d838222d53f701a15b5c274d",
# info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0",
# amount: "9.9950000000",
# fee: "0.0050000000",
# time: 1530481750,
# status: "Success" }
#
id = self.safe_string(transaction, 'refid')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_timestamp(transaction, 'time')
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
address = self.safe_string(transaction, 'info')
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type') # injected from the outside
feeCost = self.safe_number(transaction, 'fee')
if feeCost is None:
if type == 'deposit':
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/en-us/help/api#deposit-status
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires a currency code argument')
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.privatePostDepositStatus(self.extend(request, params))
#
# { error: [],
# result: [{method: "Ether(Hex)",
# aclass: "currency",
# asset: "XETH",
# refid: "Q2CANKL-LBFVEE-U4Y2WQ",
# txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…",
# info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290",
# amount: "7.9999257900",
# fee: "0.0000000000",
# time: 1529223212,
# status: "Success" }]}
#
return self.parse_transactions_by_type('deposit', response['result'], code, since, limit)
async def fetch_time(self, params={}):
# https://www.kraken.com/en-us/features/api#get-server-time
response = await self.publicGetTime(params)
#
# {
# "error": [],
# "result": {
# "unixtime": 1591502873,
# "rfc1123": "Sun, 7 Jun 20 04:07:53 +0000"
# }
# }
#
result = self.safe_value(response, 'result', {})
return self.safe_timestamp(result, 'unixtime')
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/en-us/help/api#withdraw-status
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals() requires a currency code argument')
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.privatePostWithdrawStatus(self.extend(request, params))
#
# { error: [],
# result: [{method: "Ether",
# aclass: "currency",
# asset: "XETH",
# refid: "A2BF34S-O7LBNQ-UE4Y4O",
# txid: "0x298c83c7b0904d8400ef43e1c9e2287b518f7ea3d838822d53f704a1565c274d",
# info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0",
# amount: "9.9950000000",
# fee: "0.0050000000",
# time: 1530481750,
# status: "Success" }]}
#
return self.parse_transactions_by_type('withdrawal', response['result'], code, since, limit)
async def create_deposit_address(self, code, params={}):
request = {
'new': 'true',
}
return await self.fetch_deposit_address(code, self.extend(request, params))
async def fetch_deposit_methods(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.privatePostDepositMethods(self.extend(request, params))
#
# {
# "error":[],
# "result":[
# {"method":"Ether(Hex)","limit":false,"gen-address":true}
# ]
# }
#
# {
# "error":[],
# "result":[
# {"method":"Tether USD(ERC20)","limit":false,"address-setup-fee":"0.00000000","gen-address":true},
# {"method":"Tether USD(TRC20)","limit":false,"address-setup-fee":"0.00000000","gen-address":true}
# ]
# }
#
# {
# "error":[],
# "result":[
# {"method":"Bitcoin","limit":false,"fee":"0.0000000000","gen-address":true}
# ]
# }
#
return self.safe_value(response, 'result')
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
network = self.safe_string_upper(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string(networks, network, network) # support ETH > ERC20 aliases
params = self.omit(params, 'network')
if (code == 'USDT') and (network == 'TRC20'):
code = code + '-' + network
defaultDepositMethods = self.safe_value(self.options, 'depositMethods', {})
defaultDepositMethod = self.safe_string(defaultDepositMethods, code)
depositMethod = self.safe_string(params, 'method', defaultDepositMethod)
# if the user has specified an exchange-specific method in params
# we pass it as is, otherwise we take the 'network' unified param
if depositMethod is None:
depositMethods = await self.fetch_deposit_methods(code)
if network is not None:
# find best matching deposit method, or fallback to the first one
for i in range(0, len(depositMethods)):
entry = self.safe_string(depositMethods[i], 'method')
if entry.find(network) >= 0:
depositMethod = entry
break
# if depositMethod was not specified, fallback to the first available deposit method
if depositMethod is None:
firstDepositMethod = self.safe_value(depositMethods, 0, {})
depositMethod = self.safe_string(firstDepositMethod, 'method')
request = {
'asset': currency['id'],
'method': depositMethod,
}
response = await self.privatePostDepositAddresses(self.extend(request, params))
#
# {
# "error":[],
# "result":[
# {"address":"0x77b5051f97efa9cc52c9ad5b023a53fc15c200d3","expiretm":"0"}
# ]
# }
#
result = self.safe_value(response, 'result', [])
firstResult = self.safe_value(result, 0, {})
if firstResult is None:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses for ' + code)
return self.parse_deposit_address(firstResult, currency)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# "address":"0x77b5051f97efa9cc52c9ad5b023a53fc15c200d3",
# "expiretm":"0"
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'tag')
currency = self.safe_currency(None, currency)
code = currency['code']
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': depositAddress,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
if 'key' in params:
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': amount,
# 'address': address, # they don't allow withdrawals to direct addresses
}
response = await self.privatePostWithdraw(self.extend(request, params))
result = self.safe_value(response, 'result', {})
id = self.safe_string(result, 'refid')
return {
'info': result,
'id': id,
}
raise ExchangeError(self.id + " withdraw() requires a 'key' parameter(withdrawal key name, as set up on your account)")
async def fetch_positions(self, symbols=None, params={}):
await self.load_markets()
request = {
# 'txid': 'comma delimited list of transaction ids to restrict output to',
# 'docalcs': False, # whether or not to include profit/loss calculations
# 'consolidation': 'market', # what to consolidate the positions data around, market will consolidate positions based on market pair
}
response = await self.privatePostOpenPositions(self.extend(request, params))
#
# no consolidation
#
# {
# error: [],
# result: {
# 'TGUFMY-FLESJ-VYIX3J': {
# ordertxid: "O3LRNU-ZKDG5-XNCDFR",
# posstatus: "open",
# pair: "ETHUSDT",
# time: 1611557231.4584,
# type: "buy",
# ordertype: "market",
# cost: "28.49800",
# fee: "0.07979",
# vol: "0.02000000",
# vol_closed: "0.00000000",
# margin: "14.24900",
# terms: "0.0200% per 4 hours",
# rollovertm: "1611571631",
# misc: "",
# oflags: ""
# }
# }
# }
#
# consolidation by market
#
# {
# error: [],
# result: [
# {
# pair: "ETHUSDT",
# positions: "1",
# type: "buy",
# leverage: "2.00000",
# cost: "28.49800",
# fee: "0.07979",
# vol: "0.02000000",
# vol_closed: "0.00000000",
# margin: "14.24900"
# }
# ]
# }
#
result = self.safe_value(response, 'result')
# todo unify parsePosition/parsePositions
return result
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.version + '/' + api + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
elif api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
body = self.urlencode(self.extend({'nonce': nonce}, params))
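# Kraken request signing, as implemented below: hash = SHA256(nonce + body),
# then signature = HMAC-SHA512(path + hash) keyed with the base64-decoded
# API secret, returned base64-encoded in the API-Sign header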
auth = self.encode(nonce + body)
hash = self.hash(auth, 'sha256', 'binary')
binary = self.encode(url)
binhash = self.binary_concat(binary, hash)
secret = self.base64_to_binary(self.secret)
signature = self.hmac(binhash, secret, hashlib.sha512, 'base64')
headers = {
'API-Key': self.apiKey,
'API-Sign': signature,
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
url = '/' + path
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def nonce(self):
return self.milliseconds()
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 520:
raise ExchangeNotAvailable(self.id + ' ' + str(code) + ' ' + reason)
# todo: rewrite this for "broad" exceptions matching
if body.find('Invalid order') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body.find('Invalid nonce') >= 0:
raise InvalidNonce(self.id + ' ' + body)
if body.find('Insufficient funds') >= 0:
raise InsufficientFunds(self.id + ' ' + body)
if body.find('Cancel pending') >= 0:
raise CancelPending(self.id + ' ' + body)
if body.find('Invalid arguments:volume') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body.find('Rate limit exceeded') >= 0:
raise RateLimitExceeded(self.id + ' ' + body)
if response is None:
return
if body[0] == '{':
if not isinstance(response, basestring):
if 'error' in response:
numErrors = len(response['error'])
if numErrors:
message = self.id + ' ' + body
for i in range(0, len(response['error'])):
error = response['error'][i]
self.throw_exactly_matched_exception(self.exceptions, error, message)
raise ExchangeError(message)
|
import stylize, {encodeDisallowedCharacters} from './style-serializer'
import serialize from './content-serializer'
export function path({req, value, parameter}) {
const {name, style, explode, content} = parameter
if (content) {
const effectiveMediaType = Object.keys(content)[0]
req.url = req.url.split(`{${name}}`).join(
encodeDisallowedCharacters(
serialize(value, effectiveMediaType),
{escape: true}
)
)
return
}
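// e.g. a hypothetical simple-style path parameter `petId` with value 42
// rewrites '/pets/{petId}' to '/pets/42'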
const styledValue = stylize({
key: parameter.name,
value,
style: style || 'simple',
explode: explode || false,
escape: true,
})
req.url = req.url.split(`{${name}}`).join(styledValue)
}
export function query({req, value, parameter}) {
req.query = req.query || {}
if (parameter.content) {
const effectiveMediaType = Object.keys(parameter.content)[0]
req.query[parameter.name] = serialize(value, effectiveMediaType)
return
}
if (value === false) {
value = 'false'
}
if (value === 0) {
value = '0'
}
if (value) {
const type = typeof value
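// e.g. a hypothetical deepObject parameter named `filter` with value
// {color: 'red'} serializes to `filter[color]=red`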
if (parameter.style === 'deepObject') {
const valueKeys = Object.keys(value)
valueKeys.forEach((k) => {
const v = value[k]
req.query[`${parameter.name}[${k}]`] = {
value: stylize({
key: k,
value: v,
style: 'deepObject',
escape: parameter.allowReserved ? 'unsafe' : 'reserved',
}),
skipEncoding: true
}
})
}
else if (
type === 'object' &&
!Array.isArray(value) &&
(parameter.style === 'form' || !parameter.style) &&
(parameter.explode || parameter.explode === undefined)
) {
// form explode needs to be handled here,
// since we aren't assigning to `req.query[parameter.name]`
// like we usually do.
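// e.g. a hypothetical object value {role: 'admin', active: 'true'} with
// form style + explode expands to `role=admin&active=true`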
const valueKeys = Object.keys(value)
valueKeys.forEach((k) => {
const v = value[k]
req.query[k] = {
value: stylize({
key: k,
value: v,
style: parameter.style || 'form',
escape: parameter.allowReserved ? 'unsafe' : 'reserved',
}),
skipEncoding: true
}
})
}
else {
const encodedParamName = encodeURIComponent(parameter.name)
req.query[encodedParamName] = {
value: stylize({
key: encodedParamName,
value,
style: parameter.style || 'form',
explode: typeof parameter.explode === 'undefined' ? true : parameter.explode,
escape: parameter.allowReserved ? 'unsafe' : 'reserved',
}),
skipEncoding: true
}
}
}
else if (parameter.allowEmptyValue && value !== undefined) {
const paramName = parameter.name
req.query[paramName] = req.query[paramName] || {}
req.query[paramName].allowEmptyValue = true
}
}
const PARAMETER_HEADER_BLACKLIST = [
'accept',
'authorization',
'content-type'
]
export function header({req, parameter, value}) {
req.headers = req.headers || {}
if (PARAMETER_HEADER_BLACKLIST.indexOf(parameter.name.toLowerCase()) > -1) {
return
}
if (parameter.content) {
const effectiveMediaType = Object.keys(parameter.content)[0]
req.headers[parameter.name] = serialize(value, effectiveMediaType)
return
}
if (typeof value !== 'undefined') {
req.headers[parameter.name] = stylize({
key: parameter.name,
value,
style: parameter.style || 'simple',
explode: typeof parameter.explode === 'undefined' ? false : parameter.explode,
escape: false,
})
}
}
export function cookie({req, parameter, value}) {
req.headers = req.headers || {}
const type = typeof value
if (parameter.content) {
const effectiveMediaType = Object.keys(parameter.content)[0]
req.headers.Cookie = `${parameter.name}=${serialize(value, effectiveMediaType)}`
return
}
if (type !== 'undefined') {
const prefix = (
type === 'object' &&
!Array.isArray(value) &&
parameter.explode
) ? '' : `${parameter.name}=`
req.headers.Cookie = prefix + stylize({
key: parameter.name,
value,
escape: false,
style: parameter.style || 'form',
explode: typeof parameter.explode === 'undefined' ? false : parameter.explode
})
}
}
|
"""
Mixins for logistic regression and NMF models, shared across different models
"""
import numpy as np
from scipy.special import expit as _sigmoid, kl_div
from scipy.stats import bernoulli
class LogisticMixin(object):
def _negative_log_likelihood(self, w, y, X, mask=None):
"""
Returns logistic regression negative log likelihood
:param w: the parameters at their current estimates of shape (n_features,)
:param y: the response vector of shape (n_obs,)
:param X: the design matrix of shape (n_features, n_obs)
:param mask: the binary mask vector of shape (n_obs,). 1 if observed, 0 o/w
:returns: negative log likelihood value
:rtype: float
"""
sigm = _sigmoid(w.dot(X))
if mask is not None:
# only observed entries (mask == 1) contribute to the likelihood;
# masking outside the log keeps unobserved terms at exactly zero
return -np.sum(mask * np.log(bernoulli.pmf(y, sigm) + 1e-5))
else:
return -np.sum(np.log(bernoulli.pmf(y, sigm) + 1e-5))
def _update_param(self, w, y, X, mask=None, eta=0.01):
"""
:param w: the parameters at their current estimates of shape (n_features,)
:param y: the response vector of shape (n_obs,)
:param X: the design matrix of shape (n_features, n_obs)
:param mask: the binary mask vector of shape (n_obs,). 1 if observed, 0 o/w
:param eta: the batch gradient descent step size
:returns: updated parameter vector of shape (n_features,)
"""
if mask is not None:
return w + eta * X.dot(mask * (y - _sigmoid(w.dot(X))))
else:
return w + eta * X.dot(y - _sigmoid(w.dot(X)))
def _update_param_mult(self, w, y, X, mask=None):
"""
Logistic regression, implemented with the multiplicative update rule. Note that the
multiplicative update works quite poorly and only handles the case where a
nonnegative coefficient vector is required.
:param w: the parameters at their current estimates of shape (n_features,)
:param y: the response vector of shape (n_obs,)
:param X: the design matrix of shape (n_features, n_obs)
:param mask: the binary mask vector of shape (n_obs,). 1 if observed, 0 o/w
:returns: updated parameter vector of shape (n_features,)
"""
if mask is not None:
X = X * mask
y = y * mask
return w * X.dot(y) / (X.dot(_sigmoid(w.dot(X))) + 1e-10)
def _score(self, w, X):
return _sigmoid(w.dot(X))
class NMFMixin(object):
def _kl_divergence(self, X, W, H):
"""
Calculate the generalized Kullback-Leibler divergence (also called Information Divergence or
I-Divergence) between two matrices.
"""
B = W.dot(H)
return np.sum(kl_div(X,B))
def _euc_distance(self, X, W, H):
"""
Calculate the Euclidean distance between two matrices.
"""
return np.linalg.norm(X - W.dot(H), "fro")
def _update_W_kl(self, X, W, H):
"""
update first parameterizing matrix as per KL divergence multiplicative
update step
"""
eps = self.eps
return W * (X / (W.dot(H) + eps)).dot(H.T) / (np.sum(H,1) + eps)
def _update_H_kl(self, X, W, H):
"""
Update the second factor matrix as per KL divergence multiplicative update
"""
eps = self.eps
return H * (W.T.dot(X / (W.dot(H) + eps)).T / (np.sum(W,0) + eps)).T
def _update_W_euc(self, X, W, H, phi=1.):
"""
:param phi: the dispersion parameter
:type phi: float
"""
eps = self.eps
return W * (1 / (phi + eps)) * X.dot(H.T) / (W.dot(H).dot(H.T) + eps)
def _update_H_euc(self, X, W, H, phi=1.):
"""
:param phi: the dispersion parameter
:type phi: float
"""
eps = self.eps
return H * (1 / (phi + eps)) * W.T.dot(X) / (W.T.dot(W).dot(H) + eps)
|
//
// YCXMenu.h
// YCXMenuDemo_ObjC
//
// Created by 牛萌 on 15/5/6.
// Copyright (c) 2015 NiuMeng. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "YCXMenuItem.h"
// Posted when the menu is about to appear
extern NSString * const YCXMenuWillAppearNotification;
// Posted when the menu has appeared
extern NSString * const YCXMenuDidAppearNotification;
// Posted when the menu is about to disappear
extern NSString * const YCXMenuWillDisappearNotification;
// Posted when the menu has disappeared
extern NSString * const YCXMenuDidDisappearNotification;
typedef void(^YCXMenuSelectedItem)(NSInteger index, YCXMenuItem *item);
typedef enum {
YCXMenuBackgrounColorEffectSolid = 0, //!< Background effect: solid color
YCXMenuBackgrounColorEffectGradient = 1, //!< Background effect: gradient overlay
} YCXMenuBackgrounColorEffect;
@interface YCXMenu : NSObject
+ (void)showMenuInView:(UIView *)view fromRect:(CGRect)rect menuItems:(NSArray *)menuItems selected:(YCXMenuSelectedItem)selectedItem;
+ (void)dismissMenu;
+ (BOOL)isShow;
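// A minimal usage sketch (`items` is a hypothetical NSArray of YCXMenuItem):
//
// [YCXMenu showMenuInView:self.view
// fromRect:button.frame
// menuItems:items
// selected:^(NSInteger index, YCXMenuItem *item) {
// NSLog(@"selected %ld", (long)index);
// }];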
// Tint color
+ (UIColor *)tintColor;
+ (void)setTintColor:(UIColor *)tintColor;
// Corner radius
+ (CGFloat)cornerRadius;
+ (void)setCornerRadius:(CGFloat)cornerRadius;
// Arrow size
+ (CGFloat)arrowSize;
+ (void)setArrowSize:(CGFloat)arrowSize;
// Title font
+ (UIFont *)titleFont;
+ (void)setTitleFont:(UIFont *)titleFont;
// Background effect
+ (YCXMenuBackgrounColorEffect)backgrounColorEffect;
+ (void)setBackgrounColorEffect:(YCXMenuBackgrounColorEffect)effect;
// Whether to draw a shadow
+ (BOOL)hasShadow;
+ (void)setHasShadow:(BOOL)flag;
// Highlight color of the selected item
+ (UIColor*)selectedColor;
+ (void)setSelectedColor:(UIColor*)selectedColor;
// Separator color
+ (UIColor*)separatorColor;
+ (void)setSeparatorColor:(UIColor*)separatorColor;
/// Vertical margin of the menu items
+ (CGFloat)menuItemMarginY;
+ (void)setMenuItemMarginY:(CGFloat)menuItemMarginY;
@end
|
/*
YUI 3.17.1 (build 0eb5a52)
Copyright 2014 Yahoo! Inc. All rights reserved.
Licensed under the BSD License.
http://yuilibrary.com/license/
*/
YUI.add("event-touch",function(e,t){var n="scale",r="rotation",i="identifier",s=e.config.win,o={};e.DOMEventFacade.prototype._touch=function(t,s,o){var u,a,f,l,c;if(t.touches){this.touches=[],c={};for(u=0,a=t.touches.length;u<a;++u)l=t.touches[u],c[e.stamp(l)]=this.touches[u]=new e.DOMEventFacade(l,s,o)}if(t.targetTouches){this.targetTouches=[];for(u=0,a=t.targetTouches.length;u<a;++u)l=t.targetTouches[u],f=c&&c[e.stamp(l,!0)],this.targetTouches[u]=f||new e.DOMEventFacade(l,s,o)}if(t.changedTouches){this.changedTouches=[];for(u=0,a=t.changedTouches.length;u<a;++u)l=t.changedTouches[u],f=c&&c[e.stamp(l,!0)],this.changedTouches[u]=f||new e.DOMEventFacade(l,s,o)}n in t&&(this[n]=t[n]),r in t&&(this[r]=t[r]),i in t&&(this[i]=t[i])},e.Node.DOM_EVENTS&&e.mix(e.Node.DOM_EVENTS,{touchstart:1,touchmove:1,touchend:1,touchcancel:1,gesturestart:1,gesturechange:1,gestureend:1,MSPointerDown:1,MSPointerUp:1,MSPointerMove:1,MSPointerCancel:1,pointerdown:1,pointerup:1,pointermove:1,pointercancel:1}),s&&"ontouchstart"in s&&!(e.UA.chrome&&e.UA.chrome<6)?(o.start=["touchstart","mousedown"],o.end=["touchend","mouseup"],o.move=["touchmove","mousemove"],o.cancel=["touchcancel","mousecancel"]):s&&s.PointerEvent?(o.start="pointerdown",o.end="pointerup",o.move="pointermove",o.cancel="pointercancel"):s&&"msPointerEnabled"in s.navigator?(o.start="MSPointerDown",o.end="MSPointerUp",o.move="MSPointerMove",o.cancel="MSPointerCancel"):(o.start="mousedown",o.end="mouseup",o.move="mousemove",o.cancel="mousecancel"),e.Event._GESTURE_MAP=o},"3.17.1",{requires:["node-base"]});
|
goog.addDependency("base.js", ['goog'], []);
goog.addDependency("debug/error.js", ['goog.debug.Error'], []);
goog.addDependency("dom/nodetype.js", ['goog.dom.NodeType'], []);
goog.addDependency("asserts/asserts.js", ['goog.asserts', 'goog.asserts.AssertionError'], ['goog.debug.Error', 'goog.dom.NodeType']);
goog.addDependency("dom/asserts.js", ['goog.dom.asserts'], ['goog.asserts']);
goog.addDependency("functions/functions.js", ['goog.functions'], []);
goog.addDependency("array/array.js", ['goog.array'], ['goog.asserts']);
goog.addDependency("dom/htmlelement.js", ['goog.dom.HtmlElement'], []);
goog.addDependency("dom/tagname.js", ['goog.dom.TagName'], ['goog.dom.HtmlElement']);
goog.addDependency("object/object.js", ['goog.object'], []);
goog.addDependency("dom/tags.js", ['goog.dom.tags'], ['goog.object']);
goog.addDependency("html/trustedtypes.js", ['goog.html.trustedtypes'], []);
goog.addDependency("string/typedstring.js", ['goog.string.TypedString'], []);
goog.addDependency("string/const.js", ['goog.string.Const'], ['goog.asserts', 'goog.string.TypedString']);
goog.addDependency("html/safescript.js", ['goog.html.SafeScript'], ['goog.asserts', 'goog.html.trustedtypes', 'goog.string.Const', 'goog.string.TypedString']);
goog.addDependency("fs/url.js", ['goog.fs.url'], []);
goog.addDependency("i18n/bidi.js", ['goog.i18n.bidi', 'goog.i18n.bidi.Dir', 'goog.i18n.bidi.DirectionalString', 'goog.i18n.bidi.Format'], []);
goog.addDependency("html/trustedresourceurl.js", ['goog.html.TrustedResourceUrl'], ['goog.asserts', 'goog.html.trustedtypes', 'goog.i18n.bidi.Dir', 'goog.i18n.bidi.DirectionalString', 'goog.string.Const', 'goog.string.TypedString']);
goog.addDependency("string/internal.js", ['goog.string.internal'], []);
goog.addDependency("html/safeurl.js", ['goog.html.SafeUrl'], ['goog.asserts', 'goog.fs.url', 'goog.html.TrustedResourceUrl', 'goog.i18n.bidi.Dir', 'goog.i18n.bidi.DirectionalString', 'goog.string.Const', 'goog.string.TypedString', 'goog.string.internal']);
goog.addDependency("html/safestyle.js", ['goog.html.SafeStyle'], ['goog.array', 'goog.asserts', 'goog.html.SafeUrl', 'goog.string.Const', 'goog.string.TypedString', 'goog.string.internal']);
goog.addDependency("html/safestylesheet.js", ['goog.html.SafeStyleSheet'], ['goog.array', 'goog.asserts', 'goog.html.SafeStyle', 'goog.object', 'goog.string.Const', 'goog.string.TypedString', 'goog.string.internal']);
goog.addDependency("labs/useragent/util.js", ['goog.labs.userAgent.util'], ['goog.string.internal']);
goog.addDependency("labs/useragent/browser.js", ['goog.labs.userAgent.browser'], ['goog.array', 'goog.labs.userAgent.util', 'goog.object', 'goog.string.internal']);
goog.addDependency("html/safehtml.js", ['goog.html.SafeHtml'], ['goog.array', 'goog.asserts', 'goog.dom.TagName', 'goog.dom.tags', 'goog.html.SafeScript', 'goog.html.SafeStyle', 'goog.html.SafeStyleSheet', 'goog.html.SafeUrl', 'goog.html.TrustedResourceUrl', 'goog.html.trustedtypes', 'goog.i18n.bidi.Dir', 'goog.i18n.bidi.DirectionalString', 'goog.labs.userAgent.browser', 'goog.object', 'goog.string.Const', 'goog.string.TypedString', 'goog.string.internal']);
goog.addDependency("html/uncheckedconversions.js", ['goog.html.uncheckedconversions'], ['goog.asserts', 'goog.html.SafeHtml', 'goog.html.SafeScript', 'goog.html.SafeStyle', 'goog.html.SafeStyleSheet', 'goog.html.SafeUrl', 'goog.html.TrustedResourceUrl', 'goog.string.Const', 'goog.string.internal']);
goog.addDependency("dom/safe.js", ['goog.dom.safe', 'goog.dom.safe.InsertAdjacentHtmlPosition'], ['goog.asserts', 'goog.dom.asserts', 'goog.functions', 'goog.html.SafeHtml', 'goog.html.SafeScript', 'goog.html.SafeStyle', 'goog.html.SafeUrl', 'goog.html.TrustedResourceUrl', 'goog.html.uncheckedconversions', 'goog.string.Const', 'goog.string.internal']);
goog.addDependency("string/string.js", ['goog.string', 'goog.string.Unicode'], ['goog.dom.safe', 'goog.html.uncheckedconversions', 'goog.string.Const', 'goog.string.internal']);
goog.addDependency("structs/structs.js", ['goog.structs'], ['goog.array', 'goog.object']);
goog.addDependency("math/math.js", ['goog.math'], ['goog.array', 'goog.asserts']);
goog.addDependency("iter/iter.js", ['goog.iter', 'goog.iter.Iterable', 'goog.iter.Iterator', 'goog.iter.StopIteration'], ['goog.array', 'goog.asserts', 'goog.functions', 'goog.math']);
goog.addDependency("structs/map.js", ['goog.structs.Map'], ['goog.iter.Iterator', 'goog.iter.StopIteration']);
goog.addDependency("uri/utils.js", ['goog.uri.utils', 'goog.uri.utils.ComponentIndex', 'goog.uri.utils.QueryArray', 'goog.uri.utils.QueryValue', 'goog.uri.utils.StandardQueryParam'], ['goog.array', 'goog.asserts', 'goog.string']);
goog.addDependency("uri/uri.js", ['goog.Uri', 'goog.Uri.QueryData'], ['goog.array', 'goog.asserts', 'goog.string', 'goog.structs', 'goog.structs.Map', 'goog.uri.utils', 'goog.uri.utils.ComponentIndex', 'goog.uri.utils.StandardQueryParam']);
goog.addDependency("reflect/reflect.js", ['goog.reflect'], []);
goog.addDependency("math/integer.js", ['goog.math.Integer'], ['goog.reflect']);
goog.addDependency("string/stringbuffer.js", ['goog.string.StringBuffer'], []);
goog.addDependency("math/long.js", ['goog.math.Long'], ['goog.asserts', 'goog.reflect']);
goog.addDependency("../cljs/core.js", ['cljs.core'], ['goog.string', 'goog.Uri', 'goog.object', 'goog.math.Integer', 'goog.string.StringBuffer', 'goog.array', 'goog.math.Long']);
goog.addDependency("debug/errorcontext.js", ['goog.debug.errorcontext'], []);
goog.addDependency("labs/useragent/engine.js", ['goog.labs.userAgent.engine'], ['goog.array', 'goog.labs.userAgent.util', 'goog.string']);
goog.addDependency("labs/useragent/platform.js", ['goog.labs.userAgent.platform'], ['goog.labs.userAgent.util', 'goog.string']);
goog.addDependency("useragent/useragent.js", ['goog.userAgent'], ['goog.labs.userAgent.browser', 'goog.labs.userAgent.engine', 'goog.labs.userAgent.platform', 'goog.labs.userAgent.util', 'goog.reflect', 'goog.string']);
goog.addDependency("debug/debug.js", ['goog.debug'], ['goog.array', 'goog.debug.errorcontext', 'goog.userAgent']);
goog.addDependency("debug/logrecord.js", ['goog.debug.LogRecord'], []);
goog.addDependency("debug/logbuffer.js", ['goog.debug.LogBuffer'], ['goog.asserts', 'goog.debug.LogRecord']);
goog.addDependency("debug/logger.js", ['goog.debug.LogManager', 'goog.debug.Loggable', 'goog.debug.Logger', 'goog.debug.Logger.Level'], ['goog.array', 'goog.asserts', 'goog.debug', 'goog.debug.LogBuffer', 'goog.debug.LogRecord']);
goog.addDependency("debug/relativetimeprovider.js", ['goog.debug.RelativeTimeProvider'], []);
goog.addDependency("debug/formatter.js", ['goog.debug.Formatter', 'goog.debug.HtmlFormatter', 'goog.debug.TextFormatter'], ['goog.debug', 'goog.debug.Logger', 'goog.debug.RelativeTimeProvider', 'goog.html.SafeHtml', 'goog.html.SafeUrl', 'goog.html.uncheckedconversions', 'goog.string.Const']);
goog.addDependency("debug/console.js", ['goog.debug.Console'], ['goog.debug.LogManager', 'goog.debug.Logger', 'goog.debug.TextFormatter']);
goog.addDependency("log/log.js", ['goog.log', 'goog.log.Level', 'goog.log.LogRecord', 'goog.log.Logger'], ['goog.debug', 'goog.debug.LogManager', 'goog.debug.LogRecord', 'goog.debug.Logger']);
goog.addDependency("promise/thenable.js", ['goog.Thenable'], []);
goog.addDependency("async/freelist.js", ['goog.async.FreeList'], []);
goog.addDependency("async/workqueue.js", ['goog.async.WorkItem', 'goog.async.WorkQueue'], ['goog.asserts', 'goog.async.FreeList']);
goog.addDependency("debug/entrypointregistry.js", ['goog.debug.EntryPointMonitor', 'goog.debug.entryPointRegistry'], ['goog.asserts']);
goog.addDependency("dom/browserfeature.js", ['goog.dom.BrowserFeature'], ['goog.userAgent']);
goog.addDependency("math/coordinate.js", ['goog.math.Coordinate'], ['goog.math']);
goog.addDependency("math/size.js", ['goog.math.Size'], []);
goog.addDependency("dom/dom.js", ['goog.dom', 'goog.dom.Appendable', 'goog.dom.DomHelper'], ['goog.array', 'goog.asserts', 'goog.dom.BrowserFeature', 'goog.dom.NodeType', 'goog.dom.TagName', 'goog.dom.safe', 'goog.html.SafeHtml', 'goog.html.uncheckedconversions', 'goog.math.Coordinate', 'goog.math.Size', 'goog.object', 'goog.string', 'goog.string.Unicode', 'goog.userAgent']);
goog.addDependency("async/nexttick.js", ['goog.async.nextTick', 'goog.async.throwException'], ['goog.debug.entryPointRegistry', 'goog.dom', 'goog.dom.TagName', 'goog.dom.safe', 'goog.functions', 'goog.html.SafeHtml', 'goog.html.TrustedResourceUrl', 'goog.labs.userAgent.browser', 'goog.labs.userAgent.engine', 'goog.string.Const']);
goog.addDependency("async/run.js", ['goog.async.run'], ['goog.async.WorkQueue', 'goog.async.nextTick', 'goog.async.throwException']);
goog.addDependency("promise/resolver.js", ['goog.promise.Resolver'], []);
goog.addDependency("promise/promise.js", ['goog.Promise'], ['goog.Thenable', 'goog.asserts', 'goog.async.FreeList', 'goog.async.run', 'goog.async.throwException', 'goog.debug.Error', 'goog.promise.Resolver']);
goog.addDependency("cssom/cssom.js", ['goog.cssom', 'goog.cssom.CssRuleType'], ['goog.array', 'goog.dom', 'goog.dom.TagName']);
goog.addDependency("../clojure/string.js", ['clojure.string'], ['goog.string', 'cljs.core', 'goog.string.StringBuffer']);
goog.addDependency("events/browserfeature.js", ['goog.events.BrowserFeature'], ['goog.userAgent']);
goog.addDependency("disposable/idisposable.js", ['goog.disposable.IDisposable'], []);
goog.addDependency("disposable/disposable.js", ['goog.Disposable', 'goog.dispose', 'goog.disposeAll'], ['goog.disposable.IDisposable']);
goog.addDependency("events/eventid.js", ['goog.events.EventId'], []);
goog.addDependency("events/event.js", ['goog.events.Event', 'goog.events.EventLike'], ['goog.Disposable', 'goog.events.EventId']);
goog.addDependency("events/eventtype.js", ['goog.events.EventType', 'goog.events.MouseAsMouseEventType', 'goog.events.MouseEvents', 'goog.events.PointerAsMouseEventType', 'goog.events.PointerAsTouchEventType', 'goog.events.PointerFallbackEventType', 'goog.events.PointerTouchFallbackEventType'], ['goog.events.BrowserFeature', 'goog.userAgent']);
goog.addDependency("events/browserevent.js", ['goog.events.BrowserEvent', 'goog.events.BrowserEvent.MouseButton', 'goog.events.BrowserEvent.PointerType'], ['goog.debug', 'goog.events.BrowserFeature', 'goog.events.Event', 'goog.events.EventType', 'goog.reflect', 'goog.userAgent']);
goog.addDependency("events/listenable.js", ['goog.events.Listenable', 'goog.events.ListenableKey'], ['goog.events.EventId']);
goog.addDependency("events/listener.js", ['goog.events.Listener'], ['goog.events.ListenableKey']);
goog.addDependency("events/listenermap.js", ['goog.events.ListenerMap'], ['goog.array', 'goog.events.Listener', 'goog.object']);
goog.addDependency("events/events.js", ['goog.events', 'goog.events.CaptureSimulationMode', 'goog.events.Key', 'goog.events.ListenableType'], ['goog.asserts', 'goog.debug.entryPointRegistry', 'goog.events.BrowserEvent', 'goog.events.BrowserFeature', 'goog.events.Listenable', 'goog.events.ListenerMap']);
goog.addDependency("../figwheel/main/css_reload.js", ['figwheel.main.css_reload'], ['goog.debug.Console', 'goog.Uri', 'cljs.core', 'goog.object', 'goog.log', 'goog.Promise', 'goog.cssom', 'clojure.string', 'goog.events']);
goog.addDependency("../devtools/version.js", ['devtools.version'], ['cljs.core']);
goog.addDependency("../cljs/pprint.js", ['cljs.pprint'], ['goog.string', 'cljs.core', 'goog.string.StringBuffer', 'clojure.string']);
goog.addDependency("../devtools/context.js", ['devtools.context'], ['cljs.core']);
goog.addDependency("../clojure/set.js", ['clojure.set'], ['cljs.core']);
goog.addDependency("../clojure/data.js", ['clojure.data'], ['cljs.core', 'clojure.set']);
goog.addDependency("../devtools/defaults.js", ['devtools.defaults'], ['cljs.core']);
goog.addDependency("../devtools/prefs.js", ['devtools.prefs'], ['cljs.core', 'devtools.defaults']);
goog.addDependency("../devtools/util.js", ['devtools.util'], ['cljs.core', 'devtools.version', 'goog.userAgent', 'cljs.pprint', 'devtools.context', 'clojure.data', 'devtools.prefs']);
goog.addDependency("../process/env.js", ['process.env'], ['cljs.core']);
goog.addDependency("storage/mechanism/mechanism.js", ['goog.storage.mechanism.Mechanism'], []);
goog.addDependency("mochikit/async/deferred.js", ['goog.async.Deferred', 'goog.async.Deferred.AlreadyCalledError', 'goog.async.Deferred.CanceledError'], ['goog.Promise', 'goog.Thenable', 'goog.array', 'goog.asserts', 'goog.debug.Error']);
goog.addDependency("net/jsloader.js", ['goog.net.jsloader', 'goog.net.jsloader.Error', 'goog.net.jsloader.ErrorCode', 'goog.net.jsloader.Options'], ['goog.array', 'goog.async.Deferred', 'goog.debug.Error', 'goog.dom', 'goog.dom.TagName', 'goog.dom.safe', 'goog.html.TrustedResourceUrl', 'goog.object']);
goog.addDependency("net/xhrlike.js", ['goog.net.XhrLike'], []);
goog.addDependency("storage/mechanism/errorcode.js", ['goog.storage.mechanism.ErrorCode'], []);
goog.addDependency("storage/mechanism/iterablemechanism.js", ['goog.storage.mechanism.IterableMechanism'], ['goog.array', 'goog.asserts', 'goog.iter', 'goog.storage.mechanism.Mechanism']);
goog.addDependency("storage/mechanism/ieuserdata.js", ['goog.storage.mechanism.IEUserData'], ['goog.asserts', 'goog.iter.Iterator', 'goog.iter.StopIteration', 'goog.storage.mechanism.ErrorCode', 'goog.storage.mechanism.IterableMechanism', 'goog.structs.Map', 'goog.userAgent']);
goog.addDependency("html/legacyconversions.js", ['goog.html.legacyconversions'], ['goog.html.SafeHtml', 'goog.html.SafeScript', 'goog.html.SafeStyle', 'goog.html.SafeStyleSheet', 'goog.html.SafeUrl', 'goog.html.TrustedResourceUrl']);
goog.addDependency("json/json.js", ['goog.json', 'goog.json.Replacer', 'goog.json.Reviver', 'goog.json.Serializer'], []);
goog.addDependency("events/eventtarget.js", ['goog.events.EventTarget'], ['goog.Disposable', 'goog.asserts', 'goog.events', 'goog.events.Event', 'goog.events.Listenable', 'goog.events.ListenerMap', 'goog.object']);
goog.addDependency("timer/timer.js", ['goog.Timer'], ['goog.Promise', 'goog.events.EventTarget']);
goog.addDependency("json/hybrid.js", ['goog.json.hybrid'], ['goog.asserts', 'goog.json']);
goog.addDependency("net/errorcode.js", ['goog.net.ErrorCode'], []);
goog.addDependency("net/eventtype.js", ['goog.net.EventType'], []);
goog.addDependency("net/httpstatus.js", ['goog.net.HttpStatus'], []);
goog.addDependency("net/xmlhttpfactory.js", ['goog.net.XmlHttpFactory'], ['goog.net.XhrLike']);
goog.addDependency("net/wrapperxmlhttpfactory.js", ['goog.net.WrapperXmlHttpFactory'], ['goog.net.XhrLike', 'goog.net.XmlHttpFactory']);
goog.addDependency("net/xmlhttp.js", ['goog.net.DefaultXmlHttpFactory', 'goog.net.XmlHttp', 'goog.net.XmlHttp.OptionType', 'goog.net.XmlHttp.ReadyState', 'goog.net.XmlHttpDefines'], ['goog.asserts', 'goog.net.WrapperXmlHttpFactory', 'goog.net.XmlHttpFactory']);
goog.addDependency("net/xhrio.js", ['goog.net.XhrIo', 'goog.net.XhrIo.ResponseType'], ['goog.Timer', 'goog.array', 'goog.asserts', 'goog.debug.entryPointRegistry', 'goog.events.EventTarget', 'goog.json.hybrid', 'goog.log', 'goog.net.ErrorCode', 'goog.net.EventType', 'goog.net.HttpStatus', 'goog.net.XmlHttp', 'goog.object', 'goog.string', 'goog.structs', 'goog.structs.Map', 'goog.uri.utils', 'goog.userAgent']);
goog.addDependency("../figwheel/main.js", ['figwheel.main'], ['cljs.core']);
goog.addDependency("storage/mechanism/html5webstorage.js", ['goog.storage.mechanism.HTML5WebStorage'], ['goog.asserts', 'goog.iter.Iterator', 'goog.iter.StopIteration', 'goog.storage.mechanism.ErrorCode', 'goog.storage.mechanism.IterableMechanism']);
goog.addDependency("storage/mechanism/html5localstorage.js", ['goog.storage.mechanism.HTML5LocalStorage'], ['goog.storage.mechanism.HTML5WebStorage']);
goog.addDependency("useragent/product.js", ['goog.userAgent.product'], ['goog.labs.userAgent.browser', 'goog.labs.userAgent.platform', 'goog.userAgent']);
goog.addDependency("dom/dataset.js", ['goog.dom.dataset'], ['goog.labs.userAgent.browser', 'goog.string', 'goog.userAgent.product']);
goog.addDependency("../figwheel/tools/heads_up.js", ['figwheel.tools.heads_up'], ['goog.dom', 'goog.string', 'cljs.core', 'goog.dom.dataset', 'goog.object', 'cljs.pprint', 'goog.Promise', 'clojure.string']);
goog.addDependency("string/stringformat.js", ['goog.string.format'], ['goog.string']);
goog.addDependency("../figwheel/core.js", ['figwheel.core'], ['goog.string', 'goog.debug.Console', 'cljs.core', 'goog.object', 'goog.events.EventTarget', 'figwheel.tools.heads_up', 'goog.log', 'clojure.set', 'goog.Promise', 'goog.string.format', 'goog.async.Deferred', 'goog.events.Event', 'clojure.string']);
goog.addDependency("storage/mechanism/html5sessionstorage.js", ['goog.storage.mechanism.HTML5SessionStorage'], ['goog.storage.mechanism.HTML5WebStorage']);
goog.addDependency("storage/mechanism/prefixedmechanism.js", ['goog.storage.mechanism.PrefixedMechanism'], ['goog.iter.Iterator', 'goog.storage.mechanism.IterableMechanism']);
goog.addDependency("storage/mechanism/mechanismfactory.js", ['goog.storage.mechanism.mechanismfactory'], ['goog.storage.mechanism.HTML5LocalStorage', 'goog.storage.mechanism.HTML5SessionStorage', 'goog.storage.mechanism.IEUserData', 'goog.storage.mechanism.PrefixedMechanism']);
goog.addDependency("net/websocket.js", ['goog.net.WebSocket', 'goog.net.WebSocket.ErrorEvent', 'goog.net.WebSocket.EventType', 'goog.net.WebSocket.MessageEvent'], ['goog.Timer', 'goog.asserts', 'goog.debug.entryPointRegistry', 'goog.events', 'goog.events.Event', 'goog.events.EventTarget', 'goog.log']);
goog.addDependency("../figwheel/repl.js", ['figwheel.repl'], ['goog.userAgent.product', 'goog.net.XhrIo', 'goog.json', 'goog.string', 'goog.debug.Console', 'goog.storage.mechanism.HTML5SessionStorage', 'goog.Uri.QueryData', 'goog.net.jsloader', 'goog.Uri', 'cljs.core', 'goog.object', 'goog.log', 'goog.html.legacyconversions', 'goog.Promise', 'goog.storage.mechanism.mechanismfactory', 'goog.net.WebSocket', 'clojure.string', 'goog.array']);
goog.addDependency("../devtools/protocols.js", ['devtools.protocols'], ['cljs.core']);
goog.addDependency("../devtools/format.js", ['devtools.format'], ['cljs.core', 'devtools.context']);
goog.addDependency("../devtools/munging.js", ['devtools.munging'], ['cljs.core', 'goog.object', 'goog.string.StringBuffer', 'devtools.context', 'clojure.string']);
goog.addDependency("../devtools/formatters/helpers.js", ['devtools.formatters.helpers'], ['cljs.core', 'devtools.protocols', 'devtools.format', 'devtools.prefs', 'devtools.munging']);
goog.addDependency("../devtools/formatters/state.js", ['devtools.formatters.state'], ['cljs.core']);
goog.addDependency("../clojure/walk.js", ['clojure.walk'], ['cljs.core']);
goog.addDependency("../devtools/formatters/templating.js", ['devtools.formatters.templating'], ['devtools.formatters.helpers', 'devtools.formatters.state', 'devtools.util', 'cljs.core', 'devtools.protocols', 'clojure.string', 'clojure.walk']);
goog.addDependency("../devtools/formatters/printing.js", ['devtools.formatters.printing'], ['devtools.formatters.helpers', 'devtools.formatters.state', 'cljs.core', 'devtools.protocols', 'devtools.format', 'devtools.prefs']);
goog.addDependency("../devtools/formatters/markup.js", ['devtools.formatters.markup'], ['devtools.formatters.helpers', 'devtools.formatters.printing', 'devtools.formatters.templating', 'devtools.formatters.state', 'cljs.core', 'devtools.munging']);
goog.addDependency("../cljs/stacktrace.js", ['cljs.stacktrace'], ['goog.string', 'cljs.core', 'clojure.string']);
goog.addDependency("../devtools/toolbox.js", ['devtools.toolbox'], ['devtools.formatters.markup', 'devtools.formatters.templating', 'cljs.core', 'devtools.protocols']);
goog.addDependency("../devtools/async.js", ['devtools.async'], ['cljs.core', 'goog.labs.userAgent.browser', 'devtools.context', 'goog.async.nextTick']);
goog.addDependency("../devtools/reporter.js", ['devtools.reporter'], ['devtools.util', 'cljs.core', 'devtools.context']);
goog.addDependency("../devtools/formatters/budgeting.js", ['devtools.formatters.budgeting'], ['devtools.formatters.helpers', 'devtools.formatters.markup', 'devtools.formatters.templating', 'devtools.formatters.state', 'cljs.core']);
goog.addDependency("../devtools/formatters/core.js", ['devtools.formatters.core'], ['devtools.formatters.helpers', 'devtools.formatters.markup', 'devtools.formatters.templating', 'devtools.formatters.state', 'cljs.core', 'devtools.reporter', 'devtools.protocols', 'devtools.formatters.budgeting', 'devtools.format', 'devtools.prefs']);
goog.addDependency("../devtools/formatters.js", ['devtools.formatters'], ['devtools.formatters.core', 'devtools.util', 'cljs.core', 'goog.labs.userAgent.browser', 'devtools.context', 'devtools.prefs']);
goog.addDependency("../devtools/hints.js", ['devtools.hints'], ['cljs.stacktrace', 'cljs.core', 'devtools.context', 'devtools.prefs']);
goog.addDependency("../devtools/core.js", ['devtools.core'], ['devtools.toolbox', 'devtools.util', 'cljs.core', 'devtools.async', 'devtools.formatters', 'devtools.hints', 'devtools.context', 'devtools.defaults', 'devtools.prefs']);
goog.addDependency("../figwheel/repl/preload.js", ['figwheel.repl.preload'], ['cljs.core', 'figwheel.repl']);
goog.addDependency("../devtools/preload.js", ['devtools.preload'], ['cljs.core', 'devtools.core', 'devtools.prefs']);
goog.addDependency("../cljs/test.js", ['cljs.test'], ['cljs.core', 'cljs.pprint', 'clojure.string']);
goog.addDependency("../databooze/core_test.js", ['databooze.core_test'], ['cljs.core', 'cljs.test']);
goog.addDependency("../figwheel/main/async_result.js", ['figwheel.main.async_result'], ['cljs.core', 'figwheel.repl']);
goog.addDependency("../figwheel/main/testing.js", ['figwheel.main.testing'], ['goog.dom', 'cljs.core', 'cljs.test', 'clojure.string', 'figwheel.main.async_result']);
goog.addDependency("../databooze/test_runner.js", ['databooze.test_runner'], ['databooze.core_test', 'cljs.core', 'figwheel.main.testing']);
goog.addDependency("../reagent/debug.js", ['reagent.debug'], ['cljs.core']);
goog.addDependency("../cljsjs/react/development/react.inc.js", ['react', 'cljsjs.react'], [], {'foreign-lib': true});
goog.addDependency("../cljsjs/react-dom/development/react-dom.inc.js", ['react_dom', 'cljsjs.react.dom'], ['react'], {'foreign-lib': true});
goog.addDependency("../reagent/impl/util.js", ['reagent.impl.util'], ['cljs.core', 'clojure.string']);
goog.addDependency("../reagent/impl/batching.js", ['reagent.impl.batching'], ['reagent.impl.util', 'cljs.core', 'reagent.debug']);
goog.addDependency("../reagent/ratom.js", ['reagent.ratom'], ['reagent.impl.util', 'cljs.core', 'goog.object', 'reagent.impl.batching', 'clojure.set', 'reagent.debug']);
goog.addDependency("../reagent/impl/component.js", ['reagent.impl.component'], ['reagent.impl.util', 'reagent.ratom', 'react', 'cljs.core', 'goog.object', 'reagent.impl.batching', 'reagent.debug']);
goog.addDependency("../reagent/impl/template.js", ['reagent.impl.template'], ['reagent.impl.util', 'reagent.ratom', 'react', 'cljs.core', 'goog.object', 'reagent.impl.batching', 'reagent.impl.component', 'reagent.debug', 'clojure.string', 'clojure.walk']);
goog.addDependency("../reagent/dom.js", ['reagent.dom'], ['reagent.impl.util', 'reagent.ratom', 'cljs.core', 'reagent.impl.template', 'reagent.impl.batching', 'react_dom']);
goog.addDependency("../reagent/core.js", ['reagent.core'], ['reagent.impl.util', 'reagent.ratom', 'react', 'cljs.core', 'reagent.impl.template', 'reagent.impl.batching', 'reagent.impl.component', 'reagent.debug', 'reagent.dom']);
goog.addDependency("../databooze/core.js", ['databooze.core'], ['goog.dom', 'reagent.core', 'cljs.core']);
goog.addDependency("../figwheel/main/generated/dev_auto_test_runner.js", ['figwheel.main.generated.dev_auto_test_runner'], ['cljs.core', 'figwheel.main.testing']);
|
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define lowering and related passes.
"""
from .passes import MlirDumpPlier, MlirBackend
from .settings import USE_MLIR
from numba.core.compiler_machinery import register_pass
from numba.core.lowering import Lower as orig_Lower
from numba.core.typed_passes import NoPythonBackend as orig_NoPythonBackend
# It looks like we don't need this, but it is inherited from BaseLower too:
# from numba.core.pylowering import PyLower as orig_PyLower
from .runtime import *
from .math_runtime import *
class mlir_lower(orig_Lower):
def lower(self):
if USE_MLIR:
self.emit_environment_object()
self.genlower = None
self.lower_normal_function(self.fndesc)
self.context.post_lowering(self.module, self.library)
else:
orig_Lower.lower(self)
def lower_normal_function(self, fndesc):
if USE_MLIR:
mod_ir = self.metadata['mlir_blob']
import llvmlite.binding as llvm
mod = llvm.parse_bitcode(mod_ir)
self.setup_function(fndesc)
            self.library.add_llvm_module(mod)
else:
            orig_Lower.lower_normal_function(self, fndesc)
@register_pass(mutates_CFG=True, analysis_only=False)
class mlir_NoPythonBackend(orig_NoPythonBackend):
def __init__(self):
orig_NoPythonBackend.__init__(self)
def run_pass(self, state):
import numba.core.lowering
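        # Temporarily monkey-patch numba's Lower so the stock backend pass
        # uses the MLIR-aware class, restoring the original afterwards.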
numba.core.lowering.Lower = mlir_lower
try:
res = orig_NoPythonBackend.run_pass(self, state)
finally:
numba.core.lowering.Lower = orig_Lower
return res
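# A minimal wiring sketch (illustrative only, not part of this module;
# MlirCompiler and f are hypothetical names). One plausible way to use
# mlir_NoPythonBackend is through a custom numba pipeline:
#
#   from numba import njit
#   from numba.core.compiler import CompilerBase, DefaultPassBuilder
#
#   class MlirCompiler(CompilerBase):
#       def define_pipelines(self):
#           pm = DefaultPassBuilder.define_nopython_pipeline(self.state)
#           # substitute mlir_NoPythonBackend for the stock NoPythonBackend
#           # pass here before finalizing (the exact wiring depends on how
#           # this package registers its passes)
#           pm.finalize()
#           return [pm]
#
#   @njit(pipeline_class=MlirCompiler)
#   def f(x):
#       return x + 1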
|
# Terminal_AD_and_Bat.py
from cyberbot import *
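# startup beep (piezo speaker, assumed wired to P22): 2000 Hz for 300 ms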
bot(22).tone(2000, 300)
display.off()
while True:
ad0 = pin0.read_analog()
ad1 = pin1.read_analog()
ad2 = pin2.read_analog()
ad4 = pin4.read_analog()
print('ad0 = %d, ad1 = %d, ad2 = %d, ad4 = %d' % (ad0, ad1, ad2, ad4))
sleep(1000)
|
/*!
 * jquery.wm-gridfolio - v1.0
 * developed by Welison Menezes
 * email: welisonmenezes@gmail.com
*
*
* Copyright 2014 Welison Menezes
* @license http://www.opensource.org/licenses/mit-license.html MIT License
* @license http://www.gnu.org/licenses/gpl.html GPL2 License
*/
!function(e){"use strict";e.fn.extend({WMGridfolio:function(n){var i={selectors:{item:"wmg-item",thumbnail:"wmg-thumbnail",details:"wmg-details",close:"wmg-close",arrow:"wmg-arrow"},thumbnail:{columns:6,minSize:100,margin:5},details:{minHeight:400,speed:350,full_w:!1},config:{open:"open",openToTop:!0,hasImg:!0,keepOpen:!1,onlyThumb:!1},callbacks:{CB_LoadGrid:!1,CB_ResizeGrid:!1,CB_OpenDetail:!1,CB_CloseDetail:!1,CB_CloseAll:!1}},n=e.extend(!0,i,n),t={container:!1,element:!1,thumbnail:!1,details:!1,close:!1,CB_LoadGrid:function(){e.isFunction(n.callbacks.CB_LoadGrid)&&n.callbacks.CB_LoadGrid.apply(this,arguments)},CB_ResizeGrid:function(){e.isFunction(n.callbacks.CB_ResizeGrid)&&n.callbacks.CB_ResizeGrid.apply(this,arguments)},CB_OpenDetail:function(){e.isFunction(n.callbacks.CB_OpenDetail)&&n.callbacks.CB_OpenDetail.apply(this,arguments)},CB_CloseDetail:function(){e.isFunction(n.callbacks.CB_CloseDetail)&&n.callbacks.CB_CloseDetail.apply(this,arguments)},CB_CloseAll:function(){e.isFunction(n.callbacks.CB_CloseAll)&&n.callbacks.CB_CloseAll.apply(this,arguments)}},s={getWidthColumn:function(e){for(var i=e,t=i.width(),s=n.thumbnail.columns,a=t/s;a<=n.thumbnail.minSize;){if(t<=n.thumbnail.minSize)return!1;s-=1,a=t/s}return a},openContent:function(i){var s=i,a=s.parent(),o=n.selectors.details,l=n.selectors.arrow,c=n.config.open,r=n.details.speed,d=e("."+o).find("> div").height(),m=n.selectors.close,p=d+(30-2*n.thumbnail.margin);if(s.addClass(c),s.parent().addClass(c),n.config.keepOpen===!0?(a.css({"margin-bottom":p+"px"}).addClass("wm-margin"),a.find("."+o).css({height:p+"px"}).addClass(c),t.CB_OpenDetail()):(a.animate({"margin-bottom":p+"px"},r).addClass("wm-margin"),a.find("."+o).stop().animate({height:p+"px"},r,function(){t.CB_OpenDetail()}).addClass(c),a.find("."+l).show()),n.config.openToTop&&a.offset()){var f=a.offset().top;e("html, body").animate({scrollTop:f},"500")}t.container=a.parent(),t.element=a,t.thumbnail=s,t.details=a.find("."+o),t.close=a.find("."+m)},closeContent:function(i,a){var o=i,l=a,c=n.selectors.details,r=n.selectors.thumbnail,d=n.selectors.item,m=n.selectors.arrow,p=n.selectors.close,f=n.config.open,h=n.details.speed;if(o.find("."+r).removeClass(f),o.find("."+d).removeClass(f),n.config.keepOpen===!0){var C=o.find(".wm-margin"),u=o.find("."+f);C.removeClass("wm-margin"),u.removeClass(f),l&&s.openContent(l),C.css({"margin-bottom":"0px"}),u.css({height:"0px"}),t.CB_CloseDetail()}else o.find(".wm-margin").animate({"margin-bottom":"0px"},h+50).removeClass("wm-margin"),o.find("."+c+"."+f).stop().animate({height:"0px"},h,function(){e(this).removeClass(f),l&&s.openContent(l),t.CB_CloseDetail()});o.parent().find("."+m).hide(),t.container=o,t.element=l===!1?o.find("."+d):l.parent(),t.thumbnail=l===!1?o.find("."+r):l,t.details=l===!1?o.find("."+c):l.parent().find("."+c),t.close=l===!1?o.find("."+p):l.parent().find("."+p)}},a={dynamicCSS:function(i,t,a,o){var l=i,c=t,r=a,d=o,m=n.selectors.details,p=s.getWidthColumn(l),f=n.config.open,h=n.thumbnail.margin,C=r.width()/2-2*h,u=e("."+m).find("> div").height(),g=u+(30-2*h);e("."+m).hasClass(f)&&(e("."+n.selectors.item+"."+f).css({"margin-bottom":g+"px"}),e("."+m+"."+f).css({height:g+"px"})),c.css({width:p+"px",height:p+"px"}),d.css({left:C+"px"}),n.config.hasImg&&(r.css({"line-height":Math.floor(p-2*h-1)+"px"}),r.find("img").css({"max-width":Math.ceil(p-2*h)+"px","max-height":Math.ceil(p-2*h)+"px"}))},toogleContent:function(i,t){var a=i,o=t;o.each(function(){var i=e(this),t=n.config.open;i.on("click",function(){return 
e(this).hasClass(t)?!1:(i.parent().parent().hasClass(t)?s.closeContent(a,i):s.openContent(i),i.parent().parent().addClass(t),!1)})})},hideContent:function(i){i.on("click",function(){var i=e(this),a=i.parent().parent().parent(),o=n.config.open;s.closeContent(a,!1),a.removeClass(o),t.CB_CloseAll()})},staticCSS:function(e,i,t,s,a){var o=i,l=s,c=n.thumbnail.margin;l.css({height:"0px"}),l.find("> div").css({"min-height":n.details.minHeight}),n.details.full_w||l.find("> div").css({"margin-left":c,"margin-right":c}),o.css({padding:c+"px"})}};return this.each(function(){var i=e(this),s=i.find("."+n.selectors.item),o=i.find("."+n.selectors.thumbnail),l=i.find("."+n.selectors.details),c=i.find("."+n.selectors.close),r=i.find("."+n.selectors.arrow);t.container=i,t.element=s,t.thumbnail=o,t.details=l,t.close=c,a.staticCSS(i,s,o,l,r),n.config.onlyThumb===!1&&(a.toogleContent(i,o),a.hideContent(c)),a.dynamicCSS(i,s,o,r),setTimeout(function(){a.dynamicCSS(i,s,o,r),t.CB_LoadGrid(),i.css({filter:"alpha(opacity=100)",zoom:"1",opacity:"1"})},100),e(window).resize(function(){a.dynamicCSS(i,s,o,r),setTimeout(function(){a.dynamicCSS(i,s,o,r),t.CB_ResizeGrid()},100)})})}})}(jQuery);
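/* Illustrative usage sketch (not part of the plugin; '.my-gridfolio' and the
 * option values are hypothetical — the class names come from the defaults above):
 *
 * $('.my-gridfolio').WMGridfolio({
 *     thumbnail: { columns: 4, margin: 10 },
 *     callbacks: { CB_OpenDetail: function () { console.log('detail opened'); } }
 * });
 */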
|
from operator import itemgetter
import pytest
from responses import GET
from spinta.testing.datasets import pull
@pytest.mark.skip('datasets')
def test_csv(rc, cli, app, responses):
responses.add(
GET, 'http://example.com/countries.csv',
status=200, content_type='text/plain; charset=utf-8',
body=(
'kodas,šalis\n'
'lt,Lietuva\n'
'lv,Latvija\n'
'ee,Estija'
),
)
assert len(pull(cli, rc, 'csv')) == 3
assert len(pull(cli, rc, 'csv')) == 0
app.authmodel('country', ['getall'])
app.authmodel('country/:dataset/csv/:resource/countries', ['getall'])
assert sorted([(x['code'], x['title']) for x in app.get('/country').json()['_data']]) == []
assert sorted([(x['code'], x['title']) for x in app.get('/country/:dataset/csv/:resource/countries').json()['_data']]) == [
('ee', 'Estija'),
('lt', 'Lietuva'),
('lv', 'Latvija'),
]
@pytest.mark.skip('datasets')
def test_denorm(rc, cli, app, responses):
responses.add(
GET, 'http://example.com/orgs.csv',
status=200, content_type='text/plain; charset=utf-8',
body=(
'govid,org,kodas,šalis\n'
'1,Org1,lt,Lietuva\n'
'2,Org2,lt,Lietuva\n'
'3,Org3,lv,Latvija'
),
)
assert len(pull(cli, rc, 'denorm')) == 5
assert len(pull(cli, rc, 'denorm')) == 0
lt = '552c4c243ec8c98c313255ea9bf16ee286591f8c'
lv = 'b5dcb86880816fb966cdfbbacd1f3406739464f4'
app.authmodel('org', ['getall'])
app.authmodel('org/:dataset/denorm/:resource/orgs', ['getall'])
app.authmodel('country', ['getall'])
app.authmodel('country/:dataset/denorm/:resource/orgs', ['getall'])
assert app.get('country').json()['_data'] == []
assert sorted([(x['_id'], x['title']) for x in app.get('country/:dataset/denorm/:resource/orgs').json()['_data']]) == [
(lt, 'Lietuva'),
(lv, 'Latvija'),
]
assert app.get('/org').json()['_data'] == []
assert sorted([(x['_id'], x['title'], x['country']['_id']) for x in app.get('/org/:dataset/denorm/:resource/orgs').json()['_data']], key=itemgetter(1)) == [
('23fcdb953846e7c709d2967fb549de67d975c010', 'Org1', lt),
('6f9f652eb6dae29e4406f1737dd6043af6142090', 'Org2', lt),
('11a0764da48b674ce0c09982e7c43002b510d5b5', 'Org3', lv),
]
|
from .copy_manager import DestinationAlreadyExistsError
from .native_copy_manager import NativeCopyManager
from .rsync_copy_manager import RsyncCopyManager
class UnknownCopyManagerError(Exception):
pass
class CopyManagerFactory(object):
@classmethod
def get(cls, manager_name):
if manager_name in globals():
return globals()[manager_name]()
raise UnknownCopyManagerError('Failed to find copy manager: {}'.format(manager_name))
__all__ = [
'CopyManagerFactory',
'DestinationAlreadyExistsError',
'NativeCopyManager',
'RsyncCopyManager',
'UnknownCopyManagerError'
]
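# Illustrative usage (not part of the original module): managers are looked
# up by class name in this module's globals(), so any class name in __all__
# works.
#
#   manager = CopyManagerFactory.get('RsyncCopyManager')   # returns an instance
#   CopyManagerFactory.get('FtpCopyManager')               # raises UnknownCopyManagerError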
|
app.directive('pagination', function () {
return {
templateUrl: '../angular/app/admin/orders/orders-pagination.template.html',
controller: ['$scope', PaginationController]
}
});
function PaginationController($scope) {
    // Builds the list of page numbers from 2 up to `to`; page 1 is excluded.
    $scope.rangeFrom = function (to)
    {
        var response = [];
        for (var i = 2; i <= to; i++)
        {
            response.push(i);
        }
        return response;
    };
}
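// Illustrative template usage (hypothetical markup and variable names; the
// real markup lives in orders-pagination.template.html):
//
//   <li ng-repeat="page in rangeFrom(totalPages)">{{ page }}</li>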
|
import string
import secrets
import math
import pickle
from spass.exceptions import ParameterError
def generate_random_password(length=9, letters=True, digits=True, punctuation=True, ignored_chars=''):
"""
Generates a cryptographically secure random password
:param length: Length of password
:param letters: True to use letters in password
:param digits: True to use digits in password
:param punctuation: True to use punctuation in password
:param ignored_chars: str containing all the characters that should be ignored during generation
:return: A dictionary containing the password and entropy
"""
if not (letters or digits or punctuation):
raise ParameterError('At least one set of characters must be selected for a password to be generated')
char_pool = ''
if letters:
char_pool += string.ascii_letters
if digits:
char_pool += string.digits
if punctuation:
char_pool += string.punctuation
char_list = [char for char in char_pool if char not in ignored_chars]
result = ''
for _ in range(length):
result += secrets.choice(char_list)
return {'password': result, 'entropy': __calc_entropy_password(result, len(char_list))}
def generate_passphrase(word_count=5, pad_length=0, digits=True, punctuation=True, ignored_symbols=''):
"""
Generates a passphrase with the specified amount of padding
:param word_count: Number of words in passphrase
:param pad_length: The number of padding characters
:param digits: True to use digits in padding
:param punctuation: True to use punctuation in padding
:param ignored_symbols: str containing all the symbols to ignore during padding generation
:return: A dictionary containing the passphrase, entropy and the +- deviation of the entropy
"""
if word_count < 2:
raise ParameterError('You need at least two words to make a passphrase')
with open('object/word_map.pickle', 'rb') as words:
word_bank = pickle.load(words)
placements, pad_bank_size = {}, 0
if pad_length > 0:
if not (digits or punctuation):
raise ParameterError('At least one set of characters must be selected for the padding')
placements, pad_bank_size = __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols)
result, words_used, coin = '', [], [0, 1]
for i in range(word_count):
if i in placements:
result += ''.join(sym for sym in placements[i])
word = secrets.choice(word_bank)
if secrets.choice(coin) == 0:
word = word[0].upper() + word[1:]
result += word
words_used.append(word)
if word_count in placements:
result += ''.join(sym for sym in placements[word_count])
entropy, deviation = __calc_entropy_passphrase(word_count, len(word_bank), pad_length, pad_bank_size)
return {'password': result, 'entropy': entropy, 'deviation': deviation}
def __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols):
"""
Randomly decides where to add padding and which characters to use
:param word_count: Number of words in passphrase
:param pad_length: Number of characters to use for padding
:param digits: True to use digits in padding
:param punctuation: True to use punctuation in padding
:param ignored_symbols: str containing all characters to ignore during padding generation
:return: A tuple containing the padding placements and the size of the character pool used to pad
"""
char_pool = ''
if digits:
char_pool += string.digits
if punctuation:
char_pool += string.punctuation
char_list = [char for char in char_pool if char not in ignored_symbols]
indexes = [index for index in range(word_count + 1)]
placements = {}
for _ in range(pad_length):
idx = secrets.choice(indexes)
if idx not in placements:
placements.update({idx: [secrets.choice(char_list)]})
else:
placements[idx].append(secrets.choice(char_list))
return placements, len(char_list)
def __calc_entropy_password(password, pool_size):
"""
Calculates the entropy of a random password
:param password: The password
:param pool_size: The size of the character pool used to generate password
:return: Entropy
"""
if not password or not pool_size:
return 0
    # entropy = log_2(R^L) { R: size of the character pool, L: length of the password }
inner = math.pow(pool_size, len(password))
return math.log(inner, 2)
def __calc_entropy_passphrase(word_count, word_bank_size, pad_length, pad_bank_size):
"""
Approximates the minimum entropy of the passphrase with its possible deviation
:param word_count: Number of words in passphrase
:param word_bank_size: Total number of words in the word bank
:param pad_length: Number of characters used in padding
:param pad_bank_size: The size of the character pool used to generate padding
:return: A tuple containing the minimum entropy and deviation
"""
    # Multiply the word bank size by 2, since each word may appear capitalized or lowercase
inner = math.pow(word_bank_size*2, word_count)
entropy = math.log(inner, 2)
inner = math.pow(pad_bank_size, pad_length)
deviation = math.log(inner, 2)
return entropy, deviation
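if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module; the
    # passphrase call assumes object/word_map.pickle exists as loaded above.
    pw = generate_random_password(length=12, ignored_chars='l1O0')
    print(pw['password'], '- entropy: %.1f bits' % pw['entropy'])
    phrase = generate_passphrase(word_count=4, pad_length=3)
    print(phrase['password'], '- entropy: %.1f (+/- %.1f) bits'
          % (phrase['entropy'], phrase['deviation']))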
|
import torch
from PIL import ImageDraw
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.ops import box_convert
from torchvision.datasets import VOCDetection
from utils.Color import getRandomColor
class YoloVOCDetection(VOCDetection):
def __init__(self,
root,
S=7,
B=2,
C=20,
imageSize=448,
year='2012',
image_set='train',
download=False,
transform=None):
self.S, self.B, self.C = S, B, C
self.VocLabelMap = ["aeroplane", "bicycle", "bird", "boat", "bottle",
"bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor"]
self.transform = transform or transforms.Compose([
transforms.Resize([imageSize, imageSize]),
transforms.ColorJitter(brightness=(1, 3), contrast=(1, 3), saturation=(1, 3), hue=(0.1, 0.3)),
transforms.ToTensor(),
])
super().__init__(root, year, image_set, download, self.transform, self.getTargetTransform)
def getTargetTransform(self, data):
length = self.B * 5 + self.C
target = torch.zeros(self.S, self.S, length)
boxes, labels = torch.zeros(len(data["annotation"]["object"]), 4), []
width, height = int(data["annotation"]["size"]["width"]), int(data["annotation"]["size"]["height"])
for index, obj in enumerate(data["annotation"]["object"]):
x0, y0, x1, y1 = obj["bndbox"]["xmin"], obj["bndbox"]["ymin"], obj["bndbox"]["xmax"], obj["bndbox"]["ymax"]
box = torch.tensor([int(x0) / width, int(y0) / height, int(x1) / width, int(y1) / height])
box = box_convert(boxes=box, in_fmt="xyxy", out_fmt="cxcywh")
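            # store sqrt(w) and sqrt(h), matching the square-root trick in the
            # YOLOv1 loss (squared back at decode time in the demo below)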
box[2:] = torch.sqrt(box[2:])
boxes[index] = box
labels.append(self.VocLabelMap.index(obj["name"]))
labels = torch.tensor(labels)
        # width of each grid cell
cellSize = 1.0 / float(self.S)
        # center coordinates of each bounding box
boxesXy = boxes[:, :2]
for box in range(labels.size()[0]):
xy = boxesXy[box]
            # y & x indices of the grid cell that contains this center point
ij = (xy / cellSize).ceil() - 1
i, j = int(ij[1]), int(ij[0])
            # set the confidence for the corresponding class to 1
target[i, j, labels[box].item()] = 1
            # set the last 5 * self.B entries to the bounding box coordinates
            # (plus a confidence of 1), repeated for each of the B boxes
target[i, j, self.C:] = torch.cat((boxes[box], torch.tensor([1])), 0).repeat(1, self.B)
return target
if __name__ == '__main__':
VocTrainSet = YoloVOCDetection(root="../../datasets/PASCAL-VOC", year="2012", image_set="train")
VocTrainLoader = DataLoader(VocTrainSet, batch_size=8)
trainIter = iter(VocTrainLoader)
    feature, label = next(trainIter)
print(feature[0].size())
print(label[0].size())
image = transforms.functional.to_pil_image(feature[0])
draw = ImageDraw.Draw(image)
w, h = image.size
for i in range(7):
for j in range(7):
if 1 in label[0, i, j, :20]:
index = label[0, i, j, :20].tolist().index(1)
b = label[0, i, j, 20:24]
b[2:] = torch.pow(b[2:], 2)
x0, y0, x1, y1 = box_convert(boxes=b, in_fmt="cxcywh", out_fmt="xyxy")
x0, y0, x1, y1 = x0 * w, y0 * h, x1 * w, y1 * h
print(x0, y0, x1, y1)
color = getRandomColor()
draw.rectangle([int(x0), int(y0), int(x1), int(y1)], outline=color, width=3)
draw.text([x0 + 5, y0 + 5], VocTrainSet.VocLabelMap[index], fill=color)
image.show()
|