text
stringlengths 3
1.05M
|
|---|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from peewee import fn
import datetime
from models import QuestionEvent, TaskPeriod
logger = logging.getLogger('data')
CONCERN_COUNT = 6 # needs to be updated to reflect count of concerns in our study
# A tuple of task periods that we want to discard.
# We discard all task periods for each user-task index pair.
# We found that there are a number of false detections.
# This is our way of removing them.
# At this time, all task periods with that user and task index are discarded, even
# if only one is incorrect. If any of the detected task periods are wrongly discarded,
# you should add them back in with the `EXTRA_TASK_PERIODS` tuple.
# For additional notes on why these tasks were incorrectly detected and how we corrected them,
# email <andrewhead@berkeley.edu> for research notes.
DISCARD_TASK_PERIODS = (
{'user_id': 5, 'task_index': 4},
{'user_id': 6, 'task_index': 0},
{'user_id': 9, 'task_index': 0},
{'user_id': 11, 'task_index': 2},
{'user_id': 13, 'task_index': 6},
{'user_id': 15, 'task_index': 0},
{'user_id': 16, 'task_index': 0},
{'user_id': 16, 'task_index': 1},
{'user_id': 16, 'task_index': 3},
{'user_id': 17, 'task_index': 1},
{'user_id': 17, 'task_index': 2},
{'user_id': 20, 'task_index': 0},
)
# These task periods will be added in after all task periods have been detected
# and those matching the discard patterns have been discarded.
# A task period is defined by a user ID, a task index, and a start and end time
EXTRA_TASK_PERIODS = ({
'user_id': 5,
'task_index': 4,
'start': datetime.datetime(2016, 6, 20, 14, 31, 58),
'end': datetime.datetime(2016, 6, 20, 14, 36, 35),
}, {
'user_id': 6,
'task_index': 0,
'start': datetime.datetime(2016, 6, 20, 15, 46, 16),
'end': datetime.datetime(2016, 6, 20, 15, 52, 49),
}, {
'user_id': 9,
'task_index': 0,
'start': datetime.datetime(2016, 6, 22, 18, 18, 22),
'end': datetime.datetime(2016, 6, 22, 18, 24, 40),
}, {
'user_id': 11,
'task_index': 2,
'start': datetime.datetime(2016, 6, 24, 9, 47, 24),
'end': datetime.datetime(2016, 6, 24, 9, 52, 20),
}, {
'user_id': 13,
'task_index': 6,
'start': datetime.datetime(2016, 6, 29, 11, 10, 49),
'end': datetime.datetime(2016, 6, 29, 11, 11, 59),
}, {
'user_id': 15,
'task_index': 0,
'start': datetime.datetime(2016, 7, 1, 16, 30, 40),
'end': datetime.datetime(2016, 7, 1, 16, 39, 13),
}, {
'user_id': 16,
'task_index': 0,
'start': datetime.datetime(2016, 7, 6, 18, 21, 13),
'end': datetime.datetime(2016, 7, 6, 18, 28, 4),
}, {
'user_id': 16,
'task_index': 1,
'start': datetime.datetime(2016, 7, 6, 18, 33, 1),
'end': datetime.datetime(2016, 7, 6, 18, 43, 30),
}, {
'user_id': 16,
'task_index': 3,
'start': datetime.datetime(2016, 7, 6, 19, 3, 35),
'end': datetime.datetime(2016, 7, 6, 19, 9, 53),
}, {
'user_id': 20,
'task_index': 0,
'start': datetime.datetime(2016, 7, 13, 16, 19, 44),
'end': datetime.datetime(2016, 7, 13, 16, 26, 28),
})
def _get_concern_index(user_id, task_index):
    '''
    Return the concern index assigned to a user for a given task.

    Re-applies the counter-balancing logic from the study design so we can
    recover which question each participant was answering for each task.
    The result lies in [0, CONCERN_COUNT - 1], except for the 0th task
    (the introductory task), which is flagged with -1.
    '''
    if task_index == 0:
        return -1
    # (user_id % C + task_index) % C == (user_id + task_index) % C in Python.
    return (user_id + task_index) % CONCERN_COUNT
def compute_task_periods(discard_periods=DISCARD_TASK_PERIODS, extra_periods=EXTRA_TASK_PERIODS):
    '''
    Detect the start/end time of every task each user worked on and store one
    TaskPeriod row per detected task, all tagged with a fresh `compute_index`.

    A task period spans from a 'get task' question event to the next
    'post task' event carrying the same question index.  Periods whose
    (user_id, task_index) pair appears in `discard_periods` are skipped;
    the hand-curated `extra_periods` records are appended verbatim afterwards.
    '''
    # Create a new index for this computation
    last_compute_index = TaskPeriod.select(fn.Max(TaskPeriod.compute_index)).scalar() or 0
    compute_index = last_compute_index + 1
    # Compute the ID of the last user to complete the study
    max_user_id = QuestionEvent.select(fn.Max(QuestionEvent.user_id)).scalar() or 0
    # Compute the time that each user spends in each question
    for user_id in range(0, max_user_id + 1):
        # All of this user's question events, oldest first.
        question_events = (
            QuestionEvent
            .select()
            .where(QuestionEvent.user_id == user_id)
            .order_by(QuestionEvent.time.asc())
        )
        start_task_event = None
        for question_event in question_events:
            # If the 'task' page has been loaded, store the question event that started it.
            if question_event.event_type == 'get task':
                start_task_event = question_event
            elif question_event.event_type == 'post task':
                if start_task_event is not None:
                    # Save an event if the index of task for a 'post' event that comes
                    # after a task starts matches the task index of the event that started it.
                    if question_event.question_index == start_task_event.question_index:
                        # Only save a task period if its user and index are not in the discard list.
                        task_discard_specification = {
                            'user_id': user_id,
                            'task_index': question_event.question_index,
                        }
                        if task_discard_specification not in discard_periods:
                            TaskPeriod.create(
                                compute_index=compute_index,
                                user_id=user_id,
                                task_index=question_event.question_index,
                                concern_index=_get_concern_index(
                                    user_id, question_event.question_index),
                                start=start_task_event.time,
                                end=question_event.time,
                            )
                # As long as we have seen an event for the end of a task, reset
                # state such that no "start task" event has been seen
                start_task_event = None
    # The caller may have provided a list of extra task periods to append to the computed results.
    # Add these records in one by one.
    for period_data in extra_periods:
        TaskPeriod.create(
            compute_index=compute_index,
            user_id=period_data['user_id'],
            task_index=period_data['task_index'],
            concern_index=_get_concern_index(period_data['user_id'], period_data['task_index']),
            start=period_data['start'],
            end=period_data['end'],
        )
def main(*args, **kwargs):
    # CLI entry point; extra runner-supplied arguments are accepted but unused.
    compute_task_periods()
def configure_parser(parser):
    # Attach this subcommand's help text to the shared argparse (sub)parser.
    parser.description = "Compute the time bounds of all tasks a user completed in our form."
|
import { Seq } from 'immutable';
import PromiseState from './PromiseState';
// Builds a reducer that tracks a PromiseState for one or more action types.
// For each watched type T it reacts to T, T_RESOLVED, T_REJECTED and
// T_INVALIDATE; anything else is delegated to the optional `handle` fallback.
export default ({
  type,
  types = [type],
  mapResolved = payload => payload,
  handle = state => state,
}) =>
  (state = PromiseState.INVALID, action) => {
    // Lazily scan the watched types until one of them matches the action.
    const matched = Seq(types)
      .map(watched => {
        if (action.type === watched) {
          return PromiseState.LOADING;
        }
        if (action.type === `${watched}_RESOLVED`) {
          return PromiseState.resolved(mapResolved(action.payload));
        }
        if (action.type === `${watched}_REJECTED`) {
          return PromiseState.rejected(action.payload);
        }
        if (action.type === `${watched}_INVALIDATE`) {
          return PromiseState.INVALID;
        }
        return null;
      })
      .find(candidate => candidate !== null);
    // No watched type matched: defer to the fallback handler.
    return matched === undefined ? handle(state, action) : matched;
  };
|
'use strict';
// Aliyun ONS (RocketMQ) settings, keyed by environment name.
exports.ons = {
  default: {
    // Internal name-server discovery endpoint for RocketMQ clients.
    onsAddr: 'http://onsaddr-internal.aliyun.com:8080/rocketmq/nsaddr4client-internal',
  },
};
|
import { getCandidatesList, getCandidate } from '@/api';
/**
 * Fetch the candidate list, then each candidate's full payload.
 *
 * Commits SET_CANDIDATES twice on purpose: first with the lightweight list
 * (fast initial render), then again with the detailed payloads once all the
 * per-candidate requests resolve.
 *
 * Generalized (backward compatible): office and region were hard-coded to
 * 'governador' / 'SC'; they are now overridable via the action payload and
 * default to the original values.
 *
 * @param {Object} context Vuex action context ({ commit }).
 * @param {Object} [options]
 * @param {string} [options.office='governador'] Office to query.
 * @param {string} [options.region='SC'] State/region code.
 */
const fetchPayload = async ({ commit }, { office = 'governador', region = 'SC' } = {}) => {
  try {
    const { candidatos } = await getCandidatesList(office, region);
    commit('SET_CANDIDATES', candidatos);
    const detailRequests = candidatos.map(candidato => getCandidate(candidato.id, region));
    const candidatesPayload = await Promise.all(detailRequests);
    commit('SET_CANDIDATES', candidatesPayload);
  } catch (e) {
    // Best-effort fetch: log and leave any previously committed state intact.
    console.error(e);
  }
};

export default {
  fetchPayload,
};
|
(function ($) {
    // Bail out if jQuery is not present on the page.
    if (!$) {
        return;
    }
    $(function () {
        var $registerForm = $('#RegisterForm');
        // Custom rule: the username may not be an email address, EXCEPT when
        // it exactly matches the email entered in the same form.
        $.validator.addMethod("customUsername", function (value, element) {
            if (value === $registerForm.find('input[name="EmailAddress"]').val()) {
                return true;
            }
            //Username can not be an email address (except the email address entered)
            return !$.validator.methods.email.apply(this, arguments);
        }, abp.localization.localize("RegisterFormUserNameInvalidMessage", "AbpMvc5"));
        $registerForm.validate({
            rules: {
                UserName: {
                    required: true,
                    customUsername: true
                }
            },
            // Toggle the error class on the field's wrapping .form-line.
            highlight: function (input) {
                $(input).parents('.form-line').addClass('error');
            },
            unhighlight: function (input) {
                $(input).parents('.form-line').removeClass('error');
            },
            // Render validation messages at the end of the field's form group.
            errorPlacement: function (error, element) {
                $(element).parents('.form-group').append(error);
            }
        });
    });
})(jQuery);
|
const router = require("express").Router();
const gitHubController = require("../../controllers/gitHubController");

// Collection routes: list every saved record, or create a new one.
router
  .route("/")
  .get(gitHubController.findAll)
  .post(gitHubController.create);

// Item route: remove a single record by its id.
router.route("/:id").delete(gitHubController.remove);

module.exports = router;
|
#include <algorithm> // std::swap
// A heap-allocating wrapper around a single int, demonstrating the rule of five.
class IntCell
{
  public:
    // Constructs a cell holding initialValue (0 by default).
    explicit IntCell( int initialValue = 0 )
      : storedValue{ new int{ initialValue } }
    {
    }

    // Copy constructor: deep-copies the pointee so each cell owns its own int.
    IntCell( const IntCell & rhs )
      : storedValue{ new int{ *rhs.storedValue } }
    {
    }

    // Move constructor: steal rhs's buffer and leave rhs empty, so rhs's
    // destructor performs a harmless `delete nullptr` instead of a double free.
    IntCell( IntCell && rhs ) noexcept
      : storedValue{ rhs.storedValue }
    {
        rhs.storedValue = nullptr;
    }

    ~IntCell( )
    {
        delete storedValue;
    }

    // Copy assignment: copy the pointed-to value, guarding against
    // self-assignment.
    const IntCell & operator=( const IntCell & rhs )
    {
        if( this != &rhs )
            *storedValue = *rhs.storedValue;
        return *this;
    }

    // Move assignment.  BUG FIX: the original declared this as
    // `operator=( IntCell & rhs )` — an lvalue reference — so it was a second
    // copy-assignment overload, and `a = b` with a non-const lvalue `b`
    // silently SWAPPED the two cells' values instead of copying.  Taking
    // `IntCell &&` makes it a real move; swapping pointers lets rhs's
    // destructor free our old buffer.
    IntCell & operator=( IntCell && rhs ) noexcept
    {
        std::swap( storedValue, rhs.storedValue );
        return *this;
    }

    int read( ) const;
    void write( int x );

  private:
    int *storedValue;   // owned; only nullptr in a moved-from cell
};

// Returns the stored value.
int IntCell::read( ) const
{
    return *storedValue;
}

// Overwrites the stored value with x.
void IntCell::write( int x )
{
    *storedValue = x;
}
|
#pragma once
#define MAXCHANNELS 16
#define MINRC 1000 //1000 to 2000 is old standard for RC channels. midRc needs to be adjustable.
#define MAXRC 2000 //1000 to 2000 is old standard for RC channels. midRc needs to be adjustable.
//config structure which is loaded by config
/* Per-craft RC configuration; populated from the stored config. */
typedef struct {
    float deadBand[MAXCHANNELS];       /* stick dead band per channel */
    uint32_t midRc[MAXCHANNELS];       /* channel midpoint (see MINRC/MAXRC note above) */
    uint32_t minRc[MAXCHANNELS];
    uint32_t maxRc[MAXCHANNELS];
    uint32_t channelMap[MAXCHANNELS];  /* maps TX channel order to the internal order */
    uint32_t rcCalibrated;
    uint32_t rxProtcol;                /* NOTE(review): typo of "rxProtocol"; renaming would break users of this field */
    uint32_t rxUsart;
    uint32_t rxInvertPin;
    uint32_t rxInvertPort;
    uint32_t rxInvertDirection;
    uint32_t bind;
    uint32_t shortThrow;
    uint32_t armMethod;                /* presumably one of the ARM_DOUBLE_* values below — confirm */
} rc_control_config;
/* Stick rate / expo curve settings (3-element arrays are presumably yaw/roll/pitch — confirm). */
typedef struct
{
    int useCurve;                      /* which expo curve to apply (see the expo enum below) */
    float rcSmoothingFactor;
    float rates[3];
    float acroPlus[3];
    float curveExpo[3];
} rc_rate;
/* One histogram bucket recorded during RX calibration.
   NOTE(review): "calibraation" is a typo, kept because the name is referenced elsewhere. */
typedef struct
{
    uint32_t dataValue;
    uint32_t timesOccurred;            /* how many times dataValue was observed */
} rx_calibraation_record;
/* Global arming/safety state flags. */
typedef struct
{
    uint32_t boardArmed;
    uint32_t latchFirstArm;
    uint32_t armModeSet;
    uint32_t armModeActive;
    uint32_t rcCalibrated;
    uint32_t boardCalibrated;
    uint32_t progMode;
    uint32_t throttleIsSafe;
    uint32_t rxTimeout;
    uint32_t failsafeHappend;          /* NOTE(review): typo of "failsafeHappened", kept for compatibility */
    uint32_t activeFailsafe;
} arming_structure;
extern volatile float throttleVelocity;
extern volatile uint32_t throttleIsSafe;
extern volatile arming_structure armingStructure;
#define RX_CHECK_AMOUNT 24             /* number of calibration histogram buckets */
/* Set of calibration buckets plus the highest data value seen — confirm exact semantics. */
typedef struct
{
    rx_calibraation_record rxCalibrationRecord[RX_CHECK_AMOUNT];
    uint32_t highestDataValue;
} rx_calibration_records;
enum {
ARM_DOUBLE_SINGLE = 0,
ARM_DOUBLE_DOUBLE = 1,
};
//Enumerate the different channels in code. The TX map is not affected by this. This is for internal code only.
enum {
YAW = 0,
ROLL = 1,
PITCH = 2,
THROTTLE = 3,
AUX1 = 4,
AUX2 = 5,
AUX3 = 6,
AUX4 = 7,
AUX5 = 8,
AUX6 = 9,
AUX7 = 10,
AUX8 = 11,
AUX9 = 12,
AUX10 = 13,
AUX11 = 14,
AUX12 = 15,
};
enum {
ACCX = 0,
ACCY = 1,
ACCZ = 2,
};
//We can use different styles of curves
//used in config.c string table
enum
{
NO_EXPO = 0,
SKITZO_EXPO = 1,
TARANIS_EXPO = 2,
FAST_EXPO = 3,
ACRO_PLUS = 4,
KISS_EXPO = 5,
KISS_EXPO2 = 6,
BETAFLOP_EXPO = 7,
EXPO_CURVE_END = 8,
};
//used in config.c string table
#define USING_MANUAL 0
#define USING_SPEK_R 1
#define USING_SPEK_T 2
#define USING_SBUS_R 3
#define USING_SBUS_T 4
#define USING_SUMD_R 5
#define USING_SUMD_T 6
#define USING_IBUS_R 7
#define USING_IBUS_T 8
#define USING_DSM2_R 9
#define USING_DSM2_T 10
#define USING_CPPM_R 11
#define USING_CPPM_T 12
#define USING_SPORT 13
#define USING_MSP 14
#define USING_RFVTX 15
#define USING_SMARTAUDIO 16
#define USING_RFOSD 17
#define USING_TRAMP 18
#define USING_CRSF_R 19
#define USING_CRSF_T 20
#define USING_CRSF_B 21
#define USING_CRSF_TELEM 22
#define USING_RX_END 23
extern volatile float smoothCurvedThrottle0_1;
extern volatile float trueCurvedThrottle0_1;
extern uint32_t ppmPin;
extern volatile float maxFlopRate[];
extern volatile float maxKissRate[];
extern volatile uint32_t armBoardAt;
extern volatile uint32_t progMode;
extern volatile SPM_VTX_DATA vtxData;
extern volatile uint32_t rx_timeout;
extern float trueRcCommandF[MAXCHANNELS]; //4 sticks. range is -1 to 1, directly related to stick position
extern float curvedRcCommandF[MAXCHANNELS]; //4 sticks. range is -1 to 1, this is the rcCommand after the curve is applied
extern float smoothedRcCommandF[MAXCHANNELS]; //4 sticks. range is -1 to 1, this is the smoothed rcCommand
extern uint32_t rxDataRaw[MAXCHANNELS];
extern uint32_t rxData[MAXCHANNELS];
extern volatile unsigned char isRxDataNew;
extern uint32_t skipRxMap;
extern uint32_t PreArmFilterCheck;
extern uint32_t activeFailsafe;
extern uint32_t failsafeHappend;
extern void CheckThrottleSafe(void);
extern void SpektrumBind (uint32_t bindNumber);
extern void InitRcData(void);
extern void InlineCollectRcCommand(void);
extern float InlineApplyRcCommandCurve(float rcCommand, uint32_t curveToUse, float expo, uint32_t axis);
extern void InlineRcSmoothing(float curvedRcCommandF[], float smoothedRcCommandF[]);
extern void ProcessArmingStructure(void);
extern void ProcessSpektrumPacket(uint32_t serialNumber);
extern void PowerInveter(uint32_t port, uint32_t pin, uint32_t direction);
extern void ProcessSbusPacket(uint32_t serialNumber);
extern void ProcessSumdPacket(uint8_t serialRxBuffer[], uint32_t frameSize);
extern void ProcessIbusPacket(uint8_t serialRxBuffer[], uint32_t frameSize);
extern void ProcessCrsfPacket(uint8_t serialRxBuffer[], uint32_t frameSize);
extern void RxUpdate(void);
extern void CheckFailsafe(void);
extern uint32_t SpektrumChannelMap(uint32_t inChannel);
extern uint32_t ChannelMap(uint32_t inChannel);
extern void PpmExtiCallback(uint32_t callbackNumber);
extern void SetRxDefaults(uint32_t rxProtocol, uint32_t usart);
extern int CalculateThrottleVelocity(void);
|
import { restore, filterWidget } from "__support__/e2e/cypress";
import { setAdHocFilter } from "../../native-filters/helpers/e2e-date-filter-helpers";
// Reproduction for metabase#17551: the "All time" date filter should include
// today when "Next" is chosen with "Include current" enabled.
// describe.skip keeps this as a known-issue reproduction — presumably until
// the underlying bug is fixed.
describe.skip("issue 17551", () => {
  beforeEach(() => {
    restore();
    cy.signInAsAdmin();
    // Base question yields three rows labelled yesterday / today / tomorrow.
    cy.createNativeQuestion({
      native: {
        query:
          "select 'yesterday' as \"text\", dateadd('day', -1, current_date::date) as \"date\" union all\nselect 'today', current_date::date union all\nselect 'tomorrow', dateadd('day', 1, current_date::date)\n",
      },
    }).then(({ body: { id: baseQuestionId } }) => {
      const questionDetails = {
        name: "17551 QB",
        query: { "source-table": `card__${baseQuestionId}` },
      };
      const filter = {
        name: "Date Filter",
        slug: "date_filter",
        id: "888188ad",
        type: "date/all-options",
        sectionId: "date",
      };
      const dashboardDetails = { parameters: [filter] };
      cy.createQuestionAndDashboard({
        questionDetails,
        dashboardDetails,
      }).then(({ body: card }) => {
        const { card_id, dashboard_id } = card;
        // Wire the dashboard's date filter to the question's "date" column.
        const mapFilterToCard = {
          parameter_mappings: [
            {
              parameter_id: filter.id,
              card_id,
              target: [
                "dimension",
                [
                  "field",
                  "date",
                  {
                    "base-type": "type/DateTime",
                  },
                ],
              ],
            },
          ],
        };
        cy.editDashboardCard(card, mapFilterToCard);
        cy.visit(`/dashboard/${dashboard_id}`);
      });
    });
  });
  it("should include today in the 'All time' date filter when chosen 'Next' (metabase#17551)", () => {
    filterWidget().click();
    setAdHocFilter({ condition: "Next", includeCurrent: true });
    cy.url().should("include", "?date_filter=next30days~");
    // Both tomorrow's and today's rows must survive the filter.
    cy.findByText("tomorrow");
    cy.findByText("today");
  });
});
|
/* ==========================================================================
Licensed under BSD 2clause license. See LICENSE file for more information
Author: Michał Łyszczek <michal.lyszczek@bofc.pl>
========================================================================== */
#ifndef PARAM_TESTS_H
#define PARAM_TESTS_H 1

/* Entry points for the parameterized-test examples (implemented in the
   matching .c file — presumably param_tests.c). */
void param_test_in_loop(void);
void named_param_test_in_loop(void);

#endif /* PARAM_TESTS_H */
|
import { shallow } from '@vue/test-utils'
import BUpload from '@components/upload/Upload'
describe('BUpload', () => {
  // Smoke test: the component mounts and registers under the expected name.
  it('is called', () => {
    const wrapper = shallow(BUpload)
    expect(wrapper.name()).toBe('BUpload')
    expect(wrapper.isVueInstance()).toBeTruthy()
  })
})
|
from Repositories.JobRepository import JobRepository
from Controllers.StateController import *
class JobController:
    """Guards JobRepository operations behind login-state middleware checks.

    Every public method preserves the existing contract: on success it returns
    whatever the repository returns; on any failure (including not being
    logged in) it returns the error message as a string.
    """

    def __init__(self):
        self.repository = JobRepository()

    def _run_authorized(self, middleware, action):
        # Shared guard for all operations: run `action` only when `middleware`
        # reports a valid session; report any failure as a string (the
        # contract the original five copy-pasted try/except blocks followed).
        try:
            if middleware():
                return action()
            raise Exception('Not Logged In')
        except Exception as error:
            return str(error)

    def add_job(self, location, requirements, company_email_id):
        """Create a job posting (company session required)."""
        return self._run_authorized(
            middleware_company,
            lambda: self.repository.create(location, requirements, company_email_id))

    def available_jobs(self):
        """List all job postings (any logged-in session)."""
        return self._run_authorized(middleware_general, self.repository.get_all)

    def company_jobs(self, email_id):
        """List postings belonging to one company (any logged-in session)."""
        return self._run_authorized(
            middleware_general,
            lambda: self.repository.get_jobs_by_company(email_id))

    def update_details(self, old_location, old_requirements, old_company_email_id,
                       new_location, new_requirements, new_company_email_id):
        """Replace a posting's details (company session required)."""
        return self._run_authorized(
            middleware_company,
            lambda: self.repository.update(old_location, old_requirements, old_company_email_id,
                                           new_location, new_requirements,
                                           new_company_email_id))

    def delete_job(self, location, requirements, company_email_id):
        """Remove a posting (company session required)."""
        return self._run_authorized(
            middleware_company,
            lambda: self.repository.delete(location, requirements, company_email_id))
|
// Babel configuration for a Vue CLI project.
// useBuiltIns: 'entry' — polyfills are pulled in via explicit core-js entry imports.
module.exports = {
  presets: [['@vue/app', { useBuiltIns: 'entry' }]]
};
|
const mix = require('laravel-mix');

/*
 |--------------------------------------------------------------------------
 | Mix Asset Management
 |--------------------------------------------------------------------------
 |
 | Mix provides a clean, fluent API for defining some Webpack build steps
 | for your Laravel application. By default, we are compiling the Sass
 | file for the application as well as bundling up all the JS files.
 |
 */

// Bundle the JS entry point (with Vue single-file-component support) and
// compile the Sass entry point; emit source maps for easier debugging.
mix.js('resources/js/app.js', 'public/js').vue()
    .sass('resources/sass/app.scss', 'public/css').sourceMaps();
|
#coding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import division
import csv
import numpy as np
import pdb
import os
import paddle
import paddle.fluid as fluid
class AverageMeter(object):
    """Running-average tracker: keeps the latest value, the weighted sum,
    the sample count, and the resulting mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero out every statistic.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        # Fold in `val`, observed `n` times, and refresh the running mean.
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class Logger(object):
    """Tab-separated CSV logger that can resume from an existing log file.

    On a fresh run (no file at `path`, or an empty `resume_path`) a new log is
    started and `header` is written.  When resuming, the header plus the first
    `begin_epoch` data rows are kept and the file is truncated after them, so
    logging continues from the resume point without duplicated epochs.
    """

    def __init__(self, path, header, resume_path, begin_epoch):
        if (not os.path.exists(path)) or (resume_path == ''):
            # Fresh log: create/truncate the file and write the column header.
            self.log_file = open(path, 'w+')
            self.logger = csv.writer(self.log_file, delimiter='\t')
            self.logger.writerow(header)
        else:
            # Resume: read back header + the first `begin_epoch` rows, then
            # rewrite the file with just those rows.
            self.log_file = open(path, 'r+')
            reader = csv.reader(self.log_file, delimiter='\t')
            lines = []
            print("begin = ", begin_epoch)
            for line in reader:
                lines.append(line)
                if len(lines) == begin_epoch + 1:
                    break
            self.log_file.close()
            self.log_file = open(path, 'w')
            self.logger = csv.writer(self.log_file, delimiter='\t')
            self.logger.writerows(lines[:begin_epoch + 1])
            self.log_file.flush()
        self.header = header

    def __del__(self):
        # BUG FIX: the original misspelled this as `__del`, which is just a
        # (name-mangled) regular method — the destructor never ran and the
        # file handle was only released at interpreter shutdown.
        try:
            self.log_file.close()
        except Exception:
            pass  # already closed, or __init__ failed before opening

    def log(self, values):
        """Write one row; `values` must contain every column in the header."""
        write_values = []
        for col in self.header:
            assert col in values
            write_values.append(values[col])
        self.logger.writerow(write_values)
        self.log_file.flush()
class Logger_MARS(object):
    """Variant of Logger used for MARS runs.

    Resuming is requested via a non-empty `resume_path`; if the log file does
    not exist yet, a fresh log is started regardless.
    """

    def __init__(self, path, header, resume_path, begin_epoch):
        if resume_path == '' or not os.path.exists(path):
            # Fresh log (no resume requested, or nothing to resume from).
            self._start_fresh(path, header)
        else:
            # Resume: keep header + the first `begin_epoch` rows, drop the rest.
            self.log_file = open(path, 'r+')
            reader = csv.reader(self.log_file, delimiter='\t')
            lines = []
            print("begin = ", begin_epoch)
            for line in reader:
                lines.append(line)
                if len(lines) == begin_epoch + 1:
                    break
            self.log_file.close()
            self.log_file = open(path, 'w')
            self.logger = csv.writer(self.log_file, delimiter='\t')
            self.logger.writerows(lines[:begin_epoch + 1])
            self.log_file.flush()
        self.header = header

    def _start_fresh(self, path, header):
        # Create/truncate the log file and emit the column header row.
        self.log_file = open(path, 'w+')
        self.logger = csv.writer(self.log_file, delimiter='\t')
        self.logger.writerow(header)

    def __del__(self):
        # BUG FIX: was misspelled `__del`, so the destructor never ran and the
        # file handle was never closed here.
        try:
            self.log_file.close()
        except Exception:
            pass  # already closed, or __init__ failed before opening

    def log(self, values):
        """Write one row; `values` must supply every header column."""
        write_values = []
        for col in self.header:
            assert col in values
            write_values.append(values[col])
        self.logger.writerow(write_values)
        self.log_file.flush()
def load_value_file(file_path):
    """Read a file containing a single number and return it as a float."""
    with open(file_path, 'r') as input_file:
        return float(input_file.read().rstrip('\n\r'))
def calculate_accuracy(outputs, targets):
    """Top-1 accuracy of `outputs` (per-class scores) against `targets`.

    Assumes `targets` is a 1-D integer tensor of class labels whose first
    dimension is the batch size — TODO confirm against callers.
    Returns correct/batch_size (a numpy value, via reduce_sum().numpy()).
    """
    #pdb.set_trace()
    batch_size = targets.shape[0]*1.0
    # Index of the highest score per row = predicted class.
    _, pred = fluid.layers.topk(outputs,1)
    # Flatten so the element-wise compare lines up with `targets`.
    pred = fluid.layers.reshape(pred,[-1])
    correct = fluid.layers.equal(pred,targets)#fluid.layers.reshape(targets,[1, -1]))
    correct = fluid.layers.cast(correct, dtype='int64')
    n_correct_elems = fluid.layers.reduce_sum(correct).numpy()
    return n_correct_elems/batch_size
|
from math import sqrt, ceil
def largestPrimeFactor(number):
    """Return the largest prime factor of `number` (expects number >= 2).

    Divides out each factor as soon as it is found; because trial divisors
    increase and smaller factors are removed first, every divisor that hits is
    prime, and whatever remains when i*i > number is the largest prime factor.

    Bug fix: the original reset the trial divisor to 2 after every division
    and looped while ``i <= ceil(sqrt(number))``, which returned 1 instead of
    2 for powers of two (e.g. the old code gave largestPrimeFactor(8) == 1).
    """
    i = 2
    while i * i <= number:
        if number % i == 0:
            number //= i
        else:
            i += 1
    return number

print(largestPrimeFactor(600851475143))
|
(function($) {
    'use strict';

    /**
     * Typewriter effect: types out each string in `toRotate`, pauses for
     * `period` ms, deletes it, then moves on to the next string, forever.
     *
     * @param {Element} el            element whose innerHTML shows the text
     * @param {string[]} toRotate     strings to cycle through
     * @param {string|number} period  pause (ms) after a word is fully typed
     */
    var TxtType = function(el, toRotate, period) {
        this.toRotate = toRotate;
        this.el = el;
        this.loopNum = 0;
        this.period = parseInt(period, 10) || 2000;
        this.txt = '';
        // Bug fix: initialize state BEFORE the first tick(). The original set
        // isDeleting after calling tick(), relying on `undefined` being falsy.
        this.isDeleting = false;
        this.tick();
    };

    TxtType.prototype.tick = function() {
        var i = this.loopNum % this.toRotate.length;
        var fullTxt = this.toRotate[i];

        // Grow or shrink the visible text by one character.
        if (this.isDeleting) {
            this.txt = fullTxt.substring(0, this.txt.length - 1);
        } else {
            this.txt = fullTxt.substring(0, this.txt.length + 1);
        }
        this.el.innerHTML = '<span class="wrap">' + this.txt + '</span>';

        var that = this;
        // Randomize the per-character delay slightly for a natural feel.
        var delta = 200 - Math.random() * 100;
        if (this.isDeleting) {
            delta /= 2; // deleting runs twice as fast as typing
        }

        if (!this.isDeleting && this.txt === fullTxt) {
            // Word fully typed: hold it for `period`, then start deleting.
            delta = this.period;
            this.isDeleting = true;
        } else if (this.isDeleting && this.txt === '') {
            // Word fully deleted: advance to the next string.
            this.isDeleting = false;
            this.loopNum++;
            delta = 500;
        }

        setTimeout(function() {
            that.tick();
        }, delta);
    };

    window.onload = function() {
        // Attach the effect to every element carrying a data-type JSON array.
        var elements = document.getElementsByClassName('typewrite');
        for (var i = 0; i < elements.length; i++) {
            var toRotate = elements[i].getAttribute('data-type');
            var period = elements[i].getAttribute('data-period');
            if (toRotate) {
                new TxtType(elements[i], JSON.parse(toRotate), period);
            }
        }
        // Inject the cursor style for the wrapped text.
        var css = document.createElement("style");
        css.type = "text/css";
        css.innerHTML = ".typewrite > .wrap { border-right: 0.05em solid rgba(147, 197, 253)}";
        document.body.appendChild(css);
    };
})(jQuery);
|
/*! angularjs-slider - v5.9.0 -
(c) Rafal Zajac <rzajac@gmail.com>, Valentin Hervieu <valentin@hervieu.me>, Jussi Saarivirta <jusasi@gmail.com>, Angelin Sirbu <angelin.sirbu@gmail.com> -
https://github.com/angular-slider/angularjs-slider -
2016-12-12 */
/*jslint unparam: true */
/*global angular: false, console: false, define, module */
(function(root, factory) {
'use strict';
/* istanbul ignore next */
if (typeof define === 'function' && define.amd) {
// AMD. Register as an anonymous module.
define(['angular'], factory);
} else if (typeof module === 'object' && module.exports) {
// Node. Does not work with strict CommonJS, but
// only CommonJS-like environments that support module.exports,
// like Node.
// to support bundler like browserify
var angularObj = angular || require('angular');
if ((!angularObj || !angularObj.module) && typeof angular != 'undefined') {
angularObj = angular;
}
module.exports = factory(angularObj);
} else {
// Browser globals (root is window)
factory(root.angular);
}
}(this, function(angular) {
'use strict';
var module = angular.module('rzModule', [])
.factory('RzSliderOptions', function() {
var defaultOptions = {
floor: 0,
ceil: null, //defaults to rz-slider-model
step: 1,
precision: 0,
minRange: null,
maxRange: null,
pushRange: false,
minLimit: null,
maxLimit: null,
id: null,
translate: null,
getLegend: null,
stepsArray: null,
bindIndexForStepsArray: false,
draggableRange: false,
draggableRangeOnly: false,
showSelectionBar: false,
showSelectionBarEnd: false,
showSelectionBarFromValue: null,
hidePointerLabels: false,
hideLimitLabels: false,
autoHideLimitLabels: true,
readOnly: false,
disabled: false,
interval: 350,
showTicks: false,
showTicksValues: false,
ticksArray: null,
ticksTooltip: null,
ticksValuesTooltip: null,
vertical: false,
getSelectionBarColor: null,
getTickColor: null,
getPointerColor: null,
keyboardSupport: true,
scale: 1,
enforceStep: true,
enforceRange: false,
noSwitching: false,
onlyBindHandles: false,
onStart: null,
onChange: null,
onEnd: null,
rightToLeft: false,
boundPointerLabels: true,
mergeRangeLabelsIfSame: false,
customTemplateScope: null,
logScale: false,
customValueToPosition: null,
customPositionToValue: null,
selectionBarGradient: null
};
var globalOptions = {};
var factory = {};
/**
* `options({})` allows global configuration of all sliders in the
* application.
*
* var app = angular.module( 'App', ['rzModule'], function( RzSliderOptions ) {
* // show ticks for all sliders
* RzSliderOptions.options( { showTicks: true } );
* });
*/
factory.options = function(value) {
angular.extend(globalOptions, value);
};
factory.getOptions = function(options) {
return angular.extend({}, defaultOptions, globalOptions, options);
};
return factory;
})
.factory('rzThrottle', ['$timeout', function($timeout) {
/**
* rzThrottle
*
* Taken from underscore project
*
* @param {Function} func
* @param {number} wait
* @param {ThrottleOptions} options
* @returns {Function}
*/
return function(func, wait, options) {
'use strict';
/* istanbul ignore next */
var getTime = (Date.now || function() {
return new Date().getTime();
});
var context, args, result;
var timeout = null;
var previous = 0;
options = options || {};
var later = function() {
previous = getTime();
timeout = null;
result = func.apply(context, args);
context = args = null;
};
return function() {
var now = getTime();
var remaining = wait - (now - previous);
context = this;
args = arguments;
if (remaining <= 0) {
$timeout.cancel(timeout);
timeout = null;
previous = now;
result = func.apply(context, args);
context = args = null;
} else if (!timeout && options.trailing !== false) {
timeout = $timeout(later, remaining);
}
return result;
};
}
}])
.factory('RzSlider', ['$timeout', '$document', '$window', '$compile', 'RzSliderOptions', 'rzThrottle', function($timeout, $document, $window, $compile, RzSliderOptions, rzThrottle) {
'use strict';
/**
* Slider
*
* @param {ngScope} scope The AngularJS scope
* @param {Element} sliderElem The slider directive element wrapped in jqLite
* @constructor
*/
var Slider = function(scope, sliderElem) {
/**
* The slider's scope
*
* @type {ngScope}
*/
this.scope = scope;
/**
* The slider inner low value (linked to rzSliderModel)
* @type {number}
*/
this.lowValue = 0;
/**
* The slider inner high value (linked to rzSliderHigh)
* @type {number}
*/
this.highValue = 0;
/**
* Slider element wrapped in jqLite
*
* @type {jqLite}
*/
this.sliderElem = sliderElem;
/**
* Slider type
*
* @type {boolean} Set to true for range slider
*/
this.range = this.scope.rzSliderModel !== undefined && this.scope.rzSliderHigh !== undefined;
/**
* Values recorded when first dragging the bar
*
* @type {Object}
*/
this.dragging = {
active: false,
value: 0,
difference: 0,
position: 0,
lowLimit: 0,
highLimit: 0
};
/**
* property that handle position (defaults to left for horizontal)
* @type {string}
*/
this.positionProperty = 'left';
/**
* property that handle dimension (defaults to width for horizontal)
* @type {string}
*/
this.dimensionProperty = 'width';
/**
* Half of the width or height of the slider handles
*
* @type {number}
*/
this.handleHalfDim = 0;
/**
* Maximum position the slider handle can have
*
* @type {number}
*/
this.maxPos = 0;
/**
* Precision
*
* @type {number}
*/
this.precision = 0;
/**
* Step
*
* @type {number}
*/
this.step = 1;
/**
* The name of the handle we are currently tracking
*
* @type {string}
*/
this.tracking = '';
/**
* Minimum value (floor) of the model
*
* @type {number}
*/
this.minValue = 0;
/**
* Maximum value (ceiling) of the model
*
* @type {number}
*/
this.maxValue = 0;
/**
* The delta between min and max value
*
* @type {number}
*/
this.valueRange = 0;
/**
* If showTicks/showTicksValues options are number.
* In this case, ticks values should be displayed below the slider.
* @type {boolean}
*/
this.intermediateTicks = false;
/**
* Set to true if init method already executed
*
* @type {boolean}
*/
this.initHasRun = false;
/**
* Used to call onStart on the first keydown event
*
* @type {boolean}
*/
this.firstKeyDown = false;
/**
* Internal flag to prevent watchers to be called when the sliders value are modified internally.
* @type {boolean}
*/
this.internalChange = false;
/**
* Internal flag to keep track of the visibility of combo label
* @type {boolean}
*/
this.cmbLabelShown = false;
/**
* Internal variable to keep track of the focus element
*/
this.currentFocusElement = null;
// Slider DOM elements wrapped in jqLite
this.fullBar = null; // The whole slider bar
this.selBar = null; // Highlight between two handles
this.minH = null; // Left slider handle
this.maxH = null; // Right slider handle
this.flrLab = null; // Floor label
this.ceilLab = null; // Ceiling label
this.minLab = null; // Label above the low value
this.maxLab = null; // Label above the high value
this.cmbLab = null; // Combined label
this.ticks = null; // The ticks
// Initialize slider
this.init();
};
// Add instance methods
Slider.prototype = {
/**
 * Initialize slider
 *
 * Syncs model values, wires up DOM elements, computes dimensions, binds
 * events and installs the scope watchers / throttled change handlers.
 *
 * @returns {undefined}
 */
init: function() {
  var thrLow, thrHigh,
    self = this;
  var calcDimFn = function() {
    self.calcViewDimensions();
  };
  // Order matters: options must be applied before the values are synced,
  // and DOM handles must exist before styles/dimensions are computed.
  this.applyOptions();
  this.syncLowValue();
  if (this.range)
    this.syncHighValue();
  this.initElemHandles();
  this.manageElementsStyle();
  this.setDisabledState();
  this.calcViewDimensions();
  this.setMinAndMax();
  this.addAccessibility();
  this.updateCeilLab();
  this.updateFloorLab();
  this.initHandles();
  this.manageEventsBindings();
  // Recalculate slider view dimensions
  this.scope.$on('reCalcViewDimensions', calcDimFn);
  // Recalculate stuff if view port dimensions have changed
  angular.element($window).on('resize', calcDimFn);
  this.initHasRun = true;
  // Watch for changes to the model; rendering is throttled to
  // options.interval milliseconds.
  thrLow = rzThrottle(function() {
    self.onLowHandleChange();
  }, self.options.interval);
  thrHigh = rzThrottle(function() {
    self.onHighHandleChange();
  }, self.options.interval);
  this.scope.$on('rzSliderForceRender', function() {
    self.resetLabelsValue();
    thrLow();
    if (self.range) {
      thrHigh();
    }
    self.resetSlider();
  });
  // Watchers (order is important because in case of simultaneous change,
  // watchers will be called in the same order)
  this.scope.$watch('rzSliderOptions()', function(newValue, oldValue) {
    if (newValue === oldValue)
      return;
    self.applyOptions(); // need to be called before synchronizing the values
    self.syncLowValue();
    if (self.range)
      self.syncHighValue();
    self.resetSlider();
  }, true); // deep watch: the options object may be mutated in place
  this.scope.$watch('rzSliderModel', function(newValue, oldValue) {
    // Skip changes the slider itself just wrote back to the scope.
    if (self.internalChange)
      return;
    if (newValue === oldValue)
      return;
    thrLow();
  });
  this.scope.$watch('rzSliderHigh', function(newValue, oldValue) {
    if (self.internalChange)
      return;
    if (newValue === oldValue)
      return;
    if (newValue != null)
      thrHigh();
    // Appearing/disappearing rzSliderHigh toggles single vs range mode
    // and requires a full option re-read and reset.
    if (self.range && newValue == null || !self.range && newValue != null) {
      self.applyOptions();
      self.resetSlider();
    }
  });
  this.scope.$on('$destroy', function() {
    self.unbindEvents();
    angular.element($window).off('resize', calcDimFn);
    self.currentFocusElement = null;
  });
},
findStepIndex: function(modelValue) {
var index = 0;
for (var i = 0; i < this.options.stepsArray.length; i++) {
var step = this.options.stepsArray[i];
if (step === modelValue) {
index = i;
break;
}
else if (angular.isDate(step)) {
if (step.getTime() === modelValue.getTime()) {
index = i;
break;
}
}
else if (angular.isObject(step)) {
if (angular.isDate(step.value) && step.value.getTime() === modelValue.getTime() || step.value === modelValue) {
index = i;
break;
}
}
}
return index;
},
syncLowValue: function() {
if (this.options.stepsArray) {
if (!this.options.bindIndexForStepsArray)
this.lowValue = this.findStepIndex(this.scope.rzSliderModel);
else
this.lowValue = this.scope.rzSliderModel
}
else
this.lowValue = this.scope.rzSliderModel;
},
syncHighValue: function() {
if (this.options.stepsArray) {
if (!this.options.bindIndexForStepsArray)
this.highValue = this.findStepIndex(this.scope.rzSliderHigh);
else
this.highValue = this.scope.rzSliderHigh
}
else
this.highValue = this.scope.rzSliderHigh;
},
getStepValue: function(sliderValue) {
var step = this.options.stepsArray[sliderValue];
if (angular.isDate(step))
return step;
if (angular.isObject(step))
return step.value;
return step;
},
applyLowValue: function() {
if (this.options.stepsArray) {
if (!this.options.bindIndexForStepsArray)
this.scope.rzSliderModel = this.getStepValue(this.lowValue);
else
this.scope.rzSliderModel = this.lowValue
}
else
this.scope.rzSliderModel = this.lowValue;
},
applyHighValue: function() {
if (this.options.stepsArray) {
if (!this.options.bindIndexForStepsArray)
this.scope.rzSliderHigh = this.getStepValue(this.highValue);
else
this.scope.rzSliderHigh = this.highValue
}
else
this.scope.rzSliderHigh = this.highValue;
},
/*
 * Reflow the slider when the low handle changes (called with throttle)
 */
onLowHandleChange: function() {
  // Re-read both values from the scope before recomputing bounds.
  this.syncLowValue();
  if (this.range)
    this.syncHighValue();
  this.setMinAndMax();
  // Reposition the low handle, then refresh everything that depends on
  // handle positions.
  this.updateLowHandle(this.valueToPosition(this.lowValue));
  this.updateSelectionBar();
  this.updateTicksScale();
  this.updateAriaAttributes();
  if (this.range) {
    this.updateCmbLabel();
  }
},
/*
 * Reflow the slider when the high handle changes (called with throttle)
 */
onHighHandleChange: function() {
  // Re-read both values from the scope before recomputing bounds.
  this.syncLowValue();
  this.syncHighValue();
  this.setMinAndMax();
  // Reposition the high handle, then refresh dependent visuals.
  this.updateHighHandle(this.valueToPosition(this.highValue));
  this.updateSelectionBar();
  this.updateTicksScale();
  this.updateCmbLabel();
  this.updateAriaAttributes();
},
/**
 * Read the user options and apply them to the slider model
 *
 * Merges user options over the defaults and derives internal state:
 * range mode, draggable flags, tick display, translation function.
 */
applyOptions: function() {
  var sliderOptions;
  if (this.scope.rzSliderOptions)
    sliderOptions = this.scope.rzSliderOptions();
  else
    sliderOptions = {};
  this.options = RzSliderOptions.getOptions(sliderOptions);
  // Guard against a non-positive step.
  if (this.options.step <= 0)
    this.options.step = 1;
  // Range mode is inferred from both bindings being present.
  this.range = this.scope.rzSliderModel !== undefined && this.scope.rzSliderHigh !== undefined;
  // Draggable-range options only make sense in range mode.
  this.options.draggableRange = this.range && this.options.draggableRange;
  this.options.draggableRangeOnly = this.range && this.options.draggableRangeOnly;
  if (this.options.draggableRangeOnly) {
    this.options.draggableRange = true;
  }
  this.options.showTicks = this.options.showTicks || this.options.showTicksValues || !!this.options.ticksArray;
  this.scope.showTicks = this.options.showTicks; //scope is used in the template
  // A numeric showTicks (custom spacing) or an explicit ticks array means
  // ticks can fall between regular steps.
  if (angular.isNumber(this.options.showTicks) || this.options.ticksArray)
    this.intermediateTicks = true;
  this.options.showSelectionBar = this.options.showSelectionBar || this.options.showSelectionBarEnd
    || this.options.showSelectionBarFromValue !== null;
  if (this.options.stepsArray) {
    this.parseStepsArray();
  } else {
    // Default translation is plain stringification.
    if (this.options.translate)
      this.customTrFn = this.options.translate;
    else
      this.customTrFn = function(value) {
        return String(value);
      };
    this.getLegend = this.options.getLegend;
  }
  // Vertical sliders position along 'bottom' and size along 'height'.
  if (this.options.vertical) {
    this.positionProperty = 'bottom';
    this.dimensionProperty = 'height';
  }
  if (this.options.customTemplateScope)
    this.scope.custom = this.options.customTemplateScope;
},
parseStepsArray: function() {
this.options.floor = 0;
this.options.ceil = this.options.stepsArray.length - 1;
this.options.step = 1;
if (this.options.translate) {
this.customTrFn = this.options.translate;
}
else {
this.customTrFn = function(modelValue) {
if (this.options.bindIndexForStepsArray)
return this.getStepValue(modelValue);
return modelValue;
};
}
this.getLegend = function(index) {
var step = this.options.stepsArray[index];
if (angular.isObject(step))
return step.legend;
return null;
};
},
/**
 * Resets slider
 *
 * Re-applies styles, bounds, labels, event bindings and dimensions;
 * called after option or mode changes.
 *
 * @returns {undefined}
 */
resetSlider: function() {
  this.manageElementsStyle();
  this.addAccessibility();
  this.setMinAndMax();
  this.updateCeilLab();
  this.updateFloorLab();
  // Full rebind so readOnly/disabled changes take effect.
  this.unbindEvents();
  this.manageEventsBindings();
  this.setDisabledState();
  this.calcViewDimensions();
  this.refocusPointerIfNeeded();
},
refocusPointerIfNeeded: function() {
if (this.currentFocusElement) {
this.onPointerFocus(this.currentFocusElement.pointer, this.currentFocusElement.ref);
this.focusElement(this.currentFocusElement.pointer)
}
},
/**
* Set the slider children to variables for easy access
*
* Run only once during initialization
*
* @returns {undefined}
*/
initElemHandles: function() {
// Assign all slider elements to object properties for easy access
angular.forEach(this.sliderElem.children(), function(elem, index) {
var jElem = angular.element(elem);
switch (index) {
case 0:
this.fullBar = jElem;
break;
case 1:
this.selBar = jElem;
break;
case 2:
this.minH = jElem;
break;
case 3:
this.maxH = jElem;
break;
case 4:
this.flrLab = jElem;
break;
case 5:
this.ceilLab = jElem;
break;
case 6:
this.minLab = jElem;
break;
case 7:
this.maxLab = jElem;
break;
case 8:
this.cmbLab = jElem;
break;
case 9:
this.ticks = jElem;
break;
}
}, this);
// Initialize position cache properties
this.selBar.rzsp = 0;
this.minH.rzsp = 0;
this.maxH.rzsp = 0;
this.flrLab.rzsp = 0;
this.ceilLab.rzsp = 0;
this.minLab.rzsp = 0;
this.maxLab.rzsp = 0;
this.cmbLab.rzsp = 0;
},
/**
 * Update each elements style based on options
 */
manageElementsStyle: function() {
  // The high handle only exists in range mode.
  if (!this.range)
    this.maxH.css('display', 'none');
  else
    this.maxH.css('display', '');
  // Limit labels are redundant when tick values are shown.
  this.alwaysHide(this.flrLab, this.options.showTicksValues || this.options.hideLimitLabels);
  this.alwaysHide(this.ceilLab, this.options.showTicksValues || this.options.hideLimitLabels);
  // Pointer labels duplicate tick values only when every tick is a step.
  var hideLabelsForTicks = this.options.showTicksValues && !this.intermediateTicks;
  this.alwaysHide(this.minLab, hideLabelsForTicks || this.options.hidePointerLabels);
  this.alwaysHide(this.maxLab, hideLabelsForTicks || !this.range || this.options.hidePointerLabels);
  this.alwaysHide(this.cmbLab, hideLabelsForTicks || !this.range || this.options.hidePointerLabels);
  this.alwaysHide(this.selBar, !this.range && !this.options.showSelectionBar);
  if (this.options.vertical)
    this.sliderElem.addClass('rz-vertical');
  if (this.options.draggableRange)
    this.selBar.addClass('rz-draggable');
  else
    this.selBar.removeClass('rz-draggable');
  if (this.intermediateTicks && this.options.showTicksValues)
    this.ticks.addClass('rz-ticks-values-under');
},
alwaysHide: function(el, hide) {
el.rzAlwaysHide = hide;
if (hide)
this.hideEl(el);
else
this.showEl(el)
},
/**
* Manage the events bindings based on readOnly and disabled options
*
* @returns {undefined}
*/
manageEventsBindings: function() {
if (this.options.disabled || this.options.readOnly)
this.unbindEvents();
else
this.bindEvents();
},
/**
* Set the disabled state based on rzSliderDisabled
*
* @returns {undefined}
*/
setDisabledState: function() {
if (this.options.disabled) {
this.sliderElem.attr('disabled', 'disabled');
} else {
this.sliderElem.attr('disabled', null);
}
},
/**
 * Reset label values
 *
 * Clears the cached label text (rzsv) so the next translateFn call
 * re-measures and re-renders both pointer labels.
 *
 * @return {undefined}
 */
resetLabelsValue: function() {
  this.minLab.rzsv = undefined;
  this.maxLab.rzsv = undefined;
},
/**
 * Initialize slider handles positions and labels
 *
 * Run only once during initialization and every time view port changes size
 *
 * @returns {undefined}
 */
initHandles: function() {
  this.updateLowHandle(this.valueToPosition(this.lowValue));
  /*
  the order here is important since the selection bar should be
  updated after the high handle but before the combined label
  */
  if (this.range)
    this.updateHighHandle(this.valueToPosition(this.highValue));
  this.updateSelectionBar();
  if (this.range)
    this.updateCmbLabel();
  this.updateTicksScale();
},
/**
* Translate value to human readable format
*
* @param {number|string} value
* @param {jqLite} label
* @param {String} which
* @param {boolean} [useCustomTr]
* @returns {undefined}
*/
translateFn: function(value, label, which, useCustomTr) {
useCustomTr = useCustomTr === undefined ? true : useCustomTr;
var valStr = '',
getDimension = false,
noLabelInjection = label.hasClass('no-label-injection');
if (useCustomTr) {
if (this.options.stepsArray && !this.options.bindIndexForStepsArray)
value = this.getStepValue(value);
valStr = String(this.customTrFn(value, this.options.id, which));
}
else {
valStr = String(value)
}
if (label.rzsv === undefined || label.rzsv.length !== valStr.length || (label.rzsv.length > 0 && label.rzsd === 0)) {
getDimension = true;
label.rzsv = valStr;
}
if (!noLabelInjection) {
label.html(valStr);
}
;
this.scope[which + 'Label'] = valStr;
// Update width only when length of the label have changed
if (getDimension) {
this.getDimension(label);
}
},
/**
 * Set maximum and minimum values for the slider and ensure the model and high
 * value match these limits
 * @returns {undefined}
 */
setMinAndMax: function() {
  this.step = +this.options.step;
  this.precision = +this.options.precision;
  this.minValue = this.options.floor;
  // log(0) is -Infinity, which would break position math.
  if (this.options.logScale && this.minValue === 0)
    throw Error("Can't use floor=0 with logarithmic scale");
  if (this.options.enforceStep) {
    this.lowValue = this.roundStep(this.lowValue);
    if (this.range)
      this.highValue = this.roundStep(this.highValue);
  }
  if (this.options.ceil != null)
    this.maxValue = this.options.ceil;
  else
    // No ceiling given: use the current (high) value and store it back
    // into the options so subsequent calls see a fixed ceiling.
    this.maxValue = this.options.ceil = this.range ? this.highValue : this.lowValue;
  if (this.options.enforceRange) {
    this.lowValue = this.sanitizeValue(this.lowValue);
    if (this.range)
      this.highValue = this.sanitizeValue(this.highValue);
  }
  // Write the possibly-adjusted values back to the scope.
  this.applyLowValue();
  if (this.range)
    this.applyHighValue();
  this.valueRange = this.maxValue - this.minValue;
},
/**
* Adds accessibility attributes
*
* Run only once during initialization
*
* @returns {undefined}
*/
addAccessibility: function() {
this.minH.attr('role', 'slider');
this.updateAriaAttributes();
if (this.options.keyboardSupport && !(this.options.readOnly || this.options.disabled))
this.minH.attr('tabindex', '0');
else
this.minH.attr('tabindex', '');
if (this.options.vertical)
this.minH.attr('aria-orientation', 'vertical');
if (this.range) {
this.maxH.attr('role', 'slider');
if (this.options.keyboardSupport && !(this.options.readOnly || this.options.disabled))
this.maxH.attr('tabindex', '0');
else
this.maxH.attr('tabindex', '');
if (this.options.vertical)
this.maxH.attr('aria-orientation', 'vertical');
}
},
/**
* Updates aria attributes according to current values
*/
updateAriaAttributes: function() {
this.minH.attr({
'aria-valuenow': this.scope.rzSliderModel,
'aria-valuetext': this.customTrFn(this.scope.rzSliderModel, this.options.id, 'model'),
'aria-valuemin': this.minValue,
'aria-valuemax': this.maxValue
});
if (this.range) {
this.maxH.attr({
'aria-valuenow': this.scope.rzSliderHigh,
'aria-valuetext': this.customTrFn(this.scope.rzSliderHigh, this.options.id, 'high'),
'aria-valuemin': this.minValue,
'aria-valuemax': this.maxValue
});
}
},
/**
 * Calculate dimensions that are dependent on view port size
 *
 * Run once during initialization and every time view port changes size.
 *
 * @returns {undefined}
 */
calcViewDimensions: function() {
  var handleWidth = this.getDimension(this.minH);
  this.handleHalfDim = handleWidth / 2;
  this.barDimension = this.getDimension(this.fullBar);
  // Maximum pixel position a handle can take along the bar.
  this.maxPos = this.barDimension - handleWidth;
  this.getDimension(this.sliderElem);
  // Cache the slider's absolute offset for event position calculations.
  this.sliderElem.rzsp = this.sliderElem[0].getBoundingClientRect()[this.positionProperty];
  // On resize (i.e. after the first init), reposition everything.
  if (this.initHasRun) {
    this.updateFloorLab();
    this.updateCeilLab();
    this.initHandles();
    var self = this;
    // Defer the tick update to the next digest/render cycle.
    $timeout(function() {
      self.updateTicksScale();
    });
  }
},
/**
* Update the ticks position
*
* @returns {undefined}
*/
updateTicksScale: function() {
if (!this.options.showTicks) return;
var ticksArray = this.options.ticksArray || this.getTicksArray(),
translate = this.options.vertical ? 'translateY' : 'translateX',
self = this;
if (this.options.rightToLeft)
ticksArray.reverse();
this.scope.ticks = ticksArray.map(function(value) {
var position = self.valueToPosition(value);
if (self.options.vertical)
position = self.maxPos - position;
var tick = {
selected: self.isTickSelected(value),
style: {
transform: translate + '(' + Math.round(position) + 'px)'
}
};
if (tick.selected && self.options.getSelectionBarColor) {
tick.style['background-color'] = self.getSelectionBarColor();
}
if (!tick.selected && self.options.getTickColor) {
tick.style['background-color'] = self.getTickColor(value);
}
if (self.options.ticksTooltip) {
tick.tooltip = self.options.ticksTooltip(value);
tick.tooltipPlacement = self.options.vertical ? 'right' : 'top';
}
if (self.options.showTicksValues) {
tick.value = self.getDisplayValue(value, 'tick-value');
if (self.options.ticksValuesTooltip) {
tick.valueTooltip = self.options.ticksValuesTooltip(value);
tick.valueTooltipPlacement = self.options.vertical ? 'right' : 'top';
}
}
if (self.getLegend) {
var legend = self.getLegend(value, self.options.id);
if (legend)
tick.legend = legend;
}
return tick;
});
},
getTicksArray: function() {
var step = this.step,
ticksArray = [];
if (this.intermediateTicks)
step = this.options.showTicks;
for (var value = this.minValue; value <= this.maxValue; value += step) {
ticksArray.push(value);
}
return ticksArray;
},
isTickSelected: function(value) {
if (!this.range) {
if (this.options.showSelectionBarFromValue !== null) {
var center = this.options.showSelectionBarFromValue;
if (this.lowValue > center && value >= center && value <= this.lowValue)
return true;
else if (this.lowValue < center && value <= center && value >= this.lowValue)
return true;
}
else if (this.options.showSelectionBarEnd) {
if (value >= this.lowValue)
return true;
}
else if (this.options.showSelectionBar && value <= this.lowValue)
return true;
}
if (this.range && value >= this.lowValue && value <= this.highValue)
return true;
return false;
},
/**
* Update position of the floor label
*
* @returns {undefined}
*/
updateFloorLab: function() {
this.translateFn(this.minValue, this.flrLab, 'floor');
this.getDimension(this.flrLab);
var position = this.options.rightToLeft ? this.barDimension - this.flrLab.rzsd : 0;
this.setPosition(this.flrLab, position);
},
/**
* Update position of the ceiling label
*
* @returns {undefined}
*/
updateCeilLab: function() {
this.translateFn(this.maxValue, this.ceilLab, 'ceil');
this.getDimension(this.ceilLab);
var position = this.options.rightToLeft ? 0 : this.barDimension - this.ceilLab.rzsd;
this.setPosition(this.ceilLab, position);
},
/**
* Update slider handles and label positions
*
* @param {string} which
* @param {number} newPos
*/
updateHandles: function(which, newPos) {
if (which === 'lowValue')
this.updateLowHandle(newPos);
else
this.updateHighHandle(newPos);
this.updateSelectionBar();
this.updateTicksScale();
if (this.range)
this.updateCmbLabel();
},
/**
* Helper function to work out the position for handle labels depending on RTL or not
*
* @param {string} labelName maxLab or minLab
* @param newPos
*
* @returns {number}
*/
getHandleLabelPos: function(labelName, newPos) {
var labelRzsd = this[labelName].rzsd,
nearHandlePos = newPos - labelRzsd / 2 + this.handleHalfDim,
endOfBarPos = this.barDimension - labelRzsd;
if (!this.options.boundPointerLabels)
return nearHandlePos;
if (this.options.rightToLeft && labelName === 'minLab' || !this.options.rightToLeft && labelName === 'maxLab') {
return Math.min(nearHandlePos, endOfBarPos);
} else {
return Math.min(Math.max(nearHandlePos, 0), endOfBarPos);
}
},
/**
* Update low slider handle position and label
*
* @param {number} newPos
* @returns {undefined}
*/
updateLowHandle: function(newPos) {
this.setPosition(this.minH, newPos);
this.translateFn(this.lowValue, this.minLab, 'model');
this.setPosition(this.minLab, this.getHandleLabelPos('minLab', newPos));
if (this.options.getPointerColor) {
var pointercolor = this.getPointerColor('min');
this.scope.minPointerStyle = {
backgroundColor: pointercolor
};
}
if (this.options.autoHideLimitLabels) {
this.shFloorCeil();
}
},
/**
* Update high slider handle position and label
*
* @param {number} newPos
* @returns {undefined}
*/
updateHighHandle: function(newPos) {
this.setPosition(this.maxH, newPos);
this.translateFn(this.highValue, this.maxLab, 'high');
this.setPosition(this.maxLab, this.getHandleLabelPos('maxLab', newPos));
if (this.options.getPointerColor) {
var pointercolor = this.getPointerColor('max');
this.scope.maxPointerStyle = {
backgroundColor: pointercolor
};
}
if (this.options.autoHideLimitLabels) {
this.shFloorCeil();
}
},
/**
 * Show/hide floor/ceiling label
 *
 * Hides a limit label whenever a pointer label (or the combined label)
 * overlaps it.
 *
 * @returns {undefined}
 */
shFloorCeil: function() {
  // Show based only on hideLimitLabels if pointer labels are hidden
  if (this.options.hidePointerLabels) {
    return;
  }
  var flHidden = false,
    clHidden = false,
    isMinLabAtFloor = this.isLabelBelowFloorLab(this.minLab),
    isMinLabAtCeil = this.isLabelAboveCeilLab(this.minLab),
    isMaxLabAtCeil = this.isLabelAboveCeilLab(this.maxLab),
    isCmbLabAtFloor = this.isLabelBelowFloorLab(this.cmbLab),
    isCmbLabAtCeil = this.isLabelAboveCeilLab(this.cmbLab);
  // Single-handle checks: the min label against both limits.
  if (isMinLabAtFloor) {
    flHidden = true;
    this.hideEl(this.flrLab);
  } else {
    flHidden = false;
    this.showEl(this.flrLab);
  }
  if (isMinLabAtCeil) {
    clHidden = true;
    this.hideEl(this.ceilLab);
  } else {
    clHidden = false;
    this.showEl(this.ceilLab);
  }
  if (this.range) {
    // In range mode the relevant overlapping label is the combined one
    // when it is shown, otherwise the max (ceil) / min (floor) labels.
    var hideCeil = this.cmbLabelShown ? isCmbLabAtCeil : isMaxLabAtCeil;
    var hideFloor = this.cmbLabelShown ? isCmbLabAtFloor : isMinLabAtFloor;
    if (hideCeil) {
      this.hideEl(this.ceilLab);
    } else if (!clHidden) {
      // Only re-show if the single-handle check above did not hide it.
      this.showEl(this.ceilLab);
    }
    // Hide or show floor label
    if (hideFloor) {
      this.hideEl(this.flrLab);
    } else if (!flHidden) {
      this.showEl(this.flrLab);
    }
  }
},
isLabelBelowFloorLab: function(label) {
var isRTL = this.options.rightToLeft,
pos = label.rzsp,
dim = label.rzsd,
floorPos = this.flrLab.rzsp,
floorDim = this.flrLab.rzsd;
return isRTL ?
pos + dim >= floorPos - 2 :
pos <= floorPos + floorDim + 2;
},
isLabelAboveCeilLab: function(label) {
var isRTL = this.options.rightToLeft,
pos = label.rzsp,
dim = label.rzsd,
ceilPos = this.ceilLab.rzsp,
ceilDim = this.ceilLab.rzsd;
return isRTL ?
pos <= ceilPos + ceilDim + 2 :
pos + dim >= ceilPos - 2;
},
/**
 * Update slider selection bar, combined label and range label
 *
 * @returns {undefined}
 */
updateSelectionBar: function() {
  var position = 0,
    dimension = 0,
    // In RTL the "end" of the bar is visually on the other side.
    isSelectionBarFromRight = this.options.rightToLeft ? !this.options.showSelectionBarEnd : this.options.showSelectionBarEnd,
    positionForRange = this.options.rightToLeft ? this.maxH.rzsp + this.handleHalfDim : this.minH.rzsp + this.handleHalfDim;
  if (this.range) {
    // Range mode: bar spans between the two handles.
    dimension = Math.abs(this.maxH.rzsp - this.minH.rzsp);
    position = positionForRange;
  }
  else {
    if (this.options.showSelectionBarFromValue !== null) {
      // Bar anchored at a fixed value, extending toward the handle.
      var center = this.options.showSelectionBarFromValue,
        centerPosition = this.valueToPosition(center),
        isModelGreaterThanCenter = this.options.rightToLeft ? this.lowValue <= center : this.lowValue > center;
      if (isModelGreaterThanCenter) {
        dimension = this.minH.rzsp - centerPosition;
        position = centerPosition + this.handleHalfDim;
      }
      else {
        dimension = centerPosition - this.minH.rzsp;
        position = this.minH.rzsp + this.handleHalfDim;
      }
    }
    else if (isSelectionBarFromRight) {
      // Bar from the handle to the end of the bar.
      dimension = Math.abs(this.maxPos - this.minH.rzsp) + this.handleHalfDim;
      position = this.minH.rzsp + this.handleHalfDim;
    } else {
      // Default: bar from the start of the bar to the handle.
      dimension = Math.abs(this.maxH.rzsp - this.minH.rzsp) + this.handleHalfDim;
      position = 0;
    }
  }
  this.setDimension(this.selBar, dimension);
  this.setPosition(this.selBar, position);
  if (this.options.getSelectionBarColor) {
    var color = this.getSelectionBarColor();
    this.scope.barStyle = {
      backgroundColor: color
    };
  } else if (this.options.selectionBarGradient) {
    // Gradient anchored at the from-value position (or 0); `reversed`
    // flips its direction when the bar extends the other way.
    var offset = this.options.showSelectionBarFromValue !== null ? this.valueToPosition(this.options.showSelectionBarFromValue) : 0,
      reversed = offset - position > 0 ^ isSelectionBarFromRight,
      direction = this.options.vertical ? (reversed ? 'bottom' : 'top') : (reversed ? 'left' : 'right');
    this.scope.barStyle = {
      backgroundImage: 'linear-gradient(to ' + direction + ', ' + this.options.selectionBarGradient.from + ' 0%,' + this.options.selectionBarGradient.to + ' 100%)'
    };
    if (this.options.vertical) {
      this.scope.barStyle.backgroundPosition = 'center ' + (offset + dimension + position + (reversed ? -this.handleHalfDim : 0)) + 'px';
      this.scope.barStyle.backgroundSize = '100% ' + (this.barDimension - this.handleHalfDim) + 'px';
    } else {
      this.scope.barStyle.backgroundPosition = (offset - position + (reversed ? this.handleHalfDim : 0)) + 'px center';
      this.scope.barStyle.backgroundSize = (this.barDimension - this.handleHalfDim) + 'px 100%';
    }
  }
},
/**
* Wrapper around the getSelectionBarColor of the user to pass to
* correct parameters
*/
getSelectionBarColor: function() {
if (this.range)
return this.options.getSelectionBarColor(this.scope.rzSliderModel, this.scope.rzSliderHigh);
return this.options.getSelectionBarColor(this.scope.rzSliderModel);
},
/**
* Wrapper around the getPointerColor of the user to pass to
* correct parameters
*/
getPointerColor: function(pointerType) {
if (pointerType === 'max') {
return this.options.getPointerColor(this.scope.rzSliderHigh, pointerType);
}
return this.options.getPointerColor(this.scope.rzSliderModel, pointerType);
},
/**
 * Wrapper around the getTickColor of the user to pass to
 * correct parameters
 */
getTickColor: function(value) {
  // Unlike the bar/pointer colors, tick color depends only on the value.
  return this.options.getTickColor(value);
},
/**
 * Update combined label position and value
 *
 * When the two pointer labels would overlap (within 10px), merge them
 * into a single combined label; otherwise show the individual labels.
 *
 * @returns {undefined}
 */
updateCmbLabel: function() {
  var isLabelOverlap = null;
  if (this.options.rightToLeft) {
    isLabelOverlap = this.minLab.rzsp - this.minLab.rzsd - 10 <= this.maxLab.rzsp;
  } else {
    isLabelOverlap = this.minLab.rzsp + this.minLab.rzsd + 10 >= this.maxLab.rzsp;
  }
  if (isLabelOverlap) {
    var lowTr = this.getDisplayValue(this.lowValue, 'model'),
      highTr = this.getDisplayValue(this.highValue, 'high'),
      labelVal = '';
    if (this.options.mergeRangeLabelsIfSame && lowTr === highTr) {
      labelVal = lowTr;
    } else {
      // Order the two values to match the reading direction.
      labelVal = this.options.rightToLeft ? highTr + ' - ' + lowTr : lowTr + ' - ' + highTr;
    }
    this.translateFn(labelVal, this.cmbLab, 'cmb', false);
    // Center the combined label on the selection bar, clamped inside the
    // bar when boundPointerLabels is set.
    var pos = this.options.boundPointerLabels ? Math.min(
      Math.max(
        this.selBar.rzsp + this.selBar.rzsd / 2 - this.cmbLab.rzsd / 2,
        0
      ),
      this.barDimension - this.cmbLab.rzsd
    ) : this.selBar.rzsp + this.selBar.rzsd / 2 - this.cmbLab.rzsd / 2;
    this.setPosition(this.cmbLab, pos);
    this.cmbLabelShown = true;
    this.hideEl(this.minLab);
    this.hideEl(this.maxLab);
    this.showEl(this.cmbLab);
  } else {
    this.cmbLabelShown = false;
    this.showEl(this.maxLab);
    this.showEl(this.minLab);
    this.hideEl(this.cmbLab);
  }
  if (this.options.autoHideLimitLabels) {
    this.shFloorCeil();
  }
},
/**
* Return the translated value if a translate function is provided else the original value
* @param value
* @param which if it's min or max handle
* @returns {*}
*/
getDisplayValue: function(value, which) {
if (this.options.stepsArray && !this.options.bindIndexForStepsArray) {
value = this.getStepValue(value);
}
return this.customTrFn(value, this.options.id, which);
},
/**
 * Round value to step and precision based on minValue
 *
 * @param {number} value
 * @param {number} customStep a custom step to override the defined step
 * @returns {number}
 */
roundStep: function(value, customStep) {
  var step = customStep ? customStep : this.step,
    // toPrecision(12) absorbs accumulated floating-point error before
    // rounding to the nearest whole number of steps.
    steppedDifference = parseFloat((value - this.minValue) / step).toPrecision(12);
  steppedDifference = Math.round(+steppedDifference) * step;
  // toFixed trims to the configured precision; unary + converts back.
  var newValue = (this.minValue + steppedDifference).toFixed(this.precision);
  return +newValue;
},
/**
* Hide element
*
* @param element
* @returns {jqLite} The jqLite wrapped DOM element
*/
hideEl: function(element) {
return element.css({
visibility: 'hidden'
});
},
/**
* Show element
*
* @param element The jqLite wrapped DOM element
* @returns {jqLite} The jqLite
*/
showEl: function(element) {
if (!!element.rzAlwaysHide) {
return element;
}
return element.css({
visibility: 'visible'
});
},
/**
* Set element left/top position depending on whether slider is horizontal or vertical
*
* @param {jqLite} elem The jqLite wrapped DOM element
* @param {number} pos
* @returns {number}
*/
setPosition: function(elem, pos) {
elem.rzsp = pos;
var css = {};
css[this.positionProperty] = Math.round(pos) + 'px';
elem.css(css);
return pos;
},
/**
* Get element width/height depending on whether slider is horizontal or vertical
*
* @param {jqLite} elem The jqLite wrapped DOM element
* @returns {number}
*/
getDimension: function(elem) {
var val = elem[0].getBoundingClientRect();
if (this.options.vertical)
elem.rzsd = (val.bottom - val.top) * this.options.scale;
else
elem.rzsd = (val.right - val.left) * this.options.scale;
return elem.rzsd;
},
/**
* Set element width/height depending on whether slider is horizontal or vertical
*
* @param {jqLite} elem The jqLite wrapped DOM element
* @param {number} dim
* @returns {number}
*/
setDimension: function(elem, dim) {
elem.rzsd = dim;
var css = {};
css[this.dimensionProperty] = Math.round(dim) + 'px';
elem.css(css);
return dim;
},
/**
* Returns a value that is within slider range
*
* @param {number} val
* @returns {number}
*/
sanitizeValue: function(val) {
return Math.min(Math.max(val, this.minValue), this.maxValue);
},
/**
* Translate value to pixel position
*
* @param {number} val
* @returns {number}
*/
valueToPosition: function(val) {
var fn = this.linearValueToPosition;
if (this.options.customValueToPosition)
fn = this.options.customValueToPosition;
else if (this.options.logScale)
fn = this.logValueToPosition;
val = this.sanitizeValue(val);
var percent = fn(val, this.minValue, this.maxValue) || 0;
if (this.options.rightToLeft)
percent = 1 - percent;
return percent * this.maxPos;
},
linearValueToPosition: function(val, minVal, maxVal) {
var range = maxVal - minVal;
return (val - minVal) / range;
},
logValueToPosition: function(val, minVal, maxVal) {
val = Math.log(val);
minVal = Math.log(minVal);
maxVal = Math.log(maxVal);
var range = maxVal - minVal;
return (val - minVal) / range;
},
/**
* Translate position to model value
*
* @param {number} position
* @returns {number}
*/
positionToValue: function(position) {
var percent = position / this.maxPos;
if (this.options.rightToLeft)
percent = 1 - percent;
var fn = this.linearPositionToValue;
if (this.options.customPositionToValue)
fn = this.options.customPositionToValue;
else if (this.options.logScale)
fn = this.logPositionToValue;
return fn(percent, this.minValue, this.maxValue) || 0;
},
linearPositionToValue: function(percent, minVal, maxVal) {
return percent * (maxVal - minVal) + minVal;
},
logPositionToValue: function(percent, minVal, maxVal) {
minVal = Math.log(minVal);
maxVal = Math.log(maxVal);
var value = percent * (maxVal - minVal) + minVal;
return Math.exp(value);
},
// Events
/**
 * Get the X-coordinate or Y-coordinate of an event
 *
 * Handles mouse events, raw touch events, and jQuery-wrapped touch
 * events (which nest the touches under originalEvent).
 *
 * @param {Object} event The event
 * @returns {number}
 */
getEventXY: function(event) {
  /* http://stackoverflow.com/a/12336075/282882 */
  //noinspection JSLint
  var clientXY = this.options.vertical ? 'clientY' : 'clientX';
  if (event[clientXY] !== undefined) {
    return event[clientXY];
  }
  // Touch event: read the first touch point, unwrapping jQuery's
  // originalEvent when present.
  return event.originalEvent === undefined ?
    event.touches[0][clientXY] : event.originalEvent.touches[0][clientXY];
},
/**
* Compute the event position depending on whether the slider is horizontal or vertical
* @param event
* @returns {number}
*/
getEventPosition: function(event) {
var sliderPos = this.sliderElem.rzsp,
eventPos = 0;
if (this.options.vertical)
eventPos = -this.getEventXY(event) + sliderPos;
else
eventPos = this.getEventXY(event) - sliderPos;
return eventPos * this.options.scale - this.handleHalfDim; // #346 handleHalfDim is already scaled
},
/**
* Get event names for move and event end
*
* @param {Event} event The event
*
* @return {{moveEvent: string, endEvent: string}}
*/
getEventNames: function(event) {
var eventNames = {
moveEvent: '',
endEvent: ''
};
if (event.touches || (event.originalEvent !== undefined && event.originalEvent.touches)) {
eventNames.moveEvent = 'touchmove';
eventNames.endEvent = 'touchend';
} else {
eventNames.moveEvent = 'mousemove';
eventNames.endEvent = 'mouseup';
}
return eventNames;
},
/**
 * Get the handle closest to an event.
 *
 * @param event {Event} The event
 * @returns {jqLite} The handle closest to the event.
 */
getNearestHandle: function(event) {
  // Single-handle sliders only have the low handle.
  if (!this.range) {
    return this.minH;
  }
  var position = this.getEventPosition(event),
    distanceMin = Math.abs(position - this.minH.rzsp),
    distanceMax = Math.abs(position - this.maxH.rzsp);
  if (distanceMin < distanceMax)
    return this.minH;
  else if (distanceMin > distanceMax)
    return this.maxH;
  else if (!this.options.rightToLeft)
    //if event is at the same distance from min/max then if it's at left of minH, we return minH else maxH
    return position < this.minH.rzsp ? this.minH : this.maxH;
  else
    //reverse in rtl
    return position > this.minH.rzsp ? this.minH : this.maxH;
},
/**
* Wrapper function to focus an angular element
*
* @param el {AngularElement} the element to focus
*/
focusElement: function(el) {
var DOM_ELEMENT = 0;
el[DOM_ELEMENT].focus();
},
/**
 * Bind mouse and touch events to slider handles
 *
 * @returns {undefined}
 */
bindEvents: function() {
  var barTracking, barStart, barMove;
  // The selection bar behaves either as a draggable range or as a plain
  // click target that moves the low handle.
  if (this.options.draggableRange) {
    barTracking = 'rzSliderDrag';
    barStart = this.onDragStart;
    barMove = this.onDragMove;
  } else {
    barTracking = 'lowValue';
    barStart = this.onStart;
    barMove = this.onMove;
  }
  // --- mouse wiring ---
  if (!this.options.onlyBindHandles) {
    this.selBar.on('mousedown', angular.bind(this, barStart, null, barTracking));
    this.selBar.on('mousedown', angular.bind(this, barMove, this.selBar));
  }
  if (this.options.draggableRangeOnly) {
    // Both handles start a bar drag; individual handle moves are disabled.
    this.minH.on('mousedown', angular.bind(this, barStart, null, barTracking));
    this.maxH.on('mousedown', angular.bind(this, barStart, null, barTracking));
  } else {
    this.minH.on('mousedown', angular.bind(this, this.onStart, this.minH, 'lowValue'));
    if (this.range) {
      this.maxH.on('mousedown', angular.bind(this, this.onStart, this.maxH, 'highValue'));
    }
    if (!this.options.onlyBindHandles) {
      this.fullBar.on('mousedown', angular.bind(this, this.onStart, null, null));
      this.fullBar.on('mousedown', angular.bind(this, this.onMove, this.fullBar));
      this.ticks.on('mousedown', angular.bind(this, this.onStart, null, null));
      this.ticks.on('mousedown', angular.bind(this, this.onTickClick, this.ticks));
    }
  }
  // --- touch wiring (mirror of the mouse wiring above) ---
  if (!this.options.onlyBindHandles) {
    this.selBar.on('touchstart', angular.bind(this, barStart, null, barTracking));
    this.selBar.on('touchstart', angular.bind(this, barMove, this.selBar));
  }
  if (this.options.draggableRangeOnly) {
    this.minH.on('touchstart', angular.bind(this, barStart, null, barTracking));
    this.maxH.on('touchstart', angular.bind(this, barStart, null, barTracking));
  } else {
    this.minH.on('touchstart', angular.bind(this, this.onStart, this.minH, 'lowValue'));
    if (this.range) {
      this.maxH.on('touchstart', angular.bind(this, this.onStart, this.maxH, 'highValue'));
    }
    if (!this.options.onlyBindHandles) {
      this.fullBar.on('touchstart', angular.bind(this, this.onStart, null, null));
      this.fullBar.on('touchstart', angular.bind(this, this.onMove, this.fullBar));
      this.ticks.on('touchstart', angular.bind(this, this.onStart, null, null));
      this.ticks.on('touchstart', angular.bind(this, this.onTickClick, this.ticks));
    }
  }
  // Keyboard support: focusing a handle arms the keydown/keyup handlers.
  if (this.options.keyboardSupport) {
    this.minH.on('focus', angular.bind(this, this.onPointerFocus, this.minH, 'lowValue'));
    if (this.range) {
      this.maxH.on('focus', angular.bind(this, this.onPointerFocus, this.maxH, 'highValue'));
    }
  }
},
/**
* Unbind mouse and touch events to slider handles
*
* @returns {undefined}
*/
unbindEvents: function() {
this.minH.off();
this.maxH.off();
this.fullBar.off();
this.selBar.off();
this.ticks.off();
},
/**
 * onStart event handler
 *
 * @param {?Object} pointer The jqLite wrapped DOM element; if null, the closest handle is used
 * @param {?string} ref The name of the handle being changed; if null, the closest handle's value is modified
 * @param {Event} event The event
 * @returns {undefined}
 */
onStart: function(pointer, ref, event) {
  var ehMove, ehEnd,
    eventNames = this.getEventNames(event);
  event.stopPropagation();
  event.preventDefault();
  // We have to do this in case the HTML where the sliders are on
  // have been animated into view.
  this.calcViewDimensions();
  if (pointer) {
    this.tracking = ref;
  } else {
    // No explicit handle given: track whichever handle is nearest the event.
    pointer = this.getNearestHandle(event);
    this.tracking = pointer === this.minH ? 'lowValue' : 'highValue';
  }
  pointer.addClass('rz-active');
  if (this.options.keyboardSupport)
    this.focusElement(pointer);
  // While a bar drag is active, route moves to onDragMove instead of onMove.
  ehMove = angular.bind(this, this.dragging.active ? this.onDragMove : this.onMove, pointer);
  ehEnd = angular.bind(this, this.onEnd, ehMove);
  $document.on(eventNames.moveEvent, ehMove);
  // 'one' auto-unbinds the end handler after it fires once.
  $document.one(eventNames.endEvent, ehEnd);
  this.callOnStart();
},
/**
 * onMove event handler
 *
 * @param {jqLite} pointer
 * @param {Event} event The event
 * @param {boolean} fromTick if the event occurred on a tick or not
 * @returns {undefined}
 */
onMove: function(pointer, event, fromTick) {
  var newPos = this.getEventPosition(event),
    newValue,
    // In right-to-left mode the visual ends of the bar are swapped.
    ceilValue = this.options.rightToLeft ? this.minValue : this.maxValue,
    flrValue = this.options.rightToLeft ? this.maxValue : this.minValue;
  if (newPos <= 0) {
    newValue = flrValue;
  } else if (newPos >= this.maxPos) {
    newValue = ceilValue;
  } else {
    newValue = this.positionToValue(newPos);
    // Clicks on ticks snap to the tick step when showTicks is numeric.
    if (fromTick && angular.isNumber(this.options.showTicks))
      newValue = this.roundStep(newValue, this.options.showTicks);
    else
      newValue = this.roundStep(newValue);
  }
  this.positionTrackingHandle(newValue);
},
/**
 * onEnd event handler
 *
 * @param {Function} ehMove The bound move event handler to unbind
 * @param {Event} event The event
 * @returns {undefined}
 */
onEnd: function(ehMove, event) {
  var moveEventName = this.getEventNames(event).moveEvent;
  // With keyboard support the handle keeps its active styling until blur.
  if (!this.options.keyboardSupport) {
    this.minH.removeClass('rz-active');
    this.maxH.removeClass('rz-active');
    this.tracking = '';
  }
  this.dragging.active = false;
  $document.off(moveEventName, ehMove);
  this.callOnEnd();
},
/**
 * onTickClick event handler — delegates to onMove with fromTick set
 *
 * @param {jqLite} pointer
 * @param {Event} event The event
 * @returns {undefined}
 */
onTickClick: function(pointer, event) {
  this.onMove(pointer, event, true);
},
/**
 * onPointerFocus event handler
 *
 * Arms keyboard handling for the focused handle and records it as the
 * currently focused element.
 *
 * @param {jqLite} pointer The handle that received focus
 * @param {string} ref 'lowValue' or 'highValue'
 * @returns {undefined}
 */
onPointerFocus: function(pointer, ref) {
  this.tracking = ref;
  // 'one' so the blur teardown runs at most once per focus cycle.
  pointer.one('blur', angular.bind(this, this.onPointerBlur, pointer));
  pointer.on('keydown', angular.bind(this, this.onKeyboardEvent));
  pointer.on('keyup', angular.bind(this, this.onKeyUp));
  // The first keydown after focus triggers the onStart callback.
  this.firstKeyDown = true;
  pointer.addClass('rz-active');
  this.currentFocusElement = {
    pointer: pointer,
    ref: ref
  };
},
/**
 * onKeyUp event handler
 *
 * Re-arms the onStart callback for the next key press and signals the end
 * of the current keyboard interaction.
 *
 * @returns {undefined}
 */
onKeyUp: function() {
  this.firstKeyDown = true;
  this.callOnEnd();
},
onPointerBlur: function(pointer) {
pointer.off('keydown');
pointer.off('keyup');
this.tracking = '';
pointer.removeClass('rz-active');
this.currentFocusElement = null
},
/**
* Key actions helper function
*
* @param {number} currentValue value of the slider
*
* @returns {?Object} action value mappings
*/
getKeyActions: function(currentValue) {
var increaseStep = currentValue + this.step,
decreaseStep = currentValue - this.step,
increasePage = currentValue + this.valueRange / 10,
decreasePage = currentValue - this.valueRange / 10;
//Left to right default actions
var actions = {
'UP': increaseStep,
'DOWN': decreaseStep,
'LEFT': decreaseStep,
'RIGHT': increaseStep,
'PAGEUP': increasePage,
'PAGEDOWN': decreasePage,
'HOME': this.minValue,
'END': this.maxValue
};
//right to left means swapping right and left arrows
if (this.options.rightToLeft) {
actions.LEFT = increaseStep;
actions.RIGHT = decreaseStep;
// right to left and vertical means we also swap up and down
if (this.options.vertical) {
actions.UP = decreaseStep;
actions.DOWN = increaseStep;
}
}
return actions;
},
/**
 * onKeyboardEvent event handler
 *
 * Maps arrow/page/home/end keys to a new slider value and applies it
 * asynchronously via $timeout.
 *
 * @param {Event} event The keydown event
 * @returns {undefined}
 */
onKeyboardEvent: function(event) {
  var currentValue = this[this.tracking],
    keyCode = event.keyCode || event.which, // 'which' is the legacy fallback
    keys = {
      38: 'UP',
      40: 'DOWN',
      37: 'LEFT',
      39: 'RIGHT',
      33: 'PAGEUP',
      34: 'PAGEDOWN',
      36: 'HOME',
      35: 'END'
    },
    actions = this.getKeyActions(currentValue),
    key = keys[keyCode],
    action = actions[key];
  // Ignore unmapped keys and events that arrive while no handle is tracked.
  if (action == null || this.tracking === '') return;
  event.preventDefault();
  if (this.firstKeyDown) {
    this.firstKeyDown = false;
    this.callOnStart();
  }
  var self = this;
  $timeout(function() {
    var newValue = self.roundStep(self.sanitizeValue(action));
    if (!self.options.draggableRangeOnly) {
      self.positionTrackingHandle(newValue);
    }
    else {
      // Move the whole range, preserving its width and clamping at the ends.
      var difference = self.highValue - self.lowValue,
        newMinValue, newMaxValue;
      if (self.tracking === 'lowValue') {
        newMinValue = newValue;
        newMaxValue = newValue + difference;
        if (newMaxValue > self.maxValue) {
          newMaxValue = self.maxValue;
          newMinValue = newMaxValue - difference;
        }
      } else {
        newMaxValue = newValue;
        newMinValue = newValue - difference;
        if (newMinValue < self.minValue) {
          newMinValue = self.minValue;
          newMaxValue = newMinValue + difference;
        }
      }
      self.positionTrackingBar(newMinValue, newMaxValue);
    }
  });
},
/**
 * onDragStart event handler
 *
 * Handles dragging of the middle bar.
 *
 * @param {Object} pointer The jqLite wrapped DOM element
 * @param {string} ref One of the refLow, refHigh values
 * @param {Event} event The event
 * @returns {undefined}
 */
onDragStart: function(pointer, ref, event) {
  var position = this.getEventPosition(event);
  // Snapshot the drag geometry: the range width plus the distance from the
  // grab point to each handle (offsets mirror in right-to-left mode).
  this.dragging = {
    active: true,
    value: this.positionToValue(position),
    difference: this.highValue - this.lowValue,
    lowLimit: this.options.rightToLeft ? this.minH.rzsp - position : position - this.minH.rzsp,
    highLimit: this.options.rightToLeft ? position - this.maxH.rzsp : this.maxH.rzsp - position
  };
  this.onStart(pointer, ref, event);
},
/**
* getValue helper function
*
* gets max or min value depending on whether the newPos is outOfBounds above or below the bar and rightToLeft
*
* @param {string} type 'max' || 'min' The value we are calculating
* @param {number} newPos The new position
* @param {boolean} outOfBounds Is the new position above or below the max/min?
* @param {boolean} isAbove Is the new position above the bar if out of bounds?
*
* @returns {number}
*/
getValue: function(type, newPos, outOfBounds, isAbove) {
var isRTL = this.options.rightToLeft,
value = null;
if (type === 'min') {
if (outOfBounds) {
if (isAbove) {
value = isRTL ? this.minValue : this.maxValue - this.dragging.difference;
} else {
value = isRTL ? this.maxValue - this.dragging.difference : this.minValue;
}
} else {
value = isRTL ? this.positionToValue(newPos + this.dragging.lowLimit) : this.positionToValue(newPos - this.dragging.lowLimit)
}
} else {
if (outOfBounds) {
if (isAbove) {
value = isRTL ? this.minValue + this.dragging.difference : this.maxValue;
} else {
value = isRTL ? this.maxValue : this.minValue + this.dragging.difference;
}
} else {
if (isRTL) {
value = this.positionToValue(newPos + this.dragging.lowLimit) + this.dragging.difference
} else {
value = this.positionToValue(newPos - this.dragging.lowLimit) + this.dragging.difference;
}
}
}
return this.roundStep(value);
},
/**
 * onDragMove event handler
 *
 * Handles dragging of the middle bar.
 *
 * @param {jqLite} pointer
 * @param {Event} event The event
 * @returns {undefined}
 */
onDragMove: function(pointer, event) {
  var newPos = this.getEventPosition(event),
    newMinValue, newMaxValue,
    ceilLimit, flrLimit,
    isUnderFlrLimit, isOverCeilLimit,
    flrH, ceilH;
  // In right-to-left mode the floor handle is the max handle and the
  // grab-point offsets recorded by onDragStart swap accordingly.
  if (this.options.rightToLeft) {
    ceilLimit = this.dragging.lowLimit;
    flrLimit = this.dragging.highLimit;
    flrH = this.maxH;
    ceilH = this.minH;
  } else {
    ceilLimit = this.dragging.highLimit;
    flrLimit = this.dragging.lowLimit;
    flrH = this.minH;
    ceilH = this.maxH;
  }
  isUnderFlrLimit = newPos <= flrLimit;
  isOverCeilLimit = newPos >= this.maxPos - ceilLimit;
  if (isUnderFlrLimit) {
    // Already pinned at the floor: nothing to update.
    if (flrH.rzsp === 0)
      return;
    newMinValue = this.getValue('min', newPos, true, false);
    newMaxValue = this.getValue('max', newPos, true, false);
  } else if (isOverCeilLimit) {
    // Already pinned at the ceiling: nothing to update.
    if (ceilH.rzsp === this.maxPos)
      return;
    newMaxValue = this.getValue('max', newPos, true, true);
    newMinValue = this.getValue('min', newPos, true, true);
  } else {
    newMinValue = this.getValue('min', newPos, false);
    newMaxValue = this.getValue('max', newPos, false);
  }
  this.positionTrackingBar(newMinValue, newMaxValue);
},
/**
 * Set the new value and position for the entire bar
 *
 * @param {number} newMinValue the new minimum value
 * @param {number} newMaxValue the new maximum value
 */
positionTrackingBar: function(newMinValue, newMaxValue) {
  // Shift (not shrink) the range when a limit is hit, keeping its width.
  if (this.options.minLimit != null && newMinValue < this.options.minLimit) {
    newMinValue = this.options.minLimit;
    newMaxValue = newMinValue + this.dragging.difference;
  }
  if (this.options.maxLimit != null && newMaxValue > this.options.maxLimit) {
    newMaxValue = this.options.maxLimit;
    newMinValue = newMaxValue - this.dragging.difference;
  }
  this.lowValue = newMinValue;
  this.highValue = newMaxValue;
  this.applyLowValue();
  if (this.range)
    this.applyHighValue();
  this.applyModel();
  this.updateHandles('lowValue', this.valueToPosition(newMinValue));
  this.updateHandles('highValue', this.valueToPosition(newMaxValue));
},
/**
 * Set the new value and position to the current tracking handle
 *
 * @param {number} newValue new model value
 */
positionTrackingHandle: function(newValue) {
  var valueChanged = false;
  newValue = this.applyMinMaxLimit(newValue);
  if (this.range) {
    if (this.options.pushRange) {
      newValue = this.applyPushRange(newValue);
      valueChanged = true;
    }
    else {
      // noSwitching pins each handle on its own side of the other one.
      if (this.options.noSwitching) {
        if (this.tracking === 'lowValue' && newValue > this.highValue)
          newValue = this.applyMinMaxRange(this.highValue);
        else if (this.tracking === 'highValue' && newValue < this.lowValue)
          newValue = this.applyMinMaxRange(this.lowValue);
      }
      newValue = this.applyMinMaxRange(newValue);
      /* This is to check if we need to switch the min and max handles */
      if (this.tracking === 'lowValue' && newValue > this.highValue) {
        // Low handle crossed the high one: swap roles and keep tracking
        // the handle under the pointer.
        this.lowValue = this.highValue;
        this.applyLowValue();
        this.updateHandles(this.tracking, this.maxH.rzsp);
        this.updateAriaAttributes();
        this.tracking = 'highValue';
        this.minH.removeClass('rz-active');
        this.maxH.addClass('rz-active');
        if (this.options.keyboardSupport)
          this.focusElement(this.maxH);
        valueChanged = true;
      }
      else if (this.tracking === 'highValue' && newValue < this.lowValue) {
        // High handle crossed the low one: mirror of the branch above.
        this.highValue = this.lowValue;
        this.applyHighValue();
        this.updateHandles(this.tracking, this.minH.rzsp);
        this.updateAriaAttributes();
        this.tracking = 'lowValue';
        this.maxH.removeClass('rz-active');
        this.minH.addClass('rz-active');
        if (this.options.keyboardSupport)
          this.focusElement(this.minH);
        valueChanged = true;
      }
    }
  }
  // Only touch the model when the tracked value actually changed.
  if (this[this.tracking] !== newValue) {
    this[this.tracking] = newValue;
    if (this.tracking === 'lowValue')
      this.applyLowValue();
    else
      this.applyHighValue();
    this.updateHandles(this.tracking, this.valueToPosition(newValue));
    this.updateAriaAttributes();
    valueChanged = true;
  }
  if (valueChanged)
    this.applyModel();
},
applyMinMaxLimit: function(newValue) {
if (this.options.minLimit != null && newValue < this.options.minLimit)
return this.options.minLimit;
if (this.options.maxLimit != null && newValue > this.options.maxLimit)
return this.options.maxLimit;
return newValue;
},
applyMinMaxRange: function(newValue) {
var oppositeValue = this.tracking === 'lowValue' ? this.highValue : this.lowValue,
difference = Math.abs(newValue - oppositeValue);
if (this.options.minRange != null) {
if (difference < this.options.minRange) {
if (this.tracking === 'lowValue')
return this.highValue - this.options.minRange;
else
return this.lowValue + this.options.minRange;
}
}
if (this.options.maxRange != null) {
if (difference > this.options.maxRange) {
if (this.tracking === 'lowValue')
return this.highValue - this.options.maxRange;
else
return this.lowValue + this.options.maxRange;
}
}
return newValue;
},
applyPushRange: function(newValue) {
var difference = this.tracking === 'lowValue' ? this.highValue - newValue : newValue - this.lowValue,
minRange = this.options.minRange !== null ? this.options.minRange : this.options.step,
maxRange = this.options.maxRange;
// if smaller than minRange
if (difference < minRange) {
if (this.tracking === 'lowValue') {
this.highValue = Math.min(newValue + minRange, this.maxValue);
newValue = this.highValue - minRange;
this.applyHighValue();
this.updateHandles('highValue', this.valueToPosition(this.highValue));
}
else {
this.lowValue = Math.max(newValue - minRange, this.minValue);
newValue = this.lowValue + minRange;
this.applyLowValue();
this.updateHandles('lowValue', this.valueToPosition(this.lowValue));
}
this.updateAriaAttributes();
}
// if greater than maxRange
else if (maxRange !== null && difference > maxRange) {
if (this.tracking === 'lowValue') {
this.highValue = newValue + maxRange;
this.applyHighValue();
this.updateHandles('highValue', this.valueToPosition(this.highValue));
}
else {
this.lowValue = newValue - maxRange;
this.applyLowValue();
this.updateHandles('lowValue', this.valueToPosition(this.lowValue));
}
this.updateAriaAttributes();
}
return newValue;
},
/**
 * Apply the model values using scope.$apply.
 * We wrap it with the internalChange flag to avoid the watchers to be called
 */
applyModel: function() {
  // internalChange guards the model watchers so this programmatic update
  // does not re-enter the slider's own change handling.
  this.internalChange = true;
  this.scope.$apply();
  this.callOnChange();
  this.internalChange = false;
},
/**
* Call the onStart callback if defined
* The callback call is wrapped in a $evalAsync to ensure that its result will be applied to the scope.
*
* @returns {undefined}
*/
callOnStart: function() {
if (this.options.onStart) {
var self = this,
pointerType = this.tracking === 'lowValue' ? 'min' : 'max';
this.scope.$evalAsync(function() {
self.options.onStart(self.options.id, self.scope.rzSliderModel, self.scope.rzSliderHigh, pointerType);
});
}
},
/**
* Call the onChange callback if defined
* The callback call is wrapped in a $evalAsync to ensure that its result will be applied to the scope.
*
* @returns {undefined}
*/
callOnChange: function() {
if (this.options.onChange) {
var self = this,
pointerType = this.tracking === 'lowValue' ? 'min' : 'max';
this.scope.$evalAsync(function() {
self.options.onChange(self.options.id, self.scope.rzSliderModel, self.scope.rzSliderHigh, pointerType);
});
}
},
/**
* Call the onEnd callback if defined
* The callback call is wrapped in a $evalAsync to ensure that its result will be applied to the scope.
*
* @returns {undefined}
*/
callOnEnd: function() {
if (this.options.onEnd) {
var self = this,
pointerType = this.tracking === 'lowValue' ? 'min' : 'max';
this.scope.$evalAsync(function() {
self.options.onEnd(self.options.id, self.scope.rzSliderModel, self.scope.rzSliderHigh, pointerType);
});
}
this.scope.$emit('slideEnded');
}
};
return Slider;
}])
.directive('rzslider', ['RzSlider', function(RzSlider) {
  'use strict';
  return {
    restrict: 'AE',
    replace: true,
    // Two-way bind the model/high values; options are lazily evaluated.
    scope: {
      rzSliderModel: '=?',
      rzSliderHigh: '=?',
      rzSliderOptions: '&?',
      rzSliderTplUrl: '@'
    },
    /**
     * Return template URL
     *
     * @param {jqLite} elem
     * @param {Object} attrs
     * @return {string}
     */
    templateUrl: function(elem, attrs) {
      //noinspection JSUnresolvedVariable
      return attrs.rzSliderTplUrl || 'rzSliderTpl.html';
    },
    link: function(scope, elem) {
      scope.slider = new RzSlider(scope, elem); //attach on scope so we can test it
    }
  };
}]);
// IDE assist
/**
* @name ngScope
*
* @property {number} rzSliderModel
* @property {number} rzSliderHigh
* @property {Object} rzSliderOptions
*/
/**
* @name jqLite
*
 * @property {number|undefined} rzsp rzslider label position
* @property {number|undefined} rzsd rzslider element dimension
* @property {string|undefined} rzsv rzslider label value/text
* @property {Function} css
* @property {Function} text
*/
/**
* @name Event
* @property {Array} touches
* @property {Event} originalEvent
*/
/**
* @name ThrottleOptions
*
* @property {boolean} leading
* @property {boolean} trailing
*/
// Pre-populate the template cache with the default slider markup so the
// directive works without fetching 'rzSliderTpl.html' over HTTP.
module.run(['$templateCache', function($templateCache) {
  'use strict';
  $templateCache.put('rzSliderTpl.html',
    "<div class=rzslider><span class=rz-bar-wrapper><span class=rz-bar></span></span> <span class=rz-bar-wrapper><span class=\"rz-bar rz-selection\" ng-style=barStyle></span></span> <span class=\"rz-pointer rz-pointer-min\" ng-style=minPointerStyle></span> <span class=\"rz-pointer rz-pointer-max\" ng-style=maxPointerStyle></span> <span class=\"rz-bubble rz-limit rz-floor\"></span> <span class=\"rz-bubble rz-limit rz-ceil\"></span> <span class=rz-bubble></span> <span class=rz-bubble></span> <span class=rz-bubble></span><ul ng-show=showTicks class=rz-ticks><li ng-repeat=\"t in ticks track by $index\" class=rz-tick ng-class=\"{'rz-selected': t.selected}\" ng-style=t.style ng-attr-uib-tooltip=\"{{ t.tooltip }}\" ng-attr-tooltip-placement={{t.tooltipPlacement}} ng-attr-tooltip-append-to-body=\"{{ t.tooltip ? true : undefined}}\"><span ng-if=\"t.value != null\" class=rz-tick-value ng-attr-uib-tooltip=\"{{ t.valueTooltip }}\" ng-attr-tooltip-placement={{t.valueTooltipPlacement}}>{{ t.value }}</span> <span ng-if=\"t.legend != null\" class=rz-tick-legend>{{ t.legend }}</span></li></ul></div>"
  );
}]);
return module.name
}));
|
/**
* @file 组件样式入口
*/
import '../../core/styles/index';
import './index.less';
import '../../empty/style';
import '../../checkbox/style';
import '../../button/style';
import '../../input/style';
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 09:25:41 2019
@author: michaelek
"""
import pytest
from tethys_utils import *
import pandas as pd
from tethys_utils.datasets import get_path
pd.options.display.max_columns = 10

###############################################
### Parameters

# Identifier of the bundled test dataset and its on-disk path.
d_name1 = '218810'
d_path1 = get_path(d_name1)

# CF-style variable attributes keyed by variable name, passed to df_to_xarray.
attrs = {'quality_code': {'standard_name': 'quality_flag',
                          'long_name': 'NEMS quality code',
                          'references': 'https://www.lawa.org.nz/media/16580/nems-quality-code-schema-2013-06-1-.pdf'},
         'well_depth': {'units': 'm'},
         'well_diameter': {'units': 'mm'},
         'well_screens': {'units': ''},
         'well_top_screen': {'units': 'm'},
         'well_bottom_screen': {'units': 'm'},
         'precipitation': {'feature': 'atmosphere',
                           'parameter': 'precipitation',
                           'method': 'sensor_recording',
                           'processing_code': '1',
                           'owner': 'ECan',
                           'aggregation_statistic': 'cumulative',
                           'frequency_interval': '1H',
                           'utc_offset': '0H',
                           'units': 'mm',
                           'license': 'https://creativecommons.org/licenses/by/4.0/',
                           'result_type': 'time_series',
                           'standard_name': 'precipitation_amount'}}

# netCDF encoding per variable (integer packing with _FillValue/scale_factor).
encoding = {'quality_code': {'dtype': 'int16', '_FillValue': -9999},
            'well_depth': {'dtype': 'int32', '_FillValue': -99999, 'scale_factor': 0.1},
            'well_diameter': {'dtype': 'int32',
                              '_FillValue': -99999,
                              'scale_factor': 0.1},
            'well_screens': {'dtype': 'int16', '_FillValue': -9999},
            'well_top_screen': {'dtype': 'int32',
                                '_FillValue': -99999,
                                'scale_factor': 0.1},
            'well_bottom_screen': {'dtype': 'int32',
                                   '_FillValue': -99999,
                                   'scale_factor': 0.1},
            'precipitation': {'scale_factor': 0.1, 'dtype': 'int16', '_FillValue': -99}}

# Shared df_to_xarray arguments used by the tests below.
nc_type = 'H23'
param_name = 'precipitation'
run_date_key = '20200803T225843Z'
ancillary_variables = ['quality_code']
compression = True
########################################
### Tests
def test_read_pkl_zstd():
    """read_pkl_zstd should decompress the bundled dataset into a DataFrame."""
    df1 = read_pkl_zstd(d_path1)
    # The bundled '218810' dataset is known to hold 20000 rows x 7 columns.
    assert df1.shape == (20000, 7)
    # (a second, identical read that discarded its result was removed here)
def test_write_pkl_zstd():
    """Compressing the fixture should yield a payload in the expected size band."""
    # df1 was previously a local of test_read_pkl_zstd, so referencing it here
    # raised NameError at runtime; load the fixture explicitly instead of
    # sharing state between tests.
    df1 = read_pkl_zstd(d_path1)
    p_df1 = write_pkl_zstd(df1)
    len1 = round(len(p_df1), -3)
    assert (len1 < 200000) and (len1 > 100000)
def test_df_to_xarray():
    """df_to_xarray should build a dataset both with and without compression."""
    # Load the fixture locally; the previous code referenced df1 from another
    # test's local scope, which raised NameError at runtime.
    df1 = read_pkl_zstd(d_path1)
    p_ds1 = df_to_xarray(df1, nc_type, param_name, attrs, encoding, run_date_key, ancillary_variables, compression)
    len2 = round(len(p_ds1), -3)
    ds1 = df_to_xarray(df1, nc_type, param_name, attrs, encoding, run_date_key, ancillary_variables)
    assert (len(ds1) == 6) and (len2 < 30000) and (len2 > 20000)
# @pytest.mark.parametrize('input_sites', [input_sites1, input_sites2, input_sites3])
# def test_nat(input_sites):
# f1 = FlowNat(from_date, to_date, input_sites=input_sites)
#
# nat_flow = f1.naturalisation()
#
# assert (len(f1.summ) >= 1) & (len(nat_flow) > 2900)
|
from __future__ import division
from builtins import object
import numpy as np
from scipy.ndimage import convolve
from sporco import linalg
from sporco import util
class TestSet01(object):
    """Tests for the sporco.linalg solvers and helper functions.

    Each test builds a small random problem, applies one of the linear
    solvers (LU, Cholesky, Sherman-Morrison variants, CG) or a utility
    function, and checks the result via the relative residual ``linalg.rrs``.
    """

    def setup_method(self, method):
        # Fixed seed so every test sees the same random problem.
        np.random.seed(12345)

    def test_01(self):
        """LU solve of (D^T D + rho I) X = B, D with more columns than rows."""
        rho = 1e-1
        N = 64
        M = 128
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.T.dot(D).dot(X) + rho*X - D.T.dot(S)) / rho
        lu, piv = linalg.lu_factor(D, rho)
        Xslv = linalg.lu_solve_ATAI(D, rho, D.T.dot(S) + rho*Z, lu, piv)
        assert(linalg.rrs(D.T.dot(D).dot(Xslv) + rho*Xslv,
                          D.T.dot(S) + rho*Z) < 1e-11)

    def test_02(self):
        """LU solve of (D^T D + rho I) X = B, D with more rows than columns."""
        rho = 1e-1
        N = 128
        M = 64
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.T.dot(D).dot(X) + rho*X - D.T.dot(S)) / rho
        lu, piv = linalg.lu_factor(D, rho)
        Xslv = linalg.lu_solve_ATAI(D, rho, D.T.dot(S) + rho*Z, lu, piv)
        assert(linalg.rrs(D.T.dot(D).dot(Xslv) + rho*Xslv,
                          D.T.dot(S) + rho*Z) < 1e-14)

    def test_03(self):
        """LU solve of D (X X^T + rho I) = B, wide dictionary."""
        rho = 1e-1
        N = 64
        M = 128
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.dot(X).dot(X.T) + rho*D - S.dot(X.T)) / rho
        lu, piv = linalg.lu_factor(X, rho)
        Dslv = linalg.lu_solve_AATI(X, rho, S.dot(X.T) + rho*Z, lu, piv)
        assert(linalg.rrs(Dslv.dot(X).dot(X.T) + rho*Dslv,
                          S.dot(X.T) + rho*Z) < 1e-11)

    def test_04(self):
        """LU solve of D (X X^T + rho I) = B, tall dictionary."""
        rho = 1e-1
        N = 128
        M = 64
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.dot(X).dot(X.T) + rho*D - S.dot(X.T)) / rho
        lu, piv = linalg.lu_factor(X, rho)
        Dslv = linalg.lu_solve_AATI(X, rho, S.dot(X.T) + rho*Z, lu, piv)
        assert(linalg.rrs(Dslv.dot(X).dot(X.T) + rho*Dslv,
                          S.dot(X.T) + rho*Z) < 1e-11)

    def test_05(self):
        """Cholesky solve of (D^T D + rho I) X = B, wide dictionary."""
        rho = 1e-1
        N = 64
        M = 128
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.T.dot(D).dot(X) + rho*X - D.T.dot(S)) / rho
        c, lwr = linalg.cho_factor(D, rho)
        Xslv = linalg.cho_solve_ATAI(D, rho, D.T.dot(S) + rho*Z, c, lwr)
        assert(linalg.rrs(D.T.dot(D).dot(Xslv) + rho*Xslv,
                          D.T.dot(S) + rho*Z) < 1e-11)

    def test_06(self):
        """Cholesky solve of (D^T D + rho I) X = B, tall dictionary."""
        rho = 1e-1
        N = 128
        M = 64
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.T.dot(D).dot(X) + rho*X - D.T.dot(S)) / rho
        c, lwr = linalg.cho_factor(D, rho)
        Xslv = linalg.cho_solve_ATAI(D, rho, D.T.dot(S) + rho*Z, c, lwr)
        assert(linalg.rrs(D.T.dot(D).dot(Xslv) + rho*Xslv,
                          D.T.dot(S) + rho*Z) < 1e-14)

    def test_07(self):
        """Cholesky solve of D (X X^T + rho I) = B, wide dictionary."""
        rho = 1e-1
        N = 64
        M = 128
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.dot(X).dot(X.T) + rho*D - S.dot(X.T)) / rho
        c, lwr = linalg.cho_factor(X, rho)
        Dslv = linalg.cho_solve_AATI(X, rho, S.dot(X.T) + rho*Z, c, lwr)
        assert(linalg.rrs(Dslv.dot(X).dot(X.T) + rho*Dslv,
                          S.dot(X.T) + rho*Z) < 1e-11)

    def test_08(self):
        """Cholesky solve of D (X X^T + rho I) = B, tall dictionary."""
        rho = 1e-1
        N = 128
        M = 64
        K = 32
        D = np.random.randn(N, M)
        X = np.random.randn(M, K)
        S = D.dot(X)
        Z = (D.dot(X).dot(X.T) + rho*D - S.dot(X.T)) / rho
        c, lwr = linalg.cho_factor(X, rho)
        Dslv = linalg.cho_solve_AATI(X, rho, S.dot(X.T) + rho*Z, c, lwr)
        assert(linalg.rrs(Dslv.dot(X).dot(X.T) + rho*Dslv,
                          S.dot(X.T) + rho*Z) < 1e-11)

    def test_09(self):
        """Diagonal block inverse via Sherman-Morrison (solvedbi_sm)."""
        rho = 1e-1
        N = 32
        M = 16
        K = 8
        D = util.complex_randn(N, N, 1, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Z = (D.conj()*np.sum(D*X, axis=4, keepdims=True) + \
             rho*X - D.conj()*S) / rho
        Xslv = linalg.solvedbi_sm(D, rho, D.conj()*S + rho*Z)
        assert(linalg.rrs(D.conj()*np.sum(D*Xslv, axis=4, keepdims=True) +
                          rho*Xslv, D.conj()*S + rho*Z) < 1e-11)

    def test_10(self):
        """solvedbd_sm with a non-scalar (per-element) diagonal term d."""
        N = 32
        M = 16
        K = 8
        D = util.complex_randn(N, N, 1, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        d = 1e-1 * (np.random.randn(N, N, 1, 1, M).astype('complex') +
                    np.random.randn(N, N, 1, 1, M).astype('complex') * 1.0j)
        Z = (D.conj()*np.sum(D*X, axis=4, keepdims=True) +
             d*X - D.conj()*S) / d
        Xslv = linalg.solvedbd_sm(D, d, D.conj()*S + d*Z)
        assert(linalg.rrs(D.conj()*np.sum(D*Xslv, axis=4, keepdims=True) +
                          d*Xslv, D.conj()*S + d*Z) < 1e-11)

    def test_11(self):
        """Multiple diagonal block inverse via iterated SM (solvemdbi_ism)."""
        rho = 1e-1
        N = 32
        M = 16
        K = 8
        D = util.complex_randn(N, N, 1, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_ism(X, rho, XHop(S) + rho*Z, 4, 3)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11

    def test_12(self):
        """solvemdbi_ism with a multi-channel (C=3) dictionary."""
        rho = 1e-1
        N = 32
        M = 16
        C = 3
        K = 8
        D = util.complex_randn(N, N, C, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X)* x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_ism(X, rho, XHop(S) + rho*Z, 4, 3)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11

    def test_13(self):
        """Multiple diagonal block inverse via repeated SM (solvemdbi_rsm)."""
        rho = 1e-1
        N = 32
        M = 16
        K = 8
        D = util.complex_randn(N, N, 1, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_rsm(X, rho, XHop(S) + rho*Z, 3)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11

    def test_14(self):
        """solvemdbi_rsm with a multi-channel (C=3) dictionary."""
        rho = 1e-1
        N = 64
        M = 32
        C = 3
        K = 8
        D = util.complex_randn(N, N, C, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv = linalg.solvemdbi_rsm(X, rho, XHop(S) + rho*Z, 3)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) < 1e-11

    def test_15(self):
        """Multiple diagonal block inverse via CG; looser tolerance applies."""
        rho = 1e-1
        N = 32
        M = 16
        K = 8
        D = util.complex_randn(N, N, 1, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv, cgit = linalg.solvemdbi_cg(X, rho, XHop(S)+rho*Z, 4, 3, tol=1e-6)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) <= 1e-6

    def test_16(self):
        """solvemdbi_cg with a multi-channel (C=3) dictionary."""
        rho = 1e-1
        N = 64
        M = 32
        C = 3
        K = 8
        D = util.complex_randn(N, N, C, 1, M)
        X = util.complex_randn(N, N, 1, K, M)
        S = np.sum(D*X, axis=4, keepdims=True)
        Xop = lambda x: np.sum(X * x, axis=4, keepdims=True)
        XHop = lambda x: np.sum(np.conj(X) * x, axis=3, keepdims=True)
        Z = (XHop(Xop(D)) + rho*D - XHop(S)) / rho
        Dslv, cgit = linalg.solvemdbi_cg(X, rho, XHop(S)+rho*Z, 4, 3, tol=1e-6)
        assert linalg.rrs(XHop(Xop(Dslv)) + rho*Dslv, XHop(S) + rho*Z) <= 1e-6

    def test_17(self):
        """Projection onto an l2 ball of radius 1 centred at the origin."""
        b = np.array([0.0, 0.0, 2.0])
        s = np.array([0.0, 0.0, 0.0])
        r = 1.0
        p = linalg.proj_l2ball(b, s, r)
        assert linalg.rrs(p, np.array([0.0, 0.0, 1.0])) < 1e-14

    def test_18(self):
        """Block-circulant matrix construction from two blocks."""
        u0 = np.array([[0, 1], [2, 3]])
        u1 = np.array([[4, 5], [6, 7]])
        C = linalg.block_circulant((u0, u1))
        assert C[3, 0] == 6
        assert C[3, 3] == 3

    def test_19(self):
        """Parseval check: fl2norm2 of the FFT equals the squared l2 norm."""
        x = np.random.randn(16, 8)
        xf = linalg.fftn(x, axes=(0,))
        n1 = np.linalg.norm(x)**2
        n2 = linalg.fl2norm2(xf, axis=(0,))
        assert np.abs(n1 - n2) < 1e-12

    def test_20(self):
        """Parseval check for the real FFT variant (rfl2norm2)."""
        x = np.random.randn(16, )
        xf = linalg.rfftn(x, axes=(0,))
        n1 = np.linalg.norm(x)**2
        n2 = linalg.rfl2norm2(xf, xs=x.shape, axis=(0,))
        assert np.abs(n1 - n2) < 1e-12

    def test_21(self):
        """linalg.inner matches an explicit sum-of-products along axis 0."""
        x = np.random.randn(16, 8)
        y = np.random.randn(16, 8)
        ip1 = np.sum(x * y, axis=0, keepdims=True)
        ip2 = linalg.inner(x, y, axis=0)
        assert np.linalg.norm(ip1 - ip2) < 1e-13

    def test_22(self):
        """linalg.inner with broadcasting along the last axis."""
        x = np.random.randn(8, 8, 3, 12)
        y = np.random.randn(8, 1, 1, 12)
        ip1 = np.sum(x * y, axis=-1, keepdims=True)
        ip2 = linalg.inner(x, y, axis=-1)
        assert np.linalg.norm(ip1 - ip2) < 1e-13

    def test_23(self):
        """linalg.dot agrees with ndarray.dot for 2-d operands."""
        a = np.random.randn(7, 8)
        b = np.random.randn(8, 12)
        c1 = a.dot(b)
        c2 = linalg.dot(a, b)
        assert np.linalg.norm(c1 - c2) < 1e-14

    def test_24(self):
        """linalg.dot broadcast over the leading axes of a 4-d operand."""
        a = np.random.randn(7, 8)
        b = np.random.randn(3, 4, 8, 12)
        c1 = np.zeros((3, 4, 7, 12))
        for i0 in range(c1.shape[0]):
            for i1 in range(c1.shape[1]):
                c1[i0, i1] = a.dot(b[i0, i1])
        c2 = linalg.dot(a, b)
        assert np.linalg.norm(c1 - c2) < 2e-14

    def test_25(self):
        """linalg.dot with an explicit contraction axis (axis=1)."""
        a = np.random.randn(7, 8)
        b = np.random.randn(3, 8, 4, 12)
        c1 = np.zeros((3, 7, 4, 12))
        for i0 in range(c1.shape[0]):
            for i1 in range(c1.shape[3]):
                c1[i0, ..., i1] = a.dot(b[i0, ..., i1])
        c2 = linalg.dot(a, b, axis=1)
        assert np.linalg.norm(c1 - c2) < 2e-14

    def test_26(self):
        """fftconv on a small 2-d example with a known circular result."""
        x = np.array([[0, 1], [2, 3]])
        y = np.array([[4, 5], [6, 7]])
        xy = np.array([[38, 36], [30, 28]])
        assert np.allclose(linalg.fftconv(x, y), xy)

    def test_27(self):
        """fftconv with a shifted impulse matches scipy.ndimage.convolve."""
        x = np.random.randn(5,)
        y = np.zeros((12,))
        y[4] = 1.0
        xy0 = convolve(y, x)
        xy1 = linalg.fftconv(x, y, axes=(0,), origin=(2,))
        assert np.allclose(xy0, xy1)
|
from django.shortcuts import render
from django.views.generic.list import ListView
from .models import Query
class QueryListView(ListView):
    """Render the default list template for all ``Query`` objects."""

    # ListView derives the queryset, template name and context variable
    # from the model.
    model = Query
|
from concurrent.futures import ThreadPoolExecutor
import pytest
from azure.identity.aio import DefaultAzureCredential
from adlfs import AzureBlobFileSystem
# Azurite storage-emulator endpoint and its well-known development credentials.
URL = "http://127.0.0.1:10000"
ACCOUNT_NAME = "devstoreaccount1"
KEY = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="  # NOQA
CONN_STR = f"DefaultEndpointsProtocol=http;AccountName={ACCOUNT_NAME};AccountKey={KEY};BlobEndpoint={URL}/{ACCOUNT_NAME};"  # NOQA
def test_fs_loop(storage):
    """
    This is a test to verify that AzureBlobFilesystem can provide a
    running event loop to the azure python sdk when requesting asynchronous
    credentials and running in a separate thread
    """
    def test_connect_async_credential():
        # Constructing the filesystem is the whole exercise: it must not
        # raise even though DefaultAzureCredential is async-only.
        fs = AzureBlobFileSystem(  # NOQA
            account_name=storage.account_name, credential=DefaultAzureCredential()
        )
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(test_connect_async_credential)
        # The worker returns None when no exception was raised in the thread.
        assert future.result() is None
def test_file_loop(storage):
    """
    This is a test to verify that AzureBlobFile class provides a
    running event loop to the Azure python sdk when requesting asynchronous
    credentials and running in a separate thread
    """

    def _open_with_async_credential():
        filesystem = AzureBlobFileSystem(
            account_name=storage.account_name, credential=DefaultAzureCredential()
        )
        # Opening an empty path is expected to raise ValueError
        filesystem.open(path="")

    with ThreadPoolExecutor(max_workers=1) as pool:
        task = pool.submit(_open_with_async_credential)
        with pytest.raises(ValueError):
            task.result()
|
/* Copyright (c) 2017-2021, Hans Erik Thrane */
/* !!! THIS FILE HAS BEEN AUTO-GENERATED !!! */
#pragma once
#include <fmt/format.h>
#include <cassert>
#include <string_view>
#include <type_traits>
#include <magic_enum.hpp>
#include "roq/compat.h"
#include "roq/literals.h"
namespace roq {
//! Enumeration of request types
struct ROQ_PACKED RequestType final {
  //! helper
  enum type_t : uint8_t {
    UNDEFINED = 0,
    CREATE_ORDER,
    MODIFY_ORDER,
    CANCEL_ORDER,
  };
  constexpr RequestType() = default;
  // cppcheck-suppress noExplicitConstructor
  constexpr RequestType(type_t type)  // NOLINT (allow implicit)
      : type_(type) {}
  //! Construct from a raw integer; unrecognized values map to UNDEFINED
  explicit constexpr RequestType(uint8_t value)
      : type_(magic_enum::enum_cast<type_t>(value).value_or(type_t::UNDEFINED)) {}
  //! Construct from an enumerator name; unrecognized names map to UNDEFINED
  explicit constexpr RequestType(const std::string_view &value)
      : type_(magic_enum::enum_cast<type_t>(value).value_or(type_t::UNDEFINED)) {}
  //! Implicit conversion back to the underlying enum value
  constexpr operator type_t() const { return type_; }
  //! Name of the current enumerator
  constexpr std::string_view name() const { return magic_enum::enum_name(type_); }
  constexpr operator std::string_view() const { return name(); }
  //! All enumerators, in declaration order
  static constexpr auto values() { return magic_enum::enum_values<type_t>(); }
  //! Number of enumerators (including UNDEFINED)
  static constexpr size_t count() { return magic_enum::enum_count<type_t>(); }
  //! Enumerator at the given index
  static constexpr RequestType from_index(size_t index) { return magic_enum::enum_value<type_t>(index); }
  constexpr size_t to_index() const {
    auto result = magic_enum::enum_index(type_);  // std::optional
    return result.value();  // note! could throw
  }

 private:
  type_t type_ = type_t::UNDEFINED;
};
} // namespace roq
// NOTE(review): adding program-defined specializations of std::is_enum /
// std::underlying_type is formally undefined behavior per [namespace.std];
// it works with common toolchains but relies on that. File is
// auto-generated -- TODO confirm this is intended.
template <>
struct std::is_enum<roq::RequestType> : std::true_type {};
template <>
struct std::underlying_type<roq::RequestType> {
  using type = uint8_t;
};
//! fmt support: a RequestType formats as its enumerator name
template <>
struct fmt::formatter<roq::RequestType> {
  //! No format-spec options are supported; parsing is a no-op
  template <typename Context>
  constexpr auto parse(Context &context) {
    return context.begin();
  }
  //! Emit the enumerator name into the output iterator
  template <typename Context>
  auto format(const roq::RequestType &value, Context &context) {
    using namespace roq::literals;
    return fmt::format_to(context.out(), "{}"_sv, value.name());
  }
};
|
// @flow
// Register the JSON-RPC 2.0 endpoint for the wallet-core gateway.
// NOTE(review): 'api/wallet-core/' is passed twice to server.all() --
// presumably (name, path) in this router's API, or an accidental
// duplicate; confirm against the server framework's docs.
export function setJsonRpcRoutes(server: any) {
  server.all('api/wallet-core/', 'api/wallet-core/', async ctx => {
    // Standard JSON-RPC request envelope
    const {jsonrpc, id, method, params} = ctx.request.body;
    setCors(ctx);
    try {
      // Dispatch to the wallet-core gateway method named in the request
      const result = await server.gateways.walletCore[method](...params);
      return (ctx.response.body = {
        id,
        jsonrpc,
        result,
      });
    } catch (e) {
      // -32600 is the JSON-RPC "Invalid Request" error code
      return (ctx.response.body = {
        id,
        jsonrpc,
        error: {
          code: -32600,
          message: JSON.stringify(e),
        },
      });
    }
  });
}
// Apply permissive CORS headers to a Koa-style context.
// Fix: ctx.set(field, value) takes exactly one value argument; the original
// passed 'POST', 'GET' as two arguments, so 'GET' was silently dropped and
// the header only advertised POST.
function setCors(ctx) {
  ctx.set('Access-Control-Allow-Origin', '*');
  ctx.set('Access-Control-Allow-Methods', 'POST, GET');
  ctx.set(
    'Access-Control-Allow-Headers',
    'Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With'
  );
}
|
/*
* Copyright 2017 Banco Bilbao Vizcaya Argentaria, S.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Fixture: dashboards loaded from the JSON mock shared by these tests.
// `readJSON` is presumably provided by the test harness -- TODO confirm.
var dashboardsForTesting = readJSON('test/mocks/dashboards');
/* Create a sinon fake server that answers GET <backend>/dashboards
 * with the mock dashboards fixture, serialized as JSON. */
function buildFakeServer() {
  var fakeServer = sinon.fakeServer.create();
  //server.autoRespond = true;
  var jsonHeaders = {
    'Content-Type': 'application/json'
  };
  fakeServer.respondWith('GET', Utils.getBackEndUrl() + '/dashboards', [
    200,
    jsonHeaders,
    JSON.stringify(dashboardsForTesting)
  ]);
  return fakeServer;
}
|
#ifndef GPU_ENERGY_H
#define GPU_ENERGY_H
#include <utility>
//#include "heap.h"
/* Class used to transform between xyz (Cartesian) coordinates and abc
 * (unit-cell relative) coordinates. */
class UNIT_CELL {
  // Components of the three cell vectors. Only the lower-triangular
  // components are stored (a on x; b in the xy plane; c general).
  double va_x;
  double vb_x, vb_y;
  double vc_x, vc_y, vc_z;
  // Cached components of the inverse transformation
  double inv_va_x;
  double inv_vb_x, inv_vb_y;
  double inv_vc_x, inv_vc_y, inv_vc_z;

 public:
  /* Construct a unit cell with the provided vector components. */
  UNIT_CELL(double vax, double vbx, double vby, double vcx, double vcy, double vcz);
  /** Transform the coordinates relative to the uc vectors into ones relative to
   * the xyz coordinate system and store them using the provided references. */
  void abc_to_xyz(double a, double b, double c, double &x, double &y, double &z);
  /** Transform the coordinates relative to the xyz coordinate system into ones
   * relative to the unit cell vectors and store them using the provided
   * references. */
  void xyz_to_abc(double x, double y, double z, double &a, double &b, double &c);
};
/* Class used to store the displacement and energy of a path when it encounters a particular node.*/
class DISP_INFO {
public:
bool isReal;
char a,b,c; // Displacement components. Chars used to conserve space
float maxEnergy; // Maximum energy barrier on path so far
// Constructor that creates a dummy container
DISP_INFO();
/** Create a container with the provided displacement components and energy.
* Displacements are stored as characters to conserve space. */
DISP_INFO(int myA, int myB, int myC, float myMaxEnergy);
// Returns true iff the displacements are equal
bool equalDisplacement(DISP_INFO other);
};
/* Simple class used to represent a set of three integers. */
class TRIPLET {
 public:
  int vals[3];
  // NOTE(review): x, y, z appear to duplicate vals[0..2]; which storage
  // operator[] and add() actually use is defined in the .cpp -- confirm.
  int x, y, z;
  /* Construct a TRIPLET with the three provided components. */
  TRIPLET(int myX, int myY, int myZ);
  /* Access one of the TRIPLET's three values. */
  int& operator[](int index);
  /** Add each component of the triplet to that of the provided TRIPLET
   * and return the result. */
  TRIPLET add(TRIPLET other);
};
// The number and directions of a grid point's neighbors (6-connectivity)
const int NUM_DIRECTIONS = 6;
const TRIPLET DIRECTIONS [6] = {TRIPLET(1,0,0), TRIPLET(-1,0,0), TRIPLET(0,1,0), TRIPLET(0,-1,0), TRIPLET(0,0,1), TRIPLET(0,0,-1)};
/* Return the 1-d index for the provided grid indices. */
int transformIndex(int x, int y, int z, int numX, int numY);
/* Returns the energy of the point located at the provided grid indices.
 * Assumes the grid indices are within the appropriate range. */
float getEnergy(int x, int y, int z, int numX, int numY, float *energyGrid);
/* Returns the energy of the point located in the grid referred to by the
 * TRIPLET of indices. Assumes the grid indices are within the appropriate
 * range. */
float getEnergy(TRIPLET indices, int numX, int numY, float *energyGrid);
/* Returns true iff the grid point referred to by the provided grid indices
 * is indeed accessible. Assumes the grid indices are within the appropriate
 * range. */
bool isAccessible(int x, int y, int z, int numX, int numY, float *accessGrid);
/* Returns the integer nearest to the provided double. */
int nearestInt(double num);
/* Translate the coordinate by unit cell increments so that it lies within
 * the 0 to 1 range. */
double translate_to_original_uc(double x);
/* Adjusts the indices so that they lie within the appropriate range
 * and such that they are adjusted according to periodic boundary conditions.
 * Stores (via the reference parameter) a TRIPLET representing to which unit
 * cell the indices originally referred. */
void adjustIndices(TRIPLET &gridIndices, TRIPLET &shift, UNIT_CELL &uc);
/* Returns true iff the grid point referred to by the provided triplet of
 * grid indices is indeed accessible. Assumes the grid indices are within
 * the appropriate range. */
bool isAccessible(TRIPLET indices, int numX, int numY, float *accessGrid);
/* Returns true if the first pair has energy >= that of the second pair. */
bool hasHigherEnergy(std::pair<TRIPLET, DISP_INFO > p1, std::pair<TRIPLET, DISP_INFO > p2);
// Helper function used in calculateMinEnergyBarrier()
bool findMinEnergyBarrier(int startX, int startY, int startZ, double &barrier,
    int numX, int numY, int numZ, float *energyGrid, float *accessGrid, UNIT_CELL unit_cell);
/* Calculates the minimum energy barrier of a path that must pass through
 * the point referred to by the provided grid indices. The resulting barrier
 * is stored using the provided reference. If no path is possible, the
 * barrier is set to DBL_MAX. The function returns true iff a path is found. */
bool calculateMinEnergyBarrier(double &minEnergy, double &barrierEnergy, float box_x, float box_y, float box_z, int numX, int numY, int numZ, float *energyGrid, float *accessGrid,
    double vax, double vbx, double vby, double vcx, double vcy, double vcz);
#endif
|
import React from 'react';
import {connect} from "react-redux";
// Counter button component: each click dispatches the "add_action" action.
class ComA extends React.Component{
    // 1) Initialization phase
    constructor(props){
        super(props);
        this.state = {
        };
    }
    // Click handler; addAction is injected by connect() via mapDispatchToProps
    handleClick=()=>{
        // console.log("output:", this.props);
        this.props.addAction();
    }
    render(){
        return (
            <button onClick={this.handleClick}>+</button>
        )
    }
}
/* This function must return an object; each property becomes a prop whose
 * call dispatches a Redux action. */
const mapDispatchToProps = (dispatch) => ({
    // Dispatch a plain action object; `type` is the only required field
    addAction: () => dispatch({
        type: "add_action"
    }),
});
export default connect(null,mapDispatchToProps)(ComA);
|
"""This module contains main Application class.
The class implements core logic of the program.
"""
import fnmatch
import logging
import logging.handlers
from enum import Enum
from typing import Tuple
import colorlog
import utils
import yaml
# Process exit codes returned by Application.run() and its helpers
CLI_OK = 0
CLI_ERROR = 1
class Command(Enum):
    """The enum of command types."""

    # Power management commands
    POWER_STATUS = 1
    POWER_ON = 2
    POWER_OFF = 3
    POWER_CYCLE = 4
    # Boot-device selection commands
    BOOTDEV_BIOS = 5
    BOOTDEV_DISK = 6
    BOOTDEV_PXE = 7
    # Remote console command
    CONSOLE = 8
class Application:
    """Main application class.

    Reads the machines configuration file, resolves machine-name
    patterns against it, and executes commands (via ``utils.Ipmitool``)
    on the matching machines.
    """

    # Fallback path of the machines YAML file
    DEFAULT_MACHINE_CONFIG_PATH = "./config/nodes.yaml"

    def __init__(
        self,
        debug=False,
        dry_run=False,
        machine_config="",
        no_color=False,
        verbose=False,
    ):
        """Set up logger and store global options.

        :param debug: enable DEBUG-level logging
        :param dry_run: pass-through flag; commands are not actually executed
        :param machine_config: path to the machines YAML file; empty means
            DEFAULT_MACHINE_CONFIG_PATH
        :param no_color: use the plain (non-colored) logger
        :param verbose: verbosity flag (stored for use by callers)
        """
        # Read global options
        self.debug = debug
        self.dry_run = dry_run
        self.machine_config = (
            machine_config if machine_config else self.DEFAULT_MACHINE_CONFIG_PATH
        )
        self.no_color = no_color
        self.verbose = verbose
        # Configure logger
        self.logger = self._get_logger() if no_color else self._get_colored_logger()

    def _get_logger(self):
        """Create and return a plain (non-colored) logger object."""
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG if self.debug else logging.INFO)
        # Log to console with a "LEVEL: message" format
        console_formatter = logging.Formatter(fmt="%(levelname)s: %(message)s")
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(console_formatter)
        logger.addHandler(console_handler)
        return logger

    def _get_colored_logger(self):
        """Create and return a colored logger object."""
        logger = colorlog.getLogger(__name__)
        logger.setLevel(logging.DEBUG if self.debug else logging.INFO)
        # Log to console; coloring is driven by the log_color directive
        console_formatter = colorlog.ColoredFormatter(fmt="%(log_color)s%(message)s")
        console_handler = colorlog.StreamHandler()
        console_handler.setFormatter(console_formatter)
        logger.addHandler(console_handler)
        return logger

    def _read_machines_config(self) -> dict:
        """Read YAML machine config file.

        :return: Dictionary with machines' details, or a falsy value
            (None / empty dict) if details could not be retrieved.
        """
        # Python representation of the YAML machine config file
        machines = None
        try:
            with open(self.machine_config) as file:
                machines = yaml.load(file, Loader=yaml.FullLoader)
                self.logger.debug(
                    f"Read machines from {self.machine_config}: {machines}"
                )
        except yaml.YAMLError as e:
            self.logger.error(f"Error in machines configuration file: {e}")
        except (FileNotFoundError, PermissionError, NotADirectoryError) as e:
            self.logger.error(
                f"Cannot open machines configuration file: '{self.machine_config}'"
            )
            self.logger.error(e)
        # The YAML file with nodes' details may be formatted as dict or list.
        # We need to convert the type to the dict, so that we have a way for
        # uniquely identifying machines. For the key, we will use 'name'.
        machines_dict = {}
        if isinstance(machines, list):
            for machine in machines:
                # If a machine with the same name is already in the
                # dictionary, warn the user
                if machines_dict.get(machine["name"]):
                    # Fix: the second fragment lacked the f prefix, so
                    # {self.machine_config} was printed literally.
                    self.logger.warning(
                        f"Machine with the name {machine['name']} is defined multiple "
                        f"times in the config file ({self.machine_config})! Only one "
                        "of these machines can be taken into account. Make sure "
                        "each machine name is unique."
                    )
                machines_dict[machine["name"]] = machine
        else:
            # Assuming that the type is 'dict' (or None when reading failed)
            machines_dict = machines
        return machines_dict

    def _is_glob_pattern(self, text: str) -> bool:
        """Check if the string is a glob pattern."""
        # A string containing any glob metacharacter is a glob pattern
        return any(char in "*?[" for char in text)

    def _get_matching_machines(self, machines, include, exclude) -> list:
        """Find matching machines.

        Evaluate command parameters against the machines found in the
        config file and return the list of matching machines.

        :param machines: Tuple of machine names. Multiple machine names can
            be provided.
        :param include: reserved for filtering; currently unused -- TODO
        :param exclude: reserved for filtering; currently unused -- TODO
        """
        # A set for storing matching machine names. Cast to a sorted list,
        # this is used as the return value of this function.
        matching_machines = set()
        # Create a list of machines so that it can be modified
        machines = list(machines)
        # If no machine name was provided, assume all machines should match
        if len(machines) == 0:
            self.logger.debug(
                "No machine name(s) provided, assuming all machines match"
            )
            machines.append("*")
        # Machine names pulled from the config file
        config_machine_names = list(self.machines)
        # Iteratively build a set of matching machine names
        for machine in machines:
            # Treat a machine name as a glob pattern
            pattern = machine
            if self._is_glob_pattern(machine) is False:
                # Machine name is not a glob pattern. Explicitly make it
                # a glob pattern so that "guessing" a full machine name from
                # the partial string can be implemented.
                pattern = "*{}*".format(pattern)
            # Find matching machine names
            matches = fnmatch.filter(config_machine_names, pattern)
            matching_machines.update(matches)
            # Multiple machines found, but at least one of the machine names
            # provided was not a glob pattern. Cancel a command (by returning
            # an empty list) and inform the user that more than one machine
            # matching the pattern was found.
            if (len(matches) > 1) and (self._is_glob_pattern(machine) is False):
                message = (
                    "Ambiguous machine name provided. "
                    "Found {} machines matching '{}' name:\n {}\n"
                    "Refine the '{}' machine name pattern by adding more "
                    "details to target a specific machine.\n"
                    "Alternatively, for commands that support multiple "
                    "MACHINE-NAMEs (such as power or bootdev), you can "
                    "provide a glob pattern to target more than one "
                    "machine, e.g. '*{}*'.".format(
                        len(matches),
                        machine,
                        "\n ".join(matches),
                        machine,
                        machine,
                    )
                )
                self.logger.warning(message)
                return []
        # None of the machine names matches, return an empty list
        if len(matching_machines) == 0:
            message = (
                "No machines matching name '{}' found. "
                "Available machines:\n {}".format(
                    "' or '".join(machines), "\n ".join(self.machines)
                )
            )
            self.logger.warning(message)
            return []
        # Return a sorted list of matching machines
        return sorted(matching_machines)

    def _execute_wrapper(self, command: "Command", machine: str) -> Tuple[bool, str]:
        """Execute a single command on a single machine.

        :return: (success, output) tuple as produced by utils.Ipmitool
        """
        utility = utils.Ipmitool(
            self._get_config_value(self.machines[machine], "bmc_user"),
            self._get_config_value(self.machines[machine], "bmc_password"),
            self._get_config_value(self.machines[machine], "bmc_address"),
            self.dry_run,
        )
        # Execute the command
        if command == Command.POWER_STATUS:
            return utility.power_status()
        if command == Command.POWER_ON:
            return utility.power_on()
        if command == Command.POWER_OFF:
            return utility.power_off()
        if command == Command.POWER_CYCLE:
            return utility.power_cycle()
        if command == Command.BOOTDEV_BIOS:
            return utility.bootdev_bios()
        if command == Command.BOOTDEV_DISK:
            return utility.bootdev_disk()
        if command == Command.BOOTDEV_PXE:
            return utility.bootdev_pxe()
        if command == Command.CONSOLE:
            return utility.console()
        # Defensive fallback: previously an unknown command returned None,
        # which crashed the caller on tuple unpacking.
        return (False, "Unsupported command: {}".format(command))

    def _run_command(self, command: "Command", machines: list):
        """Run command on all machines.

        :return: CLI_OK if all commands were successful, CLI_ERROR otherwise
        """
        self.logger.debug(f"Running command {command} on machines: {machines}")
        return_code = CLI_OK
        # For each machine in the list
        for machine in machines:
            # Execute the command...
            success, output = self._execute_wrapper(command, machine)
            # And print the result
            if success:
                if output:
                    self.logger.info("{}: {}".format(machine, output))
            else:
                return_code = CLI_ERROR
                self.logger.error("{}: {}".format(machine, output))
        return return_code

    def _get_config_value(self, machine: dict, key: str) -> str:
        """Resolve a config value, expanding ``include-rel://`` references.

        A value of the form ``include-rel://<path>`` is replaced by the
        stripped contents of the file at <path>.
        """
        # Default return value
        value = machine[key]
        # If the value starts with this pattern, read the value from the file
        include_rel_pattern = "include-rel://"
        if machine[key].startswith(include_rel_pattern):
            # Get the file path by removing the include-rel:// pattern
            file_path = machine[key].replace(include_rel_pattern, "")
            try:
                with open(file_path) as file:
                    value = file.read().strip()
            except (FileNotFoundError, PermissionError) as e:
                self.logger.error(
                    f"Cannot open '{file_path}' file referred in the '{key}' value"
                )
                self.logger.error(e)
                # Abort: a missing credential file is unrecoverable here
                raise SystemExit(1)
        return value

    def run(self, command: "Command", machines, include, exclude):
        """Build a list of applicable machines and execute an action upon them.

        :return: CLI_OK if successful, CLI_ERROR on error.
        """
        self.logger.debug(
            "Running command {} with parameters: "
            "machines={}, include={}, exclude={}".format(
                command, machines, include, exclude
            )
        )
        # Read YAML file containing BMC details of machines
        machines_from_config = self._read_machines_config()
        if machines_from_config:
            self.machines = machines_from_config
        else:
            # Could not read machines from config file
            self.logger.error("Could not read machines from machines config file")
            return CLI_ERROR
        # Exit early if a glob pattern is provided for a command that does
        # not support it. (Guard against an empty tuple: previously
        # ``machines[0]`` raised IndexError when no name was given.)
        if (
            command is Command.CONSOLE
            and machines
            and self._is_glob_pattern(machines[0])
        ):
            self.logger.warning(
                "Glob patterns for MACHINE-NAME are not supported for this command"
            )
            return CLI_ERROR
        # Build a list of machines matching the request
        matching_machines = self._get_matching_machines(machines, include, exclude)
        # Exit early if no matching machines were found
        if len(matching_machines) == 0:
            return CLI_ERROR
        # Execute an action on the machines
        return self._run_command(command, matching_machines)
|
import React from 'react';
import { BrowserRouter, Switch, Route } from 'react-router-dom';
import Main from './pages/main';
import Product from './pages/product';
const Routes = () => (
<BrowserRouter>
<Switch>
<Route exact path="/" component={ Main } />
<Route path="/products/:id" component={ Product } />
</Switch>
</BrowserRouter>
);
export default Routes;
|
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: April 02, 2007
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################
"""Utilities to be used mainly by the Index class."""
import sys
import math
import numpy
# Hints for chunk/slice/block/superblock computations:
# - The slicesize should not exceed 2**32 elements (because of
# implementation reasons). Such an extreme case would make the
# sorting algorithms to consume up to 64 GB of memory.
# - In general, one should favor a small chunksize ( < 128 KB) if one
# wants to reduce the latency for indexed queries. However, keep in
# mind that a very low value of chunksize for big datasets may hurt
# the performance by requiring the HDF5 library to use a lot of memory and CPU
# for its internal B-Tree.
def csformula(nrows):
    """Return the fitted chunksize (a float value) for nrows."""
    # Fitted through two points:
    #   2**12 = m * 2**(n + log10(10**6))
    #   2**15 = m * 2**(n + log10(10**9))
    # i.e. 2**12 and 2**15 are reasonable chunksizes for indexes with
    # 10**6 and 10**9 elements respectively.
    exponent = math.log10(nrows)
    # Yes, return a floating point number!
    return 64 * 2 ** exponent
def limit_er(expectedrows):
    """Protection against creating too small or too large chunks or slices."""
    # Clamp into the supported [10**5, 10**12] range.
    return min(max(expectedrows, 10**5), 10**12)
def computechunksize(expectedrows):
    """Get the optimum chunksize based on expectedrows."""
    # Clamp to the supported range, then use the nearest lower power of
    # ten as the representative row count for the fitted formula.
    bounded = limit_er(expectedrows)
    magnitude = int(math.log10(bounded))
    return int(csformula(10 ** magnitude))
def computeslicesize(expectedrows, memlevel):
    """Get the optimum slicesize based on expectedrows and memorylevel."""
    expectedrows = limit_er(expectedrows)
    # First, the optimum chunksize (a float, from the fitted formula)
    cs = csformula(expectedrows)
    # Now, the actual chunksize
    chunksize = computechunksize(expectedrows)
    # The optimal slicesize (grows quadratically with memlevel)
    ss = int(cs * memlevel**2)
    # We *need* slicesize to be an exact multiple of the actual chunksize
    ss = (ss // chunksize) * chunksize
    ss *= 4  # slicesize should be at least divisible by 4
    # ss cannot be bigger than 2**31 - 1 elements because of fundamental
    # reasons (this limitation comes mainly from the way of compute
    # indices for indexes, but also because C keysort is not implemented
    # yet for the string type). Besides, it cannot be larger than
    # 2**30, because limitations of the optimized binary search code
    # (in idx-opt.c, the line ``mid = lo + (hi-lo)/2;`` will overflow
    # for values of ``lo`` and ``hi`` >= 2**30). Finally, ss must be a
    # multiple of 4, so 2**30 must definitely be an upper limit.
    # NOTE(review): after this clamp ss may no longer be a multiple of
    # chunksize -- presumably acceptable; confirm with callers.
    if ss > 2**30:
        ss = 2**30
    return ss
def computeblocksize(expectedrows, compoundsize, lowercompoundsize):
    """Calculate the optimum number of superblocks made from compounds blocks.

    This is useful for computing the sizes of both blocks and
    superblocks (using the PyTables terminology for blocks in indexes).
    """
    # Number of lower-level blocks needed to span expectedrows, capped
    # to protect against too large a number of compound blocks.
    nlowerblocks = min((expectedrows // lowercompoundsize) + 1, 2**20)
    size = lowercompoundsize * nlowerblocks
    # Round *up* to an exact multiple of the compound block size.
    return ((size // compoundsize) + 1) * compoundsize
def calc_chunksize(expectedrows, optlevel=6, indsize=4, memlevel=4):
    """Calculate the HDF5 chunk size for index and sorted arrays.

    The logic to do that is based purely in experiments playing with
    different chunksizes and compression flag. It is obvious that using
    big chunks optimizes the I/O speed, but if they are too large, the
    uncompressor takes too much time. This might (should) be further
    optimized by doing more experiments.
    """
    chunksize = computechunksize(expectedrows)
    slicesize = computeslicesize(expectedrows, memlevel)
    # Pick the optlevel-correction routine matching the index size
    # (1: ultralight, 2: light, 4: medium, 8: full).
    correctors = {1: ccs_ultralight, 2: ccs_light, 4: ccs_medium, 8: ccs_full}
    corrector = correctors.get(indsize)
    if corrector is not None:
        chunksize, slicesize = corrector(optlevel, chunksize, slicesize)
    # Finally, compute blocksize and superblocksize
    blocksize = computeblocksize(expectedrows, slicesize, chunksize)
    superblocksize = computeblocksize(expectedrows, blocksize, slicesize)
    # The size for different blocks information
    return (superblocksize, blocksize, slicesize, chunksize)
def ccs_ultralight(optlevel, chunksize, slicesize):
    """Correct the slicesize and the chunksize based on optlevel."""
    if optlevel in (0, 1, 2):
        # Halve, then grow proportionally to optlevel:
        # (slicesize // 2) + optlevel * (slicesize // 2)
        slicesize = (slicesize // 2) * (1 + optlevel)
    elif optlevel in (3, 4, 5, 6, 7, 8, 9):
        slicesize *= optlevel - 1
    return chunksize, slicesize
def ccs_light(optlevel, chunksize, slicesize):
    """Correct the slicesize and the chunksize based on optlevel."""
    if optlevel in (0, 1, 2):
        slicesize //= 2
    elif optlevel in (3, 4, 5):
        pass
    elif optlevel in (6, 7, 8):
        # Fix: use floor division. The original ``chunksize /= 2`` (a
        # Python 2 leftover) produced a *float* chunksize, unlike the
        # sibling ccs_medium/ccs_full which use ``//=``.
        chunksize //= 2
    elif optlevel == 9:
        # Reducing the chunksize and enlarging the slicesize is the
        # best way to reduce the entropy with the current algorithm.
        chunksize //= 2
        slicesize *= 2
    return chunksize, slicesize
def ccs_medium(optlevel, chunksize, slicesize):
    """Correct the slicesize and the chunksize based on optlevel."""
    if optlevel in (0, 1, 2):
        slicesize //= 2
    elif optlevel in (6, 7, 8):
        chunksize //= 2
    elif optlevel == 9:
        # Shrinking chunks while enlarging slices best reduces the
        # entropy with the current algorithm.
        chunksize //= 2
        slicesize *= 2
    # optlevels 3-5 leave both sizes untouched
    return chunksize, slicesize
def ccs_full(optlevel, chunksize, slicesize):
    """Correct the slicesize and the chunksize based on optlevel.

    Currently identical to the medium (32-bit index) correction.
    """
    halve_slice = optlevel in (0, 1, 2)
    halve_chunk = optlevel in (6, 7, 8, 9)
    if halve_slice:
        slicesize //= 2
    if halve_chunk:
        chunksize //= 2
    if optlevel == 9:
        # Reducing the chunksize and enlarging the slicesize is the
        # best way to reduce the entropy with the current algorithm.
        slicesize *= 2
    return chunksize, slicesize
def calcoptlevels(nblocks, optlevel, indsize):
    """Compute the optimizations to be done.

    The calculation is based on the number of blocks, optlevel and
    indexing mode.

    NOTE(review): indsize values other than 2, 4 or 8 (e.g. 1,
    ultralight) fall through and return ``None`` implicitly -- TODO
    confirm this is intended.
    """
    if indsize == 2:  # light
        return col_light(nblocks, optlevel)
    elif indsize == 4:  # medium
        return col_medium(nblocks, optlevel)
    elif indsize == 8:  # full
        return col_full(nblocks, optlevel)
def col_light(nblocks, optlevel):
    """Compute the optimizations to be done for light indexes.

    ``nblocks`` is accepted for interface parity but not consulted.
    """
    # Progressively enable the median, starts and stops optimizations;
    # the full optimization is never enabled for light indexes.
    optmedian = 0 < optlevel <= 9
    optstarts = 3 < optlevel <= 9
    optstops = 6 < optlevel <= 9
    return optmedian, optstarts, optstops, False
def col_medium(nblocks, optlevel):
    """Compute the optimizations to be done for medium indexes."""
    optmedian, optstarts, optstops, optfull = (False,) * 4
    if nblocks <= 1:
        # Single block: enable the cheap optimizations progressively
        if 0 < optlevel <= 3:
            optmedian = True
        elif 3 < optlevel <= 6:
            optmedian = optstarts = True
        elif 6 < optlevel <= 9:
            optfull = 1
    elif 0 < optlevel <= 9:
        # More than a block: one full optimization pass per 3 optlevels
        optfull = (optlevel + 2) // 3
    return optmedian, optstarts, optstops, optfull
def col_full(nblocks, optlevel):
    """Compute the optimizations to be done for full indexes.

    NOTE: currently identical to the medium case.
    """
    # Out-of-range optlevels disable every optimization
    if optlevel <= 0 or optlevel > 9:
        return False, False, False, False
    if nblocks > 1:
        # More than a block: one full optimization pass per 3 optlevels
        return False, False, False, (optlevel + 2) // 3
    # Single block: cheap optimizations first, one full pass at the top
    if optlevel <= 3:
        return True, False, False, False
    if optlevel <= 6:
        return True, True, False, False
    return False, False, False, 1
def get_reduction_level(indsize, optlevel, slicesize, chunksize):
    """Compute the reduction level based on indsize and optlevel."""
    # Reduction tables keyed by index size; column chosen by optlevel.
    reduction_tables = {
        1: [8, 8, 8, 8, 4, 4, 4, 2, 2, 1],  # 8-bit indices (ultralight)
        2: [4, 4, 4, 4, 2, 2, 2, 1, 1, 1],  # 16-bit indices (light)
        4: [2, 2, 2, 2, 1, 1, 1, 1, 1, 1],  # 32-bit indices (medium)
        8: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # 64-bit indices (full)
    }
    rlevel = reduction_tables[indsize][optlevel]
    # Degenerate sizes (mostly seen in tests) disable reduction
    if rlevel >= slicesize or slicesize <= chunksize * rlevel:
        rlevel = 1
    if indsize == 8:
        # Ensure that, for full indexes we will never perform a reduction.
        # This is required because of implementation assumptions.
        assert rlevel == 1
    return rlevel
# Python implementations of NextAfter and NextAfterF
#
# These implementations exist because the standard function
# nextafterf is not available on Microsoft platforms.
#
# These implementations are based on the IEEE representation of
# floats and doubles.
# Author: Shack Toms - shack@livedata.com
#
# Thanks to Shack Toms shack@livedata.com for NextAfter and NextAfterF
# implementations in Python. 2004-10-01
# epsilon = math.ldexp(1.0, -53) # smallest double such that
# # 0.5 + epsilon != 0.5
# epsilonF = math.ldexp(1.0, -24) # smallest float such that 0.5 + epsilonF
# != 0.5
# maxFloat = float(2**1024 - 2**971) # From the IEEE 754 standard
# maxFloatF = float(2**128 - 2**104) # From the IEEE 754 standard
# minFloat = math.ldexp(1.0, -1022) # min positive normalized double
# minFloatF = math.ldexp(1.0, -126) # min positive normalized float
# smallEpsilon = math.ldexp(1.0, -1074) # smallest increment for
# # doubles < minFloat
# smallEpsilonF = math.ldexp(1.0, -149) # smallest increment for
# # floats < minFloatF
# 2**1024 overflows a double, so this evaluates to IEEE +inf
infinity = math.ldexp(1.0, 1023) * 2
# 2**128: the single-precision overflow threshold (finite as a double)
infinityf = math.ldexp(1.0, 128)
# Finf = float("inf") # Infinite in the IEEE 754 standard (not avail in Win)
# A portable representation of NaN
# if sys.byteorder == "little":
# testNaN = struct.unpack("d", '\x01\x00\x00\x00\x00\x00\xf0\x7f')[0]
# elif sys.byteorder == "big":
# testNaN = struct.unpack("d", '\x7f\xf0\x00\x00\x00\x00\x00\x01')[0]
# else:
# raise ValueError("Byteorder '%s' not supported!" % sys.byteorder)
# This one seems better
# testNaN = infinity - infinity
# "infinity" for several types
# Maps dtype name -> [most negative, most positive] representable value
infinitymap = {
    'bool': [0, 1],
    'int8': [-2**7, 2**7 - 1],
    'uint8': [0, 2**8 - 1],
    'int16': [-2**15, 2**15 - 1],
    'uint16': [0, 2**16 - 1],
    'int32': [-2**31, 2**31 - 1],
    'uint32': [0, 2**32 - 1],
    'int64': [-2**63, 2**63 - 1],
    'uint64': [0, 2**64 - 1],
    'float32': [-infinityf, infinityf],
    'float64': [-infinity, infinity],
}
# Optional float types only exist on some platforms/numpy builds
if hasattr(numpy, 'float16'):
    infinitymap['float16'] = [-numpy.float16(numpy.inf),
                              numpy.float16(numpy.inf)]
if hasattr(numpy, 'float96'):
    infinitymap['float96'] = [-numpy.float96(numpy.inf),
                              numpy.float96(numpy.inf)]
if hasattr(numpy, 'float128'):
    infinitymap['float128'] = [-numpy.float128(numpy.inf),
                               numpy.float128(numpy.inf)]
# deprecated API (kept for backward compatibility)
infinityMap = infinitymap
infinityF = infinityf
# Utility functions
def inftype(dtype, itemsize, sign=+1):
    """Return a superior limit for maximum representable data type."""
    assert sign in [-1, +1]
    if dtype.kind == "S":
        # Strings: all-\x00 is the minimum, all-\xff the maximum
        fill = b"\x00" if sign < 0 else b"\xff"
        return fill * itemsize
    try:
        return infinitymap[dtype.name][sign >= 0]
    except KeyError:
        raise TypeError("Type %s is not supported" % dtype.name)
def string_next_after(x, direction, itemsize):
    """Return the next representable neighbor of the bytes string x in the
    appropriate direction.

    :param x: bytes value (padded with ``\\x00`` up to itemsize)
    :param direction: -1 for the previous value, +1 for the next one
    :param itemsize: width of the string type, in bytes
    """
    assert direction in [-1, +1]
    # Pad the string with \x00 chars until itemsize completion
    padsize = itemsize - len(x)
    if padsize > 0:
        x += b"\x00" * padsize
    # Least significant byte first, so carries/borrows start at index 0
    xlist = [bytes([i]) for i in x]
    xlist.reverse()
    i = 0
    if direction > 0:
        # Fix: the original compared the *list* against bytes, which is
        # never equal, so the maximum value wrapped around to all-\x00.
        if x == b"\xff" * itemsize:
            # Maximum value, return this
            return x
        for xchar in xlist:
            if ord(xchar) < 0xff:
                # Fix: bytes([...]) instead of chr().encode('ascii'),
                # which raised UnicodeEncodeError for bytes >= 0x80.
                xlist[i] = bytes([ord(xchar) + 1])
                break
            else:
                # Carry: 0xff rolls over to 0x00
                xlist[i] = b"\x00"
                i += 1
    else:
        if x == b"\x00" * itemsize:
            # Minimum value, return this
            return x
        for xchar in xlist:
            if ord(xchar) > 0x00:
                xlist[i] = bytes([ord(xchar) - 1])
                break
            else:
                # Borrow: 0x00 rolls under to 0xff
                xlist[i] = b"\xff"
                i += 1
    xlist.reverse()
    return b"".join(xlist)
def int_type_next_after(x, direction, itemsize):
    """Return the next representable neighbor of x in the appropriate
    direction."""
    assert direction in [-1, +1]
    # x is guaranteed to be either an int or a float.  Exact integers step
    # by one; floats step to the adjacent representable value, truncated
    # toward the requested direction.
    if isinstance(x, int):
        return x + direction
    if direction < 0:
        return int(numpy.nextafter(x, x - 1))
    return int(numpy.nextafter(x, x + 1)) + 1
def bool_type_next_after(x, direction, itemsize):
    """Return the next representable neighbor of x in the appropriate
    direction."""
    assert direction in [-1, +1]
    # Booleans saturate immediately: the neighbor is simply the endpoint
    # in the requested direction, regardless of x.
    return direction > 0
def nextafter(x, direction, dtype, itemsize):
    """Return the next representable neighbor of x in the appropriate
    direction.

    Dispatches on ``dtype.kind``: byte strings, booleans, integers and
    floats each have their own stepping rule; ``direction == 0`` is the
    identity.
    """
    assert direction in [-1, 0, +1]
    assert dtype.kind == "S" or type(x) in (bool, float, int)
    if direction == 0:
        return x
    if dtype.kind == "S":
        return string_next_after(x, direction, itemsize)
    if dtype.kind == 'b':
        return bool_type_next_after(x, direction, itemsize)
    if dtype.kind in ('i', 'u'):
        return int_type_next_after(x, direction, itemsize)
    if dtype.kind == "f":
        # x +/- 1 is only used as a target to pick the stepping direction.
        return numpy.nextafter(x, x + direction)
    raise TypeError("data type ``%s`` is not supported" % dtype)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
# Exercise: read any real number and show its integer portion. (math)
# Example: input 6.67; its integer part is 6.
import math
# Solution 1: math.trunc drops the fractional part (truncates toward zero).
n = float(input('Digite um número real: '))
print(f'O número digitado foi {n} e sua porção inteira é {math.trunc(n)}')
# Solution 2: int() on a float also truncates toward zero.
n = float(input('Digite um número real: '))
print(f'O número digitado é {n} e tem como parte inteira {int(n)}')
# Solution 3: format with zero decimal places.  NOTE: this ROUNDS instead
# of truncating (6.67 prints as 7), so it is not strictly equivalent.
n = float(input('Digite um número real: '))
print(f'O número digitado foi {n} e sua porção inteira é {n:.0f}')
# Three ways to solve this problem.
|
import React, { Component } from 'react'
class Product extends Component {
render() {
return (
<div>
</div>
)
}
}
export default Product;
|
"""fileformattoml.py unit tests."""
import pytest
from pypyr.context import Context
from pypyr.errors import KeyInContextHasNoValueError, KeyNotInContextError
import pypyr.steps.fileformattoml as fileformat
# region validation
def test_fileformattoml_no_in_obj_raises():
    """None in path raises."""
    context = Context({'k1': 'v1'})
    with pytest.raises(KeyNotInContextError) as err_info:
        fileformat.run_step(context)
    # The error must name both the missing key and the step itself.
    expected = ("context['fileFormatToml'] doesn't exist. "
                "It must exist for pypyr.steps.fileformattoml.")
    assert str(err_info.value) == expected
def test_fileformattoml_no_inpath_raises():
    """None in path raises."""
    context = Context({'fileFormatToml': 'v1'})
    with pytest.raises(KeyNotInContextError) as err_info:
        fileformat.run_step(context)
    # A scalar fileFormatToml has no 'in' key, so validation must fail.
    expected = ("context['fileFormatToml']['in'] doesn't exist. It must exist for "
                "pypyr.steps.fileformattoml.")
    assert str(err_info.value) == expected
def test_fileformattoml_empty_inpath_raises():
    """Empty in path raises."""
    context = Context({'fileFormatToml': {'in': None}})
    with pytest.raises(KeyInContextHasNoValueError) as err_info:
        fileformat.run_step(context)
    # The key exists but holds None, which is a distinct error type.
    expected = ("context['fileFormatToml']['in'] must have "
                "a value for pypyr.steps.fileformattoml.")
    assert str(err_info.value) == expected
# endregion validation
# region functional tests
def test_fileformattoml_pass_no_substitutions(fs):
    """Relative path to file should succeed."""
    # 'fs' is the pyfakefs fixture: all files live in a fake filesystem.
    in_path = './tests/testfiles/test.toml'
    fs.create_file(in_path, contents="""key1 = "value1"
key2 = "value2"
key3 = "value3"
""", encoding='utf-8')
    context = Context({
        'ok1': 'ov1',
        'fileFormatToml': {'in': in_path,
                           'out': './tests/testfiles/out/out.toml'}})
    fileformat.run_step(context)
    # The step must not add or remove context keys.
    assert context, "context shouldn't be None"
    assert len(context) == 2, "context should have 2 items"
    assert context['ok1'] == 'ov1'
    assert context['fileFormatToml'] == {
        'in': in_path,
        'out': './tests/testfiles/out/out.toml'}
    # No {substitution} tokens in the input, so output must match it verbatim.
    with open('./tests/testfiles/out/out.toml') as outfile:
        outcontents = outfile.read()
    assert outcontents == """key1 = "value1"
key2 = "value2"
key3 = "value3"
"""
def test_fileformattoml_pass_to_out_dir(fs):
    """Relative path to file succeed with out dir rather than full path."""
    in_path = './tests/testfiles/test.toml'
    fs.create_file(in_path, contents="""key1 = "value1"
key2 = "value2"
key3 = "value3"
""", encoding='utf-8')
    context = Context({
        'ok1': 'ov1',
        'fileFormatToml': {'in': in_path,
                           'out': './tests/testfiles/out/'}})
    fileformat.run_step(context)
    assert context, "context shouldn't be None"
    assert len(context) == 2, "context should have 2 items"
    assert context['ok1'] == 'ov1'
    assert context['fileFormatToml'] == {
        'in': in_path,
        'out': './tests/testfiles/out/'}
    # 'out' is a directory (trailing slash), so the input file name is reused.
    with open('./tests/testfiles/out/test.toml', encoding='utf-8') as outfile:
        outcontents = outfile.read()
    assert outcontents == """key1 = "value1"
key2 = "value2"
key3 = "value3"
"""
def test_fileformattoml_edit_no_substitutions(fs):
    """Relative path to file should succeed, no out means in place edit."""
    in_path = './tests/testfiles/out/edittest.toml'
    fs.create_file(in_path, contents="""key1 = "value1"
key2 = "value2"
key3 = "value3"
""", encoding='utf-8')
    # No 'out' key: the step must rewrite the input file itself.
    context = Context({
        'ok1': 'ov1',
        'fileFormatToml': {'in': './tests/testfiles/out/edittest.toml'}})
    fileformat.run_step(context)
    assert context, "context shouldn't be None"
    assert len(context) == 2, "context should have 2 items"
    assert context['ok1'] == 'ov1'
    assert context['fileFormatToml'] == {
        'in': './tests/testfiles/out/edittest.toml'}
    with open('./tests/testfiles/out/edittest.toml',
              encoding='utf-8') as outfile:
        outcontents = outfile.read()
    assert outcontents == """key1 = "value1"
key2 = "value2"
key3 = "value3"
"""
def test_fileformattoml_pass_with_substitutions(fs):
    """Relative path to file should succeed."""
    in_path = './tests/testfiles/testsubst.toml'
    # {k*} tokens appear in values AND in a table name to prove formatting
    # is applied across the whole document.
    fs.create_file(in_path, contents="""key1 = "{k1}value !£$% *"
["key2_{k2}"]
abc = "{k3} def {k4}"
def = [
    "l1",
    "l2 {k5}",
    "l3",
]
k21 = "value"
""", encoding='utf-8')
    context = Context({
        'k1': 'v1',
        'k2': 'v2',
        'k3': 'v3',
        'k4': 'v4',
        'k5': 'v5',
        'fileFormatToml': {'in': in_path,
                           'out': './tests/testfiles/out/outsubst.toml'}})
    fileformat.run_step(context)
    assert context, "context shouldn't be None"
    assert len(context) == 6, "context should have 6 items"
    assert context['k1'] == 'v1'
    assert context['fileFormatToml'] == {
        'in': './tests/testfiles/testsubst.toml',
        'out': './tests/testfiles/out/outsubst.toml'}
    with open('./tests/testfiles/out/outsubst.toml',
              encoding='utf-8') as outfile:
        outcontents = outfile.read()
    # Note the quoted table name collapses to a bare key once substituted.
    expected = """key1 = "v1value !£$% *"
[key2_v2]
abc = "v3 def v4"
def = [
    "l1",
    "l2 v5",
    "l3",
]
k21 = "value"
"""
    assert outcontents == expected
def test_fileformattoml_pass_with_path_substitutions(fs):
    """Relative path to file should succeed with path substitutions."""
    in_path = './tests/testfiles/testsubst.toml'
    fs.create_file(in_path, contents="""key1 = "{k1}value !£$% *"
["key2_{k2}"]
abc = "{k3} def {k4}"
def = [
    "l1",
    "l2 {k5}",
    "l3",
]
k21 = "value"
""", encoding='utf-8')
    # 'in'/'out' themselves contain {pathIn}/{pathOut} tokens: paths are
    # formatted before any file I/O happens.
    context = Context({
        'k1': 'v1',
        'k2': 'v2',
        'k3': 'v3',
        'k4': 'v4',
        'k5': 'v5',
        'pathIn': 'testsubst',
        'pathOut': 'outsubst',
        'fileFormatToml': {'in': './tests/testfiles/{pathIn}.toml',
                           'out': './tests/testfiles/out/{pathOut}.toml'}})
    fileformat.run_step(context)
    assert context, "context shouldn't be None"
    assert len(context) == 8, "context should have 8 items"
    assert context['k1'] == 'v1'
    # The context keeps the *unformatted* path templates.
    assert context['fileFormatToml'] == {
        'in': './tests/testfiles/{pathIn}.toml',
        'out': './tests/testfiles/out/{pathOut}.toml'}
    with open('./tests/testfiles/out/outsubst.toml',
              encoding='utf-8') as outfile:
        outcontents = outfile.read()
    expected = """key1 = "v1value !£$% *"
[key2_v2]
abc = "v3 def v4"
def = [
    "l1",
    "l2 v5",
    "l3",
]
k21 = "value"
"""
    assert outcontents == expected
def test_fileformattoml_with_encoding(fs):
    """Toml parses binary for utf-8 only, no encoding allowed."""
    in_path = './tests/testfiles/test.toml'
    fs.create_file(in_path, contents='key1 = "value"')
    context = Context({
        'ok1': 'ov1',
        'fileFormatToml': {'in': in_path,
                           'out': 'test/out/output.toml',
                           'encoding': 'utf-16'}})
    # TOML files are opened in binary mode, so passing an explicit encoding
    # is rejected by the underlying open() call.
    with pytest.raises(ValueError) as err:
        fileformat.run_step(context)
    assert str(err.value) == "binary mode doesn't take an encoding argument"
# endregion functional tests
|
// Highcharts "dark/gray" theme: gradient chart background, muted axis and
// legend colors, plus matching navigator, scrollbar and range-selector
// styling.  The final statement applies it globally via
// Highcharts.setOptions.  (Minified vendor code -- edit upstream, not here.)
Highcharts.theme={colors:["#DDDF0D","#7798BF","#55BF3B","#DF5353","#aaeeee","#ff0066","#eeaaee","#55BF3B","#DF5353","#7798BF","#aaeeee"],chart:{backgroundColor:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0,"rgb(96, 96, 96)"],[1,"rgb(16, 16, 16)"]]},borderWidth:0,borderRadius:15,plotBackgroundColor:null,plotShadow:false,plotBorderWidth:0},title:{style:{color:"#FFF",font:"16px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif"}},subtitle:{style:{color:"#DDD",font:"12px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif"}},xAxis:{gridLineWidth:0,lineColor:"#999",tickColor:"#999",labels:{style:{color:"#999",fontWeight:"bold"}},title:{style:{color:"#AAA",font:"bold 12px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif"}}},yAxis:{alternateGridColor:null,minorTickInterval:null,gridLineColor:"rgba(255, 255, 255, .1)",minorGridLineColor:"rgba(255,255,255,0.07)",lineWidth:0,tickWidth:0,labels:{style:{color:"#999",fontWeight:"bold"}},title:{style:{color:"#AAA",font:"bold 12px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif"}}},legend:{itemStyle:{color:"#CCC"},itemHoverStyle:{color:"#FFF"},itemHiddenStyle:{color:"#333"}},labels:{style:{color:"#CCC"}},tooltip:{backgroundColor:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0,"rgba(96, 96, 96, .8)"],[1,"rgba(16, 16, 16, 
.8)"]]},borderWidth:0,style:{color:"#FFF"}},plotOptions:{series:{shadow:true},line:{dataLabels:{color:"#CCC"},marker:{lineColor:"#333"}},spline:{marker:{lineColor:"#333"}},scatter:{marker:{lineColor:"#333"}},candlestick:{lineColor:"white"}},toolbar:{itemStyle:{color:"#CCC"}},navigation:{buttonOptions:{symbolStroke:"#DDDDDD",hoverSymbolStroke:"#FFFFFF",theme:{fill:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0.4,"#606060"],[0.6,"#333333"]]},stroke:"#000000"}}},rangeSelector:{buttonTheme:{fill:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0.4,"#888"],[0.6,"#555"]]},stroke:"#000000",style:{color:"#CCC",fontWeight:"bold"},states:{hover:{fill:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0.4,"#BBB"],[0.6,"#888"]]},stroke:"#000000",style:{color:"white"}},select:{fill:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0.1,"#000"],[0.3,"#333"]]},stroke:"#000000",style:{color:"yellow"}}}},inputStyle:{backgroundColor:"#333",color:"silver"},labelStyle:{color:"silver"}},navigator:{handles:{backgroundColor:"#666",borderColor:"#AAA"},outlineColor:"#CCC",maskFill:"rgba(16, 16, 16, 0.5)",series:{color:"#7798BF",lineColor:"#A6C7ED"}},scrollbar:{barBackgroundColor:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0.4,"#888"],[0.6,"#555"]]},barBorderColor:"#CCC",buttonArrowColor:"#CCC",buttonBackgroundColor:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0.4,"#888"],[0.6,"#555"]]},buttonBorderColor:"#CCC",rifleColor:"#FFF",trackBackgroundColor:{linearGradient:{x1:0,y1:0,x2:0,y2:1},stops:[[0,"#000"],[1,"#333"]]},trackBorderColor:"#666"},legendBackgroundColor:"rgba(48, 48, 48, 0.8)",legendBackgroundColorSolid:"rgb(70, 70, 70)",dataLabelsColor:"#444",textColor:"#E0E0E0",maskColor:"rgba(255,255,255,0.3)"};var highchartsOptions=Highcharts.setOptions(Highcharts.theme);
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
# Copyright (c) 2021 Tharuk, This is a part of nstcentertainmentbot.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
from sqlalchemy import Column, Integer, UnicodeText
from nstcentertainmentbot.helpers.database import SESSION, BASE
from nstcentertainmentbot import dp
class Users(BASE):
    """SQLAlchemy model: one row per known Telegram user."""
    __tablename__ = "users"
    # Telegram numeric user id; doubles as the primary key.
    user_id = Column(Integer, primary_key=True)
    # Current @username; may be None (usernames are optional on Telegram).
    username = Column(UnicodeText)
    def __init__(self, user_id, username=None):
        self.user_id = user_id
        self.username = username
    def __repr__(self):
        return "<User {} ({})>".format(self.username, self.user_id)
# Create the table at import time if it does not exist yet.
Users.__table__.create(checkfirst=True)
# Serializes all writes to the users table across handler threads.
INSERTION_LOCK = threading.RLock()
def ensure_bot_in_db():
    """Insert or refresh the bot's own row in the users table."""
    with INSERTION_LOCK:
        # merge() upserts: inserts the row or updates it if it exists.
        SESSION.merge(Users(dp.bot.id, dp.bot.username))
        SESSION.commit()
def update_user(user_id, username):
    """Create the user row if missing, otherwise refresh its username."""
    with INSERTION_LOCK:
        existing = SESSION.query(Users).get(user_id)
        if existing:
            existing.username = username
        else:
            SESSION.add(Users(user_id, username))
            SESSION.flush()
        SESSION.commit()
def get_all_users():
    """Return every stored user row; always release the session afterwards."""
    try:
        return SESSION.query(Users).all()
    finally:
        SESSION.close()
def users_count():
    """Return the number of stored users; always release the session."""
    try:
        return SESSION.query(Users).count()
    finally:
        SESSION.close()
# Make sure the bot itself is present in the table at import time.
ensure_bot_in_db()
|
// Expose the default export of the Material-UI BackupTwoTone icon module
// (FFI shim: re-exported under an *Impl name for the consuming bindings).
exports.backupTwoToneImpl = require('@material-ui/icons/BackupTwoTone').default;
|
from typing import List, Tuple
import pandas as pd
from glob import glob
from os import path
# A dataset paired with its short name (file name minus prefix/extension).
TaggedName = Tuple[str, pd.DataFrame]
TaggedNameList = List[TaggedName]


def get_testing_set(folder: str, prefix: str) -> TaggedNameList:
    """Load every test dataset in *folder* whose file name starts with
    ``<prefix>_``.

    Each matching file is read as a tab-separated, header-less table.

    :param folder: The location where you're keeping the test data
    :param prefix: The prefix (separated from file name by an underscore)
        associated with the set of datasets you're interested in
    :return: A list of tuples, where the first item of each is the data's
        name (the part after the prefix and before the extension) and the
        second item the actual data, as a pandas dataframe.
    """
    matches = glob(path.join(folder, prefix) + '_*')
    tagged = []
    for match in matches:
        name = match.split(f'{prefix}_')[-1].split('.')[0]
        tagged.append((name, pd.read_csv(match, sep='\t', header=None)))
    return tagged
|
#!/usr/bin/env python
from collections import namedtuple
from datetime import datetime
from typing import List
import pandas as pd
from hummingbot.core.data_type.trade_fee import TradeFeeBase
from hummingbot.core.data_type.common import OrderType, TradeType
class Trade(namedtuple("_Trade", "trading_pair, side, price, amount, order_type, market, timestamp, trade_fee")):
    """A single executed trade, with a helper for tabular reporting."""
    trading_pair: str
    side: TradeType
    price: float
    amount: float
    order_type: OrderType
    market: str
    timestamp: float
    trade_fee: TradeFeeBase

    @classmethod
    def to_pandas(cls, trades: List):
        """Render *trades* as a DataFrame, one row per trade."""
        columns: List[str] = ["trading_pair",
                              "price",
                              "quantity",
                              "order_type",
                              "trade_side",
                              "market",
                              "timestamp",
                              "fee_percent",
                              "flat_fee / gas"]
        rows = []
        for item in trades:
            flat_fees = item.trade_fee.flat_fees
            if flat_fees:
                # Each flat fee is an (amount, token) style pair.
                flat_fee_str = ",".join(f"{fee_tuple[0]} {fee_tuple[1]}"
                                        for fee_tuple in flat_fees)
            else:
                flat_fee_str = "None"
            rows.append([
                item.trading_pair,
                item.price,
                item.amount,
                item.order_type.name.lower(),
                item.side.name.lower(),
                item.market,
                datetime.fromtimestamp(item.timestamp).strftime("%Y-%m-%d %H:%M:%S"),
                item.trade_fee.percent,
                flat_fee_str,
            ])
        return pd.DataFrame(data=rows, columns=columns)

    @property
    def trade_type(self):
        """Name of the trade side enum member."""
        return self.side.name
|
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* ft_memmove.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: slaanani <marvin@42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2019/05/26 04:39:26 by slaanani #+# #+# */
/* Updated: 2019/05/26 04:39:28 by slaanani ### ########.fr */
/* */
/* ************************************************************************** */
#include "libft.h"
/*
** Copy len bytes from src to dst, handling overlapping regions.
** If dst precedes src a forward copy is safe; otherwise copy backwards
** so bytes are not clobbered before they are read.
** Fix: the previous version narrowed len to a (possibly negative) int,
** which is wrong for len > INT_MAX; size_t is used throughout now.
*/

void	*ft_memmove(void *dst, const void *src, size_t len)
{
	unsigned char		*d;
	const unsigned char	*s;
	size_t				i;

	d = (unsigned char *)dst;
	s = (const unsigned char *)src;
	if (d < s)
	{
		i = 0;
		while (i < len)
		{
			d[i] = s[i];
			i++;
		}
	}
	else
	{
		while (len--)
			d[len] = s[len];
	}
	return (dst);
}
|
from django.core.paginator import Paginator
from django.shortcuts import render

from .models import Department


def index(request):
    """Department list view, paginated at 10 rows per page.

    The ``page`` query parameter selects the page; ``Paginator.get_page``
    clamps missing/invalid/out-of-range values to a valid page.
    """
    # Fix: the original imported `render` twice; the duplicate is removed.
    department_list = Department.objects.all()
    paginator = Paginator(department_list, 10)
    page = request.GET.get('page')
    departments = paginator.get_page(page)
    return render(request, 'department/views/index.html', {'departments': departments})
|
// FullCalendar Spanish ("es") locale bundle: registers the moment.js "es"
// locale, the jQuery UI datepicker Spanish strings, and FullCalendar's own
// Spanish button/all-day labels.  UMD wrapper supports AMD and globals.
// (Minified vendor code -- edit upstream, not here.)
(function(e){"function"==typeof define&&define.amd?define(["jquery","moment"],e):e(jQuery,moment)})(function(e,t){var n="ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_"),a="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_");(t.defineLocale||t.lang).call(t,"es",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(e,t){return/-MMM-/.test(t)?a[e.month()]:n[e.month()]},weekdays:"domingo_lunes_martes_miércoles_jueves_viernes_sábado".split("_"),weekdaysShort:"dom._lun._mar._mié._jue._vie._sáb.".split("_"),weekdaysMin:"Do_Lu_Ma_Mi_Ju_Vi_Sá".split("_"),longDateFormat:{LT:"H:mm",L:"DD/MM/YYYY",LL:"D [de] MMMM [del] YYYY",LLL:"D [de] MMMM [del] YYYY LT",LLLL:"dddd, D [de] MMMM [del] YYYY LT"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[mañana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un día",dd:"%d días",M:"un mes",MM:"%d meses",y:"un año",yy:"%d 
años"},ordinal:"%dº",week:{dow:1,doy:4}}),e.fullCalendar.datepickerLang("es","es",{closeText:"Cerrar",prevText:"<Ant",nextText:"Sig>",currentText:"Hoy",monthNames:["enero","febrero","marzo","abril","mayo","junio","julio","agosto","septiembre","octubre","noviembre","diciembre"],monthNamesShort:["ene","feb","mar","abr","may","jun","jul","ago","sep","oct","nov","dic"],dayNames:["domingo","lunes","martes","miércoles","jueves","viernes","sábado"],dayNamesShort:["dom","lun","mar","mié","jue","vie","sáb"],dayNamesMin:["D","L","M","X","J","V","S"],weekHeader:"Sm",dateFormat:"dd/mm/yy",firstDay:1,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""}),e.fullCalendar.lang("es",{defaultButtonText:{month:"Mes",week:"Semana",day:"Día",list:"Agenda"},allDayText:"Todo el día"})});
|
import warnings
import logging
from pathlib import Path
import torch.nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.autograd as autograd
import dadmatools.models.flair.nn
import dadmatools.models.flair as flair
import torch
from dadmatools.models.flair.data import Dictionary, Sentence, Token, Label
from dadmatools.models.flair.datasets import DataLoader
from dadmatools.models.flair.embeddings import TokenEmbeddings
from dadmatools.models.flair.file_utils import cached_path
from dadmatools.models.flair.custom_data_loader import BatchedData
from typing import List, Tuple, Union
from dadmatools.models.flair.training_utils import Result, store_embeddings
from .biaffine_attention import BiaffineAttention
from tqdm import tqdm
from tabulate import tabulate
import numpy as np
import pdb
import copy
import time
import sys
# sys.path.insert(0,'/home/wangxy/workspace/flair/parser')
# sys.path.append('./flair/parser/modules')
from dadmatools.models.flair.parser.modules import CHAR_LSTM, MLP, BertEmbedding, Biaffine, BiLSTM, TrilinearScorer
from dadmatools.models.flair.parser.modules.dropout import IndependentDropout, SharedDropout
from dadmatools.models.flair.parser.utils.alg import eisner, crf
from dadmatools.models.flair.parser.utils.metric import Metric
from dadmatools.models.flair.parser.utils.fn import ispunct, istree, numericalize_arcs
# from flair.parser.utils.fn import ispunct
import torch
import torch.nn as nn
from torch.nn.utils.rnn import (pack_padded_sequence, pad_packed_sequence,
pad_sequence)
from .mst_decoder import MST_inference
def process_potential(log_potential):
    """Fold the <ROOT> scores of an arc-potential tensor into the diagonal.

    Input is (batch, sent_len+1, sent_len+1[, labels]) in (dependency, head)
    order; the result is (batch, sent_len, sent_len[, labels]) in
    (head, dependency) order, with each word's root-attachment score stored
    on the diagonal.

    NOTE(review): the slice of ``transpose`` is a view, so the diagonal
    write also mutates the caller's tensor in place.
    """
    # Scores for attaching each word directly to <ROOT>: (batch, sent_len).
    attach_to_root = log_potential[:, 1:, 0]
    # Drop the <ROOT> row/column and flip to (head, dependency) order.
    inner = log_potential.transpose(1, 2)[:, 1:, 1:]
    n_words = inner.shape[1]
    diag = torch.arange(n_words)
    inner[:, diag, diag] = attach_to_root
    return inner
def get_struct_predictions(dist):
    """Decode per-token head indices from a structured distribution.

    ``dist.argmax`` is (batch, sent_len, sent_len) in (head, dependency)
    order with root attachments on the diagonal; the result is
    (batch, sent_len+1) giving each token's head index (0 = <ROOT>).
    """
    best = dist.argmax
    batch, n_words, _ = best.shape
    full = torch.zeros([batch, n_words + 1, n_words + 1]).type_as(best)
    full[:, 1:, 1:] = best
    # Back to (dependency, head) order, with an explicit <ROOT> column 0.
    full = full.transpose(1, 2)
    idx = torch.arange(n_words + 1)
    full[:, :, 0] = full[:, idx, idx]
    full[:, idx, idx] = 0
    return full.argmax(-1)
def convert_score_back(marginals):
    """Inverse layout change of ``process_potential``: re-insert <ROOT>.

    Takes (batch, sent_len, sent_len[, ...]) scores in (head, dependency)
    order with root scores on the diagonal and returns
    (batch, sent_len+1, sent_len+1[, ...]) in (dependency, head) order with
    the root scores moved into column 0 and a zeroed diagonal.
    """
    batch, n_words = marginals.shape[0], marginals.shape[1]
    out_shape = [batch, n_words + 1, n_words + 1] + list(marginals.shape[3:])
    out = torch.zeros(out_shape).type_as(marginals)
    out[:, 1:, 1:] = marginals
    out = out.transpose(1, 2)
    idx = torch.arange(n_words + 1)
    out[:, :, 0] = out[:, idx, idx]
    out[:, idx, idx] = 0
    return out
def is_punctuation(word, pos, punct_set=None):
    """Decide whether a token counts as punctuation.

    With an explicit ``punct_set`` the POS tag is checked against it;
    otherwise fall back to ``is_uni_punctuation`` (defined elsewhere in
    this module) on the word form.
    """
    if punct_set is not None:
        return pos in punct_set
    return is_uni_punctuation(word)
import uuid
# Short random id, handy for naming run-specific temporary artifacts.
uid = uuid.uuid4().hex[:6]
log = logging.getLogger("flair")
# Sentinel tags delimiting a sequence for CRF-style decoding.
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
def to_scalar(var):
    """Return the first element of *var* as a plain Python number."""
    flat = var.view(-1).detach()
    return flat.tolist()[0]
def argmax(vec):
    """Return the column index of the maximum along dim 1 (row 0 of a 1xN tensor)."""
    # Inlined scalar extraction: take the first element of the index tensor.
    idx = torch.max(vec, 1)[1]
    return idx.view(-1).detach().tolist()[0]
def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))) for a 1xN tensor."""
    # Inlined argmax: index of the maximum entry in row 0.
    best = torch.max(vec, 1)[1].view(-1).detach().tolist()[0]
    max_score = vec[0, best]
    # Subtract the max before exponentiating to avoid overflow.
    broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(vec - broadcast)))
def argmax_batch(vecs):
    """Argmax along dim 1 for every row of a batch, as an index tensor."""
    return torch.max(vecs, 1)[1]
def log_sum_exp_batch(vecs):
    """Row-wise numerically stable log-sum-exp for a (B, N) tensor."""
    row_max = torch.max(vecs, 1)[0]
    # Shift by the per-row max before exponentiating for stability.
    shifted = vecs - row_max[:, None].repeat(1, vecs.shape[1])
    return row_max + torch.log(torch.sum(torch.exp(shifted), 1))
def log_sum_exp_vb(vec, m_size):
    """
    calculate log of exp sum
    args:
        vec (batch_size, vanishing_dim, hidden_dim) : input tensor
        m_size : hidden_dim
    return:
        batch_size, hidden_dim
    """
    # Per-column positions of the maxima along the vanishing dimension.
    idx = torch.max(vec, 1)[1]  # B * M
    max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size)  # B * M
    # Max-shifted exponentials keep the sum numerically stable.
    shifted = torch.exp(vec - max_score.expand_as(vec))
    return max_score.view(-1, m_size) + torch.log(torch.sum(shifted, 1)).view(-1, m_size)  # B * M
def pad_tensors(tensor_list):
    """Stack variable-length tensors into one zero-padded long batch tensor.

    Returns the padded tensor (on ``flair.device``) plus the original
    per-item lengths.
    """
    lengths = [t.shape[0] for t in tensor_list]
    shape = [len(tensor_list), max(lengths)] + list(tensor_list[0].shape[1:])
    padded = torch.zeros(*shape, dtype=torch.long, device=flair.device)
    for row, tensor in enumerate(tensor_list):
        padded[row, : lengths[row]] = tensor
    return padded, lengths
# Part of Codes are from https://github.com/yzhangcs/biaffine-parser
class SemanticDependencyParser(flair.nn.Model):
    def __init__(
        self,
        hidden_size: int,
        embeddings: TokenEmbeddings,
        tag_dictionary: Dictionary,
        tag_type: str,
        use_crf: bool = False,
        use_rnn: bool = False,
        train_initial_hidden_state: bool = False,
        punct: bool = False, # ignore all punct in default
        tree: bool = False, # keep the dependency with tree structure
        n_mlp_arc = 500,
        n_mlp_rel = 100,
        mlp_dropout = .33,
        use_second_order = False,
        token_loss = False,
        n_mlp_sec = 150,
        init_std = 0.25,
        factorize = True,
        use_sib = True,
        use_gp = True,
        use_cop = False,
        iterations = 3,
        binary = True,
        is_mst = False,
        rnn_layers: int = 3,
        lstm_dropout: float = 0.33,
        dropout: float = 0.0,
        word_dropout: float = 0.33,
        locked_dropout: float = 0.5,
        pickle_module: str = "pickle",
        interpolation: float = 0.5,
        factorize_interpolation: float = 0.025,
        config = None,
        use_decoder_timer = True,
        debug = False,
        target_languages = 1,
        word_map = None,
        char_map = None,
        relearn_embeddings = False,
        distill_arc: bool = False,
        distill_rel: bool = False,
        distill_crf: bool = False,
        distill_posterior: bool = False,
        distill_prob: bool = False,
        distill_factorize: bool = False,
        crf_attention: bool = False,
        temperature: float = 1,
        diagonal: bool = False,
        is_srl: bool = False,
        embedding_selector = False,
        use_rl: bool = False,
        use_gumbel: bool = False,
        identity: bool = False,
        embedding_attention: bool = False,
        testing: bool = False,
        is_sdp: bool = False,
    ):
        """
        Initializes a SequenceTagger
        (here: a biaffine semantic-dependency parser; the remaining keyword
        arguments configure second-order scoring, knowledge distillation and
        embedding selection -- see the inline comments below)
        :param hidden_size: number of hidden states in RNN
        :param embeddings: word embeddings used in tagger
        :param tag_dictionary: dictionary of tags you want to predict
        :param tag_type: string identifier for tag type
        :param use_crf: if True use CRF decoder, else project directly to tag space
        :param use_rnn: if True use RNN layer, otherwise use word embeddings directly
        :param rnn_layers: number of RNN layers
        :param dropout: dropout probability
        :param word_dropout: word dropout probability
        :param locked_dropout: locked dropout probability
        :param distill_crf: CRF information distillation
        :param crf_attention: use CRF distillation weights
        :param biaf_attention: use bilinear attention for word-KD distillation

        NOTE(review): ``target_languages`` and ``use_decoder_timer`` are
        accepted but never used in this constructor body.
        """
        super(SemanticDependencyParser, self).__init__()
        # Hard-coded feature switches inherited from the sequence tagger design.
        self.debug = False
        self.biaf_attention = False
        self.token_level_attention = False
        self.use_language_attention = False
        self.use_language_vector = False
        self.use_crf = use_crf
        self.use_decoder_timer = False
        self.sentence_level_loss = False
        self.train_initial_hidden_state = train_initial_hidden_state
        #add interpolation for target loss and distillation loss
        self.token_loss = token_loss
        self.interpolation = interpolation
        # NOTE(review): overrides the hard-coded ``self.debug = False`` above.
        self.debug = debug
        self.use_rnn = use_rnn
        self.hidden_size = hidden_size
        self.rnn_layers: int = rnn_layers
        self.embeddings = embeddings
        self.config = config
        self.punct = punct
        # POS tags treated as punctuation when ``punct`` is False.
        self.punct_list = ['``', "''", ':', ',', '.', 'PU', 'PUNCT']
        self.tree = tree
        self.is_mst = is_mst
        self.is_srl = is_srl
        self.use_rl = use_rl
        self.use_gumbel = use_gumbel
        self.embedding_attention = embedding_attention
        # set the dictionaries
        self.tag_dictionary: Dictionary = tag_dictionary
        self.tag_type: str = tag_type
        self.tagset_size: int = len(tag_dictionary)
        self.word_map = word_map
        self.char_map = char_map
        self.is_sdp = is_sdp
        # distillation part
        self.distill_arc = distill_arc
        self.distill_rel = distill_rel
        self.distill_crf = distill_crf
        self.distill_posterior = distill_posterior
        self.distill_prob = distill_prob
        self.distill_factorize = distill_factorize
        self.factorize_interpolation = factorize_interpolation
        self.temperature = temperature
        self.crf_attention = crf_attention
        self.diagonal = diagonal
        self.embedding_selector = embedding_selector
        # initialize the network architecture
        self.nlayers: int = rnn_layers
        self.hidden_word = None
        self.identity = identity
        # dropouts
        self.use_dropout: float = dropout
        self.use_word_dropout: float = word_dropout
        self.use_locked_dropout: float = locked_dropout
        self.pickle_module = pickle_module
        if dropout > 0.0:
            self.dropout = torch.nn.Dropout(dropout)
        if word_dropout > 0.0:
            self.word_dropout = flair.nn.WordDropout(word_dropout)
        if locked_dropout > 0.0:
            self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
        rnn_input_dim: int = self.embeddings.embedding_length
        self.relearn_embeddings: bool = relearn_embeddings
        # Learnable per-embedding weights for embedding selection/attention;
        # with gumbel selection each embedding gets two logits (keep/drop).
        if (self.embedding_selector and not self.use_rl) or self.embedding_attention:
            if use_gumbel:
                self.selector = Parameter(
                    torch.zeros(len(self.embeddings.embeddings),2),
                    requires_grad=True,
                )
            else:
                self.selector = Parameter(
                    torch.zeros(len(self.embeddings.embeddings)),
                    requires_grad=True,
                )
        if self.relearn_embeddings:
            # Linear re-projection of the (possibly frozen) stacked embeddings.
            self.embedding2nn = torch.nn.Linear(rnn_input_dim, rnn_input_dim)
        self.bidirectional = True
        self.rnn_type = "LSTM"
        if not self.use_rnn:
            self.bidirectional = False
        # bidirectional LSTM on top of embedding layer
        num_directions = 1
        # hiddens
        self.n_mlp_arc = n_mlp_arc
        self.n_mlp_rel = n_mlp_rel
        self.mlp_dropout = mlp_dropout
        self.n_mlp_sec = n_mlp_sec
        self.init_std = init_std
        self.lstm_dropout = lstm_dropout
        self.factorize = factorize
        # Initialization of Biaffine Parser
        self.embed_dropout = IndependentDropout(p=word_dropout)
        if self.use_rnn:
            self.rnn = BiLSTM(input_size=rnn_input_dim,
                              hidden_size=hidden_size,
                              num_layers=self.nlayers,
                              dropout=self.lstm_dropout)
            self.lstm_dropout_func = SharedDropout(p=self.lstm_dropout)
            # num_directions = 2 if self.bidirectional else 1
            # if self.rnn_type in ["LSTM", "GRU"]:
            #     self.rnn = getattr(torch.nn, self.rnn_type)(
            #         rnn_input_dim,
            #         hidden_size,
            #         num_layers=self.nlayers,
            #         dropout=0.0 if self.nlayers == 1 else 0.5,
            #         bidirectional=True,
            #     )
            #     # Create initial hidden state and initialize it
            #     if self.train_initial_hidden_state:
            #         self.hs_initializer = torch.nn.init.xavier_normal_
            #         self.lstm_init_h = Parameter(
            #             torch.randn(self.nlayers * num_directions, self.hidden_size),
            #             requires_grad=True,
            #         )
            #         self.lstm_init_c = Parameter(
            #             torch.randn(self.nlayers * num_directions, self.hidden_size),
            #             requires_grad=True,
            #         )
            #         # TODO: Decide how to initialize the hidden state variables
            #         # self.hs_initializer(self.lstm_init_h)
            #         # self.hs_initializer(self.lstm_init_c)
            # final linear map to tag space
            mlp_input_hidden = hidden_size * 2
        else:
            mlp_input_hidden = rnn_input_dim
        # the MLP layers
        # Separate head/dependent representations for arcs and relations.
        self.mlp_arc_h = MLP(n_in=mlp_input_hidden,
                             n_hidden=n_mlp_arc,
                             dropout=mlp_dropout,
                             identity=self.identity)
        self.mlp_arc_d = MLP(n_in=mlp_input_hidden,
                             n_hidden=n_mlp_arc,
                             dropout=mlp_dropout,
                             identity=self.identity)
        self.mlp_rel_h = MLP(n_in=mlp_input_hidden,
                             n_hidden=n_mlp_rel,
                             dropout=mlp_dropout,
                             identity=self.identity)
        self.mlp_rel_d = MLP(n_in=mlp_input_hidden,
                             n_hidden=n_mlp_rel,
                             dropout=mlp_dropout,
                             identity=self.identity)
        # the Biaffine layers
        self.arc_attn = Biaffine(n_in=n_mlp_arc,
                                 bias_x=True,
                                 bias_y=False)
        self.rel_attn = Biaffine(n_in=n_mlp_rel,
                                 n_out=self.tagset_size,
                                 bias_x=True,
                                 bias_y=True,
                                 diagonal=self.diagonal,)
        self.binary = binary
        # the Second Order Parts
        # Optional trilinear scorers for sibling / co-parent / grandparent
        # interactions, refined over ``iterations`` mean-field steps.
        self.use_second_order=use_second_order
        self.iterations=iterations
        self.use_sib = use_sib
        self.use_cop = use_cop
        self.use_gp = use_gp
        if self.use_second_order:
            if use_sib:
                self.mlp_sib_h = MLP(n_in=mlp_input_hidden,
                                     n_hidden=n_mlp_sec,
                                     dropout=mlp_dropout,
                                     identity=self.identity)
                self.mlp_sib_d = MLP(n_in=mlp_input_hidden,
                                     n_hidden=n_mlp_sec,
                                     dropout=mlp_dropout,
                                     identity=self.identity)
                self.trilinear_sib = TrilinearScorer(n_mlp_sec,n_mlp_sec,n_mlp_sec,init_std=init_std, rank = n_mlp_sec, factorize = factorize)
            if use_cop:
                self.mlp_cop_h = MLP(n_in=mlp_input_hidden,
                                     n_hidden=n_mlp_sec,
                                     dropout=mlp_dropout,
                                     identity=self.identity)
                self.mlp_cop_d = MLP(n_in=mlp_input_hidden,
                                     n_hidden=n_mlp_sec,
                                     dropout=mlp_dropout,
                                     identity=self.identity)
                self.trilinear_cop = TrilinearScorer(n_mlp_sec,n_mlp_sec,n_mlp_sec,init_std=init_std, rank = n_mlp_sec, factorize = factorize)
            if use_gp:
                self.mlp_gp_h = MLP(n_in=mlp_input_hidden,
                                    n_hidden=n_mlp_sec,
                                    dropout=mlp_dropout,
                                    identity=self.identity)
                self.mlp_gp_d = MLP(n_in=mlp_input_hidden,
                                    n_hidden=n_mlp_sec,
                                    dropout=mlp_dropout,
                                    identity=self.identity)
                self.mlp_gp_hd = MLP(n_in=mlp_input_hidden,
                                     n_hidden=n_mlp_sec,
                                     dropout=mlp_dropout,
                                     identity=self.identity)
                self.trilinear_gp = TrilinearScorer(n_mlp_sec,n_mlp_sec,n_mlp_sec,init_std=init_std, rank = n_mlp_sec, factorize = factorize)
        # self.pad_index = pad_index
        # self.unk_index = unk_index
        # Loss functions.  With ``binary``, each arc is scored as an
        # independent logistic decision (BCE) with per-element reduction.
        self.rel_criterion = nn.CrossEntropyLoss()
        self.arc_criterion = nn.CrossEntropyLoss()
        if self.binary:
            self.rel_criterion = nn.CrossEntropyLoss(reduction='none')
            self.arc_criterion = nn.BCEWithLogitsLoss(reduction='none')
        if self.crf_attention:
            self.distill_criterion = nn.CrossEntropyLoss(reduction='none')
            self.distill_rel_criterion = nn.CrossEntropyLoss(reduction='none')
        # ``testing`` skips the device move -- presumably so the model can be
        # constructed without a configured flair device; confirm with callers.
        if not testing:
            self.to(flair.device)
def _init_model_with_state_dict(state, testing = False):
use_dropout = 0.0 if not "use_dropout" in state.keys() else state["use_dropout"]
use_word_dropout = (
0.0 if not "use_word_dropout" in state.keys() else state["use_word_dropout"]
)
use_locked_dropout = (
0.0
if not "use_locked_dropout" in state.keys()
else state["use_locked_dropout"]
)
if 'biaf_attention' in state:
biaf_attention = state['biaf_attention']
else:
biaf_attention = False
if 'token_level_attention' in state:
token_level_attention = state['token_level_attention']
else:
token_level_attention = False
if 'teacher_hidden' in state:
teacher_hidden = state['teacher_hidden']
else:
teacher_hidden = 256
use_cnn=state["use_cnn"] if 'use_cnn' in state else False
model = SemanticDependencyParser(
hidden_size=state["hidden_size"],
embeddings=state["embeddings"],
tag_dictionary=state["tag_dictionary"],
tag_type=state["tag_type"],
use_crf=state["use_crf"],
use_rnn=state["use_rnn"],
tree=state["tree"],
punct=state["punct"],
train_initial_hidden_state=state["train_initial_hidden_state"],
n_mlp_arc = state["n_mlp_arc"],
n_mlp_rel = state["n_mlp_rel"],
mlp_dropout = state["mlp_dropout"],
token_loss = False if 'token_loss' not in state else state["token_loss"],
use_second_order = state["use_second_order"],
n_mlp_sec = state["n_mlp_sec"],
init_std = state["init_std"],
factorize = state["factorize"],
use_sib = state["use_sib"],
use_gp = state["use_gp"],
use_cop = state["use_cop"],
iterations = state["iterations"],
is_mst = False if "is_mst" not in state else state["is_mst"],
binary = state["binary"],
rnn_layers=state["rnn_layers"],
dropout=use_dropout,
word_dropout=use_word_dropout,
locked_dropout=use_locked_dropout,
config=state['config'] if "config" in state else None,
word_map=None if 'word_map' not in state else state['word_map'],
char_map=None if 'char_map' not in state else state['char_map'],
relearn_embeddings = True if 'relearn_embeddings' not in state else state['relearn_embeddings'],
distill_arc = False if 'distill_arc' not in state else state['distill_arc'],
distill_rel = False if 'distill_rel' not in state else state['distill_rel'],
distill_crf = False if 'distill_crf' not in state else state['distill_crf'],
distill_posterior = False if 'distill_posterior' not in state else state['distill_posterior'],
distill_prob = False if 'distill_prob' not in state else state['distill_prob'],
distill_factorize = False if 'distill_factorize' not in state else state['distill_factorize'],
factorize_interpolation = False if 'factorize_interpolation' not in state else state['factorize_interpolation'],
diagonal = False if 'diagonal' not in state else state['diagonal'],
embedding_selector = False if "embedding_selector" not in state else state["embedding_selector"],
use_rl = False if "use_rl" not in state else state["use_rl"],
use_gumbel = False if "use_gumbel" not in state else state["use_gumbel"],
identity = False if "identity" not in state else state["identity"],
embedding_attention = False if "embedding_attention" not in state else state["embedding_attention"],
testing = testing,
is_sdp = False if "is_sdp" not in state else state["is_sdp"],
)
model.load_state_dict(state["state_dict"])
return model
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"embeddings": self.embeddings,
"hidden_size": self.hidden_size,
"tag_dictionary":self.tag_dictionary,
"tag_type":self.tag_type,
"tree":self.tree,
"punct":self.punct,
"use_crf": self.use_crf,
"use_rnn":self.use_rnn,
"train_initial_hidden_state": self.train_initial_hidden_state,
"n_mlp_arc": self.n_mlp_arc,
"n_mlp_rel": self.n_mlp_rel,
"mlp_dropout": self.mlp_dropout,
"token_loss": self.token_loss,
"use_second_order": self.use_second_order,
"n_mlp_sec": self.n_mlp_sec,
"init_std": self.init_std,
"factorize": self.factorize,
"use_sib": self.use_sib,
"use_gp": self.use_gp,
"use_cop": self.use_cop,
"iterations": self.iterations,
"is_mst": self.is_mst,
"binary": self.binary,
"rnn_layers": self.rnn_layers,
"dropout": self.use_dropout,
"word_dropout": self.use_word_dropout,
"locked_dropout": self.use_locked_dropout,
"config": self.config,
"word_map": self.word_map,
"char_map": self.char_map,
"relearn_embeddings": self.relearn_embeddings,
"distill_arc": self.distill_arc,
"distill_rel": self.distill_rel,
"distill_crf": self.distill_crf,
"distill_posterior": self.distill_posterior,
"distill_prob": self.distill_prob,
"distill_factorize": self.distill_factorize,
"factorize_interpolation": self.factorize_interpolation,
"diagonal": self.diagonal,
"embedding_selector": self.embedding_selector,
"use_rl": self.use_rl,
"use_gumbel": self.use_gumbel,
"embedding_attention": self.embedding_attention,
"identity": self.identity,
"is_sdp": self.is_sdp,
}
return model_state
    def forward(self, sentences: List[Sentence], prediction_mode = False):
        """Score every candidate dependency arc and relation for a batch.

        Embeds the sentences, optionally selects/reweights the per-type
        embeddings, runs the optional RNN encoder, then applies the biaffine
        arc/rel scorers (and, if enabled, second-order mean-field refinement).

        Side effect: sets ``self.mask`` (batch x seq_len) for later use by the
        loss functions.

        Returns:
            s_arc: (batch, seq_len, seq_len) arc scores.
            s_rel: (batch, seq_len, seq_len, n_rels) relation scores.
        """
        # self.zero_grad()
        lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
        longest_token_sequence_in_batch: int = max(lengths)
        # In prediction mode with a selector, only embed the selected embedding types.
        if prediction_mode and self.embedding_selector:
            self.embeddings.embed(sentences,embedding_mask=self.selection)
        else:
            self.embeddings.embed(sentences)
        # Concatenate the per-embedding-type feature tensors along the last dim.
        # Keys are always iterated in sorted order so the concatenation layout
        # is deterministic.
        if self.embedding_selector:
            if self.use_rl:
                # RL-learned 0/1 selection per embedding type, optionally
                # rescaled by a sigmoid attention weight over the selector.
                if self.embedding_attention:
                    embatt=torch.sigmoid(self.selector)
                    sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * self.selection[idx] * embatt[idx] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
                else:
                    sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * self.selection[idx] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
                # sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * self.selection[idx] for idx, x in enumerate(sentences.features.keys())],-1)
            else:
                # if self.training:
                #   selection=torch.nn.functional.gumbel_softmax(self.selector,hard=True)
                #   sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * selection[idx][1] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
                # else:
                #   selection=torch.sigmoid(self.selector)
                #   sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * selection[idx] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
                # Differentiable selection: hard Gumbel-softmax samples during
                # training, argmax at eval; otherwise a soft sigmoid gate.
                if self.use_gumbel:
                    if self.training:
                        selection=torch.nn.functional.gumbel_softmax(self.selector,hard=True)
                        sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * selection[idx][1] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
                    else:
                        selection=torch.argmax(self.selector,-1)
                        sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * selection[idx] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
                else:
                    selection=torch.sigmoid(self.selector)
                    sentence_tensor = torch.cat([sentences.features[x].to(flair.device) * selection[idx] for idx, x in enumerate(sorted(sentences.features.keys()))],-1)
        else:
            # sentence_tensor = torch.cat([sentences.features[x].to(flair.device) for x in sentences.features],-1)
            sentence_tensor = torch.cat([sentences.features[x].to(flair.device) for x in sorted(sentences.features.keys())],-1)
        # print('===================')
        # for x in sentences.features: print(x)
        # print('===================')
        # pdb.set_trace()
        # Ablation hook: zero out all embedding types except the kept one
        # (or except all forward/backward LM embeddings) before concatenation.
        if hasattr(self,'keep_embedding'):
            sentence_tensor = [sentences.features[x].to(flair.device) for x in sorted(sentences.features.keys())]
            embedding_name = sorted(sentences.features.keys())[self.keep_embedding]
            if 'forward' in embedding_name or 'backward' in embedding_name:
                # sentence_tensor = torch.cat([sentences.features[x].to(flair.device) for x in sorted(sentences.features.keys()) if 'forward' in x or 'backward' in x],-1)
                for idx, x in enumerate(sorted(sentences.features.keys())):
                    if 'forward' not in x and 'backward' not in x:
                        sentence_tensor[idx].fill_(0)
            else:
                for idx, x in enumerate(sorted(sentences.features.keys())):
                    if x != embedding_name:
                        sentence_tensor[idx].fill_(0)
            sentence_tensor = torch.cat(sentence_tensor,-1)
        sentence_tensor = self.embed_dropout(sentence_tensor)[0]
        if self.relearn_embeddings:
            # optional linear re-projection of the concatenated embeddings
            sentence_tensor = self.embedding2nn(sentence_tensor)
            # sentence_tensor = self.embedding2nn(sentence_tensor)
        if self.use_rnn:
            # pack -> RNN -> unpack, padding back to the batch max length
            x = pack_padded_sequence(sentence_tensor, lengths, True, False)
            x, _ = self.rnn(x)
            sentence_tensor, _ = pad_packed_sequence(x, True, total_length=sentence_tensor.shape[1])
            sentence_tensor = self.lstm_dropout_func(sentence_tensor)
        # (batch, seq_len) mask of valid token positions; cached on self for the loss
        mask=self.sequence_mask(torch.tensor(lengths),longest_token_sequence_in_batch).type_as(sentence_tensor)
        self.mask=mask
        # mask = words.ne(self.pad_index)
        # lens = mask.sum(dim=1)
        # get outputs from embedding layers
        x = sentence_tensor
        # apply MLPs to the BiLSTM output states
        arc_h = self.mlp_arc_h(x)
        arc_d = self.mlp_arc_d(x)
        rel_h = self.mlp_rel_h(x)
        rel_d = self.mlp_rel_d(x)
        # get arc and rel scores from the bilinear attention
        # [batch_size, seq_len, seq_len]
        s_arc = self.arc_attn(arc_d, arc_h)
        # [batch_size, seq_len, seq_len, n_rels]
        s_rel = self.rel_attn(rel_d, rel_h).permute(0, 2, 3, 1)
        # add second order using mean field variational inference
        if self.use_second_order:
            mask_unary, mask_sib, mask_cop, mask_gp = self.from_mask_to_3d_mask(mask)
            unary = mask_unary*s_arc
            arc_sib, arc_cop, arc_gp = self.encode_second_order(x)
            layer_sib, layer_cop, layer_gp = self.get_edge_second_order_node_scores(arc_sib, arc_cop, arc_gp, mask_sib, mask_cop, mask_gp)
            s_arc = self.mean_field_variational_infernece(unary, layer_sib, layer_cop, layer_gp)
        # set the scores that exceed the length of each sentence to -inf
        if not self.binary:
            s_arc.masked_fill_(~mask.unsqueeze(1).bool(), float(-1e9))
        return s_arc, s_rel
def mean_field_variational_infernece(self, unary, layer_sib=None, layer_cop=None, layer_gp=None):
layer_gp2 = layer_gp.permute(0,2,3,1)
# modify from (dep, head) to (head, dep), in order to fit my code
unary = unary.transpose(1,2)
unary_potential = unary.clone()
q_value = unary_potential.clone()
for i in range(self.iterations):
if self.binary:
q_value=torch.sigmoid(q_value)
else:
q_value=F.softmax(q_value,1)
if self.use_sib:
second_temp_sib = torch.einsum('nac,nabc->nab', (q_value, layer_sib))
#(n x ma x mb) -> (n x ma) -> (n x ma x 1) | (n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(a,a)*p(a,b,a)
diag_sib1 = torch.diagonal(q_value,dim1=1,dim2=2).unsqueeze(-1) * torch.diagonal(layer_sib.transpose(1,2),dim1=-2,dim2=-1).transpose(1,2)
# (n x ma x mb x mc) -> (n x ma x mb)
#Q(a,b)*p(a,b,b)
diag_sib2 = q_value * torch.diagonal(layer_sib,dim1=-2,dim2=-1)
#(n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
second_temp_sib = second_temp_sib - diag_sib1 - diag_sib2
else:
second_temp_sib=0
if self.use_gp:
second_temp_gp = torch.einsum('nbc,nabc->nab', (q_value, layer_gp))
second_temp_gp2 = torch.einsum('nca,nabc->nab', (q_value, layer_gp2))
#Q(b,a)*p(a,b,a)
diag_gp1 = q_value.transpose(1,2) * torch.diagonal(layer_gp.transpose(1,2),dim1=-2,dim2=-1).transpose(1,2)
#(n x ma x mb) -> (n x mb) -> (n x 1 x mb) | (n x ma x mb x mc) -> (n x ma x mb)
#Q(b,b)*p(a,b,b)
diag_gp2 = torch.diagonal(q_value,dim1=-2,dim2=-1).unsqueeze(1) * torch.diagonal(layer_gp,dim1=-2,dim2=-1)
#(n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(a,a)*p(a,b,a)
diag_gp21 = torch.diagonal(q_value,dim1=-2,dim2=-1).unsqueeze(-1) * torch.diagonal(layer_gp2.transpose(1,2),dim1=-2,dim2=-1).transpose(1,2)
#(n x ma x mb) -> (n x mb) -> (n x 1 x mb) | (n x ma x mb x mc) -> (n x ma x mb)
#Q(b,a)*p(a,b,b)
diag_gp22 = q_value.transpose(1,2) * torch.diagonal(layer_gp2,dim1=-2,dim2=-1)
second_temp_gp = second_temp_gp - diag_gp1 - diag_gp2
#c->a->b
second_temp_gp2 = second_temp_gp2 - diag_gp21 - diag_gp22
else:
second_temp_gp=second_temp_gp2=0
if self.use_cop:
second_temp_cop = torch.einsum('ncb,nabc->nab', (q_value, layer_cop))
#(n x ma x mb x mc) -> (n x mb x ma x mc) -> (n x mb x ma) -> (n x ma x mb)
#Q(a,b)*p(a,b,a)
diag_cop1 = q_value * torch.diagonal(layer_cop.transpose(1,2),dim1=-2,dim2=-1).transpose(1,2)
# diag_cop1 = q_value * tf.transpose(tf.linalg.diag_part(tf.transpose(layer_cop,perm=[0,2,1,3])),perm=[0,2,1])
#(n x ma x mb) -> (n x mb) -> (n x 1 x mb) | (n x ma x mb x mc) -> (n x ma x mb)
#Q(b,b)*p(a,b,b)
diag_cop2 = torch.diagonal(q_value,dim1=-2,dim2=-1).unsqueeze(1) * torch.diagonal(layer_cop,dim1=-2,dim2=-1)
# diag_cop2 = tf.expand_dims(tf.linalg.diag_part(q_value),1) * tf.linalg.diag_part(layer_cop)
second_temp_cop = second_temp_cop - diag_cop1 - diag_cop2
else:
second_temp_cop=0
second_temp = second_temp_sib + second_temp_gp + second_temp_gp2 + second_temp_cop
q_value = unary_potential + second_temp
# transpose from (head, dep) to (dep, head)
return q_value.transpose(1,2)
def encode_second_order(self, memory_bank):
if self.use_sib:
edge_node_sib_h = self.mlp_sib_h(memory_bank)
edge_node_sib_m = self.mlp_sib_d(memory_bank)
arc_sib=(edge_node_sib_h, edge_node_sib_m)
else:
arc_sib=None
if self.use_cop:
edge_node_cop_h = self.mlp_cop_h(memory_bank)
edge_node_cop_m = self.mlp_cop_d(memory_bank)
arc_cop=(edge_node_cop_h, edge_node_cop_m)
else:
arc_cop=None
if self.use_gp:
edge_node_gp_h = self.mlp_gp_h(memory_bank)
edge_node_gp_m = self.mlp_gp_d(memory_bank)
edge_node_gp_hm = self.mlp_gp_hd(memory_bank)
arc_gp=(edge_node_gp_h, edge_node_gp_hm, edge_node_gp_m)
else:
arc_gp=None
return arc_sib, arc_cop, arc_gp
def get_edge_second_order_node_scores(self, arc_sib, arc_cop, arc_gp, mask_sib, mask_cop, mask_gp):
if self.use_sib:
edge_node_sib_h, edge_node_sib_m = arc_sib
layer_sib = self.trilinear_sib(edge_node_sib_h, edge_node_sib_m, edge_node_sib_m) * mask_sib
# keep (ma x mb x mc) -> (ma x mb x mb)
#layer_sib = 0.5 * (layer_sib + layer_sib.transpose(3,2))
one_mask=torch.ones(layer_sib.shape[-2:])
tril_mask=torch.tril(one_mask,-1)
triu_mask=torch.triu(one_mask,1)
layer_sib = layer_sib-layer_sib*tril_mask.unsqueeze(0).unsqueeze(0) + (layer_sib*triu_mask.unsqueeze(0).unsqueeze(0)).permute([0,1,3,2])
else:
layer_sib = None
if self.use_cop:
edge_node_cop_h, edge_node_cop_m = arc_cop
layer_cop = self.trilinear_cop(edge_node_cop_h, edge_node_cop_m, edge_node_cop_h) * mask_cop
# keep (ma x mb x mc) -> (ma x mb x ma)
one_mask=torch.ones(layer_cop.shape[-2:])
tril_mask=torch.tril(one_mask,-1)
triu_mask=torch.triu(one_mask,1)
layer_cop=layer_cop.transpose(1,2)
layer_cop = layer_cop-layer_cop*tril_mask.unsqueeze(0).unsqueeze(0) + (layer_cop*triu_mask.unsqueeze(0).unsqueeze(0)).permute([0,1,3,2])
layer_cop=layer_cop.transpose(1,2)
else:
layer_cop = None
if self.use_gp:
edge_node_gp_h, edge_node_gp_hm, edge_node_gp_m = arc_gp
layer_gp = self.trilinear_gp(edge_node_gp_h, edge_node_gp_hm, edge_node_gp_m) * mask_gp
else:
layer_gp = None
return layer_sib,layer_cop,layer_gp
def from_mask_to_3d_mask(self,token_weights):
root_weights = token_weights.clone()
root_weights[:,0] = 0
token_weights3D = token_weights.unsqueeze(-1) * root_weights.unsqueeze(-2)
token_weights2D = root_weights.unsqueeze(-1) * root_weights.unsqueeze(-2)
# abc -> ab,ac
#token_weights_sib = tf.cast(tf.expand_dims(root_, axis=-3) * tf.expand_dims(tf.expand_dims(root_weights, axis=-1),axis=-1),dtype=tf.float32)
#abc -> ab,cb
if self.use_cop:
token_weights_cop = token_weights.unsqueeze(-1).unsqueeze(-1) * root_weights.unsqueeze(1).unsqueeze(-1) * token_weights.unsqueeze(1).unsqueeze(1)
token_weights_cop[:,0,:,0] = 0
else:
token_weights_cop=None
#data=np.stack((devprint['printdata']['layer_cop'][0][0]*devprint['token_weights3D'][0].T)[None,:],devprint['printdata']['layer_cop'][0][1:])
#abc -> ab, bc
if self.use_gp:
token_weights_gp = token_weights.unsqueeze(-1).unsqueeze(-1) * root_weights.unsqueeze(1).unsqueeze(-1) * root_weights.unsqueeze(1).unsqueeze(1)
else:
token_weights_gp = None
if self.use_sib:
#abc -> ca, ab
if self.use_gp:
token_weights_sib = token_weights_gp.clone()
else:
token_weights.unsqueeze(-1).unsqueeze(-1) * root_weights.unsqueeze(1).unsqueeze(-1) * root_weights.unsqueeze(1).unsqueeze(1)
else:
token_weights_sib = None
return token_weights3D, token_weights_sib, token_weights_cop, token_weights_gp
def forward_loss(
self, data_points: Union[List[Sentence], Sentence], sort=True
) -> torch.tensor:
s_arc, s_rel = self.forward(data_points)
# lengths = [len(sentence.tokens) for sentence in data_points]
# longest_token_sequence_in_batch: int = max(lengths)
# max_len = features.shape[1]
# mask=self.sequence_mask(torch.tensor(lengths), max_len).type_as(features)
loss = self._calculate_loss(s_arc, s_rel, data_points, self.mask)
return loss
    def simple_forward_distillation_loss(
        self, data_points: Union[List[Sentence], Sentence], teacher_data_points: Union[List[Sentence], Sentence]=None, teacher=None, sort=True,
        interpolation=0.5, train_with_professor=False, professor_interpolation=0.5, language_attention_warmup = False, calc_teachers_target_loss = False,
        interpolation_comment_anchor=None,
    ) -> torch.tensor:
        """Knowledge-distillation training loss: a weighted mix of the gold
        (target) loss and one of several teacher-matching losses.

        Which distillation terms are computed is controlled by the flags
        ``self.distill_posterior`` (match teacher posterior/marginal scores),
        ``self.distill_crf`` (match teacher k-best parses), and
        ``self.distill_arc``/``self.distill_rel`` (match raw teacher score
        distributions). Teacher targets come either from pre-collated
        ``data_points.teacher_features`` or per-sentence getters.

        NOTE(review): several parameters (teacher_data_points, teacher,
        sort, train_with_professor, ...) are unused here — presumably kept
        for interface compatibility with other distillation losses.
        """
        arc_scores, rel_scores = self.forward(data_points)
        lengths = [len(sentence.tokens) for sentence in data_points]
        max_len = arc_scores.shape[1]
        mask=self.mask.clone()
        posterior_loss = 0
        # --- Path 1: distill the teacher's posterior (marginal) scores ---
        if self.distill_posterior:
            # mask[:,0] = 0
            if hasattr(data_points,'teacher_features') and 'posteriors' in data_points.teacher_features:
                teacher_scores = data_points.teacher_features['posteriors'].to(flair.device)
            else:
                teacher_scores = torch.stack([sentence.get_teacher_posteriors() for sentence in data_points],0)
            if self.distill_arc:
                root_mask = mask.clone()
                root_mask[:,0] = 0
                binary_mask = root_mask.unsqueeze(-1) * mask.unsqueeze(-2)
                arc_scores.masked_fill_(~binary_mask.bool(), float(-1e9))
                # one distillation term per teacher (last-but-one axis indexes teachers)
                for i in range(teacher_scores.shape[-2]):
                    if self.distill_rel:
                        # NOTE(review): dead branch — guarded by `assert 0`
                        assert 0
                        marginals = convert_score_back(teacher_scores[:,:,:,i])
                        arc_probs = arc_scores.softmax(-1)
                        rel_probs = rel_scores.softmax(-1)
                        student_probs = arc_probs.unsqueeze(-1) * rel_probs
                        student_scores = (student_probs+1e-12).log()
                        student_scores = student_scores.view(list(student_scores.shape[0:2])+[-1])
                        marginals = marginals.reshape(list(marginals.shape[0:2])+[-1])
                        # create the mask
                        binary_mask = binary_mask.unsqueeze(-1).expand(list(binary_mask.shape)+[rel_probs.shape[-1]]).reshape(list(binary_mask.shape[0:2])+[-1])
                    else:
                        marginals = convert_score_back(teacher_scores[:,:,i])
                    posterior_loss += self._calculate_distillation_loss(student_scores, marginals, root_mask, binary_mask, T=self.temperature, teacher_is_score = False)
            else:
                root_mask = mask.clone()
                root_mask[:,0] = 0
                binary_mask = root_mask.unsqueeze(-1) * mask.unsqueeze(-2)
                # student marginals from the CRF's inside-outside pass
                inside_outside_prob = crf(arc_scores, root_mask.bool(),marginal_gradient=True)
                inside_outside_score = (inside_outside_prob + 1e-12).log()
                for i in range(teacher_scores.shape[-2]):
                    posterior_loss += self._calculate_distillation_loss(inside_outside_score, teacher_scores[:,:,i], root_mask, binary_mask, T=self.temperature, teacher_is_score = False)
                # temp_mask = mask[:,1:]
                # dist=generate_tree(arc_scores,temp_mask.squeeze(-1).long(),is_mst=self.is_mst)
                # forward_backward_score = dist.marginals
                # # change back to relation of (dependency, head)
                # input_forward_score = (forward_backward_score.transpose(-1,-2)+1e-12).log()
                # binary_mask = temp_mask.unsqueeze(-1) * temp_mask.unsqueeze(-2)
                # input_forward_score.masked_fill_(~binary_mask.bool(), float(-1e9))
                # for i in range(teacher_scores.shape[-2]):
                #   posterior_loss += self._calculate_distillation_loss(input_forward_score, teacher_scores[:,:,i].transpose(-1,-2), temp_mask, binary_mask, T=self.temperature, teacher_is_score = False)
            # average over the number of teachers
            posterior_loss/=teacher_scores.shape[-2]
        distillation_loss = 0
        # --- Path 2: distill the teacher's k-best parses (CRF distillation) ---
        if self.distill_crf:
            # [batch, length, kbest]
            mask[:,0] = 0
            if hasattr(data_points,'teacher_features') and 'topk' in data_points.teacher_features:
                teacher_tags = data_points.teacher_features['topk'].to(flair.device)
                teacher_weights = data_points.teacher_features['weights'].to(flair.device)
                if self.distill_rel:
                    teacher_rel_tags = data_points.teacher_features['topk_rels'].to(flair.device)
            else:
                teacher_tags = torch.stack([sentence.get_teacher_target() for sentence in data_points],0)
                teacher_weights = torch.stack([sentence.get_teacher_weights() for sentence in data_points],0)
                if self.distill_rel:
                    teacher_rel_tags = torch.stack([sentence.get_teacher_rel_target() for sentence in data_points],0)
            # proprocess, convert k best to batch wise
            teacher_mask = (mask.unsqueeze(-1) * (teacher_weights.unsqueeze(1)>0).type_as(mask)).bool()
            # broadcast student scores across the k candidate parses, then
            # flatten everything the teacher_mask selects
            student_arc_scores = arc_scores.unsqueeze(-2).expand(list(arc_scores.shape[:2])+[teacher_mask.shape[-1],arc_scores.shape[-1]])[teacher_mask]
            teacher_topk_arcs = teacher_tags[teacher_mask]
            if self.distill_rel:
                # gold_arcs = arcs[mask]
                # rel_scores, rels = rel_scores[mask], rels[mask]
                # rel_scores = rel_scores[torch.arange(len(gold_arcs)), gold_arcs]
                student_rel_scores = rel_scores.unsqueeze(-3).expand(list(rel_scores.shape[:2])+[teacher_mask.shape[-1]]+list(rel_scores.shape[-2:]))[teacher_mask]
                teacher_topk_rels = teacher_rel_tags[teacher_mask]
                # pick the relation scores at the teacher's predicted heads
                student_rel_scores = student_rel_scores[torch.arange(len(teacher_topk_arcs)),teacher_topk_arcs]
            if self.crf_attention:
                # weight each candidate parse by the teacher-assigned weight
                weights = teacher_weights.unsqueeze(1).expand([teacher_weights.shape[0],arc_scores.shape[1],teacher_weights.shape[1]])[teacher_mask]
                distillation_loss = self.distill_criterion(student_arc_scores, teacher_topk_arcs)
                # the loss calculates only one times because the sum of weight is 1
                distillation_loss = (distillation_loss * weights).sum() / mask.sum()
                if self.distill_rel:
                    rel_distillation_loss = self.distill_rel_criterion(student_rel_scores, teacher_topk_rels)
                    rel_distillation_loss = (rel_distillation_loss * weights).sum() / mask.sum()
            else:
                # the loss calculates for k times
                distillation_loss = self.arc_criterion(student_arc_scores, teacher_topk_arcs)
                if self.distill_rel:
                    rel_distillation_loss = self.rel_criterion(student_rel_scores, teacher_topk_rels)
        # gold-target loss, kept separate so it can be interpolated below
        arc_loss,rel_loss = self._calculate_loss(arc_scores, rel_scores, data_points, self.mask.clone(), return_arc_rel=True)
        # --- Path 3: distill raw teacher score distributions ---
        if (self.distill_arc or self.distill_rel) and not self.distill_posterior and not self.distill_crf:
            root_mask = mask.clone()
            root_mask[:,0] = 0
            binary_mask = root_mask.unsqueeze(-1) * mask.unsqueeze(-2)
            if hasattr(data_points,'teacher_features') and 'distributions' in data_points.teacher_features:
                teacher_features = data_points.teacher_features['distributions'].to(flair.device)
            else:
                teacher_features = torch.stack([sentence.get_teacher_prediction() for sentence in data_points],0)
            if self.distill_arc:
                features = arc_scores
            if self.distill_rel:
                # features = arc_scores.unsqueeze(-1) * rel_scores
                if self.distill_factorize:
                    # distill arc and relation distributions separately
                    rel_binary_mask = binary_mask.unsqueeze(-1).expand(list(binary_mask.shape)+[rel_scores.shape[-1]]).reshape(list(binary_mask.shape[0:2])+[-1])
                    if hasattr(data_points,'teacher_features') and 'rel_distributions' in data_points.teacher_features:
                        teacher_rel_features = data_points.teacher_features['rel_distributions'].to(flair.device)
                    else:
                        teacher_rel_features = torch.stack([sentence.get_teacher_rel_prediction() for sentence in data_points],0)
                    rel_probs = rel_scores.softmax(-1)
                    rel_probs = rel_probs.view(list(rel_probs.shape[0:2])+[-1])
                    # NOTE(review): rel_scores is rebound to log-probs here
                    rel_scores = (rel_probs+1e-12).log()
                    teacher_rel_features = teacher_rel_features.view(list(teacher_rel_features.shape[0:2])+[-1])
                    rel_distillation_loss = self._calculate_distillation_loss(rel_scores, teacher_rel_features, root_mask, rel_binary_mask, T=self.temperature, teacher_is_score=(not self.distill_prob) and (not self.distill_rel))
                    features = arc_scores
                else:
                    # distill the joint arc*rel distribution
                    arc_probs = arc_scores.softmax(-1)
                    rel_probs = rel_scores.softmax(-1)
                    features = arc_probs.unsqueeze(-1) * rel_probs
                    features = features.view(list(features.shape[0:2])+[-1])
                    features = (features+1e-12).log()
                    teacher_features = teacher_features.view(list(teacher_features.shape[0:2])+[-1])
                    # create the mask
                    binary_mask = binary_mask.unsqueeze(-1).expand(list(binary_mask.shape)+[rel_probs.shape[-1]]).reshape(list(binary_mask.shape[0:2])+[-1])
            else:
                teacher_features.masked_fill_(~self.mask.unsqueeze(1).bool(), float(-1e9))
            distillation_loss = self._calculate_distillation_loss(features, teacher_features, root_mask, binary_mask, T=self.temperature, teacher_is_score=(not self.distill_prob) and (not self.distill_rel))
        # target_loss2 = super()._calculate_loss(features,data_points)
        # distillation_loss2 = super()._calculate_distillation_loss(features, teacher_features,torch.tensor(lengths))
        # (interpolation * (posterior_loss + distillation_loss) + (1-interpolation) * target_loss).backward()
        # --- Final interpolation between distillation and gold losses ---
        if self.distill_rel:
            # if distilling both arc and rel distribution, just use the same interpolation
            target_loss = 2 * ((1-self.interpolation) * arc_loss + self.interpolation * rel_loss)
            if self.distill_factorize:
                # balance the relation distillation loss and arc distillation loss through a new interpolation
                distillation_loss = 2 * ((1-self.factorize_interpolation) * distillation_loss + self.factorize_interpolation * rel_distillation_loss)
            if self.distill_crf:
                distillation_loss = 2 * ((1-self.interpolation) * distillation_loss + self.interpolation * rel_distillation_loss)
            return interpolation * (posterior_loss + distillation_loss) + (1-interpolation) * target_loss
        else:
            # otherwise, balance between the (arc distillation loss + arc loss) and (rel loss)
            return 2*((1-self.interpolation) * (interpolation * (posterior_loss + distillation_loss) + (1-interpolation) * arc_loss) + self.interpolation * rel_loss)
def sequence_mask(self, lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
def _calculate_distillation_loss(self, features, teacher_features, mask, binary_mask, T = 1, teacher_is_score=True, student_is_score = True):
# TODO: time with mask, and whether this should do softmax
# pdb.set_trace()
if teacher_is_score:
teacher_prob=F.softmax(teacher_features/T, dim=-1)
else:
if T>1:
teacher_scores = (teacher_features+1e-12).log()
teacher_prob=F.softmax(teacher_scores/T, dim=-1)
else:
teacher_prob=teacher_features
KD_loss = torch.nn.functional.kl_div(F.log_softmax(features/T, dim=-1), teacher_prob,reduction='none') * binary_mask * T * T
# KD_loss = KD_loss.sum()/mask.sum()
if self.sentence_level_loss:
KD_loss = KD_loss.sum()/KD_loss.shape[0]
else:
KD_loss = KD_loss.sum()/mask.sum()
return KD_loss
# return torch.nn.functional.MSELoss(features, teacher_features, reduction='mean')
    def _calculate_loss(
        self, arc_scores: torch.tensor, rel_scores: torch.tensor, sentences: List[Sentence], mask: torch.tensor, return_arc_rel = False,
    ) -> float:
        """Compute the training loss from arc and relation scores.

        In binary (graph / SDP) mode, arcs are scored with BCE against an
        adjacency matrix and relations with masked cross-entropy. Otherwise
        (tree mode) arcs/rels use cross-entropy against gold head indices,
        optionally through the CRF.

        Side effect (non-binary mode): zeroes column 0 of ``mask`` in place
        and caches the gold ``arcs``/``rels`` on ``self``.

        Returns:
            The interpolated scalar loss, or the ``(arc_loss, rel_loss)``
            pair when ``return_arc_rel`` is True.
        """
        if self.binary:
            root_mask = mask.clone()
            # the artificial root can never be a dependent
            root_mask[:,0] = 0
            binary_mask = root_mask.unsqueeze(-1) * mask.unsqueeze(-2)
            # arc_mat=
            # gold matrices: batch-collated if pre-stored on the batch object,
            # otherwise stacked per sentence
            if hasattr(sentences,self.tag_type+'_arc_tags'):
                arc_mat=getattr(sentences,self.tag_type+'_arc_tags').to(flair.device).float()
            else:
                arc_mat=torch.stack([getattr(sentence,self.tag_type+'_arc_tags').to(flair.device) for sentence in sentences],0).float()
            if hasattr(sentences,self.tag_type+'_rel_tags'):
                rel_mat=getattr(sentences,self.tag_type+'_rel_tags').to(flair.device).long()
            else:
                rel_mat=torch.stack([getattr(sentence,self.tag_type+'_rel_tags').to(flair.device) for sentence in sentences],0).long()
            arc_loss = self.arc_criterion(arc_scores, arc_mat)
            rel_loss = self.rel_criterion(rel_scores.reshape(-1,self.tagset_size), rel_mat.reshape(-1))
            # average BCE over valid (dep, head) cells only
            arc_loss = (arc_loss*binary_mask).sum()/binary_mask.sum()
            # relation loss counts only cells that carry a gold relation
            rel_mask = (rel_mat>0)*binary_mask
            num_rels=rel_mask.sum()
            if num_rels>0:
                rel_loss = (rel_loss*rel_mask.view(-1)).sum()/num_rels
            else:
                rel_loss = 0
            # rel_loss = (rel_loss*rel_mat.view(-1)).sum()/rel_mat.sum()
        else:
            if hasattr(sentences,self.tag_type+'_arc_tags'):
                arcs=getattr(sentences,self.tag_type+'_arc_tags').to(flair.device).long()
            else:
                arcs=torch.stack([getattr(sentence,self.tag_type+'_arc_tags').to(flair.device) for sentence in sentences],0).long()
            if hasattr(sentences,self.tag_type+'_rel_tags'):
                rels=getattr(sentences,self.tag_type+'_rel_tags').to(flair.device).long()
            else:
                rels=torch.stack([getattr(sentence,self.tag_type+'_rel_tags').to(flair.device) for sentence in sentences],0).long()
            # cache gold targets for later inspection/use
            self.arcs=arcs
            self.rels=rels
            # NOTE: in-place mutation of the caller's mask tensor
            mask[:,0] = 0
            mask = mask.bool()
            gold_arcs = arcs[mask]
            # select relation scores at the gold head of each token
            rel_scores, rels = rel_scores[mask], rels[mask]
            rel_scores = rel_scores[torch.arange(len(gold_arcs)), gold_arcs]
            if self.use_crf:
                arc_loss, arc_probs = crf(arc_scores, mask, arcs)
                arc_loss = arc_loss/mask.sum()
                rel_loss = self.rel_criterion(rel_scores, rels)
                #=============================================================================================
                # dist=generate_tree(arc_scores,mask,is_mst=self.is_mst)
                # labels = dist.struct.to_parts(arcs[:,1:], lengths=mask.sum(-1)).type_as(arc_scores)
                # log_prob = dist.log_prob(labels)
                # if (log_prob>0).any():
                #   log_prob[torch.where(log_prob>0)]=0
                #   print("failed to get correct loss!")
                # if self.token_loss:
                #   arc_loss = - log_prob.sum()/mask.sum()
                # else:
                #   arc_loss = - log_prob.mean()
                # self.dist=dist
                # rel_loss = self.rel_criterion(rel_scores, rels)
                # if self.token_loss:
                #   rel_loss = rel_loss.mean()
                # else:
                #   rel_loss = rel_loss.sum()/len(sentences)
                # if self.debug:
                #   if rel_loss<0 or arc_loss<0:
                #       pdb.set_trace()
                #=============================================================================================
            else:
                arc_scores, arcs = arc_scores[mask], arcs[mask]
                arc_loss = self.arc_criterion(arc_scores, arcs)
                # rel_scores, rels = rel_scores[mask], rels[mask]
                # rel_scores = rel_scores[torch.arange(len(arcs)), arcs]
                rel_loss = self.rel_criterion(rel_scores, rels)
        if return_arc_rel:
            return (arc_loss,rel_loss)
        # interpolate between arc and relation losses (factor 2 keeps scale)
        loss = 2 * ((1-self.interpolation) * arc_loss + self.interpolation * rel_loss)
        # score = torch.nn.functional.cross_entropy(features.view(-1,features.shape[-1]), tag_list.view(-1,), reduction='none') * mask.view(-1,)
        # if self.sentence_level_loss or self.use_crf:
        #   score = score.sum()/features.shape[0]
        # else:
        #   score = score.sum()/mask.sum()
        # score = (1-self.posterior_interpolation) * score + self.posterior_interpolation * posterior_score
        return loss
def evaluate(
self,
data_loader: DataLoader,
out_path: Path = None,
embeddings_storage_mode: str = "cpu",
prediction_mode: bool = False,
) -> (Result, float):
print('Hi I am in evaluation dependancy :)')
data_loader.assign_embeddings()
with torch.no_grad():
if self.binary:
print('In binary IFFF')
eval_loss = 0
batch_no: int = 0
# metric = Metric("Evaluation")
# sentence_writer = open('temps/'+str(uid)+'_eval'+'.conllu','w')
lines: List[str] = []
utp = 0
ufp = 0
ufn = 0
ltp = 0
lfp = 0
lfn = 0
if out_path is not None:
outfile = open(out_path, "w", encoding="utf-8")
for batch in data_loader:
batch_no += 1
print('batch', batch)
print('type batch', type(batch))
print('batch', batch[0])
print('batch', batch[0][0])
print('batch', type(batch[0]))
arc_scores, rel_scores = self.forward(batch, prediction_mode=prediction_mode)
mask=self.mask
root_mask = mask.clone()
root_mask[:,0] = 0
binary_mask = root_mask.unsqueeze(-1) * mask.unsqueeze(-2)
arc_predictions = (arc_scores.sigmoid() > 0.5) * binary_mask
rel_predictions = (rel_scores.softmax(-1)*binary_mask.unsqueeze(-1)).argmax(-1)
# if not prediction_mode:
arc_mat=torch.stack([getattr(sentence,self.tag_type+'_arc_tags').to(flair.device) for sentence in batch],0).float()
rel_mat=torch.stack([getattr(sentence,self.tag_type+'_rel_tags').to(flair.device) for sentence in batch],0).long()
loss = self._calculate_loss(arc_scores, rel_scores, batch, mask)
if self.is_srl:
# let the head selection fixed to the gold predicate only
binary_mask[:,:,0] = arc_mat[:,:,0]
arc_predictions = (arc_scores.sigmoid() > 0.5) * binary_mask
# UF1
true_positives = arc_predictions * arc_mat
# (n x m x m) -> ()
n_predictions = arc_predictions.sum()
n_unlabeled_predictions = n_predictions
n_targets = arc_mat.sum()
n_unlabeled_targets = n_targets
n_true_positives = true_positives.sum()
# () - () -> ()
n_false_positives = n_predictions - n_true_positives
n_false_negatives = n_targets - n_true_positives
# (n x m x m) -> (n)
n_targets_per_sequence = arc_mat.sum([1,2])
n_true_positives_per_sequence = true_positives.sum([1,2])
# (n) x 2 -> ()
n_correct_sequences = (n_true_positives_per_sequence==n_targets_per_sequence).sum()
utp += n_true_positives
ufp += n_false_positives
ufn += n_false_negatives
# LF1
# (n x m x m) (*) (n x m x m) -> (n x m x m)
true_positives = (rel_predictions == rel_mat) * arc_predictions
correct_label_tokens = (rel_predictions == rel_mat) * arc_mat
# (n x m x m) -> ()
# n_unlabeled_predictions = tf.reduce_sum(unlabeled_predictions)
# n_unlabeled_targets = tf.reduce_sum(unlabeled_targets)
n_true_positives = true_positives.sum()
n_correct_label_tokens = correct_label_tokens.sum()
# () - () -> ()
n_false_positives = n_unlabeled_predictions - n_true_positives
n_false_negatives = n_unlabeled_targets - n_true_positives
# (n x m x m) -> (n)
n_targets_per_sequence = arc_mat.sum([1,2])
n_true_positives_per_sequence = true_positives.sum([1,2])
n_correct_label_tokens_per_sequence = correct_label_tokens.sum([1,2])
# (n) x 2 -> ()
n_correct_sequences = (n_true_positives_per_sequence == n_targets_per_sequence).sum()
n_correct_label_sequences = ((n_correct_label_tokens_per_sequence == n_targets_per_sequence)).sum()
ltp += n_true_positives
lfp += n_false_positives
lfn += n_false_negatives
eval_loss += loss
if out_path is not None:
masked_arc_scores = arc_scores.masked_fill(~binary_mask.bool(), float(-1e9))
# if self.target
# lengths = [len(sentence.tokens) for sentence in batch]
# temp_preds = eisner(arc_scores, mask)
if not self.is_mst and self.tree:
temp_preds = eisner(arc_scores, root_mask.bool())
for (sent_idx, sentence) in enumerate(batch):
if self.is_mst:
preds=MST_inference(torch.softmax(masked_arc_scores[sent_idx],-1).cpu().numpy(), len(sentence), binary_mask[sent_idx].cpu().numpy())
elif self.tree:
preds=temp_preds[sent_idx]
else:
preds = []
sent_arc_preds=torch.where(arc_predictions[sent_idx]>0)
if len(sent_arc_preds[0])==0:
graph_score = 0
else:
sent_arc_scores = arc_scores[sent_idx, sent_arc_preds[0], sent_arc_preds[1]]
sent_rel_scores = rel_scores[sent_idx, sent_arc_preds[0], sent_arc_preds[1]].max(-1)[0]
final_score = sent_arc_scores*sent_rel_scores
graph_score = final_score.sum().cpu().item()
if out_path is not None:
outfile.write(f'# Tree score: {graph_score}\n')
for token_idx, token in enumerate(sentence):
if token_idx == 0:
continue
# append both to file for evaluation
arc_heads = torch.where(arc_predictions[sent_idx,token_idx]>0)[0]
if len(preds)>0 and preds[token_idx] not in arc_heads:
val=torch.zeros(1).type_as(arc_heads)
val[0]=preds[token_idx].item()
arc_heads=torch.cat([arc_heads,val],0)
# this part should be removed for SDP
# if len(arc_heads) == 0:
# arc_heads = masked_arc_scores[sent_idx,token_idx].argmax().unsqueeze(0)
if len(arc_heads) != 0:
rel_index = rel_predictions[sent_idx,token_idx,arc_heads]
rel_labels = [self.tag_dictionary.get_item_for_index(x) for x in rel_index]
arc_list=[]
token_arc_scores = arc_scores[sent_idx,token_idx, arc_heads]
token_rel_scores = rel_scores[sent_idx,token_idx, arc_heads].max(-1)[0]
token_score = (token_arc_scores*token_rel_scores).sum().cpu().item()
for i, label in enumerate(rel_labels):
if '+' in label:
labels = label.split('+')
for temp_label in labels:
arc_list.append(str(arc_heads[i].item())+':'+temp_label)
else:
arc_list.append(str(arc_heads[i].item())+':'+label)
else:
arc_list = ['_']
token_score = 0
eval_line = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
token_idx,
token.text,
'X',
'X',
'X',
token.get_tag(self.tag_type).value,
str(token_idx-1),
'root' if token_idx-1==0 else 'det',
'|'.join(arc_list),
f'{token_score}',
)
# lines.append(eval_line)
if out_path is not None:
outfile.write(eval_line)
# lines.append("\n")
if out_path is not None:
outfile.write('\n')
eval_loss /= batch_no
UF1=self.compute_F1(utp,ufp,ufn).cpu().numpy()
LF1=self.compute_F1(ltp,lfp,lfn).cpu().numpy()
if out_path is not None:
outfile.close()
# with open(out_path, "w", encoding="utf-8") as outfile:
# outfile.write("".join(lines))
# if prediction_mode:
# return None, None
result = Result(
main_score=LF1,
log_line=f"\nUF1: {UF1} - LF1 {LF1}",
log_header="PRECISION\tRECALL\tF1",
detailed_results=f"\nUF1: {UF1} - LF1 {LF1}",
)
else:
# if prediction_mode:
# eval_loss, metric=self.dependency_evaluate(data_loader,out_path=out_path,prediction_mode=prediction_mode)
# return eval_loss, metric
# else:
# print('data_loader', data_loader)
# for b in data_loader:
# batch = b
# arc_scores, rel_scores = self.forward(batch, prediction_mode=prediction_mode)
# mask=self.mask
# root_mask = mask.clone()
# root_mask[:,0] = 0
# binary_mask = root_mask.unsqueeze(-1) * mask.unsqueeze(-2)
# arc_predictions = (arc_scores.sigmoid() > 0.5) * binary_mask
# rel_predictions = (rel_scores.softmax(-1)*binary_mask.unsqueeze(-1)).argmax(-1)
# print(arc_predictions)
# print(rel_predictions)
eval_loss, metric=self.dependency_evaluate(data_loader,out_path=out_path)
UAS=metric.uas
LAS=metric.las
result = Result(main_score=LAS,log_line=f"\nUAS: {UAS} - LAS {LAS}",log_header="PRECISION\tRECALL\tF1",detailed_results=f"\nUAS: {UAS} - LAS {LAS}",)
return result, eval_loss
def compute_F1(self, tp, fp, fn):
precision = tp/(tp+fp + 1e-12)
recall = tp/(tp+fn + 1e-12)
return 2 * (precision * recall) / (precision + recall+ 1e-12)
@torch.no_grad()
def dependency_evaluate(self, loader, out_path=None, prediction_mode=False):
# self.model.eval()
loss, metric = 0, Metric()
# total_start_time=time.time()
# forward_time=0
# loss_time=0
# decode_time=0
# punct_time=0
lines=[]
for batch in loader:
print(batch)
print(type(batch))
forward_start=time.time()
arc_scores, rel_scores = self.forward(batch)
# forward_end=time.time()
mask = self.mask
if not prediction_mode:
loss += self._calculate_loss(arc_scores, rel_scores, batch, mask)
# loss_end=time.time()
# forward_time+=forward_end-forward_start
# loss_time+=loss_end-forward_end
mask=mask.bool()
# decode_start=time.time()
arc_preds, rel_preds, pred_arc_scores, pred_rel_scores = self.decode(arc_scores, rel_scores, mask)
# decode_end=time.time()
# decode_time+=decode_end-decode_start
# ignore all punctuation if not specified
# if out_path is not None:
# pdb.set_trace()
if not self.punct:
for sent_id,sentence in enumerate(batch):
for token_id, token in enumerate(sentence):
upos=token.get_tag('upos').value
xpos=token.get_tag('pos').value
word=token.text
if is_punctuation(word,upos,self.punct_list) or is_punctuation(word,upos,self.punct_list):
mask[sent_id][token_id]=0
# mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)
final_score = pred_arc_scores*pred_rel_scores
tree_score = final_score.sum(-1)
if out_path is not None:
for (sent_idx, sentence) in enumerate(batch):
lines.append(f'# Tree score: {tree_score[sent_idx].cpu().item()}\n')
for token_idx, token in enumerate(sentence):
if token_idx == 0:
continue
# append both to file for evaluation
eval_line = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
token_idx,
token.text,
'X',
'X',
'X',
token.get_tag(self.tag_type).value,
arc_preds[sent_idx,token_idx],
self.tag_dictionary.get_item_for_index(rel_preds[sent_idx,token_idx]),
'X',
final_score[sent_idx,token_idx].cpu().item(),
)
lines.append(eval_line)
lines.append("\n")
print(lines)
if not prediction_mode:
# punct_end=time.time()
# punct_time+=punct_end-decode_end
metric(arc_preds, rel_preds, self.arcs, self.rels, mask)
if out_path is not None:
with open(out_path, "w", encoding="utf-8") as outfile:
outfile.write("".join(lines))
if prediction_mode:
return None, None
# total_end_time=time.time()
# print(total_start_time-total_end_time)
# print(forward_time)
# print(punct_time)
# print(decode_time)
loss /= len(loader)
return loss, metric
@torch.no_grad()
def predict(self, token_list, prediction_mode=False):
# self.model.eval()
sentence: Sentence = Sentence()
for idx, t in enumerate(token_list):
if idx == 0:
# token = Token('<ROOT>')
token = Token('<ROOT>', head_id=int(0))
sentence.add_token(token)
token = Token(t)
sentence.add_token(token)
# print(sentence.to_original_text())
batch = [sentence]
batch = BatchedData(batch)
# print(batch)
lines=[]
arc_scores, rel_scores = self.forward(batch)
mask = self.mask
mask=mask.bool()
arc_preds, rel_preds, pred_arc_scores, pred_rel_scores = self.decode(arc_scores, rel_scores, mask)
if not self.punct:
for sent_id,sentence in enumerate(batch):
for token_id, token in enumerate(sentence):
upos=token.get_tag('upos').value
xpos=token.get_tag('pos').value
word=token.text
if is_punctuation(word,upos,self.punct_list) or is_punctuation(word,upos,self.punct_list):
mask[sent_id][token_id]=0
# mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)
final_score = pred_arc_scores*pred_rel_scores
tree_score = final_score.sum(-1)
preds_arcs, preds_rels = [], []
for (sent_idx, sentence) in enumerate(batch):
for token_idx, token in enumerate(sentence):
if token_idx == 0:
continue
# append both to file for evaluation
preds_arcs.append(arc_preds[sent_idx,token_idx])
preds_rels.append(self.tag_dictionary.get_item_for_index(rel_preds[sent_idx,token_idx]))
# eval_line = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
# token_idx,
# token.text,
# 'X',
# 'X',
# 'X',
# token.get_tag(self.tag_type).value,
# arc_preds[sent_idx,token_idx],
# self.tag_dictionary.get_item_for_index(rel_preds[sent_idx,token_idx]),
# 'X',
# final_score[sent_idx,token_idx].cpu().item(),
# )
# lines.append(eval_line)
# lines.append("\n")
return preds_arcs, preds_rels
    def decode(self, arc_scores, rel_scores, mask):
        """Greedy-decode head and relation predictions from score tensors.

        For each token the highest-scoring head is chosen; sequences whose
        head assignment does not form a valid tree are optionally repaired
        with the Eisner algorithm when ``self.tree`` is set. Relations are
        then read off at the chosen head positions.

        Returns ``(arc_preds, rel_preds, arc confidence, rel confidence)``,
        with both confidence tensors zeroed outside ``mask``.
        """
        arc_preds = arc_scores.argmax(-1)
        # Flag sequences whose greedy head choices are not a valid tree
        # (istree is a project helper; projectivity check depends on is_mst).
        bad = [not istree(sequence, not self.is_mst)
               for sequence in arc_preds.tolist()]
        if self.tree and any(bad):
            # Re-decode only the offending sequences with Eisner.
            arc_preds[bad] = eisner(arc_scores[bad], mask[bad])
        # if not hasattr(self,'dist') or self.is_mst:
        # dist = generate_tree(arc_scores,mask,is_mst=False)
        # else:
        # dist = self.dist
        # arc_preds=get_struct_predictions(dist)
        # deal with masking
        # if not (arc_preds*mask == result*mask).all():
        # pdb.set_trace()
        # Pick each token's relation scores at its predicted head.
        rel_preds = rel_scores.argmax(-1)
        rel_preds = rel_preds.gather(-1, arc_preds.unsqueeze(-1)).squeeze(-1)
        # pdb.set_trace()
        return arc_preds, rel_preds, arc_scores.max(-1)[0] * mask, rel_scores.max(-1)[0].gather(-1, arc_preds.unsqueeze(-1)).squeeze(-1) * mask
    def get_state(self,):
        """Return serializable extra state; this model keeps none."""
        return None
|
var request = require('request');
var util = require('util');
//The target
// In-memory receiver of the commands: posts status updates and can remove
// them again by id.
var statusUpdateService = {
    statusUpdates: {},
    // Store `status` under a random id and return that id.
    sendUpdate: function(status) {
        console.log('Status sent: ' + status);
        var id = Math.floor(Math.random() * 1000000);
        statusUpdateService.statusUpdates[id] = status;
        return id;
    },
    // Remove a previously posted status by id.
    destroyUpdate: function(id) {
        console.log('Status removed: ' + id);
        delete statusUpdateService.statusUpdates[id];
    }
}
//The Command
//The Command
// Build an undoable "post status" command bound to a status service. The
// returned value is callable (performs the post) and also carries undo()
// and serialize() methods, matching the invoker's command contract.
function createSendStatusCmd(service, status) {
    var pendingId = null;

    function execute() {
        pendingId = service.sendUpdate(status);
    }

    execute.undo = function() {
        if (!pendingId) {
            return;
        }
        service.destroyUpdate(pendingId);
        pendingId = null;
    };

    execute.serialize = function() {
        return {type: 'status', action: 'post', status: status};
    };

    return execute;
}
//The Invoker
//The Invoker
// Queues executed commands so they can be undone later, run with a delay,
// or forwarded to a remote command endpoint.
function Invoker() {
    this.history = [];
}

// Execute a command immediately and record it for undo.
Invoker.prototype.run = function(cmd) {
    this.history.push(cmd);
    cmd();
    console.log('Command executed', cmd.serialize());
};

// Execute a command after `delay` milliseconds.
Invoker.prototype.delay = function(cmd, delay) {
    var self = this;
    setTimeout(function() {
        self.run(cmd);
    }, delay);
};

// Undo the most recently run command, if any.
// Bug fix: popping an empty history yields undefined and calling
// .undo() on it threw a TypeError; guard against that.
Invoker.prototype.undo = function() {
    var cmd = this.history.pop();
    if (!cmd) {
        return;
    }
    cmd.undo();
    console.log('Command undone', cmd.serialize());
};

// Serialize the command and POST it to a remote executor.
// Bug fix: the request error was silently ignored; report it instead of
// logging success unconditionally.
Invoker.prototype.runRemotely = function(cmd) {
    request.post('http://localhost:3000/cmd',
        {json: cmd.serialize()}, function(err) {
            if (err) {
                console.log('Remote execution failed', err);
                return;
            }
            console.log('Command executed remotely', cmd.serialize());
        });
};
//The Client code
// Demonstrates the command pattern end to end: run, schedule, undo, and
// remote dispatch of the same command object.
var invoker = new Invoker();
var command = createSendStatusCmd(statusUpdateService, 'HI!');
invoker.run(command);
// Re-run the same command in one hour.
invoker.delay(command, 1000 * 60 * 60);
invoker.undo();
// POST the serialized command to a local command server.
invoker.runRemotely(command);
|
import React from 'react';
import SideBarNavItem from './SideBarNavItem';
import { dashboardSidebarData } from '../../dummy-data-structures/dashboard-sidebar-data';
function SideBar({ hide }) {
return (
<div className={'dashboard_sidebar_container' + (hide ? ' hide' : '')}>
{dashboardSidebarData.map(navItem => {
return <SideBarNavItem key={navItem.id} navItem={navItem} />;
})}
</div>
);
}
export default SideBar;
|
import gulp from 'gulp';
import path from 'path';
import {expect} from 'chai';
import {tmpDir, chDir, overrideMethod} from '../src/cleanup-wrapper';
import {expectEventuallyDeleted} from 'stat-again';
// Verifies that the tmpDir wrapper deletes the temporary directories a
// wrapped task created, whether given one dir or a list, and that it
// rejects when the directory already exists.
describe('Testing tmpDir wrapper', function () {
  before(function () {
    // Task that dirties the filesystem by copying .babelrc into `dir`.
    this.dirty = function (dir = 'tmp_utils') {
      return new Promise((resolve, reject) => {
        gulp.src('.babelrc').pipe(gulp.dest(dir))
          .on('end', resolve)
          .on('error', reject);
      });
    };
    // Same task, wrapped so 'tmp_utils' is removed afterwards.
    this.clean = tmpDir('tmp_utils', this.dirty);
  });
  it(`tmpDir wrapper cleans up dir`, function () {
    return this.clean().then(() => {
      return expectEventuallyDeleted('tmp_utils', 50, 10);
    });
  });
  it(`tmpDir wrapper cleans up [...dirs]`, function () {
    const dirs = ['tmp_utils1', 'tmp_utils2'];
    return tmpDir(dirs, () => Promise.all(
      dirs.map(dir => this.dirty.bind(this, dir))
    ))().then(() => {
      return Promise.all(dirs.map(dir => expectEventuallyDeleted(dir, 50, 10)));
    });
  });
  it(`If dir already exists, tmpDir wrapper throws an error`,
    // Outer tmpDir creates the dir first, so the inner wrapped call must fail.
    tmpDir('tmp_utils', function () {
      return this.dirty().then(this.clean)
        .catch(err => {
          expect(err).to.match(
            /Error: Dir '.*' already exists/);
        });
    }));
});
// Verifies that the chDir wrapper changes into a directory for the duration
// of the wrapped task and restores the previous cwd afterwards, even when
// the task itself changes directory.
describe('Testing chDir wrapper', function () {
  before(function () {
    this.cwd = process.cwd();
    this.chdir = path.join(this.cwd, 'src');
    this.tmpdir = path.join(this.cwd, 'test');
    // Task that asserts it starts in `this.chdir`, then moves to `chdir`.
    this.dirty = function (chdir) {
      expect(process.cwd()).to.equal(this.chdir);
      process.chdir(chdir);
      expect(process.cwd()).to.equal(this.tmpdir);
    };
    this.clean = chDir(this.chdir, this.dirty);
  });
  it(`chDir wrapper restores cwd after running`, function () {
    expect(process.cwd()).to.equal(this.cwd);
    // Wrapped call: runs in src/, restores cwd afterwards.
    expect(this.clean.bind(this, this.tmpdir)).not.to.throw();
    expect(process.cwd()).to.equal(this.cwd);
    // Unwrapped call from the wrong cwd must fail its own precondition.
    expect(this.dirty.bind(this, this.tmpdir)).to.throw();
    expect(process.cwd()).to.equal(this.cwd);
    process.chdir(this.chdir);
    // Unwrapped call from the right cwd succeeds but leaves cwd changed.
    expect(this.dirty.bind(this, this.tmpdir)).not.to.throw();
    expect(process.cwd()).to.equal(this.tmpdir);
    expect(this.clean.bind(this, this.tmpdir)).not.to.throw();
    expect(process.cwd()).to.equal(this.cwd);
  });
});
// Verifies that the overrideMethod wrapper swaps in a replacement method
// for the duration of the wrapped task and restores the original after.
describe('Testing overrideMethod wrapper', function () {
  before(function () {
    this.object = {
      _name: 'original',
      name () {
        return this._name;
      },
    };
    // Task that only passes while the method is overridden.
    this.dirty = function (object) {
      expect(object.name()).to.equal('overridden');
    };
    this.clean = overrideMethod(this.object, 'name', function () {
      return 'overridden';
    }, this.dirty);
  });
  it(`overrideMethod wrapper restores object after running`, function () {
    // Without the wrapper the method still returns its original value.
    expect(this.dirty.bind(undefined, this.object)).to.throw(Error,
      /expected 'original' to equal 'overridden'/);
    // With the wrapper the override is visible inside, restored outside.
    expect(this.clean.bind(undefined, this.object)).not.to.throw();
    expect(this.object.name()).to.equal('original');
  });
});
|
import { OldFilmFilter } from "@pixi/filter-old-film";
import { Application, Container, Loader, Sprite, Ticker } from "pixi.js";
import Swiper from "swiper";
import animate from "animateplus";
import { getRandomId } from "src/utils";
// Target parameter set for the OldFilmFilter. During slide transitions each
// slide's filter is faded between zero and these values.
const filterParams = {
  noise: 0.23,
  scratchDensity: 3.67,
  noiseSize: 0.16,
  sepia: 0.35
};
// Replace `elem` with a PIXI canvas that shows elem.dataset.src behind an
// old-film filter. Resolves with the created Application once the image
// texture has loaded.
const createPixiInstance = function(elem) {
  const app = new Application(elem.dataset.width, elem.dataset.height, {
    transparent: true,
    autoStart: false,
    resizeTo: elem.parentElement
  });
  // Drive the shared ticker manually instead of letting it auto-start.
  let tkr = Ticker.shared;
  tkr.autoStart = false;
  tkr.stop();
  let rafHandle = null;
  // NOTE(review): after the first frame `rafHandle` is non-null, so the next
  // callback cancels itself and returns — the ticker is only updated once.
  // Confirm whether a continuously running rAF loop was intended here.
  function run(timestamp) {
    if (rafHandle) {
      cancelAnimationFrame(rafHandle);
      return;
    }
    rafHandle = requestAnimationFrame(run);
    tkr.update(timestamp);
  }
  run(0);
  app.view.width = elem.dataset.width;
  app.view.height = elem.dataset.height;
  // Stage container carrying the old-film filter with a randomized seed.
  const container = new Container();
  container.filters = [new OldFilmFilter(filterParams, Math.random() / 5)];
  container.filters[0].enabled = true;
  app.stage.addChild(container);
  // Swap the placeholder element for the PIXI canvas in the DOM.
  elem.parentNode.replaceChild(app.view, elem);
  const loader = new Loader();
  const id = `img-${getRandomId()}`;
  loader.add(id, elem.dataset.src);
  return new Promise((res, rej) => {
    loader.load(() => {
      // Stretch the loaded image sprite to fill the canvas.
      const bgSprite = new Sprite(loader.resources[id].texture);
      bgSprite.width = app.view.width;
      bgSprite.height = app.view.height;
      bgSprite.x = 0;
      bgSprite.y = 0;
      container.addChild(bgSprite);
      res(app);
    });
  });
};
// Turn every element matching `selector` into a PIXI instance; resolves
// with the array of created Applications once all images have loaded.
const pixify = function(selector) {
  const nodes = Array.from(document.querySelectorAll(selector));
  return Promise.all(nodes.map(node => createPixiInstance(node)));
};
// Wire a Swiper slider to the portfolio's PIXI slides: a progress bar that
// fills as the user swipes, a slide counter, and an old-film filter that
// fades out on the active slide and back in on the one being left.
function setupSlider({ portfolio }, dataNodeSelector, sliderNodeSelector) {
  pixify(dataNodeSelector).then(pixiArray => {
    const swiper = new Swiper(sliderNodeSelector, {
      slidesPerView: "auto",
      centeredSlides: true,
      spaceBetween: 200,
      touchRatio: 1
    });
    const track = portfolio.counter.track;
    let current = 0;
    let previous = 0;
    let numSlides = portfolio.slides.length;
    portfolio.counter.stop.innerHTML = `0${numSlides}`;
    // set filtering params to 0 for initial slide
    pixiArray[0].stage.children[0].filters[0].noise = 0;
    pixiArray[0].stage.children[0].filters[0].scratchDensity = 0;
    pixiArray[0].stage.children[0].filters[0].noiseSize = 0;
    pixiArray[0].stage.children[0].filters[0].sepia = 0;
    // Fill the progress track proportionally to the swipe progress `a` (0..1).
    swiper.on("progress", a => {
      let width = track.clientWidth;
      let gradientStop = (100 * (1 + (numSlides - 1) * a)) / numSlides;
      track.style = ` background: linear-gradient(to right, #fbee30 ${gradientStop}%, #3b3b3b ${gradientStop}%);
                      width: ${width}px;
                      height: 1px;`;
    });
    swiper.on("transitionStart", function() {
      previous = current;
      current = this.activeIndex;
    });
    swiper.on("transitionEnd", function() {
      if (current === previous) return;
      portfolio.counter.start.innerHTML = `0${current + 1}`;
      const currentPixiApp = pixiArray[current].stage.children[0].filters[0];
      const previousPixiApp = pixiArray[previous].stage.children[0].filters[0];
      // Scale one filter's parameters by `progress` (0..1) of filterParams.
      const fadeFn = (pixiApp, progress) => {
        pixiApp.noise = progress * filterParams.noise;
        pixiApp.scratchDensity = progress * filterParams.scratchDensity;
        pixiApp.noiseSize = progress * filterParams.noiseSize;
        pixiApp.sepia = progress * filterParams.sepia;
      };
      // Previous slide fades the filter in while the new one fades it out.
      const handleFilters = progress => {
        fadeFn(previousPixiApp, progress);
        fadeFn(currentPixiApp, 1 - progress);
      };
      animate({
        easing: "in-quintic",
        duration: 500,
        change: handleFilters
      });
    });
  });
}
export { setupSlider };
|
from setuptools import find_packages, setup
# Package metadata for the asyncache distribution (async-aware helpers on
# top of cachetools).
setup(
    name="asyncache",
    version="0.1.1",
    url="https://github.com/hephex/asyncache",
    license="MIT",
    author="Hephex",
    description="Helpers to use cachetools with async functions",
    # Long description is taken verbatim from the README.
    long_description=open("README.rst").read(),
    keywords="cache caching memoize memoizing memoization async",
    packages=find_packages(exclude=["tests", "tests.*"]),
    install_requires=["cachetools>=2.1"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
|
from setuptools import setup, find_packages
import os
# To use a consistent encoding
from codecs import open
from cmwalk import version
# Get the long description from the README file
def readme():
    '''Read README.rst (UTF-8) next to this file and strip carriage returns
    so the long description uses Unix line endings.'''
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
        return f.read().replace('\r', '')
# Package metadata for the cmwalk distribution (CMakeLists.txt generator).
setup(name='cmwalk',
      version=version.VERSION,
      description='A python script to walk subdirectories of a C/C++ project of embedded system to generate CMakeLists.txt files for building the executable.',
      long_description=readme(),
      long_description_content_type='text/x-rst; charset=UTF-8',
      keywords = ['cmake'],
      classifiers=[],
      url='https://github.com/maxpeng/cmWalk',
      author='Max Peng',
      author_email='max.peng1768@gmail.com',
      license='MIT',
      packages=find_packages(),
      include_package_data=True,
      install_requires=['jinja2', 'walkdir'],
      entry_points={
          # Installs the `cmwalk` console command.
          'console_scripts': [
              'cmwalk = cmwalk.cmwalk:main'
          ]
      },
      zip_safe=False)
|
from __future__ import annotations
from typing import Any, Dict
async def run_python(
    ctx: Dict[Any, Any], py: str, *, kernel_name: str = "LSST"
) -> str:
    """Execute Python code in a JupyterLab pod with a specific Jupyter kernel.

    Parameters
    ----------
    ctx
        Arq worker context. Must provide a bound structlog-style ``logger``
        and a ``jupyter_client`` that can open lab sessions.
    py : str
        Python code to execute.
    kernel_name : str
        Name of the Python kernel.

    Returns
    -------
    result : str
        The standard output produced by executing ``py`` in the kernel.
    """
    logger = ctx["logger"].bind(task="run_python")
    logger.info("Running run_python", py=py)
    jupyter_client = ctx["jupyter_client"]
    # The session context manager tears the lab session down after the run.
    async with jupyter_client.open_lab_session(
        kernel_name=kernel_name
    ) as session:
        result = await session.run_python(py)
    logger.info("Running run_python", result=result)
    return result
|
#! /usr/bin/env node
/*
* Copyright (c) 2018-present, IBM CORP.
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
var program = require('commander');
// CLI entry point: registers the sub-commands (each handled by a separate
// commander sub-command file) and parses argv.
program
    .version('0.0.1')
    .description('Create extensions for Maximo\'s object strucutre (i.e. Mbo,Field Classes,Applications)')
    .command('app', 'Extends an application structure').alias('application')
    .command('field', 'Extends a field class').alias('fld')
    .command('mbo', 'Extends a Mbo structure').alias('mb')
    .command('service', 'Extends a application\'s service structure').alias('svc')
    .parse(process.argv);

// check if the user passed a command
// NOTE(review): this only prints a message; the process still exits 0 on an
// invalid command — confirm whether a non-zero exit code is wanted.
if (!program.commands.map(cmd => cmd._name).includes(program.args[0])) {
    console.log("Invalid command: " + program.args[0]);
}
|
#!/usr/bin/env python
# coding: utf-8
import os
import logging
import json
import yaml
import importlib
import imp
import traceback
from logger import log, stdouthandler, logfilehandler, timestamp
from version import detectversion
from helpers import findfile
# Default set of Dwarf Fortress files/directories handled when the user
# passes --paths auto: logs, raws, art/init data, and DFHack files.
auto_paths = [
    'gamelog.txt', 'errorlog.txt',
    'stderr.log', 'stdout.log',
    'raw/objects', 'raw/graphics',
    'data/art', 'data/init', 'data/speech',
    'dfhack.init', 'dfhack.init-example', 'dfhack.history',
    'raw/onLoad.init', 'raw/onWorldLoad.init',
    'hack/lua', 'hack/plugins', 'hack/raw', 'hack/ruby', 'hack/scripts',
    'stonesense',
]
class config(object):
'''
Objects store settings used by session objects to determine
customizeable behavior.
'''
def __init__(
self,
version=None, paths=None, hackversion=None, input=None, output=None, backup=None,
scripts=[], packages=[], verbose=False, log='logs/%s.txt' % timestamp,
):
'''Initialize a config object.'''
self.version = version # Dwarf Fortress version, for handling script compatibility metadata
self.hackversion = hackversion # DFHack version
self.input = input # Raws are loaded from this input directory
self.output = output # Raws are written to this output directory
self.backup = backup # Raws are backed up to this directory before any changes are made
self.paths = paths # Files are only handled in these paths, relative to input
self.scripts = scripts # These scripts are run in the order that they appear
self.packages = packages # These packages are imported (probably because they contain PyDwarf scripts)
self.verbose = verbose # Log DEBUG messages to stdout if True, otherwise only INFO and above
self.log = log # Log file goes here
@staticmethod
def load(root=None, json='config.json', yaml='config.yaml', override='config.py', logoverridefailure=False, args=None):
'''
Load a config object given default configuration file paths and
command line arguments.
'''
conf = config()
# Load default config files with the precedence: python override > json > yaml
if yaml:
if root: yaml = os.path.join(root, yaml)
if os.path.isfile(yaml): conf.yaml(yaml)
if json:
if root: json = os.path.join(root, json)
if os.path.isfile(json): conf.json(json)
if override:
if root and override.endswith('.py'): override = os.path.join(root, override)
try:
conf.override(override)
except:
if logoverridefailure:
log.debug('Tried and failed to apply default override from module %s. (But that\'s okay! It\'s just a default.)' % override)
log.debug(traceback.format_exc())
# Handle --config argument
if args and args.get('config'):
argsconf = args['config']
if isinstance(argsconf, basestring): argsconf = (argsconf,)
for applyconf in argsconf:
try:
if applyconf.endswith('.json'):
conf.json(applyconf)
elif applyconf.endswith('.yaml'):
conf.yaml(applyconf)
else:
conf.override(applyconf)
except:
log.exception('Failed to load configuration from %s.' % applyconf)
# Apply other command line arguments
if args: conf.apply(args)
# Handle things like automatic version detection, package importing
conf.setup()
# All done!
return conf
def __str__(self):
'''Get a string representation.'''
return str(self.__dict__)
def __getitem__(self, attr):
'''Get configuration argument.'''
return self.__dict__[attr]
def __setitem__(self, attr, value):
'''Set configuration argument.'''
self.__dict__[attr] = value
def __iter__(self):
'''Iterate through configuration dict.'''
return iter(self.__dict__)
def iteritems(self):
'''Iterate through configuration dict items.'''
return self.__dict__.iteritems()
def __add__(self, other):
'''Merge two configuration objects.'''
return config.concat(self, other)
def __radd__(self, other):
'''Merge two configuration objects.'''
return config.concat(other, self)
def __iadd__(self, item):
'''Merge another configuration object into this one.'''
self.apply(item)
return self
def __and__(self, other):
'''Get the intersection of two configuration objects.'''
return config.intersect(self, other)
def json(self, path, *args, **kwargs):
'''Load json configuration from a file.'''
log.info('Applying json configuration from %s.' % path)
try:
with open(path, 'rb') as jsonfile:
jsondata = json.load(jsonfile)
return self.apply(jsondata, *args, **kwargs)
except ValueError as error:
strerror = str(error)
if strerror.startswith('Invalid \\escape'):
raise ValueError('Failed to load json from %s because of a misplaced backslash at %s. Perhaps you meant to use a forward slash instead?' % (path, strerror[17:]))
else:
raise error
def yaml(self, path, *args, **kwargs):
'''Load yaml configuration from a file.'''
log.info('Applying yaml configuration from %s.' % path)
with open(path, 'rb') as yamlfile:
yamldata = yaml.load(yamlfile)
return self.apply(yamldata, *args, **kwargs)
def override(self, module, *args, **kwargs):
'''Load python configuration from a file.'''
log.info('Applying python configuration from %s.' % module)
if module.endswith('.py'):
modulename = os.path.splitext(os.path.basename(module))[0]
with open(module, 'U') as modulefile:
package = imp.load_module(modulename, modulefile, module, ('.py', 'U', imp.PY_SOURCE))
else:
package = importlib.import_module(module)
try:
export = package.export
except:
log.exception('Failed to load override module %s because it has no export attribute.' % module)
self.apply(export)
def apply(self, data, applynone=False):
'''Apply another dict or config object's settings to this one.'''
if data:
for key, value in data.iteritems():
if applynone or value is not None: self.__dict__[key] = value
return self
def copy(self):
'''Copy the config object.'''
copy = config()
for key, value in self: copy[key] = value
return copy
@staticmethod
def concat(*configs):
'''Merge two config objects.'''
result = config()
for conf in configs: result.apply(conf)
return result
@staticmethod
def intersect(*configs):
'''Get the intersection of two configuration objects.'''
result = config()
first = configs[0]
for attr, value in first.iteritems():
for conf in configs:
if (conf is not first) and (attr not in conf or conf[attr] != value): break
else:
result[attr] = value
return result
def setup(self, logger=True):
'''
Setup logger and handle 'auto' arguments after a configuration
object has been fully loaded.
'''
# Set up the pydwarf logger
if logger: self.setuplogger()
# Handle paths == 'auto' or ['auto']
self.setuppaths()
# Handle version == 'auto'
self.setupversion()
# Handle hackversion == 'auto'
self.setuphackversion()
# Import packages
self.setuppackages()
def setuplogger(self):
'''Internal: Setup the logger object.'''
# Handler for console output
stdouthandler.setLevel(logging.DEBUG if self.verbose else logging.INFO)
# Handler for log file output
if self.log:
logdir = os.path.dirname(self.log)
if not os.path.exists(logdir): os.makedirs(logdir)
logfilehandler.__init__(self.log) # Call the constructor given a filepath, now that we actually have one to give
log.addHandler(logfilehandler)
def setuppackages(self):
'''Internal: Import packages.'''
self.importedpackages = []
if isinstance(self.packages, basestring): self.packages = (self.packages,)
for package in self.packages:
try:
self.importedpackages.append(importlib.import_module(package))
except:
log.exception('Failed to import package %s.' % package)
def setuppaths(self):
'''Internal: Handle 'auto' for --paths.'''
if self.paths == 'auto' or self.paths == ['auto'] or self.paths == ('auto',):
self.paths = auto_paths
def setupversion(self):
'''Internal: Handle 'auto' for --version.'''
# Handle automatic version detection
if self.version == 'auto':
log.debug('Attempting to automatically detect Dwarf Fortress version.')
self.version = detectversion(paths=(self.input, self.output))
if self.version is None:
log.error('Unable to detect Dwarf Fortress version.')
else:
log.debug('Detected Dwarf Fortress version %s.' % self.version)
elif self.version is None:
log.warning('No Dwarf Fortress version was specified. Scripts will be run regardless of their indicated compatibility.')
else:
log.info('Managing Dwarf Fortress version %s.' % self.version)
    def setuphackversion(self):
        '''Internal: Handle 'auto' for --hackversion.''' # TODO: Probably doesn't work for all releases
        if self.hackversion == 'auto':
            log.debug('Attempting to automatically detect DFHack version.')
            # Locate DFHack's "hack" directory inside the managed DF dirs.
            dfhackdir = findfile(name='hack', paths=(self.input, self.output))
            if dfhackdir is None:
                log.error('Unable to detect DFHack directory.')
                return
            else:
                log.debug('Detected DFHack directory at %s.' % dfhackdir)
            # The first line of DFHack's NEWS file names the release version.
            newspath = os.path.join(dfhackdir, 'NEWS')
            if os.path.isfile(newspath):
                with open(newspath, 'rb') as news: self.hackversion = news.readline().strip()
            # NOTE(review): if NEWS is missing, self.hackversion is still the
            # string 'auto' (not None) at this point, so the error branch below
            # never fires and 'auto' is logged as the detected version — verify.
            if self.hackversion is None:
                log.error('Unable to detect DFHack version.')
            else:
                log.debug('Detected DFHack version %s.' % self.hackversion)
        elif self.hackversion is None:
            log.warning('No DFHack version was specified.')
|
import * as algoliaSearchActionsConstants from '../actions/algoliaSearchActionsConstants';
// Initial search state: the typed keyword plus, for each searchable entity
// (leads, contacts, accounts, tasks, events, deals), a result list and a
// loading/success/error flag triple tracking the request lifecycle.
const initialState = {
  keyword: '',
  algoliaLeadsList: [],
  algoliaContactsList: [],
  algoliaAccountsList: [],
  algoliaTasksList: [],
  algoliaEventsList: [],
  algoliaDealsList: [],
  algoliaLeadsListLoading: false,
  algoliaLeadsListSuccess: false,
  algoliaLeadsListError: false,
  algoliaContactsListLoading: false,
  algoliaContactsListSuccess: false,
  algoliaContactsListError: false,
  algoliaAccountsListLoading: false,
  algoliaAccountsListSuccess: false,
  algoliaAccountsListError: false,
  algoliaTasksListLoading: false,
  algoliaTasksListSuccess: false,
  algoliaTasksListError: false,
  algoliaEventsListLoading: false,
  algoliaEventsListSuccess: false,
  algoliaEventsListError: false,
  algoliaDealsListLoading: false,
  algoliaDealsListSuccess: false,
  algoliaDealsListError: false
};
// Reducer for Algolia global-search state. Every entity (Leads, Contacts,
// Accounts, Tasks, Events, Deals) follows the same four transitions:
// request started, request succeeded, request failed, results cleared.
const algoliaSearchReducer = (state = initialState, action) => {
  // `entity` is the capitalised plural used in the state keys, e.g. 'Leads'.
  const requested = entity => ({
    ...state,
    [`algolia${entity}ListLoading`]: true,
    [`algolia${entity}ListSuccess`]: false,
    [`algolia${entity}ListError`]: false
  });
  const succeeded = (entity, payload) => ({
    ...state,
    [`algolia${entity}ListLoading`]: false,
    [`algolia${entity}ListSuccess`]: true,
    [`algolia${entity}ListError`]: false,
    [`algolia${entity}List`]: payload
  });
  const failed = entity => ({
    ...state,
    [`algolia${entity}ListLoading`]: false,
    [`algolia${entity}ListSuccess`]: false,
    [`algolia${entity}ListError`]: true
  });
  const cleared = entity => ({
    ...state,
    [`algolia${entity}ListLoading`]: false,
    [`algolia${entity}ListSuccess`]: false,
    [`algolia${entity}ListError`]: false,
    [`algolia${entity}List`]: []
  });
  switch (action.type) {
    case algoliaSearchActionsConstants.RECORD_SEARCH_KEYWORD:
      return { ...state, keyword: action.keyword };
    case algoliaSearchActionsConstants.GET_ALGOLIA_LEAD:
      return requested('Leads');
    case algoliaSearchActionsConstants.GET_ALGOLIA_LEAD_SUCCESS:
      return succeeded('Leads', action.payload);
    case algoliaSearchActionsConstants.GET_ALGOLIA_LEAD_ERROR:
      return failed('Leads');
    case algoliaSearchActionsConstants.CLEAR_ALGOLIA_LEAD:
      return cleared('Leads');
    case algoliaSearchActionsConstants.GET_ALGOLIA_CONTACT:
      return requested('Contacts');
    case algoliaSearchActionsConstants.GET_ALGOLIA_CONTACT_SUCCESS:
      return succeeded('Contacts', action.payload);
    case algoliaSearchActionsConstants.GET_ALGOLIA_CONTACT_ERROR:
      return failed('Contacts');
    case algoliaSearchActionsConstants.CLEAR_ALGOLIA_CONTACT:
      return cleared('Contacts');
    case algoliaSearchActionsConstants.GET_ALGOLIA_ACCOUNT:
      return requested('Accounts');
    case algoliaSearchActionsConstants.GET_ALGOLIA_ACCOUNT_SUCCESS:
      return succeeded('Accounts', action.payload);
    case algoliaSearchActionsConstants.GET_ALGOLIA_ACCOUNT_ERROR:
      return failed('Accounts');
    case algoliaSearchActionsConstants.CLEAR_ALGOLIA_ACCOUNT:
      return cleared('Accounts');
    case algoliaSearchActionsConstants.GET_ALGOLIA_TASK:
      return requested('Tasks');
    case algoliaSearchActionsConstants.GET_ALGOLIA_TASK_SUCCESS:
      return succeeded('Tasks', action.payload);
    case algoliaSearchActionsConstants.GET_ALGOLIA_TASK_ERROR:
      return failed('Tasks');
    case algoliaSearchActionsConstants.CLEAR_ALGOLIA_TASK:
      return cleared('Tasks');
    case algoliaSearchActionsConstants.GET_ALGOLIA_EVENT:
      return requested('Events');
    case algoliaSearchActionsConstants.GET_ALGOLIA_EVENT_SUCCESS:
      return succeeded('Events', action.payload);
    case algoliaSearchActionsConstants.GET_ALGOLIA_EVENT_ERROR:
      return failed('Events');
    case algoliaSearchActionsConstants.CLEAR_ALGOLIA_EVENT:
      return cleared('Events');
    case algoliaSearchActionsConstants.GET_ALGOLIA_DEAL:
      return requested('Deals');
    case algoliaSearchActionsConstants.GET_ALGOLIA_DEAL_SUCCESS:
      return succeeded('Deals', action.payload);
    case algoliaSearchActionsConstants.GET_ALGOLIA_DEAL_ERROR:
      return failed('Deals');
    case algoliaSearchActionsConstants.CLEAR_ALGOLIA_DEAL:
      return cleared('Deals');
    default:
      return state;
  }
};
export default algoliaSearchReducer;
|
#/opt/local/bin/python3
import sys, math, re, time, os
import numpy as np
import numpy.random as rand
import random
import hashlib
from copy import deepcopy
#helpful resources: https://www.youtube.com/c/learnmeabitcoin/videos
#Txn for transaction
#BlkChn for blockchain
##################################################################################
class blockEntry:
    """One block of the toy blockchain.

    A block stores a list of transaction strings, the hash of the previous
    block, a creation timestamp (whole seconds as a string), and its own
    SHA-1 hash of transactions + preHash + timestamp.
    """
    _thisHash = None
    _preHash = None
    _timeTime = None
    _transactions = None

    def __init__(self, preHash, transactions):
        """Copy the inputs, stamp the time, and compute this block's hash."""
        self._transactions = deepcopy(transactions)
        self._preHash = deepcopy(preHash)
        self._timeTime = str(int(time.time()))
        payload = "".join(self._transactions) + self._preHash + self._timeTime
        self._thisHash = hashlib.sha1(payload.encode()).hexdigest()

    def content(self):
        """Return a defensive deep copy of the block's fields."""
        return deepcopy(self.secretContentAccess())

    def secretContentAccess(self):
        """Return the live field dict (no copy) — mutations affect the block."""
        return {
            "transactions": self._transactions,
            "timeTime": self._timeTime,
            "thisHash": self._thisHash,
            "preHash": self._preHash,
        }
class userEntry:
    """A participant (wallet owner / miner) in the toy blockchain simulation.

    Transactions are strings of the form "sender=>receiver:coin:timestamp".
    Entries prefixed "<digit>Rm_" appear to mark transactions flagged for
    removal from the memory pool, with the leading digit acting as a vote
    counter — TODO confirm against the simulation driver.
    Only comments/docstrings were added in review; code is unchanged.
    """
    # Per-instance state, initialised in __init__:
    __name = None;         # user id, e.g. "A"
    __wallet = None;       # current coin balance
    __mineRate = None;     # mining rate used by the driving simulation
    __txnRate = None;      # transaction rate used by the driving simulation
    __mempool = None;      # pending transaction strings
    __blockchain = None;   # private copy of the chain (list of blockEntry)
    __updateIter = None;   # index of first block not yet applied to the wallet
    __isMalicious = False; # malicious users skip hash checks in checkBlkChnValidity
    def __init__(self, name, mineRate, txnRate, blockchain):
        """Create a user with 10 starting coins and a private chain copy."""
        self.__name = deepcopy(name);
        self.__blockchain = deepcopy(blockchain);
        self.__wallet = 10;
        self.__mineRate = mineRate;
        self.__txnRate = txnRate;
        self.__updateIter = 0;
        self.__mempool = [];
        # Apply the initial chain's transactions to the wallet balance.
        self.updateTxnFromBlkChn();
    def updateMempool(self, mempool, userList, selfPosted=False,\
                      clearSelfMempool=False, ignoreDoubleSpending=False):
        """Merge incoming `mempool` entries into this user's memory pool.

        Drops transactions already mined in the last 3 blocks, orders by
        timestamp, validates sender balances against `userList`, handles
        "Rm_" removal entries, and returns a list of warning strings.
        """
        if clearSelfMempool == True:
            self.__mempool = [];
        # Deduplicate the union of our pool and the incoming pool.
        memNew = self.__mempool + mempool;
        memNew = list(set(memNew));
        blockNtoCheck = 3;
        warnings = [];
        # Drop anything already mined into the most recent blocks.
        for block in self.__blockchain[-blockNtoCheck:]:
            transactions = block.content()["transactions"];
            memNew = [m for m in memNew if m not in transactions];
        #check wallet amount
        walletNums = {"A": 0, "B": 0, "C": 0, "D": 0, "E": 0};
        txnNums = {"A": 0, "B": 0, "C": 0, "D": 0, "E": 0};
        for user in userList:
            uName = user.content()["name"];
            uWallet = user.content()["wallet"];
            walletNums[uName] += uWallet;
        #sort in time (the timestamp is the final ':'-separated field)
        memNewSort = [];
        for transaction in memNew:
            memNewSort.append([transaction.split(":")[-1], transaction]);
        memNewSort = np.array(memNewSort);
        memNewSort.sort(axis=0);
        memNewTemp = [];
        for item in memNewSort:
            memNewTemp.append(item[1]);
        memNew = memNewTemp;
        #remove unsolicited: collapse all "Rm_" entries for the flagged
        #transaction into a single entry with the highest counter + 1.
        txnWanted = "";
        RmIdxs = 0;
        for i, transaction in enumerate(memNew):
            if "Rm_" in transaction:
                txnWanted = transaction[1:].replace("Rm_", "");
                if int(transaction[0]) > RmIdxs:
                    RmIdxs = int(transaction[0]);
        RmIdxs += 1;
        self.__mempool = [t for t in self.__mempool\
                          if (t != txnWanted) and ("Rm_" not in t)];
        memNew = [t for t in memNew\
                  if (t != txnWanted) and ("Rm_" not in t)];
        if txnWanted != "":
            memNew.append(str(RmIdxs) + "Rm_" + txnWanted);
        ###
        # Count transactions per sender already in our pool ("one txn per
        # sender" rule below).
        for transaction in self.__mempool:
            if "Rm_" not in transaction:
                sender = transaction.split("=>")[0];
                txnNums[sender] += 1;
        for transaction in memNew:
            if transaction not in self.__mempool:
                txn = transaction.split("=>");
                sender = txn[0];
                receiver = txn[1].split(":")[0];
                coin = int(txn[1].split(":")[1]);
                if "Rm_" in transaction:
                    # A counter of 9 appears to be the cap; drop at "9Rm_".
                    if "9Rm_" not in transaction:
                        self.__mempool.append(transaction);
                elif (selfPosted == False) and (sender == self.__name):
                    # Someone else posted a transaction in our name.
                    warnings.append(
                        self.genWarning(self.__name, receiver, transaction,
                                        "unsolicited transaction"));
                    self.__mempool.append("0Rm_"+transaction);
                elif coin > walletNums[sender]:
                    warnings.append(
                        self.genWarning(self.__name, sender, transaction,
                                        "not enough coin in wallet"));
                else:
                    txnNums[sender] += 1;
                    if txnNums[sender] > 1:
                        warnings.append(
                            self.genWarning(self.__name, sender, transaction,\
                                            "only one transaction allowed "+\
                                            "in memory pool"));
                        if ignoreDoubleSpending == True:
                            self.__mempool.append(transaction);
                    else:
                        self.__mempool.append(transaction);
        return warnings;
    def checkTxnValidityFromBlkChn(self, name, blockchain):
        """Validate transaction format in the last 3 blocks of `blockchain`.

        Returns (validity, warnings); `name` identifies the chain's owner
        for warning messages.
        """
        validity = True;
        warnings = [];
        txnWanted = "";
        for transaction in self.__mempool:
            if "Rm_" in transaction:
                txnWanted = transaction[1:].replace("Rm_", "");
        # NOTE(review): the slice bound uses len(self.__blockchain) while
        # iterating the *argument* blockchain — verify this is intended.
        for block in blockchain[max(0, len(self.__blockchain)-3):]:
            transactions = block.content()["transactions"];
            for transaction in transactions:
                txn = transaction.split("=>");
                if "genesis" not in transaction:
                    formatBool = (len(txn) == 2) and\
                                 (len(txn[1].split(":")) == 3) and\
                                 (int(txn[1].split(":")[1]) >= 0);
                    if (formatBool == False) or\
                       ("Rm_" in transaction):
                        validity = False;
                        warnings.append(self.genWarning(self.__name, name,\
                            transaction, "invalid transaction in block"));
                    # NOTE(review): `warnings` holds warning strings, not raw
                    # transactions, so this membership test looks like it can
                    # never match — verify intent.
                    elif transaction in warnings:
                        validity = False;
                        warnings.append(self.genWarning(self.__name, name,\
                            transaction, "unsolicited transaction in block"));
        return validity, warnings;
    def checkTxnDuplicateFromBlkChn(self, name, blockchain):
        """Flag transactions repeated across the last 10 blocks."""
        blockNtoCheck = 10;
        validity = True;
        txns = [];
        warnings = [];
        for block in blockchain[-blockNtoCheck:]:
            transactions = block.content()["transactions"];
            for transaction in transactions:
                if transaction in txns:
                    validity = False;
                    warnings.append(self.genWarning(self.__name, name,\
                        transaction, "repeated transaction"));
            txns = txns + transactions;
        return validity, warnings;
    def genWarning(self, nameFind, nameCulprit, transaction, reason):
        """Format a rejection warning naming finder, culprit and reason."""
        warning = "WARNING: "+nameFind+" reject transaction from ";
        warning += nameCulprit+" (";
        warning += transaction.split(":")[0]+":"+transaction.split(":")[1]+"): ";
        warning += reason+".";
        return warning;
    def checkBlkChnValidity(self, blockchain):
        """Recompute every non-genesis block hash and flag mismatches.

        Malicious users (self.__isMalicious) deliberately skip the check.
        """
        validity = True;
        warnings = [];
        for i, block in enumerate(blockchain):
            if i != 0:
                transactions = block.content()["transactions"];
                preHash = block.content()["preHash"];
                timeTime = block.content()["timeTime"];
                strToHash = "".join(transactions) + preHash + timeTime;
                thisHash = hashlib.sha1(strToHash.encode()).hexdigest();
                if (block.content()["thisHash"] != thisHash) and\
                   (self.__isMalicious == False):
                    validity = False;
                    blockName = "block" + str(i);
                    firstTxn = block.content()["transactions"][0];
                    warnings.append(self.genWarning(self.__name,blockName,firstTxn,
                                                    "block hash mismatching"));
        return validity, warnings;
    def checkBlkChnLength(self, blockchain):
        """True if `blockchain` is strictly longer than our copy."""
        if len(blockchain) > len(self.__blockchain):
            return True;
        return False;
    def updateBlkChn(self, blockchain):
        """Adopt `blockchain`, rolling back wallet effects of any blocks
        on our copy that diverge from it (compared by hash, back to front)."""
        self.__updateIter = len(self.__blockchain) - 1;
        while self.__blockchain[self.__updateIter].content()["thisHash"] != \
              blockchain[self.__updateIter].content()["thisHash"]:
            self.__reverseWallet(self.__blockchain[self.__updateIter]);
            self.__updateIter -= 1;
            if self.__updateIter < 0:
                break;
        # Leave __updateIter at the first block whose effects must be re-applied.
        self.__updateIter += 1;
        self.__blockchain = deepcopy(blockchain);
    def __reverseWallet(self, block):
        """Undo a block's effect on this user's wallet balance."""
        transactions = block.content()["transactions"];
        if "genesis" in transactions:
            pass;
        else:
            for transaction in transactions:
                txn = transaction.split("=>");
                sender = txn[0];
                receiver = txn[1].split(":")[0];
                coin = int(txn[1].split(":")[1]);
                if sender == self.__name:
                    self.__wallet += coin;
                if receiver == self.__name:
                    self.__wallet -= coin;
    def updateTxnFromBlkChn(self):
        """Apply not-yet-processed blocks (from __updateIter on) to the
        wallet and drop their transactions from the memory pool."""
        for block in self.__blockchain[self.__updateIter:]:
            transactions = block.content()["transactions"];
            if "genesis" in transactions:
                pass;
            else:
                for transaction in transactions:
                    txn = transaction.split("=>");
                    sender = txn[0];
                    receiver = txn[1].split(":")[0];
                    coin = int(txn[1].split(":")[1]);
                    if sender == self.__name:
                        self.__wallet -= coin;
                    if receiver == self.__name:
                        self.__wallet += coin;
                self.__mempool = [t for t in self.__mempool if t not in transactions];
        self.__updateIter = len(self.__blockchain);
        return 0;
    def getTxnRate(self):
        """Return this user's transaction rate."""
        return deepcopy(self.__txnRate);
    def getMineRate(self):
        """Return this user's mining rate."""
        return deepcopy(self.__mineRate);
    def mine(self):
        """Mine a new block from the memory pool onto our private chain,
        adding a 1-coin mining reward transaction from "SYS"."""
        blockNtoCheck = 3;
        # Drop pool entries already mined into recent blocks.
        for block in self.__blockchain[-blockNtoCheck:]:
            transactions = block.content()["transactions"];
            self.__mempool = [m for m in self.__mempool if m not in transactions];
        txnStr = "SYS=>" + self.__name + ":1"; #earning 1 coin for mining
        txnStr += ":" + str(int(np.ceil(time.time()*pow(10, 3))));
        self.__mempool.append(txnStr);
        blockPre = self.__blockchain[-1];
        block = blockEntry(blockPre.content()["thisHash"], self.__mempool);
        self.__blockchain.append(block);
        self.__mempool = [];
    def content(self):
        """Return a defensive deep copy of this user's public state."""
        return deepcopy({"name": self.__name,\
                         "wallet": self.__wallet,\
                         "mempool": self.__mempool,\
                         "blockchain": self.__blockchain});
    def setMalicious(self, isMalicious):
        """Toggle malicious behavior (skips hash validation when True)."""
        self.__isMalicious = isMalicious;
    def secretContentAccess(self):
        """Return the live state dict (no copy) — mutations affect the user."""
        return {"name": self.__name,\
                "wallet": self.__wallet,\
                "mempool": self.__mempool,\
                "blockchain": self.__blockchain};
|
"""Simple daemonize manager implementation."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import sys
from ..interfaces import daemonize as daemonize_iface
from ..interfaces import exit
LOG = logging.getLogger(__name__)
class SimpleDaemonizeManager(daemonize_iface.DaemonizeManager):
    """Daemonizer which does a unix double fork."""

    def daemonize(self):
        """Double fork and set the pid.

        After this returns, the calling code is running in the fully
        detached daemon process and self.pid holds its pid.
        """
        self._double_fork()
        # Write pidfile.
        self.pid = os.getpid()
        # BUG FIX: corrected "Succesfully" typo in the log message.
        LOG.info(
            "Successfully daemonized process {0}.".format(self.pid)
        )

    def _double_fork(self):
        """Do the UNIX double-fork magic.

        See Stevens' "Advanced Programming in the UNIX Environment" for details
        (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # Exit first parent. (sys.exit raises SystemExit, so the
                # unreachable `return None` that followed it was removed.)
                sys.exit(0)
        except OSError as err:
            LOG.exception(
                "Fork #1 failed: {0} ({1})".format(
                    err.errno,
                    err.strerror,
                ),
            )
            sys.exit(exit.DAEMONIZE_FAILED)
        # Decouple from parent environment.
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # Do second fork.
        try:
            pid = os.fork()
            if pid > 0:
                # Exit from second parent.
                sys.exit(0)
        except OSError as err:
            LOG.exception(
                "Fork #2 failed: {0} ({1})".format(
                    err.errno,
                    err.strerror,
                ),
            )
            sys.exit(exit.DAEMONIZE_FAILED)
|
#
# Copyright (c) nexB Inc. and others.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os.path import abspath
from os.path import dirname
from os.path import join
from plugincode.location_provider import LocationProviderPlugin
class LibmagicPaths(LocationProviderPlugin):
    """Location provider for the bundled libmagic library and magic database."""

    def get_locations(self):
        """Return a mapping of typecode location keys to paths bundled
        alongside this module (lib/ for binaries, data/ for the db)."""
        here = dirname(abspath(__file__))
        return {
            'typecode.libmagic.libdir': join(here, 'lib'),
            'typecode.libmagic.dll': join(here, 'lib', 'libmagic-1.dll'),
            'typecode.libmagic.db': join(here, 'data', 'magic.mgc'),
        }
|
import pandas as pd
from pathlib import Path
def parse_dates(s):
    """
    Fast date parser, source: https://github.com/sanand0/benchmarks/tree/master/date-parse

    Large data usually repeats the same date strings many times, so instead
    of re-parsing each row we parse every *unique* value once and map the
    series through the resulting lookup table.
    """
    lookup = {}
    for raw in s.unique():
        lookup[raw] = pd.to_datetime(raw)
    return s.map(lookup)
def clean(string):
    """Heuristically normalize a company name for matching.

    Order matters: 'Inc' is stripped before lowercasing (so 'inc' inside a
    word survives) and '.com' before bare '.' removal.
    """
    result = string.replace('Inc', '').lower()
    for token in ('.com', '.', ',', '®', "'", 'corporation', 'group'):
        result = result.replace(token, '')
    # Collapse one level of double spaces, drop hyphens, trim.
    result = result.replace('  ', ' ').replace('-', '')
    return result.strip()
def clean_name(series):
    """Vectorised clean(): normalize each unique name once, then map."""
    return series.map({raw: clean(raw) for raw in series.unique()})
def parse_currency(x):
    """Parse a '$'-prefixed abbreviated currency string into a float.

    Numbers are returned unchanged. Strings may carry a K/M/B suffix
    (e.g. '$1.5M' -> 1500000.0); a bare suffix means one unit of it.
    Plain numeric strings ('$500') are converted directly; anything
    unparseable yields 0.0.
    """
    # Idiom fix: isinstance instead of `type(x) ==` comparisons.
    if isinstance(x, (int, float)):
        return x
    x = x.replace('$', '')
    if 'K' in x:
        if len(x) > 1:
            return float(x.replace('K', '')) * 1000
        return 1000.0  # bare 'K' means exactly one thousand
    if 'M' in x:
        if len(x) > 1:
            return float(x.replace('M', '')) * 1000000
        return 1000000.0
    if 'B' in x:
        # BUG FIX: a bare 'B' used to crash with float(''); guard like K/M.
        if len(x) > 1:
            return float(x.replace('B', '')) * 1000000000
        return 1000000000.0
    # BUG FIX: plain numeric strings previously fell through to 0.0.
    try:
        return float(x)
    except ValueError:
        return 0.0
def data_load(PATH):
    """Load the LinkedIn, company-correlation and stock-price datasets.

    Returns (link, companies, stocks) where `stocks` is a date-indexed
    frame of adjusted close prices with one column per ticker.
    """
    base = Path(PATH)  # make sure we're dealing with a pathlib object
    link = pd.read_csv(
        base / 'temp_datalab_records_linkedin_company.csv',
        parse_dates=['as_of_date', 'date_added', 'date_updated'],
        index_col='as_of_date')
    companies = pd.read_csv(base / 'extracted_correlations_all.csv')
    # Data source: https://www.quandl.com/databases/WIKIP
    stocks = pd.read_csv(base / 'WIKI_PRICES.csv')
    # Fast clean dates, then index by them.
    stocks = stocks.assign(date=parse_dates(stocks.date)).set_index('date')
    # Keep only the adjusted close, pivoted to one column per ticker.
    stocks = stocks[['ticker', 'adj_close']].pivot(columns='ticker')
    stocks.columns = stocks.columns.droplevel(0)
    return link, companies, stocks
|
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import tarfile
import hashlib
import re
import shutil
import pytest
from buildstream._testing import cli # pylint: disable=unused-import
from buildstream._testing import create_repo
from buildstream._testing._utils.site import CASD_SEPARATE_USER
from buildstream import _yaml
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream import utils
from tests.testutils import generate_junction, create_artifact_share
from . import configure_project
# Project directory
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project",)
def strict_args(args, strict):
    """Prefix *args* with --no-strict unless running in "strict" mode."""
    return args if strict == "strict" else ["--no-strict", *args]
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize(
    "strict,hardlinks",
    [("strict", "copies"), ("strict", "hardlinks"), ("non-strict", "copies"), ("non-strict", "hardlinks"),],
)
def test_build_checkout(datafiles, cli, strict, hardlinks):
    """Build a target then check it out, in every strict/hardlink combination."""
    if CASD_SEPARATE_USER and hardlinks == "hardlinks":
        pytest.xfail("Cannot hardlink with buildbox-casd running as a separate user")
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    # First build it
    result = cli.run(project=project, args=strict_args(["build", "target.bst"], strict))
    result.assert_success()
    # Assert that after a successful build, the builddir is empty
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    # Prepare checkout args
    checkout_args = strict_args(["artifact", "checkout"], strict)
    if hardlinks == "hardlinks":
        checkout_args += ["--hardlinks"]
    checkout_args += ["target.bst", "--directory", checkout]
    # Now check it out
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    # Check that the executable hello file is found in the checkout
    filename = os.path.join(checkout, "usr", "bin", "hello")
    assert os.path.exists(filename)
    filename = os.path.join(checkout, "usr", "include", "pony.h")
    assert os.path.exists(filename)
@pytest.mark.datafiles(DATA_DIR)
def test_include_dot_bst_in_checkout_dirname(datafiles, cli):
    """Checkout into a directory whose name ends in '.bst' (regression test)."""
    # put '.bst' on the end of the checkout directory
    # this is a regression test for a bug. The bug would
    # remove '.bst' if it appeared at the end of the
    # "--directory" argument.
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.bst")
    # build the artifact
    result = cli.run(project=project, args=["--no-strict", "build", "target.bst"])
    result.assert_success()
    # checkout
    result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout])
    result.assert_success()
    # check that the executable hello file is found in the checkout (ie
    # confirm files were checked out to directory called "checkout.bst"
    # (as expected) and not to a directory called "checkout")
    filename = os.path.join(checkout, "usr", "bin", "hello")
    assert os.path.exists(filename)
@pytest.mark.datafiles(DATA_DIR)
def test_non_strict_build_strict_checkout(datafiles, cli):
    """A clean non-strict build must produce an artifact matching the strict cache key."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    # First build it in non-strict mode.
    # As this is a clean build from scratch, the result and also the cache keys
    # should be identical to a build in strict mode.
    result = cli.run(project=project, args=["--no-strict", "build", "target.bst"])
    result.assert_success()
    # Now check it out in strict mode.
    # This verifies that the clean build in non-strict mode produced an artifact
    # matching the strict cache key.
    result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout])
    result.assert_success()
    # Check that the executable hello file is found in the checkout
    filename = os.path.join(checkout, "usr", "bin", "hello")
    assert os.path.exists(filename)
@pytest.mark.datafiles(DATA_DIR)
def test_non_strict_pull_build_strict_checkout(datafiles, cli, tmpdir):
    """Non-strict build with a remote configured still matches the strict cache key."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    # Target with at least one (indirect) build-only dependency
    element_name = "target.bst"
    with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
        cli.configure({"artifacts": {"servers": [{"url": share.repo}]}})
        # First build it in non-strict mode with an artifact server configured.
        # With this configuration BuildStream will attempt to pull the build-only
        # dependencies after attempting to pull the target element. This means
        # that the cache key calculation of the target element has to be deferred
        # until the pull attempt of the build-only dependencies, exercising a
        # different code path.
        # As this is a clean build from scratch, the result and also the cache keys
        # should be identical to a build in strict mode.
        result = cli.run(project=project, args=["--no-strict", "build", element_name])
        result.assert_success()
        # Now check it out in strict mode.
        # This verifies that the clean build in non-strict mode produced an artifact
        # matching the strict cache key.
        result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout])
        result.assert_success()
        # Check that the executable hello file is found in the checkout
        filename = os.path.join(checkout, "usr", "bin", "hello")
        assert os.path.exists(filename)
# Regression test for https://github.com/apache/buildstream/issues/1469
# Test that artifact checkout without pull doesn't trigger a BUG in non-strict mode.
@pytest.mark.datafiles(DATA_DIR)
def test_non_strict_checkout_uncached(datafiles, cli, tmpdir):
    """Checking out an uncached artifact in non-strict mode fails cleanly, not with a BUG."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    element_name = "target.bst"
    with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
        cli.configure({"artifacts": {"servers": [{"url": share.repo}]}})
        # Attempt to checkout an uncached artifact with remote artifact server
        # configured but pull disabled.
        result = cli.run(
            project=project, args=["--no-strict", "artifact", "checkout", element_name, "--directory", checkout]
        )
        result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("deps", [("run"), ("none"), ("build"), ("all")])
def test_build_checkout_deps(datafiles, cli, deps):
    """Check out with each --deps scope and verify which files are included."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    element_name = "checkout-deps.bst"
    # First build it
    result = cli.run(project=project, args=["build", element_name])
    result.assert_success()
    # Assert that after a successful build, the builddir is empty
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    # Now check it out
    result = cli.run(
        project=project, args=["artifact", "checkout", element_name, "--deps", deps, "--directory", checkout]
    )
    result.assert_success()
    # Verify output of this element
    filename = os.path.join(checkout, "etc", "buildstream", "config")
    if deps == "build":
        assert not os.path.exists(filename)
    else:
        assert os.path.exists(filename)
    # Verify output of this element's build dependencies
    filename = os.path.join(checkout, "usr", "include", "pony.h")
    if deps in ["build", "all"]:
        assert os.path.exists(filename)
    else:
        assert not os.path.exists(filename)
    # Verify output of this element's runtime dependencies
    filename = os.path.join(checkout, "usr", "bin", "hello")
    if deps in ["run", "all"]:
        assert os.path.exists(filename)
    else:
        assert not os.path.exists(filename)
@pytest.mark.datafiles(DATA_DIR)
def test_build_deps_build(datafiles, cli):
    """`build --deps build` caches only build dependencies, not the target or runtime deps."""
    project = str(datafiles)
    target = "checkout-deps.bst"
    build_dep = "import-dev.bst"
    runtime_dep = "import-bin.bst"
    result = cli.run(project=project, args=["build", "--deps", "build", target])
    result.assert_success()
    states = cli.get_element_states(project, [target, build_dep, runtime_dep])
    assert states[build_dep] == "cached"
    assert states[target] == "buildable"
    assert states[runtime_dep] == "buildable"
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_unbuilt(datafiles, cli):
    """Checking out an element that was never built fails with a clean error."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    # Check that checking out an unbuilt element fails nicely
    result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkout])
    result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_compression_no_tar(datafiles, cli):
    """--compression without --tar is rejected with an explanatory error."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    checkout_args = ["artifact", "checkout", "--directory", checkout, "--compression", "gz", "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    assert "ERROR: --compression can only be provided if --tar is provided" in result.stderr
    assert result.exit_code != 0
# If we don't support the extension, we default to an uncompressed tarball
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tar_with_unconventional_name(datafiles, cli):
    """--tar with an unrecognised extension produces an uncompressed tarball."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.foo")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    checkout_args = ["artifact", "checkout", "--tar", checkout, "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.open(name=checkout, mode="r") as tar:
        assert os.path.join(".", "usr", "bin", "hello") in tar.getnames()
        assert os.path.join(".", "usr", "include", "pony.h") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tar_with_unsupported_ext(datafiles, cli):
    """A '.tar.<unknown>' target name is rejected with a helpful extension list."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar.foo")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    checkout_args = ["artifact", "checkout", "--tar", checkout, "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    assert (
        "Invalid file extension given with '--tar': Expected compression with unknown file extension ('.foo'), "
        "supported extensions are ('.tar'), ('.gz'), ('.xz'), ('.bz2')" in result.stderr
    )
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tar_no_compression(datafiles, cli):
    """Without --compression, the compression is inferred from the .tar.gz extension."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar.gz")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    checkout_args = ["artifact", "checkout", "--tar", checkout, "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.open(name=checkout, mode="r:gz") as tar:
        assert os.path.join(".", "usr", "bin", "hello") in tar.getnames()
        assert os.path.join(".", "usr", "include", "pony.h") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tarball(datafiles, cli):
    """Tarball checkout preserves file modes and uses root (0/0) anonymous ownership."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar")
    # Work-around datafiles not preserving mode
    os.chmod(os.path.join(project, "files/bin-files/usr/bin/hello"), 0o0755)
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    checkout_args = ["artifact", "checkout", "--tar", checkout, "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.TarFile(checkout) as tar:
        tarinfo = tar.getmember(os.path.join(".", "usr", "bin", "hello"))
        assert tarinfo.mode == 0o755
        assert tarinfo.uid == 0 and tarinfo.gid == 0
        assert tarinfo.uname == "" and tarinfo.gname == ""
        tarinfo = tar.getmember(os.path.join(".", "usr", "include", "pony.h"))
        assert tarinfo.mode == 0o644
        assert tarinfo.uid == 0 and tarinfo.gid == 0
        assert tarinfo.uname == "" and tarinfo.gname == ""
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_using_ref(datafiles, cli):
    """Check out an artifact addressed by its artifact ref (project/element/key)."""
    project = str(datafiles)
    checkout_dir = os.path.join(cli.directory, "checkout")
    build_res = cli.run(project=project, args=["build", "checkout-deps.bst"])
    build_res.assert_success()
    cache_key = cli.get_element_key(project, "checkout-deps.bst")
    artifact_ref = "test/checkout-deps/" + cache_key
    res = cli.run(
        project=project,
        args=["artifact", "checkout", "--directory", checkout_dir, "--deps", "none", artifact_ref],
    )
    res.assert_success()
    assert os.path.exists(os.path.join(checkout_dir, "etc", "buildstream", "config"))
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tarball_using_ref(datafiles, cli):
    """Check out to a tarball, addressing the artifact by its ref instead of
    the element name."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar")
    result = cli.run(project=project, args=["build", "checkout-deps.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    key = cli.get_element_key(project, "checkout-deps.bst")
    checkout_args = ["artifact", "checkout", "--deps", "none", "--tar", checkout, "test/checkout-deps/" + key]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.TarFile(checkout) as tar:
        assert os.path.join(".", "etc", "buildstream", "config") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_build_deps_using_ref(datafiles, cli):
    """With --deps build and an artifact ref, only build dependencies are
    staged; runtime dependencies and the target's own files must not appear."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    result = cli.run(project=project, args=["build", "checkout-deps.bst"])
    result.assert_success()
    key = cli.get_element_key(project, "checkout-deps.bst")
    checkout_args = ["artifact", "checkout", "--directory", checkout, "--deps", "build", "test/checkout-deps/" + key]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    build_dep_files = os.path.join(checkout, "usr", "include", "pony.h")
    runtime_dep_files = os.path.join(checkout, "usr", "bin", "hello")
    target_files = os.path.join(checkout, "etc", "buildstream", "config")
    assert os.path.exists(build_dep_files)
    assert not os.path.exists(runtime_dep_files)
    assert not os.path.exists(target_files)
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_runtime_deps_using_ref_fails(datafiles, cli):
    """`--deps run` is rejected when checking out by artifact ref."""
    project = str(datafiles)
    checkout_dir = os.path.join(cli.directory, "checkout")
    build_res = cli.run(project=project, args=["build", "checkout-deps.bst"])
    build_res.assert_success()
    cache_key = cli.get_element_key(project, "checkout-deps.bst")
    artifact_ref = "test/checkout-deps/" + cache_key
    res = cli.run(
        project=project,
        args=["artifact", "checkout", "--directory", checkout_dir, "--deps", "run", artifact_ref],
    )
    res.assert_main_error(ErrorDomain.STREAM, "deps-not-supported")
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_invalid_ref(datafiles, cli):
    """Checkout of a non-existent artifact ref fails with a STREAM error.

    NOTE(review): the asserted error reason is 'missing-sandbox-config' --
    presumably what the stream raises when the artifact proto cannot be
    found; confirm against Stream implementation.
    """
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar")
    result = cli.run(project=project, args=["build", "checkout-deps.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    non_existent_artifact = "test/checkout-deps/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    checkout_args = ["artifact", "checkout", "--deps", "none", "--tar", checkout, non_existent_artifact]
    result = cli.run(project=project, args=checkout_args)
    result.assert_main_error(ErrorDomain.STREAM, "missing-sandbox-config")
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_no_tar_no_directory(datafiles, cli, tmpdir):
    """Without --tar or --directory, checkout lands in ./<element-name> under the cwd."""
    project = str(datafiles)
    working_dir = str(tmpdir)
    build_res = cli.run(project=project, args=["build", "target.bst"])
    build_res.assert_success()
    res = cli.run(cwd=working_dir, project=project, args=["artifact", "checkout", "target.bst"])
    res.assert_success()
    # Both staged files appear under <cwd>/target/
    for relative in (("usr", "bin", "hello"), ("usr", "include", "pony.h")):
        assert os.path.exists(os.path.join(working_dir, "target", *relative))
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("compression", [("gz"), ("xz"), ("bz2")])
def test_build_checkout_tarball_compression(datafiles, cli, compression):
    """Checkout with an explicit --compression produces a tarball readable
    with the matching tarfile mode ("r:gz" / "r:xz" / "r:bz2")."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    checkout_args = ["artifact", "checkout", "--tar", checkout, "--compression", compression, "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.open(name=checkout, mode="r:" + compression) as tar:
        assert os.path.join(".", "usr", "bin", "hello") in tar.getnames()
        assert os.path.join(".", "usr", "include", "pony.h") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tarball_stdout(datafiles, cli):
    """`--tar -` streams the tarball to stdout; the captured bytes form a
    valid tar archive."""
    project = str(datafiles)
    tarball = os.path.join(cli.directory, "tarball.tar")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    checkout_args = ["artifact", "checkout", "--tar", "-", "target.bst"]
    # binary_capture: stdout carries raw tar bytes, not text
    result = cli.run(project=project, args=checkout_args, binary_capture=True)
    result.assert_success()
    with open(tarball, "wb") as f:
        f.write(result.output)
    with tarfile.TarFile(tarball) as tar:
        assert os.path.join(".", "usr", "bin", "hello") in tar.getnames()
        assert os.path.join(".", "usr", "include", "pony.h") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tarball_mtime_nonzero(datafiles, cli):
    """Every member of a checked-out tarball carries a strictly positive mtime."""
    project = str(datafiles)
    tar_path = os.path.join(cli.directory, "mtime_tar.tar")
    build_res = cli.run(project=project, args=["build", "target.bst"])
    build_res.assert_success()
    res = cli.run(project=project, args=["artifact", "checkout", "--tar", tar_path, "target.bst"])
    res.assert_success()
    # An mtime of zero can be confusing to other software,
    # e.g. ninja build and template toolkit have both taken zero mtime to
    # mean 'file does not exist'.
    with tarfile.TarFile(tar_path) as archive:
        assert all(member.mtime > 0 for member in archive.getmembers())
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tarball_is_deterministic(datafiles, cli):
    """Two checkouts of the same artifact produce byte-identical tarballs."""
    project = str(datafiles)
    build_res = cli.run(project=project, args=["build", "target.bst"])
    build_res.assert_success()
    # A successful build must leave the build directory empty
    build_dir = os.path.join(cli.directory, "build")
    assert os.path.isdir(build_dir)
    assert not os.listdir(build_dir)
    digests = []
    for tar_name in ("tarball1.tar", "tarball2.tar"):
        tar_path = os.path.join(cli.directory, tar_name)
        res = cli.run(
            project=project,
            args=["artifact", "checkout", "--force", "target.bst", "--tar", tar_path],
        )
        res.assert_success()
        with open(tar_path, "rb") as f:
            digests.append(hashlib.sha1(f.read()).hexdigest())
    assert digests[0] == digests[1]
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_tarball_links(datafiles, cli):
    """Symlinks survive a tarball checkout: after extracting the archive,
    reading through the link yields the link target's content."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout.tar")
    extract = os.path.join(cli.directory, "extract")
    # Create the link before running the tests.
    # This is needed for users working on Windows: git checks out symlinks as
    # files whose content is the name of the symlink, so the test would not
    # otherwise have the correct content.
    os.symlink(
        os.path.join("..", "basicfile"),
        os.path.join(project, "files", "files-and-links", "basicfolder", "basicsymlink"),
    )
    result = cli.run(project=project, args=["build", "import-links.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    checkout_args = ["artifact", "checkout", "--tar", checkout, "import-links.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.open(name=checkout, mode="r:") as tar:
        tar.extractall(extract)
    with open(os.path.join(extract, "basicfolder", "basicsymlink"), encoding="utf-8") as fp:
        data = fp.read()
    assert data == "file contents\n"
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_links(datafiles, cli):
    """Symlinks survive a directory checkout: reading through the checked-out
    link yields the link target's content."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    # Create the link before running the tests.
    # This is needed for users working on Windows: git checks out symlinks as
    # files whose content is the name of the symlink, so the test would not
    # otherwise have the correct content.
    os.symlink(
        os.path.join("..", "basicfile"),
        os.path.join(project, "files", "files-and-links", "basicfolder", "basicsymlink"),
    )
    result = cli.run(project=project, args=["build", "import-links.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    checkout_args = ["artifact", "checkout", "--directory", checkout, "import-links.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with open(os.path.join(checkout, "basicfolder", "basicsymlink"), encoding="utf-8") as fp:
        data = fp.read()
    assert data == "file contents\n"
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("hardlinks", [("copies"), ("hardlinks")])
def test_build_checkout_nonempty(datafiles, cli, hardlinks):
    """Checkout into a non-empty directory without --force fails with a
    STREAM error, for both copy and hardlink staging."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    filename = os.path.join(checkout, "file.txt")
    # First build it
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    # Assert that after a successful build, the builddir is empty
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    # Create the checkout dir and add a file to it, should cause checkout to fail
    os.makedirs(checkout, exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write("Hello")
    # Prepare checkout args
    checkout_args = ["artifact", "checkout"]
    if hardlinks == "hardlinks":
        checkout_args += ["--hardlinks"]
    checkout_args += ["target.bst", "--directory", checkout]
    # Now check it out
    result = cli.run(project=project, args=checkout_args)
    result.assert_main_error(ErrorDomain.STREAM, None)
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("hardlinks", [("copies"), ("hardlinks")])
def test_build_checkout_force(datafiles, cli, hardlinks):
    """--force permits checkout into a non-empty directory; the pre-existing
    file is kept and the artifact contents are staged alongside it."""
    if CASD_SEPARATE_USER and hardlinks == "hardlinks":
        pytest.xfail("Cannot hardlink with buildbox-casd running as a separate user")
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    filename = os.path.join(checkout, "file.txt")
    # First build it
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    # Assert that after a successful build, the builddir is empty
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    # Create the checkout dir and add a file to it, should cause checkout to fail
    os.makedirs(checkout, exist_ok=True)
    with open(filename, "w", encoding="utf-8") as f:
        f.write("Hello")
    # Prepare checkout args
    checkout_args = ["artifact", "checkout", "--force"]
    if hardlinks == "hardlinks":
        checkout_args += ["--hardlinks"]
    checkout_args += ["target.bst", "--directory", checkout]
    # Now check it out
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    # Check that the file we added is still there
    filename = os.path.join(checkout, "file.txt")
    assert os.path.exists(filename)
    # Check that the executable hello file is found in the checkout
    filename = os.path.join(checkout, "usr", "bin", "hello")
    assert os.path.exists(filename)
    # Check that the header file is found in the checkout
    filename = os.path.join(checkout, "usr", "include", "pony.h")
    assert os.path.exists(filename)
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_force_tarball(datafiles, cli):
    """--force overwrites a pre-existing file at the target tarball path."""
    project = str(datafiles)
    tarball = os.path.join(cli.directory, "tarball.tar")
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    # A successful build leaves nothing behind in the build directory
    builddir = os.path.join(cli.directory, "build")
    assert os.path.isdir(builddir)
    assert not os.listdir(builddir)
    # Pre-create a non-tar file at the destination; --force must replace it
    with open(tarball, "w", encoding="utf-8") as f:
        f.write("Hello")
    checkout_args = ["artifact", "checkout", "--force", "--tar", tarball, "target.bst"]
    result = cli.run(project=project, args=checkout_args)
    result.assert_success()
    with tarfile.TarFile(tarball) as tar:
        assert os.path.join(".", "usr", "bin", "hello") in tar.getnames()
        assert os.path.join(".", "usr", "include", "pony.h") in tar.getnames()
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage):
    """Building across a junction whose ref was never stored fails with
    SUBPROJECT_INCONSISTENT, and the error reports the provenance of the
    offending dependency."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    configure_project(project, {"ref-storage": ref_storage})
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path, store_ref=False)
    # Create a stack element to depend on a cross junction element
    #
    element = {"kind": "stack", "depends": [{"junction": "junction.bst", "filename": "import-etc.bst"}]}
    _yaml.roundtrip_dump(element, element_path)
    # Now try to track it, this will bail with the appropriate error
    # informing the user to track the junction first
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT)
    # Assert that we have the expected provenance encoded into the error
    element_node = _yaml.load(element_path, shortname="junction-dep.bst")
    ref_node = element_node.get_sequence("depends").mapping_at(0)
    provenance = ref_node.get_provenance()
    assert str(provenance) in result.stderr
@pytest.mark.datafiles(DATA_DIR)
@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
def test_unfetched_junction(cli, tmpdir, datafiles, ref_storage):
    """A build across an unfetched junction fetches the junction automatically
    at load time, for both inline and project.refs ref storage."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    configure_project(project, {"ref-storage": ref_storage})
    # Create a repo to hold the subproject and generate a junction element for it
    ref = generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == "inline"))
    # Create a stack element to depend on a cross junction element
    #
    element = {"kind": "stack", "depends": [{"junction": "junction.bst", "filename": "import-etc.bst"}]}
    _yaml.roundtrip_dump(element, element_path)
    # Dump a project.refs if we're using project.refs storage
    #
    if ref_storage == "project.refs":
        project_refs = {"projects": {"test": {"junction.bst": [{"ref": ref}]}}}
        _yaml.roundtrip_dump(project_refs, os.path.join(project, "junction.refs"))
    # Now try to build it, this should automatically result in fetching
    # the junction itself at load time.
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_success()
    # Assert that it's cached now
    assert cli.get_element_state(project, "junction-dep.bst") == "cached"
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_junction(cli, tmpdir, datafiles):
    """Build an element that depends across a junction, then check it out and
    verify that the subproject's file content was staged."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    checkout = os.path.join(cli.directory, "checkout")
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path)
    # Create a stack element to depend on a cross junction element
    #
    element = {"kind": "stack", "depends": [{"junction": "junction.bst", "filename": "import-etc.bst"}]}
    _yaml.roundtrip_dump(element, element_path)
    # Now try to build it, this should automatically result in fetching
    # the junction itself at load time.
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_success()
    # Assert that it's cached now
    assert cli.get_element_state(project, "junction-dep.bst") == "cached"
    # Now check it out
    result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout])
    result.assert_success()
    # Assert the content of /etc/animal.conf
    filename = os.path.join(checkout, "etc", "animal.conf")
    assert os.path.exists(filename)
    with open(filename, "r", encoding="utf-8") as f:
        contents = f.read()
    assert contents == "animal=Pony\n"
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_workspaced_junction(cli, tmpdir, datafiles):
    """A workspace opened on a junction takes effect: modifying a file in the
    workspaced subproject changes what a dependent build checks out."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    workspace = os.path.join(cli.directory, "workspace")
    checkout = os.path.join(cli.directory, "checkout")
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path)
    # Create a stack element to depend on a cross junction element
    #
    element = {"kind": "stack", "depends": [{"junction": "junction.bst", "filename": "import-etc.bst"}]}
    _yaml.roundtrip_dump(element, element_path)
    # Now open a workspace on the junction
    #
    result = cli.run(project=project, args=["workspace", "open", "--directory", workspace, "junction.bst"])
    result.assert_success()
    filename = os.path.join(workspace, "files", "etc-files", "etc", "animal.conf")
    # Assert the content of /etc/animal.conf in the workspace
    assert os.path.exists(filename)
    with open(filename, "r", encoding="utf-8") as f:
        contents = f.read()
    assert contents == "animal=Pony\n"
    # Modify the content of the animal.conf in the workspace
    with open(filename, "w", encoding="utf-8") as f:
        f.write("animal=Horsy\n")
    # Now try to build it, this should automatically result in fetching
    # the junction itself at load time.
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_success()
    # Assert that it's cached now
    assert cli.get_element_state(project, "junction-dep.bst") == "cached"
    # Now check it out
    result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout])
    result.assert_success()
    # Assert the workspace modified content of /etc/animal.conf
    filename = os.path.join(checkout, "etc", "animal.conf")
    assert os.path.exists(filename)
    with open(filename, "r", encoding="utf-8") as f:
        contents = f.read()
    assert contents == "animal=Horsy\n"
@pytest.mark.datafiles(DATA_DIR)
def test_build_checkout_cross_junction(datafiles, cli, tmpdir):
    """Build and check out an element addressed across a junction with the
    `junction.bst:element.bst` syntax."""
    project = str(datafiles)
    sub_project = os.path.join(project, "files", "sub-project")
    junction_element = os.path.join(project, "elements", "junction.bst")
    checkout_dir = os.path.join(cli.directory, "checkout")
    generate_junction(tmpdir, sub_project, junction_element)
    build_res = cli.run(project=project, args=["build", "junction.bst:import-etc.bst"])
    build_res.assert_success()
    res = cli.run(
        project=project, args=["artifact", "checkout", "junction.bst:import-etc.bst", "--directory", checkout_dir]
    )
    res.assert_success()
    assert os.path.exists(os.path.join(checkout_dir, "etc", "animal.conf"))
@pytest.mark.datafiles(DATA_DIR)
def test_build_junction_short_notation(cli, tmpdir, datafiles):
    """Cross-junction dependency declared as a bare "junction.bst:element.bst"
    string builds and checks out correctly."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    checkout = os.path.join(cli.directory, "checkout")
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path)
    # Create a stack element to depend on a cross junction element, using
    # colon (:) as the separator
    element = {"kind": "stack", "depends": ["junction.bst:import-etc.bst"]}
    _yaml.roundtrip_dump(element, element_path)
    # Now try to build it, this should automatically result in fetching
    # the junction itself at load time.
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_success()
    # Assert that it's cached now
    assert cli.get_element_state(project, "junction-dep.bst") == "cached"
    # Now check it out
    result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout])
    result.assert_success()
    # Assert the content of /etc/animal.conf
    filename = os.path.join(checkout, "etc", "animal.conf")
    assert os.path.exists(filename)
    with open(filename, "r", encoding="utf-8") as f:
        contents = f.read()
    assert contents == "animal=Pony\n"
@pytest.mark.datafiles(DATA_DIR)
def test_build_junction_short_notation_filename(cli, tmpdir, datafiles):
    """Cross-junction dependency declared via the `filename` key using
    "junction.bst:element.bst" short notation builds and checks out correctly."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    checkout = os.path.join(cli.directory, "checkout")
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path)
    # Create a stack element to depend on a cross junction element, using
    # colon (:) as the separator
    element = {"kind": "stack", "depends": [{"filename": "junction.bst:import-etc.bst"}]}
    _yaml.roundtrip_dump(element, element_path)
    # Now try to build it, this should automatically result in fetching
    # the junction itself at load time.
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_success()
    # Assert that it's cached now
    assert cli.get_element_state(project, "junction-dep.bst") == "cached"
    # Now check it out
    result = cli.run(project=project, args=["artifact", "checkout", "junction-dep.bst", "--directory", checkout])
    result.assert_success()
    # Assert the content of /etc/animal.conf
    filename = os.path.join(checkout, "etc", "animal.conf")
    assert os.path.exists(filename)
    with open(filename, "r", encoding="utf-8") as f:
        contents = f.read()
    assert contents == "animal=Pony\n"
@pytest.mark.datafiles(DATA_DIR)
def test_build_junction_short_notation_with_junction(cli, tmpdir, datafiles):
    """Combining `junction.bst:` short notation in `filename` with an explicit
    `junction` key is invalid data and must fail the load."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path)
    # Create a stack element to depend on a cross junction element, using
    # colon (:) as the separator
    element = {"kind": "stack", "depends": [{"filename": "junction.bst:import-etc.bst", "junction": "junction.bst",}]}
    _yaml.roundtrip_dump(element, element_path)
    # Now try to build it, this should fail as filenames should not contain
    # `:` when junction is explicitly specified
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
@pytest.mark.datafiles(DATA_DIR)
def test_build_junction_transitive_short_notation_with_junction(cli, tmpdir, datafiles):
    """A doubly-nested short-notation reference ("a.bst:b.bst:c.bst") is
    invalid: recursive cross-junction lookups are not allowed."""
    project = str(datafiles)
    subproject_path = os.path.join(project, "files", "sub-project")
    junction_path = os.path.join(project, "elements", "junction.bst")
    element_path = os.path.join(project, "elements", "junction-dep.bst")
    # Create a repo to hold the subproject and generate a junction element for it
    generate_junction(tmpdir, subproject_path, junction_path)
    # Create a stack element to depend on a cross junction element, using
    # colon (:) as the separator
    element = {"kind": "stack", "depends": ["junction.bst:import-etc.bst:foo.bst"]}
    _yaml.roundtrip_dump(element, element_path)
    # Now try to build it, this should fail as recursive lookups for
    # cross-junction elements is not allowed.
    result = cli.run(project=project, args=["build", "junction-dep.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
# Should check that after a build we have partial artifacts locally, but should
# then attempt to fetch them when doing a artifact checkout
@pytest.mark.datafiles(DATA_DIR)
def test_partial_artifact_checkout_fetch(cli, datafiles, tmpdir):
    """After deleting a blob from the local CAS, checkout must fail when no
    remote is configured, and must re-fetch the missing blob (pulling the
    element) once the artifact server is configured again."""
    project = str(datafiles)
    checkout_dir = os.path.join(str(tmpdir), "checkout")
    repo = create_repo("git", str(tmpdir))
    repo.create(os.path.join(str(datafiles), "files"))
    element_dir = os.path.join(str(tmpdir), "elements")
    # The test project is generated under tmpdir, not under datafiles
    project = str(tmpdir)
    project_config = {
        "name": "partial-artifact-checkout-fetch",
        "min-version": "2.0",
        "element-path": "elements",
    }
    project_file = os.path.join(str(tmpdir), "project.conf")
    _yaml.roundtrip_dump(project_config, project_file)
    input_config = {
        "kind": "import",
        "sources": [repo.source_config()],
    }
    input_name = "input.bst"
    input_file = os.path.join(element_dir, input_name)
    _yaml.roundtrip_dump(input_config, input_file)
    with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
        cli.configure({"artifacts": {"servers": [{"url": share.repo, "push": True}]}})
        result = cli.run(project=project, args=["source", "track", input_name])
        result.assert_success()
        result = cli.run(project=project, args=["build", input_name])
        result.assert_success()
        # A push artifact cache means we have to pull to push to them, so
        # delete some blobs from that CAS such that we have to fetch
        digest = utils.sha256sum(os.path.join(project, "files", "bin-files", "usr", "bin", "hello"))
        objpath = os.path.join(cli.directory, "cas", "objects", digest[:2], digest[2:])
        os.unlink(objpath)
        # Verify that the build-only dependency is not (complete) in the local cache
        cli.configure({"artifacts": {}})
        result = cli.run(project=project, args=["artifact", "checkout", input_name, "--directory", checkout_dir])
        result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
        # Verify that the pull method fetches relevant artifacts in order to stage
        cli.configure({"artifacts": {"servers": [{"url": share.repo, "push": True}]}})
        result = cli.run(project=project, args=["artifact", "checkout", input_name, "--directory", checkout_dir])
        result.assert_success()
        # should have pulled whatever was deleted previously
        assert input_name in result.get_pulled_elements()
@pytest.mark.datafiles(DATA_DIR)
def test_partial_checkout_fail(tmpdir, datafiles, cli):
    """Checkout of an uncached element fails and the error names the remote
    that lacks the artifact."""
    project = str(datafiles)
    target = "import-bin.bst"
    checkout_dir = os.path.join(str(tmpdir), "checkout")
    with create_artifact_share(os.path.join(str(tmpdir), "artifactshare")) as share:
        cli.configure({"artifacts": {"servers": [{"url": share.repo, "push": True}]}})
        res = cli.run(project=project, args=["artifact", "checkout", target, "--directory", checkout_dir])
        res.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
        assert re.findall(r"Remote \((\S+)\) does not have artifact (\S+) cached", res.stderr)
# Regression test for https://gitlab.com/BuildStream/buildstream/-/issues/1367.
# Make sure that `artifact checkout` fails gracefully when no arguments are
# provided.
@pytest.mark.datafiles(DATA_DIR)
def test_fail_no_args(datafiles, cli):
    """`bst artifact checkout` with no target exits with an APP error and a
    usage message rather than crashing."""
    project = str(datafiles)
    res = cli.run(project=project, args=["artifact", "checkout"])
    res.assert_main_error(ErrorDomain.APP, None)
    assert "Missing argument" in res.stderr
# This test reproduces a scenario where BuildStream can get confused
# if the strictness of a dependency is not taken into account in the
# strong artifact cache key, as reported in issue #1270.
#
# While we were unable to reproduce the exact experience, we can test
# the expected behavior.
#
# Project used by the strict-dependency regression scenario below.
STRICT_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "strict-scenario")
@pytest.mark.datafiles(STRICT_DATA_DIR)
def test_changing_strict_dependency_scenario(datafiles, cli):
    """Regression scenario for issue #1270: flipping a dependency's `strict`
    flag changes the artifact cache key, so a previous build no longer
    satisfies a checkout in either strict or non-strict mode until rebuilt."""
    project = str(datafiles)
    checkout = os.path.join(cli.directory, "checkout")
    target_path = os.path.join(project, "elements", "target.bst")
    # Function to (re)write the target element so that it depends
    # on the base.bst element strictly or non-strictly
    #
    def configure_target(strict):
        dependency = {"filename": "base.bst"}
        if strict:
            dependency["strict"] = True
        config = {
            "kind": "import",
            "depends": [dependency],
            "sources": [{"kind": "local", "path": "files/target.txt"}],
        }
        _yaml.roundtrip_dump(config, target_path)
    # First build where the target normally depends on the base element
    configure_target(False)
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    # Now configure the target to *strictly* depend on the base, try to check it out
    #
    # This will fail in both strict mode and non-strict mode, as the strictness
    # of the dependency will affect both keys.
    configure_target(True)
    result = cli.run(project=project, args=["artifact", "checkout", "--directory", checkout, "target.bst"])
    result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
    shutil.rmtree(checkout)
    result = cli.run(
        project=project, args=["--no-strict", "artifact", "checkout", "--directory", checkout, "target.bst"]
    )
    result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
    shutil.rmtree(checkout)
    # Now perform a build on the newly strict dependency, which should cause it to be
    # available under both strict and non-strict checkout scenarios
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    result = cli.run(project=project, args=["artifact", "checkout", "--directory", checkout, "target.bst"])
    result.assert_success()
    shutil.rmtree(checkout)
    result = cli.run(
        project=project, args=["--no-strict", "artifact", "checkout", "--directory", checkout, "target.bst"]
    )
    result.assert_success()
    shutil.rmtree(checkout)
|
import React from "react";
import "../styles/Categories.css";
const Categories = ({ children, title }) => (
<React.Fragment>
<div className="categories">
<h3 class="categories__title">{title}</h3>
{children}
</div>
</React.Fragment>
);
export default Categories;
|
from secrets import choice
from AyiinXd import CMD_HANDLER as cmd
from AyiinXd import CMD_HELP
from AyiinXd.ayiin import ayiin_cmd, deEmojify, eod, eor
from Stringyins import get_string
@ayiin_cmd(pattern="rst(?: |$)(.*)")
async def rastick(animu):
    """Turn the given (or replied-to) text into a sticker using a random
    template from @StickerizerBot, then delete the command message."""
    # Local import: `sleep` was used below but never imported in this module,
    # which raised NameError at runtime.
    from asyncio import sleep

    text = animu.pattern_match.group(1)
    xx = await eor(animu, get_string("com_1"))
    if not text:
        if animu.is_reply:
            text = (await animu.get_reply_message()).message
        else:
            return await xx.answer(get_string("rstick_1"))
    # @StickerizerBot supports sticker templates numbered 1..63; pick one at random.
    template_ids = list(range(1, 64))
    sticcers = await animu.client.inline_query(
        "stickerizerbot", f"#{choice(template_ids)}{(deEmojify(text))}"
    )
    try:
        await sticcers[0].click(
            animu.chat_id,
            reply_to=animu.reply_to_msg_id,
            silent=bool(animu.is_reply),
            hide_via=True,
        )
    except Exception:
        return await eod(
            xx, get_string("rstick_2")
        )
    # Give the inline result time to arrive before removing the command message.
    await sleep(5)
    await animu.delete()
# Register this plugin's help text in the global help index.
# Fixed typo in the user-facing string: "daro" -> "dari" (Indonesian "from").
CMD_HELP.update(
    {
        "rastick": f"**Plugin : **`rastick`\
    \n\n » **Perintah :** `{cmd}rst`\
    \n » **Kegunaan : **Untuk membuat stiker teks Anda dengan templat stiker acak dari @StickerizerBot\
    "
    }
)
|
/* Copyright (C) 1992-2016 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#include <hurd.h>
#include <hurd/msg_server.h>
#include <hurd/fd.h>
#include <unistd.h>
#include <limits.h>
#include <string.h>
#include <argz.h>
/* Refuse the request with EPERM unless it arrived holding our own task
   port or a port equal to our current auth port; every msg-server entry
   point below gates on this before touching process state.  */
#define AUTHCHECK \
  if (auth != mach_task_self () && ! __USEPORT (AUTH, port == auth)) \
    return EPERM
/* Snarfing and frobbing the init ports.  */

/* Return init port WHICH of this process as a move-send right.  */
kern_return_t
_S_msg_get_init_port (mach_port_t msgport, mach_port_t auth, int which,
                      mach_port_t *result, mach_msg_type_name_t *result_type)
{
  AUTHCHECK;
  *result_type = MACH_MSG_TYPE_MOVE_SEND;
  /* This function adds a new user reference for the *RESULT it gives back.
     Our reply message uses a move-send right that consumes this reference. */
  return _hurd_ports_get (which, result);
}
/* Install PORT as init port WHICH of this process.  */
kern_return_t
_S_msg_set_init_port (mach_port_t msgport, mach_port_t auth,
                      int which, mach_port_t port)
{
  error_t err;
  AUTHCHECK;
  err = _hurd_ports_set (which, port);
  if (err == 0)
    /* Drop the send right that arrived in the message; this balances
       the reference consumed on success (cf. _S_msg_set_init_ports).  */
    __mach_port_deallocate (__mach_task_self (), port);
  /* Bug fix: propagate ERR instead of discarding it -- the original
     `return 0;` reported success even when _hurd_ports_set failed.  */
  return err;
}
/* Return all init ports of this process as an out-of-line array of
   move-send rights.  */
kern_return_t
_S_msg_get_init_ports (mach_port_t msgport, mach_port_t auth,
                       mach_port_t **ports,
                       mach_msg_type_name_t *ports_type,
                       mach_msg_type_number_t *nports)
{
  mach_msg_type_number_t i;
  error_t err;
  AUTHCHECK;
  /* Out-of-line buffer for the reply (last arg 1 = place anywhere).
     NOTE(review): assumed to be consumed by the reply machinery on
     success -- confirm against the MiG stubs.  */
  if (err = __vm_allocate (__mach_task_self (), (vm_address_t *) ports,
                           _hurd_nports * sizeof (mach_port_t), 1))
    return err;
  *nports = _hurd_nports;
  for (i = 0; i < _hurd_nports; ++i)
    /* This function adds a new user ref for the *RESULT it gives back.
       Our reply message uses move-send rights that consumes this ref. */
    if (err = _hurd_ports_get (i, &(*ports)[i]))
      {
        /* Died part way through.  Deallocate the ports already fetched. */
        while (i-- > 0)
          __mach_port_deallocate (__mach_task_self (), (*ports)[i]);
        __vm_deallocate (__mach_task_self (),
                         (vm_address_t) *ports,
                         *nports * sizeof (mach_port_t));
        return err;
      }
  *ports_type = MACH_MSG_TYPE_MOVE_SEND;
  return 0;
}
/* Install PORTS as the full set of init ports of this process.  */
kern_return_t
_S_msg_set_init_ports (mach_port_t msgport, mach_port_t auth,
                       mach_port_t *ports, mach_msg_type_number_t nports)
{
  mach_msg_type_number_t i;
  error_t err;
  AUTHCHECK;
  /* NOTE(review): NPORTS is never checked against _hurd_nports; the loop
     assumes the caller supplied at least _hurd_nports ports -- confirm
     this is guaranteed by the MiG interface definition.  */
  for (i = 0; i < _hurd_nports; ++i)
    {
      if (err = _hurd_ports_set (i, ports[i]))
        return err;
      else
        /* Drop the send right that arrived in the message.  */
        __mach_port_deallocate (__mach_task_self (), ports[i]);
    }
  return 0;
}
/* Snarfing and frobbing the init ints.  */

/* Fetch one INIT_* integer describing this process's state into *VALUE.
   Returns EINVAL for indices we do not implement.  */
static kern_return_t
get_int (int which, int *value)
{
  switch (which)
    {
    case INIT_UMASK:
      *value = _hurd_umask;
      return 0;
    case INIT_SIGMASK:
      {
        struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
        __spin_lock (&ss->lock);
        *value = ss->blocked;
        __spin_unlock (&ss->lock);
        return 0;
      }
    case INIT_SIGPENDING:
      {
        struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
        __spin_lock (&ss->lock);
        *value = ss->pending;
        __spin_unlock (&ss->lock);
        return 0;
      }
    case INIT_SIGIGN:
      {
        struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
        sigset_t ign;
        int sig;
        __spin_lock (&ss->lock);
        __sigemptyset (&ign);
        /* Build a mask of the signals whose action is currently SIG_IGN.  */
        for (sig = 1; sig < NSIG; ++sig)
          if (ss->actions[sig].sa_handler == SIG_IGN)
            __sigaddset (&ign, sig);
        __spin_unlock (&ss->lock);
        /* NOTE(review): assumes sigset_t is convertible to int without
           losing information on this platform -- confirm.  */
        *value = ign;
        return 0;
      }
    default:
      return EINVAL;
    }
}
/* RPC wrapper: fetch a single init int after authentication.  */
kern_return_t
_S_msg_get_init_int (mach_port_t msgport, mach_port_t auth,
                     int which, int *value)
{
  AUTHCHECK;
  return get_int (which, value);
}

/* Fetch all init ints as an out-of-line array of INIT_INT_MAX values;
   unimplemented slots are reported as zero.  */
kern_return_t
_S_msg_get_init_ints (mach_port_t msgport, mach_port_t auth,
                      int **values, mach_msg_type_number_t *nvalues)
{
  error_t err;
  mach_msg_type_number_t i;
  AUTHCHECK;
  if (err = __vm_allocate (__mach_task_self (), (vm_address_t *) values,
                           INIT_INT_MAX * sizeof (int), 1))
    return err;
  *nvalues = INIT_INT_MAX;
  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (err = get_int (i, &(*values)[i]))
      {
      case 0:                   /* Success. */
        break;
      case EINVAL:              /* Unknown index: report as zero. */
        (*values)[i] = 0;
        break;
      default:                  /* Lossage: free the buffer and bail out. */
        __vm_deallocate (__mach_task_self (),
                         (vm_address_t) *values, INIT_INT_MAX * sizeof (int));
        return err;
      }
  return 0;
}
/* Set one INIT_* integer of this process's state to VALUE.
   Returns EINVAL for indices we do not implement.  */
static kern_return_t
set_int (int which, int value)
{
  switch (which)
    {
    case INIT_UMASK:
      _hurd_umask = value;
      return 0;
      /* These are pretty odd things to do.  But you asked for it. */
    case INIT_SIGMASK:
      {
        struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
        __spin_lock (&ss->lock);
        ss->blocked = value;
        __spin_unlock (&ss->lock);
        return 0;
      }
    case INIT_SIGPENDING:
      {
        struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
        __spin_lock (&ss->lock);
        ss->pending = value;
        __spin_unlock (&ss->lock);
        return 0;
      }
    case INIT_SIGIGN:
      {
        struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
        int sig;
        const sigset_t ign = value;
        __spin_lock (&ss->lock);
        /* Force SIG_IGN for every signal in IGN; restore SIG_DFL for
           signals that were ignored but are no longer in the mask.  */
        for (sig = 1; sig < NSIG; ++sig)
          {
            if (__sigismember (&ign, sig))
              ss->actions[sig].sa_handler = SIG_IGN;
            else if (ss->actions[sig].sa_handler == SIG_IGN)
              ss->actions[sig].sa_handler = SIG_DFL;
          }
        __spin_unlock (&ss->lock);
        return 0;
      }
      /* Structure fix: this case label used to sit *inside* the braces of
         the INIT_SIGIGN block above, making the switch jump over that
         block's local-variable initializations.  Behavior is unchanged,
         but the label now lives at switch level where it belongs.  */
    case INIT_TRACEMASK:
      _hurdsig_traced = value;
      return 0;
    default:
      return EINVAL;
    }
}
/* RPC wrapper: set a single init int after authentication.  */
kern_return_t
_S_msg_set_init_int (mach_port_t msgport, mach_port_t auth,
                     int which, int value)
{
  AUTHCHECK;
  return set_int (which, value);
}

/* Set all init ints at once; unknown indices are silently skipped.  */
kern_return_t
_S_msg_set_init_ints (mach_port_t msgport, mach_port_t auth,
                      int *values, mach_msg_type_number_t nvalues)
{
  error_t err;
  mach_msg_type_number_t i;
  AUTHCHECK;
  /* NOTE(review): NVALUES is never consulted; the loop assumes the caller
     passed INIT_INT_MAX values -- confirm against the MiG definition.  */
  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (err = set_int (i, values[i]))
      {
      case 0:                   /* Success. */
        break;
      case EINVAL:              /* Unknown index: ignore. */
        break;
      default:                  /* Lossage: abort with the error. */
        return err;
      }
  return 0;
}
/* Return the io port of file descriptor WHICH as a move-send right.  */
kern_return_t
_S_msg_get_fd (mach_port_t msgport, mach_port_t auth, int which,
               mach_port_t *result, mach_msg_type_name_t *result_type)
{
  AUTHCHECK;
  /* This creates a new user reference for the send right.
     Our reply message will move that reference to the caller.  */
  *result = __getdport (which);
  if (*result == MACH_PORT_NULL)
    /* __getdport failed and left the reason in errno (e.g. EBADF).  */
    return errno;
  *result_type = MACH_MSG_TYPE_MOVE_SEND;
  return 0;
}

/* Install PORT as the io port of file descriptor WHICH.  */
kern_return_t
_S_msg_set_fd (mach_port_t msgport, mach_port_t auth,
               int which, mach_port_t port)
{
  AUTHCHECK;
  /* We consume the reference if successful.  */
  /* HURD_FD_USE evaluates the expression with `descriptor' bound to the
     fd structure for WHICH; the comma expression yields 0 on success.  */
  return HURD_FD_USE (which, (_hurd_port2fd (descriptor, port, 0), 0));
}
/* Snarfing and frobbing environment variables. */

/* Return the value of environment variable VARIABLE.
   No AUTHCHECK here: reading the environment is presumably considered
   harmless -- NOTE(review): confirm this is intentional.  */
kern_return_t
_S_msg_get_env_variable (mach_port_t msgport,
                         char *variable,
                         char **data, mach_msg_type_number_t *datalen)
{
  error_t err;
  mach_msg_type_number_t valuelen;
  const char *value = getenv (variable);
  if (value == NULL)
    return ENOENT;
  valuelen = strlen (value);
  if (valuelen > *datalen)
    {
      /* The caller's inline buffer is too small; allocate an out-of-line
         buffer for the reply instead.  */
      if (err = __vm_allocate (__mach_task_self (),
                               (vm_address_t *) data, valuelen, 1))
        return err;
    }
  memcpy (*data, value, valuelen);
  *datalen = valuelen;
  return 0;
}

/* Set environment variable VARIABLE to VALUE (overwriting iff REPLACE).  */
kern_return_t
_S_msg_set_env_variable (mach_port_t msgport, mach_port_t auth,
                         char *variable,
                         char *value,
                         int replace)
{
  AUTHCHECK;
  if (setenv (variable, value, replace)) /* XXX name space */
    return errno;
  return 0;
}
/* Return the whole environment packed as NUL-separated strings.  */
kern_return_t
_S_msg_get_environment (mach_port_t msgport,
                        char **data, mach_msg_type_number_t *datalen)
{
  /* Pack the environment into an array with nulls separating elements. */
  if (__environ != NULL)
    {
      char *ap, **p;
      size_t envlen = 0;
      /* First pass: total size of all "NAME=value\0" strings.  */
      for (p = __environ; *p != NULL; ++p)
        envlen += strlen (*p) + 1;
      if (envlen > *datalen)
        {
          /* Inline reply buffer too small; allocate out of line.  */
          if (__vm_allocate (__mach_task_self (),
                             (vm_address_t *) data, envlen, 1))
            return ENOMEM;
        }
      /* Second pass: copy each string including its terminating NUL.  */
      ap = *data;
      for (p = __environ; *p != NULL; ++p)
        ap = __memccpy (ap, *p, '\0', ULONG_MAX);
      *datalen = envlen;
    }
  else
    *datalen = 0;
  return 0;
}

/* Replace the whole environment with the argz vector in DATA.  */
kern_return_t
_S_msg_set_environment (mach_port_t msgport, mach_port_t auth,
                        char *data, mach_msg_type_number_t datalen)
{
  int _hurd_split_args (char *, mach_msg_type_number_t, char **);
  int envc;
  char **envp;
  AUTHCHECK;
  envc = __argz_count (data, datalen);
  envp = malloc ((envc + 1) * sizeof (char *));
  if (envp == NULL)
    return errno;
  /* ENVP's entries point into DATA, the received message buffer.
     NOTE(review): the previous __environ array appears to be leaked and
     DATA's lifetime is assumed to outlive the environment -- confirm.  */
  __argz_extract (data, datalen, envp);
  __environ = envp;             /* XXX cooperate with loadenv et al */
  return 0;
}
/* XXX */
/* Unimplemented stubs: per-process descriptor-table snarfing is not
   supported; callers always get EOPNOTSUPP.  */
kern_return_t
_S_msg_get_dtable (mach_port_t process,
                   mach_port_t refport,
                   portarray_t *dtable,
                   mach_msg_type_name_t *dtablePoly,
                   mach_msg_type_number_t *dtableCnt)
{ return EOPNOTSUPP; }

kern_return_t
_S_msg_set_dtable (mach_port_t process,
                   mach_port_t refport,
                   portarray_t dtable,
                   mach_msg_type_number_t dtableCnt)
{ return EOPNOTSUPP; }
|
// Copyright (c) 2009-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef PERFECTCOIN_WALLET_CRYPTER_H
#define PERFECTCOIN_WALLET_CRYPTER_H
#include <keystore.h>
#include <serialize.h>
#include <support/allocators/secure.h>
#include <atomic>
const unsigned int WALLET_CRYPTO_KEY_SIZE = 32;  //!< AES-256 key size, in bytes
const unsigned int WALLET_CRYPTO_SALT_SIZE = 8;  //!< Salt length for key derivation, in bytes
const unsigned int WALLET_CRYPTO_IV_SIZE = 16;   //!< AES block / IV size, in bytes
/**
* Private key encryption is done based on a CMasterKey,
* which holds a salt and random encryption key.
*
* CMasterKeys are encrypted using AES-256-CBC using a key
* derived using derivation method nDerivationMethod
* (0 == EVP_sha512()) and derivation iterations nDeriveIterations.
* vchOtherDerivationParameters is provided for alternative algorithms
* which may require more parameters (such as scrypt).
*
* Wallet Private Keys are then encrypted using AES-256-CBC
* with the double-sha256 of the public key as the IV, and the
* master key's key as the encryption key (see keystore.[ch]).
*/
/** Master key for wallet encryption */
class CMasterKey
{
public:
    //! Master key material, encrypted under the passphrase-derived key.
    std::vector<unsigned char> vchCryptedKey;
    //! Salt mixed into the passphrase-based key derivation.
    std::vector<unsigned char> vchSalt;
    //! 0 = EVP_sha512()
    //! 1 = scrypt()
    unsigned int nDerivationMethod;
    //! Number of derivation rounds (work factor against brute force).
    unsigned int nDeriveIterations;
    //! Use this for more parameters to key derivation,
    //! such as the various parameters to scrypt
    std::vector<unsigned char> vchOtherDerivationParameters;
    ADD_SERIALIZE_METHODS;
    //! Serialize/deserialize all fields in declaration order.
    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream& s, Operation ser_action) {
        READWRITE(vchCryptedKey);
        READWRITE(vchSalt);
        READWRITE(nDerivationMethod);
        READWRITE(nDeriveIterations);
        READWRITE(vchOtherDerivationParameters);
    }
    CMasterKey()
    {
        // 25000 rounds is just under 0.1 seconds on a 1.86 GHz Pentium M
        // ie slightly lower than the lowest hardware we need bother supporting
        nDeriveIterations = 25000;
        nDerivationMethod = 0;
        vchOtherDerivationParameters = std::vector<unsigned char>(0);
    }
};
//! Raw (unencrypted) key material, held in secure memory that is wiped on free.
typedef std::vector<unsigned char, secure_allocator<unsigned char> > CKeyingMaterial;

// Forward declaration so CCrypter can befriend the test class below.
namespace wallet_crypto_tests
{
class TestCrypter;
}
/** Encryption/decryption context with key information */
class CCrypter
{
friend class wallet_crypto_tests::TestCrypter; // for test access to chKey/chIV
private:
    //! Symmetric key, kept in secure (wiped-on-free) memory.
    std::vector<unsigned char, secure_allocator<unsigned char>> vchKey;
    //! Initialization vector, also in secure memory.
    std::vector<unsigned char, secure_allocator<unsigned char>> vchIV;
    //! True once SetKey/SetKeyFromPassphrase has loaded usable material.
    bool fKeySet;
    //! Derive key/IV from passphrase + salt over `count` rounds
    //! (name suggests a SHA512-based scheme; see the implementation).
    int BytesToKeySHA512AES(const std::vector<unsigned char>& chSalt, const SecureString& strKeyData, int count, unsigned char *key,unsigned char *iv) const;
public:
    bool SetKeyFromPassphrase(const SecureString &strKeyData, const std::vector<unsigned char>& chSalt, const unsigned int nRounds, const unsigned int nDerivationMethod);
    bool Encrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned char> &vchCiphertext) const;
    bool Decrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingMaterial& vchPlaintext) const;
    bool SetKey(const CKeyingMaterial& chNewKey, const std::vector<unsigned char>& chNewIV);
    //! Wipe the key/IV bytes and mark the context unusable.
    void CleanKey()
    {
        memory_cleanse(vchKey.data(), vchKey.size());
        memory_cleanse(vchIV.data(), vchIV.size());
        fKeySet = false;
    }
    CCrypter()
    {
        fKeySet = false;
        vchKey.resize(WALLET_CRYPTO_KEY_SIZE);
        vchIV.resize(WALLET_CRYPTO_IV_SIZE);
    }
    ~CCrypter()
    {
        // Ensure key material never outlives the object.
        CleanKey();
    }
};
/** Keystore which keeps the private keys encrypted.
 * It derives from the basic key store, which is used if no encryption is active.
 */
class CCryptoKeyStore : public CBasicKeyStore
{
private:
    //! The decrypted wallet master key (empty when not available).
    CKeyingMaterial vMasterKey;
    //! if fUseCrypto is true, mapKeys must be empty
    //! if fUseCrypto is false, vMasterKey must be empty
    std::atomic<bool> fUseCrypto;
    //! keeps track of whether Unlock has run a thorough check before
    bool fDecryptionThoroughlyChecked;
protected:
    bool SetCrypted();
    //! will encrypt previously unencrypted keys
    bool EncryptKeys(CKeyingMaterial& vMasterKeyIn);
    bool Unlock(const CKeyingMaterial& vMasterKeyIn);
    //! Encrypted key entries -- see the CryptedKeyMap definition for layout.
    CryptedKeyMap mapCryptedKeys;
public:
    CCryptoKeyStore() : fUseCrypto(false), fDecryptionThoroughlyChecked(false)
    {
    }
    bool IsCrypted() const { return fUseCrypto; }
    bool IsLocked() const;
    bool Lock();
    virtual bool AddCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret);
    bool AddKeyPubKey(const CKey& key, const CPubKey &pubkey) override;
    bool HaveKey(const CKeyID &address) const override;
    bool GetKey(const CKeyID &address, CKey& keyOut) const override;
    bool GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const override;
    std::set<CKeyID> GetKeys() const override;
    /**
     * Wallet status (encrypted, locked) changed.
     * Note: Called without locks held.
     */
    // NOTE(review): boost::signals2 is not included by this header --
    // presumably pulled in transitively via keystore.h; confirm.
    boost::signals2::signal<void (CCryptoKeyStore* wallet)> NotifyStatusChanged;
};
#endif // PERFECTCOIN_WALLET_CRYPTER_H
|
/**
* ==========================
* @description Scene's platform object
* ==========================
*
* @author Evgeny Savelyev
* @since 21.12.17
* @version 1.0.0
* @licence See the LICENCE file in the project root.
*/
"use strict";
// Engine classes used to assemble the platform actor.
const Actor = require("../Actor");
const RectBox = require("../../../geometry/RectBox");
const AMDBuilder = require("../../../graphics/animation/AMDBuilder");
const Texture = require("../../../graphics/Texture");

/* eslint-disable no-magic-numbers */

// Collision box for the platform; presumably (x, y, width, height)
// centered on the actor origin -- TODO confirm against RectBox.
const boundbox = new RectBox(-110, -30, 220, 60);

// The platform actor: a single "spawn" state showing the platform
// texture (-1 duration: the sprite never expires).
const actor = new Actor(
    "Platform",
    new AMDBuilder()
        .startState("spawn")
        .nextSprite(new Texture("resources/lolPlatform.jpg", 0.4, 0.2), -1)
        .setDefaultState("spawn")
        .build(),
    boundbox
);

exports = module.exports = actor;
|
let fetch = require('node-fetch')

// Replies with the "big thanks" credits as a button-location message.
// NOTE(review): `thanks` (image URL) and `watermark` are expected to be
// globals provided by the bot framework -- confirm.
let handler = async (m, { conn }) => conn.sendButtonLoc(m.chat, await (await fetch(thanks)).buffer(), `
BIG THANKS TO
•Allah swt
•My ortu
•⳹ ❋ཻུ۪۪⸙Zifabotz⳹ ❋ཻུ۪۪⸙
•Rozi{OWNER ZIFABOTZ}
•Penyedia Layanan API
•Orang-orang yang Berdonasi
`.trim(), watermark, 'Menu', '.menu')

handler.help = ['Thanksto', 'tqto']
handler.tags = ['main']
handler.command = /^(tqto|thanks|thanksto|bigthanks)$/i

module.exports = handler
// don't remove entries here; adding more is fine
|
from .response import BotResponse
class NothingResponse(BotResponse):
    """A bot response that deliberately performs no action."""

    def run(self):
        """Do nothing; this response is an explicit no-op."""
        return None
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
import unittest
import numpy as np
from lava.lib.optimization.problems.constraints import (
DiscreteConstraints,
EqualityConstraints,
InequalityConstraints,
ArithmeticConstraints,
Constraints,
)
from lava.lib.optimization.problems.coefficients import (
CoefficientTensorsMixin,
)
class TestDiscreteConstraint(unittest.TestCase):
    """Tests for DiscreteConstraints: construction, accessors, and the
    constraints/relations/var_subsets setters."""

    def setUp(self) -> None:
        # Constraints are (var_a, var_b, relation-matrix) triples.
        constraints = [(0, 1, np.logical_not(np.eye(5))), (1, 2, np.eye(5, 4))]
        self.relations_2d = [np.logical_not(np.eye(5)), np.eye(5, 4)]
        self.dconstraint = DiscreteConstraints(constraints)

    def test_create_obj(self):
        self.assertIsInstance(self.dconstraint, DiscreteConstraints)

    def test_var_subset(self):
        self.assertEqual(self.dconstraint.var_subsets, [(0, 1), (1, 2)])

    def test_var_subset_is_required(self):
        # The constructor has no default for `constraints`.
        with self.assertRaises(TypeError):
            DiscreteConstraints()

    def test_relation(self):
        for n, relation in enumerate(self.relations_2d):
            with self.subTest(msg=f"Test id {n}"):
                self.assertTrue(
                    (self.dconstraint.relations[n] == relation).all()
                )

    def test__input_validation_relation_matches_var_subset_dimension(self):
        # A 2-D relation paired with a 3-variable subset must be rejected.
        constraints = [
            (0, 1, 2, np.logical_not(np.eye(5))),
            (1, 2, np.eye(5, 4)),
        ]
        with self.assertRaises(ValueError):
            DiscreteConstraints(constraints)

    def test_set_constraints(self):
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.constraints = new_constraints
        self.assertIs(self.dconstraint.constraints, new_constraints)

    def test_setted_relation(self):
        # Assigning new constraints must update the derived relations.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.constraints = new_constraints
        new_relations = [np.logical_not(np.eye(5, 4)), np.eye(5)]
        for n, relation in enumerate(new_relations):
            with self.subTest(msg=f"Test id {n}"):
                self.assertTrue(
                    (self.dconstraint.relations[n] == relation).all()
                )

    def test_setted_var_subset(self):
        # Assigning new constraints must update the derived var subsets.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.constraints = new_constraints
        self.assertEqual(self.dconstraint.var_subsets, [(1, 2), (0, 1)])

    def test_var_subsets_from_function_set_relations_var_subsets(self):
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.set_relations_var_subsets(new_constraints)
        self.assertEqual(self.dconstraint._var_subset, [(1, 2), (0, 1)])

    def test__relations_from_function_set_relations_var_subsets(self):
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.set_relations_var_subsets(new_constraints)
        for n, relation in enumerate(
            [np.logical_not(np.eye(5, 4)), np.eye(5)]
        ):
            with self.subTest(msg=f"Relation index {n}"):
                self.assertTrue(
                    (self.dconstraint._relations[n] == relation).all()
                )
class TestEqualityConstraint(unittest.TestCase):
    """Tests that EqualityConstraints builds from coefficient tensors and
    inherits the coefficient-handling mixin."""

    def setUp(self) -> None:
        # Coefficient tensors of rank 0 through 3.
        bias = np.asarray(1)
        linear = np.ones(2)
        quadratic = np.ones((2, 2))
        cubic = np.ones((2, 2, 2))
        self.constraint = EqualityConstraints(bias, linear, quadratic, cubic)

    def test_create_obj(self):
        self.assertIsInstance(self.constraint, EqualityConstraints)

    def test_created_obj_includes_mixin(self):
        self.assertIsInstance(self.constraint, CoefficientTensorsMixin)
class TestInequalityConstraint(unittest.TestCase):
    """Tests that InequalityConstraints builds from coefficient tensors and
    inherits the coefficient-handling mixin."""

    def setUp(self) -> None:
        # Coefficient tensors of rank 0 through 3.
        bias = np.asarray(1)
        linear = np.ones(2)
        quadratic = np.ones((2, 2))
        cubic = np.ones((2, 2, 2))
        self.constraint = InequalityConstraints(bias, linear, quadratic, cubic)

    def test_create_obj(self):
        self.assertIsInstance(self.constraint, InequalityConstraints)

    def test_created_obj_includes_mixin(self):
        self.assertIsInstance(self.constraint, CoefficientTensorsMixin)
class TestArithmeticConstraint(unittest.TestCase):
    """Tests for ArithmeticConstraints and its equality/inequality setters."""

    def setUp(self) -> None:
        self.constraint = ArithmeticConstraints()

    def test_create_obj(self):
        self.assertIsInstance(self.constraint, ArithmeticConstraints)

    def test_set_arithmetic_constraints_equality(self):
        # Setting .equality must expose the coefficients unchanged.
        new_constraints_eq = (
            np.asarray(1),
            np.ones(2),
            np.ones((2, 2)),
        )
        self.constraint.equality = new_constraints_eq
        for n, coefficient in enumerate(new_constraints_eq):
            with self.subTest(msg=f"{n}"):
                self.assertTrue(
                    (
                        coefficient == self.constraint.equality.coefficients[n]
                    ).all()
                )

    def test_set_arithmetic_constraints_inequality(self):
        # Setting .inequality must expose the coefficients unchanged.
        new_constraints_ineq = (
            np.asarray(1),
            np.ones(2),
            np.ones((2, 2)),
        )
        self.constraint.inequality = new_constraints_ineq
        for n, coefficient in enumerate(new_constraints_ineq):
            with self.subTest(msg=f"{n}"):
                self.assertTrue(
                    (
                        coefficient
                        == self.constraint.inequality.coefficients[n]
                    ).all()
                )
class TestConstraints(unittest.TestCase):
    """Tests for the Constraints aggregate (discrete + arithmetic parts)."""

    def setUp(self) -> None:
        self.constraints = Constraints()

    def test_create_obj(self):
        self.assertIsInstance(self.constraints, Constraints)

    def test_discrete_defaults_to_none(self):
        self.assertIsNone(self.constraints.discrete)

    def test_arithmetic_defaults_to_none(self):
        self.assertIsNone(self.constraints.arithmetic)

    def test_set_discrete_constraints(self):
        new_constraints = [(0, 1, np.eye(5))]
        self.constraints.discrete = DiscreteConstraints(new_constraints)
        self.assertIs(self.constraints.discrete._constraints, new_constraints)

    def test_class_of_setted_discrete_constraints(self):
        new_constraints = [(0, 1, np.eye(5))]
        self.constraints.discrete = DiscreteConstraints(new_constraints)
        self.assertIsInstance(self.constraints.discrete, DiscreteConstraints)

    def test_set_arithmetic_constraint(self):
        # Bug fix: renamed from "teest_set_arithmetic_constraint"; the typo
        # meant unittest discovery never ran this test.
        new_constraint = ArithmeticConstraints()
        self.constraints.arithmetic = new_constraint
        self.assertIs(self.constraints.arithmetic, new_constraint)

    def test_class_of_setted_arithmetic_constraints(self):
        new_constraint = ArithmeticConstraints()
        self.constraints.arithmetic = new_constraint
        self.assertIsInstance(
            self.constraints.arithmetic, ArithmeticConstraints
        )
# Allow running this test module directly (python <module>.py).
if __name__ == "__main__":
    unittest.main()
|
import React from 'react';
import pure from 'recompose/pure';
import SvgIcon from 'material-ui/SvgIcon';
// Prefer a globally-injected SvgIcon when present (lets multiple
// material-ui copies share a single implementation), else the local one.
const SvgIconCustom = global.__MUI_SvgIcon__ || SvgIcon;

// "swap calls" material icon as a pure (memoized) SVG component.
let SwapCalls = props =>
  <SvgIconCustom {...props}>
    <path d="M18 4l-4 4h3v7c0 1.1-.9 2-2 2s-2-.9-2-2V8c0-2.21-1.79-4-4-4S5 5.79 5 8v7H2l4 4 4-4H7V8c0-1.1.9-2 2-2s2 .9 2 2v7c0 2.21 1.79 4 4 4s4-1.79 4-4V8h3l-4-4z" />
  </SvgIconCustom>;

SwapCalls = pure(SwapCalls);
// Tells material-ui styling machinery to treat this as an SvgIcon.
SwapCalls.muiName = 'SvgIcon';

export default SwapCalls;
|
import time
import requests
from bs4 import BeautifulSoup
from crawlers.generic import BaseCrawler
from settings import BEGIN_CRAWL_SINCE
class ScreenEggsCrawler(BaseCrawler):
    """Crawler that scrapes meme images from screeneggs.com.

    Pages through http://screeneggs.com/category/memes/ and stores the
    scraped posts through the BaseCrawler Mongo pipeline.
    """

    def __init__(self, *args, **kwargs):
        # Tag every record produced by this crawler with source='screen_eggs'.
        super(ScreenEggsCrawler, self).__init__(source='screen_eggs', *args, **kwargs)
        self.url = 'http://screeneggs.com/category/memes/'

    def get_feed(self, page=1):
        """Fetch one listing page and return the scraped image dicts.

        Args:
            page (int): 1-based page number; page 1 is the bare category
                URL, later pages use the ``page/<n>/`` suffix.

        Returns:
            list[dict]: dicts with ``id``, ``title`` and ``url`` keys;
            empty when the HTTP request does not return 200.
        """
        images = []
        page_url = self.url
        if page > 1:
            page_url = '{}page/{}/'.format(self.url, page)
        response = requests.get(page_url)
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, 'html.parser')
            posts = soup.findAll("div", {"class": "post"})
            for p in posts:
                try:
                    i = p.find('img')
                    images.append({
                        # e.g. id="post-1234" -> "1234"
                        "id": p.attrs.get('id').replace('post-', ''),
                        "title": i.attrs.get('alt'),
                        "url": i.attrs.get('src')
                    })
                except Exception as e:
                    # A malformed post block is skipped, not fatal.
                    print(e)
        return images

    def _pre_process_data(self, data):
        """Normalize raw feed dicts into the storage schema used downstream."""
        results = []
        for d in data:
            results.append(
                {
                    "id": d['id'],
                    "title": d.get('title'),
                    "image_url": d.get('url'),
                    "file_name": 'data/{}/{}.jpg'.format(self.source, d['id']),
                    "source": self.source,
                    # NOTE(review): get_feed never sets 'created_at', so this
                    # is always None here -- confirm whether that is intended.
                    "created_at": d.get('created_at')
                }
            )
        return results

    def run(self):
        """Main crawl loop: page forward, store results, throttle, repeat.

        Runs until ``self.running`` (managed by BaseCrawler) goes False.
        """
        self._log_console("Starting up {} crawler ...".format(self.source))
        self._create_mongo_db_connection()
        next_page = 0
        while self.running:
            try:
                next_page += 1
                data = self.get_feed(next_page)
                pre_processed_data = self._pre_process_data(data)
                inserted, oldest_timestamp = self.process_data(pre_processed_data)
                self._log_console("Iteration ended with {} results".format(len(pre_processed_data)))
                time.sleep(4)
                # Rewind to page 1 once we hit already-seen or too-old posts.
                if oldest_timestamp < BEGIN_CRAWL_SINCE or not inserted:
                    next_page = 0
                    time.sleep(8)
                # Far ahead of the cut-off: back off harder.
                if (oldest_timestamp - BEGIN_CRAWL_SINCE) > 300:
                    time.sleep(60)
            except Exception as e:
                # Log and keep crawling; one bad iteration should not kill
                # the long-running loop.
                print(e)
                self._log_console("Exception on main thread run()")
|
"""
Django settings for sport_club project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from dotenv import load_dotenv
from pathlib import Path
from datetime import timedelta
from dj_database_url import config
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# Bug fix: os.getenv returns a *string*, so any non-empty value --
# including "False" -- was truthy and silently enabled debug mode.
# Parse the environment variable into a real boolean instead.
DEBUG = os.getenv('DEBUG', 'False').lower() in ('true', '1', 'yes')
ALLOWED_HOSTS = ['sport-club-dev.herokuapp.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps (one per API domain).
    'api.apps.ApiConfig',
    'api.blog_api.apps.BlogApiConfig',
    'api.cards_api.apps.CardsApiConfig',
    'api.accounts_api.apps.AccountsApiConfig',
    'api.subscription_api.apps.SubscriptionApiConfig',
    'api.reservation_api.apps.ReservationApiConfig',
    'api.staff_api.apps.StaffApiConfig',
    # Third-party apps.
    'rest_framework',
    'corsheaders',
    'rest_framework_simplejwt.token_blacklist',
    'drf_yasg',
    'django_extensions',
]
MIDDLEWARE = [
    # CorsMiddleware must come before middleware that can generate responses.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise serves collected static files in production.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is disabled -- presumably because the
    # API is JWT-authenticated; confirm this is intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sport_club.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Serve the React build's index.html plus account e-mail templates.
        'DIRS': [
            BASE_DIR / 'client/build',
            BASE_DIR / 'api/accounts_api/templates',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sport_club.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# dj_database_url's config() reads DATABASE_URL from the environment.
DATABASES = {'default': config(conn_max_age=600)}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = BASE_DIR / 'staticfiles'
STATIC_URL = '/static/'
# Also collect the React build's static assets.
STATICFILES_DIRS = [
    BASE_DIR / 'client/build/static'
]
# Media files
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_URL = '/media/'
# Override the default user model
AUTH_USER_MODEL = 'accounts_api.Member'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Allow any website to make cross-origin requests
# NOTE(review): this disables the CORS origin whitelist entirely --
# acceptable for a public read API, risky otherwise; confirm.
CORS_ALLOW_ALL_ORIGINS = True
# Rest Framework settings
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    )
}
# JSON Web Token settings
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(hours=6),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': True,
    'UPDATE_LAST_LOGIN': False,
    'ALGORITHM': 'HS256',
    # Tokens are signed with the Django secret key (symmetric HS256).
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,
    'AUDIENCE': None,
    'ISSUER': None,
    'AUTH_HEADER_TYPES': ('JWT', 'Bearer',),
    'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'USER_AUTHENTICATION_RULE': 'rest_framework_simplejwt.authentication.default_user_authentication_rule',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
    'JTI_CLAIM': 'jti',
    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
# API Documentation settings
SWAGGER_SETTINGS = {
    'SECURITY_DEFINITIONS': {
        'Bearer': {
            'type': 'apiKey',
            'description': 'JWT Authorization',
            'name': 'Authorization',
            'in': 'header',
        }
    },
    'LOGIN_URL': '/api-auth/login/',
    'LOGOUT_URL': '/api-auth/logout/',
}
# Django extensions: Graph model
GRAPH_MODELS = {
    'group_models': True,
}
|
import math
import os
import time
import numpy as np
import pandas as pd
import sklearn
from tabulate import tabulate
from tqdm import tqdm
from ..utils.alias_table import AliasTable
from ..utils.common_util import get_dataframe_from_npz, save_dataframe_as_npz
from ..utils.constants import (
DEFAULT_FLAG_COL,
DEFAULT_ITEM_COL,
DEFAULT_ORDER_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
DEFAULT_USER_COL,
)
def filter_by_count(df, group_col, filter_col, num):
    """Keep only rows whose ``group_col`` group has enough distinct values.

    Args:
        df (DataFrame): interaction DataFrame to be processed.
        group_col (string): column whose groups are kept or dropped.
        filter_col (string): column whose distinct values are counted per group.
        num (int): minimum distinct-count a group needs to survive.

    Returns:
        DataFrame: the rows of ``df`` belonging to surviving groups,
        in their original order.
    """
    distinct_counts = df.groupby(group_col)[filter_col].nunique()
    surviving_groups = distinct_counts[distinct_counts >= num].index
    return df[df[group_col].isin(surviving_groups)]
def check_data_available(data):
    """Ensure that a dataset still holds interactions after filtering.

    Args:
        data (DataFrame): interaction DataFrame to be processed.

    Raises:
        RuntimeError: if the DataFrame contains no interaction rows.
    """
    n_rows = len(data.index)
    if n_rows == 0:
        raise RuntimeError(
            "This dataset contains no interaction after filtering. Please check the default filter setup of this split!"
        )
def filter_user_item(df, min_u_c=5, min_i_c=5):
    """Filter data by the minimum purchase number of items and users.

    Args:
        df (DataFrame): interaction DataFrame to be processed.
        min_u_c (int): filter the items that were purchased by less than min_u_c users.
            (default: :obj:`5`)
        min_i_c (int): filter the users that have purchased by less than min_i_c items.
            (default: :obj:`5`)

    Returns:
        DataFrame: The filtered interactions

    Raises:
        RuntimeError: if no interaction survives the filtering.
    """
    print(f"filter_user_item under condition min_u_c={min_u_c}, min_i_c={min_i_c}")
    print("-" * 80)
    print("Dataset statistics before filter")
    print(
        tabulate(
            df.agg(["count", "nunique"]),
            headers=df.columns,
            tablefmt="psql",
            disable_numparse=True,
        )
    )
    n_interact = len(df.index)
    # Filtering one side can push the other side below its threshold, so
    # repeat both passes until the row count reaches a fixed point.
    while True:
        # Filter out users that have less than min_i_c interactions (items)
        if min_i_c > 0:
            df = filter_by_count(df, DEFAULT_USER_COL, DEFAULT_ITEM_COL, min_i_c)
        # Filter out items that have less than min_u_c users
        if min_u_c > 0:
            df = filter_by_count(df, DEFAULT_ITEM_COL, DEFAULT_USER_COL, min_u_c)
        new_n_interact = len(df.index)
        if n_interact != new_n_interact:
            n_interact = new_n_interact
        else:
            break  # no change
    check_data_available(df)
    print("Dataset statistics after filter")
    print(
        tabulate(
            df.agg(["count", "nunique"]),
            headers=df.columns,
            tablefmt="psql",
            disable_numparse=True,
        )
    )
    print("-" * 80)
    return df
def filter_user_item_order(df, min_u_c=5, min_i_c=5, min_o_c=5):
    """Filter data by the minimum purchase number of items, users and orders.

    Args:
        df (DataFrame): interaction DataFrame to be processed.
        min_u_c: filter the items that were purchased by less than min_u_c users.
            (default: :obj:`5`)
        min_i_c: filter the users that have purchased by less than min_i_c items.
            (default: :obj:`5`)
        min_o_c: filter the users that have purchased by less than min_o_c orders.
            (default: :obj:`5`)

    Returns:
        The filtered DataFrame.

    Raises:
        RuntimeError: if no interaction survives the filtering.
    """
    print(
        f"filter_user_item_order under condition min_u_c={min_u_c}, min_i_c={min_i_c}, min_o_c={min_o_c}"
    )
    print("-" * 80)
    print("Dataset statistics before filter")
    print(
        tabulate(
            df.agg(["count", "nunique"]),
            headers=df.columns,
            tablefmt="psql",
            disable_numparse=True,
        )
    )
    n_interact = len(df.index)
    # Each filter pass can invalidate the others, so iterate all three
    # until the row count reaches a fixed point.
    while True:
        # Filter out users by that have less than min_o_c purchased orders
        if min_o_c > 0:
            df = filter_by_count(df, DEFAULT_USER_COL, DEFAULT_ORDER_COL, min_o_c)
        # Filter out users that have less than min_i_c interactions (items)
        if min_i_c > 0:
            df = filter_by_count(df, DEFAULT_USER_COL, DEFAULT_ITEM_COL, min_i_c)
        # Filter out items that have less than min_u_c users
        if min_u_c > 0:
            df = filter_by_count(df, DEFAULT_ITEM_COL, DEFAULT_USER_COL, min_u_c)
        new_n_interact = len(df.index)
        if n_interact != new_n_interact:
            n_interact = new_n_interact
        else:
            break  # no change
    check_data_available(df)
    print("Dataset statistics after filter")
    print(
        tabulate(
            df.agg(["count", "nunique"]),
            headers=df.columns,
            tablefmt="psql",
            disable_numparse=True,
        )
    )
    print("-" * 80)
    return df
def feed_neg_sample(data, negative_num, item_sampler):
    """Sample negative items for an interaction DataFrame.

    For every user, all positive (observed) items are kept with their
    ratings, and up to ``negative_num`` unobserved items are appended with
    rating 0.

    Args:
        data (DataFrame): interaction DataFrame to be processed.
        negative_num (int): number of negative items.
            If negative_num < 0, will keep all the negative items for each user.
        item_sampler (AliasTable): an AliasTable sampler that contains the items.

    Returns:
        DataFrame: interaction DataFrame extended with negative samples and
        shuffled so positives and negatives are interleaved.
    """
    unique_item_set = set(data[DEFAULT_ITEM_COL].unique())
    unique_rating_num = data[DEFAULT_RATING_COL].nunique()
    interact_status = (
        data.groupby([DEFAULT_USER_COL])[DEFAULT_ITEM_COL].apply(set).reset_index()
    )
    frames = []
    for _, user_items in interact_status.iterrows():
        u = user_items[DEFAULT_USER_COL]
        pos_items = set(user_items[DEFAULT_ITEM_COL])  # item set for user u
        pos_items_li = list(pos_items)  # the positive items should be unique
        n_pos_items = len(pos_items_li)  # number of positive items for user u
        if negative_num < 0:  # keep all the negative items
            neg_items_li = list(unique_item_set - pos_items)
        else:
            # Over-sample so that, after removing positives, enough
            # candidates remain, then truncate to negative_num.
            neg_items = set(item_sampler.sample(negative_num + n_pos_items, 1, True))
            neg_items_li = list(neg_items - pos_items)[:negative_num]
        # BUGFIX: use the actual count, not the requested one — the sampler
        # may yield fewer novel items, and user/rating arrays must stay
        # aligned with df_items.
        n_neg_items = len(neg_items_li)
        df_items = np.append(pos_items_li, neg_items_li)
        df_users = np.array([u] * (n_pos_items + n_neg_items), dtype=type(u))
        if unique_rating_num != 1:
            # Preserve the original rating score of each positive item.
            pos_rating = [
                data.loc[
                    (data[DEFAULT_USER_COL] == u)
                    & (data[DEFAULT_ITEM_COL] == item),
                    DEFAULT_RATING_COL,
                ].to_numpy()[0]
                for item in pos_items_li
            ]
        else:
            pos_rating = np.full(n_pos_items, 1)
        # np.int64 replaces np.long, which was removed in NumPy 1.24.
        neg_rating = np.zeros(n_neg_items, dtype=np.int64)
        df_ratings = np.append(pos_rating, neg_rating)
        frames.append(
            pd.DataFrame(
                {
                    DEFAULT_USER_COL: df_users,
                    DEFAULT_ITEM_COL: df_items,
                    DEFAULT_RATING_COL: df_ratings,
                }
            )
        )
    # DataFrame.append was removed in pandas 2.0; concatenate once instead.
    if frames:
        total_interact = pd.concat(frames, ignore_index=True)
    else:
        total_interact = pd.DataFrame(
            {DEFAULT_USER_COL: [], DEFAULT_ITEM_COL: [], DEFAULT_RATING_COL: []},
            dtype=np.int64,
        )
    # shuffle interactions to avoid all the negative samples being together
    total_interact = sklearn.utils.shuffle(total_interact)
    return total_interact
def load_split_data(path, n_test=10):
    """Load split DataFrame from a specified path.

    Args:
        path (string): split data path.
        n_test: number of testing and validation datasets.
            If n_test==0, will load the original (no negative items) valid and test datasets.

    Returns:
        (DataFrame, list(DataFrame), list(DataFrame)): DataFrame of training interaction,
        DataFrame list of validation interaction,
        DataFrame list of testing interaction,
        Note: when n_test == 0 the second and third values are single
        DataFrames rather than lists.
    """
    # The training split is always stored as a single train.npz.
    train_file = os.path.join(path, "train.npz")
    train_data = get_dataframe_from_npz(train_file)
    print("-" * 80)
    print("Loaded training set statistics")
    print(
        tabulate(
            train_data.agg(["count", "nunique"]),
            headers=train_data.columns,
            tablefmt="psql",
            disable_numparse=True,
        )
    )
    if not n_test:
        # Load the single validation/test files that were saved without
        # sampled negative items.
        valid_df = get_dataframe_from_npz(os.path.join(path, "valid.npz"))
        test_df = get_dataframe_from_npz(os.path.join(path, "test.npz"))
        print("Loaded validation set statistics")
        print(
            tabulate(
                valid_df.agg(["count", "nunique"]),
                headers=valid_df.columns,
                tablefmt="psql",
                disable_numparse=True,
            )
        )
        print("Loaded testing set statistics")
        print(
            tabulate(
                test_df.agg(["count", "nunique"]),
                headers=test_df.columns,
                tablefmt="psql",
                disable_numparse=True,
            )
        )
        print("-" * 80)
        return train_data, valid_df, test_df
    valid_data_li = []
    test_data_li = []
    for i in range(n_test):
        # Each copy i was produced by an independent round of negative
        # sampling (see split_data).
        valid_df = get_dataframe_from_npz(os.path.join(path, f"valid_{i}.npz"))
        valid_data_li.append(valid_df)
        if i == 0:
            # Only print statistics for the first copy to keep logs short.
            print(f"valid_data_{i} statistics")
            print(
                tabulate(
                    valid_df.agg(["count", "nunique"]),
                    headers=valid_df.columns,
                    tablefmt="psql",
                    disable_numparse=True,
                )
            )
        test_df = get_dataframe_from_npz(os.path.join(path, f"test_{i}.npz"))
        test_data_li.append(test_df)
        if i == 0:
            print(f"test_data_{i} statistics")
            print(
                tabulate(
                    test_df.agg(["count", "nunique"]),
                    headers=test_df.columns,
                    tablefmt="psql",
                    disable_numparse=True,
                )
            )
    print("-" * 80)
    return train_data, valid_data_li, test_data_li
def save_split_data(
    data,
    base_dir,
    data_split="leave_one_basket",
    parameterized_dir=None,
    suffix="train.npz",
):
    """Save a split DataFrame as a compressed npz file.

    The file is written to ``base_dir/data_split/parameterized_dir/suffix``;
    intermediate directories are created on demand.

    Args:
        data (DataFrame): interaction DataFrame to be saved.
        base_dir (string): directory to save.
        data_split (string): sub folder name for saving the data.
        parameterized_dir (string): data_split parameter string.
        suffix (string): suffix of the data to be saved.
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pattern and creates both directory
    # levels in one call.
    target_dir = os.path.join(base_dir, data_split, parameterized_dir)
    os.makedirs(target_dir, exist_ok=True)
    data_file = os.path.join(target_dir, suffix)
    save_dataframe_as_npz(data, data_file)
    print("Data is dumped in :", data_file)
def random_split(data, test_rate=0.1, by_user=False):
    """random_split.

    Randomly assign each interaction row to "train", "validate" or "test"
    via the DEFAULT_FLAG_COL column.
    (Docstring previously misnamed this function "random_basket_split".)

    Args:
        data (DataFrame): interaction DataFrame to be split.
        test_rate (float): percentage of the test data.
            Note that percentage of the validation data will be the same as testing.
        by_user (bool): Default False.
            - True: user-based split,
            - False: global split,

    Returns:
        DataFrame: DataFrame that have already by labeled by a col with "train", "test" or "validate".
    """
    print("random_split")
    data[DEFAULT_FLAG_COL] = "train"
    if by_user:
        users = data[DEFAULT_USER_COL].unique()
        for u in tqdm(users):
            interactions = data[data[DEFAULT_USER_COL] == u].index.values  # numpy array
            interactions = sklearn.utils.shuffle(interactions)
            total_size = len(interactions)
            validate_size = math.ceil(total_size * test_rate)
            test_size = math.ceil(total_size * test_rate)
            train_size = total_size - test_size
            data.loc[
                interactions[train_size:],
                DEFAULT_FLAG_COL,
            ] = "test"  # the last test_rate of the total orders to be the test set
            # The validate slice is carved out of the tail of the train span,
            # so the effective train portion is total - test - validate.
            data.loc[
                interactions[train_size - validate_size : train_size],
                DEFAULT_FLAG_COL,
            ] = "validate"
    else:
        # Global split: shuffle all row indices regardless of user.
        interactions = data.index.values  # numpy array
        interactions = sklearn.utils.shuffle(interactions)
        total_size = len(interactions)
        validate_size = math.ceil(total_size * test_rate)
        test_size = math.ceil(total_size * test_rate)
        train_size = total_size - test_size
        data.loc[
            interactions[train_size:],
            DEFAULT_FLAG_COL,
        ] = "test"  # the last test_rate of the total orders to be the test set
        data.loc[
            interactions[train_size - validate_size : train_size],
            DEFAULT_FLAG_COL,
        ] = "validate"
    return data
def random_basket_split(data, test_rate=0.1, by_user=False):
    """random_basket_split.

    Like random_split, but whole baskets (orders) — not individual rows —
    are randomly assigned to "train", "validate" or "test".

    Args:
        data (DataFrame): interaction DataFrame to be split.
        test_rate (float): percentage of the test data.
            Note that percentage of the validation data will be the same as testing.
        by_user (bool): Default False.
            - True: user-based split,
            - False: global split,

    Returns:
        DataFrame: DataFrame that have already by labeled by a col with "train", "test" or "validate".
    """
    print("random_basket_split")
    data[DEFAULT_FLAG_COL] = "train"
    if by_user:
        users = data[DEFAULT_USER_COL].unique()
        for u in tqdm(users):
            # Split on each user's distinct order ids so a basket never
            # straddles two splits.
            orders = data[data[DEFAULT_USER_COL] == u][DEFAULT_ORDER_COL].unique()
            orders = sklearn.utils.shuffle(orders)
            total_size = len(orders)
            validate_size = math.ceil(total_size * test_rate)
            test_size = math.ceil(total_size * test_rate)
            train_size = total_size - test_size
            data.loc[
                data[DEFAULT_ORDER_COL].isin(orders[train_size:]),
                DEFAULT_FLAG_COL,
            ] = "test"  # the last test_rate of the total orders to be the test set
            # Validate baskets come out of the tail of the train span.
            data.loc[
                data[DEFAULT_ORDER_COL].isin(
                    orders[train_size - validate_size : train_size]
                ),
                DEFAULT_FLAG_COL,
            ] = "validate"
    else:
        # Global split over all distinct order ids.
        orders = data[DEFAULT_ORDER_COL].unique()
        orders = sklearn.utils.shuffle(orders)
        total_size = len(orders)
        validate_size = math.ceil(total_size * test_rate)
        test_size = math.ceil(total_size * test_rate)
        train_size = total_size - test_size
        data.loc[
            data[DEFAULT_ORDER_COL].isin(orders[train_size:]),
            DEFAULT_FLAG_COL,
        ] = "test"  # the last test_rate of the total orders to be the test set
        data.loc[
            data[DEFAULT_ORDER_COL].isin(
                orders[train_size - validate_size : train_size]
            ),
            DEFAULT_FLAG_COL,
        ] = "validate"
    return data
def leave_one_out(data, random=False):
    """leave_one_out split.

    Flags the most recent interaction of each user as "test", the second
    most recent as "validate", and everything else as "train".

    Args:
        data (DataFrame): interaction DataFrame to be split.
        random (bool): Whether randomly leave one item/basket as testing. only for leave_one_out and leave_one_basket.

    Returns:
        DataFrame: DataFrame that have already by labeled by a col with "train", "test" or "valid".
    """
    start_time = time.time()
    print("leave_one_out")
    data[DEFAULT_FLAG_COL] = "train"
    if random:
        # Random order: "most recent" becomes an arbitrary interaction.
        data = sklearn.utils.shuffle(data)
    else:
        # Descending sort so head() yields each user's newest rows.
        data.sort_values(by=[DEFAULT_TIMESTAMP_COL], ascending=False, inplace=True)
    per_user = data.groupby([DEFAULT_USER_COL])
    # Mark the top two rows per user as validate, then overwrite the very
    # first one as test, leaving exactly one validate row per user.
    data.loc[per_user.head(2).index, DEFAULT_FLAG_COL] = "validate"
    data.loc[per_user.head(1).index, DEFAULT_FLAG_COL] = "test"
    end_time = time.time()
    print(f"leave_one_out time cost: {end_time - start_time}")
    return data
def leave_one_basket(data, random=False):
    """leave_one_basket split.

    Flags each user's last basket (order) as "test" and the second-to-last
    as "validate"; every other row stays "train".

    Args:
        data (DataFrame): interaction DataFrame to be split.
        random (bool): Whether randomly leave one item/basket as testing. only for leave_one_out and leave_one_basket.

    Returns:
        DataFrame: DataFrame that have already by labeled by a col with "train", "test" or "valid".
    """
    print("leave_one_basket")
    data[DEFAULT_FLAG_COL] = "train"
    if random:
        # Shuffled order: the "last" basket becomes an arbitrary one.
        data = sklearn.utils.shuffle(data)
    else:
        # Ascending sort so user_orders comes out in chronological order.
        data.sort_values(by=[DEFAULT_TIMESTAMP_COL], inplace=True)
    users = data[DEFAULT_USER_COL].unique()
    for u in tqdm(users):
        user_orders = data[data[DEFAULT_USER_COL] == u][DEFAULT_ORDER_COL].unique()
        # NOTE(review): assumes every user has at least two orders —
        # user_orders[-2] raises IndexError otherwise; confirm upstream
        # filtering (e.g. filter_user_item_order with min_o_c >= 2).
        # NOTE(review): the .loc masks match on order id alone, which
        # presumably is globally unique per basket — verify.
        data.loc[data[DEFAULT_ORDER_COL] == user_orders[-1], DEFAULT_FLAG_COL] = "test"
        data.loc[
            data[DEFAULT_ORDER_COL] == user_orders[-2], DEFAULT_FLAG_COL
        ] = "validate"
    return data
def temporal_split(data, test_rate=0.1, by_user=False):
    """temporal_split.

    Chronological split: rows are sorted by timestamp and the most recent
    test_rate fraction becomes "test", the fraction just before it
    "validate", and the rest "train".

    Args:
        data (DataFrame): interaction DataFrame to be split.
        test_rate (float): percentage of the test data.
            Note that percentage of the validation data will be the same as testing.
        by_user (bool): bool. Default False.
            - True: user-based split,
            - False: global split,

    Returns:
        DataFrame: DataFrame that have already by labeled by a col with "train", "test" or "valid".
    """
    print("temporal_split")
    data[DEFAULT_FLAG_COL] = "train"
    # Ascending sort: the tail of the index arrays is the most recent data.
    data.sort_values(by=[DEFAULT_TIMESTAMP_COL], inplace=True)
    if by_user:
        users = data[DEFAULT_USER_COL].unique()
        for u in tqdm(users):
            # Row indices for this user, already in chronological order.
            interactions = data[data[DEFAULT_USER_COL] == u].index.values
            total_size = len(interactions)
            validate_size = math.ceil(total_size * test_rate)
            test_size = math.ceil(total_size * test_rate)
            train_size = total_size - test_size
            data.loc[
                interactions[train_size:],
                DEFAULT_FLAG_COL,
            ] = "test"  # the last test_rate of the total orders to be the test set
            # Validate rows sit immediately before the test span in time.
            data.loc[
                interactions[train_size - validate_size : train_size],
                DEFAULT_FLAG_COL,
            ] = "validate"
    else:
        # Global chronological split over all rows.
        interactions = data.index.values
        total_size = len(interactions)
        validate_size = math.ceil(total_size * test_rate)
        test_size = math.ceil(total_size * test_rate)
        train_size = total_size - test_size
        data.loc[
            interactions[train_size:],
            DEFAULT_FLAG_COL,
        ] = "test"  # the last test_rate of the total orders to be the test set
        data.loc[
            interactions[train_size - validate_size : train_size],
            DEFAULT_FLAG_COL,
        ] = "validate"
    return data
def temporal_basket_split(data, test_rate=0.1, by_user=False):
    """temporal_basket_split.

    Chronological split at basket granularity: the most recent test_rate
    fraction of orders becomes "test", the fraction just before it
    "validate", and the rest "train".

    Args:
        data (DataFrame): interaction DataFrame to be split.
            It must have a col DEFAULT_ORDER_COL.
        test_rate (float): percentage of the test data.
            Note that percentage of the validation data will be the same as testing.
        by_user (bool): Default False.
            - True: user-based split,
            - False: global split,

    Returns:
        DataFrame: DataFrame that have already by labeled by a col with "train", "test" or "valid".
    """
    print("temporal_split_basket")
    data[DEFAULT_FLAG_COL] = "train"
    # Ascending sort so .unique() yields order ids in chronological order.
    data.sort_values(by=[DEFAULT_TIMESTAMP_COL], inplace=True)
    if by_user:
        users = data[DEFAULT_USER_COL].unique()
        for u in tqdm(users):
            orders = data[data[DEFAULT_USER_COL] == u][DEFAULT_ORDER_COL].unique()
            total_size = len(orders)
            validate_size = math.ceil(total_size * test_rate)
            test_size = math.ceil(total_size * test_rate)
            train_size = total_size - test_size
            data.loc[
                data[DEFAULT_ORDER_COL].isin(orders[train_size:]),
                DEFAULT_FLAG_COL,
            ] = "test"  # the last test_rate of the total orders to be the test set
            # Validate baskets sit immediately before the test span in time.
            data.loc[
                data[DEFAULT_ORDER_COL].isin(
                    orders[train_size - validate_size : train_size]
                ),
                DEFAULT_FLAG_COL,
            ] = "validate"
    else:
        # Global chronological split over all distinct order ids.
        orders = data[DEFAULT_ORDER_COL].unique()
        total_size = len(orders)
        validate_size = math.ceil(total_size * test_rate)
        test_size = math.ceil(total_size * test_rate)
        train_size = total_size - test_size
        data.loc[
            data[DEFAULT_ORDER_COL].isin(orders[train_size:]),
            DEFAULT_FLAG_COL,
        ] = "test"  # the last test_rate of the total orders to be the test set
        data.loc[
            data[DEFAULT_ORDER_COL].isin(
                orders[train_size - validate_size : train_size]
            ),
            DEFAULT_FLAG_COL,
        ] = "validate"
    return data
def split_data(
    data,
    split_type,
    test_rate,
    random=False,
    n_negative=100,
    save_dir=None,
    by_user=False,
    n_test=10,
):
    """Split data by split_type and other parameters.

    Args:
        data (DataFrame): interaction DataFrame to be split
        split_type (string): options can be:
            - random
            - random_basket
            - leave_one_out
            - leave_one_basket
            - temporal
            - temporal_basket
        random (bool): Whether random leave one item/basket as testing. only for leave_one_out and leave_one_basket.
        test_rate (float): percentage of the test data.
            Note that percentage of the validation data will be the same as testing.
        n_negative (int): Number of negative samples for testing and validation data.
        save_dir (string or Path): Default None. If specified, the split data will be saved to the dir.
        by_user (bool): Default False.
            - True: user-based split,
            - False: global split,
        n_test (int): Default 10. The number of testing and validation copies.

    Returns:
        DataFrame: The split data. Note that the returned data will not have negative samples.

    Raises:
        RuntimeError: if any user has too few unseen items to draw
            n_negative distinct negatives from.
    """
    print(f"Splitting data by {split_type} ...")
    if n_negative < 0 and n_test > 1:
        # n_negative < 0, validate and testing sets of splits will contain all the negative items.
        # There will be only one validate and one testing sets.
        n_test = 1
    # Dispatch to the concrete strategy; each one labels rows in
    # DEFAULT_FLAG_COL with "train" / "validate" / "test".
    if split_type == "random":
        data = random_split(data, test_rate, by_user)
    elif split_type == "random_basket":
        data = random_basket_split(data, test_rate, by_user)
    elif split_type == "leave_one_out":
        data = leave_one_out(data, random)
    elif split_type == "leave_one_basket":
        data = leave_one_basket(data, random)
    elif split_type == "temporal":
        data = temporal_split(data, test_rate, by_user)
    elif split_type == "temporal_basket":
        data = temporal_basket_split(data, test_rate, by_user)
    else:
        print("[ERROR] wrong split_type.")
        return None
    tp_train = data[data[DEFAULT_FLAG_COL] == "train"]
    tp_validate = data[data[DEFAULT_FLAG_COL] == "validate"]
    tp_test = data[data[DEFAULT_FLAG_COL] == "test"]
    if save_dir is None:
        # Nothing to persist; hand back the labeled DataFrame directly.
        return data
    parameterized_path = generate_parameterized_path(
        test_rate=test_rate, random=random, n_negative=n_negative, by_user=by_user
    )
    save_split_data(tp_train, save_dir, split_type, parameterized_path, "train.npz")
    # keep the original validation and test sets.
    save_split_data(tp_validate, save_dir, split_type, parameterized_path, "valid.npz")
    save_split_data(tp_test, save_dir, split_type, parameterized_path, "test.npz")
    # Negative sampling is weighted by global item popularity.
    item_sampler = AliasTable(data[DEFAULT_ITEM_COL].value_counts().to_dict())
    n_items = tp_train[DEFAULT_ITEM_COL].nunique()
    # Largest positive count any single user has in valid/test: a user can
    # only be assigned up to (n_items - positives) distinct negatives.
    valid_neg_max = (
        tp_validate.groupby([DEFAULT_USER_COL])[DEFAULT_ITEM_COL].count().max()
    )
    test_neg_max = tp_test.groupby([DEFAULT_USER_COL])[DEFAULT_ITEM_COL].count().max()
    if n_items - valid_neg_max < n_negative or n_items - test_neg_max < n_negative:
        raise RuntimeError(
            "This dataset do not have sufficient negative items for sampling! \n"
            + f"valid_neg_max: {n_items - valid_neg_max}, "
            + f"test_neg_max: {n_items - test_neg_max},"
            + f"n_negative: {n_negative}\nPlease directly use valid.npz and test.npz."
        )
    for i in range(n_test):
        # Each iteration re-samples negatives, producing n_test independent
        # evaluation copies (valid_i.npz / test_i.npz).
        tp_validate_new = feed_neg_sample(tp_validate, n_negative, item_sampler)
        tp_test_new = feed_neg_sample(tp_test, n_negative, item_sampler)
        save_split_data(
            tp_validate_new,
            save_dir,
            split_type,
            parameterized_path,
            "valid_" + str(i) + ".npz",
        )
        save_split_data(
            tp_test_new,
            save_dir,
            split_type,
            parameterized_path,
            "test_" + str(i) + ".npz",
        )
    return data
def generate_random_data(n_interaction, user_id, item_id):
    """Generate random data for testing.

    Generate random data for unit test.
    """
    n_order_buckets = 10  # order-id buckets per user (was misspelled "oder_id")
    users = np.random.randint(user_id, size=n_interaction)
    # Mix the user into the order id so each synthetic order belongs to one
    # user; presumably user_id < 100 so ids don't collide — verify in tests.
    orders = np.random.randint(n_order_buckets, size=n_interaction) * 100 + users
    timestamps = orders  # reuse order ids as comparable pseudo-timestamps
    items = np.random.randint(item_id, size=n_interaction)
    ratings = np.array([1] * n_interaction)  # implicit-feedback style ratings
    return pd.DataFrame(
        {
            DEFAULT_USER_COL: users,
            DEFAULT_ORDER_COL: orders,
            DEFAULT_TIMESTAMP_COL: timestamps,
            DEFAULT_ITEM_COL: items,
            DEFAULT_RATING_COL: ratings,
        }
    )
def generate_parameterized_path(
    test_rate=0, random=False, n_negative=100, by_user=False
):
    """Generate parameterized path.

    Encode parameters into path to differentiate different split parameters.

    Args:
        test_rate (float): percentage of the test data.
            Note that percentage of the validation data will be the same as testing.
        random (bool): Whether random leave one item/basket as testing. only for leave_one_out and leave_one_basket.
        n_negative (int): Number of negative samples for testing and validation data.
        by_user (bool): split by user.

    Returns:
        string: A string that encodes parameters, e.g.
        ``user_based_test_rate_10_random_n_neg_100``.
    """
    path_str = "user_based" if by_user else "full"
    # Encode the rate as an integer percentage (0.1 -> 10); avoid mutating
    # the test_rate parameter in place as the previous version did.
    test_percent = round(test_rate * 100)
    if test_percent != 0:
        path_str += "_test_rate_" + str(test_percent)
    if random:  # truthiness, not the fragile `random is True` identity check
        path_str += "_random"
    path_str += "_n_neg_" + str(n_negative)
    return path_str
|
#pragma once
#include "Module.h"
// Just a basic example of how a render system that utilizes the modules and cecsar could look like.
// Example render system built on the jecs Module base; demonstrates how a
// render system that utilizes the modules and cecsar could look like.
class BasicRenderSystem final : public jecs::Module<BasicRenderSystem>
{
public:
	// Anti Aliasing MSAA sample count.
	int32_t aaSamples = 4;

	// Render the models using multiple cameras and their respective post
	// processing stacks. (Implementation lives in the corresponding .cpp.)
	void Update();
};
|
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm

# Demo: 3D surface with a z-direction contour projection underneath.
fig = plt.figure()
# fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
#cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
#cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlabel('X')
ax.set_xlim(-40, 40)
ax.set_ylabel('Y')
ax.set_ylim(-40, 40)
ax.set_zlabel('Z')
ax.set_zlim(-100, 100)
plt.show()
|
from __future__ import absolute_import
from datetime import timedelta
from requests import RequestException
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount import providers
from allauth.socialaccount.helpers import (
complete_social_login,
render_authentication_error,
)
from allauth.socialaccount.models import SocialLogin, SocialToken
from allauth.socialaccount.providers.base import ProviderException
from allauth.socialaccount.providers.oauth2.client import OAuth2Client, OAuth2Error
from allauth.utils import build_absolute_uri, get_request_param
from ..base import AuthAction, AuthError
class OAuth2Adapter(object):
    """Base adapter describing how to talk to one OAuth2 provider.

    Subclasses override the class attributes with provider-specific endpoints
    and implement ``complete_login``.
    """

    # Client class used against the provider's token endpoint.
    client_cls = OAuth2Client
    # Key under which the token response reports the token lifetime.
    expires_in_key = "expires_in"
    # Whether the provider round-trips the OAuth2 ``state`` parameter.
    supports_state = True
    # Forced protocol for the redirect URI (None = derive from the request).
    redirect_uri_protocol = None
    # HTTP method for the access-token exchange.
    access_token_method = "POST"
    # Error code the provider returns when the user cancels the flow.
    login_cancelled_error = "access_denied"
    # Separator used when joining requested scopes.
    scope_delimiter = " "
    # Whether to authenticate to the token endpoint via HTTP Basic auth.
    basic_auth = False
    # Extra headers to send with token requests (None = none).
    headers = None

    def __init__(self, request):
        self.request = request

    def get_provider(self):
        # Resolve the registered provider instance for this adapter's id.
        return providers.registry.by_id(self.provider_id, self.request)

    def complete_login(self, request, app, access_token, **kwargs):
        """
        Returns a SocialLogin instance
        """
        raise NotImplementedError

    def get_callback_url(self, request, app):
        # Absolute URL the provider redirects back to after authorization.
        callback_url = reverse(self.provider_id + "_callback")
        protocol = self.redirect_uri_protocol
        return build_absolute_uri(request, callback_url, protocol)

    def parse_token(self, data):
        """Build a SocialToken from the provider's token response dict."""
        token = SocialToken(token=data["access_token"])
        # NOTE(review): the refresh token is stored in token_secret — confirm
        # downstream consumers read it from there.
        token.token_secret = data.get("refresh_token", "")
        expires_in = data.get(self.expires_in_key, None)
        if expires_in:
            token.expires_at = timezone.now() + timedelta(seconds=int(expires_in))
        return token

    def get_access_token_data(self, request, app, client):
        # Exchange the authorization code from the callback for token data.
        code = get_request_param(self.request, "code")
        return client.get_access_token(code)
class OAuth2View(object):
    """Base class turning an OAuth2Adapter into a Django view callable."""

    @classmethod
    def adapter_view(cls, adapter):
        """Return a Django view function bound to the given adapter class."""

        def view(request, *args, **kwargs):
            self = cls()
            self.request = request
            self.adapter = adapter(request)
            try:
                return self.dispatch(request, *args, **kwargs)
            except ImmediateHttpResponse as e:
                # Allauth signals early termination by raising a response.
                return e.response

        return view

    def get_client(self, request, app):
        # Assemble an OAuth2 client configured from the adapter settings and
        # the SocialApp credentials record.
        callback_url = self.adapter.get_callback_url(request, app)
        provider = self.adapter.get_provider()
        scope = provider.get_scope(request)
        client = self.adapter.client_cls(
            self.request,
            app.client_id,
            app.secret,
            self.adapter.access_token_method,
            self.adapter.access_token_url,
            callback_url,
            scope,
            key=app.key,
            cert=app.cert,
            scope_delimiter=self.adapter.scope_delimiter,
            headers=self.adapter.headers,
            basic_auth=self.adapter.basic_auth,
        )
        return client
class OAuth2LoginView(OAuth2View):
    """Starts the OAuth2 flow by redirecting to the provider's auth page."""

    def dispatch(self, request, *args, **kwargs):
        provider = self.adapter.get_provider()
        app = provider.get_app(self.request)
        client = self.get_client(request, app)
        action = request.GET.get('action', AuthAction.AUTHENTICATE)
        auth_url = self.adapter.authorize_url
        auth_params = provider.get_auth_params(request, action)
        # Stash login state (e.g. the next URL) so the callback can restore
        # it after the provider round-trip.
        client.state = SocialLogin.stash_state(request)
        try:
            return HttpResponseRedirect(client.get_redirect_url(
                auth_url, auth_params))
        except OAuth2Error as e:
            # Misconfiguration or provider failure while building the URL.
            return render_authentication_error(
                request,
                provider.id,
                exception=e)
class OAuth2CallbackView(OAuth2View):
    """Handles the provider redirect: exchanges the code for a token and
    completes the social login."""

    def dispatch(self, request, *args, **kwargs):
        auth_error = get_request_param(request, "error")
        code = get_request_param(request, "code")
        if auth_error or not code:
            # Distinguish cancel from error
            if auth_error == self.adapter.login_cancelled_error:
                error = AuthError.CANCELLED
            else:
                error = AuthError.UNKNOWN
            return render_authentication_error(
                request,
                self.adapter.provider_id,
                error=error)
        app = self.adapter.get_provider().get_app(self.request)
        client = self.get_client(self.request, app)

        try:
            # Exchange the authorization code for the raw token payload,
            # then parse it into a SocialToken.
            token_data = self.adapter.get_access_token_data(
                self.request, app=app, client=client
            )
            token = self.adapter.parse_token(data=token_data)
            token.app = app
            login = self.adapter.complete_login(
                request, app, token, response=token_data
            )
            login.token = token
            state = get_request_param(request, "state")
            if self.adapter.supports_state:
                # Validate the round-tripped state to guard against CSRF.
                login.state = SocialLogin.verify_and_unstash_state(request, state)
            else:
                login.state = SocialLogin.unstash_state(request)
            return complete_social_login(request, login)
        except (
            PermissionDenied,
            OAuth2Error,
            RequestException,
            ProviderException,
        ) as e:
            return render_authentication_error(
                request, self.adapter.provider_id, exception=e
            )
|
const mongoose = require('mongoose');
const Schema = mongoose.Schema;

// User fitness profile.
// NOTE(review): "heigth", "currentweigth" and "desiredweigth" are misspelled,
// but they are persisted document keys — renaming them would change the
// stored schema and needs a data migration, so they are kept as-is.
const ProfileSchema = new Schema({
  handle: {
    type: String,
    required: true,
    max: 40
  },
  country: {
    type: String
  },
  location: {
    type: String
  },
  birthdate: {
    type: String
  },
  social: {
    twitter: {
      type: String
    },
    facebook: {
      type: String
    }
  },
  genre: {
    type: String
  },
  heigth: {
    type: Schema.Types.Decimal128
  },
  currentweigth: {
    type: Schema.Types.Decimal128
  },
  desiredweigth: {
    type: Schema.Types.Decimal128
  },
  activity: {
    type: String
  },
  workoutinweek: {
    type: Number
  },
  workouttime: {
    type: Number
  },
  goal: {
    type: String
  },
  qty: {
    type: String
  },
  // Owning user account.
  user: {
    type: Schema.Types.ObjectId,
    ref: 'users'
  }
});

// Export the model directly; the old `module.exports = Profile = ...`
// pattern leaked `Profile` as an implicit global (a ReferenceError in
// strict mode).
module.exports = mongoose.model("profile", ProfileSchema);
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Angular $resource factories wrapping the Solr admin and per-core HTTP APIs.
// The cache-busting "_" parameter is evaluated once at registration time.
var solrAdminServices = angular.module('solrAdminServices', ['ngResource']);

solrAdminServices.factory('System',
  ['$resource', function($resource) {
    return $resource('/solr/admin/info/system', {"wt":"json", "_":Date.now()});
  }])
.factory('Collections',
  ['$resource', function($resource) {
    return $resource('/solr/admin/collections',
    {'wt':'json', '_':Date.now()}, {
      "list": {params:{action: "LIST"}},
      "status": {params:{action: "CLUSTERSTATUS"}},
      "add": {params:{action: "CREATE"}},
      "delete": {params:{action: "DELETE"}},
      "rename": {params:{action: "RENAME"}},
      "createAlias": {params:{action: "CREATEALIAS"}},
      "deleteAlias": {params:{action: "DELETEALIAS"}},
      "deleteReplica": {params:{action: "DELETEREPLICA"}},
      "addReplica": {params:{action: "ADDREPLICA"}},
      "reload": {method: "GET", params:{action:"RELOAD", core: "@core"}},
      "optimize": {params:{}}
    });
  }])
.factory('Cores',
  ['$resource', function($resource) {
    return $resource('/solr/admin/cores',
    {'wt':'json', '_':Date.now()}, {
      "query": {},
      "list": {params:{indexInfo: false}},
      "add": {params:{action: "CREATE"}},
      "unload": {params:{action: "UNLOAD", core: "@core"}},
      "rename": {params:{action: "RENAME"}},
      "swap": {params:{}},
      "reload": {method: "GET", params:{action:"RELOAD", core: "@core"}, headers:{doNotIntercept: "true"}},
      "optimize": {params:{}}
    });
  }])
.factory('Logging',
  ['$resource', function($resource) {
    return $resource('/solr/admin/info/logging', {'wt':'json', '_':Date.now()}, {
      "events": {params: {since:'0'}},
      "levels": {},
      "setLevel": {}
    });
  }])
.factory('Zookeeper',
  ['$resource', function($resource) {
    return $resource('/solr/admin/zookeeper', {wt:'json', _:Date.now()}, {
      "simple": {},
      "dump": {params: {dump: "true"}},
      "liveNodes": {params: {path: '/live_nodes'}},
      "clusterState": {params: {detail: "true", path: "/clusterstate.json"}},
      "detail": {params: {detail: "true", path: "@path"}},
      "configs": {params: {detail:false, path: "/configs/"}},
      "aliases": {params: {detail: "true", path: "/aliases.json"}, transformResponse:function(data) {
        // Unwrap the znode payload into a plain {aliases: ...} object.
        var znode = $.parseJSON(data).znode;
        if (znode.data) {
          return {aliases: $.parseJSON(znode.data).collection};
        } else {
          return {aliases: {}};
        }
      }}
    });
  }])
.factory('Properties',
  ['$resource', function($resource) {
    // NOTE: this factory was previously registered twice with an identical
    // body; the duplicate (which silently overrode this one) was removed.
    return $resource('/solr/admin/info/properties', {'wt':'json', '_':Date.now()});
  }])
.factory('Threads',
  ['$resource', function($resource) {
    return $resource('/solr/admin/info/threads', {'wt':'json', '_':Date.now()});
  }])
.factory('Replication',
  ['$resource', function($resource) {
    return $resource('/solr/:core/replication', {'wt':'json', core: "@core", '_':Date.now()}, {
      "details": {params: {command: "details"}},
      "command": {params: {}}
    });
  }])
.factory('CoreSystem',
  ['$resource', function($resource) {
    return $resource('/solr/:core/admin/system', {wt:'json', core: "@core", _:Date.now()});
  }])
.factory('Update',
  ['$resource', function($resource) {
    return $resource('/solr/:core/:handler', {core: '@core', wt:'json', _:Date.now(), handler:'update'}, {
      "optimize": {params: { optimize: "true"}},
      "commit": {params: {commit: "true"}},
      "post": {headers: {'Content-type': 'application/json'}, method: "POST", params: {handler: '@handler'}},
      "postJson": {headers: {'Content-type': 'application/json'}, method: "POST", params: {handler: '@handler'}},
      "postXml": {headers: {'Content-type': 'text/xml'}, method: "POST", params: {handler: '@handler'}},
      "postCsv": {headers: {'Content-type': 'application/csv'}, method: "POST", params: {handler: '@handler'}}
    });
  }])
.service('FileUpload', function ($http) {
  // Multipart upload helper; core/handler/raw are consumed here, the rest of
  // params become the query string.
  this.upload = function(params, file, success, error){
    var url = "/solr/" + params.core + "/" + params.handler + "?";
    var raw = params.raw; // `var` added: was an accidental implicit global
    delete params.core;
    delete params.handler;
    delete params.raw;
    url += $.param(params);
    if (raw && raw.length>0) {
      if (raw[0] != "&") raw = "&" + raw;
      url += raw;
    }
    var fd = new FormData();
    fd.append('file', file);
    $http.post(url, fd, {
      // Let the browser set the multipart boundary itself.
      transformRequest: angular.identity,
      headers: {'Content-Type': undefined}
    }).success(success).error(error);
  }
})
.factory('Luke',
  ['$resource', function($resource) {
    return $resource('/solr/:core/admin/luke', {core: '@core', wt:'json', _:Date.now()}, {
      "index": {params: {numTerms: 0, show: 'index'}},
      "raw": {params: {numTerms: 0}},
      "schema": {params: {show:'schema'}},
      "field": {},
      "fields": {params: {show:'schema'}, interceptor: {
        response: function(response) {
          // Flatten schema fields and types into one autocomplete list.
          var fieldsAndTypes = [];
          for (var field in response.data.schema.fields) {
            fieldsAndTypes.push({group: "Fields", label: field, value: "fieldname=" + field});
          }
          for (var type in response.data.schema.types) {
            fieldsAndTypes.push({group: "Types", label: type, value: "fieldtype=" + type});
          }
          return fieldsAndTypes;
        }
      }}
    });
  }])
.factory('Analysis',
  ['$resource', function($resource) {
    return $resource('/solr/:core/analysis/field', {core: '@core', wt:'json', _:Date.now()}, {
      "field": {params: {"analysis.showmatch": true}}
    });
  }])
.factory('DataImport',
  ['$resource', function($resource) {
    return $resource('/solr/:core/dataimport', {core: '@core', indent:'on', wt:'json', _:Date.now()}, {
      "config": {params: {command: "show-config"}, headers: {doNotIntercept: "true"},
        transformResponse: function(data) {
          return {config: data};
        }
      },
      "status": {params: {command: "status"}, headers: {doNotIntercept: "true"}},
      "reload": {params: {command: "reload-config"}},
      "post": {method: "POST",
        headers: {'Content-type': 'application/x-www-form-urlencoded'},
        transformRequest: function(data) { return $.param(data) }}
    });
  }])
.factory('Ping',
  ['$resource', function($resource) {
    return $resource('/solr/:core/admin/ping', {wt:'json', core: '@core', ts:Date.now(), _:Date.now()}, {
      "ping": {},
      "status": {params:{action:"status"}, headers: {doNotIntercept: "true"}
    }});
  }])
.factory('Mbeans',
  ['$resource', function($resource) {
    return $resource('/solr/:core/admin/mbeans', {'wt':'json', core: '@core', '_':Date.now()}, {
      stats: {params: {stats: true}},
      info: {},
      reference: {
        params: {wt: "xml", stats: true}, transformResponse: function (data) {
          return {reference: data}
        }
      },
      delta: {method: "POST",
        params: {stats: true, diff:true},
        headers: {'Content-type': 'application/x-www-form-urlencoded'},
        transformRequest: function(data) {
          return "stream.body=" + encodeURIComponent(data);
        }
      }
    });
  }])
.factory('Files',
  ['$resource', function($resource) {
    return $resource('/solr/:core/admin/file', {'wt':'json', core: '@core', '_':Date.now()}, {
      "list": {},
      "get": {method: "GET", interceptor: {
        response: function(config) {return config;}
      }}
    });
  }])
.factory('Query',
  ['$resource', function($resource) {
    var resource = $resource('/solr/:core:handler', {core: '@core', handler: '@handler', '_':Date.now()}, {
      "query": {
        method: "GET",
        transformResponse: function (data) {
          return {data: data}
        },
        headers: {doNotIntercept: "true"}
      }
    });
    // Build a shareable URL for the given query parameters.
    resource.url = function(params) {
      var qs = [];
      for (var key in params) { // `var` added: was an accidental implicit global
        if (key != "core" && key != "handler") {
          for (var i in params[key]) {
            qs.push(key + "=" + params[key][i]);
          }
        }
      }
      return "/solr/" + params.core + params.handler + "?" + qs.sort().join("&");
    }
    return resource;
  }])
.factory('Segments',
  ['$resource', function($resource) {
    return $resource('/solr/:core/admin/segments', {'wt':'json', core: '@core', _:Date.now()}, {
      get: {}
    });
  }])
.factory('Schema',
  ['$resource', function($resource) {
    return $resource('/solr/:core/schema', {wt: 'json', core: '@core', _:Date.now()}, {
      get: {method: "GET"},
      check: {method: "GET", headers: {doNotIntercept: "true"}},
      post: {method: "POST"}
    });
  }])
.factory('Config',
  ['$resource', function($resource) {
    return $resource('/solr/:core/config', {wt: 'json', core: '@core', _:Date.now()}, {
      get: {method: "GET"}
    })
  }]);
|
// Copyright 2012 Mark Cavage, Inc. All rights reserved.
'use strict';
var crypto = require('crypto');
var zlib = require('zlib');
var assert = require('assert-plus');
var once = require('once');
var errors = require('restify-errors');
///--- Globals
var BadDigestError = errors.BadDigestError;
var RequestEntityTooLargeError = errors.RequestEntityTooLargeError;
var PayloadTooLargeError = errors.PayloadTooLargeError;
var UnsupportedMediaTypeError = errors.UnsupportedMediaTypeError;
var MD5_MSG = "Content-MD5 '%s' didn't match '%s'";
///--- Helpers
/**
 * Creates a writer that accumulates raw body chunks for a request and, on
 * end(), exposes the concatenated body on `req.body` — as a UTF-8 string for
 * textual/structured content types, otherwise as a Buffer.
 *
 * @private
 * @param {Object} req - the request to attach the body to
 * @returns {Object} an object with `write(chunk)` and `end()` methods
 */
function createBodyWriter(req) {
    var buffers = [];
    var contentType = req.contentType();
    var isText = false;

    // Missing content type and common structured/text types are decoded to a
    // string; anything else is left as binary.
    if (
        !contentType ||
        contentType === 'application/json' ||
        contentType === 'application/x-www-form-urlencoded' ||
        contentType === 'multipart/form-data' ||
        contentType.substr(0, 5) === 'text/'
    ) {
        isText = true;
    }

    // Buffer.alloc(0) replaces the deprecated (and potentially unsafe)
    // `new Buffer(0)` constructor.
    req.body = Buffer.alloc(0);
    return {
        write: function write(chunk) {
            buffers.push(chunk);
        },
        end: function end() {
            req.body = Buffer.concat(buffers);

            if (isText) {
                req.body = req.body.toString('utf8');
            }
        }
    };
}
///--- API
/**
* Reads the body of the request.
*
* @public
* @function bodyReader
* @throws {BadDigestError | PayloadTooLargeError}
* @param {Object} options - an options object
* @returns {Function} Handler
*/
function bodyReader(options) {
    var opts = options || {};
    assert.object(opts, 'opts');

    // 0 means "no limit".
    var maxBodySize = opts.maxBodySize || 0;

    /**
     * Buffers the request body onto req.body, transparently gunzipping when
     * Content-Encoding is gzip and verifying Content-MD5 when present.
     * Skips bodies for empty, multipart, and octet-stream requests.
     *
     * @param {Object} req - request
     * @param {Object} res - response
     * @param {Function} originalNext - next handler in the chain
     * @returns {undefined} calls next() when done or on error
     */
    function readBody(req, res, originalNext) {
        // next() may be reached from several event handlers (end/error/close/
        // aborted) — once() guarantees the chain advances exactly one time.
        var next = once(originalNext);

        // #100 don't read the body again if we've read it once
        if (req._readBody) {
            next();
            return;
        } else {
            req._readBody = true;
        }

        // Nothing to buffer: empty non-chunked bodies, and types that are
        // handled by other parsers / streamed directly.
        if (
            (req.getContentLength() === 0 && !req.isChunked()) ||
            req.contentType() === 'multipart/form-data' ||
            req.contentType() === 'application/octet-stream'
        ) {
            next();
            return;
        }
        var bodyWriter = createBodyWriter(req);

        var bytesReceived = 0;
        var digest;
        var gz;
        var hash;
        var md5;
        var unsupportedContentEncoding;

        // Only compute an MD5 digest if the client claimed one.
        if ((md5 = req.headers['content-md5'])) {
            hash = crypto.createHash('md5');
        }

        // Invoked once the full (possibly gunzipped) body has been received;
        // validates encoding, size, and digest before advancing the chain.
        function done() {
            bodyWriter.end();

            if (unsupportedContentEncoding) {
                next(
                    new UnsupportedMediaTypeError(
                        {
                            info: {
                                contentEncoding: unsupportedContentEncoding
                            }
                        },
                        'content encoding not supported'
                    )
                );
                return;
            }

            if (maxBodySize && bytesReceived > maxBodySize) {
                var msg = 'Request body size exceeds ' + maxBodySize;
                var err;

                // Between Node 0.12 and 4 http status code messages changed
                // RequestEntityTooLarge was changed to PayloadTooLarge
                // this check is to maintain backwards compatibility
                if (PayloadTooLargeError !== undefined) {
                    err = new PayloadTooLargeError(msg);
                } else {
                    err = new RequestEntityTooLargeError(msg);
                }
                next(err);
                return;
            }

            if (!req.body.length) {
                next();
                return;
            }

            // Compare the computed digest against the Content-MD5 header.
            if (hash && md5 !== (digest = hash.digest('base64'))) {
                next(new BadDigestError(MD5_MSG, md5, digest));
                return;
            }

            next();
        }

        if (req.headers['content-encoding'] === undefined) {
            // This handles the original else branch
            req.once('end', done);
        } else if (req.headers['content-encoding'] === 'gzip') {
            // Route raw chunks through the gunzip stream; done() fires when
            // the decompressed stream ends.
            gz = zlib.createGunzip();
            gz.on('data', bodyWriter.write);
            gz.once('end', done);
            req.once('end', gz.end.bind(gz));
        } else {
            // Unknown encoding: remember it for done(), advertise what we do
            // support, and drain the request without buffering usefully.
            unsupportedContentEncoding = req.headers['content-encoding'];
            res.setHeader('Accept-Encoding', 'gzip');
            req.once('end', done);
        }

        req.on('data', function onRequestData(chunk) {
            if (maxBodySize) {
                bytesReceived += chunk.length;

                // Past the limit: stop buffering, but keep counting so done()
                // can report the overflow.
                if (bytesReceived > maxBodySize) {
                    return;
                }
            }

            // Digest is computed over the raw (pre-gunzip) bytes.
            if (hash) {
                hash.update(chunk, 'binary');
            }

            if (gz) {
                gz.write(chunk);
            } else {
                bodyWriter.write(chunk);
            }
        });

        req.once('error', next);
        // add 'close and 'aborted' event handlers so that requests (and their
        // corresponding memory) don't leak if client stops sending data half
        // way through a POST request
        req.socket.once('close', next);
        req.once('aborted', next);
        req.resume();
    }

    return readBody;
}
module.exports = bodyReader;
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
import inspect
import os
from telemetry.page import page as page_module
from telemetry.page import page_set_archive_info
from telemetry.user_story import user_story_set
from telemetry.util import cloud_storage
PUBLIC_BUCKET = cloud_storage.PUBLIC_BUCKET
PARTNER_BUCKET = cloud_storage.PARTNER_BUCKET
INTERNAL_BUCKET = cloud_storage.INTERNAL_BUCKET
class PageSetError(Exception):
  """Raised for errors related to constructing or using a page set."""
class PageSet(user_story_set.UserStorySet):
  """A collection of Page user stories plus the shared metadata needed to
  record and replay them: the WPR archive mapping, serving directories,
  user-agent type, and the cloud-storage privacy bucket.
  """

  def __init__(self, file_path=None, archive_data_file='', user_agent_type=None,
               make_javascript_deterministic=True, startup_url='',
               serving_dirs=None, bucket=None):
    super(PageSet, self).__init__()
    # The default value of file_path is location of the file that define this
    # page set instance's class.
    if file_path is None:
      file_path = inspect.getfile(self.__class__)
      # Turn pyc file into py files if we can
      if file_path.endswith('.pyc') and os.path.exists(file_path[:-1]):
        file_path = file_path[:-1]
    self.file_path = file_path

    # These attributes can be set dynamically by the page set.
    self.archive_data_file = archive_data_file
    self.user_agent_type = user_agent_type
    self.make_javascript_deterministic = make_javascript_deterministic
    self._wpr_archive_info = None  # lazily built by the wpr_archive_info property
    self.startup_url = startup_url
    self.user_stories = []
    # Convert any relative serving_dirs to absolute paths.
    self._serving_dirs = set(os.path.realpath(os.path.join(self.base_dir, d))
                             for d in serving_dirs or [])
    if self._IsValidPrivacyBucket(bucket):
      self._bucket = bucket
    else:
      raise ValueError("Pageset privacy bucket %s is invalid" % bucket)

  @property
  def pages(self):
    # Backwards-compatible alias: pages are stored as user stories.
    return self.user_stories

  def AddUserStory(self, user_story):
    # Only Page instances that were created against this set may be added.
    assert isinstance(user_story, page_module.Page)
    assert user_story.page_set is self
    super(PageSet, self).AddUserStory(user_story)

  def AddPage(self, page):
    self.AddUserStory(page)

  def AddPageWithDefaultRunNavigate(self, page_url):
    """ Add a simple page with url equals to page_url that contains only default
    RunNavigateSteps.
    """
    self.AddUserStory(page_module.Page(
        page_url, self, self.base_dir))

  @staticmethod
  def _IsValidPrivacyBucket(bucket_name):
    # None means "no cloud storage"; otherwise the bucket must be one of the
    # three known privacy levels.
    return bucket_name in (None, PUBLIC_BUCKET, PARTNER_BUCKET, INTERNAL_BUCKET)

  @property
  def base_dir(self):
    # Directory that contains the page-set definition file.
    if os.path.isfile(self.file_path):
      return os.path.dirname(self.file_path)
    else:
      return self.file_path

  @property
  def serving_dirs(self):
    return self._serving_dirs

  @property
  def wpr_archive_info(self):  # pylint: disable=E0202
    """Lazily constructs wpr_archive_info if it's not set and returns it."""
    if self.archive_data_file and not self._wpr_archive_info:
      self._wpr_archive_info = (
          page_set_archive_info.PageSetArchiveInfo.FromFile(
              os.path.join(self.base_dir, self.archive_data_file)))
    return self._wpr_archive_info

  @property
  def bucket(self):
    return self._bucket

  @wpr_archive_info.setter
  def wpr_archive_info(self, value):  # pylint: disable=E0202
    self._wpr_archive_info = value

  def ContainsOnlyFileURLs(self):
    # True when every page in this set is served from a local file:// URL.
    for page in self.user_stories:
      if not page.is_file:
        return False
    return True

  def ReorderPageSet(self, results_file):
    """Reorders this page set based on the results of a past run."""
    # NOTE(review): matched pages are re-added to this set via AddPage, but
    # the local `user_stories` list below is never appended to, so the return
    # value is always an empty list — confirm whether returning the reordered
    # pages was intended.
    page_set_dict = {}
    for page in self.user_stories:
      page_set_dict[page.url] = page
    user_stories = []
    with open(results_file, 'rb') as csv_file:
      csv_reader = csv.reader(csv_file)
      # Python 2 iterator API (`.next()`); this module targets Python 2.
      csv_header = csv_reader.next()
      if 'url' not in csv_header:
        raise Exception('Unusable results_file.')
      url_index = csv_header.index('url')
      for csv_row in csv_reader:
        if csv_row[url_index] in page_set_dict:
          self.AddPage(page_set_dict[csv_row[url_index]])
        else:
          raise Exception('Unusable results_file.')
    return user_stories

  def WprFilePathForPage(self, page):
    # None when this set has no archive data at all.
    if not self.wpr_archive_info:
      return None
    return self.wpr_archive_info.WprFilePathForPage(page)
|
import React, { Component } from 'react';
import Link from 'gatsby-link';
import { Image } from 'semantic-ui-react';
import banner from './banner.png';
class Header extends Component {
constructor() {
super();
}
render() {
return (
<div style={{
margin: '0 auto',
maxWidth: 960,
padding: '0px 1.0875rem 1.45rem',
paddingTop: 0,
}}>
<Link to='/'>
<Image src={banner} fluid />
</Link>
</div>
)
}
}
export default Header
|
from shutil import rmtree
from pathlib import Path
from sys import getsizeof
from os.path import exists
from os import mkdir, getcwd
from tqdm import tqdm
from neo4j import GraphDatabase
from neo4j.exceptions import ServiceUnavailable
from ._backends import to_json, get_unique_prop_key
class Extractor:
    """Dumps every node, relationship, constraint and piece of metadata from a
    Neo4j database into JSON files under ``project_dir`` so the graph can be
    re-imported later.
    """

    def __init__(self, project_dir, driver: GraphDatabase.driver, database: str = "neo4j", input_yes: bool = False,
                 compress: bool = True):
        """
        The purpose of this class is to extract all the information from a neo4j graph

        :param project_dir: The directory where to backup Neo4j Graph
        :param driver: Neo4j driver
        :param database: name of the database to extract from (defaults to "neo4j")
        :param input_yes: bool, determines whether to just type in "y" for all input options. Be careful when running
            this option
        :param compress: bool, whether or not to compress files as they are being extracted
        """
        self.project_dir: Path = Path(getcwd()) / project_dir
        self.data_dir: Path = self.project_dir / 'data'
        self.driver: GraphDatabase.driver = driver
        self.database: str = database
        self.input_yes: bool = input_yes
        self.compress: bool = compress
        # Accumulated during extraction and persisted as metadata JSON files.
        self.property_keys: set = set()
        self.labels: set = set()
        self.rel_types: set = set()
        self.constraints: list = []
        self.constraints_names: list = []
        self.db_id: str = ""
        self.json_file_size: int = int("0xFFFF", 16)  # Default size of json objects in memory

    def extract_data(self):
        """Run the full extraction: verify connectivity and non-emptiness,
        (re)create the project directory (prompting unless ``input_yes``),
        then dump nodes, relationships and metadata to JSON files.

        :raises UserWarning: if the project directory exists and the user declines to replace it
        """
        self._test_connection()
        self._verify_db_not_empty()
        self._pull_db_id()  # Get ID of database
        if exists(self.project_dir):
            if self.input_yes:
                rmtree(self.project_dir)
            else:
                user_input = input(f"The directory {self.project_dir} already exist, would you like to replace the "
                                   f"directory? (y/N)\n")
                if user_input.lower() == "y":
                    rmtree(self.project_dir)
                else:
                    raise UserWarning("Aborted, project_dir directory already exists")
        mkdir(self.project_dir)
        mkdir(self.data_dir)
        self._pull_constraints()  # get constraints of database
        self._pull_lonely_nodes()  # get nodes in database
        self._pull_relationships()  # get relationship in database
        # calculate a unique prop key to act a dummy id prop for importing
        unique_prop_key = self._calc_unique_prop_key()
        # Store meta data
        to_json(file_path=self.project_dir / f"db_id.json", data=self.db_id)
        to_json(file_path=self.project_dir / f"unique_prop_key.json", data=unique_prop_key)
        to_json(file_path=self.project_dir / f"constraints.json", data=self.constraints)
        to_json(file_path=self.project_dir / f"constraints_names.json", data=self.constraints_names)
        to_json(file_path=self.project_dir / "property_keys.json", data=list(self.property_keys))
        to_json(file_path=self.project_dir / "labels.json", data=list(self.labels))
        to_json(file_path=self.project_dir / "types.json", data=list(self.rel_types))
        to_json(file_path=self.project_dir / "compressed.json", data=self.compress)

    def _test_connection(self):
        """Re-raise ServiceUnavailable with a friendlier message if the driver
        cannot reach the target database."""
        try:
            with self.driver.session(database=self.database) as session:
                session.run("MATCH (a) RETURN a LIMIT 1")
        except ServiceUnavailable:
            raise ServiceUnavailable("Unable to connect to database. If this is a local database, make sure the "
                                     "database is running. If this is a remote database, make sure the correct "
                                     "database is referenced.")

    def _verify_db_not_empty(self):
        """Raise LookupError if the database contains no nodes at all."""
        with self.driver.session(database=self.database) as session:
            results = session.run("MATCH (a) RETURN a LIMIT 1").data()
            if not results:
                raise LookupError("There is not data to pull from the database, make sure the correct database is "
                                  "referenced/running.")

    def _pull_db_id(self):
        """Store the database's unique id on ``self.db_id``."""
        with self.driver.session(database=self.database) as session:
            results = session.run("CALL db.info")
            for result in results:
                self.db_id = dict(result)['id']

    def _pull_constraints(self):
        """Collect every constraint's description and name from the database."""
        with self.driver.session(database=self.database) as session:
            results = session.run("CALL db.constraints")
            for result in results:
                self.constraints.append(dict(result)['description'])
                self.constraints_names.append(dict(result)['name'])

    @staticmethod
    def __parse_node__(node):
        # Split a neo4j Node into (internal id, ':'-joined label string,
        # property dict).
        node_id = node.id
        node_labels = ":".join(list(node.labels))
        node_props = dict(node)
        return node_id, node_labels, node_props

    def _pull_lonely_nodes(self):
        """Extract nodes that have no relationships, spilling batches to JSON
        whenever the in-memory list grows past ``self.json_file_size``."""
        query = """
        MATCH (node)
        WHERE NOT (node)-[]-()
        RETURN node
        """
        extracted_data = []
        with self.driver.session(database=self.database) as session:
            number_of_nodes = session.run("MATCH (node) WHERE NOT (node)-[]-() RETURN COUNT(node)").value()[0]
            results = session.run(query)
            for index, record in enumerate(tqdm(results, total=number_of_nodes, desc="Extracting Nodes")):
                # Base node object
                node = record['node']
                node_id, node_labels, node_props = self.__parse_node__(node)
                self.property_keys.update(node_props.keys())
                self.labels.add(node_labels)
                row = {'node_id': node_id, 'node_labels': node_labels, 'node_props': node_props}
                extracted_data.append(row)
                # Size check is only performed every 1000 records to keep the
                # getsizeof overhead out of the common path.
                if index % 1000 == 0 and index != 0:
                    size_in_ram = getsizeof(extracted_data)
                    if size_in_ram > self.json_file_size:
                        to_json(self.data_dir / f"lonely_nodes_{index}.json", extracted_data, compress=self.compress)
                        extracted_data = []
        # dump and compress remaining data
        if extracted_data:
            to_json(self.data_dir / f"lonely_nodes_{index}.json", extracted_data, compress=self.compress)

    def _pull_relationships(self):
        """Extract every relationship together with its start and end nodes,
        spilling batches to JSON like :meth:`_pull_lonely_nodes`."""
        query = """
        MATCH (start_node)-[rel]->(end_node)
        RETURN start_node, end_node, rel
        """
        extracted_data = []
        with self.driver.session(database=self.database) as session:
            number_of_relationships = session.run("MATCH p=(start_node)-[rel]->(end_node) RETURN COUNT(p)").value()[0]
            results = session.run(query)
            for index, record in enumerate(tqdm(results, total=number_of_relationships,
                                                desc="Extracting Relationships")):
                # Gather starting_node
                start_node = record['start_node']
                start_node_id, start_node_labels, start_node_props = self.__parse_node__(start_node)
                self.property_keys.update(start_node_props.keys())
                self.labels.add(start_node_labels)
                # Gather ending_node
                end_node = record['end_node']
                end_node_id, end_node_labels, end_node_props = self.__parse_node__(end_node)
                self.property_keys.update(end_node_props.keys())
                self.labels.add(end_node_labels)
                # Gather relationship
                rel = record['rel']
                rel_type, rel_props = rel.type, dict(rel)
                self.property_keys.update(rel_props.keys())
                self.rel_types.add(rel_type)
                row = {'start_node_id': start_node_id, 'start_node_labels': start_node_labels,
                       'start_node_props': start_node_props,
                       'end_node_id': end_node_id, 'end_node_labels': end_node_labels,
                       'end_node_props': end_node_props,
                       'rel_type': rel_type, 'rel_props': rel_props}
                extracted_data.append(row)
                if index % 1000 == 0 and index != 0:
                    size_in_ram = getsizeof(extracted_data)
                    if size_in_ram > self.json_file_size:
                        to_json(self.data_dir / f"relationships_{index}.json", extracted_data, compress=self.compress)
                        extracted_data = []
        # dump and compress remaining data
        if extracted_data:
            to_json(self.data_dir / f"relationships_{index}.json", extracted_data, compress=self.compress)

    def _calc_unique_prop_key(self):
        """Return a property key that collides with no existing property key
        or constraint name."""
        keys_to_avoid = self.property_keys.copy()
        keys_to_avoid.update(self.constraints_names)
        # Neo4j's built in IDs can change as new entities are added. So, a unique property is generated where the
        # pulled ids are placed temporarily. A unique property is calculated because we do not want to 'create' a
        # dummy property that the user actually uses.
        unique_prop_key = get_unique_prop_key(keys_to_avoid)
        return unique_prop_key
|
import os
import time
import torch
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from misc import util, ops
from network.model import Glow
class Trainer:
    """Drives Glow training: the epoch/batch loop, optional class-conditional
    loss, scalar/image logging, checkpointing, reconstruction and sampling."""

    # Maps criterion name -> loss callable over (y_logits, target).
    # NOTE(review): 'multi_class' also dispatches to Glow.single_class_loss —
    # confirm whether a dedicated multi-class loss was intended here.
    criterion_dict = {
        'single_class': lambda y_logits, y: Glow.single_class_loss(y_logits, y),
        'multi_class': lambda y_logits, y_onehot: Glow.single_class_loss(y_logits, y_onehot)
    }

    def __init__(self, hps, result_subdir,
                 step, graph, optimizer, scheduler, devices,
                 dataset, data_device):
        """
        Network trainer

        :param hps: hyper-parameters for this network
        :type hps: dict
        :param result_subdir: path to result sub-directory
        :type result_subdir: str
        :param step: global step of model
        :type step: int
        :param graph: model graph
        :type graph: torch.nn.Module
        :param optimizer: optimizer
        :type optimizer: torch.optim.Optimizer
        :param scheduler: learning rate scheduler
        :type scheduler: function
        :param devices: list of usable devices for model running
        :type devices: list
        :param dataset: dataset for training model
        :type dataset: torch.utils.data.Dataset
        :param data_device: device each batch is moved to before use
        :type data_device: torch.device or str
        """
        # general
        self.hps = hps
        self.result_subdir = result_subdir
        self.start_time = time.time()
        # state
        self.step = step
        self.graph = graph
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.devices = devices
        # data
        self.data_device = data_device
        self.batch_size = self.hps.optim.num_batch_train
        self.num_classes = self.hps.dataset.num_classes
        self.data_loader = DataLoader(dataset, batch_size=self.batch_size,
                                      num_workers=self.hps.dataset.num_workers,
                                      shuffle=True,
                                      drop_last=True)
        self.num_epochs = self.hps.optim.num_epochs
        # ablation
        self.y_condition = self.hps.ablation.y_condition
        if self.y_condition:
            self.y_criterion = self.hps.ablation.y_criterion
            assert self.y_criterion in self.criterion_dict.keys(), "Unsupported criterion: {}".format(self.y_criterion)
        self.max_grad_clip = self.hps.ablation.max_grad_clip
        self.max_grad_norm = self.hps.ablation.max_grad_norm
        # logging
        self.writer = SummaryWriter(log_dir=self.result_subdir)
        self.interval_scalar = self.hps.optim.interval_scalar
        self.interval_snapshot = self.hps.optim.interval_snapshot
        self.interval_valid = self.hps.optim.interval_valid
        self.interval_sample = self.hps.optim.interval_sample
        self.num_sample = self.hps.optim.num_sample

    def train(self):
        """
        Train network
        """
        self.graph.train()
        for epoch in range(self.num_epochs):
            print('[Trainer] Epoch ({}/{})'.format(epoch, self.num_epochs))
            progress = tqdm(self.data_loader)
            for idx, batch in enumerate(progress):
                # update learning rate
                lr = self.scheduler(global_step=self.step)
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = lr
                self.optimizer.zero_grad()
                if self.step % self.interval_scalar == 0 and self.step > 0:
                    self.writer.add_scalar('lr/lr', lr, self.step)
                # extract batch data - may ignore class annotations
                # NOTE(review): batch is indexed both positionally (batch[0])
                # and by key ('y'/'y_onehot') — assumes a mapping-like batch;
                # confirm against the dataset's __getitem__.
                batch[0] = batch[0].to(self.data_device)
                x = batch[0]
                y = None
                y_onehot = None
                if self.y_condition:
                    if self.y_criterion == 'single_class':
                        assert 'y' in batch.keys(), 'Single-class criterion needs "y" in batch data'
                        y = batch['y']
                        y_onehot = ops.onehot(y, self.num_classes)
                    else:
                        assert 'y_onehot' in batch.keys(), 'Multi-class criterion needs "y_onehot" in batch data'
                        y_onehot = batch['y_onehot']
                # initialize actnorm layer at first
                # (a single per-device slice is forwarded before DataParallel
                # wrapping so data-dependent initialization sees one shard)
                if self.step == 0:
                    self.graph(x=x[:self.batch_size // len(self.devices), ...],
                               y_onehot=y_onehot[:self.batch_size // len(self.devices), ...]
                               if y_onehot is not None else None)
                # data parallel
                if len(self.devices) > 1 and not hasattr(self.graph, 'module'):
                    self.graph = torch.nn.parallel.DataParallel(module=self.graph,
                                                                device_ids=self.devices,
                                                                output_device=self.devices[0])
                # forward model
                z, nll, y_logits = self.graph(x=x, y_onehot=y_onehot)
                # compute loss
                generative_loss = Glow.generative_loss(nll)
                classification_loss = 0
                if self.y_condition:
                    classification_loss = self.criterion_dict[self.y_criterion](y_logits,
                                                                                y if self.y_criterion == 'single_class' else y_onehot)
                loss = generative_loss + classification_loss * self.hps.model.weight_y
                if self.step % self.interval_scalar == 0 and self.step > 0:
                    self.writer.add_scalar('loss/generative_loss', generative_loss, self.step)
                    if self.y_condition:
                        self.writer.add_scalar('loss/classification_loss', classification_loss, self.step)
                # backward model
                self.graph.zero_grad()
                self.optimizer.zero_grad()
                loss.backward()
                # gradient operation
                if self.max_grad_clip is not None and self.max_grad_clip > 0:
                    torch.nn.utils.clip_grad_value_(self.graph.parameters(), self.max_grad_clip)
                if self.max_grad_norm is not None and self.max_grad_norm > 0:
                    grad_norm = torch.nn.utils.clip_grad_norm_(self.graph.parameters(), self.max_grad_norm)
                    if self.step % self.interval_scalar == 0 and self.step > 0:
                        self.writer.add_scalar("grad_norm/grad_norm", grad_norm, self.step)
                # optimize
                self.optimizer.step()
                # snapshot
                if self.step % self.interval_snapshot == 0 and self.step > 0:
                    util.save_model(result_subdir=self.result_subdir,
                                    step=self.step,
                                    graph=self.graph,
                                    optimizer=self.optimizer,
                                    seconds=time.time() - self.start_time,
                                    is_best=True)
                with torch.no_grad():
                    # valid: reconstruct the current batch from its latents
                    # and log reconstruction/input image pairs
                    if self.step % self.interval_valid == 0 and self.step > 0:
                        img = self.graph(z=z, y_onehot=y_onehot, reverse=True)
                        for i in range(min(self.num_sample, img.shape[0])):
                            self.writer.add_image("reconstructed/{}".format(i),
                                                  ops.cat_channel(img[i]/2+0.5, batch[0][i]/2+0.5),
                                                  self.step)
                    # sample: draw fresh latents (eps_std=0.7) and log images
                    if self.step % self.interval_sample == 0 and self.step > 0:
                        img = self.graph(z=None, y_onehot=y_onehot, eps_std=0.7, reverse=True)
                        for i in range(min(self.num_sample, img.shape[0])):
                            self.writer.add_image("sample/{}".format(i),
                                                  img[i]/2+0.5, self.step)
                self.step += 1
        self.writer.export_scalars_to_json(os.path.join(self.result_subdir, "all_scalars.json"))
        self.writer.close()
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for common roidb manipulations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import logging
import numpy as np
import sys
import utils.boxes as box_utils
import utils.keypoints as keypoint_utils
import utils.segms as segm_utils
import utils.blob as blob_utils
from core.config import cfg
from .json_dataset import JsonDataset
logger = logging.getLogger(__name__)
def combined_roidb_for_training(dataset_names, proposal_files):
    """Load and concatenate roidbs for one or more datasets, along with optional
    object proposals. The roidb entries are then prepared for use in training,
    which involves caching certain types of metadata for each roidb entry.

    Returns a tuple (roidb, ratio_list, ratio_index); the ratio outputs are
    None unless aspect grouping/cropping is enabled in the config.
    """
    def get_roidb(dataset_name, proposal_file):
        # Load one dataset's roidb (with ground truth) and optionally append
        # horizontally-flipped copies of each entry for data augmentation.
        ds = JsonDataset(dataset_name)
        roidb = ds.get_roidb(
            gt=True,
            proposal_file=proposal_file,
            crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
        )
        if cfg.TRAIN.USE_FLIPPED:
            logger.info('Appending horizontally-flipped training examples...')
            extend_with_flipped_entries(roidb, ds)
        logger.info('Loaded dataset: {:s}'.format(ds.name))
        return roidb

    # Accept single names as well as sequences of names.
    if isinstance(dataset_names, six.string_types):
        dataset_names = (dataset_names, )
    if isinstance(proposal_files, six.string_types):
        proposal_files = (proposal_files, )
    if len(proposal_files) == 0:
        # No proposals at all: pair each dataset with None.
        proposal_files = (None, ) * len(dataset_names)
    assert len(dataset_names) == len(proposal_files)
    roidbs = [get_roidb(*args) for args in zip(dataset_names, proposal_files)]
    # Concatenate all datasets' entries into one roidb.
    roidb = roidbs[0]
    for r in roidbs[1:]:
        roidb.extend(r)
    roidb = filter_for_training(roidb)

    if cfg.TRAIN.ASPECT_GROUPING or cfg.TRAIN.ASPECT_CROPPING:
        logger.info('Computing image aspect ratios and ordering the ratios...')
        ratio_list, ratio_index = rank_for_training(roidb)
        logger.info('done')
    else:
        ratio_list, ratio_index = None, None

    logger.info('Computing bounding-box regression targets...')
    add_bbox_regression_targets(roidb)
    logger.info('done')

    _compute_and_log_stats(roidb)
    # for each in roidb[0].keys():
    #     if not each == 'image':
    #         print(each, roidb[0][each])
    # sys.exit('')
    return roidb, ratio_list, ratio_index
# def extend_with_flipped_entries(roidb, dataset):
# """Flip each entry in the given roidb and return a new roidb that is the
# concatenation of the original roidb and the flipped entries.
# "Flipping" an entry means that that image and associated metadata (e.g.,
# ground truth boxes and object proposals) are horizontally flipped.
# """
# flipped_roidb = []
# for entry in roidb:
# width = entry['width']
# boxes = entry['boxes'].copy()
# oldx1 = boxes[:, 0].copy()
# oldx2 = boxes[:, 2].copy()
# boxes[:, 0] = width - oldx2 - 1
# boxes[:, 2] = width - oldx1 - 1
# assert (boxes[:, 2] >= boxes[:, 0]).all()
# flipped_entry = {}
# dont_copy = ('boxes', 'segms', 'gt_keypoints', 'flipped')
# for k, v in entry.items():
# if k not in dont_copy:
# flipped_entry[k] = v
# flipped_entry['boxes'] = boxes
# flipped_entry['segms'] = segm_utils.flip_segms(
# entry['segms'], entry['height'], entry['width']
# )
# if dataset.keypoints is not None:
# flipped_entry['gt_keypoints'] = keypoint_utils.flip_keypoints(
# dataset.keypoints, dataset.keypoint_flip_map,
# entry['gt_keypoints'], entry['width']
# )
# flipped_entry['flipped'] = True
# flipped_roidb.append(flipped_entry)
# roidb.extend(flipped_roidb)
def extend_with_flipped_entries(roidb, dataset):
    """Flip each entry in the given roidb and return a new roidb that is the
    concatenation of the original roidb and the flipped entries.

    "Flipping" an entry means that that image and associated metadata (e.g.,
    ground truth boxes and object proposals) are horizontally flipped.

    The roidb list is extended in place.  Unlike the stock Detectron version
    (commented out above), this variant also flips the precomputed boxes
    ('precomp_boxes') and keypoints ('precomp_keypoints').
    """
    # import ipdb; ipdb.set_trace()
    flipped_roidb = []
    for entry in roidb:
        # Mark every original entry explicitly as unflipped.
        entry['flipped'] = False
        width = entry['width']
        boxes = entry['boxes'].copy()
        precomp_boxes = entry['precomp_boxes'].copy()
        oldx1 = boxes[:, 0].copy()
        oldx2 = boxes[:, 2].copy()
        precomp_boxes_x1 = precomp_boxes[:, 0].copy()
        precomp_boxes_x2 = precomp_boxes[:, 2].copy()
        # Mirror x coordinates: x -> width - x - 1 (pixel-index convention).
        boxes[:, 0] = width - oldx2 - 1
        boxes[:, 2] = width - oldx1 - 1
        precomp_boxes[:, 0] = width - precomp_boxes_x2 - 1
        precomp_boxes[:, 2] = width - precomp_boxes_x1 - 1
        assert (boxes[:, 2] >= boxes[:, 0]).all()
        flipped_entry = {}
        # Keys below receive flipped replacements; all other values are
        # shared (not copied) with the original entry.
        dont_copy = ('boxes', 'segms', 'gt_keypoints', 'flipped', 'precomp_boxes')
        for k, v in entry.items():
            if k not in dont_copy:
                flipped_entry[k] = v
        flipped_entry['boxes'] = boxes
        flipped_entry['precomp_boxes'] = precomp_boxes
        flipped_entry['segms'] = segm_utils.flip_segms(
            entry['segms'], entry['height'], entry['width']
        )
        if dataset.keypoints is not None:
            flipped_entry['gt_keypoints'] = keypoint_utils.flip_keypoints(
                dataset.keypoints, dataset.keypoint_flip_map,
                entry['gt_keypoints'], entry['width']
            )
            flipped_entry['precomp_keypoints'] = keypoint_utils.flip_keypoints(
                dataset.keypoints, dataset.keypoint_flip_map,
                entry['precomp_keypoints'], entry['width']
            )
        flipped_entry['flipped'] = True
        flipped_roidb.append(flipped_entry)
    roidb.extend(flipped_roidb)
def filter_for_training(roidb):
    """Remove roidb entries that have no usable RoIs based on config settings.
    """
    def _has_usable_rois(entry):
        # An entry is usable when it holds at least one foreground RoI
        # (overlap >= FG_THRESH) or at least one background RoI (overlap in
        # [BG_THRESH_LO, BG_THRESH_HI)).
        ov = entry['max_overlaps']
        has_fg = bool(np.any(ov >= cfg.TRAIN.FG_THRESH))
        has_bg = bool(np.any((ov < cfg.TRAIN.BG_THRESH_HI) &
                             (ov >= cfg.TRAIN.BG_THRESH_LO)))
        usable = has_fg or has_bg
        # When training keypoint heads, additionally require that the entry
        # contains visible keypoints.
        if cfg.MODEL.KEYPOINTS_ON:
            usable = usable and entry['has_visible_keypoints']
        return usable

    total = len(roidb)
    kept = [entry for entry in roidb if _has_usable_rois(entry)]
    logger.info('Filtered {} roidb entries: {} -> {}'.
                format(total - len(kept), total, len(kept)))
    return kept
def rank_for_training(roidb):
    """Rank the roidb entries according to image aspect ration and mark for cropping
    for efficient batching if image is too long.

    Returns:
        ratio_list: ndarray, list of aspect ratios from small to large
        ratio_index: ndarray, list of roidb entry indices correspond to the ratios
    """
    RATIO_HI = cfg.TRAIN.ASPECT_HI  # largest ratio to preserve.
    RATIO_LO = cfg.TRAIN.ASPECT_LO  # smallest ratio to preserve.

    cropping = cfg.TRAIN.ASPECT_CROPPING
    need_crop_cnt = 0

    ratios = []
    for entry in roidb:
        aspect = entry['width'] / float(entry['height'])
        entry['need_crop'] = False
        if cropping and not (RATIO_LO <= aspect <= RATIO_HI):
            # Clamp extreme aspect ratios into [RATIO_LO, RATIO_HI] and flag
            # the entry for cropping at load time.
            entry['need_crop'] = True
            need_crop_cnt += 1
            aspect = min(max(aspect, RATIO_LO), RATIO_HI)
        ratios.append(aspect)

    if cropping:
        logging.info('Number of entries that need to be cropped: %d. Ratio bound: [%.2f, %.2f]',
                     need_crop_cnt, RATIO_LO, RATIO_HI)

    ratios = np.array(ratios)
    order = np.argsort(ratios)
    return ratios[order], order
def add_bbox_regression_targets(roidb):
    """Add information needed to train bounding-box regressors."""
    # Attach the (class, tx, ty, tw, th) regression targets to every entry.
    for roidb_entry in roidb:
        roidb_entry['bbox_targets'] = _compute_targets(roidb_entry)
def _compute_targets(entry):
    """Compute bounding-box regression targets for an image.

    Returns an array of shape (num_rois, 5) where each row is
    (class, tx, ty, tw, th); rows for RoIs whose max overlap is below
    cfg.TRAIN.BBOX_THRESH remain all-zero.
    """
    # Indices of ground-truth ROIs
    rois = entry['boxes']
    overlaps = entry['max_overlaps']
    labels = entry['max_classes']
    gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
    # Targets has format (class, tx, ty, tw, th)
    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return targets

    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
    # print('overlaps', overlaps)
    # print('ex', ex_inds)
    # print('gt', gt_inds)

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = box_utils.bbox_overlaps(
        rois[ex_inds, :].astype(dtype=np.float32, copy=False),
        rois[gt_inds, :].astype(dtype=np.float32, copy=False))
    # print('ex_gt_ov', ex_gt_overlaps)

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]
    # print('gt_rois', gt_rois)
    # print('ex_rois', ex_rois)
    # Use class "1" for all boxes if using class_agnostic_bbox_reg
    targets[ex_inds, 0] = (
        1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
    targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
        ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
    # print('targets', targets)
    # sys.exit()
    return targets
def _compute_and_log_stats(roidb):
    """Log a histogram of ground-truth class frequencies over the roidb.

    Counts non-crowd ground-truth boxes per class across all entries and
    writes the per-class counts plus a total to the module logger at DEBUG
    level.
    """
    classes = roidb[0]['dataset'].classes
    char_len = np.max([len(c) for c in classes])
    hist_bins = np.arange(len(classes) + 1)
    # Histogram of ground-truth objects.
    # BUG FIX: the deprecated `np.int` alias was removed in NumPy 1.24 and
    # raises AttributeError; use np.int64, which also matches the dtype
    # returned by np.histogram below.
    gt_hist = np.zeros((len(classes)), dtype=np.int64)
    for entry in roidb:
        gt_inds = np.where(
            (entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
        gt_classes = entry['gt_classes'][gt_inds]
        gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
    logger.debug('Ground-truth class histogram:')
    for i, v in enumerate(gt_hist):
        logger.debug(
            '{:d}{:s}: {:d}'.format(
                i, classes[i].rjust(char_len), v))
    logger.debug('-' * char_len)
    logger.debug(
        '{:s}: {:d}'.format(
            'total'.rjust(char_len), np.sum(gt_hist)))
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "config.h"
#include "display.h"
#include "log.h"
#include <guacamole/client.h>
#include <stdlib.h>
int guacenc_handle_cursor(guacenc_display* display, int argc, char** argv) {

    int args[7];
    int i;

    /* The "cursor" instruction carries exactly seven arguments */
    if (argc < 7) {
        guacenc_log(GUAC_LOG_WARNING, "\"cursor\" instruction incomplete");
        return 1;
    }

    /* Decode all numeric arguments in order: hotspot x/y, source layer
     * index, source x/y, source width/height */
    for (i = 0; i < 7; i++)
        args[i] = atoi(argv[i]);

    /* Cursor rendering is not yet implemented by the encoder: log the
     * parsed values and report success so encoding continues */
    guacenc_log(GUAC_LOG_DEBUG, "Ignoring cursor: hotspot (%i, %i) "
            "src_layer=%i (%i, %i) %ix%i", args[0], args[1],
            args[2], args[3], args[4], args[5], args[6]);

    return 0;

}
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, modules, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
    """
    Implementation for the loss used in masked language model (MLM) training.
    """

    def __init__(self, task, tpu):
        super().__init__(task)
        # When True, the output projection always covers all tokens (TPU
        # compilation prefers static shapes -- see forward()).
        self.tpu = tpu

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        # Positions whose target is not padding, i.e. the masked positions.
        masked_tokens = sample['target'].ne(self.padding_idx)
        sample_size = masked_tokens.int().sum()

        # Rare: when all tokens are masked, project all tokens.
        # We use torch.where to avoid device-to-host transfers,
        # except on CPU where torch.where is not well supported
        # (see github.com/pytorch/pytorch/issues/26247).
        if self.tpu:
            masked_tokens = None  # always project all tokens on TPU
        elif masked_tokens.device == torch.device('cpu'):
            if not masked_tokens.any():
                masked_tokens = None
        else:
            masked_tokens = torch.where(
                masked_tokens.any(),
                masked_tokens,
                masked_tokens.new([True]),
            )
        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])
        if masked_tokens is not None:
            targets = targets[masked_tokens]

        # Summed (not averaged) cross-entropy over the selected positions;
        # normalization happens later via sample_size.
        loss = modules.cross_entropy(
            logits.view(-1, logits.size(-1)),
            targets.view(-1),
            reduction='sum',
            ignore_index=self.padding_idx,
        )
        logging_output = {
            'loss': loss,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)

        # Report loss in bits (divide by ln 2); perplexity is derived from
        # the running average of the logged loss.
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):

    """
    An application component providing procedures with different kinds of arguments.
    """

    @inlineCallbacks
    def onJoin(self, details):
        print("session attached")

        def ping():
            return

        def add2(a, b):
            return a + b

        def stars(nick="somebody", stars=0):
            return u"{} starred {}x".format(nick, stars)

        def orders(product, limit=5):
            products = [u"Product {}".format(i) for i in range(50)]
            return products[:limit]

        def arglen(*args, **kwargs):
            return [len(args), len(kwargs)]

        # Register every procedure under its WAMP URI, in order.
        for proc, uri in ((ping, u'com.arguments.ping'),
                          (add2, u'com.arguments.add2'),
                          (stars, u'com.arguments.stars'),
                          (orders, u'com.arguments.orders'),
                          (arglen, u'com.arguments.arglen')):
            yield self.register(proc, uri)

        print("procedures registered")
if __name__ == '__main__':
    # Run this component standalone against a local WAMP router.
    from autobahn.twisted.wamp import ApplicationRunner
    # Connect to the router's websocket endpoint and join "realm1".
    runner = ApplicationRunner("ws://127.0.0.1:8080/ws", "realm1")
    runner.run(Component)
|
const aws = require('aws-sdk');
/**
 * Factory for a CloudWatch Logs scraper bound to one AWS region.
 * All methods return Promises; pagination is followed to exhaustion.
 */
function LogScraper(region) {
    const cloudwatchlogs = new aws.CloudWatchLogs({region});

    /**
     * Calls filterLogEvents with `params`, following `nextToken`
     * pagination, and returns the concatenated list of matching events.
     */
    const getAllLogItemsMatching = async function (params) {
        let data = await cloudwatchlogs.filterLogEvents(params).promise();
        let events = data.events;
        let nextToken = data.nextToken;
        while (nextToken) {
            params.nextToken = nextToken;
            data = await cloudwatchlogs.filterLogEvents(params).promise();
            events = events.concat(data.events);
            nextToken = data.nextToken;
        }
        return events;
    }
    return {
        /** Returns the names of every log group in the region. */
        getAllLogGroups: async function () {
            let response = await cloudwatchlogs.describeLogGroups().promise();
            let logGroups = response.logGroups.map(lg => lg.logGroupName);
            let nextToken = response.nextToken;
            while (nextToken) {
                response = await cloudwatchlogs.describeLogGroups( { nextToken } ).promise();
                logGroups = logGroups.concat(response.logGroups.map(lg => lg.logGroupName));
                // BUG FIX: this assignment previously used `let`, declaring a
                // new block-scoped variable; the loop condition never changed,
                // so more than one page of groups meant an infinite loop.
                nextToken = response.nextToken;
            }
            return logGroups;
        },
        /** Returns the names of every log stream of `group`. */
        getAllLogStreamsOfGroup: async function (group) {
            let response = await cloudwatchlogs.describeLogStreams({ logGroupName: group }).promise();
            let logStreams = response.logStreams.map(ls => ls.logStreamName);
            let nextToken = response.nextToken;
            while (nextToken) {
                // BUG FIX: the pagination token was previously not passed, so
                // every iteration re-fetched the first page forever.
                response = await cloudwatchlogs.describeLogStreams({ logGroupName: group, nextToken }).promise();
                logStreams = logStreams.concat(response.logStreams.map(ls => ls.logStreamName));
                nextToken = response.nextToken;
            }
            return logStreams;
        },
        /** Returns the names of all log streams across all log groups. */
        getAllLogStreams: async function () {
            const lgs = await this.getAllLogGroups();
            let logStreams = []
            for (const lg of lgs) {
                const lss = await this.getAllLogStreamsOfGroup(lg);
                logStreams = logStreams.concat(lss);
            }
            return logStreams;
        },
        /** Returns every log event of one stream, oldest first. */
        getAllLogItemsForStream: async function(group, stream) {
            let entries = []
            let logEvents = await cloudwatchlogs.getLogEvents({logGroupName: group , logStreamName: stream, startFromHead: true}).promise();
            entries = entries.concat(logEvents.events);
            let nextToken = logEvents.nextForwardToken;
            while (nextToken) {
                logEvents = await cloudwatchlogs.getLogEvents({logGroupName: group , logStreamName: stream, nextToken}).promise();
                if (logEvents.events.length > 0) {
                    nextToken = logEvents.nextForwardToken;
                    entries = entries.concat(logEvents.events);
                } else {
                    // getLogEvents returns the same forward token at the end
                    // of a stream, so an empty page means we are done.
                    nextToken = undefined;
                }
            }
            return entries;
        },
        /** Returns the events of one stream matching a filter pattern. */
        getAllLogItemsForStreamMatching: async function(group, stream, pattern) {
            const params = {
                logGroupName: group,
                filterPattern: pattern,
                logStreamNames: [stream]
            };
            return await getAllLogItemsMatching(params);
        },
        /** Returns every log event of every stream in a group. */
        getAllLogItemsForGroup: async function(group) {
            const streams = await this.getAllLogStreamsOfGroup(group);
            let items = []
            for (const stream of streams) {
                const tmpItems = await this.getAllLogItemsForStream(group, stream);
                items = items.concat(tmpItems);
            }
            return items;
        },
        /** Returns the events of a whole group matching a filter pattern. */
        getAllLogItemsForGroupMatching: async function(group, pattern) {
            const params = {
                logGroupName: group,
                filterPattern: pattern,
            };
            return await getAllLogItemsMatching(params);
        },
        /** Deletes every log stream of `group`; the group itself remains. */
        clearLogGroup: async function (group) {
            const streams = await this.getAllLogStreamsOfGroup(group)
            console.log(`Got the following streams for group ${group}:\n${streams}`)
            for (const stream of streams) {
                console.log(`Deleting stream ${stream} of group ${group}`)
                const resp = await cloudwatchlogs.deleteLogStream({logGroupName:group, logStreamName: stream}).promise()
            }
        }
    }
}
// Public API: construct with an AWS region, e.g. LogScraper('eu-west-1').
module.exports.LogScraper = LogScraper;

// Ad-hoc manual smoke tests; only executed when this file is run directly.
if (require.main === module) {
    const scraper = new LogScraper('eu-west-1');
    // getAllLogGroups()
    //     .then(a => console.log(a));
    // getAllLogStreamsOfGroup('/aws/lambda/realworld-dev-watchtower-monitor')
    //     .then(a => console.log(a));
    // getAllLogStreams()
    //     .then(a => console.log(a));
    // scraper.getAllLogItemsForStream('/aws/lambda/realworld-dev-watchtower-monitor', '2019/03/20/[$LATEST]1625d9ff778b4139ab0cef32963c5c70')
    //     .then(a => console.log(a));
    // scraper.getAllLogItemsForGroup('/aws/lambda/realworld-dev-watchtower-monitor')
    //     .then(a => console.log(a));
    // const pattern = 'WT_PROF VIOLATION REPORT DELAY';
    // const notificationDelayRE = /@@@@WT_PROF: VIOLATION REPORT DELAY: ([0-9]*)\(ms\)/;
    // const notificationDelayRE = '\"VIOLATION REPORT DELAY\"';
    // const logGroup = '/aws/lambda/wt-full-flow-test-watchtower-monitor';
    //
    // scraper.getAllLogItemsForGroupMatching(logGroup, pattern)
    //     .then(res => console.log(res));
    const group = '/aws/lambda/wt-collision-count-test-watchtower-monitor'
    scraper.clearLogGroup(group)
}
|
import React, { forwardRef } from 'react';
import PropTypes from 'prop-types';
const SkipStartBtnFill = forwardRef(({ color, size, ...rest }, ref) => {
return (
<svg
ref={ref}
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
width={size}
height={size}
fill={color}
{...rest}
>
<path d="M0 12V4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v8a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2zm9.71-6.907L7 7.028V5.5a.5.5 0 0 0-1 0v5a.5.5 0 0 0 1 0V8.972l2.71 1.935a.5.5 0 0 0 .79-.407v-5a.5.5 0 0 0-.79-.407z" />
</svg>
);
});
SkipStartBtnFill.propTypes = {
color: PropTypes.string,
size: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),
};
SkipStartBtnFill.defaultProps = {
color: 'currentColor',
size: '1em',
};
export default SkipStartBtnFill;
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Introduces the SecondaryEmail model: lets a user attach additional
    # e-mail addresses, each tied to a social-auth provider record.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('default', '0001_initial'),
        ('psa', '0002_usersession'),
    ]

    operations = [
        migrations.CreateModel(
            name='SecondaryEmail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=75, verbose_name=b'Secondary Email')),
                # NOTE(review): the FK targets 'social_django.UserSocialAuth'
                # while the dependency list references the legacy 'default'
                # app label -- confirm the app relabel is consistent.
                ('provider', models.ForeignKey(to='social_django.UserSocialAuth')),
                ('user', models.ForeignKey(related_name='secondary', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Each provider may hold a given e-mail address at most once.
        migrations.AlterUniqueTogether(
            name='secondaryemail',
            unique_together=set([('provider', 'email')]),
        ),
    ]
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\tag.py
# Compiled at: 2020-01-16 03:44:59
# Size of source mod 2**32: 2481 bytes
import functools
from sims4.tuning.dynamic_enum import DynamicEnumLocked
from sims4.tuning.tunable import TunableSet, TunableEnumEntry, TunableEnumWithFilter
from sims4.tuning.tunable_base import ExportModes
import singletons
PORTAL_DISALLOWANCE_PREFIX = ('PortalDisallowance', )
INTERACTION_PREFIX = ('interaction', )
SPAWN_PREFIX = ('Spawn', )
class Tag(DynamicEnumLocked, export_modes=(ExportModes.ClientBinary, ExportModes.ServerXML), display_sorted=True, partitioned=True):
    # Dynamic enum of gameplay tags; entries beyond INVALID are populated
    # from tuning at runtime and exported to client binary and server XML.
    INVALID = 0
class TagCategory(DynamicEnumLocked, export_modes=(ExportModes.ClientBinary, ExportModes.ServerXML)):
    # Dynamic enum of tag categories; populated from tuning at runtime.
    INVALID = 0
class TunableTag(TunableEnumWithFilter):
    """A tunable holding a single Tag value, optionally filtered by prefix."""

    def __init__(self, description='A tag.', filter_prefixes=singletons.EMPTY_SET, pack_safe=True, **kwargs):
        super().__init__(
            tunable_type=Tag,
            default=Tag.INVALID,
            invalid_enums=(Tag.INVALID,),
            pack_safe=pack_safe,
            filter_prefixes=filter_prefixes,
            description=description,
            **kwargs)
class TunableTags(TunableSet):
    """A tunable set of Tag values, optionally filtered by tag prefix."""

    def __init__(self, filter_prefixes=None, pack_safe=True, minlength=None, maxlength=None, **kwargs):
        # Without prefixes, a plain enum entry suffices; otherwise bind the
        # prefix filter into the entry factory.
        if filter_prefixes is None:
            entry_factory = TunableEnumEntry
        else:
            entry_factory = functools.partial(TunableEnumWithFilter, filter_prefixes=filter_prefixes)
        tag_entry = entry_factory(
            tunable_type=Tag,
            default=Tag.INVALID,
            invalid_enums=(Tag.INVALID,),
            pack_safe=pack_safe,
            **kwargs)
        super().__init__(tag_entry, minlength=minlength, maxlength=maxlength)
|
// main.js is stubbed out so the compiler can build everything without problems
// when we produce the minified requireVersion build; it is then removed from the minified code
define( [], function(){} ); // intentionally empty AMD module stub
|
from P458.data import (
    attributes,
    read_arff,
)
from P458.id3 import (
    id3,
)
from P458.tree import (
    str_tree,
    decide,
)

# Demo: train an ID3 decision tree on the contact-lenses ARFF dataset and
# classify one hand-written example row.
data = read_arff('./data/contact-lenses.arff')
decision_tree = id3(data, 'contact-lenses')

# NOTE(review): row values must line up with the attribute order returned by
# attributes(data) -- confirm against the .arff header.
row = ['sunny', 'hot', 'high', False, False]
result = decide(decision_tree, row, attributes(data))

print(' --- ')
print(str_tree(decision_tree))
print(f'For row: {row} id3 predicts: {result}')
|
import glob
from io import DEFAULT_BUFFER_SIZE
import math
import os
import json
import random
import shutil
import copy
import time
import warnings
from collections import defaultdict, OrderedDict
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from lib.utils.utils import xyxy2xywh, xywh2xyxy, ltwh2xywh
# BDD100K MOT category name -> contiguous class id.
cls2id = {
    'pedestrian': 0,
    'rider': 1,
    'car': 2,
    'truck': 3,
    'bus': 4,
    'train': 5,
    'motorcycle': 6,
    'bicycle': 7
}
# Inverse mapping: class id -> category name.
id2cls = {
    0: 'pedestrian',
    1: 'rider',
    2: 'car',
    3: 'truck',
    4: 'bus',
    5: 'train',
    6: 'motorcycle',
    7: 'bicycle'
}
# Pointer shown in error messages when a dataset list file is missing.
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
# Recognised image/video file extensions.
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']

# Get orientation exif tag: leaves `orientation` bound to the numeric EXIF
# tag id for 'Orientation' (used by exif_size below).
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
    """Return the EXIF-corrected (width, height) of a PIL image.

    Images shot in portrait orientation record their rotation in the EXIF
    'Orientation' tag rather than in the pixel data, so width/height are
    swapped when the tag indicates a 90 or 270 degree rotation.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 6: rotated 270 degrees, 8: rotated 90 degrees
            s = (s[1], s[0])
    except Exception:
        # No EXIF data or no orientation tag: keep the reported size.
        # BUG FIX: a bare `except:` here also swallowed KeyboardInterrupt
        # and SystemExit.
        pass
    return s
class LoadImages:
    """Iterate over image files (or an explicit list of paths) for inference.

    Yields the original BGR image alongside a letterboxed, float32, RGB,
    channel-first copy sized for the network.
    """

    def __init__(self, path, img_size=(1024, 576)):
        """
        :param path: directory, single image file, or list of image paths
        :param img_size: network input size as (width, height)
        """
        self.frame_rate = 10  # no actual meaning here
        print(path)
        if type(path) == str:
            if os.path.isdir(path):
                image_format = ['.jpg', '.jpeg', '.png', '.tif']
                self.files = sorted(glob.glob('%s/*.*' % path))
                self.files = list(filter(lambda x: os.path.splitext(x)[
                    1].lower() in image_format, self.files))
            elif os.path.isfile(path):
                self.files = [path]
        elif type(path) == list:
            self.files = path
        self.nF = len(self.files)  # number of image files
        self.width = img_size[0]
        self.height = img_size[1]
        self.count = 0
        # BUG FIX: string concatenation crashed with TypeError when `path`
        # was a list; format() handles both cases.
        assert self.nF > 0, 'No images found in {}'.format(path)

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if self.count == self.nF:
            raise StopIteration
        img_path = self.files[self.count]
        # Read image
        img_0 = cv2.imread(img_path)  # BGR
        assert img_0 is not None, 'Failed to load ' + img_path
        # Padded resize
        img, _, (dw, dh) = letterbox(img_0, (self.height, self.width))
        # Normalize RGB: BGR -> RGB and HxWxC -> CxHxW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        return img_path, img, img_0, (dw, dh)

    def __getitem__(self, idx):
        idx = idx % self.nF
        img_path = self.files[idx]
        # Read image
        img_0 = cv2.imread(img_path)  # BGR
        assert img_0 is not None, 'Failed to load ' + img_path
        # Padded resize.
        # BUG FIX: letterbox() takes a (height, width) tuple and returns a
        # 3-tuple (img, ratio, (dw, dh)); the previous call passed
        # unsupported height=/width= keyword arguments and unpacked four
        # values, raising TypeError at runtime.
        img, _, _ = letterbox(img_0, (self.height, self.width))
        # Normalize RGB: BGR -> RGB and HxWxC -> CxHxW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        return img_path, img, img_0

    def __len__(self):
        return self.nF  # number of files
class LoadVideo:  # for inference
    # Iterates over the frames of one video file, yielding a letterboxed,
    # float32, RGB, channel-first copy alongside the resized BGR frame.

    def __init__(self, path, img_size=(1024, 576)):
        """
        :param path: path to the video file
        :param img_size: network input size as (width, height)
        """
        self.cap = cv2.VideoCapture(path)
        self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
        self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.width = img_size[0]
        self.height = img_size[1]
        self.count = 0
        self.w, self.h = 1920, 1080  # output resolution every frame is first resized to
        print('Length of the video: {:d} frames'.format(self.vn))

    def get_size(self, vw, vh, dw, dh):
        # Largest (w, h) fitting inside (dw, dh) with the aspect ratio of (vw, vh).
        wa, ha = float(dw) / vw, float(dh) / vh
        a = min(wa, ha)
        return int(vw * a), int(vh * a)

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if self.count == len(self):
            raise StopIteration
        # Read image
        res, img_0 = self.cap.read()  # BGR
        assert img_0 is not None, 'Failed to load frame {:d}'.format(self.count)
        img_0 = cv2.resize(img_0, (self.w, self.h))
        # Padded resize
        img, _, _ = letterbox(img_0, (self.height, self.width))
        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR->RGB and HWC->CHW
        img = np.ascontiguousarray(img, dtype=np.float32)
        # save letterbox image
        # cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])
        return self.count, img, img_0

    def __len__(self):
        return self.vn  # number of frames
class LoadWebcam:  # for inference
    # Streams frames from a local webcam (pipe == 0) or an IP camera URL,
    # yielding letterboxed RGB channel-first frames; press 'q' to stop.

    def __init__(self, pipe=0, img_size=416):
        # img_size is forwarded to letterbox(); an int means a square target.
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera
        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer
        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        # Unknown stream length; iteration ends only on user quit.
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    # Reads several video streams concurrently (one daemon thread each) and
    # yields a batch containing the latest frame of every stream.

    def __init__(self, sources='streams.txt', img_size=416):
        self.mode = 'images'
        self.img_size = img_size

        # `sources` may be a file with one stream URL per line, or one URL.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        # NOTE(review): this file's letterbox() ignores the `auto` flag, so
        # rect inference currently behaves the same as non-rect -- confirm.
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class YOLOMOT(Dataset):  # for training/testing
    """BDD100K MOT dataset for joint detection + re-ID (MCMOT) training.

    Reads an image-list file, derives the matching `labels_with_ids` label
    paths, and builds (or loads a cached) per-class track-id count used to
    size the per-class re-ID classifiers.
    """

    def __init__(self,
                 path,
                 img_size=(576, 1024),
                 num_classes=8,
                 single_cls=False,
                 opt=None):
        """
        :param path: .train/.val list file with one image path per line
        :param img_size: network input size as (height, width)
        :param num_classes: number of detection classes
        :param single_cls: unused; kept for interface compatibility
        :param opt: options namespace (augment, mosaic, K, val, test_emb)
        """
        # Path is .train or .val file
        assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)

        # ------ Paths of the cached ID-count JSON files (quick load)
        tid_num = "/hpctmp/e0425991/datasets/bdd100k/bdd100k/MOT/tid_num.json"
        tidnumval = "/hpctmp/e0425991/datasets/bdd100k/bdd100k/MOT/tid_numval.json"

        # Get List of Img Files
        with open(path, 'r') as f:
            self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines()  # os-agnostic
                              if os.path.splitext(x)[-1].lower() in img_formats]

        # Get List of Label Files (same stem, labels_with_ids directory)
        self.label_files = [x.replace('images', 'labels_with_ids').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]

        # ----- Calculate Dataset Parameters
        n = len(self.img_files)
        self.n = n
        self.default_input_wh = img_size
        self.num_classes = num_classes
        self.augment = opt.augment
        self.mosaic = opt.mosaic
        self.max_objs = opt.K
        self.img_size = img_size

        # ------ Check for QuickLoad of Image Labels
        if os.path.exists(tid_num) and not opt.val and not opt.test_emb:
            print("Loading cached ID Dict...")
            with open(tid_num) as json_file:
                self.tid_num = json.load(json_file)
        elif os.path.exists(tidnumval) and opt.val and not opt.test_emb:
            print("Loading cached validation ID Dict...")
            with open(tidnumval) as json_file:
                self.tid_num = json.load(json_file)
        else:
            # ----- Generate ID Counts by scanning every label file
            print("Caching Labels & Generating ID Counts...")
            max_ids_dict = defaultdict(int)  # cls_id => max track id
            self.tid_num = OrderedDict()
            for lp in tqdm(self.label_files):
                if not os.path.isfile(lp):
                    print('[Warning]: invalid label file {}.'.format(lp))
                    continue
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    lb = np.loadtxt(lp)
                if len(lb) < 1:  # empty label file
                    continue
                lb = lb.reshape(-1, 6)
                for item in lb:  # one detection target per row
                    if item[1] > max_ids_dict[int(item[0])]:  # item[0]: cls_id, item[1]: track id
                        max_ids_dict[int(item[0])] = item[1]
            # track id number
            self.tid_num["bdd100k"] = max_ids_dict

            # ----- save dicts to json to save time in future
            if not opt.test_emb:
                if opt.val:
                    # BUG FIX: the two progress messages were swapped
                    # (this branch writes the *validation* cache).
                    print("Writing cached validation max ID dict to JSON...")
                    with open(tidnumval, 'w', encoding='utf-8') as f:
                        json.dump(self.tid_num, f, ensure_ascii=False, indent=4)
                else:
                    print("Writing cached max ID dict to JSON...")
                    with open(tid_num, 'w', encoding='utf-8') as f:
                        json.dump(self.tid_num, f, ensure_ascii=False, indent=4)

        # @even: for MCMOT training -- per (dataset, class) starting track
        # index so that ids of different classes occupy disjoint ranges.
        self.tid_start_idx_of_cls_ids = defaultdict(dict)
        last_idx_dict = defaultdict(int)
        for k, v in self.tid_num.items():
            for cls_id, id_num in v.items():
                self.tid_start_idx_of_cls_ids[k][int(cls_id)] = last_idx_dict[int(cls_id)]
                last_idx_dict[int(cls_id)] += id_num

        # Generate nID Dict for Building reID Classifier
        self.nID_dict = defaultdict(int)
        for k, v in last_idx_dict.items():
            self.nID_dict[int(k)] = int(v)  # number of track ids per class
        for k, v in sorted(self.nID_dict.items()):
            class_name = id2cls[k]
            print('* Total {:d} IDs of {}'.format(v, class_name))
        for k, v in sorted(self.tid_start_idx_of_cls_ids.items()):
            for cls_id, start_idx in sorted(v.items()):
                print('* Start index of dataset {} class {} is {}'
                      .format(k, cls_id, start_idx))

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, idx):
        """Return (CHW RGB image tensor, detection labels, track ids).

        Detection labels have rows (batch_idx, class, cx, cy, w, h) in
        pixels; track id rows are (batch_idx, track_id).
        """
        if self.mosaic:
            img, labels, track_ids = load_mosaic_with_ids(self, idx)
        else:
            # ----- Load Image, Labels & Track IDs
            # Label Format: Class, Track ID, Normalised ltwh
            img, (h, w) = load_image(self, idx)
            # NOTE(review): a single scalar ratio assumes the resize keeps
            # the aspect ratio of self.img_size -- confirm all inputs share
            # the target aspect ratio.
            resize_ratio = self.img_size[0] / img.shape[0]
            if img.shape[:2] != self.img_size:
                img = cv2.resize(img, self.img_size[::-1])
            labels = []
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                x = np.loadtxt(self.label_files[idx]).reshape(-1, 6)[:, [0, 2, 3, 4, 5]]  # Skip Loading Track IDs
            if x.size > 0:
                # Normalised xywh to pixel xyxy format
                # (for compatibility with the random_affine function)
                labels = x.copy()
                labels[:, 1] = resize_ratio * w * (x[:, 1] - x[:, 3] / 2)  # x1 = cx - w / 2
                labels[:, 2] = resize_ratio * h * (x[:, 2] - x[:, 4] / 2)  # y1 = cy - h / 2
                labels[:, 3] = resize_ratio * w * (x[:, 1] + x[:, 3] / 2)  # x2 = cx + w / 2
                labels[:, 4] = resize_ratio * h * (x[:, 2] + x[:, 4] / 2)  # y2 = cy + h / 2
            # Now We Load Track IDs
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                track_ids = np.loadtxt(self.label_files[idx]).reshape(-1, 6)[:, 1]

        if self.augment:
            if not self.mosaic:
                # Random Affine & Augment ColourSpace
                img, labels, track_ids = random_affine_with_ids(img, labels, track_ids)
            augment_hsv(img)

        # Number of Labels
        nL = len(labels)

        # Track ID starts from 1 (not 0); shift to 0-based
        track_ids -= 1

        # ----- Further Augmentations
        if nL:
            # Convert back from xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            # Normalise Again for Easy Flipping
            labels[:, [2, 4]] /= self.img_size[0]
            labels[:, [1, 3]] /= self.img_size[1]

        if self.augment:
            # Random Horizontal Flipping
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]
            # Random Vertical Flipping (disabled)
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        # ----- Final BBOX Ground Truths
        # Initialise Detection BBox Outs
        det_labels_out = np.zeros((nL, 6))  # Additional column0 means item_i in the batch
        track_ids_out = np.zeros((nL, 2))

        if nL:
            # Scale Normalised to Pixel XYWH for Detection
            det_labels_out[:, 1] = labels[:, 0]
            det_labels_out[:, [2, 4]] = labels[:, [1, 3]] * self.img_size[1]
            det_labels_out[:, [3, 5]] = labels[:, [2, 4]] * self.img_size[0]
            # Track IDs to be Returned
            track_ids_out[:, 1] = torch.from_numpy(track_ids).long()

        # ------ Image Transformations - BGR to RGB, HWC -> CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), torch.from_numpy(det_labels_out), torch.from_numpy(track_ids_out)

    @staticmethod
    def collate_fn(batch):
        """Stack images; concatenate labels/ids with batch index in col 0."""
        img, label, track_ids = zip(*batch)
        # Add Batch Index as First Element of Labels & Track IDs
        for i, (l, tid) in enumerate(zip(label, track_ids)):
            l[:, 0] = i
            tid[:, 0] = i
        return torch.stack(img, 0), torch.cat(label, 0), torch.cat(track_ids, 0)

    def shuffle(self):
        """Shuffle image order in place and re-derive matching label paths."""
        np.random.shuffle(self.img_files)
        # Reindex Corresponding Label Files
        self.label_files = [x.replace('images', 'labels_with_ids')
                                .replace('.png', '.txt')
                                .replace('.jpg', '.txt')
                            for x in self.img_files]
def load_image(self, index):
    """Load one image (BGR) from the dataset; returns (img, (h, w))."""
    img_path = self.img_files[index]
    image = cv2.imread(img_path)  # BGR
    assert image is not None, 'Image Not Found ' + img_path
    return image, image.shape[:2]
def test_image(img, labels, path="/home/svu/e0425991/FairMOT-X", name="test.jpg"):
    """Draw centre-based xywh boxes on a copy of img and write it to disk."""
    canvas = np.ascontiguousarray(img).copy()
    for cx, cy, bw, bh in labels:
        top_left = (int(cx - bw / 2), int(cy - bh / 2))
        bottom_right = (int(cx + bw / 2), int(cy + bh / 2))
        cv2.rectangle(canvas, pt1=top_left, pt2=bottom_right, color=(0, 0, 255), thickness=1)
    assert cv2.imwrite(os.path.join(path, name), canvas)
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR image in place."""
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random per-channel gains
    h_chan, s_chan, v_chan = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # Build one lookup table per channel (hue wraps at 180 in OpenCV).
    base = np.arange(0, 256, dtype=np.int16)
    hue_lut = ((base * gains[0]) % 180).astype(dtype)
    sat_lut = np.clip(base * gains[1], 0, 255).astype(dtype)
    val_lut = np.clip(base * gains[2], 0, 255).astype(dtype)

    jittered = cv2.merge((cv2.LUT(h_chan, hue_lut), cv2.LUT(s_chan, sat_lut), cv2.LUT(v_chan, val_lut))).astype(dtype)
    cv2.cvtColor(jittered, cv2.COLOR_HSV2BGR, dst=img)  # writes back into img; no return needed
def pad_resize_ratio(img, net_w, net_h):
    """Resize img into (net_w, net_h) preserving aspect ratio, padding gray.

    :param img: H x W x C image (array-like)
    :param net_w: target width
    :param net_h: target height
    :return: padded image of shape (net_h, net_w, C)
    """
    img = np.array(img)  # H x W x channels
    H, W, channels = img.shape
    if net_h / net_w < H / W:  # image is relatively taller: pad width
        new_h = int(net_h)
        new_w = int(net_h / H * W)
        pad = (net_w - new_w) // 2
        left = round(pad - 0.1)
        right = round(pad + 0.1)
        top, bottom = 0, 0
    else:  # image is relatively wider: pad height
        new_h = int(net_w / W * H)
        new_w = int(net_w)
        pad = (net_h - new_h) // 2
        left, right = 0, 0
        top = round(pad - 0.1)
        bottom = round(pad + 0.1)
    # BUG FIX: interpolation must be passed by keyword -- the third
    # positional parameter of cv2.resize is `dst`, so the old call did not
    # actually select INTER_LINEAR.
    img_resize = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    # add border
    img_out = cv2.copyMakeBorder(img_resize, top, bottom, left, right, cv2.BORDER_CONSTANT, value=127)
    return img_out
def pad_resize_img_square(img, square_size):
    """Pad ``img`` to a square with gray (127.5) borders, then resize it to
    (square_size, square_size).

    :param img: RGB image (array-like)
    :param square_size: target side length in pixels
    :return: square image of shape (square_size, square_size, C)
    """
    img = np.array(img)  # H x W x channels
    H, W, channels = img.shape
    dim_diff = np.abs(H - W)
    # upper(left) and lower(right) padding
    pad_lu = dim_diff // 2  # integer division
    pad_rd = dim_diff - pad_lu
    # determine padding for each axis: H, W, channels
    pad = ((pad_lu, pad_rd), (0, 0), (0, 0)) if H <= W else \
        ((0, 0), (pad_lu, pad_rd), (0, 0))
    # do padding(0.5) and normalize
    img = np.pad(img,
                 pad,
                 'constant',
                 constant_values=127.5)  # / 255.0
    # BUG FIX: the interpolation flag was passed as the third positional
    # argument, which is cv2.resize's `dst` parameter, not `interpolation`.
    img = cv2.resize(img,
                     (square_size, square_size),
                     interpolation=cv2.INTER_LINEAR)
    return img
def letterbox(img,
              new_shape=(416, 416),
              color=(114, 114, 114),
              auto=True,
              scaleFill=False,
              scaleup=True):
    """Resize ``img`` to ``new_shape`` with unchanged aspect ratio, padding
    the remainder with ``color``.

    :param img: H x W x C BGR image
    :param new_shape: target (height, width), or an int for a square
    :param color: padding color (BGR tuple)
    :param auto: kept for interface compatibility; unused in this implementation
    :param scaleFill: if True, stretch to ``new_shape`` with no padding
    :param scaleup: if False, only scale down (for better test mAP)
    :return: (image, (w_ratio, h_ratio), (dw, dh) padding applied per side)
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h)
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        # BUG FIX: new_unpad must be (width, height) for cv2.resize, and the
        # ratios are new_w/old_w and new_h/old_h; the old code mixed the axes
        # (new_unpad = new_shape i.e. (h, w); ratio used new_shape[0]/shape[1]).
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_affine_with_ids(img,
                           targets,
                           track_ids,
                           degrees=5,
                           translate=0.1,
                           scale=0.2,
                           shear=2.5,
                           border=0):
    """Apply a random affine transform (rotation, scale, translation, shear)
    to an image and its boxes, keeping track IDs aligned with surviving boxes.

    :param img: H x W x C image
    :param targets: N x 5 array of [cls, x1, y1, x2, y2] boxes (pixel xyxy)
    :param track_ids: N-element array of track IDs, parallel to ``targets``
    :param degrees: max absolute rotation angle (degrees)
    :param translate: max translation as a fraction of image size
    :param scale: max relative scale change
    :param shear: max absolute shear angle (degrees)
    :param border: extra border added to the output canvas
    :return: (warped image, filtered targets, filtered track_ids)
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
    if targets is None:  # targets = [cls, xyxy]
        targets = []
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
    # Translation
    # NOTE(review): x translation scales with shape[0] (height) and y with
    # shape[1] (width) — possibly swapped axes; confirm this is intentional.
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
    # ----- Transform Label Coordinates
    n = len(targets)
    if n:
        # Warp all 4 corners of every box through M.
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)
        # Create new axis-aligned boxes from the warped corners.
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
        # Clip warped boxes to the image bounds.
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # Keep boxes that are big enough, kept >20% of (scaled) area,
        # and are not degenerately thin (aspect ratio < 10).
        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
        targets = targets[i]
        track_ids = track_ids[i]
        targets[:, 1:5] = xy[i]
    return img, targets, track_ids
def random_affine(img, targets=(), degrees=5, translate=.1, scale=.2, shear=2.5, border=0):
    """Apply a random affine transform (rotation, scale, translation, shear)
    to an image and its boxes. Same as ``random_affine_with_ids`` but without
    track IDs.

    :param img: H x W x C image
    :param targets: N x 5 array of [cls, x1, y1, x2, y2] boxes (pixel xyxy)
    :return: (warped image, filtered targets)
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
    if targets is None:  # targets = [cls, xyxy]
        targets = []
    height = img.shape[0] + border * 2
    width = img.shape[1] + border * 2
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
    # Translation
    # NOTE(review): x translation uses shape[0] (height), y uses shape[1]
    # (width) — possibly swapped; mirrors random_affine_with_ids.
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border  # y translation (pixels)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
    # Transform label coordinates
    n = len(targets)
    if n:
        # warp all 4 corners of every box through M
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)
        # create new axis-aligned boxes from the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep boxes that are big enough, kept >20% of (scaled) area, and not too thin
        i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
        targets = targets[i]
        targets[:, 1:5] = xy[i]
    return img, targets
def load_mosaic_with_ids(self, index):
    """Build a 4-image mosaic around a random center and return it together
    with pixel-space xyxy labels and their track IDs.

    :param self: dataset object providing img_files/label_files/img_size
    :param index: index of the primary image (3 more are drawn at random)
    :return: (mosaic image, labels [cls, x1, y1, x2, y2], track IDs)
    """
    labels4, track_ids_4 = [], []
    net_in_h, net_in_w = self.img_size
    # Randomly generate the mosaic center (coordinates in the final image)
    xc = int(random.uniform(0.35, 0.65) * net_in_w)
    yc = int(random.uniform(0.35, 0.65) * net_in_h)
    # Grab 3 other random indices
    indices = [index] + [random.randint(0, len(self.label_files) - 1) for _ in range(3)]
    for i, index in enumerate(indices):
        # Load image
        img, (h, w) = load_image(self, index)
        # PERF FIX: the label file was parsed twice with np.loadtxt;
        # read once and slice. Rows are [cls, track_id, x, y, w, h] (normalized).
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            gt = np.loadtxt(self.label_files[index]).reshape(-1, 6)
        labels = gt[:, [0, 2, 3, 4, 5]]
        track_ids = gt[:, 1]
        # Generate coordinates for each corner of the mosaic.
        # On first run, initialize base array with arbitrary pixel value of 114.
        if i == 0:  # top left
            img4 = np.full((net_in_h, net_in_w, 3), 114, dtype=np.uint8)
            x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc
        elif i == 1:  # top right
            x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, net_in_w), yc
        elif i == 2:  # bottom left
            x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(yc + h, net_in_h)
        elif i == 3:  # bottom right
            x1, y1, x2, y2 = xc, yc, min(xc + w, net_in_w), min(yc + h, net_in_h)
        # Fitted image dimensions, resize & place into the canvas
        new_h = y2 - y1
        new_w = x2 - x1
        img = cv2.resize(img, (new_w, new_h))
        img4[y1:y2, x1:x2] = img
        # Take the opportunity to convert normalised XYWH to pixel XYXY
        for ltwh_bbox in labels:
            bbox = np.zeros(5)
            bbox[0] = ltwh_bbox[0]  # class id
            bbox[1] = x1 + (ltwh_bbox[1] - ltwh_bbox[3] / 2) * new_w
            bbox[2] = y1 + (ltwh_bbox[2] - ltwh_bbox[4] / 2) * new_h
            bbox[3] = bbox[1] + ltwh_bbox[3] * new_w
            bbox[4] = bbox[2] + ltwh_bbox[4] * new_h
            labels4.append(bbox)
        track_ids_4.extend(track_ids)
    # Track IDs start from 0, not 1
    labels4 = np.array(labels4)
    track_ids_4 = np.array(track_ids_4)
    # Standard augmentation (affine warp applied to image + boxes + ids)
    img4, labels4, track_ids = random_affine_with_ids(img4, labels4, track_ids_4)
    return img4, labels4, track_ids
def cutout(image, labels):
    """Apply Cutout augmentation to ``image`` in place and return the labels
    that remain mostly unobscured.

    https://arxiv.org/abs/1708.04552
    https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
    https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509

    :param image: H x W x C image, modified in place
    :param labels: N x 5 array [cls, x1, y1, x2, y2]
    :return: labels with <60%-obscured boxes kept
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        """Intersection of box1 over each box2's own area (xyxy, box2 is n x 4)."""
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
            (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
        return inter_area / box2_area

    # Mask sizes as fractions of the image: 1 half, 2 quarters, 4 eighths, ...
    scales = [0.5] + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))
        # Random mask rectangle, clipped to the image.
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)
        # Fill the rectangle with one random color.
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
        # Drop labels that the larger masks obscure by more than 60%.
        if len(labels) and s > 0.03:
            mask_box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(mask_box, labels[:, 1:5])
            labels = labels[ioa < 0.60]
    return labels
def reduce_img_size(path='../data/sm4/images',
                    img_size=1024):  # from evaluate_utils.datasets import *; reduce_img_size()
    """Create a fresh ``<path>_reduced`` folder containing copies of all
    images, downscaled so their longest side is at most ``img_size``."""
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for f in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(f)
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; keep best-effort behavior otherwise.
            print('WARNING: image failure %s' % f)
def convert_images2bmp():  # from evaluate_utils.datasets import *; convert_images2bmp()
    """Convert the sm4 dataset images to .bmp copies in sibling '*bmp'
    folders and rewrite the train/test list files to point at them."""
    # Save images: every known extension, both lower- and upper-case.
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    # for path in ['../coco/images/val2014', '../coco/images/train2014']:
    for path in ['../data/sm4/images', '../data/sm4/background']:
        create_folder(path + 'bmp')
        for ext in formats:  # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
            for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
                # Re-encode each image as .bmp into the parallel '*bmp' folder.
                cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
    # Save labels: rewrite list files so their paths reference the bmp copies.
    # for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
    for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
        with open(file, 'r') as f:
            lines = f.read()
            # lines = f.read().replace('2014/', '2014bmp/')  # coco
            lines = lines.replace('/images', '/imagesbmp')
            lines = lines.replace('/background', '/backgroundbmp')
        for ext in formats:
            lines = lines.replace(ext, '.bmp')
        # Write the rewritten list next to the original, with a 'bmp' suffix.
        with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
            f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'):  # from evaluate_utils.datasets import *; recursive_dataset2bmp()
    """Recursively convert every image under ``dataset`` to .bmp (for faster
    training), deleting the originals, and rewrite extensions inside .txt files."""
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    for root, _dirs, files in os.walk(dataset):
        for file in tqdm(files, desc=root):
            p = root + '/' + file
            s = Path(file).suffix
            if s == '.txt':  # rewrite image references inside text files
                # Renamed the handle/loop vars: the original reused `f` for
                # both the file handle and the formats loop variable.
                with open(p, 'r') as fh:
                    lines = fh.read()
                for ext in formats:
                    lines = lines.replace(ext, '.bmp')
                with open(p, 'w') as fh:
                    fh.write(lines)
            elif s in formats:  # convert the image itself
                cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
                if s != '.bmp':
                    # BUG FIX: replaced os.system("rm '%s'") with os.remove —
                    # portable and not vulnerable to shell metacharacters in paths.
                    os.remove(p)
def imagelist2folder(path='data/coco_64img.txt'):  # from evaluate_utils.datasets import *; imagelist2folder()
    """Copy every image listed (one path per line) in the text file ``path``
    into a fresh folder named after the list file (extension stripped)."""
    create_folder(path[:-4])
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            # BUG FIX: replaced os.system('cp "%s" %s') — the destination was
            # unquoted and the call shell-dependent; shutil.copy is portable
            # and safe with spaces in paths.
            shutil.copy(line, path[:-4])
            print(line)
def create_folder(path='./new_folder'):
    """Create an empty folder at ``path``, deleting any existing one first."""
    if os.path.exists(path):
        # Wipe any previous contents so the folder starts empty.
        shutil.rmtree(path)
    os.makedirs(path)
|
from sentence_transformers import SentenceTransformer
from .similarity_functions import *
# Maps a metric name to its similarity function (from .similarity_functions).
function_dispatcher = { 'cosine' : cosine, 'euclidean' : euclidean,'manhattan':manhattan,'minkowski':minkowski}
class sentence_embedding():
    """Thin wrapper around a pretrained SentenceTransformer model."""

    def __init__(self, model_name):
        # Load the pretrained model by name.
        self.model = SentenceTransformer(model_name)

    def sentence_similarity(self, sentence1, sentence2, metric="cosine"):
        """Compute the similarity score between two sentences.

        ``metric`` selects a function from ``function_dispatcher``.
        On any failure the error is printed and None is returned.
        """
        try:
            emb1 = self.model.encode(sentence1)
            emb2 = self.model.encode(sentence2)
            similarity_fn = function_dispatcher[metric]
            return similarity_fn(emb1.tolist(), emb2.tolist())
        except Exception as e:
            print(e)
            return None
|
/*****************************************************************************
Copyright (c) 2011, Intel Corp.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************
* Contents: Native middle-level C interface to LAPACK function sgesvxx
* Author: Intel Corporation
* Generated November, 2011
*****************************************************************************/
#include "lapacke_utils.h"
/* Middle-level wrapper for LAPACK sgesvxx (expert driver: solves A*X = B
 * with equilibration, extra-precise iterative refinement and error bounds).
 * For LAPACK_COL_MAJOR the Fortran routine is called directly; for
 * LAPACK_ROW_MAJOR the matrices are transposed into temporary column-major
 * buffers, the routine is called, and the outputs that the driver may have
 * modified are transposed back. Returns the (adjusted) LAPACK info code. */
lapack_int LAPACKE_sgesvxx_work( int matrix_order, char fact, char trans,
                                 lapack_int n, lapack_int nrhs, float* a,
                                 lapack_int lda, float* af, lapack_int ldaf,
                                 lapack_int* ipiv, char* equed, float* r,
                                 float* c, float* b, lapack_int ldb, float* x,
                                 lapack_int ldx, float* rcond, float* rpvgrw,
                                 float* berr, lapack_int n_err_bnds,
                                 float* err_bnds_norm, float* err_bnds_comp,
                                 lapack_int nparams, float* params, float* work,
                                 lapack_int* iwork )
{
    lapack_int info = 0;
    if( matrix_order == LAPACK_COL_MAJOR ) {
        /* Call LAPACK function and adjust info */
        LAPACK_sgesvxx( &fact, &trans, &n, &nrhs, a, &lda, af, &ldaf, ipiv,
                        equed, r, c, b, &ldb, x, &ldx, rcond, rpvgrw, berr,
                        &n_err_bnds, err_bnds_norm, err_bnds_comp, &nparams,
                        params, work, iwork, &info );
        if( info < 0 ) {
            /* Shift the bad-argument index by one: the C wrapper has an
             * extra leading matrix_order argument. */
            info = info - 1;
        }
    } else if( matrix_order == LAPACK_ROW_MAJOR ) {
        lapack_int lda_t = MAX(1,n);
        lapack_int ldaf_t = MAX(1,n);
        lapack_int ldb_t = MAX(1,n);
        lapack_int ldx_t = MAX(1,n);
        float* a_t = NULL;
        float* af_t = NULL;
        float* b_t = NULL;
        float* x_t = NULL;
        float* err_bnds_norm_t = NULL;
        float* err_bnds_comp_t = NULL;
        /* Check leading dimension(s): for row-major storage the leading
         * dimension is the number of columns (n for a/af, nrhs for b/x). */
        if( lda < n ) {
            info = -7;
            LAPACKE_xerbla( "LAPACKE_sgesvxx_work", info );
            return info;
        }
        if( ldaf < n ) {
            info = -9;
            LAPACKE_xerbla( "LAPACKE_sgesvxx_work", info );
            return info;
        }
        if( ldb < nrhs ) {
            info = -15;
            LAPACKE_xerbla( "LAPACKE_sgesvxx_work", info );
            return info;
        }
        if( ldx < nrhs ) {
            info = -17;
            LAPACKE_xerbla( "LAPACKE_sgesvxx_work", info );
            return info;
        }
        /* Allocate memory for temporary (column-major) array(s); each
         * failure unwinds the previously successful allocations below. */
        a_t = (float*)LAPACKE_malloc( sizeof(float) * lda_t * MAX(1,n) );
        if( a_t == NULL ) {
            info = LAPACK_TRANSPOSE_MEMORY_ERROR;
            goto exit_level_0;
        }
        af_t = (float*)LAPACKE_malloc( sizeof(float) * ldaf_t * MAX(1,n) );
        if( af_t == NULL ) {
            info = LAPACK_TRANSPOSE_MEMORY_ERROR;
            goto exit_level_1;
        }
        b_t = (float*)LAPACKE_malloc( sizeof(float) * ldb_t * MAX(1,nrhs) );
        if( b_t == NULL ) {
            info = LAPACK_TRANSPOSE_MEMORY_ERROR;
            goto exit_level_2;
        }
        x_t = (float*)LAPACKE_malloc( sizeof(float) * ldx_t * MAX(1,nrhs) );
        if( x_t == NULL ) {
            info = LAPACK_TRANSPOSE_MEMORY_ERROR;
            goto exit_level_3;
        }
        err_bnds_norm_t = (float*)
            LAPACKE_malloc( sizeof(float) * nrhs * MAX(1,n_err_bnds) );
        if( err_bnds_norm_t == NULL ) {
            info = LAPACK_TRANSPOSE_MEMORY_ERROR;
            goto exit_level_4;
        }
        err_bnds_comp_t = (float*)
            LAPACKE_malloc( sizeof(float) * nrhs * MAX(1,n_err_bnds) );
        if( err_bnds_comp_t == NULL ) {
            info = LAPACK_TRANSPOSE_MEMORY_ERROR;
            goto exit_level_5;
        }
        /* Transpose input matrices (af only when a factorization is supplied) */
        LAPACKE_sge_trans( matrix_order, n, n, a, lda, a_t, lda_t );
        if( LAPACKE_lsame( fact, 'f' ) ) {
            LAPACKE_sge_trans( matrix_order, n, n, af, ldaf, af_t, ldaf_t );
        }
        LAPACKE_sge_trans( matrix_order, n, nrhs, b, ldb, b_t, ldb_t );
        /* Call LAPACK function and adjust info */
        LAPACK_sgesvxx( &fact, &trans, &n, &nrhs, a_t, &lda_t, af_t, &ldaf_t,
                        ipiv, equed, r, c, b_t, &ldb_t, x_t, &ldx_t, rcond,
                        rpvgrw, berr, &n_err_bnds, err_bnds_norm_t,
                        err_bnds_comp_t, &nparams, params, work, iwork, &info );
        if( info < 0 ) {
            info = info - 1;
        }
        /* Transpose output matrices back to row-major. a and b are only
         * overwritten by the driver when it equilibrated them. */
        if( LAPACKE_lsame( fact, 'e' ) && ( LAPACKE_lsame( *equed, 'b' ) ||
            LAPACKE_lsame( *equed, 'c' ) || LAPACKE_lsame( *equed, 'r' ) ) ) {
            LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, a_t, lda_t, a, lda );
        }
        if( LAPACKE_lsame( fact, 'e' ) || LAPACKE_lsame( fact, 'n' ) ) {
            LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, n, af_t, ldaf_t, af, ldaf );
        }
        if( LAPACKE_lsame( fact, 'f' ) && ( LAPACKE_lsame( *equed, 'b' ) ||
            LAPACKE_lsame( *equed, 'c' ) || LAPACKE_lsame( *equed, 'r' ) ) ) {
            LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, nrhs, b_t, ldb_t, b, ldb );
        }
        LAPACKE_sge_trans( LAPACK_COL_MAJOR, n, nrhs, x_t, ldx_t, x, ldx );
        LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrhs, n_err_bnds, err_bnds_norm_t,
                           nrhs, err_bnds_norm, nrhs );
        LAPACKE_sge_trans( LAPACK_COL_MAJOR, nrhs, n_err_bnds, err_bnds_comp_t,
                           nrhs, err_bnds_comp, nrhs );
        /* Release memory and exit; the labels unwind in reverse
         * allocation order so partial allocations are freed on error. */
        LAPACKE_free( err_bnds_comp_t );
exit_level_5:
        LAPACKE_free( err_bnds_norm_t );
exit_level_4:
        LAPACKE_free( x_t );
exit_level_3:
        LAPACKE_free( b_t );
exit_level_2:
        LAPACKE_free( af_t );
exit_level_1:
        LAPACKE_free( a_t );
exit_level_0:
        if( info == LAPACK_TRANSPOSE_MEMORY_ERROR ) {
            LAPACKE_xerbla( "LAPACKE_sgesvxx_work", info );
        }
    } else {
        /* Unknown matrix_order value */
        info = -1;
        LAPACKE_xerbla( "LAPACKE_sgesvxx_work", info );
    }
    return info;
}
|
/**
* Copyright 2014-2016 CyberVision, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import <Foundation/Foundation.h>
/**
 * Checker of a network connectivity.
 */
@interface ConnectivityChecker : NSObject
/**
 * Check whether network connectivity exists.
 *
 * @return YES if the device currently has network connectivity, NO otherwise.
 */
- (BOOL)isConnected;
@end
|
"""ACH Server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from project1.foo import views
from django.contrib import admin
from django.urls import include, path, re_path
# Route table: only the Django admin site is exposed for now.
urlpatterns = [
    path("admin/", admin.site.urls),
]
|
import fetch from 'isomorphic-fetch'
import {
FETCH_PARAMS_REQUEST,
FETCH_PARAMS_SUCCESS,
FETCH_PARAMS_FAILURE,
UPDATE_PARAMS
} from "../actionTypes";
import {apiGetParams} from "../api";
import {thunkCreator} from "./utils";
/**
 * Action creator: describe an in-place edit of a parameter cell.
 * Note: the `type` argument is the cell's own type field and is nested
 * under `updating`; the action's `type` is always UPDATE_PARAMS.
 */
export const updateParams = (rowIndex, colIndex, type, value) => ({
  type: UPDATE_PARAMS,
  updating: { rowIndex, colIndex, type, value },
});
// Thunk action creator: fetch the parameters from the API (`rand` busts
// caching) and dispatch REQUEST/SUCCESS/FAILURE around the promise.
// `additional` is forwarded untouched to thunkCreator.
export const fetchParams = (rand = 0, additional = []) => thunkCreator({
  types: [FETCH_PARAMS_REQUEST, FETCH_PARAMS_SUCCESS, FETCH_PARAMS_FAILURE],
  promise: fetch(`${apiGetParams}?rand=${rand}`)
    .then(response => response.json()),
  additional: additional
});
|
# -*- coding: utf-8 -*-
"""Tests for the family module."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: bb5b11f9d8c6799ce123b546d89d9bacd0051875 $'
from pywikibot.family import Family, SingleSiteFamily
from pywikibot.exceptions import UnknownFamily
from pywikibot.tools import PY2
import pywikibot.site
from tests.aspects import (
unittest,
TestCase,
DeprecationTestCase,
PatchingTestCase,
)
from tests.utils import DrySite
if not PY2:
basestring = (str, )
class TestFamily(TestCase):

    """Test cases for Family methods."""

    # No network access is needed for any test in this class.
    net = False

    def test_family_load_valid(self):
        """Test that a family can be loaded via Family.load."""
        for name in pywikibot.config.family_files:
            f = Family.load(name)
            # Every loaded family must expose language codes and domains.
            self.assertIsInstance(f.langs, dict)
            self.assertTrue(f.langs)
            self.assertTrue(f.codes)
            self.assertTrue(iter(f.codes))
            self.assertIsInstance(next(iter(f.codes)), basestring)
            self.assertTrue(f.domains)
            self.assertTrue(iter(f.domains))
            for domain in f.domains:
                self.assertIsInstance(domain, basestring)
                if domain != 'localhost':
                    self.assertIn('.', domain)
            self.assertEqual(f.name, name)
            self.assertIsInstance(f.languages_by_size, list)
            self.assertGreaterEqual(set(f.langs), set(f.languages_by_size))
            # Families with many languages should have a size ordering
            # (wikimediachapter is a known exception).
            if len(f.langs) > 6 and f.name != 'wikimediachapter':
                self.assertNotEqual(f.languages_by_size, [])
            if isinstance(f, SingleSiteFamily):
                # Single-site families expose exactly one code/domain.
                self.assertIsNotNone(f.code)
                self.assertIsNotNone(f.domain)
                self.assertEqual(set(f.langs), set([f.code]))
                self.assertEqual(set(f.codes), set([f.code]))

    def test_family_load_invalid(self):
        """Test that an invalid family raised UnknownFamily exception."""
        self.assertRaises(UnknownFamily, Family.load, 'unknown')

    def test_eq_different_families_by_name(self):
        """Test that two Family with same name are equal."""
        family_1 = Family()
        family_2 = Family()
        family_1.name = 'a'
        family_2.name = 'a'
        # Distinct instances, but equality is by name.
        self.assertNotEqual(id(family_1), id(family_2))
        self.assertEqual(family_1, family_2)

    def test_eq_different_families_by_id(self):
        """Test that two Family with no name attribute are not equal."""
        family_1 = Family()
        family_2 = Family()
        family_1.name = 'a'
        del family_2.name
        self.assertNotEqual(id(family_1), id(family_2))
        self.assertNotEqual(family_1, family_2)

    def test_eq_family_with_string_repr_same_family(self):
        """Test that Family and string with same name are equal."""
        family = Family.load('wikipedia')
        other = 'wikipedia'
        self.assertEqual(family, other)
        self.assertFalse(family != other)

    def test_ne_family_with_string_repr_different_family(self):
        """Test that Family and string with different name are not equal."""
        family = Family.load('wikipedia')
        other = 'wikisource'
        self.assertNotEqual(family, other)
        self.assertFalse(family == other)

    def test_eq_family_with_string_repr_not_existing_family(self):
        """Test that Family and string with different name are not equal."""
        family = Family.load('wikipedia')
        other = 'unknown'
        # Comparing against a non-existent family name raises, not False.
        self.assertRaises(UnknownFamily, family.__eq__, other)

    def test_get_obsolete_wp(self):
        """Test three types of obsolete codes."""
        family = Family.load('wikipedia')
        self.assertIsInstance(family.obsolete, dict)
        # redirected code (see site tests test_alias_code_site)
        self.assertEqual(family.obsolete['dk'], 'da')
        # closed/locked site (see site tests test_locked_site)
        self.assertEqual(family.obsolete['mh'], None)
        # offline site (see site tests test_removed_site)
        self.assertEqual(family.obsolete['ru-sib'], None)

    def test_get_obsolete_test(self):
        """Test WikimediaFamily default obsolete."""
        family = Family.load('test')
        self.assertIn('dk', family.obsolete)
        self.assertIn('dk', family.interwiki_replacements)
        self.assertEqual(family.obsolete, family.interwiki_replacements)
        self.assertEqual(family.interwiki_removals, set())

    def test_set_obsolete(self):
        """Test obsolete can be set."""
        family = Family()
        self.assertEqual(family.obsolete, {})
        self.assertEqual(family.interwiki_replacements, {})
        self.assertEqual(family.interwiki_removals, [])
        # Setting obsolete splits it into replacements (values) and
        # removals (None values).
        family.obsolete = {'a': 'b', 'c': None}
        self.assertEqual(family.obsolete, {'a': 'b', 'c': None})
        self.assertEqual(family.interwiki_replacements, {'a': 'b'})
        self.assertEqual(family.interwiki_removals, ['c'])

    def test_obsolete_readonly(self):
        """Test obsolete result not updatable."""
        family = Family.load('test')
        self.assertRaises(TypeError, family.obsolete.update, {})
        self.assertRaises(TypeError, family.obsolete.__setitem__, 'a', 'b')

    def test_WikimediaFamily_obsolete_readonly(self):
        """Test WikimediaFamily obsolete is readonly."""
        family = Family.load('test')
        self.assertRaises(TypeError, family.__setattr__, 'obsolete',
                          {'a': 'b', 'c': None})
class TestFamilyUrlRegex(PatchingTestCase):

    """Test family URL regex."""

    # No network access is needed; pywikibot.Site is patched below.
    net = False

    @PatchingTestCase.patched(pywikibot, 'Site')
    def Site(self, code, fam, *args, **kwargs):
        """Own DrySite creator."""
        # Verify the patched Site() is called exactly as the regex code does.
        self.assertEqual(args, tuple())
        self.assertEqual(kwargs, {})
        self.assertEqual(code, self.current_code)
        self.assertEqual(fam, self.current_family)
        site = DrySite(code, fam, None, None)
        site._siteinfo._cache['general'] = ({'articlepath': self.article_path},
                                            True)
        return site

    def setUp(self):
        """Setup default article path."""
        super(TestFamilyUrlRegex, self).setUp()
        self.article_path = '/wiki/$1'

    def test_from_url_wikipedia_extra(self):
        """Test various URLs against wikipedia regex."""
        self.current_code = 'vo'
        self.current_family = 'wikipedia'
        f = Family.load('wikipedia')
        prefix = 'https://vo.wikipedia.org'
        self.assertEqual(f.from_url(prefix + '/wiki/'), 'vo')
        self.assertEqual(f.from_url(prefix + '/w/index.php'), 'vo')
        self.assertEqual(f.from_url(prefix + '/w/index.php/'), 'vo')
        self.assertEqual(f.from_url(prefix + '/w/index.php?title=$1'), 'vo')
        self.assertEqual(f.from_url(prefix + '/wiki/$1'), 'vo')
        self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')
        self.assertEqual(f.from_url(prefix + '/w/index.php/$1'), 'vo')
        self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')
        # Text after $1 is not allowed
        self.assertRaises(ValueError, f.from_url,
                          '//vo.wikipedia.org/wiki/$1/foo')
        # the IWM may contain the wrong protocol, but it's only used to
        # determine a site so using HTTP or HTTPS is not an issue
        self.assertEqual(f.from_url('http://vo.wikipedia.org/wiki/$1'), 'vo')
        # wrong protocol
        self.assertIsNone(f.from_url('ftp://vo.wikipedia.org/wiki/$1'))
        # wrong code
        self.assertIsNone(f.from_url('https://foobar.wikipedia.org/wiki/$1'))
        # wrong family
        self.assertIsNone(f.from_url('https://vo.wikibooks.org/wiki/$1'))
        self.assertIsNone(f.from_url('http://vo.wikibooks.org/wiki/$1'))
        # invalid path
        self.assertIsNone(f.from_url('https://vo.wikipedia.org/wik/$1'))
        self.assertIsNone(f.from_url('https://vo.wikipedia.org/index.php/$1'))

    def test_each_family(self):
        """Test each family builds a working regex."""
        for family in pywikibot.config.family_files:
            self.current_family = family
            family = Family.load(family)
            for code in family.codes:
                self.current_code = code
                url = ('%s://%s%s/$1' % (family.protocol(code),
                                         family.hostname(code),
                                         family.path(code)))
                # Families can switch off if they want to be detected using URL
                # this applies for test:test (there is test:wikipedia)
                # NOTE(review): `family._ignore_from_url or code in ...` is
                # truthy for ANY non-empty list, not just when `code` is
                # listed — confirm whether `is True` was intended here.
                if family._ignore_from_url or code in family._ignore_from_url:
                    self.assertIsNone(family.from_url(url))
                else:
                    self.assertEqual(family.from_url(url), code)
class TestOldFamilyMethod(DeprecationTestCase):

    """Test cases for old site.Family method."""

    # No network access needed.
    net = False

    def test_old_site_family_function(self):
        """Test deprecated Family function with valid families."""
        f = pywikibot.site.Family('species')
        self.assertEqual(f.name, 'species')
        f = pywikibot.site.Family('osm')
        self.assertEqual(f.name, 'osm')
        # Two calls above -> exactly two deprecation warnings expected.
        self.assertOneDeprecationParts('pywikibot.site.Family',
                                       'pywikibot.family.Family.load', 2)
        # @deprecated warning occurs within redirect_func's call
        # invoking the method instead of this test module.
        self._do_test_warning_filename = False
        f = pywikibot.site.Family('i18n', fatal=False)
        self.assertEqual(f.name, 'i18n')
        self.assertDeprecationParts('pywikibot.site.Family',
                                    'pywikibot.family.Family.load')
        self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')

    def test_old_site_family_function_invalid(self):
        """Test that an invalid family raised UnknownFamily exception."""
        # As assertRaises calls the method, unittest is the module
        # invoking the method instead of this test module.
        self._do_test_warning_filename = False
        self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown',
                          fatal=False)
        self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown')
        self.assertDeprecationParts('pywikibot.site.Family',
                                    'pywikibot.family.Family.load')
        self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')
if __name__ == '__main__':
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() calls sys.exit(); swallow it so the module can
        # also be run from interactive sessions / test harnesses.
        pass
|
import React from 'react';
import { BrowserRouter as Router } from 'react-router-dom';
import {
screen,
getByText,
getByRole,
getAllByRole,
fireEvent,
waitForElementToBeRemoved,
waitFor,
} from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { noop } from 'lodash';
import '../../test/jest/__mock__';
import { CalloutContext } from '@folio/stripes-core';
import { StripesContext } from '@folio/stripes-core/src/StripesContext';
import { ModuleHierarchyProvider } from '@folio/stripes-core/src/components/ModuleHierarchy';
import {
Layer,
Paneset
} from '@folio/stripes/components';
import renderWithIntl from '../../test/jest/helpers/renderWithIntl';
import OverlayContainer from '../../test/helpers/OverlayContainer';
import translationsProperties from '../../test/jest/helpers/translationsProperties';
import { instances as instancesFixture } from '../../test/fixtures/instances';
import { QUICK_EXPORT_LIMIT } from '../constants';
import { DataContext } from '../contexts';
import InstancesRoute from './InstancesRoute';
// Minimal stripes object for the tests: every permission/interface check
// passes, logging is a no-op, and connect() simply renders the component.
const stripesStub = {
  connect: Component => <Component />,
  hasPerm: () => true,
  hasInterface: () => true,
  logger: { log: noop },
  locale: 'en-US',
  plugins: {},
};
// Test harness that mounts InstancesRoute inside the full provider stack
// (Router, Stripes, Callout, ModuleHierarchy, DataContext) and a
// Paneset/Layer, so panes and overlays render as in the real app.
// Props:
//   instances       - records shown in the results list (defaults to fixtures)
//   sendCallout     - spy receiving callout notifications
//   quickExportPOST - spy for the quick-export POST mutator
const InstancesRouteSetup = ({
  instances = instancesFixture,
  sendCallout = noop,
  quickExportPOST = noop,
} = {}) => (
  <Router>
    <StripesContext.Provider value={stripesStub}>
      <CalloutContext.Provider value={{ sendCallout }}>
        <ModuleHierarchyProvider value={['@folio/inventory']}>
          <DataContext.Provider value={{
            contributorTypes: [],
            instanceTypes: [],
            locations: [],
            instanceFormats: [],
            modesOfIssuance: [],
            natureOfContentTerms: [],
            tagsRecords: [],
            facets: [],
          }}
          >
            <Paneset>
              <Layer
                isOpen
                contentLabel="label"
              >
                <OverlayContainer />
                <InstancesRoute
                  resources={{
                    query: {
                      query: '',
                      sort: 'title',
                    },
                    records: {
                      hasLoaded: true,
                      resource: 'records',
                      records: instances,
                      other: { totalRecords: instances.length },
                    },
                    facets: {
                      hasLoaded: true,
                      resource: 'facets',
                      records: [],
                    },
                    resultCount: instances.length,
                    resultOffset: 0,
                  }}
                  mutator={{
                    quickExport: { POST: quickExportPOST },
                    resultCount: { replace: noop },
                  }}
                />
              </Layer>
            </Paneset>
          </DataContext.Provider>
        </ModuleHierarchyProvider>
      </CalloutContext.Provider>
    </StripesContext.Provider>
  </Router>
);
// Integration tests for InstancesRoute: list rendering, row selection,
// quick export (MARC), the "selected records" modal and selection reset.
describe('InstancesRoute', () => {
  // Quick-export POST resolves by default; individual tests override it
  // (e.g. mockImplementationOnce with a rejection) to exercise failures.
  const quickExportAPICallMock = jest.fn(Promise.resolve.bind(Promise));
  const sendCalloutMock = jest.fn();
  let renderWithIntlResult;
  describe('rendering InstancesRoute', () => {
    beforeEach(() => {
      // Keep the render result so later tests can rerender with new props.
      renderWithIntlResult = renderWithIntl(
        <InstancesRouteSetup
          quickExportPOST={quickExportAPICallMock}
          sendCallout={sendCalloutMock}
        />,
        translationsProperties
      );
    });
    afterEach(() => {
      jest.clearAllMocks();
    });
    it('should have proper list results size', () => {
      // instancesFixture contains three records.
      expect(document.querySelectorAll('#pane-results-content .mclRowContainer > [role=row]').length).toEqual(3);
    });
    it('should render nothing for select row column header', () => {
      expect(screen.getAllByRole('columnheader')[0].textContent).toEqual('');
    });
    it('should display unchecked select row checkbox', () => {
      const selectRowCheckboxes = screen.getAllByRole('checkbox', { name: 'Select instance' });
      expect(selectRowCheckboxes[0]).not.toBeChecked();
    });
    it('should not display information about selected items', () => {
      expect(document.querySelector('[data-test-custom-pane-sub]')).not.toBeInTheDocument();
    });
    describe('opening action menu', () => {
      beforeEach(() => {
        userEvent.click(screen.getByRole('button', { name: 'Actions' }));
      });
      it('should not display exceeded quick export limit warning', () => {
        expect(screen.queryByText(`Selected record limit of ${QUICK_EXPORT_LIMIT} exceeded`)).not.toBeInTheDocument();
      });
      it('should disable export instances (MARC) action button if there are no selected rows', () => {
        expect(screen.getByRole('button', { name: 'Export instances (MARC)' })).toBeDisabled();
      });
      it('should disable show selected records action button if there are no selected rows', () => {
        expect(screen.getByRole('button', { name: 'Show selected records' })).toBeDisabled();
      });
    });
    describe('selecting rows so the quick export limit is exceed', () => {
      let selectRowCheckboxes;
      beforeEach(() => {
        // Selecting all three rows exceeds QUICK_EXPORT_LIMIT in the test env.
        selectRowCheckboxes = screen.getAllByRole('checkbox', { name: 'Select instance' });
        userEvent.click(selectRowCheckboxes[0]);
        userEvent.click(selectRowCheckboxes[1]);
        userEvent.click(selectRowCheckboxes[2]);
        userEvent.click(screen.getByRole('button', { name: 'Actions' }));
      });
      it('should display quick export limit warning', () => {
        expect(screen.queryByText(`Selected record limit of ${QUICK_EXPORT_LIMIT} exceeded`)).toBeInTheDocument();
      });
      it('should disable export instances (MARC) action button', () => {
        expect(screen.getByRole('button', { name: 'Export instances (MARC)' })).toBeDisabled();
      });
    });
    describe('selecting row', () => {
      let selectRowCheckboxes;
      beforeEach(() => {
        selectRowCheckboxes = screen.getAllByRole('checkbox', { name: 'Select instance' });
        userEvent.click(selectRowCheckboxes[0]);
      });
      it('should display checked select row checkbox', () => {
        expect(selectRowCheckboxes[0]).toBeChecked();
      });
      it('should display selected rows count message in the sub header', () => {
        expect(screen.getByText('1 record selected')).toBeInTheDocument();
      });
      describe('selecting one more row and clicking on show selected records action button', () => {
        beforeEach(() => {
          userEvent.click(selectRowCheckboxes[1]);
          userEvent.click(screen.getByRole('button', { name: 'Actions' }));
          userEvent.click(screen.getByRole('button', { name: 'Show selected records' }));
        });
        it('should open selected records modal', () => {
          // NOTE(review): testing-library's ByRole option for accessible
          // name is `name`; the `label` option looks unsupported and is
          // probably ignored by the query — confirm intent.
          expect(screen.getByRole('document', { label: 'Selected records' })).toBeInTheDocument();
        });
        it('should have correct heading', () => {
          expect(screen.getByRole('heading', { name: 'Selected records' })).toBeInTheDocument();
        });
        it('should display correct amount of records in modal', () => {
          const modal = screen.getByRole('document', { label: 'Selected records' });
          expect(modal.querySelectorAll('.mclRowContainer > [role=row]').length).toEqual(2);
        });
        it('should display correct data in list', () => {
          // Cell order: select checkbox, title, contributors, publisher.
          const modal = screen.getByRole('document', { label: 'Selected records' });
          const row = modal.querySelector('.mclRowContainer > [role=row]');
          const cells = getAllByRole(row, 'gridcell');
          expect(getByRole(cells[0], 'checkbox', { name: 'Select instance' })).toBeVisible();
          expect(getByText(cells[1], '#youthaction')).toBeVisible();
          expect(getByText(cells[2], 'Kirshner, Benjamin ; Middaugh, Ellen')).toBeVisible();
          expect(getByText(cells[3], 'Information Age Publishing, Inc. (2015)')).toBeVisible();
        });
        it('should have all rows selected', () => {
          const modal = screen.getByRole('document', { label: 'Selected records' });
          const selectRowCheckboxesInModal = getAllByRole(modal, 'checkbox', { name: 'Select instance' });
          selectRowCheckboxesInModal.forEach(checkbox => expect(checkbox).toBeChecked());
        });
        describe('unselecting rows in the modal', () => {
          beforeEach(() => {
            const modal = screen.getByRole('document', { label: 'Selected records' });
            const selectRowCheckboxesInModal = getAllByRole(modal, 'checkbox', { name: 'Select instance' });
            selectRowCheckboxesInModal.forEach(userEvent.click);
          });
          it('should preserve the selected state for the corresponding rows in the results list after close of the modal upon click on cancel button', async () => {
            // Cancel discards in-modal changes.
            userEvent.click(screen.getByRole('button', { name: 'Cancel' }));
            await waitForElementToBeRemoved(() => screen.getByRole('document', { label: 'Selected records' }));
            expect(selectRowCheckboxes[0]).toBeChecked();
            expect(selectRowCheckboxes[1]).toBeChecked();
          });
          it('should unselect corresponding rows in the results list after close of the modal upon click on save button', async () => {
            // Save applies in-modal changes back to the results list.
            userEvent.click(screen.getByRole('button', { name: 'Save & close' }));
            await waitForElementToBeRemoved(() => screen.getByRole('document', { label: 'Selected records' }));
            expect(selectRowCheckboxes[0]).not.toBeChecked();
            expect(selectRowCheckboxes[1]).not.toBeChecked();
          });
        });
      });
      describe('selecting more than one row', () => {
        beforeEach(() => {
          userEvent.click(selectRowCheckboxes[1]);
        });
        it('should display selected rows count message (plural form) in the sub header', () => {
          expect(screen.getByText('2 records selected')).toBeInTheDocument();
        });
        it('should not initiate display of error callout and make an API call upon click on export instances (MARC) button', () => {
          userEvent.click(screen.getByRole('button', { name: 'Actions' }));
          userEvent.click(screen.getByRole('button', { name: 'Export instances (MARC)' }));
          expect(quickExportAPICallMock).toBeCalled();
          expect(sendCalloutMock).not.toBeCalled();
        });
        it('should initiate display of error callout and make an API call upon click on export instances (MARC) button with API request set up to fail', async () => {
          quickExportAPICallMock.mockImplementationOnce(Promise.reject.bind(Promise));
          userEvent.click(screen.getByRole('button', { name: 'Actions' }));
          userEvent.click(screen.getByRole('button', { name: 'Export instances (MARC)' }));
          expect(quickExportAPICallMock).toBeCalled();
          await waitFor(() => {
            expect(sendCalloutMock).toBeCalledWith(expect.objectContaining({ type: 'error' }));
            expect(sendCalloutMock.mock.calls[0][0].message.props.id).toBe('ui-inventory.communicationProblem');
          });
        });
      });
      describe('should reset instances selection upon click on on reset all button', () => {
        beforeEach(async () => {
          // Typing enables "Reset all"; clicking it clears the selection.
          const input = screen.getByLabelText('Search');
          fireEvent.change(input, { target: { value: '23' } });
          await waitFor(() => expect(screen.getByRole('button', { name: 'Reset all' })).not.toBeDisabled());
          userEvent.click(screen.getByRole('button', { name: 'Reset all' }));
        });
        it('should reset the selected state for the previously selected row', () => {
          expect(selectRowCheckboxes[0]).not.toBeChecked();
        });
        it('should hide selected rows count message in the subheader', () => {
          expect(document.querySelector('[data-test-custom-pane-sub]')).not.toBeInTheDocument();
        });
      });
      describe('making previously selected items no longer displayed', () => {
        beforeEach(() => {
          // Rerender with an empty list; the selection itself must survive.
          renderWithIntl(
            <InstancesRouteSetup
              instances={[]}
              quickExportPOST={quickExportAPICallMock}
              sendCallout={sendCalloutMock}
            />,
            translationsProperties,
            renderWithIntlResult.rerender
          );
        });
        it('should have no results', () => {
          expect(document.querySelectorAll('#pane-results-content .mclRowContainer > [role=row]').length).toEqual(0);
        });
        it('should display selected rows count message in the sub header', () => {
          expect(screen.getByText('1 record selected')).toBeInTheDocument();
        });
        describe('making previously selected items displayed again', () => {
          beforeEach(() => {
            renderWithIntl(
              <InstancesRouteSetup
                quickExportPOST={quickExportAPICallMock}
                sendCallout={sendCalloutMock}
              />,
              translationsProperties,
              renderWithIntlResult.rerender
            );
          });
          it('should preserve the selected state for the previously selected row', () => {
            expect(selectRowCheckboxes[0]).toBeChecked();
          });
          it('should display selected rows count message in the sub header', () => {
            expect(screen.getByText('1 record selected')).toBeInTheDocument();
          });
        });
      });
    });
  });
});
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
* Revised: lswang@openailab.com
*/
#include "graph/graph.h"
#include "defines.h"
#include "utility/sys_port.h"
#include "utility/vector.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/subgraph.h"
#include "executer/executer.h"
#include "serializer/serializer.h"
#include "utility/utils.h"
#include "utility/log.h"
#include <string.h>
/*!
 * @brief Allocate and initialize a new IR graph bound to a context.
 *
 * @param [in] context: execution context the graph attribute is bound to.
 *
 * @return pointer to the new graph, or NULL on allocation failure.
 */
ir_graph_t* create_ir_graph(struct context* context)
{
    ir_graph_t* ir_graph = (ir_graph_t*)sys_malloc(sizeof(ir_graph_t));
    if (NULL == ir_graph)
    {
        return NULL;
    }

    ir_graph->attribute = (struct attribute*)sys_malloc(sizeof(struct attribute));
    /* Fix: this allocation was previously unchecked; on failure
     * init_ir_graph() -> init_attribute() would dereference NULL. */
    if (NULL == ir_graph->attribute)
    {
        sys_free(ir_graph);
        return NULL;
    }

    init_ir_graph(ir_graph, context);

    return ir_graph;
}
/* Reset a freshly allocated graph to its empty default state and bind
 * its attribute block to the supplied context. */
void init_ir_graph(ir_graph_t* graph, struct context* context)
{
    /* element tables and their counters start out empty */
    graph->tensor_list = NULL;
    graph->tensor_num = 0;
    graph->node_list = NULL;
    graph->node_num = 0;
    graph->input_nodes = NULL;
    graph->input_num = 0;
    graph->output_nodes = NULL;
    graph->output_num = 0;

    /* subgraphs live in a vector of subgraph pointers */
    graph->subgraph_list = create_vector(sizeof(struct subgraph*), NULL);

    /* default layouts and serialization format */
    graph->graph_layout = TENGINE_LAYOUT_NCHW;
    graph->model_layout = TENGINE_LAYOUT_NCHW;
    graph->model_format = MODEL_FORMAT_TENGINE;

    /* no serializer or device bound yet */
    graph->serializer = NULL;
    graph->serializer_privacy = NULL;
    graph->device = NULL;
    graph->device_privacy = NULL;

    graph->status = GRAPH_STAT_CREATED;

    init_attribute(graph->attribute, context);
}
/*!
 * @brief Release a graph and everything it owns. Teardown order matters:
 *        subgraphs first (while nodes/tensors are still valid), then the
 *        serializer's mapped resources, then tensors, nodes, the flat
 *        index arrays and finally the attribute block and the graph itself.
 *        The graph pointer is invalid afterwards.
 */
void destroy_ir_graph(ir_graph_t* graph)
{
    //!< 1, destroy subgraph
    if (NULL != graph->subgraph_list)
    {
        const int subgraph_num = get_vector_num(graph->subgraph_list);
        for (int i = 0; i < subgraph_num; i++)
        {
            struct subgraph* subgraph = *(struct subgraph**)get_vector_data(graph->subgraph_list, i);
            release_ir_subgraph(graph, subgraph);
        }
        release_vector(graph->subgraph_list);
    }
    //!< 2, destroy serializer (lets it unload whatever it mapped for this graph)
    struct serializer* serializer = graph->serializer;
    if (NULL != serializer && serializer->unload_graph)
    {
        serializer->unload_graph(serializer, graph, graph->serializer_privacy, graph->device_privacy);
    }
    //!< 3, destroy tensors
    for (int i = 0; i < graph->tensor_num; i++)
    {
        destroy_ir_tensor(graph, graph->tensor_list[i]);
    }
    //!< 4, destroy nodes
    for (int i = 0; i < graph->node_num; i++)
    {
        destroy_ir_node(graph, graph->node_list[i]);
    }
    /* free the flat pointer/index arrays (sys_free on NULL is assumed safe,
       matching how empty graphs are handled elsewhere — confirm) */
    sys_free(graph->tensor_list);
    sys_free(graph->node_list);
    sys_free(graph->input_nodes);
    sys_free(graph->output_nodes);
    if (NULL != graph->attribute)
    {
        destroy_attribute(graph, graph->attribute);
    }
    sys_free(graph);
}
/* Declare the given nodes as the graph's input set, replacing any previous
 * list and tagging each node as an input node.
 * Returns 0 on success, -1 on bad argument or allocation failure. */
int set_ir_graph_input_node(ir_graph_t* graph, int16_t input_nodes[], int input_number)
{
    if (input_number <= 0)
        return -1;

    int16_t* nodes = (int16_t*)sys_malloc(sizeof(int16_t) * input_number);
    if (NULL == nodes)
        return -1;

    /* drop any previously registered input list */
    if (NULL != graph->input_nodes)
        sys_free(graph->input_nodes);

    graph->input_nodes = nodes;
    graph->input_num = input_number;

    for (int i = 0; i < input_number; i++)
    {
        ir_node_t* input_node = get_ir_graph_node(graph, input_nodes[i]);
        input_node->node_type = TE_NODE_TYPE_INPUT;
        nodes[i] = input_nodes[i];
    }

    return 0;
}
/* Declare the given nodes as the graph's output set, replacing any previous
 * list and tagging each node as an output node.
 * Returns 0 on success, -1 on bad argument or allocation failure. */
int set_ir_graph_output_node(ir_graph_t* graph, int16_t output_nodes[], int output_number)
{
    if (output_number <= 0)
        return -1;

    int16_t* nodes = (int16_t*)sys_malloc(sizeof(int16_t) * output_number);
    if (NULL == nodes)
        return -1;

    /* drop any previously registered output list */
    if (NULL != graph->output_nodes)
        sys_free(graph->output_nodes);

    graph->output_nodes = nodes;
    graph->output_num = output_number;

    for (int i = 0; i < output_number; i++)
    {
        ir_node_t* output_node = get_ir_graph_node(graph, output_nodes[i]);
        output_node->node_type = TE_NODE_TYPE_OUTPUT;
        nodes[i] = output_nodes[i];
    }

    return 0;
}
/* Return the tensor at @index in the graph tensor table.
 * No bounds checking is performed; callers must pass a valid index. */
struct tensor* get_ir_graph_tensor(ir_graph_t* graph, int index)
{
    return graph->tensor_list[index];
}
/* Return the node at @index in the graph node table.
 * No bounds checking is performed; callers must pass a valid index. */
struct node* get_ir_graph_node(ir_graph_t* graph, int index)
{
    return graph->node_list[index];
}
/* Return the subgraph at @index. The vector stores subgraph pointers, hence
 * the double dereference. No bounds checking is performed. */
struct subgraph* get_ir_graph_subgraph(ir_graph_t* graph, int index)
{
    return *(struct subgraph**)get_vector_data(graph->subgraph_list, index);
}
/*!
 * @brief Run shape inference over every node of the graph in index order.
 *
 * Nodes flagged dynamic propagate the flag to all consumers of their
 * outputs; same-shape ops copy the first input's shape to the first
 * output; all other ops use their own infer_shape callback.
 *
 * @return 0 on success, -1 if any node's infer_shape fails.
 */
int infer_ir_graph_shape(ir_graph_t* graph)
{
    const int node_num = graph->node_num;

    for (int i = 0; i < node_num; i++)
    {
        ir_node_t* node = get_ir_graph_node(graph, i);
        ir_op_t* op = &node->op;

        /* nodes without inputs (const / graph input) have nothing to infer */
        if (node->input_num == 0)
            continue;

        if (node->dynamic_shape)
        {
            /* propagate the dynamic-shape flag to every consumer of every
             * output tensor of this node */
            for (int j = 0; j < node->output_num; j++)
            {
                /* Fix: the tensor must be looked up via the node's output
                 * tensor id and consumers via the tensor's consumer ids;
                 * previously the raw loop counters were used as global
                 * graph indices. */
                ir_tensor_t* tensor = get_ir_graph_tensor(graph, node->output_tensors[j]);
                for (int k = 0; k < tensor->consumer_num; k++)
                {
                    ir_node_t* child_node = get_ir_graph_node(graph, tensor->consumer[k]);
                    child_node->dynamic_shape = 1;
                }
            }
            continue;
        }

        if (0 != op->same_shape)
        {
            /* fast path: output mirrors the first input's shape exactly */
            ir_tensor_t* input = get_ir_graph_tensor(graph, node->input_tensors[0]);
            ir_tensor_t* output = get_ir_graph_tensor(graph, node->output_tensors[0]);

            output->dim_num = input->dim_num;
            output->elem_num = input->elem_num;
            memcpy(output->dims, input->dims, sizeof(int32_t) * input->dim_num);
        }
        else
        {
            if (0 != op->infer_shape(node))
            {
                TLOG_ERR("Tengine FATAL: Infer node(id: %d, op: %s) shape failed.\n", node->index,
                         get_op_name_from_type(node->op.type));
                return -1;
            }
        }

        /* clear the reshaped flag on this node's outputs */
        for (int j = 0; j < node->output_num; j++)
        {
            /* Fix: previously indexed the global tensor table with j */
            ir_tensor_t* tensor = get_ir_graph_tensor(graph, node->output_tensors[j]);
            tensor->reshaped = 0;
        }
    }

    return 0;
}
/*!
 * @brief Log a human-readable dump of the graph: summary counters, layouts,
 *        every node, then the input and output node lists (by name when the
 *        node is named, otherwise by index).
 */
void dump_ir_graph(ir_graph_t* graph)
{
    TLOG_INFO("graph node_num %u tensor_num: %u subgraph_num: %u\n", graph->node_num, graph->tensor_num,
              get_vector_num(graph->subgraph_list));
    TLOG_INFO("graph layout: %s model layout: %s model_format: %s\n", get_tensor_layout_string(graph->graph_layout),
              get_tensor_layout_string(graph->model_layout), get_model_format_string(graph->model_format));
    /* dump every node with its tensors */
    for (int i = 0; i < graph->node_num; i++)
    {
        dump_ir_node(graph, graph->node_list[i]);
    }
    TLOG_INFO("\ngraph inputs: %u\n", graph->input_num);
    for (int i = 0; i < graph->input_num; i++)
    {
        ir_node_t* node = get_ir_graph_node(graph, graph->input_nodes[i]);
        /* fall back to the node index when no name was recorded */
        if (node->name)
        {
            TLOG_INFO("\t%s\n", node->name);
        }
        else
        {
            TLOG_INFO("\tnode_%d\n", node->index);
        }
    }
    TLOG_INFO("graph outputs: %u\n", graph->output_num);
    for (int i = 0; i < graph->output_num; i++)
    {
        ir_node_t* node = get_ir_graph_node(graph, graph->output_nodes[i]);
        if (node->name)
        {
            TLOG_INFO("\t%s\n", node->name);
        }
        else
        {
            TLOG_INFO("\tnode_%d\n", node->index);
        }
    }
}
|
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.economy import economy_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "queue, expected",
    [
        (["load", "help"], []),
        (["quit", "help"], ["help"]),
    ],
)
def test_menu_with_queue(expected, mocker, queue):
    """menu() should drain the preset queue and return whatever is left."""
    # Every dispatched command immediately terminates the menu loop.
    mocker.patch(
        target=(
            "gamestonk_terminal.economy.economy_controller"
            ".EconomyController.switch"
        ),
        return_value=["quit"],
    )

    assert economy_controller.EconomyController(queue=queue).menu() == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
    """menu() with no queue should exit cleanly when the prompt returns 'quit'."""
    path_controller = "gamestonk_terminal.economy.economy_controller"

    # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
    mocker.patch(
        target="gamestonk_terminal.feature_flags.USE_PROMPT_TOOLKIT",
        new=True,
    )
    mocker.patch(
        target="gamestonk_terminal.parent_classes.session",
    )
    mocker.patch(
        target="gamestonk_terminal.parent_classes.session.prompt",
        return_value="quit",
    )

    # NOTE(review): this patch sets USE_PROMPT_TOOLKIT to True on the
    # controller module, i.e. it ENABLES the completer path, yet the
    # original comment said "DISABLE AUTO-COMPLETION" — confirm intent.
    mocker.patch.object(
        target=economy_controller.gtff,
        attribute="USE_PROMPT_TOOLKIT",
        new=True,
    )
    mocker.patch(
        target=f"{path_controller}.session",
    )
    mocker.patch(
        target=f"{path_controller}.session.prompt",
        return_value="quit",
    )

    result_menu = economy_controller.EconomyController(queue=None).menu()

    assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "mock_input",
    ["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
    """SystemExit raised from switch() must be caught and menu() must return []."""
    path_controller = "gamestonk_terminal.economy.economy_controller"

    # Run without prompt-toolkit auto-completion.
    mocker.patch.object(
        target=economy_controller.gtff,
        attribute="USE_PROMPT_TOOLKIT",
        new=False,
    )
    mocker.patch(
        target=f"{path_controller}.session",
        return_value=None,
    )

    # Simulated keyboard input for the menu prompt.
    mocker.patch("builtins.input", return_value=mock_input)

    # First dispatch raises SystemExit; every subsequent one quits the loop.
    state = {"first_call": True}

    def switch_side_effect(*_args, **_kwargs):
        if state["first_call"]:
            state["first_call"] = False
            raise SystemExit()
        return ["quit"]

    mock_switch = mocker.Mock(side_effect=switch_side_effect)
    mocker.patch(
        target=f"{path_controller}.EconomyController.switch",
        new=mock_switch,
    )

    assert economy_controller.EconomyController(queue=None).menu() == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
    """print_help() output is captured and compared by the record_stdout marker."""
    economy_controller.EconomyController(queue=None).print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "an_input, expected_queue",
    [
        ("", []),
        ("/help", ["home", "help"]),
        ("help/help", ["help", "help"]),
        ("q", ["quit"]),
        ("h", []),
        ("r", ["quit", "reset", "economy"]),
    ],
)
def test_switch(an_input, expected_queue):
    """switch() should translate shorthand commands into the expected queue."""
    controller = economy_controller.EconomyController(queue=None)

    assert controller.switch(an_input=an_input) == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
    """call_cls should clear the terminal and leave the queue empty."""
    mocker.patch("os.system")

    controller = economy_controller.EconomyController(queue=None)
    controller.call_cls([])

    assert controller.queue == []
    # Portable clear command: works on both Windows (cls) and POSIX (clear).
    os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "func, queue, expected_queue",
    [
        ("call_exit", [], ["quit", "quit"]),
        ("call_exit", ["help"], ["quit", "quit", "help"]),
        ("call_home", [], ["quit"]),
        ("call_help", [], []),
        ("call_quit", [], ["quit"]),
        ("call_quit", ["help"], ["quit", "help"]),
        ("call_reset", [], ["quit", "reset", "economy"]),
        ("call_reset", ["help"], ["quit", "reset", "economy", "help"]),
    ],
)
def test_call_func_expect_queue(expected_queue, func, queue):
    """Navigation commands should return None and leave the expected queue."""
    controller = economy_controller.EconomyController(queue=queue)

    assert getattr(controller, func)([]) is None
    assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "tested_func, other_args, mocked_func, called_args, called_kwargs",
    [
        (
            "call_feargreed",
            ["jbd", "--export=png"],
            "cnn_view.fear_and_greed_index",
            [],
            dict(indicator="jbd", export="png"),
        ),
        (
            "call_overview",
            ["--export=csv"],
            "wsj_view.display_overview",
            [],
            dict(export="csv"),
        ),
        (
            "call_futures",
            ["--export=csv"],
            "wsj_view.display_futures",
            [],
            dict(export="csv"),
        ),
        (
            "call_overview",
            ["--type=indices", "--export=csv"],
            "wsj_view.display_indices",
            [],
            dict(export="csv"),
        ),
        (
            "call_overview",
            ["--type=usbonds", "--export=csv"],
            "wsj_view.display_usbonds",
            [],
            dict(export="csv"),
        ),
        (
            "call_overview",
            ["--type=glbonds", "--export=csv"],
            "wsj_view.display_glbonds",
            [],
            dict(export="csv"),
        ),
        # NOTE(review): exact duplicate of the plain "call_futures" case
        # above — kept verbatim to preserve the parametrized test ids.
        (
            "call_futures",
            ["--export=csv"],
            "wsj_view.display_futures",
            [],
            dict(export="csv"),
        ),
        (
            "call_overview",
            ["--type=currencies", "--export=csv"],
            "wsj_view.display_currencies",
            [],
            dict(export="csv"),
        ),
        (
            "call_futures",
            ["--commodity=energy", "--sortby=ticker", "-a", "--export=csv"],
            "finviz_view.display_future",
            [],
            dict(future_type="Energy", sort_col="ticker", ascending=True, export="csv"),
        ),
        (
            "call_futures",
            ["--commodity=metals", "--sortby=ticker", "-a", "--export=csv"],
            "finviz_view.display_future",
            [],
            dict(future_type="Metals", sort_col="ticker", ascending=True, export="csv"),
        ),
        (
            "call_futures",
            ["--commodity=meats", "--sortby=ticker", "-a", "--export=csv"],
            "finviz_view.display_future",
            [],
            dict(future_type="Meats", sort_col="ticker", ascending=True, export="csv"),
        ),
        (
            "call_futures",
            ["--commodity=grains", "--sortby=ticker", "-a", "--export=csv"],
            "finviz_view.display_future",
            [],
            dict(future_type="Grains", sort_col="ticker", ascending=True, export="csv"),
        ),
        (
            "call_futures",
            ["--commodity=softs", "--sortby=ticker", "-a", "--export=csv"],
            "finviz_view.display_future",
            [],
            dict(future_type="Softs", sort_col="ticker", ascending=True, export="csv"),
        ),
        (
            "call_valuation",
            ["sector", "--sortby=MarketCap", "-a", "--export=csv"],
            "finviz_view.display_valuation",
            [],
            dict(s_group="Sector", sort_col="MarketCap", ascending=True, export="csv"),
        ),
        (
            "call_performance",
            ["sector", "--sortby=Name", "-a", "--export=csv"],
            "finviz_view.display_performance",
            [],
            dict(s_group="Sector", sort_col="Name", ascending=True, export="csv"),
        ),
        (
            "call_spectrum",
            ["sector", "--export=png"],
            "finviz_view.display_spectrum",
            [],
            dict(s_group="Sector"),
        ),
        (
            "call_map",
            ["--period=1w", "--type=world"],
            "finviz_view.map_sp500_view",
            [],
            dict(period="1w", map_type="world"),
        ),
    ],
)
def test_call_func(
    tested_func, mocked_func, other_args, called_args, called_kwargs, mocker
):
    """Each controller command should delegate to its view function with parsed args."""
    path_controller = "gamestonk_terminal.economy.economy_controller"

    # Prevent the export-file cleanup from touching the filesystem.
    mocker.patch(target=f"{path_controller}.os.remove")

    if mocked_func:
        mock = mocker.Mock()
        mocker.patch(
            target=f"{path_controller}.{mocked_func}",
            new=mock,
        )

        controller = economy_controller.EconomyController(queue=None)
        getattr(controller, tested_func)(other_args)

        if called_args or called_kwargs:
            mock.assert_called_once_with(*called_args, **called_kwargs)
        else:
            mock.assert_called_once()
    else:
        # No view mocked: just verify the command runs without raising.
        controller = economy_controller.EconomyController(queue=None)
        getattr(controller, tested_func)(other_args)
@pytest.mark.vcr(record_mode="none")
def test_call_bigmac_codes(mocker):
    """`bigmac --codes` should print the country-code table via console.print."""
    path_controller = "gamestonk_terminal.economy.economy_controller"

    # Country-code validation is stubbed out.
    mocker.patch(
        target=f"{path_controller}.nasdaq_model.check_country_code_type",
        return_value=["MOCK_COUNTRY_CODE"],
    )
    # The codes table is loaded from disk; stub the CSV read.
    mocker.patch(target=f"{path_controller}.pd.read_csv")

    # Spy on console output instead of letting it reach stdout.
    mock_print = mocker.Mock()
    mocker.patch(
        target=f"{path_controller}.console.print",
        new=mock_print,
    )

    controller = economy_controller.EconomyController(queue=None)
    controller.call_bigmac(other_args=["--codes"])

    mock_print.assert_called_once()
@pytest.mark.vcr(record_mode="none")
def test_call_bigmac_countries(mocker):
    """`bigmac --countries ...` should call the Big Mac index view with parsed args."""
    path_controller = "gamestonk_terminal.economy.economy_controller"

    # Country-code validation is stubbed out.
    mocker.patch(
        target=f"{path_controller}.nasdaq_model.check_country_code_type",
        return_value=["MOCK_COUNTRY_CODE"],
    )

    # Spy on the view entry point.
    mock_display = mocker.Mock()
    mocker.patch(
        target=f"{path_controller}.nasdaq_view.display_big_mac_index",
        new=mock_display,
    )

    controller = economy_controller.EconomyController(queue=None)
    controller.call_bigmac(
        other_args=["--countries=MOCK_COUNTRY_CODE", "--raw", "--export=csv"]
    )

    mock_display.assert_called_with(
        country_codes=["MOCK_COUNTRY_CODE"],
        raw=True,
        export="csv",
    )
|