text
stringlengths 3
1.05M
|
|---|
'use strict';
var aFrom = require('es5-ext/array/from')
, source = require('./__playground');
module.exports = function (t, a) {
var testType, testObj;
testType = function (Type) {
a.h2("Constructor");
a(Type.regularType, 'bar', "Value");
a(Type.getOwnDescriptor('regularType').type, source.String, "Type");
a.h2("Prototype");
testObj(Type.prototype);
testObj(Type.prototype.nested);
};
testObj = function (obj) {
a.h3("Regular");
a(obj.regular, 'foo', "Value");
a(obj.getOwnDescriptor('regular').type, source.String, "Type");
a.h3("Computed");
a(obj.regularComputed, 'foobar');
a.h3("Multiple");
a.deep(aFrom(obj.multiple), [2, 3]);
a.h3("Multiple computed");
a.deep(aFrom(obj.multipleComputed), ['foo', 'fooraz']);
};
t(source.TypeB, source.TypeA.extend('TypeBCopy'));
testType(source.TypeBCopy);
};
|
''' Else Statements
Code that executes if the condition checked evaluates to False.
if(Condition)
'''
|
'''
Created on Feb 20, 2016
@author: Rahul Tanwani
'''
from __future__ import absolute_import, unicode_literals
from abc import ABCMeta
from concurrent.futures._base import Executor as BaseExecutor, Future
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
def default_result_handler(future, *args, **kwargs):
    """Resolve ``future`` and return its result.

    Extra positional/keyword arguments (notably the ``timeout`` that
    ``Executor.execute`` passes) are forwarded to ``Future.result``.
    Previously they were accepted but silently discarded, so the
    configured timeout never took effect.
    """
    return future.result(*args, **kwargs)
class SequentialPoolExecutor(BaseExecutor):
    """A pool-shaped executor that runs submitted callables inline,
    one at a time, in the calling thread."""

    def __init__(self, *args, **kwargs):
        # Arguments are accepted (and stored) only for signature
        # compatibility with the real thread/process pool executors.
        self._args = args
        self._kwargs = kwargs

    def submit(self, fn, *args, **kwargs):
        """Run ``fn(*args, **kwargs)`` immediately and wrap the outcome
        (result or raised exception) in an already-resolved Future."""
        future = Future()
        try:
            outcome = fn(*args, **kwargs)
        except BaseException as exc:
            future.set_exception(exc)
            # Break a reference cycle with the exception 'exc', copied from stdlib
            self = None  # noqa: W0612
        else:
            future.set_result(outcome)
        return future
class Executor(object):
    '''Base executor class to encapsulate the job execution.

    Subclasses provide ``executor_cls``, the pool class used to run jobs.
    '''
    # NOTE(review): ``__metaclass__`` only has an effect on Python 2; on
    # Python 3 this attribute is inert and the class is not abstract.
    __metaclass__ = ABCMeta

    def __init__(self, num_workers, timeout=None):
        '''Create a pool for (maybe) concurrent execution with specified number of workers.'''
        self.num_workers = num_workers
        self.timeout = timeout

    def get_executor_pool(self, num_workers=None):
        # Fall back to the worker count configured at construction time.
        return self.executor_cls(num_workers or self.num_workers)

    def execute(self, requests, resp_generator,
                result_handler=default_result_handler,
                *args, **kwargs):
        '''Calls the resp_generator for all the requests in parallel in an asynchronous way'''
        with self.get_executor_pool() as pool:
            futures = [pool.submit(resp_generator, request, *args, **kwargs)
                       for request in requests]
            timeout = self.timeout
            # Collect results in submission order.
            return [result_handler(future, timeout) for future in futures]
class SequentialExecutor(Executor):
    '''An implementation of executor using no parallelism.'''
    # Jobs run inline in the calling thread via SequentialPoolExecutor.
    executor_cls = SequentialPoolExecutor
class ThreadBasedExecutor(Executor):
    '''An implementation of executor using threads for parallelism.'''
    # Backed by the stdlib concurrent.futures thread pool.
    executor_cls = ThreadPoolExecutor
class ProcessBasedExecutor(Executor):
    '''An implementation of executor using process(es) for parallelism.'''
    # Backed by the stdlib concurrent.futures process pool; jobs and
    # results must be picklable.
    executor_cls = ProcessPoolExecutor
|
#!/usr/bin/python
#encoding:utf-8
import re
import urllib
import urllib2
import requests
import cookielib
from bs4 import BeautifulSoup
class CSDN(object):
    """
    Scraping tool for the CSDN website: logs in and fetches the personal
    home page using three mechanisms (plain urllib, cookie-based urllib2,
    and a requests session).  Python 2 only (urllib2/cookielib, print
    statements).
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.login_url = 'https://passport.csdn.net/account/login?from=http://my.csdn.net/my/mycsdn'
        self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36'
        self.main_page_url = 'http://my.csdn.net/my/mycsdn'

    def login(self):
        """
        Log in to CSDN without using cookies.
        :return:
        """
        # Fetch the login page
        html = urllib.urlopen(self.login_url).read()
        data = urllib.urlencode(self.__parse_login_html(html))
        # The form's "action" attribute holds the relative POST target.
        action = re.search(r'action=\"(.*?)\"', html).group(1)
        print action
        # Log in
        response = urllib.urlopen("https://passport.csdn.net/" + action, data)
        print response.read()

    def view_main(self):
        """
        Visit the personal home page without cookies.
        Note: with cookies disabled, the logged-in page cannot be reached.
        :return:
        """
        headers = self.__prepare_headers()
        headers['Referer'] = 'http://my.csdn.net'
        request = urllib2.Request(self.main_page_url, headers=headers)
        response = urllib2.urlopen(request)
        print response.read()

    def login_cookie(self):
        """
        Log in to CSDN using a cookie jar.
        :return:
        """
        cookie = cookielib.CookieJar()
        handler = urllib2.HTTPCookieProcessor(cookie)
        opener = urllib2.build_opener(handler)
        response = opener.open(self.login_url)
        html = response.read()
        data = self.__parse_login_html(html)
        # POSTing to the login URL with the hidden-form fields logs us in;
        # the cookie jar captures the session cookies.
        response = opener.open(self.login_url, urllib.urlencode(data))
        print response.read()
        print cookie
        self.view_main_cookie(opener)

    def view_main_cookie(self, opener):
        """
        Visit the personal home page using the cookie-aware opener.
        :return:
        """
        headers = self.__prepare_headers()
        headers['Referer'] = 'http://my.csdn.net'
        opener.addheaders = list(headers.items())
        print opener.open(self.main_page_url).read()

    def login_session(self):
        """
        Log in to CSDN using a requests session.
        :return:
        """
        session = requests.session()
        # Fetch the login page
        html = session.get(self.login_url).text
        data = self.__parse_login_html_bs4(html)
        # Log in to CSDN
        response = session.post(self.login_url, data)
        print response.text
        self.view_main_session(session)

    def view_main_session(self, session):
        """
        Visit the personal home page using the requests session.
        :return:
        """
        headers = self.__prepare_headers()
        headers['Referer'] = 'http://my.csdn.net'
        print session.get(self.main_page_url, headers=headers).text

    def __parse_login_html(self, html):
        """
        Parse the hidden form parameters out of the login page.
        :param html: HTML of the login page
        :return: the login form parameters
        """
        lt = re.search(r'name=\"lt\" value=\"(.*?)\"', html).group(1)
        execution = re.search(r'name=\"execution\" value=\"(.*?)\"', html).group(1)
        _eventId = re.search(r'name=\"_eventId\" value=\"(.*?)\"', html).group(1)
        return {
            'username': self.username,
            'password': self.password,
            'lt': lt,
            'execution': execution,
            '_eventId': _eventId
        }

    def __parse_login_html2(self, html):
        """
        Automatically parse all hidden input elements with a regex.
        :param html:
        :return:
        """
        pattern = r'input type="hidden" name="(.*?)" value="(.*?)"'
        values_re = re.compile(pattern, re.M | re.I)
        values = re.findall(values_re, html)
        values = dict(values)
        values.update({
            'username': self.username,
            'password': self.password
        })
        return values

    def __parse_login_html_bs4(self, html):
        """
        Parse the hidden input elements with BeautifulSoup (bs4).
        :param html:
        :return:
        """
        values = {}
        soup = BeautifulSoup(html, "html.parser")
        inputs = soup.find_all('input', type='hidden')
        for input in inputs:
            values[input.get('name')] = input.get('value')
        values.update({
            'username': self.username,
            'password': self.password
        })
        return values

    def __prepare_headers(self):
        """
        Build the request headers for the CSDN site.
        :return:
        """
        header = {
            'User-Agent': self.user_agent,
            'Connection': 'keep-alive',
        }
        return header
# Runs at import time: replace the placeholder credentials with real ones
# before executing this script.
csdnobj = CSDN('username', 'password')
csdnobj.login_session()
|
# Copyright (c) 2014-2015 Bruno Daniel <bruno.daniel@blue-yonder.com>
# Copyright (c) 2015-2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import warnings
from pylint.extensions import docparams
def register(linter):
    """Required method to auto register this checker.

    This module is a deprecated shim: it simply registers
    ``pylint.extensions.docparams`` and warns about the move.

    :param linter: Main interface object for Pylint plugins
    :type linter: Pylint object
    """
    # stacklevel=2 attributes the warning to the code loading this plugin
    # rather than to this shim itself, making the warning actionable.
    warnings.warn(
        "This plugin is deprecated, use pylint.extensions.docparams instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    linter.register_checker(docparams.DocstringParameterChecker(linter))
|
# -*- coding: utf-8 -*-
"""
Parse, stream, create, sign and verify Bitcoin transactions as Tx structures.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import io
import warnings
from ..convention import SATOSHI_PER_COIN
from ..encoding import double_sha256, from_bytes_32
from ..serialize import b2h, b2h_rev, h2b, h2b_rev
from ..serialize.bitcoin_streamer import parse_struct, stream_struct
from ..intbytes import byte_to_int, int_to_bytes
from .TxIn import TxIn
from .TxOut import TxOut
from .Spendable import Spendable
from .exceptions import SolvingError
from .pay_to import script_obj_from_script, ScriptPayToScript
from .script import opcodes
from .script import tools
# Consensus constants (cf. Bitcoin's main.h/main.cpp).
MAX_MONEY = 21000000 * SATOSHI_PER_COIN  # total currency cap, in satoshis
MAX_BLOCK_SIZE = 1000000  # maximum serialized block size, in bytes
# Signature hash types, appended to the sighash being signed.
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80  # flag bitwise-or'ed with one of the above
class ValidationFailureError(Exception):
    """Raised when a transaction fails a structural or signature check."""
    pass
class BadSpendableError(Exception):
    """Raised when a claimed unspent does not match the authenticated
    source transaction (wrong index, value or script)."""
    pass
class Tx(object):
    """A Bitcoin transaction: version, inputs (txs_in), outputs (txs_out)
    and lock_time, plus an optional list of "unspents" -- the TxOut
    objects being spent by the inputs, used for signing and validation."""

    @classmethod
    def coinbase_tx(class_, public_key_sec, coin_value, coinbase_bytes=b'', version=1, lock_time=0):
        """
        Create the special "first in block" transaction that includes the mining fees.
        """
        tx_in = TxIn.coinbase_tx_in(script=coinbase_bytes)
        COINBASE_SCRIPT_OUT = "%s OP_CHECKSIG"
        script_text = COINBASE_SCRIPT_OUT % b2h(public_key_sec)
        script_bin = tools.compile(script_text)
        tx_out = TxOut(coin_value, script_bin)
        return class_(version, [tx_in], [tx_out], lock_time)

    @classmethod
    def parse(class_, f):
        """Parse a Bitcoin transaction Tx from the file-like object f."""
        version, count = parse_struct("LI", f)
        txs_in = [TxIn.parse(f) for _ in range(count)]
        count, = parse_struct("I", f)
        txs_out = [TxOut.parse(f) for _ in range(count)]
        lock_time, = parse_struct("L", f)
        return class_(version, txs_in, txs_out, lock_time)

    @classmethod
    def from_hex(class_, hex_string):
        """Return the Tx for the given hex string."""
        f = io.BytesIO(h2b(hex_string))
        tx = class_.parse(f)
        try:
            tx.parse_unspents(f)
        except Exception:
            # parsing unspents failed; the hex simply didn't include them
            tx.unspents = []
        return tx

    @classmethod
    def tx_from_hex(class_, hex_string):
        """Deprecated: use from_hex instead."""
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn("Call to deprecated function tx_from_hex, use from_hex instead",
                      category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)
        return class_.from_hex(hex_string)

    def __init__(self, version, txs_in, txs_out, lock_time=0, unspents=None):
        # BUG FIX: the default used to be the mutable literal [], which is
        # one shared list across every Tx constructed without unspents.
        self.version = version
        self.txs_in = txs_in
        self.txs_out = txs_out
        self.lock_time = lock_time
        self.unspents = [] if unspents is None else unspents

    def stream(self, f, blank_solutions=False):
        """Stream a Bitcoin transaction Tx to the file-like object f."""
        stream_struct("LI", f, self.version, len(self.txs_in))
        for t in self.txs_in:
            t.stream(f, blank_solutions=blank_solutions)
        stream_struct("I", f, len(self.txs_out))
        for t in self.txs_out:
            t.stream(f)
        stream_struct("L", f, self.lock_time)

    def as_bin(self, include_unspents=False):
        """Return the transaction as binary."""
        f = io.BytesIO()
        self.stream(f)
        if include_unspents and not self.missing_unspents():
            self.stream_unspents(f)
        return f.getvalue()

    def as_hex(self, include_unspents=False):
        """Return the transaction as hex."""
        return b2h(self.as_bin(include_unspents=include_unspents))

    def hash(self, hash_type=None):
        """Return the hash for this Tx object (with the hash_type appended
        to the serialization when given, as used for signature hashes)."""
        s = io.BytesIO()
        self.stream(s)
        if hash_type:
            stream_struct("L", s, hash_type)
        return double_sha256(s.getvalue())

    def blanked_hash(self):
        """
        Return the hash for this Tx object with solution scripts blanked.
        Useful for determining if two Txs might be equivalent modulo
        malleability. (That is, even if tx1 is morphed into tx2 using the malleability
        weakness, they will still have the same blanked hash.)
        """
        s = io.BytesIO()
        self.stream(s, blank_solutions=True)
        return double_sha256(s.getvalue())

    def id(self):
        """Return the human-readable hash for this Tx object."""
        return b2h_rev(self.hash())

    def signature_hash(self, tx_out_script, unsigned_txs_out_idx, hash_type):
        """
        Return the canonical hash for a transaction. We need to
        remove references to the signature, since it's a signature
        of the hash before the signature is applied.
        tx_out_script: the script the coins for unsigned_txs_out_idx are coming from
        unsigned_txs_out_idx: where to put the tx_out_script
        hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL,
        optionally bitwise or'ed with SIGHASH_ANYONECANPAY
        """
        # In case concatenating two scripts ends up with two codeseparators,
        # or an extra one at the end, this prevents all those possible incompatibilities.
        tx_out_script = tools.delete_subscript(tx_out_script, int_to_bytes(opcodes.OP_CODESEPARATOR))

        # blank out other inputs' signatures
        def tx_in_for_idx(idx, tx_in):
            if idx == unsigned_txs_out_idx:
                return TxIn(tx_in.previous_hash, tx_in.previous_index, tx_out_script, tx_in.sequence)
            return TxIn(tx_in.previous_hash, tx_in.previous_index, b'', tx_in.sequence)

        txs_in = [tx_in_for_idx(i, tx_in) for i, tx_in in enumerate(self.txs_in)]
        txs_out = self.txs_out

        # Blank out some of the outputs
        if (hash_type & 0x1f) == SIGHASH_NONE:
            # Wildcard payee
            txs_out = []
            # Let the others update at will
            for i in range(len(txs_in)):
                if i != unsigned_txs_out_idx:
                    txs_in[i].sequence = 0
        elif (hash_type & 0x1f) == SIGHASH_SINGLE:
            # This preserves the ability to validate existing legacy
            # transactions which followed a buggy path in Satoshi's
            # original code; note that higher level functions for signing
            # new transactions (e.g., is_signature_ok and sign_tx_in)
            # check to make sure we never get here (or at least they
            # should)
            if unsigned_txs_out_idx >= len(txs_out):
                # This should probably be moved to a constant, but the
                # likelihood of ever getting here is already really small
                # and getting smaller
                return (1 << 248)
            # Only lock in the txout payee at same index as txin; delete
            # any outputs after this one and set all outputs before this
            # one to "null" (where "null" means an empty script and a
            # value of -1)
            txs_out = [TxOut(0xffffffffffffffff, b'')] * unsigned_txs_out_idx
            txs_out.append(self.txs_out[unsigned_txs_out_idx])
            # Let the others update at will
            for i in range(len(self.txs_in)):
                if i != unsigned_txs_out_idx:
                    txs_in[i].sequence = 0

        # Blank out other inputs completely, not recommended for open transactions
        if hash_type & SIGHASH_ANYONECANPAY:
            txs_in = [txs_in[unsigned_txs_out_idx]]

        tmp_tx = Tx(self.version, txs_in, txs_out, self.lock_time)
        return from_bytes_32(tmp_tx.hash(hash_type=hash_type))

    def solve(self, hash160_lookup, tx_in_idx, tx_out_script, hash_type=SIGHASH_ALL, **kwargs):
        """
        Sign a standard transaction.
        hash160_lookup:
            An object with a get method that accepts a hash160 and returns the
            corresponding (secret exponent, public_pair, is_compressed) tuple or
            None if it's unknown (in which case the script will obviously not be signed).
            A standard dictionary will do nicely here.
        tx_in_idx:
            the index of the tx_in we are currently signing
        tx_out_script:
            the script of the tx_out referenced by the given tx_in
        Returns the solution script (or None if the input already verifies).
        """
        tx_in = self.txs_in[tx_in_idx]

        is_p2h = (len(tx_out_script) == 23 and byte_to_int(tx_out_script[0]) == opcodes.OP_HASH160
                  and byte_to_int(tx_out_script[-1]) == opcodes.OP_EQUAL)
        if is_p2h:
            # pay-to-script: the actual script to sign must be supplied
            # out-of-band through the p2sh_lookup keyword argument.
            hash160 = ScriptPayToScript.from_script(tx_out_script).hash160
            p2sh_lookup = kwargs.get("p2sh_lookup")
            if p2sh_lookup is None:
                raise ValueError("p2sh_lookup not set")
            if hash160 not in p2sh_lookup:
                raise ValueError("hash160=%s not found in p2sh_lookup" %
                                 b2h(hash160))
            script_to_hash = p2sh_lookup[hash160]
        else:
            script_to_hash = tx_out_script

        # Leave out the signature from the hash, since a signature can't sign itself.
        # The checksig op will also drop the signatures from its hash.
        signature_for_hash_type_f = lambda hash_type, script: self.signature_hash(
            script, tx_in_idx, hash_type)
        if tx_in.verify(tx_out_script, signature_for_hash_type_f):
            return
        sign_value = self.signature_hash(script_to_hash, tx_in_idx, hash_type=hash_type)
        the_script = script_obj_from_script(tx_out_script)
        solution = the_script.solve(
            hash160_lookup=hash160_lookup, sign_value=sign_value, signature_type=hash_type,
            existing_script=self.txs_in[tx_in_idx].script, **kwargs)
        return solution

    def sign_tx_in(self, hash160_lookup, tx_in_idx, tx_out_script, hash_type=SIGHASH_ALL, **kwargs):
        """Sign input tx_in_idx in place.
        BUG FIX: the hash_type argument used to be ignored -- SIGHASH_ALL
        was always passed to solve() regardless of what the caller asked for.
        """
        self.txs_in[tx_in_idx].script = self.solve(
            hash160_lookup, tx_in_idx, tx_out_script, hash_type=hash_type, **kwargs)

    def verify_tx_in(self, tx_in_idx, tx_out_script, expected_hash_type=None):
        """Raise ValidationFailureError if input tx_in_idx does not verify
        against tx_out_script."""
        tx_in = self.txs_in[tx_in_idx]
        signature_for_hash_type_f = lambda hash_type, script: self.signature_hash(
            script, tx_in_idx, hash_type)
        if not tx_in.verify(tx_out_script, signature_for_hash_type_f, expected_hash_type):
            raise ValidationFailureError(
                "just signed script Tx %s TxIn index %d did not verify" % (
                    b2h_rev(tx_in.previous_hash), tx_in_idx))

    def total_out(self):
        """Return the sum of the output values, in satoshis."""
        return sum(tx_out.coin_value for tx_out in self.txs_out)

    def tx_outs_as_spendable(self, block_index_available=0):
        """Return this transaction's outputs wrapped as Spendable objects."""
        h = self.hash()
        return [
            Spendable(tx_out.coin_value, tx_out.script, h, tx_out_index, block_index_available)
            for tx_out_index, tx_out in enumerate(self.txs_out)]

    def is_coinbase(self):
        """Return True if this is a coinbase (block reward) transaction."""
        return len(self.txs_in) == 1 and self.txs_in[0].is_coinbase()

    def __str__(self):
        return "Tx [%s]" % self.id()

    def __repr__(self):
        return "Tx [%s] (v:%d) [%s] [%s]" % (
            self.id(), self.version, ", ".join(str(t) for t in self.txs_in),
            ", ".join(str(t) for t in self.txs_out))

    def check(self):
        """
        Basic checks that don't depend on any context.
        Adapted from Bitcoin Core: main.cpp
        """
        if not self.txs_in:
            raise ValidationFailureError("txs_in = []")
        if not self.txs_out:
            raise ValidationFailureError("txs_out = []")
        # Size limits
        f = io.BytesIO()
        self.stream(f)
        size = len(f.getvalue())
        if size > MAX_BLOCK_SIZE:
            raise ValidationFailureError("size > MAX_BLOCK_SIZE")
        # Check for negative or overflow output values
        nValueOut = 0
        for tx_out in self.txs_out:
            if tx_out.coin_value < 0 or tx_out.coin_value > MAX_MONEY:
                raise ValidationFailureError("tx_out value negative or out of range")
            nValueOut += tx_out.coin_value
            if nValueOut > MAX_MONEY:
                raise ValidationFailureError("tx_out total out of range")
        # Check for duplicate inputs
        if [x for x in self.txs_in if self.txs_in.count(x) > 1]:
            raise ValidationFailureError("duplicate inputs")
        if self.is_coinbase():
            if not (2 <= len(self.txs_in[0].script) <= 100):
                raise ValidationFailureError("bad coinbase script size")
        else:
            refs = set()
            for tx_in in self.txs_in:
                # BUG FIX: this used to compare against b'0' * 32 (ASCII
                # zeros), which never equals an all-null prevout hash, so
                # the "prevout is null" check could never fire.
                if tx_in.previous_hash == b'\0' * 32:
                    raise ValidationFailureError("prevout is null")
                pair = (tx_in.previous_hash, tx_in.previous_index)
                if pair in refs:
                    raise ValidationFailureError("spendable reused")
                refs.add(pair)

    """
    The functions below here deal with an optional additional parameter: "unspents".
    This parameter is a list of tx_out objects that are referenced by the
    list of self.tx_in objects.
    """

    def unspents_from_db(self, tx_db, ignore_missing=False):
        """Populate self.unspents by looking up each input's source
        transaction in tx_db (raises KeyError unless ignore_missing)."""
        unspents = []
        for tx_in in self.txs_in:
            if tx_in.is_coinbase():
                unspents.append(None)
                continue
            tx = tx_db.get(tx_in.previous_hash)
            if tx and tx.hash() == tx_in.previous_hash:
                unspents.append(tx.txs_out[tx_in.previous_index])
            elif ignore_missing:
                unspents.append(None)
            else:
                raise KeyError(
                    "can't find tx_out for %s:%d" % (b2h_rev(tx_in.previous_hash), tx_in.previous_index))
        self.unspents = unspents

    def set_unspents(self, unspents):
        """Set self.unspents; must have one entry per input."""
        if len(unspents) != len(self.txs_in):
            raise ValueError("wrong number of unspents")
        self.unspents = unspents

    def missing_unspent(self, idx):
        """Return True if the unspent for input idx is not available."""
        if self.is_coinbase():
            return True
        if len(self.unspents) <= idx:
            return True
        return self.unspents[idx] is None

    def missing_unspents(self):
        """Return True unless every input has its unspent set (a coinbase
        needs none)."""
        if self.is_coinbase():
            return False
        return (len(self.unspents) != len(self.txs_in) or
                any(self.missing_unspent(idx) for idx, tx_in in enumerate(self.txs_in)))

    def check_unspents(self):
        """Raise ValueError unless the unspents are fully populated."""
        if self.missing_unspents():
            raise ValueError("wrong number of unspents. Call unspents_from_db or set_unspents.")

    def txs_in_as_spendable(self):
        """Return the unspents being consumed, wrapped as Spendable objects."""
        return [
            Spendable(tx_out.coin_value, tx_out.script, tx_in.previous_hash, tx_in.previous_index)
            for tx_in, tx_out in zip(self.txs_in, self.unspents)]

    def stream_unspents(self, f):
        """Stream the unspents to f (a None entry becomes a zero-value TxOut)."""
        self.check_unspents()
        for tx_out in self.unspents:
            if tx_out is None:
                tx_out = TxOut(0, b'')
            tx_out.stream(f)

    def parse_unspents(self, f):
        """Parse one serialized TxOut per input from f and set the unspents
        (a zero-value TxOut is treated as "unknown")."""
        unspents = []
        for _ in self.txs_in:
            tx_out = TxOut.parse(f)
            if tx_out.coin_value == 0:
                tx_out = None
            unspents.append(tx_out)
        self.set_unspents(unspents)

    def is_signature_ok(self, tx_in_idx, traceback_f=None):
        """Return True if input tx_in_idx verifies against its unspent."""
        tx_in = self.txs_in[tx_in_idx]
        if tx_in.is_coinbase():
            return True
        if len(self.unspents) <= tx_in_idx:
            return False
        unspent = self.unspents[tx_in_idx]
        if unspent is None:
            return False
        tx_out_script = self.unspents[tx_in_idx].script
        signature_for_hash_type_f = lambda hash_type, script: self.signature_hash(
            script, tx_in_idx, hash_type)
        return tx_in.verify(tx_out_script, signature_for_hash_type_f, traceback_f=traceback_f)

    def sign(self, hash160_lookup, hash_type=SIGHASH_ALL, **kwargs):
        """
        Sign a standard transaction.
        hash160_lookup:
            A dictionary (or another object with .get) where keys are hash160 and
            values are tuples (secret exponent, public_pair, is_compressed) or None
            (in which case the script will obviously not be signed).
        Returns self, with as many inputs signed as possible.
        """
        self.check_unspents()
        for idx, tx_in in enumerate(self.txs_in):
            if self.is_signature_ok(idx) or tx_in.is_coinbase():
                continue
            try:
                if self.unspents[idx]:
                    self.sign_tx_in(
                        hash160_lookup, idx, self.unspents[idx].script, hash_type=hash_type, **kwargs)
            except SolvingError:
                # best-effort: leave unsolvable inputs unsigned
                pass
        return self

    def bad_signature_count(self):
        """Return the number of inputs that do not verify."""
        count = 0
        for idx, tx_in in enumerate(self.txs_in):
            if not self.is_signature_ok(idx):
                count += 1
        return count

    def total_in(self):
        """Return the sum of the input values (requires unspents)."""
        if self.is_coinbase():
            return self.txs_out[0].coin_value
        self.check_unspents()
        return sum(tx_out.coin_value for tx_out in self.unspents)

    def fee(self):
        """Return total_in minus total_out (requires unspents)."""
        return self.total_in() - self.total_out()

    def validate_unspents(self, tx_db):
        """
        Spendable objects returned from blockchain.info or
        similar services contain coin_value information that must be trusted
        on faith. Mistaken coin_value data can result in coins being wasted
        to fees.
        This function solves this problem by iterating over the incoming
        transactions, fetching them from the tx_db in full, and verifying
        that the coin_values are as expected.
        Returns the fee for this transaction. If any of the spendables set by
        tx.set_unspents do not match the authenticated transactions, a
        ValidationFailureError is raised.
        """
        ZERO = b'\0' * 32
        tx_hashes = set(tx_in.previous_hash for tx_in in self.txs_in)

        # build a local copy of the DB
        tx_lookup = {}
        for h in tx_hashes:
            if h == ZERO:
                continue
            the_tx = tx_db.get(h)
            if the_tx is None:
                raise KeyError("hash id %s not in tx_db" % b2h_rev(h))
            if the_tx.hash() != h:
                raise KeyError("attempt to load Tx %s yielded a Tx with id %s" % (h2b_rev(h), the_tx.id()))
            tx_lookup[h] = the_tx

        for idx, tx_in in enumerate(self.txs_in):
            if tx_in.previous_hash == ZERO:
                continue
            if tx_in.previous_hash not in tx_lookup:
                raise KeyError("hash id %s not in tx_lookup" % b2h_rev(tx_in.previous_hash))
            txs_out = tx_lookup[tx_in.previous_hash].txs_out
            # BUG FIX: was ">", which let previous_index == len(txs_out)
            # through and then raised a bare IndexError below instead of
            # the intended BadSpendableError.
            if tx_in.previous_index >= len(txs_out):
                raise BadSpendableError("tx_out index %d is too big for Tx %s" %
                                        (tx_in.previous_index, b2h_rev(tx_in.previous_hash)))
            tx_out1 = txs_out[tx_in.previous_index]
            tx_out2 = self.unspents[idx]
            if tx_out1.coin_value != tx_out2.coin_value:
                raise BadSpendableError(
                    "unspents[%d] coin value mismatch (%d vs %d)" % (
                        idx, tx_out1.coin_value, tx_out2.coin_value))
            if tx_out1.script != tx_out2.script:
                raise BadSpendableError("unspents[%d] script mismatch!" % idx)

        return self.fee()
|
from colossalai.amp import AMP_TYPE

# ViT Base
# Training hyperparameters.
BATCH_SIZE = 128  # NOTE(review): presumably per-process batch size -- confirm with the trainer
DROP_RATE = 0.1   # dropout probability
NUM_EPOCHS = 2

# Mixed-precision training configuration (naive AMP mode).
fp16 = dict(
    mode=AMP_TYPE.NAIVE,
)
# Gradient clipping threshold (max norm).
clip_grad_norm = 1.0
|
/*
 * This is a list of all the capsicumized zlib interfaces.
 * Every one of the capsicumized functions has a define below; the list
 * is taken from zlib.h, in this same directory.
 * These defines are the commands passed to the real zlib listening
 * through a program, which recognizes them.  The defines have been
 * checked for duplicates.
 * Since the only things that we can Capsicumize are deflate() and
 * inflate(), we only have to care about sending the commands related to
 * the basic functions and the utility functions.
 */
#define MAXLEN (5*1024)
#define ZCAPCMD_DEFLATEINIT 0
#define ZCAPCMD_DEFLATE 1
#define ZCAPCMD_DEFLATEEND 2
#define ZCAPCMD_INFLATEINIT 3
#define ZCAPCMD_INFLATE 4
#define ZCAPCMD_INFLATEEND 5
/* Advanced functions */
#define ZCAPCMD_DEFLATESETDICTIONARY 6
#define ZCAPCMD_DEFLATECOPY 7
#define ZCAPCMD_DEFLATERESET 8
#define ZCAPCMD_DEFLATEPARAMS 9
#define ZCAPCMD_DEFLATETUNE 10
#define ZCAPCMD_DEFLATEBOUND 11
#define ZCAPCMD_DEFLATEPENDING 12
#define ZCAPCMD_DEFLATEPRIME 13
#define ZCAPCMD_DEFLATESETHEADER 14
#define ZCAPCMD_INFLATEINIT2 15
#define ZCAPCMD_INFLATESETDICTIONARY 16
#define ZCAPCMD_INFLATEGETDICTIONARY 17
#define ZCAPCMD_INFLATESYNC 18
#define ZCAPCMD_INFLATECOPY 19
#define ZCAPCMD_INFLATERESET 20
#define ZCAPCMD_INFLATERESET2 21
#define ZCAPCMD_INFLATEPRIME 22
#define ZCAPCMD_INFLATEMARK 23
#define ZCAPCMD_INFLATEGETHEADER 24
#define ZCAPCMD_INFLATEBACKINIT 25
#define ZCAPCMD_INFLATEBACK 26
#define ZCAPCMD_INFLATEBACKEND 27
#define ZCAPCMD_ZLIBCOMPILEFLAGS 28
/* utility functions */
#define ZCAPCMD_COMPRESSBOUND 29
/* gzip file access functions */
#define ZCAPCMD_GZOPEN 30
#define ZCAPCMD_GZBUFFER 31
#define ZCAPCMD_GZSETPARAMS 32
#define ZCAPCMD_GZREAD 33
#define ZCAPCMD_GZWRITE 34
#define ZCAPCMD_GZPRINTF 35
#define ZCAPCMD_GZPUTS 36
/* NOTE(review): the command numbers jump from 36 to 46 here; presumably
 * intentional (reserved range), but confirm against the peer that
 * interprets these codes. */
#define ZCAPCMD_GZGETS 46
#define ZCAPCMD_GZPUTC 47
#define ZCAPCMD_GZGETC 48
#define ZCAPCMD_GZUNGETC 49
#define ZCAPCMD_GZFLUSH 50
#define ZCAPCMD_GZSEEK 51
#define ZCAPCMD_GZREWIND 52
#define ZCAPCMD_GZTELL 53
#define ZCAPCMD_GZOFFSET 54
#define ZCAPCMD_GZEOF 55
#define ZCAPCMD_GZDIRECT 56
#define ZCAPCMD_GZCLOSE_R 57
#define ZCAPCMD_GZCLOSE_W 58
#define ZCAPCMD_GZERROR 59
#define ZCAPCMD_GZCLEARERR 60
/* checksum functions */
#define ZCAPCMD_ADLER32 61
#define ZCAPCMD_ADLER32_COMBINE 62
#define ZCAPCMD_CRC32 63
#define ZCAPCMD_CRC32_COMBINE 64
|
#!/usr/bin/env python
u"""
bilinear_interp.py (03/2021)
Bilinear interpolation of input data to output coordinates
CALLING SEQUENCE:
data = bilinear_interp(ilon,ilat,idata,lon,lat)
INPUTS:
ilon: longitude of tidal model
ilat: latitude of tidal model
idata: tide model data
lat: output latitude
lon: output longitude
OPTIONS:
fill_value: invalid value
dtype: output data type
OUTPUT:
data: interpolated data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
UPDATE HISTORY:
Updated 03/2021: replaced numpy bool/int to prevent deprecation warnings
Updated 12/2020: using numpy isclose to check corner points
Updated 08/2020: check that output coordinates are within bounds
allow small extrapolations if individual grid cells are invalid
Updated 07/2020: split into separate function
Updated 06/2020: use argmin and argmax in bilinear interpolation
Updated 09/2017: Rewritten in Python
"""
import numpy as np
#-- PURPOSE: bilinear interpolation of input data to output data
def bilinear_interp(ilon,ilat,idata,lon,lat,fill_value=np.nan,
    dtype=np.float64):
    """
    Bilinear interpolation of input data to output coordinates
    Arguments
    ---------
    ilon: longitude of tidal model
    ilat: latitude of tidal model
    idata: tide model data
    lat: output latitude
    lon: output longitude
    Keyword arguments
    -----------------
    fill_value: invalid value
    dtype: output data type
    Returns
    -------
    data: interpolated data
    """
    #-- NOTE(review): assumes ilon and ilat are 1-D and increasing, and
    #-- idata is a numpy masked array indexed as idata[lat, lon] (the code
    #-- indexes idata[YI, XI]); confirm against callers.
    #-- find valid points (within bounds)
    #-- (longitude bounds are inclusive here; latitude bounds exclusive)
    valid, = np.nonzero((lon >= ilon.min()) & (lon <= ilon.max()) &
        (lat > ilat.min()) & (lat < ilat.max()))
    #-- interpolate gridded data values to data
    npts = len(lon)
    #-- allocate to output interpolated data array
    data = np.ma.zeros((npts),dtype=dtype,fill_value=fill_value)
    data.mask = np.ones((npts),dtype=bool)
    #-- initially set all data to fill value
    data.data[:] = data.fill_value
    #-- for each valid point
    for i in valid:
        #-- calculating the indices for the original grid
        #-- (ix, iy are the lower-left cell corner containing the point)
        ix, = np.nonzero((ilon[0:-1] <= lon[i]) & (ilon[1:] > lon[i]))
        iy, = np.nonzero((ilat[0:-1] <= lat[i]) & (ilat[1:] > lat[i]))
        #-- corner data values for adjacent grid cells
        IM = np.ma.zeros((4),fill_value=fill_value,dtype=dtype)
        IM.mask = np.ones((4),dtype=bool)
        #-- corner weight values for adjacent grid cells
        WM = np.zeros((4))
        #-- build data and weight arrays
        #-- weight stored at index 3-j pairs each corner with the area of
        #-- the opposite sub-rectangle (standard bilinear weighting)
        for j,XI,YI in zip([0,1,2,3],[ix,ix+1,ix,ix+1],[iy,iy,iy+1,iy+1]):
            IM.data[j], = idata.data[YI,XI]
            IM.mask[j], = idata.mask[YI,XI]
            WM[3-j], = np.abs(lon[i]-ilon[XI])*np.abs(lat[i]-ilat[YI])
        #-- if on corner value: use exact
        if (np.isclose(lat[i],ilat[iy]) & np.isclose(lon[i],ilon[ix])):
            data.data[i] = idata.data[iy,ix]
            data.mask[i] = idata.mask[iy,ix]
        elif (np.isclose(lat[i],ilat[iy+1]) & np.isclose(lon[i],ilon[ix])):
            data.data[i] = idata.data[iy+1,ix]
            data.mask[i] = idata.mask[iy+1,ix]
        elif (np.isclose(lat[i],ilat[iy]) & np.isclose(lon[i],ilon[ix+1])):
            data.data[i] = idata.data[iy,ix+1]
            data.mask[i] = idata.mask[iy,ix+1]
        elif (np.isclose(lat[i],ilat[iy+1]) & np.isclose(lon[i],ilon[ix+1])):
            data.data[i] = idata.data[iy+1,ix+1]
            data.mask[i] = idata.mask[iy+1,ix+1]
        elif np.any(np.isfinite(IM) & (~IM.mask)):
            #-- find valid indices for data summation and weight matrix
            #-- (allows a small extrapolation when some corners are invalid)
            ii, = np.nonzero(np.isfinite(IM) & (~IM.mask))
            #-- calculate interpolated value for i
            data.data[i] = np.sum(WM[ii]*IM[ii])/np.sum(WM[ii])
            data.mask[i] = np.all(IM.mask[ii])
    #-- return interpolated values
    return data
|
import AHandler from '../../../Structures/Event/AHandler';
/**
 * Handler for the shard pre-ready event.
 * Intentionally does nothing; the event requires no action here.
 */
export default class ShardPreReadyHandler extends AHandler {
  handle() {
    // No work to perform before a shard becomes ready.
    return null;
  }
}
|
import React, { Component } from 'react'
import FaHome from 'react-icons/md/arrow-back'
import { Motion, spring } from 'react-motion'
import Button from 'components/Button'
// Spring stiffness shared by both label animations.
const springConfig = { stiffness: 290 }

// "Back to home" button: shows the project name and, on hover, slides the
// name out while sliding a "Back" label in (react-motion springs).
class BackButton extends Component {
  // isOver tracks whether the pointer is over the button and drives the
  // spring targets in render().
  state = {
    isOver: false,
  }
  handleMouseEnter = () => this.setState({ isOver: true })
  handleMouseLeave = () => this.setState({ isOver: false })
  render() {
    const { projectName } = this.props
    const { isOver } = this.state
    return (
      <Button
        className="cu-d ellipsis"
        transparent
        link
        to="/"
        onMouseEnter={this.handleMouseEnter}
        onMouseLeave={this.handleMouseLeave}
        style={{ minWidth: 76 }}
      >
        <FaHome className="mr-5" />
        <div className="r d-f ai-c" style={{ height: '100%' }}>
          <Motion style={{ y: spring(isOver ? -30 : 0, springConfig) }}>
            {m => (
              <b
                className="ellipsis"
                style={{
                  transform: `translate3d(0, ${m.y}px, 0)`,
                  maxWidth: 250,
                }}
              >
                {projectName}
              </b>
            )}
          </Motion>
          <div className="sticky d-f ai-c">
            <Motion style={{ y: spring(isOver ? 0 : 30, springConfig) }}>
              {m => <b style={{ transform: `translate3d(0, ${m.y}px, 0)` }}>{'Back'}</b>}
            </Motion>
          </div>
        </div>
      </Button>
    )
  }
}
export default BackButton
|
from django.db import models
# Create your models here.
class Transaction(models.Model):
    """A payment transaction recorded against a user account."""
    user = models.ForeignKey('users.Account', on_delete=models.CASCADE)
    # Identifier assigned by the payment gateway (presumably) -- stored as text.
    transaction_id = models.CharField(max_length=100)
    method = models.CharField(max_length=100)
    # NOTE(review): amount is stored as a string, not a numeric field --
    # confirm whether any arithmetic is done on it elsewhere before changing.
    amount = models.CharField(max_length=100)
    status = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.transaction_id
class Invoice(models.Model):
    """A customer invoice with shipping details and a lifecycle status."""
    STATUS = (
        ('New', 'New'),
        ('Accepted', 'Accepted'),
        ('Completed', 'Completed'),
        ('Cancelled', 'Cancelled'),
    )
    invoice_id = models.CharField(max_length=100)
    user = models.ForeignKey('users.Account', on_delete=models.SET_NULL, null=True)
    transaction = models.ForeignKey('orders.Transaction', on_delete=models.SET_NULL, null=True)
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    phone_number = models.CharField(max_length=50)
    street_address = models.CharField(max_length=50)
    ward = models.CharField(max_length=50)
    district = models.CharField(max_length=50)
    city = models.CharField(max_length=50)
    country = models.CharField(max_length=50)
    status = models.CharField(max_length=10, choices=STATUS, default='New')
    notes = models.CharField(max_length=100, blank=True)
    total = models.IntegerField()
    tax = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def full_name(self):
        """Return the customer's first and last name joined by a space."""
        return f"{self.first_name} {self.last_name}"
    def full_address(self):
        """Return the comma-separated shipping address, most specific first."""
        return f"{self.street_address}, {self.ward}, {self.district}, {self.city}, {self.country}"
    def __str__(self):
        """Represent the invoice by the customer's first name."""
        return self.first_name
class Detail(models.Model):
    """A single line item belonging to an invoice."""
    # Parent invoice; line items are removed with the invoice.
    invoice = models.ForeignKey('orders.Invoice', on_delete=models.CASCADE)
    # Purchased product; deleting the product removes its line items.
    item = models.ForeignKey('products.Item', on_delete=models.CASCADE)
    quantity = models.IntegerField()
    # NOTE(review): presumably the unit price captured at purchase time
    # (integer currency units) -- confirm against order-creation code.
    price = models.IntegerField()
    def __str__(self):
        return self.item.name
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
TEST WIDGET SELECTION.
Test widget selection effects.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['SelectionTest']
from test._utils import MenuUtils, surface, BaseTest
import copy
from pygame_menu.widgets import Button
from pygame_menu.widgets.selection import LeftArrowSelection, RightArrowSelection, \
HighlightSelection, NoneSelection, SimpleSelection
from pygame_menu.widgets.core.selection import Selection
from pygame_menu.widgets.selection.arrow_selection import ArrowSelection
class SelectionTest(BaseTest):
    def setUp(self) -> None:
        """
        Create and enable a fresh generic menu used by every test.
        """
        self.menu = MenuUtils.generic_menu()
        self.menu.enable()
    def test_copy(self) -> None:
        """
        Copying a selection effect (copy, deepcopy, .copy()) must produce
        distinct objects, never the same instance.
        """
        s = LeftArrowSelection()
        s1 = copy.copy(s)
        s2 = copy.deepcopy(s)
        s3 = s.copy()
        self.assertNotEqual(s, s1)
        self.assertNotEqual(s, s2)
        self.assertNotEqual(s, s3)
    def test_abstracts(self) -> None:
        """
        Abstract selection classes must raise NotImplementedError on draw().
        """
        w = Button('epic')
        # Create abstract selection object
        sel = Selection(0, 0, 0, 0)
        self.assertRaises(NotImplementedError, lambda: sel.draw(surface, w))
        # Create abstract arrow selection
        arrow = ArrowSelection(0, 0, 0, 0)
        self.assertRaises(NotImplementedError, lambda: arrow.draw(surface, w))
    def test_arrow(self) -> None:
        """
        Left/right arrow selection effects must render without error.
        """
        w = Button('epic')
        w.set_selection_effect(LeftArrowSelection())
        self.menu.add.generic_widget(w)
        self.menu.draw(surface)
        w.set_selection_effect(RightArrowSelection())
        self.menu.draw(surface)
        # Create abstract arrow selection
        arrow = ArrowSelection(0, 0, 0, 0)
        self.assertRaises(NotImplementedError, lambda: arrow.draw(surface, w))
    def test_highlight(self) -> None:
        """
        Test highlight selection: margins, rect inflation and border drawing.
        """
        w = Button('epic')
        border_width = 1
        margin_x = 18
        margin_y = 10
        w.set_selection_effect(HighlightSelection(
            border_width=border_width,
            margin_x=margin_x,
            margin_y=margin_y
        ))
        self.menu.add.generic_widget(w)
        self.menu.draw(surface)
        # noinspection PyTypeChecker
        sel: 'HighlightSelection' = w.get_selection_effect()
        self.assertEqual(sel.get_height(), margin_y)
        self.assertEqual(sel.get_width(), margin_x)
        # Test inflate: the widget rect grows by half the margin on each side
        rect = w.get_rect()
        inflate_rect = sel.inflate(rect)
        self.assertEqual(-inflate_rect.x + rect.x, sel.get_width() / 2)
        self.assertEqual(-inflate_rect.y + rect.y, sel.get_height() / 2)
        # Test margin xy: x applies to left/right, y to top/bottom
        sel.margin_xy(10, 20)
        self.assertEqual(sel.margin_left, 10)
        self.assertEqual(sel.margin_right, 10)
        self.assertEqual(sel.margin_top, 20)
        self.assertEqual(sel.margin_bottom, 20)
        # Test null border: drawing with zero border width must not raise
        sel._border_width = 0
        sel.draw(surface, w)
    def test_none(self) -> None:
        """
        NoneSelection must not inflate the rect nor apply font color, and
        is the default effect assigned by set_selection_effect().
        """
        w = Button('epic')
        w.set_selection_effect(NoneSelection())
        self.menu.add.generic_widget(w)
        self.menu.draw(surface)
        rect = w.get_rect()
        new_rect = w.get_selection_effect().inflate(rect)
        self.assertTrue(rect == new_rect)
        self.assertFalse(w.get_selection_effect().widget_apply_font_color)
        # Widgets default selection effect is None
        last_selection = w.get_selection_effect()
        w.set_selection_effect()
        self.assertIsInstance(w.get_selection_effect(), NoneSelection)
        self.assertNotEqual(w.get_selection_effect(), last_selection)
    def test_simple(self) -> None:
        """
        SimpleSelection must not inflate the rect but does apply font color.
        """
        w = Button('epic')
        w.set_selection_effect(SimpleSelection())
        self.menu.add.generic_widget(w)
        self.menu.draw(surface)
        rect = w.get_rect()
        new_rect = w.get_selection_effect().inflate(rect)
        self.assertTrue(rect == new_rect)
        self.assertTrue(w.get_selection_effect().widget_apply_font_color)
|
import sentencepiece as spm
import os
from collections import defaultdict
from math import sqrt
import numpy as np
from tqdm import tqdm
from math import log2 as log
from translation.utils import Opt
def preprocessCountDictionary(counts, vocab_size=8000):
    """Turn a token-id -> count mapping into a probability vector.

    Parameters
    ----------
    counts : Mapping[int, int]
        Token counts keyed by vocabulary id; missing ids count as 0.
    vocab_size : int, optional
        Length of the returned vector. Defaults to 8000, matching the BPE
        vocabulary size trained in ``getCounts``.

    Returns
    -------
    numpy.ndarray
        Probabilities for ids ``0..vocab_size-1`` summing to 1, or all
        zeros when ``counts`` is empty.
    """
    total = sum(counts.values())
    if total == 0:
        # Avoid ZeroDivisionError on an empty count dictionary.
        return np.zeros(vocab_size)
    # Use .get() rather than [] so a defaultdict argument is not mutated
    # by lookups of missing ids (the old counts[i] inserted zero entries).
    return np.array([counts.get(i, 0) / total for i in range(vocab_size)])
def HellingerDistance(count1, count2):
    """Hellinger distance between two token-count distributions.

    H(P, Q) = sqrt(sum_i (sqrt(p_i) - sqrt(q_i))**2) / sqrt(2), which lies
    in [0, 1]: 0 for identical distributions, 1 for disjoint support.

    Parameters
    ----------
    count1, count2 : Mapping[int, int]
        Token counts; normalised internally via preprocessCountDictionary.

    Returns
    -------
    float
    """
    p = np.sqrt(preprocessCountDictionary(count1))
    q = np.sqrt(preprocessCountDictionary(count2))
    # Bug fix: the previous version returned sum((sqrt p - sqrt q)^2)/sqrt(2)
    # -- the outer square root was missing, so the value was not the
    # Hellinger distance and could exceed 1 (sqrt(2) for disjoint support).
    return sqrt(((p - q) ** 2).sum()) / sqrt(2)
def KullbackLeibler(P, Q):
    """Asymmetric KL divergence (base 2) between distributions P and Q.

    Terms where either probability is zero are skipped, mirroring the
    convention 0 * log(0/q) = 0 and avoiding division by zero.
    """
    return sum(p * log(p / q) for p, q in zip(P, Q) if p * q != 0)
def KullbackLeiblerDivergence(count1, count2):
    """Symmetrised (Jeffreys) KL divergence between two count distributions."""
    p = preprocessCountDictionary(count1)
    q = preprocessCountDictionary(count2)
    return (KullbackLeibler(p, q) + KullbackLeibler(q, p)) / 2
def JensenShannonDistance(count1, count2):
    """Jensen-Shannon divergence between two token-count distributions.

    NOTE(review): despite the name this returns the JS *divergence*; the
    JS distance is its square root -- confirm intended semantics with callers.
    """
    p = preprocessCountDictionary(count1)
    q = preprocessCountDictionary(count2)
    midpoint = (p + q) / 2
    return (KullbackLeibler(p, midpoint) + KullbackLeibler(q, midpoint)) / 2
def getCounts():
    """Train (or reuse) a shared 8k BPE model over both corpora and return
    per-language token-id counts.

    Returns
    -------
    (defaultdict, defaultdict)
        Token-id -> occurrence count for the source-language and
        target-language corpus respectively, over the shared vocabulary.
    """
    opt = Opt.get_instance()
    # Work inside the dataset directory; moved back up one level at the end.
    os.chdir(opt.dir_name)
    concatenatedFile = f"ConcatenatedFile"
    # Build the combined corpus once so SentencePiece learns a joint
    # vocabulary covering both languages.
    if not os.path.exists(concatenatedFile):
        with open(concatenatedFile, 'w') as fout:
            with open(f'{opt.dir_name}.{opt.src_lang}') as fin:
                fout.write(fin.read() + '\n')
            with open(f'{opt.dir_name}.{opt.trg_lang}') as fin:
                fout.write(fin.read() + '\n')
    # BPE with 8000 merges; pad/bos/eos disabled since only counts are needed.
    trainingOption = (f"--input={concatenatedFile} "
                      f"--model_prefix={concatenatedFile} "
                      f"--vocab_size=8000 --character_coverage=0.99 "
                      f"--model_type=bpe --pad_id=-1 --bos_id=-1 --eos_id=-1 ")
    # Training is skipped when a model file already exists (cached run).
    if not os.path.exists(concatenatedFile + '.model'):
        spm.SentencePieceTrainer.train(trainingOption)
    processor = spm.SentencePieceProcessor()
    processor.Init(model_file=concatenatedFile + ".model")
    lang1Count = defaultdict(int)
    lang2Count = defaultdict(int)
    # Encode each corpus line-by-line and tally token ids per language.
    with open(f'{opt.dir_name}.{opt.src_lang}') as fin:
        for line in tqdm(fin.readlines()):
            line = processor.encode(line.strip('\n'))
            for char in line:
                lang1Count[char] += 1
    with open(f'{opt.dir_name}.{opt.trg_lang}') as fin:
        for line in tqdm(fin.readlines()):
            line = processor.encode(line.strip('\n'))
            for char in line:
                lang2Count[char] += 1
    # NOTE(review): assumes opt.dir_name is a single path component so that
    # '..' restores the original working directory -- confirm.
    os.chdir('..')
    return lang1Count, lang2Count
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""The module for working with displaying progress bars for Ray engine."""
import ray
import os
import time
import threading
import warnings
# Registry of active bars keyed by "<cell_no>-<line_no>", shared across threads.
progress_bars = {}
# Guards concurrent creation/updates of entries in ``progress_bars``.
bar_lock = threading.Lock()
def call_progress_bar(result_parts, line_no):
    """
    Attach a progress bar to given `result_parts`.

    The progress bar is expected to be shown in a Jupyter Notebook cell.

    Parameters
    ----------
    result_parts : list of list of ray.ObjectRef
        Objects which are being computed for which progress is requested.
    line_no : int
        Line number in the call stack which we're displaying progress for.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            from tqdm.autonotebook import tqdm as tqdm_notebook
        except ImportError:
            raise ImportError("Please pip install tqdm to use the progress bar")
        from IPython import get_ipython
    try:
        cell_no = get_ipython().execution_count
    # This happens if we are not in ipython or jupyter.
    # No progress bar is supported in that case.
    except AttributeError:
        return
    pbar_id = str(cell_no) + "-" + str(line_no)
    futures = [x._data for row in result_parts for x in row]
    bar_format = (
        "{l_bar}{bar}{r_bar}"
        if "DEBUG_PROGRESS_BAR" in os.environ
        and os.environ["DEBUG_PROGRESS_BAR"] == "True"
        else "{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}"
    )
    # Hold the lock via a context manager so it is released even if tqdm or
    # widget construction raises; the previous bare acquire()/release() pair
    # leaked the lock on any exception in this section, deadlocking later calls.
    with bar_lock:
        if pbar_id in progress_bars:
            # A bar for this cell/line already exists: grow its total by the
            # number of new futures instead of creating a second bar.
            if hasattr(progress_bars[pbar_id], "container"):
                if hasattr(progress_bars[pbar_id].container.children[0], "max"):
                    index = 0
                else:
                    index = 1
                progress_bars[pbar_id].container.children[index].max = progress_bars[
                    pbar_id
                ].container.children[index].max + len(futures)
            progress_bars[pbar_id].total = progress_bars[pbar_id].total + len(futures)
            progress_bars[pbar_id].refresh()
        else:
            progress_bars[pbar_id] = tqdm_notebook(
                total=len(futures),
                desc="Estimated completion of line " + str(line_no),
                bar_format=bar_format,
            )
    # Periodically refresh the elapsed/remaining time display in background.
    threading.Thread(target=_show_time_updates, args=(progress_bars[pbar_id],)).start()
    # Advance the bar as each future completes.
    for i in range(1, len(futures) + 1):
        ray.wait(futures, num_returns=i)
        progress_bars[pbar_id].update(1)
        progress_bars[pbar_id].refresh()
    if progress_bars[pbar_id].n == progress_bars[pbar_id].total:
        progress_bars[pbar_id].close()
def display_time_updates(bar):
    """
    Start displaying the progress `bar` in a notebook.

    Parameters
    ----------
    bar : tqdm.tqdm
        The progress bar wrapper to display in a notebook cell.
    """
    # Refresh happens on a background thread so the caller is not blocked.
    updater = threading.Thread(target=_show_time_updates, args=(bar,))
    updater.start()
def _show_time_updates(p_bar):
"""
Refresh displayed progress bar `p_bar` periodically until it is complete.
Parameters
----------
p_bar : tqdm.tqdm
The progress bar wrapper being displayed to refresh.
"""
while p_bar.total > p_bar.n:
time.sleep(1)
if p_bar.total > p_bar.n:
p_bar.refresh()
|
import React, { useState } from 'react';
import _ from 'lodash';
import { Checkbox, Content, Container, Dropdown, Icon, Input, InputGroup, Grid, Row, Col, Panel, Table } from 'rsuite';
import { allEpCategories } from '../data/constants';
import ItemTooltip from './item_tooltip';
import { filterByItemName, filter1HOnly, itemsForSlot } from '../util/items';
const { Column, HeaderCell, Cell } = Table;
// Selectable class/spec combinations shown in the Class/Spec dropdown.
// Labels follow the "Class (Spec)" pattern; values are the EP-spec keys
// used by the sim. (Fixed label typos: "Hunter(Survival)" missing space,
// "Afflicton" -> "Affliction".)
const classes = [{
  label: 'Hunter (Beast Mastery)',
  value: 'hunter_bm'
},{
  label: 'Hunter (Survival)',
  value: 'hunter_surv'
},{
  label: 'Mage (Arcane)',
  value: 'mage_arcane'
},{
  label: 'Mage (Fire)',
  value: 'mage_fire'
},{
  label: 'Mage (Frost)',
  value: 'mage_frost'
},{
  label: 'Priest (Shadow)',
  value: 'priest_shadow'
},{
  label: 'Rogue (Assassination)',
  value: 'rogue_assassination'
},{
  label: 'Rogue (Combat)',
  value: 'rogue_combat'
},{
  label: 'Shaman (Elemental)',
  value: 'shaman_ele'
},{
  label: 'Shaman (Enhancement)',
  value: 'shaman_enh'
},{
  label: 'Warlock (Affliction + Ruin)',
  value: 'warlock_affliction_ruin'
},{
  label: 'Warlock (Destruction + Fire)',
  value: 'warlock_destruction_fire'
},{
  label: 'Warlock (Destruction + Shadow)',
  value: 'warlock_destruction_shadow'
},{
  label: 'Warrior (Arms)',
  value: 'warrior_arms'
},{
  label: 'Warrior (Fury)',
  value: 'warrior_fury'
},{
  label: 'Warrior (Kebab)',
  value: 'warrior_kebab'
}];
function IconCell({ rowData, dataKey, ...props }) {
const cellValue = rowData[dataKey]
if(cellValue) {
return (
<Cell {...props}>
<ItemTooltip item={rowData} allowClick={true}>
<img style={{ border: '1px solid #AAA', marginTop: '-10px', marginLeft: '-13px' }} src={`icons/${cellValue}`} />
</ItemTooltip>
</Cell>
)
}
return null;
}
function NameCell({ rowData, dataKey, ...props }) {
const cellValue = rowData.displayName || rowData.name
return (
<Cell {...props}>
<ItemTooltip item={rowData} allowClick={true}>
<p className={`q${rowData.quality}`} style={{ fontWeight: 800 }}>{cellValue}</p>
</ItemTooltip>
</Cell>
)
}
function ItemLevelCell({ rowData, dataKey, ...props }) {
const cellValue = rowData[dataKey]
return (
<Cell {...props}>
<p>{cellValue}</p>
</Cell>
)
}
function EpCell({ rowData, dataKey, ...props }) {
const cellValue = rowData[dataKey]
return (
<Cell {...props}>
<p>{cellValue}</p>
</Cell>
)
}
export default function() {
const [klass, setKlass] = useState('hunter_bm');
const [itemPhase, setItemPhase] = useState(2);
const [epCategory, setEpCategory] = useState('phase2');
function getCharacter() {
return {
class: klass.substring(0, klass.indexOf('_')),
epCategory,
epSpec: klass,
gear: {
rangedTotemLibram: null
}
}
}
function EPPanel({ header, slotName }) {
const [filter, setFilter] = useState('');
const [oneHandOnly, setOneHandOnly] = useState(slotName === 'mainHand');
const filters = [filterByItemName(filter), ...(oneHandOnly ? [filter1HOnly()] : [])]
const allRowData = itemsForSlot(slotName, getCharacter(), itemPhase, null, null, filters)
return (
<Panel header={header} collapsible bordered defaultExpanded={true} style={{ marginBottom: 10 }}>
<InputGroup inside style={{ margin: '0px 0 15px 0' }}>
<Input onChange={value => setFilter(value)} />
<InputGroup.Button>
<Icon icon="search" />
</InputGroup.Button>
</InputGroup>
{slotName == 'mainHand' ?
<InputGroup inside>
<Checkbox checked={oneHandOnly} onChange={() => setOneHandOnly(!oneHandOnly)}>One-Hand Only</Checkbox>
</InputGroup>
: null}
<Table height={400} rowHeight={60} data={allRowData} affixHorizontalScrollbar={-1000}>
<Column width={55}>
<HeaderCell></HeaderCell>
<IconCell dataKey="icon" />
</Column>
<Column flexGrow={4}>
<HeaderCell>Name</HeaderCell>
<NameCell dataKey="name" />
</Column>
<Column flexGrow={1}>
<HeaderCell>EP</HeaderCell>
<EpCell dataKey="ep" />
</Column>
<Column flexGrow={1}>
<HeaderCell>ilvl</HeaderCell>
<ItemLevelCell dataKey="itemLevel" />
</Column>
</Table>
</Panel>
)
}
function KlassSelect() {
function onSelect(kls) {
setKlass(kls)
}
const klassEntry = classes.find(kls => kls.value === klass)
const klassLabel = klassEntry ? klassEntry.label : 'None'
return (
<>
<Dropdown title="Class/Spec">
{classes.map(kls => {
return <Dropdown.Item key={kls.value} eventKey={kls.value} onSelect={onSelect}>{kls.label}</Dropdown.Item>
})}
</Dropdown>
<span>{klassLabel}</span>
</>
);
}
function PhaseSelect() {
const allPhases = [1, 2, 3, 4, 5]
function onSelect(phase) {
setItemPhase(phase)
}
return (
<>
<Dropdown title="Item Filter">
{allPhases.map(phase => {
return <Dropdown.Item key={phase} eventKey={phase} onSelect={onSelect}>Phase {phase}</Dropdown.Item>
})}
</Dropdown>
<span>Phase {itemPhase}</span>
</>
);
}
function EpSelect() {
const epCategoryEntry = allEpCategories.find(epc => epc.key == epCategory)
if(epCategoryEntry == null) return null;
const epCategoryName = epCategoryEntry.name;
function onSelect(epCategory) {
setEpCategory(epCategory)
}
return (
<>
<Dropdown title="EP Category">
{allEpCategories.map(epCategory => {
return <Dropdown.Item key={epCategory.key} eventKey={epCategory.key} onSelect={onSelect}>{epCategory.name}</Dropdown.Item>
})}
</Dropdown>
<span>{epCategoryName}</span>
</>
);
}
return (
<Content>
<Container style={{ marginTop: 20, marginBottom: 20, marginLeft: 15 }}>
<Row>
<h5>Gear Equivalence Points</h5>
<p>This page is meant to be used as a quick comparison between candidates for an item slot, using TBCSim EP.</p>
<br/>
<h5>Notes and Caveats</h5>
<ul>
<li>Item procs and set bonuses are not currently included in total item EP when selecting items in the UI (Procs and sets are fully modeled in the sim)</li>
</ul>
</Row>
</Container>
<Grid fluid={true}>
<Row>
<KlassSelect />
<PhaseSelect />
<EpSelect />
</Row>
<Container style={{ margin: 15 }}>
{(!klass || !epCategory || !itemPhase) ?
<Row>Please select a class and EP category</Row>
:
<Row>
<Col xs={8}>
<EPPanel header='Head' slotName='head' bordered />
<EPPanel header='Neck' slotName='neck' bordered />
<EPPanel header='Shoulders' slotName='shoulders' bordered />
<EPPanel header='Back' slotName='back' bordered />
<EPPanel header='Chest' slotName='chest' bordered />
<EPPanel header='Wrists' slotName='wrists' bordered />
</Col>
<Col xs={8}>
<EPPanel header='Main Hand' slotName='mainHand' bordered />
<EPPanel header='Off Hand' slotName='offHand' bordered />
<EPPanel header='Ranged' slotName='rangedTotemLibram' bordered />
</Col>
<Col xs={8}>
<EPPanel header='Hands' slotName='hands' bordered />
<EPPanel header='Waist' slotName='waist' bordered />
<EPPanel header='Legs' slotName='legs' bordered />
<EPPanel header='Feet' slotName='feet' bordered />
<EPPanel header='Ring' slotName='ring1' bordered />
<EPPanel header='Trinket' slotName='trinket1' bordered />
</Col>
</Row>
}
</Container>
</Grid>
</Content>
)
}
|
const Shard = require('./Shard');
class WebSocketManager {
    constructor(client) {
        this.client = client;
        /**
         * A map of active server shards.
         * @type {Map<string, Shard>}
         */
        this.shards = new Map();
        this.totalShards = 0;
        this.readyAt = 0;
    }

    /**
     * Disconnects every shard and resets the manager to its initial state.
     * No-op when the manager never became ready.
     */
    destroy() {
        if (!this.readyAt) return;
        for (const shard of this.shards.values()) {
            shard.disconnect();
        }
        this.shards.clear();
        this.readyAt = 0;
        this.client.emit('debug', `[WS] Destroyed ${this.totalShards} shard(s)`);
        this.totalShards = 0;
    }

    /**
     * Average ping across all shards, or -1 when there are none.
     * @type {number}
     */
    get ping() {
        if (!this.totalShards) return -1;
        const total = [...this.shards.values()].reduce(
            (sum, shard) => sum + shard.ping,
            0
        );
        return total / this.totalShards;
    }

    /**
     * Adds a server to be connected to websockets.
     * @param {string} id The identifier of the server.
     * @returns {Shard} Created (or reused) shard.
     */
    createShard(id) {
        const existing = this.shards.get(id);
        if (existing) return existing;
        const shard = new Shard(this.client, id);
        this.shards.set(id, shard);
        this.totalShards++;
        return shard;
    }

    /**
     * Removes a server from websocket connections.
     * @param {string} id The identifier of the server.
     * @returns {boolean} Whether shard was removed.
     */
    removeShard(id) {
        const removed = this.shards.delete(id);
        if (removed) this.totalShards--;
        return removed;
    }
}
module.exports = WebSocketManager;
|
// View model for a single installment plan in the checkout UI.
// `plan` carries numPayments / rate / fee / option; `rateToCurrent` is the
// exchange rate applied to the per-payment fee -- NOTE(review): confirm the
// fee is quoted in a foreign currency and that rate is a percentage.
define(['df', 'df-lodash', 'Df_Checkout/data'], function(df, _, dfc) {'use strict'; return(
function(plan, rateToCurrent) {return {
	// Total to pay: grand total marked up by plan.rate percent, plus the
	// converted per-payment fee times the number of payments, rounded.
	amount: df.c(function() {return Math.round(
		dfc.grandTotal() * (1 + plan.rate / 100) + plan.fee * rateToCurrent * plan.numPayments
	);}),
	amountS: function() {return dfc.formatMoney(this.amount());},
	domId: function() {return 'df-plan-' + plan.numPayments;},
	/**
	 * There is no need to compute every installment amount ourselves:
	 * the bank collects any indivisible remainder with the first payment.
	 * Example: a total of 1733 split into 6 payments puts the remainder
	 * into the first one: 293, 288, 288, 288, 288, 288.
	 * @returns {Number}
	 */
	firstPayment: df.c(function() {
		var remainder = this.amount() % plan.numPayments;
		var singlePaymentAmount = Math.floor(this.amount() / plan.numPayments);
		return remainder + singlePaymentAmount;
	}),
	firstPaymentS: function() {return dfc.formatMoney(this.firstPayment());},
	numPayments: plan.numPayments,
	// Selects this plan when its table row is clicked; returning true lets
	// the browser continue default event handling.
	onRowClicked: function(_this, event) {
		plan.option(_this.numPayments);
		return true;
	}
};});});
|
/*
* This source code is public domain.
*
* Authors: Olivier Lapicque <olivierl@jps.net>
*/
#include "libmodplug.h"
#ifdef SNDFX_C
// Portamento speeds for Impulse Tracker's volume-column portamento command,
// indexed by the 4-bit volume-command parameter (top entries saturate at 0xFF).
static const BYTE ImpulseTrackerPortaVolCmd[16] =
{
	0x00, 0x01, 0x04, 0x08, 0x10, 0x20, 0x40, 0x60,
	0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
// Period table for Protracker octaves 0-5:
// 12 semitone periods per octave, highest-pitched octave last.
static const WORD ProTrackerPeriodTable[6*12] =
{
	1712,1616,1524,1440,1356,1280,1208,1140,1076,1016,960,907,
	856,808,762,720,678,640,604,570,538,508,480,453,
	428,404,381,360,339,320,302,285,269,254,240,226,
	214,202,190,180,170,160,151,143,135,127,120,113,
	107,101,95,90,85,80,75,71,67,63,60,56,
	53,50,47,45,42,40,37,35,33,31,30,28
};
// ProTracker periods for one octave at each of the 16 finetune settings
// (12 entries per finetune row).
static const WORD ProTrackerTunedPeriods[16*12] =
{
	1712,1616,1524,1440,1356,1280,1208,1140,1076,1016,960,907,
	1700,1604,1514,1430,1348,1274,1202,1134,1070,1010,954,900,
	1688,1592,1504,1418,1340,1264,1194,1126,1064,1004,948,894,
	1676,1582,1492,1408,1330,1256,1184,1118,1056,996,940,888,
	1664,1570,1482,1398,1320,1246,1176,1110,1048,990,934,882,
	1652,1558,1472,1388,1310,1238,1168,1102,1040,982,926,874,
	1640,1548,1460,1378,1302,1228,1160,1094,1032,974,920,868,
	1628,1536,1450,1368,1292,1220,1150,1086,1026,968,914,862,
	1814,1712,1616,1524,1440,1356,1280,1208,1140,1076,1016,960,
	1800,1700,1604,1514,1430,1350,1272,1202,1134,1070,1010,954,
	1788,1688,1592,1504,1418,1340,1264,1194,1126,1064,1004,948,
	1774,1676,1582,1492,1408,1330,1256,1184,1118,1056,996,940,
	1762,1664,1570,1482,1398,1320,1246,1176,1110,1048,988,934,
	1750,1652,1558,1472,1388,1310,1238,1168,1102,1040,982,926,
	1736,1640,1548,1460,1378,1302,1228,1160,1094,1032,974,920,
	1724,1628,1536,1450,1368,1292,1220,1150,1086,1026,968,914
};
// S3M C-4 periods
static const WORD FreqS3MTable[16] =
{
	1712,1616,1524,1440,1356,1280,
	1208,1140,1076,1016,960,907,
	0,0,0,0
};
// S3M FineTune frequencies
static const WORD S3MFineTuneTable[16] =
{
	7895,7941,7985,8046,8107,8169,8232,8280,
	8363,8413,8463,8529,8581,8651,8723,8757,	// 8363*2^((i-8)/(12*8))
};
#endif
#ifdef SNDMIX_C
// Sinus table
// One 64-sample period of a sine wave scaled to [-127, 127]; used as a
// vibrato/tremolo LFO waveform.
static const int16_t ModSinusTable[64] =
{
	0,12,25,37,49,60,71,81,90,98,106,112,117,122,125,126,
	127,126,125,122,117,112,106,98,90,81,71,60,49,37,25,12,
	0,-12,-25,-37,-49,-60,-71,-81,-90,-98,-106,-112,-117,-122,-125,-126,
	-127,-126,-125,-122,-117,-112,-106,-98,-90,-81,-71,-60,-49,-37,-25,-12
};
// Triangle wave table (ramp down)
static const int16_t ModRampDownTable[64] =
{
	0,-4,-8,-12,-16,-20,-24,-28,-32,-36,-40,-44,-48,-52,-56,-60,
	-64,-68,-72,-76,-80,-84,-88,-92,-96,-100,-104,-108,-112,-116,-120,-124,
	127,123,119,115,111,107,103,99,95,91,87,83,79,75,71,67,
	63,59,55,51,47,43,39,35,31,27,23,19,15,11,7,3
};
// Square wave table
// First half +127, second half -127.
static const int16_t ModSquareTable[64] =
{
	127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
	127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
	-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,
	-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127,-127
};
// Random wave table
// Fixed pseudo-random sequence in [-127, 127] (deterministic across runs).
static const int16_t ModRandomTable[64] =
{
	98,-127,-43,88,102,41,-65,-94,125,20,-71,-86,-70,-32,-16,-96,
	17,72,107,-5,116,-69,-62,-40,10,-61,65,109,-18,-38,-13,-76,
	-23,88,21,-94,8,106,21,-112,6,109,20,-88,-30,9,-127,118,
	42,-34,89,-4,-51,-72,21,-29,112,123,84,-101,-92,98,-54,-95
};
#endif
#ifdef SNDFX_C
// volume fade tables for Retrig Note:
// retrigTable1 holds multiplicative steps (applied when the additive entry
// is 0); retrigTable2 holds additive volume deltas -- indexed by the
// retrig command's 4-bit volume-change parameter.
static const int8_t retrigTable1[16] =
{ 0, 0, 0, 0, 0, 0, 10, 8, 0, 0, 0, 0, 0, 0, 24, 32 };
static const int8_t retrigTable2[16] =
{ 0, -1, -2, -4, -8, -16, 0, 0, 0, 1, 2, 4, 8, 16, 0, 0 };
// XM (FastTracker 2) amiga-frequency period table; 104 entries -- NOTE:
// appears to cover one octave at 8 finetune steps (12*8 + 8 boundary
// entries); confirm against the FT2 period computation before relying on it.
static const WORD XMPeriodTable[104] =
{
	907,900,894,887,881,875,868,862,856,850,844,838,832,826,820,814,
	808,802,796,791,785,779,774,768,762,757,752,746,741,736,730,725,
	720,715,709,704,699,694,689,684,678,675,670,665,660,655,651,646,
	640,636,632,628,623,619,614,610,604,601,597,592,588,584,580,575,
	570,567,563,559,555,551,547,543,538,535,532,528,524,520,516,513,
	508,505,502,498,494,491,487,484,480,477,474,470,467,463,460,457,
	453,450,447,443,440,437,434,431
};
// XM linear-frequency table: 768 entries (12 semitones x 64 finetune steps
// per... NOTE(review): values decrease monotonically from 535232; used by
// the FT2 linear-slide frequency computation -- confirm exact indexing
// against the FT2 reference implementation.
static const uint32_t XMLinearTable[768] =
{
	535232,534749,534266,533784,533303,532822,532341,531861,
	531381,530902,530423,529944,529466,528988,528511,528034,
	527558,527082,526607,526131,525657,525183,524709,524236,
	523763,523290,522818,522346,521875,521404,520934,520464,
	519994,519525,519057,518588,518121,517653,517186,516720,
	516253,515788,515322,514858,514393,513929,513465,513002,
	512539,512077,511615,511154,510692,510232,509771,509312,
	508852,508393,507934,507476,507018,506561,506104,505647,
	505191,504735,504280,503825,503371,502917,502463,502010,
	501557,501104,500652,500201,499749,499298,498848,498398,
	497948,497499,497050,496602,496154,495706,495259,494812,
	494366,493920,493474,493029,492585,492140,491696,491253,
	490809,490367,489924,489482,489041,488600,488159,487718,
	487278,486839,486400,485961,485522,485084,484647,484210,
	483773,483336,482900,482465,482029,481595,481160,480726,
	480292,479859,479426,478994,478562,478130,477699,477268,
	476837,476407,475977,475548,475119,474690,474262,473834,
	473407,472979,472553,472126,471701,471275,470850,470425,
	470001,469577,469153,468730,468307,467884,467462,467041,
	466619,466198,465778,465358,464938,464518,464099,463681,
	463262,462844,462427,462010,461593,461177,460760,460345,
	459930,459515,459100,458686,458272,457859,457446,457033,
	456621,456209,455797,455386,454975,454565,454155,453745,
	453336,452927,452518,452110,451702,451294,450887,450481,
	450074,449668,449262,448857,448452,448048,447644,447240,
	446836,446433,446030,445628,445226,444824,444423,444022,
	443622,443221,442821,442422,442023,441624,441226,440828,
	440430,440033,439636,439239,438843,438447,438051,437656,
	437261,436867,436473,436079,435686,435293,434900,434508,
	434116,433724,433333,432942,432551,432161,431771,431382,
	430992,430604,430215,429827,429439,429052,428665,428278,
	427892,427506,427120,426735,426350,425965,425581,425197,
	424813,424430,424047,423665,423283,422901,422519,422138,
	421757,421377,420997,420617,420237,419858,419479,419101,
	418723,418345,417968,417591,417214,416838,416462,416086,
	415711,415336,414961,414586,414212,413839,413465,413092,
	412720,412347,411975,411604,411232,410862,410491,410121,
	409751,409381,409012,408643,408274,407906,407538,407170,
	406803,406436,406069,405703,405337,404971,404606,404241,
	403876,403512,403148,402784,402421,402058,401695,401333,
	400970,400609,400247,399886,399525,399165,398805,398445,
	398086,397727,397368,397009,396651,396293,395936,395579,
	395222,394865,394509,394153,393798,393442,393087,392733,
	392378,392024,391671,391317,390964,390612,390259,389907,
	389556,389204,388853,388502,388152,387802,387452,387102,
	386753,386404,386056,385707,385359,385012,384664,384317,
	383971,383624,383278,382932,382587,382242,381897,381552,
	381208,380864,380521,380177,379834,379492,379149,378807,
	378466,378124,377783,377442,377102,376762,376422,376082,
	375743,375404,375065,374727,374389,374051,373714,373377,
	373040,372703,372367,372031,371695,371360,371025,370690,
	370356,370022,369688,369355,369021,368688,368356,368023,
	367691,367360,367028,366697,366366,366036,365706,365376,
	365046,364717,364388,364059,363731,363403,363075,362747,
	362420,362093,361766,361440,361114,360788,360463,360137,
	359813,359488,359164,358840,358516,358193,357869,357547,
	357224,356902,356580,356258,355937,355616,355295,354974,
	354654,354334,354014,353695,353376,353057,352739,352420,
	352103,351785,351468,351150,350834,350517,350201,349885,
	349569,349254,348939,348624,348310,347995,347682,347368,
	347055,346741,346429,346116,345804,345492,345180,344869,
	344558,344247,343936,343626,343316,343006,342697,342388,
	342079,341770,341462,341154,340846,340539,340231,339924,
	339618,339311,339005,338700,338394,338089,337784,337479,
	337175,336870,336566,336263,335959,335656,335354,335051,
	334749,334447,334145,333844,333542,333242,332941,332641,
	332341,332041,331741,331442,331143,330844,330546,330247,
	329950,329652,329355,329057,328761,328464,328168,327872,
	327576,327280,326985,326690,326395,326101,325807,325513,
	325219,324926,324633,324340,324047,323755,323463,323171,
	322879,322588,322297,322006,321716,321426,321136,320846,
	320557,320267,319978,319690,319401,319113,318825,318538,
	318250,317963,317676,317390,317103,316817,316532,316246,
	315961,315676,315391,315106,314822,314538,314254,313971,
	313688,313405,313122,312839,312557,312275,311994,311712,
	311431,311150,310869,310589,310309,310029,309749,309470,
	309190,308911,308633,308354,308076,307798,307521,307243,
	306966,306689,306412,306136,305860,305584,305308,305033,
	304758,304483,304208,303934,303659,303385,303112,302838,
	302565,302292,302019,301747,301475,301203,300931,300660,
	300388,300117,299847,299576,299306,299036,298766,298497,
	298227,297958,297689,297421,297153,296884,296617,296349,
	296082,295815,295548,295281,295015,294749,294483,294217,
	293952,293686,293421,293157,292892,292628,292364,292100,
	291837,291574,291311,291048,290785,290523,290261,289999,
	289737,289476,289215,288954,288693,288433,288173,287913,
	287653,287393,287134,286875,286616,286358,286099,285841,
	285583,285326,285068,284811,284554,284298,284041,283785,
	283529,283273,283017,282762,282507,282252,281998,281743,
	281489,281235,280981,280728,280475,280222,279969,279716,
	279464,279212,278960,278708,278457,278206,277955,277704,
	277453,277203,276953,276703,276453,276204,275955,275706,
	275457,275209,274960,274712,274465,274217,273970,273722,
	273476,273229,272982,272736,272490,272244,271999,271753,
	271508,271263,271018,270774,270530,270286,270042,269798,
	269555,269312,269069,268826,268583,268341,268099,267857
};
#endif
#ifdef SNDMIX_C
// FastTracker 2 auto-vibrato waveform: one 256-sample period of a sine-like
// curve with amplitude [-64, 64] (negative half first, as in FT2).
static const int8_t ft2VibratoTable[256] =
{
	0,-2,-3,-5,-6,-8,-9,-11,-12,-14,-16,-17,-19,-20,-22,-23,
	-24,-26,-27,-29,-30,-32,-33,-34,-36,-37,-38,-39,-41,-42,
	-43,-44,-45,-46,-47,-48,-49,-50,-51,-52,-53,-54,-55,-56,
	-56,-57,-58,-59,-59,-60,-60,-61,-61,-62,-62,-62,-63,-63,
	-63,-64,-64,-64,-64,-64,-64,-64,-64,-64,-64,-64,-63,-63,
	-63,-62,-62,-62,-61,-61,-60,-60,-59,-59,-58,-57,-56,-56,
	-55,-54,-53,-52,-51,-50,-49,-48,-47,-46,-45,-44,-43,-42,
	-41,-39,-38,-37,-36,-34,-33,-32,-30,-29,-27,-26,-24,-23,
	-22,-20,-19,-17,-16,-14,-12,-11,-9,-8,-6,-5,-3,-2,0,
	2,3,5,6,8,9,11,12,14,16,17,19,20,22,23,24,26,27,29,30,
	32,33,34,36,37,38,39,41,42,43,44,45,46,47,48,49,50,51,
	52,53,54,55,56,56,57,58,59,59,60,60,61,61,62,62,62,63,
	63,63,64,64,64,64,64,64,64,64,64,64,64,63,63,63,62,62,
	62,61,61,60,60,59,59,58,57,56,56,55,54,53,52,51,50,49,
	48,47,46,45,44,43,42,41,39,38,37,36,34,33,32,30,29,27,
	26,24,23,22,20,19,17,16,14,12,11,9,8,6,5,3,2
};
#endif
// Fine linear pitch-slide multipliers in 16.16 fixed point:
// entry i is approximately 65536 * 2^(i/768) (one step = 1/64 semitone).
static const DWORD FineLinearSlideUpTable[16] =
{
	65536, 65595, 65654, 65714, 65773, 65832, 65892, 65951,
	66011, 66071, 66130, 66190, 66250, 66309, 66369, 66429
};
// Downward counterpart: entry i is approximately 65536 * 2^(-i/768)
// (note the first entry is 65535, not 65536).
static const DWORD FineLinearSlideDownTable[16] =
{
	65535, 65477, 65418, 65359, 65300, 65241, 65182, 65123,
	65065, 65006, 64947, 64888, 64830, 64772, 64713, 64645
};
/*
 * Coarse linear pitch-slide-up factors, one per slide step (0..255).
 * Entry 0 is 65536 and entry 128 is 131072 (exactly double), consistent
 * with a geometric progression spanning one octave over 128 steps —
 * presumably 2^(i/192) or similar; verify against the slide handler.
 */
static const DWORD LinearSlideUpTable[256] =
{
	65536, 65773, 66010, 66249, 66489, 66729, 66971, 67213,
	67456, 67700, 67945, 68190, 68437, 68685, 68933, 69182,
	69432, 69684, 69936, 70189, 70442, 70697, 70953, 71209,
	71467, 71725, 71985, 72245, 72507, 72769, 73032, 73296,
	73561, 73827, 74094, 74362, 74631, 74901, 75172, 75444,
	75717, 75991, 76265, 76541, 76818, 77096, 77375, 77655,
	77935, 78217, 78500, 78784, 79069, 79355, 79642, 79930,
	80219, 80509, 80800, 81093, 81386, 81680, 81976, 82272,
	82570, 82868, 83168, 83469, 83771, 84074, 84378, 84683,
	84989, 85297, 85605, 85915, 86225, 86537, 86850, 87164,
	87480, 87796, 88113, 88432, 88752, 89073, 89395, 89718,
	90043, 90369, 90695, 91023, 91353, 91683, 92015, 92347,
	92681, 93017, 93353, 93691, 94029, 94370, 94711, 95053,
	95397, 95742, 96088, 96436, 96785, 97135, 97486, 97839,
	98193, 98548, 98904, 99262, 99621, 99981, 100343, 100706,
	101070, 101435, 101802, 102170, 102540, 102911, 103283, 103657,
	104031, 104408, 104785, 105164, 105545, 105926, 106309, 106694,
	107080, 107467, 107856, 108246, 108637, 109030, 109425, 109820,
	110217, 110616, 111016, 111418, 111821, 112225, 112631, 113038,
	113447, 113857, 114269, 114682, 115097, 115514, 115931, 116351,
	116771, 117194, 117618, 118043, 118470, 118898, 119328, 119760,
	120193, 120628, 121064, 121502, 121941, 122382, 122825, 123269,
	123715, 124162, 124611, 125062, 125514, 125968, 126424, 126881,
	127340, 127801, 128263, 128727, 129192, 129660, 130129, 130599,
	131072, 131546, 132021, 132499, 132978, 133459, 133942, 134426,
	134912, 135400, 135890, 136381, 136875, 137370, 137866, 138365,
	138865, 139368, 139872, 140378, 140885, 141395, 141906, 142419,
	142935, 143451, 143970, 144491, 145014, 145538, 146064, 146593,
	147123, 147655, 148189, 148725, 149263, 149803, 150344, 150888,
	151434, 151982, 152531, 153083, 153637, 154192, 154750, 155310,
	155871, 156435, 157001, 157569, 158138, 158710, 159284, 159860,
	160439, 161019, 161601, 162186, 162772, 163361, 163952, 164545,
};
/*
 * Coarse linear pitch-slide-down factors, one per slide step (0..255);
 * mirror of LinearSlideUpTable (entry 0 is 65536, entry 128 is 32768 —
 * exactly half, i.e. one octave down).
 */
static const DWORD LinearSlideDownTable[256] =
{
	65536, 65299, 65064, 64830, 64596, 64363, 64131, 63900,
	63670, 63440, 63212, 62984, 62757, 62531, 62305, 62081,
	61857, 61634, 61412, 61191, 60970, 60751, 60532, 60314,
	60096, 59880, 59664, 59449, 59235, 59021, 58809, 58597,
	58385, 58175, 57965, 57757, 57548, 57341, 57134, 56928,
	56723, 56519, 56315, 56112, 55910, 55709, 55508, 55308,
	55108, 54910, 54712, 54515, 54318, 54123, 53928, 53733,
	53540, 53347, 53154, 52963, 52772, 52582, 52392, 52204,
	52015, 51828, 51641, 51455, 51270, 51085, 50901, 50717,
	50535, 50353, 50171, 49990, 49810, 49631, 49452, 49274,
	49096, 48919, 48743, 48567, 48392, 48218, 48044, 47871,
	47698, 47526, 47355, 47185, 47014, 46845, 46676, 46508,
	46340, 46173, 46007, 45841, 45676, 45511, 45347, 45184,
	45021, 44859, 44697, 44536, 44376, 44216, 44056, 43898,
	43740, 43582, 43425, 43268, 43112, 42957, 42802, 42648,
	42494, 42341, 42189, 42037, 41885, 41734, 41584, 41434,
	41285, 41136, 40988, 40840, 40693, 40546, 40400, 40254,
	40109, 39965, 39821, 39677, 39534, 39392, 39250, 39108,
	38967, 38827, 38687, 38548, 38409, 38270, 38132, 37995,
	37858, 37722, 37586, 37450, 37315, 37181, 37047, 36913,
	36780, 36648, 36516, 36384, 36253, 36122, 35992, 35862,
	35733, 35604, 35476, 35348, 35221, 35094, 34968, 34842,
	34716, 34591, 34466, 34342, 34218, 34095, 33972, 33850,
	33728, 33606, 33485, 33364, 33244, 33124, 33005, 32886,
	32768, 32649, 32532, 32415, 32298, 32181, 32065, 31950,
	31835, 31720, 31606, 31492, 31378, 31265, 31152, 31040,
	30928, 30817, 30706, 30595, 30485, 30375, 30266, 30157,
	30048, 29940, 29832, 29724, 29617, 29510, 29404, 29298,
	29192, 29087, 28982, 28878, 28774, 28670, 28567, 28464,
	28361, 28259, 28157, 28056, 27955, 27854, 27754, 27654,
	27554, 27455, 27356, 27257, 27159, 27061, 26964, 26866,
	26770, 26673, 26577, 26481, 26386, 26291, 26196, 26102,
};
#if 0
/*
 * Dead code (compiled out via #if 0): a 512-entry sine table, two full
 * periods sampled at 256 points each, amplitude clamped to [-63, 63].
 * Kept only for reference; delete if it is never re-enabled.
 */
static const int SpectrumSinusTable[256*2] =
{
	0, 1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11,
	12, 13, 14, 14, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
	24, 25, 25, 26, 27, 28, 28, 29, 30, 30, 31, 32, 32, 33, 34, 34,
	35, 36, 36, 37, 38, 38, 39, 39, 40, 41, 41, 42, 42, 43, 44, 44,
	45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51, 51, 52, 52,
	53, 53, 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 57, 58, 58, 58,
	59, 59, 59, 59, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62,
	62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63,
	63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 62, 62,
	62, 62, 62, 62, 61, 61, 61, 61, 61, 60, 60, 60, 60, 59, 59, 59,
	59, 58, 58, 58, 57, 57, 57, 56, 56, 55, 55, 55, 54, 54, 53, 53,
	53, 52, 52, 51, 51, 50, 50, 49, 49, 48, 48, 47, 47, 46, 46, 45,
	45, 44, 44, 43, 42, 42, 41, 41, 40, 39, 39, 38, 38, 37, 36, 36,
	35, 34, 34, 33, 32, 32, 31, 30, 30, 29, 28, 28, 27, 26, 25, 25,
	24, 23, 22, 22, 21, 20, 20, 19, 18, 17, 17, 16, 15, 14, 14, 13,
	12, 11, 10, 10, 9, 8, 7, 7, 6, 5, 4, 3, 3, 2, 1, 0,
	0, -1, -1, -2, -3, -3, -4, -5, -6, -7, -7, -8, -9, -10, -10, -11,
	-12, -13, -14, -14, -15, -16, -17, -17, -18, -19, -20, -20, -21, -22, -22, -23,
	-24, -25, -25, -26, -27, -28, -28, -29, -30, -30, -31, -32, -32, -33, -34, -34,
	-35, -36, -36, -37, -38, -38, -39, -39, -40, -41, -41, -42, -42, -43, -44, -44,
	-45, -45, -46, -46, -47, -47, -48, -48, -49, -49, -50, -50, -51, -51, -52, -52,
	-53, -53, -53, -54, -54, -55, -55, -55, -56, -56, -57, -57, -57, -58, -58, -58,
	-59, -59, -59, -59, -60, -60, -60, -60, -61, -61, -61, -61, -61, -62, -62, -62,
	-62, -62, -62, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63,
	-63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -63, -62, -62,
	-62, -62, -62, -62, -61, -61, -61, -61, -61, -60, -60, -60, -60, -59, -59, -59,
	-59, -58, -58, -58, -57, -57, -57, -56, -56, -55, -55, -55, -54, -54, -53, -53,
	-53, -52, -52, -51, -51, -50, -50, -49, -49, -48, -48, -47, -47, -46, -46, -45,
	-45, -44, -44, -43, -42, -42, -41, -41, -40, -39, -39, -38, -38, -37, -36, -36,
	-35, -34, -34, -33, -32, -32, -31, -30, -30, -29, -28, -28, -27, -26, -25, -25,
	-24, -23, -22, -22, -21, -20, -20, -19, -18, -17, -17, -16, -15, -14, -14, -13,
	-12, -11, -10, -10, -9, -8, -7, -7, -6, -5, -4, -3, -3, -2, -1, 0,
};
#endif
|
/*
* iSCSI transport class definitions
*
* Copyright (C) IBM Corporation, 2004
* Copyright (C) Mike Christie, 2004 - 2005
* Copyright (C) Dmitry Yusupov, 2004 - 2005
* Copyright (C) Alex Aizman, 2004 - 2005
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/bsg-lib.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_bsg_iscsi.h>
#define ISCSI_TRANSPORT_VERSION "2.0-870"

/*
 * Module parameters gating verbose logging: debug_session and debug_conn
 * enable the ISCSI_DBG_TRANS_SESSION/CONN macros below. Both are
 * writable at runtime by root (S_IWUSR) and readable by everyone.
 */
static int dbg_session;
module_param_named(debug_session, dbg_session, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_session,
		 "Turn on debugging for sessions in scsi_transport_iscsi "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");

static int dbg_conn;
module_param_named(debug_conn, dbg_conn, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_conn,
		 "Turn on debugging for connections in scsi_transport_iscsi "
		 "module. Set to 1 to turn on, and zero to turn off. Default "
		 "is off.");
/*
 * Session-level debug logging, gated on the "debug_session" module
 * parameter. The do { } while (0) wrapper makes the macro behave as a
 * single statement; it must NOT carry a trailing semicolon, otherwise
 * "if (x) ISCSI_DBG_TRANS_SESSION(...); else ..." fails to compile
 * (the extra ';' terminates the if before the else).
 */
#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...)		\
	do {								\
		if (dbg_session)					\
			iscsi_cls_session_printk(KERN_INFO, _session,	\
						 "%s: " dbg_fmt,	\
						 __func__, ##arg);	\
	} while (0)
/*
 * Connection-level debug logging, gated on the "debug_conn" module
 * parameter. As with ISCSI_DBG_TRANS_SESSION, the do { } while (0)
 * wrapper must not end in a semicolon or the macro cannot be used
 * safely in an if/else without braces.
 */
#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...)		\
	do {							\
		if (dbg_conn)					\
			iscsi_cls_conn_printk(KERN_INFO, _conn,	\
					      "%s: " dbg_fmt,	\
					      __func__, ##arg);	\
	} while (0)
/*
 * Per-registration bookkeeping: ties a driver's struct iscsi_transport
 * to its scsi_transport_template, its sysfs device, and the transport
 * containers used for connection and session objects.
 */
struct iscsi_internal {
	struct scsi_transport_template t;	/* must stay first? NOTE(review): to_iscsi_internal() uses container_of, so order is not relied on */
	struct iscsi_transport *iscsi_transport;
	struct list_head list;			/* link on iscsi_transports */
	struct device dev;
	struct transport_container conn_cont;
	struct transport_container session_cont;
};
/* sysfs session id for next new session (monotonically increasing). */
static atomic_t iscsi_session_nr;
static struct workqueue_struct *iscsi_eh_timer_workq;

/* id allocator backing session target ids */
static DEFINE_IDA(iscsi_sess_ida);

/*
 * list of registered transports and lock that must
 * be held while accessing list. The iscsi_transport_lock must
 * be acquired after the rx_queue_mutex.
 */
static LIST_HEAD(iscsi_transports);
static DEFINE_SPINLOCK(iscsi_transport_lock);

/* map a scsi_transport_template / device back to its iscsi_internal */
#define to_iscsi_internal(tmpl) \
	container_of(tmpl, struct iscsi_internal, t)

#define dev_to_iscsi_internal(_dev) \
	container_of(_dev, struct iscsi_internal, dev)
/*
 * Device-model release callback: invoked on the final put_device() of
 * an iscsi_internal's embedded device; frees the whole structure.
 */
static void iscsi_transport_release(struct device *dev)
{
	kfree(dev_to_iscsi_internal(dev));
}
/*
 * iscsi_transport_class represents the iscsi_transports that are
 * registered. Its dev_release frees the containing iscsi_internal.
 */
static struct class iscsi_transport_class = {
	.name = "iscsi_transport",
	.dev_release = iscsi_transport_release,
};
static ssize_t
show_transport_handle(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
}
static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);

/*
 * Expands to a read-only sysfs show routine plus its DEVICE_ATTR for
 * one scalar field of struct iscsi_transport.
 */
#define show_transport_attr(name, format)				\
static ssize_t								\
show_transport_##name(struct device *dev, 				\
		      struct device_attribute *attr,char *buf)		\
{									\
	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);	\
	return sprintf(buf, format"\n", priv->iscsi_transport->name);	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);

show_transport_attr(caps, "0x%x");

/* attributes exported for every registered transport */
static struct attribute *iscsi_transport_attrs[] = {
	&dev_attr_handle.attr,
	&dev_attr_caps.attr,
	NULL,
};

static struct attribute_group iscsi_transport_group = {
	.attrs = iscsi_transport_attrs,
};
/*
 * iSCSI endpoint attrs
 */
#define iscsi_dev_to_endpoint(_dev) \
	container_of(_dev, struct iscsi_endpoint, dev)

/* declare a device attribute named dev_attr_<prefix>_<name> */
#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store)	\
struct device_attribute dev_attr_##_prefix##_##_name =	\
	__ATTR(_name,_mode,_show,_store)
/*
 * Device-model release callback: runs on the final put_device() of an
 * endpoint device and frees the containing iscsi_endpoint.
 */
static void iscsi_endpoint_release(struct device *dev)
{
	kfree(iscsi_dev_to_endpoint(dev));
}
/* class backing the "ep-%llu" devices created by iscsi_create_endpoint() */
static struct class iscsi_endpoint_class = {
	.name = "iscsi_endpoint",
	.dev_release = iscsi_endpoint_release,
};
static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);

/* attribute group attached to each endpoint in iscsi_create_endpoint() */
static struct attribute *iscsi_endpoint_attrs[] = {
	&dev_attr_ep_handle.attr,
	NULL,
};

static struct attribute_group iscsi_endpoint_group = {
	.attrs = iscsi_endpoint_attrs,
};
/*
 * Endpoint ids are uint64_t, so -1 wraps to the maximum value and
 * serves as an effectively unreachable upper bound for the id scan.
 */
#define ISCSI_MAX_EPID -1

/* class_find_device() match callback: compare against an endpoint id */
static int iscsi_match_epid(struct device *dev, const void *data)
{
	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
	const uint64_t *epid = data;

	return *epid == ep->id;
}
/**
 * iscsi_create_endpoint - allocate and register an iSCSI endpoint
 * @dd_size:	bytes of driver-private data to append to the endpoint
 *		(exposed via ->dd_data when non-zero)
 *
 * Picks the lowest unused endpoint id, allocates the endpoint, registers
 * it as an iscsi_endpoint_class device named "ep-<id>" and attaches the
 * endpoint sysfs attribute group.
 *
 * Returns the new endpoint, or NULL on allocation/registration failure.
 */
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
	struct device *dev;
	struct iscsi_endpoint *ep;
	uint64_t id;
	int err;

	/* linear scan for the first id not already registered */
	for (id = 1; id < ISCSI_MAX_EPID; id++) {
		dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
					iscsi_match_epid);
		if (!dev)
			break;
		else
			put_device(dev); /* drop class_find_device()'s ref */
	}
	if (id == ISCSI_MAX_EPID) {
		printk(KERN_ERR "Too many connections. Max supported %u\n",
		       ISCSI_MAX_EPID - 1);
		return NULL;
	}

	ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
	if (!ep)
		return NULL;

	ep->id = id;
	ep->dev.class = &iscsi_endpoint_class;
	dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
	err = device_register(&ep->dev);
	if (err)
		goto put_dev;

	err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
	if (err)
		goto unregister_dev;

	if (dd_size)
		ep->dd_data = &ep[1];
	return ep;

unregister_dev:
	device_unregister(&ep->dev);
	return NULL;

put_dev:
	/*
	 * Once device_register() has been called, the device must be
	 * released with put_device(), never kfree(): the final put runs
	 * iscsi_endpoint_release(), which frees @ep, and also releases
	 * the name allocated by dev_set_name() above.
	 */
	put_device(&ep->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
/**
 * iscsi_destroy_endpoint - tear down an endpoint from iscsi_create_endpoint()
 * @ep:	endpoint to destroy
 *
 * Removes the sysfs group and unregisters the device; the final
 * reference drop frees @ep through iscsi_endpoint_release().
 */
void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
{
	sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
	device_unregister(&ep->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
/**
 * iscsi_lookup_endpoint - find a registered endpoint by numeric id
 * @handle:	endpoint id (as exposed by the "handle" sysfs attribute)
 *
 * Returns the matching endpoint, or NULL if none is registered.
 * Note: the returned pointer carries no device reference (see below).
 */
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
	struct iscsi_endpoint *ep;
	struct device *dev;

	dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
				iscsi_match_epid);
	if (!dev)
		return NULL;

	ep = iscsi_dev_to_endpoint(dev);
	/*
	 * we can drop this now because the interface will prevent
	 * removals and lookups from racing.
	 */
	put_device(dev);
	return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
/*
 * Interface to display network param to sysfs
 */
static void iscsi_iface_release(struct device *dev)
{
	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
	/* read the parent pointer before the iface memory is freed */
	struct device *parent = iface->dev.parent;

	kfree(iface);
	put_device(parent);	/* drop the host ref taken at creation */
}

static struct class iscsi_iface_class = {
	.name = "iscsi_iface",
	.dev_release = iscsi_iface_release,
};
/* declare a device attribute named dev_attr_<prefix>_<name> */
#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store)	\
struct device_attribute dev_attr_##_prefix##_##_name =		\
	__ATTR(_name, _mode, _show, _store)

/* iface attrs show: forwards to the transport's get_iface_param() */
#define iscsi_iface_attr_show(type, name, param_type, param)		\
static ssize_t								\
show_##type##_##name(struct device *dev, struct device_attribute *attr,	\
		     char *buf)						\
{									\
	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);		\
	struct iscsi_transport *t = iface->transport;			\
	return t->get_iface_param(iface, param_type, param, buf);	\
}									\

/* read-only network-level parameter attribute (ISCSI_NET_PARAM) */
#define iscsi_iface_net_attr(type, name, param)				\
	iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param)	\
static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);

/* read-only iSCSI-level parameter attribute (ISCSI_IFACE_PARAM) */
#define iscsi_iface_attr(type, name, param)				\
	iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param)	\
static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
/*
 * Attribute instantiations. Each line below expands to a show routine
 * plus a dev_attr_* declaration via the macros above; visibility is
 * decided per-transport in iscsi_iface_attr_is_visible().
 */

/* generic read only ipv4 attribute */
iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN);
iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN);
iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS);
iscsi_iface_net_attr(ipv4_iface, grat_arp_en,
		     ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id,
		     ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID);
iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN);
iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id,
		     ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID);
iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en,
		     ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN);
iscsi_iface_net_attr(ipv4_iface, fragment_disable,
		     ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE);
iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en,
		     ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN);
iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL);

/* generic read only ipv6 attribute */
iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR);
iscsi_iface_net_attr(ipv6_iface, link_local_addr,
		     ISCSI_NET_PARAM_IPV6_LINKLOCAL);
iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);
iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,
		     ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);
iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,
		     ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG);
iscsi_iface_net_attr(ipv6_iface, link_local_state,
		     ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE);
iscsi_iface_net_attr(ipv6_iface, router_state,
		     ISCSI_NET_PARAM_IPV6_ROUTER_STATE);
iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en,
		     ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN);
iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN);
iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL);
iscsi_iface_net_attr(ipv6_iface, traffic_class,
		     ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS);
iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT);
iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo,
		     ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO);
iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time,
		     ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME);
iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo,
		     ISCSI_NET_PARAM_IPV6_ND_STALE_TMO);
iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt,
		     ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT);
iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu,
		     ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU);

/* common read only iface attribute */
iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE);
iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID);
iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY);
iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE);
iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN);
iscsi_iface_net_attr(iface, tcp_nagle_disable,
		     ISCSI_NET_PARAM_TCP_NAGLE_DISABLE);
iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE);
iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF);
iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE);
iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN);
iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID);
iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN);

/* common iscsi specific settings attributes */
iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO);
iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN);
iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN);
iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN);
iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN);
iscsi_iface_attr(iface, data_seq_in_order,
		 ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN);
iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN);
iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL);
iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH);
iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST);
iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T);
iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST);
iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN);
iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN);
iscsi_iface_attr(iface, discovery_auth_optional,
		 ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL);
iscsi_iface_attr(iface, discovery_logout,
		 ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN);
iscsi_iface_attr(iface, strict_login_comp_en,
		 ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN);
iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME);
/*
 * sysfs is_visible callback for the iface attribute group.
 *
 * Maps the attribute back to its ISCSI_NET_PARAM_*/ISCSI_IFACE_PARAM_*
 * value (common attributes first, then per-address-family ones based on
 * iface->iface_type), then asks the transport via ->attr_is_visible()
 * whether it supports that parameter. Returns 0 to hide the attribute.
 */
static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
	struct iscsi_transport *t = iface->transport;
	int param;
	int param_type;

	if (attr == &dev_attr_iface_enabled.attr)
		param = ISCSI_NET_PARAM_IFACE_ENABLE;
	else if (attr == &dev_attr_iface_vlan_id.attr)
		param = ISCSI_NET_PARAM_VLAN_ID;
	else if (attr == &dev_attr_iface_vlan_priority.attr)
		param = ISCSI_NET_PARAM_VLAN_PRIORITY;
	else if (attr == &dev_attr_iface_vlan_enabled.attr)
		param = ISCSI_NET_PARAM_VLAN_ENABLED;
	else if (attr == &dev_attr_iface_mtu.attr)
		param = ISCSI_NET_PARAM_MTU;
	else if (attr == &dev_attr_iface_port.attr)
		param = ISCSI_NET_PARAM_PORT;
	else if (attr == &dev_attr_iface_ipaddress_state.attr)
		param = ISCSI_NET_PARAM_IPADDR_STATE;
	else if (attr == &dev_attr_iface_delayed_ack_en.attr)
		param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
	else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
		param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
	else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
		param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
	else if (attr == &dev_attr_iface_tcp_wsf.attr)
		param = ISCSI_NET_PARAM_TCP_WSF;
	else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
		param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
	else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
		param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
	else if (attr == &dev_attr_iface_cache_id.attr)
		param = ISCSI_NET_PARAM_CACHE_ID;
	else if (attr == &dev_attr_iface_redirect_en.attr)
		param = ISCSI_NET_PARAM_REDIRECT_EN;
	else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
		param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
	else if (attr == &dev_attr_iface_header_digest.attr)
		param = ISCSI_IFACE_PARAM_HDRDGST_EN;
	else if (attr == &dev_attr_iface_data_digest.attr)
		param = ISCSI_IFACE_PARAM_DATADGST_EN;
	else if (attr == &dev_attr_iface_immediate_data.attr)
		param = ISCSI_IFACE_PARAM_IMM_DATA_EN;
	else if (attr == &dev_attr_iface_initial_r2t.attr)
		param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN;
	else if (attr == &dev_attr_iface_data_seq_in_order.attr)
		param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN;
	else if (attr == &dev_attr_iface_data_pdu_in_order.attr)
		param = ISCSI_IFACE_PARAM_PDU_INORDER_EN;
	else if (attr == &dev_attr_iface_erl.attr)
		param = ISCSI_IFACE_PARAM_ERL;
	else if (attr == &dev_attr_iface_max_recv_dlength.attr)
		param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH;
	else if (attr == &dev_attr_iface_first_burst_len.attr)
		param = ISCSI_IFACE_PARAM_FIRST_BURST;
	else if (attr == &dev_attr_iface_max_outstanding_r2t.attr)
		param = ISCSI_IFACE_PARAM_MAX_R2T;
	else if (attr == &dev_attr_iface_max_burst_len.attr)
		param = ISCSI_IFACE_PARAM_MAX_BURST;
	else if (attr == &dev_attr_iface_chap_auth.attr)
		param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN;
	else if (attr == &dev_attr_iface_bidi_chap.attr)
		param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN;
	else if (attr == &dev_attr_iface_discovery_auth_optional.attr)
		param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL;
	else if (attr == &dev_attr_iface_discovery_logout.attr)
		param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN;
	else if (attr == &dev_attr_iface_strict_login_comp_en.attr)
		param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
	else if (attr == &dev_attr_iface_initiator_name.attr)
		param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
	else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
		/* IPv4-only attributes */
		if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
			param = ISCSI_NET_PARAM_IPV4_ADDR;
		else if (attr == &dev_attr_ipv4_iface_gateway.attr)
			param = ISCSI_NET_PARAM_IPV4_GW;
		else if (attr == &dev_attr_ipv4_iface_subnet.attr)
			param = ISCSI_NET_PARAM_IPV4_SUBNET;
		else if (attr == &dev_attr_ipv4_iface_bootproto.attr)
			param = ISCSI_NET_PARAM_IPV4_BOOTPROTO;
		else if (attr ==
			 &dev_attr_ipv4_iface_dhcp_dns_address_en.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN;
		else if (attr ==
			 &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN;
		else if (attr == &dev_attr_ipv4_iface_tos_en.attr)
			param = ISCSI_NET_PARAM_IPV4_TOS_EN;
		else if (attr == &dev_attr_ipv4_iface_tos.attr)
			param = ISCSI_NET_PARAM_IPV4_TOS;
		else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr)
			param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN;
		else if (attr ==
			 &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN;
		else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID;
		else if (attr ==
			 &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN;
		else if (attr ==
			 &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN;
		else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID;
		else if (attr ==
			 &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr)
			param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN;
		else if (attr ==
			 &dev_attr_ipv4_iface_fragment_disable.attr)
			param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE;
		else if (attr ==
			 &dev_attr_ipv4_iface_incoming_forwarding_en.attr)
			param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN;
		else if (attr == &dev_attr_ipv4_iface_ttl.attr)
			param = ISCSI_NET_PARAM_IPV4_TTL;
		else
			return 0;
	} else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) {
		/* IPv6-only attributes */
		if (attr == &dev_attr_ipv6_iface_ipaddress.attr)
			param = ISCSI_NET_PARAM_IPV6_ADDR;
		else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr)
			param = ISCSI_NET_PARAM_IPV6_LINKLOCAL;
		else if (attr == &dev_attr_ipv6_iface_router_addr.attr)
			param = ISCSI_NET_PARAM_IPV6_ROUTER;
		else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr)
			param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;
		else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)
			param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG;
		else if (attr == &dev_attr_ipv6_iface_link_local_state.attr)
			param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE;
		else if (attr == &dev_attr_ipv6_iface_router_state.attr)
			param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE;
		else if (attr ==
			 &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr)
			param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN;
		else if (attr == &dev_attr_ipv6_iface_mld_en.attr)
			param = ISCSI_NET_PARAM_IPV6_MLD_EN;
		else if (attr == &dev_attr_ipv6_iface_flow_label.attr)
			param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL;
		else if (attr == &dev_attr_ipv6_iface_traffic_class.attr)
			param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS;
		else if (attr == &dev_attr_ipv6_iface_hop_limit.attr)
			param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT;
		else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr)
			param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO;
		else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr)
			param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME;
		else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr)
			param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO;
		else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr)
			param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT;
		else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr)
			param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU;
		else
			return 0;
	} else {
		WARN_ONCE(1, "Invalid iface attr");
		return 0;
	}

	/* classify the parameter so the transport callback sees the
	 * right namespace (iSCSI-level vs network-level) */
	switch (param) {
	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
	case ISCSI_IFACE_PARAM_HDRDGST_EN:
	case ISCSI_IFACE_PARAM_DATADGST_EN:
	case ISCSI_IFACE_PARAM_IMM_DATA_EN:
	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
	case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
	case ISCSI_IFACE_PARAM_ERL:
	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
	case ISCSI_IFACE_PARAM_FIRST_BURST:
	case ISCSI_IFACE_PARAM_MAX_R2T:
	case ISCSI_IFACE_PARAM_MAX_BURST:
	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
	case ISCSI_IFACE_PARAM_INITIATOR_NAME:
		param_type = ISCSI_IFACE_PARAM;
		break;
	default:
		param_type = ISCSI_NET_PARAM;
	}

	return t->attr_is_visible(param_type, param);
}
/*
 * All iface attributes; which ones actually appear for a given iface is
 * filtered at runtime by iscsi_iface_attr_is_visible().
 */
static struct attribute *iscsi_iface_attrs[] = {
	&dev_attr_iface_enabled.attr,
	&dev_attr_iface_vlan_id.attr,
	&dev_attr_iface_vlan_priority.attr,
	&dev_attr_iface_vlan_enabled.attr,
	&dev_attr_ipv4_iface_ipaddress.attr,
	&dev_attr_ipv4_iface_gateway.attr,
	&dev_attr_ipv4_iface_subnet.attr,
	&dev_attr_ipv4_iface_bootproto.attr,
	&dev_attr_ipv6_iface_ipaddress.attr,
	&dev_attr_ipv6_iface_link_local_addr.attr,
	&dev_attr_ipv6_iface_router_addr.attr,
	&dev_attr_ipv6_iface_ipaddr_autocfg.attr,
	&dev_attr_ipv6_iface_link_local_autocfg.attr,
	&dev_attr_iface_mtu.attr,
	&dev_attr_iface_port.attr,
	&dev_attr_iface_ipaddress_state.attr,
	&dev_attr_iface_delayed_ack_en.attr,
	&dev_attr_iface_tcp_nagle_disable.attr,
	&dev_attr_iface_tcp_wsf_disable.attr,
	&dev_attr_iface_tcp_wsf.attr,
	&dev_attr_iface_tcp_timer_scale.attr,
	&dev_attr_iface_tcp_timestamp_en.attr,
	&dev_attr_iface_cache_id.attr,
	&dev_attr_iface_redirect_en.attr,
	&dev_attr_iface_def_taskmgmt_tmo.attr,
	&dev_attr_iface_header_digest.attr,
	&dev_attr_iface_data_digest.attr,
	&dev_attr_iface_immediate_data.attr,
	&dev_attr_iface_initial_r2t.attr,
	&dev_attr_iface_data_seq_in_order.attr,
	&dev_attr_iface_data_pdu_in_order.attr,
	&dev_attr_iface_erl.attr,
	&dev_attr_iface_max_recv_dlength.attr,
	&dev_attr_iface_first_burst_len.attr,
	&dev_attr_iface_max_outstanding_r2t.attr,
	&dev_attr_iface_max_burst_len.attr,
	&dev_attr_iface_chap_auth.attr,
	&dev_attr_iface_bidi_chap.attr,
	&dev_attr_iface_discovery_auth_optional.attr,
	&dev_attr_iface_discovery_logout.attr,
	&dev_attr_iface_strict_login_comp_en.attr,
	&dev_attr_iface_initiator_name.attr,
	&dev_attr_ipv4_iface_dhcp_dns_address_en.attr,
	&dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr,
	&dev_attr_ipv4_iface_tos_en.attr,
	&dev_attr_ipv4_iface_tos.attr,
	&dev_attr_ipv4_iface_grat_arp_en.attr,
	&dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr,
	&dev_attr_ipv4_iface_dhcp_alt_client_id.attr,
	&dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr,
	&dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr,
	&dev_attr_ipv4_iface_dhcp_vendor_id.attr,
	&dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr,
	&dev_attr_ipv4_iface_fragment_disable.attr,
	&dev_attr_ipv4_iface_incoming_forwarding_en.attr,
	&dev_attr_ipv4_iface_ttl.attr,
	&dev_attr_ipv6_iface_link_local_state.attr,
	&dev_attr_ipv6_iface_router_state.attr,
	&dev_attr_ipv6_iface_grat_neighbor_adv_en.attr,
	&dev_attr_ipv6_iface_mld_en.attr,
	&dev_attr_ipv6_iface_flow_label.attr,
	&dev_attr_ipv6_iface_traffic_class.attr,
	&dev_attr_ipv6_iface_hop_limit.attr,
	&dev_attr_ipv6_iface_nd_reachable_tmo.attr,
	&dev_attr_ipv6_iface_nd_rexmit_time.attr,
	&dev_attr_ipv6_iface_nd_stale_tmo.attr,
	&dev_attr_ipv6_iface_dup_addr_detect_cnt.attr,
	&dev_attr_ipv6_iface_router_adv_link_mtu.attr,
	NULL,
};

static struct attribute_group iscsi_iface_group = {
	.attrs = iscsi_iface_attrs,
	.is_visible = iscsi_iface_attr_is_visible,
};
/* convert iscsi_ipaddress_state values to ascii string name */
static const struct {
	enum iscsi_ipaddress_state	value;
	char				*name;
} iscsi_ipaddress_state_names[] = {
	{ISCSI_IPDDRESS_STATE_UNCONFIGURED,	"Unconfigured" },
	{ISCSI_IPDDRESS_STATE_ACQUIRING,	"Acquiring" },
	{ISCSI_IPDDRESS_STATE_TENTATIVE,	"Tentative" },
	{ISCSI_IPDDRESS_STATE_VALID,		"Valid" },
	{ISCSI_IPDDRESS_STATE_DISABLING,	"Disabling" },
	{ISCSI_IPDDRESS_STATE_INVALID,		"Invalid" },
	{ISCSI_IPDDRESS_STATE_DEPRECATED,	"Deprecated" },
};
/**
 * iscsi_get_ipaddress_state_name - map an address state to its name
 * @port_state:	iscsi_ipaddress_state value to translate
 *
 * Returns the matching string from iscsi_ipaddress_state_names, or
 * NULL if the value is unknown.
 */
char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(iscsi_ipaddress_state_names); idx++)
		if (iscsi_ipaddress_state_names[idx].value == port_state)
			return iscsi_ipaddress_state_names[idx].name;

	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name);
/* convert iscsi_router_state values to ascii string name */
static const struct {
	enum iscsi_router_state	value;
	char			*name;
} iscsi_router_state_names[] = {
	{ISCSI_ROUTER_STATE_UNKNOWN,		"Unknown" },
	{ISCSI_ROUTER_STATE_ADVERTISED,		"Advertised" },
	{ISCSI_ROUTER_STATE_MANUAL,		"Manual" },
	{ISCSI_ROUTER_STATE_STALE,		"Stale" },
};
/**
 * iscsi_get_router_state_name - map a router state to its name
 * @router_state:	iscsi_router_state value to translate
 *
 * Returns the matching string from iscsi_router_state_names, or NULL
 * if the value is unknown.
 */
char *iscsi_get_router_state_name(enum iscsi_router_state router_state)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(iscsi_router_state_names); idx++)
		if (iscsi_router_state_names[idx].value == router_state)
			return iscsi_router_state_names[idx].name;

	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_get_router_state_name);
/*
 * iscsi_create_iface - create and register an iscsi_iface sysfs device
 * @shost:	host the interface belongs to
 * @transport:	transport that owns the interface
 * @iface_type:	ISCSI_IFACE_TYPE_IPV4 or IPV6 (selects the device name)
 * @iface_num:	interface index used in the device name
 * @dd_size:	extra bytes to allocate for driver-private data (dd_data)
 *
 * Returns the new iface on success, or NULL on allocation/registration
 * failure.
 */
struct iscsi_iface *
iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
		   uint32_t iface_type, uint32_t iface_num, int dd_size)
{
	struct iscsi_iface *iface;
	int err;

	iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL);
	if (!iface)
		return NULL;

	iface->transport = transport;
	iface->iface_type = iface_type;
	iface->iface_num = iface_num;
	iface->dev.release = iscsi_iface_release;
	iface->dev.class = &iscsi_iface_class;
	/* parent reference released in iscsi_iface_release */
	iface->dev.parent = get_device(&shost->shost_gendev);
	if (iface_type == ISCSI_IFACE_TYPE_IPV4)
		dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no,
			     iface_num);
	else
		dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no,
			     iface_num);

	err = device_register(&iface->dev);
	if (err)
		goto put_dev;

	err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group);
	if (err)
		goto unreg_iface;

	/* dd_data points at the trailing driver-private area, if requested */
	if (dd_size)
		iface->dd_data = &iface[1];
	return iface;

unreg_iface:
	device_unregister(&iface->dev);
	return NULL;

put_dev:
	/*
	 * Once device_register() has run, the embedded kobject is
	 * initialized even on failure, so the reference must be dropped
	 * with put_device().  The release callback (iscsi_iface_release,
	 * which also puts the parent reference taken above) then frees
	 * the iface.  Calling kfree() directly here would double free
	 * when the driver core drops its reference.
	 */
	put_device(&iface->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_iface);
/*
 * iscsi_destroy_iface - tear down an iface created by iscsi_create_iface()
 *
 * Removes the attribute group added at creation time, then unregisters the
 * device; the final put in device_unregister() triggers iscsi_iface_release.
 */
void iscsi_destroy_iface(struct iscsi_iface *iface)
{
	sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group);
	device_unregister(&iface->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
/*
 * Interface to display flash node params to sysfs
 */

/* Declare a dev_attr_<prefix>_<name> device attribute. */
#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_prefix##_##_name = \
	__ATTR(_name, _mode, _show, _store)

/* flash node session attrs show */
#define iscsi_flashnode_sess_attr_show(type, name, param) \
static ssize_t \
show_##type##_##name(struct device *dev, struct device_attribute *attr, \
		     char *buf) \
{ \
	struct iscsi_bus_flash_session *fnode_sess = \
					iscsi_dev_to_flash_session(dev);\
	struct iscsi_transport *t = fnode_sess->transport; \
	return t->get_flashnode_param(fnode_sess, param, buf); \
} \

/* Generate a read-only flashnode session attribute and its show routine. */
#define iscsi_flashnode_sess_attr(type, name, param) \
	iscsi_flashnode_sess_attr_show(type, name, param) \
static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
			    show_##type##_##name, NULL);

/* Flash node session attributes */
iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable,
			  ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE);
iscsi_flashnode_sess_attr(fnode, discovery_session,
			  ISCSI_FLASHNODE_DISCOVERY_SESS);
iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE);
iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN);
iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN);
iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN);
iscsi_flashnode_sess_attr(fnode, data_seq_in_order,
			  ISCSI_FLASHNODE_DATASEQ_INORDER);
iscsi_flashnode_sess_attr(fnode, data_pdu_in_order,
			  ISCSI_FLASHNODE_PDU_INORDER);
iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN);
iscsi_flashnode_sess_attr(fnode, discovery_logout,
			  ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN);
iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN);
iscsi_flashnode_sess_attr(fnode, discovery_auth_optional,
			  ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL);
iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL);
iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST);
iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT);
iscsi_flashnode_sess_attr(fnode, def_time2retain,
			  ISCSI_FLASHNODE_DEF_TIME2RETAIN);
iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T);
iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID);
iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID);
iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST);
iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo,
			  ISCSI_FLASHNODE_DEF_TASKMGMT_TMO);
iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS);
iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME);
iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT);
iscsi_flashnode_sess_attr(fnode, discovery_parent_idx,
			  ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX);
iscsi_flashnode_sess_attr(fnode, discovery_parent_type,
			  ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE);
iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX);
iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX);
iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME);
iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN);
iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD);
iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN);
iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT);
/*
 * All flashnode session attributes generated above, in group order.
 * Must stay in sync with iscsi_flashnode_sess_attr_is_visible().
 */
static struct attribute *iscsi_flashnode_sess_attrs[] = {
	&dev_attr_fnode_auto_snd_tgt_disable.attr,
	&dev_attr_fnode_discovery_session.attr,
	&dev_attr_fnode_portal_type.attr,
	&dev_attr_fnode_entry_enable.attr,
	&dev_attr_fnode_immediate_data.attr,
	&dev_attr_fnode_initial_r2t.attr,
	&dev_attr_fnode_data_seq_in_order.attr,
	&dev_attr_fnode_data_pdu_in_order.attr,
	&dev_attr_fnode_chap_auth.attr,
	&dev_attr_fnode_discovery_logout.attr,
	&dev_attr_fnode_bidi_chap.attr,
	&dev_attr_fnode_discovery_auth_optional.attr,
	&dev_attr_fnode_erl.attr,
	&dev_attr_fnode_first_burst_len.attr,
	&dev_attr_fnode_def_time2wait.attr,
	&dev_attr_fnode_def_time2retain.attr,
	&dev_attr_fnode_max_outstanding_r2t.attr,
	&dev_attr_fnode_isid.attr,
	&dev_attr_fnode_tsid.attr,
	&dev_attr_fnode_max_burst_len.attr,
	&dev_attr_fnode_def_taskmgmt_tmo.attr,
	&dev_attr_fnode_targetalias.attr,
	&dev_attr_fnode_targetname.attr,
	&dev_attr_fnode_tpgt.attr,
	&dev_attr_fnode_discovery_parent_idx.attr,
	&dev_attr_fnode_discovery_parent_type.attr,
	&dev_attr_fnode_chap_in_idx.attr,
	&dev_attr_fnode_chap_out_idx.attr,
	&dev_attr_fnode_username.attr,
	&dev_attr_fnode_username_in.attr,
	&dev_attr_fnode_password.attr,
	&dev_attr_fnode_password_in.attr,
	&dev_attr_fnode_is_boot_target.attr,
	NULL,
};
/*
 * Group .is_visible callback: translate each sysfs attribute back to its
 * ISCSI_FLASHNODE_* parameter and let the transport decide the file mode
 * via ->attr_is_visible().  An attribute not handled here is a programming
 * error (table and chain out of sync), so it is warned about and hidden.
 */
static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj,
						    struct attribute *attr,
						    int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iscsi_bus_flash_session *fnode_sess =
						iscsi_dev_to_flash_session(dev);
	struct iscsi_transport *t = fnode_sess->transport;
	int param;

	if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) {
		param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE;
	} else if (attr == &dev_attr_fnode_discovery_session.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_SESS;
	} else if (attr == &dev_attr_fnode_portal_type.attr) {
		param = ISCSI_FLASHNODE_PORTAL_TYPE;
	} else if (attr == &dev_attr_fnode_entry_enable.attr) {
		param = ISCSI_FLASHNODE_ENTRY_EN;
	} else if (attr == &dev_attr_fnode_immediate_data.attr) {
		param = ISCSI_FLASHNODE_IMM_DATA_EN;
	} else if (attr == &dev_attr_fnode_initial_r2t.attr) {
		param = ISCSI_FLASHNODE_INITIAL_R2T_EN;
	} else if (attr == &dev_attr_fnode_data_seq_in_order.attr) {
		param = ISCSI_FLASHNODE_DATASEQ_INORDER;
	} else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) {
		param = ISCSI_FLASHNODE_PDU_INORDER;
	} else if (attr == &dev_attr_fnode_chap_auth.attr) {
		param = ISCSI_FLASHNODE_CHAP_AUTH_EN;
	} else if (attr == &dev_attr_fnode_discovery_logout.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN;
	} else if (attr == &dev_attr_fnode_bidi_chap.attr) {
		param = ISCSI_FLASHNODE_BIDI_CHAP_EN;
	} else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL;
	} else if (attr == &dev_attr_fnode_erl.attr) {
		param = ISCSI_FLASHNODE_ERL;
	} else if (attr == &dev_attr_fnode_first_burst_len.attr) {
		param = ISCSI_FLASHNODE_FIRST_BURST;
	} else if (attr == &dev_attr_fnode_def_time2wait.attr) {
		param = ISCSI_FLASHNODE_DEF_TIME2WAIT;
	} else if (attr == &dev_attr_fnode_def_time2retain.attr) {
		param = ISCSI_FLASHNODE_DEF_TIME2RETAIN;
	} else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) {
		param = ISCSI_FLASHNODE_MAX_R2T;
	} else if (attr == &dev_attr_fnode_isid.attr) {
		param = ISCSI_FLASHNODE_ISID;
	} else if (attr == &dev_attr_fnode_tsid.attr) {
		param = ISCSI_FLASHNODE_TSID;
	} else if (attr == &dev_attr_fnode_max_burst_len.attr) {
		param = ISCSI_FLASHNODE_MAX_BURST;
	} else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) {
		param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO;
	} else if (attr == &dev_attr_fnode_targetalias.attr) {
		param = ISCSI_FLASHNODE_ALIAS;
	} else if (attr == &dev_attr_fnode_targetname.attr) {
		param = ISCSI_FLASHNODE_NAME;
	} else if (attr == &dev_attr_fnode_tpgt.attr) {
		param = ISCSI_FLASHNODE_TPGT;
	} else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX;
	} else if (attr == &dev_attr_fnode_discovery_parent_type.attr) {
		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE;
	} else if (attr == &dev_attr_fnode_chap_in_idx.attr) {
		param = ISCSI_FLASHNODE_CHAP_IN_IDX;
	} else if (attr == &dev_attr_fnode_chap_out_idx.attr) {
		param = ISCSI_FLASHNODE_CHAP_OUT_IDX;
	} else if (attr == &dev_attr_fnode_username.attr) {
		param = ISCSI_FLASHNODE_USERNAME;
	} else if (attr == &dev_attr_fnode_username_in.attr) {
		param = ISCSI_FLASHNODE_USERNAME_IN;
	} else if (attr == &dev_attr_fnode_password.attr) {
		param = ISCSI_FLASHNODE_PASSWORD;
	} else if (attr == &dev_attr_fnode_password_in.attr) {
		param = ISCSI_FLASHNODE_PASSWORD_IN;
	} else if (attr == &dev_attr_fnode_is_boot_target.attr) {
		param = ISCSI_FLASHNODE_IS_BOOT_TGT;
	} else {
		WARN_ONCE(1, "Invalid flashnode session attr");
		return 0;
	}

	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
}
/* Attribute group and device type glue for flashnode session devices. */
static struct attribute_group iscsi_flashnode_sess_attr_group = {
	.attrs = iscsi_flashnode_sess_attrs,
	.is_visible = iscsi_flashnode_sess_attr_is_visible,
};

static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = {
	&iscsi_flashnode_sess_attr_group,
	NULL,
};

/*
 * Release callback for flashnode session devices; invoked by the driver
 * core when the last reference is dropped.  Frees the duplicated strings
 * and the session itself.
 */
static void iscsi_flashnode_sess_release(struct device *dev)
{
	struct iscsi_bus_flash_session *fnode_sess =
						iscsi_dev_to_flash_session(dev);

	kfree(fnode_sess->targetname);
	kfree(fnode_sess->targetalias);
	kfree(fnode_sess->portal_type);
	kfree(fnode_sess);
}

static const struct device_type iscsi_flashnode_sess_dev_type = {
	.name = "iscsi_flashnode_sess_dev_type",
	.groups = iscsi_flashnode_sess_attr_groups,
	.release = iscsi_flashnode_sess_release,
};
/* flash node connection attrs show */
#define iscsi_flashnode_conn_attr_show(type, name, param) \
static ssize_t \
show_##type##_##name(struct device *dev, struct device_attribute *attr, \
		     char *buf) \
{ \
	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\
	struct iscsi_bus_flash_session *fnode_sess = \
				iscsi_flash_conn_to_flash_session(fnode_conn);\
	struct iscsi_transport *t = fnode_conn->transport; \
	return t->get_flashnode_param(fnode_sess, param, buf); \
} \

/* Generate a read-only flashnode connection attribute and its show routine. */
#define iscsi_flashnode_conn_attr(type, name, param) \
	iscsi_flashnode_conn_attr_show(type, name, param) \
static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
			    show_##type##_##name, NULL);

/* Flash node connection attributes */
iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6,
			  ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6);
iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN);
iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN);
iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN);
iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat,
			  ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT);
iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable,
			  ISCSI_FLASHNODE_TCP_NAGLE_DISABLE);
iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable,
			  ISCSI_FLASHNODE_TCP_WSF_DISABLE);
iscsi_flashnode_conn_attr(fnode, tcp_timer_scale,
			  ISCSI_FLASHNODE_TCP_TIMER_SCALE);
iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable,
			  ISCSI_FLASHNODE_TCP_TIMESTAMP_EN);
iscsi_flashnode_conn_attr(fnode, fragment_disable,
			  ISCSI_FLASHNODE_IP_FRAG_DISABLE);
iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO);
iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT);
iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR);
iscsi_flashnode_conn_attr(fnode, max_recv_dlength,
			  ISCSI_FLASHNODE_MAX_RECV_DLENGTH);
iscsi_flashnode_conn_attr(fnode, max_xmit_dlength,
			  ISCSI_FLASHNODE_MAX_XMIT_DLENGTH);
iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT);
iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS);
iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC);
iscsi_flashnode_conn_attr(fnode, ipv6_flow_label,
			  ISCSI_FLASHNODE_IPV6_FLOW_LABEL);
iscsi_flashnode_conn_attr(fnode, redirect_ipaddr,
			  ISCSI_FLASHNODE_REDIRECT_IPADDR);
iscsi_flashnode_conn_attr(fnode, max_segment_size,
			  ISCSI_FLASHNODE_MAX_SEGMENT_SIZE);
iscsi_flashnode_conn_attr(fnode, link_local_ipv6,
			  ISCSI_FLASHNODE_LINK_LOCAL_IPV6);
iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF);
iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF);
iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN);
iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN);
/*
 * All flashnode connection attributes generated above.
 * Must stay in sync with iscsi_flashnode_conn_attr_is_visible().
 */
static struct attribute *iscsi_flashnode_conn_attrs[] = {
	&dev_attr_fnode_is_fw_assigned_ipv6.attr,
	&dev_attr_fnode_header_digest.attr,
	&dev_attr_fnode_data_digest.attr,
	&dev_attr_fnode_snack_req.attr,
	&dev_attr_fnode_tcp_timestamp_stat.attr,
	&dev_attr_fnode_tcp_nagle_disable.attr,
	&dev_attr_fnode_tcp_wsf_disable.attr,
	&dev_attr_fnode_tcp_timer_scale.attr,
	&dev_attr_fnode_tcp_timestamp_enable.attr,
	&dev_attr_fnode_fragment_disable.attr,
	&dev_attr_fnode_max_recv_dlength.attr,
	&dev_attr_fnode_max_xmit_dlength.attr,
	&dev_attr_fnode_keepalive_tmo.attr,
	&dev_attr_fnode_port.attr,
	&dev_attr_fnode_ipaddress.attr,
	&dev_attr_fnode_redirect_ipaddr.attr,
	&dev_attr_fnode_max_segment_size.attr,
	&dev_attr_fnode_local_port.attr,
	&dev_attr_fnode_ipv4_tos.attr,
	&dev_attr_fnode_ipv6_traffic_class.attr,
	&dev_attr_fnode_ipv6_flow_label.attr,
	&dev_attr_fnode_link_local_ipv6.attr,
	&dev_attr_fnode_tcp_xmit_wsf.attr,
	&dev_attr_fnode_tcp_recv_wsf.attr,
	&dev_attr_fnode_statsn.attr,
	&dev_attr_fnode_exp_statsn.attr,
	NULL,
};
/*
 * Group .is_visible callback for flashnode connection attributes; maps each
 * attribute back to its ISCSI_FLASHNODE_* parameter and defers the mode
 * decision to the transport's ->attr_is_visible().  Unknown attributes
 * indicate the table and this chain drifted apart: warn and hide.
 */
static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj,
						    struct attribute *attr,
						    int i)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
	struct iscsi_transport *t = fnode_conn->transport;
	int param;

	if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) {
		param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6;
	} else if (attr == &dev_attr_fnode_header_digest.attr) {
		param = ISCSI_FLASHNODE_HDR_DGST_EN;
	} else if (attr == &dev_attr_fnode_data_digest.attr) {
		param = ISCSI_FLASHNODE_DATA_DGST_EN;
	} else if (attr == &dev_attr_fnode_snack_req.attr) {
		param = ISCSI_FLASHNODE_SNACK_REQ_EN;
	} else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) {
		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT;
	} else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) {
		param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE;
	} else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) {
		param = ISCSI_FLASHNODE_TCP_WSF_DISABLE;
	} else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) {
		param = ISCSI_FLASHNODE_TCP_TIMER_SCALE;
	} else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) {
		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN;
	} else if (attr == &dev_attr_fnode_fragment_disable.attr) {
		param = ISCSI_FLASHNODE_IP_FRAG_DISABLE;
	} else if (attr == &dev_attr_fnode_max_recv_dlength.attr) {
		param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH;
	} else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) {
		param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH;
	} else if (attr == &dev_attr_fnode_keepalive_tmo.attr) {
		param = ISCSI_FLASHNODE_KEEPALIVE_TMO;
	} else if (attr == &dev_attr_fnode_port.attr) {
		param = ISCSI_FLASHNODE_PORT;
	} else if (attr == &dev_attr_fnode_ipaddress.attr) {
		param = ISCSI_FLASHNODE_IPADDR;
	} else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) {
		param = ISCSI_FLASHNODE_REDIRECT_IPADDR;
	} else if (attr == &dev_attr_fnode_max_segment_size.attr) {
		param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE;
	} else if (attr == &dev_attr_fnode_local_port.attr) {
		param = ISCSI_FLASHNODE_LOCAL_PORT;
	} else if (attr == &dev_attr_fnode_ipv4_tos.attr) {
		param = ISCSI_FLASHNODE_IPV4_TOS;
	} else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) {
		param = ISCSI_FLASHNODE_IPV6_TC;
	} else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) {
		param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL;
	} else if (attr == &dev_attr_fnode_link_local_ipv6.attr) {
		param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6;
	} else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) {
		param = ISCSI_FLASHNODE_TCP_XMIT_WSF;
	} else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) {
		param = ISCSI_FLASHNODE_TCP_RECV_WSF;
	} else if (attr == &dev_attr_fnode_statsn.attr) {
		param = ISCSI_FLASHNODE_STATSN;
	} else if (attr == &dev_attr_fnode_exp_statsn.attr) {
		param = ISCSI_FLASHNODE_EXP_STATSN;
	} else {
		WARN_ONCE(1, "Invalid flashnode connection attr");
		return 0;
	}

	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
}
/* Attribute group and device type glue for flashnode connection devices. */
static struct attribute_group iscsi_flashnode_conn_attr_group = {
	.attrs = iscsi_flashnode_conn_attrs,
	.is_visible = iscsi_flashnode_conn_attr_is_visible,
};

static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = {
	&iscsi_flashnode_conn_attr_group,
	NULL,
};

/*
 * Release callback for flashnode connection devices; frees the duplicated
 * address strings and the connection object on final put.
 */
static void iscsi_flashnode_conn_release(struct device *dev)
{
	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);

	kfree(fnode_conn->ipaddress);
	kfree(fnode_conn->redirect_ipaddr);
	kfree(fnode_conn->link_local_ipv6_addr);
	kfree(fnode_conn);
}

static const struct device_type iscsi_flashnode_conn_dev_type = {
	.name = "iscsi_flashnode_conn_dev_type",
	.groups = iscsi_flashnode_conn_attr_groups,
	.release = iscsi_flashnode_conn_release,
};
static struct bus_type iscsi_flashnode_bus;

/*
 * iscsi_flashnode_bus_match - bus match callback for flashnode devices
 *
 * Every device on the flashnode bus matches; returns 1 when @dev sits on
 * iscsi_flashnode_bus, 0 otherwise.  @drv is unused.
 */
int iscsi_flashnode_bus_match(struct device *dev,
			      struct device_driver *drv)
{
	return dev->bus == &iscsi_flashnode_bus ? 1 : 0;
}
EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
/* Pseudo bus that all flashnode session/connection devices hang off. */
static struct bus_type iscsi_flashnode_bus = {
	.name = "iscsi_flashnode",
	.match = &iscsi_flashnode_bus_match,
};
/**
 * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs
 * @shost: pointer to host data
 * @index: index of flashnode to add in sysfs
 * @transport: pointer to transport data
 * @dd_size: total size to allocate
 *
 * Adds a sysfs entry for the flashnode session attributes
 *
 * Returns:
 *  pointer to allocated flashnode sess on success
 *  %NULL on failure
 */
struct iscsi_bus_flash_session *
iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
			    struct iscsi_transport *transport,
			    int dd_size)
{
	struct iscsi_bus_flash_session *fnode_sess;
	int err;

	fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL);
	if (!fnode_sess)
		return NULL;

	fnode_sess->transport = transport;
	fnode_sess->target_id = index;
	fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type;
	fnode_sess->dev.bus = &iscsi_flashnode_bus;
	fnode_sess->dev.parent = &shost->shost_gendev;
	dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u",
		     shost->host_no, index);

	err = device_register(&fnode_sess->dev);
	if (err)
		goto put_dev;

	/* dd_data points at the trailing driver-private area, if requested */
	if (dd_size)
		fnode_sess->dd_data = &fnode_sess[1];

	return fnode_sess;

put_dev:
	/*
	 * device_register() initializes the embedded kobject even when it
	 * fails, so drop the reference with put_device(); the release
	 * callback (iscsi_flashnode_sess_release) frees fnode_sess.  A
	 * direct kfree() here would double free when the driver core
	 * drops its reference.
	 */
	put_device(&fnode_sess->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
/**
 * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs
 * @shost: pointer to host data
 * @fnode_sess: pointer to the parent flashnode session entry
 * @transport: pointer to transport data
 * @dd_size: total size to allocate
 *
 * Adds a sysfs entry for the flashnode connection attributes
 *
 * Returns:
 *  pointer to allocated flashnode conn on success
 *  %NULL on failure
 */
struct iscsi_bus_flash_conn *
iscsi_create_flashnode_conn(struct Scsi_Host *shost,
			    struct iscsi_bus_flash_session *fnode_sess,
			    struct iscsi_transport *transport,
			    int dd_size)
{
	struct iscsi_bus_flash_conn *fnode_conn;
	int err;

	fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
	if (!fnode_conn)
		return NULL;

	fnode_conn->transport = transport;
	fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
	fnode_conn->dev.bus = &iscsi_flashnode_bus;
	fnode_conn->dev.parent = &fnode_sess->dev;
	dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
		     shost->host_no, fnode_sess->target_id);

	err = device_register(&fnode_conn->dev);
	if (err)
		goto put_dev;

	/* dd_data points at the trailing driver-private area, if requested */
	if (dd_size)
		fnode_conn->dd_data = &fnode_conn[1];

	return fnode_conn;

put_dev:
	/*
	 * device_register() initializes the embedded kobject even when it
	 * fails, so drop the reference with put_device(); the release
	 * callback (iscsi_flashnode_conn_release) frees fnode_conn.  A
	 * direct kfree() here would double free when the driver core
	 * drops its reference.
	 */
	put_device(&fnode_conn->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
/**
 * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn
 * @dev: device to verify
 * @data: pointer to data containing value to use for verification
 *
 * Verifies if the passed device is flashnode conn device
 *
 * Returns:
 *  1 on success
 *  0 on failure
 */
static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
{
	return dev->bus == &iscsi_flashnode_bus;
}

/* Unregister one flashnode connection; final put runs the release callback. */
static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
{
	device_unregister(&fnode_conn->dev);
	return 0;
}
/*
 * flashnode_match_index - device_find_child() helper
 *
 * Returns 1 when @dev is a flashnode-bus device whose session target_id
 * equals the index pointed to by @data, otherwise 0.
 */
static int flashnode_match_index(struct device *dev, void *data)
{
	struct iscsi_bus_flash_session *fnode_sess;

	if (!iscsi_flashnode_bus_match(dev, NULL))
		return 0;

	fnode_sess = iscsi_dev_to_flash_session(dev);
	return fnode_sess->target_id == *(int *)data;
}
/**
 * iscsi_get_flashnode_by_index -finds flashnode session entry by index
 * @shost: pointer to host data
 * @idx: index to match
 *
 * Finds the flashnode session object for the passed index
 *
 * Returns:
 *  pointer to found flashnode session object on success
 *  %NULL on failure
 */
static struct iscsi_bus_flash_session *
iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
{
	struct iscsi_bus_flash_session *fnode_sess = NULL;
	struct device *dev;

	/* device_find_child() takes a reference on the returned device */
	dev = device_find_child(&shost->shost_gendev, &idx,
				flashnode_match_index);
	if (dev)
		fnode_sess = iscsi_dev_to_flash_session(dev);

	return fnode_sess;
}
/**
 * iscsi_find_flashnode_sess - finds flashnode session entry
 * @shost: pointer to host data
 * @data: pointer to data containing value to use for comparison
 * @fn: function pointer that does actual comparison
 *
 * Finds the flashnode session object comparing the data passed using logic
 * defined in passed function pointer
 *
 * Returns:
 *  pointer to found flashnode session device object on success
 *  %NULL on failure
 */
struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
			  int (*fn)(struct device *dev, void *data))
{
	return device_find_child(&shost->shost_gendev, data, fn);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
/**
 * iscsi_find_flashnode_conn - finds flashnode connection entry
 * @fnode_sess: pointer to parent flashnode session entry
 *
 * Finds the flashnode connection object comparing the data passed using logic
 * defined in passed function pointer
 *
 * Returns:
 *  pointer to found flashnode connection device object on success
 *  %NULL on failure
 */
struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
{
	return device_find_child(&fnode_sess->dev, NULL,
				 iscsi_is_flashnode_conn_dev);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
/*
 * device_for_each_child() helper: destroy @dev if it is a flashnode
 * connection, skip it otherwise.  Always returns 0 so iteration continues.
 */
static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
{
	if (!iscsi_is_flashnode_conn_dev(dev, NULL))
		return 0;

	return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
}
/**
 * iscsi_destroy_flashnode_sess - destroy flashnode session entry
 * @fnode_sess: pointer to flashnode session entry to be destroyed
 *
 * Deletes the flashnode session entry and all children flashnode connection
 * entries from sysfs
 */
void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
{
	int err;

	/* tear down child connections first, then the session device itself */
	err = device_for_each_child(&fnode_sess->dev, NULL,
				    iscsi_iter_destroy_flashnode_conn_fn);
	if (err)
		pr_err("Could not delete all connections for %s. Error %d.\n",
		       fnode_sess->dev.kobj.name, err);

	device_unregister(&fnode_sess->dev);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);

/*
 * device_for_each_child() helper: destroy @dev if it is a flashnode
 * session (matched via the flashnode bus), skip anything else.
 */
static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
{
	if (!iscsi_flashnode_bus_match(dev, NULL))
		return 0;

	iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
	return 0;
}
/**
 * iscsi_destroy_all_flashnode - destroy all flashnode session entries
 * @shost: pointer to host data
 *
 * Destroys all the flashnode session entries and all corresponding children
 * flashnode connection entries from sysfs
 */
void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL,
			      iscsi_iter_destroy_flashnode_fn);
}
EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);
/*
 * BSG support
 */
/**
 * iscsi_bsg_host_dispatch - Dispatch command to LLD.
 * @job: bsg job to be processed
 */
static int iscsi_bsg_host_dispatch(struct bsg_job *job)
{
	struct Scsi_Host *shost = iscsi_job_to_shost(job);
	struct iscsi_bsg_request *req = job->request;
	struct iscsi_bsg_reply *reply = job->reply;
	struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
	int ret;

	/* check if we have the msgcode value at least */
	if (job->request_len < sizeof(uint32_t)) {
		ret = -ENOMSG;
		goto fail_host_msg;
	}

	/* Validate the host command */
	switch (req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		cmdlen += sizeof(struct iscsi_bsg_host_vendor);
		/* the request's vendor id must match this host's LLD */
		if ((shost->hostt->vendor_id == 0L) ||
		    (req->rqst_data.h_vendor.vendor_id !=
		     shost->hostt->vendor_id)) {
			ret = -ESRCH;
			goto fail_host_msg;
		}
		break;
	default:
		ret = -EBADR;
		goto fail_host_msg;
	}

	/* check if we really have all the request data needed */
	if (job->request_len < cmdlen) {
		ret = -ENOMSG;
		goto fail_host_msg;
	}

	ret = i->iscsi_transport->bsg_request(job);
	if (!ret)
		return 0;

fail_host_msg:
	/* return the errno failure code as the only status */
	BUG_ON(job->reply_len < sizeof(uint32_t));
	reply->reply_payload_rcv_len = 0;
	reply->result = ret;
	job->reply_len = sizeof(uint32_t);
	/* complete the job ourselves; 0 is returned so bsg does not retry */
	bsg_job_done(job, ret, 0);
	return 0;
}
/**
 * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests
 * @shost: shost for iscsi_host
 * @ihost: iscsi_cls_host adding the structures to
 */
static int
iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
{
	struct device *dev = &shost->shost_gendev;
	struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
	struct request_queue *q;
	char bsg_name[20];

	/* bsg only makes sense when the transport can service the requests */
	if (!i->iscsi_transport->bsg_request)
		return -ENOTSUPP;

	snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
	q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0, NULL);
	if (IS_ERR(q)) {
		shost_printk(KERN_ERR, shost, "bsg interface failed to "
			     "initialize - no request queue\n");
		return PTR_ERR(q);
	}
	__scsi_init_queue(shost, q);

	ihost->bsg_q = q;
	return 0;
}
/*
 * Transport-class host setup: initialize the per-host iscsi_cls_host data
 * and attach the bsg queue.  bsg failures are deliberately ignored - the
 * host is still usable without sgio.
 */
static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
			    struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct iscsi_cls_host *ihost = shost->shost_data;

	memset(ihost, 0, sizeof(*ihost));
	atomic_set(&ihost->nr_scans, 0);
	mutex_init(&ihost->mutex);

	iscsi_bsg_host_add(shost, ihost);
	/* ignore any bsg add error - we just can't do sgio */

	return 0;
}

/* Transport-class host teardown: unregister and clean up the bsg queue. */
static int iscsi_remove_host(struct transport_container *tc,
			     struct device *dev, struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct iscsi_cls_host *ihost = shost->shost_data;

	if (ihost->bsg_q) {
		bsg_unregister_queue(ihost->bsg_q);
		blk_cleanup_queue(ihost->bsg_q);
	}
	return 0;
}
/* Transport classes for host, session and connection objects. */
static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
			       "iscsi_host",
			       iscsi_setup_host,
			       iscsi_remove_host,
			       NULL);

static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
			       "iscsi_session",
			       NULL,
			       NULL,
			       NULL);

static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
			       "iscsi_connection",
			       NULL,
			       NULL,
			       NULL);

/* Netlink socket for userspace communication, plus the global session and
 * connection lists with their protecting locks.
 */
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);

static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
static DEFINE_SPINLOCK(connlock);

/* Return the sid of the session a connection belongs to (its parent dev). */
static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
{
	struct iscsi_cls_session *sess = iscsi_dev_to_session(conn->dev.parent);

	return sess->sid;
}
/*
 * iscsi_session_lookup - find the registered session with the given sid.
 * Walks the global session list under sesslock; returns NULL when no
 * session matches.
 */
static struct iscsi_cls_session *iscsi_session_lookup(uint32_t sid)
{
	struct iscsi_cls_session *found = NULL;
	struct iscsi_cls_session *sess;
	unsigned long flags;

	spin_lock_irqsave(&sesslock, flags);
	list_for_each_entry(sess, &sesslist, sess_list) {
		if (sess->sid != sid)
			continue;
		found = sess;
		break;
	}
	spin_unlock_irqrestore(&sesslock, flags);

	return found;
}
/*
 * iscsi_conn_lookup - find the registered connection matching the
 * (sid, cid) tuple.  Walks the global connection list under connlock;
 * returns NULL when no connection matches.
 */
static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
{
	struct iscsi_cls_conn *found = NULL;
	struct iscsi_cls_conn *conn;
	unsigned long flags;

	spin_lock_irqsave(&connlock, flags);
	list_for_each_entry(conn, &connlist, conn_list) {
		if (conn->cid != cid || iscsi_conn_get_sid(conn) != sid)
			continue;
		found = conn;
		break;
	}
	spin_unlock_irqrestore(&connlock, flags);

	return found;
}
/*
 * The following functions can be used by LLDs that allocate
 * their own scsi_hosts or by software iscsi LLDs
 */
static struct {
	int value;
	char *name;
} iscsi_session_state_names[] = {
	{ ISCSI_SESSION_LOGGED_IN,	"LOGGED_IN" },
	{ ISCSI_SESSION_FAILED,		"FAILED" },
	{ ISCSI_SESSION_FREE,		"FREE" },
};
/*
 * iscsi_session_state_name - map an ISCSI_SESSION_* state to its printable
 * name from iscsi_session_state_names[], or NULL for unknown states.
 */
static const char *iscsi_session_state_name(int state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
		if (iscsi_session_state_names[i].value == state)
			return iscsi_session_state_names[i].name;
	}

	return NULL;
}
/*
 * iscsi_session_chkready - translate session state into a SCSI host byte
 *
 * Returns 0 when the session is logged in and can take commands; otherwise
 * a DID_* host byte shifted into result position, read under session->lock:
 *   FAILED -> DID_IMM_RETRY (transient, retry), FREE -> DID_TRANSPORT_FAILFAST,
 *   anything else -> DID_NO_CONNECT.
 */
int iscsi_session_chkready(struct iscsi_cls_session *session)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&session->lock, flags);
	switch (session->state) {
	case ISCSI_SESSION_LOGGED_IN:
		err = 0;
		break;
	case ISCSI_SESSION_FAILED:
		err = DID_IMM_RETRY << 16;
		break;
	case ISCSI_SESSION_FREE:
		err = DID_TRANSPORT_FAILFAST << 16;
		break;
	default:
		err = DID_NO_CONNECT << 16;
		break;
	}
	spin_unlock_irqrestore(&session->lock, flags);
	return err;
}
EXPORT_SYMBOL_GPL(iscsi_session_chkready);
/*
 * iscsi_is_session_online - test whether a session is logged in
 *
 * Samples session->state under session->lock; returns 1 when the session
 * is in ISCSI_SESSION_LOGGED_IN, 0 otherwise.
 */
int iscsi_is_session_online(struct iscsi_cls_session *session)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&session->lock, flags);
	ret = (session->state == ISCSI_SESSION_LOGGED_IN) ? 1 : 0;
	spin_unlock_irqrestore(&session->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iscsi_is_session_online);
/*
 * Device release callback for session devices: drop the host reference
 * taken at session creation and free the session on final put.
 */
static void iscsi_session_release(struct device *dev)
{
	struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
	struct Scsi_Host *shost;

	shost = iscsi_session_to_shost(session);
	scsi_host_put(shost);
	ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n");
	kfree(session);
}

/* A device is a session device iff its release is iscsi_session_release. */
int iscsi_is_session_dev(const struct device *dev)
{
	return dev->release == iscsi_session_release;
}
EXPORT_SYMBOL_GPL(iscsi_is_session_dev);
/*
 * device_for_each_child() helper: invoke the user callback (smuggled in
 * via @data) on each child device that is actually a session.
 */
static int iscsi_iter_session_fn(struct device *dev, void *data)
{
	void (* fn) (struct iscsi_cls_session *) = data;

	if (!iscsi_is_session_dev(dev))
		return 0;
	fn(iscsi_dev_to_session(dev));
	return 0;
}

/* Call @fn on every iscsi session that is a child of @shost. */
void iscsi_host_for_each_session(struct Scsi_Host *shost,
				 void (*fn)(struct iscsi_cls_session *))
{
	/* note: @fn rides in the data slot; the iterator is the real fn */
	device_for_each_child(&shost->shost_gendev, fn,
			      iscsi_iter_session_fn);
}
EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
/**
 * iscsi_scan_finished - helper to report when running scans are done
 * @shost: scsi host
 * @time: scan run time
 *
 * This function can be used by drives like qla4xxx to report to the scsi
 * layer when the scans it kicked off at module load time are done.
 */
int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct iscsi_cls_host *ihost = shost->shost_data;
	/*
	 * qla4xxx will have kicked off some session unblocks before calling
	 * scsi_scan_host, so just wait for them to complete.
	 */
	return !atomic_read(&ihost->nr_scans);
}
EXPORT_SYMBOL_GPL(iscsi_scan_finished);
/* Parameters for a target scan, passed through device_for_each_child(). */
struct iscsi_scan_data {
	unsigned int channel;	/* channel to scan, or SCAN_WILD_CARD */
	unsigned int id;	/* target id to scan, or SCAN_WILD_CARD */
	u64 lun;		/* lun to scan, or SCAN_WILD_CARD */
	enum scsi_scan_mode rescan;	/* SCSI_SCAN_MANUAL or _RESCAN */
};
/*
 * Scan one session child device, if it matches the iscsi_scan_data
 * criteria and is logged in.  Used both for user-initiated scans and
 * for the async scan work.  Always returns 0 so sibling sessions are
 * still visited.
 */
static int iscsi_user_scan_session(struct device *dev, void *data)
{
	struct iscsi_scan_data *scan_data = data;
	struct iscsi_cls_session *session;
	struct Scsi_Host *shost;
	struct iscsi_cls_host *ihost;
	unsigned long flags;
	unsigned int id;

	if (!iscsi_is_session_dev(dev))
		return 0;

	session = iscsi_dev_to_session(dev);

	ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n");

	shost = iscsi_session_to_shost(session);
	ihost = shost->shost_data;

	mutex_lock(&ihost->mutex);
	/* only scan sessions that are currently logged in */
	spin_lock_irqsave(&session->lock, flags);
	if (session->state != ISCSI_SESSION_LOGGED_IN) {
		spin_unlock_irqrestore(&session->lock, flags);
		goto user_scan_exit;
	}
	id = session->target_id;
	spin_unlock_irqrestore(&session->lock, flags);

	/* target_id == ISCSI_MAX_TARGET means no target has been bound yet */
	if (id != ISCSI_MAX_TARGET) {
		if ((scan_data->channel == SCAN_WILD_CARD ||
		     scan_data->channel == 0) &&
		    (scan_data->id == SCAN_WILD_CARD ||
		     scan_data->id == id))
			scsi_scan_target(&session->dev, 0, id,
					 scan_data->lun, scan_data->rescan);
	}

user_scan_exit:
	mutex_unlock(&ihost->mutex);
	ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
	return 0;
}
/*
 * Entry point for user-requested scans (scsi_host scan_user attribute
 * path): scan every session on @shost that matches @channel/@id/@lun.
 */
static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
			   uint id, u64 lun)
{
	struct iscsi_scan_data scan_data = {
		.channel = channel,
		.id = id,
		.lun = lun,
		.rescan = SCSI_SCAN_MANUAL,
	};

	return device_for_each_child(&shost->shost_gendev, &scan_data,
				     iscsi_user_scan_session);
}
/*
 * Work handler for kernel-initiated session scans, queued from
 * __iscsi_unblock_session() when the LLD supports async scanning.
 * Rescans all ids/luns on channel 0 and then drops the host's pending
 * scan count.
 */
static void iscsi_scan_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
			container_of(work, struct iscsi_cls_session, scan_work);
	struct Scsi_Host *shost = iscsi_session_to_shost(session);
	struct iscsi_cls_host *ihost = shost->shost_data;
	struct iscsi_scan_data scan_data;

	scan_data.channel = 0;
	scan_data.id = SCAN_WILD_CARD;
	scan_data.lun = SCAN_WILD_CARD;
	scan_data.rescan = SCSI_SCAN_RESCAN;

	iscsi_user_scan_session(&session->dev, &scan_data);
	/* pairs with the atomic_inc in __iscsi_unblock_session() */
	atomic_dec(&ihost->nr_scans);
}
/**
 * iscsi_block_scsi_eh - block scsi eh until session state has transistioned
 * @cmd: scsi cmd passed to scsi eh handler
 *
 * If the session is down this function will wait for the recovery
 * timer to fire or for the session to be logged back in. If the
 * recovery timer fires then FAST_IO_FAIL is returned. The caller
 * should pass this error value to the scsi eh.
 */
int iscsi_block_scsi_eh(struct scsi_cmnd *cmd)
{
	struct iscsi_cls_session *session =
			starget_to_session(scsi_target(cmd->device));
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&session->lock, flags);
	while (session->state != ISCSI_SESSION_LOGGED_IN) {
		if (session->state == ISCSI_SESSION_FREE) {
			/* recovery timed out (or session removed) */
			ret = FAST_IO_FAIL;
			break;
		}
		/* still ISCSI_SESSION_FAILED: poll again in one second */
		spin_unlock_irqrestore(&session->lock, flags);
		msleep(1000);
		spin_lock_irqsave(&session->lock, flags);
	}
	spin_unlock_irqrestore(&session->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(iscsi_block_scsi_eh);
/*
 * Delayed work armed by __iscsi_block_session(). Runs when a session
 * has stayed in ISCSI_SESSION_FAILED for recovery_tmo seconds: mark it
 * FREE, notify the transport, and unblock the target so queued
 * commands fail instead of hanging.
 */
static void session_recovery_timedout(struct work_struct *work)
{
	struct iscsi_cls_session *session =
		container_of(work, struct iscsi_cls_session,
			     recovery_work.work);
	unsigned long flags;

	iscsi_cls_session_printk(KERN_INFO, session,
				 "session recovery timed out after %d secs\n",
				 session->recovery_tmo);

	spin_lock_irqsave(&session->lock, flags);
	switch (session->state) {
	case ISCSI_SESSION_FAILED:
		session->state = ISCSI_SESSION_FREE;
		break;
	case ISCSI_SESSION_LOGGED_IN:
	case ISCSI_SESSION_FREE:
		/* we raced with the unblock's flush */
		spin_unlock_irqrestore(&session->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&session->lock, flags);

	if (session->transport->session_recovery_timedout)
		session->transport->session_recovery_timedout(session);

	ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
}
/*
 * Work handler for iscsi_unblock_session(): mark the session logged
 * in, resume I/O to its target, and kick a kernel scan when the LLD
 * uses async scanning.
 */
static void __iscsi_unblock_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
			container_of(work, struct iscsi_cls_session,
				     unblock_work);
	struct Scsi_Host *shost = iscsi_session_to_shost(session);
	struct iscsi_cls_host *ihost = shost->shost_data;
	unsigned long flags;

	ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
	/*
	 * The recovery and unblock work get run from the same workqueue,
	 * so try to cancel it if it was going to run after this unblock.
	 */
	cancel_delayed_work(&session->recovery_work);
	spin_lock_irqsave(&session->lock, flags);
	session->state = ISCSI_SESSION_LOGGED_IN;
	spin_unlock_irqrestore(&session->lock, flags);
	/* start IO */
	scsi_target_unblock(&session->dev, SDEV_RUNNING);
	/*
	 * Only do kernel scanning if the driver is properly hooked into
	 * the async scanning code (drivers like iscsi_tcp do login and
	 * scanning from userspace).
	 */
	if (shost->hostt->scan_finished) {
		/* nr_scans is dropped again in iscsi_scan_session() */
		if (scsi_queue_work(shost, &session->scan_work))
			atomic_inc(&ihost->nr_scans);
	}
	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
}
/**
 * iscsi_unblock_session - set a session as logged in and start IO.
 * @session: iscsi session
 *
 * Mark a session as ready to accept IO. The actual work runs from
 * __iscsi_unblock_session() on the iscsi eh timer workqueue; this
 * function does not return until that work has completed.
 */
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
	queue_work(iscsi_eh_timer_workq, &session->unblock_work);
	/*
	 * make sure all the events have completed before tell the driver
	 * it is safe
	 */
	flush_workqueue(iscsi_eh_timer_workq);
}
EXPORT_SYMBOL_GPL(iscsi_unblock_session);
/*
 * Work handler for iscsi_block_session(): mark the session FAILED,
 * block I/O to its target, and arm the recovery timeout. A negative
 * recovery_tmo disables the timeout entirely.
 */
static void __iscsi_block_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
			container_of(work, struct iscsi_cls_session,
				     block_work);
	unsigned long flags;

	ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n");
	spin_lock_irqsave(&session->lock, flags);
	session->state = ISCSI_SESSION_FAILED;
	spin_unlock_irqrestore(&session->lock, flags);
	scsi_target_block(&session->dev);
	ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
	if (session->recovery_tmo >= 0)
		queue_delayed_work(iscsi_eh_timer_workq,
				   &session->recovery_work,
				   session->recovery_tmo * HZ);
}
/*
 * iscsi_block_session - stop I/O on a session until it recovers or the
 * recovery timer fires; the work runs asynchronously from
 * __iscsi_block_session().
 */
void iscsi_block_session(struct iscsi_cls_session *session)
{
	queue_work(iscsi_eh_timer_workq, &session->block_work);
}
EXPORT_SYMBOL_GPL(iscsi_block_session);
/*
 * Work handler that detaches a session from its scsi target: releases
 * the target id (back to the ida when we allocated it), removes the
 * scsi target and tells userspace via ISCSI_KEVENT_UNBIND_SESSION.
 * A no-op when the session is already unbound.
 */
static void __iscsi_unbind_session(struct work_struct *work)
{
	struct iscsi_cls_session *session =
		container_of(work, struct iscsi_cls_session,
			     unbind_work);
	struct Scsi_Host *shost = iscsi_session_to_shost(session);
	struct iscsi_cls_host *ihost = shost->shost_data;
	unsigned long flags;
	unsigned int target_id;

	ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");

	/* Prevent new scans and make sure scanning is not in progress */
	mutex_lock(&ihost->mutex);
	spin_lock_irqsave(&session->lock, flags);
	if (session->target_id == ISCSI_MAX_TARGET) {
		/* already unbound */
		spin_unlock_irqrestore(&session->lock, flags);
		mutex_unlock(&ihost->mutex);
		return;
	}
	target_id = session->target_id;
	/* ISCSI_MAX_TARGET marks the session unbound from here on */
	session->target_id = ISCSI_MAX_TARGET;
	spin_unlock_irqrestore(&session->lock, flags);
	mutex_unlock(&ihost->mutex);

	if (session->ida_used)
		ida_simple_remove(&iscsi_sess_ida, target_id);

	scsi_remove_target(&session->dev);
	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
/*
 * iscsi_alloc_session - allocate and initialize an iscsi class session
 * @shost: scsi host the session belongs to
 * @transport: iscsi transport
 * @dd_size: bytes of driver-private data to allocate after the session
 *
 * Returns the session (state ISCSI_SESSION_FREE, device initialized
 * but not yet added - see iscsi_add_session()) or NULL on allocation
 * failure. The host reference taken here is dropped in
 * iscsi_session_release().
 */
struct iscsi_cls_session *
iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
		    int dd_size)
{
	struct iscsi_cls_session *session;

	session = kzalloc(sizeof(*session) + dd_size,
			  GFP_KERNEL);
	if (!session)
		return NULL;

	session->transport = transport;
	session->creator = -1;		/* no creator pid yet */
	session->recovery_tmo = 120;	/* default recovery timeout, seconds */
	session->recovery_tmo_sysfs_override = false;
	session->state = ISCSI_SESSION_FREE;
	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
	INIT_LIST_HEAD(&session->sess_list);
	INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
	INIT_WORK(&session->block_work, __iscsi_block_session);
	INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
	INIT_WORK(&session->scan_work, iscsi_scan_session);
	spin_lock_init(&session->lock);

	/* this is released in the dev's release function */
	scsi_host_get(shost);
	session->dev.parent = &shost->shost_gendev;
	session->dev.release = iscsi_session_release;
	device_initialize(&session->dev);
	if (dd_size)
		/* driver data lives directly after the session struct */
		session->dd_data = &session[1];

	ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n");
	return session;
}
EXPORT_SYMBOL_GPL(iscsi_alloc_session);
/*
 * iscsi_add_session - register an allocated session with the class
 * @session: session from iscsi_alloc_session()
 * @target_id: target id to bind, or ISCSI_MAX_TARGET to allocate one
 *
 * Assigns the session id, registers the device, links the session into
 * the global session list and sends ISCSI_KEVENT_CREATE_SESSION to
 * userspace. Returns 0 or a negative errno; on failure an
 * ida-allocated target id is released again.
 */
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
	unsigned long flags;
	int id = 0;
	int err;

	session->sid = atomic_add_return(1, &iscsi_session_nr);

	if (target_id == ISCSI_MAX_TARGET) {
		id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
		if (id < 0) {
			iscsi_cls_session_printk(KERN_ERR, session,
					"Failure in Target ID Allocation\n");
			return id;
		}
		session->target_id = (unsigned int)id;
		session->ida_used = true;
	} else
		session->target_id = target_id;

	dev_set_name(&session->dev, "session%u", session->sid);
	err = device_add(&session->dev);
	if (err) {
		iscsi_cls_session_printk(KERN_ERR, session,
					 "could not register session's dev\n");
		goto release_ida;
	}
	transport_register_device(&session->dev);

	spin_lock_irqsave(&sesslock, flags);
	list_add(&session->sess_list, &sesslist);
	spin_unlock_irqrestore(&sesslock, flags);

	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
	ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
	return 0;

release_ida:
	if (session->ida_used)
		ida_simple_remove(&iscsi_sess_ida, session->target_id);
	return err;
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
/**
 * iscsi_create_session - create iscsi class session
 * @shost: scsi host
 * @transport: iscsi transport
 * @dd_size: private driver data size
 * @target_id: which target, or ISCSI_MAX_TARGET to allocate an id
 *
 * Convenience wrapper around iscsi_alloc_session() plus
 * iscsi_add_session(). This can be called from a LLD or
 * iscsi_transport. Returns NULL on failure.
 */
struct iscsi_cls_session *
iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
		     int dd_size, unsigned int target_id)
{
	struct iscsi_cls_session *session;

	session = iscsi_alloc_session(shost, transport, dd_size);
	if (!session)
		return NULL;

	if (iscsi_add_session(session, target_id)) {
		iscsi_free_session(session);
		return NULL;
	}
	return session;
}
EXPORT_SYMBOL_GPL(iscsi_create_session);
/*
 * Device release callback for iscsi class connections: frees the conn
 * and drops the parent session reference taken in iscsi_create_conn().
 */
static void iscsi_conn_release(struct device *dev)
{
	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
	struct device *session_dev = conn->dev.parent;

	ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n");
	kfree(conn);
	put_device(session_dev);
}
/*
 * Nonzero when @dev is an iscsi class-connection device; connection
 * devices are identified by their release callback.
 */
static int iscsi_is_conn_dev(const struct device *dev)
{
	return dev->release == iscsi_conn_release;
}
/*
 * device_for_each_child() adapter used by iscsi_remove_session() to
 * destroy every remaining connection child; non-connection children
 * are skipped.
 */
static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
{
	if (iscsi_is_conn_dev(dev))
		return iscsi_destroy_conn(iscsi_dev_to_conn(dev));

	return 0;
}
/*
 * iscsi_remove_session - tear down a registered session
 * @session: iscsi class session
 *
 * Unlinks the session, quiesces block/unblock/recovery work, fails
 * blocked I/O, unbinds the scsi target, destroys any remaining
 * connections and deletes the device. The final reference is dropped
 * separately via iscsi_free_session().
 */
void iscsi_remove_session(struct iscsi_cls_session *session)
{
	unsigned long flags;
	int err;

	ISCSI_DBG_TRANS_SESSION(session, "Removing session\n");

	spin_lock_irqsave(&sesslock, flags);
	list_del(&session->sess_list);
	spin_unlock_irqrestore(&sesslock, flags);

	/* make sure there are no blocks/unblocks queued */
	flush_workqueue(iscsi_eh_timer_workq);
	/* make sure the timedout callout is not running */
	if (!cancel_delayed_work(&session->recovery_work))
		flush_workqueue(iscsi_eh_timer_workq);
	/*
	 * If we are blocked let commands flow again. The lld or iscsi
	 * layer should set up the queuecommand to fail commands.
	 * We assume that LLD will not be calling block/unblock while
	 * removing the session.
	 */
	spin_lock_irqsave(&session->lock, flags);
	session->state = ISCSI_SESSION_FREE;
	spin_unlock_irqrestore(&session->lock, flags);

	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
	/* flush running scans then delete devices */
	flush_work(&session->scan_work);
	/* called directly, not queued: the eh workqueue is already idle */
	__iscsi_unbind_session(&session->unbind_work);

	/* hw iscsi may not have removed all connections from session */
	err = device_for_each_child(&session->dev, NULL,
				    iscsi_iter_destroy_conn_fn);
	if (err)
		iscsi_cls_session_printk(KERN_ERR, session,
					 "Could not delete all connections "
					 "for session. Error %d.\n", err);

	transport_unregister_device(&session->dev);

	ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
	device_del(&session->dev);
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);
/*
 * iscsi_free_session - notify userspace of session destruction and
 * drop the device reference; the session memory itself is freed from
 * iscsi_session_release() once the last reference is gone.
 */
void iscsi_free_session(struct iscsi_cls_session *session)
{
	ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
	iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
	put_device(&session->dev);
}
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
 * iscsi_create_conn - create iscsi class connection
 * @session: iscsi cls session
 * @dd_size: private driver data size
 * @cid: connection id
 *
 * This can be called from a LLD or iscsi_transport. The connection
 * is child of the session so cid must be unique for all connections
 * on the session.
 *
 * Since we do not support MCS, cid will normally be zero. In some cases
 * for software iscsi we could be trying to preallocate a connection struct
 * in which case there could be two connection structs and cid would be
 * non-zero.
 *
 * Returns the new connection or NULL on any failure.
 */
struct iscsi_cls_conn *
iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
{
	struct iscsi_transport *transport = session->transport;
	struct iscsi_cls_conn *conn;
	unsigned long flags;
	int err;

	conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
	if (!conn)
		return NULL;
	if (dd_size)
		/* driver data lives directly after the conn struct */
		conn->dd_data = &conn[1];

	mutex_init(&conn->ep_mutex);
	INIT_LIST_HEAD(&conn->conn_list);
	conn->transport = transport;
	conn->cid = cid;

	/* this is released in the dev's release function */
	if (!get_device(&session->dev))
		goto free_conn;

	dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
	conn->dev.parent = &session->dev;
	conn->dev.release = iscsi_conn_release;
	err = device_register(&conn->dev);
	if (err) {
		iscsi_cls_session_printk(KERN_ERR, session, "could not "
					 "register connection's dev\n");
		goto release_parent_ref;
	}
	transport_register_device(&conn->dev);

	spin_lock_irqsave(&connlock, flags);
	list_add(&conn->conn_list, &connlist);
	spin_unlock_irqrestore(&connlock, flags);

	ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
	return conn;

release_parent_ref:
	put_device(&session->dev);
free_conn:
	kfree(conn);
	return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_create_conn);
/**
 * iscsi_destroy_conn - destroy iscsi class connection
 * @conn: iscsi cls connection
 *
 * This can be called from a LLD or iscsi_transport. Unlinks the conn
 * and unregisters its device; the conn memory is freed from
 * iscsi_conn_release() when the last reference drops.
 */
int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
{
	unsigned long flags;

	spin_lock_irqsave(&connlock, flags);
	list_del(&conn->conn_list);
	spin_unlock_irqrestore(&connlock, flags);

	transport_unregister_device(&conn->dev);
	ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
	device_unregister(&conn->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
/*
 * iscsi interface functions
 */
/*
 * Look up the registered iscsi_internal for transport @tt. Returns
 * NULL when the transport is not (or no longer) registered; the
 * returned pointer is not reference counted.
 */
static struct iscsi_internal *
iscsi_if_transport_lookup(struct iscsi_transport *tt)
{
	struct iscsi_internal *priv;
	unsigned long flags;

	spin_lock_irqsave(&iscsi_transport_lock, flags);
	list_for_each_entry(priv, &iscsi_transports, list) {
		if (tt == priv->iscsi_transport) {
			spin_unlock_irqrestore(&iscsi_transport_lock, flags);
			return priv;
		}
	}
	spin_unlock_irqrestore(&iscsi_transport_lock, flags);
	return NULL;
}
/* Multicast @skb to netlink @group on the iscsi netlink socket. */
static int
iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
{
	return nlmsg_multicast(nls, skb, 0, group, gfp);
}
/* Unicast @skb to the netlink peer @portid on the iscsi netlink socket. */
static int
iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
{
	return nlmsg_unicast(nls, skb, portid);
}
/*
 * iscsi_recv_pdu - forward a received control PDU to userspace
 * @conn: connection the PDU arrived on
 * @hdr: iscsi pdu header
 * @data: pdu payload
 * @data_size: payload length in bytes
 *
 * Packs header and payload into an ISCSI_KEVENT_RECV_PDU netlink event
 * and multicasts it to the iscsid group. On skb allocation failure the
 * connection is flagged failed via iscsi_conn_error_event() and
 * -ENOMEM is returned.
 */
int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
		   char *data, uint32_t data_size)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct iscsi_uevent *ev;
	char *pdu;
	struct iscsi_internal *priv;
	int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) +
				   data_size);

	priv = iscsi_if_transport_lookup(conn->transport);
	if (!priv)
		return -EINVAL;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED);
		iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
				      "control PDU: OOM\n");
		return -ENOMEM;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	memset(ev, 0, sizeof(*ev));
	ev->transport_handle = iscsi_handle(conn->transport);
	ev->type = ISCSI_KEVENT_RECV_PDU;
	ev->r.recv_req.cid = conn->cid;
	ev->r.recv_req.sid = iscsi_conn_get_sid(conn);
	/* header then payload immediately follow the event struct */
	pdu = (char*)ev + sizeof(*ev);
	memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
	memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);

	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
/*
 * iscsi_offload_mesg - multicast an offload event to the uip group
 * @shost: scsi host, used for the host_no in the event
 * @transport: iscsi transport
 * @type: ISCSI_KEVENT_PATH_REQ or ISCSI_KEVENT_IF_DOWN
 * @data: payload copied after the event header
 * @data_size: payload length in bytes
 *
 * Returns 0 or a negative errno (-ENOMEM when the skb cannot be
 * allocated).
 */
int iscsi_offload_mesg(struct Scsi_Host *shost,
		       struct iscsi_transport *transport, uint32_t type,
		       char *data, uint16_t data_size)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct iscsi_uevent *ev;
	int len = nlmsg_total_size(sizeof(*ev) + data_size);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
		return -ENOMEM;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	memset(ev, 0, sizeof(*ev));
	ev->type = type;
	ev->transport_handle = iscsi_handle(transport);
	switch (type) {
	case ISCSI_KEVENT_PATH_REQ:
		ev->r.req_path.host_no = shost->host_no;
		break;
	case ISCSI_KEVENT_IF_DOWN:
		ev->r.notify_if_down.host_no = shost->host_no;
		break;
	}

	memcpy((char *)ev + sizeof(*ev), data, data_size);

	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
/*
 * iscsi_conn_error_event - report a connection error to userspace
 * @conn: connection the error occurred on
 * @error: iscsi error code
 *
 * Multicasts an ISCSI_KEVENT_CONN_ERROR event to the iscsid group.
 * Best effort: on skb allocation failure the event is dropped with a
 * log message.
 */
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct iscsi_uevent *ev;
	struct iscsi_internal *priv;
	int len = nlmsg_total_size(sizeof(*ev));

	priv = iscsi_if_transport_lookup(conn->transport);
	if (!priv)
		return;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
				      "conn error (%d)\n", error);
		return;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	/*
	 * Fix: zero the whole event before filling it in, like
	 * iscsi_recv_pdu() does. alloc_skb() does not clear the buffer,
	 * so the unused union bytes would otherwise carry stale kernel
	 * memory out to userspace.
	 */
	memset(ev, 0, sizeof(*ev));
	ev->transport_handle = iscsi_handle(conn->transport);
	ev->type = ISCSI_KEVENT_CONN_ERROR;
	ev->r.connerror.error = error;
	ev->r.connerror.cid = conn->cid;
	ev->r.connerror.sid = iscsi_conn_get_sid(conn);

	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);

	iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
			      error);
}
EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
/*
 * iscsi_conn_login_event - report a connection login state change
 * @conn: connection whose state changed
 * @state: new iscsi connection state
 *
 * Multicasts an ISCSI_KEVENT_CONN_LOGIN_STATE event to the iscsid
 * group. Best effort: on skb allocation failure the event is dropped
 * with a log message.
 */
void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
			    enum iscsi_conn_state state)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct iscsi_uevent *ev;
	struct iscsi_internal *priv;
	int len = nlmsg_total_size(sizeof(*ev));

	priv = iscsi_if_transport_lookup(conn->transport);
	if (!priv)
		return;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
				      "conn login (%d)\n", state);
		return;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	/*
	 * Fix: zero the whole event before filling it in, like
	 * iscsi_recv_pdu() does, so uninitialized union bytes from the
	 * fresh skb are not exposed to userspace.
	 */
	memset(ev, 0, sizeof(*ev));
	ev->transport_handle = iscsi_handle(conn->transport);
	ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
	ev->r.conn_login.state = state;
	ev->r.conn_login.cid = conn->cid;
	ev->r.conn_login.sid = iscsi_conn_get_sid(conn);
	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);

	iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n",
			      state);
}
EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
/*
 * iscsi_post_host_event - multicast an ISCSI_KEVENT_HOST_EVENT
 * @host_no: host the event occurred on
 * @transport: iscsi transport
 * @code: host event code
 * @data_size: bytes in @data (may be 0)
 * @data: optional payload copied after the event header
 *
 * Best effort: on skb allocation failure the event is dropped with a
 * log message.
 */
void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
			   enum iscsi_host_event_code code, uint32_t data_size,
			   uint8_t *data)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct iscsi_uevent *ev;
	int len = nlmsg_total_size(sizeof(*ev) + data_size);

	skb = alloc_skb(len, GFP_NOIO);
	if (!skb) {
		printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
		       host_no, code);
		return;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	ev->transport_handle = iscsi_handle(transport);
	ev->type = ISCSI_KEVENT_HOST_EVENT;
	ev->r.host_event.host_no = host_no;
	ev->r.host_event.code = code;
	ev->r.host_event.data_size = data_size;

	/* @data may be NULL when @data_size is 0 */
	if (data_size)
		memcpy((char *)ev + sizeof(*ev), data, data_size);

	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
}
EXPORT_SYMBOL_GPL(iscsi_post_host_event);
/*
 * iscsi_ping_comp_event - multicast an ISCSI_KEVENT_PING_COMP event
 * @host_no: host the ping completed on
 * @transport: iscsi transport that issued the ping
 * @status: ping completion status
 * @pid: id of the originating ping request
 * @data_size: bytes in @data (may be 0)
 * @data: optional ping payload copied after the event header
 *
 * Best effort: on skb allocation failure the event is dropped with a
 * log message.
 */
void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
			   uint32_t status, uint32_t pid, uint32_t data_size,
			   uint8_t *data)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct iscsi_uevent *ev;
	int len = nlmsg_total_size(sizeof(*ev) + data_size);

	skb = alloc_skb(len, GFP_NOIO);
	if (!skb) {
		printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
		return;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	ev->transport_handle = iscsi_handle(transport);
	ev->type = ISCSI_KEVENT_PING_COMP;
	ev->r.ping_comp.host_no = host_no;
	ev->r.ping_comp.status = status;
	ev->r.ping_comp.pid = pid;
	ev->r.ping_comp.data_size = data_size;
	/*
	 * Fix: guard the copy like iscsi_post_host_event() does. A
	 * caller may pass data == NULL together with data_size == 0,
	 * and memcpy() from a NULL source is undefined behavior even
	 * for a zero-length copy.
	 */
	if (data_size)
		memcpy((char *)ev + sizeof(*ev), data, data_size);

	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
}
EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
/*
 * Build and unicast a netlink reply of @size bytes at @payload with
 * message type @type to peer @portid. Returns -ENOMEM when the skb
 * cannot be allocated.
 */
static int
iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
{
	struct sk_buff	*skb;
	struct nlmsghdr	*nlh;
	int len = nlmsg_total_size(size);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "Could not allocate skb to send reply.\n");
		return -ENOMEM;
	}

	nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
	memcpy(nlmsg_data(nlh), payload, size);
	return iscsi_unicast_skb(skb, portid);
}
/*
 * Handle an ISCSI_UEVENT_GET_STATS request: query the transport's
 * per-connection statistics and multicast them back to the iscsid
 * group. The skb is sized for the worst case (ISCSI_STATS_CUSTOM_MAX
 * custom entries) and trimmed to the actual size afterwards. The send
 * is retried until it succeeds or fails with -ECONNREFUSED.
 */
static int
iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
{
	struct iscsi_uevent *ev = nlmsg_data(nlh);
	struct iscsi_stats *stats;
	struct sk_buff *skbstat;
	struct iscsi_cls_conn *conn;
	struct nlmsghdr	*nlhstat;
	struct iscsi_uevent *evstat;
	struct iscsi_internal *priv;
	int len = nlmsg_total_size(sizeof(*ev) +
				   sizeof(struct iscsi_stats) +
				   sizeof(struct iscsi_stats_custom) *
				   ISCSI_STATS_CUSTOM_MAX);
	int err = 0;

	priv = iscsi_if_transport_lookup(transport);
	if (!priv)
		return -EINVAL;

	conn = iscsi_conn_lookup(ev->u.get_stats.sid, ev->u.get_stats.cid);
	if (!conn)
		return -EEXIST;

	do {
		int actual_size;

		skbstat = alloc_skb(len, GFP_ATOMIC);
		if (!skbstat) {
			iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
					      "deliver stats: OOM\n");
			return -ENOMEM;
		}

		nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
				      (len - sizeof(*nlhstat)), 0);
		evstat = nlmsg_data(nlhstat);
		memset(evstat, 0, sizeof(*evstat));
		evstat->transport_handle = iscsi_handle(conn->transport);
		evstat->type = nlh->nlmsg_type;
		evstat->u.get_stats.cid =
			ev->u.get_stats.cid;
		evstat->u.get_stats.sid =
			ev->u.get_stats.sid;
		/* stats are written by the LLD directly after the event */
		stats = (struct iscsi_stats *)
			((char*)evstat + sizeof(*evstat));
		memset(stats, 0, sizeof(*stats));

		/*
		 * NOTE(review): a failure from get_stats() is not
		 * detectable here - the handler returns void-style data
		 * in *stats and err is set only by the send below.
		 */
		transport->get_stats(conn, stats);
		/* shrink to the number of custom stats actually filled in */
		actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) +
					       sizeof(struct iscsi_stats) +
					       sizeof(struct iscsi_stats_custom) *
					       stats->custom_length);
		actual_size -= sizeof(*nlhstat);
		actual_size = nlmsg_msg_size(actual_size);
		skb_trim(skbstat, NLMSG_ALIGN(actual_size));
		nlhstat->nlmsg_len = actual_size;

		err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
					  GFP_ATOMIC);
	} while (err < 0 && err != -ECONNREFUSED);

	return err;
}
/**
 * iscsi_session_event - send session creation/destruction/unbind event
 * @session: iscsi class session
 * @event: type of event
 *
 * Multicasts the event to the iscsid netlink group. Returns 0 on
 * success, -EINVAL for an unknown or unregistered transport/event,
 * -ENOMEM on allocation failure, or the netlink send error.
 */
int iscsi_session_event(struct iscsi_cls_session *session,
			enum iscsi_uevent_e event)
{
	struct iscsi_internal *priv;
	struct Scsi_Host *shost;
	struct iscsi_uevent *ev;
	struct sk_buff  *skb;
	struct nlmsghdr *nlh;
	int rc, len = nlmsg_total_size(sizeof(*ev));

	priv = iscsi_if_transport_lookup(session->transport);
	if (!priv)
		return -EINVAL;
	shost = iscsi_session_to_shost(session);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb) {
		iscsi_cls_session_printk(KERN_ERR, session,
					 "Cannot notify userspace of session "
					 "event %u\n", event);
		return -ENOMEM;
	}

	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
	ev = nlmsg_data(nlh);
	ev->transport_handle = iscsi_handle(session->transport);

	ev->type = event;
	switch (event) {
	case ISCSI_KEVENT_DESTROY_SESSION:
		ev->r.d_session.host_no = shost->host_no;
		ev->r.d_session.sid = session->sid;
		break;
	case ISCSI_KEVENT_CREATE_SESSION:
		ev->r.c_session_ret.host_no = shost->host_no;
		ev->r.c_session_ret.sid = session->sid;
		break;
	case ISCSI_KEVENT_UNBIND_SESSION:
		ev->r.unbind_session.host_no = shost->host_no;
		ev->r.unbind_session.sid = session->sid;
		break;
	default:
		iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
					 "%u.\n", event);
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 * this will occur if the daemon is not up, so we just warn
	 * the user and when the daemon is restarted it will handle it
	 */
	rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
	if (rc == -ESRCH)
		iscsi_cls_session_printk(KERN_ERR, session,
					 "Cannot notify userspace of session "
					 "event %u. Check iscsi daemon\n",
					 event);

	ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n",
				event, rc);
	return rc;
}
EXPORT_SYMBOL_GPL(iscsi_session_event);
/*
 * Handle an ISCSI_UEVENT_CREATE_SESSION request: ask the transport to
 * create a session and report host_no/sid back through @ev. @pid is
 * recorded as the session creator (the requesting netlink peer).
 */
static int
iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
			struct iscsi_uevent *ev, pid_t pid,
			uint32_t initial_cmdsn,	uint16_t cmds_max,
			uint16_t queue_depth)
{
	struct iscsi_transport *transport = priv->iscsi_transport;
	struct iscsi_cls_session *session;
	struct Scsi_Host *shost;

	session = transport->create_session(ep, cmds_max, queue_depth,
					    initial_cmdsn);
	if (!session)
		return -ENOMEM;

	session->creator = pid;
	shost = iscsi_session_to_shost(session);
	ev->r.c_session_ret.host_no = shost->host_no;
	ev->r.c_session_ret.sid = session->sid;
	ISCSI_DBG_TRANS_SESSION(session,
				"Completed creating transport session\n");
	return 0;
}
/*
 * Handle an ISCSI_UEVENT_CREATE_CONN request: create a connection with
 * id @ev->u.c_conn.cid on the session identified by @ev->u.c_conn.sid
 * and report the resulting sid/cid back through @ev.
 */
static int
iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
	struct iscsi_cls_conn *conn;
	struct iscsi_cls_session *session;

	session = iscsi_session_lookup(ev->u.c_conn.sid);
	if (!session) {
		printk(KERN_ERR "iscsi: invalid session %d.\n",
		       ev->u.c_conn.sid);
		return -EINVAL;
	}

	conn = transport->create_conn(session, ev->u.c_conn.cid);
	if (!conn) {
		/* Fix: terminate the log line so it cannot be merged
		 * with a following printk. */
		iscsi_cls_session_printk(KERN_ERR, session,
					 "couldn't create a new connection.\n");
		return -ENOMEM;
	}

	ev->r.c_conn_ret.sid = session->sid;
	ev->r.c_conn_ret.cid = conn->cid;

	ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n");
	return 0;
}
/*
 * Handle an ISCSI_UEVENT_DESTROY_CONN request: look up the connection
 * and hand it to the transport's (optional) destroy_conn handler.
 */
static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
	struct iscsi_cls_conn *conn = iscsi_conn_lookup(ev->u.d_conn.sid,
							ev->u.d_conn.cid);

	if (!conn)
		return -EINVAL;

	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
	if (transport->destroy_conn)
		transport->destroy_conn(conn);

	return 0;
}
/*
 * Handle an ISCSI_UEVENT_SET_PARAM request. The parameter value (a
 * string of @ev->u.set_param.len bytes) follows the event header.
 * ISCSI_PARAM_SESS_RECOVERY_TMO is handled here in the class; all
 * other parameters are passed to the transport's set_param handler.
 */
static int
iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
	char *data = (char*)ev + sizeof(*ev);
	struct iscsi_cls_conn *conn;
	struct iscsi_cls_session *session;
	int err = 0, value = 0;

	session = iscsi_session_lookup(ev->u.set_param.sid);
	conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
	if (!conn || !session)
		return -EINVAL;

	switch (ev->u.set_param.param) {
	case ISCSI_PARAM_SESS_RECOVERY_TMO:
		/* value stays 0 if the string does not parse as an int */
		sscanf(data, "%d", &value);
		/* a sysfs-written timeout takes precedence over netlink */
		if (!session->recovery_tmo_sysfs_override)
			session->recovery_tmo = value;
		break;
	default:
		err = transport->set_param(conn, ev->u.set_param.param,
					   data, ev->u.set_param.len);
	}

	return err;
}
/*
 * Handle endpoint-connect requests. For
 * ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST the target host is
 * looked up first (shost stays NULL for the plain EP_CONNECT variant).
 * The destination sockaddr follows the event header; the new
 * endpoint's id is reported back through @ev.
 */
static int iscsi_if_ep_connect(struct iscsi_transport *transport,
			       struct iscsi_uevent *ev, int msg_type)
{
	struct iscsi_endpoint *ep;
	struct sockaddr *dst_addr;
	struct Scsi_Host *shost = NULL;
	int non_blocking, err = 0;

	if (!transport->ep_connect)
		return -EINVAL;

	if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
		shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
		if (!shost) {
			printk(KERN_ERR "ep connect failed. Could not find "
			       "host no %u\n",
			       ev->u.ep_connect_through_host.host_no);
			return -ENODEV;
		}
		non_blocking = ev->u.ep_connect_through_host.non_blocking;
	} else
		non_blocking = ev->u.ep_connect.non_blocking;

	dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
	ep = transport->ep_connect(shost, dst_addr, non_blocking);
	if (IS_ERR(ep)) {
		err = PTR_ERR(ep);
		goto release_host;
	}

	ev->r.ep_connect_ret.handle = ep->id;
release_host:
	if (shost)
		scsi_host_put(shost);
	return err;
}
/*
 * Disconnect the endpoint identified by @ep_handle: detach it from its
 * connection (under the conn's ep_mutex) and hand it to the
 * transport's ep_disconnect handler.
 */
static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
				  u64 ep_handle)
{
	struct iscsi_cls_conn *conn;
	struct iscsi_endpoint *ep;

	if (!transport->ep_disconnect)
		return -EINVAL;

	/*
	 * NOTE(review): no put/reference is visible here for the looked
	 * up endpoint - confirm iscsi_lookup_endpoint()'s lifetime rules.
	 */
	ep = iscsi_lookup_endpoint(ep_handle);
	if (!ep)
		return -EINVAL;
	conn = ep->conn;
	if (conn) {
		mutex_lock(&conn->ep_mutex);
		conn->ep = NULL;
		mutex_unlock(&conn->ep_mutex);
	}

	transport->ep_disconnect(ep);
	return 0;
}
/*
 * Dispatch the endpoint-related netlink requests (connect, poll,
 * disconnect) to their handlers. For EP_POLL the poll result is
 * returned through @ev->r.retcode.
 */
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
		      struct iscsi_uevent *ev, int msg_type)
{
	struct iscsi_endpoint *ep;
	int rc = 0;

	switch (msg_type) {
	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
		rc = iscsi_if_ep_connect(transport, ev, msg_type);
		break;
	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
		if (!transport->ep_poll)
			return -EINVAL;

		ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
		if (!ep)
			return -EINVAL;

		ev->r.retcode = transport->ep_poll(ep,
						   ev->u.ep_poll.timeout_ms);
		break;
	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
		rc = iscsi_if_ep_disconnect(transport,
					    ev->u.ep_disconnect.ep_handle);
		break;
	}
	return rc;
}
/*
 * Relay a target-discovery request to the transport's tgt_dscvr
 * handler. The destination sockaddr follows the event header.
 * Returns -EINVAL when unsupported and -ENODEV when the host cannot
 * be found.
 */
static int
iscsi_tgt_dscvr(struct iscsi_transport *transport,
		struct iscsi_uevent *ev)
{
	struct sockaddr *dst_addr;
	struct Scsi_Host *shost;
	int rc;

	if (!transport->tgt_dscvr)
		return -EINVAL;

	shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
	if (!shost) {
		printk(KERN_ERR "target discovery could not find host no %u\n",
		       ev->u.tgt_dscvr.host_no);
		return -ENODEV;
	}

	dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
	rc = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
				  ev->u.tgt_dscvr.enable, dst_addr);
	scsi_host_put(shost);

	return rc;
}
/*
 * Forward a host-parameter update to the transport's set_host_param
 * handler. The parameter payload follows the event header.
 */
static int
iscsi_set_host_param(struct iscsi_transport *transport,
		     struct iscsi_uevent *ev)
{
	char *param_data = (char *)ev + sizeof(*ev);
	struct Scsi_Host *shost;
	int rc;

	if (!transport->set_host_param)
		return -ENOSYS;

	shost = scsi_host_lookup(ev->u.set_host_param.host_no);
	if (!shost) {
		printk(KERN_ERR "set_host_param could not find host no %u\n",
		       ev->u.set_host_param.host_no);
		return -ENODEV;
	}

	rc = transport->set_host_param(shost, ev->u.set_host_param.param,
				       param_data, ev->u.set_host_param.len);
	scsi_host_put(shost);

	return rc;
}
/*
 * Hand a path description from userspace to the transport's set_path
 * handler. The struct iscsi_path payload follows the event header.
 */
static int
iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
	struct iscsi_path *path_params;
	struct Scsi_Host *shost;
	int rc;

	if (!transport->set_path)
		return -ENOSYS;

	shost = scsi_host_lookup(ev->u.set_path.host_no);
	if (!shost) {
		printk(KERN_ERR "set path could not find host no %u\n",
		       ev->u.set_path.host_no);
		return -ENODEV;
	}

	path_params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
	rc = transport->set_path(shost, path_params);
	scsi_host_put(shost);

	return rc;
}
/*
 * Apply iface parameter updates (@len bytes following the event
 * header) via the transport's set_iface_param handler.
 */
static int
iscsi_set_iface_params(struct iscsi_transport *transport,
		       struct iscsi_uevent *ev, uint32_t len)
{
	char *iface_data = (char *)ev + sizeof(*ev);
	struct Scsi_Host *shost;
	int rc;

	if (!transport->set_iface_param)
		return -ENOSYS;

	shost = scsi_host_lookup(ev->u.set_iface_params.host_no);
	if (!shost) {
		printk(KERN_ERR "set_iface_params could not find host no %u\n",
		       ev->u.set_iface_params.host_no);
		return -ENODEV;
	}

	rc = transport->set_iface_param(shost, iface_data, len);
	scsi_host_put(shost);

	return rc;
}
/*
 * Handle an ISCSI_UEVENT_PING request: pass the ping parameters and
 * the destination sockaddr (which follows the event header) to the
 * transport's send_ping handler. Completion is reported later via
 * iscsi_ping_comp_event().
 */
static int
iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	struct sockaddr *dst_addr;
	int err;

	if (!transport->send_ping)
		return -ENOSYS;

	shost = scsi_host_lookup(ev->u.iscsi_ping.host_no);
	if (!shost) {
		printk(KERN_ERR "iscsi_ping could not find host no %u\n",
		       ev->u.iscsi_ping.host_no);
		return -ENODEV;
	}

	dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
	err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num,
				   ev->u.iscsi_ping.iface_type,
				   ev->u.iscsi_ping.payload_size,
				   ev->u.iscsi_ping.pid,
				   dst_addr);
	scsi_host_put(shost);
	return err;
}
/*
 * Handle an ISCSI_UEVENT_GET_CHAP request: read up to num_entries CHAP
 * records starting at chap_tbl_idx from the transport and multicast
 * them back to the iscsid group. The send is retried until it succeeds
 * or fails with -ECONNREFUSED.
 */
static int
iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
{
	struct iscsi_uevent *ev = nlmsg_data(nlh);
	struct Scsi_Host *shost = NULL;
	struct iscsi_chap_rec *chap_rec;
	struct iscsi_internal *priv;
	struct sk_buff *skbchap;
	struct nlmsghdr *nlhchap;
	struct iscsi_uevent *evchap;
	uint32_t chap_buf_size;
	int len, err = 0;
	char *buf;

	if (!transport->get_chap)
		return -EINVAL;

	priv = iscsi_if_transport_lookup(transport);
	if (!priv)
		return -EINVAL;

	chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
	len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);

	shost = scsi_host_lookup(ev->u.get_chap.host_no);
	if (!shost) {
		printk(KERN_ERR "%s: failed. Could not find host no %u\n",
		       __func__, ev->u.get_chap.host_no);
		return -ENODEV;
	}

	do {
		int actual_size;

		skbchap = alloc_skb(len, GFP_KERNEL);
		if (!skbchap) {
			printk(KERN_ERR "can not deliver chap: OOM\n");
			err = -ENOMEM;
			goto exit_get_chap;
		}

		nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
				      (len - sizeof(*nlhchap)), 0);
		evchap = nlmsg_data(nlhchap);
		memset(evchap, 0, sizeof(*evchap));
		evchap->transport_handle = iscsi_handle(transport);
		evchap->type = nlh->nlmsg_type;
		evchap->u.get_chap.host_no = ev->u.get_chap.host_no;
		evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx;
		evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries;
		/* CHAP records are written directly after the event */
		buf = (char *)evchap + sizeof(*evchap);
		memset(buf, 0, chap_buf_size);

		/*
		 * NOTE(review): this err is overwritten by the send below,
		 * so a get_chap() failure is not reported to the caller -
		 * confirm whether that is intentional.
		 */
		err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
				    &evchap->u.get_chap.num_entries, buf);

		actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
		skb_trim(skbchap, NLMSG_ALIGN(actual_size));
		nlhchap->nlmsg_len = actual_size;

		err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID,
					  GFP_KERNEL);
	} while (err < 0 && err != -ECONNREFUSED);

exit_get_chap:
	scsi_host_put(shost);
	return err;
}
/*
 * iscsi_set_chap - pass a CHAP record update down to the LLD
 * @transport: transport that owns the target host
 * @ev: netlink event; the CHAP data payload follows the event header
 * @len: length of the payload in bytes
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler,
 * -ENODEV if the host cannot be found, otherwise the LLD's error.
 */
static int iscsi_set_chap(struct iscsi_transport *transport,
			  struct iscsi_uevent *ev, uint32_t len)
{
	char *data = (char *)ev + sizeof(*ev);	/* payload behind header */
	struct Scsi_Host *shost;
	int err = 0;

	if (!transport->set_chap)
		return -ENOSYS;

	/*
	 * NOTE(review): host_no is read through the set_path union member
	 * rather than a CHAP-specific one.  This presumably works because
	 * host_no occupies the same leading offset in every union member —
	 * confirm against the iscsi_uevent definition.
	 */
	shost = scsi_host_lookup(ev->u.set_path.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.set_path.host_no);
		return -ENODEV;
	}

	err = transport->set_chap(shost, data, len);
	scsi_host_put(shost);
	return err;
}
/*
 * iscsi_delete_chap - ask the LLD to remove one CHAP table entry
 * @transport: transport that owns the target host
 * @ev: netlink event carrying the host number and CHAP table index
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler,
 * -ENODEV if the host cannot be found, otherwise the LLD's error.
 */
static int iscsi_delete_chap(struct iscsi_transport *transport,
			     struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	int rc;

	if (!transport->delete_chap)
		return -ENOSYS;

	shost = scsi_host_lookup(ev->u.delete_chap.host_no);
	if (!shost) {
		printk(KERN_ERR "%s could not find host no %u\n",
		       __func__, ev->u.delete_chap.host_no);
		return -ENODEV;
	}

	rc = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx);
	scsi_host_put(shost);
	return rc;
}
/* Mapping of discovery parent-type values to human-readable names. */
static const struct {
	enum iscsi_discovery_parent_type value;
	char *name;
} iscsi_discovery_parent_names[] = {
	{ISCSI_DISC_PARENT_UNKNOWN,	"Unknown" },
	{ISCSI_DISC_PARENT_SENDTGT,	"Sendtarget" },
	{ISCSI_DISC_PARENT_ISNS,	"isns" },
};

/**
 * iscsi_get_discovery_parent_name - name for a discovery parent type
 * @parent_type: an ISCSI_DISC_PARENT_* value
 *
 * Returns the table entry's name, or "Unknown!" when @parent_type does
 * not appear in the table.
 */
char *iscsi_get_discovery_parent_name(int parent_type)
{
	int i;
	char *state = "Unknown!";

	for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) {
		/*
		 * Fix: compare for equality.  The parent types are discrete
		 * enum values, not bit flags, so the previous bitwise AND
		 * could select the wrong entry whenever two values share a
		 * bit (and could never match a zero-valued type).  The other
		 * value-to-name lookup helpers in this file use '=='.
		 */
		if (iscsi_discovery_parent_names[i].value == parent_type) {
			state = iscsi_discovery_parent_names[i].name;
			break;
		}
	}
	return state;
}
EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name);
/*
 * iscsi_set_flashnode_param - update parameters of one flash node entry
 * @transport: transport that owns the target host
 * @ev: netlink event; the parameter payload follows the event header
 * @len: length of the payload in bytes
 *
 * Looks up the host, the addressed flash-node session, and its connection,
 * then hands the parameter blob to the LLD's set_flashnode_param().
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler, -ENODEV
 * if host/flashnode/connection cannot be found, otherwise the LLD's error.
 */
static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
				     struct iscsi_uevent *ev, uint32_t len)
{
	char *data = (char *)ev + sizeof(*ev);
	struct Scsi_Host *shost;
	struct iscsi_bus_flash_session *fnode_sess;
	struct iscsi_bus_flash_conn *fnode_conn;
	struct device *dev;
	uint32_t idx;
	int err = 0;

	if (!transport->set_flashnode_param) {
		err = -ENOSYS;
		goto exit_set_fnode;
	}

	shost = scsi_host_lookup(ev->u.set_flashnode.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.set_flashnode.host_no);
		err = -ENODEV;
		/*
		 * Fix: the lookup failed, so no host reference is held;
		 * jumping to put_host would scsi_host_put() a NULL host.
		 */
		goto exit_set_fnode;
	}

	idx = ev->u.set_flashnode.flashnode_idx;
	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
	if (!fnode_sess) {
		pr_err("%s could not find flashnode %u for host no %u\n",
		       __func__, idx, ev->u.set_flashnode.host_no);
		err = -ENODEV;
		goto put_host;
	}

	dev = iscsi_find_flashnode_conn(fnode_sess);
	if (!dev) {
		err = -ENODEV;
		goto put_sess;
	}

	fnode_conn = iscsi_dev_to_flash_conn(dev);
	err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
	put_device(dev);

put_sess:
	put_device(&fnode_sess->dev);
put_host:
	scsi_host_put(shost);
exit_set_fnode:
	return err;
}
/*
 * iscsi_new_flashnode - create a new flash node entry on a host
 * @transport: transport that owns the target host
 * @ev: netlink event; creation payload follows the event header
 * @len: length of the payload in bytes
 *
 * On success the new flashnode index is stored in
 * ev->r.new_flashnode_ret.flashnode_idx for the netlink reply.
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler,
 * -ENODEV if the host cannot be found, -EIO when the LLD reports failure
 * (the LLD's specific negative index is folded to -EIO).
 */
static int iscsi_new_flashnode(struct iscsi_transport *transport,
			       struct iscsi_uevent *ev, uint32_t len)
{
	char *data = (char *)ev + sizeof(*ev);
	struct Scsi_Host *shost;
	int index;
	int err = 0;

	if (!transport->new_flashnode) {
		err = -ENOSYS;
		goto exit_new_fnode;
	}

	shost = scsi_host_lookup(ev->u.new_flashnode.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.new_flashnode.host_no);
		err = -ENODEV;
		/*
		 * Fix: the lookup failed, so no host reference is held;
		 * the old "goto put_host" would scsi_host_put() a NULL host.
		 */
		goto exit_new_fnode;
	}

	index = transport->new_flashnode(shost, data, len);

	if (index >= 0)
		ev->r.new_flashnode_ret.flashnode_idx = index;
	else
		err = -EIO;

	scsi_host_put(shost);
exit_new_fnode:
	return err;
}
/*
 * iscsi_del_flashnode - delete one flash node entry from a host
 * @transport: transport that owns the target host
 * @ev: netlink event carrying host number and flashnode index
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler,
 * -ENODEV if the host or flashnode cannot be found, otherwise the
 * LLD's error.
 */
static int iscsi_del_flashnode(struct iscsi_transport *transport,
			       struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	struct iscsi_bus_flash_session *fnode_sess;
	uint32_t idx;
	int err = 0;

	if (!transport->del_flashnode) {
		err = -ENOSYS;
		goto exit_del_fnode;
	}

	shost = scsi_host_lookup(ev->u.del_flashnode.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.del_flashnode.host_no);
		err = -ENODEV;
		/*
		 * Fix: the lookup failed, so no host reference is held;
		 * jumping to put_host would scsi_host_put() a NULL host.
		 */
		goto exit_del_fnode;
	}

	idx = ev->u.del_flashnode.flashnode_idx;
	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
	if (!fnode_sess) {
		pr_err("%s could not find flashnode %u for host no %u\n",
		       __func__, idx, ev->u.del_flashnode.host_no);
		err = -ENODEV;
		goto put_host;
	}

	err = transport->del_flashnode(fnode_sess);
	put_device(&fnode_sess->dev);

put_host:
	scsi_host_put(shost);
exit_del_fnode:
	return err;
}
/*
 * iscsi_login_flashnode - initiate login on a flash node entry
 * @transport: transport that owns the target host
 * @ev: netlink event carrying host number and flashnode index
 *
 * Resolves the host, the flash-node session, and its connection, then
 * invokes the LLD's login_flashnode().
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler, -ENODEV
 * if host/flashnode/connection cannot be found, otherwise the LLD's error.
 */
static int iscsi_login_flashnode(struct iscsi_transport *transport,
				 struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	struct iscsi_bus_flash_session *fnode_sess;
	struct iscsi_bus_flash_conn *fnode_conn;
	struct device *dev;
	uint32_t idx;
	int err = 0;

	if (!transport->login_flashnode) {
		err = -ENOSYS;
		goto exit_login_fnode;
	}

	shost = scsi_host_lookup(ev->u.login_flashnode.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.login_flashnode.host_no);
		err = -ENODEV;
		/*
		 * Fix: the lookup failed, so no host reference is held;
		 * jumping to put_host would scsi_host_put() a NULL host.
		 */
		goto exit_login_fnode;
	}

	idx = ev->u.login_flashnode.flashnode_idx;
	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
	if (!fnode_sess) {
		pr_err("%s could not find flashnode %u for host no %u\n",
		       __func__, idx, ev->u.login_flashnode.host_no);
		err = -ENODEV;
		goto put_host;
	}

	dev = iscsi_find_flashnode_conn(fnode_sess);
	if (!dev) {
		err = -ENODEV;
		goto put_sess;
	}

	fnode_conn = iscsi_dev_to_flash_conn(dev);
	err = transport->login_flashnode(fnode_sess, fnode_conn);
	put_device(dev);

put_sess:
	put_device(&fnode_sess->dev);
put_host:
	scsi_host_put(shost);
exit_login_fnode:
	return err;
}
/*
 * iscsi_logout_flashnode - initiate logout on a flash node entry
 * @transport: transport that owns the target host
 * @ev: netlink event carrying host number and flashnode index
 *
 * Resolves the host, the flash-node session, and its connection, then
 * invokes the LLD's logout_flashnode().
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler, -ENODEV
 * if host/flashnode/connection cannot be found, otherwise the LLD's error.
 */
static int iscsi_logout_flashnode(struct iscsi_transport *transport,
				  struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	struct iscsi_bus_flash_session *fnode_sess;
	struct iscsi_bus_flash_conn *fnode_conn;
	struct device *dev;
	uint32_t idx;
	int err = 0;

	if (!transport->logout_flashnode) {
		err = -ENOSYS;
		goto exit_logout_fnode;
	}

	shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.logout_flashnode.host_no);
		err = -ENODEV;
		/*
		 * Fix: the lookup failed, so no host reference is held;
		 * jumping to put_host would scsi_host_put() a NULL host.
		 */
		goto exit_logout_fnode;
	}

	idx = ev->u.logout_flashnode.flashnode_idx;
	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
	if (!fnode_sess) {
		pr_err("%s could not find flashnode %u for host no %u\n",
		       __func__, idx, ev->u.logout_flashnode.host_no);
		err = -ENODEV;
		goto put_host;
	}

	dev = iscsi_find_flashnode_conn(fnode_sess);
	if (!dev) {
		err = -ENODEV;
		goto put_sess;
	}

	fnode_conn = iscsi_dev_to_flash_conn(dev);
	err = transport->logout_flashnode(fnode_sess, fnode_conn);
	put_device(dev);

put_sess:
	put_device(&fnode_sess->dev);
put_host:
	scsi_host_put(shost);
exit_logout_fnode:
	return err;
}
/*
 * iscsi_logout_flashnode_sid - log out a flash-node session by session id
 * @transport: transport that owns the target host
 * @ev: netlink event carrying host number and session id (sid)
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler,
 * -ENODEV if the host cannot be found, -EINVAL if the session id is
 * unknown, otherwise the LLD's error.
 */
static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
				      struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *session;
	int err = 0;

	if (!transport->logout_flashnode_sid) {
		err = -ENOSYS;
		goto exit_logout_sid;
	}

	shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no);
	if (!shost) {
		/* Fix: report through the _sid union member the lookup used. */
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.logout_flashnode_sid.host_no);
		err = -ENODEV;
		/*
		 * Fix: the lookup failed, so no host reference is held;
		 * jumping to put_host would scsi_host_put() a NULL host.
		 */
		goto exit_logout_sid;
	}

	session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
	if (!session) {
		pr_err("%s could not find session id %u\n",
		       __func__, ev->u.logout_flashnode_sid.sid);
		err = -EINVAL;
		goto put_host;
	}

	err = transport->logout_flashnode_sid(session);

put_host:
	scsi_host_put(shost);
exit_logout_sid:
	return err;
}
/*
 * iscsi_get_host_stats - collect offload host statistics and multicast them
 * @transport: transport that owns the target host
 * @nlh: request netlink header; its payload is the iscsi_uevent
 *
 * Builds a reply skb containing an iscsi_uevent followed by a
 * struct iscsi_offload_host_stats filled in by the LLD and multicasts it
 * to ISCSI_NL_GRP_ISCSID.  The send is retried (rebuilding the skb each
 * pass) until it succeeds or fails with -ECONNREFUSED.
 *
 * Returns 0 on success, -ENOSYS/-EINVAL if unsupported or unregistered,
 * -ENODEV if the host is unknown, -ENOMEM on allocation failure, the
 * LLD's error, or the multicast error.
 */
static int
iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
{
	struct iscsi_uevent *ev = nlmsg_data(nlh);
	struct Scsi_Host *shost = NULL;
	struct iscsi_internal *priv;
	struct sk_buff *skbhost_stats;
	struct nlmsghdr *nlhhost_stats;
	struct iscsi_uevent *evhost_stats;
	int host_stats_size = 0;
	int len, err = 0;
	char *buf;

	if (!transport->get_host_stats)
		return -ENOSYS;

	priv = iscsi_if_transport_lookup(transport);
	if (!priv)
		return -EINVAL;

	/* reply = event header + one fixed-size host stats record */
	host_stats_size = sizeof(struct iscsi_offload_host_stats);
	len = nlmsg_total_size(sizeof(*ev) + host_stats_size);

	shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
	if (!shost) {
		pr_err("%s: failed. Could not find host no %u\n",
		       __func__, ev->u.get_host_stats.host_no);
		return -ENODEV;
	}

	do {
		int actual_size;

		skbhost_stats = alloc_skb(len, GFP_KERNEL);
		if (!skbhost_stats) {
			pr_err("cannot deliver host stats: OOM\n");
			err = -ENOMEM;
			goto exit_host_stats;
		}

		/* echo the request identifiers back in the reply event */
		nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0,
				      (len - sizeof(*nlhhost_stats)), 0);
		evhost_stats = nlmsg_data(nlhhost_stats);
		memset(evhost_stats, 0, sizeof(*evhost_stats));
		evhost_stats->transport_handle = iscsi_handle(transport);
		evhost_stats->type = nlh->nlmsg_type;
		evhost_stats->u.get_host_stats.host_no =
			ev->u.get_host_stats.host_no;
		buf = (char *)evhost_stats + sizeof(*evhost_stats);
		memset(buf, 0, host_stats_size);

		err = transport->get_host_stats(shost, buf, host_stats_size);
		if (err) {
			/* LLD failure: drop the half-built reply, report err */
			kfree_skb(skbhost_stats);
			goto exit_host_stats;
		}

		actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size);
		skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size));
		nlhhost_stats->nlmsg_len = actual_size;

		err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID,
					  GFP_KERNEL);
	} while (err < 0 && err != -ECONNREFUSED);

exit_host_stats:
	scsi_host_put(shost);
	return err;
}
/*
 * iscsi_if_recv_msg - dispatch one userspace iSCSI netlink request
 * @skb: the received socket buffer (used for sender portid)
 * @nlh: the netlink header of the message being processed
 * @group: out: multicast group any error reply should be sent to
 *
 * Resolves the transport named by ev->transport_handle, pins its module,
 * and dispatches on nlh->nlmsg_type.  For most requests the return value
 * is the interface-level status; transport-level status is reported
 * separately through ev->r.retcode where applicable.
 *
 * Returns 0 on success, -EINVAL for unknown transports or bad lookups,
 * -ENOSYS for unrecognized message types, or the helper's error.
 */
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
	int err = 0;
	u32 portid;
	struct iscsi_uevent *ev = nlmsg_data(nlh);
	struct iscsi_transport *transport = NULL;
	struct iscsi_internal *priv;
	struct iscsi_cls_session *session;
	struct iscsi_cls_conn *conn;
	struct iscsi_endpoint *ep = NULL;

	/* path updates are answered on the uIP group, everything else on iscsid's */
	if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
		*group = ISCSI_NL_GRP_UIP;
	else
		*group = ISCSI_NL_GRP_ISCSID;

	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
	if (!priv)
		return -EINVAL;
	transport = priv->iscsi_transport;

	/* keep the LLD module loaded for the duration of the request */
	if (!try_module_get(transport->owner))
		return -EINVAL;

	portid = NETLINK_CB(skb).portid;

	switch (nlh->nlmsg_type) {
	case ISCSI_UEVENT_CREATE_SESSION:
		err = iscsi_if_create_session(priv, ep, ev,
					      portid,
					      ev->u.c_session.initial_cmdsn,
					      ev->u.c_session.cmds_max,
					      ev->u.c_session.queue_depth);
		break;
	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
		/*
		 * NOTE(review): the endpoint looked up here is handed to
		 * iscsi_if_create_session() without a visible release on
		 * this path — confirm who owns the ep reference.
		 */
		ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
		if (!ep) {
			err = -EINVAL;
			break;
		}

		err = iscsi_if_create_session(priv, ep, ev,
					portid,
					ev->u.c_bound_session.initial_cmdsn,
					ev->u.c_bound_session.cmds_max,
					ev->u.c_bound_session.queue_depth);
		break;
	case ISCSI_UEVENT_DESTROY_SESSION:
		session = iscsi_session_lookup(ev->u.d_session.sid);
		if (session)
			transport->destroy_session(session);
		else
			err = -EINVAL;
		break;
	case ISCSI_UEVENT_UNBIND_SESSION:
		session = iscsi_session_lookup(ev->u.d_session.sid);
		if (session)
			scsi_queue_work(iscsi_session_to_shost(session),
					&session->unbind_work);
		else
			err = -EINVAL;
		break;
	case ISCSI_UEVENT_CREATE_CONN:
		err = iscsi_if_create_conn(transport, ev);
		break;
	case ISCSI_UEVENT_DESTROY_CONN:
		err = iscsi_if_destroy_conn(transport, ev);
		break;
	case ISCSI_UEVENT_BIND_CONN:
		session = iscsi_session_lookup(ev->u.b_conn.sid);
		conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);

		/* rebinding: tear down any endpoint already attached */
		if (conn && conn->ep)
			iscsi_if_ep_disconnect(transport, conn->ep->id);

		if (!session || !conn) {
			err = -EINVAL;
			break;
		}

		ev->r.retcode =	transport->bind_conn(session, conn,
						ev->u.b_conn.transport_eph,
						ev->u.b_conn.is_leading);
		if (ev->r.retcode || !transport->ep_connect)
			break;

		/* for offload transports, record the ep<->conn binding */
		ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
		if (ep) {
			ep->conn = conn;

			mutex_lock(&conn->ep_mutex);
			conn->ep = ep;
			mutex_unlock(&conn->ep_mutex);
		} else
			iscsi_cls_conn_printk(KERN_ERR, conn,
					      "Could not set ep conn "
					      "binding\n");
		break;
	case ISCSI_UEVENT_SET_PARAM:
		err = iscsi_set_param(transport, ev);
		break;
	case ISCSI_UEVENT_START_CONN:
		conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
		if (conn)
			ev->r.retcode = transport->start_conn(conn);
		else
			err = -EINVAL;
		break;
	case ISCSI_UEVENT_STOP_CONN:
		conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
		if (conn)
			transport->stop_conn(conn, ev->u.stop_conn.flag);
		else
			err = -EINVAL;
		break;
	case ISCSI_UEVENT_SEND_PDU:
		/* PDU header then data sit directly behind the event header */
		conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
		if (conn)
			ev->r.retcode =	transport->send_pdu(conn,
				(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
				(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
				ev->u.send_pdu.data_size);
		else
			err = -EINVAL;
		break;
	case ISCSI_UEVENT_GET_STATS:
		err = iscsi_if_get_stats(transport, nlh);
		break;
	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
		err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
		break;
	case ISCSI_UEVENT_TGT_DSCVR:
		err = iscsi_tgt_dscvr(transport, ev);
		break;
	case ISCSI_UEVENT_SET_HOST_PARAM:
		err = iscsi_set_host_param(transport, ev);
		break;
	case ISCSI_UEVENT_PATH_UPDATE:
		err = iscsi_set_path(transport, ev);
		break;
	case ISCSI_UEVENT_SET_IFACE_PARAMS:
		err = iscsi_set_iface_params(transport, ev,
					     nlmsg_attrlen(nlh, sizeof(*ev)));
		break;
	case ISCSI_UEVENT_PING:
		err = iscsi_send_ping(transport, ev);
		break;
	case ISCSI_UEVENT_GET_CHAP:
		err = iscsi_get_chap(transport, nlh);
		break;
	case ISCSI_UEVENT_DELETE_CHAP:
		err = iscsi_delete_chap(transport, ev);
		break;
	case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
		err = iscsi_set_flashnode_param(transport, ev,
						nlmsg_attrlen(nlh,
							      sizeof(*ev)));
		break;
	case ISCSI_UEVENT_NEW_FLASHNODE:
		err = iscsi_new_flashnode(transport, ev,
					  nlmsg_attrlen(nlh, sizeof(*ev)));
		break;
	case ISCSI_UEVENT_DEL_FLASHNODE:
		err = iscsi_del_flashnode(transport, ev);
		break;
	case ISCSI_UEVENT_LOGIN_FLASHNODE:
		err = iscsi_login_flashnode(transport, ev);
		break;
	case ISCSI_UEVENT_LOGOUT_FLASHNODE:
		err = iscsi_logout_flashnode(transport, ev);
		break;
	case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
		err = iscsi_logout_flashnode_sid(transport, ev);
		break;
	case ISCSI_UEVENT_SET_CHAP:
		err = iscsi_set_chap(transport, ev,
				     nlmsg_attrlen(nlh, sizeof(*ev)));
		break;
	case ISCSI_UEVENT_GET_HOST_STATS:
		err = iscsi_get_host_stats(transport, nlh);
		break;
	default:
		err = -ENOSYS;
		break;
	}

	module_put(transport->owner);
	return err;
}
/*
* Get message from skb. Each message is processed by iscsi_if_recv_msg.
* Malformed skbs with wrong lengths or invalid creds are not processed.
*/
static void
iscsi_if_rx(struct sk_buff *skb)
{
	u32 portid = NETLINK_CB(skb).portid;

	/* serialize all request processing on this interface */
	mutex_lock(&rx_queue_mutex);
	/* walk every complete netlink message packed into this skb */
	while (skb->len >= NLMSG_HDRLEN) {
		int err;
		uint32_t rlen;
		struct nlmsghdr *nlh;
		struct iscsi_uevent *ev;
		uint32_t group;

		nlh = nlmsg_hdr(skb);
		/* stop on truncated or undersized messages */
		if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
		    skb->len < nlh->nlmsg_len) {
			break;
		}

		ev = nlmsg_data(nlh);
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;

		err = iscsi_if_recv_msg(skb, nlh, &group);
		if (err) {
			/* turn the event into an error report for userspace */
			ev->type = ISCSI_KEVENT_IF_ERROR;
			ev->iferror = err;
		}
		do {
			/*
			 * special case for GET_STATS:
			 * on success - sending reply and stats from
			 * inside of if_recv_msg(),
			 * on error - fall through.
			 */
			if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
				break;
			if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
				break;
			/* retry the unicast reply until it sticks or the
			 * receiver is gone (-ECONNREFUSED / -ESRCH) */
			err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
						  ev, sizeof(*ev));
		} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
		skb_pull(skb, rlen);
	}
	mutex_unlock(&rx_queue_mutex);
}
/*
 * Declare a struct device_attribute named dev_attr_<prefix>_<name> for an
 * iSCSI class object ("conn", "sess" or "priv_sess" prefix).
 */
#define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store)		\
struct device_attribute dev_attr_##_prefix##_##_name =	\
	__ATTR(_name,_mode,_show,_store)
/*
* iSCSI connection attrs
*/
/*
 * iscsi_conn_attr_show(param) generates a sysfs show function that
 * forwards to the transport's get_conn_param() for one ISCSI_PARAM_*
 * value; iscsi_conn_attr(field, param) additionally declares the
 * read-only (S_IRUGO) attribute file named after "field".
 */
#define iscsi_conn_attr_show(param)				\
static ssize_t							\
show_conn_param_##param(struct device *dev, 			\
			struct device_attribute *attr, char *buf) \
{								\
	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);	\
	struct iscsi_transport *t = conn->transport;		\
	return t->get_conn_param(conn, param, buf);		\
}

#define iscsi_conn_attr(field, param)				\
	iscsi_conn_attr_show(param)				\
static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, show_conn_param_##param,	\
			NULL);
/* Read-only connection attribute files, one per ISCSI_PARAM_* value. */
iscsi_conn_attr(max_recv_dlength, ISCSI_PARAM_MAX_RECV_DLENGTH);
iscsi_conn_attr(max_xmit_dlength, ISCSI_PARAM_MAX_XMIT_DLENGTH);
iscsi_conn_attr(header_digest, ISCSI_PARAM_HDRDGST_EN);
iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT);
iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN);
iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO);
iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE);
iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT);
iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE);
iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE);
iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE);
iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN);
iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE);
iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS);
iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC);
iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);
iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
/*
 * Connection attributes that may live on the endpoint for offload
 * transports: prefer the endpoint's value under ep_mutex; fall back to
 * the connection for software transports (no ep_connect).
 */
#define iscsi_conn_ep_attr_show(param)					\
static ssize_t show_conn_ep_param_##param(struct device *dev,		\
					  struct device_attribute *attr,\
					  char *buf)			\
{									\
	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);	\
	struct iscsi_transport *t = conn->transport;			\
	struct iscsi_endpoint *ep;					\
	ssize_t rc;							\
									\
	/*								\
	 * Need to make sure ep_disconnect does not free the LLD's	\
	 * interconnect resources while we are trying to read them.	\
	 */								\
	mutex_lock(&conn->ep_mutex);					\
	ep = conn->ep;							\
	if (!ep && t->ep_connect) {					\
		mutex_unlock(&conn->ep_mutex);				\
		return -ENOTCONN;					\
	}								\
									\
	if (ep)								\
		rc = t->get_ep_param(ep, param, buf);			\
	else								\
		rc = t->get_conn_param(conn, param, buf);		\
	mutex_unlock(&conn->ep_mutex);					\
	return rc;							\
}

#define iscsi_conn_ep_attr(field, param)				\
	iscsi_conn_ep_attr_show(param)					\
	static ISCSI_CLASS_ATTR(conn, field, S_IRUGO,			\
				show_conn_ep_param_##param, NULL);

/* address/port may come from the endpoint on offload hardware */
iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
/* NULL-terminated list of all connection sysfs attributes (visibility is
 * filtered per-transport by iscsi_conn_attr_is_visible()). */
static struct attribute *iscsi_conn_attrs[] = {
	&dev_attr_conn_max_recv_dlength.attr,
	&dev_attr_conn_max_xmit_dlength.attr,
	&dev_attr_conn_header_digest.attr,
	&dev_attr_conn_data_digest.attr,
	&dev_attr_conn_ifmarker.attr,
	&dev_attr_conn_ofmarker.attr,
	&dev_attr_conn_address.attr,
	&dev_attr_conn_port.attr,
	&dev_attr_conn_exp_statsn.attr,
	&dev_attr_conn_persistent_address.attr,
	&dev_attr_conn_persistent_port.attr,
	&dev_attr_conn_ping_tmo.attr,
	&dev_attr_conn_recv_tmo.attr,
	&dev_attr_conn_local_port.attr,
	&dev_attr_conn_statsn.attr,
	&dev_attr_conn_keepalive_tmo.attr,
	&dev_attr_conn_max_segment_size.attr,
	&dev_attr_conn_tcp_timestamp_stat.attr,
	&dev_attr_conn_tcp_wsf_disable.attr,
	&dev_attr_conn_tcp_nagle_disable.attr,
	&dev_attr_conn_tcp_timer_scale.attr,
	&dev_attr_conn_tcp_timestamp_enable.attr,
	&dev_attr_conn_fragment_disable.attr,
	&dev_attr_conn_ipv4_tos.attr,
	&dev_attr_conn_ipv6_traffic_class.attr,
	&dev_attr_conn_ipv6_flow_label.attr,
	&dev_attr_conn_is_fw_assigned_ipv6.attr,
	&dev_attr_conn_tcp_xmit_wsf.attr,
	&dev_attr_conn_tcp_recv_wsf.attr,
	&dev_attr_conn_local_ipaddr.attr,
	NULL,
};
/*
 * iscsi_conn_attr_is_visible - sysfs visibility callback for conn attrs
 * @kobj: kobject of the connection's device
 * @attr: the attribute being considered
 * @i: index in the attribute array (unused)
 *
 * Maps each attribute back to its ISCSI_PARAM_* value and lets the
 * transport's attr_is_visible() decide whether (and with what mode) the
 * file appears.  Returns 0 (hidden) for unrecognized attributes.
 */
static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int i)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct iscsi_cls_conn *conn = transport_class_to_conn(cdev);
	struct iscsi_transport *t = conn->transport;
	int param;

	if (attr == &dev_attr_conn_max_recv_dlength.attr)
		param = ISCSI_PARAM_MAX_RECV_DLENGTH;
	else if (attr == &dev_attr_conn_max_xmit_dlength.attr)
		param = ISCSI_PARAM_MAX_XMIT_DLENGTH;
	else if (attr == &dev_attr_conn_header_digest.attr)
		param = ISCSI_PARAM_HDRDGST_EN;
	else if (attr == &dev_attr_conn_data_digest.attr)
		param = ISCSI_PARAM_DATADGST_EN;
	else if (attr == &dev_attr_conn_ifmarker.attr)
		param = ISCSI_PARAM_IFMARKER_EN;
	else if (attr == &dev_attr_conn_ofmarker.attr)
		param = ISCSI_PARAM_OFMARKER_EN;
	else if (attr == &dev_attr_conn_address.attr)
		param = ISCSI_PARAM_CONN_ADDRESS;
	else if (attr == &dev_attr_conn_port.attr)
		param = ISCSI_PARAM_CONN_PORT;
	else if (attr == &dev_attr_conn_exp_statsn.attr)
		param = ISCSI_PARAM_EXP_STATSN;
	else if (attr == &dev_attr_conn_persistent_address.attr)
		param = ISCSI_PARAM_PERSISTENT_ADDRESS;
	else if (attr == &dev_attr_conn_persistent_port.attr)
		param = ISCSI_PARAM_PERSISTENT_PORT;
	else if (attr == &dev_attr_conn_ping_tmo.attr)
		param = ISCSI_PARAM_PING_TMO;
	else if (attr == &dev_attr_conn_recv_tmo.attr)
		param = ISCSI_PARAM_RECV_TMO;
	else if (attr == &dev_attr_conn_local_port.attr)
		param = ISCSI_PARAM_LOCAL_PORT;
	else if (attr == &dev_attr_conn_statsn.attr)
		param = ISCSI_PARAM_STATSN;
	else if (attr == &dev_attr_conn_keepalive_tmo.attr)
		param = ISCSI_PARAM_KEEPALIVE_TMO;
	else if (attr == &dev_attr_conn_max_segment_size.attr)
		param = ISCSI_PARAM_MAX_SEGMENT_SIZE;
	else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr)
		param = ISCSI_PARAM_TCP_TIMESTAMP_STAT;
	else if (attr == &dev_attr_conn_tcp_wsf_disable.attr)
		param = ISCSI_PARAM_TCP_WSF_DISABLE;
	else if (attr == &dev_attr_conn_tcp_nagle_disable.attr)
		param = ISCSI_PARAM_TCP_NAGLE_DISABLE;
	else if (attr == &dev_attr_conn_tcp_timer_scale.attr)
		param = ISCSI_PARAM_TCP_TIMER_SCALE;
	else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr)
		param = ISCSI_PARAM_TCP_TIMESTAMP_EN;
	else if (attr == &dev_attr_conn_fragment_disable.attr)
		param = ISCSI_PARAM_IP_FRAGMENT_DISABLE;
	else if (attr == &dev_attr_conn_ipv4_tos.attr)
		param = ISCSI_PARAM_IPV4_TOS;
	else if (attr == &dev_attr_conn_ipv6_traffic_class.attr)
		param = ISCSI_PARAM_IPV6_TC;
	else if (attr == &dev_attr_conn_ipv6_flow_label.attr)
		param = ISCSI_PARAM_IPV6_FLOW_LABEL;
	else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr)
		param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6;
	else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr)
		param = ISCSI_PARAM_TCP_XMIT_WSF;
	else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
		param = ISCSI_PARAM_TCP_RECV_WSF;
	else if (attr == &dev_attr_conn_local_ipaddr.attr)
		param = ISCSI_PARAM_LOCAL_IPADDR;
	else {
		/* attribute added to iscsi_conn_attrs[] without a mapping */
		WARN_ONCE(1, "Invalid conn attr");
		return 0;
	}

	return t->attr_is_visible(ISCSI_PARAM, param);
}
/* sysfs attribute group for iSCSI connections; per-transport filtering
 * happens through the is_visible callback. */
static struct attribute_group iscsi_conn_group = {
	.attrs = iscsi_conn_attrs,
	.is_visible = iscsi_conn_attr_is_visible,
};
/*
* iSCSI session attrs
*/
/*
 * iscsi_session_attr_show(param, perm) generates a sysfs show function
 * forwarding to the transport's get_session_param(); when "perm" is
 * non-zero the read additionally requires CAP_SYS_ADMIN (used for
 * credentials such as CHAP usernames/passwords).
 * iscsi_session_attr(field, param, perm) also declares the read-only
 * attribute file named after "field".
 */
#define iscsi_session_attr_show(param, perm)				\
static ssize_t								\
show_session_param_##param(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct iscsi_cls_session *session = 				\
		iscsi_dev_to_session(dev->parent);			\
	struct iscsi_transport *t = session->transport;			\
									\
	if (perm && !capable(CAP_SYS_ADMIN))				\
		return -EACCES;						\
	return t->get_session_param(session, param, buf);		\
}

#define iscsi_session_attr(field, param, perm)				\
	iscsi_session_attr_show(param, perm)				\
static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
			NULL);
/* Session attribute files; third argument 1 = CAP_SYS_ADMIN required
 * (credential material such as CHAP usernames/passwords/indices). */
iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1);
iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1);
iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0);
iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0);
iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0);
iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0);
iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0);
iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0);
iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0);
iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0);
iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0);
iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0);
iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0);
iscsi_session_attr(discovery_auth_optional,
		   ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0);
iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0);
iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0);
iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0);
iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0);
iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
/* sysfs: expose the session's class-internal state as a string. */
static ssize_t
show_priv_session_state(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iscsi_cls_session *sess = iscsi_dev_to_session(dev->parent);
	const char *name = iscsi_session_state_name(sess->state);

	return sprintf(buf, "%s\n", name);
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
			NULL);
/* sysfs: expose the pid of the process that created this session. */
static ssize_t
show_priv_session_creator(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iscsi_cls_session *sess = iscsi_dev_to_session(dev->parent);
	int creator = sess->creator;

	return sprintf(buf, "%d\n", creator);
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
			NULL);
/* sysfs: expose the SCSI target id assigned to this session. */
static ssize_t
show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct iscsi_cls_session *sess = iscsi_dev_to_session(dev->parent);
	int tid = sess->target_id;

	return sprintf(buf, "%d\n", tid);
}
static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
			show_priv_session_target_id, NULL);
/*
 * Generate a show function for a class-private session field; a value of
 * -1 is rendered as "off", anything else with the supplied printf format.
 */
#define iscsi_priv_session_attr_show(field, format)			\
static ssize_t								\
show_priv_session_##field(struct device *dev, 				\
			  struct device_attribute *attr, char *buf)	\
{									\
	struct iscsi_cls_session *session = 				\
			iscsi_dev_to_session(dev->parent);		\
	if (session->field == -1)					\
		return sprintf(buf, "off\n");				\
	return sprintf(buf, format"\n", session->field);		\
}
/*
 * Generate a store function for a class-private session field.  Writing
 * "off" stores -1; otherwise the value is parsed as an unsigned integer.
 * Either way the companion <field>_sysfs_override flag is set so the
 * user's choice survives later driver updates.  Rejected with -EBUSY
 * while the session is FREE or FAILED.
 *
 * NOTE(review): simple_strtoul() is the older kernel parsing interface;
 * kstrtoint()/kstrtoul() would reject trailing garbage automatically —
 * consider when next touching this macro.
 */
#define iscsi_priv_session_attr_store(field)				\
static ssize_t								\
store_priv_session_##field(struct device *dev,				\
			   struct device_attribute *attr,		\
			   const char *buf, size_t count)		\
{									\
	int val;							\
	char *cp;							\
	struct iscsi_cls_session *session =				\
		iscsi_dev_to_session(dev->parent);			\
	if ((session->state == ISCSI_SESSION_FREE) ||			\
	    (session->state == ISCSI_SESSION_FAILED))			\
		return -EBUSY;						\
	if (strncmp(buf, "off", 3) == 0) {				\
		session->field = -1;					\
		session->field##_sysfs_override = true;			\
	} else {							\
		val = simple_strtoul(buf, &cp, 0);			\
		if (*cp != '\0' && *cp != '\n')				\
			return -EINVAL;					\
		session->field = val;					\
		session->field##_sysfs_override = true;			\
	}								\
	return count;							\
}
/* Declare a read-write private session attribute (show + store + file). */
#define iscsi_priv_session_rw_attr(field, format)			\
	iscsi_priv_session_attr_show(field, format)			\
	iscsi_priv_session_attr_store(field)				\
static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR,		\
			show_priv_session_##field,			\
			store_priv_session_##field)

/* recovery_tmo: seconds before a failed session is torn down; "off" = -1 */
iscsi_priv_session_rw_attr(recovery_tmo, "%d");
/* NULL-terminated list of all session sysfs attributes (visibility is
 * filtered per-transport by iscsi_session_attr_is_visible()). */
static struct attribute *iscsi_session_attrs[] = {
	&dev_attr_sess_initial_r2t.attr,
	&dev_attr_sess_max_outstanding_r2t.attr,
	&dev_attr_sess_immediate_data.attr,
	&dev_attr_sess_first_burst_len.attr,
	&dev_attr_sess_max_burst_len.attr,
	&dev_attr_sess_data_pdu_in_order.attr,
	&dev_attr_sess_data_seq_in_order.attr,
	&dev_attr_sess_erl.attr,
	&dev_attr_sess_targetname.attr,
	&dev_attr_sess_tpgt.attr,
	&dev_attr_sess_password.attr,
	&dev_attr_sess_password_in.attr,
	&dev_attr_sess_username.attr,
	&dev_attr_sess_username_in.attr,
	&dev_attr_sess_fast_abort.attr,
	&dev_attr_sess_abort_tmo.attr,
	&dev_attr_sess_lu_reset_tmo.attr,
	&dev_attr_sess_tgt_reset_tmo.attr,
	&dev_attr_sess_ifacename.attr,
	&dev_attr_sess_initiatorname.attr,
	&dev_attr_sess_targetalias.attr,
	&dev_attr_sess_boot_root.attr,
	&dev_attr_sess_boot_nic.attr,
	&dev_attr_sess_boot_target.attr,
	&dev_attr_priv_sess_recovery_tmo.attr,
	&dev_attr_priv_sess_state.attr,
	&dev_attr_priv_sess_creator.attr,
	&dev_attr_sess_chap_out_idx.attr,
	&dev_attr_sess_chap_in_idx.attr,
	&dev_attr_priv_sess_target_id.attr,
	&dev_attr_sess_auto_snd_tgt_disable.attr,
	&dev_attr_sess_discovery_session.attr,
	&dev_attr_sess_portal_type.attr,
	&dev_attr_sess_chap_auth.attr,
	&dev_attr_sess_discovery_logout.attr,
	&dev_attr_sess_bidi_chap.attr,
	&dev_attr_sess_discovery_auth_optional.attr,
	&dev_attr_sess_def_time2wait.attr,
	&dev_attr_sess_def_time2retain.attr,
	&dev_attr_sess_isid.attr,
	&dev_attr_sess_tsid.attr,
	&dev_attr_sess_def_taskmgmt_tmo.attr,
	&dev_attr_sess_discovery_parent_idx.attr,
	&dev_attr_sess_discovery_parent_type.attr,
	NULL,
};
/*
 * Decide whether (and with which mode) a session attribute shows up in
 * sysfs.  Transport-owned parameters are translated to their
 * ISCSI_PARAM_* id and delegated to the transport's attr_is_visible()
 * callback; the priv_sess_* attributes belong to this class itself and
 * get fixed permissions here.
 */
static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct iscsi_cls_session *session = transport_class_to_session(cdev);
	struct iscsi_transport *t = session->transport;
	int param;

	if (attr == &dev_attr_sess_initial_r2t.attr)
		param = ISCSI_PARAM_INITIAL_R2T_EN;
	else if (attr == &dev_attr_sess_max_outstanding_r2t.attr)
		param = ISCSI_PARAM_MAX_R2T;
	else if (attr == &dev_attr_sess_immediate_data.attr)
		param = ISCSI_PARAM_IMM_DATA_EN;
	else if (attr == &dev_attr_sess_first_burst_len.attr)
		param = ISCSI_PARAM_FIRST_BURST;
	else if (attr == &dev_attr_sess_max_burst_len.attr)
		param = ISCSI_PARAM_MAX_BURST;
	else if (attr == &dev_attr_sess_data_pdu_in_order.attr)
		param = ISCSI_PARAM_PDU_INORDER_EN;
	else if (attr == &dev_attr_sess_data_seq_in_order.attr)
		param = ISCSI_PARAM_DATASEQ_INORDER_EN;
	else if (attr == &dev_attr_sess_erl.attr)
		param = ISCSI_PARAM_ERL;
	else if (attr == &dev_attr_sess_targetname.attr)
		param = ISCSI_PARAM_TARGET_NAME;
	else if (attr == &dev_attr_sess_tpgt.attr)
		param = ISCSI_PARAM_TPGT;
	else if (attr == &dev_attr_sess_chap_in_idx.attr)
		param = ISCSI_PARAM_CHAP_IN_IDX;
	else if (attr == &dev_attr_sess_chap_out_idx.attr)
		param = ISCSI_PARAM_CHAP_OUT_IDX;
	/*
	 * NOTE(review): the password attrs map to the USERNAME params and
	 * the username attrs to the PASSWORD params.  This appears to
	 * mirror the iscsi_session_attr() definitions earlier in the file,
	 * so it is presumably a long-standing historical quirk rather than
	 * a typo — confirm against those definitions (and userspace
	 * expectations) before "fixing".
	 */
	else if (attr == &dev_attr_sess_password.attr)
		param = ISCSI_PARAM_USERNAME;
	else if (attr == &dev_attr_sess_password_in.attr)
		param = ISCSI_PARAM_USERNAME_IN;
	else if (attr == &dev_attr_sess_username.attr)
		param = ISCSI_PARAM_PASSWORD;
	else if (attr == &dev_attr_sess_username_in.attr)
		param = ISCSI_PARAM_PASSWORD_IN;
	else if (attr == &dev_attr_sess_fast_abort.attr)
		param = ISCSI_PARAM_FAST_ABORT;
	else if (attr == &dev_attr_sess_abort_tmo.attr)
		param = ISCSI_PARAM_ABORT_TMO;
	else if (attr == &dev_attr_sess_lu_reset_tmo.attr)
		param = ISCSI_PARAM_LU_RESET_TMO;
	else if (attr == &dev_attr_sess_tgt_reset_tmo.attr)
		param = ISCSI_PARAM_TGT_RESET_TMO;
	else if (attr == &dev_attr_sess_ifacename.attr)
		param = ISCSI_PARAM_IFACE_NAME;
	else if (attr == &dev_attr_sess_initiatorname.attr)
		param = ISCSI_PARAM_INITIATOR_NAME;
	else if (attr == &dev_attr_sess_targetalias.attr)
		param = ISCSI_PARAM_TARGET_ALIAS;
	else if (attr == &dev_attr_sess_boot_root.attr)
		param = ISCSI_PARAM_BOOT_ROOT;
	else if (attr == &dev_attr_sess_boot_nic.attr)
		param = ISCSI_PARAM_BOOT_NIC;
	else if (attr == &dev_attr_sess_boot_target.attr)
		param = ISCSI_PARAM_BOOT_TARGET;
	else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr)
		param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE;
	else if (attr == &dev_attr_sess_discovery_session.attr)
		param = ISCSI_PARAM_DISCOVERY_SESS;
	else if (attr == &dev_attr_sess_portal_type.attr)
		param = ISCSI_PARAM_PORTAL_TYPE;
	else if (attr == &dev_attr_sess_chap_auth.attr)
		param = ISCSI_PARAM_CHAP_AUTH_EN;
	else if (attr == &dev_attr_sess_discovery_logout.attr)
		param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN;
	else if (attr == &dev_attr_sess_bidi_chap.attr)
		param = ISCSI_PARAM_BIDI_CHAP_EN;
	else if (attr == &dev_attr_sess_discovery_auth_optional.attr)
		param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL;
	else if (attr == &dev_attr_sess_def_time2wait.attr)
		param = ISCSI_PARAM_DEF_TIME2WAIT;
	else if (attr == &dev_attr_sess_def_time2retain.attr)
		param = ISCSI_PARAM_DEF_TIME2RETAIN;
	else if (attr == &dev_attr_sess_isid.attr)
		param = ISCSI_PARAM_ISID;
	else if (attr == &dev_attr_sess_tsid.attr)
		param = ISCSI_PARAM_TSID;
	else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr)
		param = ISCSI_PARAM_DEF_TASKMGMT_TMO;
	else if (attr == &dev_attr_sess_discovery_parent_idx.attr)
		param = ISCSI_PARAM_DISCOVERY_PARENT_IDX;
	else if (attr == &dev_attr_sess_discovery_parent_type.attr)
		param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE;
	/* Class-private attributes: fixed permissions, no transport query. */
	else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
		return S_IRUGO | S_IWUSR;
	else if (attr == &dev_attr_priv_sess_state.attr)
		return S_IRUGO;
	else if (attr == &dev_attr_priv_sess_creator.attr)
		return S_IRUGO;
	else if (attr == &dev_attr_priv_sess_target_id.attr)
		return S_IRUGO;
	else {
		WARN_ONCE(1, "Invalid session attr");
		return 0;
	}

	return t->attr_is_visible(ISCSI_PARAM, param);
}
/* sysfs group tying the session attributes to their visibility filter. */
static struct attribute_group iscsi_session_group = {
	.attrs = iscsi_session_attrs,
	.is_visible = iscsi_session_attr_is_visible,
};
/*
 * iSCSI host attrs
 */

/* Generates show_host_param_<param>(), which forwards the read to the
 * transport's get_host_param() callback. */
#define iscsi_host_attr_show(param)					\
static ssize_t								\
show_host_param_##param(struct device *dev,				\
			struct device_attribute *attr, char *buf)	\
{									\
	struct Scsi_Host *shost = transport_class_to_shost(dev);	\
	struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
	return priv->iscsi_transport->get_host_param(shost, param, buf); \
}

/* Declares a read-only "host_<field>" attribute backed by the generated
 * show routine above. */
#define iscsi_host_attr(field, param)					\
	iscsi_host_attr_show(param)					\
static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param,	\
			NULL);

iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE);
iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED);

/* Host attributes; filtered per-transport by iscsi_host_attr_is_visible(). */
static struct attribute *iscsi_host_attrs[] = {
	&dev_attr_host_netdev.attr,
	&dev_attr_host_hwaddress.attr,
	&dev_attr_host_ipaddress.attr,
	&dev_attr_host_initiatorname.attr,
	&dev_attr_host_port_state.attr,
	&dev_attr_host_port_speed.attr,
	NULL,
};
/*
 * Translate a host attribute to its ISCSI_HOST_PARAM_* id and let the
 * transport decide whether it is visible for this host.
 */
static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int i)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = transport_class_to_shost(cdev);
	struct iscsi_internal *priv = to_iscsi_internal(shost->transportt);
	int param;

	if (attr == &dev_attr_host_netdev.attr)
		param = ISCSI_HOST_PARAM_NETDEV_NAME;
	else if (attr == &dev_attr_host_hwaddress.attr)
		param = ISCSI_HOST_PARAM_HWADDRESS;
	else if (attr == &dev_attr_host_ipaddress.attr)
		param = ISCSI_HOST_PARAM_IPADDRESS;
	else if (attr == &dev_attr_host_initiatorname.attr)
		param = ISCSI_HOST_PARAM_INITIATOR_NAME;
	else if (attr == &dev_attr_host_port_state.attr)
		param = ISCSI_HOST_PARAM_PORT_STATE;
	else if (attr == &dev_attr_host_port_speed.attr)
		param = ISCSI_HOST_PARAM_PORT_SPEED;
	else {
		WARN_ONCE(1, "Invalid host attr");
		return 0;
	}

	return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param);
}
/* sysfs group tying the host attributes to their visibility filter. */
static struct attribute_group iscsi_host_group = {
	.attrs = iscsi_host_attrs,
	.is_visible = iscsi_host_attr_is_visible,
};
/* convert iscsi_port_speed values to ascii string name */
/* Table is scanned in order; each entry's value is a single bit flag. */
static const struct {
	enum iscsi_port_speed	value;
	char			*name;
} iscsi_port_speed_names[] = {
	{ISCSI_PORT_SPEED_UNKNOWN,	"Unknown" },
	{ISCSI_PORT_SPEED_10MBPS,	"10 Mbps" },
	{ISCSI_PORT_SPEED_100MBPS,	"100 Mbps" },
	{ISCSI_PORT_SPEED_1GBPS,	"1 Gbps" },
	{ISCSI_PORT_SPEED_10GBPS,	"10 Gbps" },
	{ISCSI_PORT_SPEED_25GBPS,	"25 Gbps" },
	{ISCSI_PORT_SPEED_40GBPS,	"40 Gbps" },
};
/**
 * iscsi_get_port_speed_name - map a host's port_speed flag to a label
 * @shost: SCSI host whose iscsi_cls_host carries the speed flag
 *
 * Returns the name of the first table entry whose value bit is set in
 * ihost->port_speed, or "Unknown!" when no entry matches.
 */
char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
{
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t port_speed = ihost->port_speed;
	char *speed = "Unknown!";
	int i;

	for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) {
		if (!(iscsi_port_speed_names[i].value & port_speed))
			continue;
		speed = iscsi_port_speed_names[i].name;
		break;
	}
	return speed;
}
EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name);
/* convert iscsi_port_state values to ascii string name */
/* Table is scanned in order; each entry's value is a single bit flag. */
static const struct {
	enum iscsi_port_state	value;
	char			*name;
} iscsi_port_state_names[] = {
	{ISCSI_PORT_STATE_DOWN,		"LINK DOWN" },
	{ISCSI_PORT_STATE_UP,		"LINK UP" },
};
/**
 * iscsi_get_port_state_name - map a host's port_state flag to a label
 * @shost: SCSI host whose iscsi_cls_host carries the state flag
 *
 * Returns the name of the first table entry whose value bit is set in
 * ihost->port_state, or "Unknown!" when no entry matches.
 */
char *iscsi_get_port_state_name(struct Scsi_Host *shost)
{
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t port_state = ihost->port_state;
	char *state = "Unknown!";
	int i;

	for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) {
		if (!(iscsi_port_state_names[i].value & port_state))
			continue;
		state = iscsi_port_state_names[i].name;
		break;
	}
	return state;
}
EXPORT_SYMBOL_GPL(iscsi_get_port_state_name);
/*
 * Attribute-container match callback: accept only session devices whose
 * host's transport template owns this session container.
 */
static int iscsi_session_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct iscsi_internal *priv;

	if (!iscsi_is_session_dev(dev))
		return 0;

	shost = iscsi_session_to_shost(iscsi_dev_to_session(dev));
	if (!shost->transportt)
		return 0;

	priv = to_iscsi_internal(shost->transportt);
	return priv->session_cont.ac.class == &iscsi_session_class.class &&
	       cont == &priv->session_cont.ac;
}
/*
 * Attribute-container match callback: accept only connection devices
 * (children of a session device) whose host's transport template owns
 * this connection container.
 */
static int iscsi_conn_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct iscsi_cls_conn *conn;
	struct Scsi_Host *shost;
	struct iscsi_internal *priv;

	if (!iscsi_is_conn_dev(dev))
		return 0;

	/* Connections hang off a session device; walk up to the host. */
	conn = iscsi_dev_to_conn(dev);
	shost = iscsi_session_to_shost(iscsi_dev_to_session(conn->dev.parent));
	if (!shost->transportt)
		return 0;

	priv = to_iscsi_internal(shost->transportt);
	return priv->conn_cont.ac.class == &iscsi_connection_class.class &&
	       cont == &priv->conn_cont.ac;
}
/*
 * Attribute-container match callback: accept only SCSI host devices
 * whose transport template's host container is this one.
 */
static int iscsi_host_match(struct attribute_container *cont,
			struct device *dev)
{
	struct Scsi_Host *shost;
	struct iscsi_internal *priv;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &iscsi_host_class.class)
		return 0;

	priv = to_iscsi_internal(shost->transportt);
	return cont == &priv->t.host_attrs.ac;
}
struct scsi_transport_template *
iscsi_register_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
int err;
BUG_ON(!tt);
priv = iscsi_if_transport_lookup(tt);
if (priv)
return NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
INIT_LIST_HEAD(&priv->list);
priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan;
priv->t.create_work_queue = 1;
priv->dev.class = &iscsi_transport_class;
dev_set_name(&priv->dev, "%s", tt->name);
err = device_register(&priv->dev);
if (err)
goto free_priv;
err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
if (err)
goto unregister_dev;
/* host parameters */
priv->t.host_attrs.ac.class = &iscsi_host_class.class;
priv->t.host_attrs.ac.match = iscsi_host_match;
priv->t.host_attrs.ac.grp = &iscsi_host_group;
priv->t.host_size = sizeof(struct iscsi_cls_host);
transport_container_register(&priv->t.host_attrs);
/* connection parameters */
priv->conn_cont.ac.class = &iscsi_connection_class.class;
priv->conn_cont.ac.match = iscsi_conn_match;
priv->conn_cont.ac.grp = &iscsi_conn_group;
transport_container_register(&priv->conn_cont);
/* session parameters */
priv->session_cont.ac.class = &iscsi_session_class.class;
priv->session_cont.ac.match = iscsi_session_match;
priv->session_cont.ac.grp = &iscsi_session_group;
transport_container_register(&priv->session_cont);
spin_lock_irqsave(&iscsi_transport_lock, flags);
list_add(&priv->list, &iscsi_transports);
spin_unlock_irqrestore(&iscsi_transport_lock, flags);
printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
return &priv->t;
unregister_dev:
device_unregister(&priv->dev);
return NULL;
free_priv:
kfree(priv);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
/**
 * iscsi_unregister_transport - unregister an iSCSI transport template
 * @tt: transport template previously passed to iscsi_register_transport()
 *
 * Removes the transport from the global list and unwinds the attribute
 * containers, sysfs group and device registration.  Serialized against
 * netlink request processing by rx_queue_mutex.  Always returns 0.
 */
int iscsi_unregister_transport(struct iscsi_transport *tt)
{
	struct iscsi_internal *priv;
	unsigned long flags;

	BUG_ON(!tt);

	mutex_lock(&rx_queue_mutex);

	priv = iscsi_if_transport_lookup(tt);
	BUG_ON (!priv);

	spin_lock_irqsave(&iscsi_transport_lock, flags);
	list_del(&priv->list);
	spin_unlock_irqrestore(&iscsi_transport_lock, flags);

	/* Reverse order of registration in iscsi_register_transport(). */
	transport_container_unregister(&priv->conn_cont);
	transport_container_unregister(&priv->session_cont);
	transport_container_unregister(&priv->t.host_attrs);

	sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
	device_unregister(&priv->dev);
	mutex_unlock(&rx_queue_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
/*
 * Module init: registers the transport/endpoint/iface device classes,
 * the host/connection/session transport classes, the flashnode bus,
 * the NETLINK_ISCSI kernel socket and the error-handling timer
 * workqueue.  Any failure unwinds the already-registered pieces in
 * strict reverse order via the label chain at the bottom.
 */
static __init int iscsi_transport_init(void)
{
	int err;
	struct netlink_kernel_cfg cfg = {
		.groups	= 1,
		.input	= iscsi_if_rx,
	};

	printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
		ISCSI_TRANSPORT_VERSION);

	atomic_set(&iscsi_session_nr, 0);

	err = class_register(&iscsi_transport_class);
	if (err)
		return err;

	err = class_register(&iscsi_endpoint_class);
	if (err)
		goto unregister_transport_class;

	err = class_register(&iscsi_iface_class);
	if (err)
		goto unregister_endpoint_class;

	err = transport_class_register(&iscsi_host_class);
	if (err)
		goto unregister_iface_class;

	err = transport_class_register(&iscsi_connection_class);
	if (err)
		goto unregister_host_class;

	err = transport_class_register(&iscsi_session_class);
	if (err)
		goto unregister_conn_class;

	err = bus_register(&iscsi_flashnode_bus);
	if (err)
		goto unregister_session_class;

	/* Netlink socket userspace (iscsid) talks to. */
	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
	if (!nls) {
		err = -ENOBUFS;
		goto unregister_flashnode_bus;
	}

	/* Single-threaded: EH timeout work must not run concurrently. */
	iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
	if (!iscsi_eh_timer_workq) {
		err = -ENOMEM;
		goto release_nls;
	}

	return 0;

release_nls:
	netlink_kernel_release(nls);
unregister_flashnode_bus:
	bus_unregister(&iscsi_flashnode_bus);
unregister_session_class:
	transport_class_unregister(&iscsi_session_class);
unregister_conn_class:
	transport_class_unregister(&iscsi_connection_class);
unregister_host_class:
	transport_class_unregister(&iscsi_host_class);
unregister_iface_class:
	class_unregister(&iscsi_iface_class);
unregister_endpoint_class:
	class_unregister(&iscsi_endpoint_class);
unregister_transport_class:
	class_unregister(&iscsi_transport_class);
	return err;
}
/* Module exit: tear everything down in reverse order of init. */
static void __exit iscsi_transport_exit(void)
{
	destroy_workqueue(iscsi_eh_timer_workq);
	netlink_kernel_release(nls);
	bus_unregister(&iscsi_flashnode_bus);
	transport_class_unregister(&iscsi_connection_class);
	transport_class_unregister(&iscsi_session_class);
	transport_class_unregister(&iscsi_host_class);
	class_unregister(&iscsi_endpoint_class);
	class_unregister(&iscsi_iface_class);
	class_unregister(&iscsi_transport_class);
}
/* Module entry points and metadata. */
module_init(iscsi_transport_init);
module_exit(iscsi_transport_exit);

MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
	      "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI Transport Interface");
MODULE_LICENSE("GPL");
MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
/* Autoload this module when the NETLINK_ISCSI protocol is requested. */
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI);
|
from __future__ import print_function
import numpy as np
import random
from tqdm import tqdm
import os
#import cPickle as cp
#import _pickle as cp # python3 compatability
import networkx as nx
import pdb
import argparse
# Command-line options shared by the DGCNN graph-classification scripts.
# Parsed at import time; parse_known_args() ignores unrecognized flags so
# callers can layer their own parsers on top.
cmd_opt = argparse.ArgumentParser(description='Argparser for graph_classification')
cmd_opt.add_argument('-mode', default='cpu', help='cpu/gpu')
cmd_opt.add_argument('-gm', default='DGCNN', help='gnn model to use')
cmd_opt.add_argument('-data', default=None, help='data folder name')
cmd_opt.add_argument('-batch_size', type=int, default=50, help='minibatch size')
cmd_opt.add_argument('-seed', type=int, default=1, help='seed')
cmd_opt.add_argument('-feat_dim', type=int, default=0, help='dimension of discrete node feature (maximum node tag)')
cmd_opt.add_argument('-edge_feat_dim', type=int, default=0, help='dimension of edge features')
cmd_opt.add_argument('-num_class', type=int, default=0, help='#classes')
cmd_opt.add_argument('-fold', type=int, default=1, help='fold (1..10)')
cmd_opt.add_argument('-test_number', type=int, default=0, help='if specified, will overwrite -fold and use the last -test_number graphs as testing data')
cmd_opt.add_argument('-num_epochs', type=int, default=1000, help='number of epochs')
cmd_opt.add_argument('-latent_dim', type=str, default='64', help='dimension(s) of latent layers')
cmd_opt.add_argument('-sortpooling_k', type=float, default=30, help='number of nodes kept after SortPooling')
cmd_opt.add_argument('-conv1d_activation', type=str, default='ReLU', help='which nn activation layer to use')
cmd_opt.add_argument('-out_dim', type=int, default=1024, help='graph embedding output size')
cmd_opt.add_argument('-hidden', type=int, default=100, help='dimension of mlp hidden layer')
cmd_opt.add_argument('-max_lv', type=int, default=4, help='max rounds of message passing')
cmd_opt.add_argument('-learning_rate', type=float, default=0.0001, help='init learning_rate')
cmd_opt.add_argument('-dropout', type=float, default=0, help='dropout percent for MLP')
cmd_opt.add_argument('-printAUC', type=bool, default=False, help='whether to print AUC (for binary classification only)')
cmd_opt.add_argument('-extract_features', type=bool, default=False, help='whether to extract final graph features')

cmd_args, _ = cmd_opt.parse_known_args()

# '-latent_dim' is a '-'-separated string (e.g. '32-32-1'); normalize it
# to a list of ints, collapsing to a bare int when a single value is given.
cmd_args.latent_dim = [int(x) for x in cmd_args.latent_dim.split('-')]
if len(cmd_args.latent_dim) == 1:
    cmd_args.latent_dim = cmd_args.latent_dim[0]
class GNNGraph(object):
    """Container for one graph sample consumed by the DGCNN pipeline.

    Exposes: num_nodes/num_edges, node_tags, label, node_features, degs,
    edge_pairs (flattened int32 array [x0, y0, x1, y1, ...]) and an
    optional edge_features array.
    """

    def __init__(self, g, label, node_tags=None, node_features=None):
        '''
        g: a networkx graph
        label: an integer graph label
        node_tags: a list of integer node tags
        node_features: a numpy array of continuous node features
        '''
        # NOTE(review): len(node_tags) raises TypeError if node_tags is
        # None despite the default — all callers in load_data() pass a
        # list, so the signature is kept for backward compatibility.
        self.num_nodes = len(node_tags)
        self.node_tags = node_tags
        self.label = label
        self.node_features = node_features  # numpy array (node_num * feature_dim)
        self.degs = list(dict(g.degree).values())

        if len(g.edges()) != 0:
            x, y = zip(*g.edges())
            self.num_edges = len(x)
            self.edge_pairs = np.ndarray(shape=(self.num_edges, 2), dtype=np.int32)
            self.edge_pairs[:, 0] = x
            self.edge_pairs[:, 1] = y
            self.edge_pairs = self.edge_pairs.flatten()
        else:
            self.num_edges = 0
            # BUGFIX(minor): use int32 so the dtype matches the non-empty
            # branch (np.array([]) defaults to float64).
            self.edge_pairs = np.array([], dtype=np.int32)

        # see if there are edge features
        self.edge_features = None
        if nx.get_edge_attributes(g, 'features'):
            # make sure edges have an attribute 'features' (1 * feature_dim numpy array)
            edge_features = nx.get_edge_attributes(g, 'features')
            # BUGFIX: dict.values() is a view in Python 3 and cannot be
            # indexed with [0]; next(iter(...)) works on both Py2 and Py3.
            assert isinstance(next(iter(edge_features.values())), np.ndarray)
            # need to rearrange edge_features using the e2n edge order
            edge_features = {(min(x, y), max(x, y)): z for (x, y), z in edge_features.items()}
            keys = sorted(edge_features)
            self.edge_features = []
            for edge in keys:
                self.edge_features.append(edge_features[edge])
                self.edge_features.append(edge_features[edge])  # add reversed edges
            self.edge_features = np.concatenate(self.edge_features, 0)
def load_data():
    """Read 'data/<name>/<name>.txt' and return (train_graphs, test_graphs).

    File format (one dataset per file):
      line 1: number of graphs N
      then per graph: "<n_nodes> <label>", followed by one line per node:
      "<tag> <n_neighbors> <neighbor ids...> [attr floats...]".

    Side effects: fills cmd_args.num_class, feat_dim, edge_feat_dim and
    attr_dim from the data.  Splits either by the 10-fold index files
    (cmd_args.fold) or by taking the last cmd_args.test_number graphs.
    """
    print('loading data')
    g_list = []
    label_dict = {}   # maps raw labels -> contiguous 0..C-1 ids
    feat_dict = {}    # maps raw node tags -> contiguous ids

    with open('data/%s/%s.txt' % (cmd_args.data, cmd_args.data), 'r') as f:
        n_g = int(f.readline().strip())
        for i in range(n_g):
            row = f.readline().strip().split()
            n, l = [int(w) for w in row]
            if not l in label_dict:
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            node_features = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                row = f.readline().strip().split()
                # row[1] is the neighbor count; anything after the
                # neighbor list is continuous node attributes.
                tmp = int(row[1]) + 2
                if tmp == len(row):
                    # no node attributes
                    row = [int(w) for w in row]
                    attr = None
                else:
                    row, attr = [int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]])
                if not row[0] in feat_dict:
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])

                if attr is not None:
                    node_features.append(attr)

                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])

            if node_features != []:
                node_features = np.stack(node_features)
                node_feature_flag = True
            else:
                node_features = None
                node_feature_flag = False

            #assert len(g.edges()) * 2 == n_edges  (some graphs in COLLAB have self-loops, ignored here)
            assert len(g) == n
            g_list.append(GNNGraph(g, l, node_tags, node_features))

    # Remap every graph label to its contiguous id.
    for g in g_list:
        g.label = label_dict[g.label]
    cmd_args.num_class = len(label_dict)
    cmd_args.feat_dim = len(feat_dict)  # maximum node label (tag)
    cmd_args.edge_feat_dim = 0
    if node_feature_flag == True:
        # NOTE(review): attr_dim is taken from the LAST graph parsed —
        # assumes all graphs share the same attribute width.
        cmd_args.attr_dim = node_features.shape[1]  # dim of node features (attributes)
    else:
        cmd_args.attr_dim = 0

    print('# classes: %d' % cmd_args.num_class)
    print('# maximum node tag: %d' % cmd_args.feat_dim)

    if cmd_args.test_number == 0:
        train_idxes = np.loadtxt('data/%s/10fold_idx/train_idx-%d.txt' % (cmd_args.data, cmd_args.fold), dtype=np.int32).tolist()
        test_idxes = np.loadtxt('data/%s/10fold_idx/test_idx-%d.txt' % (cmd_args.data, cmd_args.fold), dtype=np.int32).tolist()
        return [g_list[i] for i in train_idxes], [g_list[i] for i in test_idxes]
    else:
        return g_list[: n_g - cmd_args.test_number], g_list[n_g - cmd_args.test_number :]
|
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
  """Set-piece scenario with the ball placed near the opponent's corner.

  The episode ends on a score or when the ball goes out of play;
  possession changes do not end it.  The rosters below preserve the
  exact AddPlayer() ordering (goalkeeper first), which fixes player
  indices for the environment.
  """
  builder.config().game_duration = 400
  builder.config().deterministic = False
  builder.config().offsides = False
  builder.config().end_episode_on_score = True
  builder.config().end_episode_on_out_of_play = True
  builder.config().end_episode_on_possession_change = False
  builder.SetBallPosition(0.99, 0.41)

  left_team = (
      (-1.0, 0.0, e_PlayerRole_GK),
      (1.0, 0.42, e_PlayerRole_LB),
      (0.7, 0.15, e_PlayerRole_CB),
      (0.7, 0.05, e_PlayerRole_CB),
      (0.7, -0.05, e_PlayerRole_RB),
      (0.0, 0.0, e_PlayerRole_CM),
      (0.6, 0.35, e_PlayerRole_CM),
      (0.8, 0.07, e_PlayerRole_CM),
      (0.8, -0.03, e_PlayerRole_LM),
      (0.8, -0.13, e_PlayerRole_RM),
      (0.7, -0.3, e_PlayerRole_CF),
  )
  builder.SetTeam(Team.e_Left)
  for x, y, role in left_team:
    builder.AddPlayer(x, y, role)

  right_team = (
      (-1.0, 0.0, e_PlayerRole_GK),
      (-0.75, -0.18, e_PlayerRole_LB),
      (-0.75, -0.08, e_PlayerRole_CB),
      (-0.75, 0.02, e_PlayerRole_CB),
      (-1.0, -0.1, e_PlayerRole_RB),
      (-0.8, -0.25, e_PlayerRole_CM),
      (-0.88, -0.07, e_PlayerRole_CM),
      (-0.88, 0.03, e_PlayerRole_CM),
      (-0.88, 0.13, e_PlayerRole_LM),
      (-0.75, 0.25, e_PlayerRole_RM),
      (-0.2, 0.0, e_PlayerRole_CF),
  )
  builder.SetTeam(Team.e_Right)
  for x, y, role in right_team:
    builder.AddPlayer(x, y, role)
|
/*
* BMKLocationViewDisplayParam.h
* BMapKit
*
* Copyright 2013 Baidu Inc. All rights reserved.
*
*/
#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
/// Custom display parameters for the user-location layer.
@interface BMKLocationViewDisplayParam : NSObject
{
    float _locationViewOffsetX;      // horizontal offset of the location icon (longitude direction)
    float _locationViewOffsetY;      // vertical offset of the location icon (latitude direction)
    bool _isAccuracyCircleShow;      // whether the accuracy circle is shown
    bool _isRotateAngleValid;        // whether the rotation angle applies in follow mode
    NSString* _locationViewImgName;  // image name for the location icon
}

/// X offset of the location icon
@property (nonatomic, assign) float locationViewOffsetX;
/// Y offset of the location icon
@property (nonatomic, assign) float locationViewOffsetY;
/// Whether the accuracy circle is shown
@property (nonatomic, assign) bool isAccuracyCircleShow;
/// Whether the rotation angle takes effect in follow mode
@property (nonatomic, assign) bool isRotateAngleValid;
/// Location icon image name
@property (nonatomic, strong) NSString* locationViewImgName;
@end
|
// Auto-generated documentation navigation index (doxygen-style output):
// each entry is [symbol name, target page#anchor, children]. Do not edit
// by hand — regenerate with the docs build instead.
var Helix_Shopify_Product_Image =
[
    [ "DIR", "Helix_Shopify_Product_Image.html#c482d92", null ],
    [ "TYPE", "Helix_Shopify_Product_Image.html#0cb7892", null ],
    [ "_container", "Helix_Shopify_Product_Image.html#9f43825", null ],
    [ "_metafieldType", "Helix_Shopify_Product_Image.html#6c90164", null ],
    [ "getCreatedAt", "Helix_Shopify_Product_Image.html#1adffdc", null ],
    [ "getFilename", "Helix_Shopify_Product_Image.html#3a0245c", null ],
    [ "getHeight", "Helix_Shopify_Product_Image.html#af1ae3d", null ],
    [ "getPosition", "Helix_Shopify_Product_Image.html#6b1712f", null ],
    [ "getProduct", "Helix_Shopify_Product_Image.html#17015cd", null ],
    [ "getProductId", "Helix_Shopify_Product_Image.html#34302c3", null ],
    [ "getUpdatedAt", "Helix_Shopify_Product_Image.html#975c7c7", null ],
    [ "getVariantIds", "Helix_Shopify_Product_Image.html#198c674", null ],
    [ "getWidth", "Helix_Shopify_Product_Image.html#bfc2ea8", null ],
    [ "hasVariantIds", "Helix_Shopify_Product_Image.html#724905b", null ],
    [ "setAttachment", "Helix_Shopify_Product_Image.html#1c86545", null ],
    [ "setFilename", "Helix_Shopify_Product_Image.html#8902283", null ],
    [ "setHeight", "Helix_Shopify_Product_Image.html#fa6bae7", null ],
    [ "setPosition", "Helix_Shopify_Product_Image.html#48b9573", null ],
    [ "setSrc", "Helix_Shopify_Product_Image.html#a2ee72b", null ],
    [ "setVariantIds", "Helix_Shopify_Product_Image.html#ba60014", null ],
    [ "setWidth", "Helix_Shopify_Product_Image.html#6f7b935", null ]
];
|
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* @fileoverview
* 'cr-camera' is a Polymer element used to take a picture from the
* user webcam to use as a Chrome OS profile picture.
*/
(function() {
/**
 * Dimensions for camera capture.
 * @const
 */
const CAPTURE_SIZE = {
  width: 576,
  height: 576
};

/**
 * Interval between frames for camera capture (milliseconds).
 * @const
 */
const CAPTURE_INTERVAL_MS = 1000 / 10;

/**
 * Duration of camera capture (milliseconds).
 * @const
 */
const CAPTURE_DURATION_MS = 1000;

Polymer({
  is: 'cr-camera',

  properties: {
    /** Strings provided by host */
    takePhotoLabel: String,
    captureVideoLabel: String,
    switchModeToCameraLabel: String,
    switchModeToVideoLabel: String,

    /** True if video mode is enabled. */
    videoModeEnabled: {
      type: Boolean,
      value: false,
    },

    /**
     * True if currently in video mode.
     * @private {boolean}
     */
    videomode: {
      type: Boolean,
      value: false,
      reflectToAttribute: true,
    },

    /**
     * True when the camera is actually streaming video. May be false even when
     * the camera is present and shown, but still initializing.
     * @private {boolean}
     */
    cameraOnline_: {
      type: Boolean,
      value: false,
    },
  },

  /** @private {boolean} Guards against overlapping startCamera() calls. */
  cameraStartInProgress_: false,

  /** @private {boolean} Guards against overlapping takePhoto() calls. */
  cameraCaptureInProgress_: false,

  /** @override */
  attached() {
    // 'canplay' fires once the stream is decodable; only then is the
    // camera considered online and the shutter button focusable.
    this.$.cameraVideo.addEventListener('canplay', function() {
      this.$.userImageStreamCrop.classList.add('preview');
      this.cameraOnline_ = true;
      this.focusTakePhotoButton();
    }.bind(this));
    this.startCamera();
  },

  /** @override */
  detached() {
    this.stopCamera();
  },

  /** Only focuses the button if it's not disabled. */
  focusTakePhotoButton() {
    if (this.cameraOnline_) {
      this.$.takePhoto.focus();
    }
  },

  /**
   * Performs photo capture from the live camera stream. A 'photo-taken' event
   * will be fired as soon as captured photo is available, with the
   * 'photoDataURL' property containing the photo encoded as a data URL.
   */
  takePhoto() {
    if (!this.cameraOnline_ || this.cameraCaptureInProgress_) {
      return;
    }
    this.cameraCaptureInProgress_ = true;
    /** Pre-allocate all frames needed for capture. */
    const frames = [];
    if (this.videomode) {
      /** Reduce capture size when in video mode. */
      const captureSize = {
        width: CAPTURE_SIZE.width / 2,
        height: CAPTURE_SIZE.height / 2
      };
      const captureFrameCount = CAPTURE_DURATION_MS / CAPTURE_INTERVAL_MS;
      while (frames.length < captureFrameCount) {
        frames.push(this.allocateFrame_(captureSize));
      }
    } else {
      frames.push(this.allocateFrame_(CAPTURE_SIZE));
    }
    /** Start capturing frames at an interval. */
    const capturedFrames = [];
    this.$.userImageStreamCrop.classList.remove('preview');
    this.$.userImageStreamCrop.classList.add('capture');
    const interval = setInterval(() => {
      /** Stop capturing frames when all allocated frames have been consumed. */
      if (frames.length) {
        capturedFrames.push(
            this.captureFrame_(this.$.cameraVideo, frames.pop()));
      } else {
        clearInterval(interval);
        this.fire(
            'photo-taken',
            {photoDataUrl: this.convertFramesToPng_(capturedFrames)});
        this.$.userImageStreamCrop.classList.remove('capture');
        this.cameraCaptureInProgress_ = false;
      }
    }, CAPTURE_INTERVAL_MS);
  },

  /** Tries to start the camera stream capture. */
  startCamera() {
    this.stopCamera();
    this.cameraStartInProgress_ = true;

    const successCallback = function(stream) {
      if (this.cameraStartInProgress_) {
        this.$.cameraVideo.srcObject = stream;
        this.cameraStream_ = stream;
      } else {
        // stopCamera() ran while getUserMedia was pending; discard stream.
        this.stopVideoTracks_(stream);
      }
      this.cameraStartInProgress_ = false;
    }.bind(this);

    const errorCallback = function() {
      this.cameraOnline_ = false;
      this.cameraStartInProgress_ = false;
    }.bind(this);

    const videoConstraints = {
      facingMode: 'user',
      width: {ideal: CAPTURE_SIZE.width},
      height: {ideal: CAPTURE_SIZE.height},
      resizeMode: 'none',
    };

    // NOTE(review): webkitGetUserMedia is a deprecated prefixed API;
    // presumably acceptable since this targets Chrome only — consider
    // migrating to navigator.mediaDevices.getUserMedia (promise-based).
    navigator.webkitGetUserMedia(
        {video: videoConstraints}, successCallback, errorCallback);
  },

  /** Stops the camera stream capture if it's currently active. */
  stopCamera() {
    this.$.userImageStreamCrop.classList.remove('preview');
    this.cameraOnline_ = false;
    this.$.cameraVideo.srcObject = null;
    if (this.cameraStream_) {
      this.stopVideoTracks_(this.cameraStream_);
      this.cameraStream_ = null;
    }
    // Cancel any pending getUserMedia() checks.
    this.cameraStartInProgress_ = false;
  },

  /**
   * Stops all video tracks associated with a MediaStream object.
   * @param {!MediaStream} stream
   * @private
   */
  stopVideoTracks_(stream) {
    const tracks = stream.getVideoTracks();
    for (let i = 0; i < tracks.length; i++) {
      tracks[i].stop();
    }
  },

  /**
   * Switch between photo and video mode.
   * @private
   */
  onTapSwitchMode_() {
    this.videomode = !this.videomode;
    this.fire('switch-mode', this.videomode);
  },

  /**
   * Allocates a canvas for capturing a single still frame at a specific size.
   * @param {{width: number, height: number}} size Frame size.
   * @return {!HTMLCanvasElement} The allocated canvas.
   * @private
   */
  allocateFrame_(size) {
    const canvas =
        /** @type {!HTMLCanvasElement} */ (document.createElement('canvas'));
    canvas.width = size.width;
    canvas.height = size.height;
    const ctx = /** @type {!CanvasRenderingContext2D} */ (
        canvas.getContext('2d', {alpha: false}));
    // Flip frame horizontally (mirror, as users expect of a selfie cam).
    ctx.translate(size.width, 0);
    ctx.scale(-1.0, 1.0);
    return canvas;
  },

  /**
   * Captures a single still frame from a <video> element, placing it at the
   * current drawing origin of a canvas context.
   * @param {!HTMLVideoElement} video Video element to capture from.
   * @param {!HTMLCanvasElement} canvas Canvas to save frame in.
   * @return {!HTMLCanvasElement} The canvas frame was saved in.
   * @private
   */
  captureFrame_(video, canvas) {
    const ctx =
        /** @type {!CanvasRenderingContext2D} */ (
            canvas.getContext('2d', {alpha: false}));
    const width = video.videoWidth;
    const height = video.videoHeight;
    if (width < canvas.width || height < canvas.height) {
      console.error(
          'Video capture size too small: ' + width + 'x' + height + '!');
    }
    // Center-crop the source so it fills the square canvas.
    const src = {};
    if (width / canvas.width > height / canvas.height) {
      // Full height, crop left/right.
      src.height = height;
      src.width = height * canvas.width / canvas.height;
    } else {
      // Full width, crop top/bottom.
      src.width = width;
      src.height = width * canvas.height / canvas.width;
    }
    src.x = (width - src.width) / 2;
    src.y = (height - src.height) / 2;
    ctx.drawImage(
        video, src.x, src.y, src.width, src.height, 0, 0, canvas.width,
        canvas.height);
    return canvas;
  },

  /**
   * Encode frames and convert to animated PNG image.
   * @param {!Array<!HTMLCanvasElement>} frames The frames to convert to image.
   * @return {!string} The data URL for image.
   * @private
   */
  convertFramesToPng_(frames) {
    /** Encode captured frames. */
    const encodedImages = frames.map(function(frame) {
      return frame.toDataURL('image/png');
    });
    /** No need for further processing if single frame. */
    if (encodedImages.length === 1) {
      return encodedImages[0];
    }
    /** Create forward/backward image sequence (ping-pong loop). */
    const forwardBackwardImageSequence =
        encodedImages.concat(encodedImages.slice(1, -1).reverse());
    /** Convert image sequence to animated PNG. */
    return cr.png.convertImageSequenceToPng(forwardBackwardImageSequence);
  },

  /**
   * @return {string}
   * @private
   */
  getTakePhotoIcon_() {
    return this.videomode ? 'cr-picture:videocam-shutter-icon' :
                            'cr-picture:camera-shutter-icon';
  },

  /**
   * Returns the label to use for take photo button.
   * @return {string}
   * @private
   */
  getTakePhotoLabel_(videomode, photoLabel, videoLabel) {
    return videomode ? videoLabel : photoLabel;
  },

  /**
   * @return {string}
   * @private
   */
  getSwitchModeIcon_() {
    return this.videomode ? 'cr-picture:camera-alt-icon' :
                            'cr-picture:videocam-icon';
  },

  /**
   * Returns the label to use for switch mode button.
   * @return {string}
   * @private
   */
  getSwitchModeLabel_(videomode, cameraLabel, videoLabel) {
    return videomode ? cameraLabel : videoLabel;
  },
});
})();
|
const path = require('path');
const webpack = require('webpack');
const merge = require('webpack-merge');
const ExtractTextPlugin = require('extract-text-webpack-plugin');

// Bakes NODE_ENV=production into the client bundle for dist builds.
const productionEnvPlugin = new webpack.DefinePlugin({
  'process.env': {
    NODE_ENV: JSON.stringify('production'),
  },
});

// Absolute paths used throughout the config.
const PATHS = {
  build: path.resolve(__dirname, 'public/bundles'),
  src: path.resolve(__dirname, 'src'),
};

// Single polyfilled client entry point.
const entry = ['babel-polyfill', './src/client/index.js'];

// Pulls compiled SCSS out of the JS bundle into a standalone bundle.css.
const cssExtractor = new ExtractTextPlugin('bundle.css');

// Settings shared by every environment.
const baseConfig = {
  devtool: 'source-map',
  context: path.join(process.cwd()),
  entry,
  output: {
    filename: 'bundle.js',
    path: PATHS.build,
  },
  module: {
    rules: [
      {
        test: /\.jsx?$/,
        include: [PATHS.src],
        loader: 'babel-loader',
      },
      {
        test: /\.scss$/,
        use: cssExtractor.extract({
          fallback: 'style-loader',
          use: ['css-loader', 'postcss-loader', 'sass-loader'],
        }),
      },
    ],
  },
  plugins: [cssExtractor],
};

// Per-environment overrides. The Proxy falls back to the dev
// configuration whenever no matching environment is found.
const envConfig = new Proxy(
  {
    dev: {},
    dist: {
      devtool: false,
      plugins: [productionEnvPlugin],
    },
  },
  {
    get(target, name) {
      return target[name] ? target[name] : target.dev;
    },
  },
);

module.exports = (env) => merge(baseConfig, envConfig[env]);
|
"""
A two-dimensional vector class
>>> v1 = Vector2d(3, 4)
>>> print(v1.x, v1.y)
3.0 4.0
>>> x, y = v1
>>> x, y
(3.0, 4.0)
>>> v1
Vector2d(3.0, 4.0)
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0)
>>> octets = bytes(v1)
>>> octets
b'd\\x00\\x00\\x00\\x00\\x00\\x00\\x08@\\x00\\x00\\x00\\x00\\x00\\x00\\x10@'
>>> abs(v1)
5.0
>>> bool(v1), bool(Vector2d(0, 0))
(True, False)
Test of ``.frombytes()`` class method:
>>> v1_clone = Vector2d.frombytes(bytes(v1))
>>> v1_clone
Vector2d(3.0, 4.0)
>>> v1 == v1_clone
True
Tests of ``format()`` with Cartesian coordinates:
>>> format(v1)
'(3.0, 4.0)'
>>> format(v1, '.2f')
'(3.00, 4.00)'
>>> format(v1, '.3e')
'(3.000e+00, 4.000e+00)'
Tests of the ``angle`` method::
>>> Vector2d(0, 0).angle()
0.0
>>> Vector2d(1, 0).angle()
0.0
>>> epsilon = 10**-8
>>> abs(Vector2d(0, 1).angle() - math.pi/2) < epsilon
True
>>> abs(Vector2d(1, 1).angle() - math.pi/4) < epsilon
True
Tests of ``format()`` with polar coordinates:
>>> format(Vector2d(1, 1), 'p') # doctest:+ELLIPSIS
'<1.414213..., 0.785398...>'
>>> format(Vector2d(1, 1), '.3ep')
'<1.414e+00, 7.854e-01>'
>>> format(Vector2d(1, 1), '0.5fp')
'<1.41421, 0.78540>'
Tests of ``x`` and ``y`` read-only properties:
>>> v1.x, v1.y
(3.0, 4.0)
>>> v1.x = 123
Traceback (most recent call last):
...
AttributeError: can't set attribute
Tests of hashing:
>>> v1 = Vector2d(3, 4)
>>> v2 = Vector2d(3.1, 4.2)
>>> hash(v1), hash(v2)
(7, 384307168202284039)
>>> len(set([v1, v2]))
2
Converting to/from a ``complex``:
# tag::VECTOR2D_V4_DEMO[]
>>> from typing import SupportsComplex
>>> v3 = Vector2d(1.5, 2.5)
>>> isinstance(v3, SupportsComplex) # <1>
True
>>> complex(v3) # <2>
(1.5+2.5j)
>>> Vector2d.fromcomplex(4+5j) # <3>
Vector2d(4.0, 5.0)
# end::VECTOR2D_V4_DEMO[]
"""
from array import array
import math
class Vector2d:
    """A two-dimensional vector with float components.

    Supports iteration/unpacking, repr round-tripping via eval(),
    bytes serialization, hashing, Cartesian and polar formatting,
    and conversion to/from ``complex``.
    """

    # array/struct type code used by __bytes__() and frombytes().
    typecode = 'd'

    def __init__(self, x, y):
        # Components are coerced to float and kept private; the public
        # read-only view is provided by the x/y properties below.
        self.__x = float(x)
        self.__y = float(y)

    @property
    def x(self):
        """Read-only x component."""
        return self.__x

    @property
    def y(self):
        """Read-only y component."""
        return self.__y

    def __iter__(self):
        # Yielding the pair makes ``a, b = v`` and tuple(v) work.
        yield self.__x
        yield self.__y

    def __repr__(self):
        # Uses the dynamic class name so subclasses repr correctly.
        return '{}({!r}, {!r})'.format(type(self).__name__, self.x, self.y)

    def __str__(self):
        return str((self.x, self.y))

    def __bytes__(self):
        # One byte with the typecode, then the packed components.
        payload = array(self.typecode, [self.x, self.y])
        return bytes([ord(self.typecode)]) + payload.tobytes()

    def __eq__(self, other):
        # NOTE: compares equal to any iterable yielding the same pair.
        return (self.x, self.y) == tuple(other)

    def __hash__(self):
        return hash(self.x) ^ hash(self.y)

    def __abs__(self):
        return math.hypot(self.x, self.y)

    def __bool__(self):
        return abs(self) != 0.0

    def angle(self):
        """Return the angle of the vector in radians."""
        return math.atan2(self.y, self.x)

    def __format__(self, fmt_spec=''):
        # A trailing 'p' selects polar output: <magnitude, angle>.
        polar = fmt_spec.endswith('p')
        if polar:
            fmt_spec = fmt_spec[:-1]
            components = (abs(self), self.angle())
            template = '<{}, {}>'
        else:
            components = (self.x, self.y)
            template = '({}, {})'
        inner = [format(value, fmt_spec) for value in components]
        return template.format(*inner)

    @classmethod
    def frombytes(cls, octets):
        """Rebuild a vector from the output of bytes(v)."""
        typecode = chr(octets[0])
        values = memoryview(octets[1:]).cast(typecode)
        return cls(*values)

    # tag::VECTOR2D_V4_COMPLEX[]
    def __complex__(self):
        return complex(self.x, self.y)

    @classmethod
    def fromcomplex(cls, datum):
        """Build a vector from any object with .real/.imag attributes."""
        return cls(datum.real, datum.imag)
    # end::VECTOR2D_V4_COMPLEX[]
|
/*
*
* MIT License
*
* Copyright (c) 2020 Mirgor
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* @defgroup motor_manager Motor manager
* @brief Motor manager module documentation.
*
* The motor manager is a high level Finite State Machine (FSM)
* of the motor behavior.
 * It will receive the commands to go in one direction, the other, or
 * to stop, and it will translate them to the lower levels, which in turn
 * will control the hardware.
*
* @startuml
*
* @enduml
*
* @{
*
* @defgroup motor_manager_conf Module Configuration
* @brief motor_manager module configuration parameters
*
* @defgroup motor_manager_api Module API Interface
* @brief motor_manager module API functions
*
* @defgroup motor_manager_callouts Module Callouts
* @brief motor_manager callout functions
*
* @defgroup motor_manager_imp Module Implementation
* @brief motor_manager implementation
* @}
*/
|
$(document).ready(function() {
  // Configure the WysiBB BBCode editor toolbar.
  $('#editor').wysibb({
    buttons : 'bold,italic,underline,strike,|,fontcolor,fontsize,fontfamily,|,justifyleft,justifycenter,justifyright,|,bullist,|,img,link,|,code,quote',
    tabInsert: false
  });
  // Apply syntax highlighting to every <pre> block on the page.
  $('pre').each(function(i, e) {
    hljs.highlightBlock(e);
  });
  // Tag-style recipient input used when starting conversations.
  $('#receiver').tagsInput({
    defaultText: 'add user',
    'width': '100%',
    'height': 'auto'
  });
});
$(function() {
  var gravatarCheckbox = $('input[type="checkbox"]#gravatar');
  // Hide the manual avatar uploader when gravatar is preselected.
  if (gravatarCheckbox.is(':checked')) {
    $('.iko.avatar_uploader').css('display', 'none');
  }
  // Toggle the uploader whenever the gravatar checkbox is clicked.
  gravatarCheckbox.click(function() {
    $('.iko.avatar_uploader').slideToggle();
  });
});
// Auto-save the post form ('tango_form') once per second so drafts
// survive accidental navigation. `var` keeps it a page-level global,
// presumably referenced by other scripts — TODO confirm before changing.
var formsave1=new autosaveform({
formid: 'tango_form',
pause: 1000 //<--no comma following last option!
});
/*
 * Forum functions.
 */
// Insert a BBCode quote referencing the given post id into the editor.
function quote(id) {
  $('#editor').execCommand('quote', {author: '', seltext: 'Post ID: ' + id});
}
// Append an emoji/smiley code to the editor's current content.
function add_emoji(text) {
  console.log(text);
  var current = $('#editor').val();
  $('#editor').val(current + text);
}
|
import socket
import json
import threading
from gui import *
# Module-level singletons shared by the send handler and the receiver
# thread: the chat window (GUI comes from the star-import of `gui`)
# and the TCP client socket.
gui = GUI()
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def main():
    """Connect to the chat server, start the receiver thread and run the GUI.

    Blocks in the Tk mainloop until the window is closed.
    """
    client.connect(('localhost', 8888))
    # daemon=True: without it the blocking receiver thread kept the
    # process alive after the GUI window was closed.
    readIncomingThread = threading.Thread(target=read_incoming, daemon=True)
    readIncomingThread.start()
    gui.pack()
    # Send the current input when the user presses Enter.
    gui.inputField.bind('<Return>', send_message)
    gui.root.mainloop()
def send_message(e):
    """Send the input field's contents to the server as a JSON message.

    Bound to the Tk <Return> event; `e` is the (unused) event object.
    """
    try:
        client.send(json.dumps({"route": "test", "body": {"text": gui.inputField.get()}}).encode())
        gui.inputField.delete(0, END)
    except Exception as exc:
        # The original bare `except:` also swallowed KeyboardInterrupt/
        # SystemExit and hid the reason; report it instead.
        print("Failed to send...", exc)
def read_incoming():
    """Receive JSON messages from the server and append them to the chat.

    Runs in a background thread; returns when the server closes the
    connection.
    """
    while True:
        # recv() (not recvfrom) is the idiomatic call on a connected TCP
        # socket; the peer address was unused anyway.
        data = client.recv(1024)
        if not data:
            # Empty read means the server closed the connection; the old
            # code crashed the thread in json.loads(b'') here.
            break
        print(data.decode())
        # NOTE(review): assumes each recv() returns exactly one complete
        # JSON document — confirm the server's message framing.
        j = json.loads(data.decode())
        gui.chatWindow.insert(END, j["sender"]["name"] + "> " + j["content"] + "\n")
# Run the client only when executed directly (not when imported).
if __name__ == "__main__":
    main()
|
# Directed graph as an adjacency list: node -> list of direct successors.
# Note 'j' points into the graph but nothing points to 'j'.
graph = {
    'f': ['g', 'i'],
    'g': ['h'],
    'h': [],
    'i': ['g', 'k'],
    'j': ['i'],
    'k': []
}
def has_path(graph, start, end):
    """Return True if `end` is reachable from `start` via iterative BFS.

    graph: adjacency list mapping node -> list of successor nodes.
    A visited set guards against re-enqueueing nodes, so the search now
    terminates on cyclic graphs (the original looped forever) and avoids
    exponential queue growth on diamond-shaped DAGs.
    """
    queue = [start]
    visited = set()
    while len(queue) > 0:
        current = queue.pop(0)
        if current == end:
            return True
        if current in visited:
            continue
        visited.add(current)
        for adj in graph[current]:
            queue.append(adj)
    return False
def has_path_rdfs(graph, start, end, visited=None):
    """Return True if `end` is reachable from `start` via recursive DFS.

    The optional `visited` set (new, backward-compatible parameter) stops
    the recursion from revisiting nodes, so the search terminates on
    cyclic graphs instead of recursing forever.
    """
    if start == end:
        return True
    if visited is None:
        visited = set()
    if start in visited:
        return False
    visited.add(start)
    for adj in graph[start]:
        if has_path_rdfs(graph, adj, end, visited):
            return True
    return False
# Demo: 'k' is reachable from 'f' (f -> i -> k), but 'j' is not, because
# no edge leads into 'j'. Both searches agree.
print(has_path(graph, 'f', 'k'))
print(has_path_rdfs(graph, 'f', 'k'))
print(has_path(graph, 'f', 'j'))
print(has_path_rdfs(graph, 'f', 'j'))
|
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup script, used for relative file access.
here = path.abspath(path.dirname(__file__))

# Use the README as the package's long description on PyPI.
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='stix-pattern-translator',
    version='0.1.0',
    description='A translator to convert STIX2 patterns into other search platforms (e.g., ElasticSearch) and data models (e.g., CIM)',
    long_description=long_description,
    url='https://github.com/mitre/stix2patterns_translator',
    author='The MITRE Corporation',
    author_email='hfoster@mitre.org',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Security',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='stix cybersecurity analytics',
    # Ship every package except the test suite.
    packages=find_packages(exclude=['tests']),
    install_requires=['antlr4-python3-runtime', 'python-dateutil'],
    extras_require={
        'dev': ['pytest', 'tox', 'flask', 'requests'],
        'web': ['flask'],
    },
    package_data={},
    data_files=[],
    # Command-line entry points for the translator and its web server.
    entry_points={
        'console_scripts': [
            'translate-stix-pattern=stix2patterns_translator.translator:main',
            'pattern-translator-server=stix2patterns_translator.web_api:main',
        ],
    },
)
|
// Doxygen-generated navigation data for struct ovs_cmdl_context:
// [ member display name, HTML anchor of its documentation, children ].
// Do not edit by hand — regenerated by the documentation build.
var structovs__cmdl__context =
[
    [ "argc", "structovs__cmdl__context.html#aaa77261fe624d9665f93e259c7f4cf25", null ],
    [ "argv", "structovs__cmdl__context.html#aadc790bc240e17aff1c90ab9a5a113bc", null ],
    [ "pvt", "structovs__cmdl__context.html#a2a8b690780113b476ff67ffd7b38ba35", null ]
];
|
const { resolve } = require('path');
const test = require('tape');
const { unlink, write } = require('../../');
const dir = resolve(process.cwd(), './test/data/');
const filepath1 = resolve(dir, './unlink.txt');
// Fixture: create the file the unlink tests operate on.
test('setup unlink', (t) => {
  const done = () => t.end();
  write(filepath1, 'file content\n').then(done).catch(t.end);
});
// Deleting the fixture file should resolve cleanly.
test('unlink', (t) => {
  const onDeleted = () => {
    t.pass('file deleted');
    t.end();
  };
  unlink(filepath1).then(onDeleted).catch(t.end);
});
// Unlinking a missing path must reject with a Node ENOENT error.
test('unlink non-existing file', (t) => {
  const path = resolve(dir, './nothing-here.txt');
  const onResolved = () => {
    t.fail('should not unlink non-existing file');
    t.end();
  };
  const onRejected = (error) => {
    t.true(error instanceof Error, 'instance of Error');
    t.equal(error.code, 'ENOENT', 'error.code is ENOENT');
    t.equal(error.syscall, 'unlink', 'error.syscall is unlink');
    t.equal(error.path, path, 'correct error.path');
    t.true(
      error.message.includes('no such file or directory'),
      'includes no such file or directory message'
    );
    t.end();
  };
  unlink(path).then(onResolved).catch(onRejected);
});
|
'use strict';
// ---------------------------------------------------------------------------
const Exchange = require ('./base/Exchange');
const { ExchangeError, AuthenticationError, InsufficientFunds, ExchangeNotAvailable, InvalidOrder, BadRequest, OrderNotFound, NotSupported } = require ('./base/errors');
// ---------------------------------------------------------------------------
module.exports = class anxpro extends Exchange {
describe () {
    // Static exchange metadata: id/geography, unified-capability flags,
    // endpoint tables for both the legacy v2 REST API and the newer v3
    // API, error mappings, fee schedule and method options.
    return this.deepExtend (super.describe (), {
        'id': 'anxpro',
        'name': 'ANXPro',
        'countries': [ 'JP', 'SG', 'HK', 'NZ' ],
        'rateLimit': 1500,
        // Which unified ccxt methods this exchange implements.
        'has': {
            'CORS': false,
            'fetchCurrencies': true,
            'fetchOHLCV': false,
            'fetchTrades': false,
            'fetchOpenOrders': true,
            'fetchDepositAddress': true,
            'fetchTransactions': true,
            'fetchMyTrades': true,
            'createDepositAddress': false,
            'withdraw': true,
        },
        'urls': {
            'logo': 'https://user-images.githubusercontent.com/1294454/27765983-fd8595da-5ec9-11e7-82e3-adb3ab8c2612.jpg',
            // v2 and v3 APIs live under different path prefixes.
            'api': {
                'public': 'https://anxpro.com/api/2',
                'private': 'https://anxpro.com/api/2',
                'v3public': 'https://anxpro.com/api/3',
                'v3private': 'https://anxpro.com/api/3',
            },
            'www': 'https://anxpro.com',
            'doc': [
                'https://anxv2.docs.apiary.io',
                'https://anxv3.docs.apiary.io',
                'https://anxpro.com/pages/api',
            ],
        },
        // Endpoint tables; ccxt generates implicit methods from these
        // (e.g. v3privatePostTransactionList, publicGetCurrencyPairMoneyTicker).
        'api': {
            'v3public': {
                'get': [
                    'currencyStatic',
                ],
            },
            'v3private': {
                'post': [
                    'register/register',
                    'register/verifyRegistration',
                    'register/resendVerification',
                    'register/autoRegister',
                    'account',
                    'subaccount/new',
                    'transaction/list',
                    'order/list',
                    'trade/list',
                    'send',
                    'receive',
                    'receive/create',
                    'batch/new',
                    'batch/add',
                    'batch/list',
                    'batch/info',
                    'batch/closeForSend',
                    'order/new',
                    'order/info',
                    'order/cancel',
                    'retail/quote',
                    'retail/trade',
                    'validateAddress',
                    'address/check',
                    'alert/create',
                    'alert/delete',
                    'alert/list',
                    'kyc/personal',
                    'kyc/document',
                    'kyc/status',
                    'kyc/verifyCode',
                    'news/list',
                    'press/list',
                    'announcements/list',
                    'apiDoc/list',
                ],
            },
            'public': {
                'get': [
                    '{currency_pair}/money/ticker',
                    '{currency_pair}/money/depth/full',
                    '{currency_pair}/money/trade/fetch', // disabled by ANXPro
                ],
            },
            'private': {
                'post': [
                    '{currency_pair}/money/order/add',
                    '{currency_pair}/money/order/cancel',
                    '{currency_pair}/money/order/quote',
                    '{currency_pair}/money/order/result',
                    '{currency_pair}/money/orders',
                    'money/{currency}/address',
                    'money/{currency}/send_simple',
                    'money/info',
                    'money/trade/list',
                    'money/wallet/history',
                ],
            },
        },
        'httpExceptions': {
            '403': AuthenticationError,
        },
        // Exact error-message -> exception-class mapping (v2 API texts).
        'exceptions': {
            'exact': {
                // v2
                'Insufficient Funds': InsufficientFunds,
                'Trade value too small': InvalidOrder,
                'The currency pair is not supported': BadRequest,
                'Order amount is too low': InvalidOrder,
                'Order amount is too high': InvalidOrder,
                'order rate is too low': InvalidOrder,
                'order rate is too high': InvalidOrder,
                'Too many open orders': InvalidOrder,
                'Unexpected error': ExchangeError,
                'Order Engine is offline': ExchangeNotAvailable,
                'No executed order with that identifer found': OrderNotFound,
                'Unknown server error, please contact support.': ExchangeError,
            },
        },
        'fees': {
            'trading': {
                'tierBased': false,
                'percentage': true,
                'maker': 0.1 / 100,
                'taker': 0.2 / 100,
            },
        },
        'options': {
            'fetchMyTradesMethod': 'private_post_money_trade_list', // or 'v3private_post_trade_list'
        },
    });
}
async fetchTransactions (code = undefined, since = undefined, limit = undefined, params = {}) {
    // Fetch deposit/withdrawal history from the v3 transaction/list
    // endpoint, optionally filtered by currency code, start time and limit.
    await this.loadMarkets ();
    const request = {};
    if (since !== undefined) {
        request['from'] = since;
    }
    if (limit !== undefined) {
        request['max'] = limit;
    }
    const currency = (code === undefined) ? undefined : this.currency (code);
    if (currency !== undefined) {
        request['ccy'] = currency['id'];
    }
    const response = await this.v3privatePostTransactionList (this.extend (request, params));
    //
    // {
    //     transactions: [
    //         { transactionClass: 'COIN', uuid, userUuid, amount, fee,
    //           balanceBefore, balanceAfter, ccy, transactionState,
    //           transactionType: 'WITHDRAWAL' | 'DEPOSIT' | 'FILL_CREDIT' | ...,
    //           received, processed, timestampMillis, displayTitle,
    //           displayDescription, coinAddress, coinTransactionId, subAccount },
    //         ...
    //     ],
    //     count: ...,
    //     timestamp: '1557441435971',
    //     resultCode: 'OK'
    // }
    //
    const transactions = this.safeValue (response, 'transactions', []);
    const grouped = this.groupBy (transactions, 'transactionType');
    // An account may have only deposits or only withdrawals, in which case
    // the corresponding group is absent — default each to an empty array
    // instead of crashing on grouped['DEPOSIT'] being undefined.
    const deposits = this.safeValue (grouped, 'DEPOSIT', []);
    const withdrawals = this.safeValue (grouped, 'WITHDRAWAL', []);
    const depositsAndWithdrawals = this.arrayConcat (deposits, withdrawals);
    return this.parseTransactions (depositsAndWithdrawals, currency, since, limit);
}
parseTransaction (transaction, currency = undefined) {
    //
    // Normalizes a raw v3 COIN transaction into the unified ccxt
    // transaction structure. Relevant raw fields:
    //
    //   amount            negative for withdrawals
    //   fee, ccy          fee and currency id
    //   transactionState  e.g. 'PROCESSED'
    //   transactionType   'WITHDRAWAL' or 'DEPOSIT'
    //   received/processed  millisecond timestamps (strings)
    //   coinAddress       may embed an XRP destination tag: 'r...?dt=123'
    //   displayDescription  deposits may carry the address only here,
    //                       e.g. 'Deposit to: 0xf123...'
    //   coinTransactionId  on-chain txid
    //
    const timestamp = this.safeInteger (transaction, 'received');
    const updated = this.safeInteger (transaction, 'processed');
    const transactionType = this.safeString (transaction, 'transactionType');
    let type = undefined;
    let amount = this.safeFloat (transaction, 'amount');
    let address = this.safeString (transaction, 'coinAddress');
    let tag = undefined;
    if (transactionType === 'WITHDRAWAL') {
        type = 'withdrawal';
        // raw withdrawal amounts are reported as negative values
        amount = -amount;
        // split an embedded XRP destination tag off the address
        if (address && address.indexOf ('?dt=') >= 0) {
            const parts = address.split ('?dt=');
            address = parts[0];
            tag = parts[1];
        }
    } else if (transactionType === 'DEPOSIT') {
        type = 'deposit';
        if (!address) {
            // fall back to the human-readable description, which embeds
            // the address (and possibly a ?dt= destination tag)
            const displayDescription = this.safeString (transaction, 'displayDescription');
            const addressText = displayDescription.replace ('Deposit to: ', '');
            if (addressText.length > 0) {
                if (addressText.indexOf ('?dt=') >= 0) {
                    const parts = addressText.split ('?dt=');
                    address = parts[0];
                    tag = parts[1];
                } else {
                    address = addressText;
                }
            }
        }
    }
    const currencyId = this.safeString (transaction, 'ccy');
    const code = this.commonCurrencyCode (currencyId);
    const transactionState = this.safeString (transaction, 'transactionState');
    const status = this.parseTransactionStatus (transactionState);
    return {
        'timestamp': timestamp,
        'datetime': this.iso8601 (timestamp),
        'id': this.safeString (transaction, 'uuid'),
        'currency': code,
        'amount': amount,
        'address': address,
        'tag': tag,
        'status': status,
        'type': type,
        'updated': updated,
        'txid': this.safeString (transaction, 'coinTransactionId'),
        'fee': {
            'cost': this.safeFloat (transaction, 'fee'),
            'currency': code,
        },
        'info': transaction,
    };
}
parseTransactionStatus (status) {
    // Translate an ANXPro transaction state into a unified ccxt status;
    // any unmapped state is passed through unchanged.
    const statuses = {
        'PROCESSED': 'ok',
        'REVERSED': 'canceled',
        'CANCELLED_INSUFFICIENT_FUNDS': 'canceled',
        'CANCELLED_LIMIT_BREACH': 'canceled',
    };
    return this.safeString (statuses, status, status);
}
async fetchMyTrades (symbol = undefined, since = undefined, limit = undefined, params = {}) {
    //
    // Dispatches to one of two endpoints depending on
    // options['fetchMyTradesMethod']:
    //
    //   'private_post_money_trade_list' (v2, default):
    //       { result: 'success', data: [ ...trades... ] }
    //   'v3private_post_trade_list':
    //       { trades: [ ...trades... ], count, timestamp, resultCode: 'OK' }
    //
    // Each raw trade carries tradeId, orderId, timestamp (ms),
    // tradedCurrencyFillAmount, settlementCurrencyFillAmount,
    // settlementCurrencyFillAmountUnrounded, price and ccyPair;
    // only the v2 response includes a 'side' field ('BUY'/'SELL').
    //
    await this.loadMarkets ();
    const request = {};
    if (limit !== undefined) {
        request['max'] = limit;
    }
    const method = this.safeString (this.options, 'fetchMyTradesMethod', 'private_post_money_trade_list');
    const response = await this[method] (this.extend (request, params));
    // v3 nests trades under 'trades', v2 under 'data'
    const trades = this.safeValue2 (response, 'trades', 'data', []);
    const market = (symbol === undefined) ? undefined : this.market (symbol);
    return this.parseTrades (trades, market, since, limit);
}
parseTrade (trade, market = undefined) {
    //
    // Raw private trade (v2 and v3 share the shape):
    //
    //   {
    //       tradeId, orderId,
    //       timestamp: '1551357033000',            // milliseconds
    //       tradedCurrencyFillAmount: '0.06521746',
    //       settlementCurrencyFillAmount: '224.09',
    //       settlementCurrencyFillAmountUnrounded: '224.09000000',
    //       price: '3436.04305',
    //       ccyPair: 'BTCUSD',
    //       side: 'BUY',                           // v2 only, absent in v3
    //   }
    //
    const timestamp = this.safeInteger (trade, 'timestamp');
    let side = this.safeString (trade, 'side');
    if (side !== undefined) {
        // normalize v2's 'BUY'/'SELL'; v3 omits the field entirely
        side = side.toLowerCase ();
    }
    return {
        'id': this.safeString (trade, 'tradeId'),
        'order': this.safeString (trade, 'orderId'),
        'timestamp': timestamp,
        'datetime': this.iso8601 (timestamp),
        'symbol': this.findSymbol (this.safeString (trade, 'ccyPair')),
        'type': undefined,
        'side': side,
        'price': this.safeFloat (trade, 'price'),
        'amount': this.safeFloat (trade, 'tradedCurrencyFillAmount'),
        'cost': this.safeFloat (trade, 'settlementCurrencyFillAmount'),
        'fee': undefined,
        'info': trade,
    };
}
async fetchCurrencies (params = {}) {
const response = await this.v3publicGetCurrencyStatic (params);
const result = {};
const currencies = response['currencyStatic']['currencies'];
// "currencies": {
// "HKD": {
// "decimals": 2,
// "minOrderSize": 1.00000000,
// "maxOrderSize": 10000000000.00000000,
// "displayDenominator": 1,
// "summaryDecimals": 0,
// "displayUnit": "HKD",
// "symbol": "$",
// "type": "FIAT",
// "engineSettings": {
// "depositsEnabled": false,
// "withdrawalsEnabled": true,
// "displayEnabled": true,
// "mobileAccessEnabled": true
// },
// "minOrderValue": 1.00000000,
// "maxOrderValue": 10000000000.00000000,
// "maxMarketOrderValue": 36000.00000000,
// "maxMarketOrderSize": 36000.00000000,
// "assetDivisibility": 0
// },
// "ETH": {
// "decimals": 8,
// "minOrderSize": 0.00010000,
// "maxOrderSize": 1000000000.00000000,
// "type": "CRYPTO",
// "confirmationThresholds": [
// { "confosRequired": 30, "threshold": 0.50000000 },
// { "confosRequired": 45, "threshold": 10.00000000 },
// { "confosRequired": 70 }
// ],
// "networkFee": 0.00500000,
// "engineSettings": {
// "depositsEnabled": true,
// "withdrawalsEnabled": true,
// "displayEnabled": true,
// "mobileAccessEnabled": true
// },
// "minOrderValue": 0.00010000,
// "maxOrderValue": 10000000000.00000000,
// "maxMarketOrderValue": 10000000000.00000000,
// "maxMarketOrderSize": 1000000000.00000000,
// "digitalCurrencyType": "ETHEREUM",
// "assetDivisibility": 0,
// "assetIcon": "/images/currencies/crypto/ETH.svg"
// },
// },
const ids = Object.keys (currencies);
for (let i = 0; i < ids.length; i++) {
const id = ids[i];
const currency = currencies[id];
const code = this.commonCurrencyCode (id);
const engineSettings = this.safeValue (currency, 'engineSettings');
const depositsEnabled = this.safeValue (engineSettings, 'depositsEnabled');
const withdrawalsEnabled = this.safeValue (engineSettings, 'withdrawalsEnabled');
const displayEnabled = this.safeValue (engineSettings, 'displayEnabled');
const active = depositsEnabled && withdrawalsEnabled && displayEnabled;
const precision = this.safeInteger (currency, 'decimals');
const fee = this.safeFloat (currency, 'networkFee');
let type = this.safeString (currency, 'type');
if (type !== 'undefined') {
type = type.toLowerCase ();
}
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'type': type,
'active': active,
'precision': precision,
'fee': fee,
'limits': {
'amount': {
'min': this.safeFloat (currency, 'minOrderSize'),
'max': this.safeFloat (currency, 'maxOrderSize'),
},
'price': {
'min': undefined,
'max': undefined,
},
'cost': {
'min': this.safeFloat (currency, 'minOrderValue'),
'max': this.safeFloat (currency, 'maxOrderValue'),
},
'withdraw': {
'min': undefined,
'max': undefined,
},
},
};
}
return result;
}
async fetchMarkets (params = {}) {
const response = await this.v3publicGetCurrencyStatic (params);
//
// {
// "currencyStatic": {
// "currencies": {
// "HKD": {
// "decimals": 2,
// "minOrderSize": 1.00000000,
// "maxOrderSize": 10000000000.00000000,
// "displayDenominator": 1,
// "summaryDecimals": 0,
// "displayUnit": "HKD",
// "symbol": "$",
// "type": "FIAT",
// "engineSettings": {
// "depositsEnabled": false,
// "withdrawalsEnabled": true,
// "displayEnabled": true,
// "mobileAccessEnabled": true
// },
// "minOrderValue": 1.00000000,
// "maxOrderValue": 10000000000.00000000,
// "maxMarketOrderValue": 36000.00000000,
// "maxMarketOrderSize": 36000.00000000,
// "assetDivisibility": 0
// },
// "ETH": {
// "decimals": 8,
// "minOrderSize": 0.00010000,
// "maxOrderSize": 1000000000.00000000,
// "type": "CRYPTO",
// "confirmationThresholds": [
// { "confosRequired": 30, "threshold": 0.50000000 },
// { "confosRequired": 45, "threshold": 10.00000000 },
// { "confosRequired": 70 }
// ],
// "networkFee": 0.00500000,
// "engineSettings": {
// "depositsEnabled": true,
// "withdrawalsEnabled": true,
// "displayEnabled": true,
// "mobileAccessEnabled": true
// },
// "minOrderValue": 0.00010000,
// "maxOrderValue": 10000000000.00000000,
// "maxMarketOrderValue": 10000000000.00000000,
// "maxMarketOrderSize": 1000000000.00000000,
// "digitalCurrencyType": "ETHEREUM",
// "assetDivisibility": 0,
// "assetIcon": "/images/currencies/crypto/ETH.svg"
// },
// },
// "currencyPairs": {
// "ETHUSD": {
// "priceDecimals": 5,
// "engineSettings": {
// "tradingEnabled": true,
// "displayEnabled": true,
// "cancelOnly": true,
// "verifyRequired": false,
// "restrictedBuy": false,
// "restrictedSell": false
// },
// "minOrderRate": 10.00000000,
// "maxOrderRate": 10000.00000000,
// "displayPriceDecimals": 5,
// "tradedCcy": "ETH",
// "settlementCcy": "USD",
// "preferredMarket": "ANX",
// "chartEnabled": true,
// "simpleTradeEnabled": false
// },
// },
// },
// "timestamp": "1549840691039",
// "resultCode": "OK"
// }
//
const currencyStatic = this.safeValue (response, 'currencyStatic', {});
const currencies = this.safeValue (currencyStatic, 'currencies', {});
const currencyPairs = this.safeValue (currencyStatic, 'currencyPairs', {});
const result = [];
const ids = Object.keys (currencyPairs);
for (let i = 0; i < ids.length; i++) {
const id = ids[i];
const market = currencyPairs[id];
//
// "ETHUSD": {
// "priceDecimals": 5,
// "engineSettings": {
// "tradingEnabled": true,
// "displayEnabled": true,
// "cancelOnly": true,
// "verifyRequired": false,
// "restrictedBuy": false,
// "restrictedSell": false
// },
// "minOrderRate": 10.00000000,
// "maxOrderRate": 10000.00000000,
// "displayPriceDecimals": 5,
// "tradedCcy": "ETH",
// "settlementCcy": "USD",
// "preferredMarket": "ANX",
// "chartEnabled": true,
// "simpleTradeEnabled": false
// },
//
const baseId = this.safeString (market, 'tradedCcy');
const quoteId = this.safeString (market, 'settlementCcy');
const base = this.commonCurrencyCode (baseId);
const quote = this.commonCurrencyCode (quoteId);
const symbol = base + '/' + quote;
const baseCurrency = this.safeValue (currencies, baseId, {});
const quoteCurrency = this.safeValue (currencies, quoteId, {});
const precision = {
'price': this.safeInteger (market, 'priceDecimals'),
'amount': this.safeInteger (baseCurrency, 'decimals'),
};
const engineSettings = this.safeValue (market, 'engineSettings');
const displayEnabled = this.safeValue (engineSettings, 'displayEnabled');
const tradingEnabled = this.safeValue (engineSettings, 'tradingEnabled');
const active = displayEnabled && tradingEnabled;
result.push ({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'precision': precision,
'active': active,
'limits': {
'price': {
'min': this.safeFloat (market, 'minOrderRate'),
'max': this.safeFloat (market, 'maxOrderRate'),
},
'amount': {
'min': this.safeFloat (baseCurrency, 'minOrderSize'),
'max': this.safeFloat (baseCurrency, 'maxOrderSize'),
},
'cost': {
'min': this.safeFloat (quoteCurrency, 'minOrderValue'),
'max': this.safeFloat (quoteCurrency, 'maxOrderValue'),
},
},
'info': market,
});
}
return result;
}
async fetchBalance (params = {}) {
await this.loadMarkets ();
const response = await this.privatePostMoneyInfo (params);
const balance = this.safeValue (response, 'data', {});
const wallets = balance['Wallets'];
const currencies = Object.keys (wallets);
const result = { 'info': balance };
for (let c = 0; c < currencies.length; c++) {
const currencyId = currencies[c];
const code = this.commonCurrencyCode (currencyId);
const account = this.account ();
if (currencyId in wallets) {
const wallet = wallets[currencyId];
account['free'] = this.safeFloat (wallet['Available_Balance'], 'value');
account['total'] = this.safeFloat (wallet['Balance'], 'value');
account['used'] = account['total'] - account['free'];
}
result[code] = account;
}
return this.parseBalance (result);
}
async fetchOrderBook (symbol, limit = undefined, params = {}) {
    // Full-depth snapshot; ANXPro reports dataUpdateTime in microseconds.
    await this.loadMarkets ();
    const request = {
        'currency_pair': this.marketId (symbol),
    };
    const response = await this.publicGetCurrencyPairMoneyDepthFull (this.extend (request, params));
    const orderbook = this.safeValue (response, 'data', {});
    const microtime = this.safeInteger (orderbook, 'dataUpdateTime');
    let timestamp = undefined;
    if (microtime !== undefined) {
        timestamp = parseInt (microtime / 1000); // microseconds -> milliseconds
    }
    return this.parseOrderBook (orderbook, timestamp, 'bids', 'asks', 'price', 'amount');
}
    async fetchTicker (symbol, params = {}) {
        // Fetch the ticker for one market and map it onto the unified
        // ccxt ticker structure. Each numeric field in the raw payload is
        // an object with a 'value' key.
        await this.loadMarkets ();
        const request = {
            'currency_pair': this.marketId (symbol),
        };
        const response = await this.publicGetCurrencyPairMoneyTicker (this.extend (request, params));
        const ticker = this.safeValue (response, 'data', {});
        const t = this.safeInteger (ticker, 'dataUpdateTime');
        // dataUpdateTime is divided by 1000 before use as a millisecond
        // timestamp (presumably reported in microseconds — TODO confirm)
        const timestamp = (t === undefined) ? t : parseInt (t / 1000);
        const bid = this.safeFloat (ticker['buy'], 'value');
        const ask = this.safeFloat (ticker['sell'], 'value');
        const baseVolume = this.safeFloat (ticker['vol'], 'value');
        const last = this.safeFloat (ticker['last'], 'value');
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': this.iso8601 (timestamp),
            'high': this.safeFloat (ticker['high'], 'value'),
            'low': this.safeFloat (ticker['low'], 'value'),
            'bid': bid,
            'bidVolume': undefined,
            'ask': ask,
            'askVolume': undefined,
            'vwap': undefined,
            'open': undefined,
            'close': last,
            'last': last,
            'previousClose': undefined,
            'change': undefined,
            'percentage': undefined,
            'average': this.safeFloat (ticker['avg'], 'value'),
            'baseVolume': baseVolume,
            'quoteVolume': undefined,
            'info': ticker,
        };
    }
    async fetchTrades (symbol, since = undefined, limit = undefined, params = {}) {
        // The exchange disabled its public trades endpoint, so this
        // unified method is deliberately unsupported.
        throw new NotSupported (this.id + ' switched off the trades endpoint, see their docs at https://docs.anxv2.apiary.io');
    }
async fetchOrders (symbol = undefined, since = undefined, limit = undefined, params = {}) {
await this.loadMarkets ();
const request = {};
if (limit !== undefined) {
request['max'] = limit;
}
const response = await this.v3privatePostOrderList (this.extend (request, params));
const orders = this.safeValue (response, 'orders', []);
const market = (symbol === undefined) ? undefined : this.market (symbol);
return this.parseOrders (orders, market, since, limit);
}
async fetchOpenOrders (symbol = undefined, since = undefined, limit = undefined, params = {}) {
await this.loadMarkets ();
const market = this.market (symbol);
const request = {
'currency_pair': market['id'],
};
// ANXPro will return all symbol pairs regardless of what is specified in request
const response = await this.privatePostCurrencyPairMoneyOrders (this.extend (request, params));
//
// {
// "result": "success",
// "data": [
// {
// "oid": "e74305c7-c424-4fbc-a8a2-b41d8329deb0",
// "currency": "HKD",
// "item": "BTC",
// "type": "offer",
// "amount": {
// "currency": "BTC",
// "display": "10.00000000 BTC",
// "display_short": "10.00 BTC",
// "value": "10.00000000",
// "value_int": "1000000000"
// },
// "effective_amount": {
// "currency": "BTC",
// "display": "10.00000000 BTC",
// "display_short": "10.00 BTC",
// "value": "10.00000000",
// "value_int": "1000000000"
// },
// "price": {
// "currency": "HKD",
// "display": "412.34567 HKD",
// "display_short": "412.35 HKD",
// "value": "412.34567",
// "value_int": "41234567"
// },
// "status": "open",
// "date": 1393411075000,
// "priority": 1393411075000000,
// "actions": []
// },
// ...
// ]
// }
//
return this.parseOrders (this.safeValue (response, 'data', {}), market, since, limit);
}
parseOrder (order, market = undefined) {
if ('orderId' in order)
return this.parseOrderV3 (order, market);
else
return this.parseOrderV2 (order, market);
}
parseOrderStatus (status) {
const statuses = {
'ACTIVE': 'open',
'FULL_FILL': 'closed',
'CANCEL': 'canceled',
};
return this.safeString (statuses, status, status);
}
    parseOrderV3 (order, market = undefined) {
        // Parse a v3 order into the unified ccxt order structure, folding
        // the embedded trades into 'filled', 'cost' and 'lastTradeTimestamp'.
        //
        // v3
        //
        //     {
        //         orderType: 'LIMIT',
        //         tradedCurrency: 'XRP',
        //         settlementCurrency: 'BTC',
        //         tradedCurrencyAmount: '400.00000000',
        //         buyTradedCurrency: true,
        //         limitPriceInSettlementCurrency: '0.00007129',
        //         timestamp: '1522547850000',
        //         orderId: '62a8be4d-73c6-4469-90cd-28b4726effe0',
        //         tradedCurrencyAmountOutstanding: '0.00000000',
        //         orderStatus: 'FULL_FILL',
        //         executedAverageRate: '0.00007127',
        //         trades: [
        //             {
        //                 tradeId: 'fe16b796-df57-41a2-b6d9-3489f189749e',
        //                 orderId: '62a8be4d-73c6-4469-90cd-28b4726effe0',
        //                 timestamp: '1522547850000',
        //                 tradedCurrencyFillAmount: '107.91298639',
        //                 settlementCurrencyFillAmount: '0.00768772',
        //                 settlementCurrencyFillAmountUnrounded: '0.00768772',
        //                 price: '0.00007124',
        //                 ccyPair: 'XRPBTC'
        //             },
        //             {
        //                 tradeId: 'e2962f67-c094-4243-8b88-0cdc70a1b1c7',
        //                 orderId: '62a8be4d-73c6-4469-90cd-28b4726effe0',
        //                 timestamp: '1522547851000',
        //                 tradedCurrencyFillAmount: '292.08701361',
        //                 settlementCurrencyFillAmount: '0.02082288',
        //                 settlementCurrencyFillAmountUnrounded: '0.02082288',
        //                 price: '0.00007129',
        //                 ccyPair: 'XRPBTC'
        //             }
        //         ]
        //     }
        //
        const tradedCurrency = this.safeString (order, 'tradedCurrency');
        const orderStatus = this.safeString (order, 'orderStatus');
        const status = this.parseOrderStatus (orderStatus);
        const settlementCurrency = this.safeString (order, 'settlementCurrency');
        const symbol = this.findSymbol (tradedCurrency + '/' + settlementCurrency);
        // buyTradedCurrency arrives as a boolean but is compared as a string here
        const buyTradedCurrency = this.safeString (order, 'buyTradedCurrency');
        const side = buyTradedCurrency === 'true' ? 'buy' : 'sell';
        const timestamp = this.safeInteger (order, 'timestamp');
        let lastTradeTimestamp = undefined;
        const trades = [];
        let filled = 0;
        const type = this.safeString (order, 'orderType').toLowerCase ();
        // Accumulate the fill amount and the latest trade timestamp from
        // the embedded trades; each parsed trade inherits side and type
        // from the parent order.
        for (let i = 0; i < order['trades'].length; i++) {
            const trade = order['trades'][i];
            const tradeTimestamp = this.safeInteger (trade, 'timestamp');
            if (!lastTradeTimestamp || lastTradeTimestamp < tradeTimestamp)
                lastTradeTimestamp = tradeTimestamp;
            const parsedTrade = this.extend (this.parseTrade (trade), { 'side': side, 'type': type });
            trades.push (parsedTrade);
            filled = this.sum (filled, parsedTrade['amount']);
        }
        let price = this.safeFloat (order, 'limitPriceInSettlementCurrency');
        const executedAverageRate = this.safeFloat (order, 'executedAverageRate');
        // Market orders have nothing outstanding by definition
        const remaining = type === 'market' ? 0 : this.safeFloat (order, 'tradedCurrencyAmountOutstanding');
        let amount = this.safeFloat (order, 'tradedCurrencyAmount');
        if (!amount) {
            // Fall back to deriving the amount from the settlement-side
            // total and the average execution rate
            const settlementCurrencyAmount = this.safeFloat (order, 'settlementCurrencyAmount');
            amount = settlementCurrencyAmount / executedAverageRate;
        }
        const cost = executedAverageRate * filled;
        return {
            'id': this.safeString (order, 'orderId'),
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': this.iso8601 (timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'type': type,
            'side': side,
            'price': price,
            'cost': cost,
            'amount': amount,
            'remaining': remaining,
            'filled': filled,
            'status': status,
            'fee': undefined,
            'trades': trades,
            'info': order,
        };
    }
parseOrderV2 (order, market = undefined) {
//
// v2
//
// {
// "oid": "e74305c7-c424-4fbc-a8a2-b41d8329deb0",
// "currency": "HKD",
// "item": "BTC",
// "type": "offer", <-- bid/offer
// "amount": {
// "currency": "BTC",
// "display": "10.00000000 BTC",
// "display_short": "10.00 BTC",
// "value": "10.00000000",
// "value_int": "1000000000"
// },
// "effective_amount": {
// "currency": "BTC",
// "display": "10.00000000 BTC",
// "display_short": "10.00 BTC",
// "value": "10.00000000",
// "value_int": "1000000000"
// },
// "price": {
// "currency": "HKD",
// "display": "412.34567 HKD",
// "display_short": "412.35 HKD",
// "value": "412.34567",
// "value_int": "41234567"
// },
// "status": "open",
// "date": 1393411075000,
// "priority": 1393411075000000,
// "actions": []
// }
//
let id = this.safeString (order, 'oid');
let status = this.safeString (order, 'status');
let timestamp = this.safeInteger (order, 'date');
const baseId = this.safeString (order, 'item');
const quoteId = this.safeString (order, 'currency');
const marketId = baseId + '/' + quoteId;
market = this.safeValue (this.markets_by_id, marketId);
let symbol = undefined;
if (typeof market !== 'undefined') {
symbol = market['symbol'];
}
let amount_info = this.safeValue (order, 'amount', {});
let effective_info = this.safeValue (order, 'effective_amount', {});
let price_info = this.safeValue (order, 'price', {});
let remaining = this.safeFloat (effective_info, 'value');
let amount = this.safeFloat (amount_info, 'volume');
let price = this.safeFloat (price_info, 'value');
let filled = undefined;
let cost = undefined;
if (typeof amount !== 'undefined') {
if (typeof remaining !== 'undefined') {
filled = amount - remaining;
cost = price * filled;
}
}
let orderType = 'limit';
let side = this.safeString (order, 'type');
if (side === 'offer') {
side = 'sell';
} else {
side = 'buy';
}
let fee = undefined;
let trades = undefined; // todo parse trades
let lastTradeTimestamp = undefined;
return {
'info': order,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': this.iso8601 (timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'type': orderType,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'remaining': remaining,
'filled': filled,
'status': status,
'fee': fee,
'trades': trades,
};
}
async createOrder (symbol, type, side, amount, price = undefined, params = {}) {
await this.loadMarkets ();
const market = this.market (symbol);
const amountMultiplier = Math.pow (10, market['precision']['amount']);
const request = {
'currency_pair': market['id'],
'amount_int': parseInt (amount * amountMultiplier), // 10^8
};
if (type === 'limit') {
const priceMultiplier = Math.pow (10, market['precision']['price']);
request['price_int'] = parseInt (price * priceMultiplier); // 10^5 or 10^8
}
request['type'] = (side === 'buy') ? 'bid' : 'ask';
const response = await this.privatePostCurrencyPairMoneyOrderAdd (this.extend (request, params));
return {
'info': response,
'id': response['data'],
};
}
async cancelOrder (id, symbol = undefined, params = {}) {
return await this.privatePostCurrencyPairMoneyOrderCancel ({ 'oid': id });
}
getAmountMultiplier (code) {
const multipliers = {
'BTC': 100000000,
'LTC': 100000000,
'STR': 100000000,
'XRP': 100000000,
'DOGE': 100000000,
};
const defaultValue = 100;
return this.safeInteger (multipliers, code, defaultValue);
}
async withdraw (code, amount, address, tag = undefined, params = {}) {
this.checkAddress (address);
await this.loadMarkets ();
let currency = this.currency (code);
let multiplier = this.getAmountMultiplier (code);
let request = {
'currency': currency,
'amount_int': parseInt (amount * multiplier),
'address': address,
};
if (tag !== undefined) {
request['destinationTag'] = tag;
}
let response = await this.privatePostMoneyCurrencySendSimple (this.extend (request, params));
return {
'info': response,
'id': response['data']['transactionId'],
};
}
async fetchDepositAddress (code, params = {}) {
await this.loadMarkets ();
let currency = this.currency (code);
let request = {
'currency': currency['id'],
};
let response = await this.privatePostMoneyCurrencyAddress (this.extend (request, params));
let result = response['data'];
let address = this.safeString (result, 'addr');
this.checkAddress (address);
return {
'currency': code,
'address': address,
'info': response,
};
}
    nonce () {
        // Millisecond timestamp used as the increasing nonce for signed
        // private requests (see sign ()).
        return this.milliseconds ();
    }
    sign (path, api = 'public', method = 'GET', params = {}, headers = undefined, body = undefined) {
        // Build the final URL, body and auth headers for a request.
        // Public endpoints get query-string params; private endpoints get
        // an HMAC-SHA512 signature over path + NUL + body.
        let request = this.implodeParams (path, params);
        let query = this.omit (params, this.extractParams (path));
        let url = this.urls['api'][api] + '/' + request;
        if (api === 'public' || api === 'v3public') {
            if (Object.keys (query).length)
                url += '?' + this.urlencode (query);
        } else {
            this.checkRequiredCredentials ();
            let nonce = this.nonce ();
            let auth = undefined;
            let contentType = undefined;
            if (api === 'v3private') {
                // v3 signs a JSON body with a microsecond 'tonce'
                body = this.json (this.extend ({ 'tonce': nonce * 1000 }, query));
                // NOTE: this const deliberately shadows the 'path' parameter —
                // v3 signs the URL path relative to the site root
                const path = url.replace ('https://anxpro.com/', '');
                auth = path + '\0' + body;
                contentType = 'application/json';
            } else {
                // v2 signs a urlencoded form body with a millisecond 'nonce'
                body = this.urlencode (this.extend ({ 'nonce': nonce }, query));
                // eslint-disable-next-line quotes
                auth = request + "\0" + body;
                contentType = 'application/x-www-form-urlencoded';
            }
            let secret = this.base64ToBinary (this.secret);
            let signature = this.hmac (this.encode (auth), secret, 'sha512', 'base64');
            headers = {
                'Content-Type': contentType,
                'Rest-Key': this.apiKey,
                'Rest-Sign': this.decode (signature),
            };
        }
        return { 'url': url, 'method': method, 'body': body, 'headers': headers };
    }
handleErrors (httpCode, reason, url, method, headers, body, response) {
if (response === undefined || response === '') {
return;
}
const result = this.safeString (response, 'result');
const code = this.safeString (response, 'resultCode');
if (((result !== undefined) && (result !== 'success')) || ((code !== undefined) && (code !== 'OK'))) {
const message = this.safeString (response, 'error');
const feedback = this.id + ' ' + body;
const exact = this.exceptions['exact'];
if (code in exact) {
throw new exact[code] (feedback);
} else if (message in exact) {
throw new exact[message] (feedback);
}
const broad = this.safeValue (this.exceptions, 'broad', {});
const broadKey = this.findBroadlyMatchedKey (broad, message);
if (broadKey !== undefined) {
throw new broad[broadKey] (feedback);
}
throw new ExchangeError (feedback); // unknown message
}
}
};
|
"""
Build a dummy index and run tests on it.
"""
import os
import shutil
import tempfile
from subprocess import check_call
import pytest
import captions
import captions.decode as decode
from lib.common import get_docs_and_lexicon
TMP_DIR = None
TEST_SUBS_SUBDIR = 'subs'
TEST_INDEX_SUBDIR = 'index'
TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test-small.tar.gz')
BUILD_INDEX_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'scripts', 'build_index.py')
@pytest.fixture(scope="session", autouse=True)
def dummy_data():
    """Session-wide fixture: unpack the test subtitles and build an index
    in a temp directory, removing everything when the session ends."""
    global TMP_DIR
    TMP_DIR = tempfile.mkdtemp(suffix=None, prefix='caption-index-unittest-',
                               dir=None)
    def build_test_index(tmp_dir):
        # Lay out <tmp>/subs for the raw data and <tmp>/index for the output
        subs_dir = os.path.join(tmp_dir, TEST_SUBS_SUBDIR)
        idx_dir = os.path.join(tmp_dir, TEST_INDEX_SUBDIR)
        # Unpack the test data
        os.makedirs(subs_dir)
        check_call(['tar', '-xzf', TEST_DATA_PATH, '-C', subs_dir])
        # Build the index
        check_call([BUILD_INDEX_SCRIPT, '-d', subs_dir, '-o', idx_dir])
        assert os.path.isdir(idx_dir)
    try:
        build_test_index(TMP_DIR)
        yield
    finally:
        # ignore_errors=True: best-effort cleanup even on partial builds
        shutil.rmtree(TMP_DIR, True)
def test_search():
    """Search for known ngrams in cnn.srt and verify the reported counts
    and the decoded tokens at each posting."""
    idx_dir = os.path.join(TMP_DIR, TEST_INDEX_SUBDIR)
    idx_path = os.path.join(idx_dir, 'index.bin')
    documents, lexicon = get_docs_and_lexicon(idx_dir)
    def count_and_test(index, document, tokens):
        # 'contains' should report exactly this one document
        ids = index.contains(tokens, [document])
        assert len(ids) == 1
        count = 0
        (d,) = list(index.search(tokens, [document]))
        assert len(d.postings) > 0
        dh = documents.open(document)
        for l in d.postings:
            assert l.len == len(tokens)
            assert abs(l.end - l.start) < 10.0, 'ngram time too large'
            count += 1
            # Check that we actually found the right ngrams
            assert [lexicon.decode(t) for t in dh.tokens(l.idx, l.len)] == tokens
        return count
    test_document = documents['cnn.srt']
    # Expected counts come from the known contents of the test fixture
    with captions.CaptionIndex(idx_path, lexicon, documents) as index:
        assert count_and_test(index, test_document, ['THEY']) == 12
        assert count_and_test(index, test_document, ['PEOPLE']) == 12
        assert count_and_test(index, test_document, ['TO', 'THE']) == 9  # one wraps
        assert count_and_test(index, test_document, ['GIBSON', 'GUITAR', 'DROP']) == 1
        assert count_and_test(index, test_document, ['PUT', 'THAT', 'DOWN']) == 1
        assert count_and_test(index, test_document, ['CLOCK', 'STRIKES']) == 2
        assert count_and_test(index, test_document, ['>>']) == 149
        assert count_and_test(index, test_document, ['SEE', '?']) == 1
def test_search_position():
    """Document-handle positions map times onto token indices, clamping
    past-the-end times to the token count."""
    idx_dir = os.path.join(TMP_DIR, TEST_INDEX_SUBDIR)
    idx_path = os.path.join(idx_dir, 'index.bin')
    documents, lexicon = get_docs_and_lexicon(idx_dir)
    with captions.CaptionIndex(idx_path, lexicon, documents) as index:
        doc = documents['test.srt']
        handle = documents.open(doc)
        # In range: the midpoint of each 5-second caption maps to its index
        for token_idx in range(10):
            assert handle.position(5 * token_idx + 2.5) == token_idx
        # Out of range: clamps to the number of tokens
        assert handle.position(51) == 10
        assert handle.position(100) == 10
def _is_close(a, b):
return abs(a - b) <= 1e-6
def test_search_intervals():
    """The test fixture has one numbered token per 5-second caption, so an
    n-gram starting at token i must span [i * 5, (i + n) * 5] seconds."""
    idx_dir = os.path.join(TMP_DIR, TEST_INDEX_SUBDIR)
    idx_path = os.path.join(idx_dir, 'index.bin')
    documents, lexicon = get_docs_and_lexicon(idx_dir)
    with captions.CaptionIndex(idx_path, lexicon, documents) as index:
        test_document = documents['test.srt']
        # unigrams
        for i in range(10):
            (d,) = list(index.search([str(i + 1)], [test_document]))
            (p,) = d.postings
            assert _is_close(p.start, i * 5.)
            assert _is_close(p.end, (i + 1) * 5.)
        # bigrams
        for i in range(9):
            bigram = [str(i + 1), str(i + 2)]
            (d,) = list(index.search(bigram, [test_document]))
            (p,) = d.postings
            assert _is_close(p.start, i * 5.), bigram
            assert _is_close(p.end, (i + 2) * 5.), bigram
        # 3-grams
        for i in range(8):
            trigram = [str(i + 1), str(i + 2), str(i + 3)]
            (d,) = list(index.search(trigram, [test_document]))
            (p,) = d.postings
            assert _is_close(p.start, i * 5.), trigram
            assert _is_close(p.end, (i + 3) * 5.), trigram
def test_decode():
    """Smoke-test VTT and SRT decoding of the first indexed document."""
    index_dir = os.path.join(TMP_DIR, TEST_INDEX_SUBDIR)
    documents, lexicon = get_docs_and_lexicon(index_dir)
    handle = documents.open(0)
    print(decode.get_vtt(lexicon, handle))
    print(decode.get_srt(lexicon, handle))
|
var config = require('../../config');
var nock = require('nock');
var rewire = require('rewire');
var sinon = require('sinon');
var test = require('tape');
var events = rewire('../../lib/events');
// do not allow real http requests
nock.disableNetConnect();
// A 404 while looking up the event sender should reject with an Error whose
// message is the JSON github error payload.
test('events: error is returned if failed to get user details for the repo user', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        }
    };
    var userNotFoundErrorMessage = 'foo bar baz';
    var githubRequests = nock(/api\.github\.com/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(404, { message: userNotFoundErrorMessage });
    events(payload).catch(function(error) {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        if (error instanceof Error) {
            t.pass('error returned');
            var errorJson;
            try {
                errorJson = JSON.parse(error.message);
            } catch (e) {
                // Bail out: continuing would dereference the undefined
                // errorJson below and throw inside this promise handler,
                // so t.end() would never run.
                t.fail('failed to parse error message');
                t.end();
                return;
            }
            if (errorJson.message) {
                t.equal(errorJson.message, userNotFoundErrorMessage, 'correct error message is returned');
            } else {
                t.fail('error does not contain a message');
            }
        } else {
            t.fail('invalid error returned');
        }
        t.end();
    });
});
// Same as above, but the 404 comes from the second lookup (the issue author).
test('events: error is returned if failed to get user details for the issue author user', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            }
        }
    };
    var userNotFoundErrorMessage = 'foo bar baz';
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(404, { message: userNotFoundErrorMessage });
    events(payload).catch(function(error) {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        if (error instanceof Error) {
            t.pass('error returned');
            var errorJson;
            try {
                errorJson = JSON.parse(error.message);
            } catch (e) {
                // Bail out: errorJson is undefined if parsing failed, so the
                // checks below would throw inside this promise handler.
                t.fail('failed to parse error message');
                t.end();
                return;
            }
            if (errorJson.message) {
                t.equal(errorJson.message, userNotFoundErrorMessage, 'correct error message is returned');
            } else {
                t.fail('error does not contain a message');
            }
        } else {
            t.fail('invalid error returned');
        }
        t.end();
    });
});
// A non-string issue body makes the front-matter/markdown parse fail, which
// should surface as a rejected promise with an Error.
test('events: error is returned when the issue body can not be parsed', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: 12345 // incorrect type will cause markdown parsing to fail
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login });
    events(payload).catch(function(error) {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        t.equal(error instanceof Error, true, 'error returned');
        t.end();
    });
});
// An empty body parses fine but yields no date, which is mandatory for events.
test('events: error is returned when the parsed issue body does not have a date', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: ''
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login });
    events(payload).catch(function(error) {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        if (error instanceof Error) {
            t.pass('error returned');
            t.equal(error.message, 'invalid event. Date missing.', 'correct error message is returned');
        } else {
            t.fail('invalid error returned');
        }
        t.end();
    });
});
// The event file does not exist (GET 404), so a create (PUT) is attempted;
// a 500 on the PUT should reject with the github error payload.
test('events: error is returned if failed to create new event', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var eventNotCreatedErrorMessage = 'foo bar baz';
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(500, { message: eventNotCreatedErrorMessage });
    events(payload).catch(function(error) {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        if (error instanceof Error) {
            t.pass('error returned');
            var errorJson;
            try {
                errorJson = JSON.parse(error.message);
            } catch (e) {
                // Bail out: errorJson is undefined if parsing failed, so the
                // checks below would throw inside this promise handler.
                t.fail('failed to parse error message');
                t.end();
                return;
            }
            if (errorJson.message) {
                t.equal(errorJson.message, eventNotCreatedErrorMessage, 'correct error message is returned');
            } else {
                t.fail('error does not contain a message');
            }
        } else {
            t.fail('invalid error returned');
        }
        t.end();
    });
});
// The event file already exists (GET 200 with contents + sha), so an update
// (PUT) is attempted; a 500 on the PUT should reject with the error payload.
test('events: error is returned if failed to update existing event', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var eventNotUpdatedErrorMessage = 'foo bar baz';
    var previousEventId = 1;
    var existingFileContents = '{"id":' + previousEventId + '}';
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(200, {
            content: existingFileContents,
            encoding: 'utf8',
            sha: 'abc12345'
        })
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: 'Updated event ' + previousEventId,
            content: /.*/
        })
        .reply(500, { message: eventNotUpdatedErrorMessage });
    events(payload).catch(function(error) {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        if (error instanceof Error) {
            t.pass('error returned');
            var errorJson;
            try {
                errorJson = JSON.parse(error.message);
            } catch (e) {
                // Bail out: errorJson is undefined if parsing failed, so the
                // checks below would throw inside this promise handler.
                t.fail('failed to parse error message');
                t.end();
                return;
            }
            if (errorJson.message) {
                t.equal(errorJson.message, eventNotUpdatedErrorMessage, 'correct error message is returned');
            } else {
                t.fail('error does not contain a message');
            }
        } else {
            t.fail('invalid error returned');
        }
        t.end();
    });
});
// Happy path: GET 404 (no existing file) followed by a successful create PUT.
test('events: event is created if it does not exist', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(201);
    events(payload).then(function() {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        t.end();
    });
});
// Happy path: GET 200 with existing contents + sha followed by an update PUT.
test('events: event is updated if it exists', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var previousEventId = 1;
    var existingFileContents = '{"id":' +previousEventId + '}';
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(200, {
            content: existingFileContents,
            encoding: 'utf8',
            sha: 'abc12345'
        })
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: 'Updated event ' + previousEventId,
            content: /.*/
        })
        .reply(200);
    events(payload).then(function() {
        t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
        t.end();
    });
});
// Verify the schema fields derived from the issue's front matter (id, dates,
// URLs, organizer, location defaults from config).
test('events: event has correct data', function (t) {
    nock.cleanAll();
    var payloadIssueUrl = 'http://www.example.com/foo';
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            url: payloadIssueUrl,
            body: "---\r\nname: Example Name\r\ndate: 31.12.2099\r\n\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(201);
    events(payload).then(function(result) {
        if (!githubRequests.isDone()) {
            t.fail('expected http requests were not made to github');
        }
        // @todo check it uses config.schema.default_event
        t.equal(result.startDate, '2099-12-31T19:00:00Z', 'event has correct start date');
        t.equal(result.id, '20991231-example-name', 'event has correct ID');
        t.deepEqual(result.organizer, config.schema.default_organizer, 'event has correct organizer');
        t.equal(result.github, payloadIssueUrl, 'event has correct github issue URL');
        t.equal(result.url, config.schema.default_event_url + '20991231-example-name.html', 'event has correct URL');
        t.equal(result.name, 'Example Name', 'event has correct name');
        t.equal(result.about, config.about, 'event has correct about information');
        t.deepEqual(result.location, config.schema.default_event.location, 'event has correct location');
        t.end();
    });
});
// A 'time' field in the front matter overrides the default start time.
test('events: event has correct data: supplied start time is used', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\ntime: '19:45'\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(201);
    events(payload).then(function(result) {
        if (!githubRequests.isDone()) {
            t.fail('expected http requests were not made to github');
        }
        t.equal(result.startDate, '2099-12-31T19:45:00Z', 'event has correct start time');
        t.end();
    });
});
// Separators in the time ('.19,45') are normalized to ':'.
// NOTE(review): the expected value keeps a leading '.' in the formatted time
// ('T.19:45:00Z') — looks odd, but it pins current formatter behavior; confirm
// whether that is intended before changing either side.
test('events: event has correct data: supplied start time is formatted', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\ntime: '.19,45'\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(201);
    events(payload).then(function(result) {
        if (!githubRequests.isDone()) {
            t.fail('expected http requests were not made to github');
        }
        t.equal(result.startDate, '2099-12-31T.19:45:00Z', 'event has correct start time');
        t.end();
    });
});
// A comma-separated 'address' front-matter field is split into a
// schema.org-style PostalAddress (street, postal code, locality).
// Note the leading spaces in the expected locality/postal code: the split
// keeps the separators' surrounding whitespace.
test('events: event has correct data: address is formatted', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\naddress: Example Street Address, Example Postal Code, Example Address Locality\r\n\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(201);
    events(payload).then(function(result) {
        if (!githubRequests.isDone()) {
            t.fail('expected http requests were not made to github');
        }
        var expectedLocation = {
            address: {
                type: 'PostalAddress',
                addressLocality: ' Example Address Locality',
                postalCode: ' Example Postal Code',
                streetAddress: 'Example Street Address'
            },
            type: config.schema.default_event.location.type,
            url: config.schema.default_event.location.url
        };
        t.deepEqual(result.location, expectedLocation, 'event has correct location');
        t.end();
    });
});
// A 'venue' front-matter field becomes the name on the event's address.
test('events: event has correct data: address has venue', function (t) {
    nock.cleanAll();
    var payload = {
        sender: {
            login: 'foo'
        },
        issue: {
            user: {
                login: 'bar'
            },
            body: "---\r\ndate: 31.12.2099\r\nvenue: Example Venue\r\naddress: Example Street Address, Example Postal Code, Example Address Locality\r\n\r\n---"
        },
        labelMap: [config.labels.event]
    };
    var githubRequests = nock(/api\.github\.com:443/)
        .get(new RegExp('/users/' + payload.sender.login))
        .reply(200, { login: payload.sender.login })
        .get(new RegExp('/users/' + payload.issue.user.login))
        .reply(200, { login: payload.issue.user.login })
        .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
        .reply(404)
        .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
            message: /Created event/,
            content: /.*/
        })
        .reply(201);
    events(payload).then(function(result) {
        if (!githubRequests.isDone()) {
            t.fail('expected http requests were not made to github');
        }
        t.deepEqual(result.location.address.name, 'Example Venue', 'event has correct venue');
        t.end();
    });
});
// A talk proposal without a milestone cannot be attached to an event; the
// bot should comment on the issue (mentioning the sender) and reject with
// a 'missing_milestone' Error.
test('events: error is returned if a milestone is not included on a talk proposal and a comment is left on the issue', function (t) {
  nock.cleanAll();
  var payload = {
    sender: {
      login: 'foo'
    },
    issue: {
      number: 1,
      user: {
        login: 'bar'
      }
    },
    labelMap: [config.labels.talk]
  };
  // Expect a POST that leaves a comment @-mentioning the sender.
  var githubRequests = nock(/api\.github\.com:443/)
    .get(new RegExp('/users/' + payload.sender.login))
    .reply(200, { login: payload.sender.login })
    .get(new RegExp('/users/' + payload.issue.user.login))
    .reply(200, { login: payload.issue.user.login })
    .post(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.speakers + '/issues/' + payload.issue.number + '/comments'), {
      body: new RegExp('@' + payload.sender.login)
    })
    .reply(201);
  events(payload).catch(function(error) {
    t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
    if (error instanceof Error) {
      t.pass('error returned');
      t.equal(error.message, 'missing_milestone', 'correct error message is returned');
    } else {
      t.fail('invalid error returned');
    }
    t.end();
  });
});

// When the milestone references an event file that does not exist in the
// gitevent repo (GitHub returns 404), the promise rejects with a plain
// string error (not an Error instance).
test('events: error is returned if a talk cannot be added to an event due to event not existing', function (t) {
  nock.cleanAll();
  var payload = {
    sender: {
      login: 'foo'
    },
    issue: {
      milestone: {
        description: 'foo-bar-baz'
      },
      number: 1,
      user: {
        login: 'bar'
      }
    },
    labelMap: [config.labels.talk]
  };
  var githubRequests = nock(/api\.github\.com:443/)
    .get(new RegExp('/users/' + payload.sender.login))
    .reply(200, { login: payload.sender.login })
    .get(new RegExp('/users/' + payload.issue.user.login))
    .reply(200, { login: payload.issue.user.login })
    .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
    .reply(404);
  events(payload).catch(function(error) {
    t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
    t.equal(error, 'event not found.', 'correct error message is returned');
    t.end();
  });
});
// When GitHub rejects the event-file update (HTTP 500), the service should
// reject with an Error whose message is the JSON-encoded GitHub response.
test('events: error is returned if failed to add talk to an event', function (t) {
  nock.cleanAll();
  var payload = {
    sender: {
      login: 'foo'
    },
    issue: {
      milestone: {
        description: 'foo-bar-baz'
      },
      number: 1,
      user: {
        login: 'bar'
      }
    },
    labelMap: [config.labels.talk]
  };
  var eventNotUpdatedErrorMessage = 'foo bar baz';
  var eventId = 1;
  var existingFileContents = '{"id":' + eventId + '}';
  var githubRequests = nock(/api\.github\.com:443/)
    .get(new RegExp('/users/' + payload.sender.login))
    .reply(200, { login: payload.sender.login })
    .get(new RegExp('/users/' + payload.issue.user.login))
    .reply(200, { login: payload.issue.user.login })
    .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
    .reply(200, {
      content: existingFileContents,
      encoding: 'utf8',
      sha: 'abc12345'
    })
    .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
      message: 'Updated event ' + eventId,
      content: /.*/
    })
    .reply(500, { message: eventNotUpdatedErrorMessage });
  events(payload).catch(function(error) {
    t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
    if (!(error instanceof Error)) {
      t.fail('invalid error returned');
      t.end();
      return;
    }
    t.pass('error returned');
    // BUG FIX: errorJson was declared inside the try block and then
    // dereferenced unconditionally after the catch; a parse failure would
    // crash with a TypeError instead of reporting the test failure.
    var errorJson;
    try {
      errorJson = JSON.parse(error.message);
    } catch (e) {
      t.fail('failed to parse error message');
      t.end();
      return;
    }
    if (errorJson.message) {
      t.equal(errorJson.message, eventNotUpdatedErrorMessage, 'correct error message is returned');
    } else {
      t.fail('error does not contain a message');
    }
    t.end();
  });
});
// Happy path: a labelled talk with a milestone is merged into the existing
// event file via a PUT with an "Updated event <id>" commit message.
test('events: talk is added to event', function (t) {
  nock.cleanAll();
  var payload = {
    sender: {
      login: 'foo'
    },
    issue: {
      milestone: {
        description: 'foo-bar-baz'
      },
      number: 1,
      user: {
        login: 'bar'
      }
    },
    labelMap: [config.labels.talk]
  };
  var eventId = 1;
  var existingFileContents = '{"id":' + eventId + '}';
  var githubRequests = nock(/api\.github\.com:443/)
    .get(new RegExp('/users/' + payload.sender.login))
    .reply(200, { login: payload.sender.login })
    .get(new RegExp('/users/' + payload.issue.user.login))
    .reply(200, { login: payload.issue.user.login })
    .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
    .reply(200, {
      content: existingFileContents,
      encoding: 'utf8',
      sha: 'abc12345'
    })
    .put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
      message: 'Updated event ' + eventId,
      content: /.*/
    })
    .reply(200);
  events(payload).then(function() {
    t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
    t.end();
  });
});

// Idempotency: when a performer with the same derived id
// ("YYYYMMDD-<slugified title>") already exists on the event, no PUT is
// issued and the unchanged event is returned.
test('events: talk is not added to event if it already exists', function (t) {
  nock.cleanAll();
  var payload = {
    sender: {
      login: 'foo'
    },
    issue: {
      milestone: {
        description: 'foo-bar-baz',
        due_on: '2099-12-31T19:45:00Z'
      },
      title: 'Example title',
      number: 1,
      user: {
        login: 'bar'
      }
    },
    labelMap: [config.labels.talk]
  };
  var existingEvent = {
    id: 1,
    performer: [
      {
        id: "20991231-example-title"
      }
    ]
  };
  var existingFileContents = JSON.stringify(existingEvent);
  // Note: no .put() is mocked — an attempted update would make
  // githubRequests.isDone() true but fail later with an unmocked request.
  var githubRequests = nock(/api\.github\.com:443/)
    .get(new RegExp('/users/' + payload.sender.login))
    .reply(200, { login: payload.sender.login })
    .get(new RegExp('/users/' + payload.issue.user.login))
    .reply(200, { login: payload.issue.user.login })
    .get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
    .reply(200, {
      content: existingFileContents,
      encoding: 'utf8',
      sha: 'abc12345'
    });
  events(payload).then(function(event) {
    t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
    t.deepEqual(event, existingEvent, 'event is returned');
    t.end();
  });
});
test('events: talk has correct data', function (t) {
nock.cleanAll();
var payload = {
sender: {
login: 'foo'
},
issue: {
milestone: {
description: 'foo-bar-baz',
due_on: '2099-12-31T19:45:00Z'
},
title: 'Example title',
number: 1,
user: {
login: 'bar'
}
},
labelMap: [config.labels.talk]
};
var eventId = 1;
var existingFileContents = '{"id":' + eventId + '}';
var speakerAvatarUrl = 'http://www.example.com/avatar/bar';
var speakerName = 'bar';
var speakerUrl = 'http://www.example.com/bar';
var githubRequests = nock(/api\.github\.com:443/)
.get(new RegExp('/users/' + payload.sender.login))
.reply(200, { login: payload.sender.login })
.get(new RegExp('/users/' + payload.issue.user.login))
.reply(200, {
login: payload.issue.user.login,
avatar_url:speakerAvatarUrl,
name: speakerName,
url: speakerUrl
})
.get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
.reply(200, {
content: existingFileContents,
encoding: 'utf8',
sha: 'abc12345'
})
.put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
message: 'Updated event ' + eventId,
content: /.*/
})
.reply(200);
events(payload).then(function(event) {
t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
if (event.performer && event.performer.length == 1) {
t.pass('event has correct number of speakers');
var speaker = event.performer[0];
t.equal(speaker.image, speakerAvatarUrl, 'speaker has correct avatar url');
t.equal(speaker.name, speakerName, 'speaker has correct name');
t.equal(speaker.id, '20991231-example-title', 'speaker has correct id');
t.equal(speaker.sameAs, speakerUrl, 'speaker has correct URL');
t.equal(speaker.url, config.schema.default_talk_url + '20991231-example-title.html', 'talk has correct URL');
} else {
t.fail('event does not have correct number of performers');
}
t.end();
});
});
test('events: talk: speaker is added to existing speakers', function (t) {
nock.cleanAll();
var payload = {
sender: {
login: 'foo'
},
issue: {
milestone: {
description: 'foo-bar-baz',
due_on: '2099-12-31T19:45:00Z'
},
title: 'Example title',
number: 1,
user: {
login: 'bar'
}
},
labelMap: [config.labels.talk]
};
var eventId = 1;
var existingFileContents = '{"id":' + eventId + ',"performer":[{}]}';
var githubRequests = nock(/api\.github\.com:443/)
.get(new RegExp('/users/' + payload.sender.login))
.reply(200, { login: payload.sender.login })
.get(new RegExp('/users/' + payload.issue.user.login))
.reply(200, { login: payload.issue.user.login })
.get(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'))
.reply(200, {
content: existingFileContents,
encoding: 'utf8',
sha: 'abc12345'
})
.put(new RegExp('/repos/' + config.github.org + '/' + config.github.repos.gitevent + '/contents/(.+).json'), {
message: 'Updated event ' + eventId,
content: /.*/
})
.reply(200);
events(payload).then(function(event) {
t.equal(githubRequests.isDone(), true, 'expected http requests made to github');
t.equal(event.performer.length, 2, 'event has correct number of speakers');
t.end();
});
});
|
def compute_totals(ts, queries):
    """Return the new total of ``ts`` after each hypothetical update.

    Each query ``(p, x)`` replaces ``ts[p-1]`` with ``x`` (``ts`` itself
    is never mutated) and the resulting sum is reported.  Runs in O(1)
    per query by adjusting a precomputed base sum instead of re-summing
    the whole list for every query (previously O(N) per query).
    """
    base = sum(ts)
    return [base - ts[p - 1] + x for p, x in queries]


def main():
    # input
    N = int(input())  # element count; implied by the list, kept for the format
    Ts = [*map(int, input().split())]
    M = int(input())
    PXs = [[*map(int, input().split())] for _ in range(M)]
    # compute + output
    for total in compute_totals(Ts, PXs):
        print(total)


if __name__ == '__main__':
    main()
|
var gulp = require("gulp");
var pkg = require("./package.json");
var uglify = require("gulp-uglify");
var minifycss = require('gulp-minify-css');
var del = require('del');
var concat = require('gulp-concat');
var rename = require('gulp-rename');
var header = require('gulp-header');
//var banner = ['/**',
// ' * <%= pkg.name %> - <%= pkg.description %>',
// ' * @version v<%= pkg.version %>',
// ' * @link <%= pkg.homepage %>',
// ' * @license <%= pkg.license %>',
// ' */',
// ''
//].join('\r\n');
// Banner prepended to the minified picker bundles (CRLF line endings to
// match the rest of the generated headers).
var picker_banner = ['/**',
  '* 选择列表插件',
  '* version 2.0.0', // fixed typo: was "varstion"
  '* by Houfeng',
  '* Houfeng@DCloud.io',
  '**/',
  ''
].join('\r\n');
// Remove previously built artifacts before rebuilding.
// BUG FIX: del is promise-based (see the promise usage in the commented
// example below); passing `cb` as a second argument is ignored, so gulp's
// completion callback was never invoked and dependent tasks ("build")
// could start before — or wait forever on — the cleanup.
gulp.task('clear_picker', function(cb) {
  del(['dist/js/*.js', 'dist/css/*.css']).then(function() {
    cb();
  }, cb);
  // del(['dist/js/*.js', 'dist/css/*.css']).then(paths => {
  //   console.log('Deleted files and folders:\n', paths.join('\n'));
  // });
});
// Build task: concatenates the picker CSS and JS into "all" bundles, then
// minifies each into a ".min" variant with the banner header prepended.
// NOTE(review): neither stream is returned from the task, so gulp cannot
// know when the build actually finishes — tolerable here only because no
// other task depends on "build" completing.
gulp.task('build', ["clear_picker"], function() {
  //css: concat -> dist (unminified) -> minify -> rename -> banner -> dist
  gulp.src(["./css/mui.picker.css",
      "./css/mui.poppicker.css",
      "./css/mui.dtpicker.css"
    ])
    .pipe(concat("mui.picker.all.css"))
    //.pipe(header(picker_banner))
    .pipe(gulp.dest("./dist/css/"))
    .pipe(minifycss())
    .pipe(rename("mui.picker.min.css"))
    .pipe(header(picker_banner))
    .pipe(gulp.dest("./dist/css/"));
  //js: same pipeline with uglify instead of minifycss
  gulp.src(["./js/mui.picker.js",
      "./js/mui.poppicker.js",
      "./js/mui.dtpicker.js"
    ])
    .pipe(concat("mui.picker.all.js"))
    //.pipe(header(picker_banner))
    .pipe(gulp.dest("./dist/js/"))
    .pipe(uglify())
    .pipe(rename("mui.picker.min.js"))
    .pipe(header(picker_banner))
    .pipe(gulp.dest("./dist/js/"));
});

// Default entry point: just runs the build.
gulp.task('default', ["build"]);
|
import React from 'react';
const GridItemImageView = ({imageFolder, name, fileType}) => {
let imageName = name.replace(/ /g, "-").replace(/'/g,"").toLowerCase();
fileType = fileType || 'png';
return (
<td>
<img alt={imageName} className="resource-icon" src={`${imageFolder}/${imageName}.${fileType}`}/>
</td>
)
}
export default GridItemImageView;
|
'use strict';

const express = require('express');
const router = express.Router();
const log = require('../../libs/log.js')(module);
const copy = require('../../models/project/copy.js').copy;

// POST / — duplicate the project identified by `projectId` in the request
// body; responds 200 on success, delegates failures to the error handler.
router.post('/', (req, res, next) => {
  const projectId = req.body.projectId;
  copy(projectId, (err) => {
    if (err) {
      next(err);
      return;
    }
    res.status(200).end();
  });
});

module.exports = router;
|
/*eslint no-unused-vars: "warn"*/
const esb = require('elastic-builder');
const { RESOURCES } = require('../../constants');
const { resolveSchema } = require('../../node-fhir-server');
const { search: querySearch, searchReferenecePromise, searchId, create, _delete, update} = require('../../utils/es.helper.functions.util');
const { periodQuery, identifierQuery, codeableConceptQueries, referenceQuery, numberQuery } = require('../../utils/es.querybuilder.util');
const logger = require('../../node-fhir-server').loggers.get();
// Elasticsearch index and FHIR resource handled by this service.
let indexName = 'fhirencounter';
let resourceName = RESOURCES.ENCOUNTER;

// Lazily resolve the version-specific FHIR schema constructors so the
// correct base_version (e.g. DSTU3 vs R4) classes are loaded per request.
let getBundle = (base_version) => require(resolveSchema(base_version, 'Bundle'));

let getEncounter = (base_version) => require(resolveSchema(base_version, resourceName));

let getMeta = (base_version) => require(resolveSchema(base_version, 'Meta'));
// $everything operation: resolves the encounter plus its related resources
// (patient, conditions, procedures, medication administrations,
// observations, diagnostic reports) into one searchset Bundle.
// NOTE(review): relies on `this` being `module.exports` inside the arrow
// (true at CommonJS module scope), so `this.searchById` resolves to the
// handler exported further down this file — confirm before refactoring to
// a strict/ESM context.
module.exports.everything = (args) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> everything');
  let { base_version, id } = args;
  let relatedPromises = [];
  let Bundle = getBundle(base_version);
  // Empty-bundle fallback: returned when the encounter is missing or any
  // related lookup fails (errors are logged, never rejected).
  let default_bundle = new Bundle({type: 'searchset', entry: []});
  this.searchById(args).then((encounter) => {
    // Temporary sanitization of references
    const { searchById: patientSearch } = require('../patient/patient.service');
    const { search: conditionSearch } = require('../condition/condition.service');
    const { search: procedureSearch } = require('../procedure/procedure.service');
    const { search: medicationAdministrationSearch } = require('../medicationadministration/medicationadministration.service');
    const { search: observationSearch } = require('../observation/observation.service');
    const { search: diagnosticReportSearch } = require('../diagnosticreport/diagnosticreport.service');
    let resultResources = [];
    if (!encounter) {
      logger.info(`${resourceName} >>> ${id} >>> not found`);
      resolve(default_bundle);
      return;
    }
    resultResources.push({resource: encounter});
    // The subject may be referenced either by literal reference or by
    // identifier; identifier wins when both are present.
    let subject_id = '';
    if (encounter.subject && encounter.subject.reference) {
      subject_id = encounter.subject.reference.replace('urn:uuid:', '');
    }
    if (encounter.subject && encounter.subject.identifier && encounter.subject.identifier.value) {
      subject_id = encounter.subject.identifier.value.replace('urn:uuid:', '');
    }
    // _rawresource makes the sub-services return plain resources instead
    // of wrapped bundles so they can be embedded directly below.
    relatedPromises.push(patientSearch({id: subject_id, base_version: base_version, _rawresource: true}));
    relatedPromises.push(procedureSearch({context: { id: id }, base_version: base_version, _rawresource: true}));
    relatedPromises.push(conditionSearch({context: { id: id }, base_version: base_version,_rawresource: true}));
    relatedPromises.push(medicationAdministrationSearch({context: { id: id }, base_version: base_version, _rawresource: true}));
    relatedPromises.push(observationSearch({context: { id: id}, base_version: base_version, _rawresource: true}));
    relatedPromises.push(diagnosticReportSearch({context: { id: id}, base_version: base_version, _rawresource: true}));
    // Always true here (six promises are pushed above); kept as a guard.
    if (relatedPromises.length > 0) {
      Promise.all(relatedPromises).then((results) =>
      {
        results.forEach((resource) => {
          if (!resource) {
            return
          }
          // search() results come back as arrays; searchById() as a
          // single resource — flatten both into the bundle entries.
          if (resource.constructor === Array) {
            resource.forEach((subresource) => {
              resultResources.push(subresource);
            });
          } else {
            // TODO: add fullUrl to the properties -> meta data needed
            resultResources.push(resource);
          }
        });
        let resultBundle = new Bundle({total: resultResources.length, type: 'searchset', entry: resultResources});
        resolve(resultBundle);
      }
      ).catch((error) => {
        logger.info(resourceName + ' related resource error >>> ' + error.toString());
        resolve(default_bundle);
      });
    }
  }).catch((error) => {
    logger.info(resourceName + ' >>> ' + error.toString());
    resolve(default_bundle);
  });
});
/**
 * Translate DSTU3 Encounter search parameters into an elastic-builder
 * bool query.  `id` is a hard filter (must); every other recognized
 * parameter contributes optional `should` clauses.  Composite parameter
 * types (period, identifier, codeable concept, reference, number) are
 * delegated to the shared query-builder helpers, and fields stored as
 * nested documents are wrapped in nestedQuery accordingly.
 *
 * @param {Object} args - parsed search parameters keyed by FHIR name
 * @returns {Object} an esb bool query ready to execute
 */
let buildDstu3SearchQuery = (args) => {
  // Common search params (currently unused but destructured for parity
  // with the other services).
  let { base_version, _content, _format, _id, _lastUpdated, _profile, _query, _text, _security, _tag} = args;

  // Resource Specific params
  let id = args['id'];
  let appointment = args['appointment'];
  let _class = args['class'];
  let date = args['date'];
  let diagnosis = args['diagnosis'];
  let episodeofcare = args['episodeofcare'];
  let identifier = args['identifier'];
  let incomingreferral = args['incomingreferral'];
  let length = args['length'];
  let location = args['location'];
  let location_period = args['location-period'];
  let part_of = args['part-of'];
  let participant = args['participant'];
  let participant_type = args['participant-type'];
  let patient = args['patient'];
  let practitioner = args['practitioner'];
  let reason = args['reason'];
  let service_provider = args['service-provider'];
  let status = args['status'];
  let type = args['type'];

  let boolQuery = esb.boolQuery();
  if (id) {
    boolQuery = boolQuery.must(esb.termQuery('id', id));
  }
  if (status) {
    boolQuery = boolQuery.should(esb.termQuery('status', status));
  }
  if (_class) {
    // `class` may match either the coded value or the display text.
    boolQuery = boolQuery.should(esb.termQuery('class.code', _class));
    boolQuery = boolQuery.should(esb.termQuery('class.display', _class));
  }
  if (appointment) {
    boolQuery = boolQuery.should(esb.termQuery('appointment.type', appointment));
  }
  if (date) {
    boolQuery = periodQuery(boolQuery, 'period', date);
  }
  if (diagnosis) {
    // diagnosis is a nested object: match the condition reference or the
    // role codeable concept.
    let rq = referenceQuery('diagnosis.condition', diagnosis);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'diagnosis'));
    });
    let ccq = codeableConceptQueries('diagnosis.role', diagnosis);
    ccq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'diagnosis'));
    });
  }
  if (episodeofcare) {
    let rq = referenceQuery('episodeOfCare', episodeofcare);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'episodeOfCare'));
    });
  }
  if (identifier) {
    let identifierQueries = identifierQuery('identifier', identifier);
    identifierQueries.forEach((query) => {
      boolQuery = boolQuery.should(query);
    });
  }
  if (reason) {
    let ccq = codeableConceptQueries('reason', reason);
    ccq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'reason'));
    });
  }
  if (incomingreferral) {
    let rq = referenceQuery('incomingReferral', incomingreferral);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'incomingReferral'));
    });
  }
  if (length) {
    boolQuery = boolQuery.should(esb.termQuery('length.code', length));
    boolQuery = numberQuery(boolQuery, 'length.value', length);
  }
  if (location) {
    boolQuery = boolQuery.should(esb.nestedQuery(esb.termQuery('location.status', location), 'location'));
    let rq = referenceQuery('location.location', location);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'location'));
    });
  }
  if (location_period) {
    boolQuery = periodQuery(boolQuery, 'location.period', location_period);
  }
  if (part_of) {
    let rq = referenceQuery('partOf', part_of);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(query);
    });
  }
  if (patient) {
    // Uses the module-level referenceQuery helper (a shadowing local
    // re-require of the same helper was removed).
    let rq = referenceQuery('subject', patient);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(query);
    });
  }
  if (participant) {
    let ccq = codeableConceptQueries('participant.type', participant);
    ccq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(esb.nestedQuery(query, 'participant.type'), 'participant'));
    });
    // BUG FIX: the individual-reference clause was built from the
    // unrelated `practitioner` argument (usually undefined in this
    // branch) instead of the `participant` value being handled here.
    let rq = referenceQuery('participant.individual', participant);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(query, 'participant'));
    });
  }
  if (participant_type) {
    let ccq = codeableConceptQueries('participant.type', participant_type);
    ccq.forEach((query) => {
      boolQuery = boolQuery.should(esb.nestedQuery(esb.nestedQuery(query, 'participant.type'), 'participant'));
    });
  }
  if (service_provider) {
    let rq = referenceQuery('serviceProvider', service_provider);
    rq.forEach((query) => {
      boolQuery = boolQuery.should(query);
    });
  }
  if (type) {
    // BUG FIX: codeableConceptQueries takes (field, value); a stray
    // leading boolQuery argument shifted both parameters and built
    // queries against the wrong field.
    let ccq = codeableConceptQueries('type', type);
    ccq.forEach((query) => {
      boolQuery = boolQuery.should(query);
    });
  }
  return boolQuery;
};
// Search encounters.  Plain parameters become a single ES bool query;
// `practitioner` and `subject` are chained searches that first resolve the
// referenced resources, then constrain the encounter query by the matches.
module.exports.search = (args) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> search');

  let { base_version } = args;
  let boolQuery = buildDstu3SearchQuery(args);

  let pendingReferenceSearches = [];
  let practitionerArgs = args['practitioner'];
  if (practitionerArgs) {
    const { search: practitionerSearch } = require('../practitioner/practitioner.service');
    pendingReferenceSearches.push(
      practitionerSearch({...practitionerArgs, base_version: base_version, _rawresource: true})
    );
  }
  let subjectArgs = args['subject'];
  if (subjectArgs) {
    const { search: patientSearch } = require('../patient/patient.service');
    pendingReferenceSearches.push(
      patientSearch({...subjectArgs, base_version: base_version, _rawresource: true})
    );
  }

  // Maps the type of a resolved referenced resource to the encounter
  // field it should filter on.
  let referenceResultMapping = {
    Patient: 'subject',
    Practitioner: 'participant'
  };

  if (pendingReferenceSearches.length === 0) {
    querySearch(boolQuery, indexName, args, resolve, reject);
    return;
  }
  searchReferenecePromise(pendingReferenceSearches, referenceResultMapping, boolQuery, indexName, args, resolve, reject);
});
// Fetch a single encounter document by its id.
module.exports.searchById = (args) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> searchById');
  const { base_version, id } = args;
  const EncounterResource = getEncounter(base_version);
  searchId(id, indexName, EncounterResource, resolve, reject, resourceName);
});
// Persist a new encounter taken verbatim from the request body.
module.exports.create = (args, { req }) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> create');
  const { base_version } = args;
  const EncounterResource = getEncounter(base_version);
  const MetaResource = getMeta(base_version);
  create(req.body, indexName, EncounterResource, MetaResource, resolve, reject);
});
// Replace the encounter identified by `id` with the request body.
module.exports.update = (args, { req }) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> update');
  const { base_version, id } = args;
  const EncounterResource = getEncounter(base_version);
  const MetaResource = getMeta(base_version);
  update(id, req.body, indexName, EncounterResource, MetaResource, resolve, reject, resourceName);
});
// Delete the encounter document with the given id.
module.exports.remove = (args) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> remove');
  _delete(args.id, indexName, resolve, reject);
});
// Fetch a specific historical version of an encounter.
// BUG FIX: both clauses were `should`; an ES bool query with only should
// clauses matches when at least ONE matches, so a document with the right
// id but the wrong version (or vice versa) could be returned.  Both the
// document id and the version id must match.
module.exports.searchByVersionId = (args) => new Promise((resolve, reject) => {
  logger.info(resourceName + ' >>> searchByVersionId');
  let { id, version_id } = args;
  let boolQuery = esb.boolQuery();
  if (id) {
    boolQuery = boolQuery.must(esb.termQuery('_id', id));
  }
  if (version_id) {
    boolQuery = boolQuery.must(esb.termQuery('meta.versionId', version_id));
  }
  querySearch(boolQuery, indexName, args, resolve, reject);
});
// History search: same query grammar as `search`, run against the
// companion `<index>_history` index.
module.exports.history = (args) => new Promise((resolve, reject) => {
  logger.info(`${resourceName} >>> history`);
  querySearch(buildDstu3SearchQuery(args), `${indexName}_history`, args, resolve, reject);
});
// Fetch the history entries of a single encounter by id.
module.exports.historyById = (args) => new Promise((resolve, reject) => {
  // Consistency fix: every other exported handler logs its entry point;
  // this one was silent.
  logger.info(`${resourceName} >>> historyById`);
  let { base_version, id } = args;
  let Encounter = getEncounter(base_version);
  searchId(id, `${indexName}_history`, Encounter, resolve, reject, resourceName);
});
|
"""Class to perform over-sampling using SMOTE."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# Dzianis Dudnik
# License: MIT
import math
from collections import Counter
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import SVC
from sklearn.utils import check_random_state
from sklearn.utils import _safe_indexing
from sklearn.utils import check_array
from sklearn.utils.sparsefuncs_fast import csr_mean_variance_axis0
from sklearn.utils.sparsefuncs_fast import csc_mean_variance_axis0
from .base import BaseOverSampler
from ..exceptions import raise_isinstance_error
from ..utils import check_neighbors_object
from ..utils import check_target_type
from ..utils import Substitution
from ..utils._docstring import _n_jobs_docstring
from ..utils._docstring import _random_state_docstring
from ..utils._validation import _deprecate_positional_args
class BaseSMOTE(BaseOverSampler):
    """Base class for the different SMOTE algorithms."""

    def __init__(
        self,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(sampling_strategy=sampling_strategy)
        self.random_state = random_state
        self.k_neighbors = k_neighbors
        self.n_jobs = n_jobs

    def _validate_estimator(self):
        """Check the NN estimators shared across the different SMOTE
        algorithms.
        """
        # additional_neighbor=1 because kneighbors() includes the sample
        # itself as its own nearest neighbour.
        self.nn_k_ = check_neighbors_object(
            "k_neighbors", self.k_neighbors, additional_neighbor=1
        )

    def _make_samples(
        self, X, y_dtype, y_type, nn_data, nn_num, n_samples, step_size=1.0
    ):
        """A support function that returns artificial samples constructed along
        the line connecting nearest neighbours.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Points from which the points will be created.

        y_dtype : dtype
            The data type of the targets.

        y_type : str or int
            The minority target value, just so the function can return the
            target values for the synthetic variables with correct length in
            a clear format.

        nn_data : ndarray of shape (n_samples_all, n_features)
            Data set carrying all the neighbours to be used

        nn_num : ndarray of shape (n_samples_all, k_nearest_neighbours)
            The nearest neighbours of each sample in `nn_data`.

        n_samples : int
            The number of samples to generate.

        step_size : float, default=1.0
            The step size to create samples.

        Returns
        -------
        X_new : {ndarray, sparse matrix} of shape (n_samples_new, n_features)
            Synthetically generated samples.

        y_new : ndarray of shape (n_samples_new,)
            Target values for synthetic samples.
        """
        random_state = check_random_state(self.random_state)
        # Pick n_samples (base point, neighbour) pairs uniformly by drawing
        # a flat index into the (n_samples_all x k) neighbour table ...
        samples_indices = random_state.randint(
            low=0, high=nn_num.size, size=n_samples
        )

        # np.newaxis for backwards compatability with random_state
        steps = step_size * random_state.uniform(size=n_samples)[:, np.newaxis]
        # ... then unravel the flat index into (row = base sample,
        # col = which of its k neighbours).
        rows = np.floor_divide(samples_indices, nn_num.shape[1])
        cols = np.mod(samples_indices, nn_num.shape[1])

        X_new = self._generate_samples(X, nn_data, nn_num, rows, cols, steps)
        y_new = np.full(n_samples, fill_value=y_type, dtype=y_dtype)
        return X_new, y_new

    def _generate_samples(self, X, nn_data, nn_num, rows, cols, steps):
        r"""Generate a synthetic sample.

        The rule for the generation is:

        .. math::
           \mathbf{s_{s}} = \mathbf{s_{i}} + \mathcal{u}(0, 1) \times
           (\mathbf{s_{i}} - \mathbf{s_{nn}}) \,

        where \mathbf{s_{s}} is the new synthetic samples, \mathbf{s_{i}} is
        the current sample, \mathbf{s_{nn}} is a randomly selected neighbors of
        \mathbf{s_{i}} and \mathcal{u}(0, 1) is a random number between [0, 1).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Points from which the points will be created.

        nn_data : ndarray of shape (n_samples_all, n_features)
            Data set carrying all the neighbours to be used.

        nn_num : ndarray of shape (n_samples_all, k_nearest_neighbours)
            The nearest neighbours of each sample in `nn_data`.

        rows : ndarray of shape (n_samples,), dtype=int
            Indices pointing at feature vector in X which will be used
            as a base for creating new samples.

        cols : ndarray of shape (n_samples,), dtype=int
            Indices pointing at which nearest neighbor of base feature vector
            will be used when creating new samples.

        steps : ndarray of shape (n_samples,), dtype=float
            Step sizes for new samples.

        Returns
        -------
        X_new : {ndarray, sparse matrix} of shape (n_samples, n_features)
            Synthetically generated samples.
        """
        diffs = nn_data[nn_num[rows, cols]] - X[rows]

        if sparse.issparse(X):
            # Wrap `steps` in the same sparse format as X so the
            # element-wise multiply stays sparse.
            sparse_func = type(X).__name__
            steps = getattr(sparse, sparse_func)(steps)
            X_new = X[rows] + steps.multiply(diffs)
        else:
            X_new = X[rows] + steps * diffs

        return X_new.astype(X.dtype)

    def _in_danger_noise(
        self, nn_estimator, samples, target_class, y, kind="danger"
    ):
        """Estimate if a set of sample are in danger or noise.

        Used by BorderlineSMOTE and SVMSMOTE.

        Parameters
        ----------
        nn_estimator : estimator
            An estimator that inherits from
            :class:`sklearn.neighbors.base.KNeighborsMixin` use to determine
            if a sample is in danger/noise.

        samples : {array-like, sparse matrix} of shape (n_samples, n_features)
            The samples to check if either they are in danger or not.

        target_class : int or str
            The target corresponding class being over-sampled.

        y : array-like of shape (n_samples,)
            The true label in order to check the neighbour labels.

        kind : {'danger', 'noise'}, default='danger'
            The type of classification to use. Can be either:

            - If 'danger', check if samples are in danger,
            - If 'noise', check if samples are noise.

        Returns
        -------
        output : ndarray of shape (n_samples,)
            A boolean array where True refer to samples in danger or noise.
        """
        # [:, 1:] drops each sample's self-neighbour; nn_label flags
        # neighbours belonging to a DIFFERENT class than target_class.
        x = nn_estimator.kneighbors(samples, return_distance=False)[:, 1:]
        nn_label = (y[x] != target_class).astype(int)
        # n_maj = number of majority-class (other-class) neighbours m'.
        n_maj = np.sum(nn_label, axis=1)

        if kind == "danger":
            # Samples are in danger for m/2 <= m' < m
            return np.bitwise_and(
                n_maj >= (nn_estimator.n_neighbors - 1) / 2,
                n_maj < nn_estimator.n_neighbors - 1,
            )
        elif kind == "noise":
            # Samples are noise for m = m'
            return n_maj == nn_estimator.n_neighbors - 1
        else:
            raise NotImplementedError
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class BorderlineSMOTE(BaseSMOTE):
"""Over-sampling using Borderline SMOTE.
This algorithm is a variant of the original SMOTE algorithm proposed in
[2]_. Borderline samples will be detected and used to generate new
synthetic samples.
Read more in the :ref:`User Guide <smote_adasyn>`.
Parameters
----------
{sampling_strategy}
{random_state}
k_neighbors : int or object, default=5
If ``int``, number of nearest neighbours to used to construct synthetic
samples. If object, an estimator that inherits from
:class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the k_neighbors.
{n_jobs}
m_neighbors : int or object, default=10
If int, number of nearest neighbours to use to determine if a minority
sample is in danger. If object, an estimator that inherits
from :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used
to find the m_neighbors.
kind : {{"borderline-1", "borderline-2"}}, default='borderline-1'
The type of SMOTE algorithm to use one of the following options:
``'borderline-1'``, ``'borderline-2'``.
See Also
--------
SMOTE : Over-sample using SMOTE.
SMOTENC : Over-sample using SMOTE for continuous and categorical features.
SVMSMOTE : Over-sample using SVM-SMOTE variant.
ADASYN : Over-sample using ADASYN.
KMeansSMOTE : Over-sample applying a clustering before to oversample using
SMOTE.
Notes
-----
See the original papers: [2]_ for more details.
Supports multi-class resampling. A one-vs.-rest scheme is used as
originally proposed in [1]_.
References
----------
.. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
synthetic minority over-sampling technique," Journal of artificial
intelligence research, 321-357, 2002.
.. [2] H. Han, W. Wen-Yuan, M. Bing-Huan, "Borderline-SMOTE: a new
over-sampling method in imbalanced data sets learning," Advances in
intelligent computing, 878-887, 2005.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import \
BorderlineSMOTE # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> sm = BorderlineSMOTE(random_state=42)
>>> X_res, y_res = sm.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
k_neighbors=5,
n_jobs=None,
m_neighbors=10,
kind="borderline-1",
):
super().__init__(
sampling_strategy=sampling_strategy,
random_state=random_state,
k_neighbors=k_neighbors,
n_jobs=n_jobs,
)
self.m_neighbors = m_neighbors
self.kind = kind
def _validate_estimator(self):
super()._validate_estimator()
self.nn_m_ = check_neighbors_object(
"m_neighbors", self.m_neighbors, additional_neighbor=1
)
self.nn_m_.set_params(**{"n_jobs": self.n_jobs})
if self.kind not in ("borderline-1", "borderline-2"):
raise ValueError(
'The possible "kind" of algorithm are '
'"borderline-1" and "borderline-2".'
"Got {} instead.".format(self.kind)
)
    def _fit_resample(self, X, y):
        """Resample ``X``/``y`` using the borderline-SMOTE strategy.

        Only minority samples flagged as "in danger" (close to the class
        boundary according to the m-NN estimator) seed synthetic samples.
        """
        self._validate_estimator()
        X_resampled = X.copy()
        y_resampled = y.copy()
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            # m-NN is fitted on the full dataset so that a sample's
            # neighbourhood can contain samples from other classes.
            self.nn_m_.fit(X)
            danger_index = self._in_danger_noise(
                self.nn_m_, X_class, class_sample, y, kind="danger"
            )
            # No borderline sample for this class: nothing to generate.
            if not any(danger_index):
                continue
            # k-NN is fitted on the target class only: interpolation
            # partners are always same-class samples.
            self.nn_k_.fit(X_class)
            # Drop column 0 of the neighbours: it is the sample itself.
            nns = self.nn_k_.kneighbors(
                _safe_indexing(X_class, danger_index), return_distance=False
            )[:, 1:]
            # divergence between borderline-1 and borderline-2
            if self.kind == "borderline-1":
                # Create synthetic samples for borderline points.
                X_new, y_new = self._make_samples(
                    _safe_indexing(X_class, danger_index),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_samples,
                )
                if sparse.issparse(X_new):
                    X_resampled = sparse.vstack([X_resampled, X_new])
                else:
                    X_resampled = np.vstack((X_resampled, X_new))
                y_resampled = np.hstack((y_resampled, y_new))
            elif self.kind == "borderline-2":
                random_state = check_random_state(self.random_state)
                # Randomly split the sample budget between the two kinds
                # of generated samples (Beta(10, 10) concentrates near 0.5).
                fractions = random_state.beta(10, 10)
                # only minority
                X_new_1, y_new_1 = self._make_samples(
                    _safe_indexing(X_class, danger_index),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    int(fractions * (n_samples + 1)),
                    step_size=1.0,
                )
                # we use a one-vs-rest policy to handle the multiclass in which
                # new samples will be created considering not only the majority
                # class but all over classes.
                X_new_2, y_new_2 = self._make_samples(
                    _safe_indexing(X_class, danger_index),
                    y.dtype,
                    class_sample,
                    _safe_indexing(X, np.flatnonzero(y != class_sample)),
                    nns,
                    int((1 - fractions) * n_samples),
                    step_size=0.5,
                )
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack(
                        [X_resampled, X_new_1, X_new_2]
                    )
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1, X_new_2))
                y_resampled = np.hstack((y_resampled, y_new_1, y_new_2))
        return X_resampled, y_resampled
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SVMSMOTE(BaseSMOTE):
    """Over-sampling using SVM-SMOTE.
    Variant of SMOTE algorithm which use an SVM algorithm to detect sample to
    use for generating new synthetic samples as proposed in [2]_.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        If ``int``, number of nearest neighbours to used to construct synthetic
        samples. If object, an estimator that inherits from
        :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
        find the k_neighbors.
    {n_jobs}
    m_neighbors : int or object, default=10
        If int, number of nearest neighbours to use to determine if a minority
        sample is in danger. If object, an estimator that inherits from
        :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
        find the m_neighbors.
    svm_estimator : object, default=SVC()
        A parametrized :class:`sklearn.svm.SVC` classifier can be passed.
    out_step : float, default=0.5
        Step size when extrapolating.
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original papers: [2]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    .. [2] H. M. Nguyen, E. W. Cooper, K. Kamei, "Borderline over-sampling for
       imbalanced data classification," International Journal of Knowledge
       Engineering and Soft Data Paradigms, 3(1), pp.4-21, 2009.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import \
SVMSMOTE # doctest: +NORMALIZE_WHITESPACE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sm = SVMSMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    @_deprecate_positional_args
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
        m_neighbors=10,
        svm_estimator=None,
        out_step=0.5,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        # Stored unvalidated, per scikit-learn convention; validated in
        # ``_validate_estimator``.
        self.m_neighbors = m_neighbors
        self.svm_estimator = svm_estimator
        self.out_step = out_step
    def _validate_estimator(self):
        # Build the m-NN estimator (+1 neighbor because each query sample is
        # returned as its own first neighbor) and clone/instantiate the SVC.
        super()._validate_estimator()
        self.nn_m_ = check_neighbors_object(
            "m_neighbors", self.m_neighbors, additional_neighbor=1
        )
        self.nn_m_.set_params(**{"n_jobs": self.n_jobs})
        if self.svm_estimator is None:
            self.svm_estimator_ = SVC(
                gamma="scale", random_state=self.random_state
            )
        elif isinstance(self.svm_estimator, SVC):
            # Clone so fitting never mutates the user-supplied estimator.
            self.svm_estimator_ = clone(self.svm_estimator)
        else:
            raise_isinstance_error("svm_estimator", [SVC], self.svm_estimator)
    def _fit_resample(self, X, y):
        """Resample using the support vectors of an SVC fitted on ``X``.

        Support vectors of the target class are partitioned into noise
        (discarded), danger (interpolated, step 1.0) and safe
        (extrapolated outward by ``out_step``) samples.
        """
        self._validate_estimator()
        random_state = check_random_state(self.random_state)
        X_resampled = X.copy()
        y_resampled = y.copy()
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            # Keep only the support vectors belonging to the target class.
            self.svm_estimator_.fit(X, y)
            support_index = self.svm_estimator_.support_[
                y[self.svm_estimator_.support_] == class_sample
            ]
            support_vector = _safe_indexing(X, support_index)
            self.nn_m_.fit(X)
            # Remove support vectors classified as noise before splitting
            # the remainder into danger/safe groups.
            noise_bool = self._in_danger_noise(
                self.nn_m_, support_vector, class_sample, y, kind="noise"
            )
            support_vector = _safe_indexing(
                support_vector, np.flatnonzero(np.logical_not(noise_bool))
            )
            danger_bool = self._in_danger_noise(
                self.nn_m_, support_vector, class_sample, y, kind="danger"
            )
            safety_bool = np.logical_not(danger_bool)
            self.nn_k_.fit(X_class)
            # Randomly split the budget between interpolation (danger) and
            # extrapolation (safe); Beta(10, 10) concentrates near 0.5.
            fractions = random_state.beta(10, 10)
            n_generated_samples = int(fractions * (n_samples + 1))
            if np.count_nonzero(danger_bool) > 0:
                nns = self.nn_k_.kneighbors(
                    _safe_indexing(support_vector, np.flatnonzero(danger_bool)),
                    return_distance=False,
                )[:, 1:]
                X_new_1, y_new_1 = self._make_samples(
                    _safe_indexing(support_vector, np.flatnonzero(danger_bool)),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_generated_samples,
                    step_size=1.0,
                )
            if np.count_nonzero(safety_bool) > 0:
                nns = self.nn_k_.kneighbors(
                    _safe_indexing(support_vector, np.flatnonzero(safety_bool)),
                    return_distance=False,
                )[:, 1:]
                # Negative step extrapolates away from the neighbour, i.e.
                # outward from the class boundary.
                X_new_2, y_new_2 = self._make_samples(
                    _safe_indexing(support_vector, np.flatnonzero(safety_bool)),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_samples - n_generated_samples,
                    step_size=-self.out_step,
                )
            # Stack whichever batches were actually produced above.
            if (
                np.count_nonzero(danger_bool) > 0
                and np.count_nonzero(safety_bool) > 0
            ):
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack(
                        [X_resampled, X_new_1, X_new_2]
                    )
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1, X_new_2))
                y_resampled = np.concatenate(
                    (y_resampled, y_new_1, y_new_2), axis=0
                )
            elif np.count_nonzero(danger_bool) == 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_2])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_2))
                y_resampled = np.concatenate((y_resampled, y_new_2), axis=0)
            elif np.count_nonzero(safety_bool) == 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_1])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1))
                y_resampled = np.concatenate((y_resampled, y_new_1), axis=0)
        return X_resampled, y_resampled
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SMOTE(BaseSMOTE):
    """Class to perform over-sampling using SMOTE.
    This object is an implementation of SMOTE - Synthetic Minority
    Over-sampling Technique as presented in [1]_.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        If ``int``, number of nearest neighbours to used to construct synthetic
        samples. If object, an estimator that inherits from
        :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
        find the k_neighbors.
    {n_jobs}
    See Also
    --------
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
    SVMSMOTE : Over-sample using the SVM-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original papers: [1]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import \
SMOTE # doctest: +NORMALIZE_WHITESPACE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sm = SMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    @_deprecate_positional_args
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
    def _fit_resample(self, X, y):
        # Build/validate the k-NN estimator (``nn_k_``) before resampling.
        self._validate_estimator()
        # Accumulate the original data and each synthetic batch in lists,
        # stacking once at the end (cheaper than repeated vstack calls).
        X_resampled = [X.copy()]
        y_resampled = [y.copy()]
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            # Neighbours are searched within the target class only; column 0
            # is dropped because it is the query sample itself.
            self.nn_k_.fit(X_class)
            nns = self.nn_k_.kneighbors(X_class, return_distance=False)[:, 1:]
            X_new, y_new = self._make_samples(
                X_class, y.dtype, class_sample, X_class, nns, n_samples, 1.0
            )
            X_resampled.append(X_new)
            y_resampled.append(y_new)
        if sparse.issparse(X):
            # Preserve the caller's sparse format (csr/csc).
            X_resampled = sparse.vstack(X_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)
        return X_resampled, y_resampled
# @Substitution(
#    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
#    random_state=_random_state_docstring)
class SMOTENC(SMOTE):
    """Synthetic Minority Over-sampling Technique for Nominal and Continuous.
    Unlike :class:`SMOTE`, SMOTE-NC for dataset containing continuous and
    categorical features. However, it is not designed to work with only
    categorical features.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    Parameters
    ----------
    categorical_features : ndarray of shape (n_cat_features,) or (n_features,)
        Specified which features are categorical. Can either be:
        - array of indices specifying the categorical features;
        - mask array of shape (n_features, ) and ``bool`` dtype for which
          ``True`` indicates the categorical features.
    sampling_strategy : float, str, dict or callable, default='auto'
        Sampling information to resample the data set.
        - When ``float``, it corresponds to the desired ratio of the number of
          samples in the minority class over the number of samples in the
          majority class after resampling. Therefore, the ratio is expressed as
          :math:`\\alpha_{os} = N_{rm} / N_{M}` where :math:`N_{rm}` is the
          number of samples in the minority class after resampling and
          :math:`N_{M}` is the number of samples in the majority class.
        .. warning::
           ``float`` is only available for **binary** classification. An
           error is raised for multi-class classification.
        - When ``str``, specify the class targeted by the resampling. The
          number of samples in the different classes will be equalized.
          Possible choices are:
            ``'minority'``: resample only the minority class;
            ``'not minority'``: resample all classes but the minority class;
            ``'not majority'``: resample all classes but the majority class;
            ``'all'``: resample all classes;
            ``'auto'``: equivalent to ``'not majority'``.
        - When ``dict``, the keys correspond to the targeted classes. The
          values correspond to the desired number of samples for each targeted
          class.
        - When callable, function taking ``y`` and returns a ``dict``. The keys
          correspond to the targeted classes. The values correspond to the
          desired number of samples for each class.
    random_state : int, RandomState instance, default=None
        Control the randomization of the algorithm.
        - If int, ``random_state`` is the seed used by the random number
          generator;
        - If ``RandomState`` instance, random_state is the random number
          generator;
        - If ``None``, the random number generator is the ``RandomState``
          instance used by ``np.random``.
    k_neighbors : int or object, default=5
        If ``int``, number of nearest neighbours to used to construct synthetic
        samples. If object, an estimator that inherits from
        :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
        find the k_neighbors.
    n_jobs : int, default=None
        Number of CPU cores used during the cross-validation loop.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See
        `Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`_
        for more details.
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original paper [1]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    See
    :ref:`sphx_glr_auto_examples_over-sampling_plot_comparison_over_sampling.py`,
    and :ref:`sphx_glr_auto_examples_over-sampling_plot_illustration_generation_sample.py`.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    Examples
    --------
    >>> from collections import Counter
    >>> from numpy.random import RandomState
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import SMOTENC
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape (%s, %s)' % X.shape)
    Original dataset shape (1000, 20)
    >>> print('Original dataset samples per class {}'.format(Counter(y)))
    Original dataset samples per class Counter({1: 900, 0: 100})
    >>> # simulate the 2 last columns to be categorical features
    >>> X[:, -2:] = RandomState(10).randint(0, 4, size=(1000, 2))
    >>> sm = SMOTENC(random_state=42, categorical_features=[18, 19])
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset samples per class {}'.format(Counter(y_res)))
    Resampled dataset samples per class Counter({0: 900, 1: 900})
    """
    # ``categorical_features`` has no default and must always be supplied.
    _required_parameters = ["categorical_features"]
    @_deprecate_positional_args
    def __init__(
        self,
        categorical_features,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
        )
        self.categorical_features = categorical_features
    def _check_X_y(self, X, y):
        """Overwrite the checking to let pass some string for categorical
        features.
        """
        y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
        # dtype=None keeps the input dtype so string categories survive.
        X, y = self._validate_data(
            X, y, reset=True, dtype=None, accept_sparse=["csr", "csc"]
        )
        return X, y, binarize_y
    def _validate_estimator(self):
        # Normalize ``categorical_features`` (mask or index array) into an
        # index array and derive the complementary continuous indices.
        super()._validate_estimator()
        categorical_features = np.asarray(self.categorical_features)
        if categorical_features.dtype.name == "bool":
            self.categorical_features_ = np.flatnonzero(categorical_features)
        else:
            if any(
                [
                    cat not in np.arange(self.n_features_)
                    for cat in categorical_features
                ]
            ):
                raise ValueError(
                    "Some of the categorical indices are out of range. Indices"
                    " should be between 0 and {}".format(self.n_features_)
                )
            self.categorical_features_ = categorical_features
        self.continuous_features_ = np.setdiff1d(
            np.arange(self.n_features_), self.categorical_features_
        )
        if self.categorical_features_.size == self.n_features_in_:
            raise ValueError(
                "SMOTE-NC is not designed to work only with categorical "
                "features. It requires some numerical features."
            )
    def _fit_resample(self, X, y):
        """Encode categoricals, delegate to SMOTE, then decode back.

        Categorical columns are one-hot encoded and scaled so that any
        categorical mismatch contributes ``median_std_`` to the distance,
        matching the construction in the original paper.
        """
        self.n_features_ = X.shape[1]
        self._validate_estimator()
        # compute the median of the standard deviation of the minority class
        target_stats = Counter(y)
        class_minority = min(target_stats, key=target_stats.get)
        X_continuous = X[:, self.continuous_features_]
        X_continuous = check_array(X_continuous, accept_sparse=["csr", "csc"])
        X_minority = _safe_indexing(
            X_continuous, np.flatnonzero(y == class_minority)
        )
        if sparse.issparse(X):
            if X.format == "csr":
                _, var = csr_mean_variance_axis0(X_minority)
            else:
                _, var = csc_mean_variance_axis0(X_minority)
        else:
            var = X_minority.var(axis=0)
        self.median_std_ = np.median(np.sqrt(var))
        X_categorical = X[:, self.categorical_features_]
        # OHE output dtype mirrors the continuous dtype unless it is object
        # (e.g. string categories), in which case float64 is used.
        if X_continuous.dtype.name != "object":
            dtype_ohe = X_continuous.dtype
        else:
            dtype_ohe = np.float64
        self.ohe_ = OneHotEncoder(
            sparse=True, handle_unknown="ignore", dtype=dtype_ohe
        )
        # the input of the OneHotEncoder needs to be dense
        X_ohe = self.ohe_.fit_transform(
            X_categorical.toarray()
            if sparse.issparse(X_categorical)
            else X_categorical
        )
        # we can replace the 1 entries of the categorical features with the
        # median of the standard deviation. It will ensure that whenever
        # distance is computed between 2 samples, the difference will be equal
        # to the median of the standard deviation as in the original paper.
        # In the edge case where the median of the std is equal to 0, the 1s
        # entries will be also nullified. In this case, we store the original
        # categorical encoding which will be later used for inversing the OHE
        if math.isclose(self.median_std_, 0):
            self._X_categorical_minority_encoded = _safe_indexing(
                X_ohe.toarray(), np.flatnonzero(y == class_minority)
            )
        X_ohe.data = (
            np.ones_like(X_ohe.data, dtype=X_ohe.dtype) * self.median_std_ / 2
        )
        X_encoded = sparse.hstack((X_continuous, X_ohe), format="csr")
        # Run plain SMOTE in the encoded (continuous + scaled-OHE) space.
        X_resampled, y_resampled = super()._fit_resample(X_encoded, y)
        # reverse the encoding of the categorical features
        X_res_cat = X_resampled[:, self.continuous_features_.size:]
        X_res_cat.data = np.ones_like(X_res_cat.data)
        X_res_cat_dec = self.ohe_.inverse_transform(X_res_cat)
        if sparse.issparse(X):
            X_resampled = sparse.hstack(
                (
                    X_resampled[:, : self.continuous_features_.size],
                    X_res_cat_dec,
                ),
                format="csr",
            )
        else:
            X_resampled = np.hstack(
                (
                    X_resampled[:, : self.continuous_features_.size].toarray(),
                    X_res_cat_dec,
                )
            )
        # Columns are currently [continuous..., categorical...]; restore the
        # caller's original column order.
        indices_reordered = np.argsort(
            np.hstack((self.continuous_features_, self.categorical_features_))
        )
        if sparse.issparse(X_resampled):
            # the matrix is supposed to be in the CSR format after the stacking
            col_indices = X_resampled.indices.copy()
            for idx, col_idx in enumerate(indices_reordered):
                mask = X_resampled.indices == col_idx
                col_indices[mask] = idx
            X_resampled.indices = col_indices
        else:
            X_resampled = X_resampled[:, indices_reordered]
        return X_resampled, y_resampled
    def _generate_samples(self, X, nn_data, nn_num, rows, cols, steps):
        """Generate a synthetic sample with an additional steps for the
        categorical features.
        Each new sample is generated the same way than in SMOTE. However, the
        categorical features are mapped to the most frequent nearest neighbors
        of the majority class.
        """
        rng = check_random_state(self.random_state)
        X_new = super()._generate_samples(
            X, nn_data, nn_num, rows, cols, steps
        )
        # change in sparsity structure more efficient with LIL than CSR
        X_new = (X_new.tolil() if sparse.issparse(X_new) else X_new)
        # convert to dense array since scipy.sparse doesn't handle 3D
        nn_data = (nn_data.toarray() if sparse.issparse(nn_data) else nn_data)
        # In the case that the median std was equal to zeros, we have to
        # create non-null entry based on the encoded of OHE
        if math.isclose(self.median_std_, 0):
            nn_data[:, self.continuous_features_.size:] = (
                self._X_categorical_minority_encoded
            )
        all_neighbors = nn_data[nn_num[rows]]
        # Per-category column spans: first the continuous block, then one
        # span per one-hot-encoded categorical feature.
        categories_size = [self.continuous_features_.size] + [
            cat.size for cat in self.ohe_.categories_
        ]
        for start_idx, end_idx in zip(np.cumsum(categories_size)[:-1],
                                      np.cumsum(categories_size)[1:]):
            col_maxs = all_neighbors[:, :, start_idx:end_idx].sum(axis=1)
            # tie breaking argmax
            is_max = np.isclose(col_maxs, col_maxs.max(axis=1, keepdims=True))
            max_idxs = rng.permutation(np.argwhere(is_max))
            xs, idx_sels = np.unique(max_idxs[:, 0], return_index=True)
            col_sels = max_idxs[idx_sels, 1]
            ys = start_idx + col_sels
            # One-hot: zero the whole span, then set the winning category.
            X_new[:, start_idx:end_idx] = 0
            X_new[xs, ys] = 1
        return X_new
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class KMeansSMOTE(BaseSMOTE):
    """Apply a KMeans clustering before to over-sample using SMOTE.
    This is an implementation of the algorithm described in [1]_.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=2
        If ``int``, number of nearest neighbours to used to construct synthetic
        samples. If object, an estimator that inherits from
        :class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
        find the k_neighbors.
    {n_jobs}
    kmeans_estimator : int or object, default=None
        A KMeans instance or the number of clusters to be used. By default,
        we used a :class:`sklearn.cluster.MiniBatchKMeans` which tend to be
        better with large number of samples.
    cluster_balance_threshold : "auto" or float, default="auto"
        The threshold at which a cluster is called balanced and where samples
        of the class selected for SMOTE will be oversampled. If "auto", this
        will be determined by the ratio for each class, or it can be set
        manually.
    density_exponent : "auto" or float, default="auto"
        This exponent is used to determine the density of a cluster. Leaving
        this to "auto" will use a feature-length based exponent.
    Attributes
    ----------
    kmeans_estimator_ : estimator
        The fitted clustering method used before to apply SMOTE.
    nn_k_ : estimator
        The fitted k-NN estimator used in SMOTE.
    cluster_balance_threshold_ : float
        The threshold used during ``fit`` for calling a cluster balanced.
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    References
    ----------
    .. [1] Felix Last, Georgios Douzas, Fernando Bacao, "Oversampling for
       Imbalanced Learning Based on K-Means and SMOTE"
       https://arxiv.org/abs/1711.00837
    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.over_sampling import KMeansSMOTE
    >>> from sklearn.datasets import make_blobs
    >>> blobs = [100, 800, 100]
    >>> X, y = make_blobs(blobs, centers=[(-10, 0), (0,0), (10, 0)])
    >>> # Add a single 0 sample in the middle blob
    >>> X = np.concatenate([X, [[0, 0]]])
    >>> y = np.append(y, 0)
    >>> # Make this a binary classification problem
    >>> y = y == 1
    >>> sm = KMeansSMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> # Find the number of new samples in the middle blob
    >>> n_res_in_middle = ((X_res[:, 0] > -5) & (X_res[:, 0] < 5)).sum()
    >>> print("Samples in the middle blob: %s" % n_res_in_middle)
    Samples in the middle blob: 801
    >>> print("Middle blob unchanged: %s" % (n_res_in_middle == blobs[1] + 1))
    Middle blob unchanged: True
    >>> print("More 0 samples: %s" % ((y_res == 0).sum() > (y == 0).sum()))
    More 0 samples: True
    """
    @_deprecate_positional_args
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=2,
        n_jobs=None,
        kmeans_estimator=None,
        cluster_balance_threshold="auto",
        density_exponent="auto",
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        # Stored unvalidated, per scikit-learn convention; validated in
        # ``_validate_estimator``.
        self.kmeans_estimator = kmeans_estimator
        self.cluster_balance_threshold = cluster_balance_threshold
        self.density_exponent = density_exponent
    def _validate_estimator(self):
        # Resolve ``kmeans_estimator`` (None -> default MiniBatchKMeans,
        # int -> MiniBatchKMeans with that many clusters, estimator -> clone)
        # and sanity-check the string parameters.
        super()._validate_estimator()
        if self.kmeans_estimator is None:
            self.kmeans_estimator_ = MiniBatchKMeans(
                random_state=self.random_state
            )
        elif isinstance(self.kmeans_estimator, int):
            self.kmeans_estimator_ = MiniBatchKMeans(
                n_clusters=self.kmeans_estimator,
                random_state=self.random_state,
            )
        else:
            self.kmeans_estimator_ = clone(self.kmeans_estimator)
        # validate the parameters
        for param_name in ("cluster_balance_threshold", "density_exponent"):
            param = getattr(self, param_name)
            if isinstance(param, str) and param != "auto":
                raise ValueError(
                    "'{}' should be 'auto' when a string is passed. "
                    "Got {} instead.".format(param_name, repr(param))
                )
        # With a single cluster, every cluster is accepted (-inf threshold);
        # otherwise this may still hold the string "auto", which is resolved
        # per class inside ``_fit_resample``.
        self.cluster_balance_threshold_ = (
            self.cluster_balance_threshold
            if self.kmeans_estimator_.n_clusters != 1
            else -np.inf
        )
    def _find_cluster_sparsity(self, X):
        """Compute the cluster sparsity."""
        euclidean_distances = pairwise_distances(
            X, metric="euclidean", n_jobs=self.n_jobs
        )
        # zero the diagonal (self-distances) before averaging
        for ind in range(X.shape[0]):
            euclidean_distances[ind, ind] = 0
        non_diag_elements = (X.shape[0] ** 2) - X.shape[0]
        mean_distance = euclidean_distances.sum() / non_diag_elements
        # "auto" uses the size-based exponent from the reference paper.
        exponent = (
            math.log(X.shape[0], 1.6) ** 1.8 * 0.16
            if self.density_exponent == "auto"
            else self.density_exponent
        )
        return (mean_distance ** exponent) / X.shape[0]
    def _fit_resample(self, X, y):
        """Cluster ``X`` then apply SMOTE within sufficiently minority-rich
        clusters, weighting each cluster's sample budget by its sparsity.
        """
        self._validate_estimator()
        X_resampled = X.copy()
        y_resampled = y.copy()
        total_inp_samples = sum(self.sampling_strategy_.values())
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            # target_class_indices = np.flatnonzero(y == class_sample)
            # X_class = _safe_indexing(X, target_class_indices)
            X_clusters = self.kmeans_estimator_.fit_predict(X)
            valid_clusters = []
            cluster_sparsities = []
            # identify cluster which are answering the requirements
            for cluster_idx in range(self.kmeans_estimator_.n_clusters):
                cluster_mask = np.flatnonzero(X_clusters == cluster_idx)
                X_cluster = _safe_indexing(X, cluster_mask)
                y_cluster = _safe_indexing(y, cluster_mask)
                cluster_class_mean = (y_cluster == class_sample).mean()
                if self.cluster_balance_threshold_ == "auto":
                    balance_threshold = n_samples / total_inp_samples / 2
                else:
                    balance_threshold = self.cluster_balance_threshold_
                # the cluster is already considered balanced
                if cluster_class_mean < balance_threshold:
                    continue
                # not enough samples to apply SMOTE
                anticipated_samples = cluster_class_mean * X_cluster.shape[0]
                if anticipated_samples < self.nn_k_.n_neighbors:
                    continue
                X_cluster_class = _safe_indexing(
                    X_cluster, np.flatnonzero(y_cluster == class_sample)
                )
                valid_clusters.append(cluster_mask)
                cluster_sparsities.append(
                    self._find_cluster_sparsity(X_cluster_class)
                )
            # Sparser clusters get a larger share of the sample budget.
            cluster_sparsities = np.array(cluster_sparsities)
            cluster_weights = cluster_sparsities / cluster_sparsities.sum()
            if not valid_clusters:
                raise RuntimeError(
                    "No clusters found with sufficient samples of "
                    "class {}. Try lowering the cluster_balance_threshold "
                    "or increasing the number of "
                    "clusters.".format(class_sample)
                )
            for valid_cluster_idx, valid_cluster in enumerate(valid_clusters):
                X_cluster = _safe_indexing(X, valid_cluster)
                y_cluster = _safe_indexing(y, valid_cluster)
                X_cluster_class = _safe_indexing(
                    X_cluster, np.flatnonzero(y_cluster == class_sample)
                )
                self.nn_k_.fit(X_cluster_class)
                # Drop column 0 of the neighbours: it is the sample itself.
                nns = self.nn_k_.kneighbors(
                    X_cluster_class, return_distance=False
                )[:, 1:]
                cluster_n_samples = int(
                    math.ceil(n_samples * cluster_weights[valid_cluster_idx])
                )
                X_new, y_new = self._make_samples(
                    X_cluster_class,
                    y.dtype,
                    class_sample,
                    X_cluster_class,
                    nns,
                    cluster_n_samples,
                    1.0,
                )
                # Pick the dense or sparse stacker to match X_new.
                stack = [np.vstack, sparse.vstack][int(sparse.issparse(X_new))]
                X_resampled = stack((X_resampled, X_new))
                y_resampled = np.hstack((y_resampled, y_new))
        return X_resampled, y_resampled
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from parameterized import parameterized
from airflow.executors.executor_loader import ExecutorLoader
from airflow.plugins_manager import executors_modules, make_module
from tests.test_utils.config import conf_vars
# The plugin manager registers real modules (hard to mock), so test isolation
# is achieved by giving the registered module a globally unique name.
TEST_PLUGIN_NAME = "unique_plugin_name_to_avoid_collision_i_love_kitties"
class FakeExecutor:
    """Minimal stand-in executor used to exercise plugin/custom-path loading."""
    pass
class TestExecutorLoader(unittest.TestCase):
    """Tests for :class:`airflow.executors.executor_loader.ExecutorLoader`."""

    def setUp(self) -> None:
        # Reset the cached default executor so each test resolves it afresh.
        ExecutorLoader._default_executor = None

    def tearDown(self) -> None:
        ExecutorLoader._default_executor = None

    @parameterized.expand([
        ("LocalExecutor", ),
        ("DebugExecutor", ),
    ])
    def test_should_support_executor_from_core(self, executor_name):
        """Core executors referenced by bare name should be resolvable."""
        with conf_vars({
            ("core", "executor"): executor_name
        }):
            executor = ExecutorLoader.get_default_executor()
            self.assertIsNotNone(executor)
            self.assertIn(executor_name, executor.__class__.__name__)

    def test_should_support_plugin(self):
        """Executors exposed through a plugin module should be resolvable."""
        executors_modules.append(make_module('airflow.executors.' + TEST_PLUGIN_NAME, [FakeExecutor]))
        self.addCleanup(self.remove_executor_module)
        with conf_vars({
            ("core", "executor"): f"{TEST_PLUGIN_NAME}.FakeExecutor"
        }):
            executor = ExecutorLoader.get_default_executor()
            self.assertIsNotNone(executor)
            self.assertIn("FakeExecutor", executor.__class__.__name__)

    def remove_executor_module(self):
        # Undo the module registration performed in test_should_support_plugin.
        executors_modules.pop()

    def test_should_support_custom_path(self):
        """A fully dotted import path should be resolvable."""
        with conf_vars({
            # Plain string literal: the original used a needless f-string
            # prefix with nothing to interpolate.
            ("core", "executor"): "tests.executors.test_executor_loader.FakeExecutor"
        }):
            executor = ExecutorLoader.get_default_executor()
            self.assertIsNotNone(executor)
            self.assertIn("FakeExecutor", executor.__class__.__name__)
|
(function () {
    // Only install the bridge when running inside the iOS client's WKWebView
    // (detected via the app-specific user-agent token).
    if (navigator.userAgent.match(/(iPad|iPhone|iPod).*?moschat_ios/g)) {
        /**
         * YYApiCore: JS <-> native iOS bridge. Calls are serialized into a
         * yyapi:// URL and posted through the WKWebView script message
         * handler; callbacks are registered as uniquely named global
         * functions so the native side can invoke them by name.
         */
        YYApiCore = {
            __GLOBAL_FUNC_INDEX__: 0,
            /**
             * Invoke a native method.
             * @param module     native module name
             * @param name       native method name
             * @param parameters JSON-serializable parameter object (optional)
             * @param callback   function or global-function name (optional)
             */
            invokeClientMethod: function(module, name, parameters, callback) {
                var url = 'yyapi://' + module + '/' + name + '?p=' + encodeURIComponent(JSON.stringify(parameters || {}));
                if (callback) {
                    // Fixed: the original declared `var name` here, clobbering
                    // the `name` parameter already used to build the URL.
                    var cbName;
                    if (typeof callback == "function") {
                        cbName = YYApiCore.createGlobalFuncForCallback(callback);
                    } else {
                        cbName = callback;
                    }
                    url = url + '&cb=' + cbName;
                }
                console.log('[API]' + url);
                window.webkit.messageHandlers.YYWKWebViewAPI.postMessage(url);
            },
            /**
             * Register `callback` under a unique global name the native side
             * can call. Returns the generated name, or null if no callback.
             */
            createGlobalFuncForCallback: function(callback){
                if (callback) {
                    var name = '__GLOBAL_CALLBACK__' + (YYApiCore.__GLOBAL_FUNC_INDEX__++);
                    window[name] = function(){
                        var args = arguments;
                        var func = (typeof callback == "function") ? callback : window[callback];
                        // we need to use setTimeout here to avoid the UI
                        // thread being frozen
                        setTimeout(function(){ func.apply(null, args); }, 0);
                    };
                    return name;
                }
                return null;
            },
            /** Native -> web: deliver a single return value to `callback`. */
            invokeWebMethod: function(callback, returnValue) {
                YYApiCore.invokeCallbackWithArgs(callback, [returnValue]);
            },
            /**
             * Native -> web: invoke `callback` (function or global-function
             * name) asynchronously with `args`.
             */
            invokeCallbackWithArgs: function(callback, args) {
                if (callback) {
                    var func = null;
                    var tmp;
                    if (typeof callback == "function") {
                        func = callback;
                    }
                    else if((tmp = window[callback]) && typeof tmp == 'function') {
                        func = tmp;
                    }
                    if (func) {
                        setTimeout(function(){ func.apply(null, args); }, 0);
                    }
                }
            }
        };
        // var readyEvent = window.document.createEvent('Events')
        // readyEvent.initEvent('WebViewJavascriptBridgeReady')
        // readyEvent.bridge = WebViewJavascriptBridge
        // doc.dispatchEvent(readyEvent)
    }
}) ();
|
# -*- coding: utf-8 -*-
import logging
import os

from CommonLibrary.CommonConfiguration import result_path
class LogUtility(object):
    """Thin wrapper around the root logger that adds per-run log files."""

    def __init__(self):
        # Note: this configures the *root* logger, so every logger in the
        # process inherits the DEBUG level and any handlers added here.
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)

    def create_logger_file(self, filename):
        """Attach a DEBUG-level file handler writing to <result_path>/<filename>.log.

        Failures (e.g. an unwritable directory) are logged and swallowed so
        that a missing log file never aborts the run.
        """
        try:
            # os.path.join is portable; the previous result_path() + '\\' + ...
            # concatenation only produced valid paths on Windows.
            full_log_name = os.path.join(result_path(), filename + '.log')
            fh = logging.FileHandler(full_log_name)
            fh.setLevel(logging.DEBUG)
            formatter = logging.Formatter('%(asctime)s [line:%(lineno)d] %(message)s')
            fh.setFormatter(formatter)
            self.logger.addHandler(fh)
        except Exception as err:
            self.logger.debug("Error when creating log file, error message: {}".format(str(err)))

    def log(self, message):
        """Emit `message` at DEBUG level."""
        self.logger.debug(message)
|
/** @license MUI v5.0.0-alpha.65
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
"use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
Object.defineProperty(exports, "__esModule", {
value: true
});
var _exportNames = {
BackdropUnstyled: true,
BadgeUnstyled: true,
ButtonUnstyled: true,
ClickAwayListener: true,
unstable_composeClasses: true,
generateUtilityClass: true,
generateUtilityClasses: true,
FormControlUnstyled: true,
InputUnstyled: true,
ModalUnstyled: true,
NoSsr: true,
PopperUnstyled: true,
Portal: true,
SliderUnstyled: true,
SwitchUnstyled: true,
TabPanelUnstyled: true,
TabsListUnstyled: true,
TabsUnstyled: true,
TabUnstyled: true,
TextareaAutosize: true,
Unstable_TrapFocus: true
};
Object.defineProperty(exports, "BackdropUnstyled", {
enumerable: true,
get: function () {
return _BackdropUnstyled.default;
}
});
Object.defineProperty(exports, "BadgeUnstyled", {
enumerable: true,
get: function () {
return _BadgeUnstyled.default;
}
});
Object.defineProperty(exports, "ButtonUnstyled", {
enumerable: true,
get: function () {
return _ButtonUnstyled.default;
}
});
Object.defineProperty(exports, "ClickAwayListener", {
enumerable: true,
get: function () {
return _ClickAwayListener.default;
}
});
Object.defineProperty(exports, "FormControlUnstyled", {
enumerable: true,
get: function () {
return _FormControlUnstyled.default;
}
});
Object.defineProperty(exports, "InputUnstyled", {
enumerable: true,
get: function () {
return _InputUnstyled.default;
}
});
Object.defineProperty(exports, "ModalUnstyled", {
enumerable: true,
get: function () {
return _ModalUnstyled.default;
}
});
Object.defineProperty(exports, "NoSsr", {
enumerable: true,
get: function () {
return _NoSsr.default;
}
});
Object.defineProperty(exports, "PopperUnstyled", {
enumerable: true,
get: function () {
return _PopperUnstyled.default;
}
});
Object.defineProperty(exports, "Portal", {
enumerable: true,
get: function () {
return _Portal.default;
}
});
Object.defineProperty(exports, "SliderUnstyled", {
enumerable: true,
get: function () {
return _SliderUnstyled.default;
}
});
Object.defineProperty(exports, "SwitchUnstyled", {
enumerable: true,
get: function () {
return _SwitchUnstyled.default;
}
});
Object.defineProperty(exports, "TabPanelUnstyled", {
enumerable: true,
get: function () {
return _TabPanelUnstyled.default;
}
});
Object.defineProperty(exports, "TabUnstyled", {
enumerable: true,
get: function () {
return _TabUnstyled.default;
}
});
Object.defineProperty(exports, "TabsListUnstyled", {
enumerable: true,
get: function () {
return _TabsListUnstyled.default;
}
});
Object.defineProperty(exports, "TabsUnstyled", {
enumerable: true,
get: function () {
return _TabsUnstyled.default;
}
});
Object.defineProperty(exports, "TextareaAutosize", {
enumerable: true,
get: function () {
return _TextareaAutosize.default;
}
});
Object.defineProperty(exports, "Unstable_TrapFocus", {
enumerable: true,
get: function () {
return _Unstable_TrapFocus.default;
}
});
Object.defineProperty(exports, "generateUtilityClass", {
enumerable: true,
get: function () {
return _generateUtilityClass.default;
}
});
Object.defineProperty(exports, "generateUtilityClasses", {
enumerable: true,
get: function () {
return _generateUtilityClasses.default;
}
});
Object.defineProperty(exports, "unstable_composeClasses", {
enumerable: true,
get: function () {
return _composeClasses.default;
}
});
var _utils = require("./utils");
Object.keys(_utils).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _utils[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _utils[key];
}
});
});
var _AutocompleteUnstyled = require("./AutocompleteUnstyled");
Object.keys(_AutocompleteUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _AutocompleteUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _AutocompleteUnstyled[key];
}
});
});
var _BackdropUnstyled = _interopRequireWildcard(require("./BackdropUnstyled"));
Object.keys(_BackdropUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _BackdropUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _BackdropUnstyled[key];
}
});
});
var _BadgeUnstyled = _interopRequireWildcard(require("./BadgeUnstyled"));
Object.keys(_BadgeUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _BadgeUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _BadgeUnstyled[key];
}
});
});
var _ButtonUnstyled = _interopRequireWildcard(require("./ButtonUnstyled"));
Object.keys(_ButtonUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _ButtonUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _ButtonUnstyled[key];
}
});
});
var _ClickAwayListener = _interopRequireDefault(require("./ClickAwayListener"));
var _composeClasses = _interopRequireDefault(require("./composeClasses"));
var _generateUtilityClass = _interopRequireWildcard(require("./generateUtilityClass"));
Object.keys(_generateUtilityClass).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _generateUtilityClass[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _generateUtilityClass[key];
}
});
});
var _generateUtilityClasses = _interopRequireDefault(require("./generateUtilityClasses"));
var _FormControlUnstyled = _interopRequireWildcard(require("./FormControlUnstyled"));
Object.keys(_FormControlUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _FormControlUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _FormControlUnstyled[key];
}
});
});
var _InputUnstyled = _interopRequireWildcard(require("./InputUnstyled"));
Object.keys(_InputUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _InputUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _InputUnstyled[key];
}
});
});
var _ModalUnstyled = _interopRequireWildcard(require("./ModalUnstyled"));
Object.keys(_ModalUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _ModalUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _ModalUnstyled[key];
}
});
});
var _NoSsr = _interopRequireDefault(require("./NoSsr"));
var _PopperUnstyled = _interopRequireDefault(require("./PopperUnstyled"));
var _Portal = _interopRequireDefault(require("./Portal"));
var _SliderUnstyled = _interopRequireWildcard(require("./SliderUnstyled"));
Object.keys(_SliderUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _SliderUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _SliderUnstyled[key];
}
});
});
var _SwitchUnstyled = _interopRequireWildcard(require("./SwitchUnstyled"));
Object.keys(_SwitchUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _SwitchUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _SwitchUnstyled[key];
}
});
});
var _TabPanelUnstyled = _interopRequireWildcard(require("./TabPanelUnstyled"));
Object.keys(_TabPanelUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _TabPanelUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _TabPanelUnstyled[key];
}
});
});
var _TabsListUnstyled = _interopRequireWildcard(require("./TabsListUnstyled"));
Object.keys(_TabsListUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _TabsListUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _TabsListUnstyled[key];
}
});
});
var _TabsUnstyled = _interopRequireWildcard(require("./TabsUnstyled"));
Object.keys(_TabsUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _TabsUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _TabsUnstyled[key];
}
});
});
var _TabUnstyled = _interopRequireWildcard(require("./TabUnstyled"));
Object.keys(_TabUnstyled).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
if (key in exports && exports[key] === _TabUnstyled[key]) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function () {
return _TabUnstyled[key];
}
});
});
var _TextareaAutosize = _interopRequireDefault(require("./TextareaAutosize"));
var _Unstable_TrapFocus = _interopRequireDefault(require("./Unstable_TrapFocus"));
/**
 * Lazily create and memoize the two WeakMap caches used by
 * _interopRequireWildcard (one per nodeInterop mode). Returns null when
 * WeakMap is unavailable; otherwise replaces itself with a fast lookup.
 */
function _getRequireWildcardCache(nodeInterop) {
    if (typeof WeakMap !== "function") {
        return null;
    }
    var babelCache = new WeakMap();
    var nodeCache = new WeakMap();
    _getRequireWildcardCache = function (nodeInterop) {
        return nodeInterop ? nodeCache : babelCache;
    };
    return _getRequireWildcardCache(nodeInterop);
}

/**
 * Adapt a CommonJS module for ES-namespace style consumption: copies own
 * properties (preserving accessor descriptors) onto a fresh object whose
 * `default` points at the original module. Results are memoized per module.
 * ES modules (obj.__esModule) pass through untouched unless nodeInterop is
 * set; primitives and null are wrapped as { default: obj }.
 */
function _interopRequireWildcard(obj, nodeInterop) {
    if (!nodeInterop && obj && obj.__esModule) {
        return obj;
    }
    if (obj === null || (typeof obj !== "object" && typeof obj !== "function")) {
        return { default: obj };
    }
    var cache = _getRequireWildcardCache(nodeInterop);
    if (cache && cache.has(obj)) {
        return cache.get(obj);
    }
    var target = {};
    var canUseDescriptors = Object.defineProperty && Object.getOwnPropertyDescriptor;
    for (var key in obj) {
        if (key === "default" || !Object.prototype.hasOwnProperty.call(obj, key)) {
            continue;
        }
        var desc = canUseDescriptors ? Object.getOwnPropertyDescriptor(obj, key) : null;
        if (desc && (desc.get || desc.set)) {
            Object.defineProperty(target, key, desc);
        } else {
            target[key] = obj[key];
        }
    }
    target.default = obj;
    if (cache) {
        cache.set(obj, target);
    }
    return target;
}
|
def insertionSort(myArray):
    """Sort ``myArray`` in place, in ascending order, via insertion sort.

    Each element is shifted left past every strictly-larger neighbour until
    it sits after the first element not greater than it (stable, O(n^2)).
    """
    for step in range(1, len(myArray)):
        current = myArray[step]
        position = step
        # Shift larger elements one slot right to open a gap for `current`.
        # (For descending order, flip the comparison to ``<``.)
        while position > 0 and myArray[position - 1] > current:
            myArray[position] = myArray[position - 1]
            position -= 1
        myArray[position] = current
# Demo: sort a sample list in place and print the result.
data = [22, 55, 91, 15, 66, 22, 25, 5, 18]
insertionSort(data)
print('Sorted myArray in Ascending Order:')
print(data)
|
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class QataskRetry30timeout60taskRef(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'moid': 'str',
        'object_type': 'str'
    }

    attribute_map = {
        'moid': 'Moid',
        'object_type': 'ObjectType'
    }

    def __init__(self, moid=None, object_type=None):
        """
        QataskRetry30timeout60taskRef - a model defined in Swagger
        """
        self._moid = None
        self._object_type = None

        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type

    @property
    def moid(self):
        """
        Gets the moid of this QataskRetry30timeout60taskRef.

        :return: The moid of this QataskRetry30timeout60taskRef.
        :rtype: str
        """
        return self._moid

    @moid.setter
    def moid(self, moid):
        """
        Sets the moid of this QataskRetry30timeout60taskRef.

        :param moid: The moid of this QataskRetry30timeout60taskRef.
        :type: str
        """
        self._moid = moid

    @property
    def object_type(self):
        """
        Gets the object_type of this QataskRetry30timeout60taskRef.

        :return: The object_type of this QataskRetry30timeout60taskRef.
        :rtype: str
        """
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        """
        Sets the object_type of this QataskRetry30timeout60taskRef.

        :param object_type: The object_type of this QataskRetry30timeout60taskRef.
        :type: str
        """
        self._object_type = object_type

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3; drops the need for
        # six.iteritems here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, QataskRetry30timeout60taskRef):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_H_
#define NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_H_
#include <map>
#include <string>
#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash_tables.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "net/disk_cache/disk_cache.h"
#include "net/disk_cache/simple/simple_disk_format.h"
namespace base {
class TaskRunner;
}
namespace disk_cache {
// This class is not Thread-safe.
// In-memory index for the simple cache backend: tracks which entries exist,
// their per-entry metadata, and the total cache size, and (de)serializes
// the index to and from disk.
class SimpleIndex
    : public base::SupportsWeakPtr<SimpleIndex> {
 public:
  SimpleIndex(
      const scoped_refptr<base::TaskRunner>& cache_thread,
      const base::FilePath& path);

  virtual ~SimpleIndex();

  // Should be called on CacheThread.
  bool Initialize();

  void Insert(const std::string& key);
  void Remove(const std::string& key);
  // Returns true iff an entry with the given key is present in the index.
  bool Has(const std::string& key) const;

  // Update the last used time of the entry with the given key and return true
  // iff the entry exist in the index.
  bool UseIfExists(const std::string& key);

  void Cleanup();

  // Update the size (in bytes) of an entry, in the metadata stored in the
  // index. This should be the total disk-file size including all streams of the
  // entry.
  bool UpdateEntrySize(const std::string& key, uint64 entry_size);

 private:
  // TODO(felipeg): This way we are storing the hash_key string twice (as the
  // hash_map::key and as a member of EntryMetadata. We could save space if we
  // redefine the hash_map::operators and make the hash_map::key be part of the
  // EntryMetadata itself.
  typedef base::hash_map<std::string, SimpleIndexFile::EntryMetadata> EntrySet;

  void InsertInternal(const SimpleIndexFile::EntryMetadata& entry_metadata);

  // Enumerates all entries' files on disk and regenerates the index.
  bool RestoreFromDisk();

  // |out_buffer| needs to be pre-allocated. The serialized index is stored in
  // |out_buffer|.
  void Serialize(std::string* out_buffer);

  bool OpenIndexFile();
  bool CloseIndexFile();

  // NOTE(review): presumably writes |buffer| to |temp_filename| and then
  // replaces |index_filename| with it — confirm against the .cc file.
  static void UpdateFile(const base::FilePath& index_filename,
                         const base::FilePath& temp_filename,
                         scoped_ptr<std::string> buffer);

  const base::FilePath path_;

  EntrySet entries_set_;
  uint64 cache_size_;  // Total cache storage size in bytes.

  base::FilePath index_filename_;
  base::PlatformFile index_file_;

  // We keep the thread from where Initialize() method has been called so that
  // we run the Cleanup method in the same thread. Usually that should be the
  // CacheThread.
  scoped_refptr<base::TaskRunner> cache_thread_;
};
} // namespace disk_cache
#endif // NET_DISK_CACHE_SIMPLE_SIMPLE_INDEX_H_
|
import pytest
def test_gene_upgrade_remove_go_annotations(
    upgrader,
    gene_1,
):
    # Upgrading a v1 gene to v2 should bump the schema version and drop the
    # deprecated `go_annotations` property.
    new_gene = upgrader.upgrade(
        'gene', gene_1, current_version='1', target_version='2'
    )
    assert new_gene['schema_version'] == '2'
    assert 'go_annotations' not in new_gene
|
from typing import Optional
from parchmint.params import Params
class Layer:
    """A single layer of a device in the Parchmint format.

    Holds the layer's identity (id/name/type/group) and its free-form
    parameters, with JSON (de)serialization helpers.
    """

    def __init__(self, json_data=None) -> None:
        """Creates a new instance Layer

        Args:
            json_data (dict, optional): json dict after json.loads(). Defaults to None.
        """
        self._id: Optional[str] = None
        self.name: Optional[str] = None
        self.type: Optional[str] = None
        self.group: str = ""
        self.params: Params = Params()

        if json_data:
            self.parse_from_json(json_data)

    @property
    def ID(self) -> str:
        """Returns the ID of the layer

        Raises:
            ValueError: if ID is not set

        Returns:
            str: ID of the layer
        """
        if self._id is None:
            raise ValueError("ID is not set")
        return self._id

    @ID.setter
    def ID(self, value: str) -> None:
        """Sets the id of the layer

        Args:
            value (str): id of the layer
        """
        self._id = value

    def parse_from_json(self, json_data):
        """Loads instance data json dict from json.loads()

        Args:
            json_data (dict): layer dict with keys name/id/type/group/params.
        """
        self.name = json_data["name"]
        self.ID = json_data["id"]
        self.type = json_data["type"]
        self.group = json_data["group"]
        self.params = Params(json_data["params"])

    def to_parchmint_v1(self):
        """Returns the json dict

        Returns:
            dict: dictionary that can be used in json.dumps()
        """
        return {
            "name": self.name,
            "id": self.ID,
            "type": self.type,
            "params": self.params.to_parchmint_v1(),
            "group": self.group,
        }

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return str(self.__dict__)

    def __hash__(self) -> int:
        # Fix: __eq__ compares layers by ID only, so the hash must depend
        # only on the ID as well. The previous hash(repr(self.__dict__))
        # gave equal layers different hashes, breaking use in sets/dicts.
        return hash(self._id)

    def __eq__(self, o: object) -> bool:
        if isinstance(o, Layer):
            return o.ID == self.ID
        else:
            return False
|
"""
License: Apache-2.0
Author: Suofei Zhang | Hang Yu
E-mail: zhangsuofei at njupt.edu.cn | hangyu5 at illinois.edu
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
from config import cfg, get_dataset_size_train, get_num_classes, get_create_inputs
import time
import numpy as np
import sys
import os
import capsnet_dynamic_routing as net
import logging
import daiquiri
daiquiri.setup(level=logging.DEBUG)
logger = daiquiri.getLogger(__name__)
def main(args):
    """Train the dynamic-routing capsule network on the dataset named by
    ``args[1]``, writing TensorBoard summaries and per-epoch checkpoints
    under ``cfg.logdir``.
    """
    # Get dataset hyperparameters from the command line.
    assert isinstance(args[1], str)
    dataset_name = args[1]
    logger.info('Using dataset: {}'.format(dataset_name))
    """Set reproduciable random seed"""
    tf.set_random_seed(1234)
    dataset_size = get_dataset_size_train(dataset_name)
    num_classes = get_num_classes(dataset_name)
    create_inputs = get_create_inputs(dataset_name, is_train=True, epochs=cfg.epoch)
    # Build the graph with variables pinned to the CPU and compute on GPU 0.
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        """Get global_step."""
        global_step = tf.get_variable(
            'global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        """Get batches per epoch."""
        num_batches_per_epoch = int(dataset_size / cfg.batch_size)
        opt = tf.train.AdamOptimizer()  # default lrn_rate
        """Get batch from data queue."""
        batch_x, batch_labels = create_inputs()
        # batch_y = tf.one_hot(batch_labels, depth=10, axis=1, dtype=tf.float32)
        """Define the dataflow graph."""
        with tf.device('/gpu:0'):
            with slim.arg_scope([slim.variable], device='/cpu:0'):
                # Scale raw pixel values into [0, 1] before feeding the net.
                batch_squash = tf.divide(batch_x, 255.)
                # batch_x = slim.batch_norm(batch_x, center=False, is_training=True, trainable=True)
                output, output_len = net.build_arch(batch_squash, is_train=True, num_classes=num_classes)
                tf.logging.debug(output.get_shape())
                loss, margin_loss, mse, _ = net.loss(
                    output, output_len, batch_squash, batch_labels)
                acc = net.test_accuracy(output_len, batch_labels)
                tf.summary.scalar('margin_loss', margin_loss)
                tf.summary.scalar('reconstruction_loss', mse)
                tf.summary.scalar('all_loss', loss)
                tf.summary.scalar('train_acc', acc)
            """Compute gradient."""
            grad = opt.compute_gradients(loss)
            # Guard every gradient (and the loss) with check_numerics so a NaN
            # raises InvalidArgumentError instead of silently corrupting weights.
            # See: https://stackoverflow.com/questions/40701712/how-to-check-nan-in-gradients-in-tensorflow-when-updating
            grad_check = [tf.check_numerics(g, message='Gradient NaN Found!')
                          for g, _ in grad if g is not None] + [tf.check_numerics(loss, message='Loss NaN Found')]
            """Apply graident."""
            with tf.control_dependencies(grad_check):
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_ops):
                    train_op = opt.apply_gradients(grad, global_step=global_step)
        """Set Session settings."""
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True, log_device_placement=False))
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        """Set Saver."""
        var_to_save = [v for v in tf.global_variables(
        ) if 'Adam' not in v.name]  # Don't save redundant Adam beta/gamma
        saver = tf.train.Saver(var_list=var_to_save, max_to_keep=cfg.epoch)
        """Display parameters"""
        total_p = np.sum([np.prod(v.get_shape().as_list()) for v in var_to_save]).astype(np.int32)
        train_p = np.sum([np.prod(v.get_shape().as_list())
                          for v in tf.trainable_variables()]).astype(np.int32)
        logger.info('Total Parameters: {}'.format(total_p))
        logger.info('Trainable Parameters: {}'.format(train_p))
        # read snapshot
        # latest = os.path.join(cfg.logdir, 'model.ckpt-4680')
        # saver.restore(sess, latest)
        """Set summary op."""
        summary_op = tf.summary.merge_all()
        """Start coord & queue."""
        coord = tf.train.Coordinator()
        # `threads` is kept alive by this reference; the runners feed the
        # input queue for the duration of training.
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        """Set summary writer"""
        if not os.path.exists(cfg.logdir + '/caps/{}/train_log/'.format(dataset_name)):
            os.makedirs(cfg.logdir + '/caps/{}/train_log/'.format(dataset_name))
        summary_writer = tf.summary.FileWriter(
            cfg.logdir + '/caps/{}/train_log/'.format(dataset_name), graph=sess.graph)  # graph = sess.graph, huge!
        """Main loop."""
        for step in range(cfg.epoch * num_batches_per_epoch + 1):
            tic = time.time()
            """"TF queue would pop batch until no file"""
            try:
                _, loss_value, summary_str = sess.run(
                    [train_op, loss, summary_op])
                logger.info('%d iteration finishs in ' % step + '%f second' %
                            (time.time() - tic) + ' loss=%f' % loss_value)
            except KeyboardInterrupt:
                sess.close()
                sys.exit()
            except tf.errors.InvalidArgumentError:
                # Raised by the check_numerics guards above: skip the batch
                # rather than applying NaN gradients.
                logger.warning('%d iteration contains NaN gradients. Discard.' % step)
                continue
            else:
                """Write to summary."""
                if step % 5 == 0:
                    summary_writer.add_summary(summary_str, step)
                if (step % num_batches_per_epoch) == 0:
                    """Save model periodically"""
                    ckpt_path = os.path.join(
                        cfg.logdir + '/caps/{}/'.format(dataset_name), 'model-{:.4f}.ckpt'.format(loss_value))
                    saver.save(sess, ckpt_path, global_step=step)


if __name__ == "__main__":
    tf.app.run()
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
describe('Visualization > Pie', () => {
  // Baseline explore-view parameters shared by every test case below.
  const PIE_FORM_DATA = {
    datasource: '3__table',
    viz_type: 'pie',
    slice_id: 55,
    granularity_sqla: 'ds',
    time_grain_sqla: 'P1D',
    time_range: '100 years ago : now',
    metric: 'sum__num',
    adhoc_filters: [],
    groupby: ['gender'],
    row_limit: 50000,
    pie_label_type: 'key',
    donut: false,
    show_legend: true,
    show_labels: true,
    labels_outside: true,
    color_scheme: 'bnbColors',
  };

  // Open the chart for the given form data and wait for the chart-data
  // request (aliased as @getJson in beforeEach) to succeed.
  function verify(formData) {
    cy.visitChartByParams(JSON.stringify(formData));
    cy.verifySliceSuccess({ waitAlias: '@getJson' });
  }

  beforeEach(() => {
    cy.server();
    cy.login();
    // Alias the chart-data endpoint so verify() can wait on it.
    cy.route('POST', '/api/v1/chart/data*').as('getJson');
  });

  it('should work with ad-hoc metric', () => {
    verify(PIE_FORM_DATA);
    cy.get('.chart-container .pie canvas').should('have.length', 1);
  });

  it('should work with simple filter', () => {
    verify({
      ...PIE_FORM_DATA,
      adhoc_filters: [
        {
          expressionType: 'SIMPLE',
          subject: 'gender',
          operator: '==',
          comparator: 'boy',
          clause: 'WHERE',
          sqlExpression: null,
          filterOptionName: 'filter_tqx1en70hh_7nksse7nqic',
        },
      ],
    });
    cy.get('.chart-container .pie canvas').should('have.length', 1);
  });
});
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class FeedItem(scrapy.Item):
    """Scrapy item holding one scraped feed entry."""
    # required
    title = scrapy.Field()
    content = scrapy.Field()
    trims = scrapy.Field()
    url = scrapy.Field()
    # request url before redirect
    req_url = scrapy.Field()
    name = scrapy.Field()
    # optional
    image_urls = scrapy.Field()
    image_paths = scrapy.Field()
|
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2018 Dan Halbert for Adafruit Industries
* Copyright (c) 2018 Artur Pacholec
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "shared-bindings/_bleio/Connection.h"
#include <string.h>
#include <stdio.h>
#include "ble.h"
#include "ble_drv.h"
#include "ble_hci.h"
#include "nrf_soc.h"
#include "py/gc.h"
#include "py/objlist.h"
#include "py/objstr.h"
#include "py/qstr.h"
#include "py/runtime.h"
#include "shared-bindings/_bleio/__init__.h"
#include "shared-bindings/_bleio/Adapter.h"
#include "shared-bindings/_bleio/Attribute.h"
#include "shared-bindings/_bleio/Characteristic.h"
#include "shared-bindings/_bleio/Service.h"
#include "shared-bindings/_bleio/UUID.h"
#define BLE_ADV_LENGTH_FIELD_SIZE 1
#define BLE_ADV_AD_TYPE_FIELD_SIZE 1
#define BLE_AD_TYPE_FLAGS_DATA_SIZE 1
// Security parameters offered during pairing: bonding enabled, no MITM
// protection, no LE Secure Connections, no keyboard/display I/O, and
// encryption + identity key distribution in both directions.
static const ble_gap_sec_params_t pairing_sec_params = {
    .bond = 1,
    .mitm = 0,
    .lesc = 0,
    .keypress = 0,
    .oob = 0,
    .io_caps = BLE_GAP_IO_CAPS_NONE,
    .min_key_size = 7,
    .max_key_size = 16,
    .kdist_own = { .enc = 1, .id = 1},
    .kdist_peer = { .enc = 1, .id = 1},
};

// State shared with the SoftDevice event handlers during GATT discovery;
// volatile because it is written from the BLE event context.
static volatile bool m_discovery_in_process;
static volatile bool m_discovery_successful;
static bleio_service_obj_t *m_char_discovery_service;
static bleio_characteristic_obj_t *m_desc_discovery_characteristic;

// Debug switch: when true, every connection event id is printed.
bool dump_events = false;
// Per-connection BLE event handler. Returns true if the event belonged to
// this connection and was handled; false if it is for another connection or
// is not handled here (so other handlers may see it).
bool connection_on_ble_evt(ble_evt_t *ble_evt, void *self_in) {
    bleio_connection_internal_t *self = (bleio_connection_internal_t*)self_in;

    // GAP and GATTS events carry a connection handle; ignore events that
    // belong to a different connection than ours.
    if (BLE_GAP_EVT_BASE <= ble_evt->header.evt_id && ble_evt->header.evt_id <= BLE_GAP_EVT_LAST &&
        ble_evt->evt.gap_evt.conn_handle != self->conn_handle) {
        return false;
    }
    if (BLE_GATTS_EVT_BASE <= ble_evt->header.evt_id && ble_evt->header.evt_id <= BLE_GATTS_EVT_LAST &&
        ble_evt->evt.gatts_evt.conn_handle != self->conn_handle) {
        return false;
    }

    // For debugging.
    if (dump_events) {
        mp_printf(&mp_plat_print, "Connection event: 0x%04x\n", ble_evt->header.evt_id);
    }

    switch (ble_evt->header.evt_id) {
        case BLE_GAP_EVT_DISCONNECTED:
            break;
        case BLE_GAP_EVT_CONN_PARAM_UPDATE: // 0x12
            break;

        case BLE_GAP_EVT_PHY_UPDATE_REQUEST: {
            // Let the SoftDevice negotiate whichever PHY it prefers.
            ble_gap_phys_t const phys = {
                .rx_phys = BLE_GAP_PHY_AUTO,
                .tx_phys = BLE_GAP_PHY_AUTO,
            };
            sd_ble_gap_phy_update(ble_evt->evt.gap_evt.conn_handle, &phys);
            break;
        }

        case BLE_GAP_EVT_PHY_UPDATE: // 0x22
            break;

        case BLE_GAP_EVT_DATA_LENGTH_UPDATE_REQUEST:
            // SoftDevice will respond to a length update request.
            sd_ble_gap_data_length_update(self->conn_handle, NULL, NULL);
            break;

        case BLE_GAP_EVT_DATA_LENGTH_UPDATE: // 0x24
            break;

        case BLE_GATTS_EVT_EXCHANGE_MTU_REQUEST: {
            // We only handle MTU of size BLE_GATT_ATT_MTU_DEFAULT.
            sd_ble_gatts_exchange_mtu_reply(self->conn_handle, BLE_GATT_ATT_MTU_DEFAULT);
            break;
        }

        case BLE_GATTS_EVT_SYS_ATTR_MISSING:
            // Peer requested system attributes (e.g. CCCDs) we have not set;
            // initialize them fresh.
            sd_ble_gatts_sys_attr_set(self->conn_handle, NULL, 0, 0);
            break;

        case BLE_GATTS_EVT_HVN_TX_COMPLETE: // Capture this for now. 0x55
            break;

        case BLE_GAP_EVT_CONN_PARAM_UPDATE_REQUEST: {
            // Accept the peer's requested connection parameters verbatim.
            ble_gap_evt_conn_param_update_request_t *request =
                &ble_evt->evt.gap_evt.params.conn_param_update_request;
            sd_ble_gap_conn_param_update(self->conn_handle, &request->conn_params);
            break;
        }
        case BLE_GAP_EVT_SEC_PARAMS_REQUEST: {
            // Pairing started: hand the SoftDevice storage for the keys that
            // will be exchanged, kept in this connection's bonding_keys.
            ble_gap_sec_keyset_t keyset = {
                .keys_own = {
                    .p_enc_key  = &self->bonding_keys.own_enc,
                    .p_id_key   = NULL,
                    .p_sign_key = NULL,
                    .p_pk       = NULL
                },

                .keys_peer = {
                    .p_enc_key  = &self->bonding_keys.peer_enc,
                    .p_id_key   = &self->bonding_keys.peer_id,
                    .p_sign_key = NULL,
                    .p_pk       = NULL
                }
            };

            sd_ble_gap_sec_params_reply(self->conn_handle, BLE_GAP_SEC_STATUS_SUCCESS,
                &pairing_sec_params, &keyset);
            break;
        }

        case BLE_GAP_EVT_LESC_DHKEY_REQUEST:
            // TODO for LESC pairing:
            // sd_ble_gap_lesc_dhkey_reply(...);
            break;

        case BLE_GAP_EVT_AUTH_STATUS: { // 0x19
            // Pairing process completed: record the outcome so the busy-wait
            // in common_hal_bleio_connection_pair() can finish.
            ble_gap_evt_auth_status_t* status = &ble_evt->evt.gap_evt.params.auth_status;
            self->sec_status = status->auth_status;
            if (status->auth_status == BLE_GAP_SEC_STATUS_SUCCESS) {
                // TODO _ediv = bonding_keys->own_enc.master_id.ediv;
                self->pair_status = PAIR_PAIRED;
            } else {
                self->pair_status = PAIR_NOT_PAIRED;
            }
            break;
        }

        case BLE_GAP_EVT_SEC_INFO_REQUEST: { // 0x14
            // Peer asks for the stored keys.
            // - load key and return if bonded previously.
            // - Else return NULL --> Initiate key exchange
            ble_gap_evt_sec_info_request_t* sec_info_request = &ble_evt->evt.gap_evt.params.sec_info_request;
            (void) sec_info_request;
            //if ( bond_load_keys(_role, sec_req->master_id.ediv, &bkeys) ) {
            //sd_ble_gap_sec_info_reply(_conn_hdl, &bkeys.own_enc.enc_info, &bkeys.peer_id.id_info, NULL);
            //
            //_ediv = bkeys.own_enc.master_id.ediv;
            // } else {
            sd_ble_gap_sec_info_reply(self->conn_handle, NULL, NULL, NULL);
            // }
            break;
        }

        case BLE_GAP_EVT_CONN_SEC_UPDATE: { // 0x1a
            ble_gap_conn_sec_t* conn_sec = &ble_evt->evt.gap_evt.params.conn_sec_update.conn_sec;
            if (conn_sec->sec_mode.sm <= 1 && conn_sec->sec_mode.lv <= 1) {
                // Security setup did not succeed:
                // mode 0, level 0 means no access
                // mode 1, level 1 means open link
                // mode >=1 and/or level >=1 means encryption is set up
                self->pair_status = PAIR_NOT_PAIRED;
            } else {
                //if ( !bond_load_cccd(_role, _conn_hdl, _ediv) ) {
                if (true) {             // TODO: no bonding yet
                    // Initialize system attributes fresh.
                    sd_ble_gatts_sys_attr_set(self->conn_handle, NULL, 0, 0);
                }
                // Not quite paired yet: wait for BLE_GAP_EVT_AUTH_STATUS SUCCESS.
                self->ediv = self->bonding_keys.own_enc.master_id.ediv;
            }
            break;
        }

        default:
            // For debugging.
            if (dump_events) {
                mp_printf(&mp_plat_print, "Unhandled connection event: 0x%04x\n", ble_evt->header.evt_id);
            }
            return false;
    }
    return true;
}
// Reset a connection record to "no active connection, not paired", and wipe
// any bonding keys left over from a previous pairing.
void bleio_connection_clear(bleio_connection_internal_t *self) {
    self->conn_handle = BLE_CONN_HANDLE_INVALID;
    self->pair_status = PAIR_NOT_PAIRED;
    self->remote_service_list = NULL;
    memset(&self->bonding_keys, 0, sizeof(self->bonding_keys));
}
// A Connection object counts as connected only when it is backed by an
// internal record that still holds a valid connection handle.
bool common_hal_bleio_connection_get_connected(bleio_connection_obj_t *self) {
    bleio_connection_internal_t *internal = self->connection;
    return internal != NULL && internal->conn_handle != BLE_CONN_HANDLE_INVALID;
}
// Ask the SoftDevice to terminate the link with a "remote user terminated"
// reason. Teardown completes asynchronously when BLE_GAP_EVT_DISCONNECTED
// arrives; the SoftDevice return code is ignored here.
void common_hal_bleio_connection_disconnect(bleio_connection_internal_t *self) {
    sd_ble_gap_disconnect(self->conn_handle, BLE_HCI_REMOTE_USER_TERMINATED_CONNECTION);
}
// Initiate pairing with the peer and block until the process resolves.
// pair_status transitions out of PAIR_WAITING in connection_on_ble_evt()
// (BLE_GAP_EVT_AUTH_STATUS / BLE_GAP_EVT_CONN_SEC_UPDATE). Raises via
// check_nrf_error() on SoftDevice failure and via check_sec_status() if the
// final security status is not success.
void common_hal_bleio_connection_pair(bleio_connection_internal_t *self) {
    self->pair_status = PAIR_WAITING;

    check_nrf_error(sd_ble_gap_authenticate(self->conn_handle, &pairing_sec_params));

    // Busy-wait; background tasks keep BLE events flowing so pair_status
    // can change.
    while (self->pair_status == PAIR_WAITING) {
        RUN_BACKGROUND_TASKS;
    }
    check_sec_status(self->sec_status);
}
// service_uuid may be NULL, to discover all services.
// Kick off one round of primary-service discovery starting at start_handle,
// then busy-wait until on_primary_srv_discovery_rsp() (or a disconnect)
// clears m_discovery_in_process. Returns true if at least one service was
// reported in this round.
STATIC bool discover_next_services(bleio_connection_internal_t* connection, uint16_t start_handle, ble_uuid_t *service_uuid) {
    m_discovery_successful = false;
    m_discovery_in_process = true;

    // NOTE(review): unlike the characteristic/descriptor variants below,
    // this raises on a SoftDevice error instead of returning false —
    // confirm the asymmetry is intentional.
    check_nrf_error(sd_ble_gattc_primary_services_discover(connection->conn_handle,
        start_handle, service_uuid));

    // Wait for a discovery event.
    while (m_discovery_in_process) {
        MICROPY_VM_HOOK_LOOP;
    }
    return m_discovery_successful;
}
// Ask the SoftDevice for the next batch of characteristics in `service`,
// starting at `start_handle` and ending at the service's end handle.
// Blocks until the discovery response arrives; returns true if at least
// one characteristic was reported, false on SoftDevice error or an
// empty response.
STATIC bool discover_next_characteristics(bleio_connection_internal_t* connection, bleio_service_obj_t *service, uint16_t start_handle) {
    m_char_discovery_service = service;

    const ble_gattc_handle_range_t handle_range = {
        .start_handle = start_handle,
        .end_handle = service->end_handle,
    };

    m_discovery_successful = false;
    m_discovery_in_process = true;

    if (sd_ble_gattc_characteristics_discover(connection->conn_handle, &handle_range) != NRF_SUCCESS) {
        return false;
    }

    // Spin until discovery_on_ble_evt() clears the in-process flag.
    while (m_discovery_in_process) {
        MICROPY_VM_HOOK_LOOP;
    }
    return m_discovery_successful;
}
// Ask the SoftDevice for the next batch of descriptors belonging to
// `characteristic`, within [start_handle, end_handle]. Blocks until the
// discovery response arrives; returns true if at least one descriptor was
// reported, false on SoftDevice error or an empty response.
STATIC bool discover_next_descriptors(bleio_connection_internal_t* connection, bleio_characteristic_obj_t *characteristic, uint16_t start_handle, uint16_t end_handle) {
    m_desc_discovery_characteristic = characteristic;

    const ble_gattc_handle_range_t handle_range = {
        .start_handle = start_handle,
        .end_handle = end_handle,
    };

    m_discovery_successful = false;
    m_discovery_in_process = true;

    if (sd_ble_gattc_descriptors_discover(connection->conn_handle, &handle_range) != NRF_SUCCESS) {
        return false;
    }

    // Spin until discovery_on_ble_evt() clears the in-process flag.
    while (m_discovery_in_process) {
        MICROPY_VM_HOOK_LOOP;
    }
    return m_discovery_successful;
}
// Handle a primary-service discovery response: wrap each reported service in
// a bleio_service_obj_t and prepend it to the connection's
// remote_service_list, so the most recently discovered service ends up at
// the head — discover_remote_services() relies on that to advance its start
// handle. Finally signal the busy-wait in discover_next_services().
STATIC void on_primary_srv_discovery_rsp(ble_gattc_evt_prim_srvc_disc_rsp_t *response, bleio_connection_internal_t* connection) {
    bleio_service_obj_t* tail = connection->remote_service_list;

    for (size_t i = 0; i < response->count; ++i) {
        ble_gattc_service_t *gattc_service = &response->services[i];

        bleio_service_obj_t *service = m_new_obj(bleio_service_obj_t);
        service->base.type = &bleio_service_type;

        // Initialize several fields at once.
        bleio_service_from_connection(service, bleio_connection_new_from_internal(connection));

        service->is_remote = true;
        service->start_handle = gattc_service->handle_range.start_handle;
        service->end_handle = gattc_service->handle_range.end_handle;
        service->handle = gattc_service->handle_range.start_handle;

        if (gattc_service->uuid.type != BLE_UUID_TYPE_UNKNOWN) {
            // Known service UUID.
            bleio_uuid_obj_t *uuid = m_new_obj(bleio_uuid_obj_t);
            uuid->base.type = &bleio_uuid_type;
            bleio_uuid_construct_from_nrf_ble_uuid(uuid, &gattc_service->uuid);
            service->uuid = uuid;
        } else {
            // The discovery response contained a 128-bit UUID that has not yet been registered with the
            // softdevice via sd_ble_uuid_vs_add(). We need to fetch the 128-bit value and register it.
            // For now, just set the UUID to NULL.
            service->uuid = NULL;
        }

        // Prepend to the list.
        service->next = tail;
        tail = service;
    }
    connection->remote_service_list = tail;

    // An empty response means discovery has run past the last service.
    if (response->count > 0) {
        m_discovery_successful = true;
    }
    // Release the busy-wait in discover_next_services().
    m_discovery_in_process = false;
}
// Handle a characteristic discovery response: build a
// bleio_characteristic_obj_t for each reported characteristic, translate the
// GATT property bits, and append it to the characteristic_list of the
// service currently being discovered (m_char_discovery_service). Finally
// signal the busy-wait in discover_next_characteristics().
STATIC void on_char_discovery_rsp(ble_gattc_evt_char_disc_rsp_t *response, bleio_connection_internal_t* connection) {
    for (size_t i = 0; i < response->count; ++i) {
        ble_gattc_char_t *gattc_char = &response->chars[i];

        bleio_characteristic_obj_t *characteristic = m_new_obj(bleio_characteristic_obj_t);
        characteristic->base.type = &bleio_characteristic_type;

        bleio_uuid_obj_t *uuid = NULL;

        if (gattc_char->uuid.type != BLE_UUID_TYPE_UNKNOWN) {
            // Known characteristic UUID.
            uuid = m_new_obj(bleio_uuid_obj_t);
            uuid->base.type = &bleio_uuid_type;
            bleio_uuid_construct_from_nrf_ble_uuid(uuid, &gattc_char->uuid);
        } else {
            // The discovery response contained a 128-bit UUID that has not yet been registered with the
            // softdevice via sd_ble_uuid_vs_add(). We need to fetch the 128-bit value and register it.
            // For now, just leave the UUID as NULL.
        }

        // Map the nRF property bitfield onto _bleio's property flags.
        bleio_characteristic_properties_t props =
            (gattc_char->char_props.broadcast ? CHAR_PROP_BROADCAST : 0) |
            (gattc_char->char_props.indicate ? CHAR_PROP_INDICATE : 0) |
            (gattc_char->char_props.notify ? CHAR_PROP_NOTIFY : 0) |
            (gattc_char->char_props.read ? CHAR_PROP_READ : 0) |
            (gattc_char->char_props.write ? CHAR_PROP_WRITE : 0) |
            (gattc_char->char_props.write_wo_resp ? CHAR_PROP_WRITE_NO_RESPONSE : 0);

        // Call common_hal_bleio_characteristic_construct() to initalize some fields and set up evt handler.
        common_hal_bleio_characteristic_construct(
            characteristic, m_char_discovery_service, gattc_char->handle_value, uuid,
            props, SECURITY_MODE_OPEN, SECURITY_MODE_OPEN,
            GATT_MAX_DATA_LENGTH, false, // max_length, fixed_length: values may not matter for gattc
            NULL);

        mp_obj_list_append(m_char_discovery_service->characteristic_list, MP_OBJ_FROM_PTR(characteristic));
    }

    // An empty response means discovery has run past the last characteristic.
    if (response->count > 0) {
        m_discovery_successful = true;
    }
    // Release the busy-wait in discover_next_characteristics().
    m_discovery_in_process = false;
}
// Handle a descriptor discovery response: record the handles of well-known
// descriptors (CCCD/SCCD/user description) directly on the characteristic
// being discovered, wrap each descriptor in a bleio_descriptor_obj_t, and
// append it to the characteristic's descriptor_list. Finally signal the
// busy-wait in discover_next_descriptors().
STATIC void on_desc_discovery_rsp(ble_gattc_evt_desc_disc_rsp_t *response, bleio_connection_internal_t* connection) {
    for (size_t i = 0; i < response->count; ++i) {
        ble_gattc_desc_t *gattc_desc = &response->descs[i];

        // Remember handles for certain well-known descriptors.
        switch (gattc_desc->uuid.uuid) {
            case BLE_UUID_DESCRIPTOR_CLIENT_CHAR_CONFIG:
                m_desc_discovery_characteristic->cccd_handle = gattc_desc->handle;
                break;

            case BLE_UUID_DESCRIPTOR_SERVER_CHAR_CONFIG:
                m_desc_discovery_characteristic->sccd_handle = gattc_desc->handle;
                break;

            case BLE_UUID_DESCRIPTOR_CHAR_USER_DESC:
                m_desc_discovery_characteristic->user_desc_handle = gattc_desc->handle;
                break;

            default:
                // TODO: sd_ble_gattc_descriptors_discover() can return things that are not descriptors,
                // so ignore those.
                // https://devzone.nordicsemi.com/f/nordic-q-a/49500/sd_ble_gattc_descriptors_discover-is-returning-attributes-that-are-not-descriptors
                break;
        }

        bleio_descriptor_obj_t *descriptor = m_new_obj(bleio_descriptor_obj_t);
        descriptor->base.type = &bleio_descriptor_type;

        bleio_uuid_obj_t *uuid = NULL;

        if (gattc_desc->uuid.type != BLE_UUID_TYPE_UNKNOWN) {
            // Known descriptor UUID.
            uuid = m_new_obj(bleio_uuid_obj_t);
            uuid->base.type = &bleio_uuid_type;
            bleio_uuid_construct_from_nrf_ble_uuid(uuid, &gattc_desc->uuid);
        } else {
            // The discovery response contained a 128-bit UUID that has not yet been registered with the
            // softdevice via sd_ble_uuid_vs_add(). We need to fetch the 128-bit value and register it.
            // For now, just leave the UUID as NULL.
        }

        common_hal_bleio_descriptor_construct(
            descriptor, m_desc_discovery_characteristic, uuid,
            SECURITY_MODE_OPEN, SECURITY_MODE_OPEN,
            GATT_MAX_DATA_LENGTH, false, mp_const_empty_bytes);
        descriptor->handle = gattc_desc->handle;

        mp_obj_list_append(m_desc_discovery_characteristic->descriptor_list, MP_OBJ_FROM_PTR(descriptor));
    }

    // An empty response means discovery has run past the last descriptor.
    if (response->count > 0) {
        m_discovery_successful = true;
    }
    // Release the busy-wait in discover_next_descriptors().
    m_discovery_in_process = false;
}
// Event handler installed only for the duration of remote-service discovery.
// Routes discovery responses to the on_*_rsp handlers above, and aborts an
// in-flight discovery if the peer disconnects. Returns true if the event was
// consumed, false otherwise.
//
// Fix: removed the unreachable `break` that followed `return false` in the
// default case (dead code).
STATIC bool discovery_on_ble_evt(ble_evt_t *ble_evt, mp_obj_t payload) {
    bleio_connection_internal_t* connection = MP_OBJ_TO_PTR(payload);
    switch (ble_evt->header.evt_id) {
        case BLE_GAP_EVT_DISCONNECTED:
            // Link dropped mid-discovery: report failure and release the
            // busy-wait in the discover_next_* functions.
            m_discovery_successful = false;
            m_discovery_in_process = false;
            break;

        case BLE_GATTC_EVT_PRIM_SRVC_DISC_RSP:
            on_primary_srv_discovery_rsp(&ble_evt->evt.gattc_evt.params.prim_srvc_disc_rsp, connection);
            break;

        case BLE_GATTC_EVT_CHAR_DISC_RSP:
            on_char_discovery_rsp(&ble_evt->evt.gattc_evt.params.char_disc_rsp, connection);
            break;

        case BLE_GATTC_EVT_DESC_DISC_RSP:
            on_desc_discovery_rsp(&ble_evt->evt.gattc_evt.params.desc_disc_rsp, connection);
            break;

        default:
            // For debugging.
            // mp_printf(&mp_plat_print, "Unhandled discovery event: 0x%04x\n", ble_evt->header.evt_id);
            return false;
    }
    return true;
}
// Discover the peer's services, characteristics, and descriptors, building
// them into self->remote_service_list. If service_uuids_whitelist is None,
// all services are discovered; otherwise only the listed UUIDs are probed.
// Raises TypeError if the whitelist contains a non-UUID.
//
// Fix: pass NULL (a ble_uuid_t *) instead of MP_OBJ_NULL (an mp_obj_t) for
// the "discover all services" case — MP_OBJ_NULL is not a pointer of the
// expected type.
STATIC void discover_remote_services(bleio_connection_internal_t *self, mp_obj_t service_uuids_whitelist) {
    ble_drv_add_event_handler(discovery_on_ble_evt, self);

    // Start over with an empty list.
    self->remote_service_list = NULL;

    if (service_uuids_whitelist == mp_const_none) {
        // List of service UUID's not given, so discover all available services.
        uint16_t next_service_start_handle = BLE_GATT_HANDLE_START;

        while (discover_next_services(self, next_service_start_handle, NULL)) {
            // discover_next_services() prepends to remote_service_list, so the
            // head is the most recently discovered service. Ask for services
            // whose handles start after the last attribute handle inside it.
            const bleio_service_obj_t *service = self->remote_service_list;
            next_service_start_handle = service->end_handle + 1;
        }
    } else {
        mp_obj_iter_buf_t iter_buf;
        mp_obj_t iterable = mp_getiter(service_uuids_whitelist, &iter_buf);
        mp_obj_t uuid_obj;
        while ((uuid_obj = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
            if (!MP_OBJ_IS_TYPE(uuid_obj, &bleio_uuid_type)) {
                mp_raise_TypeError(translate("non-UUID found in service_uuids_whitelist"));
            }
            bleio_uuid_obj_t *uuid = MP_OBJ_TO_PTR(uuid_obj);

            ble_uuid_t nrf_uuid;
            bleio_uuid_convert_to_nrf_ble_uuid(uuid, &nrf_uuid);

            // Service might or might not be discovered; that's ok. Caller has to check
            // Central.remote_services to find out.
            // We only need to call this once for each service to discover.
            discover_next_services(self, BLE_GATT_HANDLE_START, &nrf_uuid);
        }
    }

    // Now, for each discovered service, discover its characteristics and
    // then each characteristic's descriptors.
    bleio_service_obj_t *service = self->remote_service_list;
    while (service != NULL) {
        // Skip the service if it had an unknown (unregistered) UUID.
        if (service->uuid == NULL) {
            service = service->next;
            continue;
        }

        uint16_t next_char_start_handle = service->start_handle;

        // Stop when we go past the end of the range of handles for this service or
        // discovery call returns nothing.
        // discover_next_characteristics() appends to the characteristic_list.
        while (next_char_start_handle <= service->end_handle &&
               discover_next_characteristics(self, service, next_char_start_handle)) {
            // Get the most recently discovered characteristic, and then ask for characteristics
            // whose handles start after the last attribute handle inside that characteristic.
            const bleio_characteristic_obj_t *characteristic =
                MP_OBJ_TO_PTR(service->characteristic_list->items[service->characteristic_list->len - 1]);
            next_char_start_handle = characteristic->handle + 1;
        }

        // Got characteristics for this service. Now discover descriptors for each characteristic.
        size_t char_list_len = service->characteristic_list->len;
        for (size_t char_idx = 0; char_idx < char_list_len; ++char_idx) {
            bleio_characteristic_obj_t *characteristic =
                MP_OBJ_TO_PTR(service->characteristic_list->items[char_idx]);
            const bool last_characteristic = char_idx == char_list_len - 1;
            bleio_characteristic_obj_t *next_characteristic = last_characteristic
                ? NULL
                : MP_OBJ_TO_PTR(service->characteristic_list->items[char_idx + 1]);

            // Skip the characteristic if it had an unknown (unregistered) UUID.
            if (characteristic->uuid == NULL) {
                continue;
            }

            uint16_t next_desc_start_handle = characteristic->handle + 1;

            // Don't run past the end of this service or the beginning of the next characteristic.
            uint16_t next_desc_end_handle = next_characteristic == NULL
                ? service->end_handle
                : next_characteristic->handle - 1;

            // Stop when we go past the end of the range of handles for this service or
            // discovery call returns nothing.
            // discover_next_descriptors() appends to the descriptor_list.
            while (next_desc_start_handle <= service->end_handle &&
                   next_desc_start_handle < next_desc_end_handle &&
                   discover_next_descriptors(self, characteristic,
                       next_desc_start_handle, next_desc_end_handle)) {
                // Get the most recently discovered descriptor, and then ask for descriptors
                // whose handles start after that descriptor's handle.
                const bleio_descriptor_obj_t *descriptor = characteristic->descriptor_list;
                next_desc_start_handle = descriptor->handle + 1;
            }
        }
        service = service->next;
    }
    // This event handler is no longer needed.
    ble_drv_remove_event_handler(discovery_on_ble_evt, self);
}
// Run remote-service discovery on this connection and hand back the results
// as a tuple. The internal linked list is cleared afterward so ownership of
// the discovered services transfers to the returned tuple.
mp_obj_tuple_t *common_hal_bleio_connection_discover_remote_services(bleio_connection_obj_t *self, mp_obj_t service_uuids_whitelist) {
    bleio_connection_internal_t *internal = self->connection;

    discover_remote_services(internal, service_uuids_whitelist);

    mp_obj_tuple_t *services_tuple = service_linked_list_to_tuple(internal->remote_service_list);
    internal->remote_service_list = NULL;
    return services_tuple;
}
// Return the raw SoftDevice connection handle for this Connection, or
// BLE_CONN_HANDLE_INVALID if the object is NULL or no longer backed by an
// internal connection record.
uint16_t bleio_connection_get_conn_handle(bleio_connection_obj_t *self) {
    if (self != NULL && self->connection != NULL) {
        return self->connection->conn_handle;
    }
    return BLE_CONN_HANDLE_INVALID;
}
// Return the Python Connection object wrapping `internal`, creating and
// caching it on first use so every call yields the same object.
mp_obj_t bleio_connection_new_from_internal(bleio_connection_internal_t* internal) {
    mp_obj_t cached = internal->connection_obj;
    if (cached != mp_const_none) {
        return cached;
    }

    bleio_connection_obj_t *obj = m_new_obj(bleio_connection_obj_t);
    obj->base.type = &bleio_connection_type;
    obj->connection = internal;
    internal->connection_obj = obj;

    return MP_OBJ_FROM_PTR(obj);
}
|
/*
* Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* Copyright (c) 1982, 1986, 1988, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)raw_ip.c 8.7 (Berkeley) 5/15/95
*/
/*
* NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
* support for mandatory and extensible security protections. This notice
* is included in support of clause 2.2 (b) of the Apple Public License,
* Version 2.0.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>
#include <pexpert/pexpert.h>
#include <net/if.h>
#include <net/net_api_stats.h>
#include <net/route.h>
#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_tclass.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#if INET6
#include <netinet6/in6_pcb.h>
#endif /* INET6 */
#include <netinet/ip_fw.h>
#if IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/
#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif
#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* MAC_NET */
/* Forward declarations for the raw-IP protocol switch entry points. */
int load_ipfw(void);
int rip_detach(struct socket *);
int rip_abort(struct socket *);
int rip_disconnect(struct socket *);
int rip_bind(struct socket *, struct sockaddr *, struct proc *);
int rip_connect(struct socket *, struct sockaddr *, struct proc *);
int rip_shutdown(struct socket *);

/* List of all raw-IP protocol control blocks, plus its bookkeeping. */
struct inpcbhead ripcb;
struct inpcbinfo ripcbinfo;

/* control hooks for ipfw and dummynet */
#if IPFIREWALL
ip_fw_ctl_t *ip_fw_ctl_ptr;
#endif /* IPFIREWALL */
#if DUMMYNET
ip_dn_ctl_t *ip_dn_ctl_ptr;
#endif /* DUMMYNET */

/*
 * Nominal space allocated to a raw ip socket.
 */
#define	RIPSNDQ		8192
#define	RIPRCVQ		8192
/*
* Raw interface to IP protocol.
*/
/*
 * Initialize raw connection block q.
 *
 * Called once per protosw attach; guarded so the global PCB list, hash
 * tables, zone, and lock group are only created the first time.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
	static int rip_initialized = 0;
	struct inpcbinfo *pcbinfo;

	VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);

	if (rip_initialized)
		return;
	rip_initialized = 1;

	LIST_INIT(&ripcb);
	ripcbinfo.ipi_listhead = &ripcb;
	/*
	 * XXX We don't use the hash list for raw IP, but it's easier
	 * to allocate a one entry hash list than it is to check all
	 * over the place for ipi_hashbase == NULL.
	 */
	ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
	ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask);

	/* Zone for inpcb allocations, sized in 4096-PCB chunks. */
	ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
	    (4096 * sizeof(struct inpcb)), 4096, "ripzone");

	pcbinfo = &ripcbinfo;
	/*
	 * allocate lock group attribute and group for udp pcb mutexes
	 */
	pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb", pcbinfo->ipi_lock_grp_attr);

	/*
	 * allocate the lock attribute for udp pcb mutexes
	 */
	pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
	if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
	    pcbinfo->ipi_lock_attr)) == NULL) {
		panic("%s: unable to allocate PCB lock\n", __func__);
		/* NOTREACHED */
	}

	/* Register this pcbinfo with the global inpcb garbage collector. */
	in_pcbinfo_attach(&ripcbinfo);
}
/*
 * Template source address passed up with each delivered raw packet;
 * sin_addr is filled in per-packet by rip_input().
 * NOTE(review): ripsrc.sin_addr is written while rip_input() holds only the
 * shared (read) side of ipi_lock — confirm that concurrent rip_input calls
 * are serialized elsewhere, otherwise this global can race.
 */
static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET , 0, {0}, {0,0,0,0,0,0,0,0,} };
/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
 *
 * Deliver the incoming datagram `m` (IP header still attached; `iphlen` is
 * its length) to every raw PCB whose protocol, local address, and foreign
 * address match. `m` is consumed: each matching PCB except the last receives
 * an mbuf copy; the final match receives `m` itself, or `m` is freed if
 * nothing matched.
 */
void
rip_input(struct mbuf *m, int iphlen)
{
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp;
	struct inpcb *last = 0;
	struct mbuf *opts = 0;
	int skipit = 0, ret = 0;
	struct ifnet *ifp = m->m_pkthdr.rcvif;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ripsrc.sin_addr = ip->ip_src;
	lck_rw_lock_shared(ripcbinfo.ipi_lock);
	LIST_FOREACH(inp, &ripcb, inp_list) {
#if INET6
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		/* Skip PCBs whose protocol or address bindings don't match. */
		if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p))
			continue;
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (inp_restricted_recv(inp, ifp))
			continue;
		if (last) {
			/*
			 * A previous PCB also matched: deliver a copy to it
			 * now and keep `m` for the next (or final) match.
			 */
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

			skipit = 0;

#if NECP
			if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
			    &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
				m_freem(n);
				/* do not inject data to pcb */
				skipit = 1;
			}
#endif /* NECP */
#if CONFIG_MACF_NET
			if (n && skipit == 0) {
				if (mac_inpcb_check_deliver(last, n, AF_INET,
				    SOCK_RAW) != 0) {
					m_freem(n);
					skipit = 1;
				}
			}
#endif
			if (n && skipit == 0) {
				int error = 0;
				if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
				    (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
					ret = ip_savecontrol(last, &opts, ip, n);
					if (ret != 0) {
						m_freem(n);
						m_freem(opts);
						last = inp;
						continue;
					}
				}
				/* Strip the IP header if the socket asked for it. */
				if (last->inp_flags & INP_STRIPHDR) {
					n->m_len -= iphlen;
					n->m_pkthdr.len -= iphlen;
					n->m_data += iphlen;
				}
				so_recv_data_stat(last->inp_socket, m, 0);
				if (sbappendaddr(&last->inp_socket->so_rcv,
				    (struct sockaddr *)&ripsrc, n,
				    opts, &error) != 0) {
					sorwakeup(last->inp_socket);
				} else {
					if (error) {
						/* should notify about lost packet */
						ipstat.ips_raw_sappend_fail++;
					}
				}
				opts = 0;
			}
		}
		last = inp;
	}

	/*
	 * Deliver the original mbuf to the last matching PCB (if any),
	 * running the same NECP/MACF checks as above.
	 */
	skipit = 0;
#if NECP
	if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0,
	    &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) {
		m_freem(m);
		OSAddAtomic(1, &ipstat.ips_delivered);
		/* do not inject data to pcb */
		skipit = 1;
	}
#endif /* NECP */
#if CONFIG_MACF_NET
	if (last && skipit == 0) {
		if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) {
			skipit = 1;
			m_freem(m);
		}
	}
#endif
	if (skipit == 0) {
		if (last) {
			if ((last->inp_flags & INP_CONTROLOPTS) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 ||
			    (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
				ret = ip_savecontrol(last, &opts, ip, m);
				if (ret != 0) {
					m_freem(m);
					m_freem(opts);
					goto unlock;
				}
			}
			if (last->inp_flags & INP_STRIPHDR) {
				m->m_len -= iphlen;
				m->m_pkthdr.len -= iphlen;
				m->m_data += iphlen;
			}
			so_recv_data_stat(last->inp_socket, m, 0);
			if (sbappendaddr(&last->inp_socket->so_rcv,
			    (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) {
				sorwakeup(last->inp_socket);
			} else {
				ipstat.ips_raw_sappend_fail++;
			}
		} else {
			/* No matching raw socket: drop and adjust counters. */
			m_freem(m);
			OSAddAtomic(1, &ipstat.ips_noproto);
			OSAddAtomic(-1, &ipstat.ips_delivered);
		}
	}
unlock:
	/*
	 * Keep the list locked because socket filter may force the socket lock
	 * to be released when calling sbappendaddr() -- see rdar://7627704
	 */
	lck_rw_done(ripcbinfo.ipi_lock);
}
/*
 * Generate IP header and pass packet to ip_output.
 * Tack on options user may have setup with control call.
 *
 * `m` is the payload (or a complete IP packet when INP_HDRINCL is set),
 * `dst` is the destination address in network byte order, and `control`
 * may carry traffic-class options; both mbufs are consumed. Returns 0 or
 * an errno.
 */
int
rip_output(
	struct mbuf *m,
	struct socket *so,
	u_int32_t dst,
	struct mbuf *control)
{
	struct ip *ip;
	struct inpcb *inp = sotoinpcb(so);
	int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST;
	struct ip_out_args ipoa;
	struct ip_moptions *imo;
	int error = 0;

	bzero(&ipoa, sizeof(ipoa));
	ipoa.ipoa_boundif = IFSCOPE_NONE;
	ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;

	int sotc = SO_TC_UNSPEC;
	int netsvctype = _NET_SERVICE_TYPE_UNSPEC;

	/*
	 * Traffic class / net service type come from the control mbuf when
	 * present, otherwise from the socket's own settings.
	 */
	if (control != NULL) {
		sotc = so_tc_from_control(control, &netsvctype);

		m_freem(control);
		control = NULL;
	}
	if (sotc == SO_TC_UNSPEC) {
		sotc = so->so_traffic_class;
		netsvctype = so->so_netsvctype;
	}

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    ) {
		if (m != NULL)
			m_freem(m);
		VERIFY(control == NULL);
		return (inp == NULL ? EINVAL : EPROTOTYPE);
	}

	flags |= IP_OUTARGS;
	/* If socket was bound to an ifindex, tell ip_output about it */
	if (inp->inp_flags & INP_BOUND_IF) {
		ipoa.ipoa_boundif = inp->inp_boundifp->if_index;
		ipoa.ipoa_flags |= IPOAF_BOUND_IF;
	}
	if (INP_NO_CELLULAR(inp))
		ipoa.ipoa_flags |= IPOAF_NO_CELLULAR;
	if (INP_NO_EXPENSIVE(inp))
		ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE;
	if (INP_AWDL_UNRESTRICTED(inp))
		ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED;
	ipoa.ipoa_sotc = sotc;
	ipoa.ipoa_netsvctype = netsvctype;

	if (inp->inp_flowhash == 0)
		inp->inp_flowhash = inp_calc_flowhash(inp);

	/*
	 * If the user handed us a complete IP packet, use it.
	 * Otherwise, allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_WAIT, 1);
		if (m == NULL)
			return ENOBUFS;
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return(EMSGSIZE);
		}
		ip = mtod(m, struct ip *);
		/* don't allow both user specified and setsockopt options,
		   and don't allow packet length sizes that will crash */
		if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2))
		    && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) {
			m_freem(m);
			return EINVAL;
		}
		/* Assign an IP id unless RFC 6864 allows an atomic datagram to skip it. */
		if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))))
			ip->ip_id = ip_randomid();
		/* XXX prevent ip_output from overwriting header fields */
		flags |= IP_RAWOUTPUT;
		OSAddAtomic(1, &ipstat.ips_rawout);
	}

	if (inp->inp_laddr.s_addr != INADDR_ANY)
		ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR;

#if NECP
	{
		necp_kernel_policy_id policy_id;
		necp_kernel_policy_id skip_policy_id;
		u_int32_t route_rule_id;

		/*
		 * We need a route to perform NECP route rule checks
		 */
		if (net_qos_policy_restricted != 0 &&
		    ROUTE_UNUSABLE(&inp->inp_route)) {
			struct sockaddr_in to;
			struct sockaddr_in from;
			struct in_addr laddr = ip->ip_src;

			ROUTE_RELEASE(&inp->inp_route);

			bzero(&from, sizeof(struct sockaddr_in));
			from.sin_family = AF_INET;
			from.sin_len = sizeof(struct sockaddr_in);
			from.sin_addr = laddr;

			bzero(&to, sizeof(struct sockaddr_in));
			to.sin_family = AF_INET;
			to.sin_len = sizeof(struct sockaddr_in);
			to.sin_addr.s_addr = ip->ip_dst.s_addr;

			if ((error = in_pcbladdr(inp, (struct sockaddr *)&to,
			    &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) {
				printf("%s in_pcbladdr(%p) error %d\n",
				    __func__, inp, error);
				m_freem(m);
				return (error);
			}

			inp_update_necp_policy(inp, (struct sockaddr *)&from,
			    (struct sockaddr *)&to, ipoa.ipoa_boundif);
			inp->inp_policyresult.results.qos_marking_gencount = 0;
		}

		if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0,
		    &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) {
			m_freem(m);
			return(EHOSTUNREACH);
		}

		necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id);

		if (net_qos_policy_restricted != 0) {
			/*
			 * NOTE(review): rt_ifp is computed but never used
			 * below — looks like dead code from a refactor;
			 * confirm before removing.
			 */
			struct ifnet *rt_ifp = NULL;

			if (inp->inp_route.ro_rt != NULL)
				rt_ifp = inp->inp_route.ro_rt->rt_ifp;

			necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt,
			    NULL, route_rule_id);
		}
	}
#endif /* NECP */
	if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED))
		ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED;

#if IPSEC
	if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) {
		m_freem(m);
		return ENOBUFS;
	}
#endif /*IPSEC*/

	if (ROUTE_UNUSABLE(&inp->inp_route))
		ROUTE_RELEASE(&inp->inp_route);

	/* Tag the packet with flow and attribution metadata. */
	set_packet_service_class(m, so, sotc, 0);
	m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB;
	m->m_pkthdr.pkt_flowid = inp->inp_flowhash;
	m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC |
	    PKTF_FLOW_RAWSOCK);
	m->m_pkthdr.pkt_proto = inp->inp_ip_p;
	m->m_pkthdr.tx_rawip_pid = so->last_pid;
	/*
	 * NOTE(review): this assignment is immediately overwritten by both
	 * branches of the conditional below — dead store.
	 */
	m->m_pkthdr.tx_rawip_e_pid = so->e_pid;
	if (so->so_flags & SOF_DELEGATED)
		m->m_pkthdr.tx_rawip_e_pid = so->e_pid;
	else
		m->m_pkthdr.tx_rawip_e_pid = 0;

#if CONFIG_MACF_NET
	mac_mbuf_label_associate_inpcb(inp, m);
#endif

	imo = inp->inp_moptions;
	if (imo != NULL)
		IMO_ADDREF(imo);
	/*
	 * The domain lock is held across ip_output, so it is okay
	 * to pass the PCB cached route pointer directly to IP and
	 * the modules beneath it.
	 */
	// TODO: PASS DOWN ROUTE RULE ID
	error = ip_output(m, inp->inp_options, &inp->inp_route, flags,
	    imo, &ipoa);

	if (imo != NULL)
		IMO_REMREF(imo);

	if (inp->inp_route.ro_rt != NULL) {
		struct rtentry *rt = inp->inp_route.ro_rt;
		struct ifnet *outif;

		if ((rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) ||
		    inp->inp_socket == NULL ||
		    !(inp->inp_socket->so_state & SS_ISCONNECTED)) {
			rt = NULL;	/* unusable */
		}
		/*
		 * Always discard the cached route for unconnected
		 * socket or if it is a multicast route.
		 */
		if (rt == NULL)
			ROUTE_RELEASE(&inp->inp_route);

		/*
		 * If this is a connected socket and the destination
		 * route is unicast, update outif with that of the
		 * route interface used by IP.
		 */
		if (rt != NULL &&
		    (outif = rt->rt_ifp) != inp->inp_last_outifp) {
			inp->inp_last_outifp = outif;
		}
	} else {
		ROUTE_RELEASE(&inp->inp_route);
	}

	/*
	 * If output interface was cellular/expensive, and this socket is
	 * denied access to it, generate an event.
	 */
	if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) &&
	    (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp)))
		soevent(so, (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED));

	return (error);
}
#if IPFIREWALL
/*
 * Lazily initialize the ipfw subsystem (and dummynet, when built in).
 * Returns 0 on success, or -1 if ipfw_init() ran but never registered
 * its control handler (ip_fw_ctl_ptr still NULL).
 */
int
load_ipfw(void)
{
	kern_return_t err;

	/* installs ip_fw_ctl_ptr as a side effect */
	ipfw_init();

#if DUMMYNET
	if (!DUMMYNET_LOADED)
		ip_dn_init();
#endif /* DUMMYNET */
	err = 0;

	return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err;
}
#endif /* IPFIREWALL */
/*
 * Raw IP socket option processing.
 *
 * Handles IPPROTO_IP-level options specific to raw sockets
 * (IP_HDRINCL, IP_STRIPHDR, and — when compiled in — the ipfw and
 * dummynet control options), plus the special-cased
 * <SOL_SOCKET,SO_FLUSH>.  Anything unrecognized is forwarded to
 * ip_ctloutput().
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	/* Allow <SOL_SOCKET,SO_FLUSH> at this level */
	if (sopt->sopt_level != IPPROTO_IP &&
	    !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH))
		return (EINVAL);

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			/* caller supplies the IP header on send */
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_STRIPHDR:
			/* strip IP header from received datagrams */
			optval = inp->inp_flags & INP_STRIPHDR;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

#if IPFIREWALL
		case IP_FW_ADD:
		case IP_FW_GET:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_GET:
			/* ipfw is loaded lazily on first use */
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* IPFIREWALL */

#if DUMMYNET
		case IP_DUMMYNET_GET:
			/* dummynet is likewise loaded on demand */
			if (!DUMMYNET_LOADED)
				ip_dn_init();
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break ;
#endif /* DUMMYNET */

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_STRIPHDR:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_STRIPHDR;
			else
				inp->inp_flags &= ~INP_STRIPHDR;
			break;

#if IPFIREWALL
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_OLD_FW_ADD:
		case IP_OLD_FW_DEL:
		case IP_OLD_FW_FLUSH:
		case IP_OLD_FW_ZERO:
		case IP_OLD_FW_RESETLOG:
			if (ip_fw_ctl_ptr == 0)
				error = load_ipfw();
			if (ip_fw_ctl_ptr && error == 0)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;
#endif /* IPFIREWALL */

#if DUMMYNET
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (!DUMMYNET_LOADED)
				ip_dn_init();
			if (DUMMYNET_LOADED)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT ;
			break ;
#endif

		case SO_FLUSH:
			/* flush this socket's pending traffic class queue */
			if ((error = sooptcopyin(sopt, &optval, sizeof (optval),
			    sizeof (optval))) != 0)
				break;

			error = inp_flush(inp, optval);
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}
/*
 * This function exists solely to receive the PRC_IFDOWN messages which
 * are sent by if_down().  It looks for an ifaddr whose ifa_addr is sa,
 * and calls in_ifadown() to remove all routes corresponding to that
 * address.  It also receives the PRC_IFUP messages from if_up() and
 * reinstalls the interface routes.
 */
void
rip_ctlinput(
	int cmd,
	struct sockaddr *sa,
	__unused void *vip,
	__unused struct ifnet *ifp)
{
	struct in_ifaddr *ia = NULL;
	struct ifnet *iaifp = NULL;
	int err = 0;
	int flags, done = 0;

	switch (cmd) {
	case PRC_IFDOWN:
		lck_rw_lock_shared(in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa &&
			    (ia->ia_flags & IFA_ROUTE)) {
				done = 1;
				/*
				 * Take a reference and drop the global
				 * address-list lock before doing route work.
				 */
				IFA_ADDREF_LOCKED(&ia->ia_ifa);
				IFA_UNLOCK(&ia->ia_ifa);
				lck_rw_done(in_ifaddr_rwlock);
				lck_mtx_lock(rnh_lock);
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 1);
				/*
				 * in_ifadown gets rid of all the rest of
				 * the routes.  This is not quite the right
				 * thing to do, but at least if we are running
				 * a routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 1);
				lck_mtx_unlock(rnh_lock);
				IFA_REMREF(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		/* if we matched above, the rwlock was already released */
		if (!done)
			lck_rw_done(in_ifaddr_rwlock);
		break;

	case PRC_IFUP:
		lck_rw_lock_shared(in_ifaddr_rwlock);
		for (ia = in_ifaddrhead.tqh_first; ia;
		    ia = ia->ia_link.tqe_next) {
			IFA_LOCK(&ia->ia_ifa);
			if (ia->ia_ifa.ifa_addr == sa) {
				/* keep it locked */
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		/*
		 * Nothing to do if no match, the route already exists,
		 * or the address is not ready yet.
		 */
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE) ||
		    (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) {
			if (ia != NULL)
				IFA_UNLOCK(&ia->ia_ifa);
			lck_rw_done(in_ifaddr_rwlock);
			return;
		}
		IFA_ADDREF_LOCKED(&ia->ia_ifa);
		IFA_UNLOCK(&ia->ia_ifa);
		lck_rw_done(in_ifaddr_rwlock);

		flags = RTF_UP;
		iaifp = ia->ia_ifa.ifa_ifp;

		/* loopback / p2p interfaces get host routes */
		if ((iaifp->if_flags & IFF_LOOPBACK)
		    || (iaifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			ia->ia_flags |= IFA_ROUTE;
			IFA_UNLOCK(&ia->ia_ifa);
		}
		IFA_REMREF(&ia->ia_ifa);
		break;
	}
}
/* Default socket buffer reservations for raw IP sockets (tunable). */
u_int32_t rip_sendspace = RIPSNDQ;
u_int32_t rip_recvspace = RIPRCVQ;

SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &rip_recvspace, 0, "Maximum incoming raw IP datagram size");
/* read-only count of active raw IP PCBs */
SYSCTL_UINT(_net_inet_raw, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    &ripcbinfo.ipi_count, 0, "Number of active PCBs");
/*
 * Attach a protocol control block to a newly created raw IP socket.
 * Raw sockets require privilege (SS_PRIV); reserves socket buffers and
 * allocates an inpcb, recording the raw protocol number from socket(2).
 */
static int
rip_attach(struct socket *so, int proto, struct proc *p)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	if (inp)
		panic("rip_attach");
	if ((so->so_state & SS_PRIV) == 0)
		return (EPERM);

	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return error;
	error = in_pcballoc(so, &ripcbinfo, p);
	if (error)
		return error;
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	/* protocol number requested by the caller of socket(2) */
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = ip_defttl;
	return 0;
}
/* Detach and free the inpcb of a raw IP socket being closed. */
__private_extern__ int
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	if (inp == 0)
		panic("rip_detach");
	in_pcbdetach(inp);
	return 0;
}
/* Abort: mark the socket disconnected, then detach its PCB. */
__private_extern__ int
rip_abort(struct socket *so)
{
	soisdisconnected(so);
	return rip_detach(so);
}
/* Disconnect a connected raw socket; ENOTCONN if it never connected. */
__private_extern__ int
rip_disconnect(struct socket *so)
{
	if ((so->so_state & SS_ISCONNECTED) == 0)
		return ENOTCONN;
	return rip_abort(so);
}
/*
 * Bind a raw IP socket to a local address.  Validates that the address
 * (if non-wildcard) belongs to a local interface and records the local
 * address and a provisional outbound interface in the PCB.
 */
__private_extern__ int
rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
#pragma unused(p)
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in sin;
	struct ifaddr *ifa = NULL;
	struct ifnet *outif = NULL;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    )
		return (inp == NULL ? EINVAL : EPROTOTYPE);

	if (nam->sa_len != sizeof (struct sockaddr_in))
		return (EINVAL);

	/* Sanitized local copy for interface address searches */
	bzero(&sin, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof (struct sockaddr_in);
	sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr;

	if (TAILQ_EMPTY(&ifnet_head) ||
	    (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) ||
	    (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) {
		return (EADDRNOTAVAIL);
	} else if (ifa) {
		/*
		 * Opportunistically determine the outbound
		 * interface that may be used; this may not
		 * hold true if we end up using a route
		 * going over a different interface, e.g.
		 * when sending to a local address.  This
		 * will get updated again after sending.
		 */
		IFA_LOCK(ifa);
		outif = ifa->ifa_ifp;
		IFA_UNLOCK(ifa);
		IFA_REMREF(ifa);
	}
	inp->inp_laddr = sin.sin_addr;
	inp->inp_last_outifp = outif;
	return (0);
}
/*
 * "Connect" a raw IP socket: record the foreign address so subsequent
 * sends need no destination.  AF_IMPLINK is accepted for historical
 * compatibility.
 */
__private_extern__ int
rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p)
{
	struct inpcb *inp = sotoinpcb(so);
	struct sockaddr_in *addr = (struct sockaddr_in *)(void *)nam;

	if (inp == NULL
#if NECP
	    || (necp_socket_should_use_flow_divert(inp))
#endif /* NECP */
	    )
		return (inp == NULL ? EINVAL : EPROTOTYPE);
	if (nam->sa_len != sizeof(*addr))
		return EINVAL;
	if (TAILQ_EMPTY(&ifnet_head))
		return EADDRNOTAVAIL;
	if ((addr->sin_family != AF_INET) &&
	    (addr->sin_family != AF_IMPLINK))
		return EAFNOSUPPORT;

	/* count each socket's first connect for network API statistics */
	if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) {
		so->so_flags1 |= SOF1_CONNECT_COUNTED;
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_connected);
	}

	inp->inp_faddr = addr->sin_addr;
	soisconnected(so);

	return 0;
}
/* Shutdown for raw IP only disables further sends. */
__private_extern__ int
rip_shutdown(struct socket *so)
{
	socantsendmore(so);
	return 0;
}
/*
 * Send a datagram on a raw IP socket.  The destination comes from the
 * connected address (no nam allowed then) or from nam for unconnected
 * sockets.  On error, both the data and control mbufs are freed here.
 */
__private_extern__ int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct proc *p)
{
#pragma unused(flags, p)
	struct inpcb *inp = sotoinpcb(so);
	u_int32_t dst;
	int error = 0;

	if (inp == NULL
#if NECP
	    /* the assignment inside the condition only runs when diverted */
	    || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE))
#endif /* NECP */
	    ) {
		if (inp == NULL)
			error = EINVAL;
		else
			error = EPROTOTYPE;
		goto bad;
	}

	if (so->so_state & SS_ISCONNECTED) {
		if (nam != NULL) {
			error = EISCONN;
			goto bad;
		}
		dst = inp->inp_faddr.s_addr;
	} else {
		if (nam == NULL) {
			error = ENOTCONN;
			goto bad;
		}
		dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr;
	}
	/* rip_output consumes m and control in all cases */
	return (rip_output(m, so, dst, control));

bad:
	VERIFY(error != 0);

	if (m != NULL)
		m_freem(m);
	if (control != NULL)
		m_freem(control);

	return (error);
}
/*
 * rip_unlock is called from different protos instead of the generic
 * socket_unlock; it will handle the socket dealloc on last reference.
 *
 * `refcount` non-zero means a use-count reference is being dropped; if
 * that drop takes the count to zero on a PCB marked WNT_STOPUSING, the
 * PCB is detached and disposed of here.  `debug` optionally overrides
 * the recorded unlock return address.
 */
int
rip_unlock(struct socket *so, int refcount, void *debug)
{
	void *lr_saved;
	struct inpcb *inp = sotoinpcb(so);

	if (debug == NULL)
		lr_saved = __builtin_return_address(0);
	else
		lr_saved = debug;

	if (refcount) {
		if (so->so_usecount <= 0) {
			/* fixed typo in panic message ("refoucnt") */
			panic("rip_unlock: bad refcount so=%p val=%x lrh= %s\n",
			    so, so->so_usecount, solockhistory_nr(so));
			/* NOTREACHED */
		}
		so->so_usecount--;
		if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) {
			/* cleanup after last reference */
			lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
			lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
			if (inp->inp_state != INPCB_STATE_DEAD) {
#if INET6
				if (SOCK_CHECK_DOM(so, PF_INET6))
					in6_pcbdetach(inp);
				else
#endif /* INET6 */
				in_pcbdetach(inp);
			}
			in_pcbdispose(inp);
			lck_rw_done(ripcbinfo.ipi_lock);
			return(0);
		}
	}
	/* record this unlock in the socket's lock-history ring */
	so->unlock_lr[so->next_unlock_lr] = lr_saved;
	so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);
	return(0);
}
/*
 * Sysctl handler: export the list of active raw IP PCBs as a sequence
 * of struct xinpcb records, framed by xinpgen headers so userland can
 * detect concurrent changes (generation counts).
 */
static int
rip_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		/* size probe: report a generous estimate (with slack) */
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xinpcb);
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		/* this sysctl is read-only */
		lck_rw_done(ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	/* first pass: collect live PCBs created before this snapshot */
	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	/* second pass: convert and copy each PCB out to userland */
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			inpcb_to_compat(inp, &xi.xi_inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.ipi_lock);
	return error;
}
/* net.inet.raw.pcblist: legacy (32-bit layout) PCB listing */
SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
#if !CONFIG_EMBEDDED
/*
 * 64-bit variant of rip_pcblist: identical logic but emits
 * struct xinpcb64 records.  Keep the two handlers in sync.
 */
static int
rip_pcblist64 SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(ripcbinfo.ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		/* size probe: report a generous estimate (with slack) */
		n = ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xinpcb64);
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	if (req->newptr != USER_ADDR_NULL) {
		/* this sysctl is read-only */
		lck_rw_done(ripcbinfo.ipi_lock);
		return EPERM;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = ripcbinfo.ipi_gencnt;
	n = ripcbinfo.ipi_count;

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return error;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return 0;
	}

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0) {
		lck_rw_done(ripcbinfo.ipi_lock);
		return ENOMEM;
	}

	/* first pass: collect live PCBs created before this snapshot */
	for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	/* second pass: convert and copy each PCB out to userland */
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb64 xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			inpcb_to_xinpcb64(inp, &xi);
			if (inp->inp_socket)
				sotoxsocket64(inp->inp_socket, &xi.xi_socket);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof(xig));
		xig.xig_len = sizeof xig;
		xig.xig_gen = ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	lck_rw_done(ripcbinfo.ipi_lock);
	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets");
#endif /* !CONFIG_EMBEDDED */
/* "n" variant: delegates to the shared get_pcblist_n() implementation. */
static int
rip_pcblist_n SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	error = get_pcblist_n(IPPROTO_IP, req, &ripcbinfo);

	return error;
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets");
/* User-request dispatch table wiring raw IP into the socket layer. */
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosend =		sosend,
	.pru_soreceive =	soreceive,
};
/* DSEP Review Done pl-20051213-v02 @3253 */
|
# -*- coding: utf-8 -*-
from screws.freeze.main import FrozenOnly
from tools.linear_algebra.elementwise_cache.objects.sparse_matrix.main import EWC_SparseMatrix
class CSCG_Trace_Form_Coboundary_BASE(FrozenOnly):
    """Coboundary base for a CSCG trace form; lazily builds the trace matrix."""

    def __init__(self, tf):
        # keep a handle on the owning trace form and start with no cache
        self._tf_ = tf
        self._T_ = None
        self.___PRIVATE_reset_cache___()
        self._freeze_self_()

    def ___PRIVATE_reset_cache___(self):
        # nothing to reset beyond the lazily built trace matrix
        pass

    @property
    def trace_matrix(self):
        """Element-wise cached sparse trace matrix selected by the form's class name."""
        if self._T_ is None:
            class_name = self._tf_.__class__.__name__
            local_block = getattr(self._tf_.space.trace_matrix, class_name)[0]
            self._T_ = EWC_SparseMatrix(
                self._tf_.mesh.elements, local_block, 'constant')
        return self._T_
|
#ifndef CHARPANEL_H
#define CHARPANEL_H
#include <QPushButton>
#include "cchar.h"
#include "button.h"
class MainWindow;
/*
 * CharPanel: a widget holding an 8x8 grid of character buttons used to
 * pick characters from a paged table.
 */
class CharPanel:public QWidget
{
    Q_OBJECT
public:
    CharPanel(MainWindow *,QFont &);
    // Populate the grid from `table`, showing the given page.
    void load(const std::vector<wchar_t>& table,size_t page);
    // Reset every cell to an empty state.
    void clear();
    // Geometric center of the panel (for popup positioning).
    QPoint center();
private:
    Button *button[8][8];
    CChar Char[8][8];
    // Assign a printable character to the cell at (x, y).
    void SetChar(int x,int y,const QString tag,wchar_t wc);
    // Assign an internal command to the cell at (x, y).
    // NOTE(review): name looks like a misspelling of "SetInternal" — confirm before renaming.
    void SetIntenal(int x,int y,const QString tag,size_t cmd);
    MainWindow *main;
};
#endif // CHARPANEL_H
|
// Curated re-exports of the lodash helpers used across the project.
// Each helper is pulled from its per-method module so bundlers can
// tree-shake the rest of lodash.
export { default as assign } from "lodash/assign";
export { default as clone } from "lodash/clone";
export { default as concat } from "lodash/concat";
export { default as delay } from "lodash/delay";
export { default as debounce } from "lodash/debounce";
export { default as map } from "lodash/map";
export { default as times } from "lodash/times";
export { default as transform } from "lodash/transform";
export { default as isArray } from "lodash/isArray";
export { default as isEqual } from "lodash/isEqual";
export { default as isFunction } from "lodash/isFunction";
export { default as isObject } from "lodash/isObject";
export { default as isSet } from "lodash/isSet";
|
let App = getApp();

Page({

  /**
   * Initial page data.
   */
  data: {
    order_id: null,
    order: {},
  },

  /**
   * Lifecycle hook: page load.
   */
  onLoad: function (options) {
    this.data.order_id = options.order_id;
    this.getOrderDetail(options.order_id);
  },

  /**
   * Fetch the order detail from the server and render it.
   */
  getOrderDetail: function (order_id) {
    let _this = this;
    App._get('user.order/detail', { order_id }, function (result) {
      if (result.code === 1) {
        _this.setData(result.data);
      } else {
        App.showError(result.msg);
      }
    });
  },

  /**
   * Navigate to the goods detail page.
   */
  goodsDetail: function (e) {
    let goods_id = e.currentTarget.dataset.id;
    wx.navigateTo({
      url: '../goods/index?goods_id=' + goods_id
    });
  },

  /**
   * Cancel the order (after user confirmation).
   */
  cancelOrder: function (e) {
    let _this = this;
    let order_id = _this.data.order_id;
    wx.showModal({
      title: "提示",
      content: "确认取消订单?",
      success: function (o) {
        if (o.confirm) {
          App._post_form('user.order/cancel', { order_id }, function (result) {
            if (result.code === 1) {
              wx.navigateBack();
            } else {
              App.showError(result.msg);
            }
          });
        }
      }
    });
  },

  /**
   * Start payment for the order.
   */
  payOrder: function (e) {
    let _this = this;
    let order_id = _this.data.order_id;
    // Show a loading indicator while the payment request is prepared
    wx.showLoading({ title: '正在处理...', });
    App._post_form('user.order/pay', { order_id }, function (result) {
      // Bug fix: the loading indicator was never dismissed; hide it as
      // soon as the server responds, before any further UI appears.
      wx.hideLoading();
      if (result.code === -10) {
        App.showError(result.msg);
        return false;
      }
      // Launch WeChat payment with the server-signed parameters
      wx.requestPayment({
        timeStamp: result.data.timeStamp,
        nonceStr: result.data.nonceStr,
        package: 'prepay_id=' + result.data.prepay_id,
        signType: 'MD5',
        paySign: result.data.paySign,
        success: function (res) {
          _this.getOrderDetail(order_id);
        },
        fail: function () {
          App.showError('订单未支付');
        },
      });
    });
  },

  /**
   * Confirm receipt of the goods (after user confirmation).
   */
  receipt: function (e) {
    let _this = this;
    let order_id = _this.data.order_id;
    wx.showModal({
      title: "提示",
      content: "确认收到商品?",
      success: function (o) {
        if (o.confirm) {
          App._post_form('user.order/receipt', { order_id }, function (result) {
            if (result.code === 1) {
              _this.getOrderDetail(order_id);
            } else {
              App.showError(result.msg);
            }
          });
        }
      }
    });
  },

});
|
import inspect
from json import loads as json_loads
from json import JSONDecodeError
from collections import namedtuple
from functools import partial
from pydantic import ValidationError
from .base import BasePlugin, Context
from .page import PAGES
# HTTP verbs probed on endpoint classes when scanning routes.
METHODS = {'get', 'post', 'put', 'patch', 'delete'}
# Lightweight route record: URL path, set of HTTP methods, handler function.
Route = namedtuple('Route', ['path', 'methods', 'func'])
class StarlettePlugin(BasePlugin):
    """SpecTree plugin integrating request/response validation with Starlette."""

    def __init__(self, spectree):
        super().__init__(spectree)
        from starlette.convertors import CONVERTOR_TYPES
        # map converter instance -> converter type name ('int', 'str', ...)
        self.conv2type = {
            conv: typ for typ, conv in CONVERTOR_TYPES.items()
        }

    def register_route(self, app):
        """Mount the OpenAPI spec endpoint and the documentation UI pages."""
        self.app = app
        from starlette.responses import JSONResponse, HTMLResponse

        self.app.add_route(
            self.config.spec_url,
            lambda request: JSONResponse(self.spectree.spec),
        )

        for ui in PAGES:
            self.app.add_route(
                f'/{self.config.PATH}/{ui}',
                # bind `ui` per iteration via default argument
                lambda request, ui=ui: HTMLResponse(
                    PAGES[ui].format(self.config.spec_url)
                ),
            )

    async def request_validation(self, request, query, json, headers, cookies):
        """Parse and validate request parts against the given pydantic models.

        Attaches the validated models to ``request.context``.  Raises
        pydantic ``ValidationError`` or ``JSONDecodeError`` on bad input.
        """
        request.context = Context(
            query(**request.query_params) if query else None,
            json(**json_loads(await request.body() or '{}')) if json else None,
            headers(**request.headers) if headers else None,
            cookies(**request.cookies) if cookies else None,
        )

    async def validate(self, func, query, json, headers, cookies, resp, *args, **kwargs):
        """Validate the request, invoke ``func``, then validate its response.

        Returns a 422 JSON response when request validation fails.
        """
        from starlette.responses import JSONResponse

        # NOTE: if func is a `HTTPEndpoint`, it should have '.' in name
        # This is not an elegant way. But it seems `inspect` doesn't work here.
        request = args[1] if '.' in str(func) else args[0]

        try:
            await self.request_validation(request, query, json, headers, cookies)
        except ValidationError as err:
            self.logger.info(
                '422 Validation Error',
                extra={
                    'spectree_model': err.model.__name__,
                    'spectree_validation': err.errors(),
                },
            )
            return JSONResponse(err.errors(), 422)
        except JSONDecodeError as err:
            self.logger.info(
                '422 Validation Error',
                extra={
                    'spectree_validation': str(err),
                }
            )
            return JSONResponse({'error_msg': str(err)}, 422)
        # (removed a redundant `except Exception: raise` clause — it
        # re-raised unchanged and only obscured the control flow)

        if inspect.iscoroutinefunction(func):
            response = await func(*args, **kwargs)
        else:
            response = func(*args, **kwargs)

        if resp:
            # validate the outgoing payload against the declared model
            model = resp.find_model(response.status_code)
            if model:
                model.validate(json_loads(response.body))

        return response

    def find_routes(self):
        """Walk the mounted app and collect all user routes (skipping doc pages)."""
        routes = []

        def parse_route(app, prefix=''):
            for route in app.routes:
                if route.path.startswith(f'/{self.config.PATH}'):
                    continue

                func = route.app
                if isinstance(func, partial):
                    try:
                        func = func.__wrapped__
                    except AttributeError:
                        pass

                if inspect.isclass(func):
                    # class-based endpoint: one Route per implemented verb
                    for method in METHODS:
                        if getattr(func, method, None):
                            routes.append(Route(
                                f'{prefix}{route.path}',
                                {method.upper()},
                                getattr(func, method)
                            ))
                elif inspect.isfunction(func):
                    routes.append(Route(
                        f'{prefix}{route.path}',
                        route.methods,
                        route.endpoint))
                else:
                    # mounted sub-application: recurse with extended prefix
                    parse_route(route, prefix=f'{prefix}{route.path}')

        parse_route(self.app)
        return routes

    def bypass(self, func, method):
        """Skip spec generation for implicit HEAD/OPTIONS handlers."""
        if method in ['HEAD', 'OPTIONS']:
            return True
        return False

    def parse_func(self, route):
        """Yield (HTTP method, handler) pairs for a collected route."""
        for method in route.methods or ['GET']:
            yield method, route.func

    def parse_path(self, route):
        """Convert a Starlette path into an OpenAPI path and its parameters."""
        from starlette.routing import compile_path
        _, path, variables = compile_path(route.path)
        parameters = []

        for name, conv in variables.items():
            schema = None
            typ = self.conv2type[conv]
            if typ == 'int':
                schema = {
                    'type': 'integer',
                    'format': 'int32'
                }
            elif typ == 'float':
                schema = {
                    'type': 'number',
                    'format': 'float',
                }
            elif typ == 'path':
                schema = {
                    'type': 'string',
                    'format': 'path',
                }
            elif typ == 'str':
                schema = {'type': 'string'}

            parameters.append({
                'name': name,
                'in': 'path',
                'required': True,
                'schema': schema,
            })

        return path, parameters
|
import os
from datetime import timedelta
# Project root: two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: hard-coded secret key committed to source control —
# acceptable only for local development; load from the environment in
# production.  TODO confirm deployment story.
SECRET_KEY = 'hhz7l-ltdismtf@bzyz+rple7*s*w$jak%whj@(@u0eok^f9k4'

# SECURITY WARNING: don't run with DEBUG = True in production.
DEBUG = True

ALLOWED_HOSTS = []

# Django contrib apps plus DRF, the local `api`/`posts` apps,
# djoser (auth endpoints) and django-filter.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'api',
    'posts',
    'djoser',
    'django_filters',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'yatube_api.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'yatube_api.wsgi.application'

# Local SQLite database (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization / time zone configuration.
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

STATIC_URL = '/static/'

STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static/'),)

# DRF: authenticated-only access with a custom owner-or-read-only
# permission, authenticated via SimpleJWT bearer tokens.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
        'api.permissions.OwnerOrReadOnly',
    ],

    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ],
}

SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(days=10),
    'AUTH_HEADER_TYPES': ('Bearer',),
}
|
import os
# enables comments and blank lines in plan files
def loadScript ( path ):
    """Read a query-plan script from *path*, dropping blank lines and
    '#' comment lines.

    Returns the cleaned script joined with os.linesep, or the string
    "{}" (an empty JSON object) when nothing remains.
    """
    # read query plan
    with open ( path, "r") as planFile:
        script = planFile.read()

    # The original made two passes (one for blank lines — including a
    # redundant `s != "\n"` check that splitlines() can never produce —
    # and one for comments); a single combined pass is equivalent.
    script = os.linesep.join(
        s for s in script.splitlines()
        if s and not s.lstrip().startswith('#')
    )

    return script if len(script) > 0 else "{}"
def listWrap ( elem ):
    """Return *elem* unchanged if it is already a list, else a one-element list."""
    return elem if isinstance(elem, list) else [elem]
def intToHuman ( number ):
    """Format a number with one decimal and a B/M/K suffix, or plainly below 1000."""
    # thresholds checked from largest to smallest, mirroring the original chain
    for limit, suffix in ((1000 ** 3, " B"), (1000 ** 2, " M"), (1000, " K")):
        if number >= limit:
            return f"{number / limit:.1f}" + suffix
    return str(int(number))
def formatDOTStr ( title, sub ):
    """Build a Graphviz HTML-like label: "<title<FONT ...>sub lines</FONT>>".

    *sub* may be a single string or a list of strings; each becomes a
    "<br/> " separated line in a small font.  None/empty parts are omitted.
    """
    parts = ['''<''']
    # idiom fix: compare against None with `is not`, not `!=`
    if title is not None:
        parts.append(title)
    if sub is not None and sub != "":
        # accept a bare string or a list of lines (inlined listWrap)
        lines = sub if isinstance(sub, list) else [sub]
        parts.append('''<FONT POINT-SIZE="10">''')
        for line in lines:
            parts.append('''<br/> ''' + line)
        parts.append('''</FONT>''')
    parts.append('''>''')
    return "".join(parts)
|
var fs = require('fs');
var pool = require('../../common/commonFunction').pool;
const path = require('path');
var _ = require('underscore');
module.exports = function (req, res) {
console.log("m in add product");
var data1 = [];
const path = require('path');
fs.readFile('data.json', 'utf8', function (err, data) {
if (err) throw err;
data1 = data ? JSON.parse(data) : [];
data1 = data1.length ? _.sortBy( data1, function( item ) { return item.id; } ) : [];
return res.send({ status: 'success', data: data1 });
});
}
|
#!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
''' sysconfig_fact module
The purpose of this module is to read the contents of an /etc/sysconfig file
and set Ansible facts for each value.
'''
from StringIO import StringIO
import ConfigParser
def main():
    ''' Main function of this module.

    Reads an /etc/sysconfig-style KEY=VALUE file (module param `src`)
    and exposes its entries as Ansible facts under the key given by
    the `name` param.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
        ),
        supports_check_mode=False
    )

    param_src = module.params.get('src')
    param_name = module.params.get('name')

    # sysconfig files have no section headers, so prepend a dummy
    # "[main]" section to make the content parseable by ConfigParser
    strbuffer = StringIO()
    strbuffer.write("[main]\n")
    with open(param_src, 'r') as src_file:
        strbuffer.write(src_file.read())
    strbuffer.seek(0)

    config = ConfigParser.ConfigParser()
    # preserve key case (the default optionxform lower-cases keys)
    config.optionxform = str
    config.readfp(strbuffer)

    facts = {}
    facts[param_name] = {}

    for item in config.items("main"):
        facts[param_name][item[0]] = item[1]

    module.exit_json(changed=False, ansible_facts=facts)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import Ansible module snippets (required: supplies AnsibleModule).
from ansible.module_utils.basic import *

main()  # Ansible modules execute at import time by convention
|
from conans import python_requires
import os
common = python_requires('llvm-common/0.0.2@orbitdeps/stable')
class LLVMScalarOpts(common.LLVMModulePackage):
    """Conan package wrapping the LLVM ScalarOpts module library."""
    version = common.LLVMModulePackage.version
    name = 'llvm_scalar_opts'
    llvm_component = 'llvm'
    llvm_module = 'ScalarOpts'
    # direct LLVM module dependencies of ScalarOpts
    llvm_requires = ['llvm_headers', 'llvm_analysis', 'llvm_core', 'llvm_instcombine', 'llvm_support', 'llvm_transform_utils']
|
import WebFontLoaderPlugin from '../../plugins/webfontloader-plugin.js';
// Demo scene for the rexWebFont loader plugin.
// NOTE: preload/create are plain functions (not arrows) on purpose —
// Phaser binds `this` to the Scene when invoking them.

// Loads the 'Droid Sans' Google font during the preload stage and
// renders text in the default font, in the pack-loaded 'Bangers',
// and (after load) in 'Droid Sans'.
var preload = function () {
    this.add.text(100, 0, 'default font', {
        fontSize: '60px'
    });
    // 'Bangers' was already fetched by the scene pack (see below)
    this.add.text(100, 100, 'preload stage ', {
        fontFamily: 'Bangers',
        fontSize: '60px'
    });

    var config = {
        google: {
            families: ['Droid Sans']
        }
    };
    this.load.rexWebFont(config);
};

var create = function () {
    // by create time 'Droid Sans' has finished loading
    this.add.text(100, 200, 'create stage ', {
        fontFamily: 'Droid Sans',
        fontSize: '60px'
    });

    this.add.image(700, 500, 'dot').setScale(10);
};

// Scene definition: the `pack` section loads assets (the 'Bangers'
// font and a dot image) before preload() runs.
var sceneConfig = {
    preload: preload,
    create: create,
    pack: {
        files: [{
                type: 'rexWebFont',
                key: 'webfont',
                config: {
                    google: {
                        families: ['Bangers']
                    }
                }
            },
            {
                type: 'image',
                key: 'dot',
                url: 'assets/images/white-dot.png'
            }
        ]
    }
};

// Game bootstrap: installs the WebFontLoader plugin globally.
var config = {
    type: Phaser.AUTO,
    parent: 'phaser-example',
    width: 800,
    height: 600,
    scale: {
        mode: Phaser.Scale.FIT,
        autoCenter: Phaser.Scale.CENTER_BOTH,
    },
    scene: sceneConfig,
    plugins: {
        global: [{
            key: 'WebFontLoader',
            plugin: WebFontLoaderPlugin,
            start: true
        }]
    }
};

var game = new Phaser.Game(config);
|
import React from "react";
import ParallaxComponent from "../components/parallaxComponent/ParallaxComponent";
import Layout from "../components/layoutIndex/layoutIndex";
import Service from "../components/cardService/CardService";
import ServiceIcon from "../components/serviceIcon/ServiceIcon";
import RecentWork from "../components/cardRecentWork/CardRecentWork";
import IndexCss from "./index.module.css";
import Testimonials from "../components/testimonials/Testimonials";
import { Container, Col, Row } from "react-bootstrap";
import Footer from "../components/footer/Footer";
// Shared section heading. The identical Row/Col/title markup was repeated
// verbatim three times in the original; extracted into one helper so the
// heading layout is defined in a single place.
const SectionTitle = ({ title }) => (
  <Row
    className="d-flex justify-content-center ml-auto mr-auto"
    style={{ marginTop: "113px", height: "75px" }}
  >
    <Col
      sm={12}
      md={6}
      xl={3}
      className="d-flex justify-content-center ml-auto mr-auto h-100"
    >
      <div className={IndexCss.titleContainer}>
        <h3 className={IndexCss.title}>{title}</h3>
      </div>
    </Col>
  </Row>
);

// Landing page: three service cards, parallax banner, recent-work gallery,
// service icons, client testimonials and footer.
const IndexPage = () => (
  <div>
    <Layout>
      <Row
        className="text-center"
        style={{ marginTop: "110px", marginBottom: "90px" }}
      >
        <Col xs={12} sm={12} md={4} lg={4}>
          <Service
            name="Gipsarski Radovi"
            url="/img1.jpg"
            linkto="/usluge/gipsarski-radovi"
            text="Some quick example text to build on the card title and make up the bulk of
            the card's content."
          />
        </Col>
        <Col xs={12} sm={12} md={4} lg={4}>
          <Service
            name="Molersko-farbarski Radovi"
            linkto="/usluge/molersko-farbarski-radovi"
            url="/img3.jpg"
            text="Some quick example text to build on the card title and make up the bulk of
            the card's content."
          />
        </Col>
        <Col xs={12} sm={12} md={4} lg={4}>
          <Service
            name="Fasade"
            linkto="/usluge/fasade"
            url="/img2-2.jpg"
            text="Some quick example text to build on the card title and make up the bulk of
            the card's content."
          />
        </Col>
      </Row>
    </Layout>
    <div className="container-fluid p-0">
      <ParallaxComponent />
      <SectionTitle title="NEDAVNI RADOVI" />
      <Row className="text-center m-auto" style={{ marginTop: "50px" }}>
        <Col className="p-0">
          <RecentWork />
        </Col>
      </Row>
      <SectionTitle title="ŠTA NUDIMO" />
      <Row
        className="d-flex justify-content-center text-center m-auto"
        style={{ marginTop: "50px" }}
      >
        <Col>
          <p>
            Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus
            sit amet iaculis elit. Nam semper ut arcu non placerat. Praesent
            nibh massa varius.
          </p>
        </Col>
      </Row>
    </div>
    <div className="container" style={{ marginTop: "80px" }}>
      <Row className="text-center m-auto d-flex justify-content-center">
        <ServiceIcon />
      </Row>
    </div>
    <SectionTitle title="UTISCI KLIJENATA" />
    <Row
      className="d-flex justify-content-center m-auto"
      style={{ marginTop: "50px" }}
    >
      <Col className="text-center" xs={10}>
        <p>
          Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus sit
          amet iaculis elit. Nam semper ut arcu non placerat. Praesent nibh
          massa varius.
        </p>
      </Col>
    </Row>
    <Container style={{ paddingTop: "70px", paddingBottom: "110px" }}>
      <Testimonials />
    </Container>
    <Footer />
  </div>
);
export default IndexPage;
|
import {Point} from "./../base";
import {toRadians} from "./../helpers";
import Sun from "./Sun";
import Planet from "./Planet";
import Orbit from "./Orbit";
import Star from "./Star";
/**
 * Animated solar-system scene drawn on a 2D canvas: a central Sun, orbiting
 * Planets and twinkling background Stars.
 */
class SolarSystem {
    /** @var {Sun} */
    sun = null;
    /** @var {Planet[]} */
    planets = [];
    /** @var {Star[]} */
    stars = [];
    /** @var {Object} animation settings; may be partially overridden via the constructor */
    config = {
        dayDuration: 15 / 365, // sec
        star: {
            stepTime: .05, // sec
            transparencyStep: .015 // color transparency (alpha channel). max = 1
        }
    };

    /**
     * @returns {CanvasRenderingContext2D}
     */
    get context() {
        return this._context;
    }

    /**
     * Sets the drawing context and recomputes the canvas center.
     * @param {CanvasRenderingContext2D} context
     */
    set context(context) {
        this._context = context;
        this._center = new Point(this.width / 2, this.height / 2);
    }

    /**
     * @returns {Point} center of the canvas
     */
    get center() {
        return this._center;
    }

    /**
     * @returns {Number} canvas width in pixels
     */
    get width() {
        return this.context.canvas.width;
    }

    /**
     * @returns {Number} canvas height in pixels
     */
    get height() {
        return this.context.canvas.height;
    }

    /**
     * Converts polar coordinates (relative to the canvas center) to a Point.
     * @param {Number} radius
     * @param {Number} angle - in degrees
     * @returns {Point}
     */
    position(radius, angle) {
        return new Point(
            this.center.x + radius * Math.cos(toRadians(angle)),
            this.center.y + radius * Math.sin(toRadians(angle)),
        );
    }

    /**
     * @param {HTMLCanvasElement} canvas
     * @param {Number} [sunRadius]
     * @param {Object} [planets] - map/array of plain planet descriptors
     * @param {Number} [starsCount]
     * @param {Object} [config] - overrides for this.config
     */
    constructor(canvas, sunRadius = 50, planets = [], starsCount = 200, config = {}) {
        this.context = canvas.getContext('2d');
        this.config = {...this.config, ...config};
        this.sun = new Sun(this, this.center, sunRadius);
        this.planets = Object.values(planets).map(
            planet => new Planet(
                this,
                planet.radius,
                new Orbit(this, this.center, planet.orbitRadius),
                planet.yearDuration,
                planet.color,
                null,
                planet.satellites || []
            )
        );
        this.stars = [...Array(starsCount)].map(() => new Star(this));
        this.setup();
    }

    /**
     * Computes the scene density (scale factor) from the outermost planet.
     */
    setup() {
        // BUG FIX: the original reduce ignored its accumulator and therefore
        // returned the LAST planet's extent, not the largest one. Take the
        // maximum of (orbit radius + planet radius) over all planets.
        let
            maxRadius = this.planets.reduce(
                (maxDistance, planet) => Math.max(maxDistance, planet.orbit.radius + planet.radius),
                0
            ),
            minSize = Math.min(this.width, this.height),
            minRequiredSize = maxRadius * 2 + Math.min(50, maxRadius);
        this.density = minSize / minRequiredSize;
    }

    /**
     * Re-centers the scene after the canvas has been resized.
     */
    resize() {
        this.setup();
        this._center = new Point(this.width / 2, this.height / 2);
        this.sun.center = this.center;
        this.planets.forEach(planet => planet.orbit.center = this.center);
    }

    /**
     * @returns {Point} a uniformly random point on the canvas
     */
    randomPoint() {
        return new Point(
            Math.floor(Math.random() * this.width),
            Math.floor(Math.random() * this.height)
        );
    }

    /**
     * Clears the canvas: touching canvas.width resets the bitmap, and the
     * decrement/increment pair restores the original width afterwards.
     */
    clear() {
        --this.context.canvas.width;
        ++this.context.canvas.width;
    }

    /**
     * Draws one frame: background, stars, sun, then planets.
     */
    render() {
        this.clear();
        this.context.fillStyle = '#000';
        this.context.fillRect(0, 0, this.width, this.height);
        this.stars.forEach(star => star.render());
        this.sun.render();
        this.planets.forEach(planet => planet.render());
    }

    /**
     * Advances every animated object by one step.
     */
    updatePositions() {
        this.planets.forEach(planet => planet.updatePosition());
        this.stars.forEach(star => star.updatePosition());
    }

    /**
     * Starts the self-perpetuating animation loop.
     */
    run() {
        let renderCallback = () => {
            this.updatePositions();
            this.render();
            window.requestAnimationFrame(renderCallback);
        };
        window.requestAnimationFrame(renderCallback);
    }
}
export default SolarSystem;
|
/**
 * Advance the Fibonacci pair `arr` by `n` steps, mutating it in place and
 * logging the pair before each step.
 *
 * Fixes: the original contained a leftover debug loop (printing 5, 6, 7 on
 * every call, unrelated to the computation) — removed; and it always returned
 * `undefined` — it now returns the mutated array, which is backward
 * compatible with callers that ignored the return value.
 *
 * @param {number[]} arr - two-element pair [F(k), F(k+1)], mutated in place
 * @param {number} n - number of steps to advance
 * @returns {number[]} the same (mutated) array
 */
function fib(arr, n) {
    if (!n) {
        return arr;
    }
    console.log(arr);
    // Shift the window: [a, b] -> [b, a + b].
    var prev = arr[1];
    arr[1] = arr[0] + arr[1];
    arr[0] = prev;
    return fib(arr, n - 1);
}
fib([0, 1], 8);
|
/* Generated by RuntimeBrowser
Image: /System/Library/PrivateFrameworks/CalendarDatabase.framework/CalendarDatabase
*/
// Reconstructed (class-dumped) declaration from the private CalendarDatabase
// framework. Judging by the names, one row of the persistent change store's
// per-client bookkeeping: which client it is and the last change sequence
// number that client has consumed — TODO confirm against actual usage.
@interface EKPersistentChangeStoreRowInfo : NSObject {
    NSString * _clientIdentifier;
    int _consumedSequenceNumber;
}
@property (nonatomic, retain) NSString *clientIdentifier;
@property (nonatomic) int consumedSequenceNumber;
- (void).cxx_destruct;
- (id)clientIdentifier;
- (int)consumedSequenceNumber;
- (id)description;
- (void)setClientIdentifier:(id)arg1;
- (void)setConsumedSequenceNumber:(int)arg1;
@end
|
import React, { useEffect, useState } from "react";
import {
TouchableOpacity,
StyleSheet,
TextInput,
View,
Image,
Dimensions,
} from "react-native";
import { Input, Button, Text, useTheme } from "react-native-elements";
import { BButton } from "../../components/index";
import { AntDesign, Ionicons, MaterialIcons } from "@expo/vector-icons";
import Icon from "react-native-vector-icons/Feather";
import MapComponent from "../../components/MapComponent";
import { onDeliveryUpdate, unsubscribeDeliveryJob } from "../../firebase";
import { COLORS } from "../../constants";
let { width, height } = Dimensions.get("window"); // Screen dimensions
const ASPECT_RATIO = width / height;
const LATITUDE_DELTA = 0.04; // Controls the zoom level of the map. Smaller means more zoomed in
const LONGITUDE_DELTA = LATITUDE_DELTA * ASPECT_RATIO; // Derived from LATITUDE_DELTA so the visible region matches the screen's aspect ratio
/**
 * Screen shown while the deliverer travels to the pickup address.
 * Subscribes to live updates of the delivery job and hands off to the
 * dropoff screen once the package status reaches 3 (picked up).
 */
const DelivererToPickup = ({ navigation, route }) => {
  const { senderItem, receiverItem, initPackageItem, delivererItem } =
    route.params;
  const [deliveryNotes, onAddDeliveryNotes] = useState("");
  const [packageItem, setPackageItem] = useState(initPackageItem);
  const [unsubscribe, setUnsubscribe] = useState(() => () => {});
  const pickup_address = packageItem.data.source_address;
  // Route endpoints: deliverer's live location -> pickup address.
  const [sourceLat, sourceLong] = [
    packageItem.data.deliverer_location.latitude,
    packageItem.data.deliverer_location.longitude,
  ];
  const [destinationLat, destinationLong] = [
    pickup_address.address_coord.latitude,
    pickup_address.address_coord.longitude,
  ];
  const hasLocationData = destinationLat && sourceLat;
  const mapProps = hasLocationData
    ? {
        source: { sourceLat: sourceLat, sourceLong: sourceLong },
        dest: { destLat: destinationLat, destLong: destinationLong },
        LATITUDE_DELTA: LATITUDE_DELTA,
        LONGITUDE_DELTA: LONGITUDE_DELTA,
        style: styles.map,
      }
    : null;
  useEffect(() => {
    // BUG FIX: the original passed an `async` callback to useEffect even
    // though nothing was awaited; React then receives a Promise where it
    // expects a cleanup function. Also return the unsubscribe function so
    // the listener is detached when this screen unmounts instead of leaking.
    const unsub = unsubscribeDeliveryJob(initPackageItem.id, setPackageItem);
    setUnsubscribe(() => unsub);
    return unsub;
  }, []);
  useEffect(() => {
    // Status >= 3 means the package has been picked up: stop listening here
    // and pass the job on to the dropoff screen.
    if (packageItem.data.status >= 3) {
      unsubscribe();
      navigation.navigate("DelivererToDropoff", {
        senderItem: senderItem,
        receiverItem: receiverItem,
        initPackageItem: packageItem,
        delivererItem: delivererItem,
      });
    }
  }, [packageItem]);
  return (
    <View style={styles.container}>
      <View style={styles.topleftbutton}>
        <TouchableOpacity onPress={navigation.goBack}>
          <Ionicons name="arrow-back" size={24} color="black" />
        </TouchableOpacity>
      </View>
      <View style={styles.headingContainer}>
        <Text style={styles.lineone}>ETA - 2:55</Text>
      </View>
      {mapProps ? (
        <MapComponent mapProps={{ ...mapProps, style: styles.map }} />
      ) : (
        <Text style={styles.paragraph}>Loading Map...</Text>
      )}
      <View style={styles.bottomContainer}>
        <Text style={styles.linetwo}>
          {`${
            delivererItem.data.full_name.split(" ")[0]
          } is on the way to pickup the delivery!`}
        </Text>
        <TextInput
          style={styles.linetwo}
          placeholder="Any delivery notes?"
          value={deliveryNotes}
          onChangeText={onAddDeliveryNotes}
        ></TextInput>
        {/* NOTE(review): the phone button navigates instead of dialing —
            looks like a placeholder; confirm the intended action. */}
        <Icon.Button
          name="phone"
          backgroundColor={COLORS.white}
          onPress={() => navigation.navigate("DelivererToDropoff")}
        ></Icon.Button>
      </View>
    </View>
  );
};
export default DelivererToPickup;
// Styles for the DelivererToPickup screen. Several entries (button*, input*)
// appear unused by this file's JSX — presumably shared boilerplate copied
// between screens; verify before removing.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    justifyContent: "center",
    alignItems: "center",
  },
  lineone: {
    fontSize: 30,
    fontWeight: "bold",
    textAlign: "center",
  },
  linetwo: {
    paddingBottom: 25,
    fontSize: 20,
    fontWeight: "bold",
    textAlign: "center",
  },
  linethree: {
    paddingBottom: 25,
    fontSize: 20,
    fontWeight: "bold",
    textAlign: "center",
  },
  button: {
    width: 200,
    marginHorizontal: 100,
    marginVertical: 20,
  },
  buttonView: {
    position: "absolute",
    top: (5 / 6) * height, // pinned near the bottom of the screen
  },
  buttonContainer: {},
  buttonOutline: {},
  buttonOutlineText: {},
  buttonText: {
    color: "blue",
  },
  input: {
    padding: 15,
    borderBottomWidth: 1,
    borderColor: "rgba(0, 0, 0, .2)",
  },
  inputContainer: {},
  headingContainer: {
    position: "absolute",
    top: 50,
  },
  bottomContainer: {
    position: "absolute",
    bottom: 25,
  },
  font: {
    textAlign: "center",
    maxWidth: 200,
    fontWeight: "bold",
  },
  topleftbutton: {
    position: "absolute",
    left: 25,
    top: 50,
  },
  toprightbutton: {
    position: "absolute",
    right: 25,
    top: 50,
  },
  // Map fills the screen except for a band at the top (heading) and a
  // larger band at the bottom (notes / call button).
  map: {
    ...StyleSheet.absoluteFillObject,
    top: (1 / 7) * height,
    bottom: (1 / 3) * height,
  },
});
|
# -*- coding: utf-8 -*-
#
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Tomasz Kornuta"
import torch
import math
import numpy as np
from ptp.components.component import Component
from ptp.data_types.data_definition import DataDefinition
class AccuracyStatistics(Component):
    """
    Component collecting the classification accuracy statistic: the fraction
    of samples in a batch whose argmax prediction matches the target.

    (The original docstring said "batch size", which described a different
    component.)
    """
    def __init__(self, name, config):
        """
        Initializes object.

        :param name: Name of the component (used as loss/statistics name).
        :type name: str

        :param config: Dictionary of parameters (read from the configuration ``.yaml`` file).
        :type config: :py:class:`ptp.configuration.ConfigInterface`
        """
        # Call constructors of parent classes.
        Component.__init__(self, name, AccuracyStatistics, config)
        # Set key mappings.
        self.key_targets = self.stream_keys["targets"]
        self.key_predictions = self.stream_keys["predictions"]
        self.key_accuracy = self.statistics_keys["accuracy"]
    def input_data_definitions(self):
        """
        Function returns a dictionary with definitions of input data that are required by the component.

        :return: dictionary containing input data definitions (each of type :py:class:`ptp.utils.DataDefinition`).
        """
        return {
            self.key_targets: DataDefinition([-1], [torch.Tensor], "Batch of targets, each being a single index [BATCH_SIZE]"),
            self.key_predictions: DataDefinition([-1, -1], [torch.Tensor], "Batch of predictions, represented as tensor with probability distribution over classes [BATCH_SIZE x NUM_CLASSES]")
            }
    def output_data_definitions(self):
        """
        Function returns an empty dictionary with definitions of output data produced by the component
        (this component only collects statistics and produces no streams).

        :return: Empty dictionary.
        """
        return {}
    def __call__(self, data_dict):
        """
        Call method - empty for all statistics.
        """
        pass
    def calculate_accuracy(self, data_dict):
        """
        Calculates accuracy equal to mean number of correct classifications in a given batch.

        :param data_dict: DataDict containing the targets and predictions.
        :type data_dict: DataDict

        :return: Accuracy (float in [0, 1]).
        """
        # Get indices of the max log-probability.
        #pred = data_dict[self.key_predictions].max(1, keepdim=True)[1]
        preds = data_dict[self.key_predictions].max(1)[1]
        #print("Max: {} ".format(data_dict[self.key_predictions].max(1)[1]))
        # Calculate the number of correct predictions.
        correct = preds.eq(data_dict[self.key_targets]).sum().item()
        #print ("TARGETS = ",data_dict[self.key_targets])
        #print ("PREDICTIONS = ",data_dict[self.key_predictions])
        #print ("MAX PREDICTIONS = ", preds)
        #print("CORRECTS = ", correct)
        #print(" Target: {}\n Prediction: {}\n Correct: {} ".format(data_dict[self.key_targets], preds, preds.eq(data_dict[self.key_targets])))
        # Normalize by batch size.
        batch_size = data_dict[self.key_predictions].shape[0]
        accuracy = correct / batch_size
        #print("ACCURACY = ", accuracy)
        return accuracy
    def add_statistics(self, stat_col):
        """
        Adds 'accuracy' statistics to ``StatisticsCollector``.

        :param stat_col: ``StatisticsCollector``.
        """
        stat_col.add_statistics(self.key_accuracy, '{:6.4f}')
    def collect_statistics(self, stat_col, data_dict):
        """
        Collects the accuracy statistic for a given episode.

        :param stat_col: ``StatisticsCollector``.

        :param data_dict: DataDict containing targets and predictions.
        """
        stat_col[self.key_accuracy] = self.calculate_accuracy(data_dict)
    def add_aggregators(self, stat_agg):
        """
        Adds aggregators (mean/min/max/std of accuracy over collected batches) to ``StatisticsAggregator``.

        :param stat_agg: ``StatisticsAggregator``.
        """
        stat_agg.add_aggregator(self.key_accuracy, '{:7.5f}')  # represents the average accuracy
        stat_agg.add_aggregator(self.key_accuracy+'_min', '{:7.5f}')
        stat_agg.add_aggregator(self.key_accuracy+'_max', '{:7.5f}')
        stat_agg.add_aggregator(self.key_accuracy+'_std', '{:7.5f}')
    def aggregate_statistics(self, stat_col, stat_agg):
        """
        Aggregates accuracy samples from all collected batches.
        If batch sizes were collected, the mean/std are weighted by batch size;
        otherwise a simple (unweighted) mean is used and a warning is logged.

        :param stat_col: ``StatisticsCollector``

        :param stat_agg: ``StatisticsAggregator``
        """
        accuracies = stat_col[self.key_accuracy]
        # Check if batch size was collected.
        if "batch_size" in stat_col.keys():
            batch_sizes = stat_col['batch_size']
            # Calculate batch-size-weighted mean and variance.
            accuracies_avg = np.average(accuracies, weights=batch_sizes)
            accuracies_var = np.average((accuracies-accuracies_avg)**2, weights=batch_sizes)
            stat_agg[self.key_accuracy] = accuracies_avg
            stat_agg[self.key_accuracy+'_min'] = np.min(accuracies)
            stat_agg[self.key_accuracy+'_max'] = np.max(accuracies)
            stat_agg[self.key_accuracy+'_std'] = math.sqrt(accuracies_var)
        else:
            # Else: use simple mean.
            stat_agg[self.key_accuracy] = np.mean(accuracies)
            stat_agg[self.key_accuracy+'_min'] = np.min(accuracies)
            stat_agg[self.key_accuracy+'_max'] = np.max(accuracies)
            stat_agg[self.key_accuracy+'_std'] = np.std(accuracies)
            # But inform user about that!
            self.logger.warning("Aggregated statistics might contain errors due to the lack of information about sizes of aggregated batches")
|
"use strict"
exports.__esModule = true;
exports.AwardIconConfig = {
name: 'AwardIcon',
height: 512,
width: 384,
svgPath: 'M97.12 362.63c-8.69-8.69-4.16-6.24-25.12-11.85-9.51-2.55-17.87-7.45-25.43-13.32L1.2 448.7c-4.39 10.77 3.81 22.47 15.43 22.03l52.69-2.01L105.56 507c8 8.44 22.04 5.81 26.43-4.96l52.05-127.62c-10.84 6.04-22.87 9.58-35.31 9.58-19.5 0-37.82-7.59-51.61-21.37zM382.8 448.7l-45.37-111.24c-7.56 5.88-15.92 10.77-25.43 13.32-21.07 5.64-16.45 3.18-25.12 11.85-13.79 13.78-32.12 21.37-51.62 21.37-12.44 0-24.47-3.55-35.31-9.58L252 502.04c4.39 10.77 18.44 13.4 26.43 4.96l36.25-38.28 52.69 2.01c11.62.44 19.82-11.27 15.43-22.03zM263 340c15.28-15.55 17.03-14.21 38.79-20.14 13.89-3.79 24.75-14.84 28.47-28.98 7.48-28.4 5.54-24.97 25.95-45.75 10.17-10.35 14.14-25.44 10.42-39.58-7.47-28.38-7.48-24.42 0-52.83 3.72-14.14-.25-29.23-10.42-39.58-20.41-20.78-18.47-17.36-25.95-45.75-3.72-14.14-14.58-25.19-28.47-28.98-27.88-7.61-24.52-5.62-44.95-26.41-10.17-10.35-25-14.4-38.89-10.61-27.87 7.6-23.98 7.61-51.9 0-13.89-3.79-28.72.25-38.89 10.61-20.41 20.78-17.05 18.8-44.94 26.41-13.89 3.79-24.75 14.84-28.47 28.98-7.47 28.39-5.54 24.97-25.95 45.75-10.17 10.35-14.15 25.44-10.42 39.58 7.47 28.36 7.48 24.4 0 52.82-3.72 14.14.25 29.23 10.42 39.59 20.41 20.78 18.47 17.35 25.95 45.75 3.72 14.14 14.58 25.19 28.47 28.98C104.6 325.96 106.27 325 121 340c13.23 13.47 33.84 15.88 49.74 5.82a39.676 39.676 0 0 1 42.53 0c15.89 10.06 36.5 7.65 49.73-5.82zM97.66 175.96c0-53.03 42.24-96.02 94.34-96.02s94.34 42.99 94.34 96.02-42.24 96.02-94.34 96.02-94.34-42.99-94.34-96.02z',
yOffset: 0,
xOffset: 0,
transform: ''
};
exports.AwardIcon = require('../createIcon').createIcon(exports.AwardIconConfig);
exports["default"] = exports.AwardIcon;
|
// EVENTS
// Submit on Enter. BUG FIX: `event.charCode` is only populated on keypress
// events, never on keyup, so the original `event.charCode == 13` check could
// not fire. Use jQuery's normalized `event.which` instead.
$('#message').on('keyup', function(event) {
    if (event.which === 13) {
        submitMessage();
    }
});
$('#send').on('click', submitMessage);
// FUNCTIONS
// Reads the message input, emits it to the server if non-empty, then clears
// the input. Returns false to suppress any default form submission.
function submitMessage() {
    var $input = $('#message');
    var message = $input.val();
    // BUG FIX: the original `if(!message == "")` compared a boolean against a
    // string and only behaved correctly through accidental type coercion
    // (false == "" is true). A plain truthiness check expresses the intent:
    // emit only when the message is a non-empty string.
    if (message) {
        socket.emit('chat message', message); // emits a chat message event to the server
    }
    $input.val('');
    return false;
};
// Appends an incoming message to the list and keeps the chat scrolled to the
// bottom. Uses .text() (not .html()) so message content is not interpreted
// as HTML.
function addMessage(msg) {
    var message = $('<li>').text(msg);
    $('#messages').append(message);
    $('#chat').scrollTop($('#chat')[0].scrollHeight);
};
// SOCKET EVENTS
socket.on('add chat message', addMessage);
|
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const react_1 = __importDefault(require("react"));
const ReactComponent = props => (react_1.default.createElement("svg", Object.assign({ viewBox: "0 0 24 24", width: "1em", height: "1em" }, props),
react_1.default.createElement("path", { d: "M7 24h2v-2H7v2zm4 0h2v-2h-2v2zm4 0h2v-2h-2v2zM16 .01L8 0C6.9 0 6 .9 6 2v16c0 1.1.9 2 2 2h8c1.1 0 2-.9 2-2V2c0-1.1-.9-1.99-2-1.99zM16 16H8V4h8v12z" }),
react_1.default.createElement("path", { d: "M0 0h24v24H0z", fill: "none" })));
exports.default = ReactComponent;
|
import argparse
import datasets
from transformers import BertTokenizer
from bs4 import BeautifulSoup
def parse_args():
    """Parse command-line options: --dataset and --tokenizer (both required paths/names)."""
    parser = argparse.ArgumentParser()
    for option in ('--dataset', '--tokenizer'):
        parser.add_argument(option, type=str, required=True)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Load the serialized HF dataset and the tokenizer it was prepared with.
    # NOTE(review): the tokenizer is loaded but not used below — presumably
    # kept for the commented-out decoding experiments; confirm before removing.
    dataset = datasets.load_from_disk(args.dataset)
    tokenizer = BertTokenizer.from_pretrained(args.tokenizer)
    # Print dataset summary, then dump the first ~50 examples of each split.
    print('===================================================')
    print(dataset)
    print('train INFO')
    print(dataset['train'].info)
    print('test INFO')
    print(dataset['test'].info)
    print(f"len = {len(dataset['train'])}")
    print('===================================================')
    for part in ['train', 'test']:
        print('PART', part)
        for i, x in enumerate(dataset[part]):
            # print(x['labels'], sum(1 for c in x['input_ids'] if int(c) > int(0)))
            # print(x['excess_entropy'], x['text'])
            # print(f'{len(x["text"])}, {x["tse"]}')
            print(x)
            if i >= 50:
                break
|
/* Work queue helper functions
*
* Copyright (c) 2018 Joachim Nilsson <troglobit@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "config.h"
#include "finit.h"
#include "schedule.h"
#define SC_INIT 0x494E4954 /* "INIT", see ascii(7) */
/*
 * libuEv callback wrapper: unpacks the work item from the watcher's opaque
 * argument and invokes its callback.
 */
static void cb(uev_t *w, void *arg, int events)
{
	struct wq *work = (struct wq *)arg;

	/* Watcher error: re-arm the timer and retry on a later iteration. */
	if (UEV_ERROR == events) {
		uev_timer_start(w);
		return;
	}

	work->cb(work);
}
/*
 * Place work on the event queue, delayed by work->delay milliseconds.
 * Returns the result of the underlying timer call, or EINVAL for NULL work.
 */
int schedule_work(struct wq *work)
{
	if (!work)
		return errno = EINVAL;

	/* Already initialized: just (re)arm the existing one-shot timer. */
	if (work->init == SC_INIT)
		return uev_timer_set(&work->watcher, work->delay, 0);

	/* First use: mark as initialized and create the one-shot timer. */
	work->init = SC_INIT;
	return uev_timer_init(ctx, &work->watcher, cb, work, work->delay, 0);
}
/**
* Local Variables:
* indent-tabs-mode: t
* c-file-style: "linux"
* End:
*/
|
//>>built
// Machine-generated (minified) AMD build layer for "dojox.lang.docs".
// Edit the original dojox source and rebuild rather than modifying this file.
define(["dojo","dijit","dojox"],function(f,z,t){f.provide("dojox.lang.docs");(function(){function u(d){console.log("Warning, the API docs must be available at ../util/docscripts/api.json or ../util/docscripts/api/*.json in order for dojox.lang.docs to supply schema information, but it could not be loaded: "+d)}var g={},h=[],n=t.lang.docs._loadedDocs={},l=function(d,a){g[a]=d},q=function(d){var a=d.type||"",c,e=!1,b=!1,m,a=a.replace(/\?/,function(){e=!0;return""}),a=a.replace(/\[\]/,function(){b=!0;
return""});a.match(/HTML/)?a="string":"String"==a||"Number"==a||"Boolean"==a||"Object"==a||"Array"==a||"Integer"==a||"Function"==a?a=a.toLowerCase():"bool"==a?a="boolean":a?(c=f.getObject(a)||{},m=!0):c={};c=c||{type:a};b&&(c={items:c,type:"array"},m=!1);m||(e&&(c.optional=!0),/const/.test(d.tags)&&(c.readonly=!0));return c},v=function(d,a){var c=n[a];if(c){d.description=c.description;d.properties={};d.methods={};if(c.properties)for(var e=c.properties,b=0,m=e.length;b<m;b++)"prototype"==e[b].scope&&
((d.properties[e[b].name]=q(e[b])).description=e[b].summary);if(c.methods)for(e=c.methods,b=0,m=e.length;b<m;b++)if((a=e[b].name)&&"prototype"==e[b].scope){a=d.methods[a]={};a.description=e[b].summary;var k=e[b].parameters;if(k){a.parameters=[];for(var p=0,g=k.length;p<g;p++){var h=k[p],l=a.parameters[p]=q(h);l.name=h.name;l.optional="optional"==h.usage}}(k=e[b]["return-types"])&&k[0]&&(k=q(k[0]),k.type&&(a.returns=k))}(c=c.superclass)&&(d["extends"]=f.getObject(c))}},r=function(d){h.push(d)},w=f.declare;
f.declare=function(d){var a=w.apply(this,arguments);l(a,d);return a};f.mixin(f.declare,w);var x,y=f.require;f.require=function(d){r(d);return y.apply(this,arguments)};t.lang.docs.init=function(d){function a(){f.require=y;h=null;try{f.xhrGet({sync:!d,url:f.baseUrl+"../util/docscripts/api.json",handleAs:"text"}).addCallbacks(function(a){n=(new Function("return "+a))();a=null;l=v;for(var b in g)l(g[b],b);g=null},u)}catch(b){u(b)}}if(x)return null;x=!0;var c=function(a,c){return f.xhrGet({sync:c||!d,
url:f.baseUrl+"../util/docscripts/api/"+a+".json",handleAs:"text"}).addCallback(function(a){a=(new Function("return "+a))();for(var b in a)n[b]||(n[b]=a[b])})};try{var e=h.shift();c(e,!0).addCallbacks(function(){r=function(a){if(!n[a])try{c(a)}catch(m){n[a]={}}};f.forEach(h,function(a){r(a)});h=null;l=v;for(i in g)l(g[i],i);g=null},a)}catch(b){a()}return null}})()});
|
from flask import Flask, request
from flask.templating import render_template
from flask.helpers import make_response
from sympy import symbols,integrate
from sympy.parsing.sympy_parser import (parse_expr)
from flaskwebgui import FlaskUI
app = Flask(__name__)
ui = FlaskUI(app)  # wraps the Flask app in a desktop GUI window
@app.route('/', methods=['POST', 'GET'])
def home(name=None):
    # Serve the single-page form used to submit f, y0 and n.
    return render_template('index.html', name=name)
@app.route('/download')
def calculate():
    """Run Picard iteration for y' = f(x, y) with y(0) = y0 and return the
    successive approximations as a downloadable plain-text file.

    Query args: f (expression), y (initial value y0), n (iteration count).
    """
    f = parse_expr(request.args['f'])
    y0 = int(request.args['y'])
    n = int(request.args['n'])
    x, y = symbols('x y')

    # Successive approximations: y_k = y0 + integral of f(x, y_{k-1}) dx.
    approximations = [y0]
    for k in range(1, n):
        approximations.append(y0 + integrate(f.subs(y, approximations[k - 1]), x))

    # One line per approximation, in the same format as before.
    body = "".join(f"n={k+1} : y({k}) = {approximations[k]}\n" for k in range(n))

    response = make_response(body)
    response.headers.set('Content-Type', 'text/plain')
    response.headers.set(
        'Content-Disposition', 'attachment', filename='answer.txt')
    return response
if __name__ == "__main__":
# app.secret_key = "super secret key"
# app.run("127.0.0.1",5002)
ui.run()
|
import pdf_to_json as p2j
import json

# Convert one UDHR sample PDF (Sundanese, Latin script, Sans 8pt) to the
# converter's JSON representation and pretty-print it.
url = "file:data/multilingual/Latn.SUN/Sans_8/udhr_Latn.SUN_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# Store only image hashes instead of embedding image data in the output.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn
from torch.autograd.function import Function
import logging
import pickle
import json
from detectron2.layers import ShapeSpec, SELayer
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.data.datasets.lvis_categories_mapper import *
from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..poolers import ROIPooler
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers_class, FastRCNNOutputLayers_box, FastRCNNOutputs, fast_rcnn_inference
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
###########
# Original Cascade RCNN supported
# please trun on the original switch
# and change the
class _ScaleGradient(Function):
    """Identity in the forward pass; multiplies the incoming gradient by
    ``scale`` in the backward pass. ``scale`` itself receives no gradient."""
    @staticmethod
    def forward(ctx, input, scale):
        # Remember the scale for backward; forward returns input unchanged.
        ctx.scale = scale
        return input
    @staticmethod
    def backward(ctx, grad_output):
        # Gradient w.r.t. `input` is scaled; None for the `scale` argument.
        return grad_output * ctx.scale, None
# Module-level logger, shared by the ROI-head classes below.
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class CascadeFREQROIHeads(StandardROIHeads):
def _init_box_head(self, cfg):
# fmt: off
self.index_list = cate_id_list()
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
cascade_bbox_reg_weights = cfg.MODEL.ATTENTION_ROI_HEAD.BBOX_REG_WEIGHTS
cascade_ious = cfg.MODEL.ATTENTION_ROI_HEAD.IOUS
self.num_cascade_stages = len(cascade_ious)
self.num_f_classes = cfg.DATASETS.NUM_CLASSES_F
self.num_c_classes = cfg.DATASETS.NUM_CLASSES_C
self.num_r_classes = cfg.DATASETS.NUM_CLASSES_R
self.test_keep_anns = cfg.TEST.KEEP_ANNS
self.test_cal_acc = cfg.TEST.CAL_ACC
if self.test_cal_acc:
self.acc_data_path = './LVIS_acc_data_x_teacher.json'
assert self.test_keep_anns, "[Test] Calculate acc on. Please ture on TEST_KEEP_ANNS."
self.acc_data = {'f':0,'f_gt':0,'c':0,'c_gt':0,'r':0,'r_gt':0,'c_fgt':0,'f_cgt':0,
'f_rgt':0,'c_rgt':0,'r_fgt':0,'r_cgt':0,'pred_f':0,'pred_c':0,'pred_r':0}
with open(self.acc_data_path, 'w') as outfile:
json.dump(self.acc_data, outfile)
############# my switch
self.shared_weight = cfg.MODEL.ATTENTION_ROI_HEAD.SHARED_WEIGHT
self.weighted_CE = cfg.MODEL.ATTENTION_ROI_HEAD.WEIGHTED_CE
self.learn_weighted_CE = cfg.MODEL.ATTENTION_ROI_HEAD.LEARN_WEIGHTED_CE
self.KLCE = cfg.MODEL.ATTENTION_ROI_HEAD.KLCE
self.has_selayer = cfg.MODEL.ATTENTION_ROI_HEAD.SELAYER
self.stage_phase = 0
self.multi_head = False
############# attention part
self.self_attentiion = cfg.MODEL.ATTENTION_ROI_HEAD.ATTENTION
self.enhance_size = cfg.MODEL.ATTENTION_ROI_HEAD.CHANNEL_OF_ENHANCED_FEATURE
self.relu = nn.ReLU(inplace=True)
############# weighted ce part
self.weight = None
if self.learn_weighted_CE:
self.comm_weight = torch.nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.rare_weight = torch.nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.comm_weight.data.fill_(2.)
self.rare_weight.data.fill_(3.)
self.weight = [self.comm_weight, self.rare_weight]
assert len(cascade_bbox_reg_weights) == self.num_cascade_stages
# assert self.num_cascade_stages == 3, "CascadeFREQROIHeads only support 3 stages now!"
assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \
"CascadeFREQROIHeads only support class-agnostic regression now!"
assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0]
# fmt: on
in_channels = [self.feature_channels[f] for f in self.in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
self.box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
pooled_shape = ShapeSpec(
channels=in_channels, width=pooler_resolution, height=pooler_resolution
)
if self.has_selayer:
self.se_layer = nn.ModuleList()
for k in range(self.num_cascade_stages):
self.se_layer.append(SELayer(channel=in_channels).cuda())
self.box_head = nn.ModuleList()
self.box_predictor = nn.ModuleList()
self.class_predictor = nn.ModuleList()
self.box2box_transform = []
self.proposal_matchers = []
self.W_G = []
## self.num_classes_list
# first one: frequent classes + 1 (common or rare), second one: frequent and common classes + 1 (rare)
# third one: frequent and common and rare classes.
self.num_classes_list = [self.num_f_classes, self.num_f_classes + self.num_c_classes, self.num_f_classes + self.num_c_classes + self.num_r_classes]
self.num_classes_list_with_bg = []
# if self.multi_head: self.num_cascade_stages += 1
for k in range(self.num_cascade_stages):
## Only full classes here
num_classes = self.num_classes_list[2]
# num_classes = self.num_classes_list[k]
# if k != self.num_cascade_stages - 1:
# num_classes += 1
self.num_classes_list_with_bg.append(num_classes)
box_head = build_box_head(cfg, pooled_shape)
self.box_head.append(box_head)
if self.self_attentiion and k != 0:
self.box_predictor.append(
FastRCNNOutputLayers_box(
box_head.output_size + self.enhance_size, num_classes, cls_agnostic_bbox_reg=True
)
)
self.class_predictor.append(
FastRCNNOutputLayers_class(
box_head.output_size + self.enhance_size, num_classes, cls_agnostic_bbox_reg=True
)
)
self.W_G.append(
nn.Linear(self.class_predictor[k-1].cls_score.weight.shape[1] + 1, self.enhance_size).cuda()
)
else:
self.box_predictor.append(
FastRCNNOutputLayers_box(
box_head.output_size, num_classes, cls_agnostic_bbox_reg=True
)
)
self.class_predictor.append(
FastRCNNOutputLayers_class(
box_head.output_size, num_classes, cls_agnostic_bbox_reg=True
)
)
self.box2box_transform.append(Box2BoxTransform(weights=cascade_bbox_reg_weights[k]))
if k == 0:
# The first matching is done by the matcher of ROIHeads (self.proposal_matcher).
self.proposal_matchers.append(None)
else:
self.proposal_matchers.append(
Matcher([cascade_ious[k]], [0, 1], allow_low_quality_matches=False)
)
self.mute_loss_stage = []
if self.stage_phase == 0:
logger.info("Parameters in self.class_predictor[1] are fixed!!")
for param in self.class_predictor[1].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[1] are fixed!!")
for param in self.box_predictor[1].parameters():
param.requires_grad = False
##### stage 3
# logger.info("Parameters in self.class_predictor[2] are fixed!!")
# for param in self.class_predictor[2].parameters():
# param.requires_grad = False
# logger.info("Parameters in self.box_predictor[2] are fixed!!")
# for param in self.box_predictor[2].parameters():
# param.requires_grad = False
if self.multi_head:
logger.info("Parameters in self.box_head[3] are fixed!!")
for param in self.box_head[3].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[3] are fixed!!")
for param in self.box_predictor[3].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[3] are fixed!!")
for param in self.class_predictor[3].parameters():
param.requires_grad = False
if self.KLCE and self.stage_phase == 1:
self.mute_loss_stage = [0, 3]
logger.info("Parameters in self.box_head[0] are fixed!!")
for param in self.box_head[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[0] are fixed!!")
for param in self.box_predictor[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[0] are fixed!!")
for param in self.class_predictor[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[2] are fixed!!")
for param in self.class_predictor[2].parameters():
param.requires_grad = False
if self.multi_head:
logger.info("Parameters in self.box_head[3] are fixed!!")
for param in self.box_head[3].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[3] are fixed!!")
for param in self.box_predictor[3].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[3] are fixed!!")
for param in self.class_predictor[3].parameters():
param.requires_grad = False
if self.has_selayer:
for param in self.se_layer[0].parameters():
param.requires_grad = False
if self.KLCE and self.stage_phase == 2:
self.mute_loss_stage = [0, 1]
logger.info("Parameters in self.box_head[0] are fixed!!")
for param in self.box_head[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[0] are fixed!!")
for param in self.box_predictor[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[0] are fixed!!")
for param in self.class_predictor[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_head[1] are fixed!!")
for param in self.box_head[1].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[1] are fixed!!")
for param in self.box_predictor[1].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[1] are fixed!!")
for param in self.class_predictor[1].parameters():
param.requires_grad = False
if self.has_selayer:
for param in self.se_layer[0].parameters():
param.requires_grad = False
for param in self.se_layer[1].parameters():
param.requires_grad = False
if self.KLCE and self.stage_phase == 3:
self.mute_loss_stage = [0, 1, 2]
logger.info("Parameters in self.box_head[0] are fixed!!")
for param in self.box_head[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[0] are fixed!!")
for param in self.box_predictor[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[0] are fixed!!")
for param in self.class_predictor[0].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_head[1] are fixed!!")
for param in self.box_head[1].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[1] are fixed!!")
for param in self.box_predictor[1].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[1] are fixed!!")
for param in self.class_predictor[1].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_head[2] are fixed!!")
for param in self.box_head[2].parameters():
param.requires_grad = False
logger.info("Parameters in self.box_predictor[2] are fixed!!")
for param in self.box_predictor[2].parameters():
param.requires_grad = False
logger.info("Parameters in self.class_predictor[2] are fixed!!")
for param in self.class_predictor[2].parameters():
param.requires_grad = False
## mask in roi_head.py
if self.has_selayer:
for param in self.se_layer[0].parameters():
param.requires_grad = False
for param in self.se_layer[1].parameters():
param.requires_grad = False
def forward(self, images, features, proposals, targets=None):
    """Run the cascade ROI heads on one batch.

    Args:
        images: unused; deleted immediately (kept for interface parity).
        features (dict[str, Tensor]): feature maps keyed by name.
        proposals (list[Instances]): per-image RPN proposals.
        targets (list[Instances], optional): ground truth; required when
            training or when ``test_keep_anns`` inference is enabled.

    Returns:
        (proposals, losses dict) in training, (pred_instances, {}) otherwise.
    """
    del images
    # reIndex the targets: remap per-frequency (f/c/r) gt class ids into the
    # concatenated [frequent | common | rare] label space (see
    # target_gt_classes_transform, which adds per-group offsets in place).
    if self.training or self.test_keep_anns:
        # print("targets:",targets)
        targets_list = self.target_gt_classes_transform(targets, self.num_classes_list, self.num_classes_list_with_bg)
        assert len(targets_list[0]) == len(targets_list[1]) == len(targets_list[2])
    if self.learn_weighted_CE:
        logger.info("Learning weight CE comm, rare = {}, {}".format(self.comm_weight,self.rare_weight))
    if self.training or self.test_keep_anns:
        # Two separate samplings: proposals_mask uses the stage-2 (full) label
        # space for the mask branch, proposals uses the stage-0 label space for
        # the box branch.
        proposals_mask = self.label_and_sample_proposals(proposals, targets_list[2], bg_classes = self.num_classes_list_with_bg[2])
        proposals = self.label_and_sample_proposals(proposals, targets_list[0], bg_classes = self.num_classes_list_with_bg[0])
    ### analyze the proposal boxes
    # p_bboxes = {'gt':{'f':0,'c':0,'r':0}, 'prop':{'f':0,'c':0,'r':0}}
    # with open('./chris_output/LVIS_proposal_analysis.pkl', 'rb') as f:
    #     p_bboxes = f.read()
    #     p_bboxes = pickle.loads(p_bboxes, encoding='latin1')
    #     f.close()
    # for i in range(len(proposals)):
    #     p_bboxes['gt']['f'] += (targets_list[0][i].get('gt_frequencys') == 0).sum().item()
    #     p_bboxes['gt']['c'] += (targets_list[0][i].get('gt_frequencys') == 1).sum().item()
    #     p_bboxes['gt']['r'] += (targets_list[0][i].get('gt_frequencys') == 2).sum().item()
    #     foreground_mask = proposals[0].get('gt_classes') != 1230
    #     p_bboxes['prop']['f'] += (proposals[i].get('gt_frequencys')[foreground_mask] == 0).sum().item()
    #     p_bboxes['prop']['c'] += (proposals[i].get('gt_frequencys')[foreground_mask] == 1).sum().item()
    #     p_bboxes['prop']['r'] += (proposals[i].get('gt_frequencys')[foreground_mask] == 2).sum().item()
    # with open('./chris_output/LVIS_proposal_analysis.pkl', 'wb') as f:
    #     pickle.dump(p_bboxes, f)
    #     f.close()
    features_list = [features[f] for f in self.in_features]
    ###################
    # Start forward
    if self.training:
        losses = self._forward_box(features_list, proposals, targets_list)
        # if self.stage_phase != 3:
        # Mask loss uses the separately sampled proposals_mask (full label space).
        losses.update(self._forward_mask(features_list, proposals_mask))
        losses.update(self._forward_keypoint(features_list, proposals))
        return proposals, losses
    elif not self.training and self.test_keep_anns:
        # Inference with ground-truth annotations kept attached to proposals.
        pred_instances = self._forward_box(features_list, proposals, targets_list)
        pred_instances = self.forward_with_given_boxes(features, pred_instances)
        pred_instances = self.reindex_result(pred_instances)
        return pred_instances, {}
    else:
        # Plain inference: no targets are available.
        pred_instances = self._forward_box(features_list, proposals)
        pred_instances = self.forward_with_given_boxes(features, pred_instances)
        pred_instances = self.reindex_result(pred_instances)
        return pred_instances, {}
def _forward_box(self, features, proposals, targets_list=None, enhanced_feature=[]):
    """Run every cascade stage of the box branch.

    Args:
        features (list[Tensor]): input feature maps for the box pooler.
        proposals (list[Instances]): per-image proposals for stage 0.
        targets_list: [gt_instances_f, gt_instances_fc, gt_instances_fcr];
            required for training / test_keep_anns.
        enhanced_feature (list): semantic feature carried between stages when
            self-attention is enabled. NOTE(review): mutable default argument;
            it is only rebound (never mutated in place) here, so sharing
            across calls looks harmless — confirm.

    Returns:
        dict of per-stage losses in training, else list[Instances] predictions.
    """
    # targets_list = [gt_instances_f, gt_instances_fc, gt_instances_fcr]
    head_outputs = []
    if self.test_keep_anns:
        head_outputs_classes = []
        head_outputs_classes_freq = []
    image_sizes = [x.image_size for x in proposals]
    for k in range(self.num_cascade_stages):
        # print('len(proposals):',len(proposals)) # 2
        # print('proposals:',proposals[0]) # 512
        if k > 0 :
            if not self.multi_head:
                # The output boxes of the previous stage are the input proposals of the next stage
                pred_boxes = head_outputs[-1].predict_boxes()
                proposals = self._create_proposals_from_boxes(
                    pred_boxes, image_sizes
                )
                if self.training or self.test_keep_anns:
                    proposals = self._match_and_label_boxes(proposals, k, targets_list[k])
            else:
                if k < self.num_cascade_stages - 1:
                    # The output boxes of the previous stage are the input proposals of the next stage
                    pred_boxes = head_outputs[-1].predict_boxes()
                    proposals = self._create_proposals_from_boxes(
                        pred_boxes, image_sizes
                    )
                    if self.training or self.test_keep_anns:
                        proposals = self._match_and_label_boxes(proposals, k, targets_list[k])
                else:
                    # Multi-head final stage: re-start from the stage-0 boxes and
                    # use the stage-1 matcher/targets.
                    pred_boxes = head_outputs[0].predict_boxes()
                    proposals = self._create_proposals_from_boxes(
                        pred_boxes, image_sizes
                    )
                    if self.training or self.test_keep_anns:
                        proposals = self._match_and_label_boxes(proposals, 1, targets_list[1])
        outputs, enhanced_feature = self._run_stage(features, proposals, enhanced_feature, k)
        head_outputs.append(outputs)
        if self.test_keep_anns:
            assert len(proposals) == 1, "keep anns inference only support 1 image per GPU."
            head_outputs_classes.append(proposals[0].get_fields()['gt_classes'])
            head_outputs_classes_freq.append(proposals[0].get_fields()['gt_frequencys'])
    if self.training:
        losses = {}
        storage = get_event_storage()
        for stage, output in enumerate(head_outputs):
            # Stages listed in mute_loss_stage (frozen heads) contribute no loss.
            if stage not in self.mute_loss_stage:
                with storage.name_scope("stage{}".format(stage)):
                    stage_losses = output.losses()
                    losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
        return losses
    else:
        # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
        scores_per_stage = [h.predict_probs() for h in head_outputs]
        if self.test_keep_anns:
            gt_class_per_stage = [h.gt_classes for h in head_outputs]
        scores = []
        for idx, scores_per_image in enumerate(zip(*scores_per_stage)):
            scores_per_image = list(scores_per_image)
            # print(scores_per_image[0].shape)
            # print(scores_per_image[1].shape)
            len_0 = self.num_classes_list[0] # 317
            len_1 = self.num_classes_list[1] # 778
            len_2 = self.num_classes_list[2] # 1232
            # scores_per_image[0] = torch.cat((scores_per_image[0][:,0:len_0-2],scores_per_image[1][:,len_0:]),1)
            # scores_per_image[0] = torch.cat((scores_per_image[0],scores_per_image[2][:,len_1:]),1)
            # scores_per_image[1] = torch.cat((scores_per_image[1],scores_per_image[2][:,len_1:]),1)
            # print(scores_per_image[2].shape)
            # print(sum(list(scores_per_image)).shape) # 1000,1231
            # tmp = torch.cat((scores_per_image[0][:,:len_0],scores_per_image[1][:,(len_0):(len_1)],scores_per_image[2][:,(len_1-2):(len_2-1)]),1)
            # type 1
            # weight = torch.ones(scores_per_image[0].size()[0]).cuda()/scores_per_image[0][:,:len_0].sum(1)
            # feq_mask = scores_per_image[3].argmax(1) < len_0
            # scores_per_image[3][feq_mask,:len_0] = (scores_per_image[0][feq_mask,:len_0] + scores_per_image[3][feq_mask,:len_0]) / 2.
            # tmp = scores_per_image[3]
            # type 2
            # tmp = torch.zeros(scores_per_image[0].size()).cuda()
            # for i in range(scores_per_image[0].shape[0]):
            #     if scores_per_image[0][i,:].argmax() <= len_0 and scores_per_image[0][i,:].max() >= 0.7:
            #         tmp[i,:] = scores_per_image[0][i,:]
            #     else:
            #         tmp[i,:] = scores_per_image[1][i,:]
            # type 2;
            # tmp = torch.zeros(scores_per_image[0].size()).cuda()
            # for i in range(scores_per_image[0].shape[0]):
            #     if scores_per_image[0][i,:].argmax() <= len_0:# and scores_per_image[0][i,:].max() >= 0.4:
            #         weight = scores_per_image[1][i,:len_0].sum()/scores_per_image[0][i,:len_0].sum()
            #         freq_score = (scores_per_image[0][i,:len_0] * weight + scores_per_image[1][i,:len_0]) / 2.
            #         tmp[i,:] = torch.cat((freq_score, scores_per_image[1][i,len_0:]),0)
            #     else:
            #         tmp[i,:] = scores_per_image[1][i,:]
            # type 3
            # weight = scores_per_image[1][:,:len_0].sum(1)/scores_per_image[0][:,:len_0].sum(1)
            # freq_score = ((scores_per_image[0][:,:len_0].T * weight).T * scores_per_image[1][:,:len_0]) /2.
            # tmp = torch.cat((freq_score, scores_per_image[1][:,len_0:]),1)
            # tmp = scores_per_image[1]
            # scores.append(sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages))
            # if self.stage_phase == 2:
            #     tmp = scores_per_image[1]
            # tmp = (scores_per_image[1] + scores_per_image[2]) / 2.
            # keep annotations
            # if self.test_keep_anns:
            #     tmp = torch.zeros(scores_per_image[0].size()).cuda()
            #     for i in range(scores_per_image[0].shape[0]):
            #         freq_mask = gt_class_per_stage[-1] < len_0
            #         other_mask = ~freq_mask
            #         # w0 = nn.Softmax(1)(scores_per_image[0][freq_mask])[:len_0].sum(1)
            #         # w1 = nn.Softmax(1)(scores_per_image[1][freq_mask])[:len_0].sum(1)
            #         tmp[freq_mask,:len_0] = (scores_per_image[0][freq_mask,:len_0] + scores_per_image[1][freq_mask,:len_0]) / 2.
            #         tmp[other_mask, len_0:len_1] = scores_per_image[1][other_mask, len_0:len_1]
            ##########
            # alpha = 0.4
            # feq_mask = (scores_per_image[3].argmax(1) < len_0) & (scores_per_image[3].max(1).values >= alpha)
            # # other_mask = ~feq_mask
            # comm_mask = (scores_per_image[3].argmax(1) >= len_0) & (scores_per_image[3].argmax(1) < len_1) & (scores_per_image[3].max(1).values >= alpha)
            # scores_per_image[3][feq_mask,len_0:] = 0.
            # scores_per_image[3][comm_mask,:len_0] = 0.
            ##########
            # Current behavior: use only the scores of stage `head` (= stage 0)
            # for final classification; the experiments above are disabled.
            k = 1
            head = 0
            # mask = torch.ones(scores_per_image[head].size(), dtype=torch.bool)
            # topk_val, topk_idx = torch.topk(scores_per_image[head], k, dim=1)
            # for i in range(k):
            #     mask[torch.arange(len(mask)), topk_idx[:,i]] = False
            # scores_per_image[head][mask] = 0.
            tmp = scores_per_image[head]
            scores.append(tmp)
            if self.test_cal_acc:
                # Accumulate per-frequency classification accuracy counters in a
                # JSON file across images (read-modify-write each iteration).
                with open(self.acc_data_path) as json_file:
                    acc_dict = json.load(json_file)
                # NOTE(review): the dict literal below is a no-op expression; it
                # only documents the expected schema of acc_dict — confirm it
                # was not meant to be a default before the json.load above.
                {'f':0,'f_gt':0,'c':0,'c_gt':0,'r':0,'r_gt':0,'c_fgt':0,'f_cgt':0,
                 'f_rgt':0,'c_rgt':0,'r_fgt':0,'r_cgt':0,'pred_f':0,'pred_c':0,'pred_r':0}
                acc_dict['f']+=int(((gt_class_per_stage[-1] == tmp.argmax(1)) & (gt_class_per_stage[-1] < len_0)).sum())
                acc_dict['f_gt']+=int((gt_class_per_stage[-1] < len_0).sum())
                acc_dict['c']+=int(((gt_class_per_stage[-1] == tmp.argmax(1)) & (gt_class_per_stage[-1] < len_1) & (gt_class_per_stage[-1] >= len_0)).sum())
                acc_dict['c_gt']+=int(((gt_class_per_stage[-1] < len_1) & (gt_class_per_stage[-1] >= len_0)).sum())
                acc_dict['r']+=int(((gt_class_per_stage[-1] == tmp.argmax(1)) & (gt_class_per_stage[-1] < len_2) & (gt_class_per_stage[-1] >= len_1)).sum())
                acc_dict['r_gt']+=int(((gt_class_per_stage[-1] < len_2) & (gt_class_per_stage[-1] >= len_1)).sum())
                acc_dict['f_cgt']+=int(((gt_class_per_stage[-1] < len_1) & (gt_class_per_stage[-1] >= len_0) & (tmp.argmax(1) < len_0)).sum())
                acc_dict['c_fgt']+=int(((gt_class_per_stage[-1] < len_0) & (tmp.argmax(1) >= len_0) & (tmp.argmax(1) < len_1)).sum())
                acc_dict['f_rgt']+=int(((gt_class_per_stage[-1] < len_2) & (gt_class_per_stage[-1] >= len_1) & (tmp.argmax(1) < len_0)).sum())
                acc_dict['c_rgt']+=int(((gt_class_per_stage[-1] < len_2) & (gt_class_per_stage[-1] >= len_1) & (tmp.argmax(1) >= len_0) & (tmp.argmax(1) < len_1)).sum())
                acc_dict['r_fgt']+=int(((gt_class_per_stage[-1] < len_0) & (tmp.argmax(1) >= len_1) & (tmp.argmax(1) < len_2)).sum())
                acc_dict['r_cgt']+=int(((gt_class_per_stage[-1] < len_1) & (gt_class_per_stage[-1] >= len_0) & (tmp.argmax(1) >= len_1) & (tmp.argmax(1) < len_2)).sum())
                acc_dict['pred_f']+=int((tmp.argmax(1) < len_0).sum())
                acc_dict['pred_c']+=int(((tmp.argmax(1) >= len_0) & (tmp.argmax(1) < len_1)).sum())
                acc_dict['pred_r']+=int(((tmp.argmax(1) >= len_1) & (tmp.argmax(1) < len_2)).sum())
                with open(self.acc_data_path, 'w') as outfile:
                    json.dump(acc_dict, outfile)
        # Average the scores across heads
        # Use the boxes of the last head
        # NOTE(review): despite the comment above, the boxes come from
        # head_outputs[0] (stage 0), not the last head — confirm intended.
        boxes = head_outputs[0].predict_boxes()
        pred_instances, _ = fast_rcnn_inference(
            boxes,
            scores,
            image_sizes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_detections_per_img,
        )
        return pred_instances
# def _forward_mask(self, features, instances):
# """
# Forward logic of the mask prediction branch.
# Args:
# features (list[Tensor]): #level input features for mask prediction
# instances (list[Instances]): the per-image instances to train/predict masks.
# In training, they can be the proposals.
# In inference, they can be the predicted boxes.
# Returns:
# In training, a dict of losses.
# In inference, update `instances` with new fields "pred_masks" and return it.
# """
# if not self.mask_on:
# return {} if self.training else instances
# if self.training:
# # The loss is only defined on positive proposals.
# proposals, _ = select_foreground_proposals(instances, self.num_classes)
# proposal_boxes = [x.proposal_boxes for x in proposals]
# mask_features = self.mask_pooler(features, proposal_boxes)
# mask_logits = self.mask_head(mask_features)
# return {"loss_mask": mask_rcnn_loss(mask_logits, proposals)}
# else:
# pred_boxes = [x.pred_boxes for x in instances]
# mask_features = self.mask_pooler(features, pred_boxes)
# mask_logits = self.mask_head(mask_features)
# mask_rcnn_inference(mask_logits, instances)
# return instances
@torch.no_grad()
def _match_and_label_boxes(self, proposals, stage, targets):
    """
    Match proposals with groundtruth using the matcher at the given stage.
    Label the proposals as foreground or background based on the match.

    Args:
        proposals (list[Instances]): One Instances for each image, with
            the field "proposal_boxes".
        stage (int): the current stage
        targets (list[Instances]): the ground truth instances

    Returns:
        list[Instances]: the same proposals, but with fields "gt_classes",
            "gt_frequencys" and "gt_boxes"
    """
    num_fg_samples, num_bg_samples = [], []
    for proposals_per_image, targets_per_image in zip(proposals, targets):
        # IoU between every gt box and every proposal of this image.
        match_quality_matrix = pairwise_iou(
            targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
        )
        # proposal_labels are 0 or 1
        matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
        if len(targets_per_image) > 0:
            gt_classes = targets_per_image.gt_classes[matched_idxs]
            # Label unmatched proposals (0 label from matcher) as background
            # (label = per-stage background class id).
            gt_classes[proposal_labels == 0] = self.num_classes_list_with_bg[stage] #self.num_classes
            gt_boxes = targets_per_image.gt_boxes[matched_idxs]
            gt_frequencys = targets_per_image.gt_frequencys[matched_idxs]
        else:
            # No ground truth in this image: mark everything as background
            # with an all-zero box.
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes_list_with_bg[stage] # self.num_classes
            # NOTE(review): gt_frequencys is filled with the background class
            # id rather than a frequency code (0/1/2) here — confirm intended.
            gt_frequencys = torch.zeros_like(matched_idxs) + self.num_classes_list_with_bg[stage]
            gt_boxes = Boxes(
                targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
            )
        proposals_per_image.gt_classes = gt_classes
        proposals_per_image.gt_frequencys = gt_frequencys
        proposals_per_image.gt_boxes = gt_boxes
        num_fg_samples.append((proposal_labels == 1).sum().item())
        num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])
    # Log the number of fg/bg samples in each stage; skipped for keep-anns
    # inference, where no training event storage is in scope.
    if not self.test_keep_anns:
        storage = get_event_storage()
        storage.put_scalar(
            "stage{}/roi_head/num_fg_samples".format(stage),
            sum(num_fg_samples) / len(num_fg_samples),
        )
        storage.put_scalar(
            "stage{}/roi_head/num_bg_samples".format(stage),
            sum(num_bg_samples) / len(num_bg_samples),
        )
    return proposals
def _run_stage(self, features, proposals, enhanced_feature, stage):
    """
    Run one cascade stage: pool ROI features, apply the stage's box head and
    predictors, and wrap everything in FastRCNNOutputs.

    Args:
        features (list[Tensor]): #lvl input features to ROIHeads
        proposals (list[Instances]): #image Instances, with the field "proposal_boxes"
        enhanced_feature: image-level semantic feature produced by the previous
            stage when self-attention is enabled; may be an empty list.
        stage (int): the current stage

    Returns:
        (FastRCNNOutputs, enhanced_feature): the output of this stage and the
        (possibly updated) semantic feature for the next stage.
    """
    ##### stage 3
    # if self.shared_weight:
    #     if stage == 1 and not self.KLCE:
    #         ## F -> C
    #         shared_len = self.num_classes_list[0] #self.box_predictor[stage-1].cls_score.weight.shape[0]-2
    #         shared_wid = self.class_predictor[stage-1].cls_score.weight.shape[1]
    #         self.class_predictor[stage].cls_score.weight.data[:shared_len,:shared_wid] = self.class_predictor[stage-1].cls_score.weight.data[:shared_len,:shared_wid]
    #     if stage == 2:
    #         ## C -> R
    #         shared_len = self.num_classes_list[1] #self.box_predictor[stage-1].cls_score.weight.shape[0]-2
    #         shared_wid = self.class_predictor[stage-1].cls_score.weight.shape[1]
    #         self.class_predictor[stage].cls_score.weight.data[:shared_len,:shared_wid] = self.class_predictor[stage-1].cls_score.weight.data[:shared_len,:shared_wid]
    #         ## R -> F
    #         shared_len = self.box_predictor[0].cls_score.weight.shape[0]-2
    #         shared_wid = self.box_predictor[0].cls_score.weight.shape[1]
    #         self.box_predictor[0].cls_score.weight.data[:shared_len,:shared_wid] = self.box_predictor[2].cls_score.weight.data[:shared_len,:shared_wid]
    #         ## R -> C
    #         shared_len_2 = self.box_predictor[1].cls_score.weight.shape[0]-2
    #         shared_wid_2 = self.box_predictor[1].cls_score.weight.shape[1]
    #         self.box_predictor[1].cls_score.weight.data[(shared_len+2):shared_len_2,:shared_wid_2] = self.box_predictor[2].cls_score.weight.data[(shared_len+2):shared_len_2,:shared_wid_2]
    # Optional per-stage SE layer applied to every input feature map.
    if self.has_selayer:
        new_feature = []
        for f in features:
            new_feature.append(self.se_layer[stage](f))
        features = new_feature
    box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
    if self.test_keep_anns or self.training:
        box_gt_classes = torch.cat([x.gt_classes for x in proposals],0)
    # The original implementation averages the losses among heads,
    # but scale up the parameter gradients of the heads.
    # This is equivalent to adding the losses among heads,
    # but scale down the gradients on features.
    box_features = _ScaleGradient.apply(box_features, 1.0 / 3.0)
    if self.KLCE:
        # KLCE path: compute auxiliary logits (pred_class_logits_0) from an
        # earlier head; presumably used by FastRCNNOutputs as a KL/consistency
        # target — confirm in FastRCNNOutputs.
        if self.test_keep_anns or self.training:
            if stage == 1 or stage == 0 :
                box_features_0 = self.box_head[0](box_features)
                pred_class_logits_0 = self.class_predictor[0](box_features_0)
            elif stage == 2:
                box_features_0 = self.box_head[1](box_features)
                pred_class_logits_0 = self.class_predictor[1](box_features_0)
            else:
                ##### stage 3
                len_0 = self.num_classes_list[0] # 317
                len_1 = self.num_classes_list[1] # 778
                len_2 = self.num_classes_list[2] # 1232
                box_features_0 = self.box_head[0](box_features)
                pred_class_logits_0 = self.class_predictor[0](box_features_0)
                box_features_1 = self.box_head[1](box_features)
                pred_class_logits_1 = self.class_predictor[1](box_features_1)
                # Blend stage-0 and stage-1 logits for gt-frequent proposals,
                # keep stage-1 logits for the rest.
                tmp = torch.zeros(pred_class_logits_1.size()).cuda() # (1024, 1231)
                freq_mask = box_gt_classes < len_0
                # print(freq_mask.shape)
                other_mask = ~freq_mask
                tmp[freq_mask,:len_0] = (pred_class_logits_0[freq_mask,:len_0] + pred_class_logits_1[freq_mask,:len_0]) / 2.
                tmp[other_mask, len_0:len_1] = pred_class_logits_1[other_mask, len_0:len_1]
                pred_class_logits_0 = tmp
                del box_features_0
        else:
            box_features_0 = self.box_head[0](box_features)
            pred_class_logits_0 = self.class_predictor[0](box_features_0)
    box_features = self.box_head[stage](box_features)
    if self.self_attentiion and len(enhanced_feature) != 0:
        # Broadcast the image-level semantic feature to every ROI and append
        # it to the pooled features before prediction.
        enhanced_feature = enhanced_feature[0].repeat(len(box_features),1)
        assert len(enhanced_feature) == len(box_features)
        box_features = torch.cat((box_features, enhanced_feature), 1)
    pred_proposal_deltas = self.box_predictor[stage](box_features)
    pred_class_logits = self.class_predictor[stage](box_features)
    del box_features
    if self.KLCE:
        outputs = FastRCNNOutputs(
            self.box2box_transform[stage],
            pred_class_logits,
            pred_proposal_deltas,
            proposals,
            self.smooth_l1_beta,
            self.num_classes_list,
            stage,
            self.weighted_CE,
            self.weight,
            self.KLCE,
            pred_class_logits_0,
        )
    else:
        outputs = FastRCNNOutputs(
            self.box2box_transform[stage],
            pred_class_logits,
            pred_proposal_deltas,
            proposals,
            self.smooth_l1_beta,
            self.num_classes_list,
            stage,
            self.weighted_CE,
            self.weight,
            self.KLCE,
        )
    if self.self_attentiion and stage < 2:
        # my defined of attention:
        # pool the classifier weights by the per-class max logits of this
        # image to build an image-wise semantic feature for the next stage.
        # all_possible_classes_logits = [num_class + 1 - 2]
        all_possible_classes_logits = pred_class_logits[:,:-1].max(0).values
        all_possible_classes_logits = nn.Softmax(0)(all_possible_classes_logits)
        # global_semantic_features = [num_class + 1 - 2, 1025]
        global_semantic_features = torch.cat((self.class_predictor[stage].cls_score.weight[:-1,:],
                                              self.class_predictor[stage].cls_score.bias[:-1].unsqueeze(1)), 1).detach()
        # img_wise_semantic_pool = [1, 1025]
        img_wise_semantic_pool = torch.mm(all_possible_classes_logits.unsqueeze(0),global_semantic_features)
        tmp_feature = img_wise_semantic_pool.squeeze(0).repeat(len(pred_class_logits),1)
        enhanced_feature = self.W_G[stage](tmp_feature.to(features[0].device))
    return outputs, enhanced_feature
def _create_proposals_from_boxes(self, boxes, image_sizes):
    """
    Wrap predicted boxes into per-image Instances so they can serve as the
    next stage's proposals.

    Args:
        boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4
        image_sizes (list[tuple]): list of image shapes in (h, w)

    Returns:
        list[Instances]: per-image proposals carrying ``proposal_boxes``.
    """
    proposals = []
    for per_image_tensor, image_size in zip(boxes, image_sizes):
        # Just like RPN, the proposals should not have gradients.
        image_boxes = Boxes(per_image_tensor.detach())
        image_boxes.clip(image_size)
        if self.training:
            # Empty boxes are only dropped during training; at inference the
            # scores from each stage must stay aligned so they can be added.
            image_boxes = image_boxes[image_boxes.nonempty()]
        inst = Instances(image_size)
        inst.proposal_boxes = image_boxes
        proposals.append(inst)
    return proposals
def target_gt_classes_transform(self, target, num_classes_list, num_classes_list_with_bg):
    """Remap gt class ids into the concatenated f/c/r label space.

    Class ids of common instances are shifted by num_classes_list[0] and
    rare ones by num_classes_list[1], so the three frequency groups occupy
    disjoint index ranges. The shift is applied IN PLACE on the targets'
    ``gt_classes`` tensors.

    Returns:
        tuple of three per-stage target lists. NOTE(review): with the
        per-stage truncation lines commented out below, all three lists
        currently hold the same remapped instances — confirm intended.
    """
    targets_stage0 = []
    targets_stage1 = []
    targets_stage2 = []
    for idx_batch, targ_per_image in enumerate(target):
        fields = targ_per_image.get_fields()
        gt_classes = fields['gt_classes']
        frequencys = fields['gt_frequencys']
        freq_mask = fields['gt_frequencys'] == 0
        comm_mask = fields['gt_frequencys'] == 1
        rare_mask = fields['gt_frequencys'] == 2
        # Union of the three masks: selects every annotated instance.
        all_mask = freq_mask + comm_mask + rare_mask
        # In-place offsets move common/rare ids past the frequent range.
        gt_classes[comm_mask] += num_classes_list[0]
        gt_classes[rare_mask] += num_classes_list[1]
        targets_stage2.append(targ_per_image[all_mask])
        # gt_classes[rare_mask] = num_classes_list_with_bg[1] - 1
        targets_stage1.append(targ_per_image[all_mask])
        # gt_classes[comm_mask] = num_classes_list_with_bg[0] - 1
        # gt_classes[rare_mask] = num_classes_list_with_bg[0] - 1
        targets_stage0.append(targ_per_image[all_mask])
        # for i, freq in enumerate(frequencys):
        #     if freq == 0:
        #         gt_classes[i] = gt_classes[i]
        #     if freq == 1:
        #         gt_classes[i] = gt_classes[i] + num_classes_list[0]
        #     if freq == 2:
        #         gt_classes[i] = gt_classes[i] + num_classes_list[1]
    return targets_stage0, targets_stage1, targets_stage2
def split_input_to_freq(self, batched_inputs):
    """Split every batched input into its frequent/common/rare parts.

    Each element's ``frequency_split()`` is expected to return at least
    three items (frequent, common, rare).

    Returns:
        list of three parallel lists: [frequent, common, rare].
    """
    frequent, common, rare = [], [], []
    for item in batched_inputs:
        freq_part, comm_part, rare_part = item.frequency_split()[:3]
        frequent.append(freq_part)
        common.append(comm_part)
        rare.append(rare_part)
    return [frequent, common, rare]
def Instance_shifter(self, instance_list, shift_number, training = True):
    """Shift class indices of every instance in the list, in place.

    Calls each element's ``classes_shifter`` with ``shift_number`` and
    returns the (mutated) input list for chaining.
    """
    for inst in instance_list:
        inst.classes_shifter(shift_number, train=training)
    return instance_list
def Instance_combinator(self, instance_list):
    """Concatenate per-group results batch-wise.

    ``instance_list`` holds one result list per frequency group, each
    indexed by batch. For every batch index, the entries from all groups
    are concatenated via the instance type's ``cat`` classmethod.
    """
    inst_type = type(instance_list[0][0])
    num_batches = len(instance_list[0])
    combined = []
    for batch_idx in range(num_batches):
        per_batch = [group[batch_idx] for group in instance_list]
        combined.append(inst_type.cat(per_batch))
    return combined
def reindex_result(self, result_list):
    """
    Map each prediction's stage-local f/c/r class indices back to the
    original dataset category indices.

    Args:
        result_list (list): per-image prediction results; each element must
            support ``classes_fcr_reindex``.

    Returns:
        list: the same result objects (reindexed in place) in input order.
    """
    assert len(result_list) > 0
    results = []
    # Iterate directly instead of by index; the previously computed
    # I_type/type(...) local was unused and has been removed.
    for inst in result_list:
        # classes_fcr_reindex mutates the instance's class ids in place.
        inst.classes_fcr_reindex(self.index_list, self.num_classes_list)
        results.append(inst)
    return results
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _iceCap = require('ice-cap');
var _iceCap2 = _interopRequireDefault(_iceCap);
var _DocBuilder = require('./DocBuilder.js');
var _DocBuilder2 = _interopRequireDefault(_DocBuilder);
/**
 * Wrap a CommonJS export so it can be consumed like an ES module:
 * modules already flagged with `__esModule` are returned as-is, anything
 * else is wrapped as `{ default: obj }`.
 * @param {*} obj - the required module object.
 * @returns {Object} an object with a `default` property.
 */
function _interopRequireDefault(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  return { default: obj };
}
/**
* File output html builder class.
*/
/**
 * File output html builder class.
 */
class TestFileDocBuilder extends _DocBuilder2.default {
  /**
   * Build and write one html page per doc object of kind "testFile".
   * @param {function(string, string)} writeFile - callback that persists a page.
   * @param {function(string, string)} copyDir - unused by this builder; kept for interface parity.
   */
  exec(writeFile, copyDir) {
    const layout = this._buildLayoutDoc();
    const testFileDocs = this._find({ kind: 'testFile' });
    for (const testFileDoc of testFileDocs) {
      const outputPath = this._getOutputFileName(testFileDoc);
      const pageBaseUrl = this._getBaseUrl(outputPath);
      const pageTitle = this._getTitle(testFileDoc);
      layout.load('content', this._buildFileDoc(testFileDoc), _iceCap2.default.MODE_WRITE);
      layout.attr('baseUrl', 'href', pageBaseUrl, _iceCap2.default.MODE_WRITE);
      layout.text('title', pageTitle, _iceCap2.default.MODE_WRITE);
      writeFile(outputPath, layout.html);
    }
  }

  /**
   * build file output html.
   * @param {DocObject} doc - target file doc object.
   * @returns {string} html of file output.
   * @private
   */
  _buildFileDoc(doc) {
    const page = new _iceCap2.default(this._readTemplate('file.html'));
    page.text('title', doc.name);
    page.text('content', doc.content);
    page.drop('emptySourceCode', Boolean(doc.content));
    return page.html;
  }
}
exports.default = TestFileDocBuilder;
|
/**
* Auto-generated action file for "U.S. EPA Enforcement and Compliance History Online (ECHO) - Detailed Facility Report (DFR)" API.
*
* Generated at: 2019-05-07T14:40:23.865Z
* Mass generator version: 1.1.0
*
* flowground :- Telekom iPaaS / epa-gov-dfr-connector
* Copyright © 2019, Deutsche Telekom AG
* contact: flowground@telekom.de
*
* All files of this connector are licensed under the Apache 2.0 License. For details
* see the file LICENSE on the toplevel directory.
*
*
* Operation: undefined
* Endpoint Path: '/dfr_rest_services.get_map'
* Method: 'post'
*
*/
const Swagger = require('swagger-client');
const processWrapper = require('../services/process-wrapper');
const spec = require('../spec.json');
// this wrapper offers a simplified emitData(data) function
module.exports.process = processWrapper(processAction);

// parameter names for this call (mirrors the operation's spec parameters)
const PARAMETERS = [
    "output",
    "p_id",
    "callback"
];

// mappings from connector field names to API field names
// (identity mapping here; kept by the generator for uniformity)
const FIELD_MAP = {
    "output": "output",
    "p_id": "p_id",
    "callback": "callback"
};
/**
 * Execute the '/dfr_rest_services.get_map' POST operation via the Swagger
 * client and emit the response as a single message.
 *
 * @param {Object} msg - incoming message; msg.body carries the connector fields.
 * @param {Object} cfg - step configuration (server selection, verbose flag, credentials).
 * @returns {Promise} resolves once the response has been emitted.
 */
function processAction(msg, cfg) {
    var isVerbose = process.env.debug || cfg.verbose;

    if (isVerbose) {
        console.log(`---MSG: ${JSON.stringify(msg)}`);
        console.log(`---CFG: ${JSON.stringify(cfg)}`);
        console.log(`---ENV: ${JSON.stringify(process.env)}`);
    }

    // The generated spec defines no request content type for this operation.
    const contentType = undefined;

    const body = msg.body;
    mapFieldNames(body);

    // Pick only the declared parameters out of the (renamed) body.
    let parameters = {};
    for(let param of PARAMETERS) {
        parameters[param] = body[param];
    }

    // credentials for this operation
    let securities = {};

    let callParams = {
        spec: spec,
        // operation has no operationId in the spec; addressed via pathName + method
        operationId: undefined,
        pathName: '/dfr_rest_services.get_map',
        method: 'post',
        parameters: parameters,
        requestContentType: contentType,
        requestBody: body.requestBody,
        securities: {authorized: securities},
        server: spec.servers[cfg.server] || cfg.otherServer,
    };

    if (isVerbose) {
        let out = Object.assign({}, callParams);
        // the full OpenAPI document is too large to log
        out.spec = '[omitted]';
        console.log(`--SWAGGER CALL: ${JSON.stringify(out)}`);
    }

    // Call operation via Swagger client
    return Swagger.execute(callParams).then(data => {
        // emit a single message with data
        this.emitData(data);

        // if the response contains an array of entities, you can emit them one by one:
        //     data.obj.someItems.forEach((item) => {
        //         this.emitData(item);
        //     }
    });
}
/**
 * Recursively rename object keys according to FIELD_MAP, in place.
 * Child values are processed before the key itself is renamed; arrays are
 * walked element by element, primitives are left untouched.
 * @param {*} obj - value to transform (mutated when it is an object/array).
 */
function mapFieldNames(obj) {
    if (Array.isArray(obj)) {
        for (const element of obj) {
            mapFieldNames(element);
        }
        return;
    }
    if (obj === null || typeof obj !== 'object') {
        return;
    }
    for (const key of Object.keys(obj)) {
        mapFieldNames(obj[key]);
        const mapped = FIELD_MAP[key];
        if (mapped && mapped !== key) {
            obj[mapped] = obj[key];
            delete obj[key];
        }
    }
}
|
var request = require('request');
var jsonfile = require('jsonfile');
exports.swagger = true;
exports.login = true;
exports.desc = "Add a user";
exports.run = function(config, info) {
var email = info.args[1];
console.log("Granting " + email.yellow + " push access to " + info.swagger['x-api-id'].yellow + "!");
console.log("");
var user = jsonfile.readFileSync(config.apiFile);
request.post(config.host.url + '/add', {
'form': {
'user': user.token,
'email': email,
'repo': info.swagger['x-api-id'],
}
}, function() {
console.log("Success! ".green + "User has been added.");
process.exit();
});
};
|
'''
HTTP handler to serve general endpoints requests, specifically
http://myserver:9004/endpoints
For how individual endpoint requests are served, look
at endpoint_handler.py
'''
import json
import logging
from tabpy.tabpy_server.common.util import format_exception
from tabpy.tabpy_server.handlers import ManagementHandler
from tornado import gen
import tornado.web
class EndpointsHandler(ManagementHandler):
    """Handles requests to /endpoints: listing (GET) and creation (POST)."""

    def initialize(self, app):
        # Python 3 zero-argument super() (the file already requires 3.6+
        # for its f-strings); delegates handler wiring to ManagementHandler.
        super().initialize(app)

    def get(self):
        """Return all deployed endpoints as a JSON object."""
        if self.should_fail_with_not_authorized():
            self.fail_with_not_authorized()
            return

        self._add_CORS_header()
        self.write(json.dumps(self.tabpy_state.get_endpoints()))

    @gen.coroutine
    def post(self):
        """Create a new endpoint (version 1) from the JSON request body.

        Responds with 201 and the created endpoint on success, 400 on any
        client error (empty or undecodable body, missing ``name``,
        duplicate endpoint), and 500 on unexpected failures.
        """
        if self.should_fail_with_not_authorized():
            self.fail_with_not_authorized()
            return

        try:
            if not self.request.body:
                self.error_out(400, "Input body cannot be empty")
                self.finish()
                return

            try:
                request_data = json.loads(
                    self.request.body.decode('utf-8'))
            except Exception as ex:
                self.error_out(
                    400,
                    "Failed to decode input body",
                    str(ex))
                self.finish()
                return

            if 'name' not in request_data:
                self.error_out(400,
                               "name is required to add an endpoint.")
                self.finish()
                return

            name = request_data['name']

            # check if endpoint already exist
            if name in self.tabpy_state.get_endpoints():
                self.error_out(400, f'endpoint {name} already exists.')
                self.finish()
                return

            self.logger.log(
                logging.DEBUG,
                f'Adding endpoint "{name}"')
            # Version is always 1 for a brand-new endpoint.
            err_msg = yield self._add_or_update_endpoint('add', name, 1,
                                                         request_data)
            if err_msg:
                self.error_out(400, err_msg)
            else:
                self.logger.log(
                    logging.DEBUG,
                    f'Endpoint {name} successfully added')
                self.set_status(201)
                self.write(self.tabpy_state.get_endpoints(name))
            self.finish()
            return
        except Exception as e:
            err_msg = format_exception(e, '/add_endpoint')
            self.error_out(500, "error adding endpoint", err_msg)
            self.finish()
            return
|
/*
* Copyright 2012 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkTLList_DEFINED
#define SkTLList_DEFINED
#include "include/core/SkTypes.h"
#include "include/private/SkMalloc.h"
#include "include/private/SkTemplates.h"
#include "src/core/SkTInternalLList.h"
#include <new>
#include <utility>
/** Doubly-linked list of objects. The objects' lifetimes are controlled by the list, i.e.
    the list creates the objects and they are deleted upon removal. This class block-allocates
space for entries based on a param passed to the constructor.
Elements of the list can be constructed in place using the following macros:
SkNEW_INSERT_IN_LLIST_BEFORE(list, location, type_name, args)
SkNEW_INSERT_IN_LLIST_AFTER(list, location, type_name, args)
where list is a SkTLList<type_name>*, location is an iterator, and args is the paren-surrounded
constructor arguments for type_name. These macros behave like addBefore() and addAfter().
allocCnt is the number of objects to allocate as a group. In the worst case fragmentation
each object is using the space required for allocCnt unfragmented objects.
*/
template <typename T, unsigned int N> class SkTLList {
private:
    struct Block;
    struct Node {
        SkAlignedSTStorage<1, T> fObj;  // in-place storage for the payload object
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Node);
        Block* fBlock; // owning block.
    };
    typedef SkTInternalLList<Node> NodeList;

public:
    class Iter;

    // Having fCount initialized to -1 indicates that the first time we attempt to grab a free node
    // all the nodes in the pre-allocated first block need to be inserted into the free list. This
    // allows us to skip that loop in instances when the list is never populated.
    SkTLList() : fCount(-1) {}

    ~SkTLList() {
        this->validate();
        typename NodeList::Iter iter;
        Node* node = iter.init(fList, Iter::kHead_IterStart);
        while (node) {
            // Destroy the payload first, then the nodes of any block that
            // becomes fully unused.
            reinterpret_cast<T*>(node->fObj.get())->~T();
            Block* block = node->fBlock;
            node = iter.next();
            if (0 == --block->fNodesInUse) {
                for (unsigned int i = 0; i < N; ++i) {
                    block->fNodes[i].~Node();
                }
                // The first block is embedded in the list object; only
                // heap-allocated blocks are freed.
                if (block != &fFirstBlock) {
                    sk_free(block);
                }
            }
        }
    }

    /** Adds a new element to the list at the head. */
    template <typename... Args> T* addToHead(Args&&... args) {
        this->validate();
        Node* node = this->createNode();
        fList.addToHead(node);
        this->validate();
        return new (node->fObj.get()) T(std::forward<Args>(args)...);
    }

    /** Adds a new element to the list at the tail. */
    template <typename... Args> T* addToTail(Args&&... args) {
        this->validate();
        Node* node = this->createNode();
        fList.addToTail(node);
        this->validate();
        return new (node->fObj.get()) T(std::forward<Args>(args)...);
    }

    /** Adds a new element to the list before the location indicated by the iterator. If the
        iterator refers to a nullptr location then the new element is added at the tail */
    template <typename... Args> T* addBefore(Iter location, Args&&... args) {
        this->validate();
        Node* node = this->createNode();
        fList.addBefore(node, location.getNode());
        this->validate();
        return new (node->fObj.get()) T(std::forward<Args>(args)...);
    }

    /** Adds a new element to the list after the location indicated by the iterator. If the
        iterator refers to a nullptr location then the new element is added at the head */
    template <typename... Args> T* addAfter(Iter location, Args&&... args) {
        this->validate();
        Node* node = this->createNode();
        fList.addAfter(node, location.getNode());
        this->validate();
        return new (node->fObj.get()) T(std::forward<Args>(args)...);
    }

    /** Convenience methods for getting an iterator initialized to the head/tail of the list. */
    Iter headIter() const { return Iter(*this, Iter::kHead_IterStart); }
    Iter tailIter() const { return Iter(*this, Iter::kTail_IterStart); }

    T* head() { return Iter(*this, Iter::kHead_IterStart).get(); }
    T* tail() { return Iter(*this, Iter::kTail_IterStart).get(); }
    const T* head() const { return Iter(*this, Iter::kHead_IterStart).get(); }
    const T* tail() const { return Iter(*this, Iter::kTail_IterStart).get(); }

    /** Removes and destroys the head element, if any. */
    void popHead() {
        this->validate();
        Node* node = fList.head();
        if (node) {
            this->removeNode(node);
        }
        this->validate();
    }

    /** Removes and destroys the tail element, if any. */
    void popTail() {
        this->validate();
        Node* node = fList.tail();
        if (node) {
            this->removeNode(node);
        }
        this->validate();
    }

    /** Removes and destroys t, which must be an element of this list. */
    void remove(T* t) {
        this->validate();
        // Relies on fObj being the first member of Node, so the T* and its
        // Node share an address.
        Node* node = reinterpret_cast<Node*>(t);
        SkASSERT(reinterpret_cast<T*>(node->fObj.get()) == t);
        this->removeNode(node);
        this->validate();
    }

    /** Removes and destroys all elements. */
    void reset() {
        this->validate();
        Iter iter(*this, Iter::kHead_IterStart);
        while (iter.get()) {
            Iter next = iter;
            next.next();
            this->remove(iter.get());
            iter = next;
        }
        SkASSERT(0 == fCount || -1 == fCount);
        this->validate();
    }

    // fCount may be -1 before first use; report that as empty (0).
    int count() const { return std::max(fCount ,0); }
    bool isEmpty() const { this->validate(); return 0 == fCount || -1 == fCount; }

    bool operator== (const SkTLList& list) const {
        if (this == &list) {
            return true;
        }
        // Call count() rather than use fCount because an empty list may have fCount = 0 or -1.
        if (this->count() != list.count()) {
            return false;
        }
        for (Iter a(*this, Iter::kHead_IterStart), b(list, Iter::kHead_IterStart);
             a.get();
             a.next(), b.next()) {
            SkASSERT(b.get()); // already checked that counts match.
            if (!(*a.get() == *b.get())) {
                return false;
            }
        }
        return true;
    }
    bool operator!= (const SkTLList& list) const { return !(*this == list); }

    /** The iterator becomes invalid if the element it refers to is removed from the list. */
    class Iter : private NodeList::Iter {
    private:
        using INHERITED = typename NodeList::Iter;

    public:
        typedef typename INHERITED::IterStart IterStart;
        //!< Start the iterator at the head of the list.
        static const IterStart kHead_IterStart = INHERITED::kHead_IterStart;
        //!< Start the iterator at the tail of the list.
        static const IterStart kTail_IterStart = INHERITED::kTail_IterStart;

        Iter() {}

        Iter(const Iter& that) : INHERITED(that) {}

        Iter& operator=(const Iter& that) { INHERITED::operator=(that); return *this; }

        Iter(const SkTLList& list, IterStart start = kHead_IterStart) {
            INHERITED::init(list.fList, start);
        }

        T* init(const SkTLList& list, IterStart start = kHead_IterStart) {
            return this->nodeToObj(INHERITED::init(list.fList, start));
        }

        T* get() { return this->nodeToObj(INHERITED::get()); }

        T* next() { return this->nodeToObj(INHERITED::next()); }

        T* prev() { return this->nodeToObj(INHERITED::prev()); }

    private:
        friend class SkTLList;
        Node* getNode() { return INHERITED::get(); }

        // Translate an internal Node pointer to the user-visible payload.
        T* nodeToObj(Node* node) {
            if (node) {
                return reinterpret_cast<T*>(node->fObj.get());
            } else {
                return nullptr;
            }
        }
    };

private:
    struct Block {
        int fNodesInUse;   // number of this block's nodes currently on the active list
        Node fNodes[N];
    };

    // Lazily populate the free list from the embedded first block; see the
    // comment on the constructor about fCount == -1.
    void delayedInit() {
        SkASSERT(-1 == fCount);
        fFirstBlock.fNodesInUse = 0;
        for (unsigned int i = 0; i < N; ++i) {
            fFreeList.addToHead(fFirstBlock.fNodes + i);
            fFirstBlock.fNodes[i].fBlock = &fFirstBlock;
        }
        fCount = 0;
        this->validate();
    }

    // Pop a node from the free list, allocating a fresh block of N nodes if
    // the free list is exhausted.
    Node* createNode() {
        if (-1 == fCount) {
            this->delayedInit();
        }
        Node* node = fFreeList.head();
        if (node) {
            fFreeList.remove(node);
            ++node->fBlock->fNodesInUse;
        } else {
            // Should not get here when count == 0 because we always have the preallocated first
            // block.
            SkASSERT(fCount > 0);
            Block* block = reinterpret_cast<Block*>(sk_malloc_throw(sizeof(Block)));
            node = &block->fNodes[0];
            new (node) Node;
            node->fBlock = block;
            block->fNodesInUse = 1;
            for (unsigned int i = 1; i < N; ++i) {
                new (block->fNodes + i) Node;
                fFreeList.addToHead(block->fNodes + i);
                block->fNodes[i].fBlock = block;
            }
        }
        ++fCount;
        return node;
    }

    // Unlink the node, destroy its payload, and recycle or free its block.
    void removeNode(Node* node) {
        SkASSERT(node);
        fList.remove(node);
        reinterpret_cast<T*>(node->fObj.get())->~T();
        Block* block = node->fBlock;
        // Don't ever release the first block, just add its nodes to the free list
        if (0 == --block->fNodesInUse && block != &fFirstBlock) {
            for (unsigned int i = 0; i < N; ++i) {
                if (block->fNodes + i != node) {
                    fFreeList.remove(block->fNodes + i);
                }
                block->fNodes[i].~Node();
            }
            sk_free(block);
        } else {
            fFreeList.addToHead(node);
        }
        --fCount;
        this->validate();
    }

    // Debug-only consistency check of counts, block ownership, and the
    // active/free partitioning of every block's nodes.
    void validate() const {
#ifdef SK_DEBUG
        bool isEmpty = false;
        if (-1 == fCount) {
            // We should not yet have initialized the free list.
            SkASSERT(fFreeList.isEmpty());
            isEmpty = true;
        } else if (0 == fCount) {
            // Should only have the nodes from the first block in the free list.
            SkASSERT(fFreeList.countEntries() == N);
            isEmpty = true;
        }
        SkASSERT(isEmpty == fList.isEmpty());
        fList.validate();
        fFreeList.validate();
        typename NodeList::Iter iter;
        Node* freeNode = iter.init(fFreeList, Iter::kHead_IterStart);
        while (freeNode) {
            SkASSERT(fFreeList.isInList(freeNode));
            Block* block = freeNode->fBlock;
            // Only the first block is allowed to have all its nodes in the free list.
            SkASSERT(block->fNodesInUse > 0 || block == &fFirstBlock);
            SkASSERT((unsigned)block->fNodesInUse < N);
            SkDEBUGCODE(int activeCnt = 0;)
            for (unsigned int i = 0; i < N; ++i) {
                bool free = fFreeList.isInList(block->fNodes + i);
                bool active = fList.isInList(block->fNodes + i);
                SkASSERT(free != active);
                SkDEBUGCODE(activeCnt += active;)
            }
            SkASSERT(activeCnt == block->fNodesInUse);
            freeNode = iter.next();
        }
        int count = 0;
        Node* activeNode = iter.init(fList, Iter::kHead_IterStart);
        while (activeNode) {
            ++count;
            SkASSERT(fList.isInList(activeNode));
            Block* block = activeNode->fBlock;
            SkASSERT(block->fNodesInUse > 0 && (unsigned)block->fNodesInUse <= N);
            SkDEBUGCODE(int activeCnt = 0;)
            for (unsigned int i = 0; i < N; ++i) {
                bool free = fFreeList.isInList(block->fNodes + i);
                bool active = fList.isInList(block->fNodes + i);
                SkASSERT(free != active);
                SkDEBUGCODE(activeCnt += active;)
            }
            SkASSERT(activeCnt == block->fNodesInUse);
            activeNode = iter.next();
        }
        SkASSERT(count == fCount || (0 == count && -1 == fCount));
#endif
    }

    NodeList fList;       // nodes holding live elements, in list order
    NodeList fFreeList;   // recycled nodes available for reuse
    Block fFirstBlock;    // embedded block; never freed
    int fCount;           // -1 until first insertion, then the element count

    SkTLList(const SkTLList&) = delete;
    SkTLList& operator=(const SkTLList&) = delete;
};
#endif
|
//-----------------------------------------------------------------------
// Copyright 2016 Sitecore Corporation A/S
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
// -------------------------------------------------------------------------------------------
var loyaltyCardListViewModel = null;
/**
 * Fetches the active loyalty cards, binds the resulting view model to the
 * given section, and hides the loader on success. Global messages from the
 * response are always displayed.
 */
function initActiveLoyaltyCards(sectionId) {
    ClearGlobalMessages();
    var serviceUrl = StorefrontUri("api/sitecore/loyalty/activeLoyaltyCards");
    AJAXPost(serviceUrl, null, function (data, success, sender) {
        var succeeded = success && data.Success;
        if (succeeded) {
            loyaltyCardListViewModel = new LoyaltyCardsListViewModel(data);
            ko.applyBindings(loyaltyCardListViewModel, document.getElementById(sectionId));
            loyaltyCardListViewModel.showLoader(false);
        }
        ShowGlobalMessages(data);
    });
}
/**
 * Loads all loyalty cards for the current user. The section stays hidden
 * until the data arrives successfully, then the bound section is revealed.
 */
function initLoyaltyCards(sectionId) {
    var section = $("#" + sectionId);
    section.hide();
    ClearGlobalMessages();
    var serviceUrl = StorefrontUri("api/sitecore/loyalty/getLoyaltyCards");
    AJAXPost(serviceUrl, null, function (data, success, sender) {
        var succeeded = success && data.Success;
        if (succeeded) {
            loyaltyCardListViewModel = new LoyaltyCardsListViewModel(data);
            ko.applyBindings(loyaltyCardListViewModel, document.getElementById(sectionId));
            section.show();
        }
        ShowGlobalMessages(data);
    });
}
/**
 * Enrolls the current account in the loyalty program. The join button shows
 * a loading state for the duration of the request; on success the card list
 * is reloaded and the empty-state panel is swapped for the card panel.
 */
function joinLoyaltyProgram() {
    ClearGlobalMessages();
    var joinButton = $('#joinLoyaltyProgram');
    joinButton.button("loading");
    AJAXPost(StorefrontUri('api/sitecore/loyalty/activateAccount'), null, function (data, success, sender) {
        if (success && data.Success) {
            loyaltyCardListViewModel.reload(data);
            $("#loyaltyCards").show();
            $("#loyaltyCardsEmpty").hide();
        }
        joinButton.button("reset");
        ShowGlobalMessages(data);
    }, this);
}
|
import React, { memo, useCallback, useMemo, useState, useEffect } from 'react';
import get from 'lodash/get';
import isEqual from 'react-fast-compare';
import PropTypes from 'prop-types';
import { Stack } from '@strapi/design-system/Stack';
import { Box } from '@strapi/design-system/Box';
import { NotAllowedInput, useNotification } from '@strapi/helper-plugin';
import { getTrad } from '../../utils';
import connect from './utils/connect';
import select from './utils/select';
import AddComponentButton from './components/AddComponentButton';
import DzLabel from './components/DzLabel';
import Component from './components/Component';
import ComponentPicker from './components/ComponentPicker';
/* eslint-disable react/no-array-index-key */
// Build the collapse-state array for a dynamic zone: one closed entry
// ({ isOpen: false }) per displayed component.
const createCollapses = arrayLength =>
  Array.from({ length: arrayLength }, () => ({ isOpen: false }));
// Edit-view input for a "dynamic zone" field: an ordered, reorderable list
// of components constrained by the field schema's min/max. Collapse state
// for each rendered component is tracked locally in `componentCollapses`.
const DynamicZone = ({
  name,
  // Passed with the select function
  addComponentToDynamicZone,
  formErrors,
  isCreatingEntry,
  isFieldAllowed,
  isFieldReadable,
  labelAction,
  moveComponentUp,
  moveComponentDown,
  removeComponentFromDynamicZone,
  dynamicDisplayedComponents,
  fieldSchema,
  metadatas,
}) => {
  const toggleNotification = useNotification();
  // Whether the component picker is open.
  const [isOpen, setIsOpen] = useState(false);
  // Set after adding a component so the effect below can expand it.
  const [shouldOpenAddedComponent, setShouldOpenAddedComponent] = useState(false);
  const dynamicDisplayedComponentsLength = dynamicDisplayedComponents.length;
  // One { isOpen } entry per displayed component.
  const [componentCollapses, setComponentsCollapses] = useState(
    createCollapses(dynamicDisplayedComponentsLength)
  );

  // Reset all collapse state whenever the number of components changes.
  useEffect(() => {
    setComponentsCollapses(createCollapses(dynamicDisplayedComponentsLength));
  }, [dynamicDisplayedComponentsLength]);

  // After an add, open the last (newly appended) component.
  useEffect(() => {
    if (shouldOpenAddedComponent) {
      setComponentsCollapses(prev =>
        prev.map((collapse, index) => {
          if (index === prev.length - 1) {
            return { ...collapse, isOpen: true };
          }

          return collapse;
        })
      );

      setShouldOpenAddedComponent(false);
    }
  }, [shouldOpenAddedComponent]);

  // We cannot use the default props here
  const { max = Infinity, min = -Infinity } = fieldSchema;

  // Errors attached directly to this field's key in the form error map.
  const dynamicZoneErrors = useMemo(() => {
    return Object.keys(formErrors)
      .filter(key => {
        return key === name;
      })
      .map(key => formErrors[key]);
  }, [formErrors, name]);

  const dynamicZoneAvailableComponents = useMemo(() => fieldSchema.components || [], [fieldSchema]);

  // How many more components are needed to satisfy the schema minimum.
  const missingComponentNumber = min - dynamicDisplayedComponentsLength;
  const hasError = dynamicZoneErrors.length > 0;
  const hasMinError =
    dynamicZoneErrors.length > 0 && get(dynamicZoneErrors, [0, 'id'], '').includes('min');

  const hasMaxError =
    hasError && get(dynamicZoneErrors, [0, 'id'], '') === 'components.Input.error.validation.max';

  const handleAddComponent = useCallback(
    componentUid => {
      setIsOpen(false);

      addComponentToDynamicZone(name, componentUid, hasError);
      setShouldOpenAddedComponent(true);
    },
    [addComponentToDynamicZone, hasError, name]
  );

  // Toggle the picker, unless the schema max is already reached — then
  // notify instead of opening.
  const handleClickOpenPicker = () => {
    if (dynamicDisplayedComponentsLength < max) {
      setIsOpen(prev => !prev);
    } else {
      toggleNotification({
        type: 'info',
        message: { id: getTrad('components.notification.info.maximum-requirement') },
      });
    }
  };

  const handleToggleComponent = indexToToggle => {
    setComponentsCollapses(prev =>
      prev.map(({ isOpen }, index) => {
        if (index === indexToToggle) {
          return { isOpen: !isOpen };
        }

        return { isOpen };
      })
    );
  };

  // Move the component down and swap the collapse state of the two
  // affected slots so each component keeps its own open/closed state.
  const handleMoveComponentDown = (name, currentIndex) => {
    moveComponentDown(name, currentIndex);

    setComponentsCollapses(prev => {
      return prev.map(({ isOpen }, index, refArray) => {
        if (index === currentIndex + 1) {
          return { isOpen: refArray[currentIndex].isOpen };
        }

        if (index === currentIndex) {
          return { isOpen: refArray[index + 1].isOpen };
        }

        return { isOpen };
      });
    });
  };

  // Mirror of handleMoveComponentDown for upward moves.
  const handleMoveComponentUp = (name, currentIndex) => {
    moveComponentUp(name, currentIndex);

    setComponentsCollapses(prev => {
      return prev.map(({ isOpen }, index, refArray) => {
        if (index === currentIndex - 1) {
          return { isOpen: refArray[currentIndex].isOpen };
        }

        if (index === currentIndex) {
          return { isOpen: refArray[index - 1].isOpen };
        }

        return { isOpen };
      });
    });
  };

  const handleRemoveComponent = (name, currentIndex) => {
    removeComponentFromDynamicZone(name, currentIndex);
  };

  // Creating an entry without permission on this field: read-only stub.
  if (!isFieldAllowed && isCreatingEntry) {
    return (
      <NotAllowedInput
        description={
          metadatas.description
            ? { id: metadatas.description, defaultMessage: metadatas.description }
            : null
        }
        intlLabel={{ id: metadatas.label, defaultMessage: metadatas.label }}
        labelAction={labelAction}
        name={name}
      />
    );
  }

  // Editing an entry with neither write nor read permission: same stub.
  if (!isFieldAllowed && !isFieldReadable && !isCreatingEntry) {
    return (
      <NotAllowedInput
        description={
          metadatas.description
            ? { id: metadatas.description, defaultMessage: metadatas.description }
            : null
        }
        intlLabel={{ id: metadatas.label, defaultMessage: metadatas.label }}
        labelAction={labelAction}
        name={name}
      />
    );
  }

  return (
    <Stack size={6}>
      {dynamicDisplayedComponentsLength > 0 && (
        <Box>
          <DzLabel
            label={metadatas.label}
            labelAction={labelAction}
            name={name}
            numberOfComponents={dynamicDisplayedComponentsLength}
            required={fieldSchema.required || false}
          />
          {dynamicDisplayedComponents.map((componentUid, index) => {
            {/* Reorder arrows only render where the move is possible. */}
            const showDownIcon =
              isFieldAllowed &&
              dynamicDisplayedComponentsLength > 0 &&
              index < dynamicDisplayedComponentsLength - 1;
            const showUpIcon = isFieldAllowed && dynamicDisplayedComponentsLength > 0 && index > 0;
            const isOpen = componentCollapses[index]?.isOpen || false;

            return (
              <Component
                componentUid={componentUid}
                formErrors={formErrors}
                key={index}
                index={index}
                isOpen={isOpen}
                isFieldAllowed={isFieldAllowed}
                moveComponentDown={handleMoveComponentDown}
                moveComponentUp={handleMoveComponentUp}
                onToggle={handleToggleComponent}
                name={name}
                removeComponentFromDynamicZone={handleRemoveComponent}
                showDownIcon={showDownIcon}
                showUpIcon={showUpIcon}
              />
            );
          })}
        </Box>
      )}
      <AddComponentButton
        hasError={hasError}
        hasMaxError={hasMaxError}
        hasMinError={hasMinError}
        isDisabled={!isFieldAllowed}
        label={metadatas.label}
        missingComponentNumber={missingComponentNumber}
        isOpen={isOpen}
        name={name}
        onClick={handleClickOpenPicker}
      />
      <ComponentPicker
        isOpen={isOpen}
        components={dynamicZoneAvailableComponents}
        onClickAddComponent={handleAddComponent}
      />
    </Stack>
  );
};
// Fallbacks for optional props; min/max default to "no limit".
DynamicZone.defaultProps = {
  dynamicDisplayedComponents: [],
  fieldSchema: {
    max: Infinity,
    min: -Infinity,
  },
  labelAction: null,
};

DynamicZone.propTypes = {
  addComponentToDynamicZone: PropTypes.func.isRequired,
  dynamicDisplayedComponents: PropTypes.array,
  fieldSchema: PropTypes.shape({
    components: PropTypes.array.isRequired,
    max: PropTypes.number,
    min: PropTypes.number,
    required: PropTypes.bool,
  }),
  formErrors: PropTypes.object.isRequired,
  isCreatingEntry: PropTypes.bool.isRequired,
  isFieldAllowed: PropTypes.bool.isRequired,
  isFieldReadable: PropTypes.bool.isRequired,
  labelAction: PropTypes.element,
  metadatas: PropTypes.shape({
    description: PropTypes.string,
    label: PropTypes.string,
  }).isRequired,
  moveComponentUp: PropTypes.func.isRequired,
  moveComponentDown: PropTypes.func.isRequired,
  name: PropTypes.string.isRequired,
  removeComponentFromDynamicZone: PropTypes.func.isRequired,
};

// Memoize with a deep prop comparison: props contain nested objects/arrays.
const Memoized = memo(DynamicZone, isEqual);

// Default export is wired to the store via connect/select; the named export
// is the bare component.
export default connect(
  Memoized,
  select
);

export { DynamicZone };
|
// Per-environment application settings.
// NOTE(review): the DB URL and JWT secret are hard-coded and identical in
// both environments — these should come from environment variables; confirm
// before shipping to production.
const config = {
  development: {
    PORT: 5000,
    DB_CONNECTION_URL: "mongodb://localhost/video_tutorials",
    JWT_SECRET: "keyboard cats"
  },
  production: {
    PORT: 80,
    DB_CONNECTION_URL: "mongodb://localhost/video_tutorials",
    JWT_SECRET: "keyboard cats"
  }
};

// Fall back to development when NODE_ENV is unset or unrecognized;
// previously `config[undefined]` made the module export `undefined`.
module.exports = config[process.env.NODE_ENV] || config.development;
|
// NOTE(review): `var yield` inside a generator body is a SyntaxError
// ('yield' is reserved there), so this file cannot parse — it looks like a
// negative parser-test fixture; confirm it is intentionally invalid.
function*g(){ var yield; }
|
# 2020.07.21
# Problem Statement:
# https://leetcode.com/problems/letter-combinations-of-a-phone-number/
class Solution:
    """LeetCode 17: Letter Combinations of a Phone Number.

    The original implementation accumulated results in a mutable class-level
    attribute, which leaked state across calls/instances and duplicated the
    per-digit logic eight times; it also returned None when the input
    contained '0' or '1'. This version is stateless and handles those digits
    by ignoring them.
    """

    # Retained for backward compatibility with any external reference to the
    # old accumulator; no longer used internally.
    answer = []

    # Classic phone keypad: digit -> letters.
    _KEYPAD = {
        '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
        '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
    }

    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter combination the digit string could represent.

        :param digits: string of keypad digits; '0'/'1' have no letters and
            are skipped.
        :returns: list of combinations in keypad order ([] for empty input).
        """
        if not digits:
            return []
        combos = ['']
        for digit in digits:
            letters = self._KEYPAD.get(digit)
            if letters is None:
                # Digits without letters contribute nothing.
                continue
            combos = [prefix + ch for prefix in combos for ch in letters]
        return combos
|
import numpy as np
def test_broadcast_arguments():
    from pytest import raises
    from skypy.utils import broadcast_arguments

    # Decorated function receives its arguments broadcast to a common shape.
    @broadcast_arguments('a', 'b')
    def check_shapes_match(a, b):
        assert a.shape == b.shape

    row = [1, 2, 3]
    column = [[4], [5], [6]]
    check_shapes_match(row, column)

    # Incompatible shapes must raise.
    incompatible = [[1, 2, 3], [7, 8, 9]]
    with raises(ValueError):
        check_shapes_match(incompatible, column)

    # Naming an argument the function does not have fails at decoration time.
    with raises(ValueError):
        @broadcast_arguments('a', 'b')
        def missing_argument_b(a):
            return a
def test_dependent_argument():
    from pytest import raises
    from skypy.utils import dependent_argument

    # A dependent argument may be passed as a value or a callable of x.
    @dependent_argument('y', 'x')
    def check_y_is_2x(x, y):
        assert np.all(y == 2 * x)

    x = np.arange(0, 1, 0.1)
    check_y_is_2x(x, 2 * x)
    check_y_is_2x(x, lambda x: 2 * x)

    # A dependent argument may depend on several others.
    @dependent_argument('z', 'x', 'y')
    def check_z_is_2x_plus_y(x, y, z):
        assert np.all(z == 2 * x + y)

    x = np.arange(0, 1, 0.1)
    y = np.arange(1, 2, 0.1)
    check_z_is_2x_plus_y(x, y, 2 * x + y)
    check_z_is_2x_plus_y(x, y, lambda x, y: 2 * x + y)

    # A dependent argument with no dependencies is also allowed.
    @dependent_argument('x')
    def check_x_is_1(x):
        assert x == 1

    check_x_is_1(1)
    check_x_is_1(lambda: 1)

    # Depending on an argument that does not exist fails at decoration time.
    with raises(ValueError):
        @dependent_argument('x', 'y', 'z')
        def missing_argument_z(x, y):
            pass
|
'use strict';
angular.module('app.controllers.header', [])
    // Inline array annotation keeps dependency injection working after
    // minification renames the function parameters.
    .controller('HeaderCtrl', ['$scope', '_', 'Settings',
        function ($scope, _, Settings) {
            // Expose the header UI settings to the view.
            $scope.settings = Settings.UI.header;
        }]);
|
import lldb
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbtest as lldbtest
import lldbsuite.test.lldbutil as lldbutil
import unittest2
class TestSwiftPrivateDiscriminator(lldbtest.TestBase):
    # Debug-info flavor is irrelevant here, so only a single variant runs.
    NO_DEBUG_INFO_TESTCASE = True
    mydir = lldbtest.TestBase.compute_mydir(__file__)

    @swiftTest
    # FIXME: The only reason this doesn't work on Linux is because the
    # dylib hasn't been loaded when run_to_source_breakpoint wants to
    # set the breakpoints.
    @skipUnlessDarwin
    def test(self):
        """Test what happens when a private type cannot be reconstructed in the AST."""
        self.build()
        # Stop at the 'break here' marker with both support dylibs loaded.
        target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
            self, 'break here', lldb.SBFileSpec('Generic.swift'),
            extra_images=['Generic', 'Builder'])
        # The private type is still visible to the data formatter path...
        self.expect("frame var -d run -- self",
                    substrs=['Builder.Private', 'n', '23'])
        # ...but expression evaluation must fail and print a hint.
        self.expect("p self", error=True, substrs=["Hint"])
        process.Continue()
        # This should work.
        self.expect("frame var -d run -- visible",
                    substrs=['Generic.Visible', 'n', '42'])
        self.expect("p visible", substrs=['Generic.Visible', 'n', '42'])
|
"""Helper general purpose functions"""
import glob
import importlib
import json
import os
import re
import traceback
import types
from datetime import datetime
from distutils.version import StrictVersion
def get_timestamp():
    """Return the current local time as a 'Y.m.d.H.M.S.mmm' string.

    The trailing microseconds are truncated to milliseconds (3 digits).
    """
    stamp = datetime.today().strftime("%Y.%m.%d.%H.%M.%S.%f")
    # Drop the last 3 of the 6 microsecond digits -> millisecond precision.
    return stamp[:-3]
def get_date_from_timestamp(timestamp):
    """Parse a 'Y.m.d.H.M.S.f' timestamp string back into a datetime."""
    return datetime.strptime(timestamp, '%Y.%m.%d.%H.%M.%S.%f')
def get_date_time_from_timestamp(timestamp):
    """Get the date time from a timestamp.

    The timestamp must have the following format:
    'year.month.day.hour.minutes'
    Example:
      '2017.12.20.10.31' -> '2017/12/20 10:31'

    Timestamps with fewer than five fields are returned unchanged.
    """
    fields = timestamp.split('.')
    if len(fields) < 5:
        return timestamp
    return '{0}/{1}/{2} {3}:{4}'.format(*fields[:5])
def display_tree_structure_command_line(structure, lvl=0):
    """Print a directory tree to stdout, two spaces of indent per level.

    Each element is a dict with 'type' ('file' or anything else for a
    directory), 'name', and — for directories — 'sub_elements'.
    """
    indent = ' ' * lvl * 2
    for entry in structure:
        if entry['type'] == 'file':
            print('{}{}'.format(indent, entry['name']))
        else:
            # Directories get a trailing slash and their children indented.
            print('{}{}/'.format(indent, entry['name']))
            display_tree_structure_command_line(entry['sub_elements'], lvl + 1)
def separate_file_from_parents(full_filename):
    """Receives a full filename with parents (separated by dots).

    Returns a tuple: first element is the filename, second element is the
    (possibly empty) list of parents.
    """
    *parents, file = full_filename.split('.')
    return file, parents
def choose_browser_by_precedence(cli_browsers=None, suite_browsers=None,
                                 settings_default_browser=None):
    """Define which browser(s) to use by order of precedence:

    1. browsers defined by CLI
    2. browsers defined inside a suite
    3. 'default_driver' setting
    4. chrome
    """
    if cli_browsers:
        return cli_browsers
    if suite_browsers:
        return suite_browsers
    if settings_default_browser:
        return [settings_default_browser]
    # Last-resort fallback when nothing was specified anywhere.
    return ['chrome']
# TODO
def load_json_from_file(filepath, ignore_failure=False, default=None):
    """Read and parse a JSON file.

    Returns *default* when the file is empty or whitespace-only. On a
    read/parse error the problem is printed; unless *ignore_failure* is set,
    an Exception chaining the original traceback is raised.
    """
    result = default
    with open(filepath, encoding='utf-8') as source:
        try:
            raw = source.read()
            if raw.strip():
                result = json.loads(raw)
        except Exception as err:
            message = 'There was an error parsing file {}'.format(filepath)
            print(message)
            print(traceback.format_exc())
            if not ignore_failure:
                raise Exception(message).with_traceback(err.__traceback__)
    return result
def import_module(path):
    """Import a Python module from a file path.

    Returns a (module, error) tuple: on success error is None; on failure
    module is None and error holds a short formatted traceback.
    """
    mod = None
    error = None
    module_dir, module_file = os.path.split(path)
    module_name, module_ext = os.path.splitext(module_file)
    try:
        spec = importlib.util.spec_from_file_location(module_name, path)
        _mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(_mod)
        mod = _mod
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt raised while executing the module.
        error = traceback.format_exc(limit=0)
    return mod, error
def module_local_public_functions(module):
    """Get a list of function names defined in a module.

    Ignores names starting with `_` and functions imported from other
    modules (those whose __module__ differs from the module's name).
    """
    module_name = module.__name__
    return [
        name for name in dir(module)
        if not name.startswith('_')
        and isinstance(getattr(module, name), types.FunctionType)
        and getattr(module, name).__module__ == module_name
    ]
# Format accepted by the old distutils StrictVersion: "N.N[.N]" with an
# optional "aN"/"bN" pre-release tag. distutils was removed from the
# standard library in Python 3.12, so the check is done with a regex now.
_WEBDRIVER_VERSION_RE = re.compile(r'^\d+\.\d+(?:\.\d+)?(?:[ab]\d+)?$')


def extract_version_from_webdriver_filename(filename):
    """Extract version from webdriver filename.

    Expects a file in the format: `filename_1.2` or `filename_1.2.exe`.
    The extracted version must conform with pep-386.
    If a valid version is not found it returns '0.0'.
    """
    version = '0.0'
    if '_' in filename:
        # Version is the last underscore-separated component, minus '.exe'.
        candidate = filename.replace('.exe', '').split('_')[-1]
        if _WEBDRIVER_VERSION_RE.match(candidate):
            version = candidate
    return version
def _webdriver_version_key(version):
    """Sort key replicating distutils StrictVersion ordering ('1.2',
    '1.2.3', '1.2a1'); pre-releases sort before the final release.
    Unparseable strings sort lowest. (StrictVersion itself was removed
    from the standard library in Python 3.12.)
    """
    match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?(?:([ab])(\d+))?$', version)
    if not match:
        return (0, 0, 0, 0, 0)
    major, minor, patch, pre_tag, pre_num = match.groups()
    if pre_tag:
        pre = ({'a': 0, 'b': 1}[pre_tag], int(pre_num))
    else:
        pre = (2, 0)  # final releases sort after any pre-release
    return (int(major), int(minor), int(patch or 0)) + pre


def match_latest_executable_path(glob_path, testdir):
    """Returns the absolute path to the webdriver executable
    with the highest version given a path with glob pattern,
    or None when nothing matches.
    """
    glob_path = os.path.normpath(glob_path)
    if not os.path.isabs(glob_path):
        glob_path = os.path.join(testdir, glob_path)
    # Note: recursive=True arg is not supported
    # in Python 3.4, so '**' wildcard is not supported
    matched_files = [x for x in glob.glob(glob_path) if os.path.isfile(x)]
    if not matched_files:
        return None
    # max() returns the first of equal-version candidates, matching the old
    # stable reverse-sort-then-take-first behavior.
    return max(matched_files,
               key=lambda f: _webdriver_version_key(
                   extract_version_from_webdriver_filename(f)))
def get_valid_filename(s):
    """Return *s* turned into a safe filename: surrounding whitespace is
    stripped, inner spaces become underscores, and any character that is not
    alphanumeric, dash, underscore or dot is dropped."""
    cleaned = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
# TODO: capture ctrl+c and stop gracefully
def prompt_yes_no(question, default=True):
    """Prompt the user through the console for yes or no.

    An empty answer returns *default*; any unrecognized answer re-prompts.
    """
    affirmative = ['yes', 'y']
    negative = ['no', 'n']
    while True:
        answer = input(question).lower()
        if not answer:
            return default
        if answer in affirmative:
            return True
        if answer in negative:
            return False
class ImmutableKeysDict(dict):
    """A dictionary where keys cannot be added after instantiation.

    Only ``d[key] = value`` is guarded; existing keys may be reassigned.
    NOTE(review): ``update()`` and ``setdefault()`` use the C-level dict
    implementation and can still introduce new keys — confirm whether that
    bypass is acceptable to callers.
    """

    def __setitem__(self, key, value):
        # Reject keys that were not present at construction time.
        if key not in self:
            raise AttributeError("cannot add new keys to ImmutableKeysDict")
        dict.__setitem__(self, key, value)
def validate_email(email):
    """Return True when *email* matches a simple lower-case address pattern
    (local part, '@', domain labels, 2-4 letter TLD)."""
    pattern = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
    return re.match(pattern, email) is not None
def normalize_query(path):
    """Normalize a relative path to a suite or test into a dotted relative
    path ('suite/test.py' -> 'suite.test')."""
    normalized = os.path.normpath(path)
    if '.py' in normalized:
        # Drop the extension before converting separators.
        normalized = os.path.splitext(normalized)[0]
    if os.sep in normalized:
        normalized = normalized.replace(os.sep, '.')
    return normalized
|
from datetime import timedelta
import logging
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext
from django.utils.timesince import timesince
from activeusers import utils
from activeusers.utils import string_with_title
logger = logging.getLogger('activeusers.models')
class VisitorManager(models.Manager):
    def active(self, timeout=None):
        """
        Retrieves only visitors who have been active within the timeout
        period.
        """
        # Fall back to the project-wide timeout when none is given.
        minutes = timeout if timeout else utils.get_timeout()
        cutoff = timezone.now() - timedelta(minutes=minutes)
        return self.get_queryset().filter(last_update__gte=cutoff)
class Visitor(models.Model):
    # One row per active visitor session, unique per (session_key, ip_address).
    session_key = models.CharField(max_length=40)
    ip_address = models.CharField(max_length=20)
    user = models.ForeignKey(User, null=True)
    user_agent = models.CharField(max_length=255)
    referrer = models.CharField(max_length=255)
    url = models.CharField(max_length=255)
    page_views = models.PositiveIntegerField(default=0)
    session_start = models.DateTimeField()
    last_update = models.DateTimeField()
    objects = VisitorManager()

    def save(self, *args, **kwargs):
        # Truncate to avoid errors when the url is longer than the field max_length
        try:
            max_length = self._meta.get_field('url').max_length
            self.url = self.url[:max_length]
        except AttributeError:
            pass
        super(Visitor, self).save(*args, **kwargs)

    def _time_on_site(self):
        """
        Attempts to determine the amount of time a visitor has spent on the
        site based upon their information that's in the database.

        Returns an 'H:MM:SS' string, or the translated 'unknown' when no
        session start is recorded.
        """
        if self.session_start:
            # NOTE: timedelta.seconds excludes whole days, matching the
            # original behavior; sessions spanning days wrap around.
            seconds = (self.last_update - self.session_start).seconds
            # Use floor division via divmod: plain '/' yields floats on
            # Python 3, which broke the H:MM:SS arithmetic (e.g. 3700
            # seconds rendered as 1:00:00 instead of 1:01:40).
            hours, remainder = divmod(seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            return u'%i:%02i:%02i' % (hours, minutes, seconds)
        else:
            return ugettext(u'unknown')
    time_on_site = property(_time_on_site)

    def _last_seen(self):
        """
        Returns a "humanised" expression for time since the user was last seen,
        e.g. "3 minutes ago".
        """
        return ugettext(u'%s ago') % timesince(self.last_update)
    last_seen = property(_last_seen)

    class Meta:
        app_label = string_with_title('activeusers', 'Active users')
        ordering = ('-last_update',)
        unique_together = ('session_key', 'ip_address',)
        verbose_name = 'active visitor'
        verbose_name_plural = 'active visitors'
|
import React, { useState } from "react";
import {
Button,
Layout,
Row,
Col,
Typography,
Card,
Input,
Divider,
Carousel,
Space,
} from "antd";
import { LeftSquareOutlined } from "@ant-design/icons";
const { Search } = Input;
const RightCardPages = (props) => {
const { title, link } = props;
const [page, setPage] = useState("welcome");
const [name, setName] = useState("");
const onNameChange = (e) => {
console.log(e.target.value);
setName(e.target.value);
};
const onNameClick = (e) => {
setPage("joinOrHost");
};
const onNamePressEnter = (e) => {
setPage("joinOrHost");
};
const [playlistURL, setplaylistURL] = useState("");
if (page === "welcome") {
return (
<Card
title="Welcome"
className="centerMargin"
style={{ width: 350, height: 320 }}
>
<p>What should we call you?</p>
<Input
placeholder="Enter a nickname"
onChange={onNameChange}
onPressEnter={onNamePressEnter}
value={name}
/>
<div className="center">
<br />
<Button type="primary" onClick={onNameClick}>
Get Started
</Button>
</div>
</Card>
);
} else if (page === "joinOrHost") {
return (
<div>
<Card
title={`Welcome, ${name}`}
className="centerMargin"
style={{ width: 350, height: 320 }}
>
<Button
type="default"
icon={<LeftSquareOutlined />}
onClick={(e) => setPage("welcome")}
/>
<div className="center">
<br />
<Button
type="primary"
onClick={(e) => setPage("playlistSelection")}
>
Host
</Button>
<br />
<br />
<Button type="primary" disabled title="Coming soon...">
Join
</Button>
</div>
</Card>
</div>
);
} else if (page === "playlistSelection") {
return (
<div>
<Card
title="Enter a Spotify playlist URL"
className="centerMargin"
style={{ width: 350, height: 320 }}
>
<Button
type="default"
icon={<LeftSquareOutlined />}
onClick={(e) => setPage("joinOrHost")}
/>
<br />
<br />
<div className="center">
<Search
placeholder="Spotify playlist URL"
allowClear
enterButton="Search"
size="large"
onSearch={(e) => setPage("playlistSelection")}
onChange={(e) => setplaylistURL(e.target.value)}
/>
</div>
</Card>
</div>
);
}
};
export default RightCardPages;
|
from __future__ import print_function
from pprint import pprint
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# parameters
m = 10            # row length: number of slots per sequence
k = 4             # number of ones laid per sequence (row)
o = 2             # maximum tolerated overlap with any previously stored row
p = 2             # total number of times each slot may be used
verbose = True    # enable step-by-step trace printing

# Remaining allowed uses per slot; every slot starts with p uses left.
p_indicator = np.ones(m) * p
position = 0      # leftmost slot of the sequence currently being laid
capacity = 0      # number of rows stacked into `array` so far
# Empty (0 x m) matrix that completed rows are vstacked onto.
array = np.array([]).reshape(0, m)
# while(position < m):
# Lay the first sequence: k consecutive ones starting at `position`,
# consuming one use from each touched slot.
array_line = np.zeros(m, dtype='int')
for i in range(k):
    array_line[position + i] = 1
    p_indicator[position + i] -= 1
# Increase capacity
capacity += 1
# Stack the array
array = np.vstack((array, array_line))
# Greedily lay further sequences while a run of k slots can still fit.
while position + k < m:
    if verbose:
        print('********')
        print(position)
        print(position + k)
        print('**********')
    # Lay another sequence
    array_line = np.zeros(m, dtype='int')
    # Create overlap dic with as many keys as sequences we have stored so far
    overlap_dic = {index:0 for index in range(0, capacity)}
    # Move the position to the first position where p_indicator > 0
    for p_index, p_value in enumerate(p_indicator):
        if p_value > 0:
            position = p_index
            break
    if position + k >= m:
        print('break the while')
        break
    position_i = position
    # Place the k elements of this sequence one at a time, skipping slots
    # that are exhausted or would exceed the overlap budget `o`.
    for counter in range(k):
        # Check that there is still p left to add
        # NOTE(review): position_i is advanced without a bounds check here and
        # below; it can walk past m-1 and raise IndexError — verify intent.
        p_flag = True
        while p_flag:
            if p_indicator[position_i] == 0:
                if verbose:
                    print('p flag')
                position_i += 1
            else:
                p_flag = False
        # Check overlap: re-scan all stored rows after every skip, since
        # moving position_i may create a conflict with a different row.
        overlap_flag = True
        while overlap_flag:
            for row in range(capacity):
                # If overlap is bigger than o and that sequence is already there skip
                if verbose:
                    print('--------------')
                    print('row', row)
                    print('position', position_i)
                    print('overlap_dic', overlap_dic)
                    print('array[row, position]', array[row, position_i])
                if (overlap_dic[row] >= o) and (array[row, position_i] == 1):
                    if verbose:
                        print('overlap flag')
                        print('--------------')
                    position_i += 1
                    overlap_flag = True
                    break
                else:
                    overlap_flag = False
        # Add the element
        array_line[position_i] += 1
        p_indicator[position_i] -= 1
        position_i += 1
        # Add overlap
        # NOTE(review): position_i was incremented just above, so this reads
        # the column AFTER the element that was just placed — possibly an
        # off-by-one (and it can index past the row end when position_i == m).
        # Confirm whether array[row, position_i - 1] was intended.
        for row in range(capacity):
            # If there is an element
            if array[row, position_i] == 1:
                overlap_dic[row] += 1
    capacity += 1
    array = np.vstack((array, array_line))
pprint(array)
|