file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
test_hoursbalance_model.py | import datetime
import pytz
from django.utils import timezone
from django.contrib.auth.models import User
from django.test import TestCase
from gerencex.core.models import HoursBalance, Timing, Office
from gerencex.core.time_calculations import DateData
class HoursBalanceModelTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user('testuser', 'test@user.com', 'senha123')
def test_balances(self):
r1 = HoursBalance.objects.create(
date=datetime.date(2016, 8, 18),
user=self.user,
credit=datetime.timedelta(hours=6).seconds,
debit=datetime.timedelta(hours=7).seconds,
)
# Test creation
self.assertTrue(HoursBalance.objects.exists())
# First balance is calculated without a previous balance (see the
# total_balance_handler function at signals.py)
self.assertEqual(r1.balance, int(datetime.timedelta(hours=-1).total_seconds()))
# Second balance takes the first balance into account (see the
# total_balance_handler function at signals.py)
r2 = HoursBalance.objects.create(
date=datetime.date(2016, 8, 19),
user=self.user,
credit=datetime.timedelta(hours=6).seconds,
debit=datetime.timedelta(hours=7).seconds,
)
self.assertEqual(r2.balance, int(datetime.timedelta(hours=-2).total_seconds()))
# Change in first credit or debit must change the second balance (see the
# next_balance_handler function at signals.py)
r1.credit = datetime.timedelta(hours=7).seconds
r1.save()
r2 = HoursBalance.objects.get(pk=2)
self.assertEqual(r2.balance, int(datetime.timedelta(hours=-1).total_seconds()))
class CreditTriggerTest(TestCase):
"""
The user credit is always registered at HourBalance via signal, when a checkout occurs.
See the 'credit_calculation' function, at signals.py
"""
@classmethod
def setUpTestData(cls):
| def test_credit_triggers(self):
# Let's record a check in...
t1 = Timing.objects.create(
user=self.user,
date_time=timezone.make_aware(datetime.datetime(2016, 10, 3, 12, 0, 0, 0)),
checkin=True
)
# ...and a checkout
t2 = Timing.objects.create(
user=self.user,
date_time=timezone.make_aware(datetime.datetime(2016, 10, 3, 13, 0, 0, 0)),
checkin=False
)
# Let's record a balance line at HoursBalance
date = datetime.date(2016, 10, 3)
new_credit = DateData(self.user, date).credit().seconds
new_debit = DateData(self.user, date).debit().seconds
HoursBalance.objects.create(
date=date,
user=self.user,
credit=new_credit,
debit=new_debit
)
# Let's change t2 (checkout record)
t2.date_time += datetime.timedelta(hours=1)
t2.save()
# The balance must have been recalculated via django signal (signals.py)
checkout_tolerance = self.user.userdetail.office.checkout_tolerance
checkin_tolerance = self.user.userdetail.office.checkin_tolerance
tolerance = checkout_tolerance + checkin_tolerance
reference = datetime.timedelta(hours=2).seconds + tolerance.seconds
line = HoursBalance.objects.first()
credit = line.credit
self.assertEqual(reference, credit)
# Let's change t1 (checkin record)
t1.date_time += datetime.timedelta(hours=1)
t1.save()
# The balance must have been recalculated via signal
modified_reference = datetime.timedelta(hours=1).seconds + tolerance.seconds
modified_balance_line = HoursBalance.objects.first()
modified_credit = modified_balance_line.credit
self.assertEqual(modified_reference, modified_credit)
# TODO: Escrever o teste depois que já houver view para produzir o balanço da divisão e do usuário
class RestdayDebitTriggerTest(TestCase):
"""
When a we record a Restday whose date is prior to the date of the Balance, the balances must
be recalculated for all users.
"""
@classmethod
def setUpTestData(cls):
Office.objects.create(name='Diacomp 1', initials='diacomp1')
Office.objects.create(name='Diacomp 2', initials='diacomp2')
cls.diacomp1 = Office.objects.get(initials='diacomp1')
cls.diacomp2 = Office.objects.get(initials='diacomp2')
cls.diacomp1.hours_control_start_date = datetime.date(2016, 9, 1)
cls.diacomp1.save()
cls.diacomp2.hours_control_start_date = datetime.date(2016, 10, 1)
cls.diacomp1.save()
User.objects.create_user('testuser1', 'test1@user.com', 'senha123')
User.objects.create_user('testuser2', 'test2@user.com', 'senha123')
cls.user1 = User.objects.get(username='testuser')
cls.user2 = User.objects.get(username='testuser')
# def test_debit_trigger(self):
def activate_timezone():
return timezone.activate(pytz.timezone('America/Sao_Paulo'))
| Office.objects.create(name='Nenhuma lotação',
initials='NL',
regular_work_hours=datetime.timedelta(hours=6))
User.objects.create_user('testuser', 'test@user.com', 'senha123')
cls.user = User.objects.get(username='testuser')
| identifier_body |
test_hoursbalance_model.py | import datetime
import pytz
from django.utils import timezone
from django.contrib.auth.models import User
from django.test import TestCase
from gerencex.core.models import HoursBalance, Timing, Office
from gerencex.core.time_calculations import DateData
class HoursBalanceModelTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user('testuser', 'test@user.com', 'senha123')
def test_balances(self):
r1 = HoursBalance.objects.create(
date=datetime.date(2016, 8, 18),
user=self.user,
credit=datetime.timedelta(hours=6).seconds,
debit=datetime.timedelta(hours=7).seconds,
)
# Test creation
self.assertTrue(HoursBalance.objects.exists())
# First balance is calculated without a previous balance (see the
# total_balance_handler function at signals.py)
self.assertEqual(r1.balance, int(datetime.timedelta(hours=-1).total_seconds()))
# Second balance takes the first balance into account (see the
# total_balance_handler function at signals.py)
r2 = HoursBalance.objects.create(
date=datetime.date(2016, 8, 19),
user=self.user,
credit=datetime.timedelta(hours=6).seconds,
debit=datetime.timedelta(hours=7).seconds,
)
self.assertEqual(r2.balance, int(datetime.timedelta(hours=-2).total_seconds()))
# Change in first credit or debit must change the second balance (see the
# next_balance_handler function at signals.py)
r1.credit = datetime.timedelta(hours=7).seconds
r1.save()
r2 = HoursBalance.objects.get(pk=2)
self.assertEqual(r2.balance, int(datetime.timedelta(hours=-1).total_seconds()))
class CreditTriggerTest(TestCase):
"""
The user credit is always registered at HourBalance via signal, when a checkout occurs.
See the 'credit_calculation' function, at signals.py
"""
@classmethod
def setUpTestData(cls):
Office.objects.create(name='Nenhuma lotação',
initials='NL',
regular_work_hours=datetime.timedelta(hours=6))
User.objects.create_user('testuser', 'test@user.com', 'senha123')
cls.user = User.objects.get(username='testuser')
| date_time=timezone.make_aware(datetime.datetime(2016, 10, 3, 12, 0, 0, 0)),
checkin=True
)
# ...and a checkout
t2 = Timing.objects.create(
user=self.user,
date_time=timezone.make_aware(datetime.datetime(2016, 10, 3, 13, 0, 0, 0)),
checkin=False
)
# Let's record a balance line at HoursBalance
date = datetime.date(2016, 10, 3)
new_credit = DateData(self.user, date).credit().seconds
new_debit = DateData(self.user, date).debit().seconds
HoursBalance.objects.create(
date=date,
user=self.user,
credit=new_credit,
debit=new_debit
)
# Let's change t2 (checkout record)
t2.date_time += datetime.timedelta(hours=1)
t2.save()
# The balance must have been recalculated via django signal (signals.py)
checkout_tolerance = self.user.userdetail.office.checkout_tolerance
checkin_tolerance = self.user.userdetail.office.checkin_tolerance
tolerance = checkout_tolerance + checkin_tolerance
reference = datetime.timedelta(hours=2).seconds + tolerance.seconds
line = HoursBalance.objects.first()
credit = line.credit
self.assertEqual(reference, credit)
# Let's change t1 (checkin record)
t1.date_time += datetime.timedelta(hours=1)
t1.save()
# The balance must have been recalculated via signal
modified_reference = datetime.timedelta(hours=1).seconds + tolerance.seconds
modified_balance_line = HoursBalance.objects.first()
modified_credit = modified_balance_line.credit
self.assertEqual(modified_reference, modified_credit)
# TODO: Escrever o teste depois que já houver view para produzir o balanço da divisão e do usuário
class RestdayDebitTriggerTest(TestCase):
"""
When a we record a Restday whose date is prior to the date of the Balance, the balances must
be recalculated for all users.
"""
@classmethod
def setUpTestData(cls):
Office.objects.create(name='Diacomp 1', initials='diacomp1')
Office.objects.create(name='Diacomp 2', initials='diacomp2')
cls.diacomp1 = Office.objects.get(initials='diacomp1')
cls.diacomp2 = Office.objects.get(initials='diacomp2')
cls.diacomp1.hours_control_start_date = datetime.date(2016, 9, 1)
cls.diacomp1.save()
cls.diacomp2.hours_control_start_date = datetime.date(2016, 10, 1)
cls.diacomp1.save()
User.objects.create_user('testuser1', 'test1@user.com', 'senha123')
User.objects.create_user('testuser2', 'test2@user.com', 'senha123')
cls.user1 = User.objects.get(username='testuser')
cls.user2 = User.objects.get(username='testuser')
# def test_debit_trigger(self):
def activate_timezone():
return timezone.activate(pytz.timezone('America/Sao_Paulo')) | def test_credit_triggers(self):
# Let's record a check in...
t1 = Timing.objects.create(
user=self.user, | random_line_split |
timer.d.ts | import { Observable } from '../Observable';
import { SchedulerLike } from '../types';
/**
* Creates an Observable that starts emitting after an `initialDelay` and | * <span class="informal">Its like {@link interval}, but you can specify when
* should the emissions start.</span>
*
* <img src="./img/timer.png" width="100%">
*
* `timer` returns an Observable that emits an infinite sequence of ascending
* integers, with a constant interval of time, `period` of your choosing
* between those emissions. The first emission happens after the specified
* `initialDelay`. The initial delay may be a {@link Date}. By default, this
* operator uses the `async` IScheduler to provide a notion of time, but you
* may pass any IScheduler to it. If `period` is not specified, the output
* Observable emits only one value, `0`. Otherwise, it emits an infinite
* sequence.
*
* @example <caption>Emits ascending numbers, one every second (1000ms), starting after 3 seconds</caption>
* var numbers = Rx.Observable.timer(3000, 1000);
* numbers.subscribe(x => console.log(x));
*
* @example <caption>Emits one number after five seconds</caption>
* var numbers = Rx.Observable.timer(5000);
* numbers.subscribe(x => console.log(x));
*
* @see {@link interval}
* @see {@link delay}
*
* @param {number|Date} [dueTime] The initial delay time to wait before
* emitting the first value of `0`.
* @param {number|SchedulerLike} [periodOrScheduler] The period of time between emissions of the
* subsequent numbers.
* @param {SchedulerLike} [scheduler=async] The IScheduler to use for scheduling
* the emission of values, and providing a notion of "time".
* @return {Observable} An Observable that emits a `0` after the
* `initialDelay` and ever increasing numbers after each `period` of time
* thereafter.
* @static true
* @name timer
* @owner Observable
*/
export declare function timer(dueTime?: number | Date, periodOrScheduler?: number | SchedulerLike, scheduler?: SchedulerLike): Observable<number>; | * emits ever increasing numbers after each `period` of time thereafter.
* | random_line_split |
buttons.js | /**
* @author Richard Davey <rich@photonstorm.com>
* @copyright 2015 Photon Storm Ltd.
* @license {@link http://choosealicense.com/licenses/no-license/|No License}
*
* @description This example requires the Phaser Virtual Joystick Plugin to run.
* For more details please see http://phaser.io/shop/plugins/virtualjoystick
*/
var game = new Phaser.Game(800, 600, Phaser.AUTO, 'phaser-example');
var PhaserGame = function () {
this.fx;
this.pad;
this.buttonA;
this.buttonB;
this.buttonC;
};
PhaserGame.prototype = {
preload: function () {
this.load.atlas('generic', 'assets/virtualjoystick/skins/generic-joystick.png', 'assets/virtualjoystick/skins/generic-joystick.json');
this.load.image('bg', 'assets/virtualjoystick/barbarian_loading.png');
this.load.audio('sfx', [ 'assets/virtualjoystick/magical_horror_audiosprite.mp3', 'assets/virtualjoystick/magical_horror_audiosprite.ogg' ]);
},
create: function () {
var bg = this.add.image(this.world.centerX, 32, 'bg');
bg.anchor.x = 0.5; | this.fx = game.add.audio('sfx');
this.fx.allowMultiple = true;
this.fx.addMarker('charm', 0, 2.7);
this.fx.addMarker('curse', 4, 2.9);
this.fx.addMarker('fireball', 8, 5.2);
this.fx.addMarker('spell', 14, 4.7);
this.fx.addMarker('soundscape', 20, 18.8);
this.pad = this.game.plugins.add(Phaser.VirtualJoystick);
this.buttonA = this.pad.addButton(200, 520, 'generic', 'button1-up', 'button1-down');
this.buttonA.onDown.add(this.pressButtonA, this);
this.buttonA.addKey(Phaser.Keyboard.A);
this.buttonB = this.pad.addButton(400, 500, 'generic', 'button2-up', 'button2-down');
this.buttonB.onDown.add(this.pressButtonB, this);
this.buttonB.addKey(Phaser.Keyboard.B);
this.buttonC = this.pad.addButton(600, 520, 'generic', 'button3-up', 'button3-down');
this.buttonC.onDown.add(this.pressButtonC, this);
this.buttonC.addKey(Phaser.Keyboard.C);
},
pressButtonA: function () {
this.fx.play('charm');
},
pressButtonB: function () {
this.fx.play('spell');
},
pressButtonC: function () {
this.fx.play('fireball');
}
};
game.state.add('Game', PhaserGame, true); | random_line_split | |
_jax_backend.py | import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def prefers_channels_last(self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
return np.array(x)
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values):
if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
|
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward)
return jax_fun
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def random_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), mode
if mode == 'constant':
constant_values = jnp.array(constant_values, dtype=value.dtype)
return jnp.pad(value, pad_width, 'constant', constant_values=constant_values)
else:
if mode in ('periodic', 'boundary'):
mode = {'periodic': 'wrap', 'boundary': 'edge'}[mode]
return jnp.pad(value, pad_width, mode)
def reshape(self, value, shape):
return jnp.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if isinstance(value, (tuple, list)):
assert axis == 0
return sum(value[1:], value[0])
return jnp.sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
if value.dtype == bool:
return jnp.all(value, axis=axis)
return jnp.prod(value, axis=axis)
def where(self, condition, x=None, y=None):
if x is None or y is None:
return jnp.argwhere(condition)
return jnp.where(condition, x, y)
def zeros(self, shape, dtype: DType = None):
self._check_float64()
return jnp.zeros(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def ones(self, shape, dtype: DType = None):
self._check_float64()
return jnp.ones(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def meshgrid(self, *coordinates):
self._check_float64()
coordinates = [self.as_tensor(c) for c in coordinates]
return jnp.meshgrid(*coordinates, indexing='ij')
def linspace(self, start, stop, number):
self._check_float64()
return jnp.linspace(start, stop, number, dtype=to_numpy_dtype(self.float_type))
def mean(self, value, axis=None, keepdims=False):
return jnp.mean(value, axis, keepdims=keepdims)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
return jnp.tensordot(a, b, (a_axes, b_axes))
def mul(self, a, b):
# if scipy.sparse.issparse(a): # TODO sparse?
# return a.multiply(b)
# elif scipy.sparse.issparse(b):
# return b.multiply(a)
# else:
return Backend.mul(self, a, b)
def matmul(self, A, b):
return jnp.stack([A.dot(b[i]) for i in range(b.shape[0])])
def while_loop(self, loop: Callable, values: tuple):
if all(self.is_available(t) for t in values):
while jnp.any(values[0]):
values = loop(*values)
return values
else:
cond = lambda vals: jnp.any(vals[0])
body = lambda vals: loop(*vals)
return jax.lax.while_loop(cond, body, values)
def max(self, x, axis=None, keepdims=False):
return jnp.max(x, axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return jnp.min(x, axis, keepdims=keepdims)
def conv(self, value, kernel, zero_padding=True):
assert kernel.shape[0] in (1, value.shape[0])
assert value.shape[1] == kernel.shape[2], f"value has {value.shape[1]} channels but kernel has {kernel.shape[2]}"
assert value.ndim + 1 == kernel.ndim
# AutoDiff may require jax.lax.conv_general_dilated
if zero_padding:
result = np.zeros((value.shape[0], kernel.shape[1], *value.shape[2:]), dtype=to_numpy_dtype(self.float_type))
else:
valid = [value.shape[i + 2] - kernel.shape[i + 3] + 1 for i in range(value.ndim - 2)]
result = np.zeros([value.shape[0], kernel.shape[1], *valid], dtype=to_numpy_dtype(self.float_type))
mode = 'same' if zero_padding else 'valid'
for b in range(value.shape[0]):
b_kernel = kernel[min(b, kernel.shape[0] - 1)]
for o in range(kernel.shape[1]):
for i in range(value.shape[1]):
result[b, o, ...] += scipy.signal.correlate(value[b, i, ...], b_kernel[o, i, ...], mode=mode)
return result
def expand_dims(self, a, axis=0, number=1):
for _i in range(number):
a = jnp.expand_dims(a, axis)
return a
def cast(self, x, dtype: DType):
if self.is_tensor(x, only_native=True) and from_numpy_dtype(x.dtype) == dtype:
return x
else:
return jnp.array(x, to_numpy_dtype(dtype))
def batched_gather_nd(self, values, indices):
assert indices.shape[-1] == self.ndims(values) - 2
batch_size = combined_dim(values.shape[0], indices.shape[0])
results = []
for b in range(batch_size):
b_values = values[min(b, values.shape[0] - 1)]
b_indices = self.unstack(indices[min(b, indices.shape[0] - 1)], -1)
results.append(b_values[b_indices])
return jnp.stack(results)
def std(self, x, axis=None, keepdims=False):
return jnp.std(x, axis, keepdims=keepdims)
def boolean_mask(self, x, mask, axis=0):
slices = [mask if i == axis else slice(None) for i in range(len(x.shape))]
return x[tuple(slices)]
def any(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, base_grid, indices, values, mode: str):
base_grid, values = self.auto_cast(base_grid, values)
batch_size = combined_dim(combined_dim(indices.shape[0], values.shape[0]), base_grid.shape[0])
spatial_dims = tuple(range(base_grid.ndim - 2))
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(1,), # channel dim of updates (batch dim removed)
inserted_window_dims=spatial_dims, # no idea what this does but spatial_dims seems to work
scatter_dims_to_operand_dims=spatial_dims) # spatial dims of base_grid (batch dim removed)
scatter = jax.lax.scatter_add if mode == 'add' else jax.lax.scatter
result = []
for b in range(batch_size):
b_grid = base_grid[b, ...]
b_indices = indices[min(b, indices.shape[0] - 1), ...]
b_values = values[min(b, values.shape[0] - 1), ...]
result.append(scatter(b_grid, b_indices, b_values, dnums))
return jnp.stack(result)
def quantile(self, x, quantiles):
return jnp.quantile(x, quantiles, axis=-1)
def fft(self, x, axes: tuple or list):
x = self.to_complex(x)
if not axes:
return x
if len(axes) == 1:
return np.fft.fft(x, axis=axes[0]).astype(x.dtype)
elif len(axes) == 2:
return np.fft.fft2(x, axes=axes).astype(x.dtype)
else:
return np.fft.fftn(x, axes=axes).astype(x.dtype)
def ifft(self, k, axes: tuple or list):
if not axes:
return k
if len(axes) == 1:
return np.fft.ifft(k, axis=axes[0]).astype(k.dtype)
elif len(axes) == 2:
return np.fft.ifft2(k, axes=axes).astype(k.dtype)
else:
return np.fft.ifftn(k, axes=axes).astype(k.dtype)
def dtype(self, array) -> DType:
if isinstance(array, int):
return DType(int, 32)
if isinstance(array, float):
return DType(float, 64)
if isinstance(array, complex):
return DType(complex, 128)
if not isinstance(array, jnp.ndarray):
array = jnp.array(array)
return from_numpy_dtype(array.dtype)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
if method == 'auto' and not trj and not self.is_available(y):
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
else:
return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj)
| for v in values:
self.block_until_ready(v) | conditional_block |
_jax_backend.py | import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def | (self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
return np.array(x)
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values):
if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
for v in values:
self.block_until_ready(v)
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward)
return jax_fun
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def random_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), mode
if mode == 'constant':
constant_values = jnp.array(constant_values, dtype=value.dtype)
return jnp.pad(value, pad_width, 'constant', constant_values=constant_values)
else:
if mode in ('periodic', 'boundary'):
mode = {'periodic': 'wrap', 'boundary': 'edge'}[mode]
return jnp.pad(value, pad_width, mode)
def reshape(self, value, shape):
return jnp.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if isinstance(value, (tuple, list)):
assert axis == 0
return sum(value[1:], value[0])
return jnp.sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
if value.dtype == bool:
return jnp.all(value, axis=axis)
return jnp.prod(value, axis=axis)
def where(self, condition, x=None, y=None):
if x is None or y is None:
return jnp.argwhere(condition)
return jnp.where(condition, x, y)
def zeros(self, shape, dtype: DType = None):
self._check_float64()
return jnp.zeros(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def ones(self, shape, dtype: DType = None):
self._check_float64()
return jnp.ones(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def meshgrid(self, *coordinates):
self._check_float64()
coordinates = [self.as_tensor(c) for c in coordinates]
return jnp.meshgrid(*coordinates, indexing='ij')
def linspace(self, start, stop, number):
self._check_float64()
return jnp.linspace(start, stop, number, dtype=to_numpy_dtype(self.float_type))
def mean(self, value, axis=None, keepdims=False):
return jnp.mean(value, axis, keepdims=keepdims)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
return jnp.tensordot(a, b, (a_axes, b_axes))
def mul(self, a, b):
# if scipy.sparse.issparse(a): # TODO sparse?
# return a.multiply(b)
# elif scipy.sparse.issparse(b):
# return b.multiply(a)
# else:
return Backend.mul(self, a, b)
def matmul(self, A, b):
return jnp.stack([A.dot(b[i]) for i in range(b.shape[0])])
def while_loop(self, loop: Callable, values: tuple):
if all(self.is_available(t) for t in values):
while jnp.any(values[0]):
values = loop(*values)
return values
else:
cond = lambda vals: jnp.any(vals[0])
body = lambda vals: loop(*vals)
return jax.lax.while_loop(cond, body, values)
def max(self, x, axis=None, keepdims=False):
return jnp.max(x, axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return jnp.min(x, axis, keepdims=keepdims)
def conv(self, value, kernel, zero_padding=True):
assert kernel.shape[0] in (1, value.shape[0])
assert value.shape[1] == kernel.shape[2], f"value has {value.shape[1]} channels but kernel has {kernel.shape[2]}"
assert value.ndim + 1 == kernel.ndim
# AutoDiff may require jax.lax.conv_general_dilated
if zero_padding:
result = np.zeros((value.shape[0], kernel.shape[1], *value.shape[2:]), dtype=to_numpy_dtype(self.float_type))
else:
valid = [value.shape[i + 2] - kernel.shape[i + 3] + 1 for i in range(value.ndim - 2)]
result = np.zeros([value.shape[0], kernel.shape[1], *valid], dtype=to_numpy_dtype(self.float_type))
mode = 'same' if zero_padding else 'valid'
for b in range(value.shape[0]):
b_kernel = kernel[min(b, kernel.shape[0] - 1)]
for o in range(kernel.shape[1]):
for i in range(value.shape[1]):
result[b, o, ...] += scipy.signal.correlate(value[b, i, ...], b_kernel[o, i, ...], mode=mode)
return result
def expand_dims(self, a, axis=0, number=1):
for _i in range(number):
a = jnp.expand_dims(a, axis)
return a
def cast(self, x, dtype: DType):
if self.is_tensor(x, only_native=True) and from_numpy_dtype(x.dtype) == dtype:
return x
else:
return jnp.array(x, to_numpy_dtype(dtype))
def batched_gather_nd(self, values, indices):
assert indices.shape[-1] == self.ndims(values) - 2
batch_size = combined_dim(values.shape[0], indices.shape[0])
results = []
for b in range(batch_size):
b_values = values[min(b, values.shape[0] - 1)]
b_indices = self.unstack(indices[min(b, indices.shape[0] - 1)], -1)
results.append(b_values[b_indices])
return jnp.stack(results)
def std(self, x, axis=None, keepdims=False):
return jnp.std(x, axis, keepdims=keepdims)
def boolean_mask(self, x, mask, axis=0):
slices = [mask if i == axis else slice(None) for i in range(len(x.shape))]
return x[tuple(slices)]
def any(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, base_grid, indices, values, mode: str):
base_grid, values = self.auto_cast(base_grid, values)
batch_size = combined_dim(combined_dim(indices.shape[0], values.shape[0]), base_grid.shape[0])
spatial_dims = tuple(range(base_grid.ndim - 2))
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(1,), # channel dim of updates (batch dim removed)
inserted_window_dims=spatial_dims, # no idea what this does but spatial_dims seems to work
scatter_dims_to_operand_dims=spatial_dims) # spatial dims of base_grid (batch dim removed)
scatter = jax.lax.scatter_add if mode == 'add' else jax.lax.scatter
result = []
for b in range(batch_size):
b_grid = base_grid[b, ...]
b_indices = indices[min(b, indices.shape[0] - 1), ...]
b_values = values[min(b, values.shape[0] - 1), ...]
result.append(scatter(b_grid, b_indices, b_values, dnums))
return jnp.stack(result)
def quantile(self, x, quantiles):
return jnp.quantile(x, quantiles, axis=-1)
def fft(self, x, axes: tuple or list):
x = self.to_complex(x)
if not axes:
return x
if len(axes) == 1:
return np.fft.fft(x, axis=axes[0]).astype(x.dtype)
elif len(axes) == 2:
return np.fft.fft2(x, axes=axes).astype(x.dtype)
else:
return np.fft.fftn(x, axes=axes).astype(x.dtype)
def ifft(self, k, axes: tuple or list):
if not axes:
return k
if len(axes) == 1:
return np.fft.ifft(k, axis=axes[0]).astype(k.dtype)
elif len(axes) == 2:
return np.fft.ifft2(k, axes=axes).astype(k.dtype)
else:
return np.fft.ifftn(k, axes=axes).astype(k.dtype)
def dtype(self, array) -> DType:
if isinstance(array, int):
return DType(int, 32)
if isinstance(array, float):
return DType(float, 64)
if isinstance(array, complex):
return DType(complex, 128)
if not isinstance(array, jnp.ndarray):
array = jnp.array(array)
return from_numpy_dtype(array.dtype)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
if method == 'auto' and not trj and not self.is_available(y):
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
else:
return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj)
| prefers_channels_last | identifier_name |
_jax_backend.py | import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def prefers_channels_last(self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
|
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values):
if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
for v in values:
self.block_until_ready(v)
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward)
return jax_fun
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def random_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), mode
if mode == 'constant':
constant_values = jnp.array(constant_values, dtype=value.dtype)
return jnp.pad(value, pad_width, 'constant', constant_values=constant_values)
else:
if mode in ('periodic', 'boundary'):
mode = {'periodic': 'wrap', 'boundary': 'edge'}[mode]
return jnp.pad(value, pad_width, mode)
def reshape(self, value, shape):
return jnp.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if isinstance(value, (tuple, list)):
assert axis == 0
return sum(value[1:], value[0])
return jnp.sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
if value.dtype == bool:
return jnp.all(value, axis=axis)
return jnp.prod(value, axis=axis)
def where(self, condition, x=None, y=None):
if x is None or y is None:
return jnp.argwhere(condition)
return jnp.where(condition, x, y)
def zeros(self, shape, dtype: DType = None):
self._check_float64()
return jnp.zeros(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def ones(self, shape, dtype: DType = None):
self._check_float64()
return jnp.ones(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def meshgrid(self, *coordinates):
self._check_float64()
coordinates = [self.as_tensor(c) for c in coordinates]
return jnp.meshgrid(*coordinates, indexing='ij')
def linspace(self, start, stop, number):
self._check_float64()
return jnp.linspace(start, stop, number, dtype=to_numpy_dtype(self.float_type))
def mean(self, value, axis=None, keepdims=False):
return jnp.mean(value, axis, keepdims=keepdims)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
return jnp.tensordot(a, b, (a_axes, b_axes))
def mul(self, a, b):
# if scipy.sparse.issparse(a): # TODO sparse?
# return a.multiply(b)
# elif scipy.sparse.issparse(b):
# return b.multiply(a)
# else:
return Backend.mul(self, a, b)
def matmul(self, A, b):
return jnp.stack([A.dot(b[i]) for i in range(b.shape[0])])
def while_loop(self, loop: Callable, values: tuple):
if all(self.is_available(t) for t in values):
while jnp.any(values[0]):
values = loop(*values)
return values
else:
cond = lambda vals: jnp.any(vals[0])
body = lambda vals: loop(*vals)
return jax.lax.while_loop(cond, body, values)
def max(self, x, axis=None, keepdims=False):
return jnp.max(x, axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return jnp.min(x, axis, keepdims=keepdims)
def conv(self, value, kernel, zero_padding=True):
assert kernel.shape[0] in (1, value.shape[0])
assert value.shape[1] == kernel.shape[2], f"value has {value.shape[1]} channels but kernel has {kernel.shape[2]}"
assert value.ndim + 1 == kernel.ndim
# AutoDiff may require jax.lax.conv_general_dilated
if zero_padding:
result = np.zeros((value.shape[0], kernel.shape[1], *value.shape[2:]), dtype=to_numpy_dtype(self.float_type))
else:
valid = [value.shape[i + 2] - kernel.shape[i + 3] + 1 for i in range(value.ndim - 2)]
result = np.zeros([value.shape[0], kernel.shape[1], *valid], dtype=to_numpy_dtype(self.float_type))
mode = 'same' if zero_padding else 'valid'
for b in range(value.shape[0]):
b_kernel = kernel[min(b, kernel.shape[0] - 1)]
for o in range(kernel.shape[1]):
for i in range(value.shape[1]):
result[b, o, ...] += scipy.signal.correlate(value[b, i, ...], b_kernel[o, i, ...], mode=mode)
return result
def expand_dims(self, a, axis=0, number=1):
for _i in range(number):
a = jnp.expand_dims(a, axis)
return a
def cast(self, x, dtype: DType):
if self.is_tensor(x, only_native=True) and from_numpy_dtype(x.dtype) == dtype:
return x
else:
return jnp.array(x, to_numpy_dtype(dtype))
def batched_gather_nd(self, values, indices):
assert indices.shape[-1] == self.ndims(values) - 2
batch_size = combined_dim(values.shape[0], indices.shape[0])
results = []
for b in range(batch_size):
b_values = values[min(b, values.shape[0] - 1)]
b_indices = self.unstack(indices[min(b, indices.shape[0] - 1)], -1)
results.append(b_values[b_indices])
return jnp.stack(results)
def std(self, x, axis=None, keepdims=False):
return jnp.std(x, axis, keepdims=keepdims)
def boolean_mask(self, x, mask, axis=0):
slices = [mask if i == axis else slice(None) for i in range(len(x.shape))]
return x[tuple(slices)]
def any(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, base_grid, indices, values, mode: str):
base_grid, values = self.auto_cast(base_grid, values)
batch_size = combined_dim(combined_dim(indices.shape[0], values.shape[0]), base_grid.shape[0])
spatial_dims = tuple(range(base_grid.ndim - 2))
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(1,), # channel dim of updates (batch dim removed)
inserted_window_dims=spatial_dims, # no idea what this does but spatial_dims seems to work
scatter_dims_to_operand_dims=spatial_dims) # spatial dims of base_grid (batch dim removed)
scatter = jax.lax.scatter_add if mode == 'add' else jax.lax.scatter
result = []
for b in range(batch_size):
b_grid = base_grid[b, ...]
b_indices = indices[min(b, indices.shape[0] - 1), ...]
b_values = values[min(b, values.shape[0] - 1), ...]
result.append(scatter(b_grid, b_indices, b_values, dnums))
return jnp.stack(result)
def quantile(self, x, quantiles):
return jnp.quantile(x, quantiles, axis=-1)
def fft(self, x, axes: tuple or list):
x = self.to_complex(x)
if not axes:
return x
if len(axes) == 1:
return np.fft.fft(x, axis=axes[0]).astype(x.dtype)
elif len(axes) == 2:
return np.fft.fft2(x, axes=axes).astype(x.dtype)
else:
return np.fft.fftn(x, axes=axes).astype(x.dtype)
def ifft(self, k, axes: tuple or list):
if not axes:
return k
if len(axes) == 1:
return np.fft.ifft(k, axis=axes[0]).astype(k.dtype)
elif len(axes) == 2:
return np.fft.ifft2(k, axes=axes).astype(k.dtype)
else:
return np.fft.ifftn(k, axes=axes).astype(k.dtype)
def dtype(self, array) -> DType:
if isinstance(array, int):
return DType(int, 32)
if isinstance(array, float):
return DType(float, 64)
if isinstance(array, complex):
return DType(complex, 128)
if not isinstance(array, jnp.ndarray):
array = jnp.array(array)
return from_numpy_dtype(array.dtype)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
if method == 'auto' and not trj and not self.is_available(y):
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
else:
return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj)
| return np.array(x) | identifier_body |
_jax_backend.py | import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def prefers_channels_last(self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
return np.array(x)
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values): |
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward)
return jax_fun
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def random_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), mode
if mode == 'constant':
constant_values = jnp.array(constant_values, dtype=value.dtype)
return jnp.pad(value, pad_width, 'constant', constant_values=constant_values)
else:
if mode in ('periodic', 'boundary'):
mode = {'periodic': 'wrap', 'boundary': 'edge'}[mode]
return jnp.pad(value, pad_width, mode)
def reshape(self, value, shape):
return jnp.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if isinstance(value, (tuple, list)):
assert axis == 0
return sum(value[1:], value[0])
return jnp.sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
if value.dtype == bool:
return jnp.all(value, axis=axis)
return jnp.prod(value, axis=axis)
def where(self, condition, x=None, y=None):
if x is None or y is None:
return jnp.argwhere(condition)
return jnp.where(condition, x, y)
def zeros(self, shape, dtype: DType = None):
self._check_float64()
return jnp.zeros(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def ones(self, shape, dtype: DType = None):
self._check_float64()
return jnp.ones(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def meshgrid(self, *coordinates):
self._check_float64()
coordinates = [self.as_tensor(c) for c in coordinates]
return jnp.meshgrid(*coordinates, indexing='ij')
def linspace(self, start, stop, number):
self._check_float64()
return jnp.linspace(start, stop, number, dtype=to_numpy_dtype(self.float_type))
def mean(self, value, axis=None, keepdims=False):
return jnp.mean(value, axis, keepdims=keepdims)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
return jnp.tensordot(a, b, (a_axes, b_axes))
def mul(self, a, b):
# if scipy.sparse.issparse(a): # TODO sparse?
# return a.multiply(b)
# elif scipy.sparse.issparse(b):
# return b.multiply(a)
# else:
return Backend.mul(self, a, b)
def matmul(self, A, b):
return jnp.stack([A.dot(b[i]) for i in range(b.shape[0])])
def while_loop(self, loop: Callable, values: tuple):
if all(self.is_available(t) for t in values):
while jnp.any(values[0]):
values = loop(*values)
return values
else:
cond = lambda vals: jnp.any(vals[0])
body = lambda vals: loop(*vals)
return jax.lax.while_loop(cond, body, values)
def max(self, x, axis=None, keepdims=False):
return jnp.max(x, axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return jnp.min(x, axis, keepdims=keepdims)
def conv(self, value, kernel, zero_padding=True):
assert kernel.shape[0] in (1, value.shape[0])
assert value.shape[1] == kernel.shape[2], f"value has {value.shape[1]} channels but kernel has {kernel.shape[2]}"
assert value.ndim + 1 == kernel.ndim
# AutoDiff may require jax.lax.conv_general_dilated
if zero_padding:
result = np.zeros((value.shape[0], kernel.shape[1], *value.shape[2:]), dtype=to_numpy_dtype(self.float_type))
else:
valid = [value.shape[i + 2] - kernel.shape[i + 3] + 1 for i in range(value.ndim - 2)]
result = np.zeros([value.shape[0], kernel.shape[1], *valid], dtype=to_numpy_dtype(self.float_type))
mode = 'same' if zero_padding else 'valid'
for b in range(value.shape[0]):
b_kernel = kernel[min(b, kernel.shape[0] - 1)]
for o in range(kernel.shape[1]):
for i in range(value.shape[1]):
result[b, o, ...] += scipy.signal.correlate(value[b, i, ...], b_kernel[o, i, ...], mode=mode)
return result
def expand_dims(self, a, axis=0, number=1):
for _i in range(number):
a = jnp.expand_dims(a, axis)
return a
def cast(self, x, dtype: DType):
if self.is_tensor(x, only_native=True) and from_numpy_dtype(x.dtype) == dtype:
return x
else:
return jnp.array(x, to_numpy_dtype(dtype))
def batched_gather_nd(self, values, indices):
assert indices.shape[-1] == self.ndims(values) - 2
batch_size = combined_dim(values.shape[0], indices.shape[0])
results = []
for b in range(batch_size):
b_values = values[min(b, values.shape[0] - 1)]
b_indices = self.unstack(indices[min(b, indices.shape[0] - 1)], -1)
results.append(b_values[b_indices])
return jnp.stack(results)
def std(self, x, axis=None, keepdims=False):
return jnp.std(x, axis, keepdims=keepdims)
def boolean_mask(self, x, mask, axis=0):
slices = [mask if i == axis else slice(None) for i in range(len(x.shape))]
return x[tuple(slices)]
def any(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, base_grid, indices, values, mode: str):
base_grid, values = self.auto_cast(base_grid, values)
batch_size = combined_dim(combined_dim(indices.shape[0], values.shape[0]), base_grid.shape[0])
spatial_dims = tuple(range(base_grid.ndim - 2))
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(1,), # channel dim of updates (batch dim removed)
inserted_window_dims=spatial_dims, # no idea what this does but spatial_dims seems to work
scatter_dims_to_operand_dims=spatial_dims) # spatial dims of base_grid (batch dim removed)
scatter = jax.lax.scatter_add if mode == 'add' else jax.lax.scatter
result = []
for b in range(batch_size):
b_grid = base_grid[b, ...]
b_indices = indices[min(b, indices.shape[0] - 1), ...]
b_values = values[min(b, values.shape[0] - 1), ...]
result.append(scatter(b_grid, b_indices, b_values, dnums))
return jnp.stack(result)
def quantile(self, x, quantiles):
return jnp.quantile(x, quantiles, axis=-1)
def fft(self, x, axes: tuple or list):
x = self.to_complex(x)
if not axes:
return x
if len(axes) == 1:
return np.fft.fft(x, axis=axes[0]).astype(x.dtype)
elif len(axes) == 2:
return np.fft.fft2(x, axes=axes).astype(x.dtype)
else:
return np.fft.fftn(x, axes=axes).astype(x.dtype)
def ifft(self, k, axes: tuple or list):
if not axes:
return k
if len(axes) == 1:
return np.fft.ifft(k, axis=axes[0]).astype(k.dtype)
elif len(axes) == 2:
return np.fft.ifft2(k, axes=axes).astype(k.dtype)
else:
return np.fft.ifftn(k, axes=axes).astype(k.dtype)
def dtype(self, array) -> DType:
if isinstance(array, int):
return DType(int, 32)
if isinstance(array, float):
return DType(float, 64)
if isinstance(array, complex):
return DType(complex, 128)
if not isinstance(array, jnp.ndarray):
array = jnp.array(array)
return from_numpy_dtype(array.dtype)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
if method == 'auto' and not trj and not self.is_available(y):
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
else:
return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj) | if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
for v in values:
self.block_until_ready(v) | random_line_split |
authorise.js | /**
* Copyright 2013-present NightWorld.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var error = require('./error'),
runner = require('./runner'),
Client = require('./client');
module.exports = Authorise;
/**
* This is the function order used by the runner
*
* @type {Array}
*/
var fns = [
checkAuthoriseType,
checkScope
];
/**
* Authorise
*
* @param {Object} config Instance of OAuth object
* @param {Object} req
* @param {Object} res
* @param {Object} options
* @param {Function} next
*/
function Authorise (config, req, res, options, next) {
options = options || {};
this.config = config;
this.model = config.model;
this.req = req;
this.res = res;
this.options = options;
runner(fns, this, next);
}
function checkAuthoriseType(done) {
var client = Client.credsFromBasic(this.req) || Client.credsFromBody(this.req);
if (this.options.implicit) {
if (this.req.body.response_type === 'token') {
if (client.clientId) {
this.redirectUri = this.req.body.redirect_uri || this.req.query.redirect_uri;
this.clientId = client.clientId;
this.req.auth_type = 'implicit';
return checkImplicitClient.call(this, done);
}
}
}
if (this.options.client_credentials) {
if (client.clientId && client.clientSecret) {
this.client = client;
this.req.auth_type = 'client_credentials';
return getUserFromClient.call(this, done);
}
}
getBearerToken.call(this, done);
}
function getUserFromClient(done) {
var self = this;
this.model.getClient(this.client.clientId, this.client.clientSecret,
function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Client credentials are invalid'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
function checkImplicitClient (done) {
var self = this;
this.model.getClient(this.clientId, null, function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Invalid client credentials'));
} else if (self.redirectUri && Array.isArray(client.redirectUri)) {
if (client.redirectUri.indexOf(self.redirectUri) === -1) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
client.redirectUri = self.redirectUri;
} else if (self.redirectUri && client.redirectUri !== self.redirectUri) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
// The request contains valid params so any errors after this point
// are redirected to the redirect_uri
self.res.redirectUri = client.redirectUri;
self.res.oauthRedirect = true;
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
/**
* Get bearer token
*
* Extract token from request according to RFC6750
*
* @param {Function} done
* @this OAuth
*/
function getBearerToken (done) {
var headerToken = this.req.get('Authorization'),
getToken = this.req.query.access_token,
postToken = this.req.body ? this.req.body.access_token : undefined;
// Check exactly one method was used
var methodsUsed = (headerToken !== undefined) + (getToken !== undefined) +
(postToken !== undefined);
if (methodsUsed > 1) {
return done(error('invalid_request',
'Only one method may be used to authenticate at a time (Auth header, ' +
'GET or POST).'));
} else if (methodsUsed === 0) |
// Header: http://tools.ietf.org/html/rfc6750#section-2.1
if (headerToken) {
var matches = headerToken.match(/Bearer\s(\S+)/);
if (!matches) {
return done(error('invalid_request', 'Malformed auth header'));
}
headerToken = matches[1];
}
// POST: http://tools.ietf.org/html/rfc6750#section-2.2
if (postToken) {
if (this.req.method === 'GET') {
return done(error('invalid_request',
'Method cannot be GET When putting the token in the body.'));
}
if (!this.req.is('application/x-www-form-urlencoded')) {
return done(error('invalid_request', 'When putting the token in the ' +
'body, content type must be application/x-www-form-urlencoded.'));
}
}
this.bearerToken = headerToken || postToken || getToken;
checkToken.call(this, done);
}
/**
* Check token
*
* Check it against model, ensure it's not expired
* @param {Function} done
* @this OAuth
*/
function checkToken (done) {
var self = this;
this.model.getAccessToken(this.bearerToken, function (err, token) {
if (err) return done(error('server_error', false, err));
if (!token) {
return done(error('invalid_token',
'The access token provided is invalid.'));
}
if (token.expires !== null &&
(!token.expires || token.expires < new Date())) {
return done(error('invalid_token',
'The access token provided has expired.'));
}
// Expose params
self.req.oauth = { bearerToken: token };
self.req.user = token.user ? token.user : { id: token.userId };
done();
});
}
/**
* Check scope
*
* @param {Function} done
* @this OAuth
*/
function checkScope (done) {
if (!this.model.authoriseScope) return done();
this.model.authoriseScope(this.req.oauth.bearerToken, this.options.scope,
function (err, invalid) {
if (err) return done(error('server_error', false, err));
if (invalid) return done(error('invalid_scope', invalid));
done();
});
}
| {
return done(error('invalid_request', 'The access token was not found'));
} | conditional_block |
authorise.js | /**
* Copyright 2013-present NightWorld.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var error = require('./error'),
runner = require('./runner'),
Client = require('./client');
module.exports = Authorise;
/**
* This is the function order used by the runner
*
* @type {Array}
*/
var fns = [
checkAuthoriseType,
checkScope
];
/**
* Authorise
*
* @param {Object} config Instance of OAuth object
* @param {Object} req
* @param {Object} res
* @param {Object} options
* @param {Function} next
*/
function Authorise (config, req, res, options, next) {
options = options || {};
this.config = config;
this.model = config.model;
this.req = req;
this.res = res;
this.options = options;
runner(fns, this, next);
}
function checkAuthoriseType(done) {
var client = Client.credsFromBasic(this.req) || Client.credsFromBody(this.req);
if (this.options.implicit) {
if (this.req.body.response_type === 'token') {
if (client.clientId) {
this.redirectUri = this.req.body.redirect_uri || this.req.query.redirect_uri;
this.clientId = client.clientId;
this.req.auth_type = 'implicit';
return checkImplicitClient.call(this, done);
}
}
}
if (this.options.client_credentials) {
if (client.clientId && client.clientSecret) {
this.client = client;
this.req.auth_type = 'client_credentials';
return getUserFromClient.call(this, done);
}
}
getBearerToken.call(this, done);
}
function getUserFromClient(done) |
function checkImplicitClient (done) {
var self = this;
this.model.getClient(this.clientId, null, function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Invalid client credentials'));
} else if (self.redirectUri && Array.isArray(client.redirectUri)) {
if (client.redirectUri.indexOf(self.redirectUri) === -1) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
client.redirectUri = self.redirectUri;
} else if (self.redirectUri && client.redirectUri !== self.redirectUri) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
// The request contains valid params so any errors after this point
// are redirected to the redirect_uri
self.res.redirectUri = client.redirectUri;
self.res.oauthRedirect = true;
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
/**
* Get bearer token
*
* Extract token from request according to RFC6750
*
* @param {Function} done
* @this OAuth
*/
function getBearerToken (done) {
var headerToken = this.req.get('Authorization'),
getToken = this.req.query.access_token,
postToken = this.req.body ? this.req.body.access_token : undefined;
// Check exactly one method was used
var methodsUsed = (headerToken !== undefined) + (getToken !== undefined) +
(postToken !== undefined);
if (methodsUsed > 1) {
return done(error('invalid_request',
'Only one method may be used to authenticate at a time (Auth header, ' +
'GET or POST).'));
} else if (methodsUsed === 0) {
return done(error('invalid_request', 'The access token was not found'));
}
// Header: http://tools.ietf.org/html/rfc6750#section-2.1
if (headerToken) {
var matches = headerToken.match(/Bearer\s(\S+)/);
if (!matches) {
return done(error('invalid_request', 'Malformed auth header'));
}
headerToken = matches[1];
}
// POST: http://tools.ietf.org/html/rfc6750#section-2.2
if (postToken) {
if (this.req.method === 'GET') {
return done(error('invalid_request',
'Method cannot be GET When putting the token in the body.'));
}
if (!this.req.is('application/x-www-form-urlencoded')) {
return done(error('invalid_request', 'When putting the token in the ' +
'body, content type must be application/x-www-form-urlencoded.'));
}
}
this.bearerToken = headerToken || postToken || getToken;
checkToken.call(this, done);
}
/**
* Check token
*
* Check it against model, ensure it's not expired
* @param {Function} done
* @this OAuth
*/
function checkToken (done) {
var self = this;
this.model.getAccessToken(this.bearerToken, function (err, token) {
if (err) return done(error('server_error', false, err));
if (!token) {
return done(error('invalid_token',
'The access token provided is invalid.'));
}
if (token.expires !== null &&
(!token.expires || token.expires < new Date())) {
return done(error('invalid_token',
'The access token provided has expired.'));
}
// Expose params
self.req.oauth = { bearerToken: token };
self.req.user = token.user ? token.user : { id: token.userId };
done();
});
}
/**
* Check scope
*
* @param {Function} done
* @this OAuth
*/
function checkScope (done) {
if (!this.model.authoriseScope) return done();
this.model.authoriseScope(this.req.oauth.bearerToken, this.options.scope,
function (err, invalid) {
if (err) return done(error('server_error', false, err));
if (invalid) return done(error('invalid_scope', invalid));
done();
});
}
| {
var self = this;
this.model.getClient(this.client.clientId, this.client.clientSecret,
function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Client credentials are invalid'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
} | identifier_body |
authorise.js | /**
* Copyright 2013-present NightWorld.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var error = require('./error'),
runner = require('./runner'),
Client = require('./client');
module.exports = Authorise;
/**
* This is the function order used by the runner
*
* @type {Array}
*/
var fns = [
checkAuthoriseType,
checkScope
];
/**
* Authorise
*
* @param {Object} config Instance of OAuth object
* @param {Object} req
* @param {Object} res
* @param {Object} options
* @param {Function} next
*/
function Authorise (config, req, res, options, next) {
options = options || {};
this.config = config;
this.model = config.model;
this.req = req;
this.res = res;
this.options = options;
runner(fns, this, next);
}
function checkAuthoriseType(done) {
var client = Client.credsFromBasic(this.req) || Client.credsFromBody(this.req);
if (this.options.implicit) {
if (this.req.body.response_type === 'token') {
if (client.clientId) {
this.redirectUri = this.req.body.redirect_uri || this.req.query.redirect_uri;
this.clientId = client.clientId;
this.req.auth_type = 'implicit';
return checkImplicitClient.call(this, done);
}
}
}
if (this.options.client_credentials) {
if (client.clientId && client.clientSecret) {
this.client = client;
this.req.auth_type = 'client_credentials';
return getUserFromClient.call(this, done);
}
}
getBearerToken.call(this, done);
}
function getUserFromClient(done) {
var self = this;
this.model.getClient(this.client.clientId, this.client.clientSecret,
function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Client credentials are invalid'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
function checkImplicitClient (done) {
var self = this;
this.model.getClient(this.clientId, null, function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Invalid client credentials'));
} else if (self.redirectUri && Array.isArray(client.redirectUri)) {
if (client.redirectUri.indexOf(self.redirectUri) === -1) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
client.redirectUri = self.redirectUri;
} else if (self.redirectUri && client.redirectUri !== self.redirectUri) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
// The request contains valid params so any errors after this point
// are redirected to the redirect_uri
self.res.redirectUri = client.redirectUri;
self.res.oauthRedirect = true;
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
/**
* Get bearer token
*
* Extract token from request according to RFC6750
*
* @param {Function} done
* @this OAuth
*/
function getBearerToken (done) {
var headerToken = this.req.get('Authorization'),
getToken = this.req.query.access_token,
postToken = this.req.body ? this.req.body.access_token : undefined;
// Check exactly one method was used
var methodsUsed = (headerToken !== undefined) + (getToken !== undefined) +
(postToken !== undefined);
if (methodsUsed > 1) {
return done(error('invalid_request',
'Only one method may be used to authenticate at a time (Auth header, ' +
'GET or POST).'));
} else if (methodsUsed === 0) {
return done(error('invalid_request', 'The access token was not found'));
}
// Header: http://tools.ietf.org/html/rfc6750#section-2.1
if (headerToken) {
var matches = headerToken.match(/Bearer\s(\S+)/);
if (!matches) {
return done(error('invalid_request', 'Malformed auth header'));
}
headerToken = matches[1];
}
// POST: http://tools.ietf.org/html/rfc6750#section-2.2
if (postToken) {
if (this.req.method === 'GET') {
return done(error('invalid_request',
'Method cannot be GET When putting the token in the body.'));
}
if (!this.req.is('application/x-www-form-urlencoded')) {
return done(error('invalid_request', 'When putting the token in the ' +
'body, content type must be application/x-www-form-urlencoded.'));
}
}
this.bearerToken = headerToken || postToken || getToken;
checkToken.call(this, done);
}
/**
* Check token
*
* Check it against model, ensure it's not expired
* @param {Function} done
* @this OAuth
*/
function | (done) {
var self = this;
this.model.getAccessToken(this.bearerToken, function (err, token) {
if (err) return done(error('server_error', false, err));
if (!token) {
return done(error('invalid_token',
'The access token provided is invalid.'));
}
if (token.expires !== null &&
(!token.expires || token.expires < new Date())) {
return done(error('invalid_token',
'The access token provided has expired.'));
}
// Expose params
self.req.oauth = { bearerToken: token };
self.req.user = token.user ? token.user : { id: token.userId };
done();
});
}
/**
* Check scope
*
* @param {Function} done
* @this OAuth
*/
function checkScope (done) {
if (!this.model.authoriseScope) return done();
this.model.authoriseScope(this.req.oauth.bearerToken, this.options.scope,
function (err, invalid) {
if (err) return done(error('server_error', false, err));
if (invalid) return done(error('invalid_scope', invalid));
done();
});
}
| checkToken | identifier_name |
authorise.js | /**
* Copyright 2013-present NightWorld.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and | * limitations under the License.
*/
var error = require('./error'),
runner = require('./runner'),
Client = require('./client');
module.exports = Authorise;
/**
* This is the function order used by the runner
*
* @type {Array}
*/
var fns = [
checkAuthoriseType,
checkScope
];
/**
* Authorise
*
* @param {Object} config Instance of OAuth object
* @param {Object} req
* @param {Object} res
* @param {Object} options
* @param {Function} next
*/
function Authorise (config, req, res, options, next) {
options = options || {};
this.config = config;
this.model = config.model;
this.req = req;
this.res = res;
this.options = options;
runner(fns, this, next);
}
function checkAuthoriseType(done) {
var client = Client.credsFromBasic(this.req) || Client.credsFromBody(this.req);
if (this.options.implicit) {
if (this.req.body.response_type === 'token') {
if (client.clientId) {
this.redirectUri = this.req.body.redirect_uri || this.req.query.redirect_uri;
this.clientId = client.clientId;
this.req.auth_type = 'implicit';
return checkImplicitClient.call(this, done);
}
}
}
if (this.options.client_credentials) {
if (client.clientId && client.clientSecret) {
this.client = client;
this.req.auth_type = 'client_credentials';
return getUserFromClient.call(this, done);
}
}
getBearerToken.call(this, done);
}
function getUserFromClient(done) {
var self = this;
this.model.getClient(this.client.clientId, this.client.clientSecret,
function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Client credentials are invalid'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
function checkImplicitClient (done) {
var self = this;
this.model.getClient(this.clientId, null, function (err, client) {
if (err) return done(error('server_error', false, err));
if (!client) {
return done(error('invalid_client', 'Invalid client credentials'));
} else if (self.redirectUri && Array.isArray(client.redirectUri)) {
if (client.redirectUri.indexOf(self.redirectUri) === -1) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
client.redirectUri = self.redirectUri;
} else if (self.redirectUri && client.redirectUri !== self.redirectUri) {
return done(error('invalid_request', 'redirect_uri does not match'));
}
self.model.getUserFromClient(client, function (err, user) {
if (err) return done(error('server_error', false, err));
if (!user) {
return done(error('invalid_grant', 'Client credentials are invalid'));
}
// The request contains valid params so any errors after this point
// are redirected to the redirect_uri
self.res.redirectUri = client.redirectUri;
self.res.oauthRedirect = true;
self.req.oauth = { bearerToken: user };
self.req.user = { id: user.id };
done();
});
});
}
/**
* Get bearer token
*
* Extract token from request according to RFC6750
*
* @param {Function} done
* @this OAuth
*/
function getBearerToken (done) {
var headerToken = this.req.get('Authorization'),
getToken = this.req.query.access_token,
postToken = this.req.body ? this.req.body.access_token : undefined;
// Check exactly one method was used
var methodsUsed = (headerToken !== undefined) + (getToken !== undefined) +
(postToken !== undefined);
if (methodsUsed > 1) {
return done(error('invalid_request',
'Only one method may be used to authenticate at a time (Auth header, ' +
'GET or POST).'));
} else if (methodsUsed === 0) {
return done(error('invalid_request', 'The access token was not found'));
}
// Header: http://tools.ietf.org/html/rfc6750#section-2.1
if (headerToken) {
var matches = headerToken.match(/Bearer\s(\S+)/);
if (!matches) {
return done(error('invalid_request', 'Malformed auth header'));
}
headerToken = matches[1];
}
// POST: http://tools.ietf.org/html/rfc6750#section-2.2
if (postToken) {
if (this.req.method === 'GET') {
return done(error('invalid_request',
'Method cannot be GET When putting the token in the body.'));
}
if (!this.req.is('application/x-www-form-urlencoded')) {
return done(error('invalid_request', 'When putting the token in the ' +
'body, content type must be application/x-www-form-urlencoded.'));
}
}
this.bearerToken = headerToken || postToken || getToken;
checkToken.call(this, done);
}
/**
* Check token
*
* Check it against model, ensure it's not expired
* @param {Function} done
* @this OAuth
*/
function checkToken (done) {
var self = this;
this.model.getAccessToken(this.bearerToken, function (err, token) {
if (err) return done(error('server_error', false, err));
if (!token) {
return done(error('invalid_token',
'The access token provided is invalid.'));
}
if (token.expires !== null &&
(!token.expires || token.expires < new Date())) {
return done(error('invalid_token',
'The access token provided has expired.'));
}
// Expose params
self.req.oauth = { bearerToken: token };
self.req.user = token.user ? token.user : { id: token.userId };
done();
});
}
/**
* Check scope
*
* @param {Function} done
* @this OAuth
*/
function checkScope (done) {
if (!this.model.authoriseScope) return done();
this.model.authoriseScope(this.req.oauth.bearerToken, this.options.scope,
function (err, invalid) {
if (err) return done(error('server_error', false, err));
if (invalid) return done(error('invalid_scope', invalid));
done();
});
} | random_line_split | |
utils.py | from django.conf import settings
from mock import Mock
from cabot.cabotapp import defs
from datetime import datetime
def build_absolute_url(relative_url):
"""Prepend https?://host to a url, useful for links going into emails"""
return '{}://{}{}'.format(settings.WWW_SCHEME, settings.WWW_HTTP_HOST, relative_url)
def create_failing_service_mock():
"""
Create a Mock object mimicking a critical service, with a single (also mocked) failing check.
Note that not all attributes are mocked (notably hipchat_instance, mattermost_instance).
Primary keys/IDs are mocked to be 0. Functions that return querysets in reality (like active_status_checks)
will return hard-coded lists.
This is typically called by an AlertPlugin.send_test_alert() implementation, and further configured by calling
service_mock.configure_mock(attr=value, ...) to add any plugin-specific attributes (like mattermost_instance).
:return: Mock emulating a service with 1 failing check
"""
check_mock = Mock()
check_mock.configure_mock(id=0, pk=0, name='Alert Testing Check', active=True,
get_status_image=lambda: None, check_category=lambda: "Mock Check",
get_importance_display=lambda: "Critical")
service_mock = Mock()
service_mock.configure_mock(id=0, pk=0, name='Alert Testing Service', alerts_enabled=True,
# plugins use service.CRITICAL_STATUS etc, so we mock these constants too
CRITICAL_STATUS=defs.CRITICAL_STATUS, PASSING_STATUS=defs.PASSING_STATUS,
WARNING_STATUS=defs.WARNING_STATUS, ERROR_STATUS=defs.ERROR_STATUS,
status_checks=[check_mock], recent_snapshots=[],
overall_status=defs.CRITICAL_STATUS,
active_status_checks=lambda: [check_mock],
all_passing_checks=lambda: [], all_failing_checks=lambda: [check_mock])
return service_mock
def | (dt):
'''
Convert datetime to string. None is converted to empty string. This is used
primarily for formatting datetimes in API responses, whereas format_timestamp
is used for a more human-readable format to be displayed on the web.
'''
return '' if dt is None else datetime.strftime(dt, '%Y-%m-%d %H:%M:%S')
| format_datetime | identifier_name |
utils.py | from django.conf import settings
from mock import Mock
from cabot.cabotapp import defs
from datetime import datetime
def build_absolute_url(relative_url):
|
def create_failing_service_mock():
"""
Create a Mock object mimicking a critical service, with a single (also mocked) failing check.
Note that not all attributes are mocked (notably hipchat_instance, mattermost_instance).
Primary keys/IDs are mocked to be 0. Functions that return querysets in reality (like active_status_checks)
will return hard-coded lists.
This is typically called by an AlertPlugin.send_test_alert() implementation, and further configured by calling
service_mock.configure_mock(attr=value, ...) to add any plugin-specific attributes (like mattermost_instance).
:return: Mock emulating a service with 1 failing check
"""
check_mock = Mock()
check_mock.configure_mock(id=0, pk=0, name='Alert Testing Check', active=True,
get_status_image=lambda: None, check_category=lambda: "Mock Check",
get_importance_display=lambda: "Critical")
service_mock = Mock()
service_mock.configure_mock(id=0, pk=0, name='Alert Testing Service', alerts_enabled=True,
# plugins use service.CRITICAL_STATUS etc, so we mock these constants too
CRITICAL_STATUS=defs.CRITICAL_STATUS, PASSING_STATUS=defs.PASSING_STATUS,
WARNING_STATUS=defs.WARNING_STATUS, ERROR_STATUS=defs.ERROR_STATUS,
status_checks=[check_mock], recent_snapshots=[],
overall_status=defs.CRITICAL_STATUS,
active_status_checks=lambda: [check_mock],
all_passing_checks=lambda: [], all_failing_checks=lambda: [check_mock])
return service_mock
def format_datetime(dt):
'''
Convert datetime to string. None is converted to empty string. This is used
primarily for formatting datetimes in API responses, whereas format_timestamp
is used for a more human-readable format to be displayed on the web.
'''
return '' if dt is None else datetime.strftime(dt, '%Y-%m-%d %H:%M:%S')
| """Prepend https?://host to a url, useful for links going into emails"""
return '{}://{}{}'.format(settings.WWW_SCHEME, settings.WWW_HTTP_HOST, relative_url) | identifier_body |
utils.py | from django.conf import settings
from mock import Mock
from cabot.cabotapp import defs
from datetime import datetime
def build_absolute_url(relative_url):
"""Prepend https?://host to a url, useful for links going into emails"""
return '{}://{}{}'.format(settings.WWW_SCHEME, settings.WWW_HTTP_HOST, relative_url)
def create_failing_service_mock():
"""
Create a Mock object mimicking a critical service, with a single (also mocked) failing check.
Note that not all attributes are mocked (notably hipchat_instance, mattermost_instance).
Primary keys/IDs are mocked to be 0. Functions that return querysets in reality (like active_status_checks)
will return hard-coded lists.
This is typically called by an AlertPlugin.send_test_alert() implementation, and further configured by calling
service_mock.configure_mock(attr=value, ...) to add any plugin-specific attributes (like mattermost_instance).
:return: Mock emulating a service with 1 failing check
"""
check_mock = Mock()
check_mock.configure_mock(id=0, pk=0, name='Alert Testing Check', active=True,
get_status_image=lambda: None, check_category=lambda: "Mock Check",
get_importance_display=lambda: "Critical")
service_mock = Mock()
service_mock.configure_mock(id=0, pk=0, name='Alert Testing Service', alerts_enabled=True,
# plugins use service.CRITICAL_STATUS etc, so we mock these constants too
CRITICAL_STATUS=defs.CRITICAL_STATUS, PASSING_STATUS=defs.PASSING_STATUS,
WARNING_STATUS=defs.WARNING_STATUS, ERROR_STATUS=defs.ERROR_STATUS,
status_checks=[check_mock], recent_snapshots=[],
overall_status=defs.CRITICAL_STATUS,
active_status_checks=lambda: [check_mock],
all_passing_checks=lambda: [], all_failing_checks=lambda: [check_mock])
| '''
Convert datetime to string. None is converted to empty string. This is used
primarily for formatting datetimes in API responses, whereas format_timestamp
is used for a more human-readable format to be displayed on the web.
'''
return '' if dt is None else datetime.strftime(dt, '%Y-%m-%d %H:%M:%S') | return service_mock
def format_datetime(dt): | random_line_split |
setup.py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f: |
requires = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_tm',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
'pyramid_layout'
]
setup(name='MyShop',
version='0.0',
description='MyShop',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='myshop',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = myshop:main
[console_scripts]
initialize_MyShop_db = myshop.scripts.initializedb:main
""",
) | CHANGES = f.read() | random_line_split |
problem.rs | struct Container(i32, i32);
// A trait which checks if 2 items are stored inside of container.
// Also retrieves first or last value.
trait Contains<A, B> {
fn contains(&self, &A, &B) -> bool; // Explicitly requires `A` and `B`.
fn first(&self) -> i32; // Doesn't explicitly require `A` or `B`.
fn last(&self) -> i32; // Doesn't explicitly require `A` or `B`.
}
impl Contains<i32, i32> for Container {
// True if the numbers stored are equal.
fn contains(&self, number_1: &i32, number_2: &i32) -> bool {
(&self.0 == number_1) && (&self.1 == number_2)
}
// Grab the first number.
fn first(&self) -> i32 { self.0 }
// Grab the last number.
fn | (&self) -> i32 { self.1 }
}
// `C` contains `A` and `B`. In light of that, having to express `A` and
// `B` again is a nuisance.
fn difference<A, B, C>(container: &C) -> i32 where
C: Contains<A, B> {
container.last() - container.first()
}
fn main() {
let number_1 = 3;
let number_2 = 10;
let container = Container(number_1, number_2);
println!("Does container contain {} and {}: {}",
&number_1, &number_2,
container.contains(&number_1, &number_2));
println!("First number: {}", container.first());
println!("Last number: {}", container.last());
println!("The difference is: {}", difference(&container));
}
| last | identifier_name |
problem.rs | struct Container(i32, i32);
// A trait which checks if 2 items are stored inside of container.
// Also retrieves first or last value.
trait Contains<A, B> {
fn contains(&self, &A, &B) -> bool; // Explicitly requires `A` and `B`.
fn first(&self) -> i32; // Doesn't explicitly require `A` or `B`.
fn last(&self) -> i32; // Doesn't explicitly require `A` or `B`.
}
impl Contains<i32, i32> for Container {
// True if the numbers stored are equal.
fn contains(&self, number_1: &i32, number_2: &i32) -> bool {
(&self.0 == number_1) && (&self.1 == number_2)
}
// Grab the first number.
fn first(&self) -> i32 { self.0 }
// Grab the last number.
fn last(&self) -> i32 { self.1 }
}
// `C` contains `A` and `B`. In light of that, having to express `A` and
// `B` again is a nuisance.
fn difference<A, B, C>(container: &C) -> i32 where
C: Contains<A, B> |
fn main() {
let number_1 = 3;
let number_2 = 10;
let container = Container(number_1, number_2);
println!("Does container contain {} and {}: {}",
&number_1, &number_2,
container.contains(&number_1, &number_2));
println!("First number: {}", container.first());
println!("Last number: {}", container.last());
println!("The difference is: {}", difference(&container));
}
| {
container.last() - container.first()
} | identifier_body |
problem.rs | struct Container(i32, i32);
// A trait which checks if 2 items are stored inside of container.
// Also retrieves first or last value.
trait Contains<A, B> { | }
impl Contains<i32, i32> for Container {
// True if the numbers stored are equal.
fn contains(&self, number_1: &i32, number_2: &i32) -> bool {
(&self.0 == number_1) && (&self.1 == number_2)
}
// Grab the first number.
fn first(&self) -> i32 { self.0 }
// Grab the last number.
fn last(&self) -> i32 { self.1 }
}
// `C` contains `A` and `B`. In light of that, having to express `A` and
// `B` again is a nuisance.
fn difference<A, B, C>(container: &C) -> i32 where
C: Contains<A, B> {
container.last() - container.first()
}
fn main() {
let number_1 = 3;
let number_2 = 10;
let container = Container(number_1, number_2);
println!("Does container contain {} and {}: {}",
&number_1, &number_2,
container.contains(&number_1, &number_2));
println!("First number: {}", container.first());
println!("Last number: {}", container.last());
println!("The difference is: {}", difference(&container));
} | fn contains(&self, &A, &B) -> bool; // Explicitly requires `A` and `B`.
fn first(&self) -> i32; // Doesn't explicitly require `A` or `B`.
fn last(&self) -> i32; // Doesn't explicitly require `A` or `B`. | random_line_split |
processes.py | """This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
"""To be inherited for each language's NER declarations.
>>> from cltk.core.data_types import Doc
>>> from cltk.ner.processes import NERProcess
>>> from cltk.core.data_types import Process
>>> issubclass(NERProcess, Process)
True
>>> emb_proc = NERProcess()
"""
language: str = None
@cachedproperty
def algorithm(self):
return tag_ner
def run(self, input_doc: Doc) -> Doc:
output_doc = deepcopy(input_doc)
ner_obj = self.algorithm
entity_values = ner_obj(
iso_code=self.language, input_tokens=input_doc.tokens
) # type: List[Any]
for index, word_obj in enumerate(output_doc.words):
word_obj.named_entity = entity_values[index]
output_doc.words[index] = word_obj
return output_doc
@dataclass
class GreekNERProcess(NERProcess):
"""The default Greek NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = GreekNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[7].string
'ὁ'
>>> output_doc.words[7].named_entity
False
>>> output_doc.words[8].string
'Ξανθίππου'
>>> output_doc.words[8].named_entity
False
|
@dataclass
class OldEnglishNERProcess(NERProcess):
"""The default OE NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = get_example_text(iso_code="ang")
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = OldEnglishNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[2].string, output_doc.words[2].named_entity
('Gardena', 'LOCATION')
"""
language: str = "ang"
description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
"""The default Latin NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
>>> a_process = LatinNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
>>> [word.named_entity for word in output_doc.words][:20]
['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
"""
language: str = "lat"
description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
"""The default Old French NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
>>> a_process = OldFrenchNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
>>> output_doc.words[30].string
'Bretaigne'
>>> output_doc.words[30].named_entity
'LOC'
>>> output_doc.words[31].named_entity
False
"""
language: str = "fro"
description: str = "Default NER for Old French." | """
language: str = "grc"
description: str = "Default NER for Greek." | random_line_split |
processes.py | """This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
"""To be inherited for each language's NER declarations.
>>> from cltk.core.data_types import Doc
>>> from cltk.ner.processes import NERProcess
>>> from cltk.core.data_types import Process
>>> issubclass(NERProcess, Process)
True
>>> emb_proc = NERProcess()
"""
language: str = None
@cachedproperty
def algorithm(self):
return tag_ner
def run(self, input_doc: Doc) -> Doc:
output_doc = deepcopy(input_doc)
ner_obj = self.algorithm
entity_values = ner_obj(
iso_code=self.language, input_tokens=input_doc.tokens
) # type: List[Any]
for index, word_obj in enumerate(output_doc.words):
|
return output_doc
@dataclass
class GreekNERProcess(NERProcess):
"""The default Greek NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = GreekNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[7].string
'ὁ'
>>> output_doc.words[7].named_entity
False
>>> output_doc.words[8].string
'Ξανθίππου'
>>> output_doc.words[8].named_entity
False
"""
language: str = "grc"
description: str = "Default NER for Greek."
@dataclass
class OldEnglishNERProcess(NERProcess):
"""The default OE NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = get_example_text(iso_code="ang")
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = OldEnglishNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[2].string, output_doc.words[2].named_entity
('Gardena', 'LOCATION')
"""
language: str = "ang"
description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
"""The default Latin NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
>>> a_process = LatinNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
>>> [word.named_entity for word in output_doc.words][:20]
['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
"""
language: str = "lat"
description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
"""The default Old French NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
>>> a_process = OldFrenchNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
>>> output_doc.words[30].string
'Bretaigne'
>>> output_doc.words[30].named_entity
'LOC'
>>> output_doc.words[31].named_entity
False
"""
language: str = "fro"
description: str = "Default NER for Old French."
| word_obj.named_entity = entity_values[index]
output_doc.words[index] = word_obj | conditional_block |
processes.py | """This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
"""To be inherited for each language's NER declarations.
>>> from cltk.core.data_types import Doc
>>> from cltk.ner.processes import NERProcess
>>> from cltk.core.data_types import Process
>>> issubclass(NERProcess, Process)
True
>>> emb_proc = NERProcess()
"""
language: str = None
@cachedproperty
def | (self):
return tag_ner
def run(self, input_doc: Doc) -> Doc:
output_doc = deepcopy(input_doc)
ner_obj = self.algorithm
entity_values = ner_obj(
iso_code=self.language, input_tokens=input_doc.tokens
) # type: List[Any]
for index, word_obj in enumerate(output_doc.words):
word_obj.named_entity = entity_values[index]
output_doc.words[index] = word_obj
return output_doc
@dataclass
class GreekNERProcess(NERProcess):
"""The default Greek NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = GreekNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[7].string
'ὁ'
>>> output_doc.words[7].named_entity
False
>>> output_doc.words[8].string
'Ξανθίππου'
>>> output_doc.words[8].named_entity
False
"""
language: str = "grc"
description: str = "Default NER for Greek."
@dataclass
class OldEnglishNERProcess(NERProcess):
"""The default OE NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = get_example_text(iso_code="ang")
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = OldEnglishNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[2].string, output_doc.words[2].named_entity
('Gardena', 'LOCATION')
"""
language: str = "ang"
description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
"""The default Latin NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
>>> a_process = LatinNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
>>> [word.named_entity for word in output_doc.words][:20]
['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
"""
language: str = "lat"
description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
"""The default Old French NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
>>> a_process = OldFrenchNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
>>> output_doc.words[30].string
'Bretaigne'
>>> output_doc.words[30].named_entity
'LOC'
>>> output_doc.words[31].named_entity
False
"""
language: str = "fro"
description: str = "Default NER for Old French."
| algorithm | identifier_name |
processes.py | """This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
"""To be inherited for each language's NER declarations.
>>> from cltk.core.data_types import Doc
>>> from cltk.ner.processes import NERProcess
>>> from cltk.core.data_types import Process
>>> issubclass(NERProcess, Process)
True
>>> emb_proc = NERProcess()
"""
language: str = None
@cachedproperty
def algorithm(self):
return tag_ner
def run(self, input_doc: Doc) -> Doc:
output_doc = deepcopy(input_doc)
ner_obj = self.algorithm
entity_values = ner_obj(
iso_code=self.language, input_tokens=input_doc.tokens
) # type: List[Any]
for index, word_obj in enumerate(output_doc.words):
word_obj.named_entity = entity_values[index]
output_doc.words[index] = word_obj
return output_doc
@dataclass
class GreekNERProcess(NERProcess):
"""The default Greek NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = GreekNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[7].string
'ὁ'
>>> output_doc.words[7].named_entity
False
>>> output_doc.words[8].string
'Ξανθίππου'
>>> output_doc.words[8].named_entity
False
"""
language: str = "grc"
description: str = "Default NER for Greek."
@dataclass
class OldEnglishNERProcess(NERProcess):
"""The default OE NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
| >>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
>>> a_process = LatinNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
>>> [word.named_entity for word in output_doc.words][:20]
['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
"""
language: str = "lat"
description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
"""The default Old French NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
>>> a_process = OldFrenchNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
>>> output_doc.words[30].string
'Bretaigne'
>>> output_doc.words[30].named_entity
'LOC'
>>> output_doc.words[31].named_entity
False
"""
language: str = "fro"
description: str = "Default NER for Old French."
| >>> from boltons.strutils import split_punct_ws
>>> text = get_example_text(iso_code="ang")
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = OldEnglishNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[2].string, output_doc.words[2].named_entity
('Gardena', 'LOCATION')
"""
language: str = "ang"
description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
"""The default Latin NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
| identifier_body |
schema.rs | table! {
__diesel_schema_migrations (version) {
version -> VarChar,
run_on -> Timestamp,
}
}
pub struct NewMigration<'a>(pub &'a str);
use backend::Backend;
use expression::AsExpression;
use expression::helper_types::AsExpr;
use persistable::{Insertable, ColumnInsertValue, InsertValues};
impl<'update: 'a, 'a, DB> Insertable<__diesel_schema_migrations::table, DB>
for &'update NewMigration<'a> where
DB: Backend,
(ColumnInsertValue<
__diesel_schema_migrations::version,
AsExpr<&'a str, __diesel_schema_migrations::version>,
>,): InsertValues<DB>,
{
type Values = (ColumnInsertValue<
__diesel_schema_migrations::version,
AsExpr<&'a str, __diesel_schema_migrations::version>,
>,);
| }
} | fn values(self) -> Self::Values {
(ColumnInsertValue::Expression(
__diesel_schema_migrations::version,
AsExpression::<::types::VarChar>::as_expression(self.0),
),) | random_line_split |
schema.rs | table! {
__diesel_schema_migrations (version) {
version -> VarChar,
run_on -> Timestamp,
}
}
pub struct | <'a>(pub &'a str);
use backend::Backend;
use expression::AsExpression;
use expression::helper_types::AsExpr;
use persistable::{Insertable, ColumnInsertValue, InsertValues};
impl<'update: 'a, 'a, DB> Insertable<__diesel_schema_migrations::table, DB>
for &'update NewMigration<'a> where
DB: Backend,
(ColumnInsertValue<
__diesel_schema_migrations::version,
AsExpr<&'a str, __diesel_schema_migrations::version>,
>,): InsertValues<DB>,
{
type Values = (ColumnInsertValue<
__diesel_schema_migrations::version,
AsExpr<&'a str, __diesel_schema_migrations::version>,
>,);
fn values(self) -> Self::Values {
(ColumnInsertValue::Expression(
__diesel_schema_migrations::version,
AsExpression::<::types::VarChar>::as_expression(self.0),
),)
}
}
| NewMigration | identifier_name |
autobind.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn f<T>(x: Vec<T>) -> T { return x.into_iter().next().unwrap(); }
fn g(act: |Vec<int> | -> int) -> int |
pub fn main() {
assert_eq!(g(f), 1);
let f1: |Vec<String>| -> String = f;
assert_eq!(f1(vec!["x".to_string(), "y".to_string(), "z".to_string()]),
"x".to_string());
}
| { return act(vec!(1, 2, 3)); } | identifier_body |
autobind.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn f<T>(x: Vec<T>) -> T { return x.into_iter().next().unwrap(); }
fn g(act: |Vec<int> | -> int) -> int { return act(vec!(1, 2, 3)); }
pub fn main() {
assert_eq!(g(f), 1);
let f1: |Vec<String>| -> String = f;
assert_eq!(f1(vec!["x".to_string(), "y".to_string(), "z".to_string()]),
"x".to_string());
} | // | random_line_split |
autobind.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn | <T>(x: Vec<T>) -> T { return x.into_iter().next().unwrap(); }
fn g(act: |Vec<int> | -> int) -> int { return act(vec!(1, 2, 3)); }
pub fn main() {
assert_eq!(g(f), 1);
let f1: |Vec<String>| -> String = f;
assert_eq!(f1(vec!["x".to_string(), "y".to_string(), "z".to_string()]),
"x".to_string());
}
| f | identifier_name |
global.js | $(document).ready(() => {
const href = window.location.href;
const last = href.substr(href.lastIndexOf('/') + 1);
if (last == '' || last == 'index') {
loadImageSlider();
} else if (last == 'signup' || last == 'admin-signup') {
// Validation and state population
$(".error").hide();
$("#submit").on("click", validateEvent);
$("#country").on("change", populateStates);
} else if (last.startsWith("post")) {
renderFullPost();
// Make our REST call to get similar posts
getSimilars();
}
// Event for when the login form gets submitted.
$("#login-form").on("submit", (event) => {
event.preventDefault();
$("#err").fadeOut().text("");
$.ajax({
method: "POST",
url: '/api/login-form',
dataType: "json",
data: $('#login-form').serialize(),
success: (data) => {
if (!data.status || data.status == "Bad") {
$("#err").fadeIn().text("Unknown email or incorrect password.");
} else if (data.status == "OK"){
window.location.href = "/profile";
} else if (data.status == "Attempts") {
$("#err").fadeIn().text("You've tried to use this too many times. Try again after 30 seconds.");
} else {
$("#err").fadeIn().text("This account is " + data.status + ".");
}
},
error: (err) => {
console.log(err);
}
});
});
// Event for when reset password gets clicked.
$("#reset-pass").on("click", (event) => {
event.preventDefault();
$("#err").fadeOut().text("");
$.ajax({
method: "POST",
url: '/api/reset-pass',
dataType: "json",
data: $('#login-form').serialize(),
success: (data) => {
if (data.status == "Bad") {
$("#err").fadeIn().text("Unknown email or inactivated account.");
} else if (data.status == "OK"){
$("#err").fadeIn().text("Check the email address for a new password.");
} else if (data.status == "Attempts") {
$("#err").fadeIn().text("You've tried to use this too many times. Try again after 30 seconds.");
}
},
error: (err) => {
console.log(err);
}
});
});
// Admin function.
$("#repopulate").on("click", (event) => {
$.ajax({
method: "GET",
url: '/api/repopulate',
dataType: "json", | window.location.href = "/";
} else {
console.log("There was an error.");
}
},
error: (err) => {
console.log(err);
}
});
});
}); | data: {},
success: (data) => {
if (data.status){
// Database is gone, reload | random_line_split |
suggestions.rs | #[cfg(feature = "suggestions")]
use strsim;
/// Produces a string from a given list of possible values which is similar to
/// the passed in value `v` with a certain confidence.
/// Thus in a list of possible values like ["foo", "bar"], the value "fop" will yield
/// `Some("foo")`, whereas "blark" would yield `None`.
#[cfg(feature = "suggestions")]
#[cfg_attr(feature = "lints", allow(needless_lifetimes))]
pub fn | <'a, T, I>(v: &str,
possible_values: I)
-> Option<&'a str>
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
let mut candidate: Option<(f64, &str)> = None;
for pv in possible_values.into_iter() {
let confidence = strsim::jaro_winkler(v, pv.as_ref());
if confidence > 0.8 &&
(candidate.is_none() || (candidate.as_ref().unwrap().0 < confidence)) {
candidate = Some((confidence, pv.as_ref()));
}
}
match candidate {
None => None,
Some((_, candidate)) => Some(candidate),
}
}
#[cfg(not(feature = "suggestions"))]
pub fn did_you_mean<'a, T, I>(_: &str,
_: I)
-> Option<&'a str>
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
None
}
/// A helper to determine message formatting
pub enum DidYouMeanMessageStyle {
/// Suggested value is a long flag
LongFlag,
/// Suggested value is one of various possible values
EnumValue,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn did_you_mean_possible_values() {
let p_vals = ["test", "possible", "values"];
assert_eq!(did_you_mean("tst", p_vals.iter()), Some("test"));
assert!(did_you_mean("hahaahahah", p_vals.iter()).is_none());
}
}
| did_you_mean | identifier_name |
suggestions.rs | #[cfg(feature = "suggestions")]
use strsim;
/// Produces a string from a given list of possible values which is similar to
/// the passed in value `v` with a certain confidence.
/// Thus in a list of possible values like ["foo", "bar"], the value "fop" will yield
/// `Some("foo")`, whereas "blark" would yield `None`.
#[cfg(feature = "suggestions")]
#[cfg_attr(feature = "lints", allow(needless_lifetimes))]
pub fn did_you_mean<'a, T, I>(v: &str,
possible_values: I)
-> Option<&'a str>
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
let mut candidate: Option<(f64, &str)> = None;
for pv in possible_values.into_iter() {
let confidence = strsim::jaro_winkler(v, pv.as_ref());
if confidence > 0.8 &&
(candidate.is_none() || (candidate.as_ref().unwrap().0 < confidence)) {
candidate = Some((confidence, pv.as_ref()));
}
}
match candidate {
None => None,
Some((_, candidate)) => Some(candidate),
}
}
#[cfg(not(feature = "suggestions"))]
pub fn did_you_mean<'a, T, I>(_: &str,
_: I)
-> Option<&'a str>
where T: AsRef<str> + 'a, | /// A helper to determine message formatting
pub enum DidYouMeanMessageStyle {
/// Suggested value is a long flag
LongFlag,
/// Suggested value is one of various possible values
EnumValue,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn did_you_mean_possible_values() {
let p_vals = ["test", "possible", "values"];
assert_eq!(did_you_mean("tst", p_vals.iter()), Some("test"));
assert!(did_you_mean("hahaahahah", p_vals.iter()).is_none());
}
} | I: IntoIterator<Item = &'a T>
{
None
}
| random_line_split |
suggestions.rs | #[cfg(feature = "suggestions")]
use strsim;
/// Produces a string from a given list of possible values which is similar to
/// the passed in value `v` with a certain confidence.
/// Thus in a list of possible values like ["foo", "bar"], the value "fop" will yield
/// `Some("foo")`, whereas "blark" would yield `None`.
#[cfg(feature = "suggestions")]
#[cfg_attr(feature = "lints", allow(needless_lifetimes))]
pub fn did_you_mean<'a, T, I>(v: &str,
possible_values: I)
-> Option<&'a str>
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
|
#[cfg(not(feature = "suggestions"))]
pub fn did_you_mean<'a, T, I>(_: &str,
_: I)
-> Option<&'a str>
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
None
}
/// A helper to determine message formatting
pub enum DidYouMeanMessageStyle {
/// Suggested value is a long flag
LongFlag,
/// Suggested value is one of various possible values
EnumValue,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn did_you_mean_possible_values() {
let p_vals = ["test", "possible", "values"];
assert_eq!(did_you_mean("tst", p_vals.iter()), Some("test"));
assert!(did_you_mean("hahaahahah", p_vals.iter()).is_none());
}
}
| {
let mut candidate: Option<(f64, &str)> = None;
for pv in possible_values.into_iter() {
let confidence = strsim::jaro_winkler(v, pv.as_ref());
if confidence > 0.8 &&
(candidate.is_none() || (candidate.as_ref().unwrap().0 < confidence)) {
candidate = Some((confidence, pv.as_ref()));
}
}
match candidate {
None => None,
Some((_, candidate)) => Some(candidate),
}
} | identifier_body |
outputLinkComputer.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { IMirrorModel, IWorkerContext } from 'vs/editor/common/services/editorSimpleWorker';
import { ILink } from 'vs/editor/common/modes';
import { URI } from 'vs/base/common/uri';
import * as paths from 'vs/base/common/paths';
import * as resources from 'vs/base/common/resources';
import * as strings from 'vs/base/common/strings';
import * as arrays from 'vs/base/common/arrays';
import { Range } from 'vs/editor/common/core/range';
export interface ICreateData {
workspaceFolders: string[];
}
export interface IResourceCreator {
toResource: (folderRelativePath: string) => URI | null;
}
export class OutputLinkComputer {
private ctx: IWorkerContext;
private patterns: Map<URI /* folder uri */, RegExp[]>;
| (ctx: IWorkerContext, createData: ICreateData) {
this.ctx = ctx;
this.patterns = new Map<URI, RegExp[]>();
this.computePatterns(createData);
}
private computePatterns(createData: ICreateData): void {
// Produce patterns for each workspace root we are configured with
// This means that we will be able to detect links for paths that
// contain any of the workspace roots as segments.
const workspaceFolders = createData.workspaceFolders.map(r => URI.parse(r));
workspaceFolders.forEach(workspaceFolder => {
const patterns = OutputLinkComputer.createPatterns(workspaceFolder);
this.patterns.set(workspaceFolder, patterns);
});
}
private getModel(uri: string): IMirrorModel | null {
const models = this.ctx.getMirrorModels();
for (let i = 0; i < models.length; i++) {
const model = models[i];
if (model.uri.toString() === uri) {
return model;
}
}
return null;
}
public computeLinks(uri: string): Promise<ILink[]> {
const model = this.getModel(uri);
if (!model) {
return Promise.resolve([]);
}
const links: ILink[] = [];
const lines = model.getValue().split(/\r\n|\r|\n/);
// For each workspace root patterns
this.patterns.forEach((folderPatterns, folderUri) => {
const resourceCreator: IResourceCreator = {
toResource: (folderRelativePath: string): URI | null => {
if (typeof folderRelativePath === 'string') {
return resources.joinPath(folderUri, folderRelativePath);
}
return null;
}
};
for (let i = 0, len = lines.length; i < len; i++) {
links.push(...OutputLinkComputer.detectLinks(lines[i], i + 1, folderPatterns, resourceCreator));
}
});
return Promise.resolve(links);
}
public static createPatterns(workspaceFolder: URI): RegExp[] {
const patterns: RegExp[] = [];
const workspaceFolderPath = workspaceFolder.scheme === 'file' ? workspaceFolder.fsPath : workspaceFolder.path;
const workspaceFolderVariants = arrays.distinct([
paths.normalize(workspaceFolderPath, true),
paths.normalize(workspaceFolderPath, false)
]);
workspaceFolderVariants.forEach(workspaceFolderVariant => {
const validPathCharacterPattern = '[^\\s\\(\\):<>"]';
const validPathCharacterOrSpacePattern = `(?:${validPathCharacterPattern}| ${validPathCharacterPattern})`;
const pathPattern = `${validPathCharacterOrSpacePattern}+\\.${validPathCharacterPattern}+`;
const strictPathPattern = `${validPathCharacterPattern}+`;
// Example: /workspaces/express/server.js on line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}) on line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/express/server.js:line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}):line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/mankala/Features.ts(45): error
// Example: /workspaces/mankala/Features.ts (45): error
// Example: /workspaces/mankala/Features.ts(45,18): error
// Example: /workspaces/mankala/Features.ts (45,18): error
// Example: /workspaces/mankala/Features Special.ts (45,18): error
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern})(\\s?\\((\\d+)(,(\\d+))?)\\)`, 'gi'));
// Example: at /workspaces/mankala/Game.ts
// Example: at /workspaces/mankala/Game.ts:336
// Example: at /workspaces/mankala/Game.ts:336:9
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${strictPathPattern})(:(\\d+))?(:(\\d+))?`, 'gi'));
});
return patterns;
}
/**
* Detect links. Made public static to allow for tests.
*/
public static detectLinks(line: string, lineIndex: number, patterns: RegExp[], resourceCreator: IResourceCreator): ILink[] {
const links: ILink[] = [];
patterns.forEach(pattern => {
pattern.lastIndex = 0; // the holy grail of software development
let match: RegExpExecArray | null;
let offset = 0;
while ((match = pattern.exec(line)) !== null) {
// Convert the relative path information to a resource that we can use in links
const folderRelativePath = strings.rtrim(match[1], '.').replace(/\\/g, '/'); // remove trailing "." that likely indicate end of sentence
let resourceString: string | undefined;
try {
const resource = resourceCreator.toResource(folderRelativePath);
if (resource) {
resourceString = resource.toString();
}
} catch (error) {
continue; // we might find an invalid URI and then we dont want to loose all other links
}
// Append line/col information to URI if matching
if (match[3]) {
const lineNumber = match[3];
if (match[5]) {
const columnNumber = match[5];
resourceString = strings.format('{0}#{1},{2}', resourceString, lineNumber, columnNumber);
} else {
resourceString = strings.format('{0}#{1}', resourceString, lineNumber);
}
}
const fullMatch = strings.rtrim(match[0], '.'); // remove trailing "." that likely indicate end of sentence
const index = line.indexOf(fullMatch, offset);
offset += index + fullMatch.length;
const linkRange = {
startColumn: index + 1,
startLineNumber: lineIndex,
endColumn: index + 1 + fullMatch.length,
endLineNumber: lineIndex
};
if (links.some(link => Range.areIntersectingOrTouching(link.range, linkRange))) {
return; // Do not detect duplicate links
}
links.push({
range: linkRange,
url: resourceString
});
}
});
return links;
}
}
export function create(ctx: IWorkerContext, createData: ICreateData): OutputLinkComputer {
return new OutputLinkComputer(ctx, createData);
}
| constructor | identifier_name |
outputLinkComputer.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { IMirrorModel, IWorkerContext } from 'vs/editor/common/services/editorSimpleWorker';
import { ILink } from 'vs/editor/common/modes';
import { URI } from 'vs/base/common/uri';
import * as paths from 'vs/base/common/paths';
import * as resources from 'vs/base/common/resources';
import * as strings from 'vs/base/common/strings';
import * as arrays from 'vs/base/common/arrays';
import { Range } from 'vs/editor/common/core/range';
export interface ICreateData {
workspaceFolders: string[];
}
export interface IResourceCreator {
toResource: (folderRelativePath: string) => URI | null;
}
export class OutputLinkComputer {
private ctx: IWorkerContext;
private patterns: Map<URI /* folder uri */, RegExp[]>;
constructor(ctx: IWorkerContext, createData: ICreateData) |
private computePatterns(createData: ICreateData): void {
// Produce patterns for each workspace root we are configured with
// This means that we will be able to detect links for paths that
// contain any of the workspace roots as segments.
const workspaceFolders = createData.workspaceFolders.map(r => URI.parse(r));
workspaceFolders.forEach(workspaceFolder => {
const patterns = OutputLinkComputer.createPatterns(workspaceFolder);
this.patterns.set(workspaceFolder, patterns);
});
}
private getModel(uri: string): IMirrorModel | null {
const models = this.ctx.getMirrorModels();
for (let i = 0; i < models.length; i++) {
const model = models[i];
if (model.uri.toString() === uri) {
return model;
}
}
return null;
}
public computeLinks(uri: string): Promise<ILink[]> {
const model = this.getModel(uri);
if (!model) {
return Promise.resolve([]);
}
const links: ILink[] = [];
const lines = model.getValue().split(/\r\n|\r|\n/);
// For each workspace root patterns
this.patterns.forEach((folderPatterns, folderUri) => {
const resourceCreator: IResourceCreator = {
toResource: (folderRelativePath: string): URI | null => {
if (typeof folderRelativePath === 'string') {
return resources.joinPath(folderUri, folderRelativePath);
}
return null;
}
};
for (let i = 0, len = lines.length; i < len; i++) {
links.push(...OutputLinkComputer.detectLinks(lines[i], i + 1, folderPatterns, resourceCreator));
}
});
return Promise.resolve(links);
}
public static createPatterns(workspaceFolder: URI): RegExp[] {
const patterns: RegExp[] = [];
const workspaceFolderPath = workspaceFolder.scheme === 'file' ? workspaceFolder.fsPath : workspaceFolder.path;
const workspaceFolderVariants = arrays.distinct([
paths.normalize(workspaceFolderPath, true),
paths.normalize(workspaceFolderPath, false)
]);
workspaceFolderVariants.forEach(workspaceFolderVariant => {
const validPathCharacterPattern = '[^\\s\\(\\):<>"]';
const validPathCharacterOrSpacePattern = `(?:${validPathCharacterPattern}| ${validPathCharacterPattern})`;
const pathPattern = `${validPathCharacterOrSpacePattern}+\\.${validPathCharacterPattern}+`;
const strictPathPattern = `${validPathCharacterPattern}+`;
// Example: /workspaces/express/server.js on line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}) on line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/express/server.js:line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}):line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/mankala/Features.ts(45): error
// Example: /workspaces/mankala/Features.ts (45): error
// Example: /workspaces/mankala/Features.ts(45,18): error
// Example: /workspaces/mankala/Features.ts (45,18): error
// Example: /workspaces/mankala/Features Special.ts (45,18): error
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern})(\\s?\\((\\d+)(,(\\d+))?)\\)`, 'gi'));
// Example: at /workspaces/mankala/Game.ts
// Example: at /workspaces/mankala/Game.ts:336
// Example: at /workspaces/mankala/Game.ts:336:9
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${strictPathPattern})(:(\\d+))?(:(\\d+))?`, 'gi'));
});
return patterns;
}
/**
* Detect links. Made public static to allow for tests.
*/
public static detectLinks(line: string, lineIndex: number, patterns: RegExp[], resourceCreator: IResourceCreator): ILink[] {
const links: ILink[] = [];
patterns.forEach(pattern => {
pattern.lastIndex = 0; // the holy grail of software development
let match: RegExpExecArray | null;
let offset = 0;
while ((match = pattern.exec(line)) !== null) {
// Convert the relative path information to a resource that we can use in links
const folderRelativePath = strings.rtrim(match[1], '.').replace(/\\/g, '/'); // remove trailing "." that likely indicate end of sentence
let resourceString: string | undefined;
try {
const resource = resourceCreator.toResource(folderRelativePath);
if (resource) {
resourceString = resource.toString();
}
} catch (error) {
continue; // we might find an invalid URI and then we dont want to loose all other links
}
// Append line/col information to URI if matching
if (match[3]) {
const lineNumber = match[3];
if (match[5]) {
const columnNumber = match[5];
resourceString = strings.format('{0}#{1},{2}', resourceString, lineNumber, columnNumber);
} else {
resourceString = strings.format('{0}#{1}', resourceString, lineNumber);
}
}
const fullMatch = strings.rtrim(match[0], '.'); // remove trailing "." that likely indicate end of sentence
const index = line.indexOf(fullMatch, offset);
offset += index + fullMatch.length;
const linkRange = {
startColumn: index + 1,
startLineNumber: lineIndex,
endColumn: index + 1 + fullMatch.length,
endLineNumber: lineIndex
};
if (links.some(link => Range.areIntersectingOrTouching(link.range, linkRange))) {
return; // Do not detect duplicate links
}
links.push({
range: linkRange,
url: resourceString
});
}
});
return links;
}
}
export function create(ctx: IWorkerContext, createData: ICreateData): OutputLinkComputer {
return new OutputLinkComputer(ctx, createData);
}
| {
this.ctx = ctx;
this.patterns = new Map<URI, RegExp[]>();
this.computePatterns(createData);
} | identifier_body |
outputLinkComputer.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { IMirrorModel, IWorkerContext } from 'vs/editor/common/services/editorSimpleWorker';
import { ILink } from 'vs/editor/common/modes';
import { URI } from 'vs/base/common/uri';
import * as paths from 'vs/base/common/paths';
import * as resources from 'vs/base/common/resources';
import * as strings from 'vs/base/common/strings';
import * as arrays from 'vs/base/common/arrays';
import { Range } from 'vs/editor/common/core/range';
export interface ICreateData {
workspaceFolders: string[];
}
export interface IResourceCreator {
toResource: (folderRelativePath: string) => URI | null;
}
export class OutputLinkComputer {
private ctx: IWorkerContext;
private patterns: Map<URI /* folder uri */, RegExp[]>;
constructor(ctx: IWorkerContext, createData: ICreateData) {
this.ctx = ctx;
this.patterns = new Map<URI, RegExp[]>();
this.computePatterns(createData);
}
private computePatterns(createData: ICreateData): void {
// Produce patterns for each workspace root we are configured with
// This means that we will be able to detect links for paths that
// contain any of the workspace roots as segments.
const workspaceFolders = createData.workspaceFolders.map(r => URI.parse(r));
workspaceFolders.forEach(workspaceFolder => {
const patterns = OutputLinkComputer.createPatterns(workspaceFolder);
this.patterns.set(workspaceFolder, patterns);
});
}
private getModel(uri: string): IMirrorModel | null {
const models = this.ctx.getMirrorModels();
for (let i = 0; i < models.length; i++) {
const model = models[i];
if (model.uri.toString() === uri) {
return model;
}
}
return null;
}
public computeLinks(uri: string): Promise<ILink[]> {
const model = this.getModel(uri);
if (!model) {
return Promise.resolve([]);
}
const links: ILink[] = [];
const lines = model.getValue().split(/\r\n|\r|\n/);
// For each workspace root patterns
this.patterns.forEach((folderPatterns, folderUri) => {
const resourceCreator: IResourceCreator = {
toResource: (folderRelativePath: string): URI | null => {
if (typeof folderRelativePath === 'string') {
return resources.joinPath(folderUri, folderRelativePath);
}
return null;
}
};
for (let i = 0, len = lines.length; i < len; i++) {
links.push(...OutputLinkComputer.detectLinks(lines[i], i + 1, folderPatterns, resourceCreator));
}
});
return Promise.resolve(links);
}
public static createPatterns(workspaceFolder: URI): RegExp[] {
const patterns: RegExp[] = [];
const workspaceFolderPath = workspaceFolder.scheme === 'file' ? workspaceFolder.fsPath : workspaceFolder.path;
const workspaceFolderVariants = arrays.distinct([
paths.normalize(workspaceFolderPath, true),
paths.normalize(workspaceFolderPath, false)
]);
workspaceFolderVariants.forEach(workspaceFolderVariant => {
const validPathCharacterPattern = '[^\\s\\(\\):<>"]';
const validPathCharacterOrSpacePattern = `(?:${validPathCharacterPattern}| ${validPathCharacterPattern})`;
const pathPattern = `${validPathCharacterOrSpacePattern}+\\.${validPathCharacterPattern}+`;
const strictPathPattern = `${validPathCharacterPattern}+`;
// Example: /workspaces/express/server.js on line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}) on line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/express/server.js:line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}):line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/mankala/Features.ts(45): error
// Example: /workspaces/mankala/Features.ts (45): error
// Example: /workspaces/mankala/Features.ts(45,18): error | // Example: /workspaces/mankala/Features Special.ts (45,18): error
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern})(\\s?\\((\\d+)(,(\\d+))?)\\)`, 'gi'));
// Example: at /workspaces/mankala/Game.ts
// Example: at /workspaces/mankala/Game.ts:336
// Example: at /workspaces/mankala/Game.ts:336:9
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${strictPathPattern})(:(\\d+))?(:(\\d+))?`, 'gi'));
});
return patterns;
}
/**
* Detect links. Made public static to allow for tests.
*/
public static detectLinks(line: string, lineIndex: number, patterns: RegExp[], resourceCreator: IResourceCreator): ILink[] {
const links: ILink[] = [];
patterns.forEach(pattern => {
pattern.lastIndex = 0; // the holy grail of software development
let match: RegExpExecArray | null;
let offset = 0;
while ((match = pattern.exec(line)) !== null) {
// Convert the relative path information to a resource that we can use in links
const folderRelativePath = strings.rtrim(match[1], '.').replace(/\\/g, '/'); // remove trailing "." that likely indicate end of sentence
let resourceString: string | undefined;
try {
const resource = resourceCreator.toResource(folderRelativePath);
if (resource) {
resourceString = resource.toString();
}
} catch (error) {
continue; // we might find an invalid URI and then we dont want to loose all other links
}
// Append line/col information to URI if matching
if (match[3]) {
const lineNumber = match[3];
if (match[5]) {
const columnNumber = match[5];
resourceString = strings.format('{0}#{1},{2}', resourceString, lineNumber, columnNumber);
} else {
resourceString = strings.format('{0}#{1}', resourceString, lineNumber);
}
}
const fullMatch = strings.rtrim(match[0], '.'); // remove trailing "." that likely indicate end of sentence
const index = line.indexOf(fullMatch, offset);
offset += index + fullMatch.length;
const linkRange = {
startColumn: index + 1,
startLineNumber: lineIndex,
endColumn: index + 1 + fullMatch.length,
endLineNumber: lineIndex
};
if (links.some(link => Range.areIntersectingOrTouching(link.range, linkRange))) {
return; // Do not detect duplicate links
}
links.push({
range: linkRange,
url: resourceString
});
}
});
return links;
}
}
export function create(ctx: IWorkerContext, createData: ICreateData): OutputLinkComputer {
return new OutputLinkComputer(ctx, createData);
} | // Example: /workspaces/mankala/Features.ts (45,18): error | random_line_split |
outputLinkComputer.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { IMirrorModel, IWorkerContext } from 'vs/editor/common/services/editorSimpleWorker';
import { ILink } from 'vs/editor/common/modes';
import { URI } from 'vs/base/common/uri';
import * as paths from 'vs/base/common/paths';
import * as resources from 'vs/base/common/resources';
import * as strings from 'vs/base/common/strings';
import * as arrays from 'vs/base/common/arrays';
import { Range } from 'vs/editor/common/core/range';
export interface ICreateData {
workspaceFolders: string[];
}
export interface IResourceCreator {
toResource: (folderRelativePath: string) => URI | null;
}
export class OutputLinkComputer {
private ctx: IWorkerContext;
private patterns: Map<URI /* folder uri */, RegExp[]>;
constructor(ctx: IWorkerContext, createData: ICreateData) {
this.ctx = ctx;
this.patterns = new Map<URI, RegExp[]>();
this.computePatterns(createData);
}
private computePatterns(createData: ICreateData): void {
// Produce patterns for each workspace root we are configured with
// This means that we will be able to detect links for paths that
// contain any of the workspace roots as segments.
const workspaceFolders = createData.workspaceFolders.map(r => URI.parse(r));
workspaceFolders.forEach(workspaceFolder => {
const patterns = OutputLinkComputer.createPatterns(workspaceFolder);
this.patterns.set(workspaceFolder, patterns);
});
}
private getModel(uri: string): IMirrorModel | null {
const models = this.ctx.getMirrorModels();
for (let i = 0; i < models.length; i++) {
const model = models[i];
if (model.uri.toString() === uri) {
return model;
}
}
return null;
}
public computeLinks(uri: string): Promise<ILink[]> {
const model = this.getModel(uri);
if (!model) {
return Promise.resolve([]);
}
const links: ILink[] = [];
const lines = model.getValue().split(/\r\n|\r|\n/);
// For each workspace root patterns
this.patterns.forEach((folderPatterns, folderUri) => {
const resourceCreator: IResourceCreator = {
toResource: (folderRelativePath: string): URI | null => {
if (typeof folderRelativePath === 'string') {
return resources.joinPath(folderUri, folderRelativePath);
}
return null;
}
};
for (let i = 0, len = lines.length; i < len; i++) {
links.push(...OutputLinkComputer.detectLinks(lines[i], i + 1, folderPatterns, resourceCreator));
}
});
return Promise.resolve(links);
}
public static createPatterns(workspaceFolder: URI): RegExp[] {
const patterns: RegExp[] = [];
const workspaceFolderPath = workspaceFolder.scheme === 'file' ? workspaceFolder.fsPath : workspaceFolder.path;
const workspaceFolderVariants = arrays.distinct([
paths.normalize(workspaceFolderPath, true),
paths.normalize(workspaceFolderPath, false)
]);
workspaceFolderVariants.forEach(workspaceFolderVariant => {
const validPathCharacterPattern = '[^\\s\\(\\):<>"]';
const validPathCharacterOrSpacePattern = `(?:${validPathCharacterPattern}| ${validPathCharacterPattern})`;
const pathPattern = `${validPathCharacterOrSpacePattern}+\\.${validPathCharacterPattern}+`;
const strictPathPattern = `${validPathCharacterPattern}+`;
// Example: /workspaces/express/server.js on line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}) on line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/express/server.js:line 8, column 13
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern}):line ((\\d+)(, column (\\d+))?)`, 'gi'));
// Example: /workspaces/mankala/Features.ts(45): error
// Example: /workspaces/mankala/Features.ts (45): error
// Example: /workspaces/mankala/Features.ts(45,18): error
// Example: /workspaces/mankala/Features.ts (45,18): error
// Example: /workspaces/mankala/Features Special.ts (45,18): error
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${pathPattern})(\\s?\\((\\d+)(,(\\d+))?)\\)`, 'gi'));
// Example: at /workspaces/mankala/Game.ts
// Example: at /workspaces/mankala/Game.ts:336
// Example: at /workspaces/mankala/Game.ts:336:9
patterns.push(new RegExp(strings.escapeRegExpCharacters(workspaceFolderVariant) + `(${strictPathPattern})(:(\\d+))?(:(\\d+))?`, 'gi'));
});
return patterns;
}
/**
* Detect links. Made public static to allow for tests.
*/
public static detectLinks(line: string, lineIndex: number, patterns: RegExp[], resourceCreator: IResourceCreator): ILink[] {
const links: ILink[] = [];
patterns.forEach(pattern => {
pattern.lastIndex = 0; // the holy grail of software development
let match: RegExpExecArray | null;
let offset = 0;
while ((match = pattern.exec(line)) !== null) {
// Convert the relative path information to a resource that we can use in links
const folderRelativePath = strings.rtrim(match[1], '.').replace(/\\/g, '/'); // remove trailing "." that likely indicate end of sentence
let resourceString: string | undefined;
try {
const resource = resourceCreator.toResource(folderRelativePath);
if (resource) {
resourceString = resource.toString();
}
} catch (error) {
continue; // we might find an invalid URI and then we dont want to loose all other links
}
// Append line/col information to URI if matching
if (match[3]) {
const lineNumber = match[3];
if (match[5]) {
const columnNumber = match[5];
resourceString = strings.format('{0}#{1},{2}', resourceString, lineNumber, columnNumber);
} else {
resourceString = strings.format('{0}#{1}', resourceString, lineNumber);
}
}
const fullMatch = strings.rtrim(match[0], '.'); // remove trailing "." that likely indicate end of sentence
const index = line.indexOf(fullMatch, offset);
offset += index + fullMatch.length;
const linkRange = {
startColumn: index + 1,
startLineNumber: lineIndex,
endColumn: index + 1 + fullMatch.length,
endLineNumber: lineIndex
};
if (links.some(link => Range.areIntersectingOrTouching(link.range, linkRange))) |
links.push({
range: linkRange,
url: resourceString
});
}
});
return links;
}
}
export function create(ctx: IWorkerContext, createData: ICreateData): OutputLinkComputer {
return new OutputLinkComputer(ctx, createData);
}
| {
return; // Do not detect duplicate links
} | conditional_block |
workqueue.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use libc::funcs::posix88::unistd::usleep;
use rand::{Rng, XorShiftRng, weak_rng};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{Receiver, Sender, channel};
use task::spawn_named;
use task_state;
/// A unit of work.
///
/// # Type parameters
///
/// - `QueueData`: global custom data for the entire work queue.
/// - `WorkData`: custom data specific to each unit of work.
pub struct WorkUnit<QueueData, WorkData> {
/// The function to execute.
pub fun: extern "Rust" fn(WorkData, &mut WorkerProxy<QueueData, WorkData>),
/// Arbitrary data.
pub data: WorkData,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QueueData: 'static, WorkData: 'static> {
/// Tells the worker to start work.
Start(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUsize, *const QueueData),
/// Tells the worker to stop. It can be restarted again with a `WorkerMsg::Start`.
Stop,
/// Tells the worker to measure the heap size of its TLS using the supplied function.
HeapSizeOfTLS(fn() -> usize),
/// Tells the worker thread to terminate.
Exit,
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerMsg<QueueData, WorkData> {}
/// Messages to the supervisor.
enum SupervisorMsg<QueueData: 'static, WorkData: 'static> {
Finished,
HeapSizeOfTLS(usize),
ReturnDeque(usize, Worker<WorkUnit<QueueData, WorkData>>),
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for SupervisorMsg<QueueData, WorkData> {}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QueueData: 'static, WorkData: 'static> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QueueData, WorkData>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QueueData, WorkData>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QueueData, WorkData>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QueueData: 'static, WorkData: 'static> {
/// The index of this worker.
index: usize,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QueueData, WorkData>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QueueData, WorkData>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: Vec<Stealer<WorkUnit<QueueData, WorkData>>>,
/// The random number generator for this worker.
rng: XorShiftRng,
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerThread<QueueData, WorkData> {}
const SPINS_UNTIL_BACKOFF: u32 = 128;
const BACKOFF_INCREMENT_IN_US: u32 = 5;
const BACKOFFS_UNTIL_CONTROL_CHECK: u32 = 6;
fn next_power_of_two(mut v: u32) -> u32 {
v -= 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v += 1;
v
}
impl<QueueData: Sync, WorkData: Send> WorkerThread<QueueData, WorkData> {
/// The main logic. This function starts up the worker and listens for
/// messages.
fn start(&mut self) {
let deque_index_mask = next_power_of_two(self.other_deques.len() as u32) - 1;
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv().unwrap() {
WorkerMsg::Start(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
WorkerMsg::Stop => panic!("unexpected stop message"),
WorkerMsg::Exit => return,
WorkerMsg::HeapSizeOfTLS(f) => {
self.chan.send(SupervisorMsg::HeapSizeOfTLS(f())).unwrap();
continue;
}
};
let mut back_off_sleep = 0 as u32;
// We're off!
'outer: loop {
let work_unit;
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
loop {
// Don't just use `rand % len` because that's slow on ARM.
let mut victim;
loop {
victim = self.rng.next_u32() & deque_index_mask;
if (victim as usize) < self.other_deques.len() {
break
}
}
match self.other_deques[victim as usize].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
back_off_sleep = 0 as u32;
break
}
}
if i > SPINS_UNTIL_BACKOFF {
if back_off_sleep >= BACKOFF_INCREMENT_IN_US *
BACKOFFS_UNTIL_CONTROL_CHECK {
match self.port.try_recv() {
Ok(WorkerMsg::Stop) => break 'outer,
Ok(WorkerMsg::Exit) => return,
Ok(_) => panic!("unexpected message"),
_ => {}
}
}
unsafe {
usleep(back_off_sleep as u32);
}
back_off_sleep += BACKOFF_INCREMENT_IN_US;
i = 0
} else {
i += 1
}
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
// queue_data is kept alive in the stack frame of
// WorkQueue::run until we send the
// SupervisorMsg::ReturnDeque message below.
queue_data: unsafe { &*queue_data },
worker_index: self.index as u8,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, Ordering::Release) == 1 {
self.chan.send(SupervisorMsg::Finished).unwrap()
}
}
}
// Give the deque back to the supervisor.
self.chan.send(SupervisorMsg::ReturnDeque(self.index, deque)).unwrap()
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a, QueueData: 'a, WorkData: 'a> {
worker: &'a mut Worker<WorkUnit<QueueData, WorkData>>,
ref_count: *mut AtomicUsize,
queue_data: &'a QueueData,
worker_index: u8,
}
impl<'a, QueueData: 'static, WorkData: Send + 'static> WorkerProxy<'a, QueueData, WorkData> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
unsafe {
drop((*self.ref_count).fetch_add(1, Ordering::Relaxed));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data(&self) -> &'a QueueData |
/// Retrieves the index of the worker.
#[inline]
pub fn worker_index(&self) -> u8 {
self.worker_index
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QueueData: 'static, WorkData: 'static> {
/// Information about each of the workers.
workers: Vec<WorkerInfo<QueueData, WorkData>>,
/// A port on which deques can be received from the workers.
port: Receiver<SupervisorMsg<QueueData, WorkData>>,
/// The amount of work that has been enqueued.
work_count: usize,
}
impl<QueueData: Sync, WorkData: Send> WorkQueue<QueueData, WorkData> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str,
state: task_state::TaskState,
thread_count: usize) -> WorkQueue<QueueData, WorkData> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (vec!(), vec!());
for i in 0..thread_count {
let (worker_chan, worker_port) = channel();
let pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: vec!(),
rng: weak_rng(),
});
}
// Connect workers to one another.
for (i, mut thread) in threads.iter_mut().enumerate() {
for (j, info) in infos.iter().enumerate() {
if i != j {
thread.other_deques.push(info.thief.clone())
}
}
assert!(thread.other_deques.len() == thread_count - 1)
}
// Spawn threads.
for (i, thread) in threads.into_iter().enumerate() {
spawn_named(
format!("{} worker {}/{}", task_name, i + 1, thread_count),
move || {
task_state::initialize(state | task_state::IN_WORKER);
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
let deque = &mut self.workers[0].deque;
match *deque {
None => {
panic!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self, data: &QueueData) {
// Tell the workers to start.
let mut work_count = AtomicUsize::new(self.work_count);
for worker in &mut self.workers {
worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(),
&mut work_count,
data)).unwrap()
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in &self.workers {
worker.chan.send(WorkerMsg::Stop).unwrap()
}
// Get our deques back.
for _ in 0..self.workers.len() {
match self.port.recv().unwrap() {
SupervisorMsg::ReturnDeque(index, deque) => self.workers[index].deque = Some(deque),
SupervisorMsg::HeapSizeOfTLS(_) => panic!("unexpected HeapSizeOfTLS message"),
SupervisorMsg::Finished => panic!("unexpected finished message!"),
}
}
}
/// Synchronously measure memory usage of any thread-local storage.
pub fn heap_size_of_tls(&self, f: fn() -> usize) -> Vec<usize> {
// Tell the workers to measure themselves.
for worker in &self.workers {
worker.chan.send(WorkerMsg::HeapSizeOfTLS(f)).unwrap()
}
// Wait for the workers to finish measuring themselves.
let mut sizes = vec![];
for _ in 0..self.workers.len() {
match self.port.recv().unwrap() {
SupervisorMsg::HeapSizeOfTLS(size) => {
sizes.push(size);
}
_ => panic!("unexpected message!"),
}
}
sizes
}
pub fn shutdown(&mut self) {
for worker in &self.workers {
worker.chan.send(WorkerMsg::Exit).unwrap()
}
}
}
| {
self.queue_data
} | identifier_body |
workqueue.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use libc::funcs::posix88::unistd::usleep;
use rand::{Rng, XorShiftRng, weak_rng};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{Receiver, Sender, channel};
use task::spawn_named;
use task_state;
/// A unit of work.
///
/// # Type parameters
///
/// - `QueueData`: global custom data for the entire work queue.
/// - `WorkData`: custom data specific to each unit of work.
pub struct WorkUnit<QueueData, WorkData> {
/// The function to execute.
pub fun: extern "Rust" fn(WorkData, &mut WorkerProxy<QueueData, WorkData>),
/// Arbitrary data.
pub data: WorkData,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QueueData: 'static, WorkData: 'static> {
/// Tells the worker to start work.
Start(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUsize, *const QueueData),
/// Tells the worker to stop. It can be restarted again with a `WorkerMsg::Start`.
Stop,
/// Tells the worker to measure the heap size of its TLS using the supplied function.
HeapSizeOfTLS(fn() -> usize),
/// Tells the worker thread to terminate.
Exit,
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerMsg<QueueData, WorkData> {}
/// Messages to the supervisor.
enum SupervisorMsg<QueueData: 'static, WorkData: 'static> {
Finished,
HeapSizeOfTLS(usize),
ReturnDeque(usize, Worker<WorkUnit<QueueData, WorkData>>),
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for SupervisorMsg<QueueData, WorkData> {}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QueueData: 'static, WorkData: 'static> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QueueData, WorkData>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QueueData, WorkData>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QueueData, WorkData>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QueueData: 'static, WorkData: 'static> {
/// The index of this worker.
index: usize,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QueueData, WorkData>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QueueData, WorkData>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: Vec<Stealer<WorkUnit<QueueData, WorkData>>>,
/// The random number generator for this worker.
rng: XorShiftRng,
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerThread<QueueData, WorkData> {}
const SPINS_UNTIL_BACKOFF: u32 = 128;
const BACKOFF_INCREMENT_IN_US: u32 = 5;
const BACKOFFS_UNTIL_CONTROL_CHECK: u32 = 6;
fn next_power_of_two(mut v: u32) -> u32 {
v -= 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v += 1;
v
}
impl<QueueData: Sync, WorkData: Send> WorkerThread<QueueData, WorkData> {
/// The main logic. This function starts up the worker and listens for
/// messages.
fn start(&mut self) {
let deque_index_mask = next_power_of_two(self.other_deques.len() as u32) - 1;
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv().unwrap() {
WorkerMsg::Start(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
WorkerMsg::Stop => panic!("unexpected stop message"),
WorkerMsg::Exit => return,
WorkerMsg::HeapSizeOfTLS(f) => {
self.chan.send(SupervisorMsg::HeapSizeOfTLS(f())).unwrap();
continue;
}
};
let mut back_off_sleep = 0 as u32;
// We're off!
'outer: loop {
let work_unit;
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
loop {
// Don't just use `rand % len` because that's slow on ARM.
let mut victim; | break
}
}
match self.other_deques[victim as usize].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
back_off_sleep = 0 as u32;
break
}
}
if i > SPINS_UNTIL_BACKOFF {
if back_off_sleep >= BACKOFF_INCREMENT_IN_US *
BACKOFFS_UNTIL_CONTROL_CHECK {
match self.port.try_recv() {
Ok(WorkerMsg::Stop) => break 'outer,
Ok(WorkerMsg::Exit) => return,
Ok(_) => panic!("unexpected message"),
_ => {}
}
}
unsafe {
usleep(back_off_sleep as u32);
}
back_off_sleep += BACKOFF_INCREMENT_IN_US;
i = 0
} else {
i += 1
}
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
// queue_data is kept alive in the stack frame of
// WorkQueue::run until we send the
// SupervisorMsg::ReturnDeque message below.
queue_data: unsafe { &*queue_data },
worker_index: self.index as u8,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, Ordering::Release) == 1 {
self.chan.send(SupervisorMsg::Finished).unwrap()
}
}
}
// Give the deque back to the supervisor.
self.chan.send(SupervisorMsg::ReturnDeque(self.index, deque)).unwrap()
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a, QueueData: 'a, WorkData: 'a> {
worker: &'a mut Worker<WorkUnit<QueueData, WorkData>>,
ref_count: *mut AtomicUsize,
queue_data: &'a QueueData,
worker_index: u8,
}
impl<'a, QueueData: 'static, WorkData: Send + 'static> WorkerProxy<'a, QueueData, WorkData> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
unsafe {
drop((*self.ref_count).fetch_add(1, Ordering::Relaxed));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data(&self) -> &'a QueueData {
self.queue_data
}
/// Retrieves the index of the worker.
#[inline]
pub fn worker_index(&self) -> u8 {
self.worker_index
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QueueData: 'static, WorkData: 'static> {
/// Information about each of the workers.
workers: Vec<WorkerInfo<QueueData, WorkData>>,
/// A port on which deques can be received from the workers.
port: Receiver<SupervisorMsg<QueueData, WorkData>>,
/// The amount of work that has been enqueued.
work_count: usize,
}
impl<QueueData: Sync, WorkData: Send> WorkQueue<QueueData, WorkData> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str,
state: task_state::TaskState,
thread_count: usize) -> WorkQueue<QueueData, WorkData> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (vec!(), vec!());
for i in 0..thread_count {
let (worker_chan, worker_port) = channel();
let pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: vec!(),
rng: weak_rng(),
});
}
// Connect workers to one another.
for (i, mut thread) in threads.iter_mut().enumerate() {
for (j, info) in infos.iter().enumerate() {
if i != j {
thread.other_deques.push(info.thief.clone())
}
}
assert!(thread.other_deques.len() == thread_count - 1)
}
// Spawn threads.
for (i, thread) in threads.into_iter().enumerate() {
spawn_named(
format!("{} worker {}/{}", task_name, i + 1, thread_count),
move || {
task_state::initialize(state | task_state::IN_WORKER);
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
let deque = &mut self.workers[0].deque;
match *deque {
None => {
panic!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self, data: &QueueData) {
// Tell the workers to start.
let mut work_count = AtomicUsize::new(self.work_count);
for worker in &mut self.workers {
worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(),
&mut work_count,
data)).unwrap()
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in &self.workers {
worker.chan.send(WorkerMsg::Stop).unwrap()
}
// Get our deques back.
for _ in 0..self.workers.len() {
match self.port.recv().unwrap() {
SupervisorMsg::ReturnDeque(index, deque) => self.workers[index].deque = Some(deque),
SupervisorMsg::HeapSizeOfTLS(_) => panic!("unexpected HeapSizeOfTLS message"),
SupervisorMsg::Finished => panic!("unexpected finished message!"),
}
}
}
/// Synchronously measure memory usage of any thread-local storage.
pub fn heap_size_of_tls(&self, f: fn() -> usize) -> Vec<usize> {
// Tell the workers to measure themselves.
for worker in &self.workers {
worker.chan.send(WorkerMsg::HeapSizeOfTLS(f)).unwrap()
}
// Wait for the workers to finish measuring themselves.
let mut sizes = vec![];
for _ in 0..self.workers.len() {
match self.port.recv().unwrap() {
SupervisorMsg::HeapSizeOfTLS(size) => {
sizes.push(size);
}
_ => panic!("unexpected message!"),
}
}
sizes
}
pub fn shutdown(&mut self) {
for worker in &self.workers {
worker.chan.send(WorkerMsg::Exit).unwrap()
}
}
} | loop {
victim = self.rng.next_u32() & deque_index_mask;
if (victim as usize) < self.other_deques.len() { | random_line_split |
workqueue.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A work queue for scheduling units of work across threads in a fork-join fashion.
//!
//! Data associated with queues is simply a pair of unsigned integers. It is expected that a
//! higher-level API on top of this could allow safe fork-join parallelism.
use deque::{Abort, BufferPool, Data, Empty, Stealer, Worker};
use libc::funcs::posix88::unistd::usleep;
use rand::{Rng, XorShiftRng, weak_rng};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{Receiver, Sender, channel};
use task::spawn_named;
use task_state;
/// A unit of work.
///
/// # Type parameters
///
/// - `QueueData`: global custom data for the entire work queue.
/// - `WorkData`: custom data specific to each unit of work.
pub struct WorkUnit<QueueData, WorkData> {
/// The function to execute.
pub fun: extern "Rust" fn(WorkData, &mut WorkerProxy<QueueData, WorkData>),
/// Arbitrary data.
pub data: WorkData,
}
/// Messages from the supervisor to the worker.
enum WorkerMsg<QueueData: 'static, WorkData: 'static> {
/// Tells the worker to start work.
Start(Worker<WorkUnit<QueueData, WorkData>>, *mut AtomicUsize, *const QueueData),
/// Tells the worker to stop. It can be restarted again with a `WorkerMsg::Start`.
Stop,
/// Tells the worker to measure the heap size of its TLS using the supplied function.
HeapSizeOfTLS(fn() -> usize),
/// Tells the worker thread to terminate.
Exit,
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerMsg<QueueData, WorkData> {}
/// Messages to the supervisor.
enum SupervisorMsg<QueueData: 'static, WorkData: 'static> {
Finished,
HeapSizeOfTLS(usize),
ReturnDeque(usize, Worker<WorkUnit<QueueData, WorkData>>),
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for SupervisorMsg<QueueData, WorkData> {}
/// Information that the supervisor thread keeps about the worker threads.
struct WorkerInfo<QueueData: 'static, WorkData: 'static> {
/// The communication channel to the workers.
chan: Sender<WorkerMsg<QueueData, WorkData>>,
/// The worker end of the deque, if we have it.
deque: Option<Worker<WorkUnit<QueueData, WorkData>>>,
/// The thief end of the work-stealing deque.
thief: Stealer<WorkUnit<QueueData, WorkData>>,
}
/// Information specific to each worker thread that the thread keeps.
struct WorkerThread<QueueData: 'static, WorkData: 'static> {
/// The index of this worker.
index: usize,
/// The communication port from the supervisor.
port: Receiver<WorkerMsg<QueueData, WorkData>>,
/// The communication channel on which messages are sent to the supervisor.
chan: Sender<SupervisorMsg<QueueData, WorkData>>,
/// The thief end of the work-stealing deque for all other workers.
other_deques: Vec<Stealer<WorkUnit<QueueData, WorkData>>>,
/// The random number generator for this worker.
rng: XorShiftRng,
}
unsafe impl<QueueData: 'static, WorkData: 'static> Send for WorkerThread<QueueData, WorkData> {}
const SPINS_UNTIL_BACKOFF: u32 = 128;
const BACKOFF_INCREMENT_IN_US: u32 = 5;
const BACKOFFS_UNTIL_CONTROL_CHECK: u32 = 6;
fn | (mut v: u32) -> u32 {
v -= 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v += 1;
v
}
impl<QueueData: Sync, WorkData: Send> WorkerThread<QueueData, WorkData> {
/// The main logic. This function starts up the worker and listens for
/// messages.
fn start(&mut self) {
let deque_index_mask = next_power_of_two(self.other_deques.len() as u32) - 1;
loop {
// Wait for a start message.
let (mut deque, ref_count, queue_data) = match self.port.recv().unwrap() {
WorkerMsg::Start(deque, ref_count, queue_data) => (deque, ref_count, queue_data),
WorkerMsg::Stop => panic!("unexpected stop message"),
WorkerMsg::Exit => return,
WorkerMsg::HeapSizeOfTLS(f) => {
self.chan.send(SupervisorMsg::HeapSizeOfTLS(f())).unwrap();
continue;
}
};
let mut back_off_sleep = 0 as u32;
// We're off!
'outer: loop {
let work_unit;
match deque.pop() {
Some(work) => work_unit = work,
None => {
// Become a thief.
let mut i = 0;
loop {
// Don't just use `rand % len` because that's slow on ARM.
let mut victim;
loop {
victim = self.rng.next_u32() & deque_index_mask;
if (victim as usize) < self.other_deques.len() {
break
}
}
match self.other_deques[victim as usize].steal() {
Empty | Abort => {
// Continue.
}
Data(work) => {
work_unit = work;
back_off_sleep = 0 as u32;
break
}
}
if i > SPINS_UNTIL_BACKOFF {
if back_off_sleep >= BACKOFF_INCREMENT_IN_US *
BACKOFFS_UNTIL_CONTROL_CHECK {
match self.port.try_recv() {
Ok(WorkerMsg::Stop) => break 'outer,
Ok(WorkerMsg::Exit) => return,
Ok(_) => panic!("unexpected message"),
_ => {}
}
}
unsafe {
usleep(back_off_sleep as u32);
}
back_off_sleep += BACKOFF_INCREMENT_IN_US;
i = 0
} else {
i += 1
}
}
}
}
// At this point, we have some work. Perform it.
let mut proxy = WorkerProxy {
worker: &mut deque,
ref_count: ref_count,
// queue_data is kept alive in the stack frame of
// WorkQueue::run until we send the
// SupervisorMsg::ReturnDeque message below.
queue_data: unsafe { &*queue_data },
worker_index: self.index as u8,
};
(work_unit.fun)(work_unit.data, &mut proxy);
// The work is done. Now decrement the count of outstanding work items. If this was
// the last work unit in the queue, then send a message on the channel.
unsafe {
if (*ref_count).fetch_sub(1, Ordering::Release) == 1 {
self.chan.send(SupervisorMsg::Finished).unwrap()
}
}
}
// Give the deque back to the supervisor.
self.chan.send(SupervisorMsg::ReturnDeque(self.index, deque)).unwrap()
}
}
}
/// A handle to the work queue that individual work units have.
pub struct WorkerProxy<'a, QueueData: 'a, WorkData: 'a> {
worker: &'a mut Worker<WorkUnit<QueueData, WorkData>>,
ref_count: *mut AtomicUsize,
queue_data: &'a QueueData,
worker_index: u8,
}
impl<'a, QueueData: 'static, WorkData: Send + 'static> WorkerProxy<'a, QueueData, WorkData> {
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
unsafe {
drop((*self.ref_count).fetch_add(1, Ordering::Relaxed));
}
self.worker.push(work_unit);
}
/// Retrieves the queue user data.
#[inline]
pub fn user_data(&self) -> &'a QueueData {
self.queue_data
}
/// Retrieves the index of the worker.
#[inline]
pub fn worker_index(&self) -> u8 {
self.worker_index
}
}
/// A work queue on which units of work can be submitted.
pub struct WorkQueue<QueueData: 'static, WorkData: 'static> {
/// Information about each of the workers.
workers: Vec<WorkerInfo<QueueData, WorkData>>,
/// A port on which deques can be received from the workers.
port: Receiver<SupervisorMsg<QueueData, WorkData>>,
/// The amount of work that has been enqueued.
work_count: usize,
}
impl<QueueData: Sync, WorkData: Send> WorkQueue<QueueData, WorkData> {
/// Creates a new work queue and spawns all the threads associated with
/// it.
pub fn new(task_name: &'static str,
state: task_state::TaskState,
thread_count: usize) -> WorkQueue<QueueData, WorkData> {
// Set up data structures.
let (supervisor_chan, supervisor_port) = channel();
let (mut infos, mut threads) = (vec!(), vec!());
for i in 0..thread_count {
let (worker_chan, worker_port) = channel();
let pool = BufferPool::new();
let (worker, thief) = pool.deque();
infos.push(WorkerInfo {
chan: worker_chan,
deque: Some(worker),
thief: thief,
});
threads.push(WorkerThread {
index: i,
port: worker_port,
chan: supervisor_chan.clone(),
other_deques: vec!(),
rng: weak_rng(),
});
}
// Connect workers to one another.
for (i, mut thread) in threads.iter_mut().enumerate() {
for (j, info) in infos.iter().enumerate() {
if i != j {
thread.other_deques.push(info.thief.clone())
}
}
assert!(thread.other_deques.len() == thread_count - 1)
}
// Spawn threads.
for (i, thread) in threads.into_iter().enumerate() {
spawn_named(
format!("{} worker {}/{}", task_name, i + 1, thread_count),
move || {
task_state::initialize(state | task_state::IN_WORKER);
let mut thread = thread;
thread.start()
})
}
WorkQueue {
workers: infos,
port: supervisor_port,
work_count: 0,
}
}
/// Enqueues a block into the work queue.
#[inline]
pub fn push(&mut self, work_unit: WorkUnit<QueueData, WorkData>) {
let deque = &mut self.workers[0].deque;
match *deque {
None => {
panic!("tried to push a block but we don't have the deque?!")
}
Some(ref mut deque) => deque.push(work_unit),
}
self.work_count += 1
}
/// Synchronously runs all the enqueued tasks and waits for them to complete.
pub fn run(&mut self, data: &QueueData) {
// Tell the workers to start.
let mut work_count = AtomicUsize::new(self.work_count);
for worker in &mut self.workers {
worker.chan.send(WorkerMsg::Start(worker.deque.take().unwrap(),
&mut work_count,
data)).unwrap()
}
// Wait for the work to finish.
drop(self.port.recv());
self.work_count = 0;
// Tell everyone to stop.
for worker in &self.workers {
worker.chan.send(WorkerMsg::Stop).unwrap()
}
// Get our deques back.
for _ in 0..self.workers.len() {
match self.port.recv().unwrap() {
SupervisorMsg::ReturnDeque(index, deque) => self.workers[index].deque = Some(deque),
SupervisorMsg::HeapSizeOfTLS(_) => panic!("unexpected HeapSizeOfTLS message"),
SupervisorMsg::Finished => panic!("unexpected finished message!"),
}
}
}
/// Synchronously measure memory usage of any thread-local storage.
pub fn heap_size_of_tls(&self, f: fn() -> usize) -> Vec<usize> {
// Tell the workers to measure themselves.
for worker in &self.workers {
worker.chan.send(WorkerMsg::HeapSizeOfTLS(f)).unwrap()
}
// Wait for the workers to finish measuring themselves.
let mut sizes = vec![];
for _ in 0..self.workers.len() {
match self.port.recv().unwrap() {
SupervisorMsg::HeapSizeOfTLS(size) => {
sizes.push(size);
}
_ => panic!("unexpected message!"),
}
}
sizes
}
pub fn shutdown(&mut self) {
for worker in &self.workers {
worker.chan.send(WorkerMsg::Exit).unwrap()
}
}
}
| next_power_of_two | identifier_name |
data-resource-data.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {
ChangeDetectionStrategy,
Component,
EventEmitter,
HostListener,
Input,
OnChanges,
OnDestroy,
OnInit,
Output,
QueryList,
SimpleChanges,
TemplateRef,
ViewChild,
ViewChildren,
} from '@angular/core';
import {Attribute} from '../../../../core/store/collections/collection';
import {AllowedPermissions} from '../../../../core/model/allowed-permissions';
import {DataRow, DataRowService} from '../../../data/data-row.service';
import {Query} from '../../../../core/store/navigation/query/query';
import {DataResourceDataRowComponent} from './row/data-resource-data-row.component';
import {filterUnusedAttributes} from '../../../utils/attribute.utils';
import {HiddenInputComponent} from '../../../input/hidden-input/hidden-input.component';
import {DataRowFocusService} from '../../../data/data-row-focus-service';
import {BehaviorSubject, Observable, of, Subscription} from 'rxjs';
import {Workspace} from '../../../../core/store/navigation/workspace';
import {AppState} from '../../../../core/store/app.state';
import {select, Store} from '@ngrx/store';
import {selectCollectionById} from '../../../../core/store/collections/collections.state';
import {selectDocumentById} from '../../../../core/store/documents/documents.state';
import {AttributesResource, AttributesResourceType, DataResource} from '../../../../core/model/resource';
import {selectLinkTypeById} from '../../../../core/store/link-types/link-types.state';
import {selectLinkInstanceById} from '../../../../core/store/link-instances/link-instances.state';
import {ResourceAttributeSettings, View} from '../../../../core/store/views/view';
import {objectChanged} from '../../../utils/common.utils';
import {ConstraintData} from '@lumeer/data-filters';
import {User} from '../../../../core/store/users/user';
@Component({
selector: 'data-resource-data',
templateUrl: './data-resource-data.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
providers: [DataRowService],
})
export class DataResourceDataComponent implements OnInit, OnChanges, OnDestroy {
@Input()
public resource: AttributesResource;
@Input()
public dataResource: DataResource;
@Input()
public resourceType: AttributesResourceType;
@Input()
public constraintData: ConstraintData;
@Input()
public permissions: AllowedPermissions;
@Input()
public query: Query;
@Input()
public workspace: Workspace;
@Input()
public user: User;
@Input()
public view: View;
@Input()
public toolbarRef: TemplateRef<any>;
@Input()
public preventEventBubble: boolean;
@Input()
public editableKeys = false;
@Input()
public attributeSettings: ResourceAttributeSettings[];
@Output()
public attributeTypeClick = new EventEmitter<Attribute>();
@Output()
public attributeFunctionCLick = new EventEmitter<Attribute>();
@ViewChildren(DataResourceDataRowComponent)
public rows: QueryList<DataResourceDataRowComponent>;
@ViewChild(HiddenInputComponent)
public hiddenInputComponent: HiddenInputComponent;
@Output()
public switchToTable = new EventEmitter();
@Output()
public removeDocument = new EventEmitter();
@Output()
public dataResourceChanged = new EventEmitter<DataResource>();
public unusedAttributes$ = new BehaviorSubject<Attribute[]>([]);
public resource$: Observable<AttributesResource>;
public dataResource$: Observable<DataResource>;
private dataRowFocusService: DataRowFocusService;
private subscriptions = new Subscription();
constructor(public dataRowService: DataRowService, private store$: Store<AppState>) {
this.dataRowFocusService = new DataRowFocusService(
() => 2,
() => this.dataRowService.rows$.value.length,
() => this.rows.toArray(),
() => this.hiddenInputComponent,
(row, column) => this.dataRowService.rows$.value[row]?.attribute?.constraint?.isDirectlyEditable
);
}
public ngOnInit() {
const subscription = this.dataRowService.rows$.subscribe(() => {
const currentDataResource = this.getCurrentDataResource();
const unusedAttributes = filterUnusedAttributes(this.resource?.attributes, currentDataResource?.data);
this.unusedAttributes$.next(unusedAttributes);
this.dataResourceChanged.emit(currentDataResource);
});
this.subscriptions.add(subscription);
}
public ngOnChanges(changes: SimpleChanges) {
if (this.shouldRefreshObservables(changes)) {
this.dataRowService.init(this.resource, this.dataResource, this.attributeSettings);
this.resource$ = this.selectResource$();
this.dataResource$ = this.selectDataResource$();
} else if (changes.attributeSettings || changes.permissions) {
this.dataRowService.setSettings(this.attributeSettings);
}
if (changes.workspace) {
this.dataRowService.setWorkspace(this.workspace);
}
if (objectChanged(changes.dataResource)) {
this.focusFirstDataInput();
}
}
private focusFirstDataInput() {
setTimeout(() => this.rows?.first?.onValueFocus());
}
private selectResource$(): Observable<AttributesResource> {
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectCollectionById(this.resource.id)));
}
return this.store$.pipe(select(selectLinkTypeById(this.resource.id)));
}
private selectDataResource$(): Observable<DataResource> {
if (!this.dataResource.id) {
return of(this.dataResource);
}
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectDocumentById(this.dataResource.id)));
}
return this.store$.pipe(select(selectLinkInstanceById(this.dataResource.id)));
}
private shouldRefreshObservables(changes: SimpleChanges): boolean {
if (this.resource && this.dataResource) {
if (this.dataResource.id) {
return objectChanged(changes.resource) || objectChanged(changes.dataResource);
} else {
return !!changes.resource || !!changes.dataResource;
}
}
return false;
}
public onNewKey(value: string, index: number) {
this.dataRowService.updateRow(index, value);
}
public onNewValue(value: any, row: DataRow, index: number) {
this.dataRowService.updateRow(index, null, value);
}
public ngOnDestroy() {
this.subscriptions.unsubscribe();
this.dataRowService.destroy();
}
public onRemoveRow(index: number) {
this.dataRowService.deleteRow(index);
}
public onAttributeFunction(row: DataRow) |
public onAttributeType(row: DataRow) {
if (row.attribute) {
this.attributeTypeClick.emit(row.attribute);
}
}
public onFocus(row: number, column: number) {
this.dataRowFocusService.focus(row, this.editableKeys ? column : 1);
}
public onResetFocusAndEdit(row: number, column: number) {
this.dataRowFocusService.resetFocusAndEdit(row, this.editableKeys ? column : 1);
}
public onEdit(row: number, column: number) {
this.dataRowFocusService.edit(row, this.editableKeys ? column : 1);
}
@HostListener('document:keydown', ['$event'])
public onKeyDown(event: KeyboardEvent) {
this.dataRowFocusService.onKeyDown(event, {column: !this.editableKeys});
}
public trackByRow(index: number, row: DataRow): string {
return row.id;
}
public onNewHiddenInput(value: string) {
this.dataRowFocusService.newHiddenInput(value);
}
private getCurrentDataResource(): DataResource {
if (!this.dataResource) {
return null;
}
const rows = this.dataRowService.rows$.value;
const data = rows
.filter(row => row.attribute && row.attribute.id)
.reduce((d, row) => {
if (row.attribute.constraint) {
d[row.attribute.id] = row.attribute.constraint.createDataValue(row.value, this.constraintData).serialize();
} else {
d[row.attribute.id] = row.value;
}
return d;
}, {});
const currentAttributeNames = (this.resource?.attributes || []).map(attr => attr.name);
const newData = rows
.filter(row => row.key && (!row.attribute || !row.attribute.id) && !currentAttributeNames.includes(row.key))
.reduce(
(d, row) => ({
...d,
[row.key]: row.value,
}),
{}
);
return {...this.dataResource, data, newData};
}
}
| {
if (row.attribute) {
this.attributeFunctionCLick.emit(row.attribute);
}
} | identifier_body |
data-resource-data.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {
ChangeDetectionStrategy,
Component,
EventEmitter,
HostListener,
Input,
OnChanges,
OnDestroy,
OnInit,
Output,
QueryList,
SimpleChanges,
TemplateRef,
ViewChild,
ViewChildren,
} from '@angular/core';
import {Attribute} from '../../../../core/store/collections/collection';
import {AllowedPermissions} from '../../../../core/model/allowed-permissions';
import {DataRow, DataRowService} from '../../../data/data-row.service';
import {Query} from '../../../../core/store/navigation/query/query';
import {DataResourceDataRowComponent} from './row/data-resource-data-row.component';
import {filterUnusedAttributes} from '../../../utils/attribute.utils';
import {HiddenInputComponent} from '../../../input/hidden-input/hidden-input.component';
import {DataRowFocusService} from '../../../data/data-row-focus-service';
import {BehaviorSubject, Observable, of, Subscription} from 'rxjs';
import {Workspace} from '../../../../core/store/navigation/workspace';
import {AppState} from '../../../../core/store/app.state';
import {select, Store} from '@ngrx/store';
import {selectCollectionById} from '../../../../core/store/collections/collections.state';
import {selectDocumentById} from '../../../../core/store/documents/documents.state';
import {AttributesResource, AttributesResourceType, DataResource} from '../../../../core/model/resource';
import {selectLinkTypeById} from '../../../../core/store/link-types/link-types.state';
import {selectLinkInstanceById} from '../../../../core/store/link-instances/link-instances.state';
import {ResourceAttributeSettings, View} from '../../../../core/store/views/view';
import {objectChanged} from '../../../utils/common.utils';
import {ConstraintData} from '@lumeer/data-filters';
import {User} from '../../../../core/store/users/user';
@Component({
selector: 'data-resource-data',
templateUrl: './data-resource-data.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
providers: [DataRowService],
})
export class DataResourceDataComponent implements OnInit, OnChanges, OnDestroy {
@Input()
public resource: AttributesResource;
@Input()
public dataResource: DataResource;
@Input()
public resourceType: AttributesResourceType;
@Input()
public constraintData: ConstraintData;
@Input()
public permissions: AllowedPermissions;
@Input()
public query: Query;
@Input()
public workspace: Workspace;
@Input()
public user: User;
@Input()
public view: View;
@Input()
public toolbarRef: TemplateRef<any>;
@Input()
public preventEventBubble: boolean;
@Input()
public editableKeys = false;
@Input()
public attributeSettings: ResourceAttributeSettings[];
@Output()
public attributeTypeClick = new EventEmitter<Attribute>();
@Output()
public attributeFunctionCLick = new EventEmitter<Attribute>();
@ViewChildren(DataResourceDataRowComponent)
public rows: QueryList<DataResourceDataRowComponent>;
@ViewChild(HiddenInputComponent)
public hiddenInputComponent: HiddenInputComponent;
@Output()
public switchToTable = new EventEmitter();
@Output()
public removeDocument = new EventEmitter();
@Output()
public dataResourceChanged = new EventEmitter<DataResource>();
public unusedAttributes$ = new BehaviorSubject<Attribute[]>([]);
public resource$: Observable<AttributesResource>;
public dataResource$: Observable<DataResource>;
private dataRowFocusService: DataRowFocusService;
private subscriptions = new Subscription();
constructor(public dataRowService: DataRowService, private store$: Store<AppState>) {
this.dataRowFocusService = new DataRowFocusService(
() => 2,
() => this.dataRowService.rows$.value.length,
() => this.rows.toArray(),
() => this.hiddenInputComponent,
(row, column) => this.dataRowService.rows$.value[row]?.attribute?.constraint?.isDirectlyEditable
);
}
public ngOnInit() {
const subscription = this.dataRowService.rows$.subscribe(() => {
const currentDataResource = this.getCurrentDataResource();
const unusedAttributes = filterUnusedAttributes(this.resource?.attributes, currentDataResource?.data);
this.unusedAttributes$.next(unusedAttributes);
this.dataResourceChanged.emit(currentDataResource);
});
this.subscriptions.add(subscription);
}
public ngOnChanges(changes: SimpleChanges) {
if (this.shouldRefreshObservables(changes)) {
this.dataRowService.init(this.resource, this.dataResource, this.attributeSettings);
this.resource$ = this.selectResource$();
this.dataResource$ = this.selectDataResource$();
} else if (changes.attributeSettings || changes.permissions) {
this.dataRowService.setSettings(this.attributeSettings);
}
if (changes.workspace) {
this.dataRowService.setWorkspace(this.workspace);
}
if (objectChanged(changes.dataResource)) {
this.focusFirstDataInput();
}
}
private focusFirstDataInput() {
setTimeout(() => this.rows?.first?.onValueFocus());
}
private selectResource$(): Observable<AttributesResource> {
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectCollectionById(this.resource.id)));
}
return this.store$.pipe(select(selectLinkTypeById(this.resource.id)));
}
private selectDataResource$(): Observable<DataResource> {
if (!this.dataResource.id) {
return of(this.dataResource);
}
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectDocumentById(this.dataResource.id)));
}
return this.store$.pipe(select(selectLinkInstanceById(this.dataResource.id)));
}
private shouldRefreshObservables(changes: SimpleChanges): boolean {
if (this.resource && this.dataResource) {
if (this.dataResource.id) {
return objectChanged(changes.resource) || objectChanged(changes.dataResource);
} else {
return !!changes.resource || !!changes.dataResource;
}
}
return false;
}
public onNewKey(value: string, index: number) {
this.dataRowService.updateRow(index, value);
}
public onNewValue(value: any, row: DataRow, index: number) {
this.dataRowService.updateRow(index, null, value);
}
public ngOnDestroy() {
this.subscriptions.unsubscribe();
this.dataRowService.destroy();
}
public onRemoveRow(index: number) {
this.dataRowService.deleteRow(index);
}
public onAttributeFunction(row: DataRow) {
if (row.attribute) {
this.attributeFunctionCLick.emit(row.attribute);
}
}
public onAttributeType(row: DataRow) {
if (row.attribute) {
this.attributeTypeClick.emit(row.attribute);
}
}
public onFocus(row: number, column: number) {
this.dataRowFocusService.focus(row, this.editableKeys ? column : 1);
}
public onResetFocusAndEdit(row: number, column: number) {
this.dataRowFocusService.resetFocusAndEdit(row, this.editableKeys ? column : 1);
}
public onEdit(row: number, column: number) {
this.dataRowFocusService.edit(row, this.editableKeys ? column : 1);
}
@HostListener('document:keydown', ['$event'])
public onKeyDown(event: KeyboardEvent) {
this.dataRowFocusService.onKeyDown(event, {column: !this.editableKeys});
}
public trackByRow(index: number, row: DataRow): string {
return row.id;
}
public onNewHiddenInput(value: string) {
this.dataRowFocusService.newHiddenInput(value);
}
private getCurrentDataResource(): DataResource {
if (!this.dataResource) {
return null;
}
const rows = this.dataRowService.rows$.value;
const data = rows
.filter(row => row.attribute && row.attribute.id)
.reduce((d, row) => {
if (row.attribute.constraint) {
d[row.attribute.id] = row.attribute.constraint.createDataValue(row.value, this.constraintData).serialize();
} else |
return d;
}, {});
const currentAttributeNames = (this.resource?.attributes || []).map(attr => attr.name);
const newData = rows
.filter(row => row.key && (!row.attribute || !row.attribute.id) && !currentAttributeNames.includes(row.key))
.reduce(
(d, row) => ({
...d,
[row.key]: row.value,
}),
{}
);
return {...this.dataResource, data, newData};
}
}
| {
d[row.attribute.id] = row.value;
} | conditional_block |
data-resource-data.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {
ChangeDetectionStrategy,
Component,
EventEmitter,
HostListener,
Input,
OnChanges,
OnDestroy,
OnInit,
Output,
QueryList,
SimpleChanges,
TemplateRef,
ViewChild,
ViewChildren,
} from '@angular/core';
import {Attribute} from '../../../../core/store/collections/collection';
import {AllowedPermissions} from '../../../../core/model/allowed-permissions';
import {DataRow, DataRowService} from '../../../data/data-row.service';
import {Query} from '../../../../core/store/navigation/query/query';
import {DataResourceDataRowComponent} from './row/data-resource-data-row.component';
import {filterUnusedAttributes} from '../../../utils/attribute.utils';
import {HiddenInputComponent} from '../../../input/hidden-input/hidden-input.component';
import {DataRowFocusService} from '../../../data/data-row-focus-service';
import {BehaviorSubject, Observable, of, Subscription} from 'rxjs';
import {Workspace} from '../../../../core/store/navigation/workspace';
import {AppState} from '../../../../core/store/app.state';
import {select, Store} from '@ngrx/store';
import {selectCollectionById} from '../../../../core/store/collections/collections.state';
import {selectDocumentById} from '../../../../core/store/documents/documents.state';
import {AttributesResource, AttributesResourceType, DataResource} from '../../../../core/model/resource';
import {selectLinkTypeById} from '../../../../core/store/link-types/link-types.state';
import {selectLinkInstanceById} from '../../../../core/store/link-instances/link-instances.state';
import {ResourceAttributeSettings, View} from '../../../../core/store/views/view';
import {objectChanged} from '../../../utils/common.utils';
import {ConstraintData} from '@lumeer/data-filters';
import {User} from '../../../../core/store/users/user';
@Component({
selector: 'data-resource-data',
templateUrl: './data-resource-data.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
providers: [DataRowService],
})
export class DataResourceDataComponent implements OnInit, OnChanges, OnDestroy {
@Input()
public resource: AttributesResource;
@Input()
public dataResource: DataResource;
@Input()
public resourceType: AttributesResourceType;
@Input()
public constraintData: ConstraintData;
@Input()
public permissions: AllowedPermissions;
@Input()
public query: Query;
@Input()
public workspace: Workspace;
@Input()
public user: User;
@Input()
public view: View;
@Input()
public toolbarRef: TemplateRef<any>;
@Input()
public preventEventBubble: boolean;
@Input()
public editableKeys = false;
@Input()
public attributeSettings: ResourceAttributeSettings[];
@Output()
public attributeTypeClick = new EventEmitter<Attribute>();
@Output()
public attributeFunctionCLick = new EventEmitter<Attribute>();
@ViewChildren(DataResourceDataRowComponent)
public rows: QueryList<DataResourceDataRowComponent>;
@ViewChild(HiddenInputComponent)
public hiddenInputComponent: HiddenInputComponent;
@Output()
public switchToTable = new EventEmitter();
@Output()
public removeDocument = new EventEmitter();
@Output()
public dataResourceChanged = new EventEmitter<DataResource>();
public unusedAttributes$ = new BehaviorSubject<Attribute[]>([]);
public resource$: Observable<AttributesResource>;
public dataResource$: Observable<DataResource>;
private dataRowFocusService: DataRowFocusService;
private subscriptions = new Subscription();
constructor(public dataRowService: DataRowService, private store$: Store<AppState>) {
this.dataRowFocusService = new DataRowFocusService(
() => 2,
() => this.dataRowService.rows$.value.length,
() => this.rows.toArray(),
() => this.hiddenInputComponent,
(row, column) => this.dataRowService.rows$.value[row]?.attribute?.constraint?.isDirectlyEditable
);
}
public ngOnInit() {
const subscription = this.dataRowService.rows$.subscribe(() => {
const currentDataResource = this.getCurrentDataResource();
const unusedAttributes = filterUnusedAttributes(this.resource?.attributes, currentDataResource?.data);
this.unusedAttributes$.next(unusedAttributes);
this.dataResourceChanged.emit(currentDataResource);
});
this.subscriptions.add(subscription);
}
public ngOnChanges(changes: SimpleChanges) {
if (this.shouldRefreshObservables(changes)) {
this.dataRowService.init(this.resource, this.dataResource, this.attributeSettings);
this.resource$ = this.selectResource$();
this.dataResource$ = this.selectDataResource$();
} else if (changes.attributeSettings || changes.permissions) {
this.dataRowService.setSettings(this.attributeSettings);
}
if (changes.workspace) {
this.dataRowService.setWorkspace(this.workspace);
}
if (objectChanged(changes.dataResource)) {
this.focusFirstDataInput();
}
}
private focusFirstDataInput() {
setTimeout(() => this.rows?.first?.onValueFocus());
}
private selectResource$(): Observable<AttributesResource> {
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectCollectionById(this.resource.id)));
}
return this.store$.pipe(select(selectLinkTypeById(this.resource.id)));
}
private selectDataResource$(): Observable<DataResource> {
if (!this.dataResource.id) {
return of(this.dataResource);
}
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectDocumentById(this.dataResource.id)));
}
return this.store$.pipe(select(selectLinkInstanceById(this.dataResource.id)));
}
private shouldRefreshObservables(changes: SimpleChanges): boolean {
if (this.resource && this.dataResource) {
if (this.dataResource.id) {
return objectChanged(changes.resource) || objectChanged(changes.dataResource);
} else {
return !!changes.resource || !!changes.dataResource;
}
}
return false; | this.dataRowService.updateRow(index, value);
}
public onNewValue(value: any, row: DataRow, index: number) {
this.dataRowService.updateRow(index, null, value);
}
public ngOnDestroy() {
this.subscriptions.unsubscribe();
this.dataRowService.destroy();
}
public onRemoveRow(index: number) {
this.dataRowService.deleteRow(index);
}
public onAttributeFunction(row: DataRow) {
if (row.attribute) {
this.attributeFunctionCLick.emit(row.attribute);
}
}
public onAttributeType(row: DataRow) {
if (row.attribute) {
this.attributeTypeClick.emit(row.attribute);
}
}
public onFocus(row: number, column: number) {
this.dataRowFocusService.focus(row, this.editableKeys ? column : 1);
}
public onResetFocusAndEdit(row: number, column: number) {
this.dataRowFocusService.resetFocusAndEdit(row, this.editableKeys ? column : 1);
}
public onEdit(row: number, column: number) {
this.dataRowFocusService.edit(row, this.editableKeys ? column : 1);
}
@HostListener('document:keydown', ['$event'])
public onKeyDown(event: KeyboardEvent) {
this.dataRowFocusService.onKeyDown(event, {column: !this.editableKeys});
}
public trackByRow(index: number, row: DataRow): string {
return row.id;
}
public onNewHiddenInput(value: string) {
this.dataRowFocusService.newHiddenInput(value);
}
private getCurrentDataResource(): DataResource {
if (!this.dataResource) {
return null;
}
const rows = this.dataRowService.rows$.value;
const data = rows
.filter(row => row.attribute && row.attribute.id)
.reduce((d, row) => {
if (row.attribute.constraint) {
d[row.attribute.id] = row.attribute.constraint.createDataValue(row.value, this.constraintData).serialize();
} else {
d[row.attribute.id] = row.value;
}
return d;
}, {});
const currentAttributeNames = (this.resource?.attributes || []).map(attr => attr.name);
const newData = rows
.filter(row => row.key && (!row.attribute || !row.attribute.id) && !currentAttributeNames.includes(row.key))
.reduce(
(d, row) => ({
...d,
[row.key]: row.value,
}),
{}
);
return {...this.dataResource, data, newData};
}
} | }
public onNewKey(value: string, index: number) { | random_line_split |
data-resource-data.component.ts | /*
* Lumeer: Modern Data Definition and Processing Platform
*
* Copyright (C) since 2017 Lumeer.io, s.r.o. and/or its affiliates.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import {
ChangeDetectionStrategy,
Component,
EventEmitter,
HostListener,
Input,
OnChanges,
OnDestroy,
OnInit,
Output,
QueryList,
SimpleChanges,
TemplateRef,
ViewChild,
ViewChildren,
} from '@angular/core';
import {Attribute} from '../../../../core/store/collections/collection';
import {AllowedPermissions} from '../../../../core/model/allowed-permissions';
import {DataRow, DataRowService} from '../../../data/data-row.service';
import {Query} from '../../../../core/store/navigation/query/query';
import {DataResourceDataRowComponent} from './row/data-resource-data-row.component';
import {filterUnusedAttributes} from '../../../utils/attribute.utils';
import {HiddenInputComponent} from '../../../input/hidden-input/hidden-input.component';
import {DataRowFocusService} from '../../../data/data-row-focus-service';
import {BehaviorSubject, Observable, of, Subscription} from 'rxjs';
import {Workspace} from '../../../../core/store/navigation/workspace';
import {AppState} from '../../../../core/store/app.state';
import {select, Store} from '@ngrx/store';
import {selectCollectionById} from '../../../../core/store/collections/collections.state';
import {selectDocumentById} from '../../../../core/store/documents/documents.state';
import {AttributesResource, AttributesResourceType, DataResource} from '../../../../core/model/resource';
import {selectLinkTypeById} from '../../../../core/store/link-types/link-types.state';
import {selectLinkInstanceById} from '../../../../core/store/link-instances/link-instances.state';
import {ResourceAttributeSettings, View} from '../../../../core/store/views/view';
import {objectChanged} from '../../../utils/common.utils';
import {ConstraintData} from '@lumeer/data-filters';
import {User} from '../../../../core/store/users/user';
@Component({
selector: 'data-resource-data',
templateUrl: './data-resource-data.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
providers: [DataRowService],
})
export class DataResourceDataComponent implements OnInit, OnChanges, OnDestroy {
@Input()
public resource: AttributesResource;
@Input()
public dataResource: DataResource;
@Input()
public resourceType: AttributesResourceType;
@Input()
public constraintData: ConstraintData;
@Input()
public permissions: AllowedPermissions;
@Input()
public query: Query;
@Input()
public workspace: Workspace;
@Input()
public user: User;
@Input()
public view: View;
@Input()
public toolbarRef: TemplateRef<any>;
@Input()
public preventEventBubble: boolean;
@Input()
public editableKeys = false;
@Input()
public attributeSettings: ResourceAttributeSettings[];
@Output()
public attributeTypeClick = new EventEmitter<Attribute>();
@Output()
public attributeFunctionCLick = new EventEmitter<Attribute>();
@ViewChildren(DataResourceDataRowComponent)
public rows: QueryList<DataResourceDataRowComponent>;
@ViewChild(HiddenInputComponent)
public hiddenInputComponent: HiddenInputComponent;
@Output()
public switchToTable = new EventEmitter();
@Output()
public removeDocument = new EventEmitter();
@Output()
public dataResourceChanged = new EventEmitter<DataResource>();
public unusedAttributes$ = new BehaviorSubject<Attribute[]>([]);
public resource$: Observable<AttributesResource>;
public dataResource$: Observable<DataResource>;
private dataRowFocusService: DataRowFocusService;
private subscriptions = new Subscription();
constructor(public dataRowService: DataRowService, private store$: Store<AppState>) {
this.dataRowFocusService = new DataRowFocusService(
() => 2,
() => this.dataRowService.rows$.value.length,
() => this.rows.toArray(),
() => this.hiddenInputComponent,
(row, column) => this.dataRowService.rows$.value[row]?.attribute?.constraint?.isDirectlyEditable
);
}
public ngOnInit() {
const subscription = this.dataRowService.rows$.subscribe(() => {
const currentDataResource = this.getCurrentDataResource();
const unusedAttributes = filterUnusedAttributes(this.resource?.attributes, currentDataResource?.data);
this.unusedAttributes$.next(unusedAttributes);
this.dataResourceChanged.emit(currentDataResource);
});
this.subscriptions.add(subscription);
}
public ngOnChanges(changes: SimpleChanges) {
if (this.shouldRefreshObservables(changes)) {
this.dataRowService.init(this.resource, this.dataResource, this.attributeSettings);
this.resource$ = this.selectResource$();
this.dataResource$ = this.selectDataResource$();
} else if (changes.attributeSettings || changes.permissions) {
this.dataRowService.setSettings(this.attributeSettings);
}
if (changes.workspace) {
this.dataRowService.setWorkspace(this.workspace);
}
if (objectChanged(changes.dataResource)) {
this.focusFirstDataInput();
}
}
private focusFirstDataInput() {
setTimeout(() => this.rows?.first?.onValueFocus());
}
private | (): Observable<AttributesResource> {
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectCollectionById(this.resource.id)));
}
return this.store$.pipe(select(selectLinkTypeById(this.resource.id)));
}
private selectDataResource$(): Observable<DataResource> {
if (!this.dataResource.id) {
return of(this.dataResource);
}
if (this.resourceType === AttributesResourceType.Collection) {
return this.store$.pipe(select(selectDocumentById(this.dataResource.id)));
}
return this.store$.pipe(select(selectLinkInstanceById(this.dataResource.id)));
}
private shouldRefreshObservables(changes: SimpleChanges): boolean {
if (this.resource && this.dataResource) {
if (this.dataResource.id) {
return objectChanged(changes.resource) || objectChanged(changes.dataResource);
} else {
return !!changes.resource || !!changes.dataResource;
}
}
return false;
}
public onNewKey(value: string, index: number) {
this.dataRowService.updateRow(index, value);
}
public onNewValue(value: any, row: DataRow, index: number) {
this.dataRowService.updateRow(index, null, value);
}
public ngOnDestroy() {
this.subscriptions.unsubscribe();
this.dataRowService.destroy();
}
public onRemoveRow(index: number) {
this.dataRowService.deleteRow(index);
}
public onAttributeFunction(row: DataRow) {
if (row.attribute) {
this.attributeFunctionCLick.emit(row.attribute);
}
}
public onAttributeType(row: DataRow) {
if (row.attribute) {
this.attributeTypeClick.emit(row.attribute);
}
}
public onFocus(row: number, column: number) {
this.dataRowFocusService.focus(row, this.editableKeys ? column : 1);
}
public onResetFocusAndEdit(row: number, column: number) {
this.dataRowFocusService.resetFocusAndEdit(row, this.editableKeys ? column : 1);
}
public onEdit(row: number, column: number) {
this.dataRowFocusService.edit(row, this.editableKeys ? column : 1);
}
@HostListener('document:keydown', ['$event'])
public onKeyDown(event: KeyboardEvent) {
this.dataRowFocusService.onKeyDown(event, {column: !this.editableKeys});
}
public trackByRow(index: number, row: DataRow): string {
return row.id;
}
public onNewHiddenInput(value: string) {
this.dataRowFocusService.newHiddenInput(value);
}
private getCurrentDataResource(): DataResource {
if (!this.dataResource) {
return null;
}
const rows = this.dataRowService.rows$.value;
const data = rows
.filter(row => row.attribute && row.attribute.id)
.reduce((d, row) => {
if (row.attribute.constraint) {
d[row.attribute.id] = row.attribute.constraint.createDataValue(row.value, this.constraintData).serialize();
} else {
d[row.attribute.id] = row.value;
}
return d;
}, {});
const currentAttributeNames = (this.resource?.attributes || []).map(attr => attr.name);
const newData = rows
.filter(row => row.key && (!row.attribute || !row.attribute.id) && !currentAttributeNames.includes(row.key))
.reduce(
(d, row) => ({
...d,
[row.key]: row.value,
}),
{}
);
return {...this.dataResource, data, newData};
}
}
| selectResource$ | identifier_name |
mod.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use syntax::diagnostic;
use back::link;
use driver::driver::{Input, FileInput, StrInput};
use driver::session::{Session, build_session};
use lint::Lint;
use lint;
use metadata;
use std::any::AnyRefExt;
use std::io;
use std::os;
use std::task::TaskBuilder;
use syntax::ast;
use syntax::parse;
use syntax::diagnostic::Emitter;
use syntax::diagnostics;
use getopts;
pub mod driver;
pub mod session;
pub mod config;
pub fn main_args(args: &[String]) -> int {
let owned_args = args.to_vec();
monitor(proc() run_compiler(owned_args.as_slice()));
0
}
static BUG_REPORT_URL: &'static str =
"http://doc.rust-lang.org/complement-bugreport.html";
fn | (args: &[String]) {
let matches = match handle_options(Vec::from_slice(args)) {
Some(matches) => matches,
None => return
};
let descriptions = diagnostics::registry::Registry::new(super::DIAGNOSTICS);
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(code.as_slice()) {
Some(ref description) => {
println!("{}", description);
}
None => {
early_error(format!("no extended information for {}", code).as_slice());
}
}
return;
},
None => ()
}
let sopts = config::build_session_options(&matches);
let (input, input_file_path) = match matches.free.len() {
0u => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
ls.register_builtin(None);
describe_lints(&ls, false);
return;
}
early_error("no input filename given");
}
1u => {
let ifile = matches.free.get(0).as_slice();
if ifile == "-" {
let contents = io::stdin().read_to_end().unwrap();
let src = String::from_utf8(contents).unwrap();
(StrInput(src), None)
} else {
(FileInput(Path::new(ifile)), Some(Path::new(ifile)))
}
}
_ => early_error("multiple input filenames provided")
};
let sess = build_session(sopts, input_file_path, descriptions);
let cfg = config::build_configuration(&sess);
let odir = matches.opt_str("out-dir").map(|o| Path::new(o));
let ofile = matches.opt_str("o").map(|o| Path::new(o));
let pretty = matches.opt_default("pretty", "normal").map(|a| {
parse_pretty(&sess, a.as_slice())
});
match pretty {
Some((ppm, opt_uii)) => {
driver::pretty_print_input(sess, cfg, &input, ppm, opt_uii, ofile);
return;
}
None => {/* continue */ }
}
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
FileInput(ref ifile) => {
let mut stdout = io::stdout();
list_metadata(&sess, &(*ifile), &mut stdout).unwrap();
}
StrInput(_) => {
early_error("can not list metadata for stdin");
}
}
return;
}
if print_crate_info(&sess, &input, &odir, &ofile) {
return;
}
driver::compile_input(sess, cfg, &input, &odir, &ofile, None);
}
/// Prints version information and returns None on success or an error
/// message on failure.
pub fn version(binary: &str, matches: &getopts::Matches) -> Option<String> {
let verbose = match matches.opt_str("version").as_ref().map(|s| s.as_slice()) {
None => false,
Some("verbose") => true,
Some(s) => return Some(format!("Unrecognized argument: {}", s))
};
println!("{} {}", binary, env!("CFG_VERSION"));
if verbose {
println!("binary: {}", binary);
println!("commit-hash: {}", option_env!("CFG_VER_HASH").unwrap_or("unknown"));
println!("commit-date: {}", option_env!("CFG_VER_DATE").unwrap_or("unknown"));
println!("host: {}", driver::host_triple());
println!("release: {}", env!("CFG_RELEASE"));
}
None
}
fn usage() {
let message = format!("Usage: rustc [OPTIONS] INPUT");
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc\n",
getopts::usage(message.as_slice(),
config::optgroups().as_slice()));
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.move_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(&y.name),
r => r,
}
});
lints
}
let (plugin, builtin) = lint_store.get_lints().partitioned(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
// FIXME (#7043): We should use the width in character cells rather than
// the number of codepoints.
let max_name_len = plugin.iter().chain(builtin.iter())
.map(|&s| s.name.char_len())
.max().unwrap_or(0);
let padded = |x: &str| {
" ".repeat(max_name_len - x.char_len()).append(x)
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7s} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7s} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints.move_iter() {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7s} {}",
padded(name.as_slice()), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
match (loaded_plugins, plugin.len()) {
(false, 0) => {
println!("Compiler plugins can provide additional lints. To see a listing of these, \
re-run `rustc -W help` with a crate filename.");
}
(false, _) => fail!("didn't load lint plugins but got them anyway!"),
(true, 0) => println!("This crate does not load any lint plugins."),
(true, _) => {
println!("Lint checks provided by plugins loaded by this crate:\n");
print_lints(plugin);
}
}
}
fn describe_debug_flags() {
println!("\nAvailable debug options:\n");
let r = config::debugging_opts_map();
for tuple in r.iter() {
match *tuple {
(ref name, ref desc, _) => {
println!(" -Z {:>20s} -- {}", *name, *desc);
}
}
}
}
fn describe_codegen_flags() {
println!("\nAvailable codegen options:\n");
let mut cg = config::basic_codegen_options();
for &(name, parser, desc) in config::CG_OPTIONS.iter() {
// we invoke the parser function on `None` to see if this option needs
// an argument or not.
let (width, extra) = if parser(&mut cg, None) {
(25, "")
} else {
(21, "=val")
};
println!(" -C {:>width$s}{} -- {}", name.replace("_", "-"),
extra, desc, width=width);
}
}
/// Process command line options. Emits messages as appropriate. If compilation
/// should continue, returns a getopts::Matches object parsed from args, otherwise
/// returns None.
pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
// Throw away the first argument, the name of the binary
let _binary = args.shift().unwrap();
if args.is_empty() {
usage();
return None;
}
let matches =
match getopts::getopts(args.as_slice(), config::optgroups().as_slice()) {
Ok(m) => m,
Err(f) => {
early_error(f.to_string().as_slice());
}
};
if matches.opt_present("h") || matches.opt_present("help") {
usage();
return None;
}
// Don't handle -W help here, because we might first load plugins.
let r = matches.opt_strs("Z");
if r.iter().any(|x| x.as_slice() == "help") {
describe_debug_flags();
return None;
}
let cg_flags = matches.opt_strs("C");
if cg_flags.iter().any(|x| x.as_slice() == "help") {
describe_codegen_flags();
return None;
}
if cg_flags.contains(&"passes=list".to_string()) {
unsafe { ::llvm::LLVMRustPrintPasses(); }
return None;
}
if matches.opt_present("version") {
match version("rustc", &matches) {
Some(err) => early_error(err.as_slice()),
None => return None
}
}
Some(matches)
}
fn print_crate_info(sess: &Session,
input: &Input,
odir: &Option<Path>,
ofile: &Option<Path>)
-> bool {
let (crate_name, crate_file_name) = sess.opts.print_metas;
// these nasty nested conditions are to avoid doing extra work
if crate_name || crate_file_name {
let attrs = parse_crate_attrs(sess, input);
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs.as_slice(),
sess);
let id = link::find_crate_name(Some(sess), attrs.as_slice(), input);
if crate_name {
println!("{}", id);
}
if crate_file_name {
let crate_types = driver::collect_crate_types(sess, attrs.as_slice());
let metadata = driver::collect_crate_metadata(sess, attrs.as_slice());
*sess.crate_metadata.borrow_mut() = metadata;
for &style in crate_types.iter() {
let fname = link::filename_for_input(sess, style, id.as_slice(),
&t_outputs.with_extension(""));
println!("{}", fname.filename_display());
}
}
true
} else {
false
}
}
#[deriving(PartialEq, Show)]
pub enum PpSourceMode {
PpmNormal,
PpmExpanded,
PpmTyped,
PpmIdentified,
PpmExpandedIdentified,
}
#[deriving(PartialEq, Show)]
pub enum PpMode {
PpmSource(PpSourceMode),
PpmFlowGraph,
}
fn parse_pretty(sess: &Session, name: &str) -> (PpMode, Option<driver::UserIdentifiedItem>) {
let mut split = name.splitn(1, '=');
let first = split.next().unwrap();
let opt_second = split.next();
let first = match first {
"normal" => PpmSource(PpmNormal),
"expanded" => PpmSource(PpmExpanded),
"typed" => PpmSource(PpmTyped),
"expanded,identified" => PpmSource(PpmExpandedIdentified),
"identified" => PpmSource(PpmIdentified),
"flowgraph" => PpmFlowGraph,
_ => {
sess.fatal(format!(
"argument to `pretty` must be one of `normal`, \
`expanded`, `flowgraph=<nodeid>`, `typed`, `identified`, \
or `expanded,identified`; got {}", name).as_slice());
}
};
let opt_second = opt_second.and_then::<driver::UserIdentifiedItem>(from_str);
(first, opt_second)
}
fn parse_crate_attrs(sess: &Session, input: &Input) ->
Vec<ast::Attribute> {
let result = match *input {
FileInput(ref ifile) => {
parse::parse_crate_attrs_from_file(ifile,
Vec::new(),
&sess.parse_sess)
}
StrInput(ref src) => {
parse::parse_crate_attrs_from_source_str(
driver::anon_src().to_string(),
src.to_string(),
Vec::new(),
&sess.parse_sess)
}
};
result.move_iter().collect()
}
pub fn early_error(msg: &str) -> ! {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Fatal);
fail!(diagnostic::FatalError);
}
pub fn early_warn(msg: &str) {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
pub fn list_metadata(sess: &Session, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
metadata::loader::list_file_metadata(sess.targ_cfg.os, path, out)
}
/// Run a procedure which will detect failures in the compiler and print nicer
/// error messages rather than just failing the test.
///
/// The diagnostic emitter yielded to the procedure should be used for reporting
/// errors of the compiler.
pub fn monitor(f: proc():Send) {
// FIXME: This is a hack for newsched since it doesn't support split stacks.
// rustc needs a lot of stack! When optimizations are disabled, it needs
// even *more* stack than usual as well.
#[cfg(rtopt)]
static STACK_SIZE: uint = 6000000; // 6MB
#[cfg(not(rtopt))]
static STACK_SIZE: uint = 20000000; // 20MB
let (tx, rx) = channel();
let w = io::ChanWriter::new(tx);
let mut r = io::ChanReader::new(rx);
let mut task = TaskBuilder::new().named("rustc").stderr(box w);
// FIXME: Hacks on hacks. If the env is trying to override the stack size
// then *don't* set it explicitly.
if os::getenv("RUST_MIN_STACK").is_none() {
task = task.stack_size(STACK_SIZE);
}
match task.try(f) {
Ok(()) => { /* fallthrough */ }
Err(value) => {
// Task failed without emitting a fatal diagnostic
if !value.is::<diagnostic::FatalError>() {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
// a .span_bug or .bug call has already printed what
// it wants to print.
if !value.is::<diagnostic::ExplicitBug>() {
emitter.emit(
None,
"unexpected failure",
None,
diagnostic::Bug);
}
let xs = [
"the compiler hit an unexpected failure path. this is a bug.".to_string(),
format!("we would appreciate a bug report: {}",
BUG_REPORT_URL),
"run with `RUST_BACKTRACE=1` for a backtrace".to_string(),
];
for note in xs.iter() {
emitter.emit(None, note.as_slice(), None, diagnostic::Note)
}
match r.read_to_string() {
Ok(s) => println!("{}", s),
Err(e) => {
emitter.emit(None,
format!("failed to read internal \
stderr: {}",
e).as_slice(),
None,
diagnostic::Error)
}
}
}
// Fail so the process returns a failure code, but don't pollute the
// output with some unnecessary failure messages, we've already
// printed everything that we needed to.
io::stdio::set_stderr(box io::util::NullWriter);
fail!();
}
}
}
| run_compiler | identifier_name |
mod.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use syntax::diagnostic;
use back::link;
use driver::driver::{Input, FileInput, StrInput};
use driver::session::{Session, build_session};
use lint::Lint;
use lint;
use metadata;
use std::any::AnyRefExt;
use std::io;
use std::os;
use std::task::TaskBuilder;
use syntax::ast;
use syntax::parse;
use syntax::diagnostic::Emitter;
use syntax::diagnostics;
use getopts;
pub mod driver;
pub mod session;
pub mod config;
pub fn main_args(args: &[String]) -> int {
let owned_args = args.to_vec();
monitor(proc() run_compiler(owned_args.as_slice()));
0
}
static BUG_REPORT_URL: &'static str =
"http://doc.rust-lang.org/complement-bugreport.html";
fn run_compiler(args: &[String]) { |
let descriptions = diagnostics::registry::Registry::new(super::DIAGNOSTICS);
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(code.as_slice()) {
Some(ref description) => {
println!("{}", description);
}
None => {
early_error(format!("no extended information for {}", code).as_slice());
}
}
return;
},
None => ()
}
let sopts = config::build_session_options(&matches);
let (input, input_file_path) = match matches.free.len() {
0u => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
ls.register_builtin(None);
describe_lints(&ls, false);
return;
}
early_error("no input filename given");
}
1u => {
let ifile = matches.free.get(0).as_slice();
if ifile == "-" {
let contents = io::stdin().read_to_end().unwrap();
let src = String::from_utf8(contents).unwrap();
(StrInput(src), None)
} else {
(FileInput(Path::new(ifile)), Some(Path::new(ifile)))
}
}
_ => early_error("multiple input filenames provided")
};
let sess = build_session(sopts, input_file_path, descriptions);
let cfg = config::build_configuration(&sess);
let odir = matches.opt_str("out-dir").map(|o| Path::new(o));
let ofile = matches.opt_str("o").map(|o| Path::new(o));
let pretty = matches.opt_default("pretty", "normal").map(|a| {
parse_pretty(&sess, a.as_slice())
});
match pretty {
Some((ppm, opt_uii)) => {
driver::pretty_print_input(sess, cfg, &input, ppm, opt_uii, ofile);
return;
}
None => {/* continue */ }
}
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
FileInput(ref ifile) => {
let mut stdout = io::stdout();
list_metadata(&sess, &(*ifile), &mut stdout).unwrap();
}
StrInput(_) => {
early_error("can not list metadata for stdin");
}
}
return;
}
if print_crate_info(&sess, &input, &odir, &ofile) {
return;
}
driver::compile_input(sess, cfg, &input, &odir, &ofile, None);
}
/// Prints version information and returns None on success or an error
/// message on failure.
pub fn version(binary: &str, matches: &getopts::Matches) -> Option<String> {
let verbose = match matches.opt_str("version").as_ref().map(|s| s.as_slice()) {
None => false,
Some("verbose") => true,
Some(s) => return Some(format!("Unrecognized argument: {}", s))
};
println!("{} {}", binary, env!("CFG_VERSION"));
if verbose {
println!("binary: {}", binary);
println!("commit-hash: {}", option_env!("CFG_VER_HASH").unwrap_or("unknown"));
println!("commit-date: {}", option_env!("CFG_VER_DATE").unwrap_or("unknown"));
println!("host: {}", driver::host_triple());
println!("release: {}", env!("CFG_RELEASE"));
}
None
}
fn usage() {
let message = format!("Usage: rustc [OPTIONS] INPUT");
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc\n",
getopts::usage(message.as_slice(),
config::optgroups().as_slice()));
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.move_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(&y.name),
r => r,
}
});
lints
}
let (plugin, builtin) = lint_store.get_lints().partitioned(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
// FIXME (#7043): We should use the width in character cells rather than
// the number of codepoints.
let max_name_len = plugin.iter().chain(builtin.iter())
.map(|&s| s.name.char_len())
.max().unwrap_or(0);
let padded = |x: &str| {
" ".repeat(max_name_len - x.char_len()).append(x)
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7s} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7s} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints.move_iter() {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7s} {}",
padded(name.as_slice()), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
match (loaded_plugins, plugin.len()) {
(false, 0) => {
println!("Compiler plugins can provide additional lints. To see a listing of these, \
re-run `rustc -W help` with a crate filename.");
}
(false, _) => fail!("didn't load lint plugins but got them anyway!"),
(true, 0) => println!("This crate does not load any lint plugins."),
(true, _) => {
println!("Lint checks provided by plugins loaded by this crate:\n");
print_lints(plugin);
}
}
}
fn describe_debug_flags() {
println!("\nAvailable debug options:\n");
let r = config::debugging_opts_map();
for tuple in r.iter() {
match *tuple {
(ref name, ref desc, _) => {
println!(" -Z {:>20s} -- {}", *name, *desc);
}
}
}
}
fn describe_codegen_flags() {
println!("\nAvailable codegen options:\n");
let mut cg = config::basic_codegen_options();
for &(name, parser, desc) in config::CG_OPTIONS.iter() {
// we invoke the parser function on `None` to see if this option needs
// an argument or not.
let (width, extra) = if parser(&mut cg, None) {
(25, "")
} else {
(21, "=val")
};
println!(" -C {:>width$s}{} -- {}", name.replace("_", "-"),
extra, desc, width=width);
}
}
/// Process command line options. Emits messages as appropriate. If compilation
/// should continue, returns a getopts::Matches object parsed from args, otherwise
/// returns None.
pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
// Throw away the first argument, the name of the binary
let _binary = args.shift().unwrap();
if args.is_empty() {
usage();
return None;
}
let matches =
match getopts::getopts(args.as_slice(), config::optgroups().as_slice()) {
Ok(m) => m,
Err(f) => {
early_error(f.to_string().as_slice());
}
};
if matches.opt_present("h") || matches.opt_present("help") {
usage();
return None;
}
// Don't handle -W help here, because we might first load plugins.
let r = matches.opt_strs("Z");
if r.iter().any(|x| x.as_slice() == "help") {
describe_debug_flags();
return None;
}
let cg_flags = matches.opt_strs("C");
if cg_flags.iter().any(|x| x.as_slice() == "help") {
describe_codegen_flags();
return None;
}
if cg_flags.contains(&"passes=list".to_string()) {
unsafe { ::llvm::LLVMRustPrintPasses(); }
return None;
}
if matches.opt_present("version") {
match version("rustc", &matches) {
Some(err) => early_error(err.as_slice()),
None => return None
}
}
Some(matches)
}
fn print_crate_info(sess: &Session,
input: &Input,
odir: &Option<Path>,
ofile: &Option<Path>)
-> bool {
let (crate_name, crate_file_name) = sess.opts.print_metas;
// these nasty nested conditions are to avoid doing extra work
if crate_name || crate_file_name {
let attrs = parse_crate_attrs(sess, input);
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs.as_slice(),
sess);
let id = link::find_crate_name(Some(sess), attrs.as_slice(), input);
if crate_name {
println!("{}", id);
}
if crate_file_name {
let crate_types = driver::collect_crate_types(sess, attrs.as_slice());
let metadata = driver::collect_crate_metadata(sess, attrs.as_slice());
*sess.crate_metadata.borrow_mut() = metadata;
for &style in crate_types.iter() {
let fname = link::filename_for_input(sess, style, id.as_slice(),
&t_outputs.with_extension(""));
println!("{}", fname.filename_display());
}
}
true
} else {
false
}
}
#[deriving(PartialEq, Show)]
pub enum PpSourceMode {
PpmNormal,
PpmExpanded,
PpmTyped,
PpmIdentified,
PpmExpandedIdentified,
}
#[deriving(PartialEq, Show)]
pub enum PpMode {
PpmSource(PpSourceMode),
PpmFlowGraph,
}
fn parse_pretty(sess: &Session, name: &str) -> (PpMode, Option<driver::UserIdentifiedItem>) {
let mut split = name.splitn(1, '=');
let first = split.next().unwrap();
let opt_second = split.next();
let first = match first {
"normal" => PpmSource(PpmNormal),
"expanded" => PpmSource(PpmExpanded),
"typed" => PpmSource(PpmTyped),
"expanded,identified" => PpmSource(PpmExpandedIdentified),
"identified" => PpmSource(PpmIdentified),
"flowgraph" => PpmFlowGraph,
_ => {
sess.fatal(format!(
"argument to `pretty` must be one of `normal`, \
`expanded`, `flowgraph=<nodeid>`, `typed`, `identified`, \
or `expanded,identified`; got {}", name).as_slice());
}
};
let opt_second = opt_second.and_then::<driver::UserIdentifiedItem>(from_str);
(first, opt_second)
}
fn parse_crate_attrs(sess: &Session, input: &Input) ->
Vec<ast::Attribute> {
let result = match *input {
FileInput(ref ifile) => {
parse::parse_crate_attrs_from_file(ifile,
Vec::new(),
&sess.parse_sess)
}
StrInput(ref src) => {
parse::parse_crate_attrs_from_source_str(
driver::anon_src().to_string(),
src.to_string(),
Vec::new(),
&sess.parse_sess)
}
};
result.move_iter().collect()
}
pub fn early_error(msg: &str) -> ! {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Fatal);
fail!(diagnostic::FatalError);
}
pub fn early_warn(msg: &str) {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
pub fn list_metadata(sess: &Session, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
metadata::loader::list_file_metadata(sess.targ_cfg.os, path, out)
}
/// Run a procedure which will detect failures in the compiler and print nicer
/// error messages rather than just failing the test.
///
/// The diagnostic emitter yielded to the procedure should be used for reporting
/// errors of the compiler.
pub fn monitor(f: proc():Send) {
// FIXME: This is a hack for newsched since it doesn't support split stacks.
// rustc needs a lot of stack! When optimizations are disabled, it needs
// even *more* stack than usual as well.
#[cfg(rtopt)]
static STACK_SIZE: uint = 6000000; // 6MB
#[cfg(not(rtopt))]
static STACK_SIZE: uint = 20000000; // 20MB
let (tx, rx) = channel();
let w = io::ChanWriter::new(tx);
let mut r = io::ChanReader::new(rx);
let mut task = TaskBuilder::new().named("rustc").stderr(box w);
// FIXME: Hacks on hacks. If the env is trying to override the stack size
// then *don't* set it explicitly.
if os::getenv("RUST_MIN_STACK").is_none() {
task = task.stack_size(STACK_SIZE);
}
match task.try(f) {
Ok(()) => { /* fallthrough */ }
Err(value) => {
// Task failed without emitting a fatal diagnostic
if !value.is::<diagnostic::FatalError>() {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
// a .span_bug or .bug call has already printed what
// it wants to print.
if !value.is::<diagnostic::ExplicitBug>() {
emitter.emit(
None,
"unexpected failure",
None,
diagnostic::Bug);
}
let xs = [
"the compiler hit an unexpected failure path. this is a bug.".to_string(),
format!("we would appreciate a bug report: {}",
BUG_REPORT_URL),
"run with `RUST_BACKTRACE=1` for a backtrace".to_string(),
];
for note in xs.iter() {
emitter.emit(None, note.as_slice(), None, diagnostic::Note)
}
match r.read_to_string() {
Ok(s) => println!("{}", s),
Err(e) => {
emitter.emit(None,
format!("failed to read internal \
stderr: {}",
e).as_slice(),
None,
diagnostic::Error)
}
}
}
// Fail so the process returns a failure code, but don't pollute the
// output with some unnecessary failure messages, we've already
// printed everything that we needed to.
io::stdio::set_stderr(box io::util::NullWriter);
fail!();
}
}
} | let matches = match handle_options(Vec::from_slice(args)) {
Some(matches) => matches,
None => return
}; | random_line_split |
mod.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use syntax::diagnostic;
use back::link;
use driver::driver::{Input, FileInput, StrInput};
use driver::session::{Session, build_session};
use lint::Lint;
use lint;
use metadata;
use std::any::AnyRefExt;
use std::io;
use std::os;
use std::task::TaskBuilder;
use syntax::ast;
use syntax::parse;
use syntax::diagnostic::Emitter;
use syntax::diagnostics;
use getopts;
pub mod driver;
pub mod session;
pub mod config;
pub fn main_args(args: &[String]) -> int {
let owned_args = args.to_vec();
monitor(proc() run_compiler(owned_args.as_slice()));
0
}
static BUG_REPORT_URL: &'static str =
"http://doc.rust-lang.org/complement-bugreport.html";
fn run_compiler(args: &[String]) {
let matches = match handle_options(Vec::from_slice(args)) {
Some(matches) => matches,
None => return
};
let descriptions = diagnostics::registry::Registry::new(super::DIAGNOSTICS);
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(code.as_slice()) {
Some(ref description) => {
println!("{}", description);
}
None => {
early_error(format!("no extended information for {}", code).as_slice());
}
}
return;
},
None => ()
}
let sopts = config::build_session_options(&matches);
let (input, input_file_path) = match matches.free.len() {
0u => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
ls.register_builtin(None);
describe_lints(&ls, false);
return;
}
early_error("no input filename given");
}
1u => {
let ifile = matches.free.get(0).as_slice();
if ifile == "-" {
let contents = io::stdin().read_to_end().unwrap();
let src = String::from_utf8(contents).unwrap();
(StrInput(src), None)
} else {
(FileInput(Path::new(ifile)), Some(Path::new(ifile)))
}
}
_ => early_error("multiple input filenames provided")
};
let sess = build_session(sopts, input_file_path, descriptions);
let cfg = config::build_configuration(&sess);
let odir = matches.opt_str("out-dir").map(|o| Path::new(o));
let ofile = matches.opt_str("o").map(|o| Path::new(o));
let pretty = matches.opt_default("pretty", "normal").map(|a| {
parse_pretty(&sess, a.as_slice())
});
match pretty {
Some((ppm, opt_uii)) => {
driver::pretty_print_input(sess, cfg, &input, ppm, opt_uii, ofile);
return;
}
None => {/* continue */ }
}
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
FileInput(ref ifile) => {
let mut stdout = io::stdout();
list_metadata(&sess, &(*ifile), &mut stdout).unwrap();
}
StrInput(_) => |
}
return;
}
if print_crate_info(&sess, &input, &odir, &ofile) {
return;
}
driver::compile_input(sess, cfg, &input, &odir, &ofile, None);
}
/// Prints version information and returns None on success or an error
/// message on failure.
pub fn version(binary: &str, matches: &getopts::Matches) -> Option<String> {
let verbose = match matches.opt_str("version").as_ref().map(|s| s.as_slice()) {
None => false,
Some("verbose") => true,
Some(s) => return Some(format!("Unrecognized argument: {}", s))
};
println!("{} {}", binary, env!("CFG_VERSION"));
if verbose {
println!("binary: {}", binary);
println!("commit-hash: {}", option_env!("CFG_VER_HASH").unwrap_or("unknown"));
println!("commit-date: {}", option_env!("CFG_VER_DATE").unwrap_or("unknown"));
println!("host: {}", driver::host_triple());
println!("release: {}", env!("CFG_RELEASE"));
}
None
}
fn usage() {
let message = format!("Usage: rustc [OPTIONS] INPUT");
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc\n",
getopts::usage(message.as_slice(),
config::optgroups().as_slice()));
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.move_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(&y.name),
r => r,
}
});
lints
}
let (plugin, builtin) = lint_store.get_lints().partitioned(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
// FIXME (#7043): We should use the width in character cells rather than
// the number of codepoints.
let max_name_len = plugin.iter().chain(builtin.iter())
.map(|&s| s.name.char_len())
.max().unwrap_or(0);
let padded = |x: &str| {
" ".repeat(max_name_len - x.char_len()).append(x)
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7s} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7s} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints.move_iter() {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7s} {}",
padded(name.as_slice()), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
match (loaded_plugins, plugin.len()) {
(false, 0) => {
println!("Compiler plugins can provide additional lints. To see a listing of these, \
re-run `rustc -W help` with a crate filename.");
}
(false, _) => fail!("didn't load lint plugins but got them anyway!"),
(true, 0) => println!("This crate does not load any lint plugins."),
(true, _) => {
println!("Lint checks provided by plugins loaded by this crate:\n");
print_lints(plugin);
}
}
}
fn describe_debug_flags() {
println!("\nAvailable debug options:\n");
let r = config::debugging_opts_map();
for tuple in r.iter() {
match *tuple {
(ref name, ref desc, _) => {
println!(" -Z {:>20s} -- {}", *name, *desc);
}
}
}
}
fn describe_codegen_flags() {
println!("\nAvailable codegen options:\n");
let mut cg = config::basic_codegen_options();
for &(name, parser, desc) in config::CG_OPTIONS.iter() {
// we invoke the parser function on `None` to see if this option needs
// an argument or not.
let (width, extra) = if parser(&mut cg, None) {
(25, "")
} else {
(21, "=val")
};
println!(" -C {:>width$s}{} -- {}", name.replace("_", "-"),
extra, desc, width=width);
}
}
/// Process command line options. Emits messages as appropriate. If compilation
/// should continue, returns a getopts::Matches object parsed from args, otherwise
/// returns None.
pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
// Throw away the first argument, the name of the binary
let _binary = args.shift().unwrap();
if args.is_empty() {
usage();
return None;
}
let matches =
match getopts::getopts(args.as_slice(), config::optgroups().as_slice()) {
Ok(m) => m,
Err(f) => {
early_error(f.to_string().as_slice());
}
};
if matches.opt_present("h") || matches.opt_present("help") {
usage();
return None;
}
// Don't handle -W help here, because we might first load plugins.
let r = matches.opt_strs("Z");
if r.iter().any(|x| x.as_slice() == "help") {
describe_debug_flags();
return None;
}
let cg_flags = matches.opt_strs("C");
if cg_flags.iter().any(|x| x.as_slice() == "help") {
describe_codegen_flags();
return None;
}
if cg_flags.contains(&"passes=list".to_string()) {
unsafe { ::llvm::LLVMRustPrintPasses(); }
return None;
}
if matches.opt_present("version") {
match version("rustc", &matches) {
Some(err) => early_error(err.as_slice()),
None => return None
}
}
Some(matches)
}
fn print_crate_info(sess: &Session,
input: &Input,
odir: &Option<Path>,
ofile: &Option<Path>)
-> bool {
let (crate_name, crate_file_name) = sess.opts.print_metas;
// these nasty nested conditions are to avoid doing extra work
if crate_name || crate_file_name {
let attrs = parse_crate_attrs(sess, input);
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs.as_slice(),
sess);
let id = link::find_crate_name(Some(sess), attrs.as_slice(), input);
if crate_name {
println!("{}", id);
}
if crate_file_name {
let crate_types = driver::collect_crate_types(sess, attrs.as_slice());
let metadata = driver::collect_crate_metadata(sess, attrs.as_slice());
*sess.crate_metadata.borrow_mut() = metadata;
for &style in crate_types.iter() {
let fname = link::filename_for_input(sess, style, id.as_slice(),
&t_outputs.with_extension(""));
println!("{}", fname.filename_display());
}
}
true
} else {
false
}
}
#[deriving(PartialEq, Show)]
pub enum PpSourceMode {
PpmNormal,
PpmExpanded,
PpmTyped,
PpmIdentified,
PpmExpandedIdentified,
}
#[deriving(PartialEq, Show)]
pub enum PpMode {
PpmSource(PpSourceMode),
PpmFlowGraph,
}
fn parse_pretty(sess: &Session, name: &str) -> (PpMode, Option<driver::UserIdentifiedItem>) {
let mut split = name.splitn(1, '=');
let first = split.next().unwrap();
let opt_second = split.next();
let first = match first {
"normal" => PpmSource(PpmNormal),
"expanded" => PpmSource(PpmExpanded),
"typed" => PpmSource(PpmTyped),
"expanded,identified" => PpmSource(PpmExpandedIdentified),
"identified" => PpmSource(PpmIdentified),
"flowgraph" => PpmFlowGraph,
_ => {
sess.fatal(format!(
"argument to `pretty` must be one of `normal`, \
`expanded`, `flowgraph=<nodeid>`, `typed`, `identified`, \
or `expanded,identified`; got {}", name).as_slice());
}
};
let opt_second = opt_second.and_then::<driver::UserIdentifiedItem>(from_str);
(first, opt_second)
}
fn parse_crate_attrs(sess: &Session, input: &Input) ->
Vec<ast::Attribute> {
let result = match *input {
FileInput(ref ifile) => {
parse::parse_crate_attrs_from_file(ifile,
Vec::new(),
&sess.parse_sess)
}
StrInput(ref src) => {
parse::parse_crate_attrs_from_source_str(
driver::anon_src().to_string(),
src.to_string(),
Vec::new(),
&sess.parse_sess)
}
};
result.move_iter().collect()
}
pub fn early_error(msg: &str) -> ! {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Fatal);
fail!(diagnostic::FatalError);
}
pub fn early_warn(msg: &str) {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
pub fn list_metadata(sess: &Session, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
metadata::loader::list_file_metadata(sess.targ_cfg.os, path, out)
}
/// Run a procedure which will detect failures in the compiler and print nicer
/// error messages rather than just failing the test.
///
/// The diagnostic emitter yielded to the procedure should be used for reporting
/// errors of the compiler.
pub fn monitor(f: proc():Send) {
// FIXME: This is a hack for newsched since it doesn't support split stacks.
// rustc needs a lot of stack! When optimizations are disabled, it needs
// even *more* stack than usual as well.
#[cfg(rtopt)]
static STACK_SIZE: uint = 6000000; // 6MB
#[cfg(not(rtopt))]
static STACK_SIZE: uint = 20000000; // 20MB
let (tx, rx) = channel();
let w = io::ChanWriter::new(tx);
let mut r = io::ChanReader::new(rx);
let mut task = TaskBuilder::new().named("rustc").stderr(box w);
// FIXME: Hacks on hacks. If the env is trying to override the stack size
// then *don't* set it explicitly.
if os::getenv("RUST_MIN_STACK").is_none() {
task = task.stack_size(STACK_SIZE);
}
match task.try(f) {
Ok(()) => { /* fallthrough */ }
Err(value) => {
// Task failed without emitting a fatal diagnostic
if !value.is::<diagnostic::FatalError>() {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
// a .span_bug or .bug call has already printed what
// it wants to print.
if !value.is::<diagnostic::ExplicitBug>() {
emitter.emit(
None,
"unexpected failure",
None,
diagnostic::Bug);
}
let xs = [
"the compiler hit an unexpected failure path. this is a bug.".to_string(),
format!("we would appreciate a bug report: {}",
BUG_REPORT_URL),
"run with `RUST_BACKTRACE=1` for a backtrace".to_string(),
];
for note in xs.iter() {
emitter.emit(None, note.as_slice(), None, diagnostic::Note)
}
match r.read_to_string() {
Ok(s) => println!("{}", s),
Err(e) => {
emitter.emit(None,
format!("failed to read internal \
stderr: {}",
e).as_slice(),
None,
diagnostic::Error)
}
}
}
// Fail so the process returns a failure code, but don't pollute the
// output with some unnecessary failure messages, we've already
// printed everything that we needed to.
io::stdio::set_stderr(box io::util::NullWriter);
fail!();
}
}
}
| {
early_error("can not list metadata for stdin");
} | conditional_block |
mod.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use syntax::diagnostic;
use back::link;
use driver::driver::{Input, FileInput, StrInput};
use driver::session::{Session, build_session};
use lint::Lint;
use lint;
use metadata;
use std::any::AnyRefExt;
use std::io;
use std::os;
use std::task::TaskBuilder;
use syntax::ast;
use syntax::parse;
use syntax::diagnostic::Emitter;
use syntax::diagnostics;
use getopts;
pub mod driver;
pub mod session;
pub mod config;
pub fn main_args(args: &[String]) -> int {
let owned_args = args.to_vec();
monitor(proc() run_compiler(owned_args.as_slice()));
0
}
static BUG_REPORT_URL: &'static str =
"http://doc.rust-lang.org/complement-bugreport.html";
fn run_compiler(args: &[String]) {
let matches = match handle_options(Vec::from_slice(args)) {
Some(matches) => matches,
None => return
};
let descriptions = diagnostics::registry::Registry::new(super::DIAGNOSTICS);
match matches.opt_str("explain") {
Some(ref code) => {
match descriptions.find_description(code.as_slice()) {
Some(ref description) => {
println!("{}", description);
}
None => {
early_error(format!("no extended information for {}", code).as_slice());
}
}
return;
},
None => ()
}
let sopts = config::build_session_options(&matches);
let (input, input_file_path) = match matches.free.len() {
0u => {
if sopts.describe_lints {
let mut ls = lint::LintStore::new();
ls.register_builtin(None);
describe_lints(&ls, false);
return;
}
early_error("no input filename given");
}
1u => {
let ifile = matches.free.get(0).as_slice();
if ifile == "-" {
let contents = io::stdin().read_to_end().unwrap();
let src = String::from_utf8(contents).unwrap();
(StrInput(src), None)
} else {
(FileInput(Path::new(ifile)), Some(Path::new(ifile)))
}
}
_ => early_error("multiple input filenames provided")
};
let sess = build_session(sopts, input_file_path, descriptions);
let cfg = config::build_configuration(&sess);
let odir = matches.opt_str("out-dir").map(|o| Path::new(o));
let ofile = matches.opt_str("o").map(|o| Path::new(o));
let pretty = matches.opt_default("pretty", "normal").map(|a| {
parse_pretty(&sess, a.as_slice())
});
match pretty {
Some((ppm, opt_uii)) => {
driver::pretty_print_input(sess, cfg, &input, ppm, opt_uii, ofile);
return;
}
None => {/* continue */ }
}
let r = matches.opt_strs("Z");
if r.contains(&("ls".to_string())) {
match input {
FileInput(ref ifile) => {
let mut stdout = io::stdout();
list_metadata(&sess, &(*ifile), &mut stdout).unwrap();
}
StrInput(_) => {
early_error("can not list metadata for stdin");
}
}
return;
}
if print_crate_info(&sess, &input, &odir, &ofile) {
return;
}
driver::compile_input(sess, cfg, &input, &odir, &ofile, None);
}
/// Prints version information and returns None on success or an error
/// message on failure.
pub fn version(binary: &str, matches: &getopts::Matches) -> Option<String> |
fn usage() {
let message = format!("Usage: rustc [OPTIONS] INPUT");
println!("{}\n\
Additional help:
-C help Print codegen options
-W help Print 'lint' options and default settings
-Z help Print internal options for debugging rustc\n",
getopts::usage(message.as_slice(),
config::optgroups().as_slice()));
}
fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) {
println!("
Available lint options:
-W <foo> Warn about <foo>
-A <foo> Allow <foo>
-D <foo> Deny <foo>
-F <foo> Forbid <foo> (deny, and deny all overrides)
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
let mut lints: Vec<_> = lints.move_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
Equal => x.name.cmp(&y.name),
r => r,
}
});
lints
}
let (plugin, builtin) = lint_store.get_lints().partitioned(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
// FIXME (#7043): We should use the width in character cells rather than
// the number of codepoints.
let max_name_len = plugin.iter().chain(builtin.iter())
.map(|&s| s.name.char_len())
.max().unwrap_or(0);
let padded = |x: &str| {
" ".repeat(max_name_len - x.char_len()).append(x)
};
println!("Lint checks provided by rustc:\n");
println!(" {} {:7.7s} {}", padded("name"), "default", "meaning");
println!(" {} {:7.7s} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
for lint in lints.move_iter() {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7s} {}",
padded(name.as_slice()), lint.default_level.as_str(), lint.desc);
}
println!("\n");
};
print_lints(builtin);
match (loaded_plugins, plugin.len()) {
(false, 0) => {
println!("Compiler plugins can provide additional lints. To see a listing of these, \
re-run `rustc -W help` with a crate filename.");
}
(false, _) => fail!("didn't load lint plugins but got them anyway!"),
(true, 0) => println!("This crate does not load any lint plugins."),
(true, _) => {
println!("Lint checks provided by plugins loaded by this crate:\n");
print_lints(plugin);
}
}
}
fn describe_debug_flags() {
println!("\nAvailable debug options:\n");
let r = config::debugging_opts_map();
for tuple in r.iter() {
match *tuple {
(ref name, ref desc, _) => {
println!(" -Z {:>20s} -- {}", *name, *desc);
}
}
}
}
fn describe_codegen_flags() {
println!("\nAvailable codegen options:\n");
let mut cg = config::basic_codegen_options();
for &(name, parser, desc) in config::CG_OPTIONS.iter() {
// we invoke the parser function on `None` to see if this option needs
// an argument or not.
let (width, extra) = if parser(&mut cg, None) {
(25, "")
} else {
(21, "=val")
};
println!(" -C {:>width$s}{} -- {}", name.replace("_", "-"),
extra, desc, width=width);
}
}
/// Process command line options. Emits messages as appropriate. If compilation
/// should continue, returns a getopts::Matches object parsed from args, otherwise
/// returns None.
pub fn handle_options(mut args: Vec<String>) -> Option<getopts::Matches> {
// Throw away the first argument, the name of the binary
let _binary = args.shift().unwrap();
if args.is_empty() {
usage();
return None;
}
let matches =
match getopts::getopts(args.as_slice(), config::optgroups().as_slice()) {
Ok(m) => m,
Err(f) => {
early_error(f.to_string().as_slice());
}
};
if matches.opt_present("h") || matches.opt_present("help") {
usage();
return None;
}
// Don't handle -W help here, because we might first load plugins.
let r = matches.opt_strs("Z");
if r.iter().any(|x| x.as_slice() == "help") {
describe_debug_flags();
return None;
}
let cg_flags = matches.opt_strs("C");
if cg_flags.iter().any(|x| x.as_slice() == "help") {
describe_codegen_flags();
return None;
}
if cg_flags.contains(&"passes=list".to_string()) {
unsafe { ::llvm::LLVMRustPrintPasses(); }
return None;
}
if matches.opt_present("version") {
match version("rustc", &matches) {
Some(err) => early_error(err.as_slice()),
None => return None
}
}
Some(matches)
}
fn print_crate_info(sess: &Session,
input: &Input,
odir: &Option<Path>,
ofile: &Option<Path>)
-> bool {
let (crate_name, crate_file_name) = sess.opts.print_metas;
// these nasty nested conditions are to avoid doing extra work
if crate_name || crate_file_name {
let attrs = parse_crate_attrs(sess, input);
let t_outputs = driver::build_output_filenames(input,
odir,
ofile,
attrs.as_slice(),
sess);
let id = link::find_crate_name(Some(sess), attrs.as_slice(), input);
if crate_name {
println!("{}", id);
}
if crate_file_name {
let crate_types = driver::collect_crate_types(sess, attrs.as_slice());
let metadata = driver::collect_crate_metadata(sess, attrs.as_slice());
*sess.crate_metadata.borrow_mut() = metadata;
for &style in crate_types.iter() {
let fname = link::filename_for_input(sess, style, id.as_slice(),
&t_outputs.with_extension(""));
println!("{}", fname.filename_display());
}
}
true
} else {
false
}
}
#[deriving(PartialEq, Show)]
pub enum PpSourceMode {
PpmNormal,
PpmExpanded,
PpmTyped,
PpmIdentified,
PpmExpandedIdentified,
}
#[deriving(PartialEq, Show)]
pub enum PpMode {
PpmSource(PpSourceMode),
PpmFlowGraph,
}
fn parse_pretty(sess: &Session, name: &str) -> (PpMode, Option<driver::UserIdentifiedItem>) {
let mut split = name.splitn(1, '=');
let first = split.next().unwrap();
let opt_second = split.next();
let first = match first {
"normal" => PpmSource(PpmNormal),
"expanded" => PpmSource(PpmExpanded),
"typed" => PpmSource(PpmTyped),
"expanded,identified" => PpmSource(PpmExpandedIdentified),
"identified" => PpmSource(PpmIdentified),
"flowgraph" => PpmFlowGraph,
_ => {
sess.fatal(format!(
"argument to `pretty` must be one of `normal`, \
`expanded`, `flowgraph=<nodeid>`, `typed`, `identified`, \
or `expanded,identified`; got {}", name).as_slice());
}
};
let opt_second = opt_second.and_then::<driver::UserIdentifiedItem>(from_str);
(first, opt_second)
}
fn parse_crate_attrs(sess: &Session, input: &Input) ->
Vec<ast::Attribute> {
let result = match *input {
FileInput(ref ifile) => {
parse::parse_crate_attrs_from_file(ifile,
Vec::new(),
&sess.parse_sess)
}
StrInput(ref src) => {
parse::parse_crate_attrs_from_source_str(
driver::anon_src().to_string(),
src.to_string(),
Vec::new(),
&sess.parse_sess)
}
};
result.move_iter().collect()
}
pub fn early_error(msg: &str) -> ! {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Fatal);
fail!(diagnostic::FatalError);
}
pub fn early_warn(msg: &str) {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
pub fn list_metadata(sess: &Session, path: &Path,
out: &mut io::Writer) -> io::IoResult<()> {
metadata::loader::list_file_metadata(sess.targ_cfg.os, path, out)
}
/// Run a procedure which will detect failures in the compiler and print nicer
/// error messages rather than just failing the test.
///
/// The diagnostic emitter yielded to the procedure should be used for reporting
/// errors of the compiler.
pub fn monitor(f: proc():Send) {
// FIXME: This is a hack for newsched since it doesn't support split stacks.
// rustc needs a lot of stack! When optimizations are disabled, it needs
// even *more* stack than usual as well.
#[cfg(rtopt)]
static STACK_SIZE: uint = 6000000; // 6MB
#[cfg(not(rtopt))]
static STACK_SIZE: uint = 20000000; // 20MB
let (tx, rx) = channel();
let w = io::ChanWriter::new(tx);
let mut r = io::ChanReader::new(rx);
let mut task = TaskBuilder::new().named("rustc").stderr(box w);
// FIXME: Hacks on hacks. If the env is trying to override the stack size
// then *don't* set it explicitly.
if os::getenv("RUST_MIN_STACK").is_none() {
task = task.stack_size(STACK_SIZE);
}
match task.try(f) {
Ok(()) => { /* fallthrough */ }
Err(value) => {
// Task failed without emitting a fatal diagnostic
if !value.is::<diagnostic::FatalError>() {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
// a .span_bug or .bug call has already printed what
// it wants to print.
if !value.is::<diagnostic::ExplicitBug>() {
emitter.emit(
None,
"unexpected failure",
None,
diagnostic::Bug);
}
let xs = [
"the compiler hit an unexpected failure path. this is a bug.".to_string(),
format!("we would appreciate a bug report: {}",
BUG_REPORT_URL),
"run with `RUST_BACKTRACE=1` for a backtrace".to_string(),
];
for note in xs.iter() {
emitter.emit(None, note.as_slice(), None, diagnostic::Note)
}
match r.read_to_string() {
Ok(s) => println!("{}", s),
Err(e) => {
emitter.emit(None,
format!("failed to read internal \
stderr: {}",
e).as_slice(),
None,
diagnostic::Error)
}
}
}
// Fail so the process returns a failure code, but don't pollute the
// output with some unnecessary failure messages, we've already
// printed everything that we needed to.
io::stdio::set_stderr(box io::util::NullWriter);
fail!();
}
}
}
| {
let verbose = match matches.opt_str("version").as_ref().map(|s| s.as_slice()) {
None => false,
Some("verbose") => true,
Some(s) => return Some(format!("Unrecognized argument: {}", s))
};
println!("{} {}", binary, env!("CFG_VERSION"));
if verbose {
println!("binary: {}", binary);
println!("commit-hash: {}", option_env!("CFG_VER_HASH").unwrap_or("unknown"));
println!("commit-date: {}", option_env!("CFG_VER_DATE").unwrap_or("unknown"));
println!("host: {}", driver::host_triple());
println!("release: {}", env!("CFG_RELEASE"));
}
None
} | identifier_body |
DesktopView.tsx | /*
*
* Tabs
*
*/
import { FC, useEffect, useRef, useState, useCallback, memo } from 'react'
import { isEmpty, findIndex } from 'ramda'
import type { TSIZE_SM, TTabItem, TC11NLayout } from '@/spec'
import usePlatform from '@/hooks/usePlatform'
import { SIZE, C11N } from '@/constant'
import { isString } from '@/utils/validator'
import { buildLog } from '@/utils/logger'
import TabItem from './TabItem'
import { Wrapper, Nav, SlipBar, RealBar } from '../styles/tabs'
import { getSlipMargin } from '../styles/metric/tabs'
/* eslint-disable-next-line */
const log = buildLog('c:Tabs:index')
// const defaultItems2 = ['帖子', '开源项目', 'Cheatsheet', '工作机会', '职场']
const temItems = [
{
title: '帖子',
raw: 'posts',
// icon: `${ICON_CMD}/navi/fire.svg`,
localIcon: 'settings',
},
]
/**
* get default active key in tabs array
* if not found, return 0 as first
*
* @param {array of string or object} items
* @param {string} activeKey
* @returns number
*/
const getDefaultActiveTabIndex = (
items: TTabItem[],
activeKey: string,
): number => {
if (isEmpty(activeKey)) return 0
const index = findIndex((item) => {
return activeKey === (item.raw || item.title)
}, items)
return index >= 0 ? index : 0
}
type TProps = {
items?: TTabItem[]
layout?: TC11NLayout
onChange: () => void
activeKey?: string
size: TSIZE_SM
slipHeight: '1px' | '2px'
bottomSpace?: number
}
const Tabs: FC<TProps> = ({
size = SIZE.MEDIUM,
onChange = log,
items = temItems,
layout = C11N.CLASSIC,
activeKey = '',
slipHeight = '2px',
bottomSpace = 0,
}) => {
const { isMobile } = usePlatform()
const defaultActiveTabIndex = getDefaultActiveTabIndex(items, activeKey)
const [active, setActive] = useState(defaultActiveTabIndex)
const [slipWidth, setSlipWidth] = useState(0)
const [tabWidthList, setTabWidthList] = useState([])
const navRef = useRef(null)
// set initial slipbar with of active item
// 给 slipbar 设置一个初始宽度
useEffect(() => {
if (navRef.current) {
const activeSlipWidth =
navRef | faultActiveTabIndex])
// set slipbar with for current nav item
// 为下面的滑动条设置当前 TabItem 的宽度
const handleNaviItemWith = useCallback(
(index, width) => {
tabWidthList[index] = width
setTabWidthList(tabWidthList)
},
[tabWidthList],
)
const handleItemClick = useCallback(
(index, e) => {
const item = items[index]
setSlipWidth(e.target.offsetWidth)
setActive(index)
onChange(isString(item) ? item : item.raw || item.title)
},
[setSlipWidth, setActive, onChange, items],
)
const translateX = `${
tabWidthList.slice(0, active).reduce((a, b) => a + b, 0) +
getSlipMargin(size, isMobile) * active
}px`
return (
<Wrapper testid="tabs">
<Nav ref={navRef}>
{items.map((item, index) => (
<TabItem
key={isString(item) ? item : item.raw || item.title}
mobileView={isMobile}
holyGrailView={layout === C11N.HOLY_GRAIL}
activeKey={activeKey}
index={index}
item={item}
size={size}
bottomSpace={bottomSpace}
setItemWidth={handleNaviItemWith}
onClick={handleItemClick}
/>
))}
<SlipBar
translateX={translateX}
width={`${tabWidthList[active]}px`}
slipHeight={slipHeight}
>
<RealBar
width={`${size === SIZE.MEDIUM ? slipWidth : slipWidth - 6}px`}
/>
</SlipBar>
</Nav>
</Wrapper>
)
}
export default memo(Tabs)
| .current.childNodes[defaultActiveTabIndex].firstElementChild
.offsetWidth
setSlipWidth(activeSlipWidth)
}
setActive(defaultActiveTabIndex)
}, [de | conditional_block |
DesktopView.tsx | /*
*
* Tabs
*
*/
import { FC, useEffect, useRef, useState, useCallback, memo } from 'react'
import { isEmpty, findIndex } from 'ramda'
import type { TSIZE_SM, TTabItem, TC11NLayout } from '@/spec'
import usePlatform from '@/hooks/usePlatform'
import { SIZE, C11N } from '@/constant'
import { isString } from '@/utils/validator'
import { buildLog } from '@/utils/logger'
import TabItem from './TabItem'
import { Wrapper, Nav, SlipBar, RealBar } from '../styles/tabs'
import { getSlipMargin } from '../styles/metric/tabs'
/* eslint-disable-next-line */
const log = buildLog('c:Tabs:index')
// const defaultItems2 = ['帖子', '开源项目', 'Cheatsheet', '工作机会', '职场']
const temItems = [
{
title: '帖子',
raw: 'posts',
// icon: `${ICON_CMD}/navi/fire.svg`,
localIcon: 'settings',
},
]
/**
* get default active key in tabs array
* if not found, return 0 as first
*
* @param {array of string or object} items
* @param {string} activeKey
* @returns number
*/
const getDefaultActiveTabIndex = (
items: TTabItem[],
activeKey: string,
): number => {
if (isEmpty(activeKey)) return 0
const index = findIndex((item) => {
return activeKey === (item.raw || item.title)
}, items)
return index >= 0 ? index : 0
}
type TProps = {
items?: TTabItem[]
layout?: TC11NLayout
onChange: () => void
activeKey?: string
size: TSIZE_SM
slipHeight: '1px' | '2px'
bottomSpace?: number
}
const Tabs: FC<TProps> = ({
size = SIZE.MEDIUM,
onChange = log,
items = temItems,
layout = C11N.CLASSIC,
activeKey = '',
slipHeight = '2px',
bottomSpace = 0,
}) => {
const { isMobile } = usePlatform()
const defaultActiveTabIndex = getDefaultActiveTabIndex(items, activeKey)
const [active, setActive] = useState(defaultActiveTabIndex)
const [slipWidth, setSlipWidth] = useState(0)
const [tabWidthList, setTabWidthList] = useState([])
const navRef = useRef(null)
// set initial slipbar with of active item
// 给 slipbar 设置一个初始宽度
useEffect(() => {
if (navRef.current) {
const activeSlipWidth =
navRef.current.childNodes[defaultActiveTabIndex].firstElementChild
.offsetWidth
setSlipWidth(activeSlipWidth)
}
setActive(defaultActiveTabIndex)
}, [defaultActiveTabIndex])
// set slipbar with for current nav item
// 为下面的滑动条设置当前 TabItem 的宽度
const handleNaviItemWith = useCallback(
(index, width) => {
tabWidthList[index] = width
setTabWidthList(tabWidthList)
},
[tabWidthList],
)
const handleItemClick = useCallback(
(index, e) => {
const item = items[index]
setSlipWidth(e.target.offsetWidth)
setActive(index)
onChange(isString(item) ? item : item.raw || item.title)
},
[setSlipWidth, setActive, onChange, items],
)
const translateX = `${
tabWidthList.slice(0, active).reduce((a, b) => a + b, 0) +
getSlipMargin(size, isMobile) * active
}px`
return (
<Wrapper testid="tabs">
<Nav ref={navRef}>
{items.map((item, index) => (
<TabItem
key={isString(item) ? item : item.raw || item.title}
mobileView={isMobile}
holyGrailView={layout === C11N.HOLY_GRAIL}
activeKey={activeKey}
index={index}
item={item}
size={size}
bottomSpace={bottomSpace}
setItemWidth={handleNaviItemWith}
onClick={handleItemClick}
/>
))}
<SlipBar
translateX={translateX}
width={`${tabWidthList[active]}px`}
slipHeight={slipHeight}
>
<RealBar
width={`${size === SIZE.MEDIUM ? slipWidth : slipWidth - 6}px`}
/>
</SlipBar>
</Nav>
</Wrapper>
)
} |
export default memo(Tabs) | random_line_split | |
general_spinless_majorana_opstr_test.py | from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spinless_fermion_basis_general
from quspin.operators import hamiltonian
import numpy as np
J=-np.sqrt(2.0) # hoppping
U=+1.0 # nn interaction
no_checks=dict(check_symm=False, check_pcon=False, check_herm=False)
for N in range(6,10):
###### setting up user-defined symmetry transformations for 2d lattice ######
| s = np.arange(N) # sites [0,1,2,....]
T = (s+1)%N # translation
P = s[::-1] # reflection
#
###### setting up bases ######
basis=spinless_fermion_basis_general(N, tblock=(T,0),pblock=(P,0),)
#basis=spinless_fermion_basis_general(N,pblock=(P,0),)#pblock=(P,0),)
#basis=spinless_fermion_basis_general(N,tblock=(T,0),)#pblock=(P,0),)
#print(basis)
#
#
##### Hamiltonian using Majorana fermions
#
#
hop_term_p=[[+0.5j*J,j,(j+1)%N] for j in range(N)]
hop_term_m=[[-0.5j*J,j,(j+1)%N] for j in range(N)]
density_term=[[+0.5j*U,j,j] for j in range(N)]
int_term=[[-0.25*U,j,j,(j+1)%N,(j+1)%N] for j in range(N)]
id_term=[[0.25*U,j] for j in range(N)]
#
static=[['xy',hop_term_p],['yx',hop_term_m], # kinetic energy
['I',id_term],['xy',density_term],['xyxy',int_term], # nn interaction energy
]
dynamic=[]
#
H_majorana=hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks)
#
#
##### Hamiltonian using complex fermions
#
#
hopping_pm=[[+J,j,(j+1)%N] for j in range(N)]
hopping_mp=[[-J,j,(j+1)%N] for j in range(N)]
nn_int=[[U,j,(j+1)%N] for j in range(N)]
#
static=[["+-",hopping_pm],["-+",hopping_mp],["nn",nn_int]]
dynamic=[]
#
H=hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks)
#######################################
print("\ntesting N={}...".format(N))
print(H.toarray())
print()
print(H_majorana.toarray())
print()
print(np.linalg.norm((H-H_majorana).toarray()))
np.testing.assert_allclose((H_majorana-H).toarray(),0,atol=1e-12) | conditional_block | |
general_spinless_majorana_opstr_test.py | from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spinless_fermion_basis_general
from quspin.operators import hamiltonian
import numpy as np
J=-np.sqrt(2.0) # hoppping
U=+1.0 # nn interaction
no_checks=dict(check_symm=False, check_pcon=False, check_herm=False)
for N in range(6,10):
###### setting up user-defined symmetry transformations for 2d lattice ######
s = np.arange(N) # sites [0,1,2,....]
T = (s+1)%N # translation
P = s[::-1] # reflection
#
###### setting up bases ######
basis=spinless_fermion_basis_general(N, tblock=(T,0),pblock=(P,0),)
#basis=spinless_fermion_basis_general(N,pblock=(P,0),)#pblock=(P,0),)
#basis=spinless_fermion_basis_general(N,tblock=(T,0),)#pblock=(P,0),)
#print(basis)
#
# | hop_term_m=[[-0.5j*J,j,(j+1)%N] for j in range(N)]
density_term=[[+0.5j*U,j,j] for j in range(N)]
int_term=[[-0.25*U,j,j,(j+1)%N,(j+1)%N] for j in range(N)]
id_term=[[0.25*U,j] for j in range(N)]
#
static=[['xy',hop_term_p],['yx',hop_term_m], # kinetic energy
['I',id_term],['xy',density_term],['xyxy',int_term], # nn interaction energy
]
dynamic=[]
#
H_majorana=hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks)
#
#
##### Hamiltonian using complex fermions
#
#
hopping_pm=[[+J,j,(j+1)%N] for j in range(N)]
hopping_mp=[[-J,j,(j+1)%N] for j in range(N)]
nn_int=[[U,j,(j+1)%N] for j in range(N)]
#
static=[["+-",hopping_pm],["-+",hopping_mp],["nn",nn_int]]
dynamic=[]
#
H=hamiltonian(static,[],basis=basis,dtype=np.float64,**no_checks)
#######################################
print("\ntesting N={}...".format(N))
print(H.toarray())
print()
print(H_majorana.toarray())
print()
print(np.linalg.norm((H-H_majorana).toarray()))
np.testing.assert_allclose((H_majorana-H).toarray(),0,atol=1e-12) | ##### Hamiltonian using Majorana fermions
#
#
hop_term_p=[[+0.5j*J,j,(j+1)%N] for j in range(N)] | random_line_split |
combine_array_len.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| }
fn main() {
assert_eq!(norm2([3.0, 4.0]), 5.0*5.0);
}
// END RUST SOURCE
// START rustc.norm2.InstCombine.before.mir
// _4 = Len(_1);
// ...
// _8 = Len(_1);
// END rustc.norm2.InstCombine.before.mir
// START rustc.norm2.InstCombine.after.mir
// _4 = const 2usize;
// ...
// _8 = const 2usize;
// END rustc.norm2.InstCombine.after.mir | fn norm2(x: [f32; 2]) -> f32 {
let a = x[0];
let b = x[1];
a*a + b*b | random_line_split |
combine_array_len.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn norm2(x: [f32; 2]) -> f32 {
let a = x[0];
let b = x[1];
a*a + b*b
}
fn main() |
// END RUST SOURCE
// START rustc.norm2.InstCombine.before.mir
// _4 = Len(_1);
// ...
// _8 = Len(_1);
// END rustc.norm2.InstCombine.before.mir
// START rustc.norm2.InstCombine.after.mir
// _4 = const 2usize;
// ...
// _8 = const 2usize;
// END rustc.norm2.InstCombine.after.mir
| {
assert_eq!(norm2([3.0, 4.0]), 5.0*5.0);
} | identifier_body |
combine_array_len.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn norm2(x: [f32; 2]) -> f32 {
let a = x[0];
let b = x[1];
a*a + b*b
}
fn | () {
assert_eq!(norm2([3.0, 4.0]), 5.0*5.0);
}
// END RUST SOURCE
// START rustc.norm2.InstCombine.before.mir
// _4 = Len(_1);
// ...
// _8 = Len(_1);
// END rustc.norm2.InstCombine.before.mir
// START rustc.norm2.InstCombine.after.mir
// _4 = const 2usize;
// ...
// _8 = const 2usize;
// END rustc.norm2.InstCombine.after.mir
| main | identifier_name |
datatype-date-format_fi.js | /*
Copyright (c) 2010, Yahoo! Inc. All rights reserved.
Code licensed under the BSD License:
| build: 3167
*/
YUI.add("lang/datatype-date-format_fi",function(a){a.Intl.add("datatype-date-format","fi",{"a":["su","ma","ti","ke","to","pe","la"],"A":["sunnuntaina","maanantaina","tiistaina","keskiviikkona","torstaina","perjantaina","lauantaina"],"b":["tammikuuta","helmikuuta","maaliskuuta","huhtikuuta","toukokuuta","kesäkuuta","heinäkuuta","elokuuta","syyskuuta","lokakuuta","marraskuuta","joulukuuta"],"B":["tammikuuta","helmikuuta","maaliskuuta","huhtikuuta","toukokuuta","kesäkuuta","heinäkuuta","elokuuta","syyskuuta","lokakuuta","marraskuuta","joulukuuta"],"c":"%a %d. %b %Y %k.%M.%S %Z","p":["AP.","IP."],"P":["ap.","ip."],"x":"%d.%m.%Y","X":"%k.%M.%S"});},"3.3.0"); | http://developer.yahoo.com/yui/license.html
version: 3.3.0
| random_line_split |
Case.js | /**
* @providesModule Case
*/
const DOM = require('DOM');
var Case = (function () {
/**
* A Case is a test against an element.
*/
function Case (attributes) {
return new Case.fn.init(attributes);
}
// Prototype object of the Case.
Case.fn = Case.prototype = {
constructor: Case,
init: function (attributes) {
this.listeners = {};
this.timeout = null;
this.attributes = attributes || {};
var that = this;
// Dispatch a resolve event if the case is initiated with a status.
if (this.attributes.status) {
// Delay the status dispatch to the next execution cycle so that the
// Case will register listeners in this execution cycle first.
setTimeout(function () {
that.resolve();
}, 0);
}
// Set up a time out for this case to resolve within.
else {
this.attributes.status = 'untested';
this.timeout = setTimeout(function () {
that.giveup();
}, 350);
}
return this;
},
// Details of the Case.
attributes: null,
get: function (attr) {
return this.attributes[attr];
},
set: function (attr, value) {
var isStatusChanged = false;
// Allow an object of attributes to be passed in.
if (typeof attr === 'object') {
for (var prop in attr) {
if (attr.hasOwnProperty(prop)) {
if (prop === 'status') {
isStatusChanged = true;
}
this.attributes[prop] = attr[prop];
}
}
}
// Assign a single attribute value.
else {
if (attr === 'status') {
isStatusChanged = true;
}
this.attributes[attr] = value;
}
if (isStatusChanged) {
this.resolve();
}
return this;
},
/**
* A test that determines if a case has one of a set of statuses.
*
* @return boolean
* A bit that indicates if the case has one of the supplied statuses.
*/
hasStatus: function (statuses) {
// This is a rought test of arrayness.
if (typeof statuses !== 'object') {
statuses = [statuses];
}
var status = this.get('status');
for (var i = 0, il = statuses.length; i < il; ++i) {
if (statuses[i] === status) {
return true;
}
}
return false;
},
/**
* Dispatches the resolve event; clears the timeout fallback event.
*/
resolve: function () {
clearTimeout(this.timeout);
var el = this.attributes.element;
var outerEl;
// Get a selector and HTML if an element is provided.
if (el && el.nodeType && el.nodeType === 1) {
// Allow a test to provide a selector. Programmatically find one if none
// is provided.
this.attributes.selector = this.defineUniqueSelector(el);
// Get a serialized HTML representation of the element the raised the error
// if the Test did not provide it.
if (!this.attributes.html) {
this.attributes.html = '';
// If the element is either the <html> or <body> elements,
// just report that. Otherwise we might be returning the entire page
// as a string.
if (el.nodeName === 'HTML' || el.nodeName === 'BODY') {
this.attributes.html = '<' + el.nodeName + '>';
}
// Get the parent node in order to get the innerHTML for the selected
// element. Trim wrapping whitespace, remove linebreaks and spaces.
else if (typeof el.outerHTML === 'string') {
outerEl = el.outerHTML.trim().replace(/(\r\n|\n|\r)/gm, '').replace(/>\s+</g, '><');
// Guard against insanely long elements.
// @todo, make this length configurable eventually.
if (outerEl.length > 200) {
outerEl = outerEl.substr(0, 200) + '... [truncated]';
}
this.attributes.html = outerEl;
}
}
}
this.dispatch('resolve', this);
},
/**
* Abandons the Case if it not resolved within the timeout period.
*/
giveup: function () {
clearTimeout(this.timeout);
// @todo, the set method should really have a 'silent' option.
this.attributes.status = 'untested';
this.dispatch('timeout', this);
},
// @todo, make this a set of methods that all classes extend.
listenTo: function (dispatcher, eventName, handler) {
handler = handler.bind(this);
dispatcher.registerListener.call(dispatcher, eventName, handler);
},
registerListener: function (eventName, handler) {
if (!this.listeners[eventName]) {
this.listeners[eventName] = [];
}
this.listeners[eventName].push(handler);
},
dispatch: function (eventName) {
if (this.listeners[eventName] && this.listeners[eventName].length) {
var eventArgs = [].slice.call(arguments);
this.listeners[eventName].forEach(function (handler) {
// Pass any additional arguments from the event dispatcher to the
// handler function.
handler.apply(null, eventArgs);
});
}
},
/**
* Creates a page-unique selector for the selected DOM element.
*
* @param {jQuery} element
* An element in a jQuery wrapper.
*
* @return {string}
* A unique selector for this element.
*/
defineUniqueSelector: function (element) {
/**
* Indicates whether the selector string represents a unique DOM element.
*
* @param {string} selector
* A string selector that can be used to query a DOM element.
*
* @return Boolean
* Whether or not the selector string represents a unique DOM element.
*/
function isUniquePath (selector) {
return DOM.scry(selector).length === 1;
}
/**
* Creates a selector from the element's id attribute.
*
* Temporary IDs created by the module that contain "visitorActions" are excluded.
*
* @param {HTMLElement} element
*
* @return {string}
* An id selector or an empty string.
*/
function applyID (element) {
var selector = '';
var id = element.id || '';
if (id.length > 0) {
selector = '#' + id;
}
return selector;
}
/**
* Creates a selector from classes on the element.
*
* Classes with known functional components like the word 'active' are
* excluded because these often denote state, not identity.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of classes or an empty string.
*/
function applyClasses (element) {
var selector = '';
// Try to make a selector from the element's classes.
var classes = element.className || '';
if (classes.length > 0) {
classes = classes.split(/\s+/);
// Filter out classes that might represent state.
classes = reject(classes, function (cl) {
return (/active|enabled|disabled|first|last|only|collapsed|open|clearfix|processed/).test(cl);
});
if (classes.length > 0) {
return '.' + classes.join('.');
}
}
return selector;
}
/**
* Finds attributes on the element and creates a selector from them.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of attributes or an empty string.
*/
function applyAttributes (element) {
var selector = '';
// Whitelisted attributes to include in a selector to disambiguate it.
var attributes = ['href', 'type', 'title', 'alt'];
var value;
if (typeof element === 'undefined' ||
typeof element.attributes === 'undefined' ||
element.attributes === null) {
return selector;
}
// Try to make a selector from the element's classes.
for (var i = 0, len = attributes.length; i < len; i++) {
value = element.attributes[attributes[i]] && element.attributes[attributes[i]].value;
if (value) {
selector += '[' + attributes[i] + '="' + value + '"]';
}
}
return selector;
}
/**
* Creates a unique selector using id, classes and attributes.
*
* It is possible that the selector will not be unique if there is no
* unique description using only ids, classes and attributes of an
* element that exist on the page already. If uniqueness cannot be
* determined and is required, you will need to add a unique identifier
* to the element through theming development.
*
* @param {HTMLElement} element
*
* @return {string}
* A unique selector for the element.
*/
function generateSelector (element) {
var selector = '';
var scopeSelector = '';
var pseudoUnique = false;
var firstPass = true;
do {
scopeSelector = '';
// Try to apply an ID.
if ((scopeSelector = applyID(element)).length > 0) {
selector = scopeSelector + ' ' + selector;
// Assume that a selector with an ID in the string is unique.
break;
}
// Try to apply classes.
if (!pseudoUnique && (scopeSelector = applyClasses(element)).length > 0) {
// If the classes don't create a unique path, tack them on and
// continue.
selector = scopeSelector + ' ' + selector;
// If the classes do create a unique path, mark this selector as
// pseudo unique. We will keep attempting to find an ID to really
// guarantee uniqueness.
if (isUniquePath(selector)) {
pseudoUnique = true;
}
}
// Process the original element.
if (firstPass) {
// Try to add attributes.
if ((scopeSelector = applyAttributes(element)).length > 0) {
// Do not include a space because the attributes qualify the
// element. Append classes if they exist.
selector = scopeSelector + selector;
}
// Add the element nodeName.
selector = element.nodeName.toLowerCase() + selector;
// The original element has been processed.
firstPass = false;
}
// Try the parent element to apply some scope.
element = element.parentNode;
} while (element && element.nodeType === 1 && element.nodeName !== 'BODY' && element.nodeName !== 'HTML');
return selector.trim();
}
/**
* Helper function to filter items from a list that pass the comparator
* test.
*
* @param {Array} list
* @param {function} comparator
* A function that return a boolean. True means the list item will be
* discarded from the list.
* @return array
* A list of items the excludes items that passed the comparator test.
*/
function | (list, comparator) {
var keepers = [];
for (var i = 0, il = list.length; i < il; i++) {
if (!comparator.call(null, list[i])) {
keepers.push(list[i]);
}
}
return keepers;
}
return element && generateSelector(element);
},
push: [].push,
sort: [].sort,
concat: [].concat,
splice: [].splice
};
// Give the init function the Case prototype.
Case.fn.init.prototype = Case.fn;
return Case;
}());
module.exports = Case;
| reject | identifier_name |
Case.js | /**
* @providesModule Case
*/
const DOM = require('DOM');
var Case = (function () {
/**
* A Case is a test against an element.
*/
function Case (attributes) {
return new Case.fn.init(attributes);
}
// Prototype object of the Case.
Case.fn = Case.prototype = {
constructor: Case,
init: function (attributes) {
this.listeners = {};
this.timeout = null;
this.attributes = attributes || {};
var that = this;
// Dispatch a resolve event if the case is initiated with a status.
if (this.attributes.status) {
// Delay the status dispatch to the next execution cycle so that the
// Case will register listeners in this execution cycle first.
setTimeout(function () {
that.resolve();
}, 0);
}
// Set up a time out for this case to resolve within.
else {
this.attributes.status = 'untested';
this.timeout = setTimeout(function () {
that.giveup();
}, 350);
}
return this;
},
// Details of the Case.
attributes: null,
get: function (attr) {
return this.attributes[attr];
},
set: function (attr, value) {
var isStatusChanged = false;
// Allow an object of attributes to be passed in.
if (typeof attr === 'object') {
for (var prop in attr) {
if (attr.hasOwnProperty(prop)) {
if (prop === 'status') {
isStatusChanged = true;
}
this.attributes[prop] = attr[prop];
}
}
}
// Assign a single attribute value.
else {
if (attr === 'status') {
isStatusChanged = true;
}
this.attributes[attr] = value;
}
if (isStatusChanged) {
this.resolve();
}
return this;
},
/**
* A test that determines if a case has one of a set of statuses.
*
* @return boolean
* A bit that indicates if the case has one of the supplied statuses.
*/
hasStatus: function (statuses) {
// This is a rought test of arrayness.
if (typeof statuses !== 'object') {
statuses = [statuses];
}
var status = this.get('status');
for (var i = 0, il = statuses.length; i < il; ++i) {
if (statuses[i] === status) {
return true;
}
}
return false;
},
/**
* Dispatches the resolve event; clears the timeout fallback event.
*/
resolve: function () {
clearTimeout(this.timeout);
var el = this.attributes.element;
var outerEl;
// Get a selector and HTML if an element is provided.
if (el && el.nodeType && el.nodeType === 1) {
// Allow a test to provide a selector. Programmatically find one if none
// is provided.
this.attributes.selector = this.defineUniqueSelector(el);
// Get a serialized HTML representation of the element the raised the error
// if the Test did not provide it.
if (!this.attributes.html) {
this.attributes.html = '';
// If the element is either the <html> or <body> elements,
// just report that. Otherwise we might be returning the entire page
// as a string.
if (el.nodeName === 'HTML' || el.nodeName === 'BODY') {
this.attributes.html = '<' + el.nodeName + '>';
}
// Get the parent node in order to get the innerHTML for the selected
// element. Trim wrapping whitespace, remove linebreaks and spaces.
else if (typeof el.outerHTML === 'string') {
outerEl = el.outerHTML.trim().replace(/(\r\n|\n|\r)/gm, '').replace(/>\s+</g, '><');
// Guard against insanely long elements.
// @todo, make this length configurable eventually.
if (outerEl.length > 200) {
outerEl = outerEl.substr(0, 200) + '... [truncated]';
}
this.attributes.html = outerEl;
}
}
}
this.dispatch('resolve', this);
},
/**
* Abandons the Case if it not resolved within the timeout period.
*/
giveup: function () {
clearTimeout(this.timeout);
// @todo, the set method should really have a 'silent' option.
this.attributes.status = 'untested';
this.dispatch('timeout', this);
},
// @todo, make this a set of methods that all classes extend.
listenTo: function (dispatcher, eventName, handler) {
handler = handler.bind(this);
dispatcher.registerListener.call(dispatcher, eventName, handler);
},
registerListener: function (eventName, handler) {
if (!this.listeners[eventName]) {
this.listeners[eventName] = [];
}
this.listeners[eventName].push(handler);
},
dispatch: function (eventName) {
if (this.listeners[eventName] && this.listeners[eventName].length) {
var eventArgs = [].slice.call(arguments);
this.listeners[eventName].forEach(function (handler) {
// Pass any additional arguments from the event dispatcher to the
// handler function.
handler.apply(null, eventArgs);
});
}
},
/**
* Creates a page-unique selector for the selected DOM element.
*
* @param {jQuery} element
* An element in a jQuery wrapper.
*
* @return {string}
* A unique selector for this element.
*/
defineUniqueSelector: function (element) {
/**
* Indicates whether the selector string represents a unique DOM element.
*
* @param {string} selector
* A string selector that can be used to query a DOM element.
*
* @return Boolean
* Whether or not the selector string represents a unique DOM element.
*/
function isUniquePath (selector) {
return DOM.scry(selector).length === 1;
}
/**
* Creates a selector from the element's id attribute.
*
* Temporary IDs created by the module that contain "visitorActions" are excluded.
*
* @param {HTMLElement} element
*
* @return {string}
* An id selector or an empty string.
*/
function applyID (element) {
var selector = '';
var id = element.id || '';
if (id.length > 0) {
selector = '#' + id;
}
return selector;
}
/**
* Creates a selector from classes on the element.
*
* Classes with known functional components like the word 'active' are
* excluded because these often denote state, not identity.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of classes or an empty string.
*/
function applyClasses (element) {
var selector = '';
// Try to make a selector from the element's classes.
var classes = element.className || '';
if (classes.length > 0) {
classes = classes.split(/\s+/);
// Filter out classes that might represent state.
classes = reject(classes, function (cl) {
return (/active|enabled|disabled|first|last|only|collapsed|open|clearfix|processed/).test(cl);
});
if (classes.length > 0) {
return '.' + classes.join('.');
}
}
return selector;
}
/**
* Finds attributes on the element and creates a selector from them.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of attributes or an empty string.
*/
function applyAttributes (element) {
var selector = '';
// Whitelisted attributes to include in a selector to disambiguate it.
var attributes = ['href', 'type', 'title', 'alt'];
var value;
if (typeof element === 'undefined' ||
typeof element.attributes === 'undefined' ||
element.attributes === null) {
return selector;
}
// Try to make a selector from the element's classes.
for (var i = 0, len = attributes.length; i < len; i++) {
value = element.attributes[attributes[i]] && element.attributes[attributes[i]].value;
if (value) {
selector += '[' + attributes[i] + '="' + value + '"]';
}
}
return selector;
}
/**
* Creates a unique selector using id, classes and attributes.
*
* It is possible that the selector will not be unique if there is no
* unique description using only ids, classes and attributes of an
* element that exist on the page already. If uniqueness cannot be
* determined and is required, you will need to add a unique identifier
* to the element through theming development.
*
* @param {HTMLElement} element
*
* @return {string}
* A unique selector for the element.
*/
function generateSelector (element) {
var selector = '';
var scopeSelector = '';
var pseudoUnique = false;
var firstPass = true;
do {
scopeSelector = '';
// Try to apply an ID.
if ((scopeSelector = applyID(element)).length > 0) {
selector = scopeSelector + ' ' + selector;
// Assume that a selector with an ID in the string is unique.
break;
}
// Try to apply classes.
if (!pseudoUnique && (scopeSelector = applyClasses(element)).length > 0) {
// If the classes don't create a unique path, tack them on and
// continue.
selector = scopeSelector + ' ' + selector;
// If the classes do create a unique path, mark this selector as
// pseudo unique. We will keep attempting to find an ID to really
// guarantee uniqueness.
if (isUniquePath(selector)) {
pseudoUnique = true;
}
}
// Process the original element.
if (firstPass) {
// Try to add attributes.
if ((scopeSelector = applyAttributes(element)).length > 0) {
// Do not include a space because the attributes qualify the
// element. Append classes if they exist.
selector = scopeSelector + selector;
}
// Add the element nodeName.
selector = element.nodeName.toLowerCase() + selector;
// The original element has been processed.
firstPass = false;
}
// Try the parent element to apply some scope.
element = element.parentNode;
} while (element && element.nodeType === 1 && element.nodeName !== 'BODY' && element.nodeName !== 'HTML');
return selector.trim();
}
/**
* Helper function to filter items from a list that pass the comparator
* test.
*
* @param {Array} list
* @param {function} comparator
* A function that return a boolean. True means the list item will be
* discarded from the list.
* @return array
* A list of items the excludes items that passed the comparator test.
*/ | function reject (list, comparator) {
var keepers = [];
for (var i = 0, il = list.length; i < il; i++) {
if (!comparator.call(null, list[i])) {
keepers.push(list[i]);
}
}
return keepers;
}
return element && generateSelector(element);
},
push: [].push,
sort: [].sort,
concat: [].concat,
splice: [].splice
};
// Give the init function the Case prototype.
Case.fn.init.prototype = Case.fn;
return Case;
}());
module.exports = Case; | random_line_split | |
Case.js | /**
* @providesModule Case
*/
const DOM = require('DOM');
var Case = (function () {
/**
* A Case is a test against an element.
*/
function Case (attributes) {
return new Case.fn.init(attributes);
}
// Prototype object of the Case.
Case.fn = Case.prototype = {
constructor: Case,
init: function (attributes) {
this.listeners = {};
this.timeout = null;
this.attributes = attributes || {};
var that = this;
// Dispatch a resolve event if the case is initiated with a status.
if (this.attributes.status) {
// Delay the status dispatch to the next execution cycle so that the
// Case will register listeners in this execution cycle first.
setTimeout(function () {
that.resolve();
}, 0);
}
// Set up a time out for this case to resolve within.
else {
this.attributes.status = 'untested';
this.timeout = setTimeout(function () {
that.giveup();
}, 350);
}
return this;
},
// Details of the Case.
attributes: null,
get: function (attr) {
return this.attributes[attr];
},
set: function (attr, value) {
var isStatusChanged = false;
// Allow an object of attributes to be passed in.
if (typeof attr === 'object') {
for (var prop in attr) {
if (attr.hasOwnProperty(prop)) {
if (prop === 'status') {
isStatusChanged = true;
}
this.attributes[prop] = attr[prop];
}
}
}
// Assign a single attribute value.
else {
if (attr === 'status') {
isStatusChanged = true;
}
this.attributes[attr] = value;
}
if (isStatusChanged) {
this.resolve();
}
return this;
},
/**
* A test that determines if a case has one of a set of statuses.
*
* @return boolean
* A bit that indicates if the case has one of the supplied statuses.
*/
hasStatus: function (statuses) {
// This is a rought test of arrayness.
if (typeof statuses !== 'object') {
statuses = [statuses];
}
var status = this.get('status');
for (var i = 0, il = statuses.length; i < il; ++i) {
if (statuses[i] === status) {
return true;
}
}
return false;
},
/**
* Dispatches the resolve event; clears the timeout fallback event.
*/
resolve: function () {
clearTimeout(this.timeout);
var el = this.attributes.element;
var outerEl;
// Get a selector and HTML if an element is provided.
if (el && el.nodeType && el.nodeType === 1) {
// Allow a test to provide a selector. Programmatically find one if none
// is provided.
this.attributes.selector = this.defineUniqueSelector(el);
// Get a serialized HTML representation of the element the raised the error
// if the Test did not provide it.
if (!this.attributes.html) {
this.attributes.html = '';
// If the element is either the <html> or <body> elements,
// just report that. Otherwise we might be returning the entire page
// as a string.
if (el.nodeName === 'HTML' || el.nodeName === 'BODY') {
this.attributes.html = '<' + el.nodeName + '>';
}
// Get the parent node in order to get the innerHTML for the selected
// element. Trim wrapping whitespace, remove linebreaks and spaces.
else if (typeof el.outerHTML === 'string') {
outerEl = el.outerHTML.trim().replace(/(\r\n|\n|\r)/gm, '').replace(/>\s+</g, '><');
// Guard against insanely long elements.
// @todo, make this length configurable eventually.
if (outerEl.length > 200) {
outerEl = outerEl.substr(0, 200) + '... [truncated]';
}
this.attributes.html = outerEl;
}
}
}
this.dispatch('resolve', this);
},
/**
* Abandons the Case if it not resolved within the timeout period.
*/
giveup: function () {
clearTimeout(this.timeout);
// @todo, the set method should really have a 'silent' option.
this.attributes.status = 'untested';
this.dispatch('timeout', this);
},
// @todo, make this a set of methods that all classes extend.
listenTo: function (dispatcher, eventName, handler) {
handler = handler.bind(this);
dispatcher.registerListener.call(dispatcher, eventName, handler);
},
registerListener: function (eventName, handler) {
if (!this.listeners[eventName]) {
this.listeners[eventName] = [];
}
this.listeners[eventName].push(handler);
},
dispatch: function (eventName) {
if (this.listeners[eventName] && this.listeners[eventName].length) {
var eventArgs = [].slice.call(arguments);
this.listeners[eventName].forEach(function (handler) {
// Pass any additional arguments from the event dispatcher to the
// handler function.
handler.apply(null, eventArgs);
});
}
},
/**
* Creates a page-unique selector for the selected DOM element.
*
* @param {jQuery} element
* An element in a jQuery wrapper.
*
* @return {string}
* A unique selector for this element.
*/
defineUniqueSelector: function (element) {
/**
* Indicates whether the selector string represents a unique DOM element.
*
* @param {string} selector
* A string selector that can be used to query a DOM element.
*
* @return Boolean
* Whether or not the selector string represents a unique DOM element.
*/
function isUniquePath (selector) {
return DOM.scry(selector).length === 1;
}
/**
* Creates a selector from the element's id attribute.
*
* Temporary IDs created by the module that contain "visitorActions" are excluded.
*
* @param {HTMLElement} element
*
* @return {string}
* An id selector or an empty string.
*/
function applyID (element) {
var selector = '';
var id = element.id || '';
if (id.length > 0) {
selector = '#' + id;
}
return selector;
}
/**
* Creates a selector from classes on the element.
*
* Classes with known functional components like the word 'active' are
* excluded because these often denote state, not identity.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of classes or an empty string.
*/
function applyClasses (element) {
var selector = '';
// Try to make a selector from the element's classes.
var classes = element.className || '';
if (classes.length > 0) {
classes = classes.split(/\s+/);
// Filter out classes that might represent state.
classes = reject(classes, function (cl) {
return (/active|enabled|disabled|first|last|only|collapsed|open|clearfix|processed/).test(cl);
});
if (classes.length > 0) {
return '.' + classes.join('.');
}
}
return selector;
}
/**
* Finds attributes on the element and creates a selector from them.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of attributes or an empty string.
*/
function applyAttributes (element) {
var selector = '';
// Whitelisted attributes to include in a selector to disambiguate it.
var attributes = ['href', 'type', 'title', 'alt'];
var value;
if (typeof element === 'undefined' ||
typeof element.attributes === 'undefined' ||
element.attributes === null) {
return selector;
}
// Try to make a selector from the element's classes.
for (var i = 0, len = attributes.length; i < len; i++) {
value = element.attributes[attributes[i]] && element.attributes[attributes[i]].value;
if (value) {
selector += '[' + attributes[i] + '="' + value + '"]';
}
}
return selector;
}
/**
* Creates a unique selector using id, classes and attributes.
*
* It is possible that the selector will not be unique if there is no
* unique description using only ids, classes and attributes of an
* element that exist on the page already. If uniqueness cannot be
* determined and is required, you will need to add a unique identifier
* to the element through theming development.
*
* @param {HTMLElement} element
*
* @return {string}
* A unique selector for the element.
*/
function generateSelector (element) {
var selector = '';
var scopeSelector = '';
var pseudoUnique = false;
var firstPass = true;
do {
scopeSelector = '';
// Try to apply an ID.
if ((scopeSelector = applyID(element)).length > 0) {
selector = scopeSelector + ' ' + selector;
// Assume that a selector with an ID in the string is unique.
break;
}
// Try to apply classes.
if (!pseudoUnique && (scopeSelector = applyClasses(element)).length > 0) {
// If the classes don't create a unique path, tack them on and
// continue.
selector = scopeSelector + ' ' + selector;
// If the classes do create a unique path, mark this selector as
// pseudo unique. We will keep attempting to find an ID to really
// guarantee uniqueness.
if (isUniquePath(selector)) {
pseudoUnique = true;
}
}
// Process the original element.
if (firstPass) {
// Try to add attributes.
if ((scopeSelector = applyAttributes(element)).length > 0) {
// Do not include a space because the attributes qualify the
// element. Append classes if they exist.
selector = scopeSelector + selector;
}
// Add the element nodeName.
selector = element.nodeName.toLowerCase() + selector;
// The original element has been processed.
firstPass = false;
}
// Try the parent element to apply some scope.
element = element.parentNode;
} while (element && element.nodeType === 1 && element.nodeName !== 'BODY' && element.nodeName !== 'HTML');
return selector.trim();
}
/**
* Helper function to filter items from a list that pass the comparator
* test.
*
* @param {Array} list
* @param {function} comparator
* A function that return a boolean. True means the list item will be
* discarded from the list.
* @return array
* A list of items the excludes items that passed the comparator test.
*/
function reject (list, comparator) {
var keepers = [];
for (var i = 0, il = list.length; i < il; i++) {
if (!comparator.call(null, list[i])) |
}
return keepers;
}
return element && generateSelector(element);
},
push: [].push,
sort: [].sort,
concat: [].concat,
splice: [].splice
};
// Give the init function the Case prototype.
Case.fn.init.prototype = Case.fn;
return Case;
}());
module.exports = Case;
| {
keepers.push(list[i]);
} | conditional_block |
Case.js | /**
* @providesModule Case
*/
const DOM = require('DOM');
var Case = (function () {
/**
* A Case is a test against an element.
*/
function Case (attributes) |
// Prototype object of the Case.
Case.fn = Case.prototype = {
constructor: Case,
init: function (attributes) {
this.listeners = {};
this.timeout = null;
this.attributes = attributes || {};
var that = this;
// Dispatch a resolve event if the case is initiated with a status.
if (this.attributes.status) {
// Delay the status dispatch to the next execution cycle so that the
// Case will register listeners in this execution cycle first.
setTimeout(function () {
that.resolve();
}, 0);
}
// Set up a time out for this case to resolve within.
else {
this.attributes.status = 'untested';
this.timeout = setTimeout(function () {
that.giveup();
}, 350);
}
return this;
},
// Details of the Case.
attributes: null,
get: function (attr) {
return this.attributes[attr];
},
set: function (attr, value) {
var isStatusChanged = false;
// Allow an object of attributes to be passed in.
if (typeof attr === 'object') {
for (var prop in attr) {
if (attr.hasOwnProperty(prop)) {
if (prop === 'status') {
isStatusChanged = true;
}
this.attributes[prop] = attr[prop];
}
}
}
// Assign a single attribute value.
else {
if (attr === 'status') {
isStatusChanged = true;
}
this.attributes[attr] = value;
}
if (isStatusChanged) {
this.resolve();
}
return this;
},
/**
* A test that determines if a case has one of a set of statuses.
*
* @return boolean
* A bit that indicates if the case has one of the supplied statuses.
*/
hasStatus: function (statuses) {
// This is a rought test of arrayness.
if (typeof statuses !== 'object') {
statuses = [statuses];
}
var status = this.get('status');
for (var i = 0, il = statuses.length; i < il; ++i) {
if (statuses[i] === status) {
return true;
}
}
return false;
},
/**
* Dispatches the resolve event; clears the timeout fallback event.
*/
resolve: function () {
clearTimeout(this.timeout);
var el = this.attributes.element;
var outerEl;
// Get a selector and HTML if an element is provided.
if (el && el.nodeType && el.nodeType === 1) {
// Allow a test to provide a selector. Programmatically find one if none
// is provided.
this.attributes.selector = this.defineUniqueSelector(el);
// Get a serialized HTML representation of the element the raised the error
// if the Test did not provide it.
if (!this.attributes.html) {
this.attributes.html = '';
// If the element is either the <html> or <body> elements,
// just report that. Otherwise we might be returning the entire page
// as a string.
if (el.nodeName === 'HTML' || el.nodeName === 'BODY') {
this.attributes.html = '<' + el.nodeName + '>';
}
// Get the parent node in order to get the innerHTML for the selected
// element. Trim wrapping whitespace, remove linebreaks and spaces.
else if (typeof el.outerHTML === 'string') {
outerEl = el.outerHTML.trim().replace(/(\r\n|\n|\r)/gm, '').replace(/>\s+</g, '><');
// Guard against insanely long elements.
// @todo, make this length configurable eventually.
if (outerEl.length > 200) {
outerEl = outerEl.substr(0, 200) + '... [truncated]';
}
this.attributes.html = outerEl;
}
}
}
this.dispatch('resolve', this);
},
/**
* Abandons the Case if it not resolved within the timeout period.
*/
giveup: function () {
clearTimeout(this.timeout);
// @todo, the set method should really have a 'silent' option.
this.attributes.status = 'untested';
this.dispatch('timeout', this);
},
// @todo, make this a set of methods that all classes extend.
listenTo: function (dispatcher, eventName, handler) {
handler = handler.bind(this);
dispatcher.registerListener.call(dispatcher, eventName, handler);
},
registerListener: function (eventName, handler) {
if (!this.listeners[eventName]) {
this.listeners[eventName] = [];
}
this.listeners[eventName].push(handler);
},
dispatch: function (eventName) {
if (this.listeners[eventName] && this.listeners[eventName].length) {
var eventArgs = [].slice.call(arguments);
this.listeners[eventName].forEach(function (handler) {
// Pass any additional arguments from the event dispatcher to the
// handler function.
handler.apply(null, eventArgs);
});
}
},
/**
* Creates a page-unique selector for the selected DOM element.
*
* @param {jQuery} element
* An element in a jQuery wrapper.
*
* @return {string}
* A unique selector for this element.
*/
defineUniqueSelector: function (element) {
/**
* Indicates whether the selector string represents a unique DOM element.
*
* @param {string} selector
* A string selector that can be used to query a DOM element.
*
* @return Boolean
* Whether or not the selector string represents a unique DOM element.
*/
function isUniquePath (selector) {
return DOM.scry(selector).length === 1;
}
/**
* Creates a selector from the element's id attribute.
*
* Temporary IDs created by the module that contain "visitorActions" are excluded.
*
* @param {HTMLElement} element
*
* @return {string}
* An id selector or an empty string.
*/
function applyID (element) {
var selector = '';
var id = element.id || '';
if (id.length > 0) {
selector = '#' + id;
}
return selector;
}
/**
* Creates a selector from classes on the element.
*
* Classes with known functional components like the word 'active' are
* excluded because these often denote state, not identity.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of classes or an empty string.
*/
function applyClasses (element) {
var selector = '';
// Try to make a selector from the element's classes.
var classes = element.className || '';
if (classes.length > 0) {
classes = classes.split(/\s+/);
// Filter out classes that might represent state.
classes = reject(classes, function (cl) {
return (/active|enabled|disabled|first|last|only|collapsed|open|clearfix|processed/).test(cl);
});
if (classes.length > 0) {
return '.' + classes.join('.');
}
}
return selector;
}
/**
* Finds attributes on the element and creates a selector from them.
*
* @param {HTMLElement} element
*
* @return {string}
* A selector of attributes or an empty string.
*/
function applyAttributes (element) {
var selector = '';
// Whitelisted attributes to include in a selector to disambiguate it.
var attributes = ['href', 'type', 'title', 'alt'];
var value;
if (typeof element === 'undefined' ||
typeof element.attributes === 'undefined' ||
element.attributes === null) {
return selector;
}
// Try to make a selector from the element's classes.
for (var i = 0, len = attributes.length; i < len; i++) {
value = element.attributes[attributes[i]] && element.attributes[attributes[i]].value;
if (value) {
selector += '[' + attributes[i] + '="' + value + '"]';
}
}
return selector;
}
/**
* Creates a unique selector using id, classes and attributes.
*
* It is possible that the selector will not be unique if there is no
* unique description using only ids, classes and attributes of an
* element that exist on the page already. If uniqueness cannot be
* determined and is required, you will need to add a unique identifier
* to the element through theming development.
*
* @param {HTMLElement} element
*
* @return {string}
* A unique selector for the element.
*/
function generateSelector (element) {
var selector = '';
var scopeSelector = '';
var pseudoUnique = false;
var firstPass = true;
do {
scopeSelector = '';
// Try to apply an ID.
if ((scopeSelector = applyID(element)).length > 0) {
selector = scopeSelector + ' ' + selector;
// Assume that a selector with an ID in the string is unique.
break;
}
// Try to apply classes.
if (!pseudoUnique && (scopeSelector = applyClasses(element)).length > 0) {
// If the classes don't create a unique path, tack them on and
// continue.
selector = scopeSelector + ' ' + selector;
// If the classes do create a unique path, mark this selector as
// pseudo unique. We will keep attempting to find an ID to really
// guarantee uniqueness.
if (isUniquePath(selector)) {
pseudoUnique = true;
}
}
// Process the original element.
if (firstPass) {
// Try to add attributes.
if ((scopeSelector = applyAttributes(element)).length > 0) {
// Do not include a space because the attributes qualify the
// element. Append classes if they exist.
selector = scopeSelector + selector;
}
// Add the element nodeName.
selector = element.nodeName.toLowerCase() + selector;
// The original element has been processed.
firstPass = false;
}
// Try the parent element to apply some scope.
element = element.parentNode;
} while (element && element.nodeType === 1 && element.nodeName !== 'BODY' && element.nodeName !== 'HTML');
return selector.trim();
}
/**
* Helper function to filter items from a list that pass the comparator
* test.
*
* @param {Array} list
* @param {function} comparator
* A function that return a boolean. True means the list item will be
* discarded from the list.
* @return array
* A list of items the excludes items that passed the comparator test.
*/
function reject (list, comparator) {
var keepers = [];
for (var i = 0, il = list.length; i < il; i++) {
if (!comparator.call(null, list[i])) {
keepers.push(list[i]);
}
}
return keepers;
}
return element && generateSelector(element);
},
push: [].push,
sort: [].sort,
concat: [].concat,
splice: [].splice
};
// Give the init function the Case prototype.
Case.fn.init.prototype = Case.fn;
return Case;
}());
module.exports = Case;
| {
return new Case.fn.init(attributes);
} | identifier_body |
fast_forward_button.js | /*! @license
* Shaka Player
* Copyright 2016 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
goog.provide('shaka.ui.FastForwardButton');
goog.require('shaka.ui.Controls');
goog.require('shaka.ui.Element');
goog.require('shaka.ui.Enums');
goog.require('shaka.ui.Locales');
goog.require('shaka.ui.Localization');
goog.require('shaka.util.Dom');
/**
* @extends {shaka.ui.Element}
* @final
* @export
*/
shaka.ui.FastForwardButton = class extends shaka.ui.Element {
/**
* @param {!HTMLElement} parent
* @param {!shaka.ui.Controls} controls
*/
constructor(parent, controls) {
super(parent, controls);
/** @private {!HTMLButtonElement} */
this.button_ = shaka.util.Dom.createButton();
this.button_.classList.add('material-icons-round');
this.button_.classList.add('shaka-fast-forward-button');
this.button_.classList.add('shaka-tooltip-status');
this.button_.setAttribute('shaka-status', '1x');
this.button_.textContent =
shaka.ui.Enums.MaterialDesignIcons.FAST_FORWARD;
this.parent.appendChild(this.button_);
this.updateAriaLabel_();
/** @private {!Array.<number>} */
this.fastForwardRates_ = this.controls.getConfig().fastForwardRates;
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_UPDATED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_CHANGED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(this.button_, 'click', () => {
this.fastForward_();
});
}
/**
* @private
*/
updateAriaLabel_() {
this.button_.ariaLabel =
this.localization.resolve(shaka.ui.Locales.Ids.FAST_FORWARD);
}
/**
* Cycles trick play rate between the selected fast forward rates.
* @private
*/
fastForward_() {
if (!this.video.duration) |
const trickPlayRate = this.player.getPlaybackRate();
const newRateIndex = this.fastForwardRates_.indexOf(trickPlayRate) + 1;
// When the button is clicked, the next rate in this.fastForwardRates_ is
// selected. If no more rates are available, the first one is set.
const newRate = (newRateIndex != this.fastForwardRates_.length) ?
this.fastForwardRates_[newRateIndex] : this.fastForwardRates_[0];
this.player.trickPlay(newRate);
this.button_.setAttribute('shaka-status', newRate + 'x');
}
};
/**
* @implements {shaka.extern.IUIElement.Factory}
* @final
*/
shaka.ui.FastForwardButton.Factory = class {
/** @override */
create(rootElement, controls) {
return new shaka.ui.FastForwardButton(rootElement, controls);
}
};
shaka.ui.Controls.registerElement(
'fast_forward', new shaka.ui.FastForwardButton.Factory());
| {
return;
} | conditional_block |
fast_forward_button.js | /*! @license
* Shaka Player
* Copyright 2016 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
goog.provide('shaka.ui.FastForwardButton');
goog.require('shaka.ui.Controls');
goog.require('shaka.ui.Element');
goog.require('shaka.ui.Enums');
goog.require('shaka.ui.Locales');
goog.require('shaka.ui.Localization');
goog.require('shaka.util.Dom');
/**
* @extends {shaka.ui.Element}
* @final
* @export
*/
shaka.ui.FastForwardButton = class extends shaka.ui.Element {
/**
* @param {!HTMLElement} parent
* @param {!shaka.ui.Controls} controls
*/
constructor(parent, controls) {
super(parent, controls);
/** @private {!HTMLButtonElement} */
this.button_ = shaka.util.Dom.createButton();
this.button_.classList.add('material-icons-round');
this.button_.classList.add('shaka-fast-forward-button');
this.button_.classList.add('shaka-tooltip-status');
this.button_.setAttribute('shaka-status', '1x');
this.button_.textContent =
shaka.ui.Enums.MaterialDesignIcons.FAST_FORWARD;
this.parent.appendChild(this.button_);
this.updateAriaLabel_();
/** @private {!Array.<number>} */
this.fastForwardRates_ = this.controls.getConfig().fastForwardRates;
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_UPDATED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_CHANGED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(this.button_, 'click', () => {
this.fastForward_();
});
}
/**
* @private
*/
updateAriaLabel_() {
this.button_.ariaLabel =
this.localization.resolve(shaka.ui.Locales.Ids.FAST_FORWARD);
}
/**
* Cycles trick play rate between the selected fast forward rates.
* @private
*/
fastForward_() {
if (!this.video.duration) {
return;
}
const trickPlayRate = this.player.getPlaybackRate();
const newRateIndex = this.fastForwardRates_.indexOf(trickPlayRate) + 1;
// When the button is clicked, the next rate in this.fastForwardRates_ is
// selected. If no more rates are available, the first one is set.
const newRate = (newRateIndex != this.fastForwardRates_.length) ?
this.fastForwardRates_[newRateIndex] : this.fastForwardRates_[0];
this.player.trickPlay(newRate);
this.button_.setAttribute('shaka-status', newRate + 'x');
}
};
/**
* @implements {shaka.extern.IUIElement.Factory}
* @final
*/
shaka.ui.FastForwardButton.Factory = class {
/** @override */
| (rootElement, controls) {
return new shaka.ui.FastForwardButton(rootElement, controls);
}
};
shaka.ui.Controls.registerElement(
'fast_forward', new shaka.ui.FastForwardButton.Factory());
| create | identifier_name |
fast_forward_button.js | /*! @license
* Shaka Player
* Copyright 2016 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
goog.provide('shaka.ui.FastForwardButton');
goog.require('shaka.ui.Controls');
goog.require('shaka.ui.Element');
goog.require('shaka.ui.Enums');
goog.require('shaka.ui.Locales');
goog.require('shaka.ui.Localization');
goog.require('shaka.util.Dom');
/**
* @extends {shaka.ui.Element}
* @final
* @export
*/
shaka.ui.FastForwardButton = class extends shaka.ui.Element {
/**
* @param {!HTMLElement} parent
* @param {!shaka.ui.Controls} controls
*/
constructor(parent, controls) |
/**
* @private
*/
updateAriaLabel_() {
this.button_.ariaLabel =
this.localization.resolve(shaka.ui.Locales.Ids.FAST_FORWARD);
}
/**
* Cycles trick play rate between the selected fast forward rates.
* @private
*/
fastForward_() {
if (!this.video.duration) {
return;
}
const trickPlayRate = this.player.getPlaybackRate();
const newRateIndex = this.fastForwardRates_.indexOf(trickPlayRate) + 1;
// When the button is clicked, the next rate in this.fastForwardRates_ is
// selected. If no more rates are available, the first one is set.
const newRate = (newRateIndex != this.fastForwardRates_.length) ?
this.fastForwardRates_[newRateIndex] : this.fastForwardRates_[0];
this.player.trickPlay(newRate);
this.button_.setAttribute('shaka-status', newRate + 'x');
}
};
/**
* @implements {shaka.extern.IUIElement.Factory}
* @final
*/
shaka.ui.FastForwardButton.Factory = class {
/** @override */
create(rootElement, controls) {
return new shaka.ui.FastForwardButton(rootElement, controls);
}
};
shaka.ui.Controls.registerElement(
'fast_forward', new shaka.ui.FastForwardButton.Factory());
| {
super(parent, controls);
/** @private {!HTMLButtonElement} */
this.button_ = shaka.util.Dom.createButton();
this.button_.classList.add('material-icons-round');
this.button_.classList.add('shaka-fast-forward-button');
this.button_.classList.add('shaka-tooltip-status');
this.button_.setAttribute('shaka-status', '1x');
this.button_.textContent =
shaka.ui.Enums.MaterialDesignIcons.FAST_FORWARD;
this.parent.appendChild(this.button_);
this.updateAriaLabel_();
/** @private {!Array.<number>} */
this.fastForwardRates_ = this.controls.getConfig().fastForwardRates;
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_UPDATED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_CHANGED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(this.button_, 'click', () => {
this.fastForward_();
});
} | identifier_body |
fast_forward_button.js | /*! @license
* Shaka Player
* Copyright 2016 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
goog.provide('shaka.ui.FastForwardButton');
goog.require('shaka.ui.Controls');
goog.require('shaka.ui.Element');
goog.require('shaka.ui.Enums'); |
/**
* @extends {shaka.ui.Element}
* @final
* @export
*/
shaka.ui.FastForwardButton = class extends shaka.ui.Element {
/**
* @param {!HTMLElement} parent
* @param {!shaka.ui.Controls} controls
*/
constructor(parent, controls) {
super(parent, controls);
/** @private {!HTMLButtonElement} */
this.button_ = shaka.util.Dom.createButton();
this.button_.classList.add('material-icons-round');
this.button_.classList.add('shaka-fast-forward-button');
this.button_.classList.add('shaka-tooltip-status');
this.button_.setAttribute('shaka-status', '1x');
this.button_.textContent =
shaka.ui.Enums.MaterialDesignIcons.FAST_FORWARD;
this.parent.appendChild(this.button_);
this.updateAriaLabel_();
/** @private {!Array.<number>} */
this.fastForwardRates_ = this.controls.getConfig().fastForwardRates;
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_UPDATED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(
this.localization, shaka.ui.Localization.LOCALE_CHANGED, () => {
this.updateAriaLabel_();
});
this.eventManager.listen(this.button_, 'click', () => {
this.fastForward_();
});
}
/**
* @private
*/
updateAriaLabel_() {
this.button_.ariaLabel =
this.localization.resolve(shaka.ui.Locales.Ids.FAST_FORWARD);
}
/**
* Cycles trick play rate between the selected fast forward rates.
* @private
*/
fastForward_() {
if (!this.video.duration) {
return;
}
const trickPlayRate = this.player.getPlaybackRate();
const newRateIndex = this.fastForwardRates_.indexOf(trickPlayRate) + 1;
// When the button is clicked, the next rate in this.fastForwardRates_ is
// selected. If no more rates are available, the first one is set.
const newRate = (newRateIndex != this.fastForwardRates_.length) ?
this.fastForwardRates_[newRateIndex] : this.fastForwardRates_[0];
this.player.trickPlay(newRate);
this.button_.setAttribute('shaka-status', newRate + 'x');
}
};
/**
* @implements {shaka.extern.IUIElement.Factory}
* @final
*/
shaka.ui.FastForwardButton.Factory = class {
/** @override */
create(rootElement, controls) {
return new shaka.ui.FastForwardButton(rootElement, controls);
}
};
shaka.ui.Controls.registerElement(
'fast_forward', new shaka.ui.FastForwardButton.Factory()); | goog.require('shaka.ui.Locales');
goog.require('shaka.ui.Localization');
goog.require('shaka.util.Dom'); | random_line_split |
pattern_type_mismatch.rs | use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::last_path_segment;
use rustc_hir::{
intravisit, Body, Expr, ExprKind, FnDecl, HirId, LocalSource, MatchSource, Mutability, Pat, PatField, PatKind,
QPath, Stmt, StmtKind,
};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{AdtDef, FieldDef, Ty, TyKind, VariantDef};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use std::iter;
declare_clippy_lint! {
/// ### What it does
/// Checks for patterns that aren't exact representations of the types
/// they are applied to.
///
/// To satisfy this lint, you will have to adjust either the expression that is matched
/// against or the pattern itself, as well as the bindings that are introduced by the
/// adjusted patterns. For matching you will have to either dereference the expression
/// with the `*` operator, or amend the patterns to explicitly match against `&<pattern>`
/// or `&mut <pattern>` depending on the reference mutability. For the bindings you need
/// to use the inverse. You can leave them as plain bindings if you wish for the value
/// to be copied, but you must use `ref mut <variable>` or `ref <variable>` to construct
/// a reference into the matched structure.
///
/// If you are looking for a way to learn about ownership semantics in more detail, it
/// is recommended to look at IDE options available to you to highlight types, lifetimes
/// and reference semantics in your code. The available tooling would expose these things
/// in a general way even outside of the various pattern matching mechanics. Of course
/// this lint can still be used to highlight areas of interest and ensure a good understanding
/// of ownership semantics.
///
/// ### Why is this bad?
/// It isn't bad in general. But in some contexts it can be desirable
/// because it increases ownership hints in the code, and will guard against some changes
/// in ownership.
///
/// ### Example
/// This example shows the basic adjustments necessary to satisfy the lint. Note how
/// the matched expression is explicitly dereferenced with `*` and the `inner` variable
/// is bound to a shared borrow via `ref inner`.
///
/// ```rust,ignore
/// // Bad
/// let value = &Some(Box::new(23));
/// match value {
/// Some(inner) => println!("{}", inner),
/// None => println!("none"),
/// }
///
/// // Good
/// let value = &Some(Box::new(23));
/// match *value {
/// Some(ref inner) => println!("{}", inner),
/// None => println!("none"),
/// }
/// ```
///
/// The following example demonstrates one of the advantages of the more verbose style.
/// Note how the second version uses `ref mut a` to explicitly declare `a` a shared mutable
/// borrow, while `b` is simply taken by value. This ensures that the loop body cannot
/// accidentally modify the wrong part of the structure.
///
/// ```rust,ignore
/// // Bad
/// let mut values = vec![(2, 3), (3, 4)];
/// for (a, b) in &mut values {
/// *a += *b;
/// }
///
/// // Good
/// let mut values = vec![(2, 3), (3, 4)];
/// for &mut (ref mut a, b) in &mut values {
/// *a += b;
/// }
/// ```
pub PATTERN_TYPE_MISMATCH,
restriction,
"type of pattern does not match the expression type"
}
declare_lint_pass!(PatternTypeMismatch => [PATTERN_TYPE_MISMATCH]);
impl<'tcx> LateLintPass<'tcx> for PatternTypeMismatch {
fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx Stmt<'_>) {
if let StmtKind::Local(local) = stmt.kind {
if let Some(init) = &local.init {
if let Some(init_ty) = cx.typeck_results().node_type_opt(init.hir_id) {
let pat = &local.pat;
if in_external_macro(cx.sess(), pat.span) {
return;
}
let deref_possible = match local.source {
LocalSource::Normal => DerefPossible::Possible,
_ => DerefPossible::Impossible,
};
apply_lint(cx, pat, init_ty, deref_possible);
}
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Match(scrutinee, arms, MatchSource::Normal) = expr.kind {
if let Some(expr_ty) = cx.typeck_results().node_type_opt(scrutinee.hir_id) {
'pattern_checks: for arm in arms {
let pat = &arm.pat;
if in_external_macro(cx.sess(), pat.span) {
continue 'pattern_checks;
}
if apply_lint(cx, pat, expr_ty, DerefPossible::Possible) {
break 'pattern_checks;
}
}
}
}
if let ExprKind::Let(let_pat, let_expr, _) = expr.kind {
if let Some(ref expr_ty) = cx.typeck_results().node_type_opt(let_expr.hir_id) {
if in_external_macro(cx.sess(), let_pat.span) {
return;
}
apply_lint(cx, let_pat, expr_ty, DerefPossible::Possible);
}
}
}
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
_: intravisit::FnKind<'tcx>,
_: &'tcx FnDecl<'_>,
body: &'tcx Body<'_>,
_: Span,
hir_id: HirId,
) {
if let Some(fn_sig) = cx.typeck_results().liberated_fn_sigs().get(hir_id) {
for (param, ty) in iter::zip(body.params, fn_sig.inputs()) {
apply_lint(cx, param.pat, ty, DerefPossible::Impossible);
}
}
}
}
#[derive(Debug, Clone, Copy)]
enum DerefPossible {
Possible,
Impossible,
}
fn apply_lint<'tcx>(cx: &LateContext<'tcx>, pat: &Pat<'_>, expr_ty: Ty<'tcx>, deref_possible: DerefPossible) -> bool {
let maybe_mismatch = find_first_mismatch(cx, pat, expr_ty, Level::Top);
if let Some((span, mutability, level)) = maybe_mismatch {
span_lint_and_help(
cx,
PATTERN_TYPE_MISMATCH,
span,
"type of pattern does not match the expression type",
None,
&format!(
"{}explicitly match against a `{}` pattern and adjust the enclosed variable bindings",
match (deref_possible, level) {
(DerefPossible::Possible, Level::Top) => "use `*` to dereference the match expression or ",
_ => "",
},
match mutability {
Mutability::Mut => "&mut _",
Mutability::Not => "&_",
},
),
);
true
} else {
false
}
}
#[derive(Debug, Copy, Clone)]
enum Level {
Top,
Lower,
}
#[allow(rustc::usage_of_ty_tykind)]
fn find_first_mismatch<'tcx>(
cx: &LateContext<'tcx>,
pat: &Pat<'_>,
ty: Ty<'tcx>,
level: Level,
) -> Option<(Span, Mutability, Level)> {
if let PatKind::Ref(sub_pat, _) = pat.kind {
if let TyKind::Ref(_, sub_ty, _) = ty.kind() {
return find_first_mismatch(cx, sub_pat, sub_ty, Level::Lower);
}
}
if let TyKind::Ref(_, _, mutability) = *ty.kind() {
if is_non_ref_pattern(&pat.kind) {
return Some((pat.span, mutability, level));
}
}
if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
return find_first_mismatch_in_struct(cx, field_pats, field_defs, substs_ref);
}
}
}
if let PatKind::TupleStruct(ref qpath, pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
let ty_iter = field_defs.iter().map(|field_def| field_def.ty(cx.tcx, substs_ref));
return find_first_mismatch_in_tuple(cx, pats, ty_iter);
}
}
}
if let PatKind::Tuple(pats, _) = pat.kind {
if let TyKind::Tuple(..) = ty.kind() {
return find_first_mismatch_in_tuple(cx, pats, ty.tuple_fields());
}
}
if let PatKind::Or(sub_pats) = pat.kind {
for pat in sub_pats {
let maybe_mismatch = find_first_mismatch(cx, pat, ty, level);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
}
None
}
fn get_variant<'a>(adt_def: &'a AdtDef, qpath: &QPath<'_>) -> Option<&'a VariantDef> {
if adt_def.is_struct() {
if let Some(variant) = adt_def.variants.iter().next() {
return Some(variant);
}
}
if adt_def.is_enum() {
let pat_ident = last_path_segment(qpath).ident;
for variant in &adt_def.variants {
if variant.ident == pat_ident {
return Some(variant);
}
}
}
None
}
fn find_first_mismatch_in_tuple<'tcx, I>(
cx: &LateContext<'tcx>,
pats: &[Pat<'_>],
ty_iter_src: I,
) -> Option<(Span, Mutability, Level)>
where
I: IntoIterator<Item = Ty<'tcx>>,
{
let mut field_tys = ty_iter_src.into_iter();
'fields: for pat in pats {
let field_ty = if let Some(ty) = field_tys.next() {
ty
} else {
break 'fields;
};
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
None
}
fn | <'tcx>(
cx: &LateContext<'tcx>,
field_pats: &[PatField<'_>],
field_defs: &[FieldDef],
substs_ref: SubstsRef<'tcx>,
) -> Option<(Span, Mutability, Level)> {
for field_pat in field_pats {
'definitions: for field_def in field_defs {
if field_pat.ident == field_def.ident {
let field_ty = field_def.ty(cx.tcx, substs_ref);
let pat = &field_pat.pat;
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
break 'definitions;
}
}
}
None
}
fn is_non_ref_pattern(pat_kind: &PatKind<'_>) -> bool {
match pat_kind {
PatKind::Struct(..) | PatKind::Tuple(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => true,
PatKind::Or(sub_pats) => sub_pats.iter().any(|pat| is_non_ref_pattern(&pat.kind)),
_ => false,
}
}
| find_first_mismatch_in_struct | identifier_name |
pattern_type_mismatch.rs | use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::last_path_segment;
use rustc_hir::{
intravisit, Body, Expr, ExprKind, FnDecl, HirId, LocalSource, MatchSource, Mutability, Pat, PatField, PatKind,
QPath, Stmt, StmtKind,
};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{AdtDef, FieldDef, Ty, TyKind, VariantDef};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use std::iter;
declare_clippy_lint! {
/// ### What it does
/// Checks for patterns that aren't exact representations of the types
/// they are applied to.
///
/// To satisfy this lint, you will have to adjust either the expression that is matched
/// against or the pattern itself, as well as the bindings that are introduced by the
/// adjusted patterns. For matching you will have to either dereference the expression
/// with the `*` operator, or amend the patterns to explicitly match against `&<pattern>`
/// or `&mut <pattern>` depending on the reference mutability. For the bindings you need
/// to use the inverse. You can leave them as plain bindings if you wish for the value
/// to be copied, but you must use `ref mut <variable>` or `ref <variable>` to construct
/// a reference into the matched structure.
///
/// If you are looking for a way to learn about ownership semantics in more detail, it
/// is recommended to look at IDE options available to you to highlight types, lifetimes
/// and reference semantics in your code. The available tooling would expose these things
/// in a general way even outside of the various pattern matching mechanics. Of course
/// this lint can still be used to highlight areas of interest and ensure a good understanding
/// of ownership semantics.
///
/// ### Why is this bad?
/// It isn't bad in general. But in some contexts it can be desirable
/// because it increases ownership hints in the code, and will guard against some changes
/// in ownership.
///
/// ### Example
/// This example shows the basic adjustments necessary to satisfy the lint. Note how
/// the matched expression is explicitly dereferenced with `*` and the `inner` variable
/// is bound to a shared borrow via `ref inner`.
///
/// ```rust,ignore
/// // Bad
/// let value = &Some(Box::new(23));
/// match value {
/// Some(inner) => println!("{}", inner),
/// None => println!("none"),
/// }
///
/// // Good
/// let value = &Some(Box::new(23));
/// match *value {
/// Some(ref inner) => println!("{}", inner),
/// None => println!("none"),
/// }
/// ```
///
/// The following example demonstrates one of the advantages of the more verbose style.
/// Note how the second version uses `ref mut a` to explicitly declare `a` a shared mutable
/// borrow, while `b` is simply taken by value. This ensures that the loop body cannot
/// accidentally modify the wrong part of the structure.
///
/// ```rust,ignore
/// // Bad
/// let mut values = vec![(2, 3), (3, 4)];
/// for (a, b) in &mut values {
/// *a += *b;
/// }
///
/// // Good
/// let mut values = vec![(2, 3), (3, 4)];
/// for &mut (ref mut a, b) in &mut values {
/// *a += b;
/// }
/// ```
pub PATTERN_TYPE_MISMATCH,
restriction,
"type of pattern does not match the expression type"
}
declare_lint_pass!(PatternTypeMismatch => [PATTERN_TYPE_MISMATCH]);
impl<'tcx> LateLintPass<'tcx> for PatternTypeMismatch {
fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx Stmt<'_>) {
if let StmtKind::Local(local) = stmt.kind {
if let Some(init) = &local.init {
if let Some(init_ty) = cx.typeck_results().node_type_opt(init.hir_id) {
let pat = &local.pat;
if in_external_macro(cx.sess(), pat.span) {
return;
}
let deref_possible = match local.source {
LocalSource::Normal => DerefPossible::Possible,
_ => DerefPossible::Impossible,
};
apply_lint(cx, pat, init_ty, deref_possible);
}
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Match(scrutinee, arms, MatchSource::Normal) = expr.kind {
if let Some(expr_ty) = cx.typeck_results().node_type_opt(scrutinee.hir_id) {
'pattern_checks: for arm in arms {
let pat = &arm.pat;
if in_external_macro(cx.sess(), pat.span) {
continue 'pattern_checks;
}
if apply_lint(cx, pat, expr_ty, DerefPossible::Possible) {
break 'pattern_checks;
}
}
}
}
if let ExprKind::Let(let_pat, let_expr, _) = expr.kind {
if let Some(ref expr_ty) = cx.typeck_results().node_type_opt(let_expr.hir_id) {
if in_external_macro(cx.sess(), let_pat.span) {
return;
}
apply_lint(cx, let_pat, expr_ty, DerefPossible::Possible);
}
}
}
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
_: intravisit::FnKind<'tcx>,
_: &'tcx FnDecl<'_>,
body: &'tcx Body<'_>,
_: Span,
hir_id: HirId,
) {
if let Some(fn_sig) = cx.typeck_results().liberated_fn_sigs().get(hir_id) {
for (param, ty) in iter::zip(body.params, fn_sig.inputs()) {
apply_lint(cx, param.pat, ty, DerefPossible::Impossible);
}
}
}
}
#[derive(Debug, Clone, Copy)]
enum DerefPossible {
Possible,
Impossible,
}
fn apply_lint<'tcx>(cx: &LateContext<'tcx>, pat: &Pat<'_>, expr_ty: Ty<'tcx>, deref_possible: DerefPossible) -> bool {
let maybe_mismatch = find_first_mismatch(cx, pat, expr_ty, Level::Top);
if let Some((span, mutability, level)) = maybe_mismatch {
span_lint_and_help(
cx,
PATTERN_TYPE_MISMATCH,
span,
"type of pattern does not match the expression type",
None,
&format!(
"{}explicitly match against a `{}` pattern and adjust the enclosed variable bindings",
match (deref_possible, level) {
(DerefPossible::Possible, Level::Top) => "use `*` to dereference the match expression or ",
_ => "",
},
match mutability {
Mutability::Mut => "&mut _",
Mutability::Not => "&_",
},
),
);
true
} else {
false
}
}
#[derive(Debug, Copy, Clone)]
enum Level {
Top,
Lower,
}
#[allow(rustc::usage_of_ty_tykind)]
fn find_first_mismatch<'tcx>(
cx: &LateContext<'tcx>,
pat: &Pat<'_>,
ty: Ty<'tcx>,
level: Level,
) -> Option<(Span, Mutability, Level)> {
if let PatKind::Ref(sub_pat, _) = pat.kind {
if let TyKind::Ref(_, sub_ty, _) = ty.kind() {
return find_first_mismatch(cx, sub_pat, sub_ty, Level::Lower);
}
}
if let TyKind::Ref(_, _, mutability) = *ty.kind() {
if is_non_ref_pattern(&pat.kind) {
return Some((pat.span, mutability, level));
}
}
if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
return find_first_mismatch_in_struct(cx, field_pats, field_defs, substs_ref);
}
}
}
if let PatKind::TupleStruct(ref qpath, pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
let ty_iter = field_defs.iter().map(|field_def| field_def.ty(cx.tcx, substs_ref));
return find_first_mismatch_in_tuple(cx, pats, ty_iter);
}
}
}
if let PatKind::Tuple(pats, _) = pat.kind { | if let TyKind::Tuple(..) = ty.kind() {
return find_first_mismatch_in_tuple(cx, pats, ty.tuple_fields());
}
}
if let PatKind::Or(sub_pats) = pat.kind {
for pat in sub_pats {
let maybe_mismatch = find_first_mismatch(cx, pat, ty, level);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
}
None
}
fn get_variant<'a>(adt_def: &'a AdtDef, qpath: &QPath<'_>) -> Option<&'a VariantDef> {
if adt_def.is_struct() {
if let Some(variant) = adt_def.variants.iter().next() {
return Some(variant);
}
}
if adt_def.is_enum() {
let pat_ident = last_path_segment(qpath).ident;
for variant in &adt_def.variants {
if variant.ident == pat_ident {
return Some(variant);
}
}
}
None
}
fn find_first_mismatch_in_tuple<'tcx, I>(
cx: &LateContext<'tcx>,
pats: &[Pat<'_>],
ty_iter_src: I,
) -> Option<(Span, Mutability, Level)>
where
I: IntoIterator<Item = Ty<'tcx>>,
{
let mut field_tys = ty_iter_src.into_iter();
'fields: for pat in pats {
let field_ty = if let Some(ty) = field_tys.next() {
ty
} else {
break 'fields;
};
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
None
}
fn find_first_mismatch_in_struct<'tcx>(
cx: &LateContext<'tcx>,
field_pats: &[PatField<'_>],
field_defs: &[FieldDef],
substs_ref: SubstsRef<'tcx>,
) -> Option<(Span, Mutability, Level)> {
for field_pat in field_pats {
'definitions: for field_def in field_defs {
if field_pat.ident == field_def.ident {
let field_ty = field_def.ty(cx.tcx, substs_ref);
let pat = &field_pat.pat;
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
break 'definitions;
}
}
}
None
}
fn is_non_ref_pattern(pat_kind: &PatKind<'_>) -> bool {
match pat_kind {
PatKind::Struct(..) | PatKind::Tuple(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => true,
PatKind::Or(sub_pats) => sub_pats.iter().any(|pat| is_non_ref_pattern(&pat.kind)),
_ => false,
}
} | random_line_split | |
pattern_type_mismatch.rs | use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::last_path_segment;
use rustc_hir::{
intravisit, Body, Expr, ExprKind, FnDecl, HirId, LocalSource, MatchSource, Mutability, Pat, PatField, PatKind,
QPath, Stmt, StmtKind,
};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{AdtDef, FieldDef, Ty, TyKind, VariantDef};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use std::iter;
declare_clippy_lint! {
/// ### What it does
/// Checks for patterns that aren't exact representations of the types
/// they are applied to.
///
/// To satisfy this lint, you will have to adjust either the expression that is matched
/// against or the pattern itself, as well as the bindings that are introduced by the
/// adjusted patterns. For matching you will have to either dereference the expression
/// with the `*` operator, or amend the patterns to explicitly match against `&<pattern>`
/// or `&mut <pattern>` depending on the reference mutability. For the bindings you need
/// to use the inverse. You can leave them as plain bindings if you wish for the value
/// to be copied, but you must use `ref mut <variable>` or `ref <variable>` to construct
/// a reference into the matched structure.
///
/// If you are looking for a way to learn about ownership semantics in more detail, it
/// is recommended to look at IDE options available to you to highlight types, lifetimes
/// and reference semantics in your code. The available tooling would expose these things
/// in a general way even outside of the various pattern matching mechanics. Of course
/// this lint can still be used to highlight areas of interest and ensure a good understanding
/// of ownership semantics.
///
/// ### Why is this bad?
/// It isn't bad in general. But in some contexts it can be desirable
/// because it increases ownership hints in the code, and will guard against some changes
/// in ownership.
///
/// ### Example
/// This example shows the basic adjustments necessary to satisfy the lint. Note how
/// the matched expression is explicitly dereferenced with `*` and the `inner` variable
/// is bound to a shared borrow via `ref inner`.
///
/// ```rust,ignore
/// // Bad
/// let value = &Some(Box::new(23));
/// match value {
/// Some(inner) => println!("{}", inner),
/// None => println!("none"),
/// }
///
/// // Good
/// let value = &Some(Box::new(23));
/// match *value {
/// Some(ref inner) => println!("{}", inner),
/// None => println!("none"),
/// }
/// ```
///
/// The following example demonstrates one of the advantages of the more verbose style.
/// Note how the second version uses `ref mut a` to explicitly declare `a` a shared mutable
/// borrow, while `b` is simply taken by value. This ensures that the loop body cannot
/// accidentally modify the wrong part of the structure.
///
/// ```rust,ignore
/// // Bad
/// let mut values = vec![(2, 3), (3, 4)];
/// for (a, b) in &mut values {
/// *a += *b;
/// }
///
/// // Good
/// let mut values = vec![(2, 3), (3, 4)];
/// for &mut (ref mut a, b) in &mut values {
/// *a += b;
/// }
/// ```
pub PATTERN_TYPE_MISMATCH,
restriction,
"type of pattern does not match the expression type"
}
declare_lint_pass!(PatternTypeMismatch => [PATTERN_TYPE_MISMATCH]);
impl<'tcx> LateLintPass<'tcx> for PatternTypeMismatch {
fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx Stmt<'_>) {
if let StmtKind::Local(local) = stmt.kind {
if let Some(init) = &local.init {
if let Some(init_ty) = cx.typeck_results().node_type_opt(init.hir_id) {
let pat = &local.pat;
if in_external_macro(cx.sess(), pat.span) {
return;
}
let deref_possible = match local.source {
LocalSource::Normal => DerefPossible::Possible,
_ => DerefPossible::Impossible,
};
apply_lint(cx, pat, init_ty, deref_possible);
}
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Match(scrutinee, arms, MatchSource::Normal) = expr.kind {
if let Some(expr_ty) = cx.typeck_results().node_type_opt(scrutinee.hir_id) {
'pattern_checks: for arm in arms {
let pat = &arm.pat;
if in_external_macro(cx.sess(), pat.span) {
continue 'pattern_checks;
}
if apply_lint(cx, pat, expr_ty, DerefPossible::Possible) {
break 'pattern_checks;
}
}
}
}
if let ExprKind::Let(let_pat, let_expr, _) = expr.kind {
if let Some(ref expr_ty) = cx.typeck_results().node_type_opt(let_expr.hir_id) {
if in_external_macro(cx.sess(), let_pat.span) {
return;
}
apply_lint(cx, let_pat, expr_ty, DerefPossible::Possible);
}
}
}
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
_: intravisit::FnKind<'tcx>,
_: &'tcx FnDecl<'_>,
body: &'tcx Body<'_>,
_: Span,
hir_id: HirId,
) {
if let Some(fn_sig) = cx.typeck_results().liberated_fn_sigs().get(hir_id) {
for (param, ty) in iter::zip(body.params, fn_sig.inputs()) {
apply_lint(cx, param.pat, ty, DerefPossible::Impossible);
}
}
}
}
#[derive(Debug, Clone, Copy)]
enum DerefPossible {
Possible,
Impossible,
}
fn apply_lint<'tcx>(cx: &LateContext<'tcx>, pat: &Pat<'_>, expr_ty: Ty<'tcx>, deref_possible: DerefPossible) -> bool |
#[derive(Debug, Copy, Clone)]
enum Level {
Top,
Lower,
}
#[allow(rustc::usage_of_ty_tykind)]
fn find_first_mismatch<'tcx>(
cx: &LateContext<'tcx>,
pat: &Pat<'_>,
ty: Ty<'tcx>,
level: Level,
) -> Option<(Span, Mutability, Level)> {
if let PatKind::Ref(sub_pat, _) = pat.kind {
if let TyKind::Ref(_, sub_ty, _) = ty.kind() {
return find_first_mismatch(cx, sub_pat, sub_ty, Level::Lower);
}
}
if let TyKind::Ref(_, _, mutability) = *ty.kind() {
if is_non_ref_pattern(&pat.kind) {
return Some((pat.span, mutability, level));
}
}
if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
return find_first_mismatch_in_struct(cx, field_pats, field_defs, substs_ref);
}
}
}
if let PatKind::TupleStruct(ref qpath, pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
let ty_iter = field_defs.iter().map(|field_def| field_def.ty(cx.tcx, substs_ref));
return find_first_mismatch_in_tuple(cx, pats, ty_iter);
}
}
}
if let PatKind::Tuple(pats, _) = pat.kind {
if let TyKind::Tuple(..) = ty.kind() {
return find_first_mismatch_in_tuple(cx, pats, ty.tuple_fields());
}
}
if let PatKind::Or(sub_pats) = pat.kind {
for pat in sub_pats {
let maybe_mismatch = find_first_mismatch(cx, pat, ty, level);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
}
None
}
fn get_variant<'a>(adt_def: &'a AdtDef, qpath: &QPath<'_>) -> Option<&'a VariantDef> {
if adt_def.is_struct() {
if let Some(variant) = adt_def.variants.iter().next() {
return Some(variant);
}
}
if adt_def.is_enum() {
let pat_ident = last_path_segment(qpath).ident;
for variant in &adt_def.variants {
if variant.ident == pat_ident {
return Some(variant);
}
}
}
None
}
fn find_first_mismatch_in_tuple<'tcx, I>(
cx: &LateContext<'tcx>,
pats: &[Pat<'_>],
ty_iter_src: I,
) -> Option<(Span, Mutability, Level)>
where
I: IntoIterator<Item = Ty<'tcx>>,
{
let mut field_tys = ty_iter_src.into_iter();
'fields: for pat in pats {
let field_ty = if let Some(ty) = field_tys.next() {
ty
} else {
break 'fields;
};
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
None
}
fn find_first_mismatch_in_struct<'tcx>(
cx: &LateContext<'tcx>,
field_pats: &[PatField<'_>],
field_defs: &[FieldDef],
substs_ref: SubstsRef<'tcx>,
) -> Option<(Span, Mutability, Level)> {
for field_pat in field_pats {
'definitions: for field_def in field_defs {
if field_pat.ident == field_def.ident {
let field_ty = field_def.ty(cx.tcx, substs_ref);
let pat = &field_pat.pat;
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
break 'definitions;
}
}
}
None
}
fn is_non_ref_pattern(pat_kind: &PatKind<'_>) -> bool {
match pat_kind {
PatKind::Struct(..) | PatKind::Tuple(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => true,
PatKind::Or(sub_pats) => sub_pats.iter().any(|pat| is_non_ref_pattern(&pat.kind)),
_ => false,
}
}
| {
let maybe_mismatch = find_first_mismatch(cx, pat, expr_ty, Level::Top);
if let Some((span, mutability, level)) = maybe_mismatch {
span_lint_and_help(
cx,
PATTERN_TYPE_MISMATCH,
span,
"type of pattern does not match the expression type",
None,
&format!(
"{}explicitly match against a `{}` pattern and adjust the enclosed variable bindings",
match (deref_possible, level) {
(DerefPossible::Possible, Level::Top) => "use `*` to dereference the match expression or ",
_ => "",
},
match mutability {
Mutability::Mut => "&mut _",
Mutability::Not => "&_",
},
),
);
true
} else {
false
}
} | identifier_body |
pattern_type_mismatch.rs | use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::last_path_segment;
use rustc_hir::{
intravisit, Body, Expr, ExprKind, FnDecl, HirId, LocalSource, MatchSource, Mutability, Pat, PatField, PatKind,
QPath, Stmt, StmtKind,
};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{AdtDef, FieldDef, Ty, TyKind, VariantDef};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::source_map::Span;
use std::iter;
declare_clippy_lint! {
/// ### What it does
/// Checks for patterns that aren't exact representations of the types
/// they are applied to.
///
/// To satisfy this lint, you will have to adjust either the expression that is matched
/// against or the pattern itself, as well as the bindings that are introduced by the
/// adjusted patterns. For matching you will have to either dereference the expression
/// with the `*` operator, or amend the patterns to explicitly match against `&<pattern>`
/// or `&mut <pattern>` depending on the reference mutability. For the bindings you need
/// to use the inverse. You can leave them as plain bindings if you wish for the value
/// to be copied, but you must use `ref mut <variable>` or `ref <variable>` to construct
/// a reference into the matched structure.
///
/// If you are looking for a way to learn about ownership semantics in more detail, it
/// is recommended to look at IDE options available to you to highlight types, lifetimes
/// and reference semantics in your code. The available tooling would expose these things
/// in a general way even outside of the various pattern matching mechanics. Of course
/// this lint can still be used to highlight areas of interest and ensure a good understanding
/// of ownership semantics.
///
/// ### Why is this bad?
/// It isn't bad in general. But in some contexts it can be desirable
/// because it increases ownership hints in the code, and will guard against some changes
/// in ownership.
///
/// ### Example
/// This example shows the basic adjustments necessary to satisfy the lint. Note how
/// the matched expression is explicitly dereferenced with `*` and the `inner` variable
/// is bound to a shared borrow via `ref inner`.
///
/// ```rust,ignore
/// // Bad
/// let value = &Some(Box::new(23));
/// match value {
/// Some(inner) => println!("{}", inner),
/// None => println!("none"),
/// }
///
/// // Good
/// let value = &Some(Box::new(23));
/// match *value {
/// Some(ref inner) => println!("{}", inner),
/// None => println!("none"),
/// }
/// ```
///
/// The following example demonstrates one of the advantages of the more verbose style.
/// Note how the second version uses `ref mut a` to explicitly declare `a` a shared mutable
/// borrow, while `b` is simply taken by value. This ensures that the loop body cannot
/// accidentally modify the wrong part of the structure.
///
/// ```rust,ignore
/// // Bad
/// let mut values = vec![(2, 3), (3, 4)];
/// for (a, b) in &mut values {
/// *a += *b;
/// }
///
/// // Good
/// let mut values = vec![(2, 3), (3, 4)];
/// for &mut (ref mut a, b) in &mut values {
/// *a += b;
/// }
/// ```
pub PATTERN_TYPE_MISMATCH,
restriction,
"type of pattern does not match the expression type"
}
declare_lint_pass!(PatternTypeMismatch => [PATTERN_TYPE_MISMATCH]);
impl<'tcx> LateLintPass<'tcx> for PatternTypeMismatch {
fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx Stmt<'_>) {
if let StmtKind::Local(local) = stmt.kind {
if let Some(init) = &local.init {
if let Some(init_ty) = cx.typeck_results().node_type_opt(init.hir_id) {
let pat = &local.pat;
if in_external_macro(cx.sess(), pat.span) {
return;
}
let deref_possible = match local.source {
LocalSource::Normal => DerefPossible::Possible,
_ => DerefPossible::Impossible,
};
apply_lint(cx, pat, init_ty, deref_possible);
}
}
}
}
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if let ExprKind::Match(scrutinee, arms, MatchSource::Normal) = expr.kind {
if let Some(expr_ty) = cx.typeck_results().node_type_opt(scrutinee.hir_id) {
'pattern_checks: for arm in arms {
let pat = &arm.pat;
if in_external_macro(cx.sess(), pat.span) {
continue 'pattern_checks;
}
if apply_lint(cx, pat, expr_ty, DerefPossible::Possible) {
break 'pattern_checks;
}
}
}
}
if let ExprKind::Let(let_pat, let_expr, _) = expr.kind {
if let Some(ref expr_ty) = cx.typeck_results().node_type_opt(let_expr.hir_id) {
if in_external_macro(cx.sess(), let_pat.span) {
return;
}
apply_lint(cx, let_pat, expr_ty, DerefPossible::Possible);
}
}
}
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
_: intravisit::FnKind<'tcx>,
_: &'tcx FnDecl<'_>,
body: &'tcx Body<'_>,
_: Span,
hir_id: HirId,
) {
if let Some(fn_sig) = cx.typeck_results().liberated_fn_sigs().get(hir_id) {
for (param, ty) in iter::zip(body.params, fn_sig.inputs()) {
apply_lint(cx, param.pat, ty, DerefPossible::Impossible);
}
}
}
}
#[derive(Debug, Clone, Copy)]
enum DerefPossible {
Possible,
Impossible,
}
fn apply_lint<'tcx>(cx: &LateContext<'tcx>, pat: &Pat<'_>, expr_ty: Ty<'tcx>, deref_possible: DerefPossible) -> bool {
let maybe_mismatch = find_first_mismatch(cx, pat, expr_ty, Level::Top);
if let Some((span, mutability, level)) = maybe_mismatch {
span_lint_and_help(
cx,
PATTERN_TYPE_MISMATCH,
span,
"type of pattern does not match the expression type",
None,
&format!(
"{}explicitly match against a `{}` pattern and adjust the enclosed variable bindings",
match (deref_possible, level) {
(DerefPossible::Possible, Level::Top) => "use `*` to dereference the match expression or ",
_ => "",
},
match mutability {
Mutability::Mut => "&mut _",
Mutability::Not => "&_",
},
),
);
true
} else {
false
}
}
#[derive(Debug, Copy, Clone)]
enum Level {
Top,
Lower,
}
#[allow(rustc::usage_of_ty_tykind)]
fn find_first_mismatch<'tcx>(
cx: &LateContext<'tcx>,
pat: &Pat<'_>,
ty: Ty<'tcx>,
level: Level,
) -> Option<(Span, Mutability, Level)> {
if let PatKind::Ref(sub_pat, _) = pat.kind {
if let TyKind::Ref(_, sub_ty, _) = ty.kind() {
return find_first_mismatch(cx, sub_pat, sub_ty, Level::Lower);
}
}
if let TyKind::Ref(_, _, mutability) = *ty.kind() {
if is_non_ref_pattern(&pat.kind) {
return Some((pat.span, mutability, level));
}
}
if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
return find_first_mismatch_in_struct(cx, field_pats, field_defs, substs_ref);
}
}
}
if let PatKind::TupleStruct(ref qpath, pats, _) = pat.kind {
if let TyKind::Adt(adt_def, substs_ref) = ty.kind() {
if let Some(variant) = get_variant(adt_def, qpath) {
let field_defs = &variant.fields;
let ty_iter = field_defs.iter().map(|field_def| field_def.ty(cx.tcx, substs_ref));
return find_first_mismatch_in_tuple(cx, pats, ty_iter);
}
}
}
if let PatKind::Tuple(pats, _) = pat.kind {
if let TyKind::Tuple(..) = ty.kind() {
return find_first_mismatch_in_tuple(cx, pats, ty.tuple_fields());
}
}
if let PatKind::Or(sub_pats) = pat.kind {
for pat in sub_pats {
let maybe_mismatch = find_first_mismatch(cx, pat, ty, level);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
}
}
None
}
fn get_variant<'a>(adt_def: &'a AdtDef, qpath: &QPath<'_>) -> Option<&'a VariantDef> {
if adt_def.is_struct() {
if let Some(variant) = adt_def.variants.iter().next() {
return Some(variant);
}
}
if adt_def.is_enum() {
let pat_ident = last_path_segment(qpath).ident;
for variant in &adt_def.variants {
if variant.ident == pat_ident {
return Some(variant);
}
}
}
None
}
fn find_first_mismatch_in_tuple<'tcx, I>(
cx: &LateContext<'tcx>,
pats: &[Pat<'_>],
ty_iter_src: I,
) -> Option<(Span, Mutability, Level)>
where
I: IntoIterator<Item = Ty<'tcx>>,
{
let mut field_tys = ty_iter_src.into_iter();
'fields: for pat in pats {
let field_ty = if let Some(ty) = field_tys.next() {
ty
} else {
break 'fields;
};
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch |
}
None
}
fn find_first_mismatch_in_struct<'tcx>(
cx: &LateContext<'tcx>,
field_pats: &[PatField<'_>],
field_defs: &[FieldDef],
substs_ref: SubstsRef<'tcx>,
) -> Option<(Span, Mutability, Level)> {
for field_pat in field_pats {
'definitions: for field_def in field_defs {
if field_pat.ident == field_def.ident {
let field_ty = field_def.ty(cx.tcx, substs_ref);
let pat = &field_pat.pat;
let maybe_mismatch = find_first_mismatch(cx, pat, field_ty, Level::Lower);
if let Some(mismatch) = maybe_mismatch {
return Some(mismatch);
}
break 'definitions;
}
}
}
None
}
fn is_non_ref_pattern(pat_kind: &PatKind<'_>) -> bool {
match pat_kind {
PatKind::Struct(..) | PatKind::Tuple(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => true,
PatKind::Or(sub_pats) => sub_pats.iter().any(|pat| is_non_ref_pattern(&pat.kind)),
_ => false,
}
}
| {
return Some(mismatch);
} | conditional_block |
vanilla_lstm.py | '''
Build a tweet sentiment analyzer
'''
from __future__ import print_function
import cPickle as pickle
import sys
import time
from collections import OrderedDict
import numpy
import theano
import theano.tensor as tensor
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import imdb
datasets = {'imdb': (imdb.load_data, imdb.prepare_data)}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
# NOTE (bitesandbytes) : Important; set minibatch_size = 1 ?
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# NOTE (bitesandbytes) : Not needed.
def get_dataset(name):
return datasets[name][0], datasets[name][1]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.items():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.items():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
Global (not LSTM) parameter. For the embedding and the classifier.
"""
params = OrderedDict()
params = get_layer(options['encoder'])[0](options,
params,
prefix=options['encoder'])
# classifier
params['U'] = 0.01 * numpy.random.randn(options['dim_proj'],
options['ydim']).astype(config.floatX)
params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
"""
Init the LSTM parameter:
:see: init_params
"""
W = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],))
params[_p(prefix, 'b')] = b.astype(config.floatX)
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
# Donno what this is doing :/
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=[tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj),
tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj)],
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval[0]
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, mask, y, cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, mask, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
x = tensor.matrix('x', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
y = tensor.vector('y', dtype='int64')
n_timesteps = x.shape[0]
n_samples = x.shape[1]
proj = get_layer(options['encoder'])[1](tparams, x, options,
prefix=options['encoder'],
mask=mask)
if options['encoder'] == 'lstm':
proj = (proj * mask[:, :, None]).sum(axis=0)
proj = proj / mask.sum(axis=0)[:, None]
if options['use_dropout']:
proj = dropout_layer(proj, use_noise, trng)
pred = tensor.nnet.softmax(tensor.dot(proj.T, tparams['U']) + tparams['b'])
f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
f_pred = theano.function([x, mask], pred.argmax(axis=1), name='f_pred')
off = 1e-8
if pred.dtype == 'float16':
off = 1e-6
cost = -tensor.log(pred[tensor.arange(n_samples), y] + off).mean()
return use_noise, x, mask, y, f_pred_prob, f_pred, cost
def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
""" If you want to use a trained model, this is useful to compute
the probabilities of new examples.
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 2)).astype(config.floatX)
n_done = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
pred_probs = f_pred_prob(x, mask)
probs[valid_index, :] = pred_probs
n_done += len(valid_index)
if verbose:
print('%d/%d samples classified' % (n_done, n_samples))
| """
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
preds = f_pred(x, mask)
targets = numpy.array(data[1])[valid_index]
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
def train_lstm(
dim_proj=128, # word embeding dimension and LSTM number of hidden units.
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.0001, # Learning rate for sgd (not used for adadelta and rmsprop)
n_words=10000, # Vocabulary size
optimizer=adadelta,
# sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
encoder='lstm', # TODO: can be removed must be lstm.
saveto='lstm_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequence longer then this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='imdb',
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
):
# Model options
model_options = locals().copy()
print("model options", model_options)
load_data, prepare_data = get_dataset(dataset)
print('Loading data')
train, valid, test = load_data(n_words=n_words, valid_portion=0.05,
maxlen=maxlen)
if test_size > 0:
# The test set is sorted by size, but we want to keep random
# size example. So we must select a random selection of the
# examples.
idx = numpy.arange(len(test[0]))
numpy.random.shuffle(idx)
idx = idx[:test_size]
test = ([test[0][n] for n in idx], [test[1][n] for n in idx])
ydim = numpy.max(train[1]) + 1
# TOOD(bitesandbytes) : Change ydim to |num words| + 1 (0 -> no word | empty)
model_options['ydim'] = ydim
print('Building model')
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
# use_noise is for dropout
(use_noise, x, mask,
y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
f_cost = theano.function([x, mask, y], cost, name='f_cost')
grads = tensor.grad(cost, wrt=list(tparams.values()))
f_grad = theano.function([x, mask, y], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, mask, y, cost)
print('Optimization')
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
print("%d train examples" % len(train[0]))
print("%d valid examples" % len(valid[0]))
print("%d test examples" % len(test[0]))
history_errs = []
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) // batch_size
if saveFreq == -1:
saveFreq = len(train[0]) // batch_size
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in range(max_epochs):
n_samples = 0
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
for _, train_index in kf:
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
y = [train[1][t] for t in train_index]
x = [train[0][t] for t in train_index]
# Get the data in numpy.ndarray format
# This swap the axis!
# Return something of shape (minibatch maxlen, n samples)
x, mask, y = prepare_data(x, y)
n_samples += x.shape[1]
cost = f_grad_shared(x, mask, y)
f_update(lrate)
if numpy.isnan(cost) or numpy.isinf(cost):
print('bad cost detected: ', cost)
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print('Epoch ', eidx, 'Update ', uidx, 'Cost ', cost)
if saveto and numpy.mod(uidx, saveFreq) == 0:
print('Saving...')
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pickle.dump(model_options, open('%s.pkl' % saveto, 'wb'), -1)
print('Done')
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = pred_error(f_pred, prepare_data, train, kf)
valid_err = pred_error(f_pred, prepare_data, valid,
kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
history_errs.append([valid_err, test_err])
if (best_p is None or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
print('Train ', train_err, 'Valid ', valid_err,
'Test ', test_err)
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print('Early Stop!')
estop = True
break
print('Seen %d samples' % n_samples)
if estop:
break
except KeyboardInterrupt:
print("Training interupted")
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
use_noise.set_value(0.)
kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
train_err = pred_error(f_pred, prepare_data, train, kf_train_sorted)
valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
print('Train ', train_err, 'Valid ', valid_err, 'Test ', test_err)
if saveto:
numpy.savez(saveto, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
print('The code run for %d epochs, with %f sec/epochs' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1))))
print(('Training took %.1fs' %
(end_time - start_time)), file=sys.stderr)
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm(
max_epochs=100,
test_size=500,
) | return probs
def pred_error(f_pred, prepare_data, data, iterator, verbose=False): | random_line_split |
vanilla_lstm.py | '''
Build a tweet sentiment analyzer
'''
from __future__ import print_function
import cPickle as pickle
import sys
import time
from collections import OrderedDict
import numpy
import theano
import theano.tensor as tensor
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import imdb
datasets = {'imdb': (imdb.load_data, imdb.prepare_data)}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
# NOTE (bitesandbytes) : Important; set minibatch_size = 1 ?
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# NOTE (bitesandbytes) : Not needed.
def get_dataset(name):
return datasets[name][0], datasets[name][1]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.items():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.items():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
Global (not LSTM) parameter. For the embedding and the classifier.
"""
params = OrderedDict()
params = get_layer(options['encoder'])[0](options,
params,
prefix=options['encoder'])
# classifier
params['U'] = 0.01 * numpy.random.randn(options['dim_proj'],
options['ydim']).astype(config.floatX)
params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
"""
Init the LSTM parameter:
:see: init_params
"""
W = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],))
params[_p(prefix, 'b')] = b.astype(config.floatX)
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def | (_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
# Donno what this is doing :/
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=[tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj),
tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj)],
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval[0]
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, mask, y, cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, mask, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
x = tensor.matrix('x', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
y = tensor.vector('y', dtype='int64')
n_timesteps = x.shape[0]
n_samples = x.shape[1]
proj = get_layer(options['encoder'])[1](tparams, x, options,
prefix=options['encoder'],
mask=mask)
if options['encoder'] == 'lstm':
proj = (proj * mask[:, :, None]).sum(axis=0)
proj = proj / mask.sum(axis=0)[:, None]
if options['use_dropout']:
proj = dropout_layer(proj, use_noise, trng)
pred = tensor.nnet.softmax(tensor.dot(proj.T, tparams['U']) + tparams['b'])
f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
f_pred = theano.function([x, mask], pred.argmax(axis=1), name='f_pred')
off = 1e-8
if pred.dtype == 'float16':
off = 1e-6
cost = -tensor.log(pred[tensor.arange(n_samples), y] + off).mean()
return use_noise, x, mask, y, f_pred_prob, f_pred, cost
def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
""" If you want to use a trained model, this is useful to compute
the probabilities of new examples.
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 2)).astype(config.floatX)
n_done = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
pred_probs = f_pred_prob(x, mask)
probs[valid_index, :] = pred_probs
n_done += len(valid_index)
if verbose:
print('%d/%d samples classified' % (n_done, n_samples))
return probs
def pred_error(f_pred, prepare_data, data, iterator, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
preds = f_pred(x, mask)
targets = numpy.array(data[1])[valid_index]
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
def train_lstm(
dim_proj=128, # word embeding dimension and LSTM number of hidden units.
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.0001, # Learning rate for sgd (not used for adadelta and rmsprop)
n_words=10000, # Vocabulary size
optimizer=adadelta,
# sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
encoder='lstm', # TODO: can be removed must be lstm.
saveto='lstm_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequence longer then this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='imdb',
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
):
# Model options
model_options = locals().copy()
print("model options", model_options)
load_data, prepare_data = get_dataset(dataset)
print('Loading data')
train, valid, test = load_data(n_words=n_words, valid_portion=0.05,
maxlen=maxlen)
if test_size > 0:
# The test set is sorted by size, but we want to keep random
# size example. So we must select a random selection of the
# examples.
idx = numpy.arange(len(test[0]))
numpy.random.shuffle(idx)
idx = idx[:test_size]
test = ([test[0][n] for n in idx], [test[1][n] for n in idx])
ydim = numpy.max(train[1]) + 1
# TOOD(bitesandbytes) : Change ydim to |num words| + 1 (0 -> no word | empty)
model_options['ydim'] = ydim
print('Building model')
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
# use_noise is for dropout
(use_noise, x, mask,
y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
f_cost = theano.function([x, mask, y], cost, name='f_cost')
grads = tensor.grad(cost, wrt=list(tparams.values()))
f_grad = theano.function([x, mask, y], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, mask, y, cost)
print('Optimization')
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
print("%d train examples" % len(train[0]))
print("%d valid examples" % len(valid[0]))
print("%d test examples" % len(test[0]))
history_errs = []
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) // batch_size
if saveFreq == -1:
saveFreq = len(train[0]) // batch_size
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in range(max_epochs):
n_samples = 0
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
for _, train_index in kf:
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
y = [train[1][t] for t in train_index]
x = [train[0][t] for t in train_index]
# Get the data in numpy.ndarray format
# This swap the axis!
# Return something of shape (minibatch maxlen, n samples)
x, mask, y = prepare_data(x, y)
n_samples += x.shape[1]
cost = f_grad_shared(x, mask, y)
f_update(lrate)
if numpy.isnan(cost) or numpy.isinf(cost):
print('bad cost detected: ', cost)
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print('Epoch ', eidx, 'Update ', uidx, 'Cost ', cost)
if saveto and numpy.mod(uidx, saveFreq) == 0:
print('Saving...')
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pickle.dump(model_options, open('%s.pkl' % saveto, 'wb'), -1)
print('Done')
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = pred_error(f_pred, prepare_data, train, kf)
valid_err = pred_error(f_pred, prepare_data, valid,
kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
history_errs.append([valid_err, test_err])
if (best_p is None or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
print('Train ', train_err, 'Valid ', valid_err,
'Test ', test_err)
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print('Early Stop!')
estop = True
break
print('Seen %d samples' % n_samples)
if estop:
break
except KeyboardInterrupt:
print("Training interupted")
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
use_noise.set_value(0.)
kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
train_err = pred_error(f_pred, prepare_data, train, kf_train_sorted)
valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
print('Train ', train_err, 'Valid ', valid_err, 'Test ', test_err)
if saveto:
numpy.savez(saveto, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
print('The code run for %d epochs, with %f sec/epochs' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1))))
print(('Training took %.1fs' %
(end_time - start_time)), file=sys.stderr)
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm(
max_epochs=100,
test_size=500,
)
| _slice | identifier_name |
vanilla_lstm.py | '''
Build a tweet sentiment analyzer
'''
from __future__ import print_function
import cPickle as pickle
import sys
import time
from collections import OrderedDict
import numpy
import theano
import theano.tensor as tensor
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import imdb
datasets = {'imdb': (imdb.load_data, imdb.prepare_data)}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
# NOTE (bitesandbytes) : Important; set minibatch_size = 1 ?
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# NOTE (bitesandbytes) : Not needed.
def get_dataset(name):
return datasets[name][0], datasets[name][1]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.items():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.items():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
Global (not LSTM) parameter. For the embedding and the classifier.
"""
params = OrderedDict()
params = get_layer(options['encoder'])[0](options,
params,
prefix=options['encoder'])
# classifier
params['U'] = 0.01 * numpy.random.randn(options['dim_proj'],
options['ydim']).astype(config.floatX)
params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
"""
Init the LSTM parameter:
:see: init_params
"""
W = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],))
params[_p(prefix, 'b')] = b.astype(config.floatX)
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def _slice(_x, n, dim):
|
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
# Donno what this is doing :/
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=[tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj),
tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj)],
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval[0]
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, mask, y, cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, mask, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
x = tensor.matrix('x', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
y = tensor.vector('y', dtype='int64')
n_timesteps = x.shape[0]
n_samples = x.shape[1]
proj = get_layer(options['encoder'])[1](tparams, x, options,
prefix=options['encoder'],
mask=mask)
if options['encoder'] == 'lstm':
proj = (proj * mask[:, :, None]).sum(axis=0)
proj = proj / mask.sum(axis=0)[:, None]
if options['use_dropout']:
proj = dropout_layer(proj, use_noise, trng)
pred = tensor.nnet.softmax(tensor.dot(proj.T, tparams['U']) + tparams['b'])
f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
f_pred = theano.function([x, mask], pred.argmax(axis=1), name='f_pred')
off = 1e-8
if pred.dtype == 'float16':
off = 1e-6
cost = -tensor.log(pred[tensor.arange(n_samples), y] + off).mean()
return use_noise, x, mask, y, f_pred_prob, f_pred, cost
def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
""" If you want to use a trained model, this is useful to compute
the probabilities of new examples.
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 2)).astype(config.floatX)
n_done = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
pred_probs = f_pred_prob(x, mask)
probs[valid_index, :] = pred_probs
n_done += len(valid_index)
if verbose:
print('%d/%d samples classified' % (n_done, n_samples))
return probs
def pred_error(f_pred, prepare_data, data, iterator, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
preds = f_pred(x, mask)
targets = numpy.array(data[1])[valid_index]
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
def train_lstm(
dim_proj=128, # word embeding dimension and LSTM number of hidden units.
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.0001, # Learning rate for sgd (not used for adadelta and rmsprop)
n_words=10000, # Vocabulary size
optimizer=adadelta,
# sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
encoder='lstm', # TODO: can be removed must be lstm.
saveto='lstm_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequence longer then this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='imdb',
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
):
# Model options
model_options = locals().copy()
print("model options", model_options)
load_data, prepare_data = get_dataset(dataset)
print('Loading data')
train, valid, test = load_data(n_words=n_words, valid_portion=0.05,
maxlen=maxlen)
if test_size > 0:
# The test set is sorted by size, but we want to keep random
# size example. So we must select a random selection of the
# examples.
idx = numpy.arange(len(test[0]))
numpy.random.shuffle(idx)
idx = idx[:test_size]
test = ([test[0][n] for n in idx], [test[1][n] for n in idx])
ydim = numpy.max(train[1]) + 1
# TOOD(bitesandbytes) : Change ydim to |num words| + 1 (0 -> no word | empty)
model_options['ydim'] = ydim
print('Building model')
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
# use_noise is for dropout
(use_noise, x, mask,
y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
f_cost = theano.function([x, mask, y], cost, name='f_cost')
grads = tensor.grad(cost, wrt=list(tparams.values()))
f_grad = theano.function([x, mask, y], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, mask, y, cost)
print('Optimization')
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
print("%d train examples" % len(train[0]))
print("%d valid examples" % len(valid[0]))
print("%d test examples" % len(test[0]))
history_errs = []
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) // batch_size
if saveFreq == -1:
saveFreq = len(train[0]) // batch_size
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in range(max_epochs):
n_samples = 0
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
for _, train_index in kf:
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
y = [train[1][t] for t in train_index]
x = [train[0][t] for t in train_index]
# Get the data in numpy.ndarray format
# This swap the axis!
# Return something of shape (minibatch maxlen, n samples)
x, mask, y = prepare_data(x, y)
n_samples += x.shape[1]
cost = f_grad_shared(x, mask, y)
f_update(lrate)
if numpy.isnan(cost) or numpy.isinf(cost):
print('bad cost detected: ', cost)
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print('Epoch ', eidx, 'Update ', uidx, 'Cost ', cost)
if saveto and numpy.mod(uidx, saveFreq) == 0:
print('Saving...')
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pickle.dump(model_options, open('%s.pkl' % saveto, 'wb'), -1)
print('Done')
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = pred_error(f_pred, prepare_data, train, kf)
valid_err = pred_error(f_pred, prepare_data, valid,
kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
history_errs.append([valid_err, test_err])
if (best_p is None or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
print('Train ', train_err, 'Valid ', valid_err,
'Test ', test_err)
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print('Early Stop!')
estop = True
break
print('Seen %d samples' % n_samples)
if estop:
break
except KeyboardInterrupt:
print("Training interupted")
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
use_noise.set_value(0.)
kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
train_err = pred_error(f_pred, prepare_data, train, kf_train_sorted)
valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
print('Train ', train_err, 'Valid ', valid_err, 'Test ', test_err)
if saveto:
numpy.savez(saveto, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
print('The code run for %d epochs, with %f sec/epochs' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1))))
print(('Training took %.1fs' %
(end_time - start_time)), file=sys.stderr)
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm(
max_epochs=100,
test_size=500,
)
| if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim] | identifier_body |
vanilla_lstm.py | '''
Build a tweet sentiment analyzer
'''
from __future__ import print_function
import cPickle as pickle
import sys
import time
from collections import OrderedDict
import numpy
import theano
import theano.tensor as tensor
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import imdb
datasets = {'imdb': (imdb.load_data, imdb.prepare_data)}
# Set the random number generators' seeds for consistency
SEED = 123
numpy.random.seed(SEED)
def numpy_floatX(data):
return numpy.asarray(data, dtype=config.floatX)
# NOTE (bitesandbytes) : Important; set minibatch_size = 1 ?
def get_minibatches_idx(n, minibatch_size, shuffle=False):
"""
Used to shuffle the dataset at each iteration.
"""
idx_list = numpy.arange(n, dtype="int32")
if shuffle:
numpy.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):
# Make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# NOTE (bitesandbytes) : Not needed.
def get_dataset(name):
return datasets[name][0], datasets[name][1]
def zipp(params, tparams):
"""
When we reload the model. Needed for the GPU stuff.
"""
for kk, vv in params.items():
tparams[kk].set_value(vv)
def unzip(zipped):
"""
When we pickle the model. Needed for the GPU stuff.
"""
new_params = OrderedDict()
for kk, vv in zipped.items():
new_params[kk] = vv.get_value()
return new_params
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise,
(state_before *
trng.binomial(state_before.shape,
p=0.5, n=1,
dtype=state_before.dtype)),
state_before * 0.5)
return proj
def _p(pp, name):
return '%s_%s' % (pp, name)
def init_params(options):
"""
Global (not LSTM) parameter. For the embedding and the classifier.
"""
params = OrderedDict()
params = get_layer(options['encoder'])[0](options,
params,
prefix=options['encoder'])
# classifier
params['U'] = 0.01 * numpy.random.randn(options['dim_proj'],
options['ydim']).astype(config.floatX)
params['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)
return params
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.items():
if kk not in pp:
|
params[kk] = pp[kk]
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.items():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def get_layer(name):
fns = layers[name]
return fns
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype(config.floatX)
def param_init_lstm(options, params, prefix='lstm'):
"""
Init the LSTM parameter:
:see: init_params
"""
W = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],))
params[_p(prefix, 'b')] = b.astype(config.floatX)
return params
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
assert mask is not None
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, options['dim_proj']))
f = tensor.nnet.sigmoid(_slice(preact, 1, options['dim_proj']))
o = tensor.nnet.sigmoid(_slice(preact, 2, options['dim_proj']))
c = tensor.tanh(_slice(preact, 3, options['dim_proj']))
c = f * c_ + i * c
c = m_[:, None] * c + (1. - m_)[:, None] * c_
h = o * tensor.tanh(c)
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h, c
# Donno what this is doing :/
state_below = (tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=[tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj),
tensor.alloc(numpy_floatX(0.),
n_samples,
dim_proj)],
name=_p(prefix, '_layers'),
n_steps=nsteps)
return rval[0]
# ff: Feed Forward (normal neural net), only useful to put after lstm
# before the classifier.
layers = {'lstm': (param_init_lstm, lstm_layer)}
def sgd(lr, tparams, grads, x, mask, y, cost):
""" Stochastic Gradient Descent
:note: A more complicated version of sgd then needed. This is
done like that for adadelta and rmsprop.
"""
# New set of shared variable that will contain the gradient
# for a mini-batch.
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
# Function that computes gradients for a mini-batch, but do not
# updates the weights.
f_grad_shared = theano.function([x, mask, y], cost, updates=gsup,
name='sgd_f_grad_shared')
pup = [(p, p - lr * g) for p, g in zip(tparams.values(), gshared)]
# Function that updates the weights from the previously computed
# gradient.
f_update = theano.function([lr], [], updates=pup,
name='sgd_f_update')
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, x, mask, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, x, mask, y, cost):
"""
A variant of SGD that scales the step size by running average of the
recent step norms.
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tpramas: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of cost w.r.t to parameres
x: Theano variable
Model inputs
mask: Theano variable
Sequence mask
y: Theano variable
Targets
cost: Theano variable
Objective fucntion to minimize
Notes
-----
For more information, see [Hint2014]_.
.. [Hint2014] Geoff Hinton, *Neural Networks for Machine Learning*,
lecture 6a,
http://cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost,
updates=zgup + rgup + rg2up,
name='rmsprop_f_grad_shared')
updir = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_updir' % k)
for k, p in tparams.items()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(tparams.values(), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore',
name='rmsprop_f_update')
return f_grad_shared, f_update
def build_model(tparams, options):
trng = RandomStreams(SEED)
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
x = tensor.matrix('x', dtype='int64')
mask = tensor.matrix('mask', dtype=config.floatX)
y = tensor.vector('y', dtype='int64')
n_timesteps = x.shape[0]
n_samples = x.shape[1]
proj = get_layer(options['encoder'])[1](tparams, x, options,
prefix=options['encoder'],
mask=mask)
if options['encoder'] == 'lstm':
proj = (proj * mask[:, :, None]).sum(axis=0)
proj = proj / mask.sum(axis=0)[:, None]
if options['use_dropout']:
proj = dropout_layer(proj, use_noise, trng)
pred = tensor.nnet.softmax(tensor.dot(proj.T, tparams['U']) + tparams['b'])
f_pred_prob = theano.function([x, mask], pred, name='f_pred_prob')
f_pred = theano.function([x, mask], pred.argmax(axis=1), name='f_pred')
off = 1e-8
if pred.dtype == 'float16':
off = 1e-6
cost = -tensor.log(pred[tensor.arange(n_samples), y] + off).mean()
return use_noise, x, mask, y, f_pred_prob, f_pred, cost
def pred_probs(f_pred_prob, prepare_data, data, iterator, verbose=False):
""" If you want to use a trained model, this is useful to compute
the probabilities of new examples.
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 2)).astype(config.floatX)
n_done = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
pred_probs = f_pred_prob(x, mask)
probs[valid_index, :] = pred_probs
n_done += len(valid_index)
if verbose:
print('%d/%d samples classified' % (n_done, n_samples))
return probs
def pred_error(f_pred, prepare_data, data, iterator, verbose=False):
"""
Just compute the error
f_pred: Theano fct computing the prediction
prepare_data: usual prepare_data for that dataset.
"""
valid_err = 0
for _, valid_index in iterator:
x, mask, y = prepare_data([data[0][t] for t in valid_index],
numpy.array(data[1])[valid_index],
maxlen=None)
preds = f_pred(x, mask)
targets = numpy.array(data[1])[valid_index]
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
def train_lstm(
dim_proj=128, # word embeding dimension and LSTM number of hidden units.
patience=10, # Number of epoch to wait before early stop if no progress
max_epochs=5000, # The maximum number of epoch to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.0001, # Learning rate for sgd (not used for adadelta and rmsprop)
n_words=10000, # Vocabulary size
optimizer=adadelta,
# sgd, adadelta and rmsprop available, sgd very hard to use, not recommanded (probably need momentum and decaying learning rate).
encoder='lstm', # TODO: can be removed must be lstm.
saveto='lstm_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequence longer then this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='imdb',
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worst test error
# This frequently need a bigger model.
reload_model=None, # Path to a saved model we want to start from.
test_size=-1, # If >0, we keep only this number of test example.
):
# Model options
model_options = locals().copy()
print("model options", model_options)
load_data, prepare_data = get_dataset(dataset)
print('Loading data')
train, valid, test = load_data(n_words=n_words, valid_portion=0.05,
maxlen=maxlen)
if test_size > 0:
# The test set is sorted by size, but we want to keep random
# size example. So we must select a random selection of the
# examples.
idx = numpy.arange(len(test[0]))
numpy.random.shuffle(idx)
idx = idx[:test_size]
test = ([test[0][n] for n in idx], [test[1][n] for n in idx])
ydim = numpy.max(train[1]) + 1
# TOOD(bitesandbytes) : Change ydim to |num words| + 1 (0 -> no word | empty)
model_options['ydim'] = ydim
print('Building model')
# This create the initial parameters as numpy ndarrays.
# Dict name (string) -> numpy ndarray
params = init_params(model_options)
if reload_model:
load_params('lstm_model.npz', params)
# This create Theano Shared Variable from the parameters.
# Dict name (string) -> Theano Tensor Shared Variable
# params and tparams have different copy of the weights.
tparams = init_tparams(params)
# use_noise is for dropout
(use_noise, x, mask,
y, f_pred_prob, f_pred, cost) = build_model(tparams, model_options)
if decay_c > 0.:
decay_c = theano.shared(numpy_floatX(decay_c), name='decay_c')
weight_decay = 0.
weight_decay += (tparams['U'] ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
f_cost = theano.function([x, mask, y], cost, name='f_cost')
grads = tensor.grad(cost, wrt=list(tparams.values()))
f_grad = theano.function([x, mask, y], grads, name='f_grad')
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = optimizer(lr, tparams, grads,
x, mask, y, cost)
print('Optimization')
kf_valid = get_minibatches_idx(len(valid[0]), valid_batch_size)
kf_test = get_minibatches_idx(len(test[0]), valid_batch_size)
print("%d train examples" % len(train[0]))
print("%d valid examples" % len(valid[0]))
print("%d test examples" % len(test[0]))
history_errs = []
best_p = None
bad_count = 0
if validFreq == -1:
validFreq = len(train[0]) // batch_size
if saveFreq == -1:
saveFreq = len(train[0]) // batch_size
uidx = 0 # the number of update done
estop = False # early stop
start_time = time.time()
try:
for eidx in range(max_epochs):
n_samples = 0
# Get new shuffled index for the training set.
kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
for _, train_index in kf:
uidx += 1
use_noise.set_value(1.)
# Select the random examples for this minibatch
y = [train[1][t] for t in train_index]
x = [train[0][t] for t in train_index]
# Get the data in numpy.ndarray format
# This swap the axis!
# Return something of shape (minibatch maxlen, n samples)
x, mask, y = prepare_data(x, y)
n_samples += x.shape[1]
cost = f_grad_shared(x, mask, y)
f_update(lrate)
if numpy.isnan(cost) or numpy.isinf(cost):
print('bad cost detected: ', cost)
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print('Epoch ', eidx, 'Update ', uidx, 'Cost ', cost)
if saveto and numpy.mod(uidx, saveFreq) == 0:
print('Saving...')
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pickle.dump(model_options, open('%s.pkl' % saveto, 'wb'), -1)
print('Done')
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = pred_error(f_pred, prepare_data, train, kf)
valid_err = pred_error(f_pred, prepare_data, valid,
kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
history_errs.append([valid_err, test_err])
if (best_p is None or
valid_err <= numpy.array(history_errs)[:,
0].min()):
best_p = unzip(tparams)
bad_counter = 0
print('Train ', train_err, 'Valid ', valid_err,
'Test ', test_err)
if (len(history_errs) > patience and
valid_err >= numpy.array(history_errs)[:-patience,
0].min()):
bad_counter += 1
if bad_counter > patience:
print('Early Stop!')
estop = True
break
print('Seen %d samples' % n_samples)
if estop:
break
except KeyboardInterrupt:
print("Training interupted")
end_time = time.time()
if best_p is not None:
zipp(best_p, tparams)
else:
best_p = unzip(tparams)
use_noise.set_value(0.)
kf_train_sorted = get_minibatches_idx(len(train[0]), batch_size)
train_err = pred_error(f_pred, prepare_data, train, kf_train_sorted)
valid_err = pred_error(f_pred, prepare_data, valid, kf_valid)
test_err = pred_error(f_pred, prepare_data, test, kf_test)
print('Train ', train_err, 'Valid ', valid_err, 'Test ', test_err)
if saveto:
numpy.savez(saveto, train_err=train_err,
valid_err=valid_err, test_err=test_err,
history_errs=history_errs, **best_p)
print('The code run for %d epochs, with %f sec/epochs' % (
(eidx + 1), (end_time - start_time) / (1. * (eidx + 1))))
print(('Training took %.1fs' %
(end_time - start_time)), file=sys.stderr)
return train_err, valid_err, test_err
if __name__ == '__main__':
# See function train for all possible parameter and there definition.
train_lstm(
max_epochs=100,
test_size=500,
)
| raise Warning('%s is not in the archive' % kk) | conditional_block |
timeEntry.py | ### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
TIME_TO_READ = 1000
class TimeEntry (validatingEntry.ValidatingEntry):
__gtype_name__ = 'TimeEntry'
def __init__ (self, conv=None):
if not conv: self.conv = convert.get_converter()
else: self.conv = conv
validatingEntry.ValidatingEntry.__init__(self)
self.entry.get_value = self.get_value
self.entry.set_value = self.set_value
def find_errors_in_progress (self, txt):
if (not txt) or self.conv.timestring_to_seconds(txt):
return None
elif not convert.NUMBER_MATCHER.match(txt.split()[0]):
return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
else:
words = txt.split()
#if len(words) == 1:
# self._hide_warning_slowly()
# return
if convert.NUMBER_MATCHER.match(words[-1]):
return None
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
return None
#self._hide_warning_slowly()
#return
return _('Invalid input.') + \
_('Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
#else:
# self.set_warning_text("Invalid or incomplete time")
# self._show_warning()
def find_completed_errors (self,*args):
txt = self.entry.get_text()
if txt and not self.conv.timestring_to_seconds(txt):
return _('Invalid input.') + \
_('Time must be expressed in hours, minutes, seconds, etc.')
words = txt.split()
if len(words) == 1:
self._hide_warning_slowly()
return
elif convert.NUMBER_MATCHER.match(words[-1]):
return
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
self._hide_warning_slowly()
return
self.valid = False
self.warn = True
self.set_warning_text('Invalid input.' + 'Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
def set_value (self,seconds):
self.entry.set_text(
convert.seconds_to_timestring(seconds,
fractions=convert.FRACTIONS_ASCII)
)
def get_value (self):
return self.conv.timestring_to_seconds(self.entry.get_text())
def make_time_entry():
te=TimeEntry()
te.show()
return te
if __name__ == '__main__':
w=gtk.Window()
vb = gtk.VBox() | l.set_use_underline(True)
l.set_alignment(0,0.5)
hb.pack_start(l)
te=TimeEntry()
import sys
te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value()))
l.set_mnemonic_widget(te)
hb.pack_start(te,expand=False,fill=False)
vb.add(hb)
qb = gtk.Button(stock=gtk.STOCK_QUIT)
vb.add(qb)
l.show()
hb.show()
qb.show()
te.show()
vb.show()
qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
w.add(vb)
w.show()
w.connect('delete_event',gtk.main_quit)
gtk.main() | hb = gtk.HBox()
l=gtk.Label('_Label') | random_line_split |
timeEntry.py | ### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
TIME_TO_READ = 1000
class TimeEntry (validatingEntry.ValidatingEntry):
__gtype_name__ = 'TimeEntry'
def __init__ (self, conv=None):
if not conv: self.conv = convert.get_converter()
else: self.conv = conv
validatingEntry.ValidatingEntry.__init__(self)
self.entry.get_value = self.get_value
self.entry.set_value = self.set_value
def find_errors_in_progress (self, txt):
if (not txt) or self.conv.timestring_to_seconds(txt):
return None
elif not convert.NUMBER_MATCHER.match(txt.split()[0]):
return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
else:
words = txt.split()
#if len(words) == 1:
# self._hide_warning_slowly()
# return
if convert.NUMBER_MATCHER.match(words[-1]):
return None
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
return None
#self._hide_warning_slowly()
#return
return _('Invalid input.') + \
_('Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
#else:
# self.set_warning_text("Invalid or incomplete time")
# self._show_warning()
def find_completed_errors (self,*args):
txt = self.entry.get_text()
if txt and not self.conv.timestring_to_seconds(txt):
return _('Invalid input.') + \
_('Time must be expressed in hours, minutes, seconds, etc.')
words = txt.split()
if len(words) == 1:
self._hide_warning_slowly()
return
elif convert.NUMBER_MATCHER.match(words[-1]):
return
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
|
self.valid = False
self.warn = True
self.set_warning_text('Invalid input.' + 'Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
def set_value (self,seconds):
self.entry.set_text(
convert.seconds_to_timestring(seconds,
fractions=convert.FRACTIONS_ASCII)
)
def get_value (self):
return self.conv.timestring_to_seconds(self.entry.get_text())
def make_time_entry():
te=TimeEntry()
te.show()
return te
if __name__ == '__main__':
w=gtk.Window()
vb = gtk.VBox()
hb = gtk.HBox()
l=gtk.Label('_Label')
l.set_use_underline(True)
l.set_alignment(0,0.5)
hb.pack_start(l)
te=TimeEntry()
import sys
te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value()))
l.set_mnemonic_widget(te)
hb.pack_start(te,expand=False,fill=False)
vb.add(hb)
qb = gtk.Button(stock=gtk.STOCK_QUIT)
vb.add(qb)
l.show()
hb.show()
qb.show()
te.show()
vb.show()
qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
w.add(vb)
w.show()
w.connect('delete_event',gtk.main_quit)
gtk.main()
| self._hide_warning_slowly()
return | conditional_block |
timeEntry.py | ### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
TIME_TO_READ = 1000
class TimeEntry (validatingEntry.ValidatingEntry):
|
def make_time_entry():
te=TimeEntry()
te.show()
return te
if __name__ == '__main__':
w=gtk.Window()
vb = gtk.VBox()
hb = gtk.HBox()
l=gtk.Label('_Label')
l.set_use_underline(True)
l.set_alignment(0,0.5)
hb.pack_start(l)
te=TimeEntry()
import sys
te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value()))
l.set_mnemonic_widget(te)
hb.pack_start(te,expand=False,fill=False)
vb.add(hb)
qb = gtk.Button(stock=gtk.STOCK_QUIT)
vb.add(qb)
l.show()
hb.show()
qb.show()
te.show()
vb.show()
qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
w.add(vb)
w.show()
w.connect('delete_event',gtk.main_quit)
gtk.main()
| __gtype_name__ = 'TimeEntry'
def __init__ (self, conv=None):
if not conv: self.conv = convert.get_converter()
else: self.conv = conv
validatingEntry.ValidatingEntry.__init__(self)
self.entry.get_value = self.get_value
self.entry.set_value = self.set_value
def find_errors_in_progress (self, txt):
if (not txt) or self.conv.timestring_to_seconds(txt):
return None
elif not convert.NUMBER_MATCHER.match(txt.split()[0]):
return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
else:
words = txt.split()
#if len(words) == 1:
# self._hide_warning_slowly()
# return
if convert.NUMBER_MATCHER.match(words[-1]):
return None
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
return None
#self._hide_warning_slowly()
#return
return _('Invalid input.') + \
_('Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
#else:
# self.set_warning_text("Invalid or incomplete time")
# self._show_warning()
def find_completed_errors (self,*args):
txt = self.entry.get_text()
if txt and not self.conv.timestring_to_seconds(txt):
return _('Invalid input.') + \
_('Time must be expressed in hours, minutes, seconds, etc.')
words = txt.split()
if len(words) == 1:
self._hide_warning_slowly()
return
elif convert.NUMBER_MATCHER.match(words[-1]):
return
else:
partial_unit = words[-1]
for u in self.conv.unit_to_seconds.keys():
if u.lower().find(partial_unit.lower())==0:
self._hide_warning_slowly()
return
self.valid = False
self.warn = True
self.set_warning_text('Invalid input.' + 'Time must be expressed in hours, minutes, seconds, etc.')
self._show_warning()
def set_value (self,seconds):
self.entry.set_text(
convert.seconds_to_timestring(seconds,
fractions=convert.FRACTIONS_ASCII)
)
def get_value (self):
return self.conv.timestring_to_seconds(self.entry.get_text()) | identifier_body |
timeEntry.py | ### Copyright (C) 2005 Thomas M. Hinkle
### Copyright (C) 2009 Rolf Leggewie
###
### This library is free software; you can redistribute it and/or
### modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation; either version 2 of the
### License, or (at your option) any later version.
###
### This library is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
### General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this library; if not, write to the Free Software
### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
### USA
import gtk
from gettext import gettext as _
import gourmet.convert as convert
import validatingEntry
TIME_TO_READ = 1000
class | (validatingEntry.ValidatingEntry):
__gtype_name__ = 'TimeEntry'
def __init__ (self, conv=None):
    """Create a time-entry widget.

    :param conv: optional converter object; when omitted, the shared
        converter from ``convert.get_converter()`` is used.
    """
    if not conv: self.conv = convert.get_converter()
    else: self.conv = conv
    validatingEntry.ValidatingEntry.__init__(self)
    # Expose get_value/set_value on the wrapped gtk.Entry so callers that
    # only hold the raw entry widget can still read/write the time value.
    self.entry.get_value = self.get_value
    self.entry.set_value = self.set_value
def find_errors_in_progress (self, txt):
    """Validate *txt* while the user is still typing.

    Return None when the text is empty, already parses as a time, or is a
    plausible prefix of a valid time (a trailing number, or a trailing
    partial unit name such as "min").  Otherwise return a translated
    error message describing the problem.
    """
    if (not txt) or self.conv.timestring_to_seconds(txt):
        return None
    if not convert.NUMBER_MATCHER.match(txt.split()[0]):
        return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).')
    words = txt.split()
    # A trailing bare number ("1 hour 3") may still grow into "3 minutes".
    if convert.NUMBER_MATCHER.match(words[-1]):
        return None
    # Accept the last word while it is a prefix of any known unit name.
    partial_unit = words[-1].lower()
    for u in self.conv.unit_to_seconds.keys():
        if u.lower().startswith(partial_unit):
            return None
    # Note: the original had an unreachable self._show_warning() after this
    # return; the caller is responsible for displaying the message.
    return _('Invalid input.') + \
           _('Time must be expressed in hours, minutes, seconds, etc.')
def find_completed_errors (self,*args):
    """Validate the entry text once editing is finished.

    Return a translated error message when the text cannot be parsed as a
    time; otherwise hide or show the warning as a side effect and return
    None implicitly.
    """
    txt = self.entry.get_text()
    if txt and not self.conv.timestring_to_seconds(txt):
        return _('Invalid input.') + \
               _('Time must be expressed in hours, minutes, seconds, etc.')
    words = txt.split()
    # <= 1 (not == 1) so that empty text is handled here too; the original
    # fell through to words[-1] below and raised IndexError on "".
    if len(words) <= 1:
        self._hide_warning_slowly()
        return
    if convert.NUMBER_MATCHER.match(words[-1]):
        return
    # A trailing partial unit name ("1 hour 3 min") is acceptable.
    partial_unit = words[-1]
    for u in self.conv.unit_to_seconds.keys():
        if u.lower().find(partial_unit.lower())==0:
            self._hide_warning_slowly()
            return
    self.valid = False
    self.warn = True
    self.set_warning_text('Invalid input.' + 'Time must be expressed in hours, minutes, seconds, etc.')
    self._show_warning()
def set_value (self,seconds):
    """Display *seconds* in the entry as a human-readable time string,
    using ASCII fractions (e.g. 1/2 rather than Unicode glyphs)."""
    self.entry.set_text(
        convert.seconds_to_timestring(seconds,
                                      fractions=convert.FRACTIONS_ASCII)
        )
def get_value (self):
    """Return the entry's current text parsed into seconds.

    NOTE(review): the value returned for unparsable text depends on
    ``conv.timestring_to_seconds`` — confirm in gourmet.convert.
    """
    return self.conv.timestring_to_seconds(self.entry.get_text())
def make_time_entry():
    """Factory helper: return a new, already-shown TimeEntry widget."""
    te=TimeEntry()
    te.show()
    return te
if __name__ == '__main__':
w=gtk.Window()
vb = gtk.VBox()
hb = gtk.HBox()
l=gtk.Label('_Label')
l.set_use_underline(True)
l.set_alignment(0,0.5)
hb.pack_start(l)
te=TimeEntry()
import sys
te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value()))
l.set_mnemonic_widget(te)
hb.pack_start(te,expand=False,fill=False)
vb.add(hb)
qb = gtk.Button(stock=gtk.STOCK_QUIT)
vb.add(qb)
l.show()
hb.show()
qb.show()
te.show()
vb.show()
qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit())
w.add(vb)
w.show()
w.connect('delete_event',gtk.main_quit)
gtk.main()
| TimeEntry | identifier_name |
mod.rs | /// Type definitions and serializations of types used in the VM and in other modules
#[derive(Serialize, Deserialize, Clone)]
pub struct Instruction {
pub opcode: Opcode,
pub target: Register,
pub left: Register,
pub right: Register
}
#[derive(Serialize, Deserialize)]
pub struct Module {
pub functions: Vec<u64>,
pub constants: Vec<i64>,
pub entry_point: u64,
pub code: Vec<Instruction>
}
pub struct | <'a> {
pub functions: &'a [u64],
pub constants: &'a [i64],
pub code: &'a [Instruction],
pub registers: &'a mut [i64],
pub base: usize
}
/// Definition of the register type and a list of special registers
pub type Register = u8;
pub mod reg {
use super::*;
pub const RET: Register = 0;
pub const VAL: Register = 1;
}
/// Definition of the opcode type and a listing of valid operations
pub type Opcode = u8;
pub mod ops {
use super::*;
pub const HLT: Opcode = 0;
pub const LD: Opcode = 1;
pub const LDB: Opcode = 2;
pub const LDR: Opcode = 3;
pub const ADD: Opcode = 4;
pub const SUB: Opcode = 5;
pub const MUL: Opcode = 6;
pub const DIV: Opcode = 7;
pub const AND: Opcode = 8;
pub const OR: Opcode = 9;
pub const NOT: Opcode = 10;
pub const EQ: Opcode = 11;
pub const LT: Opcode = 12;
pub const LE: Opcode = 13;
pub const GT: Opcode = 14;
pub const GE: Opcode = 15;
pub const NEQ: Opcode = 16;
pub const CAL: Opcode = 17;
pub const TLC: Opcode = 18;
pub const RET: Opcode = 19;
pub const MOV: Opcode = 20;
pub const MVO: Opcode = 21;
pub const JMF: Opcode = 22;
pub const JMB: Opcode = 23;
pub const JTF: Opcode = 24;
pub const WRI: Opcode = 25;
pub const RDI: Opcode = 26;
}
/// A listing of possible types
pub type Type = u8;
pub mod types {
use super::*;
pub const INT: Type = 0;
//pub const FLOAT: Type = 0;
//pub const INTLIST: Type = 0;
//pub const FLOATLIST: Type = 0;
} | Thread | identifier_name |
mod.rs | /// Type definitions and serializations of types used in the VM and in other modules
/// One VM instruction: an opcode plus three register operands.
#[derive(Serialize, Deserialize, Clone)]
pub struct Instruction {
    /// Operation to perform (see the `ops` module).
    pub opcode: Opcode,
    /// Destination register.
    pub target: Register,
    /// First source operand.
    pub left: Register,
    /// Second source operand.
    pub right: Register
}
/// A serializable compilation unit loaded by the VM.
#[derive(Serialize, Deserialize)]
pub struct Module {
    /// Per-function entry offsets; presumably indices into `code`
    /// (confirm against the interpreter loop).
    pub functions: Vec<u64>,
    /// Constant pool referenced by instructions.
    pub constants: Vec<i64>,
    /// Where execution begins.
    pub entry_point: u64,
    /// Flat instruction stream.
    pub code: Vec<Instruction>
}
/// Per-thread execution state, borrowing the loaded module's data.
pub struct Thread<'a> {
    pub functions: &'a [u64],
    pub constants: &'a [i64],
    pub code: &'a [Instruction],
    /// Mutable register file for this thread.
    pub registers: &'a mut [i64],
    /// Base index of the current frame within `registers`
    /// (assumed — confirm against call/return handling).
    pub base: usize
}
/// Definition of the register type and a list of special registers
pub type Register = u8;
pub mod reg {
    use super::*;
    // Special-purpose registers.  Roles inferred from the names only
    // (RET = return slot, VAL = value/scratch) — confirm against the VM.
    pub const RET: Register = 0;
    pub const VAL: Register = 1;
}
/// Definition of the opcode type and a listing of valid operations
pub type Opcode = u8;
pub mod ops {
    use super::*;
    // NOTE(review): the groupings below are inferred from the mnemonics
    // only; confirm against the VM's dispatch loop.
    // Halt execution.
    pub const HLT: Opcode = 0;
    // Loads (presumably constant / base / register variants).
    pub const LD: Opcode = 1;
    pub const LDB: Opcode = 2;
    pub const LDR: Opcode = 3;
    // Arithmetic.
    pub const ADD: Opcode = 4;
    pub const SUB: Opcode = 5;
    pub const MUL: Opcode = 6;
    pub const DIV: Opcode = 7;
    // Boolean logic.
    pub const AND: Opcode = 8;
    pub const OR: Opcode = 9;
    pub const NOT: Opcode = 10;
    // Comparisons.
    pub const EQ: Opcode = 11;
    pub const LT: Opcode = 12;
    pub const LE: Opcode = 13;
    pub const GT: Opcode = 14;
    pub const GE: Opcode = 15;
    pub const NEQ: Opcode = 16;
    // Calls and returns (TLC presumably a tail call).
    pub const CAL: Opcode = 17;
    pub const TLC: Opcode = 18;
    pub const RET: Opcode = 19;
    // Register moves.
    pub const MOV: Opcode = 20;
    pub const MVO: Opcode = 21;
    // Jumps (forward / backward / conditional-forward, by the names).
    pub const JMF: Opcode = 22;
    pub const JMB: Opcode = 23;
    pub const JTF: Opcode = 24;
    // I/O: write / read integers.
    pub const WRI: Opcode = 25;
    pub const RDI: Opcode = 26;
}
/// A listing of possible types
pub type Type = u8;
pub mod types {
    use super::*;
    pub const INT: Type = 0;
    // The remaining type tags are placeholders that were never enabled.
    //pub const FLOAT: Type = 0;
    //pub const INTLIST: Type = 0;
//pub const FLOATLIST: Type = 0; | } | random_line_split | |
countdowntimer.js | function getTimeRemaining(endtime) {
var t = Date.parse(endtime) - Date.parse(new Date());
var seconds = Math.floor((t / 1000) % 60);
var minutes = Math.floor((t / 1000 / 60) % 60); | var days = Math.floor(t / (1000 * 60 * 60 * 24));
// Make sure display does not show negative time
if (seconds < 0)
seconds = 0;
if (minutes < 0)
minutes = 0;
if (hours < 0)
hours = 0;
if (days < 0)
days = 0;
return {
'total': t,
'days': days,
'hours': hours,
'minutes': minutes,
'seconds': seconds
};
}
function initializeClock(id, endtime) {
var clock = document.getElementById(id);
var daysSpan = clock.querySelector('.days');
var hoursSpan = clock.querySelector('.hours');
var minutesSpan = clock.querySelector('.minutes');
var secondsSpan = clock.querySelector('.seconds');
function updateClock() {
var t = getTimeRemaining(endtime);
daysSpan.innerHTML = t.days;
hoursSpan.innerHTML = ('0' + t.hours).slice(-2);
minutesSpan.innerHTML = ('0' + t.minutes).slice(-2);
secondsSpan.innerHTML = ('0' + t.seconds).slice(-2);
if (t.total <= 0) {
clearInterval(timeinterval);
}
}
updateClock();
var timeinterval = setInterval(updateClock, 1000);
}
//var deadline = new Date(Date.parse(new Date()) + 15 * 24 * 60 * 60 * 1000);
var deadline = 'July 8 2016 15:00:00';
initializeClock('clockdiv', deadline); | var hours = Math.floor((t / (1000 * 60 * 60)) % 24); | random_line_split |
countdowntimer.js | function getTimeRemaining(endtime) {
var t = Date.parse(endtime) - Date.parse(new Date());
var seconds = Math.floor((t / 1000) % 60);
var minutes = Math.floor((t / 1000 / 60) % 60);
var hours = Math.floor((t / (1000 * 60 * 60)) % 24);
var days = Math.floor(t / (1000 * 60 * 60 * 24));
// Make sure display does not show negative time
if (seconds < 0)
seconds = 0;
if (minutes < 0)
minutes = 0;
if (hours < 0)
hours = 0;
if (days < 0)
days = 0;
return {
'total': t,
'days': days,
'hours': hours,
'minutes': minutes,
'seconds': seconds
};
}
function initializeClock(id, endtime) |
//var deadline = new Date(Date.parse(new Date()) + 15 * 24 * 60 * 60 * 1000);
var deadline = 'July 8 2016 15:00:00';
initializeClock('clockdiv', deadline); | {
var clock = document.getElementById(id);
var daysSpan = clock.querySelector('.days');
var hoursSpan = clock.querySelector('.hours');
var minutesSpan = clock.querySelector('.minutes');
var secondsSpan = clock.querySelector('.seconds');
function updateClock() {
var t = getTimeRemaining(endtime);
daysSpan.innerHTML = t.days;
hoursSpan.innerHTML = ('0' + t.hours).slice(-2);
minutesSpan.innerHTML = ('0' + t.minutes).slice(-2);
secondsSpan.innerHTML = ('0' + t.seconds).slice(-2);
if (t.total <= 0) {
clearInterval(timeinterval);
}
}
updateClock();
var timeinterval = setInterval(updateClock, 1000);
} | identifier_body |
countdowntimer.js | function | (endtime) {
var t = Date.parse(endtime) - Date.parse(new Date());
var seconds = Math.floor((t / 1000) % 60);
var minutes = Math.floor((t / 1000 / 60) % 60);
var hours = Math.floor((t / (1000 * 60 * 60)) % 24);
var days = Math.floor(t / (1000 * 60 * 60 * 24));
// Make sure display does not show negative time
if (seconds < 0)
seconds = 0;
if (minutes < 0)
minutes = 0;
if (hours < 0)
hours = 0;
if (days < 0)
days = 0;
return {
'total': t,
'days': days,
'hours': hours,
'minutes': minutes,
'seconds': seconds
};
}
function initializeClock(id, endtime) {
    // Bind the countdown to the element with the given id; it must contain
    // children carrying the classes .days/.hours/.minutes/.seconds.
    var clock = document.getElementById(id);
    var daysSpan = clock.querySelector('.days');
    var hoursSpan = clock.querySelector('.hours');
    var minutesSpan = clock.querySelector('.minutes');
    var secondsSpan = clock.querySelector('.seconds');
    function updateClock() {
        var t = getTimeRemaining(endtime);
        daysSpan.innerHTML = t.days;
        // Zero-pad the two-digit fields ('0' + 9 -> '09').
        hoursSpan.innerHTML = ('0' + t.hours).slice(-2);
        minutesSpan.innerHTML = ('0' + t.minutes).slice(-2);
        secondsSpan.innerHTML = ('0' + t.seconds).slice(-2);
        // Stop ticking once the deadline has passed.
        if (t.total <= 0) {
            clearInterval(timeinterval);
        }
    }
    // Render immediately so the display is correct before the first tick,
    // then update once per second.
    updateClock();
    var timeinterval = setInterval(updateClock, 1000);
}
//var deadline = new Date(Date.parse(new Date()) + 15 * 24 * 60 * 60 * 1000);
var deadline = 'July 8 2016 15:00:00';
initializeClock('clockdiv', deadline); | getTimeRemaining | identifier_name |
panic_in_result_fn_assertions.rs | #![warn(clippy::panic_in_result_fn)]
#![allow(clippy::unnecessary_wraps)]
struct A;
impl A {
fn result_with_assert_with_message(x: i32) -> Result<bool, String> // should emit lint
{
assert!(x == 5, "wrong argument");
Ok(true)
}
fn result_with_assert_eq(x: i32) -> Result<bool, String> // should emit lint
{
assert_eq!(x, 5);
Ok(true)
}
fn result_with_assert_ne(x: i32) -> Result<bool, String> // should emit lint
{
assert_ne!(x, 1);
Ok(true)
}
fn other_with_assert_with_message(x: i32) // should not emit lint | assert!(x == 5, "wrong argument");
}
fn other_with_assert_eq(x: i32) // should not emit lint
{
assert_eq!(x, 5);
}
fn other_with_assert_ne(x: i32) // should not emit lint
{
assert_ne!(x, 1);
}
fn result_without_banned_functions() -> Result<bool, String> // should not emit lint
{
let assert = "assert!";
println!("No {}", assert);
Ok(true)
}
}
fn main() {} | { | random_line_split |
panic_in_result_fn_assertions.rs | #![warn(clippy::panic_in_result_fn)]
#![allow(clippy::unnecessary_wraps)]
struct A;
impl A {
fn result_with_assert_with_message(x: i32) -> Result<bool, String> // should emit lint
{
assert!(x == 5, "wrong argument");
Ok(true)
}
fn result_with_assert_eq(x: i32) -> Result<bool, String> // should emit lint
|
fn result_with_assert_ne(x: i32) -> Result<bool, String> // should emit lint
{
assert_ne!(x, 1);
Ok(true)
}
fn other_with_assert_with_message(x: i32) // should not emit lint
{
assert!(x == 5, "wrong argument");
}
fn other_with_assert_eq(x: i32) // should not emit lint
{
assert_eq!(x, 5);
}
fn other_with_assert_ne(x: i32) // should not emit lint
{
assert_ne!(x, 1);
}
fn result_without_banned_functions() -> Result<bool, String> // should not emit lint
{
let assert = "assert!";
println!("No {}", assert);
Ok(true)
}
}
fn main() {}
| {
assert_eq!(x, 5);
Ok(true)
} | identifier_body |
panic_in_result_fn_assertions.rs | #![warn(clippy::panic_in_result_fn)]
#![allow(clippy::unnecessary_wraps)]
struct A;
impl A {
fn result_with_assert_with_message(x: i32) -> Result<bool, String> // should emit lint
{
assert!(x == 5, "wrong argument");
Ok(true)
}
fn | (x: i32) -> Result<bool, String> // should emit lint
{
assert_eq!(x, 5);
Ok(true)
}
fn result_with_assert_ne(x: i32) -> Result<bool, String> // should emit lint
{
assert_ne!(x, 1);
Ok(true)
}
fn other_with_assert_with_message(x: i32) // should not emit lint
{
assert!(x == 5, "wrong argument");
}
fn other_with_assert_eq(x: i32) // should not emit lint
{
assert_eq!(x, 5);
}
fn other_with_assert_ne(x: i32) // should not emit lint
{
assert_ne!(x, 1);
}
fn result_without_banned_functions() -> Result<bool, String> // should not emit lint
{
let assert = "assert!";
println!("No {}", assert);
Ok(true)
}
}
fn main() {}
| result_with_assert_eq | identifier_name |
backdrop.ts | import {Animate} from "../../core/util/animate";
import {ElementRef, ViewEncapsulation, Component, Input, Output, EventEmitter} from "angular2/core";
import {DOM} from "angular2/src/platform/dom/dom_adapter";
/**
* An overlay for content on the page.
* Can optionally dismiss when clicked on.
* Has outputs for show/showing and hide/hiding.
*/
@Component({
selector: 'md-backdrop',
template: '',
encapsulation: ViewEncapsulation.None,
host: {
'class': 'md-backdrop',
'(click)': 'onClick()',
},
})
export class MdBackdrop {
/**
* When true, clicking on the backdrop will close it
*/
@Input()
clickClose: boolean = false;
/**
* When true, disable the parent container scroll while the backdrop is active.
*/
@Input()
hideScroll: boolean = true;
/**
* Emits when the backdrop begins to hide.
*/
@Output()
onHiding: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>(false);
/**
* Emits when the backdrop has finished being hidden.
*/
@Output()
onHidden: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>(false);
/**
* Emits when the backdrop begins to be shown.
*/
@Output()
onShowing: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>();
/**
* Emits when the backdrop has finished being shown.
*/
@Output()
onShown: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>();
constructor(public element: ElementRef) {
}
/**
* The CSS class name to transition on/off when the backdrop is hidden/shown.
*/
@Input()
public transitionClass: string = 'md-active';
/**
* Whether to add the {@see transitionClass} or remove it when the backdrop is shown. The
* opposite will happen when the backdrop is hidden.
*/
@Input()
public transitionAddClass = true;
/**
* Whether the backdrop is visible.
*/
get visible(): boolean {
return this._visible;
}
@Input()
set visible(value: boolean) {
this.toggle(value);
}
private _visible: boolean = false;
private _transitioning: boolean = false;
private _previousOverflow: string = null;
private _body: HTMLBodyElement = DOM.query('body');
onClick() {
if (this.clickClose && !this._transitioning && this.visible) {
this.hide();
}
}
/**
* Hide the backdrop and return a promise that is resolved when the hide animations are
* complete.
*/
| (): Promise<any> {
return this.toggle(false);
}
/**
* Show the backdrop and return a promise that is resolved when the show animations are
* complete.
*/
show(): Promise<any> {
return this.toggle(true);
}
/**
* Toggle the visibility of the backdrop.
* @param visible whether or not the backdrop should be visible
* @returns {any}
*/
toggle(visible: boolean = !this.visible): any {
if (visible === this._visible) {
return Promise.resolve();
}
let beginEvent = visible ? this.onShowing : this.onHiding;
let endEvent = visible ? this.onShown : this.onHidden;
this._visible = visible;
this._transitioning = true;
beginEvent.emit(this);
let action = visible ?
(this.transitionAddClass ? Animate.enter : Animate.leave) :
(this.transitionAddClass ? Animate.leave : Animate.enter);
// Page scroll
if (visible && this.hideScroll && this.element && !this._previousOverflow) {
let style = DOM.getStyle(this._body, 'overflow');
if (style !== 'hidden') {
this._previousOverflow = style;
DOM.setStyle(this._body, 'overflow', 'hidden');
}
}
else if (!visible && this.hideScroll && this.element && this._previousOverflow !== null) {
DOM.setStyle(this._body, 'overflow', this._previousOverflow);
this._previousOverflow = null;
}
// Animate transition class in/out and then finally emit the completed event.
return action(this.element.nativeElement, this.transitionClass).then(() => {
this._transitioning = false;
endEvent.emit(this);
});
}
}
| hide | identifier_name |
backdrop.ts | import {Animate} from "../../core/util/animate";
import {ElementRef, ViewEncapsulation, Component, Input, Output, EventEmitter} from "angular2/core";
import {DOM} from "angular2/src/platform/dom/dom_adapter";
/**
* An overlay for content on the page.
* Can optionally dismiss when clicked on.
* Has outputs for show/showing and hide/hiding.
*/
@Component({
selector: 'md-backdrop',
template: '',
encapsulation: ViewEncapsulation.None,
host: {
'class': 'md-backdrop',
'(click)': 'onClick()',
},
})
export class MdBackdrop {
/**
* When true, clicking on the backdrop will close it
*/
@Input()
clickClose: boolean = false;
/**
* When true, disable the parent container scroll while the backdrop is active.
*/
@Input()
hideScroll: boolean = true;
/**
* Emits when the backdrop begins to hide.
*/
@Output()
onHiding: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>(false);
/**
* Emits when the backdrop has finished being hidden.
*/
@Output()
onHidden: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>(false);
/**
* Emits when the backdrop begins to be shown.
*/
@Output()
onShowing: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>();
/**
* Emits when the backdrop has finished being shown.
*/
@Output()
onShown: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>();
constructor(public element: ElementRef) {
}
/**
* The CSS class name to transition on/off when the backdrop is hidden/shown.
*/
@Input()
public transitionClass: string = 'md-active';
/**
* Whether to add the {@see transitionClass} or remove it when the backdrop is shown. The
* opposite will happen when the backdrop is hidden.
*/
@Input()
public transitionAddClass = true;
/**
* Whether the backdrop is visible.
*/
get visible(): boolean {
return this._visible;
}
@Input()
set visible(value: boolean) {
this.toggle(value);
}
private _visible: boolean = false;
private _transitioning: boolean = false;
private _previousOverflow: string = null;
private _body: HTMLBodyElement = DOM.query('body');
onClick() |
/**
* Hide the backdrop and return a promise that is resolved when the hide animations are
* complete.
*/
hide(): Promise<any> {
return this.toggle(false);
}
/**
* Show the backdrop and return a promise that is resolved when the show animations are
* complete.
*/
show(): Promise<any> {
return this.toggle(true);
}
/**
* Toggle the visibility of the backdrop.
* @param visible whether or not the backdrop should be visible
* @returns {any}
*/
toggle(visible: boolean = !this.visible): any {
if (visible === this._visible) {
return Promise.resolve();
}
let beginEvent = visible ? this.onShowing : this.onHiding;
let endEvent = visible ? this.onShown : this.onHidden;
this._visible = visible;
this._transitioning = true;
beginEvent.emit(this);
let action = visible ?
(this.transitionAddClass ? Animate.enter : Animate.leave) :
(this.transitionAddClass ? Animate.leave : Animate.enter);
// Page scroll
if (visible && this.hideScroll && this.element && !this._previousOverflow) {
let style = DOM.getStyle(this._body, 'overflow');
if (style !== 'hidden') {
this._previousOverflow = style;
DOM.setStyle(this._body, 'overflow', 'hidden');
}
}
else if (!visible && this.hideScroll && this.element && this._previousOverflow !== null) {
DOM.setStyle(this._body, 'overflow', this._previousOverflow);
this._previousOverflow = null;
}
// Animate transition class in/out and then finally emit the completed event.
return action(this.element.nativeElement, this.transitionClass).then(() => {
this._transitioning = false;
endEvent.emit(this);
});
}
}
| {
if (this.clickClose && !this._transitioning && this.visible) {
this.hide();
}
} | identifier_body |
backdrop.ts | import {Animate} from "../../core/util/animate";
import {ElementRef, ViewEncapsulation, Component, Input, Output, EventEmitter} from "angular2/core";
import {DOM} from "angular2/src/platform/dom/dom_adapter";
/**
* An overlay for content on the page.
* Can optionally dismiss when clicked on.
* Has outputs for show/showing and hide/hiding.
*/
@Component({
selector: 'md-backdrop',
template: '',
encapsulation: ViewEncapsulation.None,
host: {
'class': 'md-backdrop',
'(click)': 'onClick()',
},
})
export class MdBackdrop {
/**
* When true, clicking on the backdrop will close it
*/
@Input()
clickClose: boolean = false;
/**
* When true, disable the parent container scroll while the backdrop is active.
*/
@Input()
hideScroll: boolean = true;
/**
* Emits when the backdrop begins to hide.
*/
@Output()
onHiding: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>(false);
/**
* Emits when the backdrop has finished being hidden.
*/
@Output()
onHidden: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>(false);
/**
* Emits when the backdrop begins to be shown.
*/
@Output()
onShowing: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>();
/**
* Emits when the backdrop has finished being shown.
*/
@Output()
onShown: EventEmitter<MdBackdrop> = new EventEmitter<MdBackdrop>();
constructor(public element: ElementRef) {
}
/**
* The CSS class name to transition on/off when the backdrop is hidden/shown.
*/
@Input()
public transitionClass: string = 'md-active'; | * opposite will happen when the backdrop is hidden.
*/
@Input()
public transitionAddClass = true;
/**
* Whether the backdrop is visible.
*/
get visible(): boolean {
return this._visible;
}
@Input()
set visible(value: boolean) {
this.toggle(value);
}
private _visible: boolean = false;
private _transitioning: boolean = false;
private _previousOverflow: string = null;
private _body: HTMLBodyElement = DOM.query('body');
onClick() {
if (this.clickClose && !this._transitioning && this.visible) {
this.hide();
}
}
/**
* Hide the backdrop and return a promise that is resolved when the hide animations are
* complete.
*/
hide(): Promise<any> {
return this.toggle(false);
}
/**
* Show the backdrop and return a promise that is resolved when the show animations are
* complete.
*/
show(): Promise<any> {
return this.toggle(true);
}
/**
* Toggle the visibility of the backdrop.
* @param visible whether or not the backdrop should be visible
* @returns {any}
*/
toggle(visible: boolean = !this.visible): any {
    // No-op when already in the requested state.
    if (visible === this._visible) {
        return Promise.resolve();
    }
    let beginEvent = visible ? this.onShowing : this.onHiding;
    let endEvent = visible ? this.onShown : this.onHidden;
    this._visible = visible;
    this._transitioning = true;
    beginEvent.emit(this);
    // Showing normally adds the transition class and hiding removes it;
    // transitionAddClass === false inverts that pairing.
    let action = visible ?
        (this.transitionAddClass ? Animate.enter : Animate.leave) :
        (this.transitionAddClass ? Animate.leave : Animate.enter);
    // Page scroll
    // Save the body's current overflow style and lock scrolling while the
    // backdrop is visible (only if it is not already 'hidden')...
    if (visible && this.hideScroll && this.element && !this._previousOverflow) {
        let style = DOM.getStyle(this._body, 'overflow');
        if (style !== 'hidden') {
            this._previousOverflow = style;
            DOM.setStyle(this._body, 'overflow', 'hidden');
        }
    }
    // ...and restore the saved style when hiding.
    else if (!visible && this.hideScroll && this.element && this._previousOverflow !== null) {
        DOM.setStyle(this._body, 'overflow', this._previousOverflow);
        this._previousOverflow = null;
    }
    // Animate transition class in/out and then finally emit the completed event.
    return action(this.element.nativeElement, this.transitionClass).then(() => {
        this._transitioning = false;
        endEvent.emit(this);
    });
}
} |
/**
* Whether to add the {@see transitionClass} or remove it when the backdrop is shown. The | random_line_split |
auth.py | # coding=utf-8
"""Request handler for authentication."""
from __future__ import unicode_literals
import logging
import random
import string
import time
from builtins import range
import jwt
from medusa import app, helpers, notifiers
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import BaseRequestHandler
from six import text_type
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class | (BaseRequestHandler):
"""Auth request handler."""
#: resource name
name = 'authenticate'
#: allowed HTTP methods
allowed_methods = ('POST', )
def _check_authentication(self):
"""Override authentication check for the authentication endpoint."""
return None
def post(self, *args, **kwargs):
    """Request a JWT by POSTing JSON credentials.

    Expects a JSON body with ``username``, ``password`` and an optional
    ``exp`` (token lifetime in seconds, default 86400).  Responds with a
    signed token on success and 401 on any failure.
    """
    username = app.WEB_USERNAME
    password = app.WEB_PASSWORD
    # If the user hasn't set a username and/or password just let them login
    if not username.strip() or not password.strip():
        return self._login()
    if not self.request.body:
        return self._failed_login(error='No Credentials Provided')
    # Accept 'application/json' with optional parameters (e.g.
    # 'application/json; charset=UTF-8').  A missing header must not raise
    # KeyError (which would 500); it is simply a failed login.
    content_type = self.request.headers.get('content-type', '')
    if content_type.split(';')[0].strip().lower() != 'application/json':
        return self._failed_login(error='Incorrect content-type')
    try:
        request_body = json_decode(self.request.body)
    except ValueError:
        return self._failed_login(error='Invalid JSON body')
    submitted_username = request_body.get('username') or ''
    submitted_password = request_body.get('password') or ''
    submitted_exp = request_body.get('exp', 86400)
    # Constant-time comparison so response timing does not leak how much of
    # a guessed credential matched.
    username_ok = hmac.compare_digest(username.encode('utf-8'),
                                      text_type(submitted_username).encode('utf-8'))
    password_ok = hmac.compare_digest(password.encode('utf-8'),
                                      text_type(submitted_password).encode('utf-8'))
    if not (username_ok and password_ok):
        return self._failed_login(error='Invalid credentials')
    return self._login(submitted_exp)
def _login(self, exp=86400):
    """Issue a signed JWT valid for *exp* seconds and return it as JSON.

    :param exp: token lifetime in seconds (default one day).
    """
    self.set_header('Content-Type', 'application/json')
    # Only notify about logins coming from outside the private network.
    if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
        notifiers.notify_login(self.request.remote_ip)
    log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
    time_now = int(time.time())
    return self._ok(data={
        'token': jwt.encode({
            'iss': 'Medusa ' + text_type(app.APP_VERSION),
            'iat': time_now,
            # @TODO: The jti should be saved so we can revoke tokens
            'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
            'exp': time_now + int(exp),
            'username': app.WEB_USERNAME,
            'apiKey': app.API_KEY
        }, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
    })
def _failed_login(self, error=None):
    """Log the failed attempt and answer with a 401 response.

    :param error: human-readable reason passed through to the response.
    """
    log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
        'user': app.WEB_USERNAME,
        'ip': self.request.remote_ip
    })
    return self._unauthorized(error=error)
| AuthHandler | identifier_name |
auth.py | # coding=utf-8
"""Request handler for authentication."""
from __future__ import unicode_literals
import logging
import random
import string
import time
from builtins import range
import jwt
from medusa import app, helpers, notifiers
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import BaseRequestHandler
from six import text_type
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class AuthHandler(BaseRequestHandler):
"""Auth request handler."""
#: resource name
name = 'authenticate'
#: allowed HTTP methods
allowed_methods = ('POST', )
def _check_authentication(self):
"""Override authentication check for the authentication endpoint."""
return None
def post(self, *args, **kwargs):
"""Request JWT."""
username = app.WEB_USERNAME
password = app.WEB_PASSWORD
# If the user hasn't set a username and/or password just let them login
if not username.strip() or not password.strip():
return self._login()
if not self.request.body:
return self._failed_login(error='No Credentials Provided')
if self.request.headers['content-type'] != 'application/json':
return self._failed_login(error='Incorrect content-type')
request_body = json_decode(self.request.body)
submitted_username = request_body.get('username')
submitted_password = request_body.get('password')
submitted_exp = request_body.get('exp', 86400)
if username != submitted_username or password != submitted_password:
return self._failed_login(error='Invalid credentials')
return self._login(submitted_exp)
def _login(self, exp=86400):
|
def _failed_login(self, error=None):
log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
'user': app.WEB_USERNAME,
'ip': self.request.remote_ip
})
return self._unauthorized(error=error)
| self.set_header('Content-Type', 'application/json')
if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
notifiers.notify_login(self.request.remote_ip)
log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
time_now = int(time.time())
return self._ok(data={
'token': jwt.encode({
'iss': 'Medusa ' + text_type(app.APP_VERSION),
'iat': time_now,
# @TODO: The jti should be saved so we can revoke tokens
'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
'exp': time_now + int(exp),
'username': app.WEB_USERNAME,
'apiKey': app.API_KEY
}, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
}) | identifier_body |
auth.py | # coding=utf-8
"""Request handler for authentication."""
from __future__ import unicode_literals
import logging
import random
import string
import time
from builtins import range
import jwt
from medusa import app, helpers, notifiers
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import BaseRequestHandler
from six import text_type
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class AuthHandler(BaseRequestHandler):
"""Auth request handler."""
#: resource name
name = 'authenticate'
#: allowed HTTP methods
allowed_methods = ('POST', )
def _check_authentication(self):
"""Override authentication check for the authentication endpoint."""
return None
def post(self, *args, **kwargs):
"""Request JWT."""
username = app.WEB_USERNAME
password = app.WEB_PASSWORD
# If the user hasn't set a username and/or password just let them login
if not username.strip() or not password.strip():
return self._login()
if not self.request.body:
return self._failed_login(error='No Credentials Provided')
if self.request.headers['content-type'] != 'application/json':
return self._failed_login(error='Incorrect content-type')
request_body = json_decode(self.request.body)
submitted_username = request_body.get('username')
submitted_password = request_body.get('password') | if username != submitted_username or password != submitted_password:
return self._failed_login(error='Invalid credentials')
return self._login(submitted_exp)
def _login(self, exp=86400):
self.set_header('Content-Type', 'application/json')
if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
notifiers.notify_login(self.request.remote_ip)
log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
time_now = int(time.time())
return self._ok(data={
'token': jwt.encode({
'iss': 'Medusa ' + text_type(app.APP_VERSION),
'iat': time_now,
# @TODO: The jti should be saved so we can revoke tokens
'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
'exp': time_now + int(exp),
'username': app.WEB_USERNAME,
'apiKey': app.API_KEY
}, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
})
def _failed_login(self, error=None):
log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
'user': app.WEB_USERNAME,
'ip': self.request.remote_ip
})
return self._unauthorized(error=error) | submitted_exp = request_body.get('exp', 86400) | random_line_split |
auth.py | # coding=utf-8
"""Request handler for authentication."""
from __future__ import unicode_literals
import logging
import random
import string
import time
from builtins import range
import jwt
from medusa import app, helpers, notifiers
from medusa.logger.adapters.style import BraceAdapter
from medusa.server.api.v2.base import BaseRequestHandler
from six import text_type
from tornado.escape import json_decode
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class AuthHandler(BaseRequestHandler):
"""Auth request handler."""
#: resource name
name = 'authenticate'
#: allowed HTTP methods
allowed_methods = ('POST', )
def _check_authentication(self):
"""Override authentication check for the authentication endpoint."""
return None
def post(self, *args, **kwargs):
"""Request JWT."""
username = app.WEB_USERNAME
password = app.WEB_PASSWORD
# If the user hasn't set a username and/or password just let them login
if not username.strip() or not password.strip():
return self._login()
if not self.request.body:
return self._failed_login(error='No Credentials Provided')
if self.request.headers['content-type'] != 'application/json':
|
request_body = json_decode(self.request.body)
submitted_username = request_body.get('username')
submitted_password = request_body.get('password')
submitted_exp = request_body.get('exp', 86400)
if username != submitted_username or password != submitted_password:
return self._failed_login(error='Invalid credentials')
return self._login(submitted_exp)
def _login(self, exp=86400):
self.set_header('Content-Type', 'application/json')
if app.NOTIFY_ON_LOGIN and not helpers.is_ip_private(self.request.remote_ip):
notifiers.notify_login(self.request.remote_ip)
log.info('{user} logged into the API v2', {'user': app.WEB_USERNAME})
time_now = int(time.time())
return self._ok(data={
'token': jwt.encode({
'iss': 'Medusa ' + text_type(app.APP_VERSION),
'iat': time_now,
# @TODO: The jti should be saved so we can revoke tokens
'jti': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)),
'exp': time_now + int(exp),
'username': app.WEB_USERNAME,
'apiKey': app.API_KEY
}, app.ENCRYPTION_SECRET, algorithm='HS256').decode('utf-8')
})
def _failed_login(self, error=None):
log.warning('{user} attempted a failed login to the API v2 from IP: {ip}', {
'user': app.WEB_USERNAME,
'ip': self.request.remote_ip
})
return self._unauthorized(error=error)
| return self._failed_login(error='Incorrect content-type') | conditional_block |
device.ts | import logger from "@/logger";
import i18next from "i18next";
import AdBanner from "@/utils/AdBanner";
import AdInterstitial from "@/utils/AdInterstitial";
import Tracking from "@/utils/Tracking";
function showExitAppDialog(): void {
navigator.notification.confirm(
i18next.t("close_dialog_message"),
(choice) => {
if (choice === 1) {
logger.info("exitApp");
navigator.app.exitApp();
}
},
i18next.t("close_dialog_title")
);
}
function setup(): Promise<void> {
return new Promise((resolve) => {
try {
AdBanner.init();
AdInterstitial.init();
} catch (err) {
logger.error("[Device] Ad initialize failed");
}
try {
Tracking.auth();
} catch (err) {
logger.error("[Device] Tracking initialize failed");
}
try {
window.StatusBar.styleDefault();
if (
window.cordova.platformId === "android" ||
window.cordova.platformId === "ios"
) {
window.StatusBar.overlaysWebView(true);
window.StatusBar.backgroundColorByHexString("#A0483C46");
window.StatusBar.styleBlackTranslucent();
}
} catch (err) {
logger.error("[Device] Statusbar setting failed");
}
| // Setup localization for cordova devices
navigator.globalization.getPreferredLanguage(
(language) => {
logger.info("ChangeLanguage: [" + language.value + "]");
i18next.changeLanguage(language.value);
},
(error: GlobalizationError) => {
logger.error("ChangeLanguage Error: " + error);
}
);
} catch (err) {
logger.error("[Device] globalization setting failed");
}
document.addEventListener(
"backbutton",
function (e) {
e.preventDefault();
showExitAppDialog();
},
false
);
navigator.splashscreen.hide();
resolve();
});
}
const Device = {
init: (): Promise<void> => {
return new Promise((resolve) => {
if (/^(http|https)/.exec(document.URL)) {
logger.log("[Device] Running on Web plaform");
resolve();
} else {
logger.log("[Device] Running on Cordova plaform");
document.addEventListener("deviceready", () => {
setup()
.catch((err) => {
logger.error("[Device] setup failed: ", err);
})
.finally(() => {
logger.log("[Device] ready");
resolve();
});
});
}
});
},
};
export default Device; | try { | random_line_split |
device.ts | import logger from "@/logger";
import i18next from "i18next";
import AdBanner from "@/utils/AdBanner";
import AdInterstitial from "@/utils/AdInterstitial";
import Tracking from "@/utils/Tracking";
function showExitAppDialog(): void {
navigator.notification.confirm(
i18next.t("close_dialog_message"),
(choice) => {
if (choice === 1) {
logger.info("exitApp");
navigator.app.exitApp();
}
},
i18next.t("close_dialog_title")
);
}
function | (): Promise<void> {
return new Promise((resolve) => {
try {
AdBanner.init();
AdInterstitial.init();
} catch (err) {
logger.error("[Device] Ad initialize failed");
}
try {
Tracking.auth();
} catch (err) {
logger.error("[Device] Tracking initialize failed");
}
try {
window.StatusBar.styleDefault();
if (
window.cordova.platformId === "android" ||
window.cordova.platformId === "ios"
) {
window.StatusBar.overlaysWebView(true);
window.StatusBar.backgroundColorByHexString("#A0483C46");
window.StatusBar.styleBlackTranslucent();
}
} catch (err) {
logger.error("[Device] Statusbar setting failed");
}
try {
// Setup localization for cordova devices
navigator.globalization.getPreferredLanguage(
(language) => {
logger.info("ChangeLanguage: [" + language.value + "]");
i18next.changeLanguage(language.value);
},
(error: GlobalizationError) => {
logger.error("ChangeLanguage Error: " + error);
}
);
} catch (err) {
logger.error("[Device] globalization setting failed");
}
document.addEventListener(
"backbutton",
function (e) {
e.preventDefault();
showExitAppDialog();
},
false
);
navigator.splashscreen.hide();
resolve();
});
}
const Device = {
init: (): Promise<void> => {
return new Promise((resolve) => {
if (/^(http|https)/.exec(document.URL)) {
logger.log("[Device] Running on Web plaform");
resolve();
} else {
logger.log("[Device] Running on Cordova plaform");
document.addEventListener("deviceready", () => {
setup()
.catch((err) => {
logger.error("[Device] setup failed: ", err);
})
.finally(() => {
logger.log("[Device] ready");
resolve();
});
});
}
});
},
};
export default Device;
| setup | identifier_name |
device.ts | import logger from "@/logger";
import i18next from "i18next";
import AdBanner from "@/utils/AdBanner";
import AdInterstitial from "@/utils/AdInterstitial";
import Tracking from "@/utils/Tracking";
function showExitAppDialog(): void {
navigator.notification.confirm(
i18next.t("close_dialog_message"),
(choice) => {
if (choice === 1) {
logger.info("exitApp");
navigator.app.exitApp();
}
},
i18next.t("close_dialog_title")
);
}
function setup(): Promise<void> |
const Device = {
init: (): Promise<void> => {
return new Promise((resolve) => {
if (/^(http|https)/.exec(document.URL)) {
logger.log("[Device] Running on Web plaform");
resolve();
} else {
logger.log("[Device] Running on Cordova plaform");
document.addEventListener("deviceready", () => {
setup()
.catch((err) => {
logger.error("[Device] setup failed: ", err);
})
.finally(() => {
logger.log("[Device] ready");
resolve();
});
});
}
});
},
};
export default Device;
| {
return new Promise((resolve) => {
try {
AdBanner.init();
AdInterstitial.init();
} catch (err) {
logger.error("[Device] Ad initialize failed");
}
try {
Tracking.auth();
} catch (err) {
logger.error("[Device] Tracking initialize failed");
}
try {
window.StatusBar.styleDefault();
if (
window.cordova.platformId === "android" ||
window.cordova.platformId === "ios"
) {
window.StatusBar.overlaysWebView(true);
window.StatusBar.backgroundColorByHexString("#A0483C46");
window.StatusBar.styleBlackTranslucent();
}
} catch (err) {
logger.error("[Device] Statusbar setting failed");
}
try {
// Setup localization for cordova devices
navigator.globalization.getPreferredLanguage(
(language) => {
logger.info("ChangeLanguage: [" + language.value + "]");
i18next.changeLanguage(language.value);
},
(error: GlobalizationError) => {
logger.error("ChangeLanguage Error: " + error);
}
);
} catch (err) {
logger.error("[Device] globalization setting failed");
}
document.addEventListener(
"backbutton",
function (e) {
e.preventDefault();
showExitAppDialog();
},
false
);
navigator.splashscreen.hide();
resolve();
});
} | identifier_body |
device.ts | import logger from "@/logger";
import i18next from "i18next";
import AdBanner from "@/utils/AdBanner";
import AdInterstitial from "@/utils/AdInterstitial";
import Tracking from "@/utils/Tracking";
function showExitAppDialog(): void {
navigator.notification.confirm(
i18next.t("close_dialog_message"),
(choice) => {
if (choice === 1) {
logger.info("exitApp");
navigator.app.exitApp();
}
},
i18next.t("close_dialog_title")
);
}
function setup(): Promise<void> {
return new Promise((resolve) => {
try {
AdBanner.init();
AdInterstitial.init();
} catch (err) {
logger.error("[Device] Ad initialize failed");
}
try {
Tracking.auth();
} catch (err) {
logger.error("[Device] Tracking initialize failed");
}
try {
window.StatusBar.styleDefault();
if (
window.cordova.platformId === "android" ||
window.cordova.platformId === "ios"
) |
} catch (err) {
logger.error("[Device] Statusbar setting failed");
}
try {
// Setup localization for cordova devices
navigator.globalization.getPreferredLanguage(
(language) => {
logger.info("ChangeLanguage: [" + language.value + "]");
i18next.changeLanguage(language.value);
},
(error: GlobalizationError) => {
logger.error("ChangeLanguage Error: " + error);
}
);
} catch (err) {
logger.error("[Device] globalization setting failed");
}
document.addEventListener(
"backbutton",
function (e) {
e.preventDefault();
showExitAppDialog();
},
false
);
navigator.splashscreen.hide();
resolve();
});
}
const Device = {
init: (): Promise<void> => {
return new Promise((resolve) => {
if (/^(http|https)/.exec(document.URL)) {
logger.log("[Device] Running on Web plaform");
resolve();
} else {
logger.log("[Device] Running on Cordova plaform");
document.addEventListener("deviceready", () => {
setup()
.catch((err) => {
logger.error("[Device] setup failed: ", err);
})
.finally(() => {
logger.log("[Device] ready");
resolve();
});
});
}
});
},
};
export default Device;
| {
window.StatusBar.overlaysWebView(true);
window.StatusBar.backgroundColorByHexString("#A0483C46");
window.StatusBar.styleBlackTranslucent();
} | conditional_block |
service-server.ts | /**
* Created by Deakin on 2017/3/20 0020.
*/
import { Injectable } from '@angular/core';
import { ServiceItemData } from './service-item-data';
@Injectable()
export class | {
public serviceData: ServiceItemData[] = [
{
icon: '',
bg: 'assets/img/service_bg_01.jpg',
title: '移动app定制开发',
describe: 'Android、iOS系统软件开发<br/>满足移动APP多平台开发需求'
},
{
icon: '',
bg: 'assets/img/service_bg_02.jpg',
title: '网站定制开发',
describe: '根据需求,为您”量身定制“的个性化的解决方案<br/>从根本上为您和您的企业带来革命性的改变'
},
{
icon: '',
bg: 'assets/img/service_bg_03.jpg',
title: '微信定制开发',
describe: '从企业的需求根本出发,为企业量身定制<br/>微信公众号二次开发、微商城定制、小程序开发'
},
{
icon: '',
bg: 'assets/img/service_bg_04.jpg',
title: 'O2O移动电商',
describe: '利用手机、平板等无线终端进行的O2O的电子商务<br/>实现随时随地、线上线下的购物与交易'
},
{
icon: '',
bg: 'assets/img/service_bg_05.jpg',
title: '物联网软件开发',
describe: '为客户快速开发出安全稳定的智能硬件的软件,<br/>并为企业提供安全可靠的设备云服务接入。'
},
{
icon: '',
bg: 'assets/img/service_bg_06.jpg',
title: '云服务',
describe: '基于互联网的相关服务设备的增加、使用和交付购置,<br/>提供涉及通过互联网来提供动态易扩展计算的资源。'
}
];
public getServiceData(): ServiceItemData[] {
return this.serviceData;
}
}
| ServiceServer | identifier_name |
service-server.ts | /** | * Created by Deakin on 2017/3/20 0020.
*/
import { Injectable } from '@angular/core';
import { ServiceItemData } from './service-item-data';
@Injectable()
export class ServiceServer {
public serviceData: ServiceItemData[] = [
{
icon: '',
bg: 'assets/img/service_bg_01.jpg',
title: '移动app定制开发',
describe: 'Android、iOS系统软件开发<br/>满足移动APP多平台开发需求'
},
{
icon: '',
bg: 'assets/img/service_bg_02.jpg',
title: '网站定制开发',
describe: '根据需求,为您”量身定制“的个性化的解决方案<br/>从根本上为您和您的企业带来革命性的改变'
},
{
icon: '',
bg: 'assets/img/service_bg_03.jpg',
title: '微信定制开发',
describe: '从企业的需求根本出发,为企业量身定制<br/>微信公众号二次开发、微商城定制、小程序开发'
},
{
icon: '',
bg: 'assets/img/service_bg_04.jpg',
title: 'O2O移动电商',
describe: '利用手机、平板等无线终端进行的O2O的电子商务<br/>实现随时随地、线上线下的购物与交易'
},
{
icon: '',
bg: 'assets/img/service_bg_05.jpg',
title: '物联网软件开发',
describe: '为客户快速开发出安全稳定的智能硬件的软件,<br/>并为企业提供安全可靠的设备云服务接入。'
},
{
icon: '',
bg: 'assets/img/service_bg_06.jpg',
title: '云服务',
describe: '基于互联网的相关服务设备的增加、使用和交付购置,<br/>提供涉及通过互联网来提供动态易扩展计算的资源。'
}
];
public getServiceData(): ServiceItemData[] {
return this.serviceData;
}
} | random_line_split | |
service-server.ts | /**
* Created by Deakin on 2017/3/20 0020.
*/
import { Injectable } from '@angular/core';
import { ServiceItemData } from './service-item-data';
@Injectable()
export class ServiceServer {
public serviceData: ServiceItemData[] = [
{
icon: '',
bg: 'assets/img/service_bg_01.jpg',
title: '移动app定制开发',
describe: 'Android、iOS系统软件开发<br/>满足移动APP多平台开发需求'
},
{
icon: '',
bg: 'assets/img/service_bg_02.jpg',
title: '网站定制开发',
describe: '根据需求,为您”量身定制“的个性化的解决方案<br/>从根本上为您和您的企业带来革命性的改变'
},
{
icon: '',
bg: 'assets/img/service_bg_03.jpg',
title: '微信定制开发',
describe: '从企业的需求根本出发,为企业量身定制<br/>微信公众号二次开发、微商城定制、小程序开发'
},
{
icon: '',
bg: 'assets/img/service_bg_04.jpg',
title: 'O2O移动电商',
describe: '利用手机、平板等无线终端进行的O2O的电子商务<br/>实现随时随地、线上线下的购物与交易'
},
{
icon: '',
bg: 'assets/img/service_bg_05.jpg',
title: '物联网软件开发',
describe: '为客户快速开发出安全稳定的智能硬件的软件,<br/>并为企业提供安全可靠的设备云服务接入。'
},
{
icon: '',
bg: 'assets/img/service_bg_06.jpg',
title: '云服务',
describe: '基于互联网的相关服务设备的增加、使用和交付购置,<br/>提供涉及通过互联网来提供动态易扩展计算的资源。'
}
];
public getServiceData(): ServiceItemData[] {
return this.serviceData;
}
}
| identifier_body | ||
destroyObject.js | /*global define*/
define([
'./defaultValue',
'./DeveloperError'
], function(
defaultValue,
DeveloperError) {
"use strict";
function | () {
return true;
}
/**
* Destroys an object. Each of the object's functions, including functions in its prototype,
* is replaced with a function that throws a {@link DeveloperError}, except for the object's
* <code>isDestroyed</code> function, which is set to a function that returns <code>true</code>.
* The object's properties are removed with <code>delete</code>.
* <br /><br />
* This function is used by objects that hold native resources, e.g., WebGL resources, which
* need to be explicitly released. Client code calls an object's <code>destroy</code> function,
* which then releases the native resource and calls <code>destroyObject</code> to put itself
* in a destroyed state.
*
* @exports destroyObject
*
* @param {Object} object The object to destroy.
* @param {String} [message] The message to include in the exception that is thrown if
* a destroyed object's function is called.
*
* @see DeveloperError
*
* @example
* // How a texture would destroy itself.
* this.destroy = function () {
* _gl.deleteTexture(_texture);
* return Cesium.destroyObject(this);
* };
*/
var destroyObject = function(object, message) {
message = defaultValue(message, 'This object was destroyed, i.e., destroy() was called.');
function throwOnDestroyed() {
throw new DeveloperError(message);
}
for ( var key in object) {
if (typeof object[key] === 'function') {
object[key] = throwOnDestroyed;
}
}
object.isDestroyed = returnTrue;
return undefined;
};
return destroyObject;
}); | returnTrue | identifier_name |
destroyObject.js | /*global define*/
define([
'./defaultValue',
'./DeveloperError'
], function(
defaultValue,
DeveloperError) {
"use strict";
function returnTrue() {
return true;
}
/**
* Destroys an object. Each of the object's functions, including functions in its prototype,
* is replaced with a function that throws a {@link DeveloperError}, except for the object's
* <code>isDestroyed</code> function, which is set to a function that returns <code>true</code>.
* The object's properties are removed with <code>delete</code>.
* <br /><br />
* This function is used by objects that hold native resources, e.g., WebGL resources, which
* need to be explicitly released. Client code calls an object's <code>destroy</code> function,
* which then releases the native resource and calls <code>destroyObject</code> to put itself
* in a destroyed state.
*
* @exports destroyObject
*
* @param {Object} object The object to destroy.
* @param {String} [message] The message to include in the exception that is thrown if
* a destroyed object's function is called.
*
* @see DeveloperError
*
* @example
* // How a texture would destroy itself.
* this.destroy = function () {
* _gl.deleteTexture(_texture);
* return Cesium.destroyObject(this);
* };
*/
var destroyObject = function(object, message) {
message = defaultValue(message, 'This object was destroyed, i.e., destroy() was called.');
function throwOnDestroyed() {
throw new DeveloperError(message);
}
for ( var key in object) {
if (typeof object[key] === 'function') |
}
object.isDestroyed = returnTrue;
return undefined;
};
return destroyObject;
}); | {
object[key] = throwOnDestroyed;
} | conditional_block |
destroyObject.js | /*global define*/
define([
'./defaultValue',
'./DeveloperError'
], function(
defaultValue,
DeveloperError) {
"use strict";
function returnTrue() {
return true;
}
/**
* Destroys an object. Each of the object's functions, including functions in its prototype,
* is replaced with a function that throws a {@link DeveloperError}, except for the object's
* <code>isDestroyed</code> function, which is set to a function that returns <code>true</code>.
* The object's properties are removed with <code>delete</code>.
* <br /><br />
* This function is used by objects that hold native resources, e.g., WebGL resources, which
* need to be explicitly released. Client code calls an object's <code>destroy</code> function,
* which then releases the native resource and calls <code>destroyObject</code> to put itself
* in a destroyed state.
*
* @exports destroyObject
*
* @param {Object} object The object to destroy.
* @param {String} [message] The message to include in the exception that is thrown if
* a destroyed object's function is called.
*
* @see DeveloperError
*
* @example | * return Cesium.destroyObject(this);
* };
*/
var destroyObject = function(object, message) {
message = defaultValue(message, 'This object was destroyed, i.e., destroy() was called.');
function throwOnDestroyed() {
throw new DeveloperError(message);
}
for ( var key in object) {
if (typeof object[key] === 'function') {
object[key] = throwOnDestroyed;
}
}
object.isDestroyed = returnTrue;
return undefined;
};
return destroyObject;
}); | * // How a texture would destroy itself.
* this.destroy = function () {
* _gl.deleteTexture(_texture); | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.