function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def test_rule_periodic_constrains(self):
    """Tests the constrains for the periodic rules.

    Each violated constraint on the periodic cycle-count rule must
    raise a ValidationError.
    """
    # constrain: periodic_qty_per_period < 1
    with self.assertRaises(ValidationError):
        self._create_stock_cycle_count_rule_periodic(self.manager, "rule_0", [0, 0])
    # constrain: periodic_count_period < 0
    with self.assertRaises(ValidationError):
        self._create_stock_cycle_count_rule_periodic(
            self.manager, "rule_0", [1, -1]
        )
248,
611,
248,
91,
1402934883
] |
def test_auto_link_inventory_to_cycle_count_1(self):
    """Create an inventory that could fit a planned cycle count should
    auto-link it to that cycle count."""
    # The planned cycle count starts out as a draft.
    self.assertEqual(self.cycle_count_1.state, "draft")
    inventory = self.inventory_model.create(
        {
            "name": "new inventory",
            # (4, id) Odoo command: link the counting location.
            "location_ids": [(4, self.count_loc.id)],
            "exclude_sublocation": True,
        }
    )
    # Creating the inventory should attach it to the pending cycle
    # count and move that cycle count to the "open" state.
    self.assertEqual(inventory.cycle_count_id, self.cycle_count_1)
    self.assertEqual(self.cycle_count_1.state, "open")
248,
611,
248,
91,
1402934883
] |
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _onchange_zone_id(self):
if self.zone_id:
self.country_id = self.zone_id.country_id.id | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _onchange_state_id(self):
if self.state_id:
self.zone_id = self.state_id.zone_id.id
self.country_id = self.state_id.country_id.id | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _onchange_zone_id(self):
if self.zone_id:
self.state_id = False
self.country_id = self.zone_id.country_id.id | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _name_search(
self, name, args=None, operator="ilike", limit=100, name_get_uid=None | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _onchange_commune_id(self):
if self.commune_id:
self.state_id = self.commune_id.state_id.id
self.zone_id = self.commune_id.zone_id.id
self.country_id = self.commune_id.country_id.id | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _onchange_state_id(self):
if self.state_id:
self.commune_id = False
self.zone_id = self.state_id.zone_id.id
self.country_id = self.state_id.country_id.id | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def _onchange_zone_id(self):
if self.zone_id:
self.commune_id = False
self.state_id = False
self.country_id = self.zone_id.country_id.id | OCA/l10n-romania | [
19,
24,
19,
7,
1427193295
] |
def test_user_delete_deletes_user(db_session):
    """Deleting a user removes its row from the database."""
    john = users.john()
    db_session.add(john)
    db_session.commit()
    john_id = john.id
    # Sanity check: the user was persisted and received a primary key.
    assert john_id is not None
    assert db_session.query(User).get(john_id) is not None
    john.delete()
    db_session.commit()
    # The row must be gone once the delete is committed.
    assert db_session.query(User).get(john_id) is None
367,
102,
367,
81,
1324989203
] |
def test_user_delete_deletes_owned_igc_files(db_session):
    """Deleting a user also removes their IGC files — both the database
    rows and the physical files on disk."""
    # Upload a fixture IGC file and verify it landed on disk.
    with open(igcs.simple_path, "rb") as f:
        filename = files.add_file("simple.igc", f)
    assert filename is not None
    assert os.path.isfile(files.filename_to_path(filename))
    # Attach the file to a user.
    john = users.john()
    igc = igcs.simple(owner=john, filename=filename)
    db_session.add(igc)
    db_session.commit()
    assert db_session.query(IGCFile).count() == 1
    assert db_session.query(IGCFile).get(igc.id).owner_id == john.id
    john.delete()
    db_session.commit()
    # Both the IGCFile row and the file on disk must be removed.
    assert db_session.query(IGCFile).count() == 0
    assert not os.path.isfile(files.filename_to_path(filename))
367,
102,
367,
81,
1324989203
] |
def reset(self):
    """Drop every cached hash, returning the cache to its empty state."""
    self.file_hash_cache = dict()
1555,
107,
1555,
143,
1488925832
] |
def set_file_hash(self, fn, hash_type, hash_val):
    """Record *hash_val* as the `hash_type` hash of file *fn*,
    creating the per-file sub-dict on first use."""
    self.file_hash_cache.setdefault(fn, {})[hash_type] = hash_val
1555,
107,
1555,
143,
1488925832
] |
def determine_same_file(origpath, destpath, fhc=None):
    '''
    First check if hashes of the two files match. If they don't match, they
    could still be the same image if metadata has changed so open the pixel
    data using PIL and compare hashes of that.
    '''
    if not fhc:
        fhc = FileHashCache()
    # Cap the cache so long import runs don't grow memory unboundedly.
    if len(fhc.file_hash_cache) > 1000:
        fhc.reset()
    # Pass 1: whole-file MD5 comparison (cheapest check).
    orig_hash = fhc.get_file_hash(origpath, 'file')
    if not orig_hash:
        orig_hash = md5(fhc.get_file(origpath, 'orig')).hexdigest()
        fhc.set_file_hash(origpath, 'file', orig_hash)
    dest_hash = fhc.get_file_hash(destpath, 'file')
    if not dest_hash:
        dest_hash = md5(fhc.get_file(destpath, 'dest')).hexdigest()
        fhc.set_file_hash(destpath, 'file', dest_hash)
    if orig_hash == dest_hash:
        return True
    # Try matching on image data (ignoring EXIF)
    if os.path.splitext(origpath)[1][1:].lower() in ['jpg', 'jpeg', 'png', ]:
        # Pass 2: MD5 of the decoded pixel data only.
        orig_hash = fhc.get_file_hash(origpath, 'image')
        if not orig_hash:
            # NOTE(review): StringIO over raw file bytes is a Python 2
            # idiom; on Python 3 this would need io.BytesIO — confirm
            # the target runtime.
            orig_hash = md5(Image.open(StringIO(fhc.get_file(origpath, 'orig'))).tobytes()).hexdigest()
            fhc.set_file_hash(origpath, 'image', orig_hash)
        dest_hash = fhc.get_file_hash(destpath, 'image')
        if not dest_hash:
            dest_hash = md5(Image.open(StringIO(fhc.get_file(destpath, 'dest'))).tobytes()).hexdigest()
            fhc.set_file_hash(destpath, 'image', dest_hash)
        if orig_hash == dest_hash:
            return True
    # TODO: Convert raw photos into temp jpgs to do proper comparison
    return False
1555,
107,
1555,
143,
1488925832
] |
def import_photos_from_dir(orig, move=False):
    """Walk *orig* recursively and import every photo into its
    date-based destination directory.

    :param orig: root directory to scan for photos
    :param move: if True, move files (and delete source duplicates);
        if False, copy them and leave the source untouched
    """
    imported = 0
    were_duplicates = 0
    were_bad = 0
    for r, d, f in os.walk(orig):
        # Skip Synology's thumbnail side-car directories entirely.
        if SYNOLOGY_THUMBNAILS_DIR_NAME in r:
            continue
        for fn in sorted(f):
            filepath = os.path.join(r, fn)
            dest = determine_destination(filepath)
            if blacklisted_type(fn):
                # Blacklisted type
                were_bad += 1
            elif not dest:
                # No filters match this file type
                pass
            else:
                t = get_datetime(filepath)
                if t:
                    # Destination layout: <dest>/YYYY/MM/DD/<filename>
                    destpath = '%02d/%02d/%02d' % (t.year, t.month, t.day)
                    destpath = os.path.join(dest, destpath)
                    mkdir_p(destpath)
                    destpath = os.path.join(destpath, fn)
                    if filepath == destpath:
                        # File is already in the right place so be very careful not to do anything like delete it
                        pass
                    elif not os.path.exists(destpath):
                        if move:
                            shutil.move(filepath, destpath)
                        else:
                            shutil.copyfile(filepath, destpath)
                        record_photo(destpath)
                        imported += 1
                        print('IMPORTED {} -> {}'.format(filepath, destpath))
                    else:
                        print('PATH EXISTS {} -> {}'.format(filepath, destpath))
                        same = determine_same_file(filepath, destpath)
                        if same:
                            # Fixed: only report "PHOTO IS THE SAME" after the
                            # comparison actually succeeded — it used to print
                            # unconditionally, before `same` was checked.
                            print('PHOTO IS THE SAME')
                            if move:
                                os.remove(filepath)
                                were_duplicates += 1
                                print('DELETED FROM SOURCE')
                        else:
                            print('NEED TO IMPORT UNDER DIFFERENT NAME')
                            # NOTE(review): this exit() makes the rename-import
                            # below unreachable; it looks like a temporary
                            # debugging guard — confirm before removing it.
                            exit(1)
                            destpath = find_new_file_name(destpath)
                            shutil.move(filepath, destpath)
                            record_photo(destpath)
                            imported += 1
                            # print 'IMPORTED {} -> {}'.format(filepath, destpath)
                else:
                    print('ERROR READING DATE: {}'.format(filepath))
                    were_bad += 1
    if imported or were_duplicates:
        print('\n{} PHOTOS IMPORTED\n{} WERE DUPLICATES\n{} WERE BAD'.format(imported, were_duplicates, were_bad))
1555,
107,
1555,
143,
1488925832
] |
def get_distance_term(C, mag, rrup):
    """
    Returns the distance attenuation factor

    :param C: coefficient dictionary for the IMT (keys c1, c2, c3)
    :param mag: magnitude of the rupture
    :param rrup: source-to-site distance array — presumably rupture
        distance in km, per the naming convention; confirm against caller
    """
    # Effective distances including the finite-depth term "h".
    rval = np.sqrt(rrup ** 2. + CONSTANTS["h"] ** 2.)
    rref_val = np.sqrt(CONSTANTS["Rref"] ** 2. +
                       CONSTANTS["h"] ** 2.)
    # Magnitude-dependent geometric spreading plus an anelastic
    # attenuation term scaled per 100 km.
    f_r = (C["c1"] + C["c2"] * (mag - CONSTANTS["Mref"])) *\
        np.log(rval / rref_val) + (C["c3"] * (rval - rref_val) / 100.)
    return f_r
291,
241,
291,
48,
1277737182
] |
def get_magnitude_scaling(C, mag):
    """
    Returns the magnitude scaling term

    Hinged (piecewise) scaling about the hinge magnitude Mh:
    quadratic in (mag - Mh) below the hinge, linear above it.
    """
    d_m = mag - CONSTANTS["Mh"]
    return np.where(mag <= CONSTANTS["Mh"],
                    C["e1"] + C["b1"] * d_m + C["b2"] * d_m ** 2.0,
                    C["e1"] + C["b3"] * d_m)
291,
241,
291,
48,
1277737182
] |
def get_stddevs(ergodic, tau_model, TAU, PHI_SS, imt, ctx):
    """
    Returns the standard deviations for either the ergodic or
    non-ergodic models

    :returns: [sigma, tau, phi] — total, between-event and
        within-event standard deviations
    """
    phi = get_phi_ss(imt, ctx.mag, PHI_SS)
    if ergodic:
        # Ergodic model: fold site-to-site variability into phi.
        phi_s2s = get_stewart_2019_phis2s(imt, ctx.vs30)
        phi = np.sqrt(phi ** 2. + phi_s2s ** 2.)
    tau = TAU_EXECUTION[tau_model](imt, ctx.mag, TAU)
    # Total sigma combines between- and within-event terms in quadrature.
    sigma = np.sqrt(tau ** 2. + phi ** 2.)
    return [sigma, tau, phi]
291,
241,
291,
48,
1277737182
] |
def __init__(self, **kwargs):
    """
    Instantiates the class with additional terms controlling both the
    epistemic uncertainty in the median and the preferred aleatory
    uncertainty model ('global', 'cena_constant', 'cena'), and the quantile
    of the epistemic uncertainty model (float in the range 0 to 1, or None)
    """
    super().__init__(**kwargs)
    # Epistemic adjustment (number of standard deviations) on the median.
    self.epsilon = kwargs.get("epsilon", 0.0)
    # Aleatory model selections for tau (between-event) and phi
    # (within-event) components.
    self.tau_model = kwargs.get("tau_model", "global")
    self.phi_model = kwargs.get("phi_model", "global")
    self.ergodic = kwargs.get("ergodic", True)
    # Quantiles of the aleatory models; None selects the mean model.
    self.tau_quantile = kwargs.get("tau_quantile", None)
    self.phi_ss_quantile = kwargs.get("phi_ss_quantile", None)
    self.site_epsilon = kwargs.get("site_epsilon", 0.0)
    # Site-to-site variability table; populated elsewhere when needed.
    self.PHI_S2SS = None
    # define the standard deviation model from the NGA East aleatory
    # uncertainty model according to the calibrations specified by the user
    # setup tau
    self.TAU = get_tau_at_quantile(TAU_SETUP[self.tau_model]["MEAN"],
                                   TAU_SETUP[self.tau_model]["STD"],
                                   self.tau_quantile)
    # setup phi
    self.PHI_SS = get_phi_ss_at_quantile(PHI_SETUP[self.phi_model],
                                         self.phi_ss_quantile)
291,
241,
291,
48,
1277737182
] |
def test_gfx_submodule_loads():
"gfx is present in a Pixmap"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx | gfxprim/gfxprim | [
37,
7,
37,
17,
1341042188
] |
def test_all_methods_are_known():
"All methods of gfx submodule have known param types in this test"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
for name in dir(c.gfx):
if name[0] != '_' and name not in ['C', 'ctx']:
assert name in gfx_params | gfxprim/gfxprim | [
37,
7,
37,
17,
1341042188
] |
def test_method_call(n, params):
    "Calling with dummy parameters:"
    c = PixmapRand(10, 10, core.C.PIXEL_RGB888)
    # params is either a type-signature string (dummy args generated
    # from it) or a tuple of positional args whose last element is a
    # kwargs dict.
    if isinstance(params, str):
        c.gfx.__getattribute__(n)(*gen_dummy_args(params))
    else:
        assert isinstance(params, tuple) and isinstance(params[-1], dict)
        c.gfx.__getattribute__(n)(*params[:-1], **params[-1])
37,
7,
37,
17,
1341042188
] |
def __init__(self,typ): | microelly2/geodata | [
22,
16,
22,
18,
1453641631
] |
def getParam(self, param):
    """Return the value stored for *param* in this node's parameter map."""
    value = self.params[param]
    return value
22,
16,
22,
18,
1453641631
] |
def addContent(self, c):
    """Append child element *c* to this node's content list in place."""
    self.content.append(c)
22,
16,
22,
18,
1453641631
] |
def getiterator(self, typ):
    """Recursively collect all descendant elements whose typ matches.

    NOTE(review): the source formatting was flattened; the recursive
    call is reconstructed at loop level (descend into every child,
    matching or not), matching the usual getiterator contract — confirm
    against the original repository.
    """
    rc = []
    for obj in self.content:
        if obj.typ == typ:
            rc += [obj]
        rc += obj.getiterator(typ)
    return rc
22,
16,
22,
18,
1453641631
] |
def getData(fn,pb=None): | microelly2/geodata | [
22,
16,
22,
18,
1453641631
] |
def showFace(rbf,rbf2,x,y,gridsize,shapeColor,bound):
import Draft
makeLoft=False | microelly2/geodata | [
22,
16,
22,
18,
1453641631
] |
def interpolate(x, y, z, gridsize, mode='thin_plate', rbfmode=True, shape=None):
    """Interpolate scattered points (x, y, z) onto a regular grid.

    :param x, y, z: coordinate/value sequences of the scattered data
    :param gridsize: node count along the longer axis; the shorter axis
        is scaled to keep grid cells roughly square
    :param mode: RBF function name (or interp2d kind in non-RBF mode)
    :param rbfmode: if True use scipy.interpolate.Rbf, else interp2d
    :param shape: optional (gridy, gridx) override of the computed grid
    :returns: [rbf, xi, yi, zi] — the (x, y)-ordered interpolator, the
        grid coordinate arrays, and the interpolated values on the grid
    """
    mode = str(mode)
    grids = gridsize
    dx = np.max(x) - np.min(x)
    dy = np.max(y) - np.min(y)
    # Scale the grid resolution to the data's aspect ratio.
    if dx > dy:
        gridx = grids
        gridy = int(round(dy / dx * grids))
    else:
        gridy = grids
        gridx = int(round(dx / dy * grids))
    # Fixed: identity comparison with None (was "shape != None").
    if shape is not None:
        (gridy, gridx) = shape
    xi, yi = np.linspace(np.min(x), np.max(x), gridx), np.linspace(np.min(y), np.max(y), gridy)
    xi, yi = np.meshgrid(xi, yi)
    if rbfmode:
        # Two interpolators: rbf in (x, y) order for callers; rbf2 in
        # (y, x) order, used to evaluate on the meshgrid below.
        rbf = scipy.interpolate.Rbf(x, y, z, function=mode)
        rbf2 = scipy.interpolate.Rbf(y, x, z, function=mode)
    else:
        # interp2d path is flagged as not implemented by the author.
        sayErr("interp2d nicht implementiert")
        x = np.array(x)
        y = np.array(y)
        z = np.array(z)
        xi, yi = np.linspace(np.min(x), np.max(x), gridx), np.linspace(np.min(y), np.max(y), gridy)
        rbf = scipy.interpolate.interp2d(x, y, z, kind=mode)
        rbf2 = scipy.interpolate.interp2d(y, x, z, kind=mode)
    zi = rbf2(yi, xi)
    return [rbf, xi, yi, zi]
22,
16,
22,
18,
1453641631
] |
def is_atari(env):
    """Heuristically decide whether *env* is an ALE/Atari gym env.

    Low-rank observation spaces (vectors/matrices) are rejected
    outright; otherwise the presence of an ``ale`` attribute on the
    unwrapped env is the deciding signal.
    """
    shape = getattr(env.observation_space, "shape", None)
    if shape is not None and len(shape) <= 2:
        return False
    return hasattr(env, "unwrapped") and hasattr(env.unwrapped, "ale")
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env=None):
"""Record episodes stats prior to EpisodicLifeEnv, etc."""
gym.Wrapper.__init__(self, env)
self._current_reward = None
self._num_steps = None
self._total_steps = None
self._episode_rewards = []
self._episode_lengths = []
self._num_episodes = 0
self._num_returned = 0 | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def step(self, action):
    """Step the wrapped env while accumulating episode statistics."""
    result = self.env.step(action)
    observation, reward, done, info = result
    self._current_reward += reward
    self._num_steps += 1
    self._total_steps += 1
    return (observation, reward, done, info)
24488,
4264,
24488,
2914,
1477424310
] |
def get_episode_lengths(self):
return self._episode_lengths | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def next_episode_results(self):
    """Yield (reward, length) for episodes not yet reported, then mark
    everything yielded so far as returned."""
    start = self._num_returned
    for idx in range(start, len(self._episode_rewards)):
        yield (self._episode_rewards[idx], self._episode_lengths[idx])
    self._num_returned = len(self._episode_rewards)
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP" | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def step(self, ac):
return self.env.step(ac) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env):
gym.RewardWrapper.__init__(self, env) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env):
"""Take action on reset.
For environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3 | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def step(self, ac):
return self.env.step(ac) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def reset(self, **kwargs):
    """Reset only when lives are exhausted.
    This way all states are still reachable even though lives are episodic,
    and the learner need not know about any of this behind-the-scenes.
    """
    if self.was_real_done:
        obs = self.env.reset(**kwargs)
    else:
        # no-op step to advance from terminal/lost life state
        obs, _, _, _ = self.env.step(0)
    # Re-sync the cached life counter with the emulator.
    self.lives = self.env.unwrapped.ale.lives()
    return obs
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def reset(self, **kwargs):
return self.env.reset(**kwargs) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env, dim):
"""Warp frames to the specified size (dim x dim)."""
gym.ObservationWrapper.__init__(self, env)
self.width = dim
self.height = dim
self.observation_space = spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8
) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env, k):
"""Stack k last frames."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[0], shp[1], shp[2] * k),
dtype=env.observation_space.dtype,
) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env):
"""No stacking. Trajectory View API takes care of this."""
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
assert shp[2] == 1
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1]), dtype=env.observation_space.dtype
) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(
low=0, high=1, shape=env.observation_space.shape, dtype=np.float32
) | ray-project/ray | [
24488,
4264,
24488,
2914,
1477424310
] |
def query_in_sync(func):
    """Decorator for CTP query callbacks: keeps the request id up to
    date and, on the final packet of a response, pushes the accumulated
    result to the gateway."""
    @wraps(func)
    def wrapper(api, data, error, n, last):
        # Keep req_id at least as large as the latest response id.
        if n > api.req_id:
            api.req_id = n
        result = func(api, data, last)
        if last:
            api.gateway.on_query(api.api_name, n, result)
    return wrapper
295,
58,
295,
5,
1488780949
] |
def __init__(self, gateway, temp_path, user_id, password, broker_id, address, api_name='ctp_md'):
super(CtpMdApi, self).__init__()
self.gateway = gateway
self.temp_path = temp_path
self.req_id = 0
self.connected = False
self.logged_in = False
self.user_id = user_id
self.password = password
self.broker_id = broker_id
self.address = address
self.api_name = api_name | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connected = False
self.logged_in = False | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspError(self, error, n, last):
"""错误回报"""
self.gateway.on_err(error) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspUserLogout(self, data, error, n, last):
    """Logout response callback: clear the login flag on success,
    otherwise forward the error to the gateway."""
    if error['ErrorID'] != 0:
        self.gateway.on_err(error)
    else:
        self.logged_in = False
295,
58,
295,
5,
1488780949
] |
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅期权询价"""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRtnForQuoteRsp(self, data):
"""期权询价推送"""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def subscribe(self, order_book_id):
"""订阅合约"""
ins_dict = self.gateway.get_ins_dict(order_book_id)
if ins_dict is None:
return None
instrument_id = ins_dict.instrument_id
if instrument_id:
self.subscribeMarketData(str(instrument_id)) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def close(self):
"""关闭"""
self.exit() | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def __init__(self, gateway, temp_path, user_id, password, broker_id, address, auth_code, user_production_info, api_name='ctp_td'):
super(CtpTdApi, self).__init__()
self.gateway = gateway
self.temp_path = temp_path
self.req_id = 0
self.connected = False
self.logged_in = False
self.authenticated = False
self.user_id = user_id
self.password = password
self.broker_id = broker_id
self.address = address
self.auth_code = auth_code
self.user_production_info = user_production_info
self.front_id = 0
self.session_id = 0
self.require_authentication = False
self.pos_cache = {}
self.ins_cache = {}
self.order_cache = {}
self.api_name = api_name | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connected = False
self.logged_in = False | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspAuthenticate(self, data, error, n, last):
"""验证客户端回报"""
if error['ErrorID'] == 0:
self.authenticated = True
self.login()
else:
self.gateway.on_err(error) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
if error['ErrorID'] == 0:
self.logged_in = False
else:
self.gateway.on_err(error) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspParkedOrderInsert(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
self.gateway.on_err(error) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspRemoveParkedOrder(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspExecOrderInsert(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspForQuoteInsert(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQuoteAction(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspCombActionInsert(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryOrder(self, data, last):
    """Order query response: accumulate valid orders into the cache;
    return the full cache only on the final packet (the return value is
    consumed by the query_in_sync wrapper)."""
    order_dict = OrderDict(data)
    if order_dict.is_valid:
        self.order_cache[order_dict.order_id] = order_dict
    if last:
        return self.order_cache
295,
58,
295,
5,
1488780949
] |
def onRspQryInvestorPosition(self, data, last):
    """Position query response: merge per-row data into a single
    PositionDict per order book id; return the cache on the final
    packet (consumed by the query_in_sync wrapper)."""
    if data['InstrumentID']:
        order_book_id = make_order_book_id(data['InstrumentID'])
        if order_book_id not in self.pos_cache:
            # First row for this instrument: create a new position entry.
            self.pos_cache[order_book_id] = PositionDict(data, self.gateway.get_ins_dict(order_book_id))
        else:
            # Subsequent rows (e.g. other direction) update the entry.
            self.pos_cache[order_book_id].update_data(data)
    if last:
        return self.pos_cache
295,
58,
295,
5,
1488780949
] |
def onRspQryTradingAccount(self, data, last):
"""资金账户查询回报"""
return AccountDict(data) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryInstrumentCommissionRate(self, data, last):
"""请求查询合约手续费率响应"""
return CommissionDict(data) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryProduct(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryInstrument(self, data, last):
    """Instrument query response: cache valid instruments keyed by
    order book id; return the cache on the final packet (consumed by
    the query_in_sync wrapper)."""
    ins_dict = InstrumentDict(data)
    if ins_dict.is_valid:
        self.ins_cache[ins_dict.order_book_id] = ins_dict
    if last:
        return self.ins_cache
295,
58,
295,
5,
1488780949
] |
def onRspQrySettlementInfo(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryInvestorPositionDetail(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQrySettlementInfoConfirm(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQrySecAgentACIDMap(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryProductGroup(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryOptionInstrCommRate(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryForQuote(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryLock(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryInvestorLevel(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryCombInstrumentGuard(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspQryTransferSerial(self, data, error, n, last):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRspError(self, error, n, last):
"""错误回报"""
self.gateway.on_err(error) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRtnTrade(self, data):
"""成交回报"""
trade_dict = TradeDict(data)
self.gateway.on_trade(trade_dict) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
self.gateway.on_err(error) | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRtnTradingNotice(self, data):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
def onRtnExecOrder(self, data):
""""""
pass | ricequant/rqalpha-mod-vnpy | [
295,
58,
295,
5,
1488780949
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.