query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Returns true if the current user is following the desired user
def is_following_by_username(self, id): return self.followed.filter(followers.c.followed_id == id).count() > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_following(self, user_id):\n return user_id in self.following", "def is_following(self, you, them):\n if self.filter(from_user=you, to_user=them).count() > 0:\n return True\n return False", "def is_following(self, user):\n return self.followed.filter_by(\n ...
[ "0.7629707", "0.75313014", "0.74600714", "0.7424218", "0.7424218", "0.73141986", "0.7265916", "0.7074228", "0.7049804", "0.6913712", "0.6912288", "0.682332", "0.6789204", "0.6776134", "0.67695856", "0.67149085", "0.65792125", "0.6536938", "0.6522163", "0.6520679", "0.6476891"...
0.70594585
8
Returns the username of the desired user
def get_username_by_id(self, id): return User.query.get(id).username
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def username(self) -> str:", "def username(self) -> str:", "def get_username(self):\r\n return self.username", "def get_username(self):\n return self.username", "def get_username(self, request):\r\n try:\r\n return request.user.username\r\n except AttributeError:\r\n ...
[ "0.8169498", "0.8169498", "0.8081674", "0.8035194", "0.8018373", "0.7989936", "0.79892623", "0.798525", "0.79793316", "0.79793316", "0.79586726", "0.79586726", "0.79586726", "0.79513943", "0.7948783", "0.7941232", "0.79404753", "0.79382294", "0.7898637", "0.7894659", "0.78907...
0.7294774
99
Gets the list of banks.
def get_banks() -> List[BankDetails]: from paynlsdk.api.transaction.getbanks import Request client = APIClient() request = Request() client.perform_request(request) return request.response.banks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks(self):\n return self.__banks", "def banks...
[ "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.83688354", "0.76620173", "0.664025", "0.664025", "0.664025", "0.664025", "0.664025", "0.664025", "0.664025", "0.6498705", "0.6401011", "0.63876766", "0.6261225", "0.6159119", "0.6064427", ...
0.7611785
8
Refund (part of) a transaction
def refund(transaction_id: str, amount: int=None, description: str=None, process_date: datetime=None): return Transaction.refund_response(transaction_id, amount, description, process_date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refund_payment(self, **kwargs):", "def refund_payment(self, **kwargs):", "def refund(self, amount_in_cents=None):\r\n self.require_item()\r\n\r\n url = self.get_url()\r\n params = base.get_params(('amount_in_cents',), locals())\r\n if params:\r\n url = url + '?' + htt...
[ "0.8025384", "0.8025384", "0.72545916", "0.7059862", "0.6983477", "0.69578123", "0.6895562", "0.68423915", "0.65162545", "0.64691937", "0.6408193", "0.6375899", "0.63735026", "0.6373101", "0.6268878", "0.6206176", "0.61452", "0.6065676", "0.606385", "0.60265565", "0.5964484",...
0.7412963
2
Show portfolio of stocks
def index(): rows=db.execute("SELECT * FROM portofolio WHERE user_id=:s",s=session["user_id"]) row=db.execute("SELECT * FROM users WHERE id=:s",s=session["user_id"]) overall=0 for line in rows: overall+=line["total"] overall+=row[0]["cash"] return render_template("portofolio.html",rows=rows,cash=usd(row[0]["cash"]),overall=usd(overall))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"c...
[ "0.7349525", "0.7257509", "0.7092902", "0.70840114", "0.70700926", "0.70322937", "0.70166755", "0.6955458", "0.69515324", "0.6818638", "0.67683095", "0.67541313", "0.6739686", "0.66745", "0.66665316", "0.6584005", "0.65794265", "0.6577333", "0.6556571", "0.65012944", "0.64815...
0.0
-1
Buy shares of stock
def buy(): if request.method == "POST": if not request.form.get("symbol") or not lookup(request.form.get("symbol")): return apology("must provide valid symbol",400) if not request.form.get("shares") or int(request.form.get("shares")) <= 0: return apology("shares must be positive integer!",400) row=db.execute("SELECT * FROM users WHERE id=:s",s=session["user_id"]) dict=lookup(request.form.get("symbol")) cost=dict["price"]* int(request.form.get("shares")) if row[0]["cash"]>cost: db.execute("INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)",s=dict["symbol"],sh=int(request.form.get("shares")),p=dict["price"],t=time.asctime( time.localtime(time.time())),u_i=session["user_id"],status='bought') row[0]["cash"]=row[0]["cash"]-cost db.execute("UPDATE users SET cash = :cash WHERE id=:s",cash=row[0]["cash"],s=session["user_id"]) exist=db.execute("SELECT * FROM portofolio WHERE symbol=:s AND user_id=:u_i",s=dict["symbol"],u_i=session["user_id"]) if len(exist) == 0 : db.execute("INSERT INTO portofolio(symbol,name,shares,price,total,user_id) VALUES (:s,:n,:sh,:p,:t,:u_i)",s=dict["symbol"],n=dict["name"],sh=int(request.form.get("shares")),p=dict["price"],t=cost,u_i=session["user_id"]) else: db.execute("UPDATE portofolio SET shares =shares+:sh, price=:p, total=total+:t WHERE symbol=:s AND user_id=:u_i",sh=int(request.form.get("shares")),p=dict["price"],t=dict["price"] * int(request.form.get("shares")),s=dict["symbol"],u_i=session["user_id"]) else: return apology("Can't afford!",400) return redirect("/") else: return render_template("buy.html")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def buy(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot buy less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_d...
[ "0.75090504", "0.73669976", "0.719782", "0.7076133", "0.702035", "0.70142156", "0.69631433", "0.6943623", "0.6915625", "0.691014", "0.6897359", "0.68731195", "0.6865242", "0.68647367", "0.6850216", "0.6848369", "0.6818854", "0.67891747", "0.6722476", "0.6719868", "0.67188966"...
0.64506495
55
Show history of transactions
def history(): user_history=db.execute("SELECT * FROM history WHERE user_id=:u_i",u_i=session["user_id"]) return render_template("history.html",s=user_history)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def history():\n\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=ses...
[ "0.82002884", "0.8140903", "0.8114919", "0.8042388", "0.8033679", "0.80168986", "0.79338187", "0.7822898", "0.77456313", "0.77341163", "0.7718728", "0.7716838", "0.7708978", "0.7628142", "0.7624304", "0.7530955", "0.74965966", "0.7481556", "0.74478734", "0.7423578", "0.738227...
0.68055445
50
Sell shares of stock
def sell(): if request.method == "POST": dict=lookup(request.form.get("symbol")) if not request.form.get("symbol") or not request.form.get("shares") or not lookup(request.form.get("symbol")): return apology("Must provide valid symbol and positive integer",400) else: row=db.execute("SELECT *FROM portofolio WHERE symbol=:s AND user_id=:u_i",s=request.form.get("symbol"),u_i=session["user_id"]) if len(row) == 0 or int(request.form.get("shares")) > row[0]["shares"]: return apology("you don't have enough shares of this company",400) else: db.execute("INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)",s=dict["symbol"],sh=int(request.form.get("shares")),p=dict["price"],t=time.asctime( time.localtime(time.time())),u_i=session["user_id"],status='sold') db.execute("UPDATE portofolio SET shares =shares-:sh, price=:p, total=total-:t WHERE symbol=:s AND user_id=:u_i",sh=int(request.form.get("shares")),p=dict["price"],t=dict["price"] * int(request.form.get("shares")),s=dict["symbol"],u_i=session["user_id"]) db.execute("UPDATE users SET cash=cash+:extra WHERE id=:i",extra=int(request.form.get("shares")) * dict["price"],i=session["user_id"]) db.execute("DELETE FROM portofolio WHERE shares=0") return redirect("/") else: rows=db.execute("SELECT *FROM portofolio where user_id=:u_i ",u_i=session["user_id"]) arr=[] for row in rows: arr.append(row['symbol']) return render_template("selling.html",arr=arr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sell_stock(self, symbol):\n amount_to_sell = self.get_equity(symbol)\n chirp.order_sell_fractional_by_price(symbol, amount_to_sell)\n self.L.add_line('', symbol, 'SOLD', amount_to_sell)", "async def sell(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tif shar...
[ "0.76451164", "0.748746", "0.7260813", "0.71164954", "0.7057435", "0.70490545", "0.70268077", "0.69872165", "0.6972749", "0.6966809", "0.6915586", "0.68977857", "0.68829334", "0.6849716", "0.68191767", "0.68124473", "0.68022114", "0.6801836", "0.67799073", "0.67698354", "0.67...
0.6538862
39
auth_state enabled and available
async def test_auth_state(app, auth_state_enabled): name = 'kiwi' user = add_user(app.db, app, name=name) assert user.encrypted_auth_state is None cookies = await app.login_user(name) auth_state = await user.get_auth_state() assert auth_state == app.authenticator.auth_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth_state(self):\n raise NotImplementedError()", "def check_auth():", "def requires_auth(self):\n return True", "def get_authorization():\n return True", "def is_authenticated(self):\n return True #self.authenticated", "def is_authenticated(self):\n return True", "de...
[ "0.74882525", "0.71273947", "0.6910631", "0.6909217", "0.670079", "0.6680911", "0.6628001", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.6505308", "0.6504012", "0.64769554", "0.64769554", "0.64572227", "0.64142615", "0.6...
0.7185515
1
admin should be passed through for nonadmin users
async def test_auth_admin_non_admin(app): name = 'kiwi' user = add_user(app.db, app, name=name, admin=False) assert user.admin is False cookies = await app.login_user(name) assert user.admin is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "d...
[ "0.76604193", "0.76227117", "0.7580729", "0.7580729", "0.74723893", "0.7399326", "0.73662555", "0.72489077", "0.7212591", "0.71992993", "0.7144615", "0.7074819", "0.7029829", "0.7020532", "0.70080376", "0.7003052", "0.7002116", "0.6998605", "0.69832844", "0.6980152", "0.69801...
0.0
-1
admin should be passed through for admin users
async def test_auth_admin_is_admin(app): # Admin user defined in MockPAMAuthenticator. name = 'admin' user = add_user(app.db, app, name=name, admin=False) assert user.admin is False cookies = await app.login_user(name) assert user.admin is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def admin_re...
[ "0.76515234", "0.7589409", "0.72544324", "0.7240122", "0.72308195", "0.72102034", "0.72102034", "0.71380514", "0.71223193", "0.71042573", "0.7030925", "0.70082307", "0.6999681", "0.6966063", "0.6965032", "0.6940669", "0.69357306", "0.6932168", "0.6897163", "0.6887465", "0.687...
0.0
-1
admin should be unchanged if authenticator doesn't return admin value
async def test_auth_admin_retained_if_unset(app): name = 'kiwi' # Add user as admin. user = add_user(app.db, app, name=name, admin=True) assert user.admin is True # User should remain unchanged. cookies = await app.login_user(name) assert user.admin is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n ...
[ "0.6545929", "0.6443032", "0.6328436", "0.6289202", "0.62141764", "0.62122965", "0.6134274", "0.6130718", "0.6127599", "0.61198676", "0.6079004", "0.60773593", "0.6006329", "0.6006329", "0.59977925", "0.5994712", "0.59755987", "0.5956938", "0.59537655", "0.59465075", "0.59193...
0.62106174
6
auth_state enabled at the Authenticator level, but unavailable due to no crypto keys.
def auth_state_unavailable(auth_state_enabled): crypto.CryptKeeper.instance().keys = [] yield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth_state(self):\n raise NotImplementedError()", "async def test_auth_state(app, auth_state_enabled):\n name = 'kiwi'\n user = add_user(app.db, app, name=name)\n assert user.encrypted_auth_state is None\n cookies = await app.login_user(name)\n auth_state = await user.get_auth_state...
[ "0.70946044", "0.6600317", "0.5862487", "0.58282715", "0.577362", "0.5701615", "0.5661563", "0.5567852", "0.55177164", "0.55092674", "0.55005485", "0.54370195", "0.5430303", "0.53735715", "0.5352972", "0.53514665", "0.5348289", "0.5340672", "0.5339704", "0.5333827", "0.532115...
0.6792563
1
Tests whether ``SoundboardSound.__repr__`` works as intended.
def test__SoundboardSound__repr(): available = False emoji = BUILTIN_EMOJIS['heart'] name = 'rember' user_id = 202305240032 volume = 0.69 sound_id = 202305240033 guild_id = 202305240034 sound = SoundboardSound.precreate( sound_id, guild_id = guild_id, available = available, emoji = emoji, name = name, user_id = user_id, volume = volume, ) vampytest.assert_instance(repr(sound), str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_repr(self):\n self.assertEqual(repr(self.deck), \"Deck of 52 cards.\")", "def test_repr(self):\n self.assertEqual(repr(self.card), \"A of Spades\")", "def test_repr(self):\n dummy = DummyCryptographicObject()\n repr(dummy)", "def test_repr_show(self):\n self.assert...
[ "0.6914809", "0.6808723", "0.66963106", "0.6534174", "0.6505421", "0.64757746", "0.6391462", "0.6254702", "0.6244902", "0.6209125", "0.6178", "0.6166957", "0.6156544", "0.6156544", "0.6156544", "0.6156544", "0.6156544", "0.6116914", "0.6116562", "0.6096828", "0.60628986", "...
0.77653193
0
Tests whether ``SoundboardSound.__hash__`` works as intended.
def test__SoundboardSound__hash(): available = False emoji = BUILTIN_EMOJIS['heart'] name = 'rember' user_id = 202305240035 volume = 0.69 sound_id = 202305240036 guild_id = 202305240037 keyword_parameters = { 'available': available, 'emoji': emoji, 'name': name, 'user_id': user_id, 'volume': volume, } sound = SoundboardSound.precreate( sound_id, guild_id = guild_id, **keyword_parameters, ) vampytest.assert_instance(repr(sound), str) sound = SoundboardSound(**keyword_parameters) vampytest.assert_instance(repr(sound), str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__SoundboardSound__eq():\n available = False\n emoji = BUILTIN_EMOJIS['heart']\n name = 'rember'\n user_id = 202305240038\n volume = 0.69\n \n sound_id = 202305240039\n guild_id = 202305240040\n \n keyword_parameters = {\n 'available': available,\n 'emoji': emoji...
[ "0.64707255", "0.63552177", "0.6180331", "0.61213714", "0.6096645", "0.60493755", "0.60439104", "0.603657", "0.603657", "0.60337245", "0.6027019", "0.6023845", "0.60236806", "0.59823734", "0.5976516", "0.5973415", "0.59712046", "0.59241724", "0.58853847", "0.5853793", "0.5851...
0.78902173
0
Tests whether ``SoundboardSound.__eq__`` works as intended.
def test__SoundboardSound__eq(): available = False emoji = BUILTIN_EMOJIS['heart'] name = 'rember' user_id = 202305240038 volume = 0.69 sound_id = 202305240039 guild_id = 202305240040 keyword_parameters = { 'available': available, 'emoji': emoji, 'name': name, 'user_id': user_id, 'volume': volume, } sound = SoundboardSound.precreate( sound_id, guild_id = guild_id, **keyword_parameters, ) vampytest.assert_eq(sound, sound) vampytest.assert_ne(sound, object()) test_sound = SoundboardSound(**keyword_parameters,) vampytest.assert_eq(sound, test_sound) for field_name, field_value in ( ('available', True), ('emoji', BUILTIN_EMOJIS['x']), ('name', 'happy day'), ('user_id', 202305240041), ('volume', 0.70), ): test_sound = SoundboardSound(**{**keyword_parameters, field_name: field_value}) vampytest.assert_ne(test_sound, sound)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if not isinstance(other, NhlOddsScoringPlay):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, DiarizeAudio):\n return False\n\n return self.__dict__ == other.__dict__", ...
[ "0.7053136", "0.69082266", "0.68528724", "0.6663054", "0.6607504", "0.65843195", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", ...
0.7741757
0
Getter method for usr_ping_count, mapped from YANG variable /mpls_state/statistics_oam/usr_ping_count (uint32)
def _get_usr_ping_count(self): return self.__usr_ping_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", pa...
[ "0.7919465", "0.62490904", "0.5974101", "0.5872466", "0.5848729", "0.5829324", "0.58061343", "0.57633376", "0.5698802", "0.5670618", "0.55348474", "0.54909444", "0.54774773", "0.54751647", "0.54595673", "0.54140943", "0.5395396", "0.5368198", "0.5319455", "0.531552", "0.52972...
0.6955739
1
Setter method for usr_ping_count, mapped from YANG variable /mpls_state/statistics_oam/usr_ping_count (uint32)
def _set_usr_ping_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-ping-count", rest_name="usr-ping-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """usr_ping_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-ping-count", rest_name="usr-ping-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__usr_ping_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), ...
[ "0.66030264", "0.6556253", "0.5894535", "0.568632", "0.56651974", "0.56644946", "0.5605921", "0.5584418", "0.55039364", "0.55039364", "0.5503759", "0.5493639", "0.54372156", "0.53541523", "0.5247895", "0.5217682", "0.5213357", "0.5187056", "0.516838", "0.5158767", "0.51152784...
0.8726256
0
Getter method for usr_traceroute_count, mapped from YANG variable /mpls_state/statistics_oam/usr_traceroute_count (uint32)
def _get_usr_traceroute_count(self): return self.__usr_traceroute_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-trac...
[ "0.8096988", "0.58348656", "0.5540107", "0.5337788", "0.5088764", "0.50769204", "0.5072222", "0.50714403", "0.5059296", "0.50581366", "0.50281113", "0.49717057", "0.4894099", "0.48607355", "0.48384598", "0.48260984", "0.48260832", "0.48071045", "0.47291836", "0.47040775", "0....
0.7333012
1
Setter method for usr_traceroute_count, mapped from YANG variable /mpls_state/statistics_oam/usr_traceroute_count (uint32)
def _set_usr_traceroute_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-traceroute-count", rest_name="usr-traceroute-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """usr_traceroute_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-traceroute-count", rest_name="usr-traceroute-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__usr_traceroute_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size...
[ "0.69384295", "0.67749554", "0.55161387", "0.5156745", "0.5149116", "0.51029897", "0.506374", "0.5044057", "0.5044057", "0.4962718", "0.49115133", "0.49114674", "0.48995262", "0.48913693", "0.48689324", "0.4850962", "0.48443764", "0.4837305", "0.48038968", "0.47544253", "0.46...
0.88829786
0
Getter method for echo_req_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_sent_count (uint32)
def _get_echo_req_sent_count(self): return self.__echo_req_sent_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-s...
[ "0.7922508", "0.6860566", "0.67645717", "0.65473276", "0.64664423", "0.5968414", "0.5861087", "0.5787943", "0.5689521", "0.56807286", "0.5430021", "0.54220355", "0.5201689", "0.5130782", "0.49798772", "0.4948916", "0.48980132", "0.48834348", "0.4872083", "0.48253825", "0.4824...
0.7223332
1
Setter method for echo_req_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_sent_count (uint32)
def _set_echo_req_sent_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-sent-count", rest_name="echo-req-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """echo_req_sent_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-sent-count", rest_name="echo-req-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__echo_req_sent_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-res...
[ "0.7546443", "0.74266785", "0.69616437", "0.66970843", "0.6288053", "0.6279342", "0.6064423", "0.57884806", "0.5556439", "0.5305215", "0.52710307", "0.5181049", "0.50654066", "0.50303006", "0.50086975", "0.49919927", "0.48444322", "0.4777001", "0.4770685", "0.4731099", "0.468...
0.86451477
0
Getter method for echo_req_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_received_count (uint32)
def _get_echo_req_received_count(self): return self.__echo_req_received_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"ec...
[ "0.77231425", "0.7023055", "0.6542562", "0.62707675", "0.5976226", "0.5851045", "0.58174163", "0.54856044", "0.54173255", "0.5317395", "0.50623125", "0.5022893", "0.5022014", "0.4883163", "0.4860375", "0.48226205", "0.48192063", "0.4816786", "0.4814243", "0.4797339", "0.46779...
0.70718306
1
Setter method for echo_req_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_received_count (uint32)
def _set_echo_req_received_count(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-received-count", rest_name="echo-req-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """echo_req_received_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-received-count", rest_name="echo-req-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__echo_req_received_count = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"...
[ "0.7757586", "0.7065394", "0.68073034", "0.67364514", "0.66612166", "0.62458676", "0.56309354", "0.5338627", "0.52505404", "0.50096846", "0.48576525", "0.48448968", "0.48185655", "0.47561345", "0.4739262", "0.46860245", "0.46509308", "0.46177593", "0.45707104", "0.45225346", ...
0.84776396
0
Getter method for echo_req_timeout_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_timeout_count (uint32)
def _get_echo_req_timeout_count(self): return self.__echo_req_timeout_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo...
[ "0.80206865", "0.6734563", "0.6540391", "0.6376731", "0.61047983", "0.60804176", "0.60245043", "0.5819295", "0.55777395", "0.49859354", "0.49369183", "0.4863983", "0.48448384", "0.48247913", "0.4788822", "0.4729328", "0.47008264", "0.4682831", "0.46340755", "0.46337748", "0.4...
0.7347358
1
Setter method for echo_req_timeout_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_timeout_count (uint32)
def _set_echo_req_timeout_count(self, v, load=False):
  """
  Setter method for echo_req_timeout_count, mapped from YANG variable
  /mpls_state/statistics_oam/echo_req_timeout_count (uint32).

  Auto-generated (pyangbind-style) setter: wraps ``v`` in a YANGDynClass
  restricted to the uint32 range and raises ValueError with a structured
  error dict if the value is incompatible.
  """
  # Values may arrive wrapped in a typed container; unwrap via _utype first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-timeout-count", rest_name="echo-req-timeout-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_req_timeout_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-timeout-count", rest_name="echo-req-timeout-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })
  # Store the validated value and fire the change hook if one is registered.
  self.__echo_req_timeout_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"ec...
[ "0.7362202", "0.7095978", "0.6999127", "0.6642217", "0.64998084", "0.60558325", "0.57168615", "0.5617509", "0.52749455", "0.50046265", "0.4905859", "0.49030608", "0.48003668", "0.4683387", "0.4644038", "0.45746693", "0.45659736", "0.4561842", "0.45596886", "0.4557894", "0.448...
0.87411803
0
Getter method for echo_resp_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_sent_count (uint32)
def _get_echo_resp_sent_count(self):
  """
  Getter method for echo_resp_sent_count, mapped from YANG variable
  /mpls_state/statistics_oam/echo_resp_sent_count (uint32).
  """
  current_value = self.__echo_resp_sent_count
  return current_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-res...
[ "0.7885906", "0.71892625", "0.7101065", "0.69950885", "0.66274446", "0.6617462", "0.635762", "0.5800941", "0.577533", "0.55431056", "0.5397536", "0.5257677", "0.5200311", "0.5128361", "0.5111158", "0.51002544", "0.50733143", "0.50320685", "0.50222456", "0.4920297", "0.4917206...
0.7451523
1
Setter method for echo_resp_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_sent_count (uint32)
def _set_echo_resp_sent_count(self, v, load=False):
  """
  Setter method for echo_resp_sent_count, mapped from YANG variable
  /mpls_state/statistics_oam/echo_resp_sent_count (uint32).

  Auto-generated (pyangbind-style) setter: wraps ``v`` in a YANGDynClass
  restricted to the uint32 range and raises ValueError with a structured
  error dict if the value is incompatible.
  """
  # Values may arrive wrapped in a typed container; unwrap via _utype first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-sent-count", rest_name="echo-resp-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_resp_sent_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-sent-count", rest_name="echo-resp-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })
  # Store the validated value and fire the change hook if one is registered.
  self.__echo_resp_sent_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"...
[ "0.7872074", "0.7738624", "0.7278106", "0.7268996", "0.6715367", "0.65274954", "0.6379835", "0.60264504", "0.5693287", "0.55500114", "0.5237852", "0.515169", "0.50731426", "0.5025889", "0.49551892", "0.4954932", "0.4935209", "0.49100864", "0.4884356", "0.48487535", "0.4827424...
0.8611856
0
Getter method for echo_resp_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_received_count (uint32)
def _get_echo_resp_received_count(self):
  """
  Getter method for echo_resp_received_count, mapped from YANG variable
  /mpls_state/statistics_oam/echo_resp_received_count (uint32).
  """
  current_value = self.__echo_resp_received_count
  return current_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"...
[ "0.785285", "0.70278424", "0.6552268", "0.64812785", "0.6124184", "0.55265635", "0.5396158", "0.5115494", "0.5112692", "0.50826174", "0.49409315", "0.4853131", "0.4822724", "0.47961304", "0.4737998", "0.47299495", "0.47204226", "0.46952853", "0.46684587", "0.46589735", "0.465...
0.731385
1
Setter method for echo_resp_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_received_count (uint32)
def _set_echo_resp_received_count(self, v, load=False):
  """
  Setter method for echo_resp_received_count, mapped from YANG variable
  /mpls_state/statistics_oam/echo_resp_received_count (uint32).

  Auto-generated (pyangbind-style) setter: wraps ``v`` in a YANGDynClass
  restricted to the uint32 range and raises ValueError with a structured
  error dict if the value is incompatible.
  """
  # Values may arrive wrapped in a typed container; unwrap via _utype first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-received-count", rest_name="echo-resp-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_resp_received_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-received-count", rest_name="echo-resp-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })
  # Store the validated value and fire the change hook if one is registered.
  self.__echo_resp_received_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"ec...
[ "0.76704717", "0.73230594", "0.70944583", "0.6276271", "0.618727", "0.6063231", "0.60443354", "0.51781255", "0.5142111", "0.50479865", "0.49492952", "0.48183542", "0.47569573", "0.47515997", "0.47491166", "0.4727505", "0.46277964", "0.45390615", "0.45219365", "0.44867164", "0...
0.8556093
0
Getter method for return_codes, mapped from YANG variable /mpls_state/statistics_oam/return_codes (list)
def _get_return_codes(self):
  """
  Getter method for return_codes, mapped from YANG variable
  /mpls_state/statistics_oam/return_codes (list).
  """
  current_value = self.__return_codes
  return current_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_return_codes(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"number\",return_codes.return_codes, yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, is_container='list', user_ordered=False, path_helper=se...
[ "0.74746984", "0.6001439", "0.5503843", "0.52020866", "0.51311105", "0.5087448", "0.5053257", "0.50515985", "0.49911606", "0.48414174", "0.48189038", "0.46903574", "0.4685551", "0.4676945", "0.46544784", "0.46471697", "0.45722067", "0.45684677", "0.45653287", "0.44813254", "0...
0.6220025
1
Setter method for return_codes, mapped from YANG variable /mpls_state/statistics_oam/return_codes (list)
def _set_return_codes(self, v, load=False):
  """
  Setter method for return_codes, mapped from YANG variable
  /mpls_state/statistics_oam/return_codes (list).

  Auto-generated (pyangbind-style) setter: wraps ``v`` in a YANGDynClass
  list container keyed by 'number' and raises ValueError with a structured
  error dict if the value is incompatible.
  """
  # Values may arrive wrapped in a typed container; unwrap via _utype first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGListType("number",return_codes.return_codes, yang_name="return-codes", rest_name="return-codes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name="return-codes", rest_name="return-codes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """return_codes must be of a type compatible with list""",
      'defined-type': "list",
      'generated-type': """YANGDynClass(base=YANGListType("number",return_codes.return_codes, yang_name="return-codes", rest_name="return-codes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name="return-codes", rest_name="return-codes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
    })
  # Store the validated container and fire the change hook if registered.
  self.__return_codes = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_return_codes(self):\n return self.__return_codes", "def traffic_statuscodes_cachecodes(self, **kwargs):\n url_path = 'traffic/statuscodes/cachecodes'\n self.logger.debug(f\"Get list of cache codes\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=u...
[ "0.6291366", "0.614847", "0.55107534", "0.54416496", "0.5277496", "0.5117377", "0.50821906", "0.50539535", "0.49968654", "0.4987946", "0.49695534", "0.47850552", "0.47627786", "0.4757852", "0.4730072", "0.4728713", "0.47217613", "0.46919942", "0.46385816", "0.4602432", "0.459...
0.8261662
0
Assumes binary array of 1 and 0 as input. Calculate longest ranges of 1's.
def count_ranges(a):
    """Find runs of consecutive 1's in a binary (0/1) sequence.

    :param a: iterable of 0/1 values
    :returns: list of ``[end, length]`` pairs, where ``end`` is the index
        just past the last 1 of the run and ``length`` is the run length.
        Runs of a single 1 are ignored (length must be >= 2).
    """
    ranges = []
    count = 0
    for i, v in enumerate(a):
        if v == 1:  # run continues
            count += 1
        else:
            if count > 1:
                ranges.append([i, count])  # [end, length]
            count = 0
    # Bug fix: a run extending to the end of the input was previously
    # dropped, because runs were only flushed when a 0 was seen.
    if count > 1:
        ranges.append([len(a), count])
    return ranges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution(N):\n # write your code in Python 3.6\n bin_number = str(bin(N))[2:]\n new_bin_gap = False\n longest_bin_gap = 0\n bin_gap_counter = 0\n for char in bin_number:\n if char == '1':\n if bin_gap_counter > longest_bin_gap:\n longest_bin_gap = bin_gap_coun...
[ "0.6214653", "0.61803985", "0.61673665", "0.6140894", "0.60812706", "0.60509235", "0.60332197", "0.6032968", "0.59139353", "0.5893834", "0.5799357", "0.5777712", "0.57581043", "0.57554233", "0.57196367", "0.56999797", "0.5694365", "0.5683879", "0.56704044", "0.56537473", "0.5...
0.6188714
1
from range of count_ranges, return the 'howmany' longest ranges
def find_longest_ranges(range, howmany):
    """From the output of count_ranges, pick the ``howmany`` longest runs.

    NOTE: sorts ``range`` in place (caller's list is reordered by length).
    When ``howmany`` > 1, returns a list of ranges ordered by start time;
    otherwise returns the single longest ``[end, length]`` pair.
    """
    range.sort(key=lambda item: item[1])  # order by run length, in place
    if howmany <= 1:
        # Single result: just the longest run.
        return range[-1]
    longest = range[-howmany:]
    longest.sort(key=lambda item: item[0])  # restore chronological order
    return longest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlap_len(range1, range2):\n return min(range1[1], range2[1]) - max(range1[0], range2[0])", "def count_ranges(a):\n ranges = []\n count = 0\n for i, v in enumerate(a):\n if v == 1: # same as previous value\n count += 1\n else:\n if count > 1:\n ...
[ "0.6553023", "0.63709086", "0.6276035", "0.618169", "0.61786216", "0.60901237", "0.60474753", "0.6026283", "0.6022194", "0.6012892", "0.5978638", "0.58781105", "0.58559895", "0.58521396", "0.5841774", "0.58397764", "0.58287156", "0.58229816", "0.5816759", "0.5807792", "0.5768...
0.74052256
0
Calc all kinds of properties for a line. The line should be an list of arrays.
def calc_output(line, react_cap=None, gen_res_high=225, gen_res_low=50):
    """Calc all kinds of properties for a pulsed-power waveform.

    :param line: sequence of three equal-length numpy arrays (t, v, i) —
        time, voltage, current. Assumes the time step is uniform
        (presumably 1 ns; the original sanity assert is commented out).
    :param react_cap: reactor capacitance; currently unused (the e_cap
        computation is commented out below).
    :param gen_res_high: generator resistance used before the pulse-stable
        point when estimating resistive dissipation.
    :param gen_res_low: generator resistance used after the pulse-stable
        point.
    :returns: dict of derived waveform properties (peaks, settling/rise
        times, energies); consumed with an 'output_' prefix in calc_run.py.
    :raises AssertionError: when the waveform polarity, magnitude, or
        stability checks fail.
    """
    # unpack
    t, v, i = line
    t_diff = t[1] - t[0]
    # assert t_diff == 1e-9 # time scale should be 1ns.
    # values based on current measurment. Assuming voltage waveform is aligned.
    # validation on the real maxima/minima of current
    assert i.argmax() < i.argmin(), 'Current valley before peak, signal is inverted!'
    v_min = min(v)
    v_max = max(v)
    v_max_time = np.where(v == v_max)[0][0]  # first value where voltage has maximum
    # v_min_time = np.where(v == v_min)[0][-1] # last value where voltage has minimum
    # assert v_max_time < v_min_time, 'Voltage valley before peak, signal inverted!'
    c_peak_time = i[0:v_max_time].argmax()  # current peak is before voltage maximum
    c_max = i[c_peak_time]
    c_valley_time = i.argmin()
    c_min = min(i)
    assert i[c_valley_time] == c_min  # some validation
    assert c_peak_time < c_valley_time, 'Current valley before peak, signal is inverted!'
    assert MAX_VOLTAGE_MIN <= v_max < MAX_VOLTAGE_MAX, 'Max voltage error (%r)' % v_max
    assert MAX_CURRENT_MIN <= c_max < MAX_CURRENT_MAX, 'Max current error (%r)' % c_max
    # Find the settling time of the current. Than use the time where the current is stable
    # to calculate the final pulse voltage. This pulse final voltage is then used to calculate
    # the settling time and risetime of the voltage.
    # all parts of current inside 10% of maximum, till end of pulse
    i_time_settling_options = [abs(x) < 0.1 * c_max for x in i[0:c_valley_time]]
    ranges = count_ranges(i_time_settling_options)
    range_before, range_pulse = find_longest_ranges(ranges, 2)  # [end, length]
    end_pulse = range_pulse[0]
    i_time_settling = range_pulse[0] - range_pulse[1]
    # average of voltage during pulse when current is < 5% of max current
    v_pulse = np.mean(v[i_time_settling:end_pulse])
    # all parts of current inside 10% of maximum, till end of pulse
    v_time_settling_options = [abs(x - v_pulse) < (0.1 * v_pulse) for x in v]
    ranges = count_ranges(v_time_settling_options)
    if ranges == []:
        # if too much oscillations, a range cannot be found. Increase the bounds:
        # all parts of current inside 10% of maximum, till end of pulse
        v_time_settling_options = [abs(x - v_pulse) < (0.3 * v_pulse) for x in v]
        ranges = count_ranges(v_time_settling_options)
        print('Warning, voltage settling options increased from 10% to 30%!')
    assert ranges != [], "Error! Line is too unstable."
    pulse = find_longest_ranges(ranges, 1)  # pulse=[end,length] of voltage pulse stable
    settling_end = pulse[0] - pulse[1]  # voltage pulse stable start
    # recalculate pulse voltage
    v_pulse_new = np.mean(v[settling_end:pulse[0]])
    if v_pulse > 13e3:
        # pulses for highest voltages have to be stable. Lower voltages are always less stable.
        assert abs(v_pulse-v_pulse_new)/v_pulse_new < 0.01, 'Pulse voltage unstable.'
    t_settling_end = t[settling_end]  # voltage pulse stable start time
    v05 = 0.05 * v_pulse
    settling_start = np.where(v > v05)[0][0]
    t_settling_start = t[settling_start]  # when v first rises above 0.05 of final
    t_settling = t_settling_end - t_settling_start
    v10 = 0.1 * v_pulse
    v90 = 0.9 * v_pulse
    t_rise_start = t[np.where(v > v10)[0][0]]
    t_rise_end = t[np.where(v > v90)[0][0]]
    t_rise = t_rise_end - t_rise_start
    rise_rate = (v90 - v10) / (t_rise)
    v_overshoot = v_max / v_pulse
    pulse_stable = int((settling_end + end_pulse) / 2)  # point where the pulse is very stable
    # energy
    p = (v * i)  # for this to be correct, make sure lines are aligned in b_correct_lines using offset 'v_div'
    e = integrate.cumtrapz(p, t, initial=0)
    p_rise = p[settling_start:pulse_stable]
    e_rise = e[settling_start:pulse_stable][-1]
    p_res = np.append(i[0:pulse_stable] ** 2 * gen_res_high, i[pulse_stable:] ** 2 * gen_res_low)
    # 1/2*C*V^2 is energy stored in capacitor, which is lost after discharging pulse.
    # e_cap = 1 / 2 * react_cap * v_pulse ** 2
    e_res = integrate.cumtrapz(p_res, t, initial=0)
    e_res_total = e_res[-1]
    e_plasma = e[-1]
    # energy to plasma is energy in positive pulse except charge on capacitor.
    # Correct the time axis to have 0 at the start of the pulse
    start = t[settling_start]
    t = t - start
    # all these values are added to the pickle and xlsx with 'output_' prepend in calc_run.py
    data = {
        't': t,
        'v': v,
        'c': i,
        'c_min': c_min,
        'c_max': c_max,
        'v_min': v_min,
        'v_max': v_max,
        'v_pulse': v_pulse,
        't_settling': t_settling,
        't_rise': t_rise,
        'rise_rate': rise_rate,
        'v_overshoot': v_overshoot,
        'p': p,
        'e': e,
        'p_rise': p_rise,
        'e_rise': e_rise,
        'p_res': p_res,
        'e_res': e_res,
        'e_res_total': e_res_total,
        # 'e_cap': e_cap,
        'e_plasma': e_plasma,
        'start': start,
        'end': t[end_pulse],
        # 'start_index': settling_start,
        # 'end_index': end_pulse,
        # 'test': i_time_settling
    }
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_properties_sp(lines):\n\n # TODO Better logging for crashed xtb\n if not read_status(lines):\n return None\n\n keywords = [\n \"final structure:\",\n \":: SUMMARY ::\",\n \"Property Printout \",\n \"ITERATIONS\",\n ]\n...
[ "0.59829354", "0.58867705", "0.56100065", "0.56093836", "0.53970766", "0.5322736", "0.5254309", "0.5200317", "0.5133646", "0.51039666", "0.5046336", "0.50146914", "0.5009376", "0.4989455", "0.4974461", "0.49629325", "0.4949585", "0.49364555", "0.49333733", "0.49223906", "0.49...
0.0
-1
Show dialog of RETRY,SKIP,ABORT
def retryskipabort(message, timeout=20):
    """Show a modal tkinter dialog with Skip / Retry / ABORT buttons.

    The dialog auto-closes after ``timeout`` seconds (returning 'abort'
    by default). Any left-click inside the window cancels the countdown
    and leaves the dialog open for a manual choice.

    :param message: text displayed at the top of the dialog
    :param timeout: seconds before the dialog closes itself
    :returns: one of 'skip', 'retry', 'abort'
    """
    root = tk.Tk()
    root.geometry("400x200")
    root.title("Exception handle")
    root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
    root.attributes("-topmost", True)
    # Shared mutable state between the callbacks: the chosen result and the
    # id of the pending `after` timer (stored under key 'root').
    _kvs = {"result": "abort"}

    def cancel_timer(*args):
        # Stop the countdown; user has taken over.
        root.after_cancel(_kvs['root'])
        root.title("Manual")

    def update_prompt():
        # NOTE(review): defined but apparently never called — confirm before removing.
        cancel_timer()

    def f(result):
        # Factory for button callbacks: record the choice and close the dialog.
        def _inner():
            _kvs['result'] = result
            root.destroy()
        return _inner

    tk.Label(root, text=message).pack(side=tk.TOP, fill=tk.X, pady=10)
    frmbtns = tk.Frame(root)
    tk.Button(frmbtns, text="Skip", command=f('skip')).pack(side=tk.LEFT)
    tk.Button(frmbtns, text="Retry", command=f('retry')).pack(side=tk.LEFT)
    tk.Button(frmbtns, text="ABORT", command=f('abort')).pack(side=tk.LEFT)
    frmbtns.pack(side=tk.BOTTOM)
    prompt = tk.StringVar()
    label1 = tk.Label(root, textvariable=prompt)  #, width=len(prompt))
    label1.pack()
    deadline = time.time() + timeout

    def _refresh_timer():
        # Countdown tick: close on expiry, otherwise show remaining seconds.
        leftseconds = deadline - time.time()
        if leftseconds <= 0:
            root.destroy()
            return
        root.title("Test will stop after " + str(int(leftseconds)) + " s")
        _kvs['root'] = root.after(500, _refresh_timer)

    _kvs['root'] = root.after(0, _refresh_timer)
    root.bind('<Button-1>', cancel_timer)
    root.mainloop()
    return _kvs['result']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def askContinue(parent,message,continueKey,title=_('Warning')):\r\n #--ContinueKey set?\r\n if _settings.get(continueKey): return wx.ID_OK\r\n #--Generate/show dialog\r\n dialog = wx.Dialog(parent,defId,title,size=(350,200),style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)\r\n icon = wx.StaticBitmap(d...
[ "0.64992744", "0.63597643", "0.6132208", "0.5936326", "0.5899217", "0.5896812", "0.5886796", "0.5824935", "0.5818185", "0.57966095", "0.5755113", "0.5753816", "0.5747904", "0.5747904", "0.5747904", "0.5747384", "0.574394", "0.57047844", "0.5692576", "0.56602055", "0.5655979",...
0.57067364
17
Retrieves users' notifications based on current `auth_token`
def get_notifications(
    self,
    all: bool = False,
    participating: bool = False,
    since: Optional[datetime] = None,
    before: Optional[datetime] = None,
    per_page: int = 10,
    page: int = 1,
) -> List[Notification]:
    """Retrieve the authenticated user's notifications.

    Delegates to the raw `_notifications` API call and parses the JSON
    response into Notification objects.
    """
    payload = self._notifications(
        all=all,
        participating=participating,
        since=since,
        before=before,
        per_page=per_page,
        page=page,
    )
    parsed = Notification.load_from_json_str(payload)
    return parsed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n\n args = pagination_parser.parse_args()\n return get_notifications(user, args['from_id'])", "def get_user_notifications(self, login):", "async def get_user_notifications(self):\n self._old_notificatio...
[ "0.72785807", "0.70493317", "0.6741458", "0.6740879", "0.638995", "0.6368157", "0.62646663", "0.6193626", "0.60971195", "0.6062733", "0.5904651", "0.58698857", "0.58336437", "0.5826497", "0.58177674", "0.5768257", "0.5741692", "0.5732743", "0.5713906", "0.56797826", "0.566113...
0.54637754
28
API call for getting notifications
def _notifications(
    self,
    all: bool = False,
    participating: bool = False,
    since: Optional[datetime] = None,
    before: Optional[datetime] = None,
    page: int = 1,
    per_page: int = 10,
) -> str:
    """Raw API call for listing the authenticated user's notifications.

    :param all: include notifications already marked as read
    :param participating: only notifications the user directly participates in
    :param since: only notifications updated after this time (ISO 8601)
    :param before: only notifications updated before this time (ISO 8601)
    :param page: page number to fetch
    :param per_page: page size; GitHub caps this at 100
    :returns: the raw JSON response body as text
    :raises ValueError: when per_page exceeds the API maximum of 100
    """
    # Validate before doing any work (was raised late, after building params,
    # as a bare Exception; ValueError is backward-compatible for callers
    # catching Exception and is the idiomatic type for a bad argument).
    if per_page > 100:
        raise ValueError(
            "Github API support maximum 100 notifications per page for api calls"
        )
    headers = {
        "Authorization": "token {}".format(self.auth_token),
        "accept": "application/vnd.github.v3+json",
    }
    params = {
        "all": "true" if all else "false",
        "participating": "true" if participating else "false",
        "page": page,
        "per_page": per_page,
    }
    # Optional time-window filters, serialized as ISO 8601.
    if since is not None:
        params["since"] = since.isoformat()
    if before is not None:
        params["before"] = before.isoformat()
    res = request("GET", self.NOTIFICATIONS_URL, headers=headers, params=params)
    return res.text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_notifications(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n ...
[ "0.7631317", "0.70797265", "0.7076586", "0.695784", "0.6907027", "0.6840182", "0.6796614", "0.6696409", "0.6680031", "0.6636662", "0.6489067", "0.64789927", "0.6347963", "0.6330483", "0.6290125", "0.6276469", "0.62763685", "0.626309", "0.6229939", "0.61899424", "0.6159832", ...
0.6299571
14
Just a Hello World function
def hello():
    """Return the classic greeting string."""
    greeting = ", ".join(("Hello", "World!"))
    return greeting
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hello_world():\n\n return \"Hello World\"", "def hello():\r\n return 'Hello World!'", "def hello():\n print(\"Hello World\")", "def hello():\n print(\"Hello World.\")", "def print_hello_world():\n print(\"Hello World\")", "def hello_world():\n return 'Hello World!'", "def hello():...
[ "0.8689984", "0.86293316", "0.8605817", "0.8595374", "0.85618377", "0.85127413", "0.85047764", "0.8472644", "0.84356874", "0.84356874", "0.84356874", "0.8434335", "0.838036", "0.83421856", "0.833462", "0.8314161", "0.8306801", "0.82825506", "0.81582916", "0.805908", "0.799055...
0.8384513
13
Function used to reinject values back into forms for accessing by themes
def build_custom_user_fields(
    form_cls,
    include_entries=False,
    fields_kwargs=None,
    field_entries_kwargs=None,
    blacklisted_items=(),
):
    """Collect the dynamically-attached custom user fields from a form class
    so themes can render them (reinjecting any saved values when asked).

    :param form_cls: form class previously augmented by attach_custom_user_fields
    :param include_entries: when True, preload each field with its saved value
    :param fields_kwargs: filter kwargs for the UserFields query
    :param field_entries_kwargs: filter kwargs for the UserFieldEntries query
    :param blacklisted_items: lowercase field names to skip
    :returns: list of bound form fields, each tagged with ``field_type``
    """
    if fields_kwargs is None:
        fields_kwargs = {}
    if field_entries_kwargs is None:
        field_entries_kwargs = {}

    custom_fields = UserFields.query.filter_by(**fields_kwargs).all()

    # Saved entry values keyed by field id; only loaded when requested.
    saved_values = {}
    if include_entries is True:
        for entry in UserFieldEntries.query.filter_by(**field_entries_kwargs).all():
            saved_values[entry.field_id] = entry.value

    collected = []
    for custom_field in custom_fields:
        if custom_field.name.lower() in blacklisted_items:
            continue
        bound_field = getattr(form_cls, f"fields[{custom_field.id}]")
        # Tag with field_type so Jinja knows how to render it.
        bound_field.field_type = custom_field.field_type
        if include_entries is True:
            initial = saved_values.get(custom_field.id, "")
            bound_field.data = initial
            if bound_field.render_kw:
                bound_field.render_kw["data-initial"] = initial
            else:
                bound_field.render_kw = {"data-initial": initial}
        collected.append(bound_field)
    return collected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_tweaks(self):\n pass", "def _replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def render_form():", "def get_context_data(self, **kwargs):\n form_context = {}\n for form_key, form_class in self.get_form_class...
[ "0.7147261", "0.62756866", "0.5939157", "0.57501096", "0.5736135", "0.57243216", "0.5698981", "0.55505884", "0.55351985", "0.5531961", "0.55112284", "0.54195195", "0.54063284", "0.539079", "0.5381074", "0.53664094", "0.53374904", "0.5327692", "0.5265599", "0.5257497", "0.5251...
0.0
-1
Function used to attach form fields to wtforms. Not really a great solution but is approved by wtforms.
def attach_custom_user_fields(form_cls, **kwargs):
    """Attach dynamically-configured custom user fields to a wtforms form class.

    Each UserFields row matching ``kwargs`` becomes a StringField or
    BooleanField set on ``form_cls`` under the name ``fields[<id>]``.

    :param form_cls: the wtforms form class to augment
    :param kwargs: filter kwargs for the UserFields query
    :raises ValueError: for a field_type that is neither "text" nor "boolean"
    """
    new_fields = UserFields.query.filter_by(**kwargs).all()
    for field in new_fields:
        validators = []
        if field.required:
            validators.append(InputRequired())

        if field.field_type == "text":
            input_field = StringField(
                field.name, description=field.description, validators=validators
            )
        elif field.field_type == "boolean":
            input_field = BooleanField(
                field.name, description=field.description, validators=validators
            )
        else:
            # Bug fix: an unknown field_type previously crashed with a
            # NameError on the unbound `input_field`; fail explicitly instead.
            raise ValueError(f"Unsupported custom field type: {field.field_type!r}")

        setattr(form_cls, f"fields[{field.id}]", input_field)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_CustomisedFormLayoutFields(request):\n schema = schemaish.Structure()\n schema.add( 'firstName', schemaish.String())\n schema.add( 'surname', schemaish.String())\n schema.add( 'age', schemaish.Integer())\n schema.add( 'sex', schemaish.String())\n\n form = formish.Form(schema, 'form')\n\n...
[ "0.698078", "0.6600586", "0.659325", "0.6585638", "0.6533927", "0.6412825", "0.6412825", "0.6412825", "0.629907", "0.62634355", "0.62466544", "0.6142649", "0.6102095", "0.60763484", "0.60400486", "0.5991732", "0.5991732", "0.5946256", "0.5897828", "0.5873216", "0.58706456", ...
0.6660687
1
Build the appropriate field so we can render it via the extra property. Add field_type so Jinja knows how to render it.
def build_registration_code_field(form_cls):
    """Return the registration-code field (in a single-item list) when configured.

    Tags the field with ``field_type = "text"`` so Jinja knows how to render
    it via the extra property; returns an empty list when no registration
    code is required.
    """
    if not Configs.registration_code:
        return []
    field = getattr(form_cls, "registration_code")  # noqa B009
    field.field_type = "text"
    return [field]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_field(field, bulk_nullable=False, label=None):\n return {\n 'field': field,\n 'label': label or field.label,\n 'bulk_nullable': bulk_nullable,\n }", "def field_wrapper(field):\n return {'field': field}", "def build_standard_field(self, field_name, model_field_type):\n ...
[ "0.6215978", "0.6009022", "0.5988297", "0.59636", "0.5934223", "0.590157", "0.5843997", "0.58085185", "0.5790124", "0.5744198", "0.5619677", "0.56059074", "0.5605764", "0.55969447", "0.55805063", "0.55735326", "0.55544263", "0.55516434", "0.55483365", "0.5527651", "0.5524747"...
0.51042384
66
If we have a registration code required, we attach it to the form similar to attach_custom_user_fields
def attach_registration_code_field(form_cls):
    """When a registration code is required, attach a mandatory
    ``registration_code`` StringField to the form class (mirrors
    attach_custom_user_fields)."""
    if not Configs.registration_code:
        return
    code_field = StringField(
        "Registration Code",
        description="Registration code required to create account",
        validators=[InputRequired()],
    )
    setattr(form_cls, "registration_code", code_field)  # noqa B010
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def registration(request, code=None):\n if request.method == \"POST\":\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"confirm.html\")\n else:\n form = RegistrationForm()\n return render(request, 'registration.html'...
[ "0.6838169", "0.66669655", "0.6514536", "0.6208826", "0.6137646", "0.6134475", "0.6013371", "0.6006505", "0.6004355", "0.5999936", "0.5989894", "0.5950262", "0.59496284", "0.5925366", "0.59166074", "0.58668554", "0.58395535", "0.5811587", "0.5776874", "0.5757824", "0.57516545...
0.78658634
0
Custom init to persist the obj parameter to the rest of the form
def __init__(self, *args, **kwargs):
    """Custom init that persists the ``obj`` keyword argument on the
    instance so the rest of the form can access it."""
    super().__init__(*args, **kwargs)
    source_obj = kwargs.get("obj")
    # Only persist a truthy obj (mirrors the original truthiness check).
    if source_obj:
        self.obj = source_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)", "def __init__(self, obj, attribs):\n self.obj = obj\n self.attribs = attribs\n if self.obj:\n self._save()", "def __init__(self...
[ "0.81270677", "0.7214874", "0.6869673", "0.66981107", "0.66653264", "0.66653264", "0.6648626", "0.65440965", "0.6481102", "0.6445112", "0.6415407", "0.6336561", "0.6326652", "0.63131803", "0.6255758", "0.6211104", "0.6211104", "0.62063265", "0.6182554", "0.613536", "0.6123644...
0.7258114
1
Present the first document in the queue for labeling
def main():
    """Serve the labeling page for the next document waiting in the queue.

    NOTE: pops (mutates) the module-level ``queue``.
    """
    next_docid = queue.pop(0)
    return render_template('doc.html', docid=next_docid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._head._element", "def first(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.root().element().value()", "def first(self):\r\n if self.is_empty():\r\n ...
[ "0.59403706", "0.5902959", "0.57828766", "0.5667334", "0.56622183", "0.5638245", "0.56327254", "0.5631978", "0.5631978", "0.56066155", "0.56028295", "0.55972004", "0.55730265", "0.5565737", "0.55282176", "0.55227655", "0.55021554", "0.5459005", "0.545217", "0.54516345", "0.54...
0.5553572
14
Present a particular document for labeling
def contract(docid):
    """Serve the labeling page for the document identified by ``docid``."""
    context = {'docid': docid}
    return render_template('doc.html', **context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XCAFDoc_DocumentTool_DocLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_DocLabel(*args)", "def DocLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_DocLabel(*args)", "def document(self):\n ...", "def doc(docid):\n\t\n\tdata = {'id':docid,\n\t\t\t'text':\"Some made up text for class...
[ "0.6995251", "0.6902461", "0.6584978", "0.63693833", "0.6363506", "0.6272244", "0.6196975", "0.59724045", "0.59635377", "0.58901525", "0.586877", "0.58180165", "0.581586", "0.5797789", "0.57909936", "0.5767639", "0.57195663", "0.5712125", "0.5709976", "0.5680226", "0.5668638"...
0.0
-1
Build a queue of docs to be labeled. Exclude those doc_cloud_ids that have already been labeled.
def get_queue(filename): build_queue = [q.replace("\n", "") for q in open(filename)] build_queue = [l for l in build_queue\ if not os.path.exists(SETTINGS.XML_LOCATION + l + ".xml")] build_queue = list(set(build_queue)) # dedupe build_queue.sort(key=sort_have_labels) return build_queue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_docs():\n docs = []\n for base_id in range(DOCUMENTS_PER_LEVEL):\n d = jina_pb2.Document()\n d.granularity = 0\n d.adjacency = 0\n d.id = base_id\n docs.append(d)\n iterate_build(d, 0, 2, 0, 2)\n return docs", "def keep_documents(self, idx):\n p...
[ "0.56112474", "0.5495247", "0.53238213", "0.52911776", "0.52044696", "0.5167606", "0.51129067", "0.5092232", "0.5007754", "0.5002671", "0.50004286", "0.49999186", "0.4982563", "0.4973685", "0.49599707", "0.4940291", "0.49395326", "0.4921682", "0.4908651", "0.49044877", "0.489...
0.51931
5
Labeled tokens come back from the UI as JSON. This method pulls them from the json and dumps
def get_labels(): json_request = request.json # get the json from the server keys = sort_keys(json_request.keys()) # sort the keys (i.e. the token ids) labels = [] for k in keys: # get the labels that the user input to the UI val = (json_request[k]['text'], json_request[k]['value']) labels.append(val) return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokens_json(self):\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_encoded = Fernet(secret).decrypt(\n token_row.tokens_fernet.encode('ascii'))\n return json.loads(tokens_encoded.decode('ascii'))", "def tokens():\n pass",...
[ "0.5711956", "0.55669457", "0.5513183", "0.54622537", "0.52287966", "0.5219712", "0.5186664", "0.51845807", "0.5176791", "0.5157778", "0.5122742", "0.51104176", "0.5076709", "0.5054146", "0.50533843", "0.5040823", "0.5034359", "0.502462", "0.50213176", "0.49873635", "0.497497...
0.65568817
0
The UI is requesting parserator's tags. If they've been processed, send them to client side Else, send a bunch of blank tags
def tags(docid): page = request.args.get('page') filename = SETTINGS.LABELED_LOCATION + '/' + docid page_text = get_document_page(docid, page) if not os.path.isfile(filename): return spanify(page_text, page) else: with open(filename) as tokens_file: labels = json.load(tokens_file) return spanify(page_text, page, labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_tag_list(self, taglist):\r\n self.do_before()\r\n for tag in taglist:\r\n self.feed(tag)\r\n self.do_after()", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server ...
[ "0.594429", "0.57760024", "0.57655793", "0.5725321", "0.5628237", "0.56022376", "0.55851656", "0.55792296", "0.5511034", "0.55045795", "0.54749936", "0.545959", "0.545458", "0.5436897", "0.54287684", "0.54231405", "0.5336545", "0.52850664", "0.5254251", "0.5225636", "0.522103...
0.0
-1
The UI is sending tagged tokens back to the server. Save them to train parserator
def tokens_dump(docid): tagged_strings = set() labels = get_labels() tagged_sequence = labels # replacing prep_inputs method. still works? tagged_strings.add(tuple(tagged_sequence)) outfile = SETTINGS.XML_LOCATION + "/" + docid + ".xml" try: os.remove(outfile) except OSError: pass appendListToXMLfile(tagged_strings, MODULE, outfile) if len(queue) == 0: return "All done!" else: return queue.pop(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, tokenizer):\n pass", "def __init__(self):\n self.tokens = []", "def tokenize(self):\n\n x = [] # input documents\n for file_path in glob.glob(self.train_dir + '*.txt'):\n file_as_string = open(file_path).read()\n x.append(file_as_string)\n\n self.tokenizer.fit_o...
[ "0.61516076", "0.6044435", "0.5898327", "0.5834319", "0.58167106", "0.5813318", "0.5673961", "0.5612146", "0.5571863", "0.55666846", "0.55541897", "0.5546777", "0.553209", "0.5514731", "0.5511445", "0.5496212", "0.5490531", "0.54755586", "0.54755586", "0.54755586", "0.5452871...
0.5427992
23
A custom sort. Favors cases where there are already labels for the tokens from parserator. For big corpuses, parsing takes time so you don't want to parse the whole corpus just to see how it is doing
def sort_have_labels(doc_cloud_id): filename = SETTINGS.LABELED_LOCATION + "/" + doc_cloud_id if os.path.isfile(filename): return 0 return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sorting(tokens: list):\n tokens.sort(key=lambda x: (x[0], x[1]))", "def sort_tokens(tokens: Iterable[Cwf]) -> List[Cwf]:\n return sorted(tokens, key=lambda t: (t.get_sent(), int(t.get_offset())))", "def sort_by_tokens(self, token_order):\n\n remaining_tokens = list(set(self.dictionary.tok...
[ "0.6858186", "0.6613914", "0.657223", "0.64046764", "0.640126", "0.63479173", "0.62306327", "0.6093973", "0.6057724", "0.59947205", "0.59803766", "0.5903831", "0.5857513", "0.5817867", "0.5800997", "0.57934964", "0.5732152", "0.57284874", "0.57194114", "0.5718229", "0.5717523...
0.0
-1
Allows the user to change his/her password. If the existing password matches, and both new password fields match, then the password is changed. A ?next=/... query parameter can be added, so after the password is changed, the user is redirected back to the original referring page.
def password_req(request): next = request.POST.get('next', request.META.get('HTTP_REFERER', DEFAULT_REDIRECT)) args = default_context(request, username=request.user.username, next=next) try: password = request.POST['password'] pw1 = request.POST['pw1'] pw2 = request.POST['pw2'] except KeyError: pass else: if pw1 != pw2: args['mismatch'] = True elif not request.user.check_password(password): args['error'] = True else: request.user.set_password(pw1) request.user.save() return HttpResponseRedirect(next) return render_to_response('registration/password.html', args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_password(self, old_password, new_password):\n data = dict(password = new_password)\n data['old-password'] = old_password\n return self.app.post('/_changePassword', data = data, follow_redirects = True)", "def change_password():\n form = PasswordResetForm()\n\n if form.valida...
[ "0.7684161", "0.7678293", "0.7636813", "0.7593512", "0.7537619", "0.7510125", "0.75019175", "0.748036", "0.7474108", "0.7473725", "0.74614745", "0.7304231", "0.7270338", "0.72622114", "0.72228485", "0.7220767", "0.72164667", "0.71916544", "0.7117814", "0.71164465", "0.7103846...
0.7167825
18
Handles the 'forgotten password' form. The user can ask for a specific username to be reset. The user can also specify an email address instead of a username. An email containing encrypted and signed links is sent to the particular email address. There is no difference in the output if 0, 1, or many users are found matching the particular username/email. This prevents people from guessing valid usernames or emails.
def forgot_req(request): server = request.META['SERVER_NAME'] recover_url = urljoin(full_url(request), 'recover') if request.POST and not request.user.is_authenticated(): try: username_or_email = request.POST['username'] except KeyError: pass else: if '@' in username_or_email: qs = User.objects.filter(email = username_or_email) else: qs = User.objects.filter(username = username_or_email) users = [] user = None for user in qs: query = 'salt=%s&user=%s' % (urlsafe_b64encode(urandom(8)),\ user.username) url = add_encrypted_query_string(recover_url, query, settings.SECRET_KEY) url = sign_query_string(settings.SECRET_KEY + user.password, url) users.append(dict(username = user.username, url = url)) template = get_template('registration/recover-password.txt') context = Context(dict(users = users, ApplianceName = server)) if len(users) == 1: plural = '' else: plural = 's' if user: user.email_user(subject = "Your %s console account%s" % (server, plural), from_email = FROM_EMAIL, message = template.render(context)) return HttpResponseRedirect('sent') return render_to_response('registration/forgotten.html', dict(username=request.GET.get('username', ''), META=request.META, root=settings.ROOT_URL, media=settings.MEDIA_URL))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forgot_password():\n if request.method == 'POST':\n if 'username' in request.form:\n username = request.form['username']\n user = Users.query.get(username)\n if user:\n reset_slug = utils.encrypt(username)\n reset_url = request.host_url +...
[ "0.8209941", "0.7987478", "0.7894088", "0.7582648", "0.7481974", "0.744047", "0.7361025", "0.73203427", "0.7314185", "0.7305841", "0.73045814", "0.72920185", "0.72777385", "0.71073365", "0.7094137", "0.703783", "0.7018474", "0.7012398", "0.69860375", "0.6957978", "0.6949704",...
0.7435538
6
Allows the user to change his/her password.
def recover_req(request): query_string = request.META['QUERY_STRING'] query_args = parse_qs(get_encrypted_query_string(query_string, settings.SECRET_KEY)) template_map = dict(META = request.META, enc = request.GET['enc'], hmac = request.GET['hmac'], root = settings.ROOT_URL, media = settings.MEDIA_URL) if request.POST and 'user' in query_args: username = query_args['user'][0] user = User.objects.get(username = username) pw1 = request.POST['pw1'] pw2 = request.POST['pw2'] if pw1 != pw2: template_map['mismatch'] = True elif not is_signed_query_string_valid(settings.SECRET_KEY + user.password, query_string): template_map['error'] = True else: user.set_password(pw1) user.save() user = authenticate(username = username, password = pw1) login(request, user) return HttpResponseRedirect('..') return render_to_response('registration/recover.html', template_map)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_password(self, new_pass):\n self.manager.change_user_password(self, new_pass)", "def ChangePassword():\n if self.ChangePassword():\n # Update successful, return to main screen\n self.confirm_pass.set('')\n self.password.set('')\n ...
[ "0.8333886", "0.8186766", "0.8097396", "0.7924953", "0.79134744", "0.79105085", "0.7877887", "0.78761387", "0.7860327", "0.784283", "0.7838352", "0.78312457", "0.78055656", "0.7788097", "0.77424246", "0.77323174", "0.77305025", "0.7709434", "0.7682401", "0.76711226", "0.76426...
0.0
-1
Cancels a user session. Puts up the login form.
def logout_req(request): if request.user.is_authenticated(): logout(request) return render_to_response('registration/login.html', dict(next = DEFAULT_REDIRECT, META = request.META, root = settings.ROOT_URL, media = settings.MEDIA_URL))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def logout():\r\n form = LoginForm()\r\n user = current_user\r\n user.authentica...
[ "0.7182822", "0.69931966", "0.69211394", "0.67985123", "0.67923224", "0.6713557", "0.66810346", "0.6674477", "0.6670308", "0.66364765", "0.66233695", "0.6603241", "0.65926504", "0.6584644", "0.65809464", "0.6569001", "0.6568214", "0.65630865", "0.65387446", "0.6528097", "0.65...
0.0
-1
Create an object from its rdf type
def oid(identifier_or_rdf_type, rdf_type, context, base_type=None): identifier = identifier_or_rdf_type if rdf_type is None: rdf_type = identifier_or_rdf_type identifier = None cls = None if context is not None: cls = context.resolve_class(rdf_type) if cls is None and context is not None: for types in _superclass_iter(context.rdf_graph(), rdf_type): for typ in types: cls = context.resolve_class(typ) if cls is not None: break if cls is not None: break if cls is None: cls = base_type # if its our class name, then make our own object # if there's a part after that, that's the property name if context is not None: cls = context(cls) if identifier is not None: o = cls.query(ident=identifier, no_type_decl=True) else: o = cls.query() return o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __generate_object_term__(self, datatype, value):\n if datatype == NS_MGR.xsd.anyURI.rdflib:\n term = rdflib.URIRef(value)\n elif datatype:\n term = rdflib.Literal(value, datatype=datatype)\n else:\n term = rdflib.Literal(value)\n return term", "def...
[ "0.6613706", "0.62533885", "0.5993754", "0.5973116", "0.59641635", "0.59002733", "0.58277136", "0.58260673", "0.5804339", "0.579951", "0.57783437", "0.5778184", "0.5754997", "0.56360924", "0.56325865", "0.56170595", "0.5612792", "0.55933315", "0.55877227", "0.5579284", "0.557...
0.5583555
19
Load dataset from csv file
def load_simulator_data(self, csvfname): data = [] with open(csvfname, 'r') as csvfile: data_tmp = list(csv.reader(csvfile, delimiter=',')) for row in data_tmp: x7 = [float(x) for x in row[7].split(':')] x8 = [float(x) for x in row[8].split(':')] data.append(((row[0], row[1], row[2]), np.array([float(row[3]), float(row[4]), float(row[5]), float(row[6])] + x7 + x8))) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def loadCSV(input_file):", "def load():\n filepat...
[ "0.7754483", "0.77290475", "0.7686873", "0.76696587", "0.75522053", "0.74628526", "0.7399509", "0.7380369", "0.7370351", "0.7362085", "0.72771174", "0.7227899", "0.72193736", "0.7215024", "0.72012967", "0.7180689", "0.71734875", "0.7173242", "0.7164557", "0.7121014", "0.71139...
0.0
-1
Gets info about all datasets. Returns str with MarkDown syntax
def get_info() -> str: req = Request(URL + '/info') context = ssl._create_unverified_context() with urlopen(req, context=context) as response: return response.read().decode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_datasets():\n return METADATA.keys()", "def info(self, datasets=(), tasks=()):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Dataset')\n if any(datasets) or any(tasks):\n data = self._get_select_tasks_and_datasets_data(datasets, tasks)\n else:\n ...
[ "0.7014192", "0.6812912", "0.6607309", "0.65944916", "0.64315784", "0.6382068", "0.636666", "0.63273066", "0.63251984", "0.6180837", "0.6164181", "0.61418563", "0.61337954", "0.61007583", "0.6100278", "0.6081731", "0.60703063", "0.60678405", "0.60557", "0.6037266", "0.6016712...
0.0
-1
Load dataset by dataset_name. Run get_info() to get dataset information
def load_dataset(dataset_name: str, internals_folder_path: str = None) -> Dataset: dataset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), dataset_name) if glob(dataset_path + '*'): return Dataset(glob(dataset_path + '*')[0], internals_folder_path=internals_folder_path) req = Request(URL + '/download') context = ssl._create_unverified_context() values = {'dataset-name': dataset_name} data = urllib.parse.urlencode(values).encode("utf-8") with urlopen(req, data=data, context=context) as answer: total_size = int(answer.headers.get('content-length', 0)) block_size = 1024 save_path = dataset_path + answer.getheader('file-extension') t = tqdm(total=total_size, unit='iB', unit_scale=True) with open(save_path + '.gz', 'wb') as f: while True: chunk = answer.read(block_size) if not chunk: break t.update(len(chunk)) f.write(chunk) t.close() if total_size != 0 and t.n != total_size: print("Failed to download file") return None else: with gzip.open(save_path + '.gz', 'rb') as gz: with open(save_path, 'wb') as f: f.write(gz.read()) os.remove(save_path + '.gz') return Dataset(save_path, internals_folder_path=internals_folder_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")", "def load_dataset(dataset_name):\n url = METADATA[dataset_name][\"url\"]\n f = urlopen(url)\n data = _read_rows(f)\n f...
[ "0.77818465", "0.7501332", "0.7296937", "0.7217461", "0.71556944", "0.70442104", "0.7043356", "0.7002447", "0.69755036", "0.69738203", "0.6970711", "0.6962691", "0.6954843", "0.69172084", "0.69021535", "0.68848944", "0.6871628", "0.6846385", "0.68101037", "0.6778666", "0.6768...
0.69610137
12
Creates a GSEA analysis This GSEA implementation is based on gseaPy.
def __init__(self, data_source, num_resamples=NUM_RESAMPLES, method=GSEA_RANKING_SNR, case=None, control=None, preprocessors=None): logger.debug('GSEA initialised with num_resamples=%d and ranking_method=%s' % (num_resamples, method)) super().__init__(data_source, preprocessors=preprocessors) self.num_resamples = num_resamples self.method = method self.case = case self.control = control
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run_gsea(self, gct_file, gmt_file, cls_file, gsea_dir):\n r = robjects.r\n r.source(self.gsea_r_location)\n r(\"\"\"GSEA( # Input/Output Files :-------------------------------------------\n input.ds = \"{}\", # Input gene expressi...
[ "0.6862905", "0.59698844", "0.5741826", "0.5694374", "0.5569399", "0.55237055", "0.5385206", "0.53803694", "0.5294734", "0.5274186", "0.5242113", "0.51879853", "0.5178812", "0.5157552", "0.51452965", "0.51294", "0.5125397", "0.5094075", "0.50884384", "0.5040239", "0.4964143",...
0.49073586
23
Main method to perform GSEA/MSEA analysis
def get_results(self, preprocess=True): logger.debug('Calculating GSEA') measurement_df = self._get_measurement_df(preprocess) annot_df = self.data_source.get_annotations() joined = pd.merge(left=measurement_df, right=annot_df, left_index=True, right_index=True) joined = joined.set_index('entity_id') unique_ids = [self.data_source._get_unique_id(x) for x in joined.index.values] joined.index = unique_ids joined = joined.drop_duplicates(keep='first').sort_index() # gene_sets is a dict. key is pw name, values are a list of entries in that pathway gene_sets = {} assert len(self.data_source.dataset_pathways) > 0, 'No pathways found in the dataset' pathways = list(self.data_source.dataset_pathways) for pw in pathways: pathway_row_ids = self.data_source.dataset_pathways_to_row_ids[pw] pw_unique_ids = [] for row_id in pathway_row_ids: pw_unique_ids.extend(self.data_source.dataset_row_id_to_unique_ids[row_id]) pw_unique_ids = list(set(pw_unique_ids)) gene_sets[pw] = pw_unique_ids # run GSEA for all comparisons all_dfs = [] for comp in self.data_source.comparisons: if not is_comparison_used(comp, self.case, self.control): continue case = comp['case'] control = comp['control'] logger.debug('Running comparison case=%s control=%s' % (case, control)) pheno_cols = set(self.data_source.get_experimental_design()['groups'][case]) df_cols = measurement_df.columns.values # for each comparison, we need to create C (phenotype labels) # Loop over df_cols and store an indicator into C. 
# Entries in C is 1 if that column belongs to the case group, otherwise it's a 0 C = [] for col in df_cols: if col in pheno_cols: C.append(1) else: C.append(0) C = np.array(C) # actually runs GSEA here data = joined cls = C.tolist() outdir = None min_size = 1 max_size = 1000 permutation_num = self.num_resamples weighted_score_type = 1 permutation_type = 'phenotype' method = self.method ascending = True processes = 1 figsize = (6.5, 6) format = 'pdf', graph_num = 20 no_plot = True seed = None verbose = False msea = MSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num, weighted_score_type, permutation_type, method, ascending, processes, figsize, format, graph_num, no_plot, seed, verbose) msea.run() # convert GSEA results to dataframe df = msea.res2d df = df.reset_index() selected = df[['Term', 'pval', 'fdr', 'es']] selected = selected.rename(columns={'Term': 'mapids'}).set_index('mapids') col_name = comp['name'] + ' p-value' es_colname = comp['name'] + ' ES_score' if self.data_source.database_name is not None: comb_col_name = '%s %s %s' % (self.data_source.database_name, comp['name'], 'comb_p') else: comb_col_name = '%s %s' % (comp['name'], 'comb_p') pathway_df = selected.rename(columns={ 'pval': col_name, 'es': es_colname, 'fdr': comb_col_name }) all_dfs.append(pathway_df) # combine all the results across all comparisons combined_df = pd.concat(all_dfs, axis=1, sort=False) combined_df.index.name = 'mapids' # create a dataframe of pathway mapids and names pw_name_df = [] for map_id in pathways: pw_name = self.data_source.pathway_dict[map_id]['display_name'] pw_name_df.append((map_id, pw_name)) pw_name_df = pd.DataFrame(pw_name_df, columns=['mapids', 'pw_name']).set_index(['mapids']) combined_df = pw_name_df.merge(combined_df, left_index=True, right_index=True) # add formula coverage information mapids = combined_df.index.values.tolist() cov_df = self.data_source._calculate_coverage_df(mapids) coverage_df = cov_df.reindex(combined_df.index) # make 
sure dfs are in same order before merging # Merge the two dfs together pathway_df = pd.merge(combined_df, coverage_df, left_index=True, right_index=True, how='outer') # del pathway_df.index.name pathway_df.rename_axis(None, inplace=True) # post-processing to filter pathway dataframe by the minimum number of hits pathway_df = post_filter_df_by_min_hits(pathway_df, self.data_source.min_hits) return pathway_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_Population...
[ "0.71552646", "0.67432594", "0.6589852", "0.65624803", "0.6458963", "0.6455297", "0.64501965", "0.63569146", "0.6244363", "0.62363696", "0.6217135", "0.6186109", "0.6183862", "0.6182799", "0.6166517", "0.61521643", "0.61434954", "0.6135868", "0.6134632", "0.6118668", "0.61104...
0.6480827
4
forward procedure. No need for inputs to be sorted
def forward(self, input_seqs, input_lens, hidden=None): batch_size = input_seqs.size(1) embedded = self.embedding(input_seqs) embedded = embedded.transpose(0, 1) # [B,T,E] sort_idx = np.argsort(-input_lens) unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx))) input_lens = input_lens[sort_idx] sort_idx = cuda_(torch.LongTensor(sort_idx)) embedded = embedded[sort_idx].transpose(0, 1) # [T,B,E] packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens) outputs, hidden = self.gru(packed, hidden) outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs) outputs = outputs[:,:,:self.hidden_size] + outputs[:,:,self.hidden_size:] outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous() hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous() return outputs, hidden
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, input):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self, input):\n raise NotImplementedError()", "def forward(self, *inputs):\n raise NotIm...
[ "0.7305762", "0.7305762", "0.72925854", "0.71346843", "0.70455575", "0.69174004", "0.6737259", "0.6706367", "0.665543", "0.6645303", "0.6645303", "0.6637352", "0.66037333", "0.6563919", "0.6515652", "0.6515652", "0.6511204", "0.64887595", "0.64721954", "0.6462899", "0.6424968...
0.0
-1
Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware`` or a subclass has been installed. We cache the result on the ``PageMiddleware._installed`` to only run this once.
def installed(cls): try: return cls._installed except AttributeError: name = "mezzanine.pages.middleware.PageMiddleware" installed = middlewares_or_subclasses_installed([name]) setattr(cls, "_installed", installed) return installed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def is_installed(self):\n pass", "def pre_installation(self):\n pass", "def autodiscover():\n from django.utils.importlib import import_module\n global LOADED\n if LOAD...
[ "0.5708004", "0.5457926", "0.53875947", "0.5337035", "0.53081757", "0.5224973", "0.52124566", "0.5208954", "0.51006395", "0.5072566", "0.5057796", "0.5034346", "0.5021708", "0.50119513", "0.5004418", "0.5003487", "0.4994785", "0.49845058", "0.49206704", "0.49062628", "0.49048...
0.8444131
0
Perrequest mechanics for the current page object.
def process_view(self, request, view_func, view_args, view_kwargs): # Load the closest matching page by slug, and assign it to the # request object. If none found, skip all further processing. slug = path_to_slug(request.path_info) pages = Page.objects.with_ascendants_for_slug( slug, for_user=request.user, include_login_required=True ) if pages: page = pages[0] setattr(request, "page", page) context_processors.page(request) else: return # Handle ``page.login_required``. if page.login_required and not is_authenticated(request.user): return redirect_to_login(request.get_full_path()) # If the view isn't Mezzanine's page view, try to return the result # immediately. In the case of a 404 with an URL slug that matches a # page exactly, swallow the exception and try Mezzanine's page view. # # This allows us to set up pages with URLs that also match non-page # urlpatterns. For example, a page could be created with the URL # /blog/about/, which would match the blog urlpattern, and assuming # there wasn't a blog post with the slug "about", would raise a 404 # and subsequently be rendered by Mezzanine's page view. if view_func != page_view: try: return view_func(request, *view_args, **view_kwargs) except Http404: if page.slug != slug: raise # Run page processors. 
extra_context = {} if request.resolver_match: extra_context = request.resolver_match.kwargs.get("extra_context", {}) model_processors = page_processors.processors[page.content_model] slug_processors = page_processors.processors["slug:%s" % page.slug] for (processor, exact_page) in slug_processors + model_processors: if exact_page and not page.is_current: continue processor_response = processor(request, page) if isinstance(processor_response, HttpResponse): return processor_response elif processor_response: try: for k, v in processor_response.items(): if k not in extra_context: extra_context[k] = v except (TypeError, ValueError): name = "%s.%s" % (processor.__module__, processor.__name__) error = ( "The page processor %s returned %s but must " "return HttpResponse or dict." % (name, type(processor_response)) ) raise ValueError(error) return page_view(request, slug, extra_context=extra_context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requ...
[ "0.7065218", "0.6655661", "0.6454575", "0.6180861", "0.6127758", "0.59273225", "0.58338803", "0.5830356", "0.579896", "0.57571733", "0.5753007", "0.56619835", "0.5652793", "0.557103", "0.5565266", "0.55646205", "0.5558261", "0.5553076", "0.5551274", "0.5549091", "0.552509", ...
0.0
-1
Checks if an object is a number. That is, a ``float`` or an ``int``. Where this differs from simply checking if an object is an instance of ``numbers.Number`` is that boolean objects are also a number by that measure (which, technically,
def is_numeric(number): if isinstance(number, bool): return False elif isinstance(number, int) or isinstance(number, float): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isNumeric(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number)", "def isNumeric(obj):\n return isinstance(obj, (int, float, bool))", "def is_numeric(obj):\n return isinstance(obj, (int, float, complex))", "def is_number(x):\n if isinstance(x, (int, float)):\n r...
[ "0.84229404", "0.8354426", "0.80287915", "0.8011571", "0.79731923", "0.7880432", "0.78278196", "0.78255373", "0.7822248", "0.7775982", "0.7771816", "0.76862437", "0.7623797", "0.76032704", "0.7492383", "0.74228925", "0.7375729", "0.7342082", "0.73297185", "0.7325915", "0.7303...
0.75077426
14
Checks if all the arguments it receives are numeric (according to
def are_numeric(*values): for value in values: if not is_numeric(value): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_arguments(arguments):\n quit = False\n for argument, value in vars(arguments).items():\n try:\n float(value)\n except:\n print(\"{} must be numeric\".format(argument))\n quit = True\n if quit:\n exit(1)", "def check_for_float_and_int(check)...
[ "0.79766375", "0.72621953", "0.7188788", "0.7124984", "0.7036451", "0.7026137", "0.6995436", "0.69920516", "0.6922569", "0.6913737", "0.68856776", "0.68640095", "0.68215996", "0.67944366", "0.6732328", "0.6717718", "0.6717057", "0.6711378", "0.664611", "0.6630572", "0.6597417...
0.7544063
1
Drop the unit definition silently
def _drop_units(q): try: return q.magnitude except: return q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeUnitDefinition(self, *args):\n return _libsbml.Model_removeUnitDefinition(self, *args)", "def unsetName(self):\n return _libsbml.UnitDefinition_unsetName(self)", "def unsetKind(self):\n return _libsbml.Unit_unsetKind(self)", "def removeUnit(self, *args):\n return _libsbm...
[ "0.7016923", "0.671953", "0.65005463", "0.6406769", "0.6294587", "0.6132818", "0.61080724", "0.61080724", "0.6019801", "0.6007988", "0.5972027", "0.58067775", "0.5797811", "0.5779521", "0.5779516", "0.5762775", "0.574033", "0.57371294", "0.57115144", "0.5674466", "0.5610218",...
0.61350024
5
Adapt the resolution of the spectra to match the lick definitions Lick definitions have different resolution elements as function of wavelength. These definition are hardcoded in this function
def reduce_resolution(wi, fi, fwhm0=0.55, sigma_floor=0.2): # all in AA w_lick_res = (4000., 4400., 4900., 5400., 6000.) lick_res = (11.5, 9.2, 8.4, 8.4, 9.8) # FWHM in AA w = np.asarray(wi) flux = np.atleast_2d(fi) # Linear interpolation of lick_res over w # numpy interp does constant instead of extrapolation # res = np.interp(w, w_lick_res, lick_res) # spline order: 1 linear, 2 quadratic, 3 cubic ... from scipy.interpolate import InterpolatedUnivariateSpline res = InterpolatedUnivariateSpline(w_lick_res, lick_res, k=1)(w) # Compute width from fwhm const = 2. * np.sqrt(2. * np.log(2)) # conversion fwhm --> sigma lick_sigma = np.sqrt((res ** 2 - fwhm0 ** 2)) / const # Convolution by g=1/sqrt(2*pi*sigma^2) * exp(-r^2/(2*sigma^2)) flux_red = np.zeros(flux.shape, dtype=flux.dtype) for i, sigma in enumerate(lick_sigma): maxsigma = 3. * sigma # sampling floor: min (0.2, sigma * 0.1) delta = min(sigma_floor, sigma * 0.1) delta_wj = np.arange(-maxsigma, + maxsigma, delta) wj = delta_wj + w[i] for k, fk in enumerate(flux): fluxj = np.interp(wj, w, fk, left=0., right=0.) flux_red[k, i] = np.sum(fluxj * delta * np.exp(-0.5 * (delta_wj / sigma) ** 2)) flux_red /= lick_sigma * const return flux_red.reshape(np.shape(fi))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_resolution( self ):\r\n offset = 0\r\n # if current and skinned resolutions differ and skinned resolution is not\r\n # 1080i or 720p (they have no 4:3), calculate widescreen offset\r\n if ( ( not ( self.currentResolution == self.resolution ) ) and self.resolution > 1 ):\r\n ...
[ "0.6454344", "0.62690383", "0.58017045", "0.57871443", "0.5685021", "0.56665695", "0.5640041", "0.5633661", "0.56125605", "0.5611095", "0.55840474", "0.55816716", "0.5537546", "0.55354196", "0.54867184", "0.54815644", "0.5469447", "0.54505324", "0.5441042", "0.5431353", "0.54...
0.0
-1
return a dictionary of the current index
def to_dict(self): d = {} d.update(**self._lick) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self):\n return dict(data='index')", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def index(self):\n return self._index", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', ...
[ "0.7655854", "0.71581787", "0.7109777", "0.7092145", "0.6977287", "0.6977287", "0.6973881", "0.69227993", "0.6913379", "0.6897256", "0.68841493", "0.6860583", "0.68244886", "0.68158996", "0.6750094", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.67243034", "0.672...
0.0
-1
return the unitwise definition corresponding to attrname
def _get_wavelength_attrs_with_units(self, attrname, units='AA'): attr = self._lick[attrname] if self.wavelength_unit is not None: if units is None: return attr * unit[self.wavelength_unit] else: return (attr * unit[self.wavelength_unit]).to(units) else: return attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * Unit(self.wavelength_unit)\n else:\n return (attr * Unit(self.wavelength_unit)...
[ "0.63725454", "0.60422206", "0.5772077", "0.57339555", "0.5650211", "0.5626752", "0.5624979", "0.55762106", "0.55762106", "0.5543511", "0.55427015", "0.55405295", "0.5533721", "0.5516388", "0.54795426", "0.5476638", "0.5459001", "0.5459001", "0.5439841", "0.54318297", "0.5421...
0.6441973
0
display information about the current Index
def info(self): txt = """Lick Index {s.name} wavelength units: {s.wavelength_unit} Index Band: {s.band} Blue continuum band: {s.blue} Red continuum band: {s.red} Measurement unit: {s.index_unit}""".format(s=self) print(txt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self):\r\n return None", "def get_info_format(self):\n return self.session.api.get_index(self)", "def show_elasticsearch_index_info(cluster: str, index: str):\n\n elastic = sreElastic(host=cluster)\n pp = pprint.PrettyPrinter(indent=2, width=120)\n\n print(\"\\nLocation:\")\n ...
[ "0.7144294", "0.6980231", "0.6847654", "0.68377274", "0.67996037", "0.67142814", "0.6706408", "0.6706408", "0.66565186", "0.66213363", "0.6611364", "0.65879714", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0.65692246", "0...
0.6661338
9
compute spectral index after continuum subtraction
def __call__(self, *args, **kwargs): return self.get(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = ...
[ "0.6287062", "0.6141436", "0.59078664", "0.58944124", "0.58218056", "0.5802506", "0.5760204", "0.5752179", "0.5743206", "0.57092756", "0.5689199", "0.5686934", "0.56437397", "0.5623954", "0.5587823", "0.5585669", "0.5536266", "0.5534484", "0.5534108", "0.5532117", "0.55112505...
0.0
-1
compute spectral index after continuum subtraction
def get(self, wave, flux, **kwargs): if hasUnit(wave): _w = wave.to('AA').magnitude else: print("Warning: assuming units are in Angstroms") _w = _drop_units(wave) _f = _drop_units(flux) blue = self._get_wavelength_attrs_with_units('blue').magnitude red = self._get_wavelength_attrs_with_units('red').magnitude band = self._get_wavelength_attrs_with_units('band').magnitude nocheck = kwargs.pop('nocheck', False) not_covered = (blue[0] < _w[0]) | (red[-1] > _w[-1]) if (not_covered): if (not nocheck): raise ValueError("Spectrum does not cover this index.") else: return np.zeros(_f.shape[0]) * float('nan') else: return self._get_indice(_w, _f, blue, red, band, self.index_unit, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = ...
[ "0.6287113", "0.6141516", "0.59084785", "0.5892982", "0.58204657", "0.58018076", "0.5760177", "0.5751786", "0.5741839", "0.57074076", "0.5688546", "0.5685407", "0.5644722", "0.5622003", "0.55865675", "0.5584795", "0.55340075", "0.55338055", "0.55332595", "0.5530644", "0.55118...
0.0
-1
compute spectral index after continuum subtraction
def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1, **kwargs): wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue, red, band=band, degree=degree) if unit in (0, 'ew', 'EW'): return np.trapz(1. - fi, wi, axis=-1) else: m = np.trapz(fi, wi, axis=-1) m = -2.5 * np.log10(m / np.ptp(wi)) return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = ...
[ "0.6286422", "0.61412746", "0.59083164", "0.58922887", "0.58213574", "0.580119", "0.57615376", "0.5752075", "0.5741933", "0.5708254", "0.56879544", "0.56858265", "0.56429803", "0.56224555", "0.55869263", "0.558585", "0.55347013", "0.5534027", "0.5532789", "0.553044", "0.55096...
0.0
-1
cut out and normalize flux around a line
def continuum_normalized_region_around_line(cls, wi, fi, blue, red, band=None, degree=1): w = np.asarray(wi) flux = np.atleast_2d(fi) # index is true in the region where we fit the polynomial indcont = (((w >= blue[0]) & (w <= blue[1])) | ((w >= red[0]) & (w <= red[1])) ) # index of the region we want to return if band is None: band = blue[0], red[1] indrange = (w > band[0]) & (w < band[1]) wnew = w[indrange] wcont = w[indcont] # make a flux array of shape # (number of spectra, number of points in indrange) f = np.zeros((flux.shape[0], indrange.sum())) for i in range(flux.shape[0]): # fit polynomial of second order to the continuum region linecoeff = np.polyfit(wcont, flux[i, indcont], degree) # divide the flux by the polynomial and put the result in our new flux # array f[i, :] = flux[i, indrange] / np.polyval(linecoeff, wnew) return wnew, np.squeeze(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def line_lum(line_flux, dist): \n line_lum = 4 * pi * (dist*u.pc)**2 * line_flux * u.erg / (u.s * (u.cm)**2)\n line_lum = line_lum.decompose().to(u.W)\n return line_lum/u.W", "def normalize_vector(line):\n if isinstance(line, pd.DataFrame):\n line = line.values\n try:\n n = np.sqr...
[ "0.6464081", "0.59551424", "0.58137214", "0.5795742", "0.5787084", "0.57819617", "0.57134414", "0.5706386", "0.56656975", "0.5642102", "0.56365347", "0.55645335", "0.5524223", "0.5506707", "0.54946476", "0.5492588", "0.54654", "0.5452259", "0.5443912", "0.54263026", "0.541494...
0.51007783
57
any comment in the input file
def description(self): return self._hdr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_comments(self):\n fp = FilePath(self.mktemp())\n fp.setContent('something\\n#commented\\ncool')\n self.assertEqual(list(inventoryReader(fp.path)), ['something', 'cool'])", "def skip_comments(filepointer):\n\tcomments = []\n\tdata = '#'\n\ttry:\n\t\tpos = filepointer.tell()\n\texcept...
[ "0.7160076", "0.6916625", "0.6867486", "0.6765558", "0.6749642", "0.6743258", "0.6716404", "0.670146", "0.6650719", "0.65236187", "0.64683205", "0.64683205", "0.64408094", "0.6426981", "0.6329149", "0.63261276", "0.6201037", "0.6192405", "0.61553055", "0.6140866", "0.613756",...
0.0
-1
read the list of lick indices
def _read_lick_list(cls, fname=__default__, comment='#'): with open(fname, 'r') as f: data = {} hdr = [] for line in f: if line[0] != comment: l = line.split() attr = dict( band=(float(l[1]), float(l[2])), blue=(float(l[3]), float(l[4])), red=(float(l[5]), float(l[6])), unit='mag' if int(l[7]) > 0 else 'ew', ) name = l[8] data[name] = attr else: hdr.append(line[1:-1]) return data, hdr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_indices(path):\n paths = sorted(tf.io.gfile.glob('%s-*-of-*_index.json' % path))\n all_indices = []\n for path in paths:\n json_str = epath.Path(path).read_text()\n # parse it back into a proto.\n shard_index = json.loads(json_str)\n all_indices.append(list(shard_index['index']))\n return...
[ "0.64467746", "0.6441595", "0.6411123", "0.63643146", "0.6275804", "0.6236173", "0.6204214", "0.6117496", "0.6063966", "0.6062045", "0.60506487", "0.59956235", "0.59590757", "0.59548295", "0.5933144", "0.59210235", "0.59165895", "0.5916158", "0.59155154", "0.588967", "0.58896...
0.0
-1
Size of the library
def __len__(self): return len(self.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_size(self):\n\t\treturn 4*self.version + 17", "def get_size(self):", "def getSize(self) -> long:\n ...", "def get_size(self):\n ...", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def calc_size(self):\r\n pass", "def size(self):\n ...
[ "0.80670005", "0.7656748", "0.76485074", "0.74939597", "0.7419034", "0.7419034", "0.74108195", "0.7401268", "0.73442686", "0.7325931", "0.7315922", "0.72330016", "0.72330016", "0.72330016", "0.72193474", "0.7187202", "0.71814", "0.7137367", "0.7112294", "0.70785505", "0.70591...
0.0
-1
Make this object like a dictionary and load one or multiple filters
def __getitem__(self, name): with self as s: try: f = s._load_filter(name) except TypeError: f = [s._load_filter(k) for k in name] return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] =...
[ "0.69429255", "0.68280834", "0.678945", "0.67390245", "0.6707644", "0.6680645", "0.66653675", "0.664991", "0.66167235", "0.6582854", "0.6568152", "0.6519853", "0.6499183", "0.64964235", "0.64200693", "0.6409539", "0.6404884", "0.63648516", "0.6337536", "0.62964696", "0.627265...
0.6210194
24
Load a given filter from the library
def _load_filter(self, fname, **kwargs): with self as s: return LickIndex(fname, s._content[fname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFilterFromString(spec):\n return _loadPluginFromString(spec, \"ufo2ft.filters\", isValidFilter)", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def _load_filter(self, fname, interp=True, lamb=None):\n ftab = self.hdf\n if hasattr(fname, 'decode'):\n ...
[ "0.7596461", "0.73965544", "0.70921934", "0.6901158", "0.6694745", "0.6442038", "0.6273887", "0.6259627", "0.62564397", "0.6248318", "0.623958", "0.62291145", "0.6115416", "0.6082468", "0.6026562", "0.59056175", "0.5899553", "0.5848221", "0.58212197", "0.5782896", "0.57552046...
0.6228943
12
Scan for independent loops and set up dictionaries.
def main(self, verbose=0): indepdict=self.scan_for_loop(self.indeploop) pegdict1 = self.scan_for_loop(self.pegloop1) pegdict2 = self.scan_for_loop(self.pegloop2) if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0: return dict() alldict = dict(indepdict) alldict.update(pegdict1) alldict.update(pegdict2) indepcomb=self.get_combo_list(indepdict, 0) pegcomb1=self.get_combo_list(pegdict1, 1) pegcomb2=self.get_combo_list(pegdict2, 1) allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2) datasets = self.prepare_looped_datasets(alldict, allcombs) createdfiles = self.create_input_files(datasets) if verbose == 1: self.print_list(indepcomb) self.print_list(pegcomb1) self.print_list(pegcomb2) self.print_list(allcombs) for datakey in datasets: self.print_list(datasets[datakey]) return createdfiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict...
[ "0.60553885", "0.5839073", "0.5820277", "0.5816718", "0.5751431", "0.5579861", "0.5559903", "0.55534524", "0.5551435", "0.55467737", "0.5465791", "0.5446664", "0.54339755", "0.54255664", "0.53983235", "0.5374087", "0.53734773", "0.53571624", "0.53484", "0.53369623", "0.533300...
0.59351087
1
Prepare a combination list of looping indices.
def get_combo_list(self, loopdict, pegged=0): combolist=list() flatlists=list() loopkeys = list(loopdict.keys()) loopkeys.sort() if pegged == 0: for loopkey in loopkeys: numloop = len(loopdict[loopkey]['looplist']) loopct=0 flatlist=list() while loopct < numloop: flatlist.append(str(loopkey) + '-' + str(loopct)) loopct = loopct + 1 flatlists.append(flatlist) import itertools prod_list = itertools.product(*flatlists) stopiter = 0 while not stopiter: try: mycomb = prod_list.next() except StopIteration: stopiter = 1 if stopiter == 0: combolist.append(list(mycomb)) elif pegged == 1: if len(loopkeys) == 0: return combolist #Empty list numloop = len(loopdict[loopkeys[0]]['looplist']) #all same len numct=0 while numct < numloop: flatlist=list() for loopkey in loopkeys: flatlist.append(str(loopkey) + '-' + str(numct)) numct = numct + 1 combolist.append(flatlist) #print "TTM DEBUG: ", flatlists return combolist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_index_iterator(indexes, length):\n return combinations(indexes, length)", "def construct_indices(after_pooling):\n our_indices = np.zeros_like(after_pooling, dtype=np.int64)\n batch_num, channel_num, row_num, col_num = after_pooling.shape\n for batch_id in range(batch_num):\n for ...
[ "0.6275713", "0.6154315", "0.6077849", "0.6049278", "0.5934963", "0.5888981", "0.5846226", "0.5782302", "0.57822573", "0.5774816", "0.5757438", "0.57275736", "0.56466776", "0.56323755", "0.56219804", "0.55682474", "0.5567099", "0.5565119", "0.55425763", "0.54685265", "0.54512...
0.51132363
60
Combine two pegged lists and one independent list.
def combine_three_combo_lists(self, indeplist, peglist1, peglist2): templist=list() threelist=list() templist = self.combine_combo_lists(indeplist, peglist1) threelist = self.combine_combo_lists(templist, peglist2) return threelist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_lists(l1, l2):\n return [ *l1, *l2 ]", "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.app...
[ "0.7555634", "0.7349701", "0.7179665", "0.70421684", "0.6992282", "0.6892206", "0.6813232", "0.68128705", "0.6796102", "0.67734545", "0.6721897", "0.6630253", "0.6618824", "0.65558565", "0.6551609", "0.6527701", "0.64997417", "0.64883775", "0.64660096", "0.6453299", "0.642906...
0.6404212
23
Prepare looped lines from looping dictionary.
def prepare_looped_lines(self, alldict, comblist): loopline_dict=dict() for stridx in comblist: lidx = int(stridx.split('-')[0]) loopidx = int(stridx.split('-')[1]) loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\n' return loopline_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict...
[ "0.62352186", "0.5535244", "0.540953", "0.538531", "0.53788745", "0.5370846", "0.53634304", "0.5115916", "0.510933", "0.50691956", "0.5020476", "0.5014774", "0.50135577", "0.50128126", "0.5011278", "0.5003016", "0.49997228", "0.49952468", "0.49902463", "0.49880245", "0.498213...
0.7311217
0
Prepare looped datasets from looping lines.
def prepare_looped_datasets(self, alldict, allcombs): datasets_dict=dict() numcombs = len(allcombs) combct = 0 while combct < numcombs: newdata = list(self.baseinput.data) loopedlines = dict() loopedlines = self.prepare_looped_lines(alldict, allcombs[combct]) for lvalidx in loopedlines.keys(): newdata[lvalidx] = loopedlines[lvalidx] datasets_dict[combct] = newdata combct = combct + 1 return datasets_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _prepare_sets(self):\n\n ...
[ "0.6232418", "0.6178191", "0.61240345", "0.6034664", "0.5999491", "0.59856147", "0.5969796", "0.5958377", "0.5950267", "0.5871935", "0.58671767", "0.5818414", "0.5809877", "0.5804624", "0.5801337", "0.5794393", "0.5745656", "0.5720152", "0.5712723", "0.5677284", "0.56412107",...
0.6497661
0
Create independently looped input files.
def create_input_files(self, datasets_dict): ifname = self.keywords['inputfile'] dirstem = os.path.dirname(ifname) basename = os.path.basename(ifname).split('.')[0] createdfiles=list() if dirstem == "": dirstem = os.getcwd() dkeys = datasets_dict.keys() dkeys.sort() dct=1 for didx in dkeys: newfile = MASTFile() newfile.data = list(datasets_dict[didx]) newname="%s/loop_%s_%s.inp" % (dirstem, basename, str(dct).zfill(2)) newfile.to_file(newname) #createdfiles.append(os.path.basename(newname)) createdfiles.append(newname) dct=dct+1 return createdfiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_files(self, dir, num_files=10):\n for i in range(num_files):\n self._make_random_file(dir)", "def create_input_files(in_dir, R, I):\n def get_filepath(in_volume, infiles_partition):\n _3d_pos = numeric_to_3d_pos(in_volume.index, infiles_partition, order='F')\n i, j, k...
[ "0.6580419", "0.6510424", "0.6231249", "0.6147335", "0.61276037", "0.60895264", "0.60482085", "0.6024064", "0.59860575", "0.5985176", "0.59534013", "0.58648974", "0.58312774", "0.582473", "0.5797607", "0.57878214", "0.5783304", "0.5764239", "0.5764155", "0.57330567", "0.57255...
0.6638289
0
Extract constant names from sybdb.h to use as python constants
def extract_constants(freetds_include="sybdb.h", constants_file="bcp_constants.py"): fileno, source_file = mkstemp(suffix=".c", text=True) write(fileno, "#include <{}>".format(freetds_include).encode()) close(fileno) fileno, include_directives = mkstemp(suffix=".txt") close(fileno) if ON_WINDOWS: cmd_template = "cl /E {includes} {source} > {output}" else: cmd_template = "cpp {includes} '{source}' > '{output}'" cmd = cmd_template.format( output=normpath(include_directives), source=normpath(source_file), includes=" ".join( "-I{}".format(normpath(_include)) for _include in include_dirs ) ) fifo = Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True) fifo.communicate() fifo.wait() remove(source_file) if fifo.returncode < 0: raise Exception("Cannot run preprocessor step") row_regex = re.compile('[\r\n]+') field_regex = re.compile('[\s]+') with open(include_directives, "r") as fd: include_paths = list( _filename for contents in [fd.read()] for _row in row_regex.split(contents) if _row.find(freetds_include) > -1 for _index, _word in enumerate(field_regex.split(_row)) if _index == 2 for _filename in [_word.strip('"')] if exists(_filename) ) remove(include_directives) for include_file in include_paths: with open(include_file, "r") as fd: definition_pairs = [ (_values[1], int(_values[2])) for contents in [fd.read()] for _row in row_regex.split(contents) for _values in [field_regex.split(_row)] if len(_values) == 3 and _values[0] == "#define" and _values[2].isdigit() ] if len(definition_pairs): with open(constants_file, "w") as output_fd: output_fd.write("\n".join("%s=%d" % _row for _row in definition_pairs)) break else: raise Exception("Couldn't find a freetds include file")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_declarations(self):\n return \"extern const unsigned int %s;\\n\" % self.name", "def get_calculable_constant_names_latex():\n return r\"t_0\", r\"S_{rr}\", r\"S_{r\\theta}\", r\"S_{rz}\", r\"S_{zz}\" \\\n r\"\\alpha\", r\"\\beta\", r\"\\gamma\", r\"C_{13}\", r\"C_{33}\", \...
[ "0.63279045", "0.6142163", "0.594471", "0.5942139", "0.59368765", "0.59215355", "0.590987", "0.58534914", "0.57482266", "0.57082814", "0.5656697", "0.56237125", "0.55894226", "0.55852455", "0.5570691", "0.5560409", "0.5546228", "0.5487835", "0.5454318", "0.5445472", "0.543823...
0.6312631
1
Return holdout indices respecting hte temporal ordering of the data
def time_series_hold_out_validation(random_state: np.random.RandomState, val_share: float, indices: np.ndarray, **kwargs: Any) \ -> Tuple[np.ndarray, np.ndarray]: n_prediction_steps = kwargs['n_prediction_steps'] n_repeats = kwargs['n_repeats'] # Time Series prediction only requires on set of prediction for each # This implement needs to be combined with time series forecasting dataloader, where each time an entire # time series is used for prediction cv = TimeSeriesSplit(n_splits=2, test_size=1 + n_prediction_steps * (n_repeats - 1), gap=n_prediction_steps - 1) train, val = holdout_split_forecasting(holdout=cv, indices=indices, n_prediction_steps=n_prediction_steps, n_repeats=n_repeats) return train, val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pulling_indices(self, weight):\n pass", "def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of ...
[ "0.6033882", "0.5888383", "0.5833695", "0.576953", "0.5758707", "0.56996095", "0.5611121", "0.5609971", "0.5606583", "0.5600059", "0.5593295", "0.5524574", "0.54750305", "0.54693586", "0.5458139", "0.54525465", "0.54286104", "0.54272556", "0.5421725", "0.5421557", "0.5390778"...
0.0
-1
Standard k fold cross validation.
def k_fold_cross_validation(random_state: np.random.RandomState, num_splits: int, indices: np.ndarray, **kwargs: Any ) -> List[Tuple[np.ndarray, np.ndarray]]: shuffle = kwargs.get('shuffle', True) cv = KFold(n_splits=num_splits, random_state=random_state if shuffle else None, shuffle=shuffle) splits = list(cv.split(indices)) return splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def k_folds_cross_validate(self, k):\n start_time = time.time()\n partition_size = int(self.num_samples / k)\n partitions = [\n (i * partition_size, (i + 1) * partition_size) for i in range(k)\n ]\n average_accuracy = 0.0\n for start, end in partitions:\n ...
[ "0.77007174", "0.7679591", "0.76575476", "0.76463956", "0.7470647", "0.7434266", "0.7416312", "0.73822284", "0.7370632", "0.73616713", "0.73327446", "0.73284346", "0.722847", "0.7186071", "0.71467024", "0.7122442", "0.70457053", "0.6964643", "0.6961495", "0.6944377", "0.69214...
0.63284665
56
Returns train and validation indices respecting the temporal ordering of the data.
def time_series_cross_validation(random_state: np.random.RandomState, num_splits: int, indices: np.ndarray, **kwargs: Any ) -> List[Tuple[np.ndarray, np.ndarray]]: test_size = kwargs['n_prediction_steps'] n_repeats = kwargs['n_repeats'] cv = TimeSeriesSplit(n_splits=num_splits, test_size=test_size * n_repeats, gap=0) splits = [( indices[split[0]], indices[split[1][[-1 - n * test_size for n in reversed(range(n_repeats))]]]) for split in cv.split(indices)] return splits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_train_index():\n data_size = (NUM_CLASS - 1) * NUM_DATA_PER_CLASS\n return np.array([i for i in range(0, data_size)])", "def get_training_index():\n return list(range(0, 305))", "def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(ra...
[ "0.6553474", "0.6376504", "0.6233736", "0.5944764", "0.59190506", "0.5901625", "0.58788", "0.58554393", "0.58391976", "0.5713936", "0.56517017", "0.563909", "0.56207126", "0.56050724", "0.5597223", "0.5592754", "0.55752015", "0.5573872", "0.5544355", "0.5543345", "0.55142456"...
0.0
-1
Returns the indices without performing any operation on them. To be used for fitting on the whole dataset. This strategy is not compatible with HPO search.
def no_resampling(random_state: np.random.RandomState, indices: np.ndarray) -> np.ndarray: return indices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indices(self) -> np.ndarray:\n return self.impl.indices", "def get_indices(self):\r\n return self._indices", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "d...
[ "0.7181901", "0.69676125", "0.69128466", "0.6892532", "0.68412507", "0.67869836", "0.6754658", "0.67145926", "0.6703281", "0.66633874", "0.65803033", "0.6534292", "0.6509533", "0.64898914", "0.6441676", "0.6402624", "0.63538474", "0.6317454", "0.6311111", "0.6274377", "0.6259...
0.65598696
11
Get open accounts Returns array with active account numbers
async def get_open_accounts(self): result = [] URL = API_HOST + "/api/resources/header" async with async_timeout.timeout(TIMEOUT): response = await self.session.get(URL) json_data = await response.json() accounts = json_data["data"]["accounts"]["data"]["data"] for account in accounts: if account["statusCategory"] == STATUS_CATEGORY_OPEN: result.append(account["accountNumber"]) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n ...
[ "0.63580984", "0.6303217", "0.62876016", "0.6213196", "0.61325455", "0.61324793", "0.6089535", "0.6086548", "0.6079627", "0.6079134", "0.6050323", "0.6013716", "0.59957004", "0.59836334", "0.59793425", "0.5972705", "0.5965284", "0.59572", "0.5945932", "0.59283507", "0.5911551...
0.8321022
0
Logging out from fpl
async def logout(self): _LOGGER.info("Logging out") URL_LOGOUT = API_HOST + "/api/resources/logout" try: async with async_timeout.timeout(TIMEOUT): await self.session.get(URL_LOGOUT) except Exception as e: _LOGGER.error(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout():", "def logout():\n login()", "def logout(self):", "def logout(self):\n pass", "def do_logout():\n\n session['authenticated'] = False\n session['username'] = None\n session['name'] = None\n session['cpi'] = None\n session['grp_size'] = None...
[ "0.7692397", "0.73293793", "0.7156266", "0.70811003", "0.7076655", "0.70410043", "0.70080215", "0.6955402", "0.6835801", "0.6776245", "0.67325246", "0.6707584", "0.67055", "0.6672805", "0.66292965", "0.661934", "0.6611719", "0.6587299", "0.6579601", "0.6559363", "0.6532657", ...
0.0
-1
Get data from resources endpoint
async def update(self, account) -> dict: data = {} URL_RESOURCES_ACCOUNT = API_HOST + "/api/resources/account/{account}" async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_RESOURCES_ACCOUNT.format(account=account) ) account_data = (await response.json())["data"] premise = account_data.get("premiseNumber").zfill(9) data["meterSerialNo"] = account_data["meterSerialNo"] # currentBillDate currentBillDate = datetime.strptime( account_data["currentBillDate"].replace("-", "").split("T")[0], "%Y%m%d" ).date() # nextBillDate nextBillDate = datetime.strptime( account_data["nextBillDate"].replace("-", "").split("T")[0], "%Y%m%d" ).date() data["current_bill_date"] = str(currentBillDate) data["next_bill_date"] = str(nextBillDate) today = datetime.now().date() data["service_days"] = (nextBillDate - currentBillDate).days data["as_of_days"] = (today - currentBillDate).days data["remaining_days"] = (nextBillDate - today).days # zip code # zip_code = accountData["serviceAddress"]["zip"] # projected bill pbData = await self.__getFromProjectedBill(account, premise, currentBillDate) data.update(pbData) # programs programsData = account_data["programs"]["data"] programs = dict() _LOGGER.info("Getting Programs") for program in programsData: if "enrollmentStatus" in program.keys(): key = program["name"] programs[key] = program["enrollmentStatus"] == ENROLLED def hasProgram(programName) -> bool: return programName in programs and programs[programName] # Budget Billing program if hasProgram("BBL"): data["budget_bill"] = True bbl_data = await self.__getBBL_async(account, data) data.update(bbl_data) else: data["budget_bill"] = False # Get data from energy service data.update( await self.__getDataFromEnergyService(account, premise, currentBillDate) ) # Get data from energy service ( hourly ) # data.update( # await self.__getDataFromEnergyServiceHourly( # account, premise, currentBillDate # ) # ) data.update(await self.__getDataFromApplianceUsage(account, 
currentBillDate)) data.update(await self.__getDataFromBalance(account)) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resources():\n return Response(f\"{Resource.get_all_resources()}\", 200, mimetype='text/plain')", "def list(self, **kwargs):\n data, self.endpoint = self.data_endpoint(kwargs)\n r = super(Resource, self).list(**data)\n\n # Change display settings and data format for human consumpt...
[ "0.7040653", "0.7014533", "0.6972194", "0.6868909", "0.6711269", "0.67088366", "0.6694223", "0.6652421", "0.65987545", "0.6597155", "0.6576477", "0.6572885", "0.65307814", "0.6527663", "0.6498717", "0.6484466", "0.64421904", "0.64192057", "0.63806015", "0.63806015", "0.638060...
0.0
-1
get data from projected bill endpoint
async def __getFromProjectedBill(self, account, premise, currentBillDate) -> dict: data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_RESOURCES_PROJECTED_BILL.format( account=account, premise=premise, lastBillDate=currentBillDate.strftime("%m%d%Y"), ) ) if response.status == 200: projectedBillData = (await response.json())["data"] billToDate = float(projectedBillData["billToDate"]) projectedBill = float(projectedBillData["projectedBill"]) dailyAvg = float(projectedBillData["dailyAvg"]) avgHighTemp = int(projectedBillData["avgHighTemp"]) data["bill_to_date"] = billToDate data["projected_bill"] = projectedBill data["daily_avg"] = dailyAvg data["avg_high_temp"] = avgHighTemp except Exception as e: _LOGGER.error(e) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def __getBBL_async(self, account, projectedBillData) -> dict:\n _LOGGER.info(\"Getting budget billing data\")\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(\n URL_BUDGET_BILLING_PREMISE_D...
[ "0.6360357", "0.6014319", "0.59525716", "0.5819394", "0.5776328", "0.57017654", "0.5693053", "0.56899124", "0.5679887", "0.5679887", "0.5679887", "0.56725025", "0.56725025", "0.5663544", "0.5658554", "0.5646016", "0.56169564", "0.5605687", "0.5596649", "0.5570589", "0.5530671...
0.59895027
2
Get budget billing data
async def __getBBL_async(self, account, projectedBillData) -> dict: _LOGGER.info("Getting budget billing data") data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_BUDGET_BILLING_PREMISE_DETAILS.format(account=account) ) if response.status == 200: r = (await response.json())["data"] dataList = r["graphData"] # startIndex = len(dataList) - 1 billingCharge = 0 budgetBillDeferBalance = r["defAmt"] projectedBill = projectedBillData["projected_bill"] asOfDays = projectedBillData["as_of_days"] for det in dataList: billingCharge += det["actuallBillAmt"] calc1 = (projectedBill + billingCharge) / 12 calc2 = (1 / 12) * (budgetBillDeferBalance) projectedBudgetBill = round(calc1 + calc2, 2) bbDailyAvg = round(projectedBudgetBill / 30, 2) bbAsOfDateAmt = round(projectedBudgetBill / 30 * asOfDays, 2) data["budget_billing_daily_avg"] = bbDailyAvg data["budget_billing_bill_to_date"] = bbAsOfDateAmt data["budget_billing_projected_bill"] = float(projectedBudgetBill) async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_BUDGET_BILLING_GRAPH.format(account=account) ) if response.status == 200: r = (await response.json())["data"] data["bill_to_date"] = float(r["eleAmt"]) data["defered_amount"] = float(r["defAmt"]) except Exception as e: _LOGGER.error(e) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetCampaignBudget(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def budget(self):\n return self._budget", "def billing_info(self):\r\n r...
[ "0.66888595", "0.6544355", "0.64032984", "0.6293646", "0.62567914", "0.625594", "0.62317514", "0.6158417", "0.6107173", "0.60221004", "0.59583", "0.586195", "0.58142555", "0.57493407", "0.56994355", "0.5688151", "0.5630534", "0.5616468", "0.56075585", "0.5590992", "0.5573143"...
0.7448803
0
get data from appliance usage
async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict: _LOGGER.info("Getting appliance usage data") JSON = {"startDate": str(lastBilledDate.strftime("%m%d%Y"))} data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.post( URL_APPLIANCE_USAGE.format(account=account), json=JSON ) if response.status == 200: electric = (await response.json())["data"]["electric"] full = 100 for e in electric: rr = round(float(e["percentageDollar"])) if rr < full: full = full - rr else: rr = full data[e["category"].replace(" ", "_")] = rr except Exception as e: _LOGGER.error(e) return {"energy_percent_by_applicance": data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\...
[ "0.6542614", "0.6461803", "0.6203412", "0.6135864", "0.5930974", "0.5910394", "0.5888209", "0.5844207", "0.58017164", "0.5787487", "0.57435703", "0.5714978", "0.5709852", "0.564539", "0.5628239", "0.5616134", "0.5614402", "0.56101", "0.5602017", "0.55986047", "0.5594599", "...
0.6843256
0
get data from appliance usage
async def __getDataFromBalance(self, account) -> dict: _LOGGER.info("Getting appliance usage data") data = {} URL_BALANCE = API_HOST + "/api/resources/account/{account}/balance?count=-1" try: async with async_timeout.timeout(TIMEOUT): response = await self.session.get(URL_BALANCE.format(account=account)) if response.status == 200: data = (await response.json())["data"] indice = [i for i, x in enumerate(data) if x["details"] == "DEBT"][ 0 ] deb = data[indice]["amount"] except Exception as e: _LOGGER.error(e) return {"balance_data": data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n JSON = {\"startDate\": str(lastBilledDate.strftime(\"%m%d%Y\"))}\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n ...
[ "0.68421215", "0.6543652", "0.64636284", "0.6203195", "0.61375993", "0.5911429", "0.58880204", "0.5845489", "0.5802097", "0.57885844", "0.57432395", "0.57148707", "0.5710095", "0.5646816", "0.56286925", "0.5616645", "0.5614922", "0.56103456", "0.56019914", "0.55968064", "0.55...
0.5929427
5
Return the default form class used for user registration.
def get_form_class(self, request): return RegistrationForm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_form_class(self, request):\n\t\treturn RegistrationForm", "def get_form_class(self):\n return self.form_class", "def get_form_class(self):\n if self.form_class:\n return self.form_class\n else:\n raise ImproperlyConfigured(\n \"在定义类视图%s的时候,你必须明确...
[ "0.8111784", "0.770102", "0.74282926", "0.72233987", "0.71631217", "0.7097246", "0.7082445", "0.696311", "0.692484", "0.6767896", "0.67418265", "0.66456175", "0.66020036", "0.64767134", "0.64665145", "0.64210325", "0.63494134", "0.6314577", "0.62687373", "0.6240669", "0.62336...
0.7956627
1
Creates the sum tree data structure for the given replay capacity.
def __init__(self, capacity): assert isinstance(capacity, int) if capacity <= 0: raise ValueError( 'Sum tree capacity should be positive. Got: {}'.format(capacity)) self.nodes = [] self.depth = int(np.ceil(np.log2(capacity))) self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx self.high_idx = capacity + self.low_idx self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision. self.capacity = capacity self.highest_set = 0 self.max_recorded_priority = 1.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, capacity, tuple, alpha=0.6, beta=0.4):\n self.tree = SumTree(capacity)\n self.capacity = capacity\n self.alpha = alpha\n self.beta = beta\n self.tuple = tuple", "def __init__(self, memory_size, batch_size, alpha):\n self.tree = sum_tree.SumTree(memory_...
[ "0.6020408", "0.5820842", "0.5724238", "0.55728537", "0.55458015", "0.5542693", "0.5513752", "0.5476557", "0.53817534", "0.53465384", "0.53336674", "0.53084284", "0.5296613", "0.5293262", "0.5262831", "0.5256954", "0.5208593", "0.519735", "0.5179887", "0.51658785", "0.5162465...
0.70326006
0