function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def one_drop(self, one_drop):
    """Setter: validate that *one_drop* is a bool (or None) and store it in the parameter map."""
    assert_is_type(one_drop, None, bool)
    self._parms["one_drop"] = one_drop
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def skip_drop(self):
    """
    Probability of skipping the dropout procedure (dart booster only).

    Type: ``float``, range 0..1 (default: ``0``). Returns ``None`` if unset.
    """
    return self._parms.get("skip_drop")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def skip_drop(self, skip_drop):
    """Setter: validate that *skip_drop* is a float (or None) and store it in the parameter map."""
    assert_is_type(skip_drop, None, float)
    self._parms["skip_drop"] = skip_drop
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def tree_method(self):
    """
    Tree construction algorithm.

    One of ``"auto"``, ``"exact"``, ``"approx"``, ``"hist"`` (default ``"auto"``).
    Returns ``None`` if the parameter was never set.
    """
    return self._parms.get("tree_method")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def tree_method(self, tree_method):
    """Setter: validate *tree_method* against the allowed enum values and store it."""
    assert_is_type(tree_method, None, Enum("auto", "exact", "approx", "hist"))
    self._parms["tree_method"] = tree_method
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def grow_policy(self):
    """
    Tree growth policy: ``"depthwise"`` (standard GBM) or ``"lossguide"`` (LightGBM style).

    Default ``"depthwise"``; returns ``None`` if unset.
    """
    return self._parms.get("grow_policy")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def grow_policy(self, grow_policy):
    """Setter: validate *grow_policy* against the allowed enum values and store it."""
    assert_is_type(grow_policy, None, Enum("depthwise", "lossguide"))
    self._parms["grow_policy"] = grow_policy
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def booster(self):
    """
    Booster type: one of ``"gbtree"``, ``"gblinear"``, ``"dart"`` (default ``"gbtree"``).

    Returns ``None`` if the parameter was never set.
    """
    return self._parms.get("booster")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def booster(self, booster):
    """Setter: validate *booster* against the allowed enum values and store it."""
    assert_is_type(booster, None, Enum("gbtree", "gblinear", "dart"))
    self._parms["booster"] = booster
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def reg_lambda(self):
    """
    L2 regularization strength.

    Type: ``float`` (default ``1``). Returns ``None`` if unset.
    """
    return self._parms.get("reg_lambda")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def reg_lambda(self, reg_lambda):
    """Setter: validate that *reg_lambda* is a float (or None) and store it."""
    assert_is_type(reg_lambda, None, float)
    self._parms["reg_lambda"] = reg_lambda
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def reg_alpha(self):
    """
    L1 regularization strength.

    Type: ``float`` (default ``0``). Returns ``None`` if unset.
    """
    return self._parms.get("reg_alpha")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def reg_alpha(self, reg_alpha):
    """Setter: validate that *reg_alpha* is a float (or None) and store it."""
    assert_is_type(reg_alpha, None, float)
    self._parms["reg_alpha"] = reg_alpha
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def dmatrix_type(self):
    """
    DMatrix storage type; with ``"sparse"``, NAs and 0 are treated the same.

    One of ``"auto"``, ``"dense"``, ``"sparse"`` (default ``"auto"``).
    Returns ``None`` if unset.
    """
    return self._parms.get("dmatrix_type")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def dmatrix_type(self, dmatrix_type):
    """Setter: validate *dmatrix_type* against the allowed enum values and store it."""
    assert_is_type(dmatrix_type, None, Enum("auto", "dense", "sparse"))
    self._parms["dmatrix_type"] = dmatrix_type
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def backend(self):
    """
    Compute backend; ``"auto"`` picks a GPU when one is available.

    One of ``"auto"``, ``"gpu"``, ``"cpu"`` (default ``"auto"``).
    Returns ``None`` if unset.
    """
    return self._parms.get("backend")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def backend(self, backend):
    """Setter: validate *backend* against the allowed enum values and store it."""
    assert_is_type(backend, None, Enum("auto", "gpu", "cpu"))
    self._parms["backend"] = backend
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def gpu_id(self):
    """
    Index of the GPU to use.

    Type: ``int`` (default ``0``). Returns ``None`` if unset.
    """
    return self._parms.get("gpu_id")
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def gpu_id(self, gpu_id):
    """Setter: validate that *gpu_id* is an int (or None) and store it."""
    assert_is_type(gpu_id, None, int)
    self._parms["gpu_id"] = gpu_id
h2oai/h2o-dev
[ 6169, 1943, 6169, 208, 1393862887 ]
def __init__( self, type: OperationType, create_account_op: CreateAccountOp = None, payment_op: PaymentOp = None, path_payment_strict_receive_op: PathPaymentStrictReceiveOp = None, manage_sell_offer_op: ManageSellOfferOp = None, create_passive_sell_offer_op: CreatePassiveSellOfferOp = None, set_options_op: SetOptionsOp = None, change_trust_op: ChangeTrustOp = None, allow_trust_op: AllowTrustOp = None, destination: MuxedAccount = None, manage_data_op: ManageDataOp = None, bump_sequence_op: BumpSequenceOp = None, manage_buy_offer_op: ManageBuyOfferOp = None, path_payment_strict_send_op: PathPaymentStrictSendOp = None, create_claimable_balance_op: CreateClaimableBalanceOp = None, claim_claimable_balance_op: ClaimClaimableBalanceOp = None, begin_sponsoring_future_reserves_op: BeginSponsoringFutureReservesOp = None, revoke_sponsorship_op: RevokeSponsorshipOp = None, clawback_op: ClawbackOp = None, clawback_claimable_balance_op: ClawbackClaimableBalanceOp = None, set_trust_line_flags_op: SetTrustLineFlagsOp = None, liquidity_pool_deposit_op: LiquidityPoolDepositOp = None, liquidity_pool_withdraw_op: LiquidityPoolWithdrawOp = None,
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def pack(self, packer: Packer) -> None:
    """XDR-encode this operation body into *packer*.

    The discriminant (``self.type``) is packed first; then the arm that
    corresponds to the type is packed. Void arms (INFLATION,
    END_SPONSORING_FUTURE_RESERVES) carry no payload.

    :raises ValueError: if the arm selected by ``self.type`` is ``None``.
    """
    self.type.pack(packer)
    # Map each discriminant to the attribute holding its (single) arm.
    arm_attr_by_type = {
        OperationType.CREATE_ACCOUNT: "create_account_op",
        OperationType.PAYMENT: "payment_op",
        OperationType.PATH_PAYMENT_STRICT_RECEIVE: "path_payment_strict_receive_op",
        OperationType.MANAGE_SELL_OFFER: "manage_sell_offer_op",
        OperationType.CREATE_PASSIVE_SELL_OFFER: "create_passive_sell_offer_op",
        OperationType.SET_OPTIONS: "set_options_op",
        OperationType.CHANGE_TRUST: "change_trust_op",
        OperationType.ALLOW_TRUST: "allow_trust_op",
        OperationType.ACCOUNT_MERGE: "destination",
        OperationType.MANAGE_DATA: "manage_data_op",
        OperationType.BUMP_SEQUENCE: "bump_sequence_op",
        OperationType.MANAGE_BUY_OFFER: "manage_buy_offer_op",
        OperationType.PATH_PAYMENT_STRICT_SEND: "path_payment_strict_send_op",
        OperationType.CREATE_CLAIMABLE_BALANCE: "create_claimable_balance_op",
        OperationType.CLAIM_CLAIMABLE_BALANCE: "claim_claimable_balance_op",
        OperationType.BEGIN_SPONSORING_FUTURE_RESERVES: "begin_sponsoring_future_reserves_op",
        OperationType.REVOKE_SPONSORSHIP: "revoke_sponsorship_op",
        OperationType.CLAWBACK: "clawback_op",
        OperationType.CLAWBACK_CLAIMABLE_BALANCE: "clawback_claimable_balance_op",
        OperationType.SET_TRUST_LINE_FLAGS: "set_trust_line_flags_op",
        OperationType.LIQUIDITY_POOL_DEPOSIT: "liquidity_pool_deposit_op",
        OperationType.LIQUIDITY_POOL_WITHDRAW: "liquidity_pool_withdraw_op",
    }
    attr = arm_attr_by_type.get(self.type)
    if attr is None:
        # Void arms (INFLATION, END_SPONSORING_FUTURE_RESERVES) and any
        # unlisted discriminant pack nothing beyond the type itself.
        return
    arm = getattr(self, attr)
    if arm is None:
        raise ValueError(f"{attr} should not be None.")
    arm.pack(packer)
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def unpack(cls, unpacker: Unpacker) -> "OperationBody":
    """Decode an ``OperationBody`` from *unpacker*.

    Reads the discriminant first, then decodes the matching arm (if any)
    and passes it to the constructor under its keyword name.
    """
    type = OperationType.unpack(unpacker)
    # Map each discriminant to (constructor kwarg, decoder class).
    arm_decoders = {
        OperationType.CREATE_ACCOUNT: ("create_account_op", CreateAccountOp),
        OperationType.PAYMENT: ("payment_op", PaymentOp),
        OperationType.PATH_PAYMENT_STRICT_RECEIVE: ("path_payment_strict_receive_op", PathPaymentStrictReceiveOp),
        OperationType.MANAGE_SELL_OFFER: ("manage_sell_offer_op", ManageSellOfferOp),
        OperationType.CREATE_PASSIVE_SELL_OFFER: ("create_passive_sell_offer_op", CreatePassiveSellOfferOp),
        OperationType.SET_OPTIONS: ("set_options_op", SetOptionsOp),
        OperationType.CHANGE_TRUST: ("change_trust_op", ChangeTrustOp),
        OperationType.ALLOW_TRUST: ("allow_trust_op", AllowTrustOp),
        OperationType.ACCOUNT_MERGE: ("destination", MuxedAccount),
        OperationType.MANAGE_DATA: ("manage_data_op", ManageDataOp),
        OperationType.BUMP_SEQUENCE: ("bump_sequence_op", BumpSequenceOp),
        OperationType.MANAGE_BUY_OFFER: ("manage_buy_offer_op", ManageBuyOfferOp),
        OperationType.PATH_PAYMENT_STRICT_SEND: ("path_payment_strict_send_op", PathPaymentStrictSendOp),
        OperationType.CREATE_CLAIMABLE_BALANCE: ("create_claimable_balance_op", CreateClaimableBalanceOp),
        OperationType.CLAIM_CLAIMABLE_BALANCE: ("claim_claimable_balance_op", ClaimClaimableBalanceOp),
        OperationType.BEGIN_SPONSORING_FUTURE_RESERVES: ("begin_sponsoring_future_reserves_op", BeginSponsoringFutureReservesOp),
        OperationType.REVOKE_SPONSORSHIP: ("revoke_sponsorship_op", RevokeSponsorshipOp),
        OperationType.CLAWBACK: ("clawback_op", ClawbackOp),
        OperationType.CLAWBACK_CLAIMABLE_BALANCE: ("clawback_claimable_balance_op", ClawbackClaimableBalanceOp),
        OperationType.SET_TRUST_LINE_FLAGS: ("set_trust_line_flags_op", SetTrustLineFlagsOp),
        OperationType.LIQUIDITY_POOL_DEPOSIT: ("liquidity_pool_deposit_op", LiquidityPoolDepositOp),
        OperationType.LIQUIDITY_POOL_WITHDRAW: ("liquidity_pool_withdraw_op", LiquidityPoolWithdrawOp),
    }
    entry = arm_decoders.get(type)
    if entry is None:
        # Void arms (INFLATION, END_SPONSORING_FUTURE_RESERVES) and any
        # unrecognised discriminant produce a body with no arm set.
        return cls(type=type)
    kwarg, arm_cls = entry
    return cls(type=type, **{kwarg: arm_cls.unpack(unpacker)})
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def from_xdr_bytes(cls, xdr: bytes) -> "OperationBody":
    """Decode an ``OperationBody`` from raw XDR bytes."""
    return cls.unpack(Unpacker(xdr))
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def from_xdr(cls, xdr: str) -> "OperationBody":
    """Decode an ``OperationBody`` from a base64-encoded XDR string."""
    raw = base64.b64decode(xdr.encode())
    return cls.from_xdr_bytes(raw)
StellarCN/py-stellar-base
[ 328, 158, 328, 6, 1443187561 ]
def expression(symbolAction, nextState):
    """Return the lexer rule list for an N3 expression.

    :param symbolAction: token type to emit for a bare symbol match.
    :param nextState: state (or tuple of states) to push after a match;
        'pathtail' is always appended so path tails are lexed next.
    """
    #expression ::= | pathitem pathtail
    #pathitem ::= | "(" pathlist ")"
    #            | "[" propertylist "]"
    #            | "{" formulacontent "}"
    #            | boolean
    #            | literal
    #            | numericliteral
    #            | quickvariable
    #            | symbol
    # Normalise nextState into a tuple so state names can be appended.
    if not isinstance(nextState,tuple):
        nextState = (nextState,)
    nextState = nextState + ('pathtail',)
    return [
        #pathlist
        (r'\(', Punctuation, nextState + ('list',)),
        #properylist
        (r'\[', Punctuation, nextState + ('propertyList',)),
        #formulacontent
        (r'\{', Punctuation, nextState + ('root',)),
        #boolean
        (r'@false|@true', Keyword.Constant, nextState),
        #literal (triple-quoted or plain quoted string, then optional datatype/lang tag)
        (r'("""[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*""")|("[^"\\]*(?:\\.[^"\\]*)*")', String, nextState + ('dtlang',)),
        #numericliteral ::= double|integer|rational
        # NOTE(review): the exponent group here has no trailing '?', so a plain
        # decimal like 1.5 never matches the Float rule (its '1' matches the
        # Integer rule instead) — confirm against the N3 double grammar, where
        # doubles may indeed require an exponent.
        (r'[-+]?[0-9]+(\.[0-9]+)?([eE][-+]?[0-9]+)', Number.Float, nextState),
        (r'[-+]?[0-9]+', Number.Integer, nextState),
        # NOTE(review): this rational rule is listed after Integer, so the
        # numerator alone will match Integer first — verify rule ordering.
        (r'[-+]?[0-9]+/[0-9]+', Number, nextState),
        #quickvariable
        (_quickvariable, Name.Variable, nextState),
        #symbol
        (_symbol, symbolAction, nextState),
    ]
gniezen/n3pygments
[ 22, 6, 22, 3, 1327326868 ]
def series_rolling_median():
    """Rolling median of [4, 3, 5, 2, 6] over a window of 3.

    Returns a Series whose first two entries are NaN (incomplete window),
    followed by 4.0, 3.0, 5.0.
    """
    data = pd.Series([4, 3, 5, 2, 6])
    return data.rolling(3).median()
IntelLabs/hpat
[ 645, 65, 645, 54, 1496336381 ]
def Xval_on_single_patient(predictor_cls, feature_extractor, patient_name="Dog_1",preprocess=True): """ Single patient cross validation Returns 2 lists of cross validation performances :param predictor_cls: :param feature_extractor :param patient_name: :return: """ # predictor_cls is a handle to an instance of PredictorBase # Instantiate the predictor predictor = predictor_cls() base_dir = Global.path_map('clips_folder') base_dir = '/nfs/data3/kaggle_seizure/clips/' loader = DataLoader(base_dir, feature_extractor) X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(patient_name,preprocess=preprocess) #X_train,y_seizure, y_early = loader.training_data(patient_name) #y_train = [y_seizure,y_early] #X_list,y_list = train_test_split(X_train,y_train) # running cross validation print patient_name print "\ncross validation: seizures vs not" result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc) print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \ % (np.mean(result_seizure), np.std(result_seizure), result_seizure) print "\ncross validation: early_vs_not" result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc) print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \ % (np.mean(result_early), np.std(result_early), result_early) return result_seizure,result_early
vincentadam87/gatsby-hackathon-seizure
[ 3, 1, 3, 5, 1403950691 ]
def main():
    # code run at script launch
    #patient_name = sys.argv[1]
    # There are Dog_[1-4] and Patient_[1-8]
    patients_list = ["Dog_%d" % i for i in range(1, 5)] + ["Patient_%d" % i for i in range(1, 9)]
    # NOTE(review): the full list above is immediately overwritten to just
    # Dog_1 — presumably a debugging restriction. This function also appears
    # truncated here (the list is built but never used in the visible code);
    # confirm against the full source.
    patients_list = ["Dog_%d" % i for i in [1]] #["Patient_%d" % i for i in range(1, 9)]#++
vincentadam87/gatsby-hackathon-seizure
[ 3, 1, 3, 5, 1403950691 ]
def generate(env):
    """Add Builders and construction variables for ar to an Environment."""
    # Registers SCons' standard static-library builder on this environment;
    # the builder supplies the archiver invocation.
    SCons.Tool.createStaticLibBuilder(env)
kerwinxu/barcodeManager
[ 4, 1, 4, 3, 1447294107 ]
def exists(env):
    """SCons tool-existence hook: truthy when a C compiler or ar is detected."""
    detected = env.Detect('CC')
    if detected:
        return detected
    return env.Detect('ar')
kerwinxu/barcodeManager
[ 4, 1, 4, 3, 1447294107 ]
def test_install_packages():
    """Build a throw-away dummy R package tarball, install it, then clean up."""
    d = dun.CreateDummy()
    d()  # materialises ./dummy/dummytest_1.0.0.tar.gz on disk
    package.install_package('./dummy/dummytest_1.0.0.tar.gz', verbose=True)
    d._clean()  # remove the generated fixture
biokit/biokit
[ 47, 22, 47, 13, 1410258271 ]
def test_get_r_version():
    """Smoke test: the R-version query runs without raising (result unchecked)."""
    package.get_R_version()
biokit/biokit
[ 47, 22, 47, 13, 1410258271 ]
def forwards(self, orm):
    """South migration: alter Student.student_id to a unique CharField(max_length=12)."""
    # Changing field 'Student.student_id'
    db.alter_column('publications_student', 'student_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=12))
evildmp/arkestra-publications
[ 2, 3, 2, 6, 1320326961 ]
def forwards(self, orm):
    """South migration: add nullable Face.district FK (column 'district_id')."""
    # Adding field 'Face.district_id'
    db.add_column(u'faces_face', 'district_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['faces.District'], null=True), keep_default=False)
RuralIndia/pari
[ 22, 9, 22, 39, 1360646573 ]
def __init__(self, input=None, n_visible=784, n_hidden=500, \
             W=None, hbias=None, vbias=None, numpy_rng=None, theano_rng=None):
    """
    RBM constructor. Defines the parameters of the model along with
    basic operations for inferring hidden from visible (and vice-versa),
    as well as for performing CD updates.

    :param input: None for standalone RBMs or symbolic variable if RBM is
        part of a larger graph.
    :param n_visible: number of visible units
    :param n_hidden: number of hidden units
    :param W: None for standalone RBMs or symbolic variable pointing to a
        shared weight matrix in case RBM is part of a DBN network; in a DBN,
        the weights are shared between RBMs and layers of a MLP
    :param hbias: None for standalone RBMs or symbolic variable pointing
        to a shared hidden units bias vector in case RBM is part of a
        different network
    :param vbias: None for standalone RBMs or a symbolic variable
        pointing to a shared visible units bias
    :param numpy_rng: numpy RandomState; seeded with 1234 if omitted
    :param theano_rng: theano RandomStreams; derived from numpy_rng if omitted
    """
    self.n_visible = n_visible
    self.n_hidden = n_hidden

    if numpy_rng is None:
        # create a number generator
        numpy_rng = numpy.random.RandomState(1234)

    if theano_rng is None:
        theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

    if W is None:
        # W is initialized with `initial_W` which is uniformely
        # sampled from -4*sqrt(6./(n_visible+n_hidden)) and
        # 4*sqrt(6./(n_hidden+n_visible)) the output of uniform if
        # converted using asarray to dtype theano.config.floatX so
        # that the code is runable on GPU
        initial_W = numpy.asarray(numpy_rng.uniform(
            low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
            high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
            size=(n_visible, n_hidden)),
            dtype=theano.config.floatX)
        # theano shared variables for weights and biases
        W = theano.shared(value=initial_W, name='W', borrow=True)

    if hbias is None:
        # create shared variable for hidden units bias
        hbias = theano.shared(value=numpy.zeros(n_hidden,
                                                dtype=theano.config.floatX),
                              name='hbias', borrow=True)

    if vbias is None:
        # create shared variable for visible units bias
        vbias = theano.shared(value=numpy.zeros(n_visible,
                                                dtype=theano.config.floatX),
                              name='vbias', borrow=True)

    # initialize input layer for standalone RBM or layer0 of DBN
    self.input = input
    # NOTE(review): truthiness test rather than `input is None` — presumably
    # fine for symbolic variables, but worth confirming it cannot misfire.
    if not input:
        self.input = T.matrix('input')

    self.W = W
    self.hbias = hbias
    self.vbias = vbias
    self.theano_rng = theano_rng
    # **** WARNING: It is not a good idea to put things in this list
    # other than shared variables created in this function.
    self.params = [self.W, self.hbias, self.vbias]
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def propup(self, vis):
    '''Propagate visible-unit activations up to the hidden layer.

    Returns [pre_sigmoid, sigmoid(pre_sigmoid)]. The pre-sigmoid value is
    also returned so Theano can later rewrite log(sigmoid(x)) into the
    numerically stable softplus form (see the reconstruction cost).
    '''
    linear = T.dot(vis, self.W) + self.hbias
    return [linear, T.nnet.sigmoid(linear)]
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def propdown(self, hid):
    '''Propagate hidden-unit activations down to the visible layer.

    Returns [pre_sigmoid, sigmoid(pre_sigmoid)]. The pre-sigmoid value is
    also returned so Theano can later rewrite log(sigmoid(x)) into the
    numerically stable softplus form (see the reconstruction cost).
    '''
    linear = T.dot(hid, self.W.T) + self.vbias
    return [linear, T.nnet.sigmoid(linear)]
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def gibbs_hvh(self, h0_sample):
    '''One step of Gibbs sampling starting from the hidden state.

    Samples the visibles given h0_sample, then the hiddens given that
    visible sample; returns all six intermediate results.
    '''
    v_pre, v_mean, v_sample = self.sample_v_given_h(h0_sample)
    h_pre, h_mean, h_sample = self.sample_h_given_v(v_sample)
    return [v_pre, v_mean, v_sample,
            h_pre, h_mean, h_sample]
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def get_cost_updates(self, lr=0.1, persistent=None, k=1): """This functions implements one step of CD-k or PCD-k :param lr: learning rate used to train the RBM :param persistent: None for CD. For PCD, shared variable containing old state of Gibbs chain. This must be a shared variable of size (batch size, number of hidden units). :param k: number of Gibbs steps to do in CD-k/PCD-k Returns a proxy for the cost and the updates dictionary. The dictionary contains the update rules for weights and biases but also an update of the shared variable used to store the persistent chain, if one is used. """ # compute positive phase pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input) # decide how to initialize persistent chain: # for CD, we use the newly generate hidden sample # for PCD, we initialize from the old state of the chain if persistent is None: chain_start = ph_sample else: chain_start = persistent # perform actual negative phase # in order to implement CD-k/PCD-k we need to scan over the # function that implements one gibbs step k times. 
# Read Theano tutorial on scan for more information : # http://deeplearning.net/software/theano/library/scan.html # the scan will return the entire Gibbs chain # udpate is a dictionary type, updates of values of shared variables # including model parameters and persistent chain [pre_sigmoid_nvs, nv_means, nv_samples, pre_sigmoid_nhs, nh_means, nh_samples], updates = \ theano.scan(self.gibbs_hvh, # the None are place holders, saying that # chain_start is the initial state corresponding to the # 6th output outputs_info=[None, None, None, None, None, chain_start], n_steps=k) # determine gradients on RBM parameters # not that we only need the sample at the end of the chain chain_end = nv_samples[-1] cost = T.mean(self.free_energy(self.input)) - T.mean( self.free_energy(chain_end)) # We must not compute the gradient through the gibbs sampling gparams = T.grad(cost, self.params, consider_constant=[chain_end]) # constructs the update dictionary for gparam, param in zip(gparams, self.params): # make sure that the learning rate is of the right dtype # update is a dictionary, add the parameter update dictionary items updates[param] = param - gparam * T.cast(lr, dtype=theano.config.floatX) if persistent: # Note that this works only if persistent is a shared variable updates[persistent] = nh_samples[-1] # pseudo-likelihood is a better proxy for PCD monitoring_cost = self.get_pseudo_likelihood_cost(updates) else: # reconstruction cross-entropy is a better proxy for CD monitoring_cost = self.get_reconstruction_cost(updates, pre_sigmoid_nvs[-1]) return monitoring_cost, updates
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
    """Approximation to the reconstruction error

    Note that this function requires the pre-sigmoid activation as
    input. To understand why this is so you need to understand a
    bit about how Theano works. Whenever you compile a Theano
    function, the computational graph that you pass as input gets
    optimized for speed and stability. This is done by changing
    several parts of the subgraphs with others. One such
    optimization expresses terms of the form log(sigmoid(x)) in
    terms of softplus. We need this optimization for the
    cross-entropy since sigmoid of numbers larger than 30. (or
    even less then that) turn to 1. and numbers smaller than
    -30. turn to 0 which in terms will force theano to compute
    log(0) and therefore we will get either -inf or NaN as
    cost. If the value is expressed in terms of softplus we do not
    get this undesirable behaviour. This optimization usually
    works fine, but here we have a special case. The sigmoid is
    applied inside the scan op, while the log is
    outside. Therefore Theano will only see log(scan(..)) instead
    of log(sigmoid(..)) and will not apply the wanted
    optimization. We can not go and replace the sigmoid in scan
    with something else also, because this only needs to be done
    on the last step. Therefore the easiest and more efficient way
    is to get also the pre-sigmoid activation as an output of
    scan, and apply both the log and sigmoid outside scan such
    that Theano can catch and optimize the expression.
    """
    # Mean (over the batch) of the per-example cross-entropy between the
    # input and its reconstruction, summed over visible units.
    cross_entropy = T.mean(
        T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
              (1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
              axis=1))

    return cross_entropy
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def test_model(model_trained,test_set_x_org=None): """ Get the reduced data using the model learned.
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def sample_model(rng,model_trained,test_set_x_org=None,n_chains=20,n_samples=10,sample_gap=1000): """ Sample from the trained RBM given some actual examples to initialize the algorithm.
yifeng-li/DECRES
[ 33, 13, 33, 5, 1434585720 ]
def is_valid(self, raise_exception=True):
    """Validate phone number, Estonian ID code and language on top of the base checks.

    Returns False (or raises, per *raise_exception*) on the first failing
    check; an unknown/missing language is silently replaced by the default
    rather than treated as an error.
    """
    result = super().is_valid(raise_exception=raise_exception)
    if result:
        # NOTE(review): `not A or B and not C` parses as
        # `(not A) or (B and (not C))` — so an empty phone_number fails even
        # when PHONE_NUMBER_REGEXP is unset; confirm that is the intent.
        if not self.phone_number or PHONE_NUMBER_REGEXP and not re.match(PHONE_NUMBER_REGEXP, self.phone_number):
            if not raise_exception:
                return False
            raise InvalidParameter(param="phone_number")
        if not id_code_ee_is_valid(self.id_code):
            if not raise_exception:
                return False
            raise InvalidIdCode
        # Fall back to the configured default language if none/unknown given.
        if not (self.get("language") and self.language in Languages.ALL):
            self.language = settings.MOBILE_ID_DEFAULT_LANGUAGE
    return result
thorgate/django-esteid
[ 19, 2, 19, 3, 1445964219 ]
def format(self, value):
    """Fluent setter: remember the response format and return the builder."""
    self._format = value
    return self
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def input(self, text):
    """Fluent setter: record the text to analyse on the builder and in the request params."""
    self._params["input"] = text
    self._input = text
    return self
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def categories(self, tree_key=None, additionals=None):
    # Build the parameter dict for the categorisation function from the
    # optional extras, folding in tree_key when given.
    # NOTE(review): this block appears truncated in the extract — `params`
    # is built but never stored/returned in the visible code; confirm
    # against the full source (siblings register into self._functions).
    params = dict(additionals or [])
    if tree_key is not None:
        params.update(dict(tree_key=tree_key))
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def keywords(self):
    """Enable keyword extraction for this request (fluent)."""
    self._functions["keywords"] = True
    return self
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def entities(self):
    """Enable entity extraction for this request (fluent)."""
    self._functions["entities"] = True
    return self
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def language(self):
    """Enable language detection for this request (fluent)."""
    self._functions["language"] = True
    return self
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def meaningfy(self):
    # Assemble the request URL path: one segment per registered function
    # (joined by ';'), with "+additional" suffixes and an optional tree_key
    # parameter for the categories function, then the response format as
    # the extension.
    # NOTE: Python 2 code — dict.has_key() does not exist in Python 3.
    # NOTE(review): indentation below is reconstructed from a collapsed
    # extract, and the block appears truncated (the built `url` is never
    # used/returned here); confirm against the full source.
    fs = []
    for k,v in self._functions.items():
        kk = k
        if isinstance(v, dict):
            if v.has_key("additionals"):
                for a in v["additionals"]:
                    kk = "%s+%s" % (kk, a)
            if v.has_key("tree_key") and v["tree_key"] is not None and kk == 'categories':
                self._params["tree_key"] = v["tree_key"]
        fs.append(kk)
    fs = ";".join(fs)
    url = "%s.%s" % (fs, self._format)
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def _reset(self):
    """Drop any accumulated functions and request parameters."""
    self._functions, self._params = {}, {}
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def get_builder(self):
    # Lazily construct the shared request builder (JSON format) on first use.
    # NOTE(review): this block appears truncated — no return statement is
    # visible, yet callers use the result of get_builder(); confirm the full
    # source returns self.__builder__.
    if self.__builder__ is None:
        self.__builder__ = NeocortexRestClient.Builder(self.BASE_URL, self.api_key).format(ResponseFormats.JSON)
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def categories(self, input, tree_key=None, additionals=None):
    """Categorise *input* text and return the 'categories' part of the response payload."""
    request = self.get_builder().format(ResponseFormats.JSON).input(input)
    response = request.categories(tree_key, additionals).meaningfy()
    return response.payload["categories"]
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def entities(self, input):
    """Extract entities from *input* text and return the 'entities' part of the response payload."""
    request = self.get_builder().format(ResponseFormats.JSON).input(input)
    response = request.entities().meaningfy()
    return response.payload["entities"]
popego/neocortex-api-python
[ 8, 2, 8, 1, 1262800862 ]
def setUp(self):
    """Create three fixture users, including one with a non-ASCII password."""
    super(BcryptTests, self).setUp()
    User.objects.create_user('john', 'johndoe@example.com', password='123456')
    User.objects.create_user('jane', 'janedoe@example.com', password='abc')
    # Non-ASCII password exercises unicode handling in the hasher.
    User.objects.create_user('jude', 'jeromedoe@example.com', password=u'abcéäêëôøà')
fwenzel/django-sha2
[ 109, 18, 109, 3, 1293582874 ]
def test_bcrypt_auth(self):
    """Try authenticating."""
    # Correct credentials succeed, wrong password fails, unicode round-trips.
    assert authenticate(username='john', password='123456')
    assert authenticate(username='jane', password='abc')
    assert not authenticate(username='jane', password='123456')
    assert authenticate(username='jude', password=u'abcéäêëôøà')
    assert not authenticate(username='jude', password=u'çççbbbààà')
fwenzel/django-sha2
[ 109, 18, 109, 3, 1293582874 ]
def test_nokey(self):
    """With no HMAC key, no dice."""
    # Every authentication attempt must fail, even with correct passwords.
    assert not authenticate(username='john', password='123456')
    assert not authenticate(username='jane', password='abc')
    assert not authenticate(username='jane', password='123456')
    assert not authenticate(username='jude', password=u'abcéäêëôøà')
    assert not authenticate(username='jude', password=u'çççbbbààà')
fwenzel/django-sha2
[ 109, 18, 109, 3, 1293582874 ]
def test_hmac_autoupdate(self): """Auto-update HMAC key if hash in DB is outdated.""" # Get HMAC key IDs to compare old_key_id = max(settings.HMAC_KEYS.keys()) new_key_id = '2020-01-01' # Add a new HMAC key new_keys = settings.HMAC_KEYS.copy() new_keys[new_key_id] = 'a_new_key' with patch.object(settings._wrapped, 'HMAC_KEYS', new_keys): # Make sure the database has the old key ID. john = User.objects.get(username='john') eq_(john.password.rsplit('$', 1)[1], old_key_id) # Log in. assert authenticate(username='john', password='123456') # Make sure the DB now has a new password hash. john = User.objects.get(username='john') eq_(john.password.rsplit('$', 1)[1], new_key_id)
fwenzel/django-sha2
[ 109, 18, 109, 3, 1293582874 ]
def extractNotoriousOnlineBlogspotCom(item):
    '''
    Parser for 'notorious-online.blogspot.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Bail out on previews and titles with no chapter/volume information.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    # First matching tag wins; no match means an unhandled release.
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp,
                                           frag=frag, postfix=postfix,
                                           tl_type=tl_type)
    return False
fake-name/ReadableWebProxy
[ 191, 16, 191, 3, 1437712243 ]
def __init__(self, config, name):
    """
    :type name: str
    """
    # Store the stream's identifying name and its configuration object.
    self.name = name
    self.config = config
ThomasGerstenberg/serial_monitor
[ 9, 5, 9, 4, 1440644224 ]
def open(self):
    # Abstract: open the underlying stream; must be implemented by subclasses.
    raise NotImplementedError
ThomasGerstenberg/serial_monitor
[ 9, 5, 9, 4, 1440644224 ]
def close(self):
    # Abstract: close the underlying stream; must be implemented by subclasses.
    raise NotImplementedError
ThomasGerstenberg/serial_monitor
[ 9, 5, 9, 4, 1440644224 ]
def read(self, num_bytes=1):
    # Abstract: read up to num_bytes from the stream; subclasses implement.
    raise NotImplementedError
ThomasGerstenberg/serial_monitor
[ 9, 5, 9, 4, 1440644224 ]
def write(self, data):
    # Abstract: write data to the stream; subclasses implement.
    raise NotImplementedError
ThomasGerstenberg/serial_monitor
[ 9, 5, 9, 4, 1440644224 ]
def command_oraakkeli(bot, user, channel, args):
    """Asks a question from the oracle (http://www.lintukoto.net/viihde/oraakkeli/)"""
    # Python 2 code: relies on urllib.quote_plus and the unicode builtin.
    if not args:
        return
    # URL-encode the question before embedding it in the query string.
    args = urllib.quote_plus(args)
    answer = getUrl("http://www.lintukoto.net/viihde/oraakkeli/index.php?kysymys=%s&html=0" % args).getContent()
    answer = unicode(answer)
    answer = answer.encode("utf-8")
    # NOTE(review): the fetched answer is never used after this point —
    # the function looks truncated (presumably it should reply to the
    # channel with the answer); confirm against the original source.
nigeljonez/newpyfibot
[ 8, 3, 8, 1, 1285336020 ]
def __unicode__(self):
    # Python 2 unicode representation: delegate to the related user object.
    return unicode(self.user)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def get_absolute_url(self):
    """Canonical (locale-aware) URL of this profile's desktop user page."""
    username = self.user.username
    return reverse('desktop.user', args=[username])
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def generic_sharing_url(self):
    """Absolute sharing URL for this user's page (no service tag)."""
    path = django_reverse('desktop.user', args=[self.user.username])
    return absolute_url(urlparams(path))
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def _social_sharing_url(self, service):
    """Absolute sharing URL for this user's page, tagged with a service code.

    django_reverse is used instead of reverse because sharing links must
    not have a locale prepended.
    """
    path = django_reverse('desktop.user', args=[self.user.username])
    return absolute_url(urlparams(path, f=service))
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def twitter_sharing_url(self):
    # Sharing URL tagged with the 't' (Twitter) service code.
    return self._social_sharing_url('t')
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def facebook_sharing_url(self):
    # Sharing URL tagged with the 'fb' (Facebook) service code.
    return self._social_sharing_url('fb')
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def poster_sharing_url(self):
    # Sharing URL tagged with the 'p' (poster) service code.
    return self._social_sharing_url('p')
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def badges(self):
    """Returns a list of dicts used for badge list rendering.

    They represent all badges earned by the user in the Spark game.
    """
    badges = []
    # Only challenges with a badge-earned date count as earned badges.
    completed_challenges = CompletedChallenge.objects.filter(profile=self,
                                                             date_badge_earned__isnull=False)
    for cc in completed_challenges:
        # Badge id is derived from the challenge's level and number.
        badge_id = utils.get_challenge_id(cc.challenge.level, cc.challenge.number)
        badge_description = cc.challenge.badge_description
        badges.append({
            'id': badge_id,
            'name': cc.challenge.badge_name,
            'description': badge_description,
            'date_earned': cc.date_badge_earned,
            # 'new' flags a recently earned badge (notification state).
            'new': cc.new_badge,
            # Pre-quoted share messages for the social share links.
            'twitter_msg': urlquote(unicode(TWITTER_BADGE_MSG % {'badge_name':cc.challenge.badge_name,
                                                                 'short_url':''})),
            'facebook_msg': urlquote(unicode(FACEBOOK_BADGE_MSG % {'badge_name':cc.challenge.badge_name})),
            # Facebook image filename uses dashes, not underscores.
            'facebook_img': absolute_url(settings.MEDIA_URL+'img/badges/fb/'+badge_id.replace('_','-')+'.png'),
            'facebook_desc': urlquote(badge_description)
        })
    return badges
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def has_badge(self, badge_id):
    """Returns whether this user has earned the given badge."""
    if not badge_id:
        return False
    earned = CompletedChallenge.objects.filter(profile=self,
                                               challenge__pk=badge_id,
                                               date_badge_earned__isnull=False)
    return earned.count() == 1
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def total_badges_earned(self):
    """Returns the total number of badges earned by the user.

    Doesn't include hidden unlocked badges from an upper level.
    """
    # A badge is "earned" when its date_badge_earned is set.
    return CompletedChallenge.objects.filter(profile=self,
                                             date_badge_earned__isnull=False).count()
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def spark_started_with(self):
    """Username of the user who sparked this one, or '' when unknown."""
    parent = self.parent_username
    return parent if parent is not None else ''
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def most_recent_share(self):
    """Most recent share stat displayed on desktop dashboard/user pages."""
    from stats.models import SharingHistory
    # NOTE(review): the body appears truncated here — presumably it
    # queries SharingHistory for this profile's latest share; confirm
    # against the original source.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def shares_over_time(self):
    """Aggregate data of Spark shares since the start of the campaign.

    Used by the 'shares over time' diagram in the user dashboard.
    """
    # Imported locally to avoid a circular import with stats.models.
    from stats.models import SharingHistory
    return SharingHistory.get_shares_over_time(self)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def sparked_countries(self):
    """List of countries this user has shared their Spark with."""
    # Imported locally to avoid a circular import with the package utils.
    from .utils import user_node
    countries = set()
    node = user_node(self.user)
    # Collect the lowercase country code of each direct child, when set.
    for child in node.get_children():
        cc = child.user.profile.country_code
        if cc:
            countries.add(cc.lower())
    # NOTE(review): no return statement is visible — the body appears
    # truncated (presumably it returns the collected countries); confirm
    # against the original source.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def total_shares(self):
    """Total shares stat displayed on desktop dashboard/user pages."""
    from stats.models import SharingHistory
    # NOTE(review): the body appears truncated here — presumably it
    # counts this profile's SharingHistory entries; confirm upstream.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def challenge_info(self):
    """Returns a list of dicts containing level/challenge completion information.

    Used to render both desktop and mobile collapsing challenge lists.
    """
    return utils.get_profile_levels(self)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def new_challenge_count(self):
    """Returns the number of newly available challenges in the user's
    current level.

    Returns 0 when there is no pending new-challenge notification.
    """
    # Guard clause: nothing new to report.
    if not self.new_challenges:
        return 0
    # Total number of challenges defined for the current (1-based) level.
    challenge_count = utils.CHALLENGE_COUNT_PER_LVL[self.level - 1]
    # Use .count() so the database counts the rows; len(queryset) would
    # evaluate the queryset and fetch every row just to count them.
    completed_challenge_count = CompletedChallenge.objects.filter(
        profile=self, challenge__level=self.level).count()
    return challenge_count - completed_challenge_count
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def new_badge_count(self):
    """Returns the number of recently earned badges."""
    return sum(1 for badge in self.badges if badge['new'])
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def qr_code_download(self):
    """Returns the URL of a QR code which, when scanned, points to:
    https://[domain]/download?f=qr&user=[username]
    """
    # Build the absolute download URL with f=qr and this username, then
    # convert it into a QR-code image URL.
    url = absolute_url(urlparams(django_reverse('sharing.download'),
                                 user=self.user.username))
    return sharing_utils.url2qr(url)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def continent_code(self):
    # Map this profile's country code to its continent code; '' when the
    # country code is unset.
    from geo.continents import countries_continents
    code = ''
    if self.country_code:
        code = countries_continents[self.country_code]
    # NOTE(review): no return statement is visible — the body appears
    # truncated (presumably 'return code'); confirm against the original.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def total_countries_sparked(self):
    """Returns the total number of countries where the user's children are located."""
    return len(self.sparked_countries)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def total_continents_sparked(self):
    """Returns the total number of continents where the user's children are located."""
    from geo.continents import countries_continents
    from .utils import user_node
    # NOTE(review): the body appears truncated after the imports —
    # presumably it maps children's country codes to continents and
    # counts the distinct set; confirm against the original source.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def children_profiles(self):
    """Returns a list of profiles of the user's children in the user tree."""
    from .utils import user_node
    # NOTE(review): the body appears truncated after the import —
    # presumably it walks user_node(self.user) children; confirm upstream.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def clear_new_badges(self):
    """Clears notifications of recently earned badges."""
    # Single bulk UPDATE: flips new_badge off for all of this profile's rows.
    CompletedChallenge.objects.filter(profile=self,
                                      new_badge=True).update(new_badge=False)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def clear_new_challenges(self):
    """Clears notifications of new available challenges."""
    # Persist the cleared flag immediately.
    self.new_challenges = False
    self.save()
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def trigger_multisparker_badge(self):
    # Kicks off the challenge-completion update task for this profile.
    from challenges.tasks import update_completed_challenges
    # NOTE(review): the body appears truncated after the import —
    # presumably it invokes update_completed_challenges; confirm upstream.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def update_ancestors_longest_chain(self):
    """Updates 'longest chain' stat of all ancestors of this user when relevant.

    Used after Boost step 2 confirmation so that all users involved have
    their longest chain stat updated.
    """
    from .utils import user_node
    ancestors = user_node(self.user).get_ancestors()
    chain_length = len(ancestors)
    # NOTE(review): the body appears truncated here — presumably it loops
    # over ancestors updating their longest-chain stat; confirm upstream.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def add_city_shares_for_children(self):
    """Creates city shares in the CitySharingHistory for the global visualization.

    This is useful when a user already has children when he completes
    boost 1 (geolocation). As soon as it's completed, city shares are
    created for all geolocated children.
    """
    from stats.models import CitySharingHistory
    # NOTE(review): the body appears truncated after the import —
    # presumably it creates CitySharingHistory rows per child; confirm
    # against the original source.
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def __unicode__(self):
    # Python 2 unicode representation: "<profile> <-> <challenge>".
    return "%s <-> %s" % (self.profile, self.challenge)
mozilla/spark
[ 5, 6, 5, 4, 1297724745 ]
def binarize_vector(u):
    # Elementwise "is positive" test; with a numpy array argument this
    # yields a boolean array of the same shape.
    return u > 0
mikekestemont/PyStyl
[ 55, 13, 55, 6, 1409592784 ]
def cosine_distance_binary(u, v):
    """Cosine measure of the binarized (presence/absence) versions of u and v.

    Both vectors are first reduced to boolean "is positive" masks; the
    result is the shared-positive count normalized by the geometric mean
    of the positive counts. NOTE(review): despite the name, this appears
    to be a similarity (1.0 for identical supports), not a distance.
    """
    u_mask = u > 0
    v_mask = v > 0
    shared = 1.0 * (u_mask * v_mask).sum()
    return shared / numpy.sqrt(u_mask.sum() * v_mask.sum())
mikekestemont/PyStyl
[ 55, 13, 55, 6, 1409592784 ]
def cityblock_distance(u, v):
    """Return the Manhattan/City Block distance between two vectors."""
    difference = u - v
    return abs(difference).sum()
mikekestemont/PyStyl
[ 55, 13, 55, 6, 1409592784 ]
def correlation(u, v):
    """Return the correlation distance between two vectors.

    Computed as 1 - Pearson correlation of the mean-centered vectors:
    0.0 for perfectly correlated inputs, 2.0 for anti-correlated ones.
    """
    u_centered = u - u.mean()
    v_centered = v - v.mean()
    numerator = dot(u_centered, v_centered)
    denominator = sqrt(dot(u_centered, u_centered)) * sqrt(dot(v_centered, v_centered))
    return 1.0 - numerator / denominator
mikekestemont/PyStyl
[ 55, 13, 55, 6, 1409592784 ]
def jaccard_distance(u, v):
    """return jaccard distance"""
    u = numpy.asarray(u)
    v = numpy.asarray(v)
    # Positions where at least one vector is nonzero (the "union").
    either_nonzero = numpy.bitwise_or(u != 0, v != 0)
    # Positions in the union where the two vectors disagree.
    differing = numpy.bitwise_and(u != v, either_nonzero)
    return numpy.double(differing.sum()) / numpy.double(either_nonzero.sum())
mikekestemont/PyStyl
[ 55, 13, 55, 6, 1409592784 ]
def setUp(self):
    """Build a password-grant request and mocked validator for each test."""
    client = mock.MagicMock()
    client.user.return_value = 'mocked user'
    request = Request('http://a.b/path')
    request.grant_type = 'password'
    request.username = 'john'
    request.password = 'doe'
    request.client = client
    request.scopes = ('mocked', 'scopes')
    self.request = request
    self.mock_validator = mock.MagicMock()
    self.auth = ResourceOwnerPasswordCredentialsGrant(
        request_validator=self.mock_validator)
idan/oauthlib
[ 2555, 477, 2555, 82, 1321744131 ]