query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Calculates the output size of the last conv layer.
def _get_conv_out(self, shape) -> int: conv_out = self.conv(torch.zeros(1, *shape)) return int(np.prod(conv_out.size()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_out...
[ "0.72758055", "0.7067759", "0.70510364", "0.7000888", "0.68895096", "0.6863038", "0.6814944", "0.68022835", "0.67512447", "0.67106795", "0.6696877", "0.6696877", "0.66832334", "0.66832334", "0.6663041", "0.6635598", "0.6611119", "0.6571467", "0.6549242", "0.65476173", "0.6491...
0.6925207
5
Forward pass through network. Calculates the Q using the value and advantage.
def forward(self, input_x): adv, val = self.adv_val(input_x) return val + (adv - adv.mean(dim=1, keepdim=True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def a...
[ "0.6747889", "0.62147856", "0.6128476", "0.60471773", "0.6019139", "0.60117346", "0.6002128", "0.5985154", "0.5965721", "0.59572846", "0.5952152", "0.59481573", "0.5945076", "0.5932837", "0.5910055", "0.5899582", "0.5890279", "0.58570886", "0.5830228", "0.5819873", "0.5801153...
0.56493485
43
Gets the advantage and value by passing out of the base network through the value and advantage heads.
def adv_val(self, input_x): float_x = input_x.float() base_out = self.conv(input_x).view(float_x.size()[0], -1) return self.head_adv(base_out), self.head_val(base_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bias(self):", "def __get_net_probs(self):\n return np.array([node.value for node in self.net]).reshape(5,5)", "def net_output(self):\n result = self.gives()\n for k, v in self.needs().items():\n result[k] = result.get(k, 0) - v\n\n return result", "def forward(self,...
[ "0.5776241", "0.5750878", "0.5623462", "0.54842955", "0.5386926", "0.52231693", "0.52097505", "0.5113644", "0.5109669", "0.5086685", "0.5073465", "0.50708735", "0.5056254", "0.5033763", "0.5021825", "0.5015476", "0.50106674", "0.5005514", "0.4995753", "0.49826834", "0.4966393...
0.5197751
7
Calculates the output size of the last conv layer.
def _get_conv_out(self, shape) -> int: conv_out = self.conv(torch.zeros(1, *shape)) return int(np.prod(conv_out.size()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_out...
[ "0.72754544", "0.70678294", "0.70496345", "0.7000903", "0.68878484", "0.6862838", "0.6813435", "0.6802265", "0.67503273", "0.6709293", "0.6696119", "0.6696119", "0.66828674", "0.66828674", "0.66640866", "0.6636642", "0.66120887", "0.65727687", "0.65476394", "0.6546968", "0.64...
0.6926029
6
Forward pass through network.
def forward(self, input_x) -> Tensor: conv_out = self.conv(input_x).view(input_x.size()[0], -1) return self.head(conv_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward_pass(self):", "def...
[ "0.7486405", "0.7486405", "0.72931826", "0.72931826", "0.72931826", "0.72568643", "0.71754724", "0.70931304", "0.70689535", "0.7054133", "0.69913656", "0.6969786", "0.69356275", "0.69356275", "0.69356275", "0.6921335", "0.6920985", "0.6747466", "0.6711534", "0.67010707", "0.6...
0.0
-1
Initializes or resets the paramseter of the layer.
def reset_parameters(self) -> None: std = math.sqrt(3 / self.in_features) self.weight.data.uniform_(-std, std) self.bias.data.uniform_(-std, std)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def reset_parameters(self):\n init_method = getattr(init, self.initial...
[ "0.75554377", "0.7497733", "0.74958974", "0.73909175", "0.73410285", "0.7286859", "0.7208283", "0.71635944", "0.7134587", "0.71306086", "0.71203345", "0.71100825", "0.71006113", "0.70917517", "0.7006945", "0.69947183", "0.69425595", "0.69156086", "0.6810136", "0.6797556", "0....
0.6573624
38
Forward pass of the layer.
def forward(self, input_x: Tensor) -> Tensor: self.epsilon_weight.normal_() bias = self.bias if bias is not None: self.epsilon_bias.normal_() bias = bias + self.sigma_bias * self.epsilon_bias.data noisy_weights = self.sigma_weight * self.epsilon_weight.data + self.weight return F.linear(input_x, noisy_weights, bias)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n return self.layers(x)", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def __feed_forward(self, X):\n # go over all layers\n for layer in self.__layers:\n X = layer.compute_act(X)\n\n return X", "def f...
[ "0.7267547", "0.7236484", "0.72101915", "0.72012955", "0.7173363", "0.7161442", "0.71286094", "0.71286094", "0.71286094", "0.7043126", "0.70097", "0.6996241", "0.69847035", "0.69749874", "0.6956434", "0.6956434", "0.694666", "0.694666", "0.6942731", "0.6934393", "0.69069153",...
0.0
-1
Takes in a distribution and actions and returns log prob of actions under the distribution.
def get_log_prob(self, pi: Categorical, actions: Tensor): return pi.log_prob(actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n ...
[ "0.78015953", "0.76985294", "0.74659884", "0.7390132", "0.7234296", "0.718054", "0.71776885", "0.71425647", "0.7029758", "0.694614", "0.6867766", "0.67514867", "0.67146116", "0.6675534", "0.66467613", "0.6508409", "0.647576", "0.6389438", "0.6382274", "0.63791734", "0.6377181...
0.72526956
4
Takes in a distribution and actions and returns log prob of actions under the distribution.
def get_log_prob(self, pi: Normal, actions: Tensor): return pi.log_prob(actions).sum(axis=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_log_prob(self, states, actions):\n dist, _ = self.get_dist_and_mode(states)\n log_probs = dist.log_prob(actions)\n log_probs = tf.expand_dims(log_probs, -1) # To avoid broadcasting\n return log_probs", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n ...
[ "0.78019595", "0.7698052", "0.74672115", "0.7391153", "0.7252926", "0.71811974", "0.71788377", "0.71434426", "0.7029142", "0.69469213", "0.686866", "0.67506903", "0.6712733", "0.6674502", "0.664789", "0.6507693", "0.6476692", "0.6389269", "0.63830537", "0.63797027", "0.637702...
0.72346884
5
Optimizes the distribution of allocations for a set of stock symbols.
def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \ syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False): # Read in adjusted closing prices for given symbols, date range dates = pd.date_range(sd, ed) prices_all = get_data(syms, dates) # automatically adds SPY prices = prices_all[syms] # only portfolio symbols prices_SPY = prices_all['SPY'] # only SPY, for comparison later # find the allocations for the optimal portfolio #1 provide an initial guess for x allocs = np.ones(len(syms))/len(syms) #2 Provide constraints to the optimizer bounds = [(0,1) for i in syms] constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) }) #3 call the optimizer res = spo.minimize(get_sharpe_ratio, allocs, args=prices, bounds = bounds, constraints=constraints) allocs = res.x # Get daily portfolio value port_val = get_portfolio_value(prices, allocs, 1.0) # Get portfolio statistics cr, adr, sddr, sr = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252) # Compare daily portfolio value with SPY using a normalized plot if gen_plot: # add code to plot here df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1) plot_normalized_data(df_temp) return allocs, cr, adr, sddr, sr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc...
[ "0.6063045", "0.53763187", "0.5290051", "0.52126926", "0.5207097", "0.5145416", "0.50932026", "0.50518227", "0.50404334", "0.49763635", "0.49679303", "0.49524197", "0.48846778", "0.48805937", "0.48631468", "0.4856346", "0.4832151", "0.48185173", "0.48169646", "0.48079696", "0...
0.55457914
1
Given a starting value and prices of stocks in portfolio with allocations return the portfolio value over time.
def get_portfolio_value(prices, allocs, start_val): normed = prices/prices.iloc[0] alloced = np.multiply(allocs, normed) pos_vals = alloced * start_val port_val = pos_vals.sum(axis=1) return port_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n...
[ "0.7288363", "0.6751513", "0.649566", "0.639736", "0.6342396", "0.58377224", "0.5771629", "0.57363266", "0.5704653", "0.5654795", "0.56540567", "0.564203", "0.5640176", "0.5620506", "0.55888516", "0.55628633", "0.5536075", "0.553035", "0.55150396", "0.55067617", "0.5476467", ...
0.7756678
0
Calculate sharpe ratio for minimizer.
def get_sharpe_ratio(allocs, prices): port_val = get_portfolio_value(prices, allocs, start_val=1.0) sharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3] return -sharpe_ratio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sharpe_ratio(self, r_f):\n return (\n self.cumulative_returns().last('1D').iat[0] - r_f\n ) / self.cumulative_returns().std()", "def sharpe_ratio(r1, r2, rf, o1, o2, cov):\n def sr(x):\n w1 = x[0]\n w2 = 1 - w1\n\n Rp = w1 * r1 + w2 * r2\n STDEVp = math...
[ "0.6567362", "0.6437101", "0.63409936", "0.6130169", "0.6042319", "0.60273135", "0.5931197", "0.59244883", "0.5785139", "0.5736135", "0.57228225", "0.5692179", "0.5680298", "0.5669893", "0.5615704", "0.5592618", "0.55892605", "0.55826575", "0.55295515", "0.5504721", "0.549964...
0.67490816
0
Plot stock prices with a custom title and meaningful axis labels.
def plot_normalized_data(df, title="Daily portfolio value and SPY", xlabel="Date", ylabel="Normalized price"): plot_data(df/df.iloc[0], title, xlabel, ylabel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_data(df, title=\"normalized Stock prices\", ylabel=\"Price\", xlabel=\"Date\"):\n plt.clf()\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.savefig('files/output/' + title + '.png')", "def plot_data(df, title=\"normalized Stock prices\", ylab...
[ "0.731029", "0.7286386", "0.68272114", "0.6824816", "0.6797167", "0.67936105", "0.6715498", "0.6569525", "0.64421463", "0.6384674", "0.63568515", "0.634274", "0.63376296", "0.6326378", "0.62356347", "0.62333536", "0.6199969", "0.6193358", "0.6185668", "0.618083", "0.61708236"...
0.59642863
33
Creates a SnowflakeSource object.
def __init__( self, name: Optional[str] = None, database: Optional[str] = None, schema: Optional[str] = None, table: Optional[str] = None, query: Optional[str] = None, event_timestamp_column: Optional[str] = "", created_timestamp_column: Optional[str] = "", field_mapping: Optional[Dict[str, str]] = None, date_partition_column: Optional[str] = "", ): if table is None and query is None: raise ValueError('No "table" argument provided.') # If no name, use the table as the default name _name = name if not _name: if table: _name = table else: raise DataSourceNoNameException() super().__init__( _name, event_timestamp_column, created_timestamp_column, field_mapping, date_partition_column, ) # The default Snowflake schema is named "PUBLIC". _schema = "PUBLIC" if (database and table and not schema) else schema self.snowflake_options = SnowflakeOptions( database=database, schema=_schema, table=table, query=query )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\...
[ "0.70331", "0.5826544", "0.571125", "0.56064165", "0.5491815", "0.5480783", "0.5440601", "0.54300314", "0.54277056", "0.5396629", "0.5375585", "0.5310123", "0.5300051", "0.5264698", "0.52542096", "0.5248735", "0.5194064", "0.5170257", "0.51294655", "0.5129411", "0.5090866", ...
0.5692184
3
Creates a SnowflakeSource from a protobuf representation of a SnowflakeSource.
def from_proto(data_source: DataSourceProto): return SnowflakeSource( field_mapping=dict(data_source.field_mapping), database=data_source.snowflake_options.database, schema=data_source.snowflake_options.schema, table=data_source.snowflake_options.table, event_timestamp_column=data_source.event_timestamp_column, created_timestamp_column=data_source.created_timestamp_column, date_partition_column=data_source.date_partition_column, query=data_source.snowflake_options.query, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n ...
[ "0.69648314", "0.6726757", "0.62188435", "0.6085031", "0.54869676", "0.53810257", "0.5301759", "0.52652085", "0.5256398", "0.52558035", "0.52312374", "0.5175181", "0.51523924", "0.51177007", "0.5046059", "0.5044137", "0.50330454", "0.50251067", "0.5004873", "0.49959263", "0.4...
0.8149464
0
Returns the database of this snowflake source.
def database(self): return self.snowflake_options.database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database(self):\n return self.database", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> pulumi.Input[str]:\n ...
[ "0.7446689", "0.7409722", "0.7409722", "0.7361835", "0.73268086", "0.70755297", "0.70755297", "0.70755297", "0.70755297", "0.7035698", "0.70008326", "0.70008326", "0.6984017", "0.69791114", "0.6932914", "0.69217163", "0.6915366", "0.6904952", "0.6886391", "0.68326914", "0.683...
0.85056674
0
Returns the schema of this snowflake source.
def schema(self): return self.snowflake_options.schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schema(self):\n return self.table_info.schema", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and di...
[ "0.7523006", "0.7474272", "0.731446", "0.72799426", "0.7248296", "0.72096664", "0.72077894", "0.7188622", "0.71739715", "0.71583384", "0.7152522", "0.71101445", "0.6935228", "0.68277", "0.6764784", "0.67362326", "0.67253757", "0.6720299", "0.67055655", "0.6627854", "0.6607061...
0.8667721
0
Returns the table of this snowflake source.
def table(self): return self.snowflake_options.table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTable(self):\n return self.table", "def _get_table(self):\n\t\treturn self._table", "def get_tablename(self):\n return self.ds_table", "def getTable(self):\n\n raise NotImplementedError", "def table(self):\n if not self.exists:\n return None\n return sel...
[ "0.7333813", "0.72519577", "0.7144599", "0.7141145", "0.6996592", "0.6968488", "0.6951184", "0.6948223", "0.69295055", "0.69295055", "0.68013984", "0.6795885", "0.674512", "0.66221476", "0.65775824", "0.6573351", "0.6545", "0.65317136", "0.6509364", "0.6500206", "0.6481945", ...
0.8042992
0
Returns the snowflake options of this snowflake source.
def query(self): return self.snowflake_options.query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_options(self):\n\t\treturn self.options", "def options(self):\r\n return self._options", "def options(self):\n return self.__options", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def _get_options(self):\n return self.options", "def option...
[ "0.7124274", "0.7121195", "0.70052445", "0.7004528", "0.70010746", "0.6985663", "0.6985663", "0.6985663", "0.6985663", "0.6985663", "0.67881876", "0.6759905", "0.6664762", "0.65246856", "0.64745015", "0.64547145", "0.64213645", "0.63637906", "0.6305649", "0.62842727", "0.6252...
0.59917724
30
Converts a SnowflakeSource object to its protobuf representation.
def to_proto(self) -> DataSourceProto: data_source_proto = DataSourceProto( type=DataSourceProto.BATCH_SNOWFLAKE, field_mapping=self.field_mapping, snowflake_options=self.snowflake_options.to_proto(), ) data_source_proto.event_timestamp_column = self.event_timestamp_column data_source_proto.created_timestamp_column = self.created_timestamp_column data_source_proto.date_partition_column = self.date_partition_column return data_source_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\...
[ "0.71726424", "0.6312265", "0.57572246", "0.56970835", "0.5353327", "0.5324823", "0.52810025", "0.5244373", "0.51959056", "0.51375407", "0.51266086", "0.51017046", "0.50727355", "0.50727355", "0.5006445", "0.5004341", "0.49543244", "0.48841015", "0.48770934", "0.48703986", "0...
0.7205786
0
Returns a string that can directly be used to reference this table in SQL.
def get_table_query_string(self) -> str: if self.database and self.table: return f'"{self.database}"."{self.schema}"."{self.table}"' elif self.table: return f'"{self.table}"' else: return f"({self.query})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_name() -> str:\n pass", "def __repr__(self):\n cls_name = self.__class__.__name__\n conn_name = str(self._connection)\n tbl_name = self._table\n return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)", "def __repr__(self):\n cls_name = self.__cl...
[ "0.72242546", "0.7145293", "0.7145293", "0.71200347", "0.70594376", "0.70300525", "0.6961341", "0.6907269", "0.6881874", "0.6875941", "0.68361664", "0.68174165", "0.68159", "0.6803258", "0.67897487", "0.67895746", "0.67832047", "0.6771481", "0.6731299", "0.67075336", "0.66725...
0.76032066
0
Returns a mapping of column names to types for this snowflake source.
def get_table_column_names_and_types( self, config: RepoConfig ) -> Iterable[Tuple[str, str]]: from feast.infra.offline_stores.snowflake import SnowflakeOfflineStoreConfig from feast.infra.utils.snowflake_utils import ( execute_snowflake_statement, get_snowflake_conn, ) assert isinstance(config.offline_store, SnowflakeOfflineStoreConfig) snowflake_conn = get_snowflake_conn(config.offline_store) if self.database and self.table: query = f'SELECT * FROM "{self.database}"."{self.schema}"."{self.table}" LIMIT 1' elif self.table: query = f'SELECT * FROM "{self.table}" LIMIT 1' else: query = f"SELECT * FROM ({self.query}) LIMIT 1" result = execute_snowflake_statement(snowflake_conn, query).fetch_pandas_all() if not result.empty: metadata = result.dtypes.apply(str) return list(zip(metadata.index, metadata)) else: raise ValueError("The following source:\n" + query + "\n ... is empty")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_column_types(self, table_name):\n\n curs = self.cursor()\n curs.execute('PRAGMA table_info(%s)' % table_name)\n\n types = {str(d[1].lower()): _TYPE_MAP[d[2].split()[0]] for d in curs.fetchall()}\n\n curs.close()\n\n return types", "def to_schema(cls):\n result = ...
[ "0.73652345", "0.6958067", "0.69160265", "0.6754187", "0.6642904", "0.66274935", "0.6622891", "0.6481625", "0.6360409", "0.63488346", "0.63274217", "0.6270515", "0.6258899", "0.6238819", "0.61439264", "0.6115824", "0.6091439", "0.6059644", "0.59835035", "0.59506655", "0.59282...
0.6247801
13
Returns the snowflake SQL query referenced by this source.
def query(self): return self._query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(self):\n return self.snowflake_options.query", "def sql_query(self):\n return self._project.sql_query", "def get_sql_connection(self):\n return self.sql", "def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'", "def get_query(self):\n c...
[ "0.7604513", "0.7548185", "0.644608", "0.636173", "0.62809396", "0.6275104", "0.6264084", "0.62564576", "0.6220409", "0.60543776", "0.59246194", "0.5899583", "0.58373946", "0.57994354", "0.57862854", "0.5758391", "0.57509285", "0.5746539", "0.5732655", "0.56832737", "0.567286...
0.57847404
17
Sets the snowflake SQL query referenced by this source.
def query(self, query): self._query = query
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sql_query(self, new_query):\n self._project.sql_query = new_query", "def set_sa_query(self, query):\n self.sa_query = query", "def query(self):\n return self.snowflake_options.query", "def set_query(self, query):\n query = pylastica.query.Query.create(query)\n data = qu...
[ "0.72115916", "0.6802297", "0.6187164", "0.6164766", "0.60703343", "0.6025312", "0.5980297", "0.5747391", "0.57371366", "0.57371366", "0.57371366", "0.5711442", "0.55724436", "0.55319154", "0.5473519", "0.54341775", "0.54232275", "0.5422666", "0.5417376", "0.5378323", "0.5360...
0.59033763
7
Returns the database name of this snowflake table.
def database(self): return self._database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def getDatabaseName(self):\n raise Not...
[ "0.8153906", "0.79219604", "0.7853548", "0.77648026", "0.7695105", "0.76031363", "0.7507584", "0.74824643", "0.74470735", "0.7436588", "0.7392899", "0.73886544", "0.7282045", "0.7271273", "0.7229642", "0.72007596", "0.7197087", "0.7124021", "0.7117816", "0.7098378", "0.700156...
0.5763545
71
Sets the database ref of this snowflake table.
def database(self, database): self._database = database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_ref(self, table_ref):\n self._table_ref = table_ref", "def set_db(self, db):\n self._db = db", "def db_name(self, db_name):\n\n self._db_name = db_name", "def setDB(dbname):\n global DBNAME\n DBNAME = dbname", "def change(cls, db):\n cls.configs['db'] = db\n\n ...
[ "0.69317377", "0.6532754", "0.6392685", "0.5966977", "0.5806949", "0.5773162", "0.5773162", "0.57592016", "0.56473815", "0.56473684", "0.5647304", "0.55943197", "0.5568484", "0.54553974", "0.54033595", "0.53913724", "0.53913724", "0.5374103", "0.5366539", "0.5345408", "0.5270...
0.63701606
3
Returns the schema name of this snowflake table.
def schema(self): return self._schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schema(self):\n return self.table_info.schema", "def get_table_name(self):\n return self._table", "def schema(self):\n return self.snowflake_options.schema", "def schema_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schema_name\")", "def table(self):\n ...
[ "0.7543852", "0.7407979", "0.73744506", "0.72745496", "0.7205025", "0.7046401", "0.70127237", "0.7001271", "0.6922719", "0.6922719", "0.6922719", "0.69220996", "0.6912203", "0.68869877", "0.6849201", "0.6825561", "0.6613302", "0.6605695", "0.66030455", "0.6539277", "0.6509380...
0.5733677
56
Sets the schema of this snowflake table.
def schema(self, schema): self._schema = schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_schema(self, schema):\r\n self.__schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def schema(self, schema):\n\n self._schema = schema", "def set_schema(self, schema, set_num_columns=True):\n ...
[ "0.7617374", "0.7328084", "0.7328084", "0.7328084", "0.7075173", "0.6481325", "0.63790625", "0.63008064", "0.6234697", "0.61715263", "0.61460024", "0.61385447", "0.6092166", "0.60661066", "0.60366577", "0.6025765", "0.5955309", "0.59028065", "0.5798888", "0.5796591", "0.57555...
0.72960466
4
Returns the table name of this snowflake table.
def table(self): return self._table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table_name(self):\n return self._table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def table_name() -> str:\n pass", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(se...
[ "0.872357", "0.8587216", "0.8301262", "0.82909656", "0.82909656", "0.82909656", "0.82124346", "0.8067328", "0.80299294", "0.8022681", "0.79793465", "0.758069", "0.74290794", "0.7428557", "0.7422888", "0.7326025", "0.723662", "0.71609664", "0.71264887", "0.7048128", "0.7046885...
0.64299667
48
Sets the table ref of this snowflake table.
def table(self, table): self._table = table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_ref(self, table_ref):\n self._table_ref = table_ref", "def setTable(self, tabledef):\n if isinstance(tabledef, str):\n self._table = Table.Get ( tabledef )\n elif isinstance(tabledef, Table):\n self._table = tabledef\n else:\n raise ValueErro...
[ "0.840069", "0.7259923", "0.68128806", "0.6655275", "0.6641437", "0.62347263", "0.61731863", "0.6130862", "0.60809314", "0.6049684", "0.60375315", "0.5879953", "0.58563435", "0.5758894", "0.5728879", "0.5722117", "0.5694347", "0.5653783", "0.5653783", "0.5530361", "0.5422365"...
0.6948597
3
Creates a SnowflakeOptions from a protobuf representation of a snowflake option.
def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions): snowflake_options = cls( database=snowflake_options_proto.database, schema=snowflake_options_proto.schema, table=snowflake_options_proto.table, query=snowflake_options_proto.query, ) return snowflake_options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "...
[ "0.72627205", "0.67112297", "0.6254082", "0.5537946", "0.5431298", "0.54273605", "0.5401342", "0.53541476", "0.53435814", "0.5294465", "0.523745", "0.5237094", "0.5193706", "0.5100491", "0.5098164", "0.50886667", "0.50617826", "0.5013392", "0.50065786", "0.49748728", "0.49595...
0.8055073
0
Converts an SnowflakeOptionsProto object to its protobuf representation.
def to_proto(self) -> DataSourceProto.SnowflakeOptions: snowflake_options_proto = DataSourceProto.SnowflakeOptions( database=self.database, schema=self.schema, table=self.table, query=self.query, ) return snowflake_options_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto....
[ "0.7185293", "0.6591346", "0.63469636", "0.6190279", "0.609588", "0.6037695", "0.59587693", "0.57750213", "0.574301", "0.5736975", "0.5731854", "0.5646835", "0.5621101", "0.55236673", "0.5520179", "0.54465526", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.537267...
0.7964391
0
Given a dict of lang>names, return a default one
def primary_name(names): langs = names.keys() if 'en' in langs: return names['en'] return names[langs[0]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def localizedWithFallback(field, allowEmpty=True):\n for lang in [''] + FallbackLanguages():\n t = field[lang]\n if allowEmpty:\n if isinstance(t, basestring):\n return t\n elif t:\n return t\n return u\"\"", "def fallback_trans(x):\r\n t = _(x)\...
[ "0.60933506", "0.60726446", "0.6008112", "0.5990976", "0.59868455", "0.5985267", "0.5922496", "0.590694", "0.58639705", "0.5810866", "0.5780663", "0.5768472", "0.57512575", "0.5717193", "0.5704346", "0.5672763", "0.5649485", "0.56342536", "0.563318", "0.5624843", "0.5548256",...
0.6597497
0
Initializes an instance of the InstagramBot class.
def __init__(self, username = None, password = None): self.username = config['AUTH']['USERNAME'] self.password = config['AUTH']['PASSWORD'] self.login = config['URL']['LOGIN'] self.nav_url = config['URL']['NAV'] self.tag_url = config['URL']['TAGS'] self.direct_url = config['URL']['DM'] self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER']) self.stay_logged = False self.api = InstagramAPI(self.username, self.password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\r\n self._instagram_api = InstagramAPI(mongo_api=self._mongo_api)\r\n self._inst_run()", "def __init__(self, bot=BNBot):\n self.bot = bot", "def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeErr...
[ "0.7040264", "0.6685926", "0.66698956", "0.6649861", "0.6649861", "0.6602354", "0.6402114", "0.6402114", "0.62316155", "0.614077", "0.6138122", "0.60492367", "0.60134923", "0.5993596", "0.59222096", "0.59110945", "0.5900702", "0.58524567", "0.5842189", "0.5841665", "0.5826000...
0.75979745
0
Method allows user to log in through the web
def login(self): self.driver.get(self.login) PAUSE = 2 time.sleep(PAUSE) user_input = self.driver.find_element_by_name('username') pass_input = self.driver.find_element_by_name('password') login_button = self.driver.find_elements_by_xpath("//div[contains(text(),'Log In')]")[0] user_input.send_keys(self.username) pass_input.send_keys(self.password) login_button.click() time.sleep(PAUSE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login(self):", "def login():", "def login():", "def login(self):\n\t\treturn", "def log_in(self):\n\n # Get login page.\n self.get_endpoint(endpoint=self.config['paths']['login'])\n\n # Post log-in data.\n email_form = self.browser.find_element_by_xpath(\"//input[@id='email'...
[ "0.80300766", "0.795173", "0.795173", "0.78314114", "0.76917493", "0.7586301", "0.7556712", "0.7542856", "0.75261706", "0.7460586", "0.74421704", "0.7429035", "0.74237245", "0.7412092", "0.7370598", "0.73602885", "0.7355475", "0.7346966", "0.7308393", "0.73024786", "0.7280960...
0.7128185
29
Method allows users to navigate through a user's profile page
def nav_user(self, user): self.driver.get(self.nav_url.format(user))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profile_url(self):\n return reverse(\"auth_profile\", args=[self.user.username])", "def user_view(cls, user, profile):\r\n pass", "def profile():\n if g.user:\n return render_template('profile.html', user=g.user)\n return redirect(url_for('login'))", "def user_view(cls, user, p...
[ "0.72839487", "0.7275138", "0.72487134", "0.72350943", "0.7165833", "0.70807487", "0.7042888", "0.7016116", "0.69886965", "0.696752", "0.6904682", "0.6819126", "0.6698022", "0.6682588", "0.6592525", "0.6578378", "0.6544088", "0.6506118", "0.6474697", "0.64575464", "0.6455914"...
0.7264308
2
Method goes to posts with a specific tag
def search_tag(self, tag): self.driver.get(self.tag_url.format(tag))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_tag(tag, page):\n per_page = current_app.config['POSTS_PER_PAGE']\n tag = Tag.query.filter_by(name=tag).first() or abort(404)\n posts = tag.posts.order_by(Post.id.desc())\n if not session.get('logged_in'): posts = posts.filter_by(visible=True)\n items = posts.limit(per_page).offset((page - ...
[ "0.66354024", "0.6565476", "0.649461", "0.6490434", "0.6155304", "0.61414886", "0.6071974", "0.6021756", "0.59992045", "0.5903175", "0.5880634", "0.5843738", "0.58239686", "0.5765947", "0.57510775", "0.5750838", "0.5728099", "0.57190305", "0.56963086", "0.56737995", "0.566945...
0.61920655
4
Method allows bot to automatically type and send dm to user
def direct_message(self, user, msg, num): PAUSE = 1 logging.info('Send message {} to {}'.format(msg,user)) self.driver.get(self.direct_url) self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input')[0].send_keys(user) time.sleep(PAUSE) self.driver.find_elements_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')[0].click() #Edge case to get rid of notification time.sleep(PAUSE) self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button')[0].click() self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button')[0].click() time.sleep(PAUSE) # The message will be placed and sent self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')[0].send_keys(msg) time.sleep(PAUSE) self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button')[0].click() # Special feature involving reacting with heart for x in range(num): self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/button[2]')[0].click() time.sleep(PAUSE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send_dm(user, message, embed=None):\n\n if type(user) is discord.User or type(user) is discord.Member:\n if user.dm_channel is None:\n await user.create_dm()\n\n await user.dm_channel.send(message, embed=embed)", "async def _dm(self, ctx, user: str, *, message: str = None):\...
[ "0.6927895", "0.67939085", "0.67668486", "0.6541166", "0.63509613", "0.62982935", "0.629352", "0.61479294", "0.61420435", "0.6082516", "0.60807395", "0.6071102", "0.59702575", "0.59686375", "0.5959844", "0.59589547", "0.5958455", "0.5929721", "0.58878124", "0.5874848", "0.587...
0.0
-1
Method finds the button to follow or unfollow users. It filters the follow elements to find buttons. The default method looks for only follow buttons.
def find_buttons(self, button_txt): button = self.driver.find_elements_by_xpath("//*[text()='{}']".format(button_txt)) return button
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click_to_follow(browser):\n # browser.find_elements_by_css_selector(\"button\")\n # my_button_xpath: str = \"//button\"\n # browser.find_elements_by_xpath(my_button_xpath)\n\n # <button>\n my_follow_btn_xpath: str = \"//button[contains(text(), 'Follow')][not(contains(text(), 'Following'))]\"\n ...
[ "0.7135644", "0.66539246", "0.6400043", "0.63385546", "0.6108239", "0.598225", "0.5863128", "0.57678187", "0.5675595", "0.56031513", "0.5592132", "0.556185", "0.55301255", "0.5489775", "0.5486279", "0.54782057", "0.547135", "0.5470326", "0.54567957", "0.54306024", "0.5411247"...
0.51461345
47
Method likes a specific number of a user's posts.
def latest_likes(self, user, number_posts, likes): WAIT = 1 if likes: action = 'Like' else: action = 'Unlike' self.nav_user(user) image_container = [] image_container.extend(self.driver.find_elements_by_class_name('_9AhH0')) for image in image_container[:number_posts]: image.click() time.sleep(WAIT) try: self.driver.find_element_by_xpath("//*[@aria-label='{}']".format(action).click()) except Exception as e: print(e) self.driver.find_elements_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]/button')[0].click() # clicks the heart symbol time.sleep(WAIT) self.driver.find_elements_by_xpath('/html/body/div[4]/div[3]/button')[0].click() #Makes sure to close out of current picture time.sleep(WAIT) # Tested users_list = [] def get_likes_list(self, username): """ Method gets a list of users who like a post """ api = self.api api.searchUsername(username) result = api.LastJson username_id = result['user']['pk'] #Gets the user ID user_posts = api.getUserFeed(username_id) # gets the user feed result = api.LastJson media_id = result['items'][0]['id'] #gets the most recent post api.getMediaLikers(media_id) #gets users who liked users = api.LastJson('users') for user in users: #appends the users to the list users_list.append({'pk':user['pk'], 'username':user['username']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def like_user_posts(self, user:str, n_posts:int, like:bool=True):\n\n action = 'Like' if like else 'Unlike'\n\n self._nav_user(user)\n\n imgs = []\n elements = self._find_element(EC.presence_of_all_elements_located((By.CLASS_NAME, '_9AhH0')))\n imgs.extend(elements)\n\n fo...
[ "0.7468292", "0.7368493", "0.7155223", "0.70347476", "0.6748949", "0.67359304", "0.6665075", "0.6645786", "0.6584126", "0.65746444", "0.6570663", "0.64926827", "0.64644575", "0.64383584", "0.63599944", "0.6310028", "0.6255511", "0.62522185", "0.6192657", "0.6181631", "0.61792...
0.67711866
4
Method gets a list of users who like a post
def get_likes_list(self, username): api = self.api api.searchUsername(username) result = api.LastJson username_id = result['user']['pk'] #Gets the user ID user_posts = api.getUserFeed(username_id) # gets the user feed result = api.LastJson media_id = result['items'][0]['id'] #gets the most recent post api.getMediaLikers(media_id) #gets users who liked users = api.LastJson('users') for user in users: #appends the users to the list users_list.append({'pk':user['pk'], 'username':user['username']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n ...
[ "0.80609584", "0.7395297", "0.69443375", "0.6895322", "0.68074024", "0.67923427", "0.6753025", "0.6566023", "0.6552493", "0.63974077", "0.639381", "0.6380745", "0.6334056", "0.6326958", "0.6302191", "0.62897587", "0.62885493", "0.62881505", "0.6275099", "0.6275099", "0.623825...
0.76121527
1
Initialize a ``TFSPredictor``. See ``sagemaker.RealTimePredictor`` for more info about parameters.
def __init__(self, endpoint_name, sagemaker_session=None, serializer=json_serializer, deserializer=json_deserializer, content_type=None, model_name=None, model_version=None): super(Predictor, self).__init__(endpoint_name, sagemaker_session, serializer, deserializer, content_type) attributes = [] if model_name: attributes.append('tfs-model-name={}'.format(model_name)) if model_version: attributes.append('tfs-model-version={}'.format(model_version)) self._model_attributes = ','.join(attributes) if attributes else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_predictor(self):\n try: \n predict_fn = tf.contrib.predictor.from_saved_model(self.saved_path)\n except OSError as err: \n print(f\"OSError: {err}\")\n self._predict_fn = predict_fn", "def init_tf(FLAGS):\n gpus = tf.config.experimental.list_physical_devic...
[ "0.6117408", "0.60252637", "0.5911408", "0.5851764", "0.5833206", "0.57801265", "0.5762533", "0.5741081", "0.57022196", "0.5699099", "0.569854", "0.5625027", "0.55992603", "0.5584619", "0.55735224", "0.55720514", "0.556565", "0.55640054", "0.55431277", "0.5542662", "0.5516902...
0.5464787
24
Load sample images for image manipulation. Loads both, ``china`` and ``flower``. Returns
def load_sample_images(): # Try to import imread from scipy. We do this lazily here to prevent # this module from depending on PIL. try: try: from scipy.misc import imread except ImportError: from scipy.misc.pilutil import imread except ImportError: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") ROOT_Dir = os.getcwd() module_path = os.path.join(ROOT_Dir, "images") with open(os.path.join(module_path, 'README.txt')) as f: descr = f.read() filenames = [os.path.join(module_path, filename) for filename in os.listdir(module_path) if filename.endswith(".jpg")] # Load image data for each image in the source folder. images = [imread(filename) for filename in filenames] return Bunch(images=images, filenames=filenames, DESCR=descr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_wit...
[ "0.6988628", "0.6685474", "0.66373193", "0.66370726", "0.6426989", "0.6391403", "0.63717645", "0.62725484", "0.62609285", "0.62502795", "0.6230983", "0.62293315", "0.6186863", "0.6182871", "0.61326575", "0.6113", "0.6112325", "0.6069663", "0.6068178", "0.6067434", "0.6062822"...
0.7011758
0
Load the numpy array of a single sample image
def load_sample_image(image_name): images = load_sample_images() index = None for i, filename in enumerate(images.filenames): if filename.endswith(image_name): index = i break if index is None: raise AttributeError("Cannot find sample image: %s" % image_name) return images.images[index]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadImage(img_path):\n\n img = Image.open(img_path)\n np_img = np.array(img)\n return (np_img)", "def load_img(path: str) -> np.ndarray:\n \n return np.array(Image.open(path))", "def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_fil...
[ "0.6871681", "0.6734598", "0.66831964", "0.6642155", "0.66406447", "0.6627881", "0.65667224", "0.65525585", "0.65401435", "0.6525812", "0.6514177", "0.6475732", "0.645297", "0.63903517", "0.63789004", "0.63789004", "0.6344578", "0.6340352", "0.63352865", "0.6329638", "0.63233...
0.625068
28
Recreate the (compressed) image from the code book & labels
def recreate_image(codebook, labels, w, h): d = codebook.shape[1] image = np.zeros((w, h, d)) label_idx = 0 for i in range(w): for j in range(h): image[i][j] = codebook[labels[label_idx]] label_idx += 1 return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "de...
[ "0.79495394", "0.7899799", "0.7884121", "0.7781928", "0.7687692", "0.60020334", "0.5997393", "0.5904447", "0.58416754", "0.5806961", "0.5723574", "0.5705346", "0.56988686", "0.56837463", "0.5680749", "0.5661601", "0.5639231", "0.55816334", "0.55626464", "0.555133", "0.5549232...
0.79031765
1
Custom easyconfig parameters for CrayPEToolchain
def extra_options(): extra_vars = { 'PrgEnv': [None, 'PrgEnv module to load, e.g., cray to load PrgEnv-cray, or None for automatic determination', CUSTOM], 'PrgEnv_load': [True, 'Load the PrgEnv module (if True) or just set the corresponding environment variable (if False)', CUSTOM], 'PrgEnv_family': [None, 'Declare to be a member of the PrgEnv family (if \'PrgEnv\), of the cpeToolchain family (if \'cpeToolchain\') or manually unload all known PrgEnv and cpe* modules (if None, needed when LMOD is not used)', CUSTOM], 'CPE_compiler': [None, 'Versionless compiler module to load, or None for automatic determination', CUSTOM], 'CPE_version': [None, 'Version of the CPE, if different from the version of the module', CUSTOM], 'CPE_load': [ 'first', 'First load the cpe module (if \'first\'), after the PrgEnv module (if \'after\'), load it at the end (if \'last\'), or do not load the cpe module (if None)', CUSTOM], 'cray_targets': [[], 'Targetting modules to load', CUSTOM], #'optional_example_param': [None, "Example optional custom parameter", CUSTOM], } return Bundle.extra_options(extra_vars)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def config( **kwargs ):", "def config():", "def config():", "def configuration():", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def config(ctx):\n return", "def get_...
[ "0.68244946", "0.6493101", "0.6493101", "0.64695317", "0.63397986", "0.62136227", "0.6132952", "0.60887027", "0.6073328", "0.6053588", "0.60361594", "0.6020469", "0.60147214", "0.6013279", "0.5986166", "0.5982526", "0.5982526", "0.59803295", "0.5972104", "0.59715974", "0.5953...
0.5761126
51
Prepare build environment (skip loaded of dependencies).
def prepare_step(self, *args, **kwargs): kwargs['load_tc_deps_modules'] = False super(CrayPEToolchain, self).prepare_step(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_environment():\n global _ENV_SETUP_DONE\n if _ENV_SETUP_DONE:\n return\n _ENV_SETUP_DONE = True\n\n _configure_libraries()\n\n custom_module_path = os.environ.get(\"DETECTRON2_ENV_MODULE\")\n\n if custom_module_path:\n setup_custom_environment(custom_module_path)\n else...
[ "0.68191844", "0.679081", "0.6736225", "0.67010844", "0.6668821", "0.66451913", "0.66270363", "0.6605766", "0.6512834", "0.6408785", "0.6339571", "0.63157713", "0.6313925", "0.6302971", "0.6236211", "0.62336695", "0.62183577", "0.6171169", "0.6163672", "0.6096524", "0.6091713...
0.6293774
14
Generate load/swap statements for the module file
def make_module_dep(self): # # First do some processing of and checks on the parameters # # One value that we will need a lot if self.cfg['PrgEnv_family'] == None: PrgEnv_family = None else: PrgEnv_family = self.cfg['PrgEnv_family'].lower() # Illegal parameter combination: PrgEnv_family True and PrgEnv_load True. if PrgEnv_family == 'prgenv' and self.cfg['PrgEnv_load']: raise EasyBuildError('Setting PrgEnv_family to \'PrgEnv\' and PrgEnv_load to True is not a valid combination.') # Illegal parameter combination: PrgEnv_load False and CPE_load == 'after' if self.cfg['CPE_load'] == 'after' and not self.cfg['PrgEnv_load']: raise EasyBuildError('Setting CPE_load to \'after\' and PrgEnv_load to False is not a valid combination.') # Determine the PrgEnv module if self.cfg['PrgEnv'] is None: try: prgenv_name = MAP_TOOLCHAIN_PRGENV[self.cfg['name']] except: raise EasyBuildError('%s is not a supported toolchain, you\'ll need to specify both PrgEnv and CPE_compiler.', self.cfg['name']) else: prgenv_name = self.cfg['PrgEnv'] if not 'PrgEnv-' + prgenv_name in KNOWN_PRGENVS: print_warning('PrgEnv-%s is not a supported PrgEnv module. 
Are you sure it is not a typo?', prgenv_mod) prgenv_mod = 'PrgEnv-' + prgenv_name self.log.debug("Detected PrgEnv-module: %s (version may be added through dependencies)", prgenv_mod) # Determine the compiler module if self.cfg['CPE_compiler'] in [ None, 'auto']: try: compiler_mod = MAP_TOOLCHAIN_COMPILER[self.cfg['name']] except: raise EasyBuildError('%s is not a supported toolchain, you\'ll need to specify both PrgEnv and CPE_compiler.', self.cfg['name']) else: compiler_mod = self.cfg['CPE_compiler'] self.log.debug("Detected compiler module: %s (version may be added through dependencies", compiler_mod) # Cray wrappers module craype_mod = 'craype' # Determine the cpe module (if needed) if self.cfg['CPE_load'] != None: if self.cfg['CPE_version'] is None: cpe_load_version = self.cfg['version'] else: cpe_load_version = self.cfg['CPE_version'] self.log.debug("Loading CPE version: %s (may be overwritten by dependencies)", cpe_load_version) cpe_mod = 'cpe/' + cpe_load_version # Build a list of dependencies without version collect_deps = [] force_compiler = False force_craype = False for dep in self.toolchain.dependencies: mod_name = dep['full_mod_name'] if mod_name.startswith(prgenv_mod): prgenv_mod = mod_name elif mod_name.startswith(compiler_mod): compiler_mod = mod_name force_compiler = True elif mod_name.startswith(craype_mod): craype_mod = mod_name force_craype = True elif not (mod_name == 'cpe' or mod_name.startswith('cpe/')): collect_deps.append(mod_name) # # Now start generating the load commands and other stuff. # collect_statements = [''] # Will start with an empty line. # Do we need a family directive? if PrgEnv_family == 'prgenv': collect_statements = collect_statements + [ 'family(\'PrgEnv\')', '' ] elif PrgEnv_family == 'cpetoolchain': collect_statements = collect_statements + [ 'family(\'cpeToolchain\')', '' ] # Do we need to unload the PrgEnv modules? 
if PrgEnv_family == None and self.cfg['PrgEnv_load']: # Need to unload all PrgEnv-* modules except the one used by the module for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]: collect_statements.append(self.module_generator.unload_module(prgenv).strip()) collect_statements.append('') elif (PrgEnv_family == 'cpetoolchain' or PrgEnv_family == None) and not self.cfg['PrgEnv_load'] : # Need to unload all PrgEnv-* modules. for prgenv in KNOWN_PRGENVS: collect_statements.append(self.module_generator.unload_module(prgenv).strip()) collect_statements.append('') # Do we need to unload the cpe* modules? if PrgEnv_family == None: for cpe in [cpe for cpe in KNOWN_CPEMODS if not self.name.startswith(cpe)]: collect_statements.append(self.module_generator.unload_module(cpe).strip()) collect_statements.append('') # Set PE_ENV if no PrgEnv-* module is loaded. if not self.cfg['PrgEnv_load']: collect_statements.append(self.module_generator.set_environment('PE_ENV', prgenv_name.upper(), False).lstrip()) # Load the cpe module (if CPE_load is first) if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'first': collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip()) # load statement for selected PrgEnv module (only when not loaded yet) if self.cfg['PrgEnv_load']: collect_statements.append(self.module_generator.load_module(prgenv_mod, recursive_unload=False).lstrip()) # Load the cpe module (if CPE_load is after) if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'after': collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip()) # Prepare the load statements for the targeting modules for dep in self.cfg['cray_targets']: collect_statements.append(self.module_generator.load_module(dep, recursive_unload=False).lstrip()) # Load the selected compiler module, if not done through the dependencies or PrgEnv if (not 
self.cfg['PrgEnv_load']) or force_compiler: collect_statements.append(self.module_generator.load_module(compiler_mod, recursive_unload=False).lstrip()) # Load the Cray compiler wrapper module, if not done through the dependencies if (not self.cfg['PrgEnv_load']) or force_craype: collect_statements.append(self.module_generator.load_module(craype_mod, recursive_unload=False).lstrip()) # Now load the dependencies, using the full name and version if they are specified that way. for dep in collect_deps: collect_statements.append(self.module_generator.load_module(dep).lstrip()) # Load the cpe module (if CPE_load is last) if self.cfg['CPE_load'] != None and self.cfg['CPE_load'].lower() == 'last': collect_statements.append(self.module_generator.load_module(cpe_mod, recursive_unload=False).lstrip()) # Assemble all module unload/load statements. txt = '\n'.join(collect_statements) return txt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", self.filename)\n print(...
[ "0.62639445", "0.59777355", "0.5830551", "0.58056414", "0.57554215", "0.57490885", "0.5679191", "0.56280315", "0.55938977", "0.55713147", "0.55694884", "0.55570555", "0.55020046", "0.5476978", "0.5473481", "0.54465485", "0.54001313", "0.53518814", "0.531311", "0.5309852", "0....
0.0
-1
Plotting and scaling data
def exercise_6(path_to_data, path_to_figure): print('='*30) print('Running exercise_6()') #### YOUR CODE HERE #### walk_arr = numpy.loadtxt('data/walk.txt') #### YOUR CODE HERE #### # plot the data using matplotlib plot! plt.plot(walk_arr) plt.ylabel('Location') plt.xlabel('Step') plt.title('Random Walk') plt.savefig('figures/walk.png') print(f'walk_arr.shape: {walk_arr.shape}') #### YOUR CODE HERE #### walk_min = numpy.amin(walk_arr) print(f'walk_min: {walk_min}') #### YOUR CODE HERE #### walk_max = numpy.amax(walk_arr) print(f'walk_max: {walk_max}') #### YOUR CODE HERE #### def scale01(arr): """ linearly scale the values of an array in the range [0, 1] :param a: input ndarray :return: scaled ndarray """ walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (-1, +1)) # linear scaling return walk_arr_01 #return the scaled array walk_arr_scaled = scale01(walk_arr) print('DONE exercise_6()') return walk_arr, walk_min, walk_max, walk_arr_scaled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_data(self):", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def scale_data(x):\n mu = x.mean(axis=0)\n sigma = x.std(axis=0)\n x = (x - mu) / sigma\n return (x, mu, sigma)", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "d...
[ "0.68279326", "0.66121054", "0.64640146", "0.6433218", "0.64242774", "0.6404947", "0.6313454", "0.6287877", "0.61888176", "0.618611", "0.61710227", "0.6164797", "0.6157777", "0.6145128", "0.61247987", "0.61205786", "0.61193025", "0.6103496", "0.60717255", "0.60140777", "0.599...
0.0
-1
linearly scale the values of an array in the range [0, 1]
def scale01(arr): walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (-1, +1)) # linear scaling return walk_arr_01 #return the scaled array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n...
[ "0.7274082", "0.71151817", "0.7070224", "0.70607144", "0.695351", "0.6909782", "0.6863304", "0.683024", "0.6815376", "0.68095505", "0.6759205", "0.6644985", "0.6604848", "0.65765154", "0.65633816", "0.65503216", "0.6484777", "0.6470204", "0.644784", "0.64431757", "0.64138615"...
0.7821479
0
This is a doc string
def exercise_7(): print('=' * 30) print('Running exercise_7()') #### YOUR CODE HERE #### numpy.random.seed(7) # set the numpy random seed to 7 # This determines how many times we "throw" the # 2 six-sided dice in an experiment num_dice_throws = 10000 # don't edit this! # This determines how many trials in each experiment # ... that is, how many times we'll throw our two # 6-sided dice num_dice_throws times num_trials = 10 # don't edit this! # Yes, you can have functions inside of functions! def run_experiment(): trial_outcomes = list() for trial in range(num_trials): #### YOUR CODE HERE #### doubles = 0 # number of the occurrence of doubles in one trial for throws in range(num_dice_throws): throw_0 = 1 # number of throws in one trial outcome = numpy.random.randint(1,7, size=2) #generate two throws if outcome[0] == outcome[1]: doubles = doubles + 1 #count the number of doubles # In the following, make it so that probability_estimate is an estimate # of the probability of throwing 'doubles' with two fair six-sided dice # (i.e., the probability that the dice end up with teh same values) # based on throwing the two dice num_dice_throws times. probability_estimate = doubles/num_dice_throws # Save the probability estimate for each trial (you don't need to change # this next line) trial_outcomes.append(probability_estimate) trial = trial + 1 return trial_outcomes experiment_outcomes_1 = run_experiment() print(f'experiment_outcomes_1: {experiment_outcomes_1}') print(f'do it again!') experiment_outcomes_2 = run_experiment() print(f'experiment_outcomes_2: {experiment_outcomes_2}') print('Now reset the seed') #### YOUR CODE HERE #### numpy.random.seed(7) # reset the numpy random seed back to 7 experiment_outcomes_3 = run_experiment() print(f'experiment_outcomes_3: {experiment_outcomes_3}') print("DONE exercise_7()") return experiment_outcomes_1, experiment_outcomes_2, experiment_outcomes_3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DocString():\n return", "def doc_string():\n pass # pass does nothing", "def get_doc_string(self) -> str:\n r = \"Undocumented\"\n if self.doc is not None: r = self.doc\n return r", "def raw_doc(self):\n try:\n return str(self.definition.docstr)\n ex...
[ "0.8623083", "0.8082154", "0.74954015", "0.7324442", "0.7286685", "0.72765845", "0.72732335", "0.71542895", "0.71460176", "0.7124491", "0.70944595", "0.70730394", "0.7053277", "0.70136875", "0.6961591", "0.696057", "0.69478667", "0.6903653", "0.69020563", "0.6898957", "0.6898...
0.0
-1
Random vectors and matrices, and some linear algebra operations
def exercise_8(): print("=" * 30) print("Running exercise_8()") #### YOUR CODE HERE #### numpy.random.seed(seed= 7) # set the numpy random seed to 7 #### YOUR CODE HERE #### # Set x to a 2-d array of random number of shape (3, 1) x = numpy.random.rand(3, 1) print(f'x:\n{x}') #### YOUR CODE HERE #### # Set 7 to a 2-d array of random number of shape (3, 1) y = numpy.random.rand(3,1) print(f'y:\n{y}') #### YOUR CODE HERE #### # Calclate the sum of x and y v1 = x + y print(f'v1:\n{v1}') #### YOUR CODE HERE #### # Calclate the sum of x and y v2 = numpy.multiply(x, y) print(f'v2:\n{v2}') #### YOUR CODE HERE #### # Transpose x xT = numpy.transpose(x) print(f'xT: {xT}') #### YOUR CODE HERE #### # Calculate the dot product of x and y v3 = numpy.dot(xT, y) print(f'v3: {v3}') #### YOUR CODE HERE #### # Set A to a 2-d array of random numbers of shape (3, 3) A = numpy.random.rand(3,3) print(f'A:\n{A}') #### YOUR CODE HERE #### # Compute the dot product of x-transpose with A v4 = numpy.dot(xT, A) print(f'v4: {v4}') #### YOUR CODE HERE #### # Compute the dot product of x-transpose with A and the product with y v5 = numpy.dot(v4, y) print(f'v5: {v5}') #### YOUR CODE HERE #### # Compute the inverse of A v6 = numpy.linalg.inv(A) print(f'v6:\n{v6}') #### YOUR CODE HERE #### # Compute the dot product of A with its inverse. # Should be near identity (save for some numerical error) v7 = numpy.dot(v6, A) print(f'v7:\n{v7}') return x, y, v1, v2, xT, v3, A, v4, v5, v6, v7
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_norm_vector():\n random_state = np.random.RandomState(0)\n for n in range(1, 6):\n v = pr.random_vector(random_state, n)\n u = pr.norm_vector(v)\n assert_almost_equal(np.linalg.norm(u), 1)", "def random_matrix():\n # Initialize random angles\n theta1 = np.random.rand() *...
[ "0.6085895", "0.6054807", "0.59700584", "0.5968296", "0.5936185", "0.59123176", "0.5900732", "0.5889028", "0.58634204", "0.58405113", "0.5830244", "0.580792", "0.5787771", "0.57753277", "0.5718604", "0.5715407", "0.5711817", "0.5695124", "0.56888473", "0.5683767", "0.5669956"...
0.53885347
43
Implementing scalar versus vector math
def exercise_9(path_to_X_data, path_to_w_data):
    """Compare scalar vs linear-algebra evaluation of w^T (X^T X) w.

    Loads X (N x 2) and w (length 2) from the given comma-delimited text
    files, computes the right-hand side of FCMA Exercise 1.3 with scalar
    arithmetic and the left-hand side with matrix products; the two
    results should agree.

    Args:
        path_to_X_data: path to the comma-delimited X matrix file.
        path_to_w_data: path to the w vector file.

    Returns:
        (X, w, x_n1, x_n2, scalar_result, XX, wXXw)
    """
    print("=" * 30)
    print("Running exercise_9()")

    # Load the data from the supplied paths.  (Bug fix: the original
    # hard-coded 'data/X.txt' / 'data/w.txt' and ignored both parameters.)
    X = numpy.loadtxt(path_to_X_data, delimiter=',')
    w = numpy.loadtxt(path_to_w_data, delimiter=',')
    print(f'X:\n{X}')
    print(f'w: {w}')

    # Extract column 0 (x_n1) and column 1 (x_n2) from X.  Using full-column
    # slices generalizes the original's hard-coded row indices [0..4] to any
    # number of rows.
    x_n1 = X[:, 0]
    x_n2 = X[:, 1]
    print(f'x_n1: {x_n1}')
    print(f'x_n2: {x_n2}')

    # Scalar form: w0^2*sum(x1^2) + 2*w0*w1*sum(x2*x1) + w1^2*sum(x2^2)
    # (right-hand side of Exercise 1.3 from FCMA p.35)
    w_0 = w[0]
    w_1 = w[1]
    scalar_result = (w_0 * w_0 * numpy.sum(x_n1 * x_n1)
                     + 2 * w_0 * w_1 * numpy.sum(x_n2 * x_n1)
                     + w_1 * w_1 * numpy.sum(x_n2 * x_n2))
    print(f'scalar_result: {scalar_result}')

    # Linear-algebra form (left-hand side): first the inner term X^T X ...
    XX = numpy.dot(numpy.transpose(X), X)
    print(f'XX:\n{XX}')

    # ... then multiply by w on the left and right: w (X^T X) w
    wXXw = numpy.dot(numpy.dot(w, XX), w)
    print(f'wXXw: {wXXw}')

    print("DONE exercise_9()")
    return X, w, x_n1, x_n2, scalar_result, XX, wXXw
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __div__(self, scalar):\n return Vector(self.x / scalar, self.y / scalar)", "def scalar_vector_ext(alpha, v, a, b):\n return [alpha * v[0],\n alpha * v[0] * a + b]", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def __rdiv__(self, scalar):\n return Vector(self.x / ...
[ "0.7258965", "0.70950174", "0.6895658", "0.6889606", "0.6887228", "0.68746805", "0.68746805", "0.6856566", "0.6807518", "0.67988276", "0.67679286", "0.67590684", "0.6742905", "0.6713513", "0.6690686", "0.6619376", "0.66111094", "0.6601558", "0.6597171", "0.6592091", "0.659029...
0.0
-1
extends the init_buffer of OffsetColorProgram class by creating the additional carry flag VBO
def _init_buffers(self, v, n, _):
    """Extend the base buffer setup with an extra per-instance VBO that
    holds the 'carried' flag (one float per instance)."""
    super()._init_buffers(v, n, _)

    self.vbos.append(gl.glGenBuffers(1))

    # init VBO 2 - dynamic color data
    # NOTE(review): comment kept from the original; this buffer actually
    # feeds the 'carried' attribute and sits at index 3 of self.vbos
    # (indices 0-2 are presumably created by the base class — confirm).
    gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])
    loc = self.get_attribute_location("carried")
    gl.glEnableVertexAttribArray(loc)
    # One float per attribute slot, tightly packed, starting at offset 0.
    gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0,
                             ctypes.c_void_p(0))
    # Advance the attribute once per instance (instanced rendering).
    gl.glVertexAttribDivisor(loc, 1)
    # Allocate an empty dynamic buffer; real data is uploaded later
    # (see update_carried).
    gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32),
                    gl.GL_DYNAMIC_DRAW)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def setupVAO(self, gpuShape):\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_AR...
[ "0.60485387", "0.5838095", "0.58365405", "0.58271587", "0.58271587", "0.5774059", "0.5710976", "0.5705954", "0.5652349", "0.5554869", "0.5520962", "0.55178773", "0.54938525", "0.53955853", "0.5374403", "0.53721666", "0.5345584", "0.53194004", "0.5283975", "0.5239198", "0.5213...
0.69480604
0
updates the carry flag data (VBO3)
def update_carried(self, data):
    """Upload new per-instance 'carried' flag values to VBO index 3.

    data: sequence of numbers; converted to float32 before upload.
    """
    self.use()
    gpu_data = np.array(data, dtype=np.float32)
    gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])
    # Re-specify the whole buffer store on every call (GL_DYNAMIC_DRAW).
    gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data,
                    gl.GL_DYNAMIC_DRAW)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bcs(self, arg):\n\n self.pc += arg if self.p & const.FLAG_CARRY else 0\n self.pc = c_uint16(self.pc).value", "def bvc(self, arg):\n\n self.pc += arg if not self.p & const.FLAG_OVERFLOW else 0\n self.pc = c_uint16(self.pc).value", "def update_flags(self):\n # view mode, fi...
[ "0.57576525", "0.55997807", "0.55879223", "0.5496893", "0.54818845", "0.5372455", "0.530587", "0.5240245", "0.51702", "0.5161561", "0.515714", "0.5140299", "0.51236033", "0.5092073", "0.50008583", "0.49901924", "0.49867123", "0.49156582", "0.4907488", "0.4875377", "0.48464125...
0.65785253
0
Sets scale control bitword = 0 x, y frozen scales + 1 x is interactive + 2 y is interactive bit value 0/1 frozen/interactive
def set_scale_control(self, scale_ctl=3):
    """Set the scale-control bit word.

    Bit 0 set -> x is interactive, bit 1 set -> y is interactive;
    a cleared bit leaves that axis frozen (0 == both frozen,
    default 3 == both interactive).
    """
    self._scale_ctl = scale_ctl
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _force_rescale(self, setpoint_x, setpoint_y):", "def scale(self,id,x,y,s):\n if id not in self.elements.keys():\n print(\"Id input not registered! Please check your process\")\n return False\n element=self.elements[id]\n state=element.scale(self.h-1-y,x,s,self.w,sel...
[ "0.67285424", "0.6584716", "0.6431193", "0.6370215", "0.6340126", "0.6294655", "0.62531334", "0.6227982", "0.6212142", "0.6209266", "0.6207113", "0.6194933", "0.6148316", "0.61001164", "0.6055724", "0.60446364", "0.60115176", "0.6009035", "0.59821504", "0.59555095", "0.591816...
0.67066544
1
Run probabilistic road map planning
def prm_planning(start_x, start_y, goal_x, goal_y,
                 obstacle_x_list, obstacle_y_list, robot_radius, *, rng=None):
    """Probabilistic Road Map (PRM) planning.

    Samples collision-free points, connects them into a road map and runs
    Dijkstra from start to goal.  Returns (rx, ry): the path x and y
    coordinate lists.
    """
    # KD-tree over obstacle positions for fast nearest-neighbour queries.
    obstacle_kd_tree = KDTree(np.vstack((obstacle_x_list, obstacle_y_list)).T)

    sample_x, sample_y = sample_points(
        start_x, start_y, goal_x, goal_y, robot_radius,
        obstacle_x_list, obstacle_y_list, obstacle_kd_tree, rng)

    if show_animation:
        plt.plot(sample_x, sample_y, ".b")

    road_map = generate_road_map(
        sample_x, sample_y, robot_radius, obstacle_kd_tree)

    return dijkstra_planning(
        start_x, start_y, goal_x, goal_y, road_map, sample_x, sample_y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n for task in range(1, 6):\n # get map object for the current task\n map_obj = MapObj(task=task)\n # display map\n map_obj.show_map()\n # find cost optimal path using a-star\n node = search(\n map_obj=map_obj,\n heuristic=euclidian_dist...
[ "0.66793424", "0.64969844", "0.63846135", "0.6238592", "0.61426294", "0.6089071", "0.60473144", "0.6015712", "0.5973049", "0.5957199", "0.5950292", "0.59151304", "0.58676654", "0.585917", "0.5856333", "0.5838859", "0.5832239", "0.58292663", "0.58258164", "0.58152735", "0.5795...
0.5963495
9
Removes all values of arg from the given string
def pippo(value):
    """Template filter: wrap known names in HTML markup.

    Wraps 'BPM' in an <abbr> tag and links 'Rino Snaidero Scientific
    Foundation' to the foundation's web site.
    """
    substitutions = (
        ('BPM',
         '<abbr title="Banca Popolare di Milano">BPM</abbr>'),
        ('Rino Snaidero Scientific Foundation',
         '<a href="http://www.snaiderofoundation.org/">Rino Snaidero Scientific Foundation</a>'),
    )
    for needle, markup in substitutions:
        value = value.replace(needle, markup)
    return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_value(value, arg):\n return value.replace(arg, '')", "def cut_string(value, arg):\n\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value, arg):\n return value.replace(arg, '')", "def cut(value,arg):\n return value.replace(arg, '')...
[ "0.7558784", "0.7307502", "0.70154285", "0.70154285", "0.69781893", "0.6924862", "0.676274", "0.6756168", "0.67021793", "0.6586963", "0.64901525", "0.6459049", "0.6459049", "0.6459049", "0.6459049", "0.6459049", "0.6406707", "0.6270779", "0.62498343", "0.62498343", "0.6242943...
0.0
-1
Monta uma API flask e registra seus blueprints.
def setup():
    """Build and configure the Flask API application.

    Registers the health-check blueprint and a catch-all error handler,
    then returns the ready-to-run app.
    """
    LOG.info("Creating API.")
    api = Flask(__name__)

    LOG.info("Registering blueprints.")
    api.register_blueprint(health_check_blueprint.setup())

    LOG.info("Registering error handlers.")
    api.register_error_handler(Exception, default_error_handler)

    LOG.info("Setting up config variables.")
    # Let exceptions propagate to the registered handler instead of
    # Flask's default 500 page.
    api.config['PROPAGATE_EXCEPTIONS'] = True

    return api
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_blueprints(self):\n # Local import due to flask/blueprint circular imports.\n from mmapi.views import api_bp\n self.app.register_blueprint(api_bp, url_prefix='/api')", "def register_blueprints_on_app(app):\n app.register_blueprint(views.main_pages)\n app.register_blueprint...
[ "0.7972771", "0.764559", "0.7619169", "0.7566443", "0.74812996", "0.74501944", "0.7441911", "0.7426413", "0.7388967", "0.73659635", "0.7289081", "0.7288449", "0.72654545", "0.724093", "0.7233655", "0.71914", "0.71732324", "0.71653473", "0.71482444", "0.71245396", "0.70734113"...
0.66193205
47
generate samples of mixture Gaussian distribution
def mix_gaussian(mu, sigma_list, weights, num_sample): """ inputs: ------- mu mean list, numpy array sigma_list sigma list weights weights corresponding to each components num_sample the number of samples returns: -------- samples probability density function (pdf) of mixture Gaussian distribution """ dim = mu.shape[1] num_components = mu.shape[0] assert (len(weights) == num_components) and (num_components == len(sigma_list)) data = np.zeros((num_sample, dim)) for i in range(num_sample): idx_component = np.random.choice(num_components, p=weights) mean = mu[idx_component] cov = sigma_list[idx_component] data[i, :] = np.random.multivariate_normal(mean, cov) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_samples(mu1,cov,number_of_samples):\n samples = np.random.multivariate_normal(mu1, cov,number_of_samples)\n return samples", "def gen_mixture():\n npr.seed(0)\n num_exp = int(1e4)\n x_dim = 2\n z_dim = 2\n mu1 = [5, 5,]\n mu2 = [-5, -5]\n theta = np.array([[2,1],[-1,-2]])\...
[ "0.71659434", "0.70966846", "0.7007388", "0.6865769", "0.677201", "0.67334753", "0.6702295", "0.6684492", "0.6668763", "0.66618276", "0.6634252", "0.6606611", "0.6601144", "0.648774", "0.6463612", "0.64621603", "0.6461179", "0.64429975", "0.6393839", "0.6344043", "0.6344043",...
0.70466185
2
Show info to the user depending on verbosity level
def message(self, data, newline="\n"):
    """Emit a message to the console and/or the log file.

    data:    text to emit (without trailing newline).
    newline: terminator appended when writing to the log file.

    Console output is suppressed when self.quiet is set; file output
    happens only when self.log_fo (an open file object) is set.
    """
    # Are we logging to screen, file or both?
    if not self.quiet:
        print(data)
    if self.log_fo:
        self.log_fo.write(data + newline)
        # Flush so output survives an abrupt exit.
        self.log_fo.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v", "def say(self, verbosity, msg):\n if self.verbosity >= verbosity:\n print(msg)", "def verbose():\n return Verbose.level()", "def _verbose(self,text):\n if self.verbose:\n pr...
[ "0.7465201", "0.7334355", "0.71003336", "0.69231784", "0.6840511", "0.6791418", "0.6718375", "0.6699678", "0.6693496", "0.66344607", "0.6631135", "0.66225713", "0.66161317", "0.6589693", "0.6587105", "0.65763825", "0.65573853", "0.6548502", "0.65454394", "0.6543679", "0.65303...
0.0
-1
Wrapper to make an API GET request, return the response and handle errors
def __make_api_get(self, api_path):
    """Make a GET request to the EFIgy API and return the decoded JSON.

    api_path: path portion appended to self.api_server.
    Raises EFIgyCliError on HTTP errors, connection problems, or a
    non-JSON response body.  (Python 2 code: uses urllib2.)
    """
    try:
        self.last_response = urllib2.urlopen(
            self.api_server + api_path, cafile=self.cacert_path)
        json_data = self.last_response.read()

    # Check for errors
    except urllib2.HTTPError as err:
        error = "API HTTP error [%s] - '%s'" % (err.code, err.read())
        raise EFIgyCliError(error, self.last_response)

    except urllib2.URLError as err:
        # NOTE(review): on URLError, self.last_response may still hold a
        # previous response (urlopen never returned) — confirm intent.
        error = 'Problem calling API at location %s - %s' % (
            self.api_server + api_path, err)
        raise EFIgyCliError(error, self.last_response)

    # Decode json response into an object
    try:
        ret = json.loads(json_data)
    except ValueError as err:
        error = "Problem deserialising data, expecting JSON.\nError: %s\nData: %s" % (
            err, json_data)
        raise EFIgyCliError(error, self.last_response)

    # Return JSON deserialised object
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _http_get(self, url, params={}):\n if not self.token:\n self.get_token()\n headers = {'Authorization': self.token, 'Accept': 'application/json; indent=4'}\n url = self.server + '/api2' + url\n try:\n r = requests.get(url=url, headers=headers, params=params)\n ...
[ "0.778995", "0.768153", "0.7418646", "0.739191", "0.73206294", "0.72995013", "0.7262301", "0.7259874", "0.7223338", "0.7208629", "0.71811944", "0.7158694", "0.7158694", "0.71570796", "0.7130304", "0.7127936", "0.7122294", "0.709202", "0.7063909", "0.7061155", "0.70599645", ...
0.72182447
9
Wrapper to make an API POST request, return the response and handle errors
def __make_api_post(self, api_path, data=None):
    """POST a JSON payload to the EFIgy API and return the decoded JSON.

    api_path: path portion appended to self.api_server.
    data:     object serialised to JSON and sent as the request body.
    Raises EFIgyCliError on HTTP errors, connection problems, or a
    non-JSON response body.  (Python 2 code: uses urllib2.)
    """
    headers = {
        "Content-type": "application/json",
        "Accept": "application/json"}
    x = json.dumps(data)

    try:
        req = urllib2.Request(self.api_server + api_path, x, headers)
        self.last_response = urllib2.urlopen(req, cafile=self.cacert_path)
        json_data = self.last_response.read()

    # Check for errors
    except urllib2.HTTPError as err:
        error = "API HTTP error [%s] - '%s'" % (err.code, err)
        raise EFIgyCliError(error, err)

    except urllib2.URLError as err:
        error = 'Problem calling API at location %s - %s' % (
            self.api_server + api_path, err)
        raise EFIgyCliError(error, self.last_response)

    # Decode json response into an object
    try:
        ret = json.loads(json_data)
    except ValueError as err:
        error = "Problem deserialising data, expecting JSON.\nError: %s\nData: %s" % (
            err, json_data)
        raise EFIgyCliError(error, self.last_response)

    # Return JSON deserialised object
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, *args, **kwargs)", "def make_post_request(api_endpoint: str, data: dict):\n response = requests.post(api_endpoint, data=data)\n logprint_request(api_endpoint)\n\n logprint_response(response)\n log.debug(\"Response di...
[ "0.7550158", "0.72697294", "0.72025406", "0.7184173", "0.7127111", "0.7112929", "0.7065529", "0.7061984", "0.70534045", "0.7048021", "0.69344455", "0.6925657", "0.68850005", "0.68809766", "0.68611604", "0.6811874", "0.67909265", "0.6754902", "0.6729885", "0.67012155", "0.6694...
0.6983514
10
Validate the response that came back from the API, return True if it's good, False if bad
def _validate_response(self, response):
    """Sanity-check an API response object.

    Returns True when the response is a dict containing a 'msg' key,
    False otherwise; error details are reported via self.message().
    """
    # Check for unexpected response - all should be JSON dicts that have
    # already been deserialised.
    # NOTE: types.DictionaryType exists only on Python 2.
    if not isinstance(response, types.DictionaryType):
        self.message(
            "\t\t[!] ERROR - Unexpected value returned from the API: '%s'" %
            (response))
        return False

    # Check for valid errors
    if "error" in response and "msg" in response:
        self.message(
            "\t\t[!] ERROR - %s (%s)" %
            (response["msg"], response["timestamp"]))
        return False

    # Is this a valid response message
    if "msg" in response:
        return True

    # Catch all...dictionary returned but does not contain expected keys?
    # Who knows what's going on here?!
    else:
        self.message(
            "\t\t[!] ERROR - Unexpected dictionary response returned from the API: '%s'" %
            (response))
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, response):\n return response[\"status_code\"] == 1", "def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False", "def validate_response(self, response):\n pass", "def validate_response(respons...
[ "0.8024064", "0.78625363", "0.77986264", "0.7515456", "0.74777824", "0.74465626", "0.73384", "0.7265737", "0.7262182", "0.72441804", "0.7198832", "0.712405", "0.7028976", "0.701251", "0.69981617", "0.69959635", "0.6970569", "0.6948191", "0.6897628", "0.68025744", "0.68025744"...
0.76538676
3
Validate the supplied json file to make sure it is json in the expected format
def _validate_json(self):
    """Validate the batch JSON file and pseudonymise its entries.

    Expects self.batch_json_path to name a JSON file mapping endpoint
    identifiers to system-info dicts whose keys match
    self.json_batch_template.  On success the pseudonymised data is
    stored in self.endpoints_to_check and True is returned; on any
    validation failure a message is emitted and False is returned.
    """
    # Do we find valid json?
    try:
        with open(self.batch_json_path, "rb") as fd:
            batch_json = json.loads(fd.read())
    except Exception as err:
        # NOTE(review): 'raise self.message(...)' raises the return value
        # of message() (None), and the 'return False' below it is
        # unreachable — likely intended to be a plain message() call
        # followed by 'return False'.  Flagged, not changed.
        raise self.message(
            "[-] Error reading JSON batch file '%s' : '%s'" %
            (self.batch_json_path, err))
        return False

    # Does the json represent a dictionary of the expected form?
    # (types.DictionaryType / StringType / UnicodeType are Python 2 only.)
    if not isinstance(batch_json, types.DictionaryType):
        self.message(
            "[-] JSON batch file '%s' deserialises to unexpected object type '%s'" %
            (self.batch_json_path, type(batch_json)))
        return False

    # If it is a dictionary does it have the expected characteristics?
    for endpoint, sys_info in batch_json.items():

        # Endpoint should be a hostname, IP or some other string
        # identifier, difficult to validate much beyond 'string'
        if type(endpoint) not in [types.StringType, types.UnicodeType]:
            self.message(
                "[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s" %
                (self.batch_json_path, type(endpoint), endpoint, sys_info))
            return False

        # Does the sys_info dict contain exactly the expected keys?
        if set(sys_info.keys()).symmetric_difference(
                set(self.json_batch_template)):
            self.message(
                "[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s" %
                (self.batch_json_path, self.json_batch_template, endpoint, sys_info))
            return False

        # Create a pseudonymised hash of the uuid using the MAC address
        # as salt.
        mac_repr = "0x" + sys_info["mac_addr"].lower().replace(":", "")
        sys_info["hashed_uuid"] = hashlib.sha256(
            mac_repr + sys_info["sys_uuid"]).hexdigest()

        # Remove both the real sys_uuid and the mac_addr from the
        # structure so they do not get submitted to the API and remain
        # confidential to the submitter.
        del sys_info["sys_uuid"]
        del sys_info["mac_addr"]

    # Set the read-in json structure as the structure of system data to
    # walk and send to the API.
    self.endpoints_to_check = batch_json

    self.message("[+] Batch JSON file validated")
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_fi...
[ "0.7859762", "0.77667236", "0.7729442", "0.7575843", "0.73721087", "0.7308936", "0.7206595", "0.7121494", "0.708791", "0.7065255", "0.7045056", "0.6959003", "0.6920872", "0.6845389", "0.68421036", "0.6783331", "0.6779957", "0.67763454", "0.67711896", "0.67469054", "0.67342323...
0.70778465
9
Get versions of EFI, Boot ROM, OS & Mac Device as well as the SysUUID
def gather_system_versions(self):
    """Collect hardware/EFI/SMC/OS details of the local Mac.

    Populates self.hw_version, self.efi_version, self.smc_version,
    self.h_sys_uuid, self.board_id, self.os_version and self.build_num,
    then records them in self.endpoints_to_check under "127.0.0.1".
    Returns True on success, False when EFI data cannot be found.
    (Python 2 / macOS code: uses IOKit bindings and 'commands'.)
    """
    # Get Mac model ID
    self.hw_version = str(
        IORegistryEntryCreateCFProperty(
            IOServiceGetMatchingService(
                0,
                IOServiceMatching("IOPlatformExpertDevice")),
            "model",
            None,
            0)).replace(
        "\x00",
        "")

    if "imacpro" in self.hw_version.lower():
        # iMac Pro stores its EFI data differently due to its new
        # architecture, so grab the EFI & SMC ROM versions appropriately.
        raw_efi_list = []
        raw_rom_info = str(
            IORegistryEntryCreateCFProperty(
                IORegistryEntryFromPath(
                    0,
                    "IODeviceTree:/rom"),
                "apple-rom-info",
                None,
                0))
        for data in raw_rom_info.split("\n"):
            if data.strip().startswith("BIOS ID"):
                raw_efi_list = data.split(":")[1].strip().split(".")
                break
        else:
            self.message(
                "[-] Could not find raw EFI data to determine EFI versions. Exiting....")
            return False

        self.efi_version = "%s.%s.%s" % (
            raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])
        # Can't currently find the SMC version like this on iMac Pros ....
        # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching("AppleSMC")), "smc-version", None, 0))
        self.smc_version = ""
    else:
        # EFI & SMC ROM versions
        self.smc_version = str(
            IORegistryEntryCreateCFProperty(
                IOServiceGetMatchingService(
                    0,
                    IOServiceMatching("AppleSMC")),
                "smc-version",
                None,
                0))
        raw_efi = str(
            IORegistryEntryCreateCFProperty(
                IORegistryEntryFromPath(
                    0,
                    "IODeviceTree:/rom"),
                "version",
                None,
                0)).replace(
            "\x00",
            "").split(".")
        self.efi_version = "%s.%s.%s" % (
            raw_efi[0], raw_efi[2], raw_efi[3])

    # Salt the system UUID with the MAC address so the submitted hash is
    # pseudonymous — we don't want to know the sysuuid's value, but we do
    # want it to be unique.  The salt value is never submitted to the API.
    salt = hex(getnode())
    sys_uuid = str(
        IORegistryEntryCreateCFProperty(
            IOServiceGetMatchingService(
                0,
                IOServiceMatching("IOPlatformExpertDevice")),
            "IOPlatformUUID",
            None,
            0)).replace(
        "\x00",
        "")
    self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()

    # Get the Board-ID; this is how EFI files are matched to running
    # hardware.
    self.board_id = str(
        IORegistryEntryCreateCFProperty(
            IOServiceGetMatchingService(
                0,
                IOServiceMatching("IOPlatformExpertDevice")),
            "board-id",
            None,
            0)).replace(
        "\x00",
        "")

    # Get OS version and build number (Python 2 'commands' module).
    self.os_version = commands.getoutput("sw_vers -productVersion")
    self.build_num = commands.getoutput("sw_vers -buildVersion")

    # Add gathered info to the dictionary used to query the API.
    self.endpoints_to_check["127.0.0.1"] = {
        "hashed_uuid": self.h_sys_uuid,
        "hw_ver": self.hw_version,
        "rom_ver": self.efi_version,
        "smc_ver": self.smc_version,
        "board_id": self.board_id,
        "os_ver": self.os_version,
        "build_num": self.build_num}

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()", "def _get_release_infos():\n \n # support RHEL or CentOS, we don't care about the rest...\n with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):\n infos = run('cat /etc/redhat-releas...
[ "0.6673707", "0.6660692", "0.6526378", "0.628747", "0.62810564", "0.62695277", "0.62047887", "0.619877", "0.6136058", "0.613013", "0.61185354", "0.61024153", "0.60582775", "0.6051702", "0.5979974", "0.5965632", "0.59549516", "0.59407663", "0.59400725", "0.5934368", "0.5919144...
0.7456519
0
Send the System info to the API so as the expected EFI version and other data can be returned relevant to this system
def submit_system_data(self, data_to_submit=None): endpoint = "/apple/oneshot" # if not data_to_submit: # data_to_submit = {"hashed_uuid":self.h_sys_uuid, "hw_ver":self.hw_version, "rom_ver":self.efi_version, # "smc_ver":self.smc_version, "board_id":self.board_id, "os_ver":self.os_version, "build_num":self.build_num} # POST this data to the API to get relevant info back result_dict = self.__make_api_post(endpoint, data=data_to_submit) return result_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_info(self, system_id):\n\n\t\tpath = f'{self.BIKE_ENDPOINT}system/{system_id}/{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response", "async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n re...
[ "0.71641546", "0.6972458", "0.6943339", "0.6877875", "0.68028116", "0.6581513", "0.6516118", "0.65054244", "0.6417727", "0.62935936", "0.62915426", "0.6286806", "0.6262276", "0.6224786", "0.6207814", "0.61550796", "0.6147702", "0.6147702", "0.6147702", "0.6147702", "0.6147702...
0.0
-1
Given the OS version are you running, what is the highest available build number? Are you running it?
def check_highest_build(self, sys_info, api_results):
    """Report whether this system runs the highest release build number
    available for its installed OS version.

    sys_info:    dict with at least 'os_ver' and 'build_num'.
    api_results: cached API results for the current endpoint.
    Output goes through self.message(); nothing is returned.

    NOTE(review): the fetched value is stored into
    self.results[self.current_endpoint] but read back from api_results —
    the caller apparently passes self.results[endpoint] as api_results;
    confirm before refactoring.
    """
    if not api_results.get("latest_build_number"):
        self.results[self.current_endpoint]["latest_build_number"] = self.__make_api_get(
            '/apple/latest_build_number/%s' %
            (".".join(sys_info.get("os_ver").split(".")[:2])))

    self.message("\n\tHighest build number check:")

    # Validate response from API
    if self._validate_response(api_results["latest_build_number"]):

        # Valid response from API - now interpret it
        if api_results["latest_build_number"][
                "msg"] == sys_info.get("build_num"):
            self.message(
                "\t\t[+] SUCCESS - You are running the latest build number (%s) of the OS version you have installed (%s)" %
                (sys_info.get("build_num"), sys_info.get("os_ver")))
        # A trailing letter in the build number indicates a dev/beta build.
        elif sys_info.get("build_num")[-1].isalpha():
            self.message(
                "\t\t[!] ATTENTION - It looks like you might be running a development OS build '%s' (%s). The EFIgy API currently only has reliable data for production OS releases." %
                (sys_info.get("build_num"), sys_info.get("os_ver")))
        else:
            self.message(
                "\t\t[-] ATTENTION - You are NOT running the latest release build number of your OS version (%s). Your build number is %s, the latest release build number is %s" %
                (sys_info.get("os_ver"), sys_info.get("build_num"), api_results["latest_build_number"]["msg"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_max():\n return VERSION_MAX", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot ...
[ "0.74187696", "0.71662873", "0.7020225", "0.70104754", "0.6995423", "0.6940619", "0.67905074", "0.6742232", "0.66234714", "0.6523554", "0.65176624", "0.6482276", "0.6474209", "0.6407805", "0.6395835", "0.63706833", "0.63454497", "0.6344706", "0.6331214", "0.6328722", "0.63287...
0.7350579
1
Given your major OS version are you running the latest minor patch?
def check_os_up_to_date(self, sys_info, api_results):
    """Report whether this system runs the latest minor/micro release of
    its major OS version.

    sys_info:    dict with at least 'os_ver'.
    api_results: cached API results for the current endpoint.
    Output goes through self.message(); nothing is returned.
    """
    if not api_results.get("latest_os_version"):
        self.results[self.current_endpoint]["latest_os_version"] = self.__make_api_get(
            '/apple/latest_os_version/%s' %
            (".".join(sys_info.get("os_ver").split(".")[:2])))

    self.message("\n\tUp-to-date OS check:")

    # Validate response from API
    if self._validate_response(api_results["latest_os_version"]):

        # Valid response from API - now interpret it.
        # NOTE(review): versions are compared by concatenating the three
        # dotted components into one integer (e.g. '10.12.6' -> 10126),
        # which only orders correctly while component digit widths line
        # up — flagged, not changed.
        my_os_ver_str = sys_info.get("os_ver").split(".")
        my_os_ver_num = int(
            "%s%s%s" %
            (my_os_ver_str[0], my_os_ver_str[1], my_os_ver_str[2]))

        api_os_ver_str = api_results["latest_os_version"]["msg"].split(".")
        api_os_ver_num = int(
            "%s%s%s" %
            (api_os_ver_str[0], api_os_ver_str[1], api_os_ver_str[2]))

        if my_os_ver_num < api_os_ver_num:
            self.message(
                "\t\t[-] ATTENTION - You are NOT running the most up to date version of the OS. Your OS version is %s, the latest versions is %s" %
                (sys_info.get("os_ver"), api_results["latest_os_version"]["msg"]))
        elif my_os_ver_num > api_os_ver_num:
            self.message(
                "\t\t[!] ATTENTION - It looks like you might be running a development OS build %s. The EFIgy API currently only has reliable data for production OS releases." %
                (sys_info.get("os_ver")))
        else:
            self.message(
                "\t\t[+] SUCCESS - You are running the latest major/minor/micro version of the OS you have installed (%s)" %
                (sys_info.get("os_ver")))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def osversion():\n return platform()", "def minor_version(self):\n return self.unpack_dword(0x18)", "def operatingsystem_version_minor(self):\n # type: () -> string_types\n return self._operatingsystem_version_minor", "def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrv...
[ "0.72008204", "0.68455917", "0.6837146", "0.679873", "0.6781404", "0.67812794", "0.675668", "0.6753822", "0.6739434", "0.6729564", "0.6697316", "0.6688061", "0.65758175", "0.6563506", "0.65488297", "0.6537565", "0.64876926", "0.6484937", "0.64745766", "0.64729935", "0.6472536...
0.0
-1
Does it look like this mac model is still receiving EFI firmware updates?
def check_fw_being_updated(self, sys_info, api_results):
    """Check whether Apple still ships EFI firmware updates for this
    Mac model.

    Returns True when the model has received EFI updates (so further
    firmware checks make sense), False when it is too old.
    NOTE(review): implicitly returns None when _validate_response()
    fails — callers treating the result as boolean will see it as
    falsy; confirm that is intended.
    """
    if not api_results.get("efi_updates_released"):
        # Call the API to see what the latest version of EFI you are
        # expected to be running given OS ver and mac model.
        self.results[
            self.current_endpoint]["efi_updates_released"] = self.__make_api_get(
            '/apple/no_firmware_updates_released/%s' %
            (sys_info.get("hw_ver")))

    # Validate response from API
    if self._validate_response(api_results["efi_updates_released"]):

        # Check to see if this is a model that has seen any EFI firmware
        # updates
        if api_results["efi_updates_released"]["msg"] is False:
            self.message("\n\tEFI firmware version check:")
            self.message(
                "\t\t[-]ATTENTION - Your Mac model (%s) is older than the models Apple currently provides updates for, EFIgy has no data for it." %
                (sys_info.get("hw_ver")))
            return False
        else:
            return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_system_versions(self):\n # Get Mac model ID\n self.hw_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"model\",\n ...
[ "0.66273475", "0.64601934", "0.63994926", "0.63584757", "0.6354337", "0.6339835", "0.62948906", "0.62154347", "0.62096334", "0.61546713", "0.6140502", "0.61369526", "0.61060196", "0.60729986", "0.6002015", "0.59328735", "0.59070283", "0.58983636", "0.5769682", "0.5757605", "0...
0.64189225
2
Compare this systems versions to the firmware table to see if FW is at latest versions
def check_fw_versions(self, sys_info, api_results): if not api_results.get("latest_efi_version"): # Call the API to see what the latest version of EFI you are # expected to be running given OS ver and mac model api_results[ self.current_endpoint]["latest_efi_version"] = self.__make_api_get( '/apple/latest_efi_firmware/%s/%s' % (sys_info.get("hw_ver"), sys_info.get("build_num"))) self.message("\n\tEFI firmware version check:") # Validate response from API if self._validate_response(api_results["latest_efi_version"]): # Valid response from API - now interpret it # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions # the way that EFI versions are denoted by Apple makes this more of # a pain thatit really needs to be quite honestly api_efi_str = api_results["latest_efi_version"]["msg"].split(".") my_efi_str = sys_info.get("rom_ver").split(".") api_efi_ver = int(api_efi_str[1], 16) api_efi_build = int(api_efi_str[2].replace("B", ""), 16) if all([x.isdigit() for x in my_efi_str]): # Newer EFI versions do not include a build number # or the Mac model code. The output will be something # like 256.0.0, whereas with the old format it would # be MBP133.0256.B00. 
my_efi_ver = int(my_efi_str[0], 16) my_efi_build = 0 else: my_efi_ver = int(my_efi_str[1], 16) my_efi_build = int(my_efi_str[2].replace("B", ""), 16) if api_efi_str == my_efi_str: self.message( "\t\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s" % (sys_info.get("rom_ver"), sys_info.get("build_num"), sys_info.get("hw_ver"))) elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build: self.message( "\t\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s" % (sys_info.get("rom_ver"), sys_info.get("build_num"), sys_info.get("hw_ver"))) elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build): # Looks like you're running a beta or a dev build - pretty much # all bets are off here as the dataset doens't cover dev builds # but a nicer message makes sense self.message( "\t\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases." % (sys_info.get("rom_ver"), api_results["latest_efi_version"]["msg"])) else: self.message( "\t\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). Your firmware is %s, the firmware we expected to see is %s.\n" % (sys_info.get("hw_ver"), sys_info.get("build_num"), sys_info.get("rom_ver"), api_results["latest_efi_version"]["msg"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def firmware_newer(self):\n if self.firmware_outdated():\n return False\n return self.firmware_version != self.compatible_firmware_version", "def firmware_outdated(self):\n datefmt = ' %b %d %Y %H:%M:%S'\n\n compat_date = self.compatible_firmware_version.split('compiled')[1...
[ "0.70892644", "0.708643", "0.70024616", "0.64528227", "0.64317346", "0.64306974", "0.6340362", "0.6231141", "0.61943406", "0.6168598", "0.61505914", "0.612571", "0.6075346", "0.60498714", "0.6030455", "0.60061157", "0.59848315", "0.59787464", "0.5975126", "0.59665155", "0.596...
0.7005981
2
Output results in a json format which can be useful to ingest into other tools
def dump_json(self): # JSON output not requested if not self.json_results: return # Are we writing to a file or stdout? if self.json_results == "-": json_results_fd = sys.stdout else: try: json_results_fd = open( os.path.expanduser( os.path.expandvars( self.json_results)), "wb") except Exception as err: self.message( "[-] Problem opening file '%s' to write JSON results to: %s" % (self.json_results, err)) self.message( "[!] Defaulting to writing JSON results to stdout instead") json_results_fd = sys.stdout try: json.dump(self.results, json_results_fd) except Exception as err: self.message( "[-] Problem writing JSON output to %s : %s" % (self.json_results, err)) if self.json_results != "-": self.message("[+] Written JSON results to %s" % (os.path.abspath(self.json_results)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_json(results):\r\n import json\r\n stats = calc_stats(results)\r\n print(json.dumps(stats._asdict()))", "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return out...
[ "0.75546306", "0.7512504", "0.73262453", "0.7086924", "0.698022", "0.6938096", "0.6925703", "0.6855479", "0.67590535", "0.67026407", "0.66884065", "0.6552769", "0.64993656", "0.6483085", "0.6483022", "0.64827317", "0.6453574", "0.6452356", "0.64301246", "0.6410083", "0.639727...
0.66968524
10
Cleanup up so nothing dangles
def cleanup(self): if self.log_fo: self.log_fo.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def horde_cleanup(self):", "def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)", "def cleanup():", "def clean(self):\n for i in range(len(self....
[ "0.7524904", "0.7230241", "0.71339816", "0.7000594", "0.6759445", "0.6759445", "0.6759445", "0.66640466", "0.66440374", "0.65345025", "0.6510747", "0.6510747", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.64583874", "0.6...
0.0
-1
Get a translation for the given message. This proxies for the internal translations object.
def gettext(self, string): return self._translations.gettext(string)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gettext(self, message):\n if self._translations.has_key(message):\n return self._translations[message]\n return super(Translations, self).gettext(message)", "def get(self, msgid):\r\n return self.trans.get(msgid, str(msgid))", "def get_translation(self):\n return self...
[ "0.76010066", "0.7040674", "0.66040176", "0.6410865", "0.63327235", "0.6329257", "0.63121814", "0.6301266", "0.62613475", "0.61739236", "0.61739236", "0.6169561", "0.61192596", "0.6037068", "0.60274595", "0.6026245", "0.60075307", "0.5990841", "0.5933024", "0.58953375", "0.58...
0.5226103
59
A decorator that can exclude a view from csrf protection.
def exempt(self, view): if isinstance(view, Blueprint): self._exempt_blueprints.add(view.name) return view if isinstance(view, string_types): view_location = view else: view_location = '%s.%s' % (view.__module__, view.__name__) self._exempt_views.add(view_location) return view
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csrf_exempt(view_func):\r\n # We could just do view_func.csrf_exempt = True, but decorators\r\n # are nicer if they don't have side-effects, so we return a new\r\n # function.\r\n def wrapped_view(*args, **kwargs):\r\n return view_func(*args, **kwargs)\r\n wrapped_view.csrf_exempt = True\...
[ "0.81446385", "0.8072049", "0.7513782", "0.7303216", "0.72622657", "0.71674573", "0.67435366", "0.6682247", "0.6664801", "0.6651317", "0.6612883", "0.6552151", "0.64375037", "0.6431673", "0.6428775", "0.63179326", "0.6248671", "0.62409854", "0.621281", "0.6179554", "0.6161637...
0.61926156
19
A decorator that set the error response handler.
def error_handler(self, view): self._error_response = view return view
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errorhandler(self, status_code_or_exception_class):\n def decorated(f):\n self.error_handlers[status_code_or_exception_class] = f\n return f\n return decorated", "def error(self, func):\n self.error_handler = func\n return func", "def on_error(self, namespa...
[ "0.7790632", "0.77100396", "0.727186", "0.70996445", "0.695573", "0.6953457", "0.6853296", "0.6826386", "0.67512983", "0.67437077", "0.6728367", "0.669882", "0.66451806", "0.6628181", "0.6596329", "0.658002", "0.6575629", "0.65438235", "0.6527638", "0.65149397", "0.6502375", ...
0.7654537
2
Process raw inputs into a dataset.
def build_dataset(words): count = [] # count.extend(collections.Counter(words).most_common(n_words - 1)) count.extend(collections.Counter(words).most_common()) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() # unk_count = 0 for word in words: index = dictionary.get(word, 0) # if index == 0: # dictionary['UNK'] # unk_count += 1 data.append(index) # count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) data = [data[::2],data[1::2]] new_data = list() for i in range(len(data[0])): new_data.append([data[0][i],data[1][i]]) data = new_data vocabulary_size = len(dictionary) print("\n\ndictionary size = ") print(len(dictionary)) return data, count, dictionary, reversed_dictionary, vocabulary_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_inputs(self, inputs):", "def processInputs(self):", "def input_fn(self, ctx=None):\n sup_dataset = self.supervised_input.make_parsed_dataset(ctx)\n unsup_dataset = self.unsupervised_input.make_parsed_dataset(ctx)\n\n dataset = tf.data.Dataset.zip((sup_dataset, unsup_dataset))\n dataset ...
[ "0.72874445", "0.6668611", "0.64020455", "0.63785404", "0.63785404", "0.6376394", "0.6374364", "0.62511826", "0.6218678", "0.6197006", "0.61118746", "0.6104894", "0.6071575", "0.60514355", "0.60514355", "0.6045758", "0.6040936", "0.6020063", "0.6003791", "0.59921783", "0.5986...
0.0
-1
Make sure the tables are dropped.
def test_drop_tables(self): self.assertEqual(Manager.table_exists().run_sync(), True) self.assertEqual(Band.table_exists().run_sync(), True) drop_tables(Manager, Band) self.assertEqual(Manager.table_exists().run_sync(), False) self.assertEqual(Band.table_exists().run_sync(), False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def drop_tables():\n drop_table(Shoppi...
[ "0.83170086", "0.824251", "0.8148625", "0.80837333", "0.80614614", "0.80125594", "0.80077255", "0.798122", "0.79612356", "0.78991616", "0.7891649", "0.7845138", "0.775593", "0.77060443", "0.76956725", "0.7687249", "0.7681075", "0.76451164", "0.7631582", "0.76200426", "0.76058...
0.7902523
9
Load multiple datasets (simultaneously)
def processing_handler( datasets: list, load: Callable[[dict], None], cores: int, threads: int ) -> None: # Data output output = [] # Multi-core processing if cores > 1 and len(datasets) > 1: # Create process pool with Pool(cores) as pool: # Process datasets in pool output = pool.starmap(load, datasets) # Wait for Pool to finish pool.close() pool.join() # Multi-thread processing elif threads > 1 and len(datasets) > 1: # Create process pool with ThreadPool(threads) as pool: # Process datasets in pool output = pool.starmap(load, datasets) # Wait for Pool to finish pool.close() pool.join() # Single-thread processing else: for dataset in datasets: output.append(load(*dataset)) # Remove empty DataFrames filtered = list(filter(lambda df: df.index.size > 0, output)) return pd.concat(filtered) if len(filtered) > 0 else output[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n ...
[ "0.68618584", "0.6650741", "0.6626212", "0.66049284", "0.65869296", "0.6572509", "0.6537458", "0.65346485", "0.65204453", "0.65077704", "0.65056384", "0.64877063", "0.64608717", "0.644231", "0.64391696", "0.6436018", "0.64284915", "0.6427725", "0.64143807", "0.63900393", "0.6...
0.0
-1
Load a single CSV file into a DataFrame
def load_handler( endpoint: str, path: str, columns: list, types: Union[dict, None], parse_dates: list, coerce_dates: bool = False, ) -> pd.DataFrame: try: # Read CSV file from Meteostat endpoint df = pd.read_csv( endpoint + path, compression="gzip", names=columns, dtype=types, parse_dates=parse_dates, ) # Force datetime conversion if coerce_dates: df.iloc[:, parse_dates] = df.iloc[:, parse_dates].apply( pd.to_datetime, errors="coerce" ) except (FileNotFoundError, HTTPError): # Create empty DataFrane df = pd.DataFrame(columns=[*types]) # Display warning warn(f"Cannot load {path} from {endpoint}") # Return DataFrame return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def _parse_csv(csv_file: str) -> pd.DataFrame:\n return pd.read_csv(csv_file, header=0)", "def _load_csv_into_df(csv_file: Any, csv_name: str) -> pd.DataFrame:\n try:\n df = pd.read_csv(csv_file...
[ "0.82277906", "0.8012845", "0.7859813", "0.7839427", "0.7722293", "0.7609823", "0.75901395", "0.7588599", "0.75861067", "0.75579536", "0.7539103", "0.75356525", "0.74848354", "0.7478136", "0.74515945", "0.74494445", "0.7425229", "0.7384577", "0.73813176", "0.73813176", "0.738...
0.0
-1
Preprocess graphs by casting into FloatTensor and setting to cuda if available
def preprocess(dataset, cuda): for g, _ in dataset: for key_g, val_g in g.ndata.items(): processed = g.ndata.pop(key_g) processed = processed.type('torch.FloatTensor') if cuda: processed = processed.cuda() g.ndata[key_g] = processed for key_g, val_g in g.edata.items(): processed = g.edata.pop(key_g) processed = processed.type('torch.FloatTensor') if cuda: processed = processed.cuda() g.edata[key_g] = processed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_cuda(network):\n network.cuda()\n\n network._to_cuda_forward_cache = network.forward\n\n def cuda_forward(x):\n return network._to_cuda_forward_cache(x.cuda(non_blocking=True))\n\n network.forward = cuda_forward", "def cuda_if_gpu(T):\n\n return T.cuda() if use_cuda else T", "def _...
[ "0.5934677", "0.5921635", "0.58703035", "0.581513", "0.5797795", "0.5776158", "0.57602173", "0.57533664", "0.5746811", "0.5742912", "0.5733459", "0.5706499", "0.56763935", "0.56657827", "0.5658226", "0.5653087", "0.5644797", "0.5627843", "0.55857766", "0.55729073", "0.5570853...
0.69795823
0
The init of this class converts all of the downloaded data into usable lists which can then be analysed or plotted through the use of other functions and modules
def __init__(self, stock, start_date, end_date): try: self.data = yahoo_finance.Share(stock).get_historical(start_date, end_date) self.close = [dic['Close'] for dic in self.data] self.open = [dic['Open'] for dic in self.data] self.date = [dic['Date'] for dic in self.data] except Exception, error_StockClass__init__: print 'error_StockClass__init__: ', error_StockClass__init__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self, dataset_dir, listfile=None):\n Reader.__init__(self, dataset_dir, listfile)\n self._data = [line.split(',') for line in self._data]\n\n def process_i...
[ "0.70053685", "0.6822351", "0.68023336", "0.67671615", "0.67606527", "0.6706474", "0.65779483", "0.65768045", "0.65497845", "0.65438706", "0.64940745", "0.64903885", "0.6471294", "0.6471034", "0.6459046", "0.64494765", "0.64485466", "0.6430215", "0.6430215", "0.6430215", "0.6...
0.0
-1
The delete method has yet to be designed. NOT IN USE
def __delete__(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n ...", "def delete():", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n raise NotImplementedError", "def delete(self):\n raise NotImplementedError()...
[ "0.8987653", "0.8940316", "0.8721936", "0.8721936", "0.8721936", "0.8721936", "0.85827905", "0.84170693", "0.84170693", "0.83346015", "0.83346015", "0.8312875", "0.8245658", "0.81583333", "0.8149139", "0.80602145", "0.80241364", "0.80155075", "0.79894155", "0.7970612", "0.783...
0.7856196
20
Log a message to ``kastle`` logger.
def log(level: str, *messages: str) -> None: for message in messages: getattr(logger, level)(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(self, message: str):", "def log(self, message):", "def log(self, message):\n self._logger.write(message)", "def log(self, _strMessage=\"\"):\n self.edLogging.log(_strMessage)", "def log( loglevel, message ):\n E.log( loglevel, message )", "def log(\n message: str,\n ...
[ "0.6749791", "0.66110027", "0.62929875", "0.61028975", "0.60638314", "0.60285443", "0.6009939", "0.6003057", "0.59888184", "0.5934584", "0.5918162", "0.59128755", "0.5903931", "0.5870506", "0.5867751", "0.58582294", "0.58571404", "0.58528185", "0.5843078", "0.5834469", "0.582...
0.0
-1
Plot the languages stored in the dictionaries
def plot_languages(dict_usage_complexities, dict_cognitive_complexity): attested_languages = ( frozenset(['nor', 'and', 'or', 'not']), frozenset(['and', 'or', 'not']), frozenset(['and', 'not']), frozenset(['or', 'not']), ) fig, ax = plt.subplots(figsize=(8.27,4)) for name in dict_usage_complexities.keys(): # if not any([i in ['nc', 'nic', 'bc', 'XOR', 'c', 'ic'] for i in name]) and 'not' in name: if 'not' in name: # if True: usage_complexity = dict_usage_complexities[name] cognitive_complexity = dict_cognitive_complexity[name] if name in attested_languages: color = 'red' zorder = 10 if name == frozenset(['or', 'not']): yshift = 0.4 else: yshift = 0 ax.text( usage_complexity + 0.02, cognitive_complexity + 0.3 + yshift, s=','.join(name), fontsize='x-small' ) else: color='black' zorder = 1 # ax.scatter( # usage_complexity, cognitive_complexity, # color=color, # zorder=zorder # ) # ax.text( # usage_complexity, cognitive_complexity, # s=','.join(name), # fontsize='xx-small', # rotation=90, # color=color # ) ax.scatter(usage_complexity,cognitive_complexity,color=color) ax.set_xlabel('Usage complexity') ax.set_ylabel('Conceptual complexity') # ax.set_xlim(0,3) ax.set_xlim(1.05,2.8) # plt.show() plt.savefig('figure.png', dpi=300, transparent=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_vecDict(vecDict):\n for url in vecDict:\n plt.plot(vecDict[url])\n plt.legend([key for key in vecDict])\n plt.title(f'Vectors for {len(vecDict)} Documents')\n plt.xlabel('Vector Dimensions')\n plt.ylabel('Document Value')\n plt.show()", "def draw_all_plots(self):\n\n ...
[ "0.60507584", "0.5974315", "0.5905134", "0.5861216", "0.58539575", "0.57940316", "0.5775768", "0.5712009", "0.57002044", "0.5681444", "0.56463766", "0.5642815", "0.5637291", "0.5622407", "0.5596009", "0.5593054", "0.5562897", "0.55299807", "0.5506552", "0.5498803", "0.5488566...
0.6960431
0
Requests frames for a product.
def find(cls, product_id, start=None, end=None, limit=None, sort=None, reruns=None, **kwargs): return super(ProductFrame, cls).find(product_id=product_id, start=start, end=end, limit=limit, sort=sort, reruns=reruns, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _request_frame(self):\n self._send_command('GET_FRAME')", "def GetProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def products(self, star...
[ "0.62623674", "0.61581105", "0.55546266", "0.55507904", "0.5484503", "0.5441134", "0.5407963", "0.5333419", "0.5302095", "0.5175288", "0.5174018", "0.5140395", "0.5135881", "0.51053953", "0.5032891", "0.50136805", "0.50059825", "0.49878338", "0.4946766", "0.49369532", "0.4900...
0.5573586
2
Requests frames for a product.
def find(cls, forecast_id, start=None, end=None, limit=None, sort=None, reruns=None, **kwargs): return super(ForecastFrame, cls).find(forecast_id=forecast_id, start=start, end=end, limit=limit, sort=sort, reruns=reruns, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _request_frame(self):\n self._send_command('GET_FRAME')", "def GetProduct(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def find(cls, product_i...
[ "0.62623674", "0.61581105", "0.5573586", "0.55546266", "0.55507904", "0.5484503", "0.5441134", "0.5407963", "0.5333419", "0.5302095", "0.5175288", "0.5174018", "0.5140395", "0.5135881", "0.51053953", "0.5032891", "0.50136805", "0.50059825", "0.49878338", "0.4946766", "0.49369...
0.0
-1
calculate total residual for fits to several data sets held in a 2D array, and modeled by Gaussian functions
def objective(self, params, x, data): # make residual per data set ndata, nx = data.shape resid = 0.0*data[:] resid[0, :] = data[0, :] - self.thermo(params, x) resid[1, :] = data[1, :] - self.density(params, x) # now flatten this to a 1D array, as minimize() needs return resid.flatten()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residual(pars, data= None):\n\n\t\t\tresid = np.array([])\n\n\n\t\t\t# make residual per data set\n\n\t\t\tfor N in range(n_annulus):\n\n\t\t\t\tmdl_ev = 0\n\t\t\t\tr_space_k = rings_pos[N+1] - rings_pos[N] \n\t\t\t\tmask = np.where( (r_n >= rings_pos[N] ) & (r_n < rings_pos[N+1]) )\n\t\t\t\tx,y = XY_mesh[0][m...
[ "0.6726535", "0.6726535", "0.6726535", "0.6638139", "0.6608545", "0.6222338", "0.6214014", "0.6180789", "0.61609596", "0.6121418", "0.6121418", "0.6110018", "0.6100451", "0.60828114", "0.602451", "0.6002838", "0.59453905", "0.5928811", "0.59279364", "0.5912145", "0.5909047", ...
0.5425459
65
Merge draft invoices. Work only with same partner. You can merge invoices and refund invoices with echa other. Moves all lines on the first invoice.
def merge_invoice(self, cr, uid, invoices, context=None): order_ids = [] pick_ids = [] if len(invoices) <= 1: return False parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id']) for inv in invoices: if parent.partner_id != inv.partner_id: raise osv.except_osv(_("Partners don't match!"), _("Can not merge invoice(s) on different partners or states !.")) if inv.state != 'draft': raise osv.except_osv(_("Invalid action !"), _("You can merge only invoices in draft state.")) # Merge invoices that are in draft state inv_line_obj = self.pool.get('account.invoice.line') name = parent.name comment = parent.comment origin = parent.origin for inv in invoices: if inv.id == parent.id: continue # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head. if inv.name: # Find if the same name already exist, if yes, skip to add. name_list = name.replace(' ', '').split(',') if inv.name not in name_list: name += ', %s' % inv.name if inv.comment: comment = comment and comment + ', %s' % inv.comment or inv.comment if inv.origin: origin += ', %s' % inv.origin line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)]) for inv_lin in inv_line_obj.browse(cr, uid, line_ids): mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id), ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same. 
]) if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)}) inv_line_obj.unlink(cr, uid, inv_lin.id) elif inv.type == parent.type: inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id}) else: inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity}) if inv.sale_order_ids: order_ids += [order.id for order in inv.sale_order_ids] if inv.picking_ids: pick_ids += [picking.id for picking in inv.picking_ids] self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment}) #Remove By DRB #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id)) #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id)) self.unlink(cr, uid, [inv.id]) #Distinct List order_ids = list(set(order_ids)) pick_ids = list(set(pick_ids)) self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]}) self.button_reset_taxes(cr, uid, [parent.id]) return parent.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vend...
[ "0.6526457", "0.60499185", "0.6039584", "0.5971947", "0.5949904", "0.5877273", "0.58699375", "0.5868027", "0.5864572", "0.57496387", "0.56165034", "0.5592037", "0.55525774", "0.5516271", "0.5495002", "0.5483541", "0.5412485", "0.5412102", "0.53373694", "0.53041404", "0.529060...
0.7914905
0
r"""Return the standard path to the shared area on the current platform.
def shared_area_path() -> str: try: return os.environ["OITG_SHARED_AREA"] except KeyError: pass if os.name == "nt": # Windows return "Z:\\" if os.name == "unix" or os.name == "posix": # Linux / OSX / ... return os.path.expanduser("~/steaneShared/") raise Exception("Unknown OS")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def path_share(self) -> Path:\n return self.path_supervisor / SHARE_DATA", "def get_path(self):\n\t\treturn call_sdk_function('PrlShare_GetPath', self.handle)", "...
[ "0.75354296", "0.6952207", "0.67871875", "0.67391086", "0.67256176", "0.6657467", "0.6635167", "0.661767", "0.66105354", "0.6436675", "0.6340287", "0.6331047", "0.63205075", "0.6297639", "0.62504154", "0.6217222", "0.6186836", "0.6157988", "0.61453235", "0.6119978", "0.611485...
0.85109854
0
Return the path to the given users analysis directory on the shared area (``/Users//analysis``).
def analysis_root_path(user: Optional[str] = None) -> str: if user is None: user = _get_user() return os.path.join(shared_area_path(), "Users", user, "analysis")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str:\n if day is None:\n day = date.today().isoformat()\n if user is None:\n user = _get_user()\n path = os.path.join(analysis_root_path(user=user), day)\n\n if not os.access(path, os.R_OK):\n # If t...
[ "0.6710544", "0.66245717", "0.64282644", "0.61189187", "0.5994886", "0.5977346", "0.5951611", "0.59258217", "0.5919063", "0.5871089", "0.5855553", "0.5822797", "0.5740604", "0.5706992", "0.56850857", "0.5682241", "0.5660873", "0.5612151", "0.5608156", "0.5605027", "0.560351",...
0.84479433
0
Return the path to the analysis directory for the given day, defaulting to today. The analysis directory is intended to be used as working space for analysing data while it is taken, so that the code can easily be found again later if the data or conclusions reached are reexamined. If the directory does not exist, it is created.
def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str: if day is None: day = date.today().isoformat() if user is None: user = _get_user() path = os.path.join(analysis_root_path(user=user), day) if not os.access(path, os.R_OK): # If the dir does not exist, create it os.mkdir(path) return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_day_data_path(self, days_ago=0):\n home = os.environ.get('USERPROFILE').replace('\\\\', '/')\n self.data_dir= os.path.join(home, 'TimeData')\n if not os.path.isdir(self.data_dir):\n mkdir(self.data_dir)\n today_filename = os.path.join(\n self.data_dir,\n ...
[ "0.62076753", "0.5934615", "0.59194785", "0.58207476", "0.57378083", "0.5428975", "0.54136276", "0.53955543", "0.5333741", "0.5282543", "0.5224942", "0.519696", "0.519696", "0.5180282", "0.5148305", "0.51434815", "0.5098279", "0.5076191", "0.5064754", "0.50544584", "0.5054282...
0.7822021
0
Return the path to an experiment's ARTIQ results directory. The standard results path is ``/artiqResults/``.
def artiq_results_path(experiment: Optional[str] = None) -> str: path = os.path.join(shared_area_path(), "artiqResults") if experiment is None: try: experiment = os.environ["OITG_EXPERIMENT"] except KeyError: raise Exception( "No experiment supplied, and no OITG_EXPERIMENT environment key") return os.path.join(path, experiment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"", "def data_abex_results_iteration_dir(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n return data_abex_results_dir(experiment_name) / iteration_name(iter...
[ "0.80512667", "0.7191955", "0.68996114", "0.6845597", "0.68339694", "0.66065645", "0.65667313", "0.65667313", "0.65667313", "0.65351224", "0.63939637", "0.6320295", "0.61587936", "0.6123075", "0.60877836", "0.60146594", "0.5972927", "0.5961885", "0.5882054", "0.58809346", "0....
0.873001
0
estimate an MxF user factor matrix and an FxN item factor matrix from the MxN rating matrix
def factor_mat(all_dat, f_num, iterations, regularization):
    """Factor an MxN rating matrix into an MxF user-factor matrix and an
    FxN item-factor matrix via alternating least squares.

    :param all_dat: MxN rating matrix.
    :param f_num: number of latent factors F.
    :param iterations: number of ALS sweeps.
    :param regularization: L2 regularization weight passed to the solver.
    :return: ``[u_fac, i_fac.T]`` — the MxF and FxN factor matrices.
    """
    # Number of users (rows) and items (columns).
    u_num, i_num = all_dat.shape

    # Random initial guesses for both factor matrices.
    u_fac = np.matrix(np.random.rand(u_num, f_num))  # M x F
    i_fac = np.matrix(np.random.rand(i_num, f_num))  # N x F

    # Implicit-feedback preference and confidence matrices.
    preference = cal_preference(all_dat)
    confidence = cal_confidence(all_dat)

    # Alternate: re-solve users with items fixed, then items with users fixed.
    for _ in range(iterations):
        u_fac = alternate_ls(u_num, i_fac, preference, confidence,
                             regularization)
        i_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T,
                             regularization)

    # Persist both factor matrices as tab-separated files.
    pd.DataFrame(u_fac).to_csv("tmp/u_fac.tmp", index=False, header=False,
                               sep='\t', encoding='utf-8')
    pd.DataFrame(i_fac.T).to_csv("tmp/i_fac.tmp", index=False, header=False,
                                 sep='\t', encoding='utf-8')

    # An MxF user factor matrix and an FxN item factor matrix.
    return [u_fac, i_fac.T]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n se...
[ "0.6282623", "0.62621325", "0.60587424", "0.6045789", "0.6040702", "0.6002245", "0.5951851", "0.59179777", "0.59059614", "0.58943605", "0.589419", "0.587945", "0.5858747", "0.58264637", "0.5798391", "0.57919794", "0.574544", "0.5737683", "0.5693354", "0.5668427", "0.56560904"...
0.72323316
0
calculate the confidence of each user-item pair
def cal_confidence(dat):
    """Return the confidence matrix for each user-item pair.

    Confidence is ``1 + alpha * r_ui``, so unobserved pairs (rating 0)
    receive a baseline confidence of 1.

    :param dat: MxN rating/observation matrix (array-like).
    :return: MxN ``np.matrix`` of confidences.
    """
    alpha = 40.0  # confidence weighting constant
    # The original allocated np.zeros(dat.shape) and immediately overwrote
    # it; the dead allocation has been removed.
    return np.matrix(1 + alpha * dat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _predict_user_item(self, user, item):\n if not isinstance(user, int):\n user = self._user_to_ndx[user]\n if not isinstance(item, int):\n item = self._item_to_ndx[item]\n\n try:\n rating_mean = self._averages[user]\n except AttributeError:\n ...
[ "0.6476335", "0.6331386", "0.6273888", "0.62594044", "0.62203914", "0.6011443", "0.58879673", "0.58871895", "0.58729005", "0.58711815", "0.582148", "0.57926047", "0.57851225", "0.5779903", "0.57435066", "0.5729716", "0.5727403", "0.57262325", "0.56652945", "0.5631243", "0.560...
0.50412726
94
calculate the preference of each user-item pair
def cal_preference(dat):
    """Return the binary preference matrix for each user-item pair:
    1.0 where a rating was observed, 0.0 where it was not.
    """
    observed = (dat != 0).astype(float)
    return np.matrix(observed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _predict_user_item(self, user, item):\n if not isinstance(user, int):\n user = self._user_to_ndx[user]\n if not isinstance(item, int):\n item = self._item_to_ndx[item]\n\n try:\n rating_mean = self._averages[user]\n except AttributeError:\n ...
[ "0.6196895", "0.6023507", "0.6007207", "0.59735894", "0.5927089", "0.59151995", "0.5914826", "0.590741", "0.5795329", "0.56757665", "0.56184", "0.5602994", "0.5584905", "0.55800134", "0.5564539", "0.5547924", "0.55471706", "0.54899234", "0.5459986", "0.5451552", "0.54258966",...
0.0
-1
calculate latent factors using the alternating least squares method, applicable to computing both user factors and item factors
def alternate_ls(u_num, Y, P, C, reg):
    """One half-step of alternating least squares (users or items, by symmetry).

    For each row u, solves the regularized normal equations
    ``x_u = (YtY + Yt(Cu - I)Y + reg*I)^-1 · Yt Cu p(u)``.

    :param u_num: number of rows (users or items) to solve for.
    :param Y: fixed factor matrix, (items-or-users) x F ``np.matrix``.
    :param P: preference matrix, u_num x i_num.
    :param C: confidence matrix, u_num x i_num.
    :param reg: L2 regularization weight.
    :return: u_num x F ``np.matrix`` of newly solved factors.
    """
    # Number of items/users and number of latent factors.
    [i_num, f_num] = Y.shape

    # Output buffer.
    X = np.zeros((u_num, f_num))

    # YtY does not depend on u, so compute it once.
    YtY = Y.T * Y

    for u in range(u_num):
        # Diagonal of the confidence matrix Cu, stored as a row vector,
        # and the corresponding preference row.
        Cu = C[u, :]
        Pu = P[u, :]
        Cu_I = Cu - 1

        # Yt(Cu - I)Y as a single matrix product. The original computed each
        # entry with an O(f^2) Python double loop of element-wise dot
        # products; Y.T * CuIY yields exactly the same dot products.
        CuIY = np.multiply(Y, Cu_I.T)  # weight each row of Y with (Cu - I)
        YtCu_IY = Y.T * CuIY

        # Left term: ((YtCuY + reg*I)^-1) = (YtY + Yt(Cu-I)Y + reg*I)^-1
        left = np.linalg.inv(YtY + YtCu_IY + reg * np.eye(f_num))

        # Right term: Yt Cu p(u)
        right = Y.T * np.multiply(Cu.T, Pu.T)

        # Latent factor vector for this user/item.
        X[u, :] = (left * right).T

    # Return an MxF or NxF matrix.
    return np.matrix(X)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_factors():", "def factors(self):\n X = [Var(i,2) for i in range(self.nvar)]\n factors = [Factor([],np.exp(self.c))] \n # TODO: exclude if zero? or exclude if inf/-inf, or if in \"assigned\", or?\n factors = factors + [Factor([X[i]],[-th,th]).exp() for i,th in enumerate(self.h) if self.dims[i]...
[ "0.6412927", "0.6364732", "0.6273934", "0.59649765", "0.58943266", "0.5739722", "0.56869096", "0.5642237", "0.5588103", "0.55404395", "0.55167574", "0.54780066", "0.54257447", "0.5419568", "0.5391013", "0.53886884", "0.53792053", "0.53650635", "0.53555024", "0.53386456", "0.5...
0.5874729
5
Get authorization header for GoDaddy Developer API.
def _get_headers() -> dict:
    """Build the authorization header for the GoDaddy Developer API."""
    key = API_KEY_CRED_LOADER.load_credentials()
    secret = API_SECRET_CRED_LOADER.load_credentials()
    return {"Authorization": f"sso-key {key}:{secret}"}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def get_authorization_...
[ "0.77325", "0.77325", "0.7386359", "0.73046136", "0.71326786", "0.70995873", "0.7064967", "0.69181406", "0.67679745", "0.6723393", "0.67186785", "0.66517", "0.66500354", "0.66368353", "0.6635809", "0.6585172", "0.6572229", "0.6569699", "0.65522623", "0.65466243", "0.65304196"...
0.6833908
8
Call GoDaddy developer API endpoint. Only supports GET endpoints to keep access readonly.
def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict:
    """Call a GoDaddy developer API endpoint and return the parsed JSON body.

    Only supports GET endpoints to keep access read-only.

    :param url_suffix: endpoint path, e.g. ``"v1/domains"``.
    :param base_url: API root; defaults to the module-level ``BASE_URL``.
    """
    headers = _get_headers()
    # Join URL parts with '/' explicitly: os.path.join is platform-dependent
    # (it would insert '\\' on Windows) and silently discards base_url when
    # the suffix starts with '/'.
    url = "{}/{}".format(base_url.rstrip("/"), url_suffix.lstrip("/"))
    resp = requests.get(url, headers=headers)
    return resp.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def call_api(url):\n\n req = requests.get(url)\n return req", "def call_api(url):\n\n req = requests.get(url)\n return req", "def requester(get_args: dict) -> dict:\n get_args.update(dict(apikey = apikey))\n response ...
[ "0.6853146", "0.6268339", "0.6268339", "0.62518954", "0.62271094", "0.605959", "0.60268193", "0.60259753", "0.59603226", "0.59556425", "0.59494823", "0.5872682", "0.5860177", "0.58262163", "0.57653207", "0.5758254", "0.57468194", "0.57456577", "0.5684184", "0.567575", "0.5663...
0.55546147
38
Get list of Domains for this API key.
def get_domains() -> List[str]:
    """Return the list of domain names registered under this API key."""
    # Each element of the response is a dict such as:
    # {'createdAt': '2016-06-25T03:08:44.000Z', 'domain': 'mydomain.com',
    #  'domainId': 12345678, 'expirationProtected': False,
    #  'expires': '2020-06-25T03:08:44.000Z', 'holdRegistrar': False,
    #  'locked': True, 'nameServers': None, 'privacy': False,
    #  'renewAuto': True, 'renewDeadline': '2020-08-09T03:08:44.000Z',
    #  'renewable': True, 'status': 'ACTIVE', 'transferProtected': False}
    response = _call_endpoint("v1/domains")
    return [entry["domain"] for entry in response]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def listDomains(self):\n reply = self.rpc.getDomains(self.usern...
[ "0.82744664", "0.76725835", "0.7275466", "0.72130734", "0.7188468", "0.7123102", "0.7106527", "0.71019924", "0.7034431", "0.70254606", "0.7023339", "0.69961786", "0.6931258", "0.69296205", "0.6853687", "0.6843187", "0.6822089", "0.6772705", "0.6753809", "0.6742991", "0.669397...
0.78274804
1