query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Get the number of bits needed for an item.
def _get_nr_of_bits(self): return sum(self._size_var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\r\n return numBits(self.n)", "def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def number_of_...
[ "0.7077946", "0.70436716", "0.69864804", "0.69864804", "0.6966107", "0.68727595", "0.68669325", "0.68431276", "0.6806957", "0.67431825", "0.67405957", "0.6726725", "0.66758716", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658841", "0.6658...
0.7367046
0
Get a random genom.
def get_random(self): base_genom = "1" * sum(self._size_var) return utils.randomise_a_string(base_genom)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_random_genome(self):\n return random.choice(self.genomes)", "def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in ran...
[ "0.7129977", "0.68136334", "0.65213746", "0.64397925", "0.6434923", "0.6387772", "0.63817495", "0.6367737", "0.63332325", "0.63026583", "0.62996596", "0.6297405", "0.6288606", "0.62817526", "0.62786907", "0.6232036", "0.6221588", "0.6218463", "0.61816496", "0.6164786", "0.612...
0.6945635
1
Create a new block cipher, configured in CTR mode.
def __init__(self, block_cipher, initial_counter_block, prefix_len, counter_len, little_endian): if len(initial_counter_block) == prefix_len + counter_len: self.nonce = _copy_bytes(None, prefix_len, initial_counter_block) """Nonce; not available if there is a fixed suffix""" self._state = VoidPointer() result = raw_ctr_lib.CTR_start_operation(block_cipher.get(), c_uint8_ptr(initial_counter_block), c_size_t(len(initial_counter_block)), c_size_t(prefix_len), counter_len, little_endian, self._state.address_of()) if result: raise ValueError("Error %X while instantiating the CTR mode" % result) # Ensure that object disposal of this Python object will (eventually) # free the memory allocated by the raw library for the cipher mode self._state = SmartPointer(self._state.get(), raw_ctr_lib.CTR_stop_operation) # Memory allocated for the underlying block cipher is now owed # by the cipher mode block_cipher.release() self.block_size = len(initial_counter_block) """The block size of the underlying cipher, in bytes.""" self._next = [self.encrypt, self.decrypt]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_ctr_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n\n counter = kwargs.pop(\"counter\", None)\n nonce = kwargs.pop(\"nonce\", None)\n initial_value = kwargs.pop(\"initial_value\", None)\n if kwargs:\n raise TypeError(\"Invalid parameters for CTR...
[ "0.7619631", "0.7126915", "0.67098886", "0.65550065", "0.6327081", "0.62329865", "0.6143115", "0.60997117", "0.60525525", "0.59431106", "0.59406626", "0.588603", "0.5855224", "0.5798523", "0.5777665", "0.573847", "0.5719348", "0.5686902", "0.5685684", "0.56328213", "0.55945",...
0.714244
1
Encrypt data with the key and the parameters set at initialization.
def encrypt(self, plaintext, output=None): if self.encrypt not in self._next: raise TypeError("encrypt() cannot be called after decrypt()") self._next = [self.encrypt] if output is None: ciphertext = create_string_buffer(len(plaintext)) else: ciphertext = output if not is_writeable_buffer(output): raise TypeError("output must be a bytearray or a writeable memoryview") if len(plaintext) != len(output): raise ValueError("output must have the same length as the input" " (%d bytes)" % len(plaintext)) result = raw_ctr_lib.CTR_encrypt(self._state.get(), c_uint8_ptr(plaintext), c_uint8_ptr(ciphertext), c_size_t(len(plaintext))) if result: if result == 0x60002: raise OverflowError("The counter has wrapped around in" " CTR mode") raise ValueError("Error %X while encrypting in CTR mode" % result) if output is None: return get_raw_buffer(ciphertext) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypt_data(self, params):\n raise NotImplementedError", "def encrypt(self, sensor_data):\r\n \r\n # set encryption parameters\r\n encryption1 = aes(self.ivkey, 2, self.staticiv)\r\n encryption2 = aes(self.datakey, 2, self.iv)\r\n # encrypt data\r\n self.encr...
[ "0.7956872", "0.7449426", "0.73192656", "0.7263974", "0.7056065", "0.70533365", "0.69593483", "0.6946052", "0.68951267", "0.68717563", "0.68052113", "0.6775894", "0.67707175", "0.67657804", "0.67324567", "0.6702431", "0.66863525", "0.6679284", "0.66733557", "0.66417927", "0.6...
0.0
-1
Decrypt data with the key and the parameters set at initialization.
def decrypt(self, ciphertext, output=None): if self.decrypt not in self._next: raise TypeError("decrypt() cannot be called after encrypt()") self._next = [self.decrypt] if output is None: plaintext = create_string_buffer(len(ciphertext)) else: plaintext = output if not is_writeable_buffer(output): raise TypeError("output must be a bytearray or a writeable memoryview") if len(ciphertext) != len(output): raise ValueError("output must have the same length as the input" " (%d bytes)" % len(plaintext)) result = raw_ctr_lib.CTR_decrypt(self._state.get(), c_uint8_ptr(ciphertext), c_uint8_ptr(plaintext), c_size_t(len(ciphertext))) if result: if result == 0x60002: raise OverflowError("The counter has wrapped around in" " CTR mode") raise ValueError("Error %X while decrypting in CTR mode" % result) if output is None: return get_raw_buffer(plaintext) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n see...
[ "0.7820853", "0.7805203", "0.77293396", "0.76565194", "0.7467573", "0.74420536", "0.73967624", "0.72905445", "0.72817254", "0.7259105", "0.7258132", "0.71645397", "0.71422887", "0.70957357", "0.70479023", "0.69664323", "0.695035", "0.6944712", "0.68905735", "0.6879039", "0.68...
0.0
-1
Instantiate a cipher object that performs CTR encryption/decryption.
def _create_ctr_cipher(factory, **kwargs): cipher_state = factory._create_base_cipher(kwargs) counter = kwargs.pop("counter", None) nonce = kwargs.pop("nonce", None) initial_value = kwargs.pop("initial_value", None) if kwargs: raise TypeError("Invalid parameters for CTR mode: %s" % str(kwargs)) if counter is not None and (nonce, initial_value) != (None, None): raise TypeError("'counter' and 'nonce'/'initial_value'" " are mutually exclusive") if counter is None: # Crypto.Util.Counter is not used if nonce is None: if factory.block_size < 16: raise TypeError("Impossible to create a safe nonce for short" " block sizes") nonce = get_random_bytes(factory.block_size // 2) else: if len(nonce) >= factory.block_size: raise ValueError("Nonce is too long") # What is not nonce is counter counter_len = factory.block_size - len(nonce) if initial_value is None: initial_value = 0 if is_native_int(initial_value): if (1 << (counter_len * 8)) - 1 < initial_value: raise ValueError("Initial counter value is too large") initial_counter_block = nonce + long_to_bytes(initial_value, counter_len) else: if len(initial_value) != counter_len: raise ValueError("Incorrect length for counter byte string (%d bytes, expected %d)" % (len(initial_value), counter_len)) initial_counter_block = nonce + initial_value return CtrMode(cipher_state, initial_counter_block, len(nonce), # prefix counter_len, False) # little_endian # Crypto.Util.Counter is used # 'counter' used to be a callable object, but now it is # just a dictionary for backward compatibility. 
_counter = dict(counter) try: counter_len = _counter.pop("counter_len") prefix = _counter.pop("prefix") suffix = _counter.pop("suffix") initial_value = _counter.pop("initial_value") little_endian = _counter.pop("little_endian") except KeyError: raise TypeError("Incorrect counter object" " (use Crypto.Util.Counter.new)") # Compute initial counter block words = [] while initial_value > 0: words.append(struct.pack('B', initial_value & 255)) initial_value >>= 8 words += [ b'\x00' ] * max(0, counter_len - len(words)) if not little_endian: words.reverse() initial_counter_block = prefix + b"".join(words) + suffix if len(initial_counter_block) != factory.block_size: raise ValueError("Size of the counter block (%d bytes) must match" " block size (%d)" % (len(initial_counter_block), factory.block_size)) return CtrMode(cipher_state, initial_counter_block, len(prefix), counter_len, little_endian)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, block_cipher, initial_counter_block,\n prefix_len, counter_len, little_endian):\n\n if len(initial_counter_block) == prefix_len + counter_len:\n self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)\n \"\"\"Nonce; not available if there is...
[ "0.72857714", "0.7123023", "0.6978616", "0.6857513", "0.6587187", "0.6543356", "0.65047663", "0.645677", "0.64431256", "0.6244246", "0.6212461", "0.618751", "0.60572654", "0.6051523", "0.6047843", "0.6029323", "0.592563", "0.5921373", "0.5918702", "0.59144205", "0.5887465", ...
0.7571308
0
Connect to the database
def attach(self): # if i have an existing connection to the back end, do nothing if self.connection is not None: return # otherwise, build the connection specification string spec = [ # the name of the database is required ['dbname', self.database] ] # the others are optional, depending on how the database is configured if self.username is not None: spec.append(['user', self.username]) if self.password is not None: spec.append(('password', self.password)) if self.application is not None: spec.append(('application_name', self.application)) # put it all together spec = ' '.join('='.join(entry) for entry in spec) # establish the connection self.connection = self.postgres.connect(spec) # if the user asked for {quiet} operation if self.quiet: # set the minimum diagnostic level to {warning} self.execute("SET client_min_messages = warning;") # all done return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_db(self):\n try:\n self.connection = self.engine.connect()\n except Exception:\n self.print_std_error()", "def connect_to_db(self):\n self.read_config()\n print('Connecting to database...', end=\"\")\n self.db_conn = pymysql.connect(host=self.h...
[ "0.86798936", "0.8313488", "0.8166338", "0.8110618", "0.8100089", "0.8085869", "0.8058323", "0.80558985", "0.8038659", "0.7968234", "0.7959786", "0.7954142", "0.78795606", "0.7867048", "0.78208673", "0.7815891", "0.7813048", "0.7778143", "0.77519286", "0.775042", "0.7729294",...
0.0
-1
Close the connection to the database Closing a connection makes it unsuitable for any further database access. This applies to all objects that may retain a reference to the connection being closed. Any uncommitted changes will be lost
def detach(self): # if i don't have an existing connection to the back end, do nothing if self.connection is None: return # otherwise, close the connection status = self.postgres.disconnect(self.connection) # invalidate the member self.connection = None # and return the status return status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_db_connection(cls):\n db.close()", "def close_database(self):\n if self._conn is not None:\n self._conn.close()\n self._conn = None", "def close_connection(self):\n if self.cursor is None and self.database is None:\n # if we don't have an open con...
[ "0.84439224", "0.83842707", "0.8336061", "0.8335705", "0.83256894", "0.8237229", "0.81866556", "0.8157642", "0.8143764", "0.8094697", "0.80411196", "0.7991679", "0.7938622", "0.7927069", "0.7838937", "0.7826264", "0.780774", "0.7779079", "0.77731264", "0.7767926", "0.77654034...
0.0
-1
Execute the sequence of SQL statements in {sql} as a single command
def execute(self, *sql): # assemble the command and pass it on to the connection return self.postgres.execute(self.connection, "\n".join(sql))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n...
[ "0.7174249", "0.7070815", "0.70517427", "0.7040094", "0.7025959", "0.6985139", "0.6980579", "0.6964437", "0.69108903", "0.6902813", "0.6886875", "0.6844708", "0.6828679", "0.6801381", "0.67388976", "0.67349917", "0.67162806", "0.66934794", "0.66897595", "0.66596186", "0.66156...
0.7186094
0
Hook invoked when the context manager is entered
def __enter__(self): # mark the beginning of a transaction self.execute(*self.sql.transaction()) # and hand me back to the caller return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_context(self):\n pass", "def context_started(self, cls, example):", "def handle_context_missing(self):", "def on_start(self, ctx):\n pass", "def __enter__(self):\n self._logger.debug(\"__enter__()\")\n self.install(\"PRE\")", "def on_hook(self) -> None:", "def context...
[ "0.7259149", "0.72568685", "0.6937752", "0.69183266", "0.6744398", "0.6600733", "0.65994066", "0.65975237", "0.6585466", "0.6491403", "0.64669836", "0.64669746", "0.6438095", "0.64119595", "0.63947785", "0.637796", "0.6347448", "0.6327832", "0.6199883", "0.6191347", "0.618087...
0.0
-1
Hook invoked when the context manager's block exits
def __exit__(self, exc_type, exc_instance, exc_traceback): # if there were no errors detected if exc_type is None: # commit the transaction to the datastore self.execute(*self.sql.commit()) # otherwise else: # roll back self.execute(*self.sql.rollback()) # indicate that we want to re-raise any exceptions that occurred while executing the # body of the {with} statement return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_exit(self):\n pass", "def __exit__(self, *args, **kwargs):\n\n pass", "def on_exit(self, userdata):\n pass", "def on_end(self, ctx):\n pass", "def __exit__(self, *args):\n pass", "def __exit__(self, *args):\n if self.teardown:\n super().__exit__...
[ "0.6937684", "0.67767626", "0.6692932", "0.66752976", "0.666442", "0.6626548", "0.66104794", "0.64507276", "0.63927466", "0.63927466", "0.6357777", "0.6347324", "0.6330202", "0.6329335", "0.6317543", "0.6316851", "0.6314924", "0.6314924", "0.6314924", "0.62950003", "0.6293853...
0.0
-1
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the string representation of the model
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding("utf-8") return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, ShowProjectWorkHoursResponseBodyWorkHours): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", ...
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
This is the view handler for the "/" url.
async def index(request): # Note: we return a dict not a response because of the @template decorator return { 'title': request.app['name'], 'intro': "Success! you've setup a basic aiohttp app.", }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def root(self, request):\n return ''", "def index(_):\n template = loader.get_template('route/home.html')\n return HttpResponse(template.render(Context({})))", "def root(self):\n return self.app.get('/',headers=self.headers)", "def get(self):\n\n self.response.out.write(template.re...
[ "0.6833893", "0.68323773", "0.68276465", "0.6732128", "0.6723868", "0.67115694", "0.670275", "0.670275", "0.6678917", "0.6669672", "0.66280764", "0.6614279", "0.6596165", "0.6596165", "0.6596165", "0.6584849", "0.6568513", "0.6565733", "0.6544685", "0.6540552", "0.6508354", ...
0.0
-1
Return RGB image representing fractal.
def render(self, width=300, height=300, zoom=None, itermax=50, colors=5, color_offset=0.5, **kwargs): if zoom is None: zoom = self._defaultZoom() complex_plane = self._complexPlane(width, height, *zoom) fractal = self._computeFractal(complex_plane, itermax, **kwargs) rgb_image = self._toRgbImage(fractal, colors, color_offset) # Display fractal on screen. self._show(rgb_image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _toRgbImage(self, fractal, colors, color_offset):\n soln_real = adjustRange(fractal[0], 0, 127)\n soln_imag = adjustRange(fractal[1], 0, 127)\n iters = adjustRange(fractal[2], 0, 128)\n\n rgb_image = np.array([\n soln_real + iters,\n soln_imag + iters,\...
[ "0.72595185", "0.70602125", "0.70376635", "0.6999839", "0.6523746", "0.6408264", "0.6224894", "0.6194157", "0.61483246", "0.6030305", "0.601784", "0.59868425", "0.5976731", "0.59150046", "0.5913448", "0.58958936", "0.5891541", "0.58815885", "0.5854497", "0.5818988", "0.580588...
0.0
-1
Display given RGB array.
def _show(self, a): fig = plt.figure() fig.set_size_inches((2, 2)) ax = plt.Axes(fig, [0., 0., 1., 1.]) ax.set_axis_off() fig.add_axes(ax) plt.set_cmap('hot') ax.imshow(a, aspect='equal') plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self, colorArray):\n pass", "def _show_rgb(self):\n R, G, B = self._rgb_frames()\n image = numpy.dstack((R, G, B))\n imageItem = self.parent.image.getImageItem()\n imageItem.updateImage(image)", "def display(array):\n plt.figure()\n plt.imshow(array)...
[ "0.8111257", "0.7347514", "0.72989017", "0.6935118", "0.67059577", "0.6704922", "0.663102", "0.6547356", "0.63414645", "0.6225402", "0.6129868", "0.61273956", "0.6029412", "0.6021858", "0.5998369", "0.59782404", "0.59257555", "0.59240973", "0.59080243", "0.59073865", "0.58943...
0.0
-1
Converts the generated fractal into an RGB image array.
def _toRgbImage(self, fractal, colors, color_offset): hsv_img = np.array( [ # Cycle through color wheel. (fractal * colors + color_offset) % 1, # Saturation = 1 where fractal values > 0, # Saturation = 0 otherwise. fractal.astype(dtype=bool).astype(dtype=float), # Invert colours 1 - fractal ] ).astype(dtype=float).T rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8) return rgb_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def _toRgbImage(self, fractal, colors, color_offset):\n soln_real = adjustRang...
[ "0.7121028", "0.70841926", "0.6746689", "0.66450286", "0.6585525", "0.6470386", "0.64537305", "0.6411404", "0.63537574", "0.63390535", "0.62979555", "0.62929916", "0.6276879", "0.62607044", "0.62399733", "0.6200515", "0.61336935", "0.6093553", "0.6084001", "0.608396", "0.6082...
0.67405456
3
Return matrix representing the complex plane.
def _complexPlane(self, n, m, xmin, xmax, ymin, ymax): # Create two matrices of size n x m ix, iy = np.mgrid[0:n, 0:m] # Create range of values in the x- and y-axis real_part = np.linspace(xmin, xmax, n)[ix] imag_part = np.linspace(ymin, ymax, m)[iy] * complex(0, 1) complex_plane = real_part + imag_part return complex_plane
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_matrix(self):\n return numpy.array([[1, 0],\n [0, 1]], dtype=complex)", "def to_matrix(self):\n return numpy.array([[1, 1],\n [1, -1]], dtype=complex) / numpy.sqrt(2)", "def __complex__(self):\n return complex(self._reNum, self._...
[ "0.7560941", "0.7344105", "0.71097344", "0.69919634", "0.6817131", "0.6790059", "0.66661674", "0.66255957", "0.65777934", "0.65038025", "0.6480605", "0.64471483", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.6363316", "0.636331...
0.6266454
30
Return default zoom setting.
def _defaultZoom(self): return (-1.0, 1.0, -1.0, 1.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zoom(self):\n return self['zoom']", "def zoom(self):\n return self.container['zoom']", "def _get_zoom(self) :\n \n # TODO : make it absolute zoom value : a zoom of 1 displays one data\n # pixel in one viewport pixel.\n \n return self._zoom", "def zoom(self...
[ "0.7723424", "0.7299348", "0.72973317", "0.71936", "0.70050627", "0.68496126", "0.65712726", "0.625384", "0.6145273", "0.6142065", "0.61153233", "0.6006282", "0.5985987", "0.5982547", "0.59785664", "0.59773666", "0.5952824", "0.58633906", "0.5851663", "0.58423215", "0.5797127...
0.8090395
0
Converts the generated fractal into an RGB image array
def _toRgbImage(self, fractal, colors, color_offset): hsv_img = np.array( [ # Cycle through color wheel. (fractal * colors + color_offset) % 1, # Saturation = fractal value. fractal, # Maximum value. np.ones(fractal.shape) ] ).astype(dtype=float).T rgb_img = (mpl.colors.hsv_to_rgb(hsv_img) * 255).astype(dtype=np.uint8) return rgb_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _toRgbImage(self, fractal, colors, color_offset):\n soln_real = adjustRange(fractal[0], 0, 127)\n soln_imag = adjustRange(fractal[1], 0, 127)\n iters = adjustRange(fractal[2], 0, 128)\n\n rgb_image = np.array([\n soln_real + iters,\n soln_imag + iters,\...
[ "0.70360917", "0.7032956", "0.6748998", "0.66770595", "0.64644593", "0.6463771", "0.64612466", "0.6436402", "0.6378915", "0.6336183", "0.6302488", "0.6263761", "0.624275", "0.6217485", "0.62155837", "0.61859244", "0.61853856", "0.61381644", "0.61203206", "0.60929006", "0.6056...
0.6756124
2
Approximates root of this function using single iteration of Newton's method.
def newtonsMethod(self, x, a): return x - a * (self._f(x) / self._df(x))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newton(f, xinit, tol, N):\n if f(xinit) < tol:\n return xinit\n else:\n n = 1\n while n < N:\n xnew = xinit - (f(xinit) / derivative(f, xinit))\n if abs(f(xnew)) < tol:\n print('Root found. Number of iterations: ', n)\n return xnew\...
[ "0.7563373", "0.75321746", "0.7448436", "0.74475104", "0.7388106", "0.7297471", "0.7297028", "0.72617483", "0.72546536", "0.7249506", "0.72134876", "0.7199049", "0.7197292", "0.71775806", "0.71667355", "0.71341896", "0.71326596", "0.7126753", "0.7094095", "0.7091968", "0.7057...
0.76958627
0
Converts the generated fractal into an RGB image array
def _toRgbImage(self, fractal, colors, color_offset): soln_real = adjustRange(fractal[0], 0, 127) soln_imag = adjustRange(fractal[1], 0, 127) iters = adjustRange(fractal[2], 0, 128) rgb_image = np.array([ soln_real + iters, soln_imag + iters, iters ] ).astype(dtype=np.uint8) return rgb_image.T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def _toRgbImage(self, fractal, colors, color_offset):\n hsv_img = np.array(\n ...
[ "0.7032956", "0.6756124", "0.6748998", "0.66770595", "0.64644593", "0.6463771", "0.64612466", "0.6436402", "0.6378915", "0.6336183", "0.6302488", "0.6263761", "0.624275", "0.6217485", "0.62155837", "0.61859244", "0.61853856", "0.61381644", "0.61203206", "0.60929006", "0.60567...
0.70360917
0
Return array with values compressed into given range.
def adjustRange(a, vmin=0, vmax=255): new_a = ( ( # Represent array as floats ranging between 0 and 1. a.astype(dtype=float) / np.nanmax(a) # Fill given range. * (vmax - vmin) + vmin ) # Convert back to regular array. .astype(dtype=np.uint8) ) return new_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inflate_lateral(source,inflate_factor):\n\treturn source[np.meshgrid(*[np.arange(-inflate_factor,i+inflate_factor+1)%i for i in source.shape])]", "def rangeArray(first, last):\n \n return np.arange(first, last+1)", "def crange(*args):\r\n result = [[]]\r\n for arg in args:\r\n result = [...
[ "0.6024952", "0.5948004", "0.5860119", "0.5817936", "0.575059", "0.5741007", "0.5694915", "0.56648386", "0.56388867", "0.5611858", "0.5607776", "0.5596188", "0.55553037", "0.55267346", "0.5508638", "0.5502483", "0.54900694", "0.5425912", "0.54251516", "0.53815985", "0.5372165...
0.49630272
72
Return a bind with the given name.
def __getitem__(self, name): try: field = self.fields[name] except KeyError: raise KeyError( "Key '%s' not found in '%s'. Choices are: %s." % ( name, self.__class__.__name__, ', '.join(sorted(f for f in self.fields)), ) ) return self._fields[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_named_bind_string(self, name):\n\n return \":\" + name", "def bound(name):", "def __new__(cls, loc=None, name=None):\n assert ((loc is None and isinstance(name, str)) or\n (name is None and 0 <= loc))\n return super(Bind, cls).__new__(cls, loc, name)", "def reg_binding(self, g...
[ "0.7514782", "0.6765802", "0.6392351", "0.63033557", "0.62660104", "0.6254184", "0.6237753", "0.60957813", "0.6089807", "0.60743713", "0.6036943", "0.60255086", "0.6018643", "0.6014774", "0.59750015", "0.5960484", "0.5835848", "0.57503366", "0.56634986", "0.5630385", "0.56151...
0.0
-1
solver qui recherche dans le dictionnaire
def solver2_init(self): result = [] colors = ["white" for e in self.lettersList] for letter in self.dico: #print(letter) self.solver2_rec([], letter, result, colors, self.dico) res = set(result) res = self.decreasingList(res) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self):", "def solve(self):\n ...", "def get_sol(self):", "def solve(self):\n pass", "def solve(self):\n pass", "def solve(ctx):\n my_solver(ctx.obj['filename'])", "def solve(self, solver):\n solver.solve()", "def satisfied_constraints(self,word_id, possibl...
[ "0.64691514", "0.62616104", "0.6139838", "0.60149825", "0.60149825", "0.58877885", "0.57446116", "0.5717959", "0.5711279", "0.565116", "0.5614572", "0.5610222", "0.559666", "0.5590503", "0.5546544", "0.5545591", "0.5545591", "0.5508435", "0.5506832", "0.54851216", "0.54851216...
0.53376806
30
retourne les words d'une liste en ordre decroissant de longueur
def decreasingList(self, words): dic = {} words = self.d.lowerList(words) for word in words: if not len(word) in dic: dic[len(word)] = [] dic[len(word)].append(word) dic = dic.items() dic = sorted(dic) #dic.reverse() return [f for e in dic for f in e[1]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list", "def longwords_Li_Comp...
[ "0.7187629", "0.70133877", "0.7005348", "0.69585055", "0.69476235", "0.68506676", "0.68327844", "0.6649206", "0.6611271", "0.65282124", "0.65262544", "0.6518681", "0.6517774", "0.6490736", "0.645253", "0.6450386", "0.64484304", "0.63865644", "0.6377032", "0.63758963", "0.6374...
0.6082544
57
filter the proposal boxes
def boxes_filter(dets, PRE_NMS_TOPN, NMS_THRESH, POST_NMS_TOPN, CONF_THRESH, USE_GPU=False): # speed up nms if PRE_NMS_TOPN > 0: dets = dets[: min(len(dets), PRE_NMS_TOPN), :] # apply nms if NMS_THRESH > 0 and NMS_THRESH < 1: if USE_GPU: keep = nms_gpu(dets, NMS_THRESH) else: keep = nms(dets, NMS_THRESH) dets = dets[keep, :] if POST_NMS_TOPN > 0: dets = dets[: min(len(dets), POST_NMS_TOPN), :] inds = np.where(dets[:, -1] >= CONF_THRESH)[0] dets = dets[inds, :] return dets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = box_confidences * box_class_probs\n box_classes = np.argmax(box_scores, axis=-1)\n box_class_scores = np.max(box_scores, axis=-1)\n pos = np.where(box_class_scores >= self.object_threshold)\n\n boxes...
[ "0.67611915", "0.6558098", "0.6493744", "0.6487064", "0.644464", "0.64197695", "0.6314156", "0.6236991", "0.6236991", "0.62138236", "0.6198868", "0.61670756", "0.6158014", "0.61504215", "0.6140429", "0.61391824", "0.603643", "0.59337205", "0.59050465", "0.5873898", "0.5776954...
0.56218785
27
Draw detected bounding boxes.
def vis_detections(im, class_name, dets, thresh=0.5): im = im[:, :, (2, 1, 0)] fig, ax = plt.subplots(figsize=(12, 12)) ax.imshow(im, aspect='equal') for i, det in enumerate(dets): bbox = dets[i, :4] score = dets[i, -1] ax.add_patch( plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=3.5) ) ax.text(bbox[0], bbox[1] - 2, '{:s} {:.3f}'.format(class_name, score), bbox=dict(facecolor='blue', alpha=0.5), fontsize=14, color='white') ax.set_title(('{} detections with ' 'p({} | box) >= {:.1f}').format(class_name, class_name, thresh), fontsize=14) plt.axis('off') plt.tight_layout() plt.draw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bounding_boxes(self, bounding_boxes, output):\n for i, bb in enumerate(bounding_boxes):\n\n if bb[2] > output.shape[1] or bb[3] > output.shape[0] or bb[0] < 0 or bb[1] < 0:\n continue\n cv2.rectangle(\n np.asarray(output), (bb[0], bb[1]), (bb[2], ...
[ "0.77598673", "0.7638101", "0.7510348", "0.72977304", "0.7150563", "0.7103931", "0.7101873", "0.7083905", "0.7020769", "0.6993739", "0.6974172", "0.69637305", "0.6859061", "0.68521667", "0.68319523", "0.6830371", "0.6809837", "0.6805064", "0.67750585", "0.6774759", "0.6742592...
0.0
-1
Test a region proposal network on a image dataset
def test_imdb(net, imdb, anchors): output_dir = get_output_dir(imdb, net) cache_file = os.path.join(output_dir, 'res_boxes.pkl') # load cache result boxes (filtered) if os.path.exists(cache_file): with open(cache_file, 'rb') as f: proposal_boxes = cPickle.load(f) print 'load res boxes from \'{}\''.format(cache_file) return proposal_boxes if not os.path.exists(output_dir): os.makedirs(output_dir) print 'Generating proposal boxes by rpn model...' proposal_boxes = test_net(net, imdb, anchors) print 'Get proposal boxes done!' print 'Current NMS configuration:' print NMS_CONFIG expand_val = lambda boxes: np.array([boxes[:,0] - boxes[:,2], boxes[:,1] - boxes[:,3], boxes[:,2] - boxes[:,0], boxes[:,3] - boxes[:,1], np.zeros(boxes.shape[0])]).T * EXPAND_RATIO # filter boxes print 'Filtering proposal boxes...' for i in xrange(len(proposal_boxes)): proposal_boxes[i] = boxes_filter(proposal_boxes[i], PRE_NMS_TOPN=NMS_CONFIG['PRE_NMS_TOPN'], NMS_THRESH=NMS_CONFIG['NMS_THRESH'], POST_NMS_TOPN=NMS_CONFIG['POST_NMS_TOPN'], CONF_THRESH=CONF_THRESH, USE_GPU=NMS_CONFIG['USE_GPU']) # expand bounding box if len(proposal_boxes[i]) > 0: proposal_boxes[i] = proposal_boxes[i] + expand_val(proposal_boxes[i]) print 'filter proposal box: {:d}/{:d}'.format(i+1, len(proposal_boxes)) print 'Filter proposal boxes done!' # save file with open(cache_file, 'wb') as f: cPickle.dump(proposal_boxes, f, cPickle.HIGHEST_PROTOCOL) print 'save result boxes to `{:s}`'.format(cache_file) return proposal_boxes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_net(args, dataset_name, proposal_file, output_dir, ind_range=None, gpu_id=0, early_stop=False):\n # print('test_net')\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n ...
[ "0.60389805", "0.5884871", "0.57754153", "0.5745751", "0.5740149", "0.5732082", "0.5713616", "0.5691783", "0.56859934", "0.56745535", "0.563614", "0.56359076", "0.5585057", "0.5569545", "0.5568741", "0.55645275", "0.5541818", "0.5533012", "0.5529382", "0.5483159", "0.5482852"...
0.0
-1
checking return values for `start` and `end` when calling channel_messages for numbers not multiples of 50.
def test_channel_messages_unlimited_pagination(): clear() userOne = auth_register('firstuser@gmail.com', '123abc!@#', 'First', 'User') randChannel = channels_create(userOne['token'], 'randChannel', True) for _ in range(149): message_send(userOne['token'], randChannel['channel_id'], 'Hello') messages = channel_messages(userOne['token'], randChannel['channel_id'], 0) assert(messages['start'] == 0) assert(messages['end'] == 50) messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50) assert(messages2['start'] == 50) assert(messages2['end'] == 100) messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100) assert(messages3['start'] == 100) assert(messages3['end'] == -1) assert(len(messages3['messages']) == 49) # an error should be raised when start is beyond 149 messages with pytest.raises(InputError): channel_messages(userOne['token'], randChannel['channel_id'], 150)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channel_messages(token, channel_id, start):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n\n # check if user is a member of channel with channel_ID and return AccessError if not\n if is_user_channel_member(channel_id, curr_id) is False:\n ...
[ "0.6338927", "0.58790916", "0.53648764", "0.53105456", "0.52494335", "0.5227476", "0.519632", "0.51526994", "0.5122233", "0.5106034", "0.50966465", "0.50898653", "0.50656456", "0.5056774", "0.50472474", "0.50439817", "0.5038363", "0.50265247", "0.50246954", "0.50190175", "0.5...
0.6797525
0
checking for validation of token raises accesserror
def test_channel_leave_invalid_token(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'First', 'Last') userchannel_id = channels_create(user['token'], 'userchannel', True) auth_logout(user['token']) with pytest.raises(AccessError): channel_leave(user['token'], userchannel_id['channel_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def check_token_validate(self, token):\n payload = {'key': self._l...
[ "0.8248678", "0.78147525", "0.7386542", "0.71018034", "0.70842767", "0.7034177", "0.6963336", "0.69433403", "0.68867075", "0.6882854", "0.6882577", "0.6882458", "0.68776894", "0.6838688", "0.6802286", "0.67813826", "0.67801815", "0.67788696", "0.6763725", "0.67588973", "0.674...
0.0
-1
check for accesserror when user isn't in the specified channel
def test_channel_leave_invalid_user(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') leaver = auth_register('leaver@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) with pytest.raises(AccessError): channel_leave(leaver['token'], userchannel_id['channel_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_channel_request(self, kind, chanid):\n return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "async def channel_manage_error(self, ctx: commands.context, error):\n if isinstance(error, commands.ChannelNotFound):\n await ctx.send(\"That channel was not found, make sure the channel ex...
[ "0.680053", "0.6787445", "0.6721486", "0.6431934", "0.6347937", "0.6341007", "0.6327553", "0.61701375", "0.60786307", "0.60661197", "0.60619277", "0.60619277", "0.60619277", "0.60619277", "0.60603285", "0.60603285", "0.6012092", "0.6003716", "0.59488", "0.5939925", "0.5939417...
0.65093845
3
check if inputerror is raised if the channel_id is invalid
def test_channel_leave_invalid_channel(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') leaver = auth_register('leaver@gmail.com', '123abc!@#', 'first', 'last') channels_create(user['token'], 'userchannel', True) invalid_id = 0 with pytest.raises(InputError): channel_leave(leaver['token'], invalid_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_i...
[ "0.70518154", "0.6931385", "0.6793265", "0.6561746", "0.6464727", "0.6447893", "0.62791675", "0.6238365", "0.61477757", "0.6107388", "0.6075252", "0.60303783", "0.60014194", "0.59643716", "0.59615505", "0.5909318", "0.5904439", "0.58474076", "0.5836723", "0.5830802", "0.58000...
0.62529725
7
if the person removed is a normal user of flockr, check that they were actually removed from flockr
def test_channel_leave_normal_case(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') leaver = auth_register('leaver@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) channel_join(leaver['token'], userchannel_id['channel_id']) channel_leave(leaver['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['all_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_user_del(user):\n\twith open('tracked_users', 'r') as myfile:\n\t\tuserfile = myfile.read()\n\t\tif user.lower() in userfile.lower():\n\t\t\treturn 1\n\treturn 0", "def test_remove_already_not_subbed(self) -> None:\n result = self.attempt_unsubscribe_of_principal(\n query_count=11,\n ...
[ "0.65418464", "0.6525852", "0.61570674", "0.6097376", "0.60110086", "0.5990641", "0.59904647", "0.5983225", "0.5961415", "0.5946392", "0.59444773", "0.59234893", "0.5917741", "0.58916634", "0.58891475", "0.58835924", "0.58744335", "0.58742744", "0.5868517", "0.5852744", "0.58...
0.0
-1
if the person removed is an owner of flockr, check that they were actually removed from flockr
def test_channel_leave_normal_case_owner(): clear() leaver = auth_register('leaver@gmail.com', '123abc!@#', 'first', 'last') user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) channel_join(leaver['token'], userchannel_id['channel_id']) channel_addowner(leaver['token'], userchannel_id['channel_id'], leaver['u_id']) channel_leave(leaver['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['owner_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
[ "0.6447291", "0.6402777", "0.637274", "0.61781394", "0.6064277", "0.59293276", "0.58794194", "0.58277196", "0.58173835", "0.5780744", "0.5778175", "0.5695273", "0.5692215", "0.56889635", "0.56681126", "0.5662332", "0.5635315", "0.56060123", "0.5600414", "0.5581912", "0.557282...
0.5255025
65
check if the channel_id is invalid an inputerror is raised
def test_channel_join_invalid_channel(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last') channels_create(user['token'], 'userchannel', True) invalid_id = 0 with pytest.raises(InputError): channel_join(joiner['token'], invalid_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_i...
[ "0.7181985", "0.70581347", "0.6617073", "0.6448678", "0.64280605", "0.6396151", "0.6361458", "0.63370305", "0.62574375", "0.61125195", "0.61097026", "0.60695", "0.6059686", "0.6048345", "0.6008819", "0.5979831", "0.5916335", "0.5861755", "0.58391106", "0.5798364", "0.5798364"...
0.662972
2
checking if the user is already in the channel, raise accesserror if they are
def test_channel_join_already_in_channel(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) with pytest.raises(AccessError): channel_join(user['token'], userchannel_id['channel_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def have_channel_open(channels, user):\n for x in channels:\n chan = channels[x]\n if 'is_member' in chan:\n continue\n if chan['user'] == user:\n return True\n return False", "def have_channel_open(channels, user):\n for x in channels:\n chan = chan...
[ "0.6544167", "0.6521434", "0.64236474", "0.6420696", "0.63337475", "0.63098836", "0.6305338", "0.62802494", "0.6259734", "0.6218568", "0.6136835", "0.60846996", "0.6068947", "0.6049151", "0.6002369", "0.59844506", "0.597856", "0.5952112", "0.5945146", "0.5934715", "0.59036577...
0.7157796
0
check if channel_join behaves correctly given valid input
def test_channel_join_normal_case(): clear() user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', True) channel_join(joiner['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['all_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' }, { 'u_id' : joiner['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_join_except_channel():\n # Clear the data structure\n clear_v1()\n # Call other functions to create the data and store in data structure\n auth_dict1 = auth_register_v2(\"johnsmith@gmail.com\", \"123456\", \"john\", \"smith\")\n auth_dict2 = auth_register_v2(\"harrypotter@gmail.com\...
[ "0.71999747", "0.7167301", "0.70121294", "0.68428326", "0.6786554", "0.66476434", "0.6459602", "0.6427017", "0.6345139", "0.6233202", "0.6227343", "0.6181276", "0.6101245", "0.59976065", "0.5990562", "0.59204394", "0.5870411", "0.5867968", "0.58485466", "0.5762381", "0.575636...
0.6979165
3
if the channel is private, but no invite is given to the user, then the owner of flockr can join the channel
def test_channel_join_private_owner(): clear() joiner = auth_register('joiner@gmail.com', '123abc!@#', 'first', 'last') user = auth_register('user@gmail.com', '123abc!@#', 'first', 'last') userchannel_id = channels_create(user['token'], 'userchannel', False) channel_join(joiner['token'], userchannel_id['channel_id']) randChannel_details = channel_details(user['token'], userchannel_id['channel_id']) assert(randChannel_details['all_members'] == [ { 'u_id' : user['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' }, { 'u_id' : joiner['u_id'], 'name_first' : 'first', 'name_last' : 'last', 'profile_img_url': '' } ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")",...
[ "0.7135079", "0.6953994", "0.69395274", "0.68968785", "0.6849321", "0.6775492", "0.6759105", "0.67089385", "0.6606643", "0.6557231", "0.6467635", "0.64343905", "0.64186656", "0.64149857", "0.63559216", "0.6322832", "0.63140005", "0.630644", "0.6246793", "0.6224127", "0.621600...
0.73244226
0
checking if adding another owner from the current owner's token works as expected.
def test_channel_addowner_standard_input(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == { 'name' : 'Random Channel', 'owner_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' }, { 'u_id': 3, 'name_first' : 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ], 'all_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' }, { 'u_id': 3, 'name_first' : 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ] })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Rand...
[ "0.68776894", "0.67999893", "0.6754783", "0.668292", "0.6530196", "0.64863986", "0.6447112", "0.6423208", "0.62997204", "0.6244084", "0.6210982", "0.612194", "0.61106676", "0.60979927", "0.6089244", "0.5948427", "0.59481156", "0.59479606", "0.59267443", "0.5896144", "0.584883...
0.6017289
15
checking whether adding an owner after the user has logged out raises an accesserror as expected
def test_channel_addowner_invalid_token_after_logout(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) assert(auth_logout(register_second_result['token'])["is_success"] is True) with pytest.raises(AccessError): assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)", "def _check_owner(user, study):\n if not user.id == study.owner:\n raise HTTPError(403, \"U...
[ "0.7260797", "0.70191914", "0.6896338", "0.67979467", "0.6756282", "0.67163837", "0.6641159", "0.65771526", "0.65635395", "0.6563323", "0.6472536", "0.6423421", "0.6411672", "0.6408827", "0.63631994", "0.63437754", "0.6336135", "0.6251188", "0.62497646", "0.62204605", "0.6216...
0.64897716
10
checking if an inputerror is raised if attempting to add a user as an owner who is already an owner
def test_channel_addowner_already_an_owner(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) with pytest.raises(InputError): assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n ...
[ "0.66523606", "0.62497765", "0.6178345", "0.61579764", "0.61557573", "0.6095019", "0.60053355", "0.6005061", "0.59492403", "0.5933165", "0.59200364", "0.590281", "0.58125436", "0.57823414", "0.5743092", "0.5735133", "0.5727868", "0.5720738", "0.57188445", "0.5711546", "0.5709...
0.6923221
0
checking if an inputerror is raised if an invalid Channel ID is inputted into the function
def test_channel_addowner_invalid_channel_id(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') with pytest.raises(InputError): assert channel_addowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def _check_channel_input(self, channel):\n # da `.get` `None` zurueckgibt wenn der Schluessel `channel` nicht existiert,\n # wird auch bei fehlender Konfiguration d...
[ "0.6859394", "0.6717858", "0.6630168", "0.64727926", "0.6436091", "0.6371448", "0.63068455", "0.6227858", "0.61192703", "0.60558033", "0.60163033", "0.59885454", "0.5890788", "0.58832693", "0.58782107", "0.586804", "0.58606905", "0.58477086", "0.5845201", "0.58342546", "0.581...
0.63421535
6
checking if owner of the flockr who is not the channel owner can add owner
def test_channel_addowner_owner_flockr(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.chec...
[ "0.7385676", "0.7370181", "0.7258386", "0.70770735", "0.700007", "0.69392306", "0.6930852", "0.6926534", "0.6849369", "0.6614387", "0.6608435", "0.65947664", "0.65030146", "0.6469669", "0.6452825", "0.64186174", "0.63679105", "0.6348153", "0.6341457", "0.6329557", "0.6293552"...
0.7151373
3
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_addowner_owner_flockr_not_member(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
[ "0.7090575", "0.6885205", "0.68405485", "0.650094", "0.63612247", "0.6272654", "0.6271527", "0.6261493", "0.62362427", "0.616833", "0.61552966", "0.611927", "0.6118742", "0.6108415", "0.60745674", "0.60703945", "0.6057438", "0.6057438", "0.6045126", "0.60374135", "0.6017522",...
0.7249187
0
checking if AccessError is returned as expected if member is not an owner
def test_channel_addowner_not_owner(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') register_forth_result = auth_register('randemail4@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_access(self, target_member : M, accessor : M, permission : str):\n if not permission: \n return True\n if accessor is None:\n raise errors.NotAllowed(\"Accessor not found\")\n if target_member != accessor:\n raise errors.NotAllowed(\"Access not allow...
[ "0.71381223", "0.679959", "0.67456234", "0.65781903", "0.6487132", "0.64113057", "0.6391973", "0.6355338", "0.6317414", "0.6261393", "0.6230763", "0.6224077", "0.62140906", "0.6203331", "0.6188333", "0.6188333", "0.6179087", "0.6173048", "0.6162917", "0.6162814", "0.6100645",...
0.60019386
29
checking if able to remove an owner who is an owner with authorised token is sucessful
def test_channel_removeowner_standard_input(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) assert(channel_details(register_second_result['token'], randChannel_id['channel_id']) == { 'name' : 'Random Channel', 'owner_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ], 'all_members': [ { 'u_id': 2, 'name_first': 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' }, { 'u_id': 3, 'name_first' : 'Jane', 'name_last': 'Citizen', 'profile_img_url': '' } ] })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_owner(model, request):\n auth_token = request.headers.get('Authentication-Token')\n user = _token_loader(auth_token)\n if model.owner != user:\n abort(401)", "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gm...
[ "0.6920005", "0.6687383", "0.66869026", "0.652796", "0.65141463", "0.6513035", "0.6434651", "0.6367297", "0.6212406", "0.6208368", "0.62047887", "0.61992234", "0.61753887", "0.61734676", "0.61203825", "0.6116047", "0.61136854", "0.60691315", "0.60652006", "0.6058773", "0.6047...
0.627882
8
checking if inputerror is raised as expected if attempting to use an invalid Channel ID
def test_channel_removeowner_invalid_channel_id(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') with pytest.raises(InputError): assert channel_removeowner(register_second_result['token'], 'INVALIDID', register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateChannel( self, name ):\n if name not in self.d.keys(): raise Exception('Invalid device channel {}'.format(name))", "def test_react_invalid_message_id_in_channel():\n clear()\n user_a = register_n_users(1)\n channels_create(user_a[\"token\"], \"channel_a\", True)\n invalid_channel_i...
[ "0.698628", "0.6813436", "0.6781097", "0.6761938", "0.67249954", "0.6659239", "0.64130294", "0.6412095", "0.63696516", "0.628801", "0.62493896", "0.6245839", "0.6161329", "0.61323416", "0.60636854", "0.59984154", "0.59828895", "0.59685725", "0.5941027", "0.5924498", "0.591388...
0.63203907
9
checking if removing an owner with an invalid user ID raises an inputerror
def test_channel_removeowner_invalid_user_id(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') with pytest.raises(InputError): assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], "invalidemail@gmail.com")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_invalid_channel_id():\n clear()\n auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('randemail3@gmail.com', 'pas...
[ "0.6941911", "0.67129594", "0.6252531", "0.62405235", "0.6239856", "0.62261367", "0.62119555", "0.62015164", "0.61586416", "0.6044805", "0.60105914", "0.6007393", "0.59954685", "0.59929734", "0.59919494", "0.5990558", "0.598856", "0.598853", "0.598045", "0.594045", "0.5923282...
0.72741085
0
checking whether removing an owner after the user has logged out raises an accesserror as expected
def test_channel_removeowner_invalid_token_after_logout(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channels_create(register_third_result['token'], 'Random Channel 2', True) channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) auth_logout(register_second_result['token']) with pytest.raises(AccessError): assert channel_removeowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
[ "0.70658934", "0.7061854", "0.673374", "0.66135883", "0.65965587", "0.6513615", "0.64951664", "0.6487198", "0.6482636", "0.6462151", "0.6452183", "0.64459676", "0.6433408", "0.6396297", "0.63769233", "0.63756007", "0.6364545", "0.63642037", "0.6347555", "0.6339679", "0.632756...
0.70261496
2
checking if removing an owner without owner permissions raises an accesserror
def test_channel_removeowner_not_owner_permissions(): clear() auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) with pytest.raises(AccessError): assert channel_removeowner(register_third_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
[ "0.6874314", "0.6790394", "0.6770987", "0.67394656", "0.6579873", "0.6498488", "0.6473038", "0.642164", "0.63643247", "0.63571066", "0.6270544", "0.62456524", "0.62216616", "0.62143797", "0.6190839", "0.6165569", "0.6157032", "0.6144713", "0.61358136", "0.6118853", "0.6115285...
0.7237803
0
checking if able to remove an owner who is the last owner of the channel
def test_channel_removeowner_last_owner(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) #register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) # removing third user channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n use...
[ "0.73305464", "0.7195283", "0.7129515", "0.712579", "0.70183945", "0.6805983", "0.67637455", "0.66506666", "0.65761715", "0.6486065", "0.6440695", "0.6412174", "0.6344784", "0.6314831", "0.6306158", "0.62849295", "0.62610173", "0.6227882", "0.6078866", "0.607737", "0.60758984...
0.7552469
0
checking if owner of the flockr who is not the channel owner can remove owner
def test_channel_removeowner_owner_flockr(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_join(register_first_result['token'], randChannel_id['channel_id']) channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_removeowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register...
[ "0.7614531", "0.74451256", "0.7375203", "0.70898795", "0.695579", "0.68203926", "0.68009984", "0.6557617", "0.6531711", "0.651857", "0.64480776", "0.639784", "0.6383161", "0.6346579", "0.63352793", "0.62960595", "0.618517", "0.6165083", "0.6161233", "0.61429024", "0.61311483"...
0.7702833
0
checking if AccessError is returned as expected if the owner of flockr is not a member of the channel
def test_channel_removeowner_owner_flockr_not_member(): clear() register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen') register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen') register_third_result = auth_register('randemail3@gmail.com', 'password1234', 'Jane', 'Citizen') randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True) channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id']) with pytest.raises(AccessError): assert channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('randemail@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('randemail2@gmail.com', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('r...
[ "0.72484523", "0.688483", "0.68403906", "0.65012413", "0.6361957", "0.62719554", "0.6271346", "0.626074", "0.6235811", "0.6169581", "0.6156016", "0.6120171", "0.61183786", "0.6108167", "0.6076352", "0.6069219", "0.6057893", "0.6057893", "0.60458887", "0.6039936", "0.6019325",...
0.7089838
1
Sets a system Hamiltonian to the Hubbard Hamiltonian. Does exactly this. If the system hamiltonian has some other terms on it, there are not touched. So be sure to use this function only in newly created `System` objects.
def set_hamiltonian(self, system): system.clear_hamiltonian() if 'bh' in system.left_block.operators.keys(): system.add_to_hamiltonian(left_block_op='bh') if 'bh' in system.right_block.operators.keys(): system.add_to_hamiltonian(right_block_op='bh') system.add_to_hamiltonian('dimer', 'id', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'dimer', 'id', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'dimer', 'id', -(1. - self.U)) system.add_to_hamiltonian('id', 'id', 'id', 'dimer', -(1. - self.U)) # system.add_to_hamiltonian('dimer', 'id', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'dimer', 'id', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'dimer', 'id', self.U) # system.add_to_hamiltonian('id', 'id', 'id', 'dimer', self.U) system.add_to_hamiltonian('rprm_up_minus_dag', 'rprm_up_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus_dag', 'rprm_down_plus', 'id', 'id', -(1. + self.U)/2.) system.add_to_hamiltonian('rprm_up_minus', 'rprm_up_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('rprm_down_minus', 'rprm_down_plus_dag', 'id', 'id', (1. + self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus_dag', 'rprm_up_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus_dag', 'rprm_down_plus', 'id', -(1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_up_minus', 'rprm_up_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id', 'rprm_down_minus', 'rprm_down_plus_dag', 'id', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_hamiltonian('id','id', 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_hamiltonian(self, system):\n system.clear_hamiltonian()\n if 'bh' in system.left_block.operators.keys():\n system.add_to_hamiltonian(left_block_op='bh')\n if 'bh' in system.right_block.operators.keys():\n system.add_to_hamiltonian(right_block_op='bh')\n sys...
[ "0.77002096", "0.7557386", "0.74360436", "0.7119834", "0.7010251", "0.6295535", "0.60164326", "0.6015403", "0.59823936", "0.58867145", "0.58867145", "0.5827976", "0.57418686", "0.56327134", "0.56063366", "0.55363756", "0.5516487", "0.5502509", "0.54257786", "0.53823394", "0.5...
0.7601821
1
Sets the block Hamiltonian to the Hubbard model block Hamiltonian.
def set_block_hamiltonian(self, tmp_matrix_for_bh, system): # If you have a block hamiltonian in your block, add it if 'bh' in system.growing_block.operators.keys(): system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id') system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', -(1. - self.U)) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', -(1. - self.U)) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 'dimer', self.U) # system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'dimer', 'id', self.U) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus_dag', 'rprm_up_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus_dag', 'rprm_down_plus', -(1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_up_minus', 'rprm_up_plus_dag', (1.+self.U)/2.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'rprm_down_minus', 'rprm_down_plus_dag', (1.+self.U)/2.)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_block_hamiltonian(self, system):\n # If you have a block hamiltonian in your block, add it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_block_hamiltonian('bh', 'id')\n system.add_to_block_hamiltonian('c_up', 'c_up_dag', -1.)\n system.add_to_bloc...
[ "0.7961094", "0.72680366", "0.70075583", "0.69372654", "0.6821623", "0.6276145", "0.61712486", "0.60667217", "0.59148175", "0.5814612", "0.5814612", "0.55943906", "0.55346644", "0.5519819", "0.54950064", "0.5393633", "0.5378804", "0.53554547", "0.5317289", "0.5309732", "0.530...
0.77650434
1
Sets the operators to update to the ones for the Hubbard model.
def set_operators_to_update(self, system): system.add_to_operators_to_update('rprm_up_plus_dag', site_op='rprm_up_plus_dag') system.add_to_operators_to_update('rprm_down_plus_dag', site_op='rprm_down_plus_dag') system.add_to_operators_to_update('rprm_up_minus_dag', site_op='rprm_up_minus_dag') system.add_to_operators_to_update('rprm_down_minus_dag', site_op='rprm_down_minus_dag') system.add_to_operators_to_update('rprm_up_plus', site_op='rprm_up_plus') system.add_to_operators_to_update('rprm_down_plus', site_op='rprm_down_plus') system.add_to_operators_to_update('rprm_up_minus', site_op='rprm_up_minus') system.add_to_operators_to_update('rprm_down_minus', site_op='rprm_down_minus') system.add_to_operators_to_update('dimer', site_op='dimer') #system.add_to_operators_to_update('u', site_op='u')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_operators_to_update(self, system):\n # If you have a block hamiltonian in your block, update it\n if 'bh' in system.growing_block.operators.keys():\n system.add_to_operators_to_update('bh', block_op='bh')\n system.add_to_operators_to_update('c_up', site_op='c_up')\n s...
[ "0.7131101", "0.71062386", "0.68636316", "0.64068896", "0.62529534", "0.6246277", "0.62326676", "0.6152808", "0.61323714", "0.6125695", "0.61225945", "0.6067522", "0.60651207", "0.59958494", "0.59198344", "0.58076245", "0.5751448", "0.57447195", "0.5720005", "0.5720005", "0.5...
0.6803695
3
Returns the unit vector of the vector.
def unit_vector(vector): return vector / np.linalg.norm(vector)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def get_unit_vector(self, vector):\n return vector / la....
[ "0.8355314", "0.8344175", "0.83402044", "0.83041203", "0.8288785", "0.82710177", "0.8263843", "0.8237869", "0.8219826", "0.8203201", "0.8157419", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", ...
0.8180198
12
Reduces a series of points to a simplified version that loses detail, but maintains the general shape of the series.
def rdp(points, epsilon): dmax = 0.0 index = 0 for i in range(1, len(points) - 1): d = point_line_distance(points[i], points[0], points[-1]) if d > dmax: index = i dmax = d if dmax >= epsilon: results = rdp(points[:index+1], epsilon)[:-1] + rdp(points[index:], epsilon) else: results = [points[0], points[-1]] return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def rescale(self, points, inplace=True):\n if inplace == False:\n points = points.copy()\n points *= self.scale_factor\n points += self.origin\n return points", "def reprocessSeries(self, tiltseriesda...
[ "0.6333831", "0.6145088", "0.5858357", "0.5856983", "0.55326074", "0.54662484", "0.5412836", "0.5344801", "0.5280518", "0.5280117", "0.5262521", "0.5240614", "0.5206454", "0.5202019", "0.5178632", "0.51716375", "0.51524824", "0.5152478", "0.5152375", "0.51393616", "0.5135507"...
0.0
-1
Test ExponentialFamily class initialization.
def test_exponential_family_init(): D = 4 N = 100 exp_fam = ExponentialFamily(D) assert exp_fam.D == D assert exp_fam.support_layer is None assert exp_fam.D_eta == D with raises(TypeError): exp_fam = ExponentialFamily('foo') with raises(ValueError): exp_fam = ExponentialFamily(0) with raises(TypeError): exp_fam = ExponentialFamily(4, int) with raises(NotImplementedError): exp_fam.sample_eta(N) mu = np.zeros((D,)) with raises(NotImplementedError): exp_fam.mu_to_eta(mu) eta = np.zeros((D,)) with raises(NotImplementedError): exp_fam.eta_to_mu(eta) z = np.zeros((D,)) with raises(NotImplementedError): exp_fam.T(z) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test___init__(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n self.assertTrue(hasattr(ga, 'register'))\n\n # should have called evalute\n self.assertEqual(ga.generations[-1].new, 0)\n\n # should have registered a default ra...
[ "0.64224786", "0.60437495", "0.6004442", "0.5973904", "0.5874628", "0.5863269", "0.5862954", "0.5851496", "0.5843871", "0.58273435", "0.58007246", "0.5793445", "0.5762569", "0.57617235", "0.57417697", "0.571784", "0.5715807", "0.5695528", "0.5693813", "0.5684522", "0.5677164"...
0.83874583
0
Read a raw file into a list of Sentences.
def read_raw(f): if type(f) is str: f = file(f, "r") for (li,line) in enumerate(f): sent = process_sgml_line(line) mark = sent.getmark() if mark: tag, attrs = mark attrs = attrs_to_dict(attrs) if False and tag == "seg" and "id" in attrs: sent.id = attrs["id"] else: sent.id = str(li) else: sent.id = str(li) yield sent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(input_file):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n sentences = f.read().splitlines()\n return sentences", "def _read_sentences(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return [sentence.split() for sentence in f.read().split('\\n')]", ...
[ "0.77226895", "0.7688293", "0.7130299", "0.7058475", "0.6907598", "0.67035824", "0.66702956", "0.6655785", "0.6560921", "0.6560921", "0.6558752", "0.65419894", "0.6525789", "0.652241", "0.6511044", "0.65064627", "0.64996123", "0.64872473", "0.64834857", "0.6471241", "0.647055...
0.0
-1
Builds a menu entry with its callback function to call when finished
def __init__(self, callback): self.callback = callback self.selected = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_menu():", "def _createDisplayMenu(ned, menu):\n pass", "def create_menus( self ):", "def main_menu(self):\n menu_string = \"Main menu\\n\"\n menu_string += \"\\t1. Modify a list\\n\"\n menu_string += \"\\t2. Grade submenu\\n\"\n menu_string += \"\\t3. Search for some...
[ "0.69971764", "0.6512389", "0.6462643", "0.6236843", "0.621201", "0.61666703", "0.6126377", "0.60749704", "0.6072576", "0.59767383", "0.5965778", "0.5853985", "0.5805146", "0.57861257", "0.5747815", "0.5744853", "0.5727659", "0.5717768", "0.57175213", "0.5701339", "0.5688219"...
0.0
-1
Return the printable length of the Entry's Text
def getTextLength(self): return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n return len(self.text)", "def __len__(self):\n return len(self.spricht())", "def width(self, text):\n return len(text) * (self.font_width + 1)", "def getLength(self):\n return len(self.entries)", "def getLength(self, text):\n\n return len(text[self.tabl...
[ "0.69978696", "0.6772337", "0.6772269", "0.6674462", "0.66585314", "0.6562858", "0.64422786", "0.63689303", "0.63642585", "0.63120514", "0.6311637", "0.6309694", "0.6292428", "0.6272306", "0.6256533", "0.62360454", "0.6224102", "0.62151045", "0.6209286", "0.6154031", "0.61448...
0.7582059
0
Return the text of the Menu Entry
def getText(self): return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_header_menu_text(self, menu):\n if menu == BasePage.HOME:\n home = self.browser.find_element(*locators.HOME_LINK).text\n return home\n elif menu == BasePage.SERVICE:\n services = self.browser.find_element(*locators.SERVICE_LINK).text\n return servic...
[ "0.6761892", "0.67316175", "0.66230667", "0.6615068", "0.65632343", "0.65293497", "0.65000373", "0.6497349", "0.64363074", "0.64161533", "0.6374737", "0.6374737", "0.6374737", "0.6374737", "0.6374737", "0.63471115", "0.6344143", "0.633642", "0.6325014", "0.62955856", "0.62814...
0.56504595
84
Helper function to generate jitted lanczos function used in JaxBackend.eigsh_lanczos. The function `jax_lanczos` returned by this higherorder function has the following
def _generate_jitted_eigsh_lanczos(jax: types.ModuleType) -> Callable: @functools.partial(jax.jit, static_argnums=(3, 4, 5, 6)) def jax_lanczos(matvec, arguments, init, ncv, neig, landelta, reortho): """ Jitted lanczos routine. Args: matvec: A callable implementing the matrix-vector product of a linear operator. arguments: Arguments to `matvec` additional to an input vector. `matvec` will be called as `matvec(init, *args)`. init: An initial input state to `matvec`. ncv: Number of krylov iterations (i.e. dimension of the Krylov space). neig: Number of eigenvalue-eigenvector pairs to be computed. landelta: Convergence parameter: if the norm of the current Lanczos vector falls below `landelta`, iteration is stopped. reortho: If `True`, reorthogonalize all krylov vectors at each step. This should be used if `neig>1`. Returns: jax.numpy.ndarray: Eigenvalues list: Eigenvectors """ def body_modified_gram_schmidt(i, vals): vector, krylov_vectors = vals v = krylov_vectors[i, :] vector -= jax.numpy.vdot(v, vector) * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors] def body_lanczos(vals): current_vector, krylov_vectors, vector_norms = vals[0:3] diagonal_elements, matvec, args, _ = vals[3:7] threshold, i, maxiteration = vals[7:] norm = jax.numpy.linalg.norm(current_vector) normalized_vector = current_vector / norm normalized_vector, krylov_vectors = jax.lax.cond( reortho, True, lambda x: jax.lax.fori_loop(0, i, body_modified_gram_schmidt, [normalized_vector, krylov_vectors]), False, lambda x: [normalized_vector, krylov_vectors]) Av = matvec(normalized_vector, *args) diag_element = jax.numpy.vdot(normalized_vector, Av) res = jax.numpy.reshape( jax.numpy.ravel(Av) - jax.numpy.ravel(normalized_vector) * diag_element - krylov_vectors[i - 1] * norm, Av.shape) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i, :], jax.numpy.ravel(normalized_vector)) vector_norms = jax.ops.index_update(vector_norms, jax.ops.index[i - 1], norm) diagonal_elements = 
jax.ops.index_update(diagonal_elements, jax.ops.index[i - 1], diag_element) return [ res, krylov_vectors, vector_norms, diagonal_elements, matvec, args, norm, threshold, i + 1, maxiteration ] def cond_fun(vals): _, _, _, _, _, _, norm, threshold, iteration, maxiteration = vals def check_thresh(check_vals): val, thresh = check_vals return jax.lax.cond(val < thresh, False, lambda x: x, True, lambda x: x) return jax.lax.cond(iteration <= maxiteration, [norm, threshold], check_thresh, False, lambda x: x) numel = jax.numpy.prod(init.shape) krylov_vecs = jax.numpy.zeros((ncv + 1, numel), dtype=init.dtype) norms = jax.numpy.zeros(ncv, dtype=init.dtype) diag_elems = jax.numpy.zeros(ncv, dtype=init.dtype) norms = jax.ops.index_update(norms, jax.ops.index[0], 1.0) norms_dtype = jax.numpy.real(jax.numpy.empty((0, 0), dtype=init.dtype)).dtype initvals = [ init, krylov_vecs, norms, diag_elems, matvec, arguments, norms_dtype.type(1.0), landelta, 1, ncv ] output = jax.lax.while_loop(cond_fun, body_lanczos, initvals) final_state, krylov_vecs, norms, diags, _, _, _, _, it, _ = output krylov_vecs = jax.ops.index_update(krylov_vecs, jax.ops.index[it, :], jax.numpy.ravel(final_state)) A_tridiag = jax.numpy.diag(diags) + jax.numpy.diag( norms[1:], 1) + jax.numpy.diag(jax.numpy.conj(norms[1:]), -1) eigvals, U = jax.numpy.linalg.eigh(A_tridiag) eigvals = eigvals.astype(A_tridiag.dtype) def body_vector(i, vals): krv, unitary, states = vals dim = unitary.shape[1] n, m = jax.numpy.divmod(i, dim) states = jax.ops.index_add(states, jax.ops.index[n, :], krv[m + 1, :] * unitary[m, n]) return [krv, unitary, states] state_vectors = jax.numpy.zeros([neig, numel], dtype=init.dtype) _, _, vectors = jax.lax.fori_loop(0, neig * (krylov_vecs.shape[0] - 1), body_vector, [krylov_vecs, U, state_vectors]) return jax.numpy.array(eigvals[0:neig]), [ jax.numpy.reshape(vectors[n, :], init.shape) / jax.numpy.linalg.norm(vectors[n, :]) for n in range(neig) ] return jax_lanczos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of va...
[ "0.61316943", "0.5759136", "0.5484182", "0.53870416", "0.5306601", "0.5300693", "0.52789545", "0.52645034", "0.525278", "0.5248642", "0.52068466", "0.5197051", "0.5172847", "0.5171192", "0.5147542", "0.5092073", "0.5066736", "0.5065881", "0.50415236", "0.5041109", "0.50320095...
0.8053774
0
Helper function to create a jitted arnoldi factorization. The function returns a function `_arnoldi_fact` which performs an mstep arnoldi factorization. `_arnoldi_fact` computes an mstep arnoldi factorization of an input callable `matvec`, with m = min(`it`,`num_krylov_vecs`). `_arnoldi_fact` will do at most `num_krylov_vecs` steps. `_arnoldi_fact` returns arrays `kv` and `H` which satisfy the Arnoldi recurrence relation ``` matrix @ Vm Vm @ Hm fm em = 0 ``` with `matrix` the matrix representation of `matvec` and
def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable: @jax.jit def modified_gram_schmidt_step_arnoldi(j, vals): """ Single step of a modified gram-schmidt orthogonalization. Args: j: Integer value denoting the vector to be orthogonalized. vals: A list of variables: `vector`: The current vector to be orthogonalized to all previous ones `krylov_vectors`: jax.array of collected krylov vectors `n`: integer denoting the column-position of the overlap <`krylov_vector`|`vector`> within `H`. Returns: updated vals. """ vector, krylov_vectors, n, H = vals v = krylov_vectors[j, :] h = jax.numpy.vdot(v, vector) H = jax.ops.index_update(H, jax.ops.index[j, n], h) vector = vector - h * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors, n, H] @functools.partial(jax.jit, static_argnums=(5, 6, 7)) def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs, eps): """ Compute an m-step arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm - Vm @ Hm - fm * em = 0 ``` with `matrix` the matrix representation of `matvec` and `Vm = jax.numpy.transpose(kv[:it, :])`, `Hm = H[:it, :it]`, `fm = np.expand_dims(kv[it, :] * H[it, it - 1]`,1) and `em` a cartesian basis vector of shape `(1, kv.shape[1])` with `em[0, -1] == 1` and 0 elsewhere. Note that the caller is responsible for dtype consistency between the inputs, i.e. dtypes between all input arrays have to match. Args: matvec: The matrix vector product. args: List of arguments to `matvec`. v0: Initial state to `matvec`. krylov_vectors: An array for storing the krylov vectors. The individual vectors are stored as columns. The shape of `krylov_vecs` has to be (num_krylov_vecs + 1, np.ravel(v0).shape[0]). H: Matrix of overlaps. The shape has to be (num_krylov_vecs + 1,num_krylov_vecs + 1). 
start: Integer denoting the start position where the first produced krylov_vector should be inserted into `krylov_vectors` num_krylov_vecs: Number of krylov iterations, should be identical to `krylov_vectors.shape[0] + 1` eps: Convergence parameter. Iteration is terminated if the norm of a krylov-vector falls below `eps`. Returns: kv: An array of krylov vectors H: A matrix of overlaps it: The number of performed iterations. """ Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps return _arnoldi_fact
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n ...
[ "0.7236", "0.69279444", "0.60326505", "0.5435932", "0.53381103", "0.5186424", "0.50212055", "0.49177843", "0.4913222", "0.48340198", "0.47836974", "0.4736391", "0.47337383", "0.47162333", "0.467649", "0.46651557", "0.46549806", "0.46410066", "0.4637888", "0.4630094", "0.46174...
0.79677343
0
Single step of a modified gramschmidt orthogonalization.
def modified_gram_schmidt_step_arnoldi(j, vals): vector, krylov_vectors, n, H = vals v = krylov_vectors[j, :] h = jax.numpy.vdot(v, vector) H = jax.ops.index_update(H, jax.ops.index[j, n], h) vector = vector - h * jax.numpy.reshape(v, vector.shape) return [vector, krylov_vectors, n, H]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_orthogonal(self):\n pass", "def orthogonal_component(self, basis: Vector) -> Vector:\n return self - self.parallel_component(basis)", "def orthogonal(v):\n return np.array([-v[1], v[0]])", "def _orthogonal_vector(vector):\n return -1 * vector[1], vector[0]", "def _orthogonal_init...
[ "0.61191195", "0.57736254", "0.5773286", "0.5687321", "0.56248266", "0.5618349", "0.5608474", "0.55393374", "0.55151355", "0.541601", "0.53893787", "0.5387959", "0.53768814", "0.53333694", "0.5328658", "0.53105354", "0.52937436", "0.5284233", "0.52705336", "0.5248712", "0.524...
0.0
-1
Compute an mstep arnoldi factorization of `matvec`, with m = min(`it`,`num_krylov_vecs`). The factorization will do at most `num_krylov_vecs` steps. The returned arrays `kv` and `H` will satisfy the Arnoldi recurrence relation ``` matrix @ Vm Vm @ Hm fm em = 0 ``` with `matrix` the matrix representation of `matvec` and
def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs, eps): Z = jax.numpy.linalg.norm(v0) v = v0 / Z krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[start, :], jax.numpy.ravel(v)) H = jax.lax.cond( start > 0, start, lambda x: jax.ops.index_update(H, jax.ops.index[x, x - 1], Z), None, lambda x: H) # body of the arnoldi iteration def body(vals): krylov_vectors, H, matvec, vector, _, threshold, i, maxiter = vals Av = matvec(vector, *args) initial_vals = [Av, krylov_vectors, i, H] Av, krylov_vectors, _, H = jax.lax.fori_loop( 0, i + 1, modified_gram_schmidt_step_arnoldi, initial_vals) norm = jax.numpy.linalg.norm(Av) Av /= norm H = jax.ops.index_update(H, jax.ops.index[i + 1, i], norm) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[i + 1, :], jax.numpy.ravel(Av)) return [krylov_vectors, H, matvec, Av, norm, threshold, i + 1, maxiter] def cond_fun(vals): # Continue loop while iteration < num_krylov_vecs and norm > eps _, _, _, _, norm, _, iteration, _ = vals counter_done = (iteration >= num_krylov_vecs) norm_not_too_small = norm > eps continue_iteration = jax.lax.cond(counter_done, _, lambda x: False, _, lambda x: norm_not_too_small) return continue_iteration initial_norm = v.real.dtype.type(1.0+eps) initial_values = [krylov_vectors, H, matvec, v, initial_norm, eps, start, num_krylov_vecs] final_values = jax.lax.while_loop(cond_fun, body, initial_values) kvfinal, Hfinal, _, _, norm, _, it, _ = final_values return kvfinal, Hfinal, it, norm < eps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p ...
[ "0.57746553", "0.57497203", "0.5428247", "0.5418458", "0.5324983", "0.5313086", "0.5289994", "0.52694213", "0.5208599", "0.5204189", "0.5199041", "0.5128154", "0.5054749", "0.5047108", "0.49921957", "0.4964914", "0.49533275", "0.49489313", "0.4900625", "0.48871598", "0.488664...
0.6143391
0
Helper function to generate a jitted function to do an implicitly restarted arnoldi factorization of `matvec`. The returned routine finds the lowest `numeig` eigenvectoreigenvalue pairs of `matvec` by alternating between compression and reexpansion of an initial `num_krylov_vecs`step Arnoldi factorization.
def _implicitly_restarted_arnoldi(jax: types.ModuleType) -> Callable: arnoldi_fact = _generate_arnoldi_factorization(jax) # ###################################################### # ####### NEW SORTING FUCTIONS INSERTED HERE ######### # ###################################################### @functools.partial(jax.jit, static_argnums=(1,)) def LR_sort(evals, p): inds = np.argsort(jax.numpy.real(evals), kind='stable')[::-1] shifts = evals[inds][-p:] return shifts, inds @functools.partial(jax.jit, static_argnums=(1,)) def LM_sort(evals, p): inds = np.argsort(jax.numpy.abs(evals), kind='stable')[::-1] shifts = evals[inds][-p:] return shifts, inds # ####################################################### # ####################################################### # ####################################################### @functools.partial(jax.jit, static_argnums=(4, 5, 6)) def shifted_QR(Vm, Hm, fm, evals, k, p, which, res_thresh): funs = [LR_sort, LM_sort] shifts, _ = funs[which](evals, p) # compress to k = numeig q = jax.numpy.zeros(Hm.shape[0]) q = jax.ops.index_update(q, jax.ops.index[-1], 1) m = Hm.shape[0] for shift in shifts: Qj, _ = jax.numpy.linalg.qr(Hm - shift * jax.numpy.eye(m)) Hm = Qj.T.conj() @ Hm @ Qj Vm = Qj.T @ Vm q = q @ Qj fk = Vm[k, :] * Hm[k, k - 1] + fm * q[k - 1] Vk = Vm[0:k, :] Hk = Hm[0:k, 0:k] H = jax.numpy.zeros((k + p + 1, k + p), dtype=fm.dtype) H = jax.ops.index_update(H, jax.ops.index[0:k, 0:k], Hk) Z = jax.numpy.linalg.norm(fk) v = fk / Z krylov_vectors = jax.numpy.zeros((k + p + 1, Vm.shape[1]), dtype=fm.dtype) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[0:k, :], Vk) krylov_vectors = jax.ops.index_update(krylov_vectors, jax.ops.index[k:], v) Z = jax.numpy.linalg.norm(fk) #if fk is a zero-vector then arnoldi has exactly converged. 
#use small threshold to check this return krylov_vectors, H, fk, Z < res_thresh @functools.partial(jax.jit, static_argnums=(2,)) def update_data(Vm_tmp, Hm_tmp, numits): Vm = Vm_tmp[0:numits, :] Hm = Hm_tmp[0:numits, 0:numits] fm = Vm_tmp[numits, :] * Hm_tmp[numits, numits - 1] return Vm, Hm, fm @functools.partial(jax.jit, static_argnums=(3,)) def get_vectors(Vm, unitary, inds, numeig): def body_vector(i, vals): krv, unitary, states, inds = vals dim = unitary.shape[1] n, m = jax.numpy.divmod(i, dim) states = jax.ops.index_add(states, jax.ops.index[n, :], krv[m, :] * unitary[m, inds[n]]) return [krv, unitary, states, inds] state_vectors = jax.numpy.zeros([numeig, Vm.shape[1]], dtype=Vm.dtype) _, _, state_vectors, _ = jax.lax.fori_loop( 0, numeig * Vm.shape[0], body_vector, [Vm, unitary, state_vectors, inds]) state_norms = jax.numpy.linalg.norm(state_vectors, axis=1) state_vectors = state_vectors / state_norms[:, None] return state_vectors def implicitly_restarted_arnoldi_method( matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter, res_thresh) -> Tuple[List[Tensor], List[Tensor]]: """ Implicitly restarted arnoldi factorization of `matvec`. The routine finds the lowest `numeig` eigenvector-eigenvalue pairs of `matvec` by alternating between compression and re-expansion of an initial `num_krylov_vecs`-step Arnoldi factorization. Note: The caller has to ensure that the dtype of the return value of `matvec` matches the dtype of the initial state. Otherwise jax will raise a TypeError. Args: matvec: A callable representing the linear operator. args: Arguments to `matvec`. `matvec` is called with `matvec(x, *args)` with `x` the input array on which `matvec` should act. initial_state: An starting vector for the iteration. num_krylov_vecs: Number of krylov vectors of the arnoldi factorization. numeig: The number of desired eigenvector-eigenvalue pairs. which: Which eigenvalues to target. Currently supported: `which = 'LR'` or `which = 'LM'`. 
eps: Convergence flag. If the norm of a krylov vector drops below `eps` the iteration is terminated. maxiter: Maximum number of (outer) iteration steps. Returns: eta, U: Two lists containing eigenvalues and eigenvectors. """ N = np.prod(initial_state.shape) p = num_krylov_vecs - numeig num_krylov_vecs = np.min([num_krylov_vecs, N]) if (p <= 1) and (num_krylov_vecs < N): raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <" f" `num_krylov_vecs` <= N={N}," f" `num_krylov_vecs`={num_krylov_vecs}") dtype = initial_state.dtype # initialize arrays krylov_vectors = jax.numpy.zeros( (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]), dtype=dtype) H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype) # perform initial arnoldi factorization Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args, initial_state, krylov_vectors, H, 0, num_krylov_vecs, eps) # obtain an m-step arnoldi factorization Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits) it = 0 if which == 'LR': _which = 0 elif which == 'LM': _which = 1 else: raise ValueError(f"which = {which} not implemented") # make sure the dtypes are matching if maxiter > 0: if Vm.dtype == np.float64: dtype = np.complex128 elif Vm.dtype == np.float32: dtype = np.complex64 elif Vm.dtype == np.complex128: dtype = Vm.dtype elif Vm.dtype == np.complex64: dtype = Vm.dtype else: raise TypeError(f'dtype {Vm.dtype} not supported') Vm = Vm.astype(dtype) Hm = Hm.astype(dtype) fm = fm.astype(dtype) while (it < maxiter) and (not converged): evals, _ = jax.numpy.linalg.eig(Hm) krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig, p, _which, res_thresh) if converged: break v0 = jax.numpy.reshape(fk, initial_state.shape) # restart Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0, krylov_vectors, H, numeig, num_krylov_vecs, eps) Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs) it += 1 ev_, U_ = np.linalg.eig(np.array(Hm)) eigvals = jax.numpy.array(ev_) U = 
jax.numpy.array(U_) _, inds = LR_sort(eigvals, _which) vectors = get_vectors(Vm, U, inds, numeig) return eigvals[inds[0:numeig]], [ jax.numpy.reshape(vectors[n, :], initial_state.shape) for n in range(numeig) ] return implicitly_restarted_arnoldi_method
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_arnoldi_factorization(jax: types.ModuleType) -> Callable:\n\n @jax.jit\n def modified_gram_schmidt_step_arnoldi(j, vals):\n \"\"\"\n Single step of a modified gram-schmidt orthogonalization.\n Args:\n j: Integer value denoting the vector to be orthogonalized.\n vals: A list of va...
[ "0.6499808", "0.6398598", "0.59417737", "0.57333624", "0.5555907", "0.54603535", "0.5243884", "0.5202897", "0.51741433", "0.5137478", "0.5136315", "0.50940466", "0.50810033", "0.50402594", "0.50402594", "0.5007165", "0.49688074", "0.49596807", "0.49502552", "0.49388844", "0.4...
0.5256993
6
Implicitly restarted arnoldi factorization of `matvec`. The routine finds the lowest `numeig` eigenvectoreigenvalue pairs of `matvec` by alternating between compression and reexpansion of an initial `num_krylov_vecs`step Arnoldi factorization.
def implicitly_restarted_arnoldi_method( matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter, res_thresh) -> Tuple[List[Tensor], List[Tensor]]: N = np.prod(initial_state.shape) p = num_krylov_vecs - numeig num_krylov_vecs = np.min([num_krylov_vecs, N]) if (p <= 1) and (num_krylov_vecs < N): raise ValueError(f"`num_krylov_vecs` must be between `numeig` + 1 <" f" `num_krylov_vecs` <= N={N}," f" `num_krylov_vecs`={num_krylov_vecs}") dtype = initial_state.dtype # initialize arrays krylov_vectors = jax.numpy.zeros( (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]), dtype=dtype) H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype) # perform initial arnoldi factorization Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args, initial_state, krylov_vectors, H, 0, num_krylov_vecs, eps) # obtain an m-step arnoldi factorization Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits) it = 0 if which == 'LR': _which = 0 elif which == 'LM': _which = 1 else: raise ValueError(f"which = {which} not implemented") # make sure the dtypes are matching if maxiter > 0: if Vm.dtype == np.float64: dtype = np.complex128 elif Vm.dtype == np.float32: dtype = np.complex64 elif Vm.dtype == np.complex128: dtype = Vm.dtype elif Vm.dtype == np.complex64: dtype = Vm.dtype else: raise TypeError(f'dtype {Vm.dtype} not supported') Vm = Vm.astype(dtype) Hm = Hm.astype(dtype) fm = fm.astype(dtype) while (it < maxiter) and (not converged): evals, _ = jax.numpy.linalg.eig(Hm) krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig, p, _which, res_thresh) if converged: break v0 = jax.numpy.reshape(fk, initial_state.shape) # restart Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0, krylov_vectors, H, numeig, num_krylov_vecs, eps) Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs) it += 1 ev_, U_ = np.linalg.eig(np.array(Hm)) eigvals = jax.numpy.array(ev_) U = jax.numpy.array(U_) _, inds = LR_sort(eigvals, _which) 
vectors = get_vectors(Vm, U, inds, numeig) return eigvals[inds[0:numeig]], [ jax.numpy.reshape(vectors[n, :], initial_state.shape) for n in range(numeig) ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arnoldi_fact(matvec, args, v0, krylov_vectors, H, start, num_krylov_vecs,\n eps):\n Z = jax.numpy.linalg.norm(v0)\n v = v0 / Z\n krylov_vectors = jax.ops.index_update(krylov_vectors,\n jax.ops.index[start, :],\n ...
[ "0.61382663", "0.5593468", "0.55151695", "0.5378377", "0.5306997", "0.53011614", "0.5231085", "0.5190737", "0.5158738", "0.5093091", "0.5071404", "0.50447", "0.50041604", "0.49797606", "0.49743566", "0.49714258", "0.49670354", "0.49553815", "0.4946754", "0.49402496", "0.49396...
0.67910457
0
Allows Jax (the module) to be passed in as an argument rather than imported, since doing the latter breaks the build. In addition, instantiates certain of the enclosed functions as concrete objects within a Dict, allowing them to be cached. This avoids spurious recompilations that would otherwise be triggered by attempts to pass callables into Jitted functions. The important function here is functions["gmres_m"], which implements GMRES. The other functions are exposed only for testing.
def gmres_wrapper(jax: types.ModuleType): jnp = jax.numpy def gmres_m(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x0: jax.ShapedArray, tol: float, atol: float, num_krylov_vectors: int, maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]: """ Solve A x = b for x using the m-restarted GMRES method. This is intended to be called via jax_backend.gmres. Given a linear mapping with (n x n) matrix representation A = A_mv(*A_args) gmres_m solves Ax = b (1) where x and b are length-n vectors, using the method of Generalized Minimum RESiduals with M iterations per restart (GMRES_M). Args: A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape. A_args: A list of positional arguments to A_mv. b: The b in A @ x = b. x0: Initial guess solution. tol, atol: Solution tolerance to achieve, norm(residual) <= max(tol * norm(b), atol). tol is also used to set the threshold at which the Arnoldi factorization terminates. num_krylov_vectors: Size of the Krylov space to build at each restart. maxiter: The Krylov space will be repeatedly rebuilt up to this many times. Returns: x: The approximate solution. beta: Norm of the residual at termination. n_iter: Number of iterations at termination. converged: Whether the desired tolerance was achieved. """ num_krylov_vectors = min(num_krylov_vectors, b.size) x = x0 b_norm = jnp.linalg.norm(b) tol = max(tol * b_norm, atol) for n_iter in range(maxiter): done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol, b_norm) if done: break return x, beta, n_iter, done def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray, tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]: """ A single restart of GMRES. Args: A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and `v` have the same shape. A_args: A list of positional arguments to A_mv. b: The `b` in `A @ x = b`. x: Initial guess solution. 
tol: Solution tolerance to achieve, num_krylov_vectors : Size of the Krylov space to build. Returns: done: Whether convergence was achieved. beta: Magnitude of residual (i.e. the error estimate). x: The approximate solution. """ r, beta = gmres_residual(A_mv, A_args, b, x) k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors, x0, r, beta, tol, b_norm) x = gmres_update(k, V, R, beta_vec, x0) done = k < num_krylov_vectors - 1 return done, beta, x @jax.jit def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]: """ Computes the residual vector r and its norm, beta, which is minimized by GMRES. Args: A_mv: A function v0 = A_mv(v, *A_args) where v0 and v have the same shape. A_args: A list of positional arguments to A_mv. b: The b in A @ x = b. x: Initial guess solution. Returns: r: The residual vector. beta: Its magnitude. """ r = b - A_mv(x, *A_args) beta = jnp.linalg.norm(r) return r, beta def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray, beta_vec: jax.ShapedArray, x0: jax.ShapedArray) -> jax.ShapedArray: """ Updates the solution in response to the information computed by the main GMRES loop. Args: k: The final iteration which was reached by GMRES before convergence. V: The Arnoldi matrix of Krylov vectors. R: The R factor in H = QR where H is the Arnoldi overlap matrix. beta_vec: Stores the Givens factors used to map H into QR. x0: The initial guess solution. Returns: x: The updated solution. 
""" q = min(k, R.shape[1]) y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q]) x = x0 + V[:, :q] @ y return x @functools.partial(jax.jit, static_argnums=(2,)) def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int, x0: jax.ShapedArray, r: jax.ShapedArray, beta: float, tol: float, b_norm: float) -> Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray]: """ Builds the Arnoldi decomposition of (A, v), where v is the normalized residual of the current solution estimate. The decomposition is returned as V, R, where V is the usual matrix of Krylov vectors and R is the upper triangular matrix in H = QR, with H the usual matrix of overlaps. Args: A_mv: A function `v0 = A_mv(v, *A_args)` where `v0` and `v` have the same shape. A_args: A list of positional arguments to A_mv. n_kry: Size of the Krylov space to build; this is called num_krylov_vectors in higher level code. x0: Guess solution. r: Residual vector. beta: Magnitude of r. tol: Solution tolerance to achieve. b_norm: Magnitude of b in Ax = b. Returns: k: Counts the number of iterations before convergence. V: The Arnoldi matrix of Krylov vectors. R: From H = QR where H is the Arnoldi matrix of overlaps. beta_vec: Stores Q implicitly as Givens factors. """ n = r.size err = beta v = r / beta # These will store the Givens rotations used to update the QR decompositions # of the Arnoldi matrices. # cos : givens[0, :] # sine: givens[1, :] givens = jnp.zeros((2, n_kry), dtype=x0.dtype) beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype) beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta) V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype) V = jax.ops.index_update(V, jax.ops.index[:, 0], v) R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype) # The variable data for the carry call. Each iteration modifies these # values and feeds the results to the next iteration. k = 0 gmres_variables = (k, V, R, beta_vec, err, # < The actual output we need. givens) # < Modified between iterations. 
gmres_constants = (tol, A_mv, A_args, b_norm, n_kry) gmres_carry = (gmres_variables, gmres_constants) # The 'x' input for the carry call. Each iteration will receive an ascending # loop index (from the jnp.arange) along with the constant data # in gmres_constants. gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition, gmres_krylov_work, gmres_carry) gmres_variables, gmres_constants = gmres_carry k, V, R, beta_vec, err, givens = gmres_variables return (k, V, R, beta_vec) VarType = Tuple[int, jax.ShapedArray, jax.ShapedArray, jax.ShapedArray, float, jax.ShapedArray] ConstType = Tuple[float, Callable, Sequence, jax.ShapedArray, int] GmresCarryType = Tuple[VarType, ConstType] @jax.jit def gmres_krylov_loop_condition(gmres_carry: GmresCarryType) -> bool: """ This function dictates whether the main GMRES while loop will proceed. It is equivalent to: if k < n_kry and err > tol: return True else: return False where k, n_kry, err, and tol are unpacked from gmres_carry. Args: gmres_carry: The gmres_carry from gmres_krylov. Returns: (bool): Whether to continue iterating. """ gmres_constants, gmres_variables = gmres_carry tol = gmres_constants[0] k = gmres_variables[0] err = gmres_variables[4] n_kry = gmres_constants[4] def is_iterating(k, n_kry): return k < n_kry def not_converged(args): err, tol = args return err >= tol return jax.lax.cond(is_iterating(k, n_kry), # Predicate. not_converged, # Called if True. lambda x: False, # Called if False. (err, tol)) # Arguments to calls. @jax.jit def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType: """ Performs a single iteration of gmres_krylov. See that function for a more detailed description. Args: gmres_carry: The gmres_carry from gmres_krylov. Returns: gmres_carry: The updated gmres_carry. 
""" gmres_variables, gmres_constants = gmres_carry k, V, R, beta_vec, err, givens = gmres_variables tol, A_mv, A_args, b_norm, _ = gmres_constants V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol) R_col, givens = apply_givens_rotation(H[:, k], givens, k) R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:]) # Update the residual vector. cs, sn = givens[:, k] * beta_vec[k] beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs) beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn) err = jnp.abs(sn) / b_norm gmres_variables = (k + 1, V, R, beta_vec, err, givens) return (gmres_variables, gmres_constants) @jax.jit def _gs_step(r: jax.ShapedArray, v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]: """ Performs one iteration of the stabilized Gram-Schmidt procedure, with r to be orthonormalized against {v} = {v_0, v_1, ...}. Args: r: The new vector which is not in the initially orthonormal set. v_i: The i'th vector in that set. Returns: r_i: The updated r which is now orthonormal with v_i. h_i: The overlap of r with v_i. """ h_i = jnp.vdot(v_i, r) r_i = r - h_i * v_i return r_i, h_i @jax.jit def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence, V: jax.ShapedArray, H: jax.ShapedArray, tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]: """ Performs the kth iteration of the Arnoldi reduction procedure. Args: k: The current iteration. A_mv, A_args: A function A_mv(v, *A_args) performing a linear transformation on v. V: A matrix of size (n, K + 1), K > k such that each column in V[n, :k+1] stores a Krylov vector and V[:, k+1] is all zeroes. H: A matrix of size (K, K), K > k with H[:, k] all zeroes. Returns: V, H: With their k'th columns respectively filled in by a new orthogonalized Krylov vector and new overlaps. """ v = A_mv(V[:, k], *A_args) v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T) v_norm = jnp.linalg.norm(v_new) r_new = v_new / v_norm # Normalize v unless it is the zero vector. 
r_new = jax.lax.cond(v_norm > tol, lambda x: x[0] / x[1], lambda x: 0.*x[0], (v_new, v_norm) ) H = jax.ops.index_update(H, jax.ops.index[:, k], H_k) H = jax.ops.index_update(H, jax.ops.index[k+1, k], v_norm) V = jax.ops.index_update(V, jax.ops.index[:, k+1], r_new) return V, H #################################################################### # GIVENS ROTATIONS #################################################################### @jax.jit def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> jax.ShapedArray: """ Successively applies each of the rotations stored in givens to H_col. Args: H_col : The vector to be rotated. givens: 2 x K, K > k matrix of rotation factors. k : Iteration number. Returns: H_col : The rotated vector. """ rotation_carry = (H_col, 0, k, givens) def loop_condition(carry): i = carry[1] k = carry[2] return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0) def apply_ith_rotation(carry): H_col, i, k, givens = carry cs = givens[0, i] sn = givens[1, i] H_i = cs * H_col[i] - sn * H_col[i + 1] H_ip1 = sn * H_col[i] + cs * H_col[i + 1] H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i) H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1) return (H_col, i + 1, k, givens) rotation_carry = jax.lax.while_loop(loop_condition, apply_ith_rotation, rotation_carry) H_col = rotation_carry[0] return H_col @jax.jit def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray, k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]: """ Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that eliminates H_col's k'th element, yielding the corresponding column of the R in H's QR decomposition. Returns the new column of R along with the new Givens factors. Args: H_col : The column of H to be rotated. givens: A matrix representing the cosine and sine factors of the previous GMRES Givens rotations, in that order (i.e. 
givens[0, :] -> the cos factor). k : Iteration number. Returns: R_col : The column of R obtained by transforming H_col. givens_k: The new elements of givens that zeroed out the k+1'th element of H_col. """ # This call successively applies each of the # Givens rotations stored in givens[:, :k] to H_col. H_col = apply_rotations(H_col, givens, k) cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1]) givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k) givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k) r_k = cs_k * H_col[k] - sn_k * H_col[k + 1] R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k) R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.) return R_col, givens @jax.jit def givens_rotation(v1: float, v2: float) -> Tuple[float, float]: """ Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta) so that [cs -sn] @ [v1] = [r] [sn cs] [v2] [0] Args: v1, v2: The scalars. Returns: cs, sn: The rotation factors. """ t = jnp.sqrt(v1**2 + v2**2) cs = v1 / t sn = -v2 / t return cs, sn fnames = [ "gmres_m", "gmres_residual", "gmres_krylov", "gs_step", "kth_arnoldi_step", "givens_rotation" ] functions = [ gmres_m, gmres_residual, gmres_krylov, _gs_step, kth_arnoldi_step, givens_rotation ] class Functions: def __init__(self, fun_dict): self.dict = fun_dict def __getattr__(self, name): return self.dict[name] return Functions(dict(zip(fnames, functions)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_scipy_special__cephes(finder, module):\n module.AddGlobalName(\"gammaln\")", "def _refresh_cache():\n global _num_types, _num_funcs\n\n num_types = interrogate_number_of_global_types()\n num_funcs = interrogate_number_of_functions()\n\n if num_types != _num_types:\n for i in range(...
[ "0.54267865", "0.5393932", "0.53602004", "0.51983255", "0.5163334", "0.51566917", "0.51326257", "0.5130478", "0.5116886", "0.51109326", "0.51055855", "0.509391", "0.50782514", "0.50449574", "0.501213", "0.5006291", "0.49983588", "0.4979815", "0.49753696", "0.4935354", "0.4883...
0.48797792
21
Solve A x = b for x using the mrestarted GMRES method. This is intended to be called via jax_backend.gmres. Given a linear mapping with (n x n) matrix representation A = A_mv(A_args) gmres_m solves Ax = b (1) where x and b are lengthn vectors, using the method of Generalized Minimum RESiduals with M iterations per restart (GMRES_M).
def gmres_m(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x0: jax.ShapedArray, tol: float, atol: float, num_krylov_vectors: int, maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]: num_krylov_vectors = min(num_krylov_vectors, b.size) x = x0 b_norm = jnp.linalg.norm(b) tol = max(tol * b_norm, atol) for n_iter in range(maxiter): done, beta, x = gmres(A_mv, A_args, b, x, num_krylov_vectors, x0, tol, b_norm) if done: break return x, beta, n_iter, done
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gmres_wrapper(jax: types.ModuleType):\n jnp = jax.numpy\n\n def gmres_m(A_mv: Callable, A_args: Sequence,\n b: jax.ShapedArray, x0: jax.ShapedArray, tol: float,\n atol: float, num_krylov_vectors: int,\n maxiter: int) -> Tuple[jax.ShapedArray, float, int, bool]:\n \"\...
[ "0.8164", "0.72392", "0.7206821", "0.68150455", "0.678389", "0.6487322", "0.63835514", "0.624065", "0.598083", "0.5946647", "0.5763055", "0.56070817", "0.55459785", "0.55261856", "0.55072945", "0.54999584", "0.54536366", "0.53874725", "0.5362347", "0.5356209", "0.5320891", ...
0.7316262
1
A single restart of GMRES.
def gmres(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray, num_krylov_vectors: int, x0: jax.ShapedArray, tol: float, b_norm: float) -> Tuple[bool, float, jax.ShapedArray]: r, beta = gmres_residual(A_mv, A_args, b, x) k, V, R, beta_vec = gmres_krylov(A_mv, A_args, num_krylov_vectors, x0, r, beta, tol, b_norm) x = gmres_update(k, V, R, beta_vec, x0) done = k < num_krylov_vectors - 1 return done, beta, x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\n\t\treturn self.reset().start()", "def restart(self) -> None:", "def restart(self):", "def restart():\n stop()\n start()", "def restart(self):\n pass", "def restart(self):\r\n pass", "def _restart(self):\n pass", "def repl_restart(restart: bool = True) -...
[ "0.74949425", "0.74385256", "0.73926336", "0.73311555", "0.7275", "0.71946764", "0.7092151", "0.69505244", "0.6848349", "0.6821594", "0.6792471", "0.6755524", "0.67521834", "0.6737451", "0.6737451", "0.6691218", "0.66691226", "0.6651869", "0.66393805", "0.66291237", "0.661580...
0.0
-1
Computes the residual vector r and its norm, beta, which is minimized by GMRES.
def gmres_residual(A_mv: Callable, A_args: Sequence, b: jax.ShapedArray, x: jax.ShapedArray) -> Tuple[jax.ShapedArray, float]: r = b - A_mv(x, *A_args) beta = jnp.linalg.norm(r) return r, beta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def residual(self, y,r):\n u,v,tt = self.split(y)\n fiu,fiv,fitt = self.problem.internal_forces(u,v,tt)\n R = np.concatenate((fiu,fiv,fitt))\n R = self.residualApplyBCs(R,y,r)\n return R", "def get_residual(self, beta: ndarray) -> ndarray:\n return self.data.weight*(self...
[ "0.69030577", "0.68404454", "0.6596661", "0.6463848", "0.6344658", "0.63295376", "0.6283406", "0.6267384", "0.6263794", "0.62514615", "0.6221709", "0.6146919", "0.61445266", "0.6131107", "0.6113608", "0.61097836", "0.6103144", "0.6083334", "0.6065997", "0.6033683", "0.6020700...
0.68639606
1
Updates the solution in response to the information computed by the main GMRES loop.
def gmres_update(k: int, V: jax.ShapedArray, R: jax.ShapedArray, beta_vec: jax.ShapedArray, x0: jax.ShapedArray) -> jax.ShapedArray: q = min(k, R.shape[1]) y = jax.scipy.linalg.solve_triangular(R[:q, :q], beta_vec[:q]) x = x0 + V[:, :q] @ y return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n\n SolidSolver.update(self)\n\n self.__nextStep()", "def exchange_solution(self):\n for ss in self.solvers:\n ss.register_solution()\n\n if self.has_amr:\n self.tioga.data_update_amr()\n else:\n raise NotImplementedError(\"Inv...
[ "0.71704924", "0.6632163", "0.66200024", "0.6557541", "0.6423378", "0.63818824", "0.6320821", "0.62160337", "0.6194195", "0.61916894", "0.6184975", "0.6170712", "0.61512583", "0.6085049", "0.6084356", "0.60457534", "0.6041477", "0.6041477", "0.6041477", "0.60402924", "0.60007...
0.0
-1
Builds the Arnoldi decomposition of (A, v), where v is the normalized residual of the current solution estimate. The decomposition is returned as V, R, where V is the usual matrix of Krylov vectors and R is the upper triangular matrix in H = QR, with H the usual matrix of overlaps.
def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,
                 x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,
                 tol: float,
                 b_norm: float) -> Tuple[int, jax.ShapedArray,
                                         jax.ShapedArray, jax.ShapedArray]:
    """Drive the GMRES Arnoldi iteration.

    Starting from the residual ``r`` (with norm ``beta``) of the estimate
    ``x0``, builds up to ``n_kry`` Krylov vectors of the operator applied via
    ``A_mv(. , *A_args)``, while maintaining (through Givens rotations) the
    upper-triangular factor ``R`` of the Arnoldi Hessenberg matrix.

    Returns:
      k: Number of iterations actually performed.
      V: ``(n, n_kry + 1)`` matrix of Krylov vectors.
      R: ``(n_kry + 1, n_kry)`` upper-triangular factor.
      beta_vec: Rotated residual vector; its leading entries form the
        right-hand side consumed by ``gmres_update``.

    NOTE(review): ``jax.ops.index_update`` is the legacy JAX API (removed in
    newer releases); migrate to ``x.at[idx].set(v)`` when upgrading JAX.
    """
    n = r.size
    err = beta
    v = r / beta

    # These will store the Givens rotations used to update the QR decompositions
    # of the Arnoldi matrices.
    # cos : givens[0, :]
    # sine: givens[1, :]
    givens = jnp.zeros((2, n_kry), dtype=x0.dtype)
    beta_vec = jnp.zeros((n_kry + 1), dtype=x0.dtype)
    beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[0], beta)
    V = jnp.zeros((n, n_kry + 1), dtype=x0.dtype)
    V = jax.ops.index_update(V, jax.ops.index[:, 0], v)
    R = jnp.zeros((n_kry + 1, n_kry), dtype=x0.dtype)

    # The variable data for the carry call. Each iteration modifies these
    # values and feeds the results to the next iteration.
    k = 0
    gmres_variables = (k, V, R, beta_vec, err,  # < The actual output we need.
                       givens)                  # < Modified between iterations.
    gmres_constants = (tol, A_mv, A_args, b_norm, n_kry)
    gmres_carry = (gmres_variables, gmres_constants)
    # The 'x' input for the carry call. Each iteration will receive an ascending
    # loop index (from the jnp.arange) along with the constant data
    # in gmres_constants.
    gmres_carry = jax.lax.while_loop(gmres_krylov_loop_condition,
                                     gmres_krylov_work,
                                     gmres_carry)
    gmres_variables, gmres_constants = gmres_carry
    k, V, R, beta_vec, err, givens = gmres_variables
    return (k, V, R, beta_vec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obtain_Q(self):\n \n #create the initial triangular matrix as a copy of the m x n - matrix A\n \n v_list = Householder.vector(self)\n n_v = len(v_list) # number of vectors, not equal to number of columns in R\n q_m = len(v_list[0]) # longest vector, should...
[ "0.606377", "0.591735", "0.57787323", "0.57064146", "0.5663122", "0.563016", "0.5629043", "0.5616047", "0.5532304", "0.5528181", "0.55007565", "0.54783535", "0.5475328", "0.5461482", "0.5454432", "0.5375918", "0.5353961", "0.5350844", "0.53398156", "0.53277934", "0.5286591", ...
0.0
-1
This function dictates whether the main GMRES while loop will proceed.
def gmres_krylov_loop_condition(gmres_carry: "GmresCarryType") -> bool:
    """Decide whether the main GMRES Arnoldi loop should continue.

    The loop proceeds while fewer than ``n_kry`` Krylov vectors have been
    built AND the current error estimate still meets or exceeds ``tol``.

    Fix: the carry is packed as ``(gmres_variables, gmres_constants)`` by
    ``gmres_krylov`` and unpacked in that order by ``gmres_krylov_work``;
    this function previously unpacked the two tuples in the opposite order,
    silently reading ``k`` where it meant ``tol``, ``n_kry`` where it meant
    ``err``, and so on.
    """
    gmres_variables, gmres_constants = gmres_carry
    tol = gmres_constants[0]
    k = gmres_variables[0]
    err = gmres_variables[4]
    n_kry = gmres_constants[4]

    def is_iterating(k, n_kry):
        return k < n_kry

    def not_converged(args):
        err, tol = args
        return err >= tol

    return jax.lax.cond(is_iterating(k, n_kry),  # Predicate.
                        not_converged,           # Called if True.
                        lambda x: False,         # Called if False.
                        (err, tol))              # Arguments to calls.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nanny(self): \n while not self.started and not self.failed:\n eventlet.sleep(.1)\n return not self.failed", "def infinite_loop():\n return True", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n ...
[ "0.6914823", "0.6818568", "0.66382414", "0.66197145", "0.64698076", "0.64450336", "0.64174724", "0.64160615", "0.6401387", "0.6371226", "0.6364635", "0.6352241", "0.6348979", "0.632865", "0.632865", "0.632865", "0.629387", "0.6276963", "0.62566316", "0.6251263", "0.6192301", ...
0.0
-1
Performs a single iteration of gmres_krylov. See that function for a more detailed description.
def gmres_krylov_work(gmres_carry: GmresCarryType) -> GmresCarryType:
    """Perform a single iteration of ``gmres_krylov``.

    Extends the Arnoldi basis by one Krylov vector, rotates the new
    Hessenberg column into upper-triangular form, and updates the rotated
    residual vector and error estimate. The carry is
    ``(gmres_variables, gmres_constants)``; only the variables change.
    """
    gmres_variables, gmres_constants = gmres_carry
    k, V, R, beta_vec, err, givens = gmres_variables
    tol, A_mv, A_args, b_norm, _ = gmres_constants

    # H is R with the raw (unrotated) k'th Hessenberg column filled in.
    V, H = kth_arnoldi_step(k, A_mv, A_args, V, R, tol)
    # Apply the accumulated Givens rotations (plus one new one) to column k.
    R_col, givens = apply_givens_rotation(H[:, k], givens, k)
    R = jax.ops.index_update(R, jax.ops.index[:, k], R_col[:])

    # Update the residual vector.
    cs, sn = givens[:, k] * beta_vec[k]
    beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k], cs)
    beta_vec = jax.ops.index_update(beta_vec, jax.ops.index[k + 1], sn)
    # Relative-residual estimate used by the loop condition.
    err = jnp.abs(sn) / b_norm
    gmres_variables = (k + 1, V, R, beta_vec, err, givens)
    return (gmres_variables, gmres_constants)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gmres_krylov(A_mv: Callable, A_args: Sequence, n_kry: int,\n x0: jax.ShapedArray, r: jax.ShapedArray, beta: float,\n tol: float,\n b_norm: float) -> Tuple[int, jax.ShapedArray,\n jax.ShapedArray, jax.ShapedArray]:\n...
[ "0.62931263", "0.601181", "0.5886399", "0.57915413", "0.56589055", "0.56554955", "0.55133504", "0.53384787", "0.53033984", "0.52407485", "0.5225457", "0.5210151", "0.5209773", "0.52093935", "0.518312", "0.51578075", "0.51371425", "0.5087555", "0.50848186", "0.50783587", "0.50...
0.6446188
0
Performs one iteration of the stabilized GramSchmidt procedure, with r to be orthonormalized against {v} = {v_0, v_1, ...}.
def _gs_step(r: jax.ShapedArray, v_i: jax.ShapedArray) -> Tuple[jax.ShapedArray, jax.ShapedArray]: h_i = jnp.vdot(v_i, r) r_i = r - h_i * v_i return r_i, h_i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def rothesstri(A, b):\n n = shape(A)[0]\n A = hstack([A, b])\n for k in range(n-1)...
[ "0.5948944", "0.5672769", "0.55784595", "0.5571189", "0.544097", "0.5343572", "0.5310347", "0.5287816", "0.5228561", "0.5202186", "0.5167944", "0.5159567", "0.5158173", "0.5155729", "0.51552105", "0.51497984", "0.5149065", "0.51292133", "0.5123503", "0.5118129", "0.510955", ...
0.50629514
26
Performs the kth iteration of the Arnoldi reduction procedure.
def kth_arnoldi_step(k: int, A_mv: Callable, A_args: Sequence,
                     V: jax.ShapedArray, H: jax.ShapedArray,
                     tol: float) -> Tuple[jax.ShapedArray, jax.ShapedArray]:
    """Perform the k'th iteration of the Arnoldi reduction procedure.

    Applies the operator to the k'th Krylov vector, orthogonalizes the
    result against all columns of ``V`` via Gram-Schmidt, and appends the
    normalized remainder as the (k+1)'th Krylov vector.

    Fix: the original computed ``r_new = v_new / v_norm`` unconditionally
    and then immediately overwrote it with the ``lax.cond`` result — a dead
    statement that also divides by ~0 when ``v_new`` vanishes.

    Args:
      k: Current iteration index.
      A_mv: Matrix-vector product; invoked as ``A_mv(v, *A_args)``.
      A_args: Extra positional arguments forwarded to ``A_mv``.
      V: Matrix of Krylov vectors (columns).
      H: Hessenberg matrix whose k'th column will be filled in.
      tol: Threshold below which ``v_new`` is treated as the zero vector.

    Returns:
      V: ``V`` with column k+1 set to the new (normalized) Krylov vector.
      H: ``H`` with column k set to the Gram-Schmidt overlaps and
        ``H[k+1, k]`` set to the remainder's norm.
    """
    v = A_mv(V[:, k], *A_args)
    v_new, H_k = jax.lax.scan(_gs_step, v, xs=V.T)
    v_norm = jnp.linalg.norm(v_new)
    # Normalize v_new unless it is (numerically) the zero vector, in which
    # case store the zero vector instead of dividing by ~0.
    r_new = jax.lax.cond(v_norm > tol,
                         lambda x: x[0] / x[1],
                         lambda x: 0. * x[0],
                         (v_new, v_norm)
                         )
    H = jax.ops.index_update(H, jax.ops.index[:, k], H_k)
    H = jax.ops.index_update(H, jax.ops.index[k + 1, k], v_norm)
    V = jax.ops.index_update(V, jax.ops.index[:, k + 1], r_new)
    return V, H
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(k):\n if k <= 1: return 1\n return fn(k-1) + fn(k-2)", "def fn(n, k):\n if n == 1: return k # base case \n return sum(fn(n-1, kk) for kk in range(1, k+1))", "def count_k(n, k):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n else:\n ...
[ "0.6492725", "0.6413587", "0.63328785", "0.62609035", "0.61912435", "0.6108325", "0.61041903", "0.60753584", "0.60625505", "0.6060784", "0.60554206", "0.60479176", "0.6025259", "0.60064083", "0.60026884", "0.6001473", "0.59914064", "0.59914064", "0.5979783", "0.5960043", "0.5...
0.0
-1
Successively applies each of the rotations stored in givens to H_col.
def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,
                    k: int) -> jax.ShapedArray:
    """Successively apply the first ``k`` Givens rotations in ``givens``
    to ``H_col``.

    A ``jax.lax.while_loop`` (rather than a Python loop) is used so the
    bound ``k`` may be a traced value. The carry threads
    ``(H_col, i, k, givens)`` with ``i`` the rotation index.
    """
    rotation_carry = (H_col, 0, k, givens)

    def loop_condition(carry):
        # Continue while the rotation index is below k.
        i = carry[1]
        k = carry[2]
        return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)

    def apply_ith_rotation(carry):
        # Rotate entries i and i+1 of H_col by the i'th stored rotation.
        H_col, i, k, givens = carry
        cs = givens[0, i]
        sn = givens[1, i]
        H_i = cs * H_col[i] - sn * H_col[i + 1]
        H_ip1 = sn * H_col[i] + cs * H_col[i + 1]
        H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)
        H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1)
        return (H_col, i + 1, k, givens)

    rotation_carry = jax.lax.while_loop(loop_condition,
                                        apply_ith_rotation,
                                        rotation_carry)
    H_col = rotation_carry[0]
    return H_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n ...
[ "0.73926467", "0.5681619", "0.549413", "0.5357701", "0.53283906", "0.5293677", "0.520022", "0.519057", "0.5088467", "0.50847656", "0.50769705", "0.50462455", "0.50274307", "0.49993315", "0.49852484", "0.4983559", "0.4973589", "0.49634326", "0.49592367", "0.49521154", "0.49075...
0.7330481
1
Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that eliminates H_col's k'th element, yielding the corresponding column of the R in H's QR decomposition. Returns the new column of R along with the new Givens factors.
def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,
                          k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:
    """Rotate ``H_col`` by the stored Givens rotations, then eliminate its
    (k+1)'th entry with a new rotation.

    The result is the k'th column of the ``R`` factor in the QR
    decomposition of the Hessenberg matrix.

    Returns:
      R_col: The rotated column with ``R_col[k + 1]`` zeroed.
      givens: ``givens`` with the new rotation stored at column ``k``.
    """
    # This call successively applies each of the
    # Givens rotations stored in givens[:, :k] to H_col.
    H_col = apply_rotations(H_col, givens, k)

    # Build and record the rotation that annihilates H_col[k + 1].
    cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])
    givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)
    givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)
    r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]
    R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)
    R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)
    return R_col, givens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n ...
[ "0.6138148", "0.5132266", "0.5044118", "0.50204897", "0.49985307", "0.49635363", "0.4903446", "0.48277012", "0.48053998", "0.47816756", "0.4768826", "0.4737764", "0.4733043", "0.4726679", "0.47147375", "0.4713533", "0.47095165", "0.4691508", "0.46743643", "0.46470118", "0.464...
0.6756806
0
Given scalars v1 and v2, computes cs = cos(theta) and sn = sin(theta) so that [[cs, -sn], [sn, cs]] @ [v1, v2]^T = [r, 0]^T, where r = sqrt(v1^2 + v2^2).
def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:
    """Compute the Givens rotation that eliminates ``v2``.

    Returns ``cs = cos(theta)`` and ``sn = sin(theta)`` such that
    ``[[cs, -sn], [sn, cs]] @ [v1, v2] = [r, 0]`` with ``r = hypot(v1, v2)``.
    """
    hypot = jnp.sqrt(v1**2 + v2**2)
    return v1 / hypot, -v2 / hypot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cos_sim(v1, v2):\r\n return np.inner(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def cos_sim(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float]) -> float:\n return np.dot(v1, v2) / (...
[ "0.72262084", "0.7002232", "0.68205464", "0.68178624", "0.67799133", "0.67180985", "0.66046625", "0.65430385", "0.65345", "0.6518306", "0.6469074", "0.6411598", "0.6373019", "0.6347909", "0.6329347", "0.62953633", "0.6269493", "0.6250731", "0.6243701", "0.6227481", "0.6226452...
0.6803314
4
Check if quote already exists in Nostalgiabot's memory for this Person.
def has_said(self, quote: str) -> bool:
    """Check if *quote* already exists in Nostalgiabot's memory for this
    Person, ignoring case.

    Args:
      quote: The quote text to look for.

    Returns:
      True if any stored quote has the same content (case-insensitive).
    """
    # Hoist the lowercasing of the needle out of the scan, and test the
    # comparison itself: the original `any(q for q in ... if cond)` tested
    # the quote object's truthiness, not the match.
    target = quote.lower()
    return any(q.content.lower() == target for q in self.quotes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_term_exist(self, term):\n return term in self.postingDict", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wall...
[ "0.5549986", "0.5529568", "0.55025846", "0.54980576", "0.54824877", "0.54619014", "0.5449591", "0.52898127", "0.52789867", "0.5272745", "0.5230741", "0.52132195", "0.5202484", "0.5193138", "0.5162743", "0.5162743", "0.5120887", "0.51052827", "0.5090298", "0.50702953", "0.5042...
0.6199974
0
Create a state network.
def __init__(
        self,
        name_or_scope,
        output_dim,
        env_spec=None,
        observation_dim=None,
        observation_input=None,
        **kwargs):
    """Create a state network.

    Args:
      name_or_scope: TF variable scope (or scope name) for this network;
        forwarded to the parent constructor.
      output_dim: Dimension of the network's output.
      env_spec: Optional environment spec; if given, the observation
        dimension is taken from ``env_spec.observation_space.flat_dim``.
      observation_dim: Observation dimension — an int or an iterable of
        ints. Exactly one of ``env_spec`` / ``observation_dim`` must be
        truthy (asserted below).
      observation_input: Optional pre-built observation tensor. If None, a
        ``tf.float32`` placeholder with a leading batch dimension is
        created inside this network's scope.
      **kwargs: Forwarded to the parent constructor.

    NOTE(review): ``collections.Iterable`` was removed in Python 3.10
    (use ``collections.abc.Iterable``); this code targets TF1-era Python.
    """
    self.setup_serialization(locals())
    super(StateNetwork, self).__init__(name_or_scope, **kwargs)
    self.output_dim = output_dim

    assert env_spec or observation_dim
    self.observation_dim = (observation_dim
                            or env_spec.observation_space.flat_dim)

    with tf.variable_scope(self.scope_name):
        if observation_input is None:
            if not isinstance(self.observation_dim, collections.Iterable):
                # Scalar dim: placeholder shape is [batch, dim].
                observation_input = tf.placeholder(
                    tf.float32,
                    [None, self.observation_dim],
                    "_observation")
            else:
                # Iterable dim: placeholder shape is [batch, *dims].
                observation_input = tf.placeholder(
                    tf.float32,
                    [None] + list(self.observation_dim),
                    "_observation")
        self.observation_input = observation_input
        self._create_network(observation_input=observation_input)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_network(layers):\r\n return NeuronNetwork(layers)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_center...
[ "0.6997173", "0.6979348", "0.6762708", "0.67333984", "0.6585905", "0.65094924", "0.64667505", "0.6448271", "0.6415188", "0.635982", "0.6349135", "0.63338166", "0.63314515", "0.62870693", "0.62643075", "0.6260604", "0.62521416", "0.6246148", "0.6229527", "0.6212824", "0.621168...
0.0
-1
Builds a Visit object
def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,
             pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,
             detection_departure_fix: GpsFix):
    """Build a Visit.

    Args:
      id_visit: Identifier of this visit.
      id_stay_point: Identifier of the stay point being visited.
      pivot_arrival_fix: Pivot fix marking the arrival.
      pivot_departure_fix: Pivot fix marking the departure.
      detection_arrival_fix: Fix at which the arrival was detected.
      detection_departure_fix: Fix at which the departure was detected.
    """
    self.id_visit, self.id_stay_point = id_visit, id_stay_point
    self.pivot_arrival_fix, self.pivot_departure_fix = (
        pivot_arrival_fix, pivot_departure_fix)
    self.detection_arrival_fix, self.detection_departure_fix = (
        detection_arrival_fix, detection_departure_fix)
    # stay_time is derived state; update_stay_time() computes it.
    self.stay_time = None
    self.update_stay_time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_visit_plan(self):\n self.visit_plan = VisitPlanner(self.detector, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.num_orbits,\n # TEMP! to make observations sparser\n ...
[ "0.650446", "0.6304008", "0.5627681", "0.5627681", "0.53474486", "0.53437334", "0.5272759", "0.52613014", "0.52547306", "0.52441776", "0.52312696", "0.51499623", "0.51391286", "0.50897574", "0.50847214", "0.5052277", "0.49540627", "0.49472973", "0.48501718", "0.4815049", "0.4...
0.0
-1
Updates the stay time of visit
def update_stay_time(self):
    """Refresh the cached stay time of this visit.

    Recomputes ``self.stay_time`` via :meth:`get_length`; call this after
    any of the pivot fixes change.
    """
    self.stay_time = self.get_length()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_time(self):\n pass # Do nothing", "def update(self):\n super().update()\n self.checkTimeToLive()", "def update(self):\n if not self.exists:\n return\n if AT.TIME_TO_EXPIRE in self.attributes:\n if not self.calculate_time_left():\n ...
[ "0.6485091", "0.63547605", "0.6233127", "0.5951948", "0.59343076", "0.58443356", "0.58252096", "0.57942843", "0.5782025", "0.5782025", "0.5764802", "0.5756116", "0.57427424", "0.5739974", "0.57184714", "0.5699622", "0.56783235", "0.56636465", "0.56427634", "0.56412554", "0.56...
0.75728226
0
Gets the length of visit in seconds
def get_length(self) -> float:
    """Return the duration of the visit in seconds.

    Computed as the elapsed time between the pivot arrival and pivot
    departure fixes. Annotation corrected from ``int`` to ``float``:
    ``timedelta.total_seconds()`` returns a float (fractional seconds are
    possible).
    """
    return (self.pivot_departure_fix.timestamp -
            self.pivot_arrival_fix.timestamp).total_seconds()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def secondsPassed(self)->int:\n return self._lic.params['sessionTimeUsed'].value", "def duration(self):\n\t\tif self.status():\n\t\t\t# Currently on, return time since session was started\n\t\t\treturn self.length()\n\t\telse:\n\t\t\t# Otherwise return time until last bit of work\n\t\t\t# Check that this ...
[ "0.73857874", "0.7209997", "0.71742225", "0.71742225", "0.7137135", "0.7124937", "0.7086354", "0.7061175", "0.7056725", "0.70382965", "0.7029751", "0.69703007", "0.6957682", "0.69457513", "0.6928", "0.6925967", "0.6889457", "0.68751544", "0.684056", "0.6830896", "0.68183225",...
0.6827581
20
Repeats a message multiple times.
async def repeat(self, ctx, times: int, content='repeating...'):
    """Send *content* to the context *times* times.

    Args:
      ctx: The command context whose ``send`` coroutine delivers messages.
      times: How many times to send the message.
      content: The message text (defaults to 'repeating...').
    """
    remaining = times
    while remaining > 0:
        await ctx.send(content)
        remaining -= 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "async def repeat(ctx, times : int, content='repeating...'):\n for i...
[ "0.75642896", "0.75241643", "0.74671906", "0.6782859", "0.6725025", "0.66473454", "0.6640616", "0.6501877", "0.6463185", "0.6406237", "0.62558305", "0.6149078", "0.61431366", "0.61045724", "0.60727173", "0.60414445", "0.6028678", "0.59844077", "0.59493715", "0.59287435", "0.5...
0.7628505
0
create user for testing
def create_user(username, password):
    """Persist and return a new User with the given credentials (test helper)."""
    manager = User.objects
    return manager.create_user(
        username=username,
        password=password,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_test_user():\n return User.objects.create(username='test_us...
[ "0.87459725", "0.87459725", "0.87459725", "0.8526213", "0.8520784", "0.82702565", "0.8198614", "0.8178401", "0.81581074", "0.81339604", "0.8086235", "0.80827844", "0.80777", "0.8074274", "0.80639285", "0.80639285", "0.80639285", "0.8062536", "0.8042246", "0.8039984", "0.80323...
0.75613874
85
create data that use Question model
def create_question(user, title='title', text='text'):
    """Create and return a Question owned by *user* (test fixture)."""
    fields = {"created_by": user, "title": title, "text": text}
    return Question.objects.create(**fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setData(self):\n #offset = datetime.timedelta(prefs.getNoOfDaysBeforeQuestionSchedule())\n date_formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n def _q_data_item(q):\n item = {}\n item[\"qid\"]= \"q_%s\" % q.question_id\n if q.questi...
[ "0.70364606", "0.68326944", "0.6766363", "0.65983987", "0.6595654", "0.65301675", "0.6528276", "0.6509688", "0.6469362", "0.63934886", "0.63322735", "0.6294712", "0.6284928", "0.6249892", "0.62365043", "0.62052953", "0.6195677", "0.61907166", "0.6157784", "0.61300015", "0.612...
0.63760406
10
create data that use Choice model
def create_choices(question_model, text="text", total_votes=0):
    """Create and return a Choice attached to *question_model* (test fixture)."""
    fields = {
        "question": question_model,
        "text": text,
        "total_votes": total_votes,
    }
    return Choice.objects.create(**fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, choice):\r\n self.choice = choice", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n ...
[ "0.6384629", "0.637918", "0.6361359", "0.6212173", "0.6101701", "0.60345143", "0.5951211", "0.5886236", "0.5857764", "0.5848246", "0.58469194", "0.58059937", "0.5702655", "0.56995493", "0.56930524", "0.5691047", "0.568869", "0.5666549", "0.56657976", "0.5663346", "0.5648866",...
0.6831114
0
create data that use Answer model
def create_answer(question, user):
    """Create and return an Answer by *user* for *question* (test fixture)."""
    fields = {"question": question, "answered_by": user}
    return Answer.objects.create(**fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n teacher_answers = [\n a_stemmed,\n a_stem...
[ "0.64814955", "0.63967526", "0.63827956", "0.6260587", "0.6246229", "0.62410545", "0.6156967", "0.61112285", "0.61091954", "0.61057824", "0.6091933", "0.6012156", "0.6009843", "0.59592485", "0.59487975", "0.5926686", "0.59090936", "0.59057355", "0.5905192", "0.58983946", "0.5...
0.6539529
0
same as create_user but using user manager
def create_user_using_manager(username, password):
    """Same as create_user, but goes through an explicit UserManager instance."""
    return UserManager().create_user(username=username, password=password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user(email, password, f_name, l_name):\n pass", "def _create(cls, model_class, *args, **kwargs):\n manag...
[ "0.8181188", "0.7908192", "0.78201514", "0.77046186", "0.7671698", "0.7660212", "0.7622274", "0.7616442", "0.759572", "0.7572086", "0.7542684", "0.75390494", "0.7530234", "0.7527077", "0.75035036", "0.7481546", "0.74714476", "0.74663395", "0.74663395", "0.74663395", "0.742654...
0.8066032
1
create random string using string.printable
def create_random_string(total_character):
    """Return a random string of length *total_character* drawn from
    ``string.printable``.

    Fixes two defects of the original implementation:
    - ``randrange(0, len(feed) - 1)`` excluded the final index, so the last
      character of ``string.printable`` could never be chosen (biased
      sampling);
    - characters are now assembled with one ``str.join`` pass instead of
      quadratic ``+=`` concatenation.
    """
    return "".join(random.choice(string.printable)
                   for _ in range(total_character))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rand_string():\n out = ''\n for _ in range(24):\n out += choice(ascii_letters)\n return out", "def generate_random_string():\n return \"\".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16)) # nosec", "def generateRandomString():\n return ''.join(b64encode(...
[ "0.76754934", "0.74476475", "0.7420788", "0.7411069", "0.7401505", "0.72970307", "0.72862035", "0.7236748", "0.7232668", "0.71930367", "0.717935", "0.71791947", "0.71789944", "0.71474946", "0.7141961", "0.7137273", "0.712956", "0.71288073", "0.712568", "0.71228766", "0.710253...
0.74168414
3
Return a random integer in the range [0, max) — 0 inclusive, max exclusive.
def seed_random(max_integer):
    """Return a uniformly random integer in ``[0, max_integer)``."""
    return random.randrange(max_integer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number(maxValue):\r\n return random.randint(1, maxValue)", "def random_int(max=1000):\r\n return randint(0, max)", "def randInt(max):\n return int(max * random.random())", "def mt_rand(min = 0, max = sys.maxint):\n return random.randint(min, max)", "def get_random_value():\n re...
[ "0.78364855", "0.7738007", "0.7737629", "0.7560306", "0.7451346", "0.7409387", "0.7381858", "0.73814666", "0.7321527", "0.7308225", "0.7287057", "0.72776306", "0.7269391", "0.7227846", "0.7195339", "0.7195339", "0.714679", "0.7145737", "0.71094", "0.70787364", "0.7073735", ...
0.7360128
8
populate question object with random string and user
def populate_poll(user="", total=10):
    """Create *total* Question rows populated with random content.

    When *user* is the empty string, 20 random users are created first and
    each question gets a random author drawn from the database; otherwise
    every question is attributed to *user*.
    """
    user_list = None
    # Create random users only when the user argument is empty.
    if user == "":
        create_random_user(20)
        user_list = User.objects.all()
    for _ in range(total):
        if user_list is None:
            author = user
        else:
            author = random.choice(user_list)
        # Keep the draw order (author, title, text, slug) identical to the
        # original keyword-argument evaluation order, so a seeded random
        # stream produces the same data.
        title = create_random_string(seed_random(10))
        body = create_random_string(seed_random(300))
        slug = create_random_string(seed_random(100))
        Question.objects.create(created_by=author, title=title, text=body,
                                slug=slug)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, question):\n self.question = question\n self.responses = []", "def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,...
[ "0.6517821", "0.6408259", "0.6362986", "0.6347021", "0.6313082", "0.61639714", "0.6121847", "0.6114391", "0.6093224", "0.6044101", "0.6040264", "0.5993357", "0.59512204", "0.5943956", "0.5889812", "0.58555055", "0.5852014", "0.58435476", "0.58026284", "0.57996535", "0.5797478...
0.69050956
0
create CreatePollQuestion dummy form
def create_dummy_form(title, text, fill_choice=None, choice_length=None):
    """Build a CreatePollQuestion form with up to 8 choice fields.

    Args:
      title: Value for the ``question_title`` field.
      text: Value for the ``question_text`` field.
      fill_choice: Indices (0-7) of the choice fields to populate with a
        random string; the others are left blank (None).
      choice_length: Per-index lengths for the generated strings; a missing
        entry defaults to length 10.

    Returns:
      The bound CreatePollQuestion form.
    """
    # None-sentinel defaults avoid the mutable-default-argument pitfall of
    # the original signature (fill_choice=[], choice_length=[]).
    fill_choice = [] if fill_choice is None else fill_choice
    choice_length = [] if choice_length is None else choice_length

    # Fill with blanks for dummy choices, then overwrite the requested ones.
    choices = [None] * 8
    for i in fill_choice:
        try:
            length = choice_length[i]
        except IndexError:
            length = 10
        choices[i] = create_random_string(length)

    data = {"question_title": title, "question_text": text}
    # Form fields are 1-based: choice_1 .. choice_8.
    data.update({"choice_%d" % (idx + 1): choices[idx] for idx in range(8)})
    return CreatePollQuestion(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_freeform(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_TE...
[ "0.68965745", "0.664663", "0.66033286", "0.6529158", "0.64919966", "0.64177126", "0.6361779", "0.6354872", "0.6351356", "0.6324996", "0.62474936", "0.62169385", "0.6213089", "0.62068975", "0.6191489", "0.6160336", "0.61465067", "0.6138712", "0.6129136", "0.61282086", "0.61165...
0.7856849
0