_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q267700
ABweb._login
test
def _login(self, username, password):
    '''login and update cached cookies'''
    self.logger.debug('login ...')
    # fetch the login page and scrape its hidden <input> fields (ASP.NET
    # viewstate etc.) so the POST below carries them back
    res = self.session.http.get(self.login_url)
    input_list = self._input_re.findall(res.text)
    if not input_list:
        raise PluginError('Missing input data on login website.')
    data = {}
    for _input_data in input_list:
        try:
            _input_name = self._name_re.search(_input_data).group(1)
        except AttributeError:
            # input tag without a name attribute: skip it
            continue
        try:
            _input_value = self._value_re.search(_input_data).group(1)
        except AttributeError:
            # input tag without a value: send it empty
            _input_value = ''
        data[_input_name] = _input_value
    # ASP.NET form field names for the credentials and the submit button
    login_data = {
        'ctl00$Login1$UserName': username,
        'ctl00$Login1$Password': password,
        'ctl00$Login1$LoginButton.x': '0',
        'ctl00$Login1$LoginButton.y': '0'
    }
    data.update(login_data)
    res = self.session.http.post(self.login_url, data=data)
    # cache every cookie for 24 hours
    for cookie in self.session.http.cookies:
        self._session_attributes.set(cookie.name, cookie.value, expires=3600 * 24)
    # both cookies must be present for a valid session
    if self._session_attributes.get('ASP.NET_SessionId') and self._session_attributes.get('.abportail1'):
        self.logger.debug('New session data')
        self.set_expires_time_cache()
        return True
    else:
        self.logger.error('Failed to login, check your username/password')
        return False
python
{ "resource": "" }
q267701
StreamMapper.map
test
def map(self, key, func, *args, **kwargs):
    """Creates a key-function mapping.

    The return value from the function should be either
    - A tuple containing a name and stream
    - A iterator of tuples containing a name and stream

    Any extra arguments will be passed to the function.
    """
    bound = partial(func, *args, **kwargs)
    self._map.append((key, bound))
python
{ "resource": "" }
q267702
CrunchyrollAPI._api_call
test
def _api_call(self, entrypoint, params=None, schema=None):
    """Makes a call against the api.

    :param entrypoint: API method to call.
    :param params: parameters to include in the request data.
    :param schema: schema to use to validate the data
    """
    url = self._api_url.format(entrypoint)

    # Default params
    params = params or {}
    if self.session_id:
        # a live session identifies the caller by itself
        params.update({
            "session_id": self.session_id
        })
    else:
        # no session yet: identify via device/access credentials
        params.update({
            "device_id": self.device_id,
            "device_type": self._access_type,
            "access_token": self._access_token,
            "version": self._version_code
        })
    params.update({
        "locale": self.locale.replace('_', ''),
    })
    # NOTE: the original set params["session_id"] a second time here; that
    # redundant assignment has been removed (it is already added above).

    # The certificate used by Crunchyroll cannot be verified in some environments.
    res = self.session.http.post(url, data=params, headers=self.headers, verify=False)
    json_res = self.session.http.json(res, schema=_api_schema)

    if json_res["error"]:
        err_msg = json_res.get("message", "Unknown error")
        err_code = json_res.get("code", "unknown_error")
        raise CrunchyrollAPIError(err_msg, err_code)

    data = json_res.get("data")
    if schema:
        data = schema.validate(data, name="API response")

    return data
python
{ "resource": "" }
q267703
CrunchyrollAPI.start_session
test
def start_session(self):
    """
    Starts a session against Crunchyroll's server.

    Is recommended that you call this method before making any other calls
    to make sure you have a valid session against the server.
    """
    params = {"auth": self.auth} if self.auth else {}
    self.session_id = self._api_call("start_session", params, schema=_session_schema)
    log.debug("Session created with ID: {0}".format(self.session_id))
    return self.session_id
python
{ "resource": "" }
q267704
CrunchyrollAPI.get_info
test
def get_info(self, media_id, fields=None, schema=None):
    """Returns the data for a certain media item.

    :param media_id: id that identifies the media item to be accessed.
    :param fields: list of the media"s field to be returned. By default the
    API returns some fields, but others are not returned unless they are
    explicity asked for. I have no real documentation on the fields, but
    they all seem to start with the "media." prefix (e.g. media.name,
    media.stream_data).
    :param schema: validation schema to use
    """
    params = dict(media_id=media_id)
    if fields:
        params["fields"] = ",".join(fields)
    return self._api_call("info", params, schema=schema)
python
{ "resource": "" }
q267705
Crunchyroll._create_api
test
def _create_api(self):
    """Creates a new CrunchyrollAPI object, initiates it's session and
    tries to authenticate it either by using saved credentials or the user's
    username and password.
    """
    if self.options.get("purge_credentials"):
        # fix: the original purged "session_id" twice; once is enough
        self.cache.set("session_id", None, 0)
        self.cache.set("auth", None, 0)

    # use the crunchyroll locale as an override, for backwards compatibility
    locale = self.get_option("locale") or self.session.localization.language_code
    api = CrunchyrollAPI(self.cache,
                         self.session,
                         session_id=self.get_option("session_id"),
                         locale=locale)

    if not self.get_option("session_id"):
        self.logger.debug("Creating session with locale: {0}", locale)
        api.start_session()

        if api.auth:
            # previously saved credentials are available
            self.logger.debug("Using saved credentials")
            login = api.authenticate()
            self.logger.info("Successfully logged in as '{0}'",
                             login["user"]["username"] or login["user"]["email"])
        elif self.options.get("username"):
            try:
                self.logger.debug("Attempting to login using username and password")
                api.login(self.options.get("username"),
                          self.options.get("password"))
                login = api.authenticate()
                self.logger.info("Logged in as '{0}'",
                                 login["user"]["username"] or login["user"]["email"])
            except CrunchyrollAPIError as err:
                raise PluginError(u"Authentication error: {0}".format(err.msg))
        else:
            self.logger.warning(
                "No authentication provided, you won't be able to access "
                "premium restricted content"
            )

    return api
python
{ "resource": "" }
q267706
compress
test
def compress(string, mode=MODE_GENERIC, quality=11, lgwin=22, lgblock=0):
    """Compress a byte string.

    Args:
      string (bytes): The input data.
      mode (int, optional): The compression mode can be MODE_GENERIC (default),
        MODE_TEXT (for UTF-8 format text input) or MODE_FONT (for WOFF 2.0).
      quality (int, optional): Controls the compression-speed vs compression-
        density tradeoff. The higher the quality, the slower the compression.
        Range is 0 to 11. Defaults to 11.
      lgwin (int, optional): Base 2 logarithm of the sliding window size. Range
        is 10 to 24. Defaults to 22.
      lgblock (int, optional): Base 2 logarithm of the maximum input block size.
        Range is 16 to 24. If set to 0, the value will be set based on the
        quality. Defaults to 0.

    Returns:
      The compressed byte string.

    Raises:
      brotli.error: If arguments are invalid, or compressor fails.
    """
    # one-shot compression: feed everything, then flush the trailer
    compressor = Compressor(mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock)
    return compressor.process(string) + compressor.finish()
python
{ "resource": "" }
q267707
outputCharFormatter
test
def outputCharFormatter(c):
    """Render the byte value c as a short human-readable string.

    Printable ASCII comes back as the character itself, a few control
    characters and space get symbolic forms, everything else a \\xNN escape.
    """
    # TODO 2: allow hex only output
    specials = {10: '\\n', 13: '\\r', 32: '" "'}
    if 32 < c < 127:
        return chr(c)
    if c in specials:
        return specials[c]
    return '\\x{:02x}'.format(c)
python
{ "resource": "" }
q267708
outputFormatter
test
def outputFormatter(s):
    """Show string or char, eliding the middle of long inputs.

    Inputs shorter than 200 bytes are rendered in full; longer inputs are
    shown as the first 100 and last 100 bytes joined by '...'.
    """
    def formatSubString(sub):
        # spaces are shown literally; everything else via outputCharFormatter
        for c in sub:
            if c == 32:
                yield ' '
            else:
                yield outputCharFormatter(c)
    # bug fix: the original tested len(result) where result was an
    # always-empty string, so long inputs were never truncated;
    # the length of the input itself must decide
    if len(s) < 200:
        return ''.join(formatSubString(s))
    return ''.join(formatSubString(s[:100])) + '...' + \
        ''.join(formatSubString(s[-100:]))
python
{ "resource": "" }
q267709
BitStream.readBytes
test
def readBytes(self, n):
    """Read n bytes from the stream on a byte boundary.

    Raises ValueError when the current bit position is not byte aligned.
    """
    if self.pos & 7:
        raise ValueError('readBytes: need byte boundary')
    start = self.pos >> 3
    chunk = self.data[start:start + n]
    self.pos += 8 * n
    return chunk
python
{ "resource": "" }
q267710
Symbol.value
test
def value(self, extra=None):
    """The value used for processing. Can be a tuple.
    with optional extra bits

    :param extra: the extra-bits value for WithExtra codes; must fit in
        extraBits(). Must be None for codes without extra bits.
    :raises ValueError: when extra is out of range or supplied needlessly.
    """
    if isinstance(self.code, WithExtra):
        # NOTE(review): if extra is None here, the chained comparison below
        # raises TypeError in Python 3 rather than the ValueError message —
        # assumes callers always pass extra for WithExtra codes; confirm
        if not 0<=extra<1<<self.extraBits():
            raise ValueError("value: extra value doesn't fit in extraBits")
        return self.code.value(self.index, extra)
    if extra is not None:
        raise ValueError('value: no extra bits for this code')
    return self.code.value(self.index)
python
{ "resource": "" }
q267711
Symbol.explanation
test
def explanation(self, extra=None):
    """Long explanation of the value from the numeric value
    with optional extra bits
    Used by Layout.verboseRead when printing the value

    Delegates to the code's callback; WithExtra codes also receive the
    extra-bits value.
    """
    if isinstance(self.code, WithExtra):
        return self.code.callback(self, extra)
    return self.code.callback(self)
python
{ "resource": "" }
q267712
PrefixDecoder.setDecode
test
def setDecode(self, decodeTable):
    """Store decodeTable,
    and compute lengthTable, minLength, maxLength from encodings.

    Determines each symbol's code length by finding the smallest mask
    under which its (reversed) bit pattern is unique among the table keys.
    """
    self.decodeTable = decodeTable
    #set of symbols with unknown length
    todo = set(decodeTable)
    #bit size under investigation
    maskLength = 0
    lengthTable = {}
    while todo:
        mask = (1<<maskLength)-1
        #split the encodings that we didn't find yet using b bits
        splitSymbols = defaultdict(list)
        for s in todo:
            splitSymbols[s&mask].append(s)
        #unique encodings have a length of maskLength bits
        #set length, and remove from todo list
        for s,subset in splitSymbols.items():
            if len(subset)==1:
                # s is the masked value; at this maskLength it equals the
                # full pattern for a well-formed prefix code table
                lengthTable[self.decodeTable[s]] = maskLength
                todo.remove(s)
        #now investigate with longer mask
        maskLength +=1
    #save result
    self.lengthTable = lengthTable
    self.minLength = min(lengthTable.values())
    self.maxLength = max(lengthTable.values())
    self.switchToPrefix()
python
{ "resource": "" }
q267713
PrefixDecoder.setLength
test
def setLength(self, lengthTable):
    """Given the bit pattern lengths for symbols given in lengthTable,
    set decodeTable, minLength, maxLength

    Builds canonical prefix codes per length, then stores each code
    bit-reversed (streams are read LSB-first).
    """
    self.lengthTable = lengthTable
    self.minLength = min(lengthTable.values())
    self.maxLength = max(lengthTable.values())
    #compute the backwards codes first; then reverse them
    #compute (backwards) first code for every separate lengths
    nextCodes = []
    #build codes for each length, from right to left
    code = 0
    for bits in range(self.maxLength+1):
        code <<= 1
        nextCodes.append(code)
        code += sum(x==bits for x in lengthTable.values())
    self.decodeTable = {}
    #count codes for each length, and store reversed in the table
    for symbol in sorted(lengthTable):
        bits = lengthTable[symbol]
        bitpattern = '{:0{}b}'.format(nextCodes[bits], bits)
        self.decodeTable[int(bitpattern[::-1], 2)] = symbol
        nextCodes[bits] += 1
    self.switchToPrefix()
python
{ "resource": "" }
q267714
Code.showCode
test
def showCode(self, width=80):
    """Show all words of the code in a nice format.

    :param width: maximum line width of the printed table
    """
    #make table of all symbols with binary strings
    symbolStrings = [
        (self.bitPattern(s.index), self.mnemonic(s.index))
        for s in self
        ]
    #column widths: widest bit pattern and widest mnemonic
    leftColWidth = max(len(b) for b, _ in symbolStrings)
    rightColWidth = max(len(m) for _, m in symbolStrings)
    colwidth = leftColWidth+rightColWidth
    #bug fix: honour the width parameter (the original hard-coded 81,
    #which is width+1 only for the default width=80); also guard against
    #a zero-column division when one entry is wider than the line
    columns = max(1, (width+1)//(colwidth+2))
    #ceiling division: number of rows needed
    rows = -(-len(symbolStrings)//columns)
    def justify(bs):
        b,s = bs
        return b.rjust(leftColWidth)+':'+s.ljust(rightColWidth)
    for i in range(rows):
        print(' '.join(map(justify, symbolStrings[i::rows])).rstrip())
python
{ "resource": "" }
q267715
Code.readTuple
test
def readTuple(self, stream):
    """Read one symbol from the stream.

    Returns (length, symbol) and advances the stream position by length bits.
    """
    peeked = stream.peek(self.maxLength)
    length, symbol = self.decodePeek(peeked)
    stream.pos += length
    return length, symbol
python
{ "resource": "" }
q267716
WithExtra.explanation
test
def explanation(self, index, extra=None):
    """Expanded version of Code.explanation supporting extra bits.
    If you don't supply extra, it is not mentioned.
    """
    # number of extra bits only matters when extra was actually supplied
    extraBits = 0 if extra is None else self.extraBits(index)
    if not hasattr(self, 'extraTable'):
        # no span information available: show the plain value
        formatString = '{0}{3}'
        lo = hi = value = self.value(index, extra)
    elif extraBits==0:
        # span known, no extra bits: pattern plus single value
        formatString = '{0}{2}: {3}'
        lo, hi = self.span(index)
        value = lo
    else:
        # full form: pattern, extra-bit placeholders, span and computed value
        formatString = '{0}{1} {2}: {3}-{4}; {3}+{5}={6}'
        lo, hi = self.span(index)
        value = lo+extra
    return formatString.format(
        self.description and self.description+': ',
        'x'*extraBits,
        self.bitPattern(index),
        lo, hi,
        extra,
        value,
        )
python
{ "resource": "" }
q267717
Enumerator.value
test
def value(self, index, extra):
    """Override if you don't define value0 and extraTable

    Computes span lower bound plus the extra bits; raises ValueError
    when the result exceeds the span's upper bound.
    """
    lower, upper = self.span(index)
    result = lower + (extra or 0)
    if result > upper:
        raise ValueError('value: extra out of range')
    return result
python
{ "resource": "" }
q267718
Enumerator.span
test
def span(self, index):
    """Give the range of possible values in a tuple
    Useful for mnemonic and explanation

    Returns the inclusive (lower, upper) range for the given symbol index.
    """
    lower = self.value0
    for bits in self.extraTable[:index]:
        lower += 1 << bits
    width = 1 << self.extraTable[index]
    return lower, lower + width - 1
python
{ "resource": "" }
q267719
TreeAlphabet.value
test
def value(self, index, extra):
    """Give count and value.

    Returns a (count, value) tuple: index 0 is a single zero, indices up
    to RLEMAX are run-length codes whose extra bits extend the run of
    zeros, and larger indices encode a single value of index-RLEMAX.
    """
    # fix: removed the original's dead no-op statement `index = index`
    if index == 0:
        return 1, 0
    if index <= self.RLEMAX:
        # repeat code: base run length 1<<index extended by the extra bits
        return (1 << index) + extra, 0
    return 1, index - self.RLEMAX
python
{ "resource": "" }
q267720
InsertAndCopyAlphabet.mnemonic
test
def mnemonic(self, index):
    """Make a nice mnemonic

    Renders an insert&copy symbol as I<base>[+bits]C<base>[+bits][&D=0],
    where long runs of extra-bit placeholders are compressed to [n*x].
    """
    i,c,d0 = self.splitSymbol(index)
    iLower, _ = i.code.span(i.index)
    iExtra = i.extraBits()
    cLower, _ = c.code.span(c.index)
    cExtra = c.extraBits()
    return 'I{}{}{}C{}{}{}{}'.format(
        iLower,
        '+' if iExtra else '',
        'x'*iExtra if iExtra<6 else '[{}*x]'.format(iExtra),
        cLower,
        '+' if cExtra else '',
        'x'*cExtra if cExtra<6 else '[{}*x]'.format(cExtra),
        '&D=0' if d0 else '')
python
{ "resource": "" }
q267721
DistanceAlphabet.mnemonic
test
def mnemonic(self, index, verbose=False):
    """Give mnemonic representation of meaning.
    verbose compresses strings of x's
    """
    # first 16 symbols reference recently used distances
    if index<16:
        return ['last', '2last', '3last', '4last',
            'last-1', 'last+1', 'last-2', 'last+2', 'last-3', 'last+3',
            '2last-1', '2last+1', '2last-2', '2last+2', '2last-3', '2last+3'
            ][index]
    # next NDIRECT symbols are literal distances
    if index<16+self.NDIRECT:
        return str(index-16)
    #construct strings like "1xx01-15"
    index -= self.NDIRECT+16
    hcode = index >> self.NPOSTFIX
    lcode = index & (1<<self.NPOSTFIX)-1
    if self.NPOSTFIX:
        formatString = '1{0}{1}{2:0{3}b}{4:+d}'
    else:
        formatString = '1{0}{1}{4:+d}'
    # NOTE(review): `2+hcode>>1` parses as (2+hcode)>>1 in Python
    # (+ binds tighter than >>) — presumably intended; confirm against
    # the distance extra-bit formula in the spec
    return formatString.format(
        hcode&1,
        'x'*(2+hcode>>1) if hcode<13 or verbose
            else '[{}*x]'.format(2+hcode>>1),
        lcode,
        self.NPOSTFIX,
        self.NDIRECT+1-(4<<self.NPOSTFIX))
python
{ "resource": "" }
q267722
WordList.compileActions
test
def compileActions(self):
    """Build the action table from the text above

    Parses the column-formatted actionTable text into 121 Python
    expression strings (evaluated later by doAction).
    """
    import re
    self.actionList = actions = [None]*121
    #Action 73, which is too long, looks like this when expanded:
    actions[73] = "b' the '+w+b' of the '"
    #find out what the columns are
    actionLines = self.actionTable.splitlines()
    # colon positions in the second line define the column layout;
    # sentinel 100 closes the last column
    colonPositions = [m.start()
        for m in re.finditer(':',actionLines[1])
        ]+[100]
    columns = [(colonPositions[i]-3,colonPositions[i+1]-3)
        for i in range(len(colonPositions)-1)]
    for line in self.actionTable.splitlines(keepends=False):
        for start,end in columns:
            action = line[start:end]
            #skip empty actions
            if not action or action.isspace(): continue
            #chop it up, and check if the colon is properly placed
            index, colon, action = action[:3], action[3], action[4:]
            assert colon==':'
            #remove filler spaces at right
            action = action.rstrip()
            #replace space symbols
            action = action.replace('_', ' ')
            # NOTE(review): wPos is computed but never used below — presumably
            # a leftover; every action is assumed to contain a 'w'
            wPos = action.index('w')
            #add quotes around left string when present
            #translation: any pattern from beginning, up to
            #(but not including) a + following by a w later on
            action = re.sub(r"^(.*)(?=\+[U(]*w)", r"b'\1'", action)
            #add quotes around right string when present
            #translation: anything with a w in it, followed by a +
            #and a pattern up to the end
            #(there is no variable lookbehind assertion,
            #so we have to copy the pattern)
            action = re.sub(r"(w[[:\-1\]).U]*)\+(.*)$", r"\1+b'\2'", action)
            #expand shortcut for uppercaseAll
            action = action.replace(".U", ".upper()")
            #store action
            actions[int(index)] = action
python
{ "resource": "" }
q267723
WordList.doAction
test
def doAction(self, w, action): """Perform the proper action """ #set environment for the UpperCaseFirst U = self.upperCase1 return eval(self.actionList[action], locals())
python
{ "resource": "" }
q267724
Layout.makeHexData
test
def makeHexData(self, pos):
    """Produce hex dump of all data containing the bits
    from pos to stream.pos
    """
    # round bit positions up to whole bytes
    firstAddress = (pos + 7) >> 3
    lastAddress = (self.stream.pos + 7) >> 3
    chunk = self.stream.data[firstAddress:lastAddress]
    return ''.join('{:02x} '.format(byte) for byte in chunk)
python
{ "resource": "" }
q267725
Layout.processStream
test
def processStream(self):
    """Process a brotli stream.

    Top-level driver: reads the stream header, then loops over metablocks
    printing an annotated dump of every field until the last block.
    """
    print('addr  hex{:{}s}binary context explanation'.format(
        '', self.width-10))
    print('Stream header'.center(60, '-'))
    self.windowSize = self.verboseRead(WindowSizeAlphabet())
    print('Metablock header'.center(60, '='))
    self.ISLAST = False
    self.output = bytearray()
    while not self.ISLAST:
        self.ISLAST = self.verboseRead(
            BoolCode('LAST', description="Last block"))
        if self.ISLAST:
            # the last block may be marked empty and end the stream
            if self.verboseRead(
                    BoolCode('EMPTY', description="Empty block")):
                break
        # skip blocks (metadata) and uncompressed blocks short-circuit here
        if self.metablockLength():
            continue
        if not self.ISLAST and self.uncompressed():
            continue
        print('Block type descriptors'.center(60, '-'))
        self.numberOfBlockTypes = {}
        self.currentBlockCounts = {}
        self.blockTypeCodes = {}
        self.blockCountCodes = {}
        # L, I, D: literal, insert&copy, distance block categories
        for blockType in (L,I,D):
            self.blockType(blockType)
        print('Distance code parameters'.center(60, '-'))
        self.NPOSTFIX, self.NDIRECT = self.verboseRead(DistanceParamAlphabet())
        self.readLiteralContextModes()
        print('Context maps'.center(60, '-'))
        self.cmaps = {}
        #keep the number of each kind of prefix tree for the last loop
        numberOfTrees = {I: self.numberOfBlockTypes[I]}
        for blockType in (L,D):
            numberOfTrees[blockType] = self.contextMap(blockType)
        print('Prefix code lists'.center(60, '-'))
        self.prefixCodes = {}
        for blockType in (L,I,D):
            self.readPrefixArray(blockType, numberOfTrees[blockType])
        self.metablock()
python
{ "resource": "" }
q267726
Layout.metablockLength
test
def metablockLength(self):
    """Read MNIBBLES and meta block length;
    if empty block, skip block and return true.
    """
    self.MLEN = self.verboseRead(MetablockLengthAlphabet())
    if self.MLEN:
        return False
    #empty block: skip MSKIP bytes and return True
    self.verboseRead(ReservedAlphabet())
    MSKIP = self.verboseRead(SkipLengthAlphabet())
    self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
    self.stream.pos += 8*MSKIP
    print("Skipping to {:x}".format(self.stream.pos>>3))
    return True
python
{ "resource": "" }
q267727
Layout.uncompressed
test
def uncompressed(self):
    """If true, handle uncompressed data

    Reads the ISUNCOMPRESSED flag; when set, aligns to a byte boundary,
    copies MLEN raw bytes into self.output and prints them.
    """
    ISUNCOMPRESSED = self.verboseRead(
        BoolCode('UNCMPR', description='Is uncompressed?'))
    if ISUNCOMPRESSED:
        # align to a byte boundary before the raw copy
        self.verboseRead(FillerAlphabet(streamPos=self.stream.pos))
        print('Uncompressed data:')
        self.output += self.stream.readBytes(self.MLEN)
        print(outputFormatter(self.output[-self.MLEN:]))
    return ISUNCOMPRESSED
python
{ "resource": "" }
q267728
Layout.blockType
test
def blockType(self, kind):
    """Read block type switch descriptor for given kind of blockType.

    Stores the number of block types and, when more than one type exists,
    the prefix codes for type switches and block counts plus the first count.
    """
    NBLTYPES = self.verboseRead(TypeCountAlphabet(
        'BT#'+kind[0].upper(),
        description='{} block types'.format(kind),
        ))
    self.numberOfBlockTypes[kind] = NBLTYPES
    if NBLTYPES>=2:
        self.blockTypeCodes[kind] = self.readPrefixCode(
            BlockTypeAlphabet('BT'+kind[0].upper(), NBLTYPES))
        self.blockCountCodes[kind] = self.readPrefixCode(
            BlockCountAlphabet('BC'+kind[0].upper()))
        blockCount = self.verboseRead(self.blockCountCodes[kind])
    else:
        # single block type: one "infinite" block
        blockCount = 1<<24
    self.currentBlockCounts[kind] = blockCount
python
{ "resource": "" }
q267729
Layout.IMTF
test
def IMTF(v):
    """In place inverse move to front transform.

    The move-to-front list is conceptually initialized with 0,1,2,...
    (RFC 7932 section 7.3 uses the identity list 0..255).

    Bug fix: the original kept only the moved-to-front entries and fell
    back to `value = vi` for indices beyond them. That is wrong once any
    value has moved to the front (e.g. [1, 1] decoded to [1, 1] instead
    of [1, 0]). Materializing the identity prefix up to max(v) is always
    sufficient, since an index vi can only reach position vi.
    """
    if not v:
        return
    mtf = list(range(max(v) + 1))
    for i, vi in enumerate(v):
        # take the value at position vi and move it to the front
        value = mtf.pop(vi)
        mtf.insert(0, value)
        # replace transformed value
        v[i] = value
python
{ "resource": "" }
q267730
Layout.readPrefixArray
test
def readPrefixArray(self, kind, numberOfTrees):
    """Read prefix code array

    Reads numberOfTrees prefix codes of the given kind (L, I or D) and
    stores them in self.prefixCodes[kind].
    """
    prefixes = []
    for i in range(numberOfTrees):
        # pick the alphabet matching the block category
        if kind==L: alphabet = LiteralAlphabet(i)
        elif kind==I: alphabet = InsertAndCopyAlphabet(i)
        elif kind==D: alphabet = DistanceAlphabet(
            i, NPOSTFIX=self.NPOSTFIX, NDIRECT=self.NDIRECT)
        self.readPrefixCode(alphabet)
        prefixes.append(alphabet)
    self.prefixCodes[kind] = prefixes
python
{ "resource": "" }
q267731
monochrome
test
def monochrome(I, color, vmin=None, vmax=None):
    """Turns a intensity array to a monochrome 'image' by replacing each
    intensity by a scaled 'color'

    Values in I between vmin and vmax get scaled between 0 and 1, and values
    outside this range are clipped to this.

    Example
    >>> I = np.arange(16.).reshape(4,4)
    >>> color = (0, 0, 1) # red
    >>> rgb = vx.image.monochrome(I, color) # shape is (4,4,3)

    :param I: ndarray of any shape (2d for image)
    :param color: sequence of a (r, g and b) value
    :param vmin: normalization minimum for I, or np.nanmin(I) when None
    :param vmax: normalization maximum for I, or np.nanmax(I) when None
    :return:
    """
    lo = np.nanmin(I) if vmin is None else vmin
    hi = np.nanmax(I) if vmax is None else vmax
    # scale to [0, 1], clip the out-of-range values, then paint with color
    scaled = (I - lo) / (hi - lo)
    clipped = np.clip(scaled[..., np.newaxis], 0, 1)
    return clipped * np.array(color)
python
{ "resource": "" }
q267732
polychrome
test
def polychrome(I, colors, vmin=None, vmax=None, axis=-1):
    """Similar to monochrome, but now do it for multiple colors

    Example
    >>> I = np.arange(32.).reshape(4,4,2)
    >>> colors = [(0, 0, 1), (0, 1, 0)] # red and green
    >>> rgb = vx.image.polychrome(I, colors) # shape is (4,4,3)

    :param I: ndarray of any shape (3d will result in a 2d image)
    :param colors: sequence of [(r,g,b), ...] values
    :param vmin: normalization minimum for I, or np.nanmin(I) when None
    :param vmax: normalization maximum for I, or np.nanmax(I) when None
    :param axis: axis which to sum over, by default the last
    :return:
    """
    ndim = len(I.shape)
    color_axis = (axis + ndim) % ndim
    # reduce over every axis except the color axis to get per-color bounds
    reduce_axes = tuple(a for a in range(ndim) if a != color_axis)
    if vmin is None:
        vmin = np.nanmin(I, axis=reduce_axes)
    if vmax is None:
        vmax = np.nanmax(I, axis=reduce_axes)
    # NOTE(review): the (I - vmin) broadcast lines up with the color axis
    # only when axis is the last one — same as the original; confirm for
    # other axis values
    normalized = (I - vmin) / (vmax - vmin)
    return np.clip(normalized, 0, 1).dot(colors)
python
{ "resource": "" }
q267733
arrow_table_from_vaex_df
test
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):
    """Implementation of Dataset.to_arrow_table

    Converts the selected columns of a vaex dataset to a pyarrow Table,
    passing the column filters straight through to ds.to_items.
    """
    names = []
    arrays = []
    for name, array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
        names.append(name)
        # each numpy column becomes an arrow array
        arrays.append(arrow_array_from_numpy_array(array))
    return pyarrow.Table.from_arrays(arrays, names)
python
{ "resource": "" }
q267734
patch
test
def patch(f):
    '''Adds method f to the Dataset class'''
    # decorator: register under the function's own name, hand it back unchanged
    Dataset.__hidden__[f.__name__] = f
    return f
python
{ "resource": "" }
q267735
add_virtual_columns_cartesian_velocities_to_pmvr
test
def add_virtual_columns_cartesian_velocities_to_pmvr(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", pm_long="pm_long", pm_lat="pm_lat", distance=None):
    """Concert velocities from a cartesian system to proper motions and radial velocities

    TODO: errors

    :param x: name of x column (input)
    :param y: y
    :param z: z
    :param vx: vx
    :param vy: vy
    :param vz: vz
    :param vr: name of the column for the radial velocity in the r direction (output)
    :param pm_long: name of the column for the proper motion component in the longitude direction (output)
    :param pm_lat: name of the column for the proper motion component in the latitude direction, positive points to the north pole (output)
    :param distance: Expression for distance, if not given defaults to sqrt(x**2+y**2+z**2), but if this column already exists, passing this expression may lead to a better performance
    :return:
    """
    if distance is None:
        distance = "sqrt({x}**2+{y}**2+{z}**2)".format(**locals())
    # k converts from (mas/yr * kpc) to km/s — presumably the standard
    # astrometric constant 4.74057; confirm units used by callers
    k = 4.74057
    self.add_variable("k", k, overwrite=False)
    # radial velocity: projection of the velocity vector onto the position vector
    self.add_virtual_column(vr, "({x}*{vx}+{y}*{vy}+{z}*{vz})/{distance}".format(**locals()))
    # proper motion components in longitude and latitude direction
    self.add_virtual_column(pm_long, "-({vx}*{y}-{x}*{vy})/sqrt({x}**2+{y}**2)/{distance}/k".format(**locals()))
    self.add_virtual_column(pm_lat, "-({z}*({x}*{vx}+{y}*{vy}) - ({x}**2+{y}**2)*{vz})/( ({x}**2+{y}**2+{z}**2) * sqrt({x}**2+{y}**2) )/k".format(**locals()))
python
{ "resource": "" }
q267736
add_virtual_columns_proper_motion2vperpendicular
test
def add_virtual_columns_proper_motion2vperpendicular(self, distance="distance", pm_long="pm_l", pm_lat="pm_b",
                                                     vl="vl", vb="vb",
                                                     propagate_uncertainties=False,
                                                     radians=False):
    """Convert proper motion to perpendicular velocities.

    :param distance: expression for the distance column (input)
    :param pm_long: proper motion in longitude direction (input)
    :param pm_lat: proper motion in latitude direction (input)
    :param vl: name of the output column for the longitude velocity
    :param vb: name of the output column for the latitude velocity
    :param propagate_uncertainties: when True, also add uncertainty columns for vl and vb
    :param radians: unused here; kept for interface compatibility
    :return:
    """
    # k converts proper motion [mas/yr] times distance [kpc] to km/s
    k = 4.74057
    self.add_variable("k", k, overwrite=False)
    self.add_virtual_column(vl, "k*{pm_long}*{distance}".format(**locals()))
    self.add_virtual_column(vb, "k* {pm_lat}*{distance}".format(**locals()))
    if propagate_uncertainties:
        self.propagate_uncertainties([self[vl], self[vb]])
python
{ "resource": "" }
q267737
Expression._graphviz
test
def _graphviz(self, dot=None):
    """Return a graphviz.Digraph object with a graph of the expression

    :param dot: optional existing Digraph to add the nodes/edges to
    """
    # fix: the original also imported Graph, which was never used
    from graphviz import Digraph
    node = self._graph()
    dot = dot or Digraph(comment=self.expression)

    def walk(node):
        # leaf nodes are plain strings (column/variable names)
        if isinstance(node, six.string_types):
            dot.node(node, node)
            return node, node
        node_repr, fname, fobj, deps = node
        node_id = node_repr
        dot.node(node_id, node_repr)
        # fix: don't rebind the loop variable while iterating over deps
        for dep in deps:
            dep_id, dep_node = walk(dep)
            dot.edge(node_id, dep_id)
        return node_id, node
    walk(node)
    return dot
python
{ "resource": "" }
q267738
Expression.value_counts
test
def value_counts(self, dropna=False, dropnull=True, ascending=False, progress=False):
    """Computes counts of unique values.

    WARNING:
    * If the expression/column is not categorical, it will be converted on the fly
    * dropna is False by default, it is True by default in pandas

    :param dropna: when True, it will not report the missing values
    :param ascending: when False (default) it will report the most frequent occuring item first
    :returns: Pandas series containing the counts
    """
    from pandas import Series
    dtype = self.dtype

    # a column is "transient" when its values must be materialized per chunk
    transient = self.transient or self.ds.filtered or self.ds.is_masked(self.expression)
    if self.dtype == str_type and not transient:
        # string is a special case, only ColumnString are not transient
        ar = self.ds.columns[self.expression]
        if not isinstance(ar, ColumnString):
            transient = True

    counter_type = counter_type_from_dtype(self.dtype, transient)
    # one counter per worker thread, created lazily inside map
    counters = [None] * self.ds.executor.thread_pool.nthreads
    def map(thread_index, i1, i2, ar):
        if counters[thread_index] is None:
            counters[thread_index] = counter_type()
        if dtype == str_type:
            previous_ar = ar
            ar = _to_string_sequence(ar)
            if not transient:
                assert ar is previous_ar.string_sequence
        if np.ma.isMaskedArray(ar):
            mask = np.ma.getmaskarray(ar)
            counters[thread_index].update(ar, mask)
        else:
            counters[thread_index].update(ar)
        return 0
    def reduce(a, b):
        return a+b
    self.ds.map_reduce(map, reduce, [self.expression], delay=False, progress=progress, name='value_counts', info=True, to_numpy=False)
    # merge the per-thread counters into the first one
    counters = [k for k in counters if k is not None]
    counter0 = counters[0]
    for other in counters[1:]:
        counter0.merge(other)
    value_counts = counter0.extract()
    index = np.array(list(value_counts.keys()))
    counts = np.array(list(value_counts.values()))

    # sort by frequency; default is most frequent first
    order = np.argsort(counts)
    if not ascending:
        order = order[::-1]
    counts = counts[order]
    index = index[order]

    if not dropna or not dropnull:
        # switch to python lists so nan/'null' sentinels can be prepended
        index = index.tolist()
        counts = counts.tolist()
        if not dropna and counter0.nan_count:
            index = [np.nan] + index
            counts = [counter0.nan_count] + counts
        if not dropnull and counter0.null_count:
            index = ['null'] + index
            counts = [counter0.null_count] + counts

    return Series(counts, index=index)
python
{ "resource": "" }
q267739
Expression.map
test
def map(self, mapper, nan_mapping=None, null_mapping=None):
    """Map values of an expression or in memory column accoring to an input
    dictionary or a custom callable function.

    Example:

    >>> import vaex
    >>> df = vaex.from_arrays(color=['red', 'red', 'blue', 'red', 'green'])
    >>> mapper = {'red': 1, 'blue': 2, 'green': 3}
    >>> df['color_mapped'] = df.color.map(mapper)

    >>> import numpy as np
    >>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, np.nan])
    >>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user', np.nan: 'unknown'})

    :param mapper: dict like object used to map the values from keys to values
    :param nan_mapping: value to be used when a nan is present (and not in the mapper)
    :param null_mapping: value to use used when there is a missing value
    :return: A vaex expression
    :rtype: vaex.expression.Expression
    """
    assert isinstance(mapper, collectionsAbc.Mapping), "mapper should be a dict like object"
    df = self.ds
    mapper_keys = np.array(list(mapper.keys()))
    # we map the keys to a ordinal values [0, N-1] using the set
    key_set = df._set(self.expression)
    found_keys = key_set.keys()
    # key != key is the NaN test (NaN never equals itself)
    mapper_has_nan = any([key != key for key in mapper_keys])

    # we want all possible values to be converted
    # so mapper's key should be a superset of the keys found
    if not set(mapper_keys).issuperset(found_keys):
        missing = set(found_keys).difference(mapper_keys)
        missing0 = list(missing)[0]
        if missing0 == missing0:  # safe nan check: only raise for non-NaN misses
            raise ValueError('Missing values in mapper: %s' % missing)

    # and these are the corresponding choices
    choices = [mapper[key] for key in found_keys]
    # the ordinal encoding reserves leading slots for nan/null when present
    if key_set.has_nan:
        if mapper_has_nan:
            choices = [mapper[np.nan]] + choices
        else:
            choices = [nan_mapping] + choices
    if key_set.has_null:
        choices = [null_mapping] + choices
    choices = np.array(choices)

    key_set_name = df.add_variable('map_key_set', key_set, unique=True)
    choices_name = df.add_variable('map_choices', choices, unique=True)
    # the actual mapping happens lazily in the expression engine
    expr = '_choose(_ordinal_values({}, {}), {})'.format(self, key_set_name, choices_name)
    return Expression(df, expr)
python
{ "resource": "" }
q267740
app
test
def app(*args, **kwargs):
    """Create a vaex app, the QApplication mainloop must be started.

    In ipython notebook/jupyter do the following:

    >>> import vaex.ui.main # this causes the qt api level to be set properly
    >>> import vaex

    Next cell:

    >>> %gui qt

    Next cell:

    >>> app = vaex.app()

    From now on, you can run the app along with jupyter
    """
    # import here so the qt api level is set up before the app is created
    import vaex.ui.main
    return vaex.ui.main.VaexApp()
python
{ "resource": "" }
q267741
open_many
test
def open_many(filenames):
    """Open a list of filenames, and return a DataFrame with all DataFrames concatenated.

    Lines that are empty or start with '#' (after stripping) are skipped.

    :param list[str] filenames: list of filenames/paths
    :rtype: DataFrame
    """
    cleaned = (f.strip() for f in filenames)
    dfs = [open(name) for name in cleaned if name and not name.startswith("#")]
    return vaex.dataframe.DataFrameConcatenated(dfs=dfs)
python
{ "resource": "" }
q267742
from_samp
test
def from_samp(username=None, password=None):
    """Connect to a SAMP Hub and wait for a single table load event, disconnect, download the table and return the DataFrame.

    Useful if you want to send a single table from say TOPCAT to vaex in a
    python console or notebook.
    """
    print("Waiting for SAMP message...")
    import vaex.samp
    # blocks until one table is received over SAMP
    t = vaex.samp.single_table(username=username, password=password)
    return from_astropy_table(t.to_table())
python
{ "resource": "" }
q267743
from_astropy_table
test
def from_astropy_table(table):
    """Create a vaex DataFrame from an Astropy Table.

    :param table: astropy.table.Table instance to wrap
    :rtype: DataFrame
    """
    import vaex.file.other
    return vaex.file.other.DatasetAstropyTable(table=table)
python
{ "resource": "" }
q267744
from_arrays
test
def from_arrays(**arrays): """Create an in memory DataFrame from numpy arrays. Example >>> import vaex, numpy as np >>> x = np.arange(5) >>> y = x ** 2 >>> vaex.from_arrays(x=x, y=y) # x y 0 0 0 1 1 1 2 2 4 3 3 9 4 4 16 >>> some_dict = {'x': x, 'y': y} >>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict # x y 0 0 0 1 1 1 2 2 4 3 3 9 4 4 16 :param arrays: keyword arguments with arrays :rtype: DataFrame """ import numpy as np import six from .column import Column df = vaex.dataframe.DataFrameArrays("array") for name, array in arrays.items(): if isinstance(array, Column): df.add_column(name, array) else: array = np.asanyarray(array) df.add_column(name, array) return df
python
{ "resource": "" }
q267745
from_scalars
test
def from_scalars(**kwargs): """Similar to from_arrays, but convenient for a DataFrame of length 1. Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) :rtype: DataFrame """ import numpy as np return from_arrays(**{k: np.array([v]) for k, v in kwargs.items()})
python
{ "resource": "" }
q267746
from_pandas
test
def from_pandas(df, name="pandas", copy_index=True, index_name="index"): """Create an in memory DataFrame from a pandas DataFrame. :param: pandas.DataFrame df: Pandas DataFrame :param: name: unique for the DataFrame >>> import vaex, pandas as pd >>> df_pandas = pd.from_csv('test.csv') >>> df = vaex.from_pandas(df_pandas) :rtype: DataFrame """ import six vaex_df = vaex.dataframe.DataFrameArrays(name) def add(name, column): values = column.values try: vaex_df.add_column(name, values) except Exception as e: print("could not convert column %s, error: %r, will try to convert it to string" % (name, e)) try: values = values.astype("S") vaex_df.add_column(name, values) except Exception as e: print("Giving up column %s, error: %r" % (name, e)) for name in df.columns: add(name, df[name]) if copy_index: add(index_name, df.index) return vaex_df
python
{ "resource": "" }
q267747
from_csv
test
def from_csv(filename_or_buffer, copy_index=True, **kwargs): """Shortcut to read a csv file using pandas and convert to a DataFrame directly. :rtype: DataFrame """ import pandas as pd return from_pandas(pd.read_csv(filename_or_buffer, **kwargs), copy_index=copy_index)
python
{ "resource": "" }
q267748
server
test
def server(url, **kwargs): """Connect to hostname supporting the vaex web api. :param str hostname: hostname or ip address of server :return vaex.dataframe.ServerRest: returns a server object, note that it does not connect to the server yet, so this will always succeed :rtype: ServerRest """ from vaex.remote import ServerRest url = urlparse(url) if url.scheme == "ws": websocket = True else: websocket = False assert url.scheme in ["ws", "http"] port = url.port base_path = url.path hostname = url.hostname return vaex.remote.ServerRest(hostname, base_path=base_path, port=port, websocket=websocket, **kwargs)
python
{ "resource": "" }
q267749
zeldovich
test
def zeldovich(dim=2, N=256, n=-2.5, t=None, scale=1, seed=None): """Creates a zeldovich DataFrame. """ import vaex.file return vaex.file.other.Zeldovich(dim=dim, N=N, n=n, t=t, scale=scale)
python
{ "resource": "" }
q267750
concat
test
def concat(dfs): '''Concatenate a list of DataFrames. :rtype: DataFrame ''' ds = reduce((lambda x, y: x.concat(y)), dfs) return ds
python
{ "resource": "" }
q267751
vrange
test
def vrange(start, stop, step=1, dtype='f8'): """Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory""" from .column import ColumnVirtualRange return ColumnVirtualRange(start, stop, step, dtype)
python
{ "resource": "" }
q267752
VaexApp.open
test
def open(self, path): """Add a dataset and add it to the UI""" logger.debug("open dataset: %r", path) if path.startswith("http") or path.startswith("ws"): dataset = vaex.open(path, thread_mover=self.call_in_main_thread) else: dataset = vaex.open(path) self.add_recently_opened(path) self.dataset_selector.add(dataset) return dataset
python
{ "resource": "" }
q267753
DatasetRest.evaluate
test
def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, delay=False): expression = _ensure_strings_from_expressions(expression) """basic support for evaluate at server, at least to run some unittest, do not expect this to work from strings""" result = self.server._call_dataset("evaluate", self, expression=expression, i1=i1, i2=i2, selection=selection, delay=delay) # TODO: we ignore out return result
python
{ "resource": "" }
q267754
delayed
test
def delayed(f): '''Decorator to transparantly accept delayed computation. Example: >>> delayed_sum = ds.sum(ds.E, binby=ds.x, limits=limits, >>> shape=4, delay=True) >>> @vaex.delayed >>> def total_sum(sums): >>> return sums.sum() >>> sum_of_sums = total_sum(delayed_sum) >>> ds.execute() >>> sum_of_sums.get() See the tutorial for a more complete example https://docs.vaex.io/en/latest/tutorial.html#Parallel-computations ''' def wrapped(*args, **kwargs): # print "calling", f, "with", kwargs # key_values = kwargs.items() key_promise = list([(key, promisify(value)) for key, value in kwargs.items()]) # key_promise = [(key, promisify(value)) for key, value in key_values] arg_promises = list([promisify(value) for value in args]) kwarg_promises = list([promise for key, promise in key_promise]) promises = arg_promises + kwarg_promises for promise in promises: def echo_error(exc, promise=promise): print("error with ", promise, "exception is", exc) # raise exc def echo(value, promise=promise): print("done with ", repr(promise), "value is", value) # promise.then(echo, echo_error) # print promises allarguments = aplus.listPromise(*promises) def call(_): kwargs_real = {key: promise.get() for key, promise in key_promise} args_real = list([promise.get() for promise in arg_promises]) return f(*args_real, **kwargs_real) def error(exc): print("error", exc) raise exc return allarguments.then(call, error) return wrapped
python
{ "resource": "" }
q267755
Selection._depending_columns
test
def _depending_columns(self, ds): '''Find all columns that this selection depends on for df ds''' depending = set() for expression in self.expressions: expression = ds._expr(expression) # make sure it is an expression depending |= expression.variables() if self.previous_selection: depending |= self.previous_selection._depending_columns(ds) return depending
python
{ "resource": "" }
q267756
SubspaceLocal._task
test
def _task(self, task, progressbar=False): """Helper function for returning tasks results, result when immediate is True, otherwise the task itself, which is a promise""" if self.delay: # should return a task or a promise nesting it return self.executor.schedule(task) else: import vaex.utils callback = None try: if progressbar == True: def update(fraction): bar.update(fraction) return True bar = vaex.utils.progressbar(task.name) callback = self.executor.signal_progress.connect(update) elif progressbar: callback = self.executor.signal_progress.connect(progressbar) result = self.executor.run(task) if progressbar == True: bar.finish() sys.stdout.write('\n') return result finally: if callback: self.executor.signal_progress.disconnect(callback)
python
{ "resource": "" }
q267757
____RankingTableModel.sort
test
def sort(self, Ncol, order): """Sort table by given column number. """ self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()")) if Ncol == 0: print("by name") # get indices, sorted by pair name sortlist = list(zip(self.pairs, list(range(len(self.pairs))))) print(sortlist) sortlist.sort(key=operator.itemgetter(0)) print(sortlist) self.indices = list(map(operator.itemgetter(1), sortlist)) print((self.indices)) if Ncol == 1: # get indices, sorted by ranking, or no sorting if None not in self.ranking: sortlist = list(zip(self.ranking, list(range(len(self.pairs))))) sortlist.sort(key=operator.itemgetter(0)) self.indices = list(map(operator.itemgetter(1), sortlist)) else: self.indices = list(range(len(self.pairs))) print((self.indices)) if order == QtCore.Qt.DescendingOrder: self.indices.reverse() print((self.indices)) self.emit(QtCore.SIGNAL("layoutChanged()"))
python
{ "resource": "" }
q267758
getinfo
test
def getinfo(filename, seek=None): """Read header data from Gadget data file 'filename' with Gadget file type 'gtype'. Returns offsets of positions and velocities.""" DESC = '=I4sII' # struct formatting string HEAD = '=I6I6dddii6iiiddddii6ii60xI' # struct formatting string keys = ('Npart', 'Massarr', 'Time', 'Redshift', 'FlagSfr', 'FlagFeedback', 'Nall', 'FlagCooling', 'NumFiles', 'BoxSize', 'Omega0', 'OmegaLambda', 'HubbleParam', 'FlagAge', 'FlagMetals', 'NallHW', 'flag_entr_ics', 'filename') f = open(filename, 'rb') """Detects Gadget file type (type 1 or 2; resp. without or with the 16 byte block headers).""" firstbytes = struct.unpack('I',f.read(4)) if firstbytes[0] == 8: gtype = 2 else: gtype = 1 if gtype == 2: f.seek(16) else: f.seek(0) if seek is not None: f.seek(seek) raw = struct.unpack(HEAD,f.read(264))[1:-1] values = (raw[:6], raw[6:12]) + raw[12:16] + (raw[16:22],) + raw[22:30] + (raw[30:36], raw[36], filename) header = dict(list(zip(keys, values))) f.close() if gtype == 2: posoffset = (2*16 + (8 + 256)) else: posoffset = (8 + 256) Npart = sum(header['Npart']) if gtype == 2: veloffset = 3*16 + (8 + 256) + (8 + 3*4*Npart) else: veloffset= (8 + 256) + (8 + 3*4*Npart) return Npart, posoffset+4, veloffset+4, header
python
{ "resource": "" }
q267759
Slicer.clear
test
def clear(self, event): """clear the cursor""" if self.useblit: self.background = ( self.canvas.copy_from_bbox(self.canvas.figure.bbox)) for line in self.vlines + self.hlines: line.set_visible(False) self.ellipse.set_visible(False)
python
{ "resource": "" }
q267760
PlotDialog._wait
test
def _wait(self): """Used for unittesting to make sure the plots are all done""" logger.debug("will wait for last plot to finish") self._plot_event = threading.Event() self.queue_update._wait() self.queue_replot._wait() self.queue_redraw._wait() qt_app = QtCore.QCoreApplication.instance() sleep = 10 while not self._plot_event.is_set(): logger.debug("waiting for last plot to finish") qt_app.processEvents() QtTest.QTest.qSleep(sleep) logger.debug("waiting for plot finished")
python
{ "resource": "" }
q267761
os_open
test
def os_open(document): """Open document by the default handler of the OS, could be a url opened by a browser, a text file by an editor etc""" osname = platform.system().lower() if osname == "darwin": os.system("open \"" + document + "\"") if osname == "linux": cmd = "xdg-open \"" + document + "\"&" os.system(cmd) if osname == "windows": os.system("start \"" + document + "\"")
python
{ "resource": "" }
q267762
write_to
test
def write_to(f, mode): """Flexible writing, where f can be a filename or f object, if filename, closed after writing""" if hasattr(f, 'write'): yield f else: f = open(f, mode) yield f f.close()
python
{ "resource": "" }
q267763
_split_and_combine_mask
test
def _split_and_combine_mask(arrays): '''Combines all masks from a list of arrays, and logically ors them into a single mask''' masks = [np.ma.getmaskarray(block) for block in arrays if np.ma.isMaskedArray(block)] arrays = [block.data if np.ma.isMaskedArray(block) else block for block in arrays] mask = None if masks: mask = masks[0].copy() for other in masks[1:]: mask |= other return arrays, mask
python
{ "resource": "" }
q267764
DataFrame.nop
test
def nop(self, expression, progress=False, delay=False): """Evaluates expression, and drop the result, usefull for benchmarking, since vaex is usually lazy""" expression = _ensure_string_from_expression(expression) def map(ar): pass def reduce(a, b): pass return self.map_reduce(map, reduce, [expression], delay=delay, progress=progress, name='nop', to_numpy=False)
python
{ "resource": "" }
q267765
DataFrame.first
test
def first(self, expression, order_expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None): """Return the first element of a binned `expression`, where the values each bin are sorted by `order_expression`. Example: >>> import vaex >>> df = vaex.example() >>> df.first(df.x, df.y, shape=8) >>> df.first(df.x, df.y, shape=8, binby=[df.y]) >>> df.first(df.x, df.y, shape=8, binby=[df.y]) array([-4.81883764, 11.65378 , 9.70084476, -7.3025589 , 4.84954977, 8.47446537, -5.73602629, 10.18783 ]) :param expression: The value to be placed in the bin. :param order_expression: Order the values in the bins by this expression. :param binby: {binby} :param limits: {limits} :param shape: {shape} :param selection: {selection} :param delay: {delay} :param progress: {progress} :param edges: {edges} :return: Ndarray containing the first elements. :rtype: numpy.array """ return self._compute_agg('first', expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions=[order_expression]) logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits) logger.debug("count(%r, binby=%r, limits=%r)", expression, binby, limits) expression = _ensure_strings_from_expressions(expression) order_expression = _ensure_string_from_expression(order_expression) binby = _ensure_strings_from_expressions(binby) waslist, [expressions,] = vaex.utils.listify(expression) @delayed def finish(*counts): counts = np.asarray(counts) return vaex.utils.unlistify(waslist, counts) progressbar = vaex.utils.progressbars(progress) limits = self.limits(binby, limits, delay=True, shape=shape) stats = [self._first_calculation(expression, order_expression, binby=binby, limits=limits, shape=shape, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions] var = finish(*stats) return self._delay(delay, var)
python
{ "resource": "" }
q267766
DataFrame.mean
test
def mean(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False): """Calculate the mean for expression, possibly on a grid defined by binby. Example: >>> df.mean("x") -0.067131491264005971 >>> df.mean("(x**2+y**2)**0.5", binby="E", shape=4) array([ 2.43483742, 4.41840721, 8.26742458, 15.53846476]) :param expression: {expression} :param binby: {binby} :param limits: {limits} :param shape: {shape} :param selection: {selection} :param delay: {delay} :param progress: {progress} :return: {return_stat_scalar} """ return self._compute_agg('mean', expression, binby, limits, shape, selection, delay, edges, progress) logger.debug("mean of %r, with binby=%r, limits=%r, shape=%r, selection=%r, delay=%r", expression, binby, limits, shape, selection, delay) expression = _ensure_strings_from_expressions(expression) selection = _ensure_strings_from_expressions(selection) binby = _ensure_strings_from_expressions(binby) @delayed def calculate(expression, limits): task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_ADD_WEIGHT_MOMENTS_01, selection=selection) self.executor.schedule(task) progressbar.add_task(task, "mean for %s" % expression) return task @delayed def finish(*stats_args): stats = np.array(stats_args) counts = stats[..., 0] with np.errstate(divide='ignore', invalid='ignore'): mean = stats[..., 1] / counts return vaex.utils.unlistify(waslist, mean) waslist, [expressions, ] = vaex.utils.listify(expression) progressbar = vaex.utils.progressbars(progress) limits = self.limits(binby, limits, delay=True) stats = [calculate(expression, limits) for expression in expressions] var = finish(*stats) return self._delay(delay, var)
python
{ "resource": "" }
q267767
DataFrame.sum
test
def sum(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False): """Calculate the sum for the given expression, possible on a grid defined by binby Example: >>> df.sum("L") 304054882.49378014 >>> df.sum("L", binby="E", shape=4) array([ 8.83517994e+06, 5.92217598e+07, 9.55218726e+07, 1.40008776e+08]) :param expression: {expression} :param binby: {binby} :param limits: {limits} :param shape: {shape} :param selection: {selection} :param delay: {delay} :param progress: {progress} :return: {return_stat_scalar} """ return self._compute_agg('sum', expression, binby, limits, shape, selection, delay, edges, progress) @delayed def finish(*sums): return vaex.utils.unlistify(waslist, sums) expression = _ensure_strings_from_expressions(expression) binby = _ensure_strings_from_expressions(binby) waslist, [expressions, ] = vaex.utils.listify(expression) progressbar = vaex.utils.progressbars(progress) limits = self.limits(binby, limits, delay=True) # stats = [calculate(expression, limits) for expression in expressions] sums = [self._sum_calculation(expression, binby=binby, limits=limits, shape=shape, selection=selection, progressbar=progressbar) for expression in expressions] s = finish(*sums) return self._delay(delay, s)
python
{ "resource": "" }
q267768
DataFrame.std
test
def std(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None): """Calculate the standard deviation for the given expression, possible on a grid defined by binby >>> df.std("vz") 110.31773397535071 >>> df.std("vz", binby=["(x**2+y**2)**0.5"], shape=4) array([ 123.57954851, 85.35190177, 61.14345748, 38.0740619 ]) :param expression: {expression} :param binby: {binby} :param limits: {limits} :param shape: {shape} :param selection: {selection} :param delay: {delay} :param progress: {progress} :return: {return_stat_scalar} """ @delayed def finish(var): return var**0.5 return self._delay(delay, finish(self.var(expression, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progress)))
python
{ "resource": "" }
q267769
DataFrame.cov
test
def cov(self, x, y=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the covariance matrix for x and y or more expressions, possibly on a grid defined by binby.

    Either x and y are expressions, e.g:

    >>> df.cov("x", "y")

    Or only the x argument is given with a list of expressions, e.g.:

    >>> df.cov(["x, "y, "z"])

    :param x: {expression}
    :param y: {expression_single}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :return: {return_stat_scalar}, the last dimensions are of shape (2,2)
    """
    selection = _ensure_strings_from_expressions(selection)
    if y is None:
        if not _issequence(x):
            raise ValueError("if y argument is not given, x is expected to be sequence, not %r", x)
        expressions = x
    else:
        expressions = [x, y]
    N = len(expressions)
    binby = _ensure_list(binby)
    shape = _expand_shape(shape, len(binby))
    progressbar = vaex.utils.progressbars(progress)
    limits = self.limits(binby, limits, selection=selection, delay=True)

    @delayed
    def calculate(expressions, limits):
        # one task computes counts, sums and second moments for all pairs
        task = tasks.TaskStatistic(self, binby, shape, limits, weights=expressions, op=tasks.OP_COV, selection=selection)
        self.executor.schedule(task)
        progressbar.add_task(task, "covariance values for %r" % expressions)
        return task

    @delayed
    def finish(values):
        # layout of values' last axis: [counts(N), sums(N), pair counts(N*N), pair sums(N*N)]
        N = len(expressions)
        counts = values[..., :N]
        sums = values[..., N:2 * N]
        with np.errstate(divide='ignore', invalid='ignore'):
            means = sums / counts
        # matrix of means * means.T (outer product per grid cell)
        meansxy = means[..., None] * means[..., None, :]

        counts = values[..., 2 * N:2 * N + N**2]
        sums = values[..., 2 * N + N**2:]
        shape = counts.shape[:-1] + (N, N)
        counts = counts.reshape(shape)
        sums = sums.reshape(shape)
        with np.errstate(divide='ignore', invalid='ignore'):
            moments2 = sums / counts
        # cov(X, Y) = E[XY] - E[X]E[Y]
        cov_matrix = moments2 - meansxy
        return cov_matrix
    # NOTE(review): progressbar is re-created here; the instance created
    # above is the one the task was registered with
    progressbar = vaex.utils.progressbars(progress)
    values = calculate(expressions, limits)
    cov_matrix = finish(values)
    return self._delay(delay, cov_matrix)
python
{ "resource": "" }
q267770
DataFrame.minmax
test
def minmax(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    """Calculate the minimum and maximum for expressions, possibly on a grid defined by binby.

    Example:

    >>> df.minmax("x")
    array([-128.293991,  271.365997])
    >>> df.minmax(["x", "y"])
    array([[-128.293991 ,  271.365997 ],
           [ -71.5523682,  146.465836 ]])
    >>> df.minmax("x", binby="x", shape=5, limits=[-10, 10])
    array([[-9.99919128, -6.00010443],
           [-5.99972439, -2.00002384],
           [-1.99991322,  1.99998057],
           [ 2.0000093 ,  5.99983597],
           [ 6.0004878 ,  9.99984646]])

    :param expression: {expression}
    :param binby: {binby}
    :param limits: {limits}
    :param shape: {shape}
    :param selection: {selection}
    :param delay: {delay}
    :param progress: {progress}
    :return: {return_stat_scalar}, the last dimension is of shape (2)
    """
    # fix: the `finish` helper was defined twice, verbatim; removed the duplicate
    @delayed
    def calculate(expression, limits):
        task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_MIN_MAX, selection=selection)
        self.executor.schedule(task)
        progressbar.add_task(task, "minmax for %s" % expression)
        return task

    @delayed
    def finish(*minmax_list):
        value = vaex.utils.unlistify(waslist, np.array(minmax_list))
        value = value.astype(dtype0)
        return value

    expression = _ensure_strings_from_expressions(expression)
    binby = _ensure_strings_from_expressions(binby)
    waslist, [expressions, ] = vaex.utils.listify(expression)
    dtypes = [self.dtype(expr) for expr in expressions]
    dtype0 = dtypes[0]
    # mixing e.g. datetime64 and float min/max in one result array is undefined
    if not all([k.kind == dtype0.kind for k in dtypes]):
        raise ValueError("cannot mix datetime and non-datetime expressions")
    progressbar = vaex.utils.progressbars(progress, name="minmaxes")
    limits = self.limits(binby, limits, selection=selection, delay=True)
    all_tasks = [calculate(expression, limits) for expression in expressions]
    result = finish(*all_tasks)
    return self._delay(delay, result)
python
{ "resource": "" }
q267771
DataFrame.min
test
def min(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False): """Calculate the minimum for given expressions, possibly on a grid defined by binby. Example: >>> df.min("x") array(-128.293991) >>> df.min(["x", "y"]) array([-128.293991 , -71.5523682]) >>> df.min("x", binby="x", shape=5, limits=[-10, 10]) array([-9.99919128, -5.99972439, -1.99991322, 2.0000093 , 6.0004878 ]) :param expression: {expression} :param binby: {binby} :param limits: {limits} :param shape: {shape} :param selection: {selection} :param delay: {delay} :param progress: {progress} :return: {return_stat_scalar}, the last dimension is of shape (2) """ return self._compute_agg('min', expression, binby, limits, shape, selection, delay, edges, progress) @delayed def finish(result): return result[..., 0] return self._delay(delay, finish(self.minmax(expression, binby=binby, limits=limits, shape=shape, selection=selection, delay=delay, progress=progress)))
python
{ "resource": "" }
q267772
DataFrame.median_approx
test
def median_approx(self, expression, percentage=50., binby=[], limits=None, shape=default_shape, percentile_shape=256, percentile_limits="minmax", selection=False, delay=False): """Calculate the median , possibly on a grid defined by binby. NOTE: this value is approximated by calculating the cumulative distribution on a grid defined by percentile_shape and percentile_limits :param expression: {expression} :param binby: {binby} :param limits: {limits} :param shape: {shape} :param percentile_limits: {percentile_limits} :param percentile_shape: {percentile_shape} :param selection: {selection} :param delay: {delay} :return: {return_stat_scalar} """ return self.percentile_approx(expression, 50, binby=binby, limits=limits, shape=shape, percentile_shape=percentile_shape, percentile_limits=percentile_limits, selection=selection, delay=delay)
python
{ "resource": "" }
q267773
DataFrame.plot_widget
test
def plot_widget(self, x, y, z=None, grid=None, shape=256, limits=None, what="count(*)", figsize=None, f="identity", figure_key=None, fig=None, axes=None, xlabel=None, ylabel=None, title=None, show=True, selection=[None, True], colormap="afmhot", grid_limits=None, normalize="normalize", grid_before=None, what_kwargs={}, type="default", scales=None, tool_select=False, bq_cleanup=True, backend="bqplot", **kwargs): """Viz 1d, 2d or 3d in a Jupyter notebook .. note:: This API is not fully settled and may change in the future Example: >>> df.plot_widget(df.x, df.y, backend='bqplot') >>> df.plot_widget(df.pickup_longitude, df.pickup_latitude, backend='ipyleaflet') :param backend: Widget backend to use: 'bqplot', 'ipyleaflet', 'ipyvolume', 'matplotlib' """ import vaex.jupyter.plot backend = vaex.jupyter.plot.create_backend(backend) cls = vaex.jupyter.plot.get_type(type) x = _ensure_strings_from_expressions(x) y = _ensure_strings_from_expressions(y) z = _ensure_strings_from_expressions(z) for name in 'vx vy vz'.split(): if name in kwargs: kwargs[name] = _ensure_strings_from_expressions(kwargs[name]) plot2d = cls(backend=backend, dataset=self, x=x, y=y, z=z, grid=grid, shape=shape, limits=limits, what=what, f=f, figure_key=figure_key, fig=fig, selection=selection, grid_before=grid_before, grid_limits=grid_limits, normalize=normalize, colormap=colormap, what_kwargs=what_kwargs, **kwargs) if show: plot2d.show() return plot2d
python
{ "resource": "" }
q267774
DataFrame.healpix_count
test
def healpix_count(self, expression=None, healpix_expression=None, healpix_max_level=12, healpix_level=8, binby=None, limits=None, shape=default_shape, delay=False, progress=None, selection=None): """Count non missing value for expression on an array which represents healpix data. :param expression: Expression or column for which to count non-missing values, or None or '*' for counting the rows :param healpix_expression: {healpix_max_level} :param healpix_max_level: {healpix_max_level} :param healpix_level: {healpix_level} :param binby: {binby}, these dimension follow the first healpix dimension. :param limits: {limits} :param shape: {shape} :param selection: {selection} :param delay: {delay} :param progress: {progress} :return: """ # if binby is None: import healpy as hp if healpix_expression is None: if self.ucds.get("source_id", None) == 'meta.id;meta.main': # we now assume we have gaia data healpix_expression = "source_id/34359738368" if healpix_expression is None: raise ValueError("no healpix_expression given, and was unable to guess") reduce_level = healpix_max_level - healpix_level NSIDE = 2**healpix_level nmax = hp.nside2npix(NSIDE) scaling = 4**reduce_level expr = "%s/%s" % (healpix_expression, scaling) binby = [expr] + ([] if binby is None else _ensure_list(binby)) shape = (nmax,) + _expand_shape(shape, len(binby) - 1) epsilon = 1. / scaling / 2 limits = [[-epsilon, nmax - epsilon]] + ([] if limits is None else limits) return self.count(expression, binby=binby, limits=limits, shape=shape, delay=delay, progress=progress, selection=selection)
python
{ "resource": "" }
q267775
DataFrame.healpix_plot
test
def healpix_plot(self, healpix_expression="source_id/34359738368", healpix_max_level=12, healpix_level=8, what="count(*)", selection=None, grid=None, healpix_input="equatorial", healpix_output="galactic", f=None, colormap="afmhot", grid_limits=None, image_size=800, nest=True, figsize=None, interactive=False, title="", smooth=None, show=False, colorbar=True, rotation=(0, 0, 0), **kwargs): """Viz data in 2d using a healpix column. :param healpix_expression: {healpix_max_level} :param healpix_max_level: {healpix_max_level} :param healpix_level: {healpix_level} :param what: {what} :param selection: {selection} :param grid: {grid} :param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic". :param healpix_output: Plot in "equatorial", "galactic" or "ecliptic". :param f: function to apply to the data :param colormap: matplotlib colormap :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid))) :param image_size: size for the image that healpy uses for rendering :param nest: If the healpix data is in nested (True) or ring (False) :param figsize: If given, modify the matplotlib figure size. Example (14,9) :param interactive: (Experimental, uses healpy.mollzoom is True) :param title: Title of figure :param smooth: apply gaussian smoothing, in degrees :param show: Call matplotlib's show (True) or not (False, defaut) :param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees. 
:return: """ # plot_level = healpix_level #healpix_max_level-reduce_level import healpy as hp import pylab as plt if grid is None: reduce_level = healpix_max_level - healpix_level NSIDE = 2**healpix_level nmax = hp.nside2npix(NSIDE) # print nmax, np.sqrt(nmax) scaling = 4**reduce_level # print nmax epsilon = 1. / scaling / 2 grid = self._stat(what=what, binby="%s/%s" % (healpix_expression, scaling), limits=[-epsilon, nmax - epsilon], shape=nmax, selection=selection) if grid_limits: grid_min, grid_max = grid_limits else: grid_min = grid_max = None f_org = f f = _parse_f(f) if smooth: if nest: grid = hp.reorder(grid, inp="NEST", out="RING") nest = False # grid[np.isnan(grid)] = np.nanmean(grid) grid = hp.smoothing(grid, sigma=np.radians(smooth)) fgrid = f(grid) coord_map = dict(equatorial='C', galactic='G', ecliptic="E") fig = plt.gcf() if figsize is not None: fig.set_size_inches(*figsize) what_label = what if f_org: what_label = f_org + " " + what_label f = hp.mollzoom if interactive else hp.mollview with warnings.catch_warnings(): warnings.simplefilter("ignore") coord = coord_map[healpix_input], coord_map[healpix_output] if coord_map[healpix_input] == coord_map[healpix_output]: coord = None f(fgrid, unit=what_label, rot=rotation, nest=nest, title=title, coord=coord, cmap=colormap, hold=True, xsize=image_size, min=grid_min, max=grid_max, cbar=colorbar, **kwargs) if show: plt.show()
python
{ "resource": "" }
q267776
DataFrame.plot3d
test
def plot3d(self, x, y, z, vx=None, vy=None, vz=None, vwhat=None, limits=None, grid=None, what="count(*)", shape=128, selection=[None, True], f=None, vcount_limits=None, smooth_pre=None, smooth_post=None, grid_limits=None, normalize="normalize", colormap="afmhot", figure_key=None, fig=None, lighting=True, level=[0.1, 0.5, 0.9], opacity=[0.01, 0.05, 0.1], level_width=0.1, show=True, **kwargs): """Use at own risk, requires ipyvolume""" import vaex.ext.ipyvolume # vaex.ext.ipyvolume. cls = vaex.ext.ipyvolume.PlotDefault plot3d = cls(df=self, x=x, y=y, z=z, vx=vx, vy=vy, vz=vz, grid=grid, shape=shape, limits=limits, what=what, f=f, figure_key=figure_key, fig=fig, selection=selection, smooth_pre=smooth_pre, smooth_post=smooth_post, grid_limits=grid_limits, vcount_limits=vcount_limits, normalize=normalize, colormap=colormap, **kwargs) if show: plot3d.show() return plot3d
python
{ "resource": "" }
q267777
DataFrame.dtype
test
def dtype(self, expression, internal=False):
    """Return the numpy dtype for the given expression; if it is not a column,
    the first row is evaluated to determine the dtype.

    :param expression: column name or expression string
    :param internal: when True, return the raw numpy dtype without mapping
        string-like dtypes to the vaex string type
    """
    expression = _ensure_string_from_expression(expression)
    # variables are always treated as 64-bit floats
    if expression in self.variables:
        return np.float64(1).dtype
    # take a one-element sample to inspect its dtype
    if expression in self.columns.keys():
        sample = self.columns[expression][0:1]
    else:
        sample = self.evaluate(expression, 0, 1, filtered=False)
    dtype = sample.dtype
    if internal or dtype == str_type:
        return dtype
    # unicode/bytes arrays are reported as the vaex string type
    if dtype.kind in 'US':
        return str_type
    # object arrays holding strings also count as string columns
    if dtype.kind == 'O' and isinstance(sample[0], six.string_types):
        return str_type
    return dtype
python
{ "resource": "" }
q267778
DataFrame.get_private_dir
test
def get_private_dir(self, create=False):
    """Each DataFrame has a directory where files are stored for metadata etc.

    Example

    >>> import vaex
    >>> ds = vaex.example()
    >>> ds.get_private_dir()
    '/Users/users/breddels/.vaex/dfs/_Users_users_breddels_vaex-testing_data_helmi-dezeeuw-2000-10p.hdf5'

    :param bool create: if True, it will create the directory if it does not exist
    """
    if self.is_local():
        # derive a filesystem-safe, unique name from the file path
        name = os.path.abspath(self.path).replace(os.path.sep, "_")[:250]  # should not be too long for most os'es
        name = name.replace(":", "_")  # for windows drive names
    else:
        # remote DataFrames are keyed by server location instead of a path
        server = self.server
        name = "%s_%s_%s_%s" % (server.hostname, server.port, server.base_path.replace("/", "_"), self.name)
    dir = os.path.join(vaex.utils.get_private_dir(), "dfs", name)
    if create and not os.path.exists(dir):
        os.makedirs(dir)
    return dir
python
{ "resource": "" }
q267779
DataFrame.state_get
test
def state_get(self):
    """Return the internal state of the DataFrame in a dictionary

    Example:

    >>> import vaex
    >>> df = vaex.from_scalars(x=1, y=2)
    >>> df['r'] = (df.x**2 + df.y**2)**0.5
    >>> df.state_get()
    {'active_range': [0, 1],
     'column_names': ['x', 'y', 'r'],
     'description': None,
     'descriptions': {},
     'functions': {},
     'renamed_columns': [],
     'selections': {'__filter__': None},
     'ucds': {},
     'units': {},
     'variables': {},
     'virtual_columns': {'r': '(((x ** 2) + (y ** 2)) ** 0.5)'}}
    """
    virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
    # units are serialized as strings; ucds only for virtual columns/variables
    units = {key: str(value) for key, value in self.units.items()}
    ucds = {key: value for key, value in self.ucds.items() if key in virtual_names}
    descriptions = {key: value for key, value in self.descriptions.items()}
    import vaex.serialize

    def check(key, value):
        # warn (and drop) functions that cannot be serialized instead of failing
        if not vaex.serialize.can_serialize(value.f):
            warnings.warn('Cannot serialize function for virtual column {} (use vaex.serialize.register)'.format(key))
            return False
        return True

    def clean(value):
        return vaex.serialize.to_dict(value.f)
    functions = {key: clean(value) for key, value in self.functions.items() if check(key, value)}
    virtual_columns = {key: value for key, value in self.virtual_columns.items()}
    # a selection may be None (cleared); keep the key so state_set restores it
    selections = {name: self.get_selection(name) for name, history in self.selection_histories.items()}
    selections = {name: selection.to_dict() if selection is not None else None for name, selection in selections.items()}
    state = dict(virtual_columns=virtual_columns,
                 column_names=self.column_names,
                 renamed_columns=self._renamed_columns,
                 variables=self.variables,
                 functions=functions,
                 selections=selections,
                 ucds=ucds,
                 units=units,
                 descriptions=descriptions,
                 description=self.description,
                 active_range=[self._index_start, self._index_end])
    return state
python
{ "resource": "" }
q267780
DataFrame.state_set
test
def state_set(self, state, use_active_range=False):
    """Sets the internal state of the df

    Example:

    >>> import vaex
    >>> df = vaex.from_scalars(x=1, y=2)
    >>> df['r'] = (df.x**2 + df.y**2)**0.5
    >>> state = df.state_get()
    >>> df2 = vaex.from_scalars(x=3, y=4)
    >>> df2.state_set(state)  # now the virtual functions are 'copied'
    >>> df2
      #    x    y    r
      0    3    4    5

    :param state: dict as returned by :meth:`DataFrame.state_get`.
    :param bool use_active_range: Whether to use the active range or not.
    """
    self.description = state['description']
    if use_active_range:
        self._index_start, self._index_end = state['active_range']
    self._length_unfiltered = self._index_end - self._index_start
    # replay renames first so later lookups use the new names
    if 'renamed_columns' in state:
        for old, new in state['renamed_columns']:
            self._rename(old, new)
    for name, value in state['functions'].items():
        self.add_function(name, vaex.serialize.from_dict(value))
    if 'column_names' in state:
        # we clear all columns, and add them later on, since otherwise self[name] = ... will try
        # to rename the columns (which is unsupported for remote dfs)
        self.column_names = []
        self.virtual_columns = collections.OrderedDict()
        for name, value in state['virtual_columns'].items():
            self[name] = self._expr(value)
            # self._save_assign_expression(name)
        self.column_names = state['column_names']
    else:
        # old behaviour: state has no explicit column order
        self.virtual_columns = collections.OrderedDict()
        for name, value in state['virtual_columns'].items():
            self[name] = self._expr(value)
    self.variables = state['variables']
    import astropy  # TODO: make this dep optional?
    units = {key: astropy.units.Unit(value) for key, value in state["units"].items()}
    self.units.update(units)
    for name, selection_dict in state['selections'].items():
        # TODO: make selection use the vaex.serialize framework
        if selection_dict is None:
            selection = None
        else:
            selection = selections.selection_from_dict(selection_dict)
        self.set_selection(selection, name=name)
python
{ "resource": "" }
q267781
DataFrame.remove_virtual_meta
test
def remove_virtual_meta(self):
    """Removes the file with the virtual column etc, it does not change the current virtual columns etc.

    Best-effort: failures to remove the file or directory are logged, not raised.
    The private directory itself is removed only when it is left empty.
    """
    dir = self.get_private_dir(create=True)
    path = os.path.join(dir, "virtual_meta.yaml")
    try:
        if os.path.exists(path):
            os.remove(path)
        if not os.listdir(dir):
            os.rmdir(dir)
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
        # keep the best-effort behavior but only for ordinary exceptions
        logger.exception("error while trying to remove %s or %s", path, dir)
python
{ "resource": "" }
q267782
DataFrame.write_virtual_meta
test
def write_virtual_meta(self):
    """Write virtual columns, variables and their ucd, description and units to disk.

    The default implementation writes a ``virtual_meta.yaml`` file in the
    directory given by :func:`DataFrame.get_private_dir`, so the information
    survives between sessions (it is read back by
    :func:`DataFrame.update_virtual_meta` on open). Other implementations may
    store this in the DataFrame file itself.

    Note: opening a DataFrame twice may result in corruption of this file.
    """
    path = os.path.join(self.get_private_dir(create=True), "virtual_meta.yaml")
    # only metadata belonging to virtual columns or variables is persisted here
    virtual_names = list(self.virtual_columns.keys()) + list(self.variables.keys())
    units = {name: str(unit) for name, unit in self.units.items() if name in virtual_names}
    ucds = {name: ucd for name, ucd in self.ucds.items() if name in virtual_names}
    descriptions = {name: text for name, text in self.descriptions.items() if name in virtual_names}
    meta_info = dict(virtual_columns=self.virtual_columns,
                     variables=self.variables,
                     ucds=ucds,
                     units=units,
                     descriptions=descriptions)
    vaex.utils.write_json_or_yaml(path, meta_info)
python
{ "resource": "" }
q267783
DataFrame.write_meta
test
def write_meta(self):
    """Write all meta data: ucd, description and units.

    The default implementation writes a ``meta.yaml`` file in the directory
    given by :func:`DataFrame.get_private_dir` (the vaex hdf5 implementation
    stores it in the file itself instead). The data is read back by
    :func:`DataFrame.update_meta` when the file is opened again.

    Note: opening a DataFrame twice may result in corruption of this file.
    """
    # raise NotImplementedError
    private_dir = self.get_private_dir(create=True)
    path = os.path.join(private_dir, "meta.yaml")
    # units are stored as strings so they survive a yaml/json round-trip
    serialized_units = {name: str(unit) for name, unit in self.units.items()}
    meta_info = dict(description=self.description,
                     ucds=self.ucds,
                     units=serialized_units,
                     descriptions=self.descriptions)
    vaex.utils.write_json_or_yaml(path, meta_info)
python
{ "resource": "" }
q267784
DataFrame.subspaces
test
def subspaces(self, expressions_list=None, dimensions=None, exclude=None, **kwargs):
    """Generate a Subspaces object, based on a custom list of expressions or all possible combinations based on dimension

    :param expressions_list: list of list of expressions, where the inner list defines the subspace
    :param dimensions: if given, generates a subspace with all possible combinations for that dimension
    :param exclude: expressions to skip: a callable taking a combination, a single
        expression string, or a list/tuple of strings and/or sequences of strings
        (a sequence excludes any combination it is a subset of)
    """
    if dimensions is not None:
        expressions_list = list(itertools.combinations(self.get_column_names(), dimensions))
        if exclude is not None:
            import six

            def excluded(expressions):
                # return True when this combination of expressions should be dropped
                if callable(exclude):
                    return exclude(expressions)
                elif isinstance(exclude, six.string_types):
                    return exclude in expressions
                elif isinstance(exclude, (list, tuple)):
                    for e in exclude:
                        if isinstance(e, six.string_types):
                            if e in expressions:
                                return True
                        elif isinstance(e, (list, tuple)):
                            if set(e).issubset(expressions):
                                return True
                        else:
                            raise ValueError("elements of exclude should contain a string or a sequence of strings")
                else:
                    raise ValueError("exclude should contain a string, a sequence of strings, or should be a callable")
                return False
            # test if any of the elements of exclude are a subset of the expression
            expressions_list = [expr for expr in expressions_list if not excluded(expr)]
    logger.debug("expression list generated: %r", expressions_list)
    import vaex.legacy
    return vaex.legacy.Subspaces([self(*expressions, **kwargs) for expressions in expressions_list])
python
{ "resource": "" }
q267785
DataFrame.set_variable
test
def set_variable(self, name, expression_or_value, write=True):
    """Set the variable to an expression or value defined by expression_or_value.

    Example

    >>> df.set_variable("a", 2.)
    >>> df.set_variable("b", "a**2")
    >>> df.get_variable("b")
    'a**2'
    >>> df.evaluate_variable("b")
    4.0

    :param name: Name of the variable
    :param write: write variable to meta file. NOTE(review): this flag is
        accepted but not acted upon in this implementation — confirm whether
        persisting to the meta file should happen here.
    :param expression_or_value: value or expression
    """
    self.variables[name] = expression_or_value
python
{ "resource": "" }
q267786
DataFrame.evaluate_variable
test
def evaluate_variable(self, name):
    """Evaluate the variable given by name and return its value.

    A string value is evaluated as an expression against the other variables;
    any other value is returned as-is.
    """
    stored = self.variables[name]
    if not isinstance(stored, six.string_types):
        return stored
    # TODO: this does not allow more than one level deep variable,
    # like a depends on b, b on c, c is a const
    return eval(stored, expression_namespace, self.variables)
python
{ "resource": "" }
q267787
DataFrame._evaluate_selection_mask
test
def _evaluate_selection_mask(self, name="default", i1=None, i2=None, selection=None, cache=False):
    """Internal use, ignores the filter.

    :param name: name of the selection to evaluate
    :param i1: start row index (inclusive); defaults to 0
    :param i2: end row index (exclusive); defaults to len(self)
    :param selection: optional selection object overriding the named one
    :param cache: whether the selection scope may use its cache
    :return: boolean mask for rows [i1, i2)
    """
    # use explicit None checks: the previous `i1 or 0` / `i2 or len(self)`
    # silently turned an explicit i2=0 (empty range) into the full range
    i1 = 0 if i1 is None else i1
    i2 = len(self) if i2 is None else i2
    scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache)
    return scope.evaluate(name)
python
{ "resource": "" }
q267788
DataFrame.to_dict
test
def to_dict(self, column_names=None, selection=None, strings=True, virtual=False):
    """Return a dict mapping column name to the ndarray with the evaluated data.

    :param column_names: list of column names to export; when None,
        DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :return: dict
    """
    items = self.to_items(column_names=column_names, selection=selection,
                          strings=strings, virtual=virtual)
    return dict(items)
python
{ "resource": "" }
q267789
DataFrame.to_copy
test
def to_copy(self, column_names=None, selection=None, strings=True, virtual=False, selections=True):
    """Return a copy of the DataFrame, if selection is None, it does not copy the data, it just has a reference

    :param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param selections: copy selections to a new DataFrame
    :return: DataFrame
    """
    if column_names:
        column_names = _ensure_strings_from_expressions(column_names)
    # materialize only the real columns; virtual columns are re-added below as expressions
    df = vaex.from_items(*self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=False))
    if virtual:
        for name, value in self.virtual_columns.items():
            df.add_virtual_column(name, value)
    if selections:
        # the filter selection does not need copying
        for key, value in self.selection_histories.items():
            if key != FILTER_SELECTION_NAME:
                df.selection_histories[key] = list(value)
        for key, value in self.selection_history_indices.items():
            if key != FILTER_SELECTION_NAME:
                df.selection_history_indices[key] = value
    df.functions.update(self.functions)
    df.copy_metadata(self)
    return df
python
{ "resource": "" }
q267790
DataFrame.to_pandas_df
test
def to_pandas_df(self, column_names=None, selection=None, strings=True, virtual=False, index_name=None):
    """Return a pandas DataFrame containing the ndarray corresponding to the evaluated data.

    If index_name is given, that column is used for the index of the dataframe.

    Example

    >>> df_pandas = df.to_pandas_df(["x", "y", "z"])
    >>> df_copy = vaex.from_pandas(df_pandas)

    :param column_names: list of column names to export; when None,
        DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param index_name: if given, this column becomes the index of the pandas DataFrame
    :return: pandas.DataFrame object
    """
    import pandas as pd
    data = self.to_dict(column_names=column_names, selection=selection, strings=strings, virtual=virtual)
    index = None
    if index_name is not None:
        # prefer the already-exported column (popped so it does not appear twice);
        # otherwise evaluate the expression separately
        try:
            index = data.pop(index_name)
        except KeyError:
            index = self.evaluate(index_name, selection=selection)
    df = pd.DataFrame(data=data, index=index)
    if index is not None:
        df.index.name = index_name
    return df
python
{ "resource": "" }
q267791
DataFrame.to_arrow_table
test
def to_arrow_table(self, column_names=None, selection=None, strings=True, virtual=False):
    """Returns an arrow Table object containing the arrays corresponding to the evaluated data

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :return: pyarrow.Table object
    """
    # deferred import: vaex_arrow (and pyarrow) is an optional dependency
    from vaex_arrow.convert import arrow_table_from_vaex_df
    return arrow_table_from_vaex_df(self, column_names, selection, strings, virtual)
python
{ "resource": "" }
q267792
DataFrame.to_astropy_table
test
def to_astropy_table(self, column_names=None, selection=None, strings=True, virtual=False, index=None):
    """Returns a astropy table object containing the ndarrays corresponding to the evaluated data

    :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
    :param selection: {selection}
    :param strings: argument passed to DataFrame.get_column_names when column_names is None
    :param virtual: argument passed to DataFrame.get_column_names when column_names is None
    :param index: NOTE(review): accepted but not used in this implementation — confirm intended behavior
    :return: astropy.table.Table object
    """
    from astropy.table import Table, Column, MaskedColumn
    meta = dict()
    meta["name"] = self.name
    meta["description"] = self.description

    table = Table(meta=meta)
    for name, data in self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
        if self.dtype(name) == str_type:
            # for astropy we convert it to unicode, it seems to ignore object type
            data = np.array(data).astype('U')
        # per-column meta (the `meta` name is reused; the table-level dict was consumed above)
        meta = dict()
        if name in self.ucds:
            meta["ucd"] = self.ucds[name]
        # masked arrays need astropy's MaskedColumn to preserve the mask
        if np.ma.isMaskedArray(data):
            cls = MaskedColumn
        else:
            cls = Column
        table[name] = cls(data, unit=self.unit(name), description=self.descriptions.get(name), meta=meta)
    return table
python
{ "resource": "" }
q267793
DataFrame.add_column
test
def add_column(self, name, f_or_array):
    """Add an in memory array as a column.

    :param name: name of the new column
    :param f_or_array: numpy ndarray or Column instance; its length must match
        the (unfiltered) length of the DataFrame
    :raises ValueError: when the array length does not match, or when
        f_or_array is not an array (functions are not implemented)
    """
    if isinstance(f_or_array, (np.ndarray, Column)):
        data = ar = f_or_array
        # it can be None when we have an 'empty' DataFrameArrays;
        # the first column added then defines the DataFrame length
        if self._length_original is None:
            self._length_unfiltered = _len(data)
            self._length_original = _len(data)
            self._index_end = self._length_unfiltered
        if _len(ar) != self.length_original():
            if self.filtered:
                # give a better warning to avoid confusion
                if len(self) == len(ar):
                    raise ValueError("Array is of length %s, while the length of the DataFrame is %s due to the filtering, the (unfiltered) length is %s." % (len(ar), len(self), self.length_unfiltered()))
            raise ValueError("array is of length %s, while the length of the DataFrame is %s" % (len(ar), self.length_original()))
        # assert self.length_unfiltered() == len(data), "columns should be of equal length, length should be %d, while it is %d" % ( self.length_unfiltered(), len(data))
        self.columns[name] = f_or_array
        if name not in self.column_names:
            self.column_names.append(name)
    else:
        raise ValueError("functions not yet implemented")
    # make the column reachable as df.<name>
    self._save_assign_expression(name, Expression(self, name))
python
{ "resource": "" }
q267794
DataFrame.rename_column
test
def rename_column(self, name, new_name, unique=False, store_in_state=True):
    """Rename a column; note this only changes the in-memory name, it is not reflected on disk.

    :param name: current column name
    :param new_name: desired new name (may be adjusted to a valid/unique name)
    :param unique: when True, make the new name unique among existing columns
    :param store_in_state: record the rename so it is part of the DataFrame state
    :return: the actual new name used
    """
    used_names = list(self) if unique else []
    new_name = vaex.utils.find_valid_name(new_name, used=used_names)
    data = self.columns.get(name)
    if data is None:
        # not a materialized column, so it must be a virtual column
        self.virtual_columns[new_name] = self.virtual_columns.pop(name)
    else:
        del self.columns[name]
        self.column_names[self.column_names.index(name)] = new_name
        self.columns[new_name] = data
    if store_in_state:
        self._renamed_columns.append((name, new_name))
    # carry the per-column metadata over to the new name
    for meta in (self.ucds, self.units, self.descriptions):
        if name in meta:
            meta[new_name] = meta.pop(name)
    return new_name
python
{ "resource": "" }
q267795
DataFrame.add_virtual_columns_cartesian_to_polar
test
def add_virtual_columns_cartesian_to_polar(self, x="x", y="y", radius_out="r_polar", azimuth_out="phi_polar", propagate_uncertainties=False, radians=False):
    """Convert cartesian to polar coordinates.

    :param x: expression for x
    :param y: expression for y
    :param radius_out: name for the virtual column for the radius
    :param azimuth_out: name for the virtual column for the azimuth angle
    :param propagate_uncertainties: {propagate_uncertainties}
    :param radians: if True, azimuth is in radians, defaults to degrees
    :return:
    """
    x = self[x]
    y = self[y]
    self[radius_out] = np.sqrt(x**2 + y**2)
    phi = np.arctan2(y, x)
    if not radians:
        # arctan2 yields radians; convert to degrees unless radians requested
        # (removed the dead, unused `to_degrees` string left over from an
        # earlier string-expression-based implementation)
        phi = phi * 180 / np.pi
    self[azimuth_out] = phi
    if propagate_uncertainties:
        self.propagate_uncertainties([self[radius_out], self[azimuth_out]])
python
{ "resource": "" }
q267796
DataFrame.add_virtual_columns_cartesian_velocities_to_spherical
test
def add_virtual_columns_cartesian_velocities_to_spherical(self, x="x", y="y", z="z", vx="vx", vy="vy", vz="vz", vr="vr", vlong="vlong", vlat="vlat", distance=None): """Concert velocities from a cartesian to a spherical coordinate system TODO: errors :param x: name of x column (input) :param y: y :param z: z :param vx: vx :param vy: vy :param vz: vz :param vr: name of the column for the radial velocity in the r direction (output) :param vlong: name of the column for the velocity component in the longitude direction (output) :param vlat: name of the column for the velocity component in the latitude direction, positive points to the north pole (output) :param distance: Expression for distance, if not given defaults to sqrt(x**2+y**2+z**2), but if this column already exists, passing this expression may lead to a better performance :return: """ # see http://www.astrosurf.com/jephem/library/li110spherCart_en.htm if distance is None: distance = "sqrt({x}**2+{y}**2+{z}**2)".format(**locals()) self.add_virtual_column(vr, "({x}*{vx}+{y}*{vy}+{z}*{vz})/{distance}".format(**locals())) self.add_virtual_column(vlong, "-({vx}*{y}-{x}*{vy})/sqrt({x}**2+{y}**2)".format(**locals())) self.add_virtual_column(vlat, "-({z}*({x}*{vx}+{y}*{vy}) - ({x}**2+{y}**2)*{vz})/( {distance}*sqrt({x}**2+{y}**2) )".format(**locals()))
python
{ "resource": "" }
q267797
DataFrame.add_virtual_columns_cartesian_velocities_to_polar
test
def add_virtual_columns_cartesian_velocities_to_polar(self, x="x", y="y", vx="vx", radius_polar=None, vy="vy", vr_out="vr_polar", vazimuth_out="vphi_polar", propagate_uncertainties=False,):
    """Convert cartesian velocities to polar velocities.

    :param x: expression for the x position
    :param y: expression for the y position
    :param vx: expression for the x velocity
    :param radius_polar: optional expression for the radius; passing an existing
        column may lead to better performance
    :param vy: expression for the y velocity
    :param vr_out: name of the output radial-velocity column
    :param vazimuth_out: name of the output azimuthal-velocity column
    :param propagate_uncertainties: {propagate_uncertainties}
    """
    xe, ye = self._expr(x), self._expr(y)
    vxe, vye = self._expr(vx), self._expr(vy)
    if radius_polar is None:
        radius_polar = np.sqrt(xe**2 + ye**2)
    r = self._expr(radius_polar)
    # project the velocity vector onto the radial and azimuthal unit vectors
    self[vr_out] = (xe * vxe + ye * vye) / r
    self[vazimuth_out] = (xe * vye - ye * vxe) / r
    if propagate_uncertainties:
        self.propagate_uncertainties([self[vr_out], self[vazimuth_out]])
python
{ "resource": "" }
q267798
DataFrame.add_virtual_columns_polar_velocities_to_cartesian
test
def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
    """Convert cylindrical polar velocities to Cartesian.

    :param x: expression for the x position
    :param y: expression for the y position
    :param azimuth: optional expression for the azimuth in degrees; passing an
        existing column may lead to better performance
    :param vr: expression for the radial velocity
    :param vazimuth: expression for the azimuthal velocity
    :param vx_out: name of the output x-velocity column
    :param vy_out: name of the output y-velocity column
    :param propagate_uncertainties: {propagate_uncertainties}
    """
    xe = self._expr(x)
    ye = self._expr(y)
    vr_expr = self._expr(vr)
    vphi_expr = self._expr(vazimuth)
    if azimuth is None:
        # derive the azimuth (already in radians) from the positions
        phi = np.arctan2(ye, xe)
    else:
        # a user-supplied azimuth is expected in degrees
        phi = np.deg2rad(self._expr(azimuth))
    phi = self._expr(phi)
    # rotate the (vr, vphi) vector into cartesian components
    self[vx_out] = vr_expr * np.cos(phi) - vphi_expr * np.sin(phi)
    self[vy_out] = vr_expr * np.sin(phi) + vphi_expr * np.cos(phi)
    if propagate_uncertainties:
        self.propagate_uncertainties([self[vx_out], self[vy_out]])
python
{ "resource": "" }
q267799
DataFrame.add_virtual_columns_rotation
test
def add_virtual_columns_rotation(self, x, y, xnew, ynew, angle_degrees, propagate_uncertainties=False):
    """Rotation in 2d.

    :param str x: Name/expression of x column
    :param str y: idem for y
    :param str xnew: name of transformed x column
    :param str ynew: name of transformed y column
    :param float angle_degrees: rotation in degrees, anti clockwise
    :return:
    """
    x = _ensure_string_from_expression(x)
    y = _ensure_string_from_expression(y)
    theta = np.radians(angle_degrees)
    # standard 2d anti-clockwise rotation matrix
    matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    m = matrix_name = x + "_" + y + "_rot"
    # store the matrix entries as DataFrame variables <m>_ij so the virtual
    # column expressions below can reference them by name
    for i in range(2):
        for j in range(2):
            self.set_variable(matrix_name + "_%d%d" % (i, j), matrix[i, j].item())
    # NOTE: the expression strings pull `m`, `x` and `y` from locals(),
    # so these local names must not be renamed
    self[xnew] = self._expr("{m}_00 * {x} + {m}_01 * {y}".format(**locals()))
    self[ynew] = self._expr("{m}_10 * {x} + {m}_11 * {y}".format(**locals()))
    if propagate_uncertainties:
        self.propagate_uncertainties([self[xnew], self[ynew]])
python
{ "resource": "" }