text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_script(self, script_name, keys=None, args=None): """ Execute a walrus script with the given arguments. :param script_name: The base name of the script to execute. :param list keys: Keys referenced by the script. :param list args: Arguments passed in to the script. :returns: Return value of script. .. note:: Redis scripts require two parameters, ``keys`` and ``args``, which are referenced in lua as ``KEYS`` and ``ARGV``. """
def run_script(self, script_name, keys=None, args=None):
    """Execute a registered walrus script.

    :param script_name: The base name of the script to execute.
    :param list keys: Keys referenced by the script (lua ``KEYS``).
    :param list args: Arguments passed to the script (lua ``ARGV``).
    :returns: Return value of the script.
    """
    script = self._scripts[script_name]
    return script(keys, args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rate_limit(self, name, limit=5, per=60, debug=False): """ Rate limit implementation. Allows up to `limit` of events every `per` seconds. See :ref:`rate-limit` for more information. """
def rate_limit(self, name, limit=5, per=60, debug=False):
    """Build a rate limiter allowing up to `limit` events every `per` seconds.

    :returns: A ``RateLimit`` instance bound to this database.
    """
    return RateLimit(self, name, limit, per, debug)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cas(self, key, value, new_value): """ Perform an atomic compare-and-set on the value in "key", using a prefix match on the provided value. """
def cas(self, key, value, new_value):
    """Atomic compare-and-set on ``key``, using a prefix match on ``value``.

    :returns: Return value of the underlying ``cas`` script.
    """
    script_keys = [key]
    script_args = [value, new_value]
    return self.run_script('cas', keys=script_keys, args=script_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def listener(self, channels=None, patterns=None, is_async=False): """ Decorator for wrapping functions used to listen for Redis pub-sub messages. The listener will listen until the decorated function raises a ``StopIteration`` exception. :param list channels: Channels to listen on. :param list patterns: Patterns to match. :param bool is_async: Whether to start the listener in a separate thread. """
def listener(self, channels=None, patterns=None, is_async=False):
    """Decorator for functions that consume Redis pub-sub messages.

    The wrapped function is invoked once per received message (keyword
    arguments are the message dict) and listens until it raises
    ``StopIteration``.

    :param list channels: Channels to subscribe to.
    :param list patterns: Patterns to psubscribe to.
    :param bool is_async: If true, run the listener in a worker thread and
        return the thread instead of blocking.
    """
    def decorator(fn):
        channel_list = channels or []
        pattern_list = patterns or []

        @wraps(fn)
        def inner():
            pubsub = self.pubsub()

            def listen():
                for channel in channel_list:
                    pubsub.subscribe(channel)
                for pattern in pattern_list:
                    pubsub.psubscribe(pattern)
                for message in pubsub.listen():
                    try:
                        # Return value intentionally ignored (removed the
                        # unused `ret` local); StopIteration ends listening.
                        fn(**message)
                    except StopIteration:
                        pubsub.close()
                        break

            if is_async:
                worker = threading.Thread(target=listen)
                worker.start()
                return worker
            listen()
        return inner
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stream_log(self, callback, connection_id='monitor'): """ Stream Redis activity one line at a time to the given callback. :param callback: A function that accepts a single argument, the Redis command. """
def stream_log(self, callback, connection_id='monitor'):
    """Stream Redis MONITOR output line-by-line to ``callback``.

    :param callback: Callable receiving each Redis command; returning a
        falsy value stops the stream.
    :param connection_id: Identifier for the pooled connection.
    """
    connection = self.connection_pool.get_connection(connection_id, None)
    connection.send_command('monitor')
    keep_going = True
    while keep_going:
        keep_going = callback(connection.read_response())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_key(self, *parts): """Generate a namespaced key for the given path."""
def make_key(self, *parts):
    """Generate a namespaced key for the given path.

    Parts are decoded, stringified, and joined with the model's
    ``index_separator`` (default ``'.'``).
    """
    sep = getattr(self.model_class, 'index_separator', '.')
    decoded = [str(part) for part in map(decode, parts)]
    return '%s%s' % (self._base_key, sep.join(decoded))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(cls, expression): """ Retrieve the model instance matching the given expression. If the number of matching results is not equal to one, then a ``ValueError`` will be raised. :param expression: A boolean expression to filter by. :returns: The matching :py:class:`Model` instance. :raises: ``ValueError`` if result set size is not 1. """
def get(cls, expression):
    """Retrieve the single model instance matching ``expression``.

    :param expression: A boolean expression to filter by.
    :returns: The matching ``Model`` instance.
    :raises: ``ValueError`` if the result set size is not exactly 1.
    """
    matches = Executor(cls.__database__).execute(expression)
    if len(matches) != 1:
        raise ValueError('Got %s results, expected 1.' % len(matches))
    return cls.load(matches._first_or_any(), convert_key=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(cls, primary_key, convert_key=True): """ Retrieve a model instance by primary key. :param primary_key: The primary key of the model instance. :returns: Corresponding :py:class:`Model` instance. :raises: ``KeyError`` if object with given primary key does not exist. """
def load(cls, primary_key, convert_key=True):
    """Retrieve a model instance by primary key.

    :param primary_key: The primary key of the model instance.
    :param convert_key: If true, translate ``primary_key`` into the
        internal hash key first.
    :returns: Corresponding ``Model`` instance.
    :raises: ``KeyError`` if no object exists for the key.
    """
    if convert_key:
        primary_key = cls._query.get_primary_hash_key(primary_key)
    if not cls.__database__.hash_exists(primary_key):
        raise KeyError('Object not found.')
    raw_data = cls.__database__.hgetall(primary_key)
    if PY3:
        raw_data = decode_dict_keys(raw_data)
    data = {}
    for name, field in cls._fields.items():
        # Container fields live in their own keys, not the hash.
        if isinstance(field, _ContainerField):
            continue
        data[name] = (field.python_value(raw_data[name])
                      if name in raw_data else None)
    return cls(**data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, for_update=False): """ Delete the given model instance. """
def delete(self, for_update=False):
    """Delete this model instance and all of its index entries.

    :param for_update: When true, container fields are preserved so the
        caller can re-save the instance.
    """
    hash_key = self.get_hash_id()
    try:
        original = self.load(hash_key, convert_key=False)
    except KeyError:
        return  # Nothing stored under this key.

    # Drop membership in the `all` index.
    self._query.all_index().remove(hash_key)

    # Drop entries from every secondary index.
    for field in self._indexes:
        for index in field.get_indexes():
            index.remove(original)

    if not for_update:
        for field in self._fields.values():
            if isinstance(field, _ContainerField):
                field._delete(self)

    # Finally remove the hash itself.
    self.__database__.delete(hash_key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def limit(self, key): """ Function to log an event with the given key. If the ``key`` has not exceeded their alotted events, then the function returns ``False`` to indicate that no limit is being imposed. If the ``key`` has exceeded the number of events, then the function returns ``True`` indicating rate-limiting should occur. :param str key: A key identifying the source of the event. :returns: Boolean indicating whether the event should be rate-limited or not. """
def limit(self, key):
    """Log an event for ``key`` and report whether it should be rate-limited.

    :param str key: A key identifying the source of the event.
    :returns: ``True`` if the event exceeds the allotted rate, else ``False``.
    """
    if self._debug:
        return False
    events = self.database.List(self.name + ':' + key)
    is_limited = False
    if len(events) < self._limit:
        events.prepend(str(time.time()))
    else:
        oldest = float(events[-1])
        if time.time() - oldest < self._per:
            is_limited = True
        else:
            events.prepend(str(time.time()))
            # NOTE(review): slice-delete semantics here come from the
            # database List type, not a plain Python list — verify it trims
            # the list down to `_limit` entries.
            del events[:self._limit]
    events.pexpire(int(self._per * 2000))
    return is_limited
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rate_limited(self, key_function=None): """ Function or method decorator that will prevent calls to the decorated function when the number of events has been exceeded for the given time period. It is probably important that you take care to choose an appropiate key function. For instance, if rate-limiting a web-page you might use the requesting user's IP as the key. If the number of allowed events has been exceedd, a ``RateLimitException`` will be raised. :param key_function: Function that accepts the params of the decorated function and returns a string key. If not provided, a hash of the args and kwargs will be used. :returns: If the call is not rate-limited, then the return value will be that of the decorated function. :raises: ``RateLimitException``. """
def rate_limited(self, key_function=None):
    """Decorator raising ``RateLimitException`` when the rate is exceeded.

    :param key_function: Callable mapping the decorated function's
        arguments to a string key; defaults to an md5 of the pickled args.
    :returns: The decorated function's return value when not limited.
    :raises: ``RateLimitException`` when the call is rate-limited.
    """
    if key_function is None:
        def key_function(*args, **kwargs):
            payload = pickle.dumps((args, sorted(kwargs.items())))
            return hashlib.md5(payload).hexdigest()

    def decorator(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            if self.limit(key_function(*args, **kwargs)):
                raise RateLimitException(
                    'Call to %s exceeded %s events in %s seconds.' % (
                        fn.__name__, self._limit, self._per))
            return fn(*args, **kwargs)
        return inner
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_monotonic(df, items=None, increasing=None, strict=False): """ Asserts that the DataFrame is monotonic. Parameters ========== df : Series or DataFrame items : dict mapping columns to conditions (increasing, strict) increasing : None or bool None is either increasing or decreasing. strict : whether the comparison should be strict Returns ======= df : DataFrame """
def is_monotonic(df, items=None, increasing=None, strict=False):
    """Assert that columns of the DataFrame are monotonic.

    Parameters
    ==========
    df : Series or DataFrame
    items : dict mapping columns to (increasing, strict) tuples
    increasing : None or bool
        None accepts either direction.
    strict : bool
        Whether the comparison should be strict.

    Returns
    =======
    df : DataFrame

    Raises
    ======
    AssertionError if any checked column is not monotonic.
    """
    if items is None:
        items = {k: (increasing, strict) for k in df}
    for col, (increasing, strict) in items.items():
        s = pd.Index(df[col])
        # `Index.is_monotonic` was removed in pandas 2.0; use the explicit
        # increasing/decreasing properties instead (same semantics).
        if increasing:
            good = s.is_monotonic_increasing
        elif increasing is None:
            good = s.is_monotonic_increasing or s.is_monotonic_decreasing
        else:
            good = s.is_monotonic_decreasing
        if strict:
            diffs = s.to_series().diff().dropna()
            if increasing:
                good = good and (diffs > 0).all()
            elif increasing is None:
                good = good and ((diffs > 0).all() or (diffs < 0).all())
            else:
                good = good and (diffs < 0).all()
        if not good:
            raise AssertionError
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_shape(df, shape): """ Asserts that the DataFrame is of a known shape. Parameters ========== df : DataFrame shape : tuple (n_rows, n_columns). Use None or -1 if you don't care about a dimension. Returns ======= df : DataFrame """
def is_shape(df, shape):
    """Assert that the DataFrame has the given shape.

    Parameters
    ==========
    df : DataFrame
    shape : tuple
        (n_rows, n_columns); use None or -1 for a don't-care dimension.

    Returns
    =======
    df : DataFrame
    """
    try:
        wildcard = (np.equal(shape, [-1, -1]) |
                    np.equal(shape, [None, None]))
        assert np.all(np.equal(df.shape, shape) | wildcard)
    except AssertionError as e:
        msg = ("Expected shape: {}\n"
               "\t\tActual shape: {}".format(shape, df.shape))
        e.args = (msg,)
        raise
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unique(df, columns=None): """ Asserts that columns in the DataFrame only have unique values. Parameters df : DataFrame columns : list list of columns to restrict the check to. If None, check all columns. Returns ------- df : DataFrame same as the original """
def unique(df, columns=None):
    """Assert that the given columns contain only unique values.

    Parameters
    ----------
    df : DataFrame
    columns : list or None
        Columns to check; all columns when None.

    Returns
    -------
    df : DataFrame (unchanged)
    """
    cols = df.columns if columns is None else columns
    for col in cols:
        if not df[col].is_unique:
            raise AssertionError("Column {!r} contains non-unique values".format(col))
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unique_index(df): """ Assert that the index is unique Parameters ========== df : DataFrame Returns ======= df : DataFrame """
def unique_index(df):
    """Assert that the index of the DataFrame is unique.

    Parameters
    ==========
    df : DataFrame

    Returns
    =======
    df : DataFrame

    Raises
    ======
    AssertionError carrying the duplicated index labels.
    """
    try:
        assert df.index.is_unique
    except AssertionError as e:
        # `Index.get_duplicates()` was removed in pandas 0.25; derive the
        # duplicated labels directly via the `duplicated` mask instead.
        e.args = (df.index[df.index.duplicated()].unique().tolist(),)
        raise
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def within_n_std(df, n=3): """ Assert that every value is within ``n`` standard deviations of its column's mean. Parameters ========== df : DataFame n : int number of standard deviations from the mean Returns ======= df : DataFrame """
def within_n_std(df, n=3):
    """Assert every value lies within ``n`` standard deviations of its
    column's mean.

    Parameters
    ==========
    df : DataFrame
    n : int
        Number of standard deviations from the mean.

    Returns
    =======
    df : DataFrame
    """
    means = df.mean()
    stds = df.std()
    inliers = np.abs(df[means.index] - means) < n * stds
    if not np.all(inliers):
        raise AssertionError(generic.bad_locations(~inliers))
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_dtypes(df, items): """ Assert that a DataFrame has ``dtypes`` Parameters ========== df: DataFrame items: dict mapping of columns to dtype. Returns ======= df : DataFrame """
def has_dtypes(df, items):
    """Assert that columns of the DataFrame have the expected dtypes.

    Parameters
    ==========
    df : DataFrame
    items : dict
        Mapping of column name to expected dtype.

    Returns
    =======
    df : DataFrame
    """
    actual = df.dtypes
    for col, expected in items.items():
        if not actual[col] == expected:
            raise AssertionError(
                "{} has the wrong dtype. Should be ({}), is ({})".format(
                    col, expected, actual[col]))
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def one_to_many(df, unitcol, manycol): """ Assert that a many-to-one relationship is preserved between two columns. For example, a retail store will have have distinct departments, each with several employees. If each employee may only work in a single department, then the relationship of the department to the employees is one to many. Parameters ========== df : DataFrame unitcol : str The column that encapulates the groups in ``manycol``. manycol : str The column that must remain unique in the distict pairs between ``manycol`` and ``unitcol`` Returns ======= df : DataFrame """
def one_to_many(df, unitcol, manycol):
    """Assert a one-to-many relationship between two columns.

    Each distinct value of ``manycol`` may be paired with at most one value
    of ``unitcol`` (e.g. each employee belongs to a single department).

    Parameters
    ==========
    df : DataFrame
    unitcol : str
        The "one" side of the relationship.
    manycol : str
        The "many" side, which must map to a single unit value.

    Returns
    =======
    df : DataFrame
    """
    pairs = df[[manycol, unitcol]].drop_duplicates()
    for value in pairs[manycol].unique():
        if pairs[pairs[manycol] == value].shape[0] > 1:
            raise AssertionError(
                "{} in {} has multiple values for {}".format(value, manycol, unitcol))
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_same_as(df, df_to_compare, **kwargs): """ Assert that two pandas dataframes are the equal Parameters ========== df : pandas DataFrame df_to_compare : pandas DataFrame **kwargs : dict keyword arguments passed through to panda's ``assert_frame_equal`` Returns ======= df : DataFrame """
def is_same_as(df, df_to_compare, **kwargs):
    """Assert that two pandas DataFrames are equal.

    Parameters
    ==========
    df : DataFrame
    df_to_compare : DataFrame
    **kwargs : dict
        Passed through to ``pandas.testing.assert_frame_equal``.

    Returns
    =======
    df : DataFrame
    """
    # `pandas.util.testing` is deprecated; use the supported
    # `pandas.testing` location instead.
    from pandas.testing import assert_frame_equal
    try:
        assert_frame_equal(df, df_to_compare, **kwargs)
    except AssertionError as exc:
        # `six.raise_from` is unnecessary on Python 3 — native `raise ...
        # from` chains the cause identically.
        raise AssertionError("DataFrames are not equal") from exc
    return df
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ensure_pyplot(self): """ Ensures that pyplot has been imported into the embedded IPython shell. Also, makes sure to set the backend appropriately if not set already. """
def ensure_pyplot(self):
    """Ensure pyplot is imported into the embedded IPython shell.

    Reached when the @figure pseudo-decorator was used, so a backend may
    not have been selected; in that case fall back to 'agg' rather than
    raising.
    """
    if self._pyplot_imported:
        return
    if 'matplotlib.backends' not in sys.modules:
        # ipython_matplotlib was None and ipython_execlines did not set a
        # backend; pick a headless default instead of erroring out.
        import matplotlib
        matplotlib.use('agg')
    # Always import pyplot into the embedded shell.
    self.process_input_line('import matplotlib.pyplot as plt',
                            store_history=False)
    self._pyplot_imported = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fetch_remote_json(service_url, params=None, use_http_post=False): """Retrieves a JSON object from a URL."""
def _fetch_remote_json(service_url, params=None, use_http_post=False):
    """Retrieve a JSON object from a URL.

    :returns: Tuple of (request_url, parsed JSON with floats as Decimal).
    """
    request_url, response = _fetch_remote(
        service_url, params or {}, use_http_post)
    if six.PY3:
        body = response.read().decode('utf-8')
        return (request_url, json.loads(body, parse_float=Decimal))
    return (request_url, json.load(response, parse_float=Decimal))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fetch_remote_file(service_url, params=None, use_http_post=False): """Retrieves a file from a URL. Returns a tuple (mimetype, filename, data) """
def _fetch_remote_file(service_url, params=None, use_http_post=False):
    """Retrieve a file from a URL.

    :returns: Tuple of (mimetype, filename, data, final_url).
    """
    if not params:
        params = {}
    request_url, response = _fetch_remote(service_url, params, use_http_post)
    # Pull the filename out of the Content-Disposition header; a missing
    # 'filename' parameter raises KeyError here.
    _, disposition = cgi.parse_header(
        response.headers.get('Content-Disposition', ''))
    filename = disposition['filename']
    return (response.headers.get('content-type'), filename,
            response.read(), response.geturl())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def geocode_location(location, sensor=False, api_key=None): """Converts a human-readable location to lat-lng. Returns a dict with lat and lng keys. keyword arguments: location -- A human-readable location, e.g 'London, England' sensor -- Boolean flag denoting if the location came from a device using its' location sensor (default False) api_key -- A valid Google Places API key. raises: GooglePlacesError -- if the geocoder fails to find a location. """
def geocode_location(location, sensor=False, api_key=None):
    """Convert a human-readable location to lat-lng.

    :param location: e.g. 'London, England'.
    :param sensor: Whether the location came from a device sensor.
    :param api_key: A valid Google Places API key.
    :returns: Dict with lat and lng keys.
    :raises: GooglePlacesError if the geocoder finds no results.
    """
    params = {'address': location, 'sensor': str(sensor).lower()}
    if api_key is not None:
        params['key'] = api_key
    url, geo_response = _fetch_remote_json(
        GooglePlaces.GEOCODE_API_URL, params)
    _validate_response(url, geo_response)
    if geo_response['status'] == GooglePlaces.RESPONSE_STATUS_ZERO_RESULTS:
        raise GooglePlacesError(
            'Lat/Lng for location \'%s\' can\'t be determined.' % location)
    return geo_response['results'][0]['geometry']['location']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_place_details(place_id, api_key, sensor=False, language=lang.ENGLISH): """Gets a detailed place response. keyword arguments: place_id -- The unique identifier for the required place. """
def _get_place_details(place_id, api_key, sensor=False, language=lang.ENGLISH):
    """Fetch a detailed place response for ``place_id``.

    :returns: The 'result' object of the Details API response.
    """
    request = {'placeid': place_id,
               'sensor': str(sensor).lower(),
               'key': api_key,
               'language': language}
    url, detail_response = _fetch_remote_json(
        GooglePlaces.DETAIL_API_URL, request)
    _validate_response(url, detail_response)
    return detail_response['result']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_response(url, response): """Validates that the response from Google was successful."""
def _validate_response(url, response):
    """Raise GooglePlacesError unless the response status is OK or
    ZERO_RESULTS."""
    acceptable = (GooglePlaces.RESPONSE_STATUS_OK,
                  GooglePlaces.RESPONSE_STATUS_ZERO_RESULTS)
    if response['status'] not in acceptable:
        raise GooglePlacesError(
            'Request to URL %s failed with response code: %s'
            % (url, response['status']))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nearby_search(self, language=lang.ENGLISH, keyword=None, location=None, lat_lng=None, name=None, radius=3200, rankby=ranking.PROMINENCE, sensor=False, type=None, types=[], pagetoken=None): """Perform a nearby search using the Google Places API. One of either location, lat_lng or pagetoken are required, the rest of the keyword arguments are optional. keyword arguments: keyword -- A term to be matched against all available fields, including but not limited to name, type, and address (default None) location -- A human readable location, e.g 'London, England' (default None) language -- The language code, indicating in which language the results should be returned, if possible. (default lang.ENGLISH) lat_lng -- A dict containing the following keys: lat, lng (default None) name -- A term to be matched against the names of the Places. Results will be restricted to those containing the passed name value. (default None) radius -- The radius (in meters) around the location/lat_lng to restrict the search to. The maximum is 50000 meters. (default 3200) rankby -- Specifies the order in which results are listed : ranking.PROMINENCE (default) or ranking.DISTANCE (imply no radius argument). sensor -- Indicates whether or not the Place request came from a device using a location sensor (default False). type -- Optional type param used to indicate place category. types -- An optional list of types, restricting the results to Places (default []). If there is only one item the request will be send as type param. pagetoken-- Optional parameter to force the search result to return the next 20 results from a previously run search. Setting this parameter will execute a search with the same parameters used previously. (default None) """
def nearby_search(self, language=lang.ENGLISH, keyword=None, location=None,
                  lat_lng=None, name=None, radius=3200,
                  rankby=ranking.PROMINENCE, sensor=False, type=None,
                  types=[], pagetoken=None):
    """Perform a nearby search using the Google Places API.

    One of location, lat_lng or pagetoken is required. With
    rankby=DISTANCE, at least one of keyword, name or types is required
    and radius is ignored.

    :returns: GooglePlacesSearchResult for the response.
    :raises: ValueError on missing required parameters.
    """
    if location is None and lat_lng is None and pagetoken is None:
        raise ValueError('One of location, lat_lng or pagetoken must be passed in.')
    if rankby == 'distance':
        # API requires one of keyword, name or types with distance ranking.
        if keyword is None and types == [] and name is None:
            raise ValueError('When rankby = googleplaces.ranking.DISTANCE, ' +
                             'name, keyword or types kwargs ' +
                             'must be specified.')
    self._sensor = sensor
    radius = min(radius, GooglePlaces.MAXIMUM_SEARCH_RADIUS)
    lat_lng_str = self._generate_lat_lng_string(lat_lng, location)
    self._request_params = {'location': lat_lng_str}
    if rankby == 'prominence':
        self._request_params['radius'] = radius
    else:
        self._request_params['rankby'] = rankby
    if type:
        self._request_params['type'] = type
    elif types:
        if len(types) == 1:
            self._request_params['type'] = types[0]
        else:
            self._request_params['types'] = '|'.join(types)
    for param, value in (('keyword', keyword), ('name', name),
                         ('pagetoken', pagetoken), ('language', language)):
        if value is not None:
            self._request_params[param] = value
    self._add_required_param_keys()
    url, places_response = _fetch_remote_json(
        GooglePlaces.NEARBY_SEARCH_API_URL, self._request_params)
    _validate_response(url, places_response)
    return GooglePlacesSearchResult(self, places_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def text_search(self, query=None, language=lang.ENGLISH, lat_lng=None, radius=3200, type=None, types=[], location=None, pagetoken=None): """Perform a text search using the Google Places API. Only the one of the query or pagetoken kwargs are required, the rest of the keyword arguments are optional. keyword arguments: lat_lng -- A dict containing the following keys: lat, lng (default None) location -- A human readable location, e.g 'London, England' (default None) pagetoken-- Optional parameter to force the search result to return the next 20 results from a previously run search. Setting this parameter will execute a search with the same parameters used previously. (default None) radius -- The radius (in meters) around the location/lat_lng to restrict the search to. The maximum is 50000 meters. (default 3200) query -- The text string on which to search, for example: "Restaurant in New York". type -- Optional type param used to indicate place category. types -- An optional list of types, restricting the results to Places (default []). If there is only one item the request will be send as type param. """
def text_search(self, query=None, language=lang.ENGLISH, lat_lng=None,
                radius=3200, type=None, types=[], location=None,
                pagetoken=None):
    """Perform a text search using the Google Places API.

    One of query or pagetoken is required; the remaining arguments are
    optional refinements.

    :returns: GooglePlacesSearchResult for the response.
    """
    self._request_params = {'query': query}
    if lat_lng is not None or location is not None:
        self._request_params['location'] = self._generate_lat_lng_string(
            lat_lng, location)
        self._request_params['radius'] = radius
    if type:
        self._request_params['type'] = type
    elif types:
        if len(types) == 1:
            self._request_params['type'] = types[0]
        else:
            self._request_params['types'] = '|'.join(types)
    if language is not None:
        self._request_params['language'] = language
    if pagetoken is not None:
        self._request_params['pagetoken'] = pagetoken
    self._add_required_param_keys()
    url, places_response = _fetch_remote_json(
        GooglePlaces.TEXT_SEARCH_API_URL, self._request_params)
    _validate_response(url, places_response)
    return GooglePlacesSearchResult(self, places_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def autocomplete(self, input, lat_lng=None, location=None, radius=3200, language=lang.ENGLISH, types=None, components=[]): """ Perform an autocomplete search using the Google Places API. Only the input kwarg is required, the rest of the keyword arguments are optional. keyword arguments: input -- The text string on which to search, for example: "Hattie B's". lat_lng -- A dict containing the following keys: lat, lng (default None) location -- A human readable location, e.g 'London, England' (default None) radius -- The radius (in meters) around the location to which the search is to be restricted. The maximum is 50000 meters. (default 3200) language -- The language code, indicating in which language the results should be returned, if possible. (default lang.ENGLISH) types -- A type to search against. See `types.py` "autocomplete types" for complete list https://developers.google.com/places/documentation/autocomplete#place_types. components -- An optional grouping of places to which you would like to restrict your results. An array containing one or more tuples of: * country: matches a country name or a two letter ISO 3166-1 country code. eg: [('country','US')] """
def autocomplete(self, input, lat_lng=None, location=None, radius=3200,
                 language=lang.ENGLISH, types=None, components=[]):
    """Perform an autocomplete search using the Google Places API.

    Only ``input`` is required; components is a list of (key, value)
    tuples such as [('country', 'US')].

    :returns: GoogleAutocompleteSearchResult for the response.
    """
    self._request_params = {'input': input}
    if lat_lng is not None or location is not None:
        self._request_params['location'] = self._generate_lat_lng_string(
            lat_lng, location)
        self._request_params['radius'] = radius
    if types:
        self._request_params['types'] = types
    if components:
        self._request_params['components'] = '|'.join(
            '{}:{}'.format(key, value) for key, value in components)
    if language is not None:
        self._request_params['language'] = language
    self._add_required_param_keys()
    url, places_response = _fetch_remote_json(
        GooglePlaces.AUTOCOMPLETE_API_URL, self._request_params)
    _validate_response(url, places_response)
    return GoogleAutocompleteSearchResult(self, places_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def radar_search(self, sensor=False, keyword=None, name=None, language=lang.ENGLISH, lat_lng=None, opennow=False, radius=3200, type=None, types=[], location=None): """Perform a radar search using the Google Places API. One of lat_lng or location are required, the rest of the keyword arguments are optional. keyword arguments: keyword -- A term to be matched against all available fields, including but not limited to name, type, and address (default None) name -- A term to be matched against the names of Places. Results will be restricted to those containing the passed name value. language -- The language code, indicating in which language the results should be returned, if possible. (default lang.ENGLISH) lat_lng -- A dict containing the following keys: lat, lng (default None) location -- A human readable location, e.g 'London, England' (default None) radius -- The radius (in meters) around the location/lat_lng to restrict the search to. The maximum is 50000 meters. (default 3200) opennow -- Returns only those Places that are open for business at the time the query is sent. (default False) sensor -- Indicates whether or not the Place request came from a device using a location sensor (default False). type -- Optional type param used to indicate place category types -- An optional list of types, restricting the results to Places (default []). If there is only one item the request will be send as type param """
def radar_search(self, sensor=False, keyword=None, name=None,
                 language=lang.ENGLISH, lat_lng=None, opennow=False,
                 radius=3200, type=None, types=[], location=None):
    """Perform a radar search using the Google Places API.

    One of lat_lng or location is required, plus at least one of keyword,
    name or types.

    :returns: GooglePlacesSearchResult for the response.
    :raises: ValueError on invalid/missing parameters.
    """
    # Fixed: `len(types) is 0` relied on CPython small-int identity
    # (SyntaxWarning on 3.8+); a truth test is the correct form.
    if keyword is None and name is None and not types:
        raise ValueError('One of keyword, name or types must be supplied.')
    if location is None and lat_lng is None:
        raise ValueError('One of location or lat_lng must be passed in.')
    try:
        radius = int(radius)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise ValueError('radius must be passed supplied as an integer.')
    if sensor not in [True, False]:
        raise ValueError('sensor must be passed in as a boolean value.')
    self._request_params = {'radius': radius}
    self._sensor = sensor
    self._request_params['location'] = self._generate_lat_lng_string(
        lat_lng, location)
    if keyword is not None:
        self._request_params['keyword'] = keyword
    if name is not None:
        self._request_params['name'] = name
    if type:
        self._request_params['type'] = type
    elif types:
        if len(types) == 1:
            self._request_params['type'] = types[0]
        elif len(types) > 1:
            self._request_params['types'] = '|'.join(types)
    if language is not None:
        self._request_params['language'] = language
    if opennow is True:
        self._request_params['opennow'] = 'true'
    self._add_required_param_keys()
    url, places_response = _fetch_remote_json(
        GooglePlaces.RADAR_SEARCH_API_URL, self._request_params)
    _validate_response(url, places_response)
    return GooglePlacesSearchResult(self, places_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkin(self, place_id, sensor=False): """Checks in a user to a place. keyword arguments: place_id -- The unique Google identifier for the relevant place. sensor -- Boolean flag denoting if the location came from a device using its location sensor (default False). """
def checkin(self, place_id, sensor=False):
    """Check a user in to a place.

    :param place_id: The unique Google identifier for the place.
    :param sensor: Whether the location came from a device sensor.
    """
    payload = {'placeid': place_id}
    url, checkin_response = _fetch_remote_json(
        GooglePlaces.CHECKIN_API_URL % (str(sensor).lower(), self.api_key),
        json.dumps(payload), use_http_post=True)
    _validate_response(url, checkin_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_place(self, place_id, sensor=False, language=lang.ENGLISH): """Gets a detailed place object. keyword arguments: place_id -- The unique Google identifier for the required place. sensor -- Boolean flag denoting if the location came from a device using its' location sensor (default False). language -- The language code, indicating in which language the results should be returned, if possible. (default lang.ENGLISH) """
def get_place(self, place_id, sensor=False, language=lang.ENGLISH):
    """Fetch a detailed Place object for ``place_id``.

    :returns: A ``Place`` built from the Details API response.
    """
    details = _get_place_details(
        place_id, self.api_key, sensor, language=language)
    return Place(self, details)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_place(self, **kwargs): """Adds a place to the Google Places database. On a successful request, this method will return a dict containing the the new Place's place_id and id in keys 'place_id' and 'id' respectively. keyword arguments: name -- The full text name of the Place. Limited to 255 characters. lat_lng -- A dict containing the following keys: lat, lng. accuracy -- The accuracy of the location signal on which this request is based, expressed in meters. types -- The category in which this Place belongs. Only one type can currently be specified for a Place. A string or single element list may be passed in. language -- The language in which the Place's name is being reported. (defaults 'en'). sensor -- Boolean flag denoting if the location came from a device using its location sensor (default False). """
def add_place(self, **kwargs):
    """Add a place to the Google Places database.

    Required kwargs: name (str, <=255 chars), lat_lng (dict with lat/lng),
    accuracy (int, meters), types (str or list). Optional: language,
    sensor.

    :returns: Dict with the new Place's 'place_id' and 'id'.
    :raises: ValueError on missing/invalid arguments.
    """
    required = {'name': (str,), 'lat_lng': (dict,),
                'accuracy': (int,), 'types': (str, list)}
    request_params = {}
    for key, expected_types in required.items():
        if kwargs.get(key) is None:
            raise ValueError('The %s argument is required.' % key)
        if not isinstance(kwargs[key], expected_types):
            raise ValueError('Invalid value for %s' % key)
        # Fixed: `key is not 'lat_lng'` compared identity with a string
        # literal (SyntaxWarning on 3.8+, implementation-defined result).
        if key != 'lat_lng':
            request_params[key] = kwargs[key]
    if len(kwargs['name']) > 255:
        raise ValueError('The place name must not exceed 255 characters ' +
                         'in length.')
    try:
        kwargs['lat_lng']['lat']
        kwargs['lat_lng']['lng']
        request_params['location'] = kwargs['lat_lng']
    except KeyError:
        raise ValueError('Invalid keys for lat_lng.')
    request_params['language'] = (kwargs['language']
                                  if kwargs.get('language') is not None
                                  else lang.ENGLISH)
    sensor = (kwargs['sensor']
              if kwargs.get('sensor') is not None else False)
    # Google might support multiple types eventually, so accept str or list.
    if isinstance(kwargs['types'], str):
        request_params['types'] = [kwargs['types']]
    else:
        request_params['types'] = kwargs['types']
    url, add_response = _fetch_remote_json(
        GooglePlaces.ADD_API_URL % (str(sensor).lower(), self.api_key),
        json.dumps(request_params), use_http_post=True)
    _validate_response(url, add_response)
    return {'place_id': add_response['place_id'], 'id': add_response['id']}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_place(self, place_id, sensor=False): """Deletes a place from the Google Places database. keyword arguments: place_id -- The textual identifier that uniquely identifies this Place, returned from a Place Search request. sensor -- Boolean flag denoting if the location came from a device using its location sensor (default False). """
def delete_place(self, place_id, sensor=False):
    """Remove a place, identified by ``place_id``, from the Google
    Places database.

    sensor -- whether the location came from a sensor-equipped device.
    """
    payload = json.dumps({'place_id': place_id})
    endpoint = GooglePlaces.DELETE_API_URL % (str(sensor).lower(),
                                              self.api_key)
    url, delete_response = _fetch_remote_json(endpoint, payload,
                                              use_http_post=True)
    _validate_response(url, delete_response)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def types(self): """ Returns a list of feature types describing the given result. """
def types(self):
    """Return the list of feature types describing this result.

    Lazily populated from the place details when they are available.
    """
    # BUGFIX: the fetched value was stored into self._icon instead of
    # self._types, so the cache never populated (and clobbered the icon).
    if self._types == '' and self.details is not None \
            and 'types' in self.details:
        self._types = self.details['types']
    return self._types
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def icon(self): """Returns the URL of a recommended icon for display."""
def icon(self):
    """Return the URL of a recommended icon for display.

    Lazily cached from the place details when available.
    """
    # Idiom fix: compare to None with ``is not`` (PEP 8 / E711).
    if self._icon == '' and self.details is not None \
            and 'icon' in self.details:
        self._icon = self.details['icon']
    return self._icon
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def name(self): """Returns the human-readable name of the place."""
def name(self):
    """Return the human-readable name of the place.

    Lazily cached from the place details when available.
    """
    # Idiom fix: compare to None with ``is not`` (PEP 8 / E711).
    if self._name == '' and self.details is not None \
            and 'name' in self.details:
        self._name = self.details['name']
    return self._name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vicinity(self): """Returns a feature name of a nearby location. Often this feature refers to a street or neighborhood within the given results. """
def vicinity(self):
    """Return a feature name of a nearby location (often a street or
    neighbourhood within the given results).

    Lazily cached from the place details when available.
    """
    # Idiom fix: compare to None with ``is not`` (PEP 8 / E711).
    if self._vicinity == '' and self.details is not None \
            and 'vicinity' in self.details:
        self._vicinity = self.details['vicinity']
    return self._vicinity
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rating(self): """Returns the Place's rating, from 0.0 to 5.0, based on user reviews. This method will return None for places that have no rating. """
def rating(self):
    """Return the Place's rating (0.0 to 5.0) based on user reviews.

    Returns None for places that have no rating; the value is lazily
    cached from the place details when available.
    """
    # Idiom fix: compare to None with ``is not`` (PEP 8 / E711).
    if self._rating == '' and self.details is not None \
            and 'rating' in self.details:
        self._rating = self.details['rating']
    return self._rating
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkin(self): """Checks in an anonymous user in."""
def checkin(self):
    """Check an anonymous user in at this place."""
    query = self._query_instance
    query.checkin(self.place_id, query.sensor)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, maxheight=None, maxwidth=None, sensor=False): """Fetch photo from API."""
def get(self, maxheight=None, maxwidth=None, sensor=False):
    """Fetch this photo from the Places API and cache the result on
    ``mimetype``, ``filename``, ``data`` and ``url``.

    At least one of ``maxheight``/``maxwidth`` must be given, otherwise
    a GooglePlacesError is raised.
    """
    if not (maxheight or maxwidth):
        raise GooglePlacesError('You must specify maxheight or maxwidth!')

    self.mimetype, self.filename, self.data, self.url = _get_place_photo(
        self.photo_reference, self._query_instance.api_key,
        maxheight=maxheight, maxwidth=maxwidth, sensor=sensor)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_png(data, size, level=6, output=None): # type: (bytes, Tuple[int, int], int, Optional[str]) -> Optional[bytes] """ Dump data to a PNG file. If `output` is `None`, create no file but return the whole PNG data. :param tuple size: The (width, height) pair. :param int level: PNG compression level. :param str output: Output file name. """
def to_png(data, size, level=6, output=None):
    # type: (bytes, Tuple[int, int], int, Optional[str]) -> Optional[bytes]
    """Serialize raw RGB pixels into PNG format.

    :param data: raw RGB bytes (3 bytes per pixel, row-major).
    :param size: the (width, height) pair.
    :param level: zlib compression level.
    :param output: target file name; when None, return the PNG bytes
        instead of writing a file.
    """
    width, height = size
    stride = width * 3
    # Every scanline is prefixed with filter type 0 (no filtering).
    no_filter = struct.pack(">B", 0)
    scanlines = b"".join(
        no_filter + data[row * stride : (row + 1) * stride]
        for row in range(height)
    )

    def chunk(tag, payload):
        # PNG chunk layout: length, tag, payload, CRC32 over tag+payload.
        crc = zlib.crc32(tag + payload) & 0xFFFFFFFF
        return (struct.pack(">I", len(payload)) + tag + payload
                + struct.pack(">I", crc))

    magic = struct.pack(">8B", 137, 80, 78, 71, 13, 10, 26, 10)
    # 8-bit depth, color type 2 (truecolor RGB), default compression/
    # filter/interlace.
    ihdr = chunk(b"IHDR", struct.pack(">2I5B", width, height, 8, 2, 0, 0, 0))
    idat = chunk(b"IDAT", zlib.compress(scanlines, level))
    iend = chunk(b"IEND", b"")

    if not output:
        # Return the raw bytes of the whole PNG stream.
        return magic + ihdr + idat + iend

    with open(output, "wb") as fileh:
        fileh.write(magic)
        fileh.write(ihdr)
        fileh.write(idat)
        fileh.write(iend)
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_size(cls, data, width, height): # type: (bytearray, int, int) -> ScreenShot """ Instantiate a new class given only screen shot's data and size. """
def from_size(cls, data, width, height):
    # type: (bytearray, int, int) -> ScreenShot
    """Build an instance from raw screen-shot data plus explicit
    dimensions, using a zero-origin monitor description."""
    region = {"left": 0, "top": 0, "width": width, "height": height}
    return cls(data, region)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rgb(self): # type: () -> bytes """ Compute RGB values from the BGRA raw pixels. :return bytes: RGB pixels. """
def rgb(self):
    # type: () -> bytes
    """Return the pixels converted from BGRA raw data to packed RGB
    bytes, computing and caching the conversion on first access."""
    if not self.__rgb:
        raw = self.raw
        out = bytearray(self.height * self.width * 3)
        out[0::3] = raw[2::4]  # red channel
        out[1::3] = raw[1::4]  # green channel
        out[2::3] = raw[0::4]  # blue channel
        self.__rgb = bytes(out)
    return self.__rgb
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pixel(self, coord_x, coord_y): # type: (int, int) -> Pixel """ Returns the pixel value at a given position. :param int coord_x: The x coordinate. :param int coord_y: The y coordinate. :return tuple: The pixel value as (R, G, B). """
def pixel(self, coord_x, coord_y):
    # type: (int, int) -> Pixel
    """Return the (R, G, B) pixel at the given position.

    Raises ScreenShotError when the position is outside the image.
    """
    try:
        row = self.pixels[coord_y]
        return row[coord_x]
    except IndexError:
        raise ScreenShotError(
            "Pixel location ({}, {}) is out of range.".format(coord_x, coord_y)
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(args=None): # type: (Optional[List[str]]) -> int """ Main logic. """
# Build the command-line interface.
cli_args = ArgumentParser()
cli_args.add_argument(
    "-c",
    "--coordinates",
    default="",
    type=str,
    help="the part of the screen to capture: top, left, width, height",
)
cli_args.add_argument(
    "-l",
    "--level",
    default=6,
    type=int,
    choices=list(range(10)),
    help="the PNG compression level",
)
cli_args.add_argument(
    "-m", "--monitor", default=0, type=int, help="the monitor to screen shot"
)
cli_args.add_argument(
    "-o", "--output", default="monitor-{mon}.png", help="the output file name"
)
cli_args.add_argument(
    "-q",
    "--quiet",
    default=False,
    action="store_true",
    help="do not print created files",
)
cli_args.add_argument("-v", "--version", action="version", version=__version__)

options = cli_args.parse_args(args)
kwargs = {"mon": options.monitor, "output": options.output}
if options.coordinates:
    # An explicit capture region was requested; it overrides -m/--monitor.
    try:
        top, left, width, height = options.coordinates.split(",")
    except ValueError:
        print("Coordinates syntax: top, left, width, height")
        return 2

    kwargs["mon"] = {
        "top": int(top),
        "left": int(left),
        "width": int(width),
        "height": int(height),
    }
    if options.output == "monitor-{mon}.png":
        # The default file name does not fit a region capture; switch to
        # a geometry-based name.
        kwargs["output"] = "sct-{top}x{left}_{width}x{height}.png"

try:
    with mss() as sct:
        if options.coordinates:
            output = kwargs["output"].format(**kwargs["mon"])
            sct_img = sct.grab(kwargs["mon"])
            to_png(sct_img.rgb, sct_img.size, level=options.level, output=output)
            if not options.quiet:
                print(os.path.realpath(output))
        else:
            for file_name in sct.save(**kwargs):
                if not options.quiet:
                    print(os.path.realpath(file_name))
        return 0
except ScreenShotError:
    # Any capture failure maps to a non-zero exit status.
    return 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def error_handler(_, event): # type: (Any, Any) -> int """ Specifies the program's supplied error handler. """
def error_handler(_, event):
    # type: (Any, Any) -> int
    """X error handler: record the error's details on the global ERROR
    holder and tell the server the error was handled (return 0)."""
    evt = event.contents
    ERROR.details = {
        field: getattr(evt, field)
        for field in ("type", "serial", "error_code",
                      "request_code", "minor_code")
    }
    return 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(retval, func, args): # type: (int, Any, Tuple[Any, Any]) -> Optional[Tuple[Any, Any]] """ Validate the returned value of a Xlib or XRANDR function. """
def validate(retval, func, args):
    # type: (int, Any, Tuple[Any, Any]) -> Optional[Tuple[Any, Any]]
    """errcheck hook: pass ``args`` through on success, otherwise raise
    a ScreenShotError naming the failing Xlib/XRANDR function."""
    succeeded = retval != 0 and not ERROR.details
    if succeeded:
        return args
    raise ScreenShotError(
        "{}() failed".format(func.__name__),
        details={"retval": retval, "args": args},
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_error_details(self): # type: () -> Optional[Dict[str, Any]] """ Get more information about the latest X server error. """
details = {} # type: Dict[str, Any] if ERROR.details: details = {"xerror_details": ERROR.details} ERROR.details = None xserver_error = ctypes.create_string_buffer(1024) self.xlib.XGetErrorText( MSS.display, details.get("xerror_details", {}).get("error_code", 0), xserver_error, len(xserver_error), ) xerror = xserver_error.value.decode("utf-8") if xerror != "0": details["xerror"] = xerror return details
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def on_exists(fname): # type: (str) -> None """ Callback example when we try to overwrite an existing screenshot. """
def on_exists(fname):
    # type: (str) -> None
    """Example callback: move an existing screenshot aside by renaming
    it to ``<fname>.old`` before it gets overwritten."""
    if not os.path.isfile(fname):
        return
    backup = fname + ".old"
    print("{} -> {}".format(fname, backup))
    os.rename(fname, backup)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, mon=0, output="monitor-{mon}.png", callback=None): # type: (int, str, Callable[[str], None]) -> Iterator[str] """ Grab a screen shot and save it to a file. :param int mon: The monitor to screen shot (default=0). -1: grab one screen shot of all monitors 0: grab one screen shot by monitor N: grab the screen shot of the monitor N :param str output: The output filename. It can take several keywords to customize the filename: - `{mon}`: the monitor number - `{top}`: the screen shot y-coordinate of the upper-left corner - `{left}`: the screen shot x-coordinate of the upper-left corner - `{width}`: the screen shot's width - `{height}`: the screen shot's height - `{date}`: the current date using the default formatter As it is using the `format()` function, you can specify formatting options like `{date:%Y-%m-%s}`. :param callable callback: Callback called before saving the screen shot to a file. Take the `output` argument as parameter. :return generator: Created file(s). """
monitors = self.monitors
if not monitors:
    raise ScreenShotError("No monitor found.")

if mon == 0:
    # One screen shot by monitor
    # NOTE(review): monitors[0] appears to be the combined/all-monitors
    # entry (see the mon == -1 mapping below), hence the [1:] slice.
    for idx, monitor in enumerate(monitors[1:], 1):
        fname = output.format(mon=idx, date=datetime.now(), **monitor)
        if callable(callback):
            # Let the caller react (e.g. rename an existing file) first.
            callback(fname)
        sct = self.grab(monitor)
        to_png(sct.rgb, sct.size, level=self.compression_level, output=fname)
        yield fname
else:
    # A screen shot of all monitors together or
    # a screen shot of the monitor N.
    mon = 0 if mon == -1 else mon
    try:
        monitor = monitors[mon]
    except IndexError:
        raise ScreenShotError("Monitor {!r} does not exist.".format(mon))

    output = output.format(mon=mon, date=datetime.now(), **monitor)
    if callable(callback):
        callback(output)
    sct = self.grab(monitor)
    to_png(sct.rgb, sct.size, level=self.compression_level, output=output)
    yield output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shot(self, **kwargs): # type: (Any) -> str """ Helper to save the screen shot of the 1st monitor, by default. You can pass the same arguments as for ``save``. """
def shot(self, **kwargs):
    # type: (Any) -> str
    """Save a screen shot of the first monitor (by default) and return
    the created file name.

    Accepts the same keyword arguments as ``save``.
    """
    kwargs.setdefault("mon", 1)
    return next(self.save(**kwargs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cfactory(attr, func, argtypes, restype, errcheck=None): # type: (Any, str, List[Any], Any, Optional[Callable]) -> None """ Factory to create a ctypes function and automatically manage errors. """
meth = getattr(attr, func) meth.argtypes = argtypes meth.restype = restype if errcheck: meth.errcheck = errcheck
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_dpi_awareness(self): """ Set DPI aware to capture full screen on Hi-DPI monitors. """
version = sys.getwindowsversion()[:2] # pylint: disable=no-member if version >= (6, 3): # Windows 8.1+ # Here 2 = PROCESS_PER_MONITOR_DPI_AWARE, which means: # per monitor DPI aware. This app checks for the DPI when it is # created and adjusts the scale factor whenever the DPI changes. # These applications are not automatically scaled by the system. ctypes.windll.shcore.SetProcessDpiAwareness(2) elif (6, 0) <= version < (6, 3): # Windows Vista, 7, 8 and Server 2012 self.user32.SetProcessDPIAware()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mss(**kwargs): # type: (Any) -> MSSMixin """ Factory returning a proper MSS class instance. It detects the platform we are running on and chooses the most adapted mss_class to take screenshots. It then proxies its arguments to the class for instantiation. """
def mss(**kwargs):
    # type: (Any) -> MSSMixin
    """Instantiate the MSS implementation matching the current OS.

    Keyword arguments are forwarded to the platform class constructor.
    Raises ScreenShotError on unsupported systems.
    """
    operating_system = platform.system().lower()
    # Import lazily so only the relevant backend gets loaded.
    if operating_system == "darwin":
        from . import darwin as implementation
    elif operating_system == "linux":
        from . import linux as implementation
    elif operating_system == "windows":
        from . import windows as implementation
    else:
        raise ScreenShotError(
            "System {!r} not (yet?) implemented.".format(operating_system)
        )
    return implementation.MSS(**kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hsl2rgb(hsl): """Convert HSL representation towards RGB :param h: Hue, position around the chromatic circle (h=1 equiv h=0) :param s: Saturation, color saturation (0=full gray, 1=full color) :param l: Ligthness, Overhaul lightness (0=full black, 1=full white) :rtype: 3-uple for RGB values in float between 0 and 1 Hue, Saturation, Range from Lightness is a float between 0 and 1 Note that Hue can be set to any value but as it is a rotation around the chromatic circle, any value above 1 or below 0 can be expressed by a value between 0 and 1 (Note that h=0 is equiv to h=1). This algorithm came from: http://www.easyrgb.com/index.php?X=MATH&H=19#text19 Here are some quick notion of HSL to RGB conversion: With a lightness put at 0, RGB is always rgbblack (0.0, 0.0, 0.0) (0.0, 0.0, 0.0) (0.0, 0.0, 0.0) Same for lightness put at 1, RGB is always rgbwhite (1.0, 1.0, 1.0) (1.0, 1.0, 1.0) (1.0, 1.0, 1.0) With saturation put at 0, the RGB should be equal to Lightness: (0.25, 0.25, 0.25) (0.5, 0.5, 0.5) (0.75, 0.75, 0.75) With saturation put at 1, and lightness put to 0.5, we can find normal full red, green, blue colors: (1.0, 0.0, 0.0) (1.0, 0.0, 0.0) (0.0, 1.0, 0.0) (0.0, 0.0, 1.0) Of course: Traceback (most recent call last): ValueError: Saturation must be between 0 and 1. And: Traceback (most recent call last): ValueError: Lightness must be between 0 and 1. """
def hsl2rgb(hsl):
    """Convert an (h, s, l) triple of floats in [0, 1] into (r, g, b).

    Hue is a rotation around the chromatic circle (h=1 equiv h=0);
    saturation and lightness are validated against [0, 1] with a small
    FLOAT_ERROR tolerance.
    """
    h, s, l = [float(component) for component in hsl]

    if not (0.0 - FLOAT_ERROR <= s <= 1.0 + FLOAT_ERROR):
        raise ValueError("Saturation must be between 0 and 1.")
    if not (0.0 - FLOAT_ERROR <= l <= 1.0 + FLOAT_ERROR):
        raise ValueError("Lightness must be between 0 and 1.")

    if s == 0:
        # Achromatic: every channel equals the lightness.
        return l, l, l

    v2 = l * (1.0 + s) if l < 0.5 else (l + s) - (s * l)
    v1 = 2.0 * l - v2

    # Each channel is the hue shifted by a third of a turn.
    return (
        _hue2rgb(v1, v2, h + (1.0 / 3)),
        _hue2rgb(v1, v2, h),
        _hue2rgb(v1, v2, h - (1.0 / 3)),
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rgb2hsl(rgb): """Convert RGB representation towards HSL :param r: Red amount (float between 0 and 1) :param g: Green amount (float between 0 and 1) :param b: Blue amount (float between 0 and 1) :rtype: 3-uple for HSL values in float between 0 and 1 This algorithm came from: http://www.easyrgb.com/index.php?X=MATH&H=19#text19 Here are some quick notion of RGB to HSL conversion: Note that if red amount is equal to green and blue, then you should have a gray value (from black to white). If only one color is different from the others, it defines the direct Hue: Having only one value set, you can check that: (0.0, 1.0, 0.5) Regression check upon very close values in every component of red, green and blue: Of course: Traceback (most recent call last): ValueError: Green must be between 0 and 1. You provided 2.0. And: Traceback (most recent call last): ValueError: Blue must be between 0 and 1. You provided 1.5. """
r, g, b = [float(v) for v in rgb]

# Validate each channel against [0, 1] (with FLOAT_ERROR tolerance).
for name, v in {'Red': r, 'Green': g, 'Blue': b}.items():
    if not (0 - FLOAT_ERROR <= v <= 1 + FLOAT_ERROR):
        raise ValueError("%s must be between 0 and 1. You provided %r."
                         % (name, v))

vmin = min(r, g, b)  ## Min. value of RGB
vmax = max(r, g, b)  ## Max. value of RGB
diff = vmax - vmin  ## Delta RGB value

vsum = vmin + vmax

l = vsum / 2

if diff < FLOAT_ERROR:  ## This is a gray, no chroma...
    return (0.0, 0.0, l)

##
## Chromatic data...
##

## Saturation
if l < 0.5:
    s = diff / vsum
else:
    s = diff / (2.0 - vsum)

# Per-channel distance from the maximum, used to derive the hue.
dr = (((vmax - r) / 6) + (diff / 2)) / diff
dg = (((vmax - g) / 6) + (diff / 2)) / diff
db = (((vmax - b) / 6) + (diff / 2)) / diff

# The hue offset depends on which channel dominates.
if r == vmax:
    h = db - dg
elif g == vmax:
    h = (1.0 / 3) + dr - db
elif b == vmax:
    h = (2.0 / 3) + dg - dr

# Wrap the hue back into [0, 1].
if h < 0:
    h += 1
if h > 1:
    h -= 1

return (h, s, l)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rgb2hex(rgb, force_long=False): """Transform RGB tuple to hex RGB representation :param rgb: RGB 3-uple of float between 0 and 1 :rtype: 3 hex char or 6 hex char string representation Usage ----- '#0f0' Rounding try to be as natural as possible: '#0ff' And if not possible, the 6 hex char representation is used: '#3bffff' '#00ffff' """
def rgb2hex(rgb, force_long=False):
    """Convert an (r, g, b) triple of floats in [0, 1] to '#rgb' or
    '#rrggbb' hex notation.

    The short 3-digit form is produced whenever each channel's two hex
    digits repeat, unless ``force_long`` is set.
    """
    full = ''.join("%02x" % int(channel * 255 + 0.5 - FLOAT_ERROR)
                   for channel in rgb)
    # Collapse to the short form when every digit pair is doubled.
    if not force_long and full[0::2] == full[1::2]:
        full = ''.join(full[0::2])
    return "#%s" % full
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hex2rgb(str_rgb): """Transform hex RGB representation to RGB tuple :param str_rgb: 3 hex char or 6 hex char string representation :rtype: RGB 3-uple of float between 0 and 1 (0.0, 1.0, 0.0) (0.0, 1.0, 0.0) Traceback (most recent call last): ValueError: Invalid value '#aa' provided for rgb color. """
def hex2rgb(str_rgb):
    """Convert '#rgb' or '#rrggbb' hex notation into an (r, g, b) triple
    of floats between 0 and 1.

    :raises ValueError: if the input is not valid hex color notation.
    """
    try:
        digits = str_rgb[1:]
        if len(digits) == 6:
            parts = (digits[0:2], digits[2:4], digits[4:6])
        elif len(digits) == 3:
            # Expand each digit: 'f' -> 'ff'.
            parts = (digits[0] * 2, digits[1] * 2, digits[2] * 2)
        else:
            raise ValueError()
        return tuple(float(int(part, 16)) / 255 for part in parts)
    except (ValueError, TypeError, IndexError):
        # Narrowed from a bare ``except:`` -- any malformed input maps
        # to the same user-facing error.
        raise ValueError("Invalid value %r provided for rgb color." % str_rgb)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hex2web(hex): """Converts HEX representation to WEB :param rgb: 3 hex char or 6 hex char string representation :rtype: web string representation (human readable if possible) WEB representation uses X11 rgb.txt to define conversion between RGB and english color names. Usage ===== 'red' '#aaa' '#abc' '#acacac' """
def hex2web(hex):
    """Convert HEX notation to its WEB form: the X11 color name when one
    exists, otherwise the shortest valid hex representation."""
    dec_rgb = tuple(int(channel * 255) for channel in hex2rgb(hex))
    if dec_rgb in RGB_TO_COLOR_NAMES:
        ## take the first one
        color_name = RGB_TO_COLOR_NAMES[dec_rgb][0]
        ## Enforce full lowercase for single worded color name.
        if len(re.sub(r"[^A-Z]", "", color_name)) > 1:
            return color_name
        return color_name.lower()

    # Hex format is verified by hex2rgb function. And should be 3 or 6 digit
    if len(hex) == 7 and hex[1::2] == hex[2::2]:
        # Every digit pair repeats: shrink '#rrggbb' to '#rgb'.
        return '#' + hex[1] + hex[3] + hex[5]
    return hex
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def web2hex(web, force_long=False): """Converts WEB representation to HEX :param rgb: web string representation (human readable if possible) :rtype: 3 hex char or 6 hex char string representation WEB representation uses X11 rgb.txt to define conversion between RGB and english color names. Usage ===== '#f00' '#aaa' Traceback (most recent call last): AttributeError: '#foo' is not in web format. Need 3 or 6 hex digit. '#aaaaaa' '#aaaaaa' Traceback (most recent call last): AttributeError: '#aaaa' is not in web format. Need 3 or 6 hex digit. Traceback (most recent call last): ValueError: 'pinky' is not a recognized color. And color names are case insensitive: <Color red> """
def web2hex(web, force_long=False):
    """Convert WEB notation (hex string or case-insensitive X11 color
    name) to HEX notation.

    :raises AttributeError: malformed '#...' input.
    :raises ValueError: unrecognized color name.
    """
    if web.startswith('#'):
        is_long = LONG_HEX_COLOR.match(web)
        is_short = SHORT_HEX_COLOR.match(web)
        if is_long or (is_short and not force_long):
            return web.lower()
        if is_short and force_long:
            # Double every digit: '#abc' -> '#aabbcc'.
            return '#' + ''.join(digit * 2 for digit in web[1:])
        raise AttributeError(
            "%r is not in web format. Need 3 or 6 hex digit." % web)

    web = web.lower()
    if web not in COLOR_NAME_TO_RGB:
        raise ValueError("%r is not a recognized color." % web)

    ## convert dec to hex:
    return rgb2hex([float(int(v)) / 255 for v in COLOR_NAME_TO_RGB[web]],
                   force_long)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def color_scale(begin_hsl, end_hsl, nb): """Returns a list of nb color HSL tuples between begin_hsl and end_hsl ['#f00', '#0f0', '#00f', '#f00'] Of course, asking for negative values is not supported: Traceback (most recent call last): ValueError: Unsupported negative number of colors (nb=-2). """
def color_scale(begin_hsl, end_hsl, nb):
    """Return ``nb + 1`` HSL tuples linearly interpolated from
    ``begin_hsl`` to ``end_hsl`` (both endpoints included).

    :raises ValueError: if ``nb`` is negative.
    """
    if nb < 0:
        raise ValueError(
            "Unsupported negative number of colors (nb=%r)." % nb)

    # Per-axis increment; degenerate to a zero step when nb == 0.
    if nb > 0:
        step = tuple(float(end_hsl[axis] - begin_hsl[axis]) / nb
                     for axis in range(0, 3))
    else:
        step = (0, 0, 0)

    return [
        tuple(begin_hsl[axis] + step[axis] * pos for axis in range(0, 3))
        for pos in range(0, nb + 1)
    ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RGB_color_picker(obj): """Build a color representation from the string representation of an object This allows to quickly get a color from some data, with the additional benefit that the color will be the same as long as the (string representation of the) data is the same:: Same inputs produce the same result:: True True In any case, we still get a ``Color`` object:: True """
def RGB_color_picker(obj):
    """Deterministically derive a Color from ``str(obj)``.

    The SHA-384 digest of the string splits into three equal hex thirds;
    each third is scaled into [0, 1] and becomes one RGB component, so
    identical string representations always map to the same color.
    """
    digest = hashlib.sha384(str(obj).encode('utf-8')).hexdigest()
    third = int(len(digest) / 3)
    # Largest value a third-sized hex string can hold, as a float.
    max_value = float(int("f" * third, 16))
    components = (
        int(digest[i * third:(i + 1) * third], 16) / max_value
        for i in range(3))
    return Color(rgb2hex(components))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _strip_marker_elem(elem_name, elements): """Remove the supplied element from the marker. This is not a comprehensive implementation, but relies on an important characteristic of metadata generation: The element's operand is always associated with an "and" operator. This means that we can simply remove the operand and the "and" operator associated with it. """
extra_indexes = []
# An 'extra' operand is always joined by "and"; other element kinds may
# follow either boolean operator.
preceding_operators = ["and"] if elem_name == "extra" else ["and", "or"]
for i, element in enumerate(elements):
    if isinstance(element, list):
        # Nested marker group: recurse, and drop the group if it
        # emptied out entirely.
        cancelled = _strip_marker_elem(elem_name, element)
        if cancelled:
            extra_indexes.append(i)
    elif isinstance(element, tuple) and element[0].value == elem_name:
        # A (Variable, Op, Value) operand matching the target name.
        extra_indexes.append(i)
# Delete from the end so earlier indexes remain valid.
for i in reversed(extra_indexes):
    del elements[i]
    if i > 0 and elements[i - 1] in preceding_operators:
        # Remove the "and" before it.
        del elements[i - 1]
    elif elements:
        # This shouldn't ever happen, but is included for completeness.
        # If there is not an "and" before this element, try to remove the
        # operator after it.
        del elements[0]
# True signals the caller that this (sub)marker is now empty.
return not elements
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_stripped_marker(marker, strip_func): """Build a new marker which is cleaned according to `strip_func`"""
def _get_stripped_marker(marker, strip_func):
    """Build a new marker cleaned in place by ``strip_func``.

    Returns None when there was no marker to begin with or when
    stripping removed every element.
    """
    if not marker:
        return None
    marker = _ensure_marker(marker)
    elements = marker._markers
    strip_func(elements)
    return marker if elements else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_contained_pyversions(marker): """Collect all `python_version` operands from a marker. """
collection = []
if not marker:
    return set()
marker = _ensure_marker(marker)
# Collect the (Variable, Op, Value) tuples and string joiners from the marker
_markers_collect_pyversions(marker._markers, collection)
marker_str = " and ".join(sorted(collection))
if not marker_str:
    return set()
# Use the distlib dictionary parser to create a dictionary 'trie' which is a bit
# easier to reason about
marker_dict = distlib.markers.parse_marker(marker_str)[0]
version_set = set()
# parse_marker_dict may yield a set of versions or a single value.
pyversions, _ = parse_marker_dict(marker_dict)
if isinstance(pyversions, set):
    version_set.update(pyversions)
elif pyversions is not None:
    version_set.add(pyversions)
# Each distinct element in the set was separated by an "and" operator in the marker
# So we will need to reduce them with an intersection here rather than a union
# in order to find the boundaries
versions = set()
if version_set:
    versions = reduce(lambda x, y: x & y, version_set)
return versions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contains_pyversion(marker): """Check whether a marker contains a python_version operand. """
def contains_pyversion(marker):
    """Tell whether ``marker`` references a python_version operand."""
    if not marker:
        return False
    return _markers_contains_pyversion(_ensure_marker(marker)._markers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distance(a, b): """Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles) lat1, lon1 = math.radians(a[0]), math.radians(a[1]) lat2, lon2 = math.radians(b[0]), math.radians(b[1]) return math.acos(math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def energy(self): """Calculates the length of the route."""
def energy(self):
    """Total length of the route described by ``self.state``.

    The tour is cyclic: the leg from the last city back to the first is
    included via the ``i - 1`` wrap-around index.
    """
    route = self.state
    matrix = self.distance_matrix
    return sum(matrix[route[i - 1]][route[i]] for i in range(len(route)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def round_figures(x, n): """Returns x rounded to n significant figures."""
def round_figures(x, n):
    """Return ``x`` rounded to ``n`` significant figures."""
    if x == 0:
        # BUGFIX: log10(0) is undefined (raised a math domain error);
        # zero is already exact at any precision.
        return x
    return round(x, int(n - math.ceil(math.log10(abs(x)))))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_state(self, fname=None): """Saves state to pickle"""
def save_state(self, fname=None):
    """Pickle ``self.state`` to ``fname``.

    When no name is given, a timestamped file name embedding the current
    energy is generated.
    """
    if not fname:
        stamp = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
        fname = stamp + "_energy_" + str(self.energy()) + ".state"
    with open(fname, "wb") as fh:
        pickle.dump(self.state, fh)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_state(self, fname=None): """Loads state from pickle"""
def load_state(self, fname=None):
    """Restore ``self.state`` from a pickle file."""
    with open(fname, 'rb') as source:
        restored = pickle.load(source)
    self.state = restored
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_schedule(self, schedule): """Takes the output from `auto` and sets the attributes """
def set_schedule(self, schedule):
    """Apply a schedule dict (as produced by ``auto``) to this
    annealer: temperatures stay as-is, step/update counts are coerced
    to int."""
    self.Tmax, self.Tmin = schedule['tmax'], schedule['tmin']
    self.steps, self.updates = (int(schedule['steps']),
                                int(schedule['updates']))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_state(self, state): """Returns an exact copy of the provided state Implemented according to self.copy_strategy, one of * deepcopy : use copy.deepcopy (slow but reliable) * slice: use list slices (faster but only works if state is list-like) * method: use the state's copy() method """
def copy_state(self, state):
    """Return an exact copy of ``state`` using the configured
    ``copy_strategy``:

    * 'deepcopy' -- copy.deepcopy (slow but reliable)
    * 'slice'    -- list slicing (fast, list-like states only)
    * 'method'   -- the state's own copy() method
    """
    strategy = self.copy_strategy
    if strategy == 'deepcopy':
        return copy.deepcopy(state)
    if strategy == 'slice':
        return state[:]
    if strategy == 'method':
        return state.copy()
    raise RuntimeError('No implementation found for '
                       'the self.copy_strategy "%s"' % strategy)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_update(self, step, T, E, acceptance, improvement): """Default update, outputs to stderr. Prints the current temperature, energy, acceptance rate, improvement rate, elapsed time, and remaining time. The acceptance rate indicates the percentage of moves since the last update that were accepted by the Metropolis algorithm. It includes moves that decreased the energy, moves that left the energy unchanged, and moves that increased the energy yet were reached by thermal excitation. The improvement rate indicates the percentage of moves since the last update that strictly decreased the energy. At high temperatures it will include both moves that improved the overall state and moves that simply undid previously accepted moves that increased the energy by thermal excititation. At low temperatures it will tend toward zero as the moves that can decrease the energy are exhausted and moves that would increase the energy are no longer thermally accessible."""
# Wall-clock seconds since the anneal started; drives the elapsed /
# remaining-time estimates below.
elapsed = time.time() - self.start
if step == 0:
    # First update: print the column header plus the initial row (no
    # acceptance/improvement rates are available yet).
    print(' Temperature Energy Accept Improve Elapsed Remaining',
          file=sys.stderr)
    print('\r%12.5f %12.2f %s ' %
          (T, E, time_string(elapsed)), file=sys.stderr, end="\r")
    sys.stderr.flush()
else:
    # Linear extrapolation of the remaining run time from progress so far.
    remain = (self.steps - step) * (elapsed / step)
    print('\r%12.5f %12.2f %7.2f%% %7.2f%% %s %s\r' %
          (T, E, 100.0 * acceptance, 100.0 * improvement,
           time_string(elapsed), time_string(remain)),
          file=sys.stderr, end="\r")
    sys.stderr.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def anneal(self): """Minimizes the energy of a system by simulated annealing. Parameters state : an initial arrangement of the system Returns (state, energy): the best state and energy found. """
step = 0
self.start = time.time()

# Precompute factor for exponential cooling from Tmax to Tmin
if self.Tmin <= 0.0:
    raise Exception('Exponential cooling requires a minimum "\
"temperature greater than zero.')
Tfactor = -math.log(self.Tmax / self.Tmin)

# Note initial state
T = self.Tmax
E = self.energy()
prevState = self.copy_state(self.state)
prevEnergy = E
self.best_state = self.copy_state(self.state)
self.best_energy = E
trials, accepts, improves = 0, 0, 0
if self.updates > 0:
    # How many steps between two progress updates.
    updateWavelength = self.steps / self.updates
    self.update(step, T, E, None, None)

# Attempt moves to new states
while step < self.steps and not self.user_exit:
    step += 1
    # Exponential cooling schedule from Tmax down to Tmin.
    T = self.Tmax * math.exp(Tfactor * step / self.steps)
    self.move()
    E = self.energy()
    dE = E - prevEnergy
    trials += 1
    # Metropolis criterion: reject uphill moves with probability
    # 1 - exp(-dE / T).
    if dE > 0.0 and math.exp(-dE / T) < random.random():
        # Restore previous state
        self.state = self.copy_state(prevState)
        E = prevEnergy
    else:
        # Accept new state and compare to best state
        accepts += 1
        if dE < 0.0:
            improves += 1
        prevState = self.copy_state(self.state)
        prevEnergy = E
        if E < self.best_energy:
            self.best_state = self.copy_state(self.state)
            self.best_energy = E
    if self.updates > 1:
        # Fire an update each time we cross an update boundary.
        if (step // updateWavelength) > ((step - 1) // updateWavelength):
            self.update(
                step, T, E, accepts / trials, improves / trials)
            trials, accepts, improves = 0, 0, 0

self.state = self.copy_state(self.best_state)
if self.save_state_on_exit:
    self.save_state()

# Return best state and energy
return self.best_state, self.best_energy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auto(self, minutes, steps=2000): """Explores the annealing landscape and estimates optimal temperature settings. Returns a dictionary suitable for the `set_schedule` method. """
def auto(self, minutes, steps=2000):
    """Explore the annealing landscape and estimate optimal
    temperature settings.

    Returns a dictionary suitable for the `set_schedule` method.
    """
    def probe(T, steps):
        """Anneal at constant temperature T and return the resulting
        energy plus the rates of acceptance and improvement."""
        E = self.energy()
        prevState = self.copy_state(self.state)
        prevEnergy = E
        accepts, improves = 0, 0
        for _ in range(steps):
            self.move()
            E = self.energy()
            dE = E - prevEnergy
            if dE > 0.0 and math.exp(-dE / T) < random.random():
                self.state = self.copy_state(prevState)
                E = prevEnergy
            else:
                accepts += 1
                if dE < 0.0:
                    improves += 1
                prevState = self.copy_state(self.state)
                prevEnergy = E
        return E, float(accepts) / steps, float(improves) / steps

    step = 0
    self.start = time.time()

    # Find an initial guess for temperature: keep moving until the
    # energy actually changes.
    T = 0.0
    E = self.energy()
    self.update(step, T, E, None, None)
    while T == 0.0:
        step += 1
        self.move()
        T = abs(self.energy() - E)

    # Search for Tmax - a temperature that gives 98% acceptance.
    E, acceptance, improvement = probe(T, steps)
    step += steps
    while acceptance > 0.98:
        T = round_figures(T / 1.5, 2)
        E, acceptance, improvement = probe(T, steps)
        step += steps
        self.update(step, T, E, acceptance, improvement)
    while acceptance < 0.98:
        T = round_figures(T * 1.5, 2)
        E, acceptance, improvement = probe(T, steps)
        step += steps
        self.update(step, T, E, acceptance, improvement)
    Tmax = T

    # Search for Tmin - a temperature that gives 0% improvement.
    while improvement > 0.0:
        T = round_figures(T / 1.5, 2)
        E, acceptance, improvement = probe(T, steps)
        step += steps
        self.update(step, T, E, acceptance, improvement)
    Tmin = T

    # Estimate how many steps fit in the requested wall-clock budget.
    elapsed = time.time() - self.start
    duration = round_figures(int(60.0 * minutes * step / elapsed), 2)

    # Don't perform the anneal, just return the schedule parameters.
    return {'tmax': Tmax, 'tmin': Tmin,
            'steps': duration, 'updates': self.updates}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, shapefile=None): """Opens a shapefile from a filename or file-like object. Normally this method would be called by the constructor with the file object or file name as an argument."""
def load(self, shapefile=None):
    """Open a shapefile from a filename or file-like object.

    Normally called by the constructor with the file object or file
    name as an argument.
    """
    if shapefile:
        shapeName = os.path.splitext(shapefile)[0]
        self.shapeName = shapeName
        # A usable shapefile needs all three component files; raise a
        # ShapefileException naming whichever one cannot be opened.
        for ext in ("shp", "shx", "dbf"):
            path = "%s.%s" % (shapeName, ext)
            try:
                setattr(self, ext, open(path, "rb"))
            except IOError:
                raise ShapefileException("Unable to open %s" % path)
        if self.shp:
            self.__shpHeader()
        if self.dbf:
            self.__dbfHeader()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shapes(self): """Returns all shapes in a shapefile."""
def shapes(self):
    """Return all shapes in a shapefile."""
    shp = self.__getFileObj(self.shp)
    # Skip the 100-byte shapefile header, then read geometry records
    # until the recorded end of file (self.shpLength) is reached.
    shp.seek(100)
    result = []
    while shp.tell() < self.shpLength:
        result.append(self.__shape())
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __dbfHeaderLength(self): """Retrieves the header length of a dbf file header."""
def __dbfHeaderLength(self):
    """Retrieve (and cache) the header length of the dbf file."""
    if not self.__dbfHdrLength:
        if not self.dbf:
            raise ShapefileException(
                "Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
        dbf = self.dbf
        # The first 32 bytes of a dbf header hold the record count
        # (bytes 4-7) and the header size (bytes 8-9).
        (self.numRecords, self.__dbfHdrLength) = \
            unpack("<xxxxLH22x", dbf.read(32))
    return self.__dbfHdrLength
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __recordFmt(self): """Calculates the size of a .shp geometry record."""
def __recordFmt(self):
    """Calculate the struct format string and byte size of a dbf
    record."""
    if not self.numRecords:
        self.__dbfHeader()
    # One fixed-width string field per dbf column; fieldinfo[2] is the
    # declared field length in bytes.
    fmt = ''.join('%ds' % fieldinfo[2] for fieldinfo in self.fields)
    return (fmt, calcsize(fmt))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def records(self): """Returns all records in a dbf file."""
def records(self):
    """Return all records in a dbf file."""
    if not self.numRecords:
        self.__dbfHeader()
    f = self.__getFileObj(self.dbf)
    # Position at the first record, just past the dbf header.
    f.seek(self.__dbfHeaderLength())
    result = []
    for _ in range(self.numRecords):
        r = self.__record()
        # __record may yield a falsy value (e.g. a deleted row); skip.
        if r:
            result.append(r)
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def point(self, x, y, z=0, m=0): """Creates a point shape."""
def point(self, x, y, z=0, m=0):
    """Create a point shape."""
    # A point carries a single coordinate tuple; z and m default to 0
    # for plain 2D points.
    shape = _Shape(self.shapeType)
    shape.points.append([x, y, z, m])
    self._shapes.append(shape)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def saveShp(self, target): """Save an shp file."""
def saveShp(self, target):
    """Save an shp file."""
    # Accept either a file-like object or a path; for a path, force
    # the .shp extension.
    if not hasattr(target, "write"):
        target = os.path.splitext(target)[0] + '.shp'
    # Default the file's shape type to that of the first shape.
    if not self.shapeType:
        self.shapeType = self._shapes[0].shapeType
    shp = self.__getFileObj(target)
    self.shp = shp
    # Write the 100-byte header, then the geometry records.
    self.__shapefileHeader(shp, headerType='shp')
    self.__shpRecords()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def saveShx(self, target): """Save an shx file."""
def saveShx(self, target):
    """Save an shx file."""
    # Accept either a file-like object or a path; for a path, force
    # the .shx extension.
    if not hasattr(target, "write"):
        target = os.path.splitext(target)[0] + '.shx'
    # Default the file's shape type to that of the first shape.
    if not self.shapeType:
        self.shapeType = self._shapes[0].shapeType
    shx = self.__getFileObj(target)
    self.shx = shx
    # Write the 100-byte header, then the index records.
    self.__shapefileHeader(shx, headerType='shx')
    self.__shxRecords()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def saveDbf(self, target): """Save a dbf file."""
def saveDbf(self, target):
    """Save a dbf file."""
    # Accept either a file-like object or a path; for a path, force
    # the .dbf extension.
    if not hasattr(target, "write"):
        target = os.path.splitext(target)[0] + '.dbf'
    dbf = self.__getFileObj(target)
    self.dbf = dbf
    # Write the dbf header, then the attribute records.
    self.__dbfHeader()
    self.__dbfRecords()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, target=None, shp=None, shx=None, dbf=None): """Save the shapefile data to three files or three file-like objects. SHP and DBF files can also be written exclusively using saveShp, saveShx, and saveDbf respectively."""
def save(self, target=None, shp=None, shx=None, dbf=None):
    """Save the shapefile data to three files or three file-like
    objects.

    SHP, SHX and DBF files can also be written individually using
    saveShp, saveShx, and saveDbf respectively.
    """
    # TODO: Create a unique filename for target if None.
    # Explicit per-component targets take precedence over `target`.
    if shp:
        self.saveShp(shp)
    if shx:
        self.saveShx(shx)
    if dbf:
        self.saveDbf(dbf)
    elif target:
        # Single base name: write all three components and close each
        # handle after it is written.
        for writer, handle in ((self.saveShp, 'shp'),
                               (self.saveShx, 'shx'),
                               (self.saveDbf, 'dbf')):
            writer(target)
            getattr(self, handle).close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self, shape=None, part=None, point=None): """Deletes the specified part of any shape by specifying a shape number, part number, or point number."""
def delete(self, shape=None, part=None, point=None):
    """Delete the specified part of any shape by specifying a shape
    number, part number, or point number.

    Fixed: indices are now tested with ``is not None`` instead of
    truthiness, so index 0 (a perfectly valid shape/part/point number)
    is no longer silently treated as "not specified" and ignored.
    """
    has_shape = shape is not None
    has_part = part is not None
    has_point = point is not None
    # shape, part, point
    if has_shape and has_part and has_point:
        del self._shapes[shape][part][point]
    # shape, part
    elif has_shape and has_part and not has_point:
        del self._shapes[shape][part]
    # shape
    elif has_shape and not has_part and not has_point:
        del self._shapes[shape]
    # point
    elif not has_shape and not has_part and has_point:
        for s in self._shapes:
            if s.shapeType == 1:
                # NOTE(review): deleting self._shapes[point] here drops a
                # whole shape rather than a point; looks suspicious but is
                # preserved from the original — confirm before changing.
                del self._shapes[point]
            else:
                for part in s.parts:
                    del s[part][point]
    # part, point
    elif not has_shape and has_part and has_point:
        for s in self._shapes:
            del s[part][point]
    # part
    elif not has_shape and has_part and not has_point:
        for s in self._shapes:
            del s[part]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def balance(self): """Adds a corresponding empty attribute or null geometry record depending on which type of record was created to make sure all three files are in synch."""
def balance(self):
    """Add a corresponding empty attribute record or null geometry
    record, whichever is missing, to keep all three component files
    in sync."""
    n_records = len(self.records)
    n_shapes = len(self._shapes)
    if n_records > n_shapes:
        self.null()
    elif n_records < n_shapes:
        self.record()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __fieldNorm(self, fieldName): """Normalizes a dbf field name to fit within the spec and the expectations of certain ESRI software."""
def __fieldNorm(self, fieldName):
    """Normalize a dbf field name to fit within the spec and the
    expectations of certain ESRI software: truncate to 11 characters,
    uppercase, and replace spaces with underscores.

    Fixed: the original discarded the result of ``str.replace``
    (strings are immutable, so the call was a no-op) and returned
    None; the normalized name is now returned.
    """
    if len(fieldName) > 11:
        fieldName = fieldName[:11]
    fieldName = fieldName.upper()
    # str.replace returns a new string; the original dropped it.
    return fieldName.replace(' ', '_')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_main(self, text1, text2, checklines=True, deadline=None): """Find the differences between two texts. Simplifies the problem by stripping any common prefix or suffix off the texts before diffing. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Optional speedup flag. If present and false, then don't run a line-level diff first to identify the changed areas. Defaults to true, which does a faster, slightly less optimal diff. deadline: Optional time when the diff should be complete by. Used internally for recursive calls. Users should set DiffTimeout instead. Returns: Array of changes. """
def diff_main(self, text1, text2, checklines=True, deadline=None):
    """Find the differences between two texts.

    Simplifies the problem by stripping any common prefix or suffix
    off the texts before diffing.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Optional speedup flag. If false, skip the initial
        line-level diff of the changed areas.
      deadline: Optional time when the diff should be complete by.
        Used internally for recursive calls.

    Returns:
      Array of changes.
    """
    # Establish a deadline for the computation. Python measures time
    # in seconds (unlike most ports of this library).
    if deadline is None:
        if self.Diff_Timeout <= 0:
            deadline = sys.maxsize
        else:
            deadline = time.time() + self.Diff_Timeout

    # Null inputs are a caller error.
    if text1 is None or text2 is None:
        raise ValueError("Null inputs. (diff_main)")

    # Identical texts: trivial result (speedup).
    if text1 == text2:
        return [(self.DIFF_EQUAL, text1)] if text1 else []

    # Peel off the common prefix (speedup).
    prefix_len = self.diff_commonPrefix(text1, text2)
    prefix = text1[:prefix_len]
    text1 = text1[prefix_len:]
    text2 = text2[prefix_len:]

    # Peel off the common suffix (speedup).
    suffix_len = self.diff_commonSuffix(text1, text2)
    suffix = text1[-suffix_len:] if suffix_len else ''
    text1 = text1[:len(text1) - suffix_len]
    text2 = text2[:len(text2) - suffix_len]

    # Diff the remaining middle block.
    diffs = self.diff_compute(text1, text2, checklines, deadline)

    # Reattach the prefix and suffix.
    if prefix:
        diffs[:0] = [(self.DIFF_EQUAL, prefix)]
    if suffix:
        diffs.append((self.DIFF_EQUAL, suffix))
    self.diff_cleanupMerge(diffs)
    return diffs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_compute(self, text1, text2, checklines, deadline): """Find the differences between two texts. Assumes that the texts do not have any common prefix or suffix. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Speedup flag. If false, then don't run a line-level diff first to identify the changed areas. If true, then run a faster, slightly less optimal diff. deadline: Time when the diff should be complete by. Returns: Array of changes. """
def diff_compute(self, text1, text2, checklines, deadline):
    """Find the differences between two texts, assuming they share no
    common prefix or suffix.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Speedup flag; if true run the faster line-level
        pre-diff first.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """
    if not text1:
        # Pure insertion (speedup).
        return [(self.DIFF_INSERT, text2)]
    if not text2:
        # Pure deletion (speedup).
        return [(self.DIFF_DELETE, text1)]

    reversed_diff = len(text1) > len(text2)
    if reversed_diff:
        longtext, shorttext = text1, text2
    else:
        longtext, shorttext = text2, text1
    idx = longtext.find(shorttext)
    if idx != -1:
        # The shorter text sits inside the longer one (speedup).
        diffs = [(self.DIFF_INSERT, longtext[:idx]),
                 (self.DIFF_EQUAL, shorttext),
                 (self.DIFF_INSERT, longtext[idx + len(shorttext):])]
        if reversed_diff:
            # The "insertions" are really deletions from text1.
            diffs[0] = (self.DIFF_DELETE, diffs[0][1])
            diffs[2] = (self.DIFF_DELETE, diffs[2][1])
        return diffs

    if len(shorttext) == 1:
        # Single character; after the containment check above it
        # cannot be an equality.
        return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

    # Try to split the problem in two around a large common substring.
    hm = self.diff_halfMatch(text1, text2)
    if hm:
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
        # Diff each pair independently, then merge around the middle.
        diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
        diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
        return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b

    if checklines and len(text1) > 100 and len(text2) > 100:
        return self.diff_lineMode(text1, text2, deadline)

    return self.diff_bisect(text1, text2, deadline)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_lineMode(self, text1, text2, deadline): """Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes. """
def diff_lineMode(self, text1, text2, deadline):
    """Do a quick line-level diff on both strings, then rediff the
    changed parts for greater accuracy. Can produce non-minimal diffs.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """
    # First pass: line-level diff over hashed lines.
    (text1, text2, linearray) = self.diff_linesToChars(text1, text2)
    diffs = self.diff_main(text1, text2, False, deadline)

    # Convert the diff back to original text.
    self.diff_charsToLines(diffs, linearray)
    # Eliminate freak matches (e.g. blank lines).
    self.diff_cleanupSemantic(diffs)

    # Second pass: rediff each replacement block char-by-char.
    diffs.append((self.DIFF_EQUAL, ''))  # dummy sentinel at the end
    pointer = 0
    n_delete = 0
    n_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
        op = diffs[pointer][0]
        if op == self.DIFF_INSERT:
            n_insert += 1
            text_insert += diffs[pointer][1]
        elif op == self.DIFF_DELETE:
            n_delete += 1
            text_delete += diffs[pointer][1]
        elif op == self.DIFF_EQUAL:
            # Upon reaching an equality, rediff any pending
            # delete+insert pair character-by-character.
            if n_delete >= 1 and n_insert >= 1:
                subDiff = self.diff_main(text_delete, text_insert,
                                         False, deadline)
                start = pointer - n_delete - n_insert
                diffs[start:pointer] = subDiff
                pointer = start + len(subDiff)
            n_insert = 0
            n_delete = 0
            text_delete = ''
            text_insert = ''
        pointer += 1

    diffs.pop()  # Remove the dummy sentinel.
    return diffs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_bisectSplit(self, text1, text2, x, y, deadline): """Given the location of the 'middle snake', split the diff in two parts and recurse. Args: text1: Old string to be diffed. text2: New string to be diffed. x: Index of split point in text1. y: Index of split point in text2. deadline: Time at which to bail if not yet complete. Returns: Array of diff tuples. """
def diff_bisectSplit(self, text1, text2, x, y, deadline):
    """Given the location of the 'middle snake', split the diff in two
    parts and recurse on each half.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      x: Index of split point in text1.
      y: Index of split point in text2.
      deadline: Time at which to bail if not yet complete.

    Returns:
      Array of diff tuples.
    """
    # Split both texts at the snake coordinates.
    head1, tail1 = text1[:x], text1[x:]
    head2, tail2 = text2[:y], text2[y:]
    # Compute both half-diffs serially and concatenate.
    front = self.diff_main(head1, head2, False, deadline)
    back = self.diff_main(tail1, tail2, False, deadline)
    return front + back
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_linesToChars(self, text1, text2): """Split two texts into an array of strings. Reduce the texts to a string of hashes where each Unicode character represents one line. Args: text1: First string. text2: Second string. Returns: Three element tuple, containing the encoded text1, the encoded text2 and the array of unique strings. The zeroth element of the array of unique strings is intentionally blank. """
def diff_linesToChars(self, text1, text2):
    """Split two texts into strings of 'line hashes', where each
    Unicode character stands for one unique line.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      (encoded_text1, encoded_text2, line_array). line_array[0] is
      intentionally the empty string so that no '\\x00' characters
      (which upset various debuggers) are ever generated.
    """
    lineArray = ['']   # lineArray[n] == the n-th unique line
    lineHash = {}      # lineHash[line] == its index in lineArray

    def munge(text):
        """Encode one text, sharing lineArray/lineHash via closure."""
        encoded = []
        lineStart = 0
        lineEnd = -1
        # Walk the text pulling out one line at a time; avoids the
        # temporary memory doubling of text.split('\n').
        while lineEnd < len(text) - 1:
            lineEnd = text.find('\n', lineStart)
            if lineEnd == -1:
                lineEnd = len(text) - 1
            line = text[lineStart:lineEnd + 1]
            if line in lineHash:
                encoded.append(chr(lineHash[line]))
            else:
                if len(lineArray) == maxLines:
                    # Bail out at the Unicode ceiling: chr(1114112)
                    # would raise, so lump the rest into one "line".
                    line = text[lineStart:]
                    lineEnd = len(text)
                lineArray.append(line)
                lineHash[line] = len(lineArray) - 1
                encoded.append(chr(len(lineArray) - 1))
            lineStart = lineEnd + 1
        return "".join(encoded)

    # Allocate 2/3rds of the code-point space to text1, rest to text2.
    maxLines = 666666
    chars1 = munge(text1)
    maxLines = 1114111
    chars2 = munge(text2)
    return (chars1, chars2, lineArray)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_charsToLines(self, diffs, lineArray): """Rehydrate the text in a diff from a string of line hashes to real lines of text. Args: diffs: Array of diff tuples. lineArray: Array of unique strings. """
def diff_charsToLines(self, diffs, lineArray):
    """Rehydrate the text in a diff from line-hash characters back to
    real lines of text, in place.

    Args:
      diffs: Array of diff tuples (modified in place).
      lineArray: Array of unique strings.
    """
    for i, (op, data) in enumerate(diffs):
        # Each character's code point indexes into lineArray.
        diffs[i] = (op, "".join(lineArray[ord(ch)] for ch in data))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_commonPrefix(self, text1, text2): """Determine the common prefix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the start of each string. """
def diff_commonPrefix(self, text1, text2):
    """Determine the length of the common prefix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the start of each string.
    """
    # Quick rejection of the common null cases.
    if not text1 or not text2 or text1[0] != text2[0]:
        return 0
    # Binary search over the prefix length.
    # Performance analysis: https://neil.fraser.name/news/2007/10/09/
    lo = 0
    hi = min(len(text1), len(text2))
    mid = hi
    start = 0
    while lo < mid:
        if text1[start:mid] == text2[start:mid]:
            lo = mid
            start = lo
        else:
            hi = mid
        mid = (hi - lo) // 2 + lo
    return mid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_commonSuffix(self, text1, text2): """Determine the common suffix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the end of each string. """
def diff_commonSuffix(self, text1, text2):
    """Determine the length of the common suffix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the end of each string.
    """
    # Quick rejection of the common null cases.
    if not text1 or not text2 or text1[-1] != text2[-1]:
        return 0
    # Binary search over the suffix length.
    # Performance analysis: https://neil.fraser.name/news/2007/10/09/
    lo = 0
    hi = min(len(text1), len(text2))
    mid = hi
    end = 0
    while lo < mid:
        if (text1[-mid:len(text1) - end] ==
                text2[-mid:len(text2) - end]):
            lo = mid
            end = lo
        else:
            hi = mid
        mid = (hi - lo) // 2 + lo
    return mid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_commonOverlap(self, text1, text2): """Determine if the suffix of one string is the prefix of another. Args: text1 First string. text2 Second string. Returns: The number of characters common to the end of the first string and the start of the second string. """
def diff_commonOverlap(self, text1, text2):
    """Determine the length of the overlap between the suffix of
    text1 and the prefix of text2.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the end of the first string
      and the start of the second string.
    """
    # Cache the text lengths to prevent multiple calls.
    len1 = len(text1)
    len2 = len(text2)
    # Null case.
    if len1 == 0 or len2 == 0:
        return 0
    # Truncate the longer string so both are the same length.
    if len1 > len2:
        text1 = text1[-len2:]
    elif len1 < len2:
        text2 = text2[:len1]
    shortest = min(len1, len2)
    # Best case: the (truncated) strings are identical.
    if text1 == text2:
        return shortest
    # Grow a candidate overlap one character at a time.
    # Performance analysis: https://neil.fraser.name/news/2010/11/04/
    best = 0
    length = 1
    while True:
        pattern = text1[-length:]
        found = text2.find(pattern)
        if found == -1:
            return best
        length += found
        if found == 0 or text1[-length:] == text2[:length]:
            best = length
            length += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_halfMatch(self, text1, text2): """Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match. """
def diff_halfMatch(self, text1, text2):
    """Do the two texts share a substring which is at least half the
    length of the longer text? This speedup can produce non-minimal
    diffs.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Five element tuple: (prefix of text1, suffix of text1, prefix of
      text2, suffix of text2, common middle), or None if there was no
      qualifying match.
    """
    if self.Diff_Timeout <= 0:
        # With unlimited time, don't risk a non-optimal diff.
        return None
    if len(text1) > len(text2):
        (longtext, shorttext) = (text1, text2)
    else:
        (shorttext, longtext) = (text1, text2)
    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
        return None  # Pointless.

    def half_match_at(longtext, shorttext, i):
        """Seed from the quarter-length substring of longtext starting
        at i; look for a half match around each occurrence of the seed
        in shorttext. Pure closure (uses only self's diff helpers).

        Returns the five-element split tuple, or None.
        """
        seed = longtext[i:i + len(longtext) // 4]
        best_common = ''
        j = shorttext.find(seed)
        while j != -1:
            prefixLength = self.diff_commonPrefix(longtext[i:],
                                                  shorttext[j:])
            suffixLength = self.diff_commonSuffix(longtext[:i],
                                                  shorttext[:j])
            if len(best_common) < suffixLength + prefixLength:
                best_common = (shorttext[j - suffixLength:j] +
                               shorttext[j:j + prefixLength])
                best_longtext_a = longtext[:i - suffixLength]
                best_longtext_b = longtext[i + prefixLength:]
                best_shorttext_a = shorttext[:j - suffixLength]
                best_shorttext_b = shorttext[j + prefixLength:]
            j = shorttext.find(seed, j + 1)
        if len(best_common) * 2 >= len(longtext):
            return (best_longtext_a, best_longtext_b,
                    best_shorttext_a, best_shorttext_b, best_common)
        return None

    # Seed first from the second quarter, then from the third quarter.
    hm1 = half_match_at(longtext, shorttext, (len(longtext) + 3) // 4)
    hm2 = half_match_at(longtext, shorttext, (len(longtext) + 1) // 2)
    if not hm1 and not hm2:
        return None
    elif not hm2:
        hm = hm1
    elif not hm1:
        hm = hm2
    else:
        # Both matched; keep the one with the longer common middle.
        hm = hm1 if len(hm1[4]) > len(hm2[4]) else hm2

    # Order the return data to match (text1, text2).
    if len(text1) > len(text2):
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    else:
        (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
    return (text1_a, text1_b, text2_a, text2_b, mid_common)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff_cleanupEfficiency(self, diffs): """Reduce the number of edits by eliminating operationally trivial equalities. Args: diffs: Array of diff tuples. """
def diff_cleanupEfficiency(self, diffs):
    """Reduce the number of edits by eliminating operationally trivial
    equalities, in place.

    Args:
      diffs: Array of diff tuples (modified in place).
    """
    changes = False
    eq_stack = []    # Stack of indices where equalities are found.
    last_eq = None   # Always equal to diffs[eq_stack[-1]][1].
    pointer = 0      # Index of current position.
    pre_ins = False  # Insertion op before the last equality?
    pre_del = False  # Deletion op before the last equality?
    post_ins = False  # Insertion op after the last equality?
    post_del = False  # Deletion op after the last equality?
    while pointer < len(diffs):
        if diffs[pointer][0] == self.DIFF_EQUAL:  # Equality found.
            if (len(diffs[pointer][1]) < self.Diff_EditCost and
                    (post_ins or post_del)):
                # Candidate found.
                eq_stack.append(pointer)
                pre_ins = post_ins
                pre_del = post_del
                last_eq = diffs[pointer][1]
            else:
                # Not a candidate, and can never become one.
                eq_stack = []
                last_eq = None
            post_ins = post_del = False
        else:  # An insertion or deletion.
            if diffs[pointer][0] == self.DIFF_DELETE:
                post_del = True
            else:
                post_ins = True
            # Five types to be split:
            # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
            # <ins>A</ins>X<ins>C</ins><del>D</del>
            # <ins>A</ins><del>B</del>X<ins>C</ins>
            # <ins>A</del>X<ins>C</ins><del>D</del>
            # <ins>A</ins><del>B</del>X<del>C</del>
            if last_eq and ((pre_ins and pre_del and post_ins and post_del) or
                            ((len(last_eq) < self.Diff_EditCost / 2) and
                             (pre_ins + pre_del + post_ins + post_del) == 3)):
                # Duplicate record.
                diffs.insert(eq_stack[-1], (self.DIFF_DELETE, last_eq))
                # Change second copy to insert.
                diffs[eq_stack[-1] + 1] = (self.DIFF_INSERT,
                                           diffs[eq_stack[-1] + 1][1])
                eq_stack.pop()  # Throw away the equality we just deleted.
                last_eq = None
                if pre_ins and pre_del:
                    # No changes made which could affect previous
                    # entry, keep going.
                    post_ins = post_del = True
                    eq_stack = []
                else:
                    if len(eq_stack):
                        eq_stack.pop()  # Throw away the previous equality.
                    if len(eq_stack):
                        pointer = eq_stack[-1]
                    else:
                        pointer = -1
                    post_ins = post_del = False
                changes = True
        pointer += 1

    if changes:
        self.diff_cleanupMerge(diffs)