text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bethe_fermi_ene(energy, quasipart, shift, hopping, beta):
    """Energy-weighted product of the Bethe-lattice DOS and the Fermi distribution."""
    weight = bethe_fermi(energy, quasipart, shift, hopping, beta)
    return weight * energy
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bethe_filling_zeroT(fermi_energy, hopping):
    """Average particle count at zero temperature for the semicircular
    (Bethe lattice) density of states.

    The Fermi energy is clipped to the band edges [-2*hopping, 2*hopping].
    """
    e_f = np.asarray(fermi_energy).clip(-2 * hopping, 2 * hopping)
    filling = 0.5
    filling = filling + e_f / 2 * bethe_lattice(e_f, hopping)
    filling = filling + np.arcsin(e_f / (2 * hopping)) / np.pi
    return filling
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bethe_findfill_zeroT(particles, orbital_e, hopping):
    """Fermi energy giving the requested particle count for a set of
    (possibly non-degenerate) semicircular Bethe-lattice bands."""
    assert 0. <= particles <= len(orbital_e)

    def excess(e_f):
        # total filling of all orbitals minus the target particle number
        total = np.sum([bethe_filling_zeroT(e_f - e_m, t)
                        for t, e_m in zip(hopping, orbital_e)])
        return total - particles

    return fsolve(excess, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bethe_find_crystalfield(populations, hopping):
    """Orbital energies that make each orbital carry the requested population."""
    def mismatch(orbital_energies):
        # per-orbital difference between the resulting and desired filling
        return [bethe_filling_zeroT(-em, tz) - pop
                for em, tz, pop in zip(orbital_energies, hopping, populations)]

    return fsolve(mismatch, np.zeros(len(populations)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_var_from_string(item):
    """Resolve a dotted resource path to the object it names.

    Splits ``item`` into module and variable parts; imports the module when
    one is given, otherwise looks the name up in this module's globals.
    """
    modname, varname = _split_mod_var_names(item)
    if modname:
        # NOTE(review): level=-1 is the Python 2 "try relative, then absolute"
        # import search and is invalid on Python 3 — confirm the target runtime.
        mod = __import__(modname, globals(), locals(), [varname], -1)
        return getattr(mod, varname)
    else:
        return globals()[varname]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_list(reclist):
    """ Return list of resources that have access_controller defined. """
    guarded = []
    for entry in reclist:
        for resource in _handle_resource_setting(entry):
            if resource.access_controller:
                guarded.append(resource)
    return guarded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ensure_content_type():
    """ Add the bulldog content type to the database if it's missing.

    Returns the id of the existing or newly created ContentType row.
    """
    from django.contrib.contenttypes.models import ContentType
    try:
        row = ContentType.objects.get(app_label=PERM_APP_NAME)
    except ContentType.DoesNotExist:
        # not found: create a row whose name/app_label/model all use the app name
        row = ContentType(name=PERM_APP_NAME, app_label=PERM_APP_NAME, model=PERM_APP_NAME)
        row.save()
    return row.id
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_permission_description(permission_name):
""" Generate a descriptive string based on the permission name. For example: 'resource_Order_get' -> 'Can GET order' todo: add support for the resource name to have underscores """ |
parts = permission_name.split('_')
parts.pop(0)
method = parts.pop()
resource = ('_'.join(parts)).lower()
return 'Can %s %s' % (method.upper(), resource) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _populate_permissions(resources, content_type_id):
    """ Add all missing permissions to the database.

    Reads the existing codenames once and saves any permission name reported
    by a resource's access controller that is not yet stored.
    """
    from django.contrib.auth.models import Permission
    # read the whole auth_permission table into memory; a set gives O(1)
    # membership tests instead of scanning a list for every permission
    db_perms = {perm.codename for perm in Permission.objects.all()}
    for resource in resources:
        for perm in resource.access_controller.get_perm_names(resource):
            if perm not in db_perms:
                _save_new_permission(perm, content_type_id)
                # remember it so the same codename from another resource
                # is not inserted twice in this run
                db_perms.add(perm)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_default(self):
    """Overriden to take default database and save locally.

    The issue was that init_default() sets self.filename to None; however
    there can be no SQLite database without a corresponding file (not using
    *memory* here).  Should not keep default file open either (as it is in
    the API directory and shouldn't be messed by user), so the default is
    loaded and then saved under a fresh local filename.

    Raises RuntimeError when the class declares no default filename.
    """
    import f311
    if self.default_filename is None:
        raise RuntimeError("Class '{}' has no default filename".format(self.__class__.__name__))
    fullpath = f311.get_default_data_path(self.default_filename, class_=self.__class__)
    self.load(fullpath)
    # derive a non-clashing local filename from the default one
    name, ext = os.path.splitext(self.default_filename)
    new = a99.new_filename(os.path.join("./", name), ext)
    self.save_as(new)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _do_save_as(self, filename):
    """Closes connection, copies DB file, and opens again pointing to new file.

    **Note** if filename equals current filename, does nothing!
    """
    if filename != self.filename:
        self._ensure_filename()
        # the file must be closed before it can be safely copied
        self._close_if_open()
        shutil.copyfile(self.filename, filename)
        # reopen, now pointing at the copy
        self.__get_conn(filename=filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ensure_schema(self):
    """Create the database file and its schema when they do not exist yet."""
    self._ensure_filename()
    file_exists = os.path.isfile(self.filename)
    if not file_exists:
        self.create_schema()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_table_info(self, tablename):
    """Returns information about fields of a specific table.

    Raises RuntimeError when no field information can be obtained.

    **Note** Fields "caption" and "tooltip" are added to rows using
    information in moldb.gui_info
    """
    conn = self.__get_conn()
    ret = a99.get_table_info(conn, tablename)
    if len(ret) == 0:
        raise RuntimeError("Cannot get info for table '{}'".format(tablename))
    # augment each field row with GUI metadata, when available for this table
    more = self.gui_info.get(tablename)
    for row in ret.values():
        caption, tooltip = None, None
        if more:
            info = more.get(row["name"])
            if info:
                caption, tooltip = info
        row["caption"] = caption
        row["tooltip"] = tooltip
    return ret
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_conn(self, flag_force_new=False, filename=None):
    """Returns connection to database. Tries to return existing connection,
    unless flag_force_new.

    Args:
        flag_force_new: open a fresh connection even if one is already open
        filename: database file to connect to; defaults to ``self.filename``

    Returns: sqlite3.Connection object

    **Note** this is a private method because you can get a connection to
    any file, so it has to be used in the right moment
    """
    flag_open_new = flag_force_new or not self._conn_is_open()
    if flag_open_new:
        if filename is None:
            filename = self.filename
        # funny that __get_conn() calls _get_conn() but that's it
        conn = self._get_conn(filename)
        # cache the new connection for subsequent calls
        self._conn = conn
    else:
        conn = self._conn
    return conn
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten_multidict(multidict):
    """Collapse a ``MultiDict`` into a plain dict.

    Keys whose value list holds more than one item keep the list;
    all other keys are mapped to their single value.
    """
    flat = {}
    for key, values in multidict.iterlists():
        flat[key] = values if len(values) > 1 else values[0]
    return flat
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __setitem(self, chunk, key, keys, value, extend=False):
    """Helper function to fill up the dictionary.

    Recursively walks the remaining path components ``keys`` and writes
    ``value`` at the leaf, creating intermediate lists/dicts as needed.
    A key containing ``']'`` is treated as a list index ('n]' means the
    last element); ``extend=True`` inserts/appends instead of overwriting.
    Returns the (possibly newly created) ``chunk``.
    """
    def setitem(chunk):
        # recurse while path components remain; otherwise store the value
        if keys:
            return self.__setitem(chunk, keys[0], keys[1:], value, extend)
        else:
            return value
    if key in ['.', ']']:
        # terminal marker key: assign the value directly
        chunk[key] = value
    elif ']' in key:  # list
        # strip the bracket; 'n]' denotes the last element (index -1)
        key = int(key[:-1].replace('n', '-1'))
        if extend:
            if chunk is None:
                chunk = [None, ]
            else:
                if not isinstance(chunk, list):
                    # promote a scalar to a one-element list before inserting
                    chunk = [chunk, ]
                if key != -1:
                    chunk.insert(key, None)
                else:
                    chunk.append(None)
        else:
            if chunk is None:
                chunk = [None, ]
        chunk[key] = setitem(chunk[key])
    else:  # dict
        if extend:
            if chunk is None:
                chunk = {}
                chunk[key] = None
                chunk[key] = setitem(chunk[key])
            elif key not in chunk:
                chunk[key] = None
                chunk[key] = setitem(chunk[key])
            else:
                if keys:
                    # deeper components remain: keep recursing into the dict
                    chunk[key] = setitem(chunk[key])
                else:
                    # leaf under extend: collect repeated values into a list
                    if not isinstance(chunk[key], list):
                        chunk[key] = [chunk[key], ]
                    chunk[key].append(None)
                    chunk[key][-1] = setitem(chunk[key][-1])
        else:
            if chunk is None:
                chunk = {}
            if key not in chunk:
                chunk[key] = None
            chunk[key] = setitem(chunk[key])
    return chunk
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set(self, key, value, extend=False, **kwargs):
    """Set ``key`` to ``value`` via the extended ``__setitem__``.

    ``extend`` and any extra keyword arguments are forwarded unchanged.
    """
    self.__setitem__(key, value, extend, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_default_database(reset: bool = False) -> GraphDatabaseInterface:
    """Creates and returns a default SQLAlchemy database interface to use.

    Arguments:
        reset (bool): Whether to reset the database if it happens to exist
            already (drops and recreates all tables).
    """
    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.pool import StaticPool
    Base = declarative_base()
    # StaticPool reuses a single connection, which suits a local SQLite file
    engine = sqlalchemy.create_engine("sqlite:///SpotifyArtistGraph.db", poolclass=StaticPool)
    Session = sessionmaker(bind=engine)
    dbi: GraphDatabaseInterface = create_graph_database_interface(
        sqlalchemy, Session(), Base, sqlalchemy.orm.relationship
    )
    if reset:
        Base.metadata.drop_all(engine)
    # create_all is a no-op for tables that already exist
    Base.metadata.create_all(engine)
    return dbi
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> SpotifyArtistNode:
    """ Returns a new `SpotifyArtistNode` instance with the given index and name.

    Arguments:
        index (int): The index of the node to create.
        name (str): The name of the node to create.
        external_id (Optional[str]): The external ID of the node; when omitted,
            the Spotify client is queried and the first exact name match is used.
    """
    if external_id is None:
        graph: SpotifyArtistGraph = self._graph
        items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name)
        for item in items:
            # take the first exact name match; external_id stays None otherwise
            if item.name == name:
                external_id = item.external_id
                break
    return SpotifyArtistNode(graph=self._graph, index=index, name=name, external_id=external_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def access_token(self) -> str:
    """The access token stored within the requested token.

    A fresh token is requested first when the current one is within the
    refresh threshold of expiring.
    """
    refresh_deadline = time.time() + self._REFRESH_THRESHOLD
    if self._token_expires_at < refresh_deadline:
        self.request_token()
    return self._token["access_token"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request_token(self) -> None:
    """ Requests a new Client Credentials Flow authentication token from the
    Spotify API and stores it in the `token` property of the object.

    Also records the absolute expiry time in ``self._token_expires_at``.

    Raises:
        requests.HTTPError: If an HTTP error occurred during the request.
    """
    response: requests.Response = requests.post(
        self._TOKEN_URL,
        auth=HTTPBasicAuth(self._client_id, self._client_key),
        data={"grant_type": self._GRANT_TYPE},
        verify=True
    )
    response.raise_for_status()
    self._token = response.json()
    # "expires_in" is relative (seconds); store the absolute timestamp
    self._token_expires_at = time.time() + self._token["expires_in"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_artists_by_name(self, artist_name: str, limit: int = 5) -> List[NameExternalIDPair]:
    """ Returns zero or more artist name - external ID pairs that match the
    specified artist name.

    Arguments:
        artist_name (str): The artist name to search in the Spotify API.
        limit (int): The maximum number of results to return.

    Returns: Zero or more artist name - external ID pairs.

    Raises:
        requests.HTTPError: If an HTTP error occurred during the request.
        SpotifyClientError: If an invalid item is found.
    """
    response: requests.Response = requests.get(
        self._API_URL_TEMPLATE.format("search"),
        params={"q": artist_name, "type": "artist", "limit": limit},
        headers={"Authorization": "Bearer {}".format(self._token.access_token)}
    )
    # TODO: handle API rate limiting
    response.raise_for_status()
    # an empty body means no results at all
    if not response.text:
        return []
    result: List[NameExternalIDPair] = []
    data: List[Dict] = response.json()["artists"]["items"]
    for artist in data:
        artist = NameExternalIDPair(artist["name"].strip(), artist["id"].strip())
        if not artist.name or not artist.external_id:
            raise SpotifyClientError("Name or ID is missing")
        result.append(artist)
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def colors(lang="en"):
    """Return all dyes in the game, including localized names and color data.

    :param lang: The language to query the names for.
    :return: dict mapping color id to a dict with ``name``, ``base_rgb`` and
        per-material (``cloth``/``leather``/``metal``) appearance objects,
        each holding brightness, contrast, HSL components and precalculated
        ``rgb`` values.
    """
    local_cache = "colors.{}.json".format(lang)
    response = get_cached("colors.json", local_cache, params={"lang": lang})
    return response["colors"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def event_names(lang="en"):
    """This resource returns an unordered list of the localized event names
    for the specified language.

    :param lang: The language to query the names for.
    :return: A dictionary where the key is the event id and the value is the
        name of the event in the specified language.
    """
    cache_name = "event_names.%s.json" % lang
    data = get_cached("event_names.json", cache_name, params=dict(lang=lang))
    # dict comprehension instead of dict([...]) — same mapping, no throwaway list
    return {event["id"]: event["name"] for event in data}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def event_details(event_id=None, lang="en"):
    """Return static details about available events.

    :param event_id: Only list this event.
    :param lang: Show localized texts in the specified language.
    :return: a dict mapping event id to its details (name, level, map_id,
        flags and location geometry); when ``event_id`` is given, only that
        event's details dict is returned (or ``None`` when unknown).
    """
    params = {"lang": lang}
    if event_id:
        params["event_id"] = event_id
        cache_name = "event_details.{}.{}.json".format(event_id, lang)
    else:
        cache_name = "event_details.{}.json".format(lang)
    events = get_cached("event_details.json", cache_name, params=params)["events"]
    if event_id:
        return events.get(event_id)
    return events
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def PhenomModel(self, r):
    """Fit to field map.

    A phenomenological fit by Ryan Bayes (Glasgow) to a field map generated
    by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated January
    30th, 2012. Not defined for r <= 0.

    Raises:
        ValueError: when ``r`` is not strictly positive.
    """
    if r <= 0:
        # give the caller a diagnostic instead of a bare ValueError
        raise ValueError("PhenomModel is undefined for r <= 0 (got {!r})".format(r))
    field = self.B0 + self.B1 * G4.m / r + self.B2 * math.exp(-self.H * r / G4.m)
    return field
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_dir(self, dir_):
    """Sets directory, auto-loads, updates all GUI contents."""
    self.__lock_set_dir(dir_)
    self.__lock_auto_load()
    self.__lock_update_table()
    # refresh the info panel and title to reflect the new directory
    self.__update_info()
    self.__update_window_title()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __update_info(self):
    """Updates "visualization options" and "file info" areas.

    Rebuilds ``self.__vis_classes`` and the visualization list widget
    according to how many files are currently selected, then refreshes the
    summary label and info text box.
    """
    from f311 import explorer as ex
    import f311
    t = self.tableWidget
    z = self.listWidgetVis
    z.clear()
    classes = self.__vis_classes = []
    propss = self.__lock_get_current_propss()
    npp = len(propss)
    s0, s1 = "", ""
    if npp == 1:
        # single selection: offer every Vis class suitable for this file
        p = propss[0]
        # Visualization options
        if p.flag_scanned:
            if isinstance(p.f, f311.DataFile):
                classes.extend(f311.get_suitable_vis_classes(p.f))
                if ex.VisPrint in classes:
                    classes.remove(ex.VisPrint)
            if p.flag_text:
                # This is an exception, since "txt" is not a Vis descendant.
                # This will be properly handled in __visualize()
                classes.append("txt")
        for x in classes:
            if x == "txt":
                text = "View plain text"
            else:
                text = x.action
                text += " ("+x.__name__+")"
            item = QListWidgetItem(text)
            z.addItem(item)
        # File info
        s0 = p.get_summary()
        s1 = p.get_info()
    elif npp >= 2:
        # multiple selection: only collective visualizations make sense
        s0 = "{0:d} selected".format(npp)
        ff = [p.f for p in propss]
        flag_spectra = all([isinstance(f, f311.FileSpectrum) for f in ff])
        # gambiarra to visualize several PFANT .mod files
        has_pyfant = False
        try:
            import pyfant
            has_pyfant = True
        except:
            pass
        flag_mod = False
        if has_pyfant:
            flag_mod = all([isinstance(f, pyfant.FileModBin) and len(f.records) > 1 for f in ff])
        if flag_spectra:
            z.addItem(QListWidgetItem("Plot spectra stacked"))
            classes.append("sta")
            z.addItem(QListWidgetItem("Plot spectra overlapped"))
            classes.append("ovl")
        elif flag_mod:
            # TODO plugin-based way to handle visualization of multiple selection
            z.addItem(QListWidgetItem("View model grid"))
            classes.append("modgrid")
    # File info
    self.labelSummary.setText(s0)
    self.textEditInfo.setPlainText(s1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value):
    """Validate presence using this field's own ``empty_values``.

    Raises ``ValidationError`` when a required field received an empty value.
    """
    if self.required and value in self.empty_values:
        raise ValidationError(self.error_messages['required'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self, value):
    """ Clean the data and validate the nested spec.

    Implementation is the same as for other fields but in addition, this
    will propagate the validation to the nested spec.
    """
    obj = self.factory.create(value)
    # todo: what if the field defines properties that have any of
    # these names:
    if obj:
        # strip factory bookkeeping attributes before returning the object
        del obj.fields
        del obj.alias
        del obj.validators
        del obj.required
        del obj.factory
    # do own cleaning first...
    self._validate_existence(obj)
    self._run_validators(obj)
    # ret = {}
    # for name in self.fields.keys():
    #     ret[name] = getattr(obj, name)
    # return ret
    return obj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(self, value, entity, request):
    """ Propagate to nested fields.

    :returns: data dictionary or ``None`` if no fields are present.
    """
    self._validate_existence(value)
    self._run_validators(value)
    # empty/None value: nothing to serialize, return it unchanged
    if not value:
        return value
    return self.factory.serialize(value, request)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run_validators(self, value):
    """ Execute all associated validators.

    Collects the messages of every failing validator and raises one
    ``ValidationError`` with all of them, so the caller sees every problem
    at once.
    """
    errors = []
    for validator in self.validators:
        try:
            validator(value)
        except ValidationError as e:  # "as" syntax works on Python 2.6+ and 3
            errors.extend(e.messages)
    if errors:
        raise ValidationError(errors)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all_active(self):
    """ Get all of the active messages ordered by the active_datetime.

    A message is active when ``active_datetime <= now <= inactive_datetime``.
    """
    now = timezone.now()
    return self.select_related().filter(active_datetime__lte=now,
                                        inactive_datetime__gte=now).order_by('active_datetime')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_word(self, word, size, color):
    '''Creates a surface that contains a word.

    Stores the rendered surface in ``self.rendered_word`` and its pixel
    dimensions (width, height) in ``self.word_size``.
    '''
    pygame.font.init()
    # None selects pygame's default font at the given point size
    font = pygame.font.Font(None, size)
    self.rendered_word = font.render(word, 0, color)
    self.word_size = font.size(word)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_word(self, position):
    '''Blits a rendered word on to the main display surface.

    Records the occupied rectangle in ``self.used_pos`` for later
    collision checks.
    '''
    posrectangle = pygame.Rect(position, self.word_size)
    self.used_pos.append(posrectangle)
    self.cloud.blit(self.rendered_word, position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collides(self, position, size):
    '''Returns True if a word of ``size`` placed at ``position`` collides
    with another plotted word.

    Fix: the ``size`` parameter was previously ignored in favour of
    ``self.word_size``; all visible callers pass ``self.word_size``, so
    behavior is unchanged for them.
    '''
    word_rect = pygame.Rect(position, size)
    # collidelistall returns the indices of every overlapping rect
    return bool(word_rect.collidelistall(self.used_pos))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand(self, delta_width, delta_height):
    '''Makes the cloud surface bigger. Maintains all word positions.'''
    temp_surface = pygame.Surface((self.width + delta_width, self.height + delta_height))
    (self.width, self.height) = (self.width + delta_width, self.height + delta_height)
    # copy the old cloud into the top-left corner of the larger surface
    temp_surface.blit(self.cloud, (0, 0))
    self.cloud = temp_surface
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def smart_cloud(self, input, max_text_size=72, min_text_size=12, exclude_words=True):
    '''Creates a word cloud using the input.
    Input can be a file, directory, or text.
    Set exclude_words to true if you want to eliminate words that only occur once.

    NOTE(review): uses ``basestring`` and print statements — Python 2 only.
    '''
    self.exclude_words = exclude_words
    if isdir(input):
        self.directory_cloud(input, max_text_size, min_text_size)
    elif isfile(input):
        text = read_file(input)
        self.text_cloud(text, max_text_size, min_text_size)
    elif isinstance(input, basestring):
        self.text_cloud(input, max_text_size, min_text_size)
    else:
        print 'Input type not supported.'
        print 'Supported types: String, Directory, .txt file'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def directory_cloud(self, directory, max_text_size=72, min_text_size=12, expand_width=50, expand_height=50, max_count=100000):
    '''Creates a word cloud using files from a directory.
    The color of the words correspond to the amount of documents the word occurs in.
    '''
    # font size per word from overall term frequency across the directory
    worddict = assign_fonts(tuplecount(read_dir(directory)), max_text_size, min_text_size, self.exclude_words)
    # plot the biggest (most frequent) words first
    sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
    colordict = assign_colors(dir_freq(directory))
    num_words = 0
    for word in sorted_worddict:
        self.render_word(word, worddict[word], colordict[word])
        if self.width < self.word_size[0]:
            #If the word is bigger than the surface, expand the surface.
            self.expand(self.word_size[0]-self.width, 0)
        elif self.height < self.word_size[1]:
            self.expand(0, self.word_size[1]-self.height)
        position = [randint(0, self.width-self.word_size[0]), randint(0, self.height-self.word_size[1])]
        #the initial position is determined
        loopcount = 0
        while self.collides(position, self.word_size):
            if loopcount > max_count:
                #If it can't find a position for the word, create a bigger cloud.
                self.expand(expand_width, expand_height)
                loopcount = 0
            position = [randint(0, self.width-self.word_size[0]), randint(0, self.height-self.word_size[1])]
            loopcount += 1
        self.plot_word(position)
        num_words += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def text_cloud(self, text, max_text_size=72, min_text_size=12, expand_width=50, expand_height=50, max_count=100000):
    '''Creates a word cloud using plain text.

    Words are colored randomly; font size reflects the word frequency.
    '''
    worddict = assign_fonts(tuplecount(text), max_text_size, min_text_size, self.exclude_words)
    # plot the biggest (most frequent) words first
    sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
    for word in sorted_worddict:
        self.render_word(word, worddict[word], (randint(0, 255), randint(0, 255), randint(0, 255)))
        if self.width < self.word_size[0]:
            #If the word is bigger than the surface, expand the surface.
            self.expand(self.word_size[0]-self.width, 0)
        elif self.height < self.word_size[1]:
            self.expand(0, self.word_size[1]-self.height)
        position = [randint(0, self.width-self.word_size[0]), randint(0, self.height-self.word_size[1])]
        loopcount = 0
        while self.collides(position, self.word_size):
            if loopcount > max_count:
                #If it can't find a position for the word, expand the cloud.
                self.expand(expand_width, expand_height)
                loopcount = 0
            position = [randint(0, self.width-self.word_size[0]), randint(0, self.height-self.word_size[1])]
            loopcount += 1
        self.plot_word(position)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def display(self):
    '''Displays the word cloud to the screen.

    Fix: the original assigned the screen surface to ``self.display``,
    which shadowed this very method and made a second call crash; the
    surface is now kept in a local variable.
    '''
    pygame.init()
    screen = pygame.display.set_mode((self.width, self.height))
    screen.blit(self.cloud, (0, 0))
    pygame.display.update()
    # block until the window is closed
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fermi_dist(energy, beta):
    """Fermi-Dirac distribution ``1 / (exp(beta*energy) + 1)``.

    The exponent is clipped to [-600, 600] to avoid overflow in ``exp``.
    """
    x = np.clip(np.asarray(beta * energy), -600, 600)
    return 1.0 / (np.exp(x) + 1.0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diagonalize(operator):
    """Diagonalize a single-site spin Hamiltonian.

    Returns the eigenvalues shifted so the ground state sits at zero,
    together with the eigenvectors as returned by ``LA.eigh``.
    """
    eig_values, eig_vecs = LA.eigh(operator)
    eig_values = eig_values - eig_values.min()
    return eig_values, eig_vecs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gf_lehmann(eig_e, eig_states, d_dag, beta, omega, d=None):
    """Outputs the Lehmann representation of the Green's function.

    omega has to be given, as Matsubara or real frequencies.

    Args:
        eig_e: 1D array of eigenenergies.
        eig_states: matrix whose columns are the eigenstates.
        d_dag: creation operator in the same basis as ``eig_states``.
        beta: inverse temperature.
        omega: frequency array (Matsubara or real).
        d: optional annihilation operator; when None the matrix elements of
           ``d_dag`` are squared instead.
    """
    ew = np.exp(-beta*eig_e)
    zet = ew.sum()  # partition function
    G = np.zeros_like(omega)
    # matrix elements <i|d_dag|j> in the eigenbasis
    basis_create = np.dot(eig_states.T, d_dag.dot(eig_states))
    if d is None:
        tmat = np.square(basis_create)
    else:
        tmat = np.dot(eig_states.T, d.T.dot(eig_states))*basis_create
    # thermal weight e^{-beta E_i} + e^{-beta E_j} for each transition
    tmat *= np.add.outer(ew, ew)
    # energy difference E_j - E_i entering each pole denominator
    gap = np.add.outer(-eig_e, eig_e)
    N = eig_e.size
    for i, j in product(range(N), range(N)):
        G += tmat[i, j] / (omega + gap[i, j])
    return G / zet
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expected_value(operator, eig_values, eig_states, beta):
    """Calculates the thermal average of an observable.

    It requires that states and operators are expressed in the same basis.
    The einsum contracts the Boltzmann weights with the diagonal matrix
    elements <n|operator|n> in the eigenbasis.
    """
    aux = np.einsum('i,ji,ji', np.exp(-beta*eig_values),
                    eig_states, operator.dot(eig_states))
    return aux / partition_func(beta, eig_values)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_plain_text(self):
    """Return this occurrence formatted as a list of strings."""
    raw = self.message if self.message is not None else [""]
    body_lines = raw if isinstance(raw, list) else [raw]
    where = ", line {}".format(self.line) if self.line else ""
    header = "{} found in file '{}'{}::".format(self.type.capitalize(), self.filename, where)
    return [header, "  <<"] + ["  " + body for body in body_lines] + ["  >>"]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_plain_text(self):
    """Return the plain-text lines of every occurrence, concatenated."""
    lines = []
    for occurrence in self.occurrences:
        lines += occurrence.get_plain_text()
    return lines
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def crunch_dir(name, n=50):
    """Shorten a path for display, keeping only its tail.

    Names longer than ``n + 3`` characters are abbreviated to ``"..."``
    followed by their last ``n`` characters; shorter names are returned
    unchanged.
    """
    if len(name) > n + 3:
        name = "..." + name[-n:]
    return name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_log_tab(self):
    """Adds element to pages and new tab containing a read-only log view."""
    # text_tab = "Log (Alt+&{})".format(len(self.pages)+1)
    text_tab = "Log"
    self.pages.append(MyPage(text_tab=text_tab))
    # ### Log tab
    te = self.textEdit_log = self.keep_ref(QTextEdit())
    te.setReadOnly(True)
    self.tabWidget.addTab(te, text_tab)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def keyPressEvent(self, evt):
    """This handles Ctrl+PageUp, Ctrl+PageDown, Ctrl+Tab, Ctrl+Shift+Tab
    to cycle through the tabs, wrapping around at both ends."""
    incr = 0
    if evt.modifiers() == Qt.ControlModifier:
        n = self.tabWidget.count()
        if evt.key() in [Qt.Key_PageUp, Qt.Key_Backtab]:
            incr = -1
        elif evt.key() in [Qt.Key_PageDown, Qt.Key_Tab]:
            incr = 1
        if incr != 0:
            new_index = self._get_tab_index() + incr
            # wrap around the first/last tab
            if new_index < 0:
                new_index = n - 1
            elif new_index >= n:
                new_index = 0
            self.tabWidget.setCurrentIndex(new_index)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _on_changed(self):
"""Slot for changed events""" |
page = self._get_page()
if not page.flag_autosave:
page.flag_changed = True
self._update_gui_text_tabs() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_gui_text_tabs(self):
"""Iterates through pages to update tab texts""" |
for index, page in enumerate(self.pages):
self.tabWidget.setTabText(index, "{} (Alt+&{}){}".format(page.text_tab, index+1, (" (changed)" if page.flag_changed else ""))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __generic_save(self):
"""Returns False if user has cancelled a "save as" operation, otherwise True.""" |
    page = self._get_page()
    f = page.editor.f
    # No file attached to the current editor: nothing to do, report success.
    if not f:
        return True
    # Refuse to persist an invalid file, but still return True so the
    # caller does not mistake this for a cancelled "save as" dialog.
    if not page.editor.flag_valid:
        a99.show_error("Cannot save, {0!s} has error(s)!".format(f.description))
        return True
    if f.filename:
        # File already has a name: save in place.  NOTE(review):
        # save_as() is called with no argument here -- presumably it
        # falls back to f.filename; confirm in the file class.
        f.save_as()
        self.add_log("Saved '{}'".format(f.filename))
        page.flag_changed = False
        self._update_gui_text_tabs()
        if hasattr(page.editor, "update_gui_label_fn"):
            page.editor.update_gui_label_fn()  # duck typing
        return True
    else:
        # Never saved before: delegate to the "save as" dialog, whose
        # return value reports whether the user cancelled.
        return self.__generic_save_as()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __generic_save_as(self):
"""Returns False if user has cancelled operation, otherwise True.""" |
    page = self._get_page()
    # No file attached: nothing to save, report success.
    if not page.editor.f:
        return True
    # Seed the dialog with the current filename, or build a default path
    # in the most recently used directory (save dir, then load dir, then
    # the current directory).
    if page.editor.f.filename:
        d = page.editor.f.filename
    else:
        d = os.path.join(self.save_dir if self.save_dir is not None \
            else self.load_dir if self.load_dir is not None \
            else ".", page.editor.f.default_filename)
    new_filename = QFileDialog.getSaveFileName(self, page.make_text_saveas(), d, page.wild)[0]
    if new_filename:
        # Remember the chosen directory for the next "save as".
        self.save_dir, _ = os.path.split(str(new_filename))
        page.editor.f.save_as(str(new_filename))
        page.flag_changed = False
        self._update_gui_text_tabs()
        page.editor.update_gui_label_fn()
        return True
    # An empty filename means the user cancelled the dialog.
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_nodes_sequentially(self, nodes):
""" Start the nodes sequentially without forking. Return set of nodes that were actually started. """ |
started_nodes = set()
for node in copy(nodes):
started = self._start_node(node)
if started:
started_nodes.add(node)
# checkpoint cluster state
self.repository.save_or_update(self)
return started_nodes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_nodes_parallel(self, nodes, max_thread_pool_size):
    """ Start the nodes using a pool of multiprocessing threads for speed-up.

    :return: set of nodes that were actually started.
    """
    # Create one thread for each node to start
    thread_pool_size = min(len(nodes), max_thread_pool_size)
    thread_pool = Pool(processes=thread_pool_size)
    log.debug("Created pool of %d threads", thread_pool_size)
    # Pressing Ctrl+C flips this flag, which in turn stops the main loop
    # down below.  NOTE(fix): the flag is a one-element list used as a
    # mutable cell -- a plain `keep_running = False` inside the handler
    # would only rebind a local name and never be seen by the polling
    # loop (this code base predates `nonlocal`).
    keep_running = [True]
    def sigint_handler(signal, frame):
        """
        Makes sure the cluster is saved, before the sigint results in
        exiting during node startup.
        """
        log.error(
            "Interrupted: will save cluster state and exit"
            " after all nodes have started.")
        keep_running[0] = False
    # intercept Ctrl+C
    with sighandler(signal.SIGINT, sigint_handler):
        result = thread_pool.map_async(self._start_node, nodes)
        while not result.ready():
            result.wait(1)
            # check if Ctrl+C was pressed
            if not keep_running[0]:
                log.error("Aborting upon user interruption ...")
                # FIXME: `.close()` will keep the pool running until all
                # nodes have been started; should we use `.terminate()`
                # instead to interrupt node creation as soon as possible?
                thread_pool.close()
                thread_pool.join()
                self.repository.save_or_update(self)
                # FIXME: should raise an exception instead!
                sys.exit(1)
    # keep only nodes that were successfully started
    return set(node for node, ok
               in itertools.izip(nodes, result.get()) if ok)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _start_node(node):
    """ Start the given node VM.

    :return: bool -- True on success, False otherwise
    """
    log.debug("_start_node: working on node `%s`", node.name)
    # FIXME: the following check is not optimal yet. When a node is still
    # in a starting state, it will start another node here, since the
    # `is_alive` method will only check for running nodes (see issue #13)
    if node.is_alive():
        log.info("Not starting node `%s` which is "
                 "already up&running.", node.name)
        return True
    try:
        node.start()
    except Exception as err:
        log.exception("Could not start node `%s`: %s -- %s",
                      node.name, err, err.__class__)
        return False
    log.info("Node `%s` has been started.", node.name)
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all_nodes(self):
    """Returns a list of all nodes in this cluster as a mixed list of
    different node kinds.

    :return: list of :py:class:`Node`
    """
    # A flat comprehension replaces the previous
    # `reduce(operator.add, nodes, list())`, which rebuilt the
    # accumulator list at every step (quadratic) and required a special
    # case for the empty cluster.
    return [node for kind_nodes in self.nodes.values() for node in kind_nodes]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_frontend_node(self):
"""Returns the first node of the class specified in the configuration file as `ssh_to`, or the first node of the first class in alphabetic order. :return: :py:class:`Node` :raise: :py:class:`elasticluster.exceptions.NodeNotFound` if no valid frontend node is found """ |
    if self.ssh_to:
        if self.ssh_to in self.nodes:
            cls = self.nodes[self.ssh_to]
            if cls:
                return cls[0]
            else:
                # Preferred class exists but holds no nodes: warn and
                # fall through to the default selection loop below.
                log.warning(
                    "preferred `ssh_to` `%s` is empty: unable to "
                    "get the choosen frontend node from that class.",
                    self.ssh_to)
        else:
            # An unknown class name is a configuration error.
            raise NodeNotFound(
                "Invalid ssh_to `%s`. Please check your "
                "configuration file." % self.ssh_to)
    # If we reach this point, the preferred class was empty. Pick
    # one using the default logic.
    for cls in sorted(self.nodes.keys()):
        if self.nodes[cls]:
            return self.nodes[cls][0]
    # Uh-oh, no nodes in this cluster.
    raise NodeNotFound("Unable to find a valid frontend: "
                       "cluster has no nodes!")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new(self, kind, **extra):
    """Return a fresh host name for a new node of kind ``kind``.

    The per-kind index is taken from the pool of recycled indices when
    one is available; otherwise the per-kind counter is bumped.  The
    name is produced by ``{}``-interpolating ``self.pattern`` with
    ``kind``, ``index`` and any extra keyword arguments.
    """
    recycled = self._free[kind]
    if recycled:
        idx = recycled.pop()
    else:
        self._top[kind] += 1
        idx = self._top[kind]
    return self._format(self.pattern, kind=kind, index=idx, **extra)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def use(self, kind, name):
    """ Mark a node name as used. """
    try:
        index = int(self._parse(name)['index'], 10)
    except ValueError:
        log.warning(
            "Cannot extract numerical index"
            " from node name `%s`!", name)
        return
    # Remove the index from the recyclable pool, if it was there.
    self._free[kind].discard(index)
    top = self._top[kind]
    if index > top:
        # Every index between the old top and this one is implicitly free.
        self._free[kind].update(range(top + 1, index))
        self._top[kind] = index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def free(self, kind, name):
    """ Mark a node name as no longer in use.

    The index extracted from ``name`` goes back to the per-kind pool of
    recyclable indices; unparsable names are silently ignored.
    """
    try:
        index = int(self._parse(name)['index'], 10)
    except ValueError:
        # ignore failures in self._parse()
        return
    self._free[kind].add(index)
    assert index <= self._top[kind]
    if index == self._top[kind]:
        self._top[kind] -= 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_alive(self):
    """Checks if the current node is up and running in the cloud.

    Only the status reported by the cloud interface is consulted, so a
    node may count as running before it is ready to accept ssh logins.
    """
    # A node that was never started has no instance id at all.
    if not self.instance_id:
        return False
    running = False
    try:
        log.debug("Getting information for instance %s",
                  self.instance_id)
        running = self._cloud_provider.is_instance_running(
            self.instance_id)
    except Exception as ex:
        # Provider errors are treated as "not running" rather than fatal.
        log.debug("Ignoring error while looking for vm id %s: %s",
                  self.instance_id, str(ex))
    if running:
        log.debug("node `%s` (instance id %s) is up and running",
                  self.name, self.instance_id)
        self.update_ips()
    else:
        log.debug("node `%s` (instance id `%s`) still building...",
                  self.name, self.instance_id)
    return running
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self, keyfile=None):
"""Connect to the node via ssh using the paramiko library. :return: :py:class:`paramiko.SSHClient` - ssh connection or None on failure """ |
    ssh = paramiko.SSHClient()
    # Unknown hosts are accepted automatically; known keys may be
    # preloaded from `keyfile` below.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if keyfile and os.path.exists(keyfile):
        ssh.load_host_keys(keyfile)
    # Try connecting using the `preferred_ip`, if
    # present. Otherwise, try all of them and set `preferred_ip`
    # using the first that is working.
    ips = self.ips[:]
    # This is done in order to "sort" the IPs and put the preferred_ip first.
    if self.preferred_ip:
        if self.preferred_ip in ips:
            ips.remove(self.preferred_ip)
        else:
            # Preferred is changed?
            # NOTE(review): `ips[0]` raises IndexError when `self.ips`
            # is empty -- presumably a node always has at least one IP;
            # confirm.
            log.debug("IP %s does not seem to belong to %s anymore. Ignoring!", self.preferred_ip, self.name)
            self.preferred_ip = ips[0]
    for ip in itertools.chain([self.preferred_ip], ips):
        # `preferred_ip` may still be None/empty; skip blank entries.
        if not ip:
            continue
        try:
            log.debug("Trying to connect to host %s (%s)",
                      self.name, ip)
            addr, port = parse_ip_address_and_port(ip, SSH_PORT)
            ssh.connect(str(addr),
                        username=self.image_user,
                        allow_agent=True,
                        key_filename=self.user_key_private,
                        timeout=Node.connection_timeout,
                        port=port)
            log.debug("Connection to %s succeeded on port %d!", ip, port)
            # Cache the working address so future connects try it first.
            if ip != self.preferred_ip:
                log.debug("Setting `preferred_ip` to %s", ip)
                self.preferred_ip = ip
            # Connection successful.
            return ssh
        except socket.error as ex:
            log.debug("Host %s (%s) not reachable: %s.",
                      self.name, ip, ex)
        except paramiko.BadHostKeyException as ex:
            log.error("Invalid host key: host %s (%s); check keyfile: %s",
                      self.name, ip, keyfile)
        except paramiko.SSHException as ex:
            log.debug("Ignoring error %s connecting to %s",
                      str(ex), self.name)
    # Every candidate address failed.
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_item(self, path_, value):
    """Assign ``value`` at ``path_`` and immediately persist the file."""
    section, remainder = self._get_section(path_)
    key = remainder[-1]
    section[key] = value
    self.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all_subslices(itr):
""" generates every possible slice that can be generated from an iterable """ |
    assert iterable(itr), 'generators.all_subslices only accepts iterable arguments, not {}'.format(itr)
    if not hasattr(itr, '__len__'): # if itr isnt materialized, make it a deque
        itr = deque(itr)
    len_itr = len(itr)
    # One pass per possible starting offset; only the position matters,
    # the element bound by enumerate is discarded.
    for start,_ in enumerate(itr):
        d = deque()
        # Grow a window from `start` towards the end, yielding each
        # intermediate prefix as an immutable tuple.
        for i in islice(itr, start, len_itr): # how many slices for this round
            d.append(i)
            yield tuple(d)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_read_write(data_path_in, data_path_out, format_type, **kwargs):
    """ General function to read, format, and write data.

    Parameters
    ----------
    data_path_in : str
        Path to the file that will be read
    data_path_out : str
        Path of the file that will be output
    format_type : str
        Either 'dense', 'grid', 'stacked', or 'transect'.  Only 'dense'
        is currently implemented.
    kwargs
        Specific keyword args for given data types. See Notes

    Notes
    -----
    'Dense Parameters'

    non_label_cols : str
        Comma separated list of non label columns. ex. "lat, long, tree"
    sep : str
        The delimiter for the dense data. Default, ","
    na_values : int, float, str
        Value to be labeled as NA. Default, ""

    See misc.format_dense() for additional keyword parameters

    Raises
    ------
    KeyError
        If 'non_label_cols' is missing for dense data.
    NotImplementedError
        For recognized but not yet implemented formats.
    NameError
        For unrecognized format names.
    """
    if format_type == "dense":
        # Set dense defaults
        kwargs = _set_dense_defaults_and_eval(kwargs)
        # Try to parse non label columns appropriately
        try:
            nlc = [nm.strip() for nm in kwargs['non_label_cols'].split(",")]
            kwargs.pop('non_label_cols', None)
        except KeyError:
            raise KeyError("'non_label_cols' is a required keyword dense data")
        # Read data with dense specific keywords
        arch_data = pd.read_csv(data_path_in, sep=kwargs['delimiter'],
                                na_values=kwargs['na_values'])
        form_data = format_dense(arch_data, nlc, **kwargs)
    elif format_type in ("grid", "stacked", "transect"):
        # NOTE(fix): these branches used to `pass`, after which the
        # write below crashed with a confusing NameError on `form_data`.
        # Fail explicitly until these formats are implemented.
        raise NotImplementedError(
            "%s data format is not yet implemented" % format_type)
    else:
        raise NameError("%s is not a supported data format" % format_type)
    form_data.to_csv(data_path_out, index=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_dense(base_data, non_label_cols, **kwargs):
""" Formats dense data type to stacked data type. Takes in a dense data type and converts into a stacked data type. Parameters data : DataFrame The dense data non_label_cols : list A list of columns in the data that are not label columns label_col : str Name of the label column in the formatted data. Default, "label" count_col : str Name of the count column in the formatted data. Default, "count" nan_to_zero : bool Set all nans to zero. Default, False drop_na : bool Drop all columns with nan in the dataset. Default, False Returns ------- : DataFrame A formatted DataFrame in the stacked format Notes ----- Example of Dense Data conversion 'labelA': [1,0,3,4], 'labelB' : [3,2,1,4]}) column labelA labelB row 0 1 1 3 1 1 1 0 2 2 2 2 3 1 1 3 2 4 4 2 [4 rows x 4 columns] # labelA and labelB might be species names. 'row' and 'column' # are non-species names so pass these in as non_label_cols row column label count 0 1 1 labelA 1 1 1 1 labelB 3 2 2 1 labelA 0 3 2 1 labelB 2 4 1 2 labelA 3 5 1 2 labelB 1 6 2 2 labelA 4 7 2 2 labelB 4 [8 rows x 4 columns] """ |
    kwargs = _set_dense_defaults_and_eval(kwargs)
    # Stack data in columnar form.
    # Non-label columns become the index so that stack() melts only the
    # label columns; dropna=False keeps missing counts for the options
    # handled below.
    indexed_data = base_data.set_index(keys=non_label_cols)
    columnar_data = indexed_data.stack(dropna=False)
    columnar_data = columnar_data.reset_index()
    # Rename columns
    # After reset_index the stacked level is named 'level_<n>' (n = number
    # of index columns) and the value column is 0; rename both.
    num = len(non_label_cols)
    columnar_data.rename(columns={0: kwargs['count_col'], 'level_%i' % num:
        kwargs['label_col']}, inplace=True)
    # Set nans to zero?
    if kwargs['nan_to_zero']:
        ind = np.isnan(columnar_data[kwargs['count_col']])
        columnar_data.loc[ind, kwargs['count_col']] = 0
        columnar_data.reset_index(inplace=True, drop=True)
    # Drop nans?
    if kwargs['drop_na']:
        columnar_data = columnar_data.dropna(how="any")
        columnar_data.reset_index(inplace=True, drop=True)
    return columnar_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_dense_defaults_and_eval(kwargs):
""" Sets default values in kwargs if kwargs are not already given. Evaluates all values using eval Parameters kwargs : dict Dictionary of dense specific keyword args Returns ------- : dict Default, evaluated dictionary """ |
kwargs['delimiter'] = kwargs.get('delimiter', ',')
kwargs['na_values'] = kwargs.get('na_values', '')
kwargs['nan_to_zero'] = kwargs.get('nan_to_zero', False)
kwargs['drop_na'] = kwargs.get('drop_na', False)
kwargs['label_col'] = kwargs.get('label_col', 'label')
kwargs['count_col'] = kwargs.get('count_col', 'count')
for key, val in kwargs.iteritems():
try:
kwargs[key] = eval(val)
except:
kwargs[key] = val
return kwargs |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup):
""" Plots one or more stacked in subplots sharing same x-axis. Args: ss: list of Spectrum objects title=None: window title num_rows=None: (optional) number of rows for subplot grid. If not passed, num_rows will be the number of plots, and the number of columns will be 1. If passed, number of columns is calculated automatically. setup: PlotSpectrumSetup object """ |
    # Build the stacked-subplot figure, then hand control to the
    # Matplotlib event loop (blocks until the window is closed).
    draw_spectra_stacked(ss, title, num_rows, setup)
    plt.show()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_spectra_overlapped(ss, title=None, setup=_default_setup):
""" Plots one or more spectra in the same plot. Args: ss: list of Spectrum objects title=None: window title setup: PlotSpectrumSetup object """ |
    # Open a fresh figure; draw_spectra_overlapped() draws into the
    # current axes.  Blocks on plt.show() until the window is closed.
    plt.figure()
    draw_spectra_overlapped(ss, title, setup)
    plt.show()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_spectra_pieces_pdf(ss, aint=10, pdf_filename='pieces.pdf', setup=_default_setup):
""" Plots spectra, overlapped, in small wavelength intervals into a PDF file, one interval per page of the PDF file. Args: ss: list of Spectrum objects aint: wavelength interval for each plot pdf_filename: name of output file setup: PlotSpectrumSetup object **Note** overrides setup.fmt_xlabel; leaves y-labell and title blank """ |
    import f311.explorer as ex
    # Global limits are computed across *all* spectra so every page
    # shares the same vertical scale.
    xmin, xmax, ymin_, ymax, _, yspan = calc_max_min(ss)
    ymin = ymin_ if setup.ymin is None else setup.ymin
    num_pages = int(math.ceil((xmax-xmin)/aint)) # rightmost point may be left out...or not
    # num_spectra = len(ss)
    a99.format_BLB()
    # pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
    logger = a99.get_python_logger()
    for h in range(num_pages):
        fig = plt.figure()
        # Wavelength window [lambda0, lambda1] covered by this page.
        lambda0 = xmin+h*aint
        lambda1 = lambda0+aint
        logger.info("Printing page {0:d}/{1:d} ([{2:g}, {3:g}])".format(h+1, num_pages, lambda0, lambda1))
        for i, s in enumerate(ss):
            s_cut = ex.cut_spectrum(s, lambda0, lambda1)
            ax = plt.gca()
            ax.plot(s_cut.x, s_cut.y, label=s.title)
        if setup.flag_xlabel and setup.fmt_xlabel:
            plt.xlabel('Wavelength (interval: [{0:g}, {1:g}])'.format(lambda0, lambda1))
        xspan = lambda1-lambda0
        # _T presumably adds a small relative margin around the data
        # limits -- defined elsewhere in this module.
        ax.set_xlim([lambda0 - xspan * _T, lambda1 + xspan * _T])
        ax.set_ylim([ymin - yspan * _T, ymax + yspan * _T])
        if setup.flag_legend:
            leg = plt.legend(loc=0)
            a99.format_legend(leg)
        plt.tight_layout()
        pdf.savefig(fig)
        plt.close()
    # for fig in xrange(1, figure().number): ## will open an empty extra figure :(
    # pdf.savefig( fig )
    pdf.close()
    logger.info("File {0!s} successfully created.".format(pdf_filename))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup):
""" Plots spectra into a PDF file, one spectrum per page. Splits into several pieces of width Args: ss: list of Spectrum objects pdf_filename: name of output file """ |
    logger = a99.get_python_logger()
    # Shared axis limits across all pages for easy visual comparison.
    xmin, xmax, ymin_, ymax, xspan, yspan = calc_max_min(ss)
    ymin = ymin_ if setup.ymin is None else setup.ymin
    num_pages = len(ss)
    a99.format_BLB()
    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
    for i, s in enumerate(ss):
        title = s.title
        fig = plt.figure()
        plt.plot(s.x, s.y, c=_FAV_COLOR)
        # Labels/title are optional and formatted from the setup object.
        if setup.flag_xlabel and setup.fmt_xlabel:
            _set_plot(plt.xlabel, setup.fmt_xlabel, s)
        if setup.flag_ylabel and setup.fmt_ylabel:
            _set_plot(plt.ylabel, setup.fmt_ylabel, s)
        _set_plot(plt.title, setup.fmt_title, s)
        # _T adds a small relative margin around the data limits.
        plt.xlim([xmin-xspan*_T, xmax+xspan*_T])
        plt.ylim([ymin-yspan*_T, ymax+yspan*_T])
        plt.tight_layout()
        plt.subplots_adjust(top=0.94)  # workaround for cropped title
        logger.info("Printing page {0:d}/{1:d} ('{2!s}')".format(i+1, num_pages, title))
        pdf.savefig(fig)
        plt.close()
    pdf.close()
    logger.info("File {0!s} successfully created.".format(pdf_filename))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def repeal_target(self):
    """The resolution this resolution has repealed, or is attempting
    to repeal.

    Returns
    -------
    :class:`ApiQuery` of :class:`Resolution`

    Raises
    ------
    TypeError:
        If the resolution doesn't repeal anything.
    """
    if self.category != 'Repeal':
        raise TypeError("This resolution doesn't repeal anything")
    # The target's resolution id is the stored option offset by one.
    target_index = int(self.option) + 1
    return wa.resolution(target_index)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolution(self, index):
    """Resolution with a given index.

    Parameters
    ----------
    index : int
        Resolution index.  Global if this is the ``aionationstates.wa``
        object, local if this is ``aionationstates.ga`` or
        ``aionationstates.sc``.

    Returns
    -------
    :class:`ApiQuery` of :class:`Resolution`

    Raises
    ------
    :class:`NotFound`
        If a resolution with the requested index doesn't exist.
    """
    @api_query('resolution', id=str(index))
    async def result(_, root):
        elem = root.find('RESOLUTION')
        # NOTE(fix): compare to None explicitly.  ElementTree elements
        # are falsy when they have no children, so `if not elem:` could
        # mis-report an existing childless RESOLUTION element as missing.
        if elem is None:
            raise NotFound(f'No resolution found with index {index}')
        return Resolution(elem)
    return result(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def resolution_at_vote(self, root):
    """The proposal currently being voted on.

    Returns
    -------
    :class:`ApiQuery` of :class:`ResolutionAtVote`
    :class:`ApiQuery` of None
        If no resolution is currently at vote.
    """
    elem = root.find('RESOLUTION')
    # NOTE(fix): `if elem:` is False for an ElementTree element with no
    # children; test for absence explicitly so a childless element is
    # not silently treated as "no resolution at vote".
    if elem is not None:
        resolution = ResolutionAtVote(elem)
        resolution._council_id = self._council_id
        return resolution
    # Implicitly returns None when no RESOLUTION element is present.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def indent(self, levels, first_line=None):
    """Increase indentation by ``levels`` levels.

    ``levels`` and ``first_line`` are recorded on their respective
    stacks so the change can later be undone level by level.
    """
    updates = ((self._indentation_levels, levels),
               (self._indent_first_line, first_line))
    for stack, value in updates:
        stack.append(value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap(self, text, width=None, indent=None):
    """Return ``text`` wrapped to ``width`` and indented with ``indent``.

    By default:

    * ``width`` is ``self.options.wrap_length``
    * ``indent`` is ``self.indentation``.
    """
    if width is None:
        width = self.options.wrap_length
    if indent is None:
        indent = self.indentation
    return textwrap.fill(
        text,
        width=width,
        initial_indent=self.initial_indentation,
        subsequent_indent=indent)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Construct(self):
# pylint: disable-msg=C0103 """Construct a cuboid from a GDML file without sensitive detector""" |
    # Parse the GDML
    # Reads the geometry description and materializes the Geant4 volumes.
    self.gdml_parser.Read(self.filename)
    self.world = self.gdml_parser.GetWorldVolume()
    # Log the material table assembled during parsing, for debugging.
    self.log.info("Materials:")
    self.log.info(G4.G4Material.GetMaterialTable())
    # Return pointer to world volume
    return self.world
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Construct(self):
# pylint: disable-msg=C0103 """Construct nuSTORM from a GDML file""" |
    # Parse the GDML
    # NOTE(review): unlike the cuboid variant, Read() is never called
    # here -- presumably the GDML file was parsed earlier; confirm.
    self.world = self.gdml_parser.GetWorldVolume()
    # Create sensitive detector
    self.sensitive_detector = ScintSD()
    # Get logical volume for X view, then attach SD
    my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(1)
    assert my_lv.GetName() == "ScintillatorBarX"
    my_lv.SetSensitiveDetector(self.sensitive_detector)
    # Get logical volume for Y view, then attach SD
    my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(2)
    assert my_lv.GetName() == "ScintillatorBarY"
    my_lv.SetSensitiveDetector(self.sensitive_detector)
    # Volume 0 is the steel plane, which carries the magnetic field.
    my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(0)
    assert my_lv.GetName() == "SteelPlane"
    # field
    self.field_manager = G4.G4FieldManager()
    self.my_field = MagneticField.WandsToroidField(self.field_polarity)
    self.field_manager.SetDetectorField(self.my_field)
    self.field_manager.CreateChordFinder(self.my_field)
    # Second argument False -- presumably "do not propagate the field
    # manager to daughter volumes"; confirm against Geant4 docs.
    my_lv.SetFieldManager(self.field_manager, False)
    self.log.info("Materials:")
    self.log.info(G4.G4Material.GetMaterialTable())
    # Return pointer to world volume
    return self.world
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(bbllines:iter, *, profiling=False):
"""Yield lines of warnings and errors about input bbl lines. profiling -- yield also info lines about input bbl file. If bbllines is a valid file name, it will be read. Else, it should be an iterable of bubble file lines. """ |
    # A string argument may be either a file name or the bubble text
    # itself; disambiguate heuristically.
    if isinstance(bbllines, str):
        if os.path.exists(bbllines):  # filename containing bubble
            bbllines = utils.file_lines(bbllines)
        elif '\n' not in bbllines or '\t' not in bbllines:
            # probably a bad file name: let's rise the proper error
            bbllines = utils.file_lines(bbllines)
        else:  # bubble itself
            bbllines = bbllines.split('\n')
    bubble = tuple(bbllines)
    data = tuple(utils.line_data(line) for line in bubble)
    types = tuple(utils.line_type(line) for line in bubble)
    # launch profiling
    if profiling:
        ltype_counts = Counter(types)
        for ltype, count in ltype_counts.items():
            yield 'INFO {} lines of type {}'.format(count, ltype)
        yield 'INFO {} lines of payload'.format(
            ltype_counts['EDGE'] + ltype_counts['IN'] +
            ltype_counts['NODE'] + ltype_counts['SET'])
    # launch validation
    # Lines the parser could not classify are reported individually.
    for errline in (l for l, t in zip(bubble, types) if t == 'ERROR'):
        yield 'ERROR line is not bubble: "{}"'.format(errline)
    tree = BubbleTree.from_bubble_data(data)
    cc, subroots = tree.connected_components()
    # print('cc:', cc)
    # print('subroots:', subroots)
    if profiling:
        yield 'INFO {} top (power)nodes'.format(len(tree.roots))
        yield 'INFO {} connected components'.format(len(cc))
        yield 'INFO {} nodes are defined, {} are used'.format(
            ltype_counts['NODE'], len(tuple(tree.nodes())))
        yield 'INFO {} powernodes are defined, {} are used'.format(
            ltype_counts['SET'], len(tuple(tree.powernodes())))
    # Structural checks are delegated to the dedicated generators below.
    yield from inclusions_validation(tree)
    yield from mergeability_validation(tree)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inclusions_validation(tree:BubbleTree) -> iter: """Yield message about inclusions inconsistencies""" |
    # search for powernode overlapping
    for one, two in it.combinations(tree.inclusions, 2):
        # Names are expected to arrive already stripped of whitespace.
        assert len(one) == len(one.strip())
        assert len(two) == len(two.strip())
        one_inc = set(included(one, tree.inclusions))
        two_inc = set(included(two, tree.inclusions))
        common_inc = one_inc & two_inc
        # NOTE(review): `len(common_inc) == one_inc` compares an int with
        # a set, so this condition (and its twin below) is always False
        # and the two ERROR branches are dead.  `len(one_inc)` was likely
        # intended -- confirm the intended semantics before fixing, since
        # a naive length fix would also fire for disjoint leaf pairs.
        if len(common_inc) == one_inc:
            if not two in one_inc:
                yield ("ERROR inconsistency in inclusions: {} is both"
                       " included and not included in {}.".format(two, one))
        if len(common_inc) == two_inc:
            if not one in two_inc:
                yield ("ERROR inconsistency in inclusions: {} is both"
                       " included and not included in {}.".format(one, two))
        if len(common_inc) > 0:  # one and two are not disjoint
            if len(common_inc) == len(one_inc) or len(common_inc) == len(two_inc):
                # one is included in the other
                pass
            else:  # problem: some nodes are shared, but not all
                yield ("ERROR overlapping powernodes:"
                       " {} nodes are shared by {} and {},"
                       " which are not in inclusion."
                       " Shared nodes are {}".format(
                       len(common_inc), one, two, common_inc))
    for pwn in tree.powernodes():
        # search for empty powernodes
        if len(tree.inclusions[pwn]) == 0:
            yield ("WARNING empty powernode: {} is defined,"
                   " but contains nothing".format(pwn))
        # search for singleton powernodes
        if len(tree.inclusions[pwn]) == 1:
            yield ("WARNING singleton powernode: {} is defined,"
                   " but contains only {}".format(pwn, tree.inclusions[pwn]))
    # search for cycles
    nodes_in_cycles = utils.have_cycle(tree.inclusions)
    if nodes_in_cycles:
        yield ("ERROR inclusion cycle: the following {}"
               " nodes are involved: {}".format(
               len(nodes_in_cycles), set(nodes_in_cycles)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mergeability_validation(tree:BubbleTree) -> iter: """Yield message about mergables powernodes""" |
def gen_warnings(one, two, inc_message:str) -> [str]:
"Yield the warning for given (power)nodes if necessary"
nodetype = ''
if tree.inclusions[one] and tree.inclusions[two]:
nodetype = 'power'
elif tree.inclusions[one] or tree.inclusions[two]:
nodetype = '(power)'
if one > two: one, two = two, one
shared = set(tree.edges.get(one, ())) & set(tree.edges.get(two, ()))
if shared:
yield (f"WARNING mergeable {nodetype}nodes: {one} and {two}"
f" are {inc_message}, and share"
f" {len(shared)} neigbor{'s' if len(shared) > 1 else ''}")
for one, two in it.combinations(tree.roots, 2):
yield from gen_warnings(one, two, inc_message='both roots')
for parent, childs in tree.inclusions.items():
for one, two in it.combinations(childs, 2):
yield from gen_warnings(one, two, inc_message=f'in the same level (under {parent})') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guild_details(guild_id=None, name=None):
    """Return details about a guild.

    :param guild_id: The guild id to query for.
    :param name: The guild name to query for.

    Only one parameter is required; if both are set, the guild id takes
    precedence and a warning is issued.

    :raises ValueError: if neither ``guild_id`` nor ``name`` is given.
    :returns: dict with keys ``guild_id``, ``guild_name``, ``tag`` and,
        if present, ``emblem`` (background/foreground image ids, flip
        flags and color ids).
    """
    if guild_id and name:
        warnings.warn("both guild_id and name are specified, "
                      "name will be ignored")
    if guild_id:
        params = {"guild_id": guild_id}
        cache_name = "guild_details.%s.json" % guild_id
    elif name:
        params = {"guild_name": name}
        cache_name = "guild_details.%s.json" % name
    else:
        # ValueError is more precise than a bare Exception and remains
        # backward compatible for callers catching Exception.
        raise ValueError("specify either guild_id or name")
    return get_cached("guild_details.json", cache_name, params=params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chunks(stream, chunk_size, output_type=tuple):
    ''' returns chunks of a stream '''
    assert iterable(stream), 'chunks needs stream to be iterable'
    assert (isinstance(chunk_size, int) and chunk_size > 0) or callable(chunk_size), 'chunks needs chunk_size to be a positive int or callable'
    assert callable(output_type), 'chunks needs output_type to be callable'
    if callable(chunk_size):
        # chunk_size is acting as a separator function
        yield from chunk_on(stream, chunk_size, output_type)
    else:
        source = iter(stream)
        sentinel = object()
        # zip_longest over chunk_size references to the same iterator
        # yields fixed-size groups; the sentinel pads the last group.
        grouped = zip_longest(*([source] * chunk_size), fillvalue=sentinel)
        # Strip the sentinel padding from the final chunk only.
        pipeline = apply_to_last(
            grouped,
            lambda tail: tuple(item for item in tail if item is not sentinel)
        )
        if output_type is not tuple:
            pipeline = map(output_type, pipeline)
        yield from pipeline
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_charset(request):
    """ Extract charset from the content type """
    # Collapsed the redundant double truthiness test: CONTENT_TYPE may be
    # absent or empty; both cases yield None.
    content_type = request.META.get('CONTENT_TYPE')
    return extract_charset(content_type) if content_type else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_accept_header(accept):
    """ Parse the Accept header

    todo: memoize

    :returns: list with triples of (media_type, media_params, q_value),
        ordered by descending q value; for equal q values, wildcard
        ranges (``type/*`` and ``*/*``) sort after concrete types.
    """
    def specificity(media_type):
        """ Rank a media range: concrete (0) < type/* (1) < */* (2) """
        mtype, _, subtype = media_type.partition('/')
        if mtype == '*' and subtype == '*':
            return 2
        if subtype == '*':
            return 1
        return 0

    if not accept:
        return []
    result = []
    for media_range in accept.split(","):
        parts = media_range.split(";")
        media_type = parts.pop(0).strip()
        media_params = []
        q = 1.0
        for part in parts:
            (key, value) = part.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                media_params.append((key, value))
        result.append((media_type, tuple(media_params), q))
    # Python 3 fix: the original used the removed `cmp` builtin and the
    # Python-2-only list.sort(comparator) API (plus an `is not 0` identity
    # test). A key function reproduces the same stable ordering.
    result.sort(key=lambda item: (-item[2], specificity(item[0])))
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def in_session(self):
    """Provide a session scope around a series of operations.

    Yields a session, committing on success and rolling back on any
    failure. Unique-constraint violations are re-raised as
    DuplicateError, lost connections as DatabaseConnectionError; the
    session is always closed.
    """
    session = self.get_session()
    try:
        yield session
        session.commit()
    except IntegrityError:
        session.rollback()
        raise DuplicateError("Duplicate unique value detected!")
    except (OperationalError, DisconnectionError):
        session.rollback()
        self.close()
        # logger.warn() is deprecated in favor of logger.warning()
        logger.warning("Database Connection Lost!")
        raise DatabaseConnectionError()
    except Exception:
        # Unknown failure: roll back and let the original error propagate.
        session.rollback()
        raise
    finally:
        session.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info(self, req) -> ResponseInfo:
    """Respond with height=0 so Tendermint resyncs this app from the beginning."""
    # Reporting height 0 with an empty app hash makes Tendermint replay
    # the whole chain into this application on startup.
    resp = ResponseInfo()
    resp.version = "1.0"
    resp.last_block_app_hash = b''
    resp.last_block_height = 0
    return resp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_tx(self, tx) -> ResponseCheckTx:
    """Validate a tx before it enters the mempool.

    Anything other than the next expected counter value gets a non-zero
    code and is dropped.
    """
    expected = self.txCount + 1
    if decode_number(tx) != expected:
        return ResponseCheckTx(code=1)  # reject: not the next counter value
    return ResponseCheckTx(code=CodeTypeOk)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def query(self, req) -> ResponseQuery:
    """Return the last tx count"""
    # Encode the running counter for the client response.
    payload = encode_number(self.txCount)
    return ResponseQuery(
        code=CodeTypeOk,
        value=payload,
        height=self.last_block_height,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit(self) -> ResponseCommit:
    """Return the current encoded state value (big-endian tx count) to tendermint"""
    # Renamed local: `hash` shadowed the builtin of the same name.
    app_hash = struct.pack('>Q', self.txCount)
    return ResponseCommit(data=app_hash)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def track(context, file_names):
    """Keep track of each file in list file_names.

    Tracking does not create or delete the actual file; it only tells the
    version control system whether to maintain versions of the file.
    """
    repo = context.obj
    repo.find_repo_type()
    # Both git and hg use the same `add` verb.
    for file_name in file_names:
        repo.call([repo.vc_name, 'add', file_name])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def untrack(context, file_names):
    """Forget about tracking each file in the list file_names

    Tracking does not create or delete the actual file; it only tells the
    version control system whether to maintain versions of the file.
    """
    repo = context.obj
    repo.find_repo_type()
    # Command prefix that stops tracking a file, per VCS.
    forget_cmd = {
        'git': ['git', 'rm', '--cached'],
        'hg': ['hg', 'forget'],
    }
    for file_name in file_names:
        prefix = forget_cmd.get(repo.vc_name)
        if prefix is not None:
            repo.call(prefix + [file_name])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit(context, message, name):
    """Commit saved changes to the repository.

    message - commit message
    name - tag name (empty string means: do not tag)
    """
    repo = context.obj
    repo.find_repo_type()
    # Commit first, then optionally tag, per VCS.
    if repo.vc_name == 'git':
        repo.call(['git', 'commit', '-a', '-m', message])
        if name != '':
            repo.call(['git', 'tag', '-a', name, '-m', message])
    elif repo.vc_name == 'hg':
        repo.call(['hg', 'commit', '-m', message])
        if name != '':
            repo.call(['hg', 'tag', '-m', message, name])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def revert(context, file_names):
    """Revert each file in the list file_names back to version in repo"""
    repo = context.obj
    repo.find_repo_type()
    if not file_names:
        # Nothing requested: show the user what could be reverted.
        click.echo('No file names to checkout specified.')
        click.echo('The following have changed since the last check in.')
        context.invoke(status)
    # Command prefix that restores a file, per VCS.
    restore_cmd = {
        'git': ['git', 'checkout', '--'],
        'hg': ['hg', 'revert', '--no-backup'],
    }
    for file_name in file_names:
        prefix = restore_cmd.get(repo.vc_name)
        if prefix is not None:
            repo.call(prefix + [file_name])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def status(context):
    """See which files have changed, checked in, and uploaded"""
    repo = context.obj
    repo.find_repo_type()
    # Both git and hg expose the same `status` verb.
    repo.call([repo.vc_name, 'status'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff(context, file_name):
    """See changes that occured since last check in"""
    repo = context.obj
    repo.find_repo_type()
    # Diff invocation per VCS; git gets word-level, whitespace-insensitive output.
    diff_cmd = {
        'git': ['git', 'diff', '--color-words', '--ignore-space-change'],
        'hg': ['hg', 'diff'],
    }
    base = diff_cmd.get(repo.vc_name)
    if base is not None:
        repo.call(base + [file_name])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_repo_type(self):
    """Check for git or hg repository"""
    # Probe git first: rev-parse exits 0 inside a git work tree.
    # devnull=True suppresses the command's output.
    is_git = self.call(['git', 'rev-parse', '--is-inside-work-tree'],
                       devnull=True)
    if is_git != 0:
        if self.debug:
            click.echo('not git')
        # Not a git repo: fall back to probing mercurial.
        is_hg = self.call(['hg', '-q', 'stat'], devnull=True)
        if is_hg != 0:
            if self.debug:
                click.echo('not hg')
            # Neither git nor hg: abort the whole program.
            exit(1)
        else:
            self.vc_name = 'hg'
    # NOTE(review): nothing sets self.vc_name on the successful-git path —
    # presumably 'git' is the default assigned elsewhere (e.g. __init__);
    # confirm before relying on vc_name after this call.
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """The main entry point of the program"""
    # Parse command line arguments
    args = _cli_argument_parser().parse_args()
    # Configure logging before producing any output
    logging.basicConfig(
        level=args.loglevel,
        format="%(levelname)s %(message)s")
    console.display("Collecting documentation from files")
    collector_metrics = metrics.Metrics()
    docs = collector.parse(args.path, args.trace_parser,
                           metrics=collector_metrics)
    collector_metrics.display()
    console.display("Rendering documentation")
    try:
        # Infer the template from the output filename; default to JSON on stdout.
        template = renderer.template_from_filename(args.output) if args.output else "json"
        out = renderer.render(docs, template)
    except ValueError as err:
        logging.error(err)
        sys.exit(1)
    except TemplateNotFound as err:
        logging.error(
            "Template `{}` not found. Available templates are: {}".format(
                err.name, renderer.list_templates()))
        sys.exit(1)
    if args.output:
        console.display("Writing documentation to", args.output)
        with io.open(args.output, "w", encoding="utf-8") as fp:
            fp.write(out)
    else:
        print(out)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.