function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def preprocess_data(config, force=False): """ Ensures that all the necessary data have been inserted in db from the raw opendata files. :params config: A config dictionary. :params force: Whether to force rebuild or not. :return bool: Whether data have been built or not. """ # Check if a build is required get_session = database.init_db(config["database"], config["search_index"]) with get_session() as session: is_built = session.query(PublicTransport).count() > 0 and session.query(PostalCode).count() > 0 if is_built and not force: # No need to rebuild the database, skip return False # Otherwise, purge all existing data session.query(PublicTransport).delete() session.query(PostalCode).delete() # Build all opendata files LOGGER.info("Rebuilding data...") for preprocess in data_files.PREPROCESSING_FUNCTIONS: data_objects = preprocess() if not data_objects: raise flatisfy.exceptions.DataBuildError("Error with %s." % preprocess.__name__) with get_session() as session: session.add_all(data_objects) LOGGER.info("Done building data!") return True
Phyks/Flatisfy
[ 16, 6, 16, 4, 1492106528 ]
def get_band_edges(): """ Calculate the band edge locations relative to the vacuum level for a semiconductor. For a metal, returns the fermi level. Returns: edges (dict): {'up_cbm': , 'up_vbm': , 'dn_cbm': , 'dn_vbm': , 'efermi'} """ # Vacuum level energy from LOCPOT. locpot = Locpot.from_file('LOCPOT') evac = max(locpot.get_average_along_axis(2)) vasprun = Vasprun('vasprun.xml') bs = vasprun.get_band_structure() eigenvals = vasprun.eigenvalues efermi = vasprun.efermi - evac if bs.is_metal(): edges = {'up_cbm': None, 'up_vbm': None, 'dn_cbm': None, 'dn_vbm': None, 'efermi': efermi} elif bs.is_spin_polarized: up_cbm = min( [min([e[0] for e in eigenvals[Spin.up][i] if not e[1]]) for i in range(len(eigenvals[Spin.up]))]) - evac up_vbm = max( [max([e[0] for e in eigenvals[Spin.up][i] if e[1]]) for i in range(len(eigenvals[Spin.up]))]) - evac dn_cbm = min( [min([e[0] for e in eigenvals[Spin.down][i] if not e[1]]) for i in range(len(eigenvals[Spin.down]))]) - evac dn_vbm = max( [max([e[0] for e in eigenvals[Spin.down][i] if e[1]]) for i in range(len(eigenvals[Spin.down]))]) - evac edges = {'up_cbm': up_cbm, 'up_vbm': up_vbm, 'dn_cbm': dn_cbm, 'dn_vbm': dn_vbm, 'efermi': efermi} else: cbm = bs.get_cbm()['energy'] - evac vbm = bs.get_vbm()['energy'] - evac edges = {'up_cbm': cbm, 'up_vbm': vbm, 'dn_cbm': cbm, 'dn_vbm': vbm, 'efermi': efermi} return edges
henniggroup/MPInterfaces
[ 58, 46, 58, 8, 1435027973 ]
def plot_local_potential(axis=2, ylim=(-20, 0), fmt='pdf'):
    """
    Plot data from the LOCPOT file along any of the 3 primary axes.
    Useful for determining surface dipole moments and electric
    potentials on the interior of the material.

    Requires LOCPOT, CONTCAR and vasprun.xml in the current working
    directory.

    Args:
        axis (int): 0 = x, 1 = y, 2 = z
        ylim (tuple): minimum and maximum potentials for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib
            docs for options. The string "None" returns the axes
            object instead of saving a file.
    """
    ax = plt.figure(figsize=(16, 10)).gca()

    locpot = Locpot.from_file('LOCPOT')
    structure = Structure.from_file('CONTCAR')
    vd = VolumetricData(structure, locpot.data)
    abs_potentials = vd.get_average_along_axis(axis)
    # The vacuum level is taken as the maximum planar-averaged potential.
    vacuum_level = max(abs_potentials)

    vasprun = Vasprun('vasprun.xml')
    bs = vasprun.get_band_structure()
    if not bs.is_metal():
        # Band edges referenced to the vacuum level for annotation below.
        cbm = bs.get_cbm()['energy'] - vacuum_level
        vbm = bs.get_vbm()['energy'] - vacuum_level

    # Shift all potentials so vacuum sits at zero.
    potentials = [potential - vacuum_level for potential in abs_potentials]
    axis_length = structure.lattice.lengths[axis]
    positions = np.arange(0, axis_length, axis_length / len(potentials))

    ax.plot(positions, potentials, linewidth=2, color='k')

    ax.set_xlim(0, axis_length)
    ax.set_ylim(ylim[0], ylim[1])

    ax.set_xticklabels(
        [r'$\mathrm{%s}$' % tick for tick in ax.get_xticks()], size=20)
    ax.set_yticklabels(
        [r'$\mathrm{%s}$' % tick for tick in ax.get_yticks()], size=20)
    ax.set_xlabel(r'$\mathrm{\AA}$', size=24)
    ax.set_ylabel(r'$\mathrm{V\/(eV)}$', size=24)

    if not bs.is_metal():
        # Annotate the band edges and shade the bands/gap regions.
        ax.text(ax.get_xlim()[1], cbm, r'$\mathrm{CBM}$',
                horizontalalignment='right', verticalalignment='bottom',
                size=20)
        ax.text(ax.get_xlim()[1], vbm, r'$\mathrm{VBM}$',
                horizontalalignment='right', verticalalignment='top',
                size=20)
        ax.fill_between(ax.get_xlim(), cbm, ax.get_ylim()[1],
                        facecolor=plt.cm.jet(0.3), zorder=0, linewidth=0)
        ax.fill_between(ax.get_xlim(), ax.get_ylim()[0], vbm,
                        facecolor=plt.cm.jet(0.7), zorder=0, linewidth=0)

    if fmt == "None":
        return ax
    else:
        plt.savefig('locpot.{}'.format(fmt))
    plt.close()
henniggroup/MPInterfaces
[ 58, 46, 58, 8, 1435027973 ]
def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt="pdf"):
    """
    Plot a standard band structure with no projections. Requires
    EIGENVAL, OUTCAR and KPOINTS files in the current working directory.

    Args:
        ylim (tuple): minimum and maximum potentials for the plot's
            y-axis.
        draw_fermi (bool): whether or not to draw a dashed line at E_F.
        fmt (str): matplotlib format style. Check the matplotlib docs
            for options.
    """
    eigenval_lines = open("EIGENVAL").readlines()
    kpoints_lines = open("KPOINTS").readlines()
    # IBZ k-points used for SCF but not useful for plotting bands.
    # (Assumes a hybrid-style KPOINTS file where SCF points carry a
    # nonzero weight in column 4 -- TODO confirm.)
    ibz_kpoints = [k for k in kpoints_lines[3:] if int(k.split()[3]) != 0]
    # Lines containing hig-symmetry k-points (e.g. Gamma)
    vertex_lines = [k for k in kpoints_lines[3:] if len(k.split()) == 5]

    n_bands = int(eigenval_lines[5].split()[2])

    # Fermi energy, read straight from OUTCAR.
    with open("OUTCAR", "r") as outcar:
        for line in outcar:
            if "E-fermi" in line:
                efermi = float(line.split()[2])
                break

    # Spin-polarized runs list two eigenvalues per k-point line.
    spin_polarized = False
    if len(eigenval_lines[8].split()) == 5:
        spin_polarized = True

    bs_kpoints = []
    vertices = []
    # bands[j] = [spin-up energies, spin-down energies] per band j.
    bands = [[[], []] for x in range(n_bands)]

    # Skip the EIGENVAL header plus the weighted (SCF) k-point blocks.
    i = 7 + len(ibz_kpoints)*(n_bands+2)
    while i < len(eigenval_lines):
        kpt_coords = [float(x) for x in eigenval_lines[i].split()[:3]]
        # Tag k-points that coincide with a high-symmetry vertex with
        # the vertex label (appended as a 4th element).
        for kpt in vertex_lines:
            ref_coords = [float(x) for x in kpt.split()[:3]]
            if euclidean(kpt_coords, ref_coords) < 0.0001:
                kpt_coords.append(kpt.split()[-1])
                vertices.append(kpt_coords)
                break
        bs_kpoints.append(kpt_coords)
        # Collect all band energies at this k-point, shifted to E_F = 0.
        for j in range(n_bands):
            i += 1
            split_line = eigenval_lines[i].split()
            bands[j][0].append(float(split_line[1]) - efermi)
            if spin_polarized:
                bands[j][1].append(float(split_line[2]) - efermi)
        i += 2

    # Segment lengths between consecutive vertices; a zero length marks
    # a discontinuity in the k-path (two distinct labels back-to-back).
    path_lengths, kpt_distances = [], [0]
    discontinuity = False
    for i in range(1, len(vertices)):
        if discontinuity:
            path_lengths.append(0)
        else:
            path_lengths.append(euclidean(vertices[i][:3],vertices[i-1][:3]))
        if i < len(vertices)-1 and vertices[i][3] != vertices[i-1][3] and\
                vertices[i][3] != vertices[i+1][3] and not discontinuity:
            discontinuity = True
        else:
            discontinuity = False

    n_kpt_divs = len(bs_kpoints) / float(len(path_lengths))

    # Cumulative x-position of each k-point; discontinuous jumps
    # between differently-labeled vertices contribute zero distance.
    x, j = 0, 0
    for i in range(1, len(bs_kpoints)):
        if len(bs_kpoints[i]) == 4 and len(bs_kpoints[i-1]) == 4 and \
                bs_kpoints[i][3] != bs_kpoints[i-1][3]:
            x += 0
        else:
            x += euclidean(bs_kpoints[i][:3], bs_kpoints[i-1][:3])
        kpt_distances.append(x)

    ax = plt.figure(figsize=(11, 8.5)).gca()
    font = FontProperties()
    font.set_size(24)
    font.set_family("serif")
    large_font = font.copy()
    large_font.set_size(32)

    # Spin-up solid blue, spin-down dashed red.
    for b in bands:
        ax.plot(kpt_distances, b[0], 'b-')
        if spin_polarized:
            ax.plot(kpt_distances, b[1], 'r--')
    if draw_fermi:
        ax.plot([min(kpt_distances), max(kpt_distances)], [0, 0], 'k-')

    ax.set_xlim(min(kpt_distances), max(kpt_distances))
    ax.set_xticks([])

    # Label the high-symmetry points along the bottom of the plot;
    # merged labels ("A|B") mark path discontinuities.
    d = 0
    ax.text(d, ylim[0]*1.05, r"$\mathrm{%s}$" % vertices[0][-1],
            fontproperties=font, verticalalignment="top",
            horizontalalignment="center")
    for i in range(len(path_lengths)):
        d += path_lengths[i]
        if i < len(path_lengths)-1 and path_lengths[i+1] == 0 and\
                vertices[i+1][-1] != vertices[i+2][-1]:
            label = "{}|{}".format(vertices[i+1][-1], vertices[i+2][-1])
        else:
            label = vertices[i+1][-1]
        if path_lengths[i] != 0:
            ax.text(d, ylim[0]*1.05, r"$\mathrm{%s}$" % label,
                    fontproperties=font, verticalalignment="top",
                    horizontalalignment="center")
            ax.plot([d, d], [ylim[0], ylim[1]], 'k--')

    ax.set_ylim(ylim)
    ax.set_ylabel(r"$\mathrm{E - E_F (eV)}$", fontproperties=large_font)
    ax.set_yticklabels([int(t) for t in ax.get_yticks()],
                       fontproperties=font)
    plt.savefig("band_structure.{}".format(fmt))
henniggroup/MPInterfaces
[ 58, 46, 58, 8, 1435027973 ]
def plot_elt_projected_bands(ylim=(-5, 5), fmt='pdf'):
    """
    Plot separate band structures for each element where the size of the
    markers indicates the elemental character of the eigenvalue.

    Requires vasprun.xml (with projected eigenvalues) and KPOINTS in the
    current working directory.

    Args:
        ylim (tuple): minimum and maximum energies for the plot's
            y-axis.
        fmt (str): matplotlib format style. Check the matplotlib docs
            for options. The string "None" returns the axes object
            instead of saving a file.
    """
    vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
    bs = vasprun.get_band_structure('KPOINTS', line_mode=True)
    bspp = BSPlotterProjected(bs)
    # Re-wrap the tick labels in mathtext for consistent typography.
    ax = bspp.get_elt_projected_plots(ylim=ylim).gcf().gca()
    ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
    ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
    if fmt == "None":
        return ax
    else:
        plt.savefig('elt_projected_bands.{}'.format(fmt))
    plt.close()
henniggroup/MPInterfaces
[ 58, 46, 58, 8, 1435027973 ]
def get_effective_mass():
    """
    This function is in a beta stage, and its results are not
    guaranteed to be useful.

    Finds effective masses from a band structure, using parabolic
    fitting to determine the band curvature at the CBM
    for electrons and at the VBM for holes. This curvature enters
    the equation m* = (hbar)**2 / (d^2E/dk^2).

    To consider anisotropy, the k-space directions to the left and right
    of the CBM/VBM in the band diagram are returned separately.

    *NOTE* Only works for semiconductors and linemode calculations (non-
           spin polarized).
           >30 k-points per string recommended to obtain
           reliable curvatures.

    *NOTE* The parabolic fit can be quite sensitive to the number of
           k-points fit to, so it might be worthwhile adjusting N_KPTS
           to obtain some sense of the error bar.

    TODO: Warn user if CBM/VBM is at the edge of the diagram, and which
          direction (either left or right) was not actually fit to.
          Until fixed, this (most likely) explains any negative masses
          returned.

    Returns:
        Dictionary of the form
            {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
             'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
        where 'left' and 'right' indicate the reciprocal directions to
        the left and right of the extremum in the band structure.
    """
    H_BAR = 6.582119514e-16  # eV*s
    M_0 = 9.10938356e-31  # kg
    N_KPTS = 6  # Number of k-points included in the parabola.

    spin_up = Spin(1)

    band_structure = Vasprun('vasprun.xml').get_band_structure()

    # Locations of CBM and VBM in band_structure.bands
    cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]
    cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]

    vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]
    vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]

    # k-distances from the extremum and corresponding energies, split
    # into the branch to the 'left' (lower k-index) and 'right'
    # (higher k-index) of the CBM/VBM.
    k = {'electron': {'left': [], 'right': []},
         'hole': {'left': [], 'right': []}}
    E = {'electron': {'left': [], 'right': []},
         'hole': {'left': [], 'right': []}}

    # Cartesian coordinates of the extremum k-points (uses the private
    # _ccoords attribute of pymatgen Kpoint -- fragile across versions).
    e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords
    h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords

    # Left branch: the N_KPTS points up to and including the extremum.
    for n in range(-N_KPTS, 1):
        e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords
        h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords

        k['electron']['left'].append(
            ((e_coords[0] - e_ref_coords[0])**2 +
             (e_coords[1] - e_ref_coords[1])**2 +
             (e_coords[2] - e_ref_coords[2])**2)**0.5
        )
        k['hole']['left'].append(
            ((h_coords[0] - h_ref_coords[0])**2 +
             (h_coords[1] - h_ref_coords[1])**2 +
             (h_coords[2] - h_ref_coords[2])**2)**0.5
        )

        e_energy = band_structure.bands[
            spin_up][cbm_band_index][cbm_kpoint_index + n]
        h_energy = band_structure.bands[
            spin_up][vbm_band_index][vbm_kpoint_index + n]

        E['electron']['left'].append(e_energy)
        E['hole']['left'].append(h_energy)

    # Right branch: the N_KPTS points after the extremum.
    for n in range(1, 1 + N_KPTS):
        e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords
        h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords

        k['electron']['right'].append(
            ((e_coords[0] - e_ref_coords[0])**2 +
             (e_coords[1] - e_ref_coords[1])**2 +
             (e_coords[2] - e_ref_coords[2])**2)**0.5
        )
        k['hole']['right'].append(
            ((h_coords[0] - h_ref_coords[0])**2 +
             (h_coords[1] - h_ref_coords[1])**2 +
             (h_coords[2] - h_ref_coords[2])**2)**0.5
        )

        e_energy = band_structure.bands[
            spin_up][cbm_band_index][cbm_kpoint_index + n]
        h_energy = band_structure.bands[
            spin_up][vbm_band_index][vbm_kpoint_index + n]

        E['electron']['right'].append(e_energy)
        E['hole']['right'].append(h_energy)

    # 2nd order fits
    e_l_fit = np.poly1d(
        np.polyfit(k['electron']['left'], E['electron']['left'], 2))
    e_r_fit = np.poly1d(
        np.polyfit(k['electron']['right'], E['electron']['right'], 2))
    h_l_fit = np.poly1d(
        np.polyfit(k['hole']['left'], E['hole']['left'], 2))
    h_r_fit = np.poly1d(
        np.polyfit(k['hole']['right'], E['hole']['right'], 2))

    # Curvatures
    e_l_curvature = e_l_fit.deriv().deriv()[0]
    e_r_curvature = e_r_fit.deriv().deriv()[0]
    h_l_curvature = h_l_fit.deriv().deriv()[0]
    h_r_curvature = h_r_fit.deriv().deriv()[0]

    # Unit conversion
    # (holes get a sign flip so the reported mass is positive for a
    # downward-curving VBM)
    e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0
    e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0
    h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0
    h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0

    return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
            'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
henniggroup/MPInterfaces
[ 58, 46, 58, 8, 1435027973 ]
def get_fermi_velocities():
    """
    Calculates the fermi velocity of each band that crosses the fermi
    level, according to v_F = dE/(h_bar*dk).

    Requires vasprun.xml in the current working directory.

    Returns:
        fermi_velocities (list). The absolute values of the adjusted
            slopes of each band, in Angstroms/s.
    """
    vr = Vasprun('vasprun.xml')
    # eigenvalues = vr.eigenvalues
    bs = vr.get_band_structure()
    bands = bs.bands
    kpoints = bs.kpoints
    efermi = bs.efermi
    h_bar = 6.582e-16  # eV*s

    # Keep only bands whose energy range straddles the Fermi level.
    fermi_bands = []
    for spin in bands:
        for i in range(len(bands[spin])):
            if max(bands[spin][i]) > efermi > min(bands[spin][i]):
                fermi_bands.append(bands[spin][i])

    fermi_velocities = []
    for band in fermi_bands:
        for i in range(len(band)-1):
            # A crossing happens between k-points i and i+1 when the
            # band passes through E_F in either direction.
            if (band[i] < efermi < band[i+1]) or (
                    band[i] > efermi > band[i+1]):
                # NOTE(review): dk only uses the x and y components of
                # the k-point coordinates -- presumably a 2D-material
                # assumption; confirm before using for bulk systems.
                dk = np.sqrt((kpoints[i+1].cart_coords[0]
                              - kpoints[i].cart_coords[0])**2
                             + (kpoints[i+1].cart_coords[1]
                                - kpoints[i].cart_coords[1])**2)
                v_f = abs((band[i+1] - band[i]) / (h_bar * dk))
                fermi_velocities.append(v_f)

    return fermi_velocities  # Values are in Angst./s
henniggroup/MPInterfaces
[ 58, 46, 58, 8, 1435027973 ]
def __unicode__(self):
    """Human-readable label: the fortune's title followed by its author."""
    context = {
        'title': self.title,
        'author': self.author.username,
    }
    return _("%(title)s, from %(author)s") % context
n1k0/djortunes
[ 7, 1, 7, 4, 1257240065 ]
def get_absolute_url(self):
    "Retrieves the absolute django url of a fortune"
    published = self.pub_date
    url_kwargs = {
        'slug': self.slug,
        'year': published.year,
        'month': published.month,
        'day': published.day,
    }
    return ('fortune_detail', (), url_kwargs)
n1k0/djortunes
[ 7, 1, 7, 4, 1257240065 ]
def initialize(cfg):
    """Load plugins and register the entry-parser extension map on *cfg*.

    :param cfg: the configuration dict; gains an ``extensions`` key
        mapping file extension -> parser callable.
    """
    # import and initialize plugins
    plugin_utils.initialize_plugins(cfg['plugin_dirs'], cfg['load_plugins'])

    # entryparser callback is run here first to allow other
    # plugins register what file extensions can be used
    extensions = tools.run_callback(
        "entryparser",
        {'txt': blosxom_entry_parser},
        mappingfunc=lambda x, y: y,
        defaultfunc=lambda x: x)

    # go through the config.py and override entryparser extensions
    # (each value is a "module.path:callable_name" string)
    for ext, parser_module in cfg['entryparsers'].items():
        module, callable_name = parser_module.rsplit(':', 1)
        module = tools.importname(None, module)
        extensions[ext] = getattr(module, callable_name)

    # FIXME - this is a lousy place to store this
    cfg['extensions'] = extensions
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __init__(self, config, environ, data=None):
    """Sets configuration and environment and creates the Request
    object.

    :param config: dict containing the configuration variables.
    :param environ: dict containing the environment variables.
    :param data: dict containing data variables.
    """
    data = {} if data is None else data
    # Stamp the run-time data with the application identity/version.
    data['douglas_name'] = "Douglas"
    data['douglas_version'] = __version__

    self._config = config
    self._request = Request(config, environ, data)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def cleanup(self):
    """This cleans up Douglas after a run.

    This should be called when Douglas has done everything it needs
    to do before exiting.
    """
    # Log some useful stuff for debugging.
    log = logging.getLogger()
    final_response = self.get_response()
    log.debug('status = %s' % final_response.status)
    log.debug('headers = %s' % final_response.headers)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def get_response(self):
    """Return the Response object associated with this run's Request."""
    request = self._request
    return request.get_response()
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def run_render_one(self, url, headers): """Renders a single page from the blog. :param url: the url to render--this has to be relative to the base url for this blog. :param headers: True if you want headers to be rendered and False if not. """ self.initialize() config = self._request.get_configuration() if url.find("?") != -1: url = url[:url.find("?")] query = url[url.find("?")+1:] else: query = "" url = url.replace(os.sep, "/") response = tools.render_url(config, url, query) if headers: response.send_headers(sys.stdout) response.send_body(sys.stdout) print response.read() # we're done, clean up self.cleanup()
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def run_collectstatic(self): """Collects static files and copies them to compiledir""" # FIXME: rewrite using tools.get_static_files(cfg) cfg = self._request.get_configuration() self.initialize() # Copy over static files print 'Copying over static files ...' dst = os.path.join(cfg['compiledir'], 'static') if not os.path.exists(dst): os.makedirs(dst) def notifyfun(filename): print ' Copying {0}'.format(filename) # Copy over static_files_dirs files first static_files_dirs = cfg['static_files_dirs'] static_files_dirs.append(os.path.join(cfg['datadir'], '..', 'static')) for mem in static_files_dirs: tools.copy_dir(mem, dst, notifyfun=notifyfun) # Copy over themes static dirs for mem in os.listdir(cfg['themedir']): path = os.path.join(cfg['themedir'], mem, 'static') if os.path.exists(path): tools.copy_dir(path, dst, notifyfun=notifyfun) # We're done, clean up self.cleanup()
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __init__(self, environ=None, start_response=None, configini=None):
    """
    Make WSGI app for Douglas.

    :param environ: WSGI environment dict (stored for later use; may
        be None when the app is constructed before a request arrives).
    :param start_response: WSGI ``start_response`` callable (stored
        for later use; may be None).
    :param configini: Dict encapsulating information from a
        ``config.ini`` file or any other property file that will
        override the ``config.py`` file.
    """
    self.environ = environ
    self.start_response = start_response

    # Base configuration from config.py, optionally overridden by
    # the config.ini-style dict.
    self.config = import_config()
    if configini is not None:
        self.config.update(tools.convert_configini_values(configini))

    tools.setup_logging(self.config)

    # Load plugins and register entry parsers for this configuration.
    initialize(self.config)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __call__(self, env, start_response):
    """WSGI entry point: run Douglas and wrap the result as the
    iterable response body."""
    body = self.run_douglas(env, start_response)
    return [body]
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def douglas_app_factory(global_config, **local_config):
    """App factory for paste.

    :returns: WSGI application
    """
    # Merge global and local settings; also keep the original dicts
    # around under explicit keys for downstream consumers.
    conf = global_config.copy()
    conf.update(local_config)
    conf.update(dict(local_config=local_config, global_config=global_config))

    # Make the directory holding config.py importable, if given.
    if "configpydir" in conf:
        sys.path.insert(0, conf["configpydir"])

    return DouglasWSGIApp(configini=conf)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __init__(self, request, env):
    """Wraps an environment (which is a dict) and a request.

    :param request: the Request object for this request.
    :param env: the environment dict for this request.
    """
    dict.__init__(self)
    # Seed this dict with the environment and remember the owning
    # request for later lookups.
    self.update(env)
    self._request = request
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __init__(self, config, environ, data): """Sets configuration and environment. Creates the Response object which handles all output related functionality. :param config: dict containing configuration variables. :param environ: dict containing environment variables. :param data: dict containing data variables. """ # this holds configuration data that the user changes in # config.py self._configuration = config # this holds HTTP/CGI oriented data specific to the request # and the environment in which the request was created self._http = EnvDict(self, environ) # this holds run-time data which gets created and transformed # by douglas during execution if data is None: self._data = dict() else: self._data = data # this holds the input stream. initialized for dynamic # rendering in Douglas.run. for compiling there is no input # stream. self._in = StringIO() # copy methods to the Request object. self.read = self._in.read self.readline = self._in.readline self.readlines = self._in.readlines self.seek = self._in.seek self.tell = self._in.tell # this holds the FieldStorage instance. # initialized when request.get_form is called the first time self._form = None self._response = None # create and set the Response self.set_response(Response(self))
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def buffer_input_stream(self): """ Buffer the input stream in a StringIO instance. This is done to have a known/consistent way of accessing incomming data. For example the input stream passed by mod_python does not offer the same functionallity as ``sys.stdin``. """ # TODO: tests on memory consumption when uploading huge files pyhttp = self.get_http() winput = pyhttp['wsgi.input'] method = pyhttp['REQUEST_METHOD'] # there's no data on stdin for a GET request. douglas # will block indefinitely on the read for a GET request with # thttpd. if method != 'GET': try: length = int(pyhttp.get('CONTENT_LENGTH', 0)) except ValueError: length = 0 if length > 0: self._in.write(winput.read(length)) # rewind to start self._in.seek(0)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def get_response(self):
    """Hand back the ``Response`` instance attached to this request."""
    response = self._response
    return response
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def get_form(self):
    """Returns the form data submitted by the client.

    The form instance is built lazily on first access so the input
    stream is not consumed unless the form is actually needed.

    :returns: a ``cgi.FieldStorage`` instance.
    """
    form = self._form
    if form is None:
        form = self._getform()
        self._form = form
    return form
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def get_configuration(self):
    """Return the *actual* configuration dict.

    These are the values the user set in their ``config.py`` file.
    The dict itself is returned (not a copy), so mutating it affects
    all downstream processing.
    """
    configuration = self._configuration
    return configuration
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def get_data(self):
    """Return the *actual* run-time data dict.

    Douglas creates and transforms this dict during execution.  The
    dict itself is returned (not a copy), so mutating it affects all
    downstream processing.
    """
    data = self._data
    return data
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def add_data(self, d):
    """Merge *d* into the run-time data dict.

    New keys are added; existing keys are overridden by the values
    from *d*.
    """
    data = self._data
    data.update(d)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __getattr__(self, name):
    """Expose the private request state under friendly alias names.

    ``config``/``configuration``/``conf`` map to the configuration
    dict, ``data`` to the run-time data dict and ``http`` to the
    environment dict; anything else raises ``AttributeError``.
    """
    if name in ("config", "configuration", "conf"):
        return self._configuration
    if name == "data":
        return self._data
    if name == "http":
        return self._http
    raise AttributeError(name)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __init__(self, request):
    """Sets the ``Request`` object that leaded to this response.

    Creates a ``StringIO`` that is used as a output buffer, and
    re-exports its file-like methods directly on this object.
    """
    self._request = request
    out = StringIO()
    self._out = out
    self._headers_sent = False
    self.headers = {}
    self.status = "200 OK"

    # Delegate the file-like API straight to the underlying buffer.
    for method_name in ('close', 'flush', 'read', 'readline',
                        'readlines', 'seek', 'tell', 'write',
                        'writelines'):
        setattr(self, method_name, getattr(out, method_name))
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def set_status(self, status):
    """Sets the status code for this response.

    The status should be a valid HTTP response status.

    Examples:

    >>> resp.set_status("200 OK")
    >>> resp.set_status("404 Not Found")

    :param status: the status string.
    """
    self.status = status
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def add_header(self, key, value):
    """Populates the HTTP header with lines of text.

    Both key and value are stripped of surrounding whitespace.  A
    ``Status`` key (any case) updates the response status instead of
    the header dict.

    Example:

    >>> resp.add_header("Content-type", "text/plain")
    >>> resp.add_header("Content-Length", "10500")

    :raises ValueError: if the key contains a space or a colon.
    """
    key = key.strip()
    if ' ' in key or ':' in key:
        raise ValueError('There should be no spaces in header keys')
    value = value.strip()
    if key.lower() == "status":
        self.set_status(str(value))
    else:
        self.headers[key] = str(value)
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def send_headers(self, out):
    """Send HTTP Headers to the given output stream.

    .. Note::

        This prints the headers and then the ``\\n\\n`` that separates
        headers from the body.

    :param out: The file-like object to print headers to.
    """
    header_lines = ['%s: %s' % (hkey, self.headers[hkey])
                    for hkey in self.headers.keys()]
    out.write("Status: %s\n" % self.status)
    out.write('\n'.join(header_lines))
    out.write('\n\n')
    self._headers_sent = True
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def blosxom_handler(request):
    """This is the default blosxom handler.

    It calls the renderer callback to get a renderer.  If there is no
    renderer, it uses the blosxom renderer.

    It calls the pathinfo callback to process the path_info http
    variable.

    It calls the filelist callback to build a list of entries to
    display.

    It calls the prepare callback to do any additional preparation
    before rendering the entries.

    Then it tells the renderer to render the entries.

    :param request: the request object.
    """
    config = request.get_configuration()
    data = request.get_data()

    # go through the renderer callback to see if anyone else wants to
    # render.  this renderer gets stored in the data dict for
    # downstream processing.
    rend = tools.run_callback('renderer',
                              {'request': request},
                              donefunc=lambda x: x is not None,
                              defaultfunc=lambda x: None)

    if not rend:
        # get the renderer we want to use
        rend = config['renderer']

        # import the renderer
        rend = tools.importname('douglas.renderers', rend)

        # get the renderer object
        rend = rend.Renderer(request, config.get('stdoutput', sys.stdout))

    data['renderer'] = rend

    # generate the timezone variable
    data["timezone"] = time.tzname[time.localtime()[8]]

    # process the path info to determine what kind of blog entry(ies)
    # this is
    tools.run_callback('pathinfo',
                       {'request': request},
                       donefunc=lambda x: x is not None,
                       defaultfunc=blosxom_process_path_info)

    # call the filelist callback to generate a list of entries
    data['entry_list'] = tools.run_callback(
        'filelist',
        {'request': request},
        donefunc=lambda x: x is not None,
        defaultfunc=blosxom_file_list_handler)

    # figure out the blog-level mtime which is the mtime of the head
    # of the entry_list
    entry_list = data['entry_list']
    if isinstance(entry_list, list) and len(entry_list) > 0:
        mtime = entry_list[0].get('mtime', time.time())
    else:
        mtime = time.time()
    mtime_tuple = time.localtime(mtime)
    mtime_gmtuple = time.gmtime(mtime)

    data['latest_date'] = time.strftime('%a, %d %b %Y', mtime_tuple)

    # Make sure we get proper 'English' dates when using standards
    # (temporarily switch to the C locale for the standards-mandated
    # formats, then restore the user's locale).
    loc = locale.getlocale(locale.LC_ALL)
    locale.setlocale(locale.LC_ALL, 'C')

    data['latest_w3cdate'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                           mtime_gmtuple)
    data['latest_rfc822date'] = time.strftime('%a, %d %b %Y %H:%M GMT',
                                              mtime_gmtuple)

    # set the locale back
    locale.setlocale(locale.LC_ALL, loc)

    # we pass the request with the entry_list through the prepare
    # callback giving everyone a chance to transform the data.  the
    # request is modified in place.
    tools.run_callback('prepare', {'request': request})

    # now we pass the entry_list through the renderer
    entry_list = data['entry_list']
    renderer = data['renderer']

    if renderer and not renderer.rendered:
        if entry_list:
            renderer.set_content(entry_list)
        else:
            # FIXME - We should have a 404 template.  Instead, we're
            # going to fake the entry and use the entry template.
            data['bl_type'] = 'entry'
            renderer.add_header('Status', '404 Not Found')
            renderer.set_content(
                {'title': 'The page you are looking for is not available',
                 'body': 'Somehow I cannot find the page you want. '
                 'Go Back to <a href="{0}">{1}</a>?'.format(
                     config['base_url'], config['blog_title'])})
        renderer.render()

    elif not renderer:
        output = config.get('stdoutput', sys.stdout)
        output.write(
            'Content-Type: text/plain\n\n'
            'There is something wrong with your setup.\n'
            'Check your config files and verify that your '
            'configuration is correct.\n')
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def blosxom_file_list_handler(args):
    """This is the default handler for getting entries.

    It takes the request object in and figures out which entries based
    on the default behavior that we want to show and generates a list
    of EntryBase subclass objects which it returns.

    :param args: dict containing the incoming Request object

    :returns: the content we want to render
    """
    request = args["request"]

    data = request.get_data()
    config = request.get_configuration()

    # Choose the candidate files depending on whether this request is
    # for a list of entries, one entry, or nothing recognizable.
    if data['bl_type'] == 'entry_list':
        filelist = tools.get_entries(
            config, data['root_datadir'], int(config['depth']))
    elif data['bl_type'] == 'entry':
        filelist = [data['root_datadir']]
    else:
        filelist = []

    entrylist = [FileEntry(request, e, data["root_datadir"])
                 for e in filelist]

    # if we're looking at a set of archives, remove all the entries
    # that aren't in the archive
    if data.get("pi_yr"):
        # Build a YYYY[MM[DD]] prefix from whichever date parts the
        # path supplied.
        datestr = "%s%s%s" % (data["pi_yr"],
                              data.get("pi_mo", ""),
                              data.get("pi_da", ""))
        entrylist = [
            x for x in entrylist
            if (time.strftime("%Y%m%d%H%M%S", x["timetuple"])
                .startswith(datestr))]

    # Let plugins sort and then truncate the list (with blosxom
    # defaults as fallbacks).
    args = {"request": request, "entry_list": entrylist}
    entrylist = tools.run_callback("sortlist",
                                   args,
                                   donefunc=lambda x: x is not None,
                                   defaultfunc=blosxom_sort_list_handler)

    args = {"request": request, "entry_list": entrylist}
    entrylist = tools.run_callback("truncatelist",
                                   args,
                                   donefunc=lambda x: x is not None,
                                   defaultfunc=blosxom_truncate_list_handler)

    return entrylist
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def blosxom_truncate_list_handler(args):
    """If ``config["num_entries"]`` is not 0 and ``data["truncate"]``
    is not 0, then this truncates ``args["entry_list"]`` by
    ``config["num_entries"]``.

    :param args: args dict with ``request`` object and ``entry_list``
        list of entries

    :returns: the (possibly truncated) ``entry_list``.
    """
    request = args['request']
    data = request.get_data()
    config = request.get_configuration()

    # Bug fix: ``entrylist`` was only assigned inside the truncation
    # branch, so a request that did not truncate raised NameError
    # instead of returning the full list.  Start from the full list
    # and truncate only when both switches are on.
    entrylist = args['entry_list']
    num_entries = config['num_entries']
    if data.get('truncate', False) and num_entries:
        entrylist = entrylist[:num_entries]
    return entrylist
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def route_file(cfg, url, data):
    """Route a bare path to an on-disk entry file.

    Looks for ``data['path']`` under ``cfg['datadir']`` with one of the
    registered entry-parser extensions; when found, marks the request
    as a single-entry page.

    :param cfg: the configuration dict (uses ``datadir`` and
        ``extensions``).
    :param url: the requested url (unused here; kept for the router's
        uniform handler signature).
    :param data: the path-info data dict; updated in place on a match.

    :returns: the (possibly updated) ``data`` dict.
    """
    path = os.path.join(cfg['datadir'], data['path'].lstrip('/'))
    # what_ext probes path + '.' + ext for each known extension.
    ext = tools.what_ext(cfg['extensions'].keys(), path)
    if ext:
        data.update({
            'root_datadir': path + '.' + ext,
            'bl_type': 'entry'
        })
    return data
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def blosxom_process_path_info(args): """Process HTTP ``PATH_INFO`` for URI according to path specifications, fill in data dict accordingly. The paths specification looks like this: - ``/foo.html`` and ``/cat/foo.html`` - file foo.* in / and /cat - ``/cat`` - category - ``/2002`` - category (if that's a directory) - ``/2002`` - year index - ``/2002/02`` - year/month index - ``/2002/02/04`` - year/month/day index :param args: dict containing the incoming Request object """ request = args['request'] cfg = request.get_configuration() data = request.get_data() pyhttp = request.get_http() # Populate with default values new_data = { 'path_info': pyhttp.get('PATH_INFO', ''), 'pi_yr': '', 'pi_mo': '', 'pi_da': '', 'pi_bl': pyhttp.get('PATH_INFO', ''), 'bl_type': '', 'theme': request.get_theme(), 'root_datadir': cfg['datadir'] } routed_data = ROUTER.match(cfg, new_data['path_info']) if not routed_data: # If we have no idea what this is, then treat it like a file. routed_data = { 'bl_type': '', 'root_datadir': os.path.join(cfg['datadir'].rstrip(), pyhttp.get('PATH_INFO', '').lstrip()) } new_data.update(routed_data) # Construct final URL new_data['url'] = '/'.join([ cfg['base_url'].rstrip('/\\'), new_data['pi_bl'].lstrip('/\\')]) # Figure out whether to truncate the entry list truncate = False if new_data.get('pi_yr'): truncate = cfg['truncate_date'] elif new_data.get('bl_type') == 'entry_list': if new_data['path_info'] in ([''], ['index']): truncate = cfg['truncate_frontpage'] else: truncate = cfg['truncate_category'] new_data['truncate'] = truncate # Update the data dict in-place data.update(new_data) return data
willkg/douglas
[ 2, 3, 2, 14, 1385584147 ]
def __init__( self, plotly_name="enabled", parent_name="densitymapbox.colorbar.tickformatstop", **kwargs
plotly/python-api
[ 13052, 2308, 13052, 1319, 1385013188 ]
def company_prefix(self) -> str: """ :example: 'ห้างหุ้นส่วนจำกัด' """ return self.random_element(self.company_prefixes)
joke2k/faker
[ 15491, 1733, 15491, 19, 1352761209 ]
def company_limited_suffix(self) -> str: """ :example: 'จำกัด' """ return self.random_element(self.company_limited_suffixes)
joke2k/faker
[ 15491, 1733, 15491, 19, 1352761209 ]
def __init__(self, locale=None): # noqa: E501 """PreferencesV30Rc1 - a model defined in Swagger""" # noqa: E501 self._locale = None self.discriminator = None if locale is not None: self.locale = locale
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def locale(self): """Gets the locale of this PreferencesV30Rc1. # noqa: E501 :return: The locale of this PreferencesV30Rc1. # noqa: E501 :rtype: str """ return self._locale
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def locale(self, locale): """Sets the locale of this PreferencesV30Rc1. :param locale: The locale of this PreferencesV30Rc1. # noqa: E501 :type: str """ allowed_values = ["AR", "CS", "DE", "EN", "ES", "FR", "IT", "JA", "KO", "PT", "RU", "ZH_CN", "ZH_TW", "XX"] # noqa: E501 if locale not in allowed_values: raise ValueError( "Invalid value for `locale` ({0}), must be one of {1}" # noqa: E501 .format(locale, allowed_values) ) self._locale = locale
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PreferencesV30Rc1): return False return self.__dict__ == other.__dict__
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
[ 13, 7, 13, 28, 1486087622 ]
def option_list(self): """ For compatibility with Django<1.10 """ try: return BaseCommand.option_list + ( make_option('-c', '--clean', **clean_option_kwargs) ) except: return None
hzdg/django-staticbuilder
[ 76, 4, 76, 6, 1349907733 ]
def handle(self, *args, **options): self.clean = options['clean'] self.verbosity = int(options.get('verbosity', '1')) build_dir = settings.STATICBUILDER_BUILD_ROOT if not build_dir: raise ImproperlyConfigured('STATICBUILDER_BUILD_ROOT must be set.') # Copy the static assets to a the build directory. self.log(t.bold('Collecting static assets for building...')) self.call_command_func(self.collect_for_build, build_dir)
hzdg/django-staticbuilder
[ 76, 4, 76, 6, 1349907733 ]
def collect_for_build(self, build_dir): with patched_finders(): with patched_settings(STATICBUILDER_COLLECT_BUILT=False): # Patch the static files storage used by collectstatic storage = BuiltFileStorage() old_storage = djstorage.staticfiles_storage djstorage.staticfiles_storage = storage try: call_command('collectstatic', verbosity=self.verbosity - 1, interactive=False, ignore_patterns=settings.STATICBUILDER_EXCLUDE_FILES) finally: djstorage.staticfiles_storage = old_storage # Delete the files that have been removed. if self.clean: self.clean_built(storage)
hzdg/django-staticbuilder
[ 76, 4, 76, 6, 1349907733 ]
def clean_built(self, storage): """ Clear any static files that aren't from the apps. """ build_dirs, built_files = self.find_all(storage) found_files = set() for finder in finders.get_finders(): for path, s in finder.list([]): # Prefix the relative path if the source storage contains it if getattr(s, 'prefix', None): prefixed_path = os.path.join(s.prefix, path) else: prefixed_path = path found_files.add(prefixed_path) stale_files = built_files - found_files for fpath in stale_files: self.log(u"Deleting '%s'" % smart_text(fpath), level=1) storage.delete(fpath) found_dirs = set() for f in found_files: path = f while True: path = os.path.dirname(path) found_dirs.add(path) if not path: break stale_dirs = set(build_dirs) - found_dirs for fpath in stale_dirs: try: storage.delete(fpath) except OSError: self.log(u"Couldn't remove empty directory '%s'" % smart_text(fpath), level=1) else: self.log(u"Deleted empty directory '%s'" % smart_text(fpath), level=1)
hzdg/django-staticbuilder
[ 76, 4, 76, 6, 1349907733 ]
def queryValue(key, name): value, type_id = QueryValueEx(key, name) return value
ActiveState/code
[ 1884, 686, 1884, 41, 1500923597 ]
def main(): try: path = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment' reg = ConnectRegistry(None, HKEY_LOCAL_MACHINE) key = OpenKey(reg, path, 0, KEY_ALL_ACCESS)
ActiveState/code
[ 1884, 686, 1884, 41, 1500923597 ]
def password_check(password): """ Verify the strength of 'password' Returns a dict indicating the wrong criteria A password is considered strong if: 12 characters length or more 1 digit or more 1 symbol or more 1 uppercase letter or more 1 lowercase letter or more """ # calculating the length length_error = len(password) < 12 # searching for digits digit_error = re.search(r"\d", password) is None # searching for uppercase uppercase_error = re.search(r"[A-Z]", password) is None # searching for lowercase lowercase_error = re.search(r"[a-z]", password) is None # searching for symbols # ]\;',./!@#$%^&*()_+-= symbol_error = not any(i in "]\;',./!@#$%^&*()_+-=]" for i in password) # overall result password_ok = not ( length_error or digit_error or uppercase_error or lowercase_error or symbol_error ) return password_ok
usnistgov/corr
[ 6, 4, 6, 34, 1458070055 ]
def create_admin(email, password, fname, lname): """ Creates the first admin user Returns boolean to indicate if the account was created or not """ if not password_check(password): return False else: hash_pwd = hashlib.sha256(('CoRRPassword_%s'%password).encode("ascii")).hexdigest() (account, created) = get_or_create(document=UserModel, created_at=str(datetime.datetime.utcnow()), email=email, group='admin', api_token=hashlib.sha256(('CoRRToken_%s_%s'%(email, str(datetime.datetime.utcnow()))).encode("ascii")).hexdigest()) if created: account.password = hash_pwd account.save() (profile_model, created) = get_or_create(document=ProfileModel, created_at=str(datetime.datetime.utcnow()), user=account, fname=fname, lname=lname) if created: return True else: return False else: return False
usnistgov/corr
[ 6, 4, 6, 34, 1458070055 ]
def is_token_allowed(token): ''' Only allow valid tokens which are not stop words and punctuation symbols. ''' # if (not token or not token.string.strip() or token.is_stop or token.is_punct): if (not token or not token.text.strip() or token.is_stop or token.is_punct): return False return True
bflaven/BlogArticlesExamples
[ 6, 4, 6, 49, 1442137714 ]
def main(args): if len(args) != 2: print("Usage: python project-diff.py [path-to-project-1] [path-to-project-2]") return dir1 = args[0] dir2 = args[1] project1 = collect_text_files(dir1) project2 = collect_text_files(dir2) files_only_in_1 = [] files_only_in_2 = [] files_in_both = [] perform_venn_analysis(set(project1.keys()), set(project2.keys()), files_only_in_1, files_only_in_2, files_in_both) if len(files_only_in_1) > 0: print("The following files are only in Project 1:") for file in files_only_in_1: print(" " + file) print("") if len(files_only_in_2) > 0: print("The following files are only in Project 2:") for file in files_only_in_2: print(" " + file) print("") print(str(len(files_in_both)) + " files in both projects.") print("") files_in_both.sort() files_with_diffs = [] for file in files_in_both: text_1 = project1[file] text_2 = project2[file] diff = perform_diff(text_1, text_2) if len(diff) > 0: files_with_diffs.append(file) print("There's a difference in " + file) print("\n".join(diff)) print("") if len(files_with_diffs) == 0: print("No files with text differences.") else: print("Diffs were in the following files:") print("\n".join(files_with_diffs)) print("")
blakeohare/crayon
[ 110, 10, 110, 59, 1425530602 ]
def collect_text_files(root): output = {} root = root.replace('\\', '/') if root.endswith('/'): root = root[:-1] collect_text_files_impl(root, '', output) return output
blakeohare/crayon
[ 110, 10, 110, 59, 1425530602 ]
def is_text_file(path): ext = get_file_extension(path) return ext not in FILE_EXTENSION_IGNORE_LIST
blakeohare/crayon
[ 110, 10, 110, 59, 1425530602 ]
def perform_diff(text_1, text_2): if text_1 == text_2: return [] lines_1 = text_1.split('\n') lines_2 = text_2.split('\n') trimmed_front = 0 trimmed_back = 0 # Remove identical lines at the beginning and end of the file while len(lines_1) > trimmed_front and len(lines_2) > trimmed_front and lines_1[trimmed_front] == lines_2[trimmed_front]: trimmed_front += 1 lines_1 = lines_1[trimmed_front:] lines_2 = lines_2[trimmed_front:] while len(lines_1) > trimmed_back and len(lines_2) > trimmed_back and lines_1[-1 - trimmed_back] == lines_2[-1 - trimmed_back]: trimmed_back += 1 lines_1 = lines_1[:-trimmed_back] lines_2 = lines_2[:-trimmed_back] length_1 = len(lines_1) length_2 = len(lines_2) grid = [] for x in range(length_2 + 1): column = [] for y in range(length_1 + 1): column.append(None) grid.append(column) # Perform levenshtein difference # each grid cell will consist of a tuple: (diff-size, previous-path: up|left|diag) # Each step to the right indicates taking a line from lines 2 # Each step downwards indicates taking a line from lines 1 # Prepopulate the left and top rows indicating starting the diff by removing all # lines from lines 1 and adding all lines from lines 2. for x in range(length_2 + 1): grid[x][0] = (x, 'left') for y in range(length_1 + 1): grid[0][y] = (y, 'up') grid[0][0] = (0, 'diag') # Populate the grid. Figure out the minimum diff to get to each point. 
for y in range(1, length_1 + 1): for x in range(1, length_2 + 1): if lines_1[y - 1] == lines_2[x - 1]: grid[x][y] = (grid[x - 1][y - 1][0], 'diag') elif (grid[x - 1][y][0] <= grid[x][y - 1][0]): grid[x][y] = (grid[x - 1][y][0] + 1, 'left') else: grid[x][y] = (grid[x][y - 1][0] + 1, 'up') # Start from the bottom right corner and walk backwards to the origin x = length_2 y = length_1 diff_chain = [] ellipsis_used = False while x != 0 and y != 0: node = grid[x][y] if node[1] == 'diag': if not ellipsis_used: diff_chain.append('...') ellipsis_used = True x -= 1 y -= 1 elif node[1] == 'left': diff_chain.append('+ [' + str(trimmed_front + x) + '] ' + lines_2[x - 1]) x -= 1 ellipsis_used = False else: diff_chain.append('- [' + str(trimmed_front + y) + '] ' + lines_1[y - 1]) y -= 1 ellipsis_used = False diff_chain.reverse() return diff_chain
blakeohare/crayon
[ 110, 10, 110, 59, 1425530602 ]
def __init__(self): self._fpg = None self._fp = None self._spg = None self._sp = None
funkybob/rattle
[ 14, 6, 14, 2, 1394944394 ]
def fpg(self): if self._fpg is None: self._fpg = rply.ParserGenerator( [rule.name for rule in lexers.flg.rules], precedence=[] ) return self._fpg
funkybob/rattle
[ 14, 6, 14, 2, 1394944394 ]
def fp(self): if self._fp is None: self._fp = self.fpg.build() return self._fp
funkybob/rattle
[ 14, 6, 14, 2, 1394944394 ]
def spg(self): if self._spg is None: self._spg = rply.ParserGenerator( [rule.name for rule in lexers.slg.rules], precedence=[] ) return self._spg
funkybob/rattle
[ 14, 6, 14, 2, 1394944394 ]
def sp(self): if self._sp is None: self._sp = self.spg.build() return self._sp
funkybob/rattle
[ 14, 6, 14, 2, 1394944394 ]
def default_backups_directory(): return os.path.join(paths.application_data_directory(), 'Backups')
scottrice/Ice
[ 818, 108, 818, 209, 1356404560 ]
def shortcuts_backup_path(directory, user, timestamp_format="%Y%m%d%H%M%S"): """ Returns the path for a shortcuts.vdf backup file. This path is in the designated backup directory, and includes a timestamp before the extension to allow many backups to exist at once. """ assert(directory is not None) return os.path.join( directory, str(user.user_id), backup_filename(user, timestamp_format) )
scottrice/Ice
[ 818, 108, 818, 209, 1356404560 ]
def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('MonitoringSettingResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('MonitoringSettingResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def introMessage(): print '==============================================================================================' print ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender' print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW' print '==============================================================================================\n' return
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def calcFingerprints(smiles): m1 = Chem.MolFromSmiles(smiles) fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048) binary = fp.ToBitString() return list(binary)
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def arrayFP(inp): outfp = [] for i in inp: try: outfp.append(calcFingerprints(i)) except: print 'SMILES Parse Error: ' + i return outfp
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def importQuery(in_file): query = open(in_file).read().splitlines() #discard IDs, if present if len(query[0].split()) > 1: query = [line.split()[0] for line in query] matrix = np.empty((len(query), 2048), dtype=np.uint8) smiles_per_core = int(math.ceil(len(query) / N_cores)+1) chunked_smiles = [query[x:x+smiles_per_core] for x in xrange(0, len(query), smiles_per_core)] pool = Pool(processes=N_cores) # set up resources jobs = pool.imap(arrayFP, chunked_smiles) current_end = 0 for i, result in enumerate(jobs): matrix[current_end:current_end+len(result), :] = result current_end += len(result) pool.close() pool.join() return matrix[:current_end]
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def getUniprotInfo(): if os.name == 'nt': sep = '\\' else: sep = '/' model_info = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'classes_in_model.txt').read().splitlines()] return_dict = {l[0] : l[0:8] for l in model_info} return return_dict
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def getDisgenetInfo(): if os.name == 'nt': sep = '\\' else: sep = '/' return_dict1 = dict() return_dict2 = dict() disease_file = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'DisGeNET_diseases.txt').read().splitlines()] for l in disease_file: try: return_dict1[l[0]].append(l[1]) except KeyError: return_dict1[l[0]] = [l[1]] try: return_dict2[(l[1],l[0])] = float(l[2]) except ValueError: pass return return_dict1, return_dict2
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def getPathwayInfo(): if os.name == 'nt': sep = '\\' else: sep = '/' return_dict1 = dict() return_dict2 = dict() pathway_info = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'biosystems.txt').read().splitlines()] for l in pathway_info: try: return_dict1[l[0]].append(l[1]) except KeyError: return_dict1[l[0]] = [l[1]] return_dict2[l[1]] = l[2:] return return_dict1, return_dict2
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def getBGhits(threshold): if os.name == 'nt': sep = '\\' else: sep = '/' bg_column = int((threshold*100)+1) bg_file = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'bg_predictions.txt').read().splitlines()] bg_file.pop(0) bg_predictions = {l[0] : int(l[bg_column]) for l in bg_file} return bg_predictions
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def calcPredictionRatio(preds1,preds2): preds1_percentage = float(preds1)/float(len(querymatrix1)) preds2_percentage = float(preds2)/float(2000000) if preds1 == 0 and preds2 == 0: return None if preds1 == 0: return 999.0, round(preds1_percentage,3), round(preds2_percentage,3) if preds2 == 0: return 0.0, round(preds1_percentage,3), round(preds2_percentage,3) return round(preds2_percentage/preds1_percentage,3), round(preds1_percentage,3), round(preds2_percentage,3)
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def open_Model(mod): if os.name == 'nt': sep = '\\' else: sep = '/' with zipfile.ZipFile(os.path.dirname(os.path.abspath(__file__)) + sep + 'models' + sep + mod + '.pkl.zip', 'r') as zfile: with zfile.open(mod + '.pkl', 'r') as fid: clf = cPickle.load(fid) return clf
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def doTargetPrediction(pickled_model_name): if os.name == 'nt': sep = '\\' else: sep = '/' mod = pickled_model_name.split(sep)[-1].split('.')[0] clf = open_Model(mod) preds1 = sum(clf.predict_proba(querymatrix1)[:,1] > threshold) preds2 = bg_preds[mod] oddsratio, pvalue = stats.fisher_exact([[preds2,2000000-preds2],[preds1,len(querymatrix1)-preds1]]) try: ratio, preds1_percentage, preds2_percentage = calcPredictionRatio(preds1,preds2) return ratio, mod, preds1, preds1_percentage, preds2, preds2_percentage, oddsratio, pvalue except TypeError: return None
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def performTargetPrediction(models): prediction_results = [] pool = Pool(processes=N_cores, initializer=initPool, initargs=(querymatrix1,threshold,bg_preds)) # set up resources jobs = pool.imap_unordered(doTargetPrediction, models) for i, result in enumerate(jobs): percent = (float(i)/float(len(models)))*100 + 1 sys.stdout.write(' Performing Classification on Query Molecules: %3d%%\r' % percent) sys.stdout.flush() if result is not None: prediction_results.append(result) updateHits(disease_links,disease_hits,result[1],result[2],result[4]) updateHits(pathway_links,pathway_hits,result[1],result[2],result[4]) pool.close() pool.join() return prediction_results
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def updateHits(links,hits,uniprot,hit1,hit2): try: for idx in links[uniprot]: #try checks if pw or dnet try: if disease_score[(idx,uniprot)] < dgn_threshold: continue except KeyError: pass try: hits[idx] = hits[idx] + np.array([hit1,hit2]) except KeyError: hits[idx] = np.array([hit1,hit2]) except KeyError: return return
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def doHitProcess(inp): idx, hits, n_f1_hits, n_f2_hits = inp if hits[0] == 0 and hits[1] == 0: return if hits[0] == 0: return idx, 999.0, 0, 0, hits[1], float(hits[1])/float(n_f2_hits), 'NA', 'NA' if hits[1] == 0: return idx, 0.0, hits[0], float(hits[0])/float(n_f1_hits), 0, 0, 'NA', 'NA' h1_p = float(hits[0])/float(n_f1_hits) h2_p = float(hits[1])/float(n_f2_hits) chi, pvalue, _, _ = stats.chi2_contingency([[hits[1],n_f2_hits-hits[1]],[hits[0],n_f1_hits-hits[0]]]) return idx, round(h2_p/h1_p,3), hits[0], h1_p, hits[1], h2_p, chi, pvalue
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def processHits(inp_dict): out_dict = dict() total_hits = np.array(inp_dict.values()).sum(axis=0) if total_hits.shape is (): return out_dict, 0, 0 n_f1_hits = total_hits[0] n_f2_hits = total_hits[1] tasks = [[idx,hits,n_f1_hits,n_f2_hits] for idx, hits in inp_dict.iteritems()] pool = Pool(processes=N_cores) # set up resources jobs = pool.imap_unordered(doHitProcess, tasks) for i, result in enumerate(jobs): percent = (float(i)/float(len(tasks)))*100 + 1 sys.stdout.write(" Calculating Fisher's test: %3d%%\r" % percent) sys.stdout.flush() if result is None: continue out_dict[result[0]] = result[1:] return out_dict, n_f1_hits, n_f2_hits
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def initPool(querymatrix1_, threshold_, bg_preds_): global querymatrix1, threshold, bg_preds querymatrix1 = querymatrix1_ threshold = threshold_ bg_preds = bg_preds_
lhm30/PIDGINv2
[ 28, 8, 28, 1, 1442588431 ]
def __init__(self, event_type, timestamp, **kwargs): self.event_type = event_type # All events have these two attributes self.timestamp = timestamp for attribute in self.KNOWN_ATTRIBUTES: self._set(attribute, kwargs.get(attribute))
thefactory/marathon-python
[ 199, 152, 199, 25, 1398275002 ]
def _set(self, attribute_name, attribute): if not attribute: return # Special handling for lists... if isinstance(attribute, list): name = self.seq_name_to_singular.get(attribute_name) attribute = [ self.__to_marathon_object(name, v) for v in attribute ] else: attribute = self.__to_marathon_object(attribute_name, attribute) setattr(self, attribute_name, attribute)
thefactory/marathon-python
[ 199, 152, 199, 25, 1398275002 ]
def __init__(self): pass
thefactory/marathon-python
[ 199, 152, 199, 25, 1398275002 ]
def _pdfrate_wrapper(ntuple): ''' A helper function to parallelize calls to gdkde(). ''' try: return pdfrate_once(*ntuple) except Exception as e: return e
uvasrg/EvadeML
[ 98, 40, 98, 1, 1449699729 ]
def _pdfrate_feat_wrapper(ntuple): ''' A helper function to parallelize calls to gdkde(). ''' try: return pdfrate_feature_once(*ntuple) except Exception as e: return e
uvasrg/EvadeML
[ 98, 40, 98, 1, 1449699729 ]
def get_classifier(): scenario_name = "FTC" scenario = _scenarios[scenario_name] # Set up classifier classifier = 0 if scenario['classifier'] == 'rf': classifier = RandomForest() print 'Using RANDOM FOREST' elif scenario['classifier'] == 'svm': classifier = sklearn_SVC() print 'Using SVM' print 'Loading model from "{}"'.format(scenario['model']) classifier.load_model(scenario['model']) return classifier
uvasrg/EvadeML
[ 98, 40, 98, 1, 1449699729 ]
def pdfrate_feature(pdf_file_paths, speed_up = True): classifier = pdfrate_classifier scaler = pdfrate_scaler if not isinstance(pdf_file_paths, list): pdf_file_paths = [pdf_file_paths] if speed_up == True: # The Pool has to be moved outside the function. Otherwise, every call of this function will result a new Pool. The processes in old Pool would not terminate. args = [(classifier, scaler, file_path) for file_path in pdf_file_paths] feats = pool.map(_pdfrate_feat_wrapper, args) else: feats = [] for pdf_file_path in pdf_file_paths: pdf_feats = pdfrate_feature_once(classifier, scaler, pdf_file_path) feats.append(pdf_feats) all_feat_np = None for feat_np in feats: if all_feat_np == None: all_feat_np = feat_np else: all_feat_np = np.append(all_feat_np, feat_np, axis=0) return all_feat_np
uvasrg/EvadeML
[ 98, 40, 98, 1, 1449699729 ]
def pdfrate_with_feature(all_feat_np, speed_up = True): classifier = pdfrate_classifier scores = classifier.decision_function(all_feat_np) scores = [s[0] for s in scores] return scores
uvasrg/EvadeML
[ 98, 40, 98, 1, 1449699729 ]
def pdfrate(pdf_file_paths, speed_up = True): if type(pdf_file_paths) != list: pdf_file_paths = [pdf_file_paths] classifier = pdfrate_classifier all_feat_np = pdfrate_feature(pdf_file_paths, speed_up) scores = classifier.decision_function(all_feat_np) scores = [float(s[0]) for s in scores] return scores
uvasrg/EvadeML
[ 98, 40, 98, 1, 1449699729 ]
def __init__(self, title, *sections): """Create a new `Document` with `title` and zero or more `sections`. :param title: the tile of the document (`Document` or `Element` type) :param sections: document sections (`Document` or `Element` type) """ self._title = promote_to_string(title) self._sections = [promote_to_string(s) for s in sections]
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def __hash__(self): return hash(str(self))
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def __str__(self): if self.title: return str(self.title) + '\n\n' + '\n\n'.join([str(s) for s in self.sections]) else: return '\n\n'.join([str(s) for s in self.sections])
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def title(self): return self._title
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def title(self, value): self._title = promote_to_string(value)
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def sections(self): return self._sections
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def sections(self, *sections): self._sections = [promote_to_string(s) for s in sections]
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]
def to_xml(self, depth=0, indent=' '): """Return an XML representation of the document" :param depth: the initial indentation offset (depth * indent) :param indent: the indent for nested elements. """ offset = indent * depth result = offset + '<document>\n' result += offset + indent + '<title>\n' result += self.title.to_xml(depth=depth + 1) result += offset + indent + '</title>\n' result += offset + indent + '<sections>\n' for s in self.sections: result += s.to_xml(depth=depth + 1) result += offset + indent + '</sections>\n' result += offset + '</document>\n' return result
roman-kutlak/nlglib
[ 45, 18, 45, 3, 1444561978 ]