content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from decimal import Decimal
from django import forms
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
class BaseWidget(forms.TextInput):
"""
Base widget. Do not use this directly.
"""
template = None
instance = None
class SliderWidget(BaseWidget):
"""
Slider widget.
In order to use this widget you must load the jQuery.ui slider
javascript.
This widget triggers the following javascript events:
- *slider_change* with the vote value as argument
(fired when the user changes his vote)
- *slider_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('slider_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False, default='',
template='ratings/slider_widget.html', attrs=None):
"""
The argument *default* is used when the initial value is None.
"""
super(SliderWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.default = default
self.template = template
self.key = key
class StarWidget(BaseWidget):
"""
Starrating widget.
In order to use this widget you must download the
jQuery Star Rating Plugin available at
http://www.fyneworks.com/jquery/star-rating/#tab-Download
and then load the required javascripts and css, e.g.::
<link href="/path/to/jquery.rating.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="/path/to/jquery.MetaData.js"></script>
<script type="text/javascript" src="/path/to/jquery.rating.js"></script>
This widget triggers the following javascript events:
- *star_change* with the vote value as argument
(fired when the user changes his vote)
- *star_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('star_change', function(event, value) {
alert('New vote: ' + value);
});
"""
| [
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
28243,
13,
29356,
1330,
8543,
62,
1462,
62,
8841,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
31065,
1958,
198,
... | 2.722838 | 902 |
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import dataset_processing as data_proc
if __name__ == "__main__":
abalone(verbose=False, show_plots=False)
online_shopping(verbose=False, show_plots=False)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
21048,
1330,
26423,
27660,
9487,
7483,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
3272,
62,
2100,
62,
26675,
198,
11748,
2603,
294... | 3.026549 | 113 |
#
# PySNMP MIB module MY-PROCESS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MY-PROCESS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:06:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
myMgmt, = mibBuilder.importSymbols("MY-SMI", "myMgmt")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Counter32, ObjectIdentity, MibIdentifier, TimeTicks, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, Counter64, NotificationType, Unsigned32, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "ObjectIdentity", "MibIdentifier", "TimeTicks", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "Counter64", "NotificationType", "Unsigned32", "IpAddress", "iso")
TruthValue, TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowStatus", "DisplayString")
myProcessMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36))
myProcessMIB.setRevisions(('2003-10-14 00:00',))
if mibBuilder.loadTexts: myProcessMIB.setLastUpdated('200310140000Z')
if mibBuilder.loadTexts: myProcessMIB.setOrganization('D-Link Crop.')
myCPUMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1))
myCpuGeneralMibsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1))
myCPUUtilization5Sec = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 1), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myCPUUtilization5Sec.setStatus('current')
myCPUUtilization1Min = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 2), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myCPUUtilization1Min.setStatus('current')
myCPUUtilization5Min = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 3), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myCPUUtilization5Min.setStatus('current')
myCPUUtilizationWarning = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 4), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myCPUUtilizationWarning.setStatus('current')
myCPUUtilizationCritical = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 5), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myCPUUtilizationCritical.setStatus('current')
myNodeCPUTotalTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2), )
if mibBuilder.loadTexts: myNodeCPUTotalTable.setStatus('current')
myNodeCPUTotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1), ).setIndexNames((0, "MY-PROCESS-MIB", "myNodeCPUTotalIndex"))
if mibBuilder.loadTexts: myNodeCPUTotalEntry.setStatus('current')
myNodeCPUTotalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotalIndex.setStatus('current')
myNodeCPUTotalName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotalName.setStatus('current')
myNodeCPUTotal5sec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 3), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotal5sec.setStatus('current')
myNodeCPUTotal1min = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 4), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotal1min.setStatus('current')
myNodeCPUTotal5min = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 5), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotal5min.setStatus('current')
myNodeCPUTotalWarning = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 6), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myNodeCPUTotalWarning.setStatus('current')
myNodeCPUTotalCritical = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 7), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myNodeCPUTotalCritical.setStatus('current')
myProcessMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2))
myProcessMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 1))
myProcessMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 2))
myProcessMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 1, 1)).setObjects(("MY-PROCESS-MIB", "myCPUUtilizationMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
myProcessMIBCompliance = myProcessMIBCompliance.setStatus('current')
myCPUUtilizationMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 2, 1)).setObjects(("MY-PROCESS-MIB", "myCPUUtilization5Sec"), ("MY-PROCESS-MIB", "myCPUUtilization1Min"), ("MY-PROCESS-MIB", "myCPUUtilization5Min"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
myCPUUtilizationMIBGroup = myCPUUtilizationMIBGroup.setStatus('current')
myNodeCPUTotalGroups = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 2, 2)).setObjects(("MY-PROCESS-MIB", "myNodeCPUTotalIndex"), ("MY-PROCESS-MIB", "myNodeCPUTotalName"), ("MY-PROCESS-MIB", "myNodeCPUTotal5sec"), ("MY-PROCESS-MIB", "myNodeCPUTotal1min"), ("MY-PROCESS-MIB", "myNodeCPUTotal5min"), ("MY-PROCESS-MIB", "myNodeCPUTotalWarning"), ("MY-PROCESS-MIB", "myNodeCPUTotalCritical"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
myNodeCPUTotalGroups = myNodeCPUTotalGroups.setStatus('current')
mibBuilder.exportSymbols("MY-PROCESS-MIB", myCPUMIBObjects=myCPUMIBObjects, myCPUUtilizationWarning=myCPUUtilizationWarning, myProcessMIBCompliances=myProcessMIBCompliances, myCPUUtilization5Sec=myCPUUtilization5Sec, Percent=Percent, myNodeCPUTotalEntry=myNodeCPUTotalEntry, myNodeCPUTotal5min=myNodeCPUTotal5min, myNodeCPUTotal5sec=myNodeCPUTotal5sec, myCpuGeneralMibsGroup=myCpuGeneralMibsGroup, myNodeCPUTotalCritical=myNodeCPUTotalCritical, myCPUUtilizationCritical=myCPUUtilizationCritical, myNodeCPUTotalWarning=myNodeCPUTotalWarning, myProcessMIBConformance=myProcessMIBConformance, myCPUUtilization1Min=myCPUUtilization1Min, myCPUUtilization5Min=myCPUUtilization5Min, PYSNMP_MODULE_ID=myProcessMIB, myNodeCPUTotalTable=myNodeCPUTotalTable, myProcessMIBCompliance=myProcessMIBCompliance, myNodeCPUTotalGroups=myNodeCPUTotalGroups, myNodeCPUTotalIndex=myNodeCPUTotalIndex, myProcessMIB=myProcessMIB, myProcessMIBGroups=myProcessMIBGroups, myCPUUtilizationMIBGroup=myCPUUtilizationMIBGroup, myNodeCPUTotalName=myNodeCPUTotalName, myNodeCPUTotal1min=myNodeCPUTotal1min)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
17615,
12,
4805,
4503,
7597,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
67,
615,... | 2.564212 | 2,811 |
#!/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
forcefield_utils.py
Utilities relating to OpenMM ForceField replacement using SMIRKS-based matching.
AUTHORS
David L. Mobley <dmobley@mobleylab.org>
Based loosely on code from github.com/choderalab/openmoltools, and especially
parts from John Chodera and Kyle Beauchamp.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
from openforcefield.typing.engines.smirnoff import ForceField
from openforcefield.utils import get_data_filename
import simtk.openmm
from simtk.openmm import app
import simtk.openmm as mm
from simtk.openmm.app import element as elem
from simtk.openmm.app import Topology
import numpy as np
from openmoltools import system_checker
import copy
import openeye.oechem
import openeye.oeomega
import openeye.oequacpac
from openeye import oechem
from simtk import openmm, unit
import parmed
#=============================================================================
# UTILITY FUNCTIONS
#=============================================================================
def create_system_from_amber( prmtop_filename, crd_filename, verbose = False ):
"""Utility function. Create and return an OpenMM System given a prmtop and
crd, AMBER format files.
Parameters
----------
prmtop_filename : str (filename)
Filename of input AMBER format prmtop file
crd_filename : str (filename)
Filename of input AMBER format crd file
Returns
_______
topology : OpenMM Topology
system : OpenMM System
positions : initial atomic positions (OpenMM)
"""
# Create System object
prmtop = app.AmberPrmtopFile(prmtop_filename)
topology = prmtop.topology
system = prmtop.createSystem(nonbondedMethod = app.NoCutoff, constraints = None, implicitSolvent = None )
# Read coordinates
crd = app.AmberInpcrdFile( crd_filename )
positions = crd.getPositions()
return (topology, system, positions)
def create_system_from_molecule(forcefield, mol, verbose=False):
"""
Generate a System from the given OEMol and SMIRNOFF forcefield, return the resulting System.
Parameters
----------
forcefield : ForceField
SMIRNOFF forcefield
mol : oechem.OEMol
Molecule to test (must have coordinates)
Returns
----------
topology : OpenMM Topology
system : OpenMM System
positions : initial atomic positions (OpenMM)
"""
# Create system
from openforcefield.utils import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
system = forcefield.createSystem(topology, [mol], verbose=verbose)
# Get positions
coordinates = mol.GetCoords()
natoms = len(coordinates)
positions = np.zeros([natoms,3], np.float32)
for index in range(natoms):
(x,y,z) = coordinates[index]
positions[index,0] = x
positions[index,1] = y
positions[index,2] = z
positions = unit.Quantity(positions, unit.angstroms)
return topology, system, positions
def compare_system_energies( topology0, topology1, system0, system1, positions0, positions1=None, label0="AMBER system", label1 = "SMIRNOFF system", verbose = True, skip_assert = False, skip_improper = False ):
"""
Given two OpenMM systems, check that their energies and component-wise
energies are consistent, and return these. The same positions will be used
for both systems unless a second set of positions is provided.
Parameters
----------
topology0 : OpenMM Topology
Topology of first system
topology1 : OpenMM Topology
Topology of second system
system0 : OpenMM System
First system for comparison (usually from AMBER)
system1 : OpenMM System
Second system for comparison (usually from SMIRNOFF)
positions0 : simtk.unit.Quantity wrapped
Positions to use for energy evaluation comparison
positions1 (optional) : simtk.unit.Quantity wrapped (optional)
Positions to use for second OpenMM system; original positions are used
if this is not provided
label0 (optional) : str
String labeling system0 for output. Default, "AMBER system"
label1 (optional) : str
String labeling system1 for output. Default, "SMIRNOFF system"
verbose (optional) : bool
Print out info on energies, True/False (default True)
skip_assert (optional) : bool
Skip assertion that energies must be equal within specified tolerance. Default False.
skip_improper (optional) : bool
Skip detailed checking of force terms on impropers (helpful here if comparing with AMBER force fields using different definitions of impropers.) Default False.
Returns
----------
groups0 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the first simulation object
groups1 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the second simulation object
energy0 : simtk.unit.Quantity
Energy of first system
energy1 : simtk.unit.Quantity
Energy of second system
TO DO:
Allow energy extraction/comparison of terms specified by particular
SMARTS queries i.e. for specific bond, angle, or torsional terms.
"""
# Create integrator
timestep = 1.0 * unit.femtoseconds
integrator0 = simtk.openmm.VerletIntegrator( timestep )
integrator1 = simtk.openmm.VerletIntegrator( timestep )
# Grab second positions
if positions1 == None:
positions1 = copy.deepcopy( positions0 )
# Create simulations
platform = simtk.openmm.Platform.getPlatformByName("Reference")
simulation0 = app.Simulation( topology0, system0, integrator0, platform = platform )
simulation0.context.setPositions(positions0)
simulation1 = app.Simulation( topology1, system1, integrator1, platform = platform )
simulation1.context.setPositions(positions1)
# Print what torsions were found if verbose
if verbose:
# Build list of atoms for debugging info
atoms0 = [ atom for atom in simulation0.topology.atoms() ]
atoms1 = [ atom for atom in simulation1.topology.atoms() ]
# Loop over first system and print torsion info
for force in simulation0.system.getForces():
if type(force) == mm.PeriodicTorsionForce:
print("Num (type) \t Num (type) \t Num (type) \t Num (type) \t per \t phase \t k0")
for k in range(force.getNumTorsions()):
i0, i1, i2, i3, per, phase, k0 = force.getTorsionParameters(k)
print("%3s (%3s)- %3s (%3s)- \t %s (%3s)- \t %3s (%3s)- \t %f \t %f \t %f " % (i0, atoms0[i0].name, i1, atoms0[i1].name, i2, atoms0[i2].name, i3, atoms0[i3].name, per, phase/unit.degree, k0/unit.kilojoule_per_mole) )
for force in simulation1.system.getForces():
if type(force) == mm.PeriodicTorsionForce:
print("Num (type) \t Num (type) \t Num (type) \t Num (type) \t per \t phase \t k0")
for k in range(force.getNumTorsions()):
i0, i1, i2, i3, per, phase, k0 = force.getTorsionParameters(k)
print("%3s (%3s)- %3s (%3s)- %3s (%3s)- %3s (%3s) - %f \t %f \t %f " % (i0, atoms1[i0].name, i1, atoms1[i1].name, i2, atoms1[i2].name, i3, atoms1[i3].name, per, phase/unit.degree, k0/unit.kilojoule_per_mole) )
# Do energy comparison, print info if desired
syscheck = system_checker.SystemChecker( simulation0, simulation1 )
if not skip_assert:
# Only check force terms if we want to make sure energies are identical
syscheck.check_force_parameters(skipImpropers = skip_improper)
groups0, groups1 = syscheck.check_energy_groups(skip_assert = skip_assert)
energy0, energy1 = syscheck.check_energies(skip_assert = skip_assert)
if verbose:
print("Energy of %s: " % label0, energy0 )
print("Energy of %s: " % label1, energy1 )
print("\nComponents of %s:" % label0 )
for key in groups0.keys():
print("%s: " % key, groups0[key] )
print("\nComponents of %s:" % label1 )
for key in groups1.keys():
print("%s: " % key, groups1[key] )
# Return
return groups0, groups1, energy0, energy1
def compare_molecule_energies( prmtop, crd, forcefield, mol, verbose = True, skip_assert=False, skip_improper = False):
"""
Compare energies for OpenMM Systems/topologies created from an AMBER prmtop
and crd versus from a SMIRNOFF forcefield file and OEMol which should
parameterize the same system with same parameters.
Parameters
----------
prmtop_filename : str (filename)
Filename of input AMBER format prmtop file
crd_filename : str (filename)
Filename of input AMBER format crd file
forcefield : ForceField
SMIRNOFF forcefield
mol : oechem.OEMol
Molecule to test
verbose (optional): Bool
Print out info. Default: True
skip_assert : bool
Skip assertion that energies must be equal within tolerance. Default, False.
skip_improper (optional) : bool
Skip detailed checking of force terms on impropers (helpful here if comparing with AMBER force fields using different definitions of impropers.) Default False.
Returns
--------
groups0 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the first simulation object
groups1 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the second simulation object
energy0 : simtk.unit.Quantity
Energy of first system
energy1 : simtk.unit.Quantity
Energy of second system
"""
ambertop, ambersys, amberpos = create_system_from_amber( prmtop, crd )
smirfftop, smirffsys, smirffpos = create_system_from_molecule(forcefield, mol, verbose = verbose)
groups0, groups1, energy0, energy1 = compare_system_energies( ambertop,
smirfftop, ambersys, smirffsys, amberpos, verbose = verbose, skip_assert = skip_assert, skip_improper = skip_improper )
return groups0, groups1, energy0, energy1
def get_molecule_parameterIDs( oemols, ffxml):
"""Process a list of oemols with a specified SMIRNOFF ffxml file and determine which parameters are used by which molecules, returning collated results.
Parameters
----------
oemols : list
List of OpenEye OEChem molecules to parse; must have explicit hydrogens.
Returns
-------
parameters_by_molecule : dict
Parameter IDs used in each molecule, keyed by isomeric SMILES
generated from provided OEMols. Each entry in the dict is a list
which does not necessarily have unique entries; i.e. parameter IDs
which are used more than once will occur multiple times.
parameters_by_ID : dict
Molecules in which each parameter ID occur, keyed by parameter ID.
Each entry in the dict is a set of isomeric SMILES for molecules
in which that parameter occurs. No frequency information is stored.
"""
# Create storage
parameters_by_molecule = {}
parameters_by_ID = {}
# Generate isomeric SMILES
isosmiles = list()
for mol in oemols:
smi = oechem.OECreateIsoSmiString(mol)
if not smi in isosmiles:
isosmiles.append(smi)
# If the molecule is already here, raise exception
else:
raise ValueError("Error: get_molecule_parameterIDs has been provided a list of oemols which contains the same molecule, having isomeric smiles %s, more than once." % smi )
# Label molecules
ff = ForceField( ffxml )
labels = ff.labelMolecules( oemols )
# Organize labels into output dictionary by looping over all molecules/smiles
for idx in range(len(isosmiles)):
# Pull smiles, initialize storage
smi = isosmiles[idx]
parameters_by_molecule[smi] = []
# Organize data for this molecule
data = labels[idx]
for force_type in data.keys():
for (atom_indices, pid, smirks) in data[force_type]:
# Store pid to molecule
parameters_by_molecule[smi].append(pid)
# Store which molecule this pid occurred in
if pid not in parameters_by_ID:
parameters_by_ID[pid] = set()
parameters_by_ID[pid].add(smi)
else:
parameters_by_ID[pid].add(smi)
return parameters_by_molecule, parameters_by_ID
def getMolParamIDToAtomIndex( oemol, ff):
"""Take an OEMol and a SMIRNOFF forcefield object and return a dictionary, keyed by parameter ID, where each entry is a tuple of ( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
Parameters
----------
oemol : OEMol
OpenEye OEMol with the molecule to investigate.
ff : ForceField
SMIRNOFF ForceField object (obtained from an ffxml via ForceField(ffxml)) containing FF of interest.
Returns
-------
param_usage : dictionary
Dictionary, keyed by parameter ID, where each entry is a tuple of ( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
"""
labels = ff.labelMolecules([oemol])
param_usage = {}
for mol_entry in range(len(labels)):
for force in labels[mol_entry].keys():
for (atom_indices, pid, smirks) in labels[mol_entry][force]:
if not pid in param_usage:
param_usage[pid] = (smirks, [atom_indices])
else:
param_usage[pid][1].append( atom_indices )
return param_usage
def merge_system( topology0, topology1, system0, system1, positions0, positions1, label0="AMBER system", label1 = "SMIRNOFF system", verbose = True):
"""Merge two given OpenMM systems. Returns the merged OpenMM System.
Parameters
----------
topology0 : OpenMM Topology
Topology of first system (i.e. a protein)
topology1 : OpenMM Topology
Topology of second system (i.e. a ligand)
system0 : OpenMM System
First system for merging (usually from AMBER)
system1 : OpenMM System
Second system for merging (usually from SMIRNOFF)
positions0 : simtk.unit.Quantity wrapped
Positions to use for energy evaluation comparison
positions1 (optional) : simtk.unit.Quantity wrapped (optional)
Positions to use for second OpenMM system
label0 (optional) : str
String labeling system0 for output. Default, "AMBER system"
label1 (optional) : str
String labeling system1 for output. Default, "SMIRNOFF system"
verbose (optional) : bool
Print out info on topologies, True/False (default True)
Returns
----------
topology : OpenMM Topology
system : OpenMM System
positions: unit.Quantity position array
"""
#Load OpenMM Systems to ParmEd Structures
structure0 = parmed.openmm.load_topology( topology0, system0 )
structure1 = parmed.openmm.load_topology( topology1, system1 )
#Merge parameterized Structure
structure = structure0 + structure1
topology = structure.topology
#Concatenate positions arrays
positions_unit = unit.angstroms
positions0_dimensionless = np.array( positions0 / positions_unit )
positions1_dimensionless = np.array( positions1 / positions_unit )
coordinates = np.vstack((positions0_dimensionless,positions1_dimensionless))
natoms = len(coordinates)
positions = np.zeros([natoms,3], np.float32)
for index in range(natoms):
(x,y,z) = coordinates[index]
positions[index,0] = x
positions[index,1] = y
positions[index,2] = z
positions = unit.Quantity(positions, positions_unit)
#Generate merged OpenMM system
system = structure.createSystem()
if verbose:
print("Generating ParmEd Structures...\n \t{}: {}\n \t{}: {}\n".format(label0, structure0, label1, structure1))
print("Merged ParmEd Structure: {}".format( structure ))
return topology, system, positions
def save_system_to_amber( topology, system, positions, prmtop, crd ):
"""Save an OpenMM System, with provided topology and positions, to AMBER prmtop and coordinate files.
Parameters
----------
topology : OpenMM Topology
Topology of the system to be saved, perhaps as loaded from a PDB file or similar.
system : OpenMM System
Parameterized System to be saved, containing components represented by Topology
positions : unit.Quantity position array
Position array containing positions of atoms in topology/system
prmtop : filename
AMBER parameter file name to write
crd : filename
AMBER coordinate file name (ASCII crd format) to write
"""
structure = parmed.openmm.topsystem.load_topology( topology, system, positions )
structure.save( prmtop, overwrite = True, format="amber" )
structure.save( crd, format='rst7', overwrite = True)
def save_system_to_gromacs( topology, system, positions, top, gro ):
"""Save an OpenMM System, with provided topology and positions, to AMBER prmtop and coordinate files.
Parameters
----------
topology : OpenMM Topology
Topology of the system to be saved, perhaps as loaded from a PDB file or similar.
system : OpenMM System
Parameterized System to be saved, containing components represented by Topology
positions : unit.Quantity position array
Position array containing positions of atoms in topology/system
top : filename
GROMACS topology file name to write
gro : filename
GROMACS coordinate file name (.gro format) to write
"""
structure = parmed.openmm.topsystem.load_topology( topology, system, positions )
structure.save( top, overwrite = True, format="gromacs")
structure.save( gro, overwrite = True, format="gro")
| [
2,
48443,
8800,
14,
24330,
21015,
198,
198,
2,
23926,
4770,
25609,
28,
198,
2,
33893,
37760,
18601,
2751,
198,
2,
23926,
4770,
25609,
28,
198,
198,
37811,
198,
3174,
3245,
62,
26791,
13,
9078,
198,
198,
18274,
2410,
11270,
284,
4946,
... | 2.79239 | 6,859 |
# coding: utf-8
import bisect
import codecs
import collections
import datetime
import locale
import logging
import math
import os
import re
import tempfile
import time
import traceback
import unicodedata
import urllib.parse
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
import chardet
import numpy as np
import heroku3
import requests
import simplejson as json
import sqlalchemy
import unicodecsv as csv
from flask import current_app
from flask_jwt_extended import get_jwt_identity
from requests.adapters import HTTPAdapter
from simplejson import dumps
from sqlalchemy import exc
from sqlalchemy import sql
from unidecode import unidecode
from werkzeug.wsgi import ClosingIterator
try:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') #use locale.format for commafication
except locale.Error:
locale.setlocale(locale.LC_ALL, '') #set to default locale (works on windows)
# from http://stackoverflow.com/a/3233356/596939
# returns dict with values that are proportion of all values
# good for deduping strings. warning: output removes spaces so isn't readable.
# from http://stackoverflow.com/a/11066579/596939
# from http://stackoverflow.com/a/22238613/596939
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError ("Type not serializable")
def median(my_list):
"""
Find the median of a list of ints
from https://stackoverflow.com/questions/24101524/finding-median-of-list-in-python/24101655#comment37177662_24101655
"""
my_list = sorted(my_list)
if len(my_list) < 1:
return None
if len(my_list) %2 == 1:
return my_list[((len(my_list)+1)/2)-1]
if len(my_list) %2 == 0:
return float(sum(my_list[(len(my_list)/2)-1:(len(my_list)/2)+1]))/2.0
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
from http://stackoverflow.com/a/312464
"""
for i in range(0, len(l), n):
yield l[i:i+n]
# from http://stackoverflow.com/a/20007730/226013
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n/10%10!=1)*(n%10<4)*n%10::4])
#from http://farmdev.com/talks/unicode/
# getting a "decoding Unicode is not supported" error in this function?
# might need to reinstall libaries as per
# http://stackoverflow.com/questions/17092849/flask-login-typeerror-decoding-unicode-is-not-supported
# could also make the random request have other filters
# see docs here: https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#sample
# usage:
# dois = get_random_dois(50000, from_date="2002-01-01", only_journal_articles=True)
# dois = get_random_dois(100000, only_journal_articles=True)
# fh = open("data/random_dois_articles_100k.txt", "w")
# fh.writelines(u"\n".join(dois))
# fh.close()
# from https://github.com/elastic/elasticsearch-py/issues/374
# to work around unicode problem
# class JSONSerializerPython2(elasticsearch.serializer.JSONSerializer):
# """Override elasticsearch library serializer to ensure it encodes utf characters during json dump.
# See original at: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L42
# A description of how ensure_ascii encodes unicode characters to ensure they can be sent across the wire
# as ascii can be found here: https://docs.python.org/2/library/json.html#basic-usage
# """
# def dumps(self, data):
# # don't serialize strings
# if isinstance(data, elasticsearch.compat.string_types):
# return data
# try:
# return json.dumps(data, default=self.default, ensure_ascii=True)
# except (ValueError, TypeError) as e:
# raise elasticsearch.exceptions.SerializationError(data, e)
# https://github.com/psycopg/psycopg2/issues/897
# from https://gist.github.com/douglasmiranda/5127251
# deletes a key from nested dict
# from https://stackoverflow.com/a/50762571/596939
# from https://stackoverflow.com/a/50762571/596939
# this is to support fully after-flask response sent efforts
# from # https://stackoverflow.com/a/51013358/596939
# use like
# @app.after_response
# def say_hi():
# print("hi")
# f5 from https://www.peterbe.com/plog/uniqifiers-benchmark
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
47457,
478,
198,
11748,
40481,
82,
198,
11748,
17268,
198,
11748,
4818,
8079,
198,
11748,
36693,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
2021... | 2.668096 | 1,633 |
import numpy as np
from scipy import ndimage
import cv2
from ..world.world import World
from .binary_renderer import BinaryRenderer
from .grid import Grid
from .robot_renderer import RobotRenderer
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
299,
67,
9060,
198,
11748,
269,
85,
17,
198,
6738,
11485,
6894,
13,
6894,
1330,
2159,
198,
6738,
764,
39491,
62,
10920,
11882,
1330,
45755,
49,
437,
11882,
198,
6738,
764,
... | 3.355932 | 59 |
if __name__ == "__main__":
main()
| [
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.166667 | 18 |
"""This module provides basic server endpoints."""
from http import HTTPStatus
from flask import Blueprint, request
from app.utils.misc import make_response
internal_blueprint = Blueprint('traffic-stuck-internal', __name__)
@internal_blueprint.route("/health", methods=['GET'])
def get_health():
"""Return health OK http status."""
return make_response(True, "OK", HTTPStatus.OK)
def handle_404(error):
"""Return custom response for 404 http status code."""
return make_response(
False,
f"The endpoint ({request.path}) you are trying to access could not be found on the server.",
error.code
)
def handle_405(error):
"""Return custom response for 405 http status code."""
return make_response(
False,
f"The method ({request.method}) you are trying to use for this URL could not be handled on the server.",
error.code
)
def handle_500(error):
"""Return custom response for 500 http status code."""
return make_response(
False,
"Something has gone wrong on the server side. Please, try again later.",
error.code
)
| [
37811,
1212,
8265,
3769,
4096,
4382,
886,
13033,
526,
15931,
198,
198,
6738,
2638,
1330,
14626,
19580,
198,
198,
6738,
42903,
1330,
39932,
11,
2581,
198,
198,
6738,
598,
13,
26791,
13,
44374,
1330,
787,
62,
26209,
628,
198,
32538,
62,
... | 3.007916 | 379 |
import os
import typing as t
import getpass
from pathlib import Path
import yaml
from starwhale.consts import (
UserRoleType,
SW_CLI_CONFIG,
DEFAULT_PROJECT,
DEFAULT_INSTANCE,
SW_LOCAL_STORAGE,
ENV_SW_CLI_CONFIG,
STANDALONE_INSTANCE,
LOCAL_CONFIG_VERSION,
)
from starwhale.utils.error import NotFoundError
from . import console, now_str, fmt_http_server
from .fs import ensure_dir, ensure_file
_config: t.Dict[str, t.Any] = {}
_CURRENT_SHELL_USERNAME = getpass.getuser()
# TODO: abstract better common base or mixed class
| [
11748,
28686,
198,
11748,
19720,
355,
256,
198,
11748,
651,
6603,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
331,
43695,
198,
198,
6738,
3491,
1929,
1000,
13,
1102,
6448,
1330,
357,
198,
220,
220,
220,
11787,
47445,
6030,
11,
... | 2.579909 | 219 |
import math
n, i = 600851475143, 2
while n > 1:
if n % i == 0:
print(i)
n /= i
else:
i += 1
| [
11748,
10688,
198,
198,
77,
11,
1312,
796,
10053,
5332,
1415,
2425,
21139,
11,
362,
198,
198,
4514,
299,
1875,
352,
25,
198,
220,
220,
220,
611,
299,
4064,
1312,
6624,
657,
25,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7,
72,
... | 1.68 | 75 |
#!/usr/bin/env python
"""
Advent of Code 2020: Day 15
"""
import os
import re
import signal
import sys
from types import FrameType
from typing import List, Mapping
from pathlib import Path
DEBUG = False
def spoken_number(starting_numbers: List[int], turns: int) -> int:
"""
Compute spoken number after a given number of turns
:param starting_numbers: list of starting numbers
:param turns: number of rounds
:return: spoken number
"""
spoken_numbers = list()
last_index = lambda li, n: next(i for i in reversed(range(len(li)))
if li[i] == n)
for turn, n in enumerate(starting_numbers):
if DEBUG:
print(f'Turn {1 + turn}: The number spoken is a starting number, {n}.')
spoken_numbers.append(n)
while 1 + turn < turns:
turn += 1
last_number = spoken_numbers[-1]
spoken_before = last_number in spoken_numbers[:-1]
new_spoken_number = 0 if not spoken_before else \
turn - (1 + last_index(spoken_numbers[:-1], last_number))
spoken_numbers.append(new_spoken_number)
if DEBUG:
print(f'Turn {1 + turn}: Last number spoken {last_number}, '
f'was {"" if spoken_before else "not"} spoken before. Number spoken {new_spoken_number}')
return new_spoken_number
def spoken_number_part2(starting_numbers: List[int], turns: int) -> int:
"""
Compute spoken number after a given number of turns (optimized)
:param starting_numbers: list of starting numbers
:param turns: number of rounds
:return: spoken number
"""
spoken_numbers = dict()
last_number: int = 0
last_number_spoken_before: bool = False
turn: int = 0
for i, n in enumerate(starting_numbers):
if turn > 0:
spoken_numbers[last_number] = turn
turn = 1 + i
if DEBUG:
print(f'Turn {turn}: The number spoken is a starting number, {n}.')
last_number = n
last_number_spoken_before = last_number in spoken_numbers
while turn < turns:
turn += 1
new_spoken_number = 0 if not last_number_spoken_before else \
(turn - 1) - spoken_numbers[last_number]
if DEBUG:
print(f'Turn {turn}: Last number spoken {last_number}, '
f'was {"" if last_number_spoken_before else "not"} '
f'spoken before. Number spoken {new_spoken_number}')
spoken_numbers[last_number] = turn - 1
last_number = new_spoken_number
last_number_spoken_before = last_number in spoken_numbers
return last_number
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:param part_two: true for processing part 2
:return: value to submit
"""
debug = False
numbers_list = [list(int(n) for n in l.strip().split(','))
for l in open(file)]
number = 0
for numbers in numbers_list:
number = spoken_number(starting_numbers=numbers, turns=20200)
submission = number
return submission
def process_part2(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:param part_two: true for processing part 2
:return: value to submit
"""
debug = False
numbers_list = [list(int(n) for n in l.strip().split(','))
for l in open(file)]
number = 0
for numbers in numbers_list:
number = spoken_number_part2(starting_numbers=numbers, turns=30000000)
submission = number
return submission
def main() -> int:
"""
Main function
:return: Shell exit code
"""
file = './input.txt'
submission = process(file=Path(file))
print(f'In file {file}, submission: {submission}')
print(f'Part 2')
file = './input.txt'
submission = process_part2(file=Path(file))
print(f'In file {file}, submission: {submission}')
return 0
def handle_sigint(signal_value: signal.Signals, frame: FrameType) -> None:
"""
Interrupt signal call-back method
:param signal_value: signal (expected SIGINT)
:param frame: current stack frame at the time of signal
:return: nothing
"""
assert signal_value == signal.SIGINT
print(frame.f_locals)
sys.exit(1)
def install_signal_handler() -> None:
"""
Install interrupt signal handler
:return: nothing
"""
signal.signal(signal.SIGINT, handle_sigint)
if __name__ == '__main__':
install_signal_handler()
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
2782,
1151,
286,
6127,
12131,
25,
3596,
1315,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
6737,
198,
11748,
25064,
198,
6738,
3858,
1330,
25184,
6030,
19... | 2.549535 | 1,827 |
from django.contrib import admin
import users.admin
from modules.smartq import models
@admin.register(models.Question)
@admin.register(models.GeneratedQuestion)
@admin.register(models.StaffGeneratedQuestion)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
11748,
2985,
13,
28482,
198,
6738,
13103,
13,
27004,
80,
1330,
4981,
628,
198,
31,
28482,
13,
30238,
7,
27530,
13,
24361,
8,
628,
198,
31,
28482,
13,
30238,
7,
27530,
13,
864... | 3.55 | 60 |
from django.shortcuts import render
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from yangram.conversations.models import *
from django.http import JsonResponse
from . import constants, sqls, serializers
from rest_framework import status
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import get_user_model
User = get_user_model()
from django.utils import timezone
from django.contrib.humanize.templatetags.humanize import naturaltime
# Create your views here.
@login_required
@csrf_exempt
@login_required
@login_required
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
331,
648,
859,
13,
1102... | 3.326531 | 196 |
import os
from time import sleep
import json
#use hastags and mentions from cyber war news articles as filters on twitter
if __name__ == "__main__":
hashtags = getMentionsAndHashtags()
for hashtag in hashtags:
outfile = "../../../outputfiles/twitter/"+str(hashtag).replace("#","").replace("@","")+".json"
os.system("scrapy crawl TweetScraper -a query="+hashtag + " -a top_tweet=True" + " -a crawl_user=True"+ " -o "+outfile)
print(" Scraped tweets from "+hashtag)
sleep(30)
| [
11748,
28686,
198,
6738,
640,
1330,
3993,
198,
11748,
33918,
628,
198,
2,
1904,
19338,
3775,
290,
15802,
422,
10075,
1175,
1705,
6685,
355,
16628,
319,
17044,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
... | 2.726316 | 190 |
#!/usr/bin/env python3
# You can create the script manually or clone the script and use it script via a symlink:
# For example `ln -s /Users/brian/rando-scripts/jira.py /usr/local/bin/jira`
# Make sure you create an environment variable named JIRA_URL for your terminal, such as .zshenv. Set the env variable equal to https://your-jira.net/browse/
import webbrowser, sys, pyperclip, os
jira_url = os.environ['JIRA_URL']
sys.argv
if len(sys.argv) > 1:
issue_number = ' '.join(sys.argv[1:])
else:
issue_number = pyperclip.paste()
webbrowser.open_new_tab(jira_url + issue_number)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
921,
460,
2251,
262,
4226,
14500,
393,
17271,
262,
4226,
290,
779,
340,
4226,
2884,
257,
827,
4029,
676,
25,
198,
2,
1114,
1672,
4600,
18755,
532,
82,
1220,
14490,
14,
... | 2.706422 | 218 |
# treetech.py
# boj 16235
from sys import stdin
# input = stdin.readline
dx = (-1, -1, -1, 0, 0, 1, 1, 1)
dy = (-1, 0, 1, -1, 1, -1, 0, 1)
n, m, k = map(int, input().split())
mp = [[5] * n for _ in range(n)]
fert = [list(map(int, input().split())) for _ in range(n)]
tree = list()
for _ in range(m):
_x, _y, _z = map(int, input().split())
# x, y, yo
tree.append([_x-1, _y-1, _z])
for i in range(k):
tree = sorted(tree, key=lambda x:(x[0], x[1], x[2]))
# spring & summer
tmp = list()
dead = list()
for item in tree:
if mp[item[0]][item[1]] < item[2]:
mp[item[0]][item[1]] += (item[2] // 2)
else:
mp[item[0]][item[1]] -= item[2]
tmp.append([item[0], item[1], item[2] + 1])
tree = list(tmp)
# fall
for items in tree:
if items[2] % 5 == 0:
for i in range(8):
if 0 <= items[0]+dx[i] < n and 0 <= items[1]+dy[i] < n:
tree.append([items[0]+dx[i], items[1]+dy[i], 1])
# winter
for i in range(n):
for j in range(n):
mp[i][j] += fert[i][j]
print(len(tree)) | [
2,
2054,
316,
3055,
13,
9078,
198,
2,
1489,
73,
1467,
22370,
198,
198,
6738,
25064,
1330,
14367,
259,
198,
2,
5128,
796,
14367,
259,
13,
961,
1370,
198,
198,
34350,
796,
13841,
16,
11,
532,
16,
11,
532,
16,
11,
657,
11,
657,
11,... | 1.845541 | 628 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
MOJANG_AUTH = "https://authserver.mojang.com/authenticate"
JSON_POST_HEADERS = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36 "
}
HTTP_HEADERS = {
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36"
}
HTTPS_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36"
}
CONFIG = """[Monolith]
debug = true
"""
ASCII_TITLE = """ ███▄ ▄███▓ ▒█████ ███▄ █ ▒█████ ██▓ ██▓▄▄▄█████▓ ██░ ██
▓██▒▀█▀ ██▒▒██▒ ██▒ ██ ▀█ █ ▒██▒ ██▒▓██▒ ▓██▒▓ ██▒ ▓▒▓██░ ██▒
▓██ ▓██░▒██░ ██▒▓██ ▀█ ██▒▒██░ ██▒▒██░ ▒██▒▒ ▓██░ ▒░▒██▀▀██░
▒██ ▒██ ▒██ ██░▓██▒ ▐▌██▒▒██ ██░▒██░ ░██░░ ▓██▓ ░ ░▓█ ░██
▒██▒ ░██▒░ ████▓▒░▒██░ ▓██░░ ████▓▒░░██████▒░██░ ▒██▒ ░ ░▓█▒░██▓
░ ▒░ ░ ░░ ▒░▒░▒░ ░ ▒░ ▒ ▒ ░ ▒░▒░▒░ ░ ▒░▓ ░░▓ ▒ ░░ ▒ ░░▒░▒
░ ░ ░ ░ ▒ ▒░ ░ ░░ ░ ▒░ ░ ▒ ▒░ ░ ░ ▒ ░ ▒ ░ ░ ▒ ░▒░ ░
░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ▒ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
"""
COMBO_REG = ".+?@.+?\..+?:.+?"
| [
11770,
41,
15567,
62,
32,
24318,
796,
366,
5450,
1378,
18439,
15388,
13,
5908,
73,
648,
13,
785,
14,
41299,
5344,
1,
201,
198,
201,
198,
40386,
62,
32782,
62,
37682,
4877,
796,
1391,
201,
198,
220,
220,
220,
366,
19746,
12,
6030,
... | 1.449807 | 1,036 |
from django.core.management.base import BaseCommand, CommandError
from django.db.models.loading import get_model
from django_usda.models import Food, FoodGroup, FoodLanguaLFactor, LanguaLFactor, NutrientData, Nutrient, Source, Derivation, Weight, Footnote, DataLink, DataSource, DeletedFood, DeletedNutrient, DeletedFootnote
import zipfile
import csv
import json
import time
from django.db import IntegrityError
from django import db
appLabel = "django_usda"
modelMap = [
{"fileName": "DATA_SRC.txt", "model": DataSource},
{"fileName": "FD_GROUP.txt", "model": FoodGroup},
{"fileName": "FOOD_DES.txt", "model": Food},
{"fileName": "LANGDESC.txt", "model": LanguaLFactor},
{"fileName": "LANGUAL.txt", "model": FoodLanguaLFactor},
{"fileName": "NUTR_DEF.txt", "model": Nutrient},
{"fileName": "DERIV_CD.txt", "model": Derivation},
{"fileName": "SRC_CD.txt", "model": Source},
{"fileName": "NUT_DATA.txt", "model": NutrientData},
{"fileName": "WEIGHT.txt", "model": Weight},
{"fileName": "FOOTNOTE.txt", "model": Footnote},
{"fileName": "DATSRCLN.txt", "model": DataLink}
]
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
25138,
1330,
651,
62,
19849,
198,
6738,
42625,
14208,
62,
385,
6814,
13,
27530,
1330,
7318,
11,
7... | 2.647196 | 428 |
# Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains helper functions for tests.
"""
import operator
import os
import networkx.algorithms.isomorphism as iso
def make_into_set(iter_of_dict):
"""
Convenience function that turns an iterator of dicts into a set of
frozenset of the dict items.
"""
return set(frozenset(dict_.items()) for dict_ in iter_of_dict)
def equal_graphs(g1, g2,
node_attrs=('resid', 'resname', 'atomname', 'chain', 'charge_group', 'atype'),
edge_attrs=()):
"""
Parameters
----------
g1: networkx.Graph
g2: networkx.Graph
node_attrs: collections.abc.Iterable or None
Node attributes to consider. If `None`, the node attribute dicts must
be equal.
edge_attrs: collections.abc.Iterable or None
Edge attributes to consider. If `None`, the edge attribute dicts must
be equal.
Returns
-------
bool
True if `g1` and `g2` are isomorphic, False otherwise.
"""
if node_attrs is None:
node_equal = operator.eq
else:
node_equal = iso.categorical_node_match(node_attrs, [''] * len(node_attrs))
if edge_attrs is None:
edge_equal = operator.eq
else:
edge_equal = iso.categorical_node_match(edge_attrs, [''] * len(edge_attrs))
matcher = iso.GraphMatcher(g1, g2, node_match=node_equal, edge_match=edge_equal)
return matcher.is_isomorphic()
def find_in_path(names=('martinize2', 'martinize2.py')):
"""
Finds and returns the location of one of `names` in PATH, and returns the
first match.
Parameters
----------
names: collections.abc.Sequence
Names to look for in PATH.
Returns
-------
os.PathLike or None
"""
for folder in os.getenv("PATH", '').split(os.pathsep):
for name in names:
fullpath = os.path.join(folder, name)
if os.path.isfile(fullpath):
return fullpath | [
2,
15069,
2864,
2059,
286,
40214,
36795,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
92... | 2.650526 | 950 |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from os import path
from .io.meas_info import Info
from . import pick_types
from .utils import logger, verbose
@verbose
def read_selection(name, fname=None, info=None, verbose=None):
"""Read channel selection from file
By default, the selections used in ``mne_browse_raw`` are supported.
Additional selections can be added by specifying a selection file (e.g.
produced using ``mne_browse_raw``) using the ``fname`` parameter.
The ``name`` parameter can be a string or a list of string. The returned
selection will be the combination of all selections in the file where
(at least) one element in name is a substring of the selection name in
the file. For example, ``name=['temporal', 'Right-frontal']`` will produce
a combination of ``'Left-temporal'``, ``'Right-temporal'``, and
``'Right-frontal'``.
The included selections are:
* ``'Vertex'``
* ``'Left-temporal'``
* ``'Right-temporal'``
* ``'Left-parietal'``
* ``'Right-parietal'``
* ``'Left-occipital'``
* ``'Right-occipital'``
* ``'Left-frontal'``
* ``'Right-frontal'``
Parameters
----------
name : str or list of str
Name of the selection. If is a list, the selections are combined.
fname : str
Filename of the selection file (if None, built-in selections are used).
info : instance of Info
Measurement info file, which will be used to determine the spacing
of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag
systems and ``'MEG0111'`` for new ones.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
sel : list of string
List with channel names in the selection.
"""
# convert name to list of string
if not isinstance(name, (list, tuple)):
name = [name]
if isinstance(info, Info):
picks = pick_types(info, meg=True, exclude=())
if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
spacing = 'new'
else:
spacing = 'old'
elif info is not None:
raise TypeError('info must be an instance of Info or None, not %s'
% (type(info),))
else: # info is None
spacing = 'old'
# use built-in selections by default
if fname is None:
fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel')
if not path.isfile(fname):
raise ValueError('The file %s does not exist.' % fname)
# use this to make sure we find at least one match for each name
name_found = dict((n, False) for n in name)
with open(fname, 'r') as fid:
sel = []
for line in fid:
line = line.strip()
# skip blank lines and comments
if len(line) == 0 or line[0] == '#':
continue
# get the name of the selection in the file
pos = line.find(':')
if pos < 0:
logger.info('":" delimiter not found in selections file, '
'skipping line')
continue
sel_name_file = line[:pos]
# search for substring match with name provided
for n in name:
if sel_name_file.find(n) >= 0:
sel.extend(line[pos + 1:].split('|'))
name_found[n] = True
break
# make sure we found at least one match for each name
for n, found in name_found.items():
if not found:
raise ValueError('No match for selection name "%s" found' % n)
# make the selection a sorted list with unique elements
sel = list(set(sel))
sel.sort()
if spacing == 'new': # "new" or "old" by now, "old" is default
sel = [s.replace('MEG ', 'MEG') for s in sel]
return sel
| [
2,
46665,
25,
21000,
260,
20159,
3319,
1279,
1000,
87,
49078,
13,
4546,
3319,
31,
46813,
785,
12,
1845,
396,
3055,
13,
8310,
29,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4705,
72,
4345,
282,
391,
268,
1279,
907,
71,
... | 2.341283 | 1,761 |
#!/usr/bin/env python
# --------------------------------------------------------------------------------
# --- PARAMS
# --------------------------------------------------------------------------------
Types=['integer','double precision','real','logical']
Dims =[1,0]
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
import os
import sys
import glob
if len(sys.argv)>1:
Files=sys.argv[1:]
else:
Files=glob.glob('*.Template')
# print('Template files:')
# print(Files)
if len(Files)>0:
filebase=Files[0].replace('.Template','')
#
for typ in Types:
for dim in Dims:
#
TD=typ[0]+'%d'%dim
TD=TD.upper()
td=TD.lower()
filename=filebase+TD+'.f90'
if dim==0:
TYPE_AND_DIM=typ
else:
TYPE_AND_DIM=typ+', dimension(n1)'
#
fr=open(Files[0],'r')
fw=open(filename,'w')
for l in fr.readlines():
l=l.replace('<TD>',TD)
l=l.replace('<N1>','n1')
l=l.replace('<td>',td)
l=l.replace('<TYPE_AND_DIM>',TYPE_AND_DIM)
fw.write(l)
fw.close()
fr.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
2,
16529,
1783,
198,
2,
11420,
220,
29463,
40834,
198,
2,
16529,
1783,
198,
31431,
28,
17816,
41433,
41707,
23352,
15440,
41707,
5305,
41707,
6404,
605,
20520,
198,
35,
12078,
796,
... | 2.175439 | 627 |
import grequests
import requests | [
11748,
308,
8897,
3558,
198,
11748,
7007
] | 4.571429 | 7 |
"""Register the YunTemp to blacs.
This is boilerplate and should be only minimally changed.
"""
from labscript_devices import register_classes
register_classes(
"DummyDevice",
BLACS_tab="user_devices.dummy_device.blacs_tabs.DummyDeviceTab",
runviewer_parser=None,
)
| [
37811,
38804,
262,
20757,
30782,
284,
698,
16436,
13,
198,
198,
1212,
318,
36741,
6816,
290,
815,
307,
691,
10356,
453,
3421,
13,
198,
37811,
198,
198,
6738,
2248,
12048,
62,
42034,
1330,
7881,
62,
37724,
198,
198,
30238,
62,
37724,
7... | 2.927083 | 96 |
# this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
from astropy.tests.pytest_plugins import pytest_addoption as astropy_pytest_addoption
# Uncomment the following line to treat all DeprecationWarnings as
# exceptions
enable_deprecations_as_exceptions()
import os
from astropy.tests.helper import pytest
@pytest.fixture
| [
2,
428,
4909,
17944,
20652,
326,
17425,
12972,
13,
9288,
329,
6468,
28338,
5254,
13,
198,
2,
416,
33332,
606,
994,
287,
369,
701,
395,
13,
9078,
484,
389,
7073,
540,
416,
12972,
13,
9288,
198,
2,
645,
2300,
703,
340,
318,
24399,
1... | 3.541096 | 146 |
# Copyright 2018-present Kensho Technologies, LLC.
from collections import namedtuple
from six.moves.urllib.parse import quote_plus
from .. import test_backend
DEFAULT_ROOT_PASSWORD = "root" # nosec
SQL_BACKENDS = {
test_backend.POSTGRES,
test_backend.MYSQL,
test_backend.MARIADB,
test_backend.MSSQL,
test_backend.SQLITE,
}
# sqlite does not require that a DB be created/dropped for testing
EXPLICIT_DB_BACKENDS = {
test_backend.POSTGRES,
test_backend.MYSQL,
test_backend.MARIADB,
test_backend.MSSQL,
}
MATCH_BACKENDS = {
test_backend.ORIENTDB,
}
# Split Neo4j and RedisGraph because RedisGraph doesn't support all Neo4j features.
NEO4J_BACKENDS = {
test_backend.NEO4J,
}
REDISGRAPH_BACKENDS = {
test_backend.REDISGRAPH,
}
pyodbc_parameter_string = "DRIVER={driver};SERVER={server};UID={uid};PWD={pwd}".format( # nosec
driver="{ODBC Driver 17 for SQL SERVER}",
server="127.0.0.1,1434", # Do not change to 'localhost'.
# You won't be able to connect with the db.
uid="SA", # System Administrator.
pwd="Root-secure1",
)
# delimeters must be URL escaped
escaped_pyodbc_parameter_string = quote_plus(pyodbc_parameter_string)
SQL_BACKEND_TO_CONNECTION_STRING = {
# HACK(bojanserafimov): Entries are commented-out because MSSQL is the only one whose scheme
# initialization is properly configured, with a hierarchy of multiple
# databases and schemas. I'm keeping the code to remember the connection
# string formats.
#
test_backend.POSTGRES: "postgresql://postgres:{password}@localhost:5433".format(
password=DEFAULT_ROOT_PASSWORD
),
# test_backend.MYSQL:
# 'mysql://root:{password}@127.0.0.1:3307'.format(password=DEFAULT_ROOT_PASSWORD),
# test_backend.MARIADB:
# 'mysql://root:{password}@127.0.0.1:3308'.format(password=DEFAULT_ROOT_PASSWORD),
test_backend.MSSQL: "mssql+pyodbc:///?odbc_connect={}".format(escaped_pyodbc_parameter_string),
# test_backend.SQLITE:
# 'sqlite:///:memory:',
}
SqlTestBackend = namedtuple(
"SqlTestBackend",
(
"engine",
"base_connection_string",
),
)
| [
2,
15069,
2864,
12,
25579,
29018,
8873,
21852,
11,
11419,
13,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
6738,
2237,
13,
76,
5241,
13,
333,
297,
571,
13,
29572,
1330,
9577,
62,
9541,
198,
198,
6738,
11485,
1330,
1332,
62,
18... | 2.326382 | 959 |
import os
import json
import statistics
import pandas as pd
import numpy as np
class CSVDataSet(DataSet):
""" A dataset living locally in a .csv file
"""
def getResource(self, index):
"""Get a specific data point from the data set.
Parameters
----------
index : int or string
The index of the data point in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
A ndarray of the data point.
"""
if type(index) is int:
return self.D.iloc[index].as_matrix()
else:
return self.D.loc[index].as_matrix()
def getColumn(self, index):
"""Get a column of the dataframe.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
The values in the column.
"""
if type(index) is int:
return self.D.iloc[:, index].as_matrix()
else:
return self.D[index].as_matrix()
def getColumnValues(self, index):
"""Get the unique values of a column.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
A ndarray of the unique values.
"""
column = self.getColumn(index)
if column.dtype == "float64":
column = column[~np.isnan(column)]
else:
column = column[np.array([x != "NA" for x in column])]
return np.unique(column)
def getColumnDistribution(self, index):
"""Get the distribution of values in a column.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`, :obj:`ndarray`
An array x of the unique labels, and an array y of the count of that label
"""
x = self.getColumnValues(index)
column = self.getColumn(index)
y = [np.sum(column == v) for v in x]
return x, y
def getColumnDescription(self, index, sep="\n"):
"""Get a description of the column.
"""
desc = []
if type(index) is int:
index = self.D.columns.values[index]
for i, name in enumerate(self.D.columns.names):
desc.append(name + ": " + index[i])
return sep.join(desc)
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
7869,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
4871,
44189,
6601,
7248,
7,
6601,
7248,
2599,
198,
220,
220,
220,
37227,
317,
27039,
2877,
... | 2.206926 | 1,184 |
import webapp2
class MainHandler(webapp2.RequestHandler):
"""Main Handler for the blog"""
def get(self):
"""Redirects to the blog page"""
self.redirect("/blog")
| [
11748,
3992,
1324,
17,
628,
198,
4871,
8774,
25060,
7,
12384,
1324,
17,
13,
18453,
25060,
2599,
198,
220,
220,
220,
37227,
13383,
32412,
329,
262,
4130,
37811,
628,
220,
220,
220,
825,
651,
7,
944,
2599,
198,
220,
220,
220,
220,
220... | 2.647887 | 71 |
import os
import time
import numpy as np
from numba import jit, types
from numba.typed import Dict
from blockbased_synapseaware.hole_filling.connected_components.cc3d import connected_components
from blockbased_synapseaware.utilities.dataIO import PickleData, PickleNumbaData, WriteH5File
from blockbased_synapseaware.utilities.constants import *
@jit(nopython=True)
| [
198,
11748,
28686,
198,
11748,
640,
628,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
6738,
997,
7012,
1330,
474,
270,
11,
3858,
198,
6738,
997,
7012,
13,
774,
9124,
1330,
360,
713,
628,
198,
198,
6738,
2512,
3106,
62,
28... | 3.046875 | 128 |
#!/usr/bin/env python
'''
@author Luke Campbel <LCampbell@ASAScience.com>
@file
@date 03/27/12 15:30
@description DESCRIPTION
'''
from pyon.util.arg_check import validate_is_instance, validate_in, validate_equal, validate_true
class ArgCheckService(object):
'''
Example Service illustrating how to use the various validateion mechanisms
'''
def pass_integer(self, val=''):
'''
Say you were expecting an integer from the client...
'''
validate_is_instance(val,int,'Value is not an integer.')
return val
def pass_float(self, val=1.0):
'''
Say you were expecting a float from the client
'''
validate_is_instance(val,float,'Value is not a float.')
return val
def handle_list(self, needle, haystack):
'''
You needed to be certain that something was in the list or dict
'''
validate_in(needle,haystack,'Can\'t find %s in %s.' % (needle, haystack))
return needle
def check_equality(self, a,b):
'''
You needed to be sure that two items we're equivalent
'''
validate_equal(a,b,'%s != %s' %(str(a), str(b)))
return True
def list_len(self,l):
'''
You needed to be certain that a list had len >0
'''
validate_true(len(l)>0, 'list=%s was empty.' % str(l))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
31,
9800,
11336,
5425,
6667,
1279,
5639,
696,
7923,
31,
1921,
1921,
4234,
13,
785,
29,
198,
31,
7753,
220,
198,
31,
4475,
7643,
14,
1983,
14,
1065,
1315,
25,
1270,
19... | 2.380623 | 578 |
import dpkt
from dpkt.tcp import *
from tcp_util import *
| [
11748,
288,
79,
21841,
198,
198,
6738,
288,
79,
21841,
13,
83,
13155,
1330,
1635,
198,
6738,
48265,
62,
22602,
1330,
1635,
628
] | 2.608696 | 23 |
'''
Written by Kenny William Nyallau ©2020
This is a python implementation of Rover challenge
'''
| [
7061,
6,
198,
25354,
416,
22102,
3977,
17735,
439,
559,
10673,
42334,
198,
1212,
318,
257,
21015,
7822,
286,
36718,
4427,
198,
7061,
6,
628,
198
] | 3.846154 | 26 |
import pickle
experts_file_path = "../experts/sampled_experts.obs"
with open(experts_file_path, "rb") as f:
expert_trajectories = pickle.load(f)
# (num_experts)
print(len(expert_trajectories))
# (trajectory_length)
print(len(expert_trajectories[0]["trajectory"]))
# (num_observations, 1)
print(expert_trajectories[0]["trajectory"][0]["state"].shape)
print(expert_trajectories[0]["context"])
# Should have 30 for each context - 2,3,4,5
context_bins = [0]*4
for expert_trajectory in expert_trajectories:
context_decimal = expert_trajectory["context"][0] + 2 * expert_trajectory["context"][1]
context_bins[context_decimal] += 1
print(context_bins)
| [
11748,
2298,
293,
198,
198,
23100,
912,
62,
7753,
62,
6978,
796,
366,
40720,
23100,
912,
14,
37687,
10137,
62,
23100,
912,
13,
8158,
1,
198,
198,
4480,
1280,
7,
23100,
912,
62,
7753,
62,
6978,
11,
366,
26145,
4943,
355,
277,
25,
1... | 2.492481 | 266 |
"""
Lots of code gripes.
"""
import os
import shlex
import subprocess
import sys
from typing import List
from navio_tasks import settings as settings
from navio_tasks.cli_commands import check_command_exists, config_pythonpath
from navio_tasks.output import say_and_exit
from navio_tasks.pure_reports.cli_pygount import total_loc
from navio_tasks.settings import (
IS_DJANGO,
IS_GITLAB,
PROBLEMS_FOLDER,
PROJECT_NAME,
VENV_SHELL,
)
from navio_tasks.utils import inform
def do_lint(folder_type: str) -> str:
"""
Execute pylint
"""
# pylint: disable=too-many-locals
check_command_exists("pylint")
if folder_type == PROJECT_NAME:
pylintrc = f"{settings.CONFIG_FOLDER}/.pylintrc"
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint.txt"
else:
pylintrc = f"{settings.CONFIG_FOLDER}/.pylintrc_{folder_type}"
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint_{folder_type}.txt"
if os.path.isfile(lint_output_file_name):
os.remove(lint_output_file_name)
if IS_DJANGO:
django_bits = "--load-plugins pylint_django "
else:
django_bits = ""
# pylint: disable=pointless-string-statement
command_text = (
f"{VENV_SHELL} pylint {django_bits} " f"--rcfile={pylintrc} {folder_type} "
)
command_text += " "
"--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"
"".strip().replace(" ", " ")
inform(command_text)
command = shlex.split(command_text)
with open(lint_output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
return lint_output_file_name
def evaluated_lint_results(
lint_output_file_name: str,
small_code_base_cut_off: int,
maximum_lint: int,
fatals: List[str],
) -> str:
"""Deciding if the lint is bad enough to fail
Also treats certain errors as fatal even if under the maximum cutoff.
"""
with open(lint_output_file_name) as file_handle:
full_text = file_handle.read()
lint_did_indeed_run = "Your code has been rated at" in full_text
with open(lint_output_file_name) as file_handle:
fatal_errors = sum(1 for line in file_handle if ": E" in line or ": F" in line)
for fatal in fatals:
for line in file_handle:
if fatal in file_handle or ": E" in line or ": F" in line:
fatal_errors += 1
if fatal_errors > 0:
with open(lint_output_file_name) as file_handle:
for line in file_handle:
if "*************" in line:
continue
if not line or not line.strip("\n "):
continue
inform(line.strip("\n "))
message = f"Fatal lint errors and possibly others, too : {fatal_errors}"
if IS_GITLAB:
with open(lint_output_file_name) as error_file:
inform(error_file.read())
say_and_exit(message, "lint")
return message
with open(lint_output_file_name) as lint_file_handle:
for line in [
line
for line in lint_file_handle
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
]:
inform(line)
if total_loc() > small_code_base_cut_off:
cutoff = maximum_lint
else:
cutoff = 0
with open(lint_output_file_name) as lint_file_handle:
num_lines = sum(
1
for line in lint_file_handle
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
)
if num_lines > cutoff:
say_and_exit(f"Too many lines of lint : {num_lines}, max {cutoff}", "pylint")
sys.exit(-1)
with open(lint_output_file_name) as lint_file_handle:
num_lines_all_output = sum(1 for _ in lint_file_handle)
if (
not lint_did_indeed_run
and num_lines_all_output == 0
and os.path.isfile(lint_output_file_name)
):
# should always have at least 'found 0 errors' in output
# force lint to re-run, because empty file will be missing
os.remove(lint_output_file_name)
say_and_exit(
"No lint messages at all, did pylint fail to run or is it installed?",
"pylint",
)
sys.exit(-1)
return "pylint succeeded"
| [
37811,
198,
43643,
286,
2438,
11762,
274,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
427,
2588,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
6812,
952,
62,
83,
6791,
1330,
6460,
355,
6460,... | 2.125343 | 2,186 |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
mysql> desc userinfors;
+----------+----------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+----------+----------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| name | char(40) | YES | | NULL | |
| passwd | char(40) | YES | | NULL | |
| isdelete | bit(1) | YES | | b'0' | |
+----------+----------+------+-----+---------+----------------+
"""
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
28744,
13976,
29,
1715,
2836,
10745,
669,
26,
198,
10,
35937,
10,
35937,
10,
23031,
10,
650,
19529,
982,
1... | 2.185874 | 269 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| [
6738,
220,
844,
27349,
62,
2118,
9078,
13,
8692,
1330,
7308,
198,
6738,
220,
844,
27349,
62,
2118,
9078,
13,
16624,
1330,
13283,
628
] | 3.375 | 24 |
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from models.plain_lstm import PlainLSTM
from utils import Utils
from data_loader import DataLoader
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
from torchviz import make_dot, make_dot_from_trace
parser = argparse.ArgumentParser(description='Training Parameter')
parser.add_argument('--cuda', action='store', default=None, type=int)
opt = parser.parse_args()
print(opt)
# all constants
total_epochs = 20
batch_size = 16
data_path = './data/math_equation_data.txt'
g_seq_length = 15
g_emb_dim = 8
g_hidden_dim = 8
vocab_size = 7 # need to not hard code this. Todo for later.
if __name__ == '__main__':
main() | [
11748,
28686,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
2... | 3.010638 | 282 |
"""
Scrapes OIT's Web Feeds to add courses and sections to database.
Procedure:
- Get list of departments (3-letter department codes)
- Run this: http://etcweb.princeton.edu/webfeeds/courseofferings/?term=current&subject=COS
- Parse it for courses, sections, and lecture times (as recurring events)
"""
from lxml import etree
import HTMLParser
import urllib2
from bs4 import BeautifulSoup
import re
| [
37811,
198,
3351,
2416,
274,
440,
2043,
338,
5313,
18272,
82,
284,
751,
10902,
290,
9004,
284,
6831,
13,
198,
198,
2964,
771,
495,
25,
198,
198,
12,
3497,
1351,
286,
13346,
357,
18,
12,
9291,
5011,
12416,
8,
198,
12,
5660,
428,
25... | 3.284553 | 123 |
from pyramid.view import view_config
from kotti.interfaces import IFile
@view_config(
name="view",
context=IFile,
permission="view",
renderer="kotti:templates/view/file.pt",
)
@view_config(name="inline-view", context=IFile, permission="view")
@view_config(name="attachment-view", context=IFile, permission="view")
def includeme(config):
""" Pyramid includeme hook.
:param config: app config
:type config: :class:`pyramid.config.Configurator`
"""
config.scan(__name__)
| [
6738,
27944,
13,
1177,
1330,
1570,
62,
11250,
198,
198,
6738,
479,
26380,
13,
3849,
32186,
1330,
314,
8979,
628,
198,
31,
1177,
62,
11250,
7,
198,
220,
220,
220,
1438,
2625,
1177,
1600,
198,
220,
220,
220,
4732,
28,
5064,
576,
11,
... | 2.798913 | 184 |
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A simple recursive-descent parser for the table file format.
The grammar implemented here is roughly (taking some liberties with whitespace
and comment parsing):
table_file ::= ( BLANK_LINE | table_def ) end_of_file ;
table_def ::= "--" IDENT CITATION NL
table_header
( table_row )+ ;
table_header ::= ( IDENT "(" BITRANGE ")" )+ ;
table_row ::= ( PATTERN )+ ACTION ;
IDENT = /[a-z0-9_]+/
CITATION = "(" /[^)]+/ ")"
BITRANGE = /[0-9]+/ (":" /[0-9]+/)?
PATTERN = /[10x_]+/
ACTION = ( "=" IDENT | "->" IDENT ) ( "(" IDENT ")" )?
NL = a newline
BLANK_LINE = what you might expect it to be
"""
import re
import dgen_core
# These globals track the parser state.
_in = None
_line_no = None
_tables = None
_line = None
_last_row = None
def parse_tables(input):
"""Entry point for the parser. Input should be a file or file-like."""
global _in, _line_no, _tables
_in = input
_line_no = 0
_tables = []
next_line()
while not end_of_file():
blank_line() or table_def() or unexpected()
return _tables
| [
2,
48443,
14629,
14,
8800,
14,
29412,
17,
198,
2,
15069,
357,
66,
8,
2321,
383,
12547,
20985,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
1... | 2.686022 | 465 |
import cv2
import numpy as np
import logging
from argparse import ArgumentParser
from src.classifier import Classifier
from src.reader import ImageReader
from src.solver import SudokuSolver
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("image", help="Input image")
parser.add_argument("-w", "--weights", help="CNN weights", default="src/models/model.h5")
args = parser.parse_args()
file_path = args.image
model_path = args.weights
reader = ImageReader()
try:
cells = reader.extract_board_cells(file_path)
except AttributeError:
print()
logging.error('\nThe image has not been read correctly - file not found!\n')
exit(0)
try:
classifier = Classifier(model_path)
classifications = classifier.classify_cells(cells)
classifications = [str(c) for c in classifications]
grid = ''.join(classifications)
except OSError:
logging.error('\nThe model weights have not been loaded - file not found!\n')
exit(0)
solver = SudokuSolver()
try:
solver.solve(grid)
except TypeError:
logging.error('The image has not been read correctly - solution not found!\n')
exit(0) | [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
18931,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
12351,
13,
4871,
7483,
1330,
5016,
7483,
198,
6738,
12351,
13,
46862,
1330,
7412,
33634,
198,
6738,
12351,
... | 2.634454 | 476 |
from django.test import TestCase
from django.test import Client
# Create your tests here.
from django.conf import settings
from django.test import TestCase
from .models import *
"""
Data is not testing or returning 0 due to authentication. Turn off authentication to get proper result
we can use more test case as CRUD.
Instead of testing every possible scenario i have tried to test on local and client and model.
"""
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
9288,
1330,
20985,
198,
2,
13610,
534,
5254,
994,
13,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
19... | 4.147059 | 102 |
# pylint: disable=no-member,invalid-name,redefined-outer-name
"""ArviZ plotting backends."""
import re
import numpy as np
from pandas import DataFrame
from ...rcparams import rcParams
__all__ = [
"to_cds",
"output_notebook",
"output_file",
"ColumnDataSource",
"create_layout",
"show_layout",
]
def to_cds(
data,
var_names=None,
groups=None,
dimensions=None,
group_info=True,
var_name_format=None,
index_origin=None,
):
"""Transform data to ColumnDataSource (CDS) compatible with Bokeh.
Uses `_ARVIZ_GROUP_` and `_ARVIZ_CDS_SELECTION_` to separate var_name
from group and dimensions in CDS columns.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_inference_data for details
var_names : str or list of str, optional
Variables to be processed, if None all variables are processed.
groups : str or list of str, optional
Select groups for CDS. Default groups are {"posterior_groups", "prior_groups",
"posterior_groups_warmup"}
- posterior_groups: posterior, posterior_predictive, sample_stats
- prior_groups: prior, prior_predictive, sample_stats_prior
- posterior_groups_warmup: warmup_posterior, warmup_posterior_predictive,
warmup_sample_stats
ignore_groups : str or list of str, optional
Ignore specific groups from CDS.
dimension : str, or list of str, optional
Select dimensions along to slice the data. By default uses ("chain", "draw").
group_info : bool
Add group info for `var_name_format`
var_name_format : str or tuple of tuple of string, optional
Select column name format for non-scalar input.
Predefined options are {"brackets", "underscore", "cds"}
"brackets":
- add_group_info == False: ``theta[0,0]``
- add_group_info == True: ``theta_posterior[0,0]``
"underscore":
- add_group_info == False: ``theta_0_0``
- add_group_info == True: ``theta_posterior_0_0_``
"cds":
- add_group_info == False: ``theta_ARVIZ_CDS_SELECTION_0_0``
- add_group_info == True: ``theta_ARVIZ_GROUP_posterior__ARVIZ_CDS_SELECTION_0_0``
tuple:
Structure:
- tuple: (dim_info, group_info)
- dim_info: (str: `.join` separator,
str: dim_separator_start,
str: dim_separator_end)
- group_info: (str: group separator start, str: group separator end)
Example: ((",", "[", "]"), ("_", ""))
- add_group_info == False: ``theta[0,0]``
- add_group_info == True: ``theta_posterior[0,0]``
index_origin : int, optional
Start parameter indices from `index_origin`. Either 0 or 1.
Returns
-------
bokeh.models.ColumnDataSource object
"""
from ...utils import flatten_inference_data_to_dict
if var_name_format is None:
var_name_format = "cds"
cds_dict = flatten_inference_data_to_dict(
data=data,
var_names=var_names,
groups=groups,
dimensions=dimensions,
group_info=group_info,
index_origin=index_origin,
var_name_format=var_name_format,
)
cds_data = ColumnDataSource(DataFrame.from_dict(cds_dict, orient="columns"))
return cds_data
def output_notebook(*args, **kwargs):
"""Wrap func:`bokeh.plotting.output_notebook`."""
import bokeh.plotting as bkp
return bkp.output_notebook(*args, **kwargs)
def output_file(*args, **kwargs):
"""Wrap :func:`bokeh.plotting.output_file`."""
import bokeh.plotting as bkp
return bkp.output_file(*args, **kwargs)
def ColumnDataSource(*args, **kwargs):
"""Wrap bokeh.models.ColumnDataSource."""
from bokeh.models import ColumnDataSource
return ColumnDataSource(*args, **kwargs)
def create_layout(ax, force_layout=False):
"""Transform bokeh array of figures to layout."""
ax = np.atleast_2d(ax)
subplot_order = rcParams["plot.bokeh.layout.order"]
if force_layout:
from bokeh.layouts import gridplot as layout
ax = ax.tolist()
layout_args = {
"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
"toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
}
elif any(item in subplot_order for item in ("row", "column")):
# check number of rows
match = re.match(r"(\d*)(row|column)", subplot_order)
n = int(match.group(1)) if match.group(1) is not None else 1
subplot_order = match.group(2)
# set up 1D list of axes
ax = [item for item in ax.ravel().tolist() if item is not None]
layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
if subplot_order == "row" and n == 1:
from bokeh.layouts import row as layout
elif subplot_order == "column" and n == 1:
from bokeh.layouts import column as layout
else:
from bokeh.layouts import layout
if n != 1:
ax = np.array(ax + [None for _ in range(int(np.ceil(len(ax) / n)) - len(ax))])
if subplot_order == "row":
ax = ax.reshape(n, -1)
else:
ax = ax.reshape(-1, n)
ax = ax.tolist()
else:
if subplot_order in ("square", "square_trimmed"):
ax = [item for item in ax.ravel().tolist() if item is not None]
n = int(np.ceil(len(ax) ** 0.5))
ax = ax + [None for _ in range(n**2 - len(ax))]
ax = np.array(ax).reshape(n, n)
ax = ax.tolist()
if (subplot_order == "square_trimmed") and any(
all(item is None for item in row) for row in ax
):
from bokeh.layouts import layout
ax = [row for row in ax if not all(item is None for item in row)]
layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
else:
from bokeh.layouts import gridplot as layout
layout_args = {
"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
"toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
}
# ignore "fixed" sizing_mode without explicit width and height
if layout_args.get("sizing_mode", "") == "fixed":
layout_args.pop("sizing_mode")
return layout(ax, **layout_args)
def show_layout(ax, show=True, force_layout=False):
"""Create a layout and call bokeh show."""
if show is None:
show = rcParams["plot.bokeh.show"]
if show:
import bokeh.plotting as bkp
layout = create_layout(ax, force_layout=force_layout)
bkp.show(layout)
def _copy_docstring(lib, function):
"""Extract docstring from function."""
import importlib
try:
module = importlib.import_module(lib)
func = getattr(module, function)
doc = func.__doc__
except ImportError:
doc = f"Failed to import function {function} from {lib}"
if not isinstance(doc, str):
doc = ""
return doc
# TODO: try copying substitutions too, or autoreplace them ourselves
output_notebook.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_notebook")
output_file.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_file")
ColumnDataSource.__doc__ += "\n\n" + _copy_docstring("bokeh.models", "ColumnDataSource")
| [
2,
279,
2645,
600,
25,
15560,
28,
3919,
12,
19522,
11,
259,
12102,
12,
3672,
11,
445,
18156,
12,
39605,
12,
3672,
198,
37811,
3163,
8903,
57,
29353,
736,
2412,
526,
15931,
198,
11748,
302,
198,
198,
11748,
299,
32152,
355,
45941,
19... | 2.245035 | 3,424 |
#!/usr/bin/env python
import sys
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import vnaas
PORT = 8080
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
6738,
33718,
13,
18504,
12397,
1330,
25290,
38,
2149,
756,
10613,
198,
6738,
33718,
13,
5450,
18497,
1330,
38288,
18497,
198,
6738,
33718,
13,
1669,
11224,
1330,
314,
... | 2.972222 | 72 |
first_name = input()
last_name = input()
age = int(input())
town = input()
concatanate_variables(first_name, last_name, age, town)
| [
11085,
62,
3672,
796,
5128,
3419,
198,
12957,
62,
3672,
796,
5128,
3419,
198,
496,
796,
493,
7,
15414,
28955,
198,
12735,
796,
5128,
3419,
628,
198,
198,
1102,
9246,
272,
378,
62,
25641,
2977,
7,
11085,
62,
3672,
11,
938,
62,
3672,
... | 2.734694 | 49 |
import random
import time
| [
11748,
4738,
198,
11748,
640,
628,
628,
198
] | 3.75 | 8 |
"""Test the OUTCAR io interface"""
# pylint: disable=unused-import,redefined-outer-name,unused-argument,unused-wildcard-import,wildcard-import
import pytest
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.fixtures.testdata import data_path
from aiida_vasp.io.outcar import OutcarParser
def test_parse_outcar():
"""Parse a reference OUTCAR file with the OutcarParser and compare the result to a reference value."""
file_name = 'OUTCAR'
path = data_path('outcar', file_name)
parser = OutcarParser(file_path=path)
params = parser.get_quantity('outcar-parameters', {})
result = params['outcar-parameters'].get_dict()
assert result['outcar-volume'] == 65.94
assert result['outcar-efermi'] == 7.2948
assert result['outcar-energies']
assert result['symmetries']['num_space_group_operations'] == 48
assert result['symmetries']['num_point_group_operations'] == 48
assert result['symmetries']['point_symmetry'] == 'O_h'
assert result['symmetries']['space_group'] == 'D_2d'
| [
37811,
14402,
262,
16289,
20034,
33245,
7071,
37811,
198,
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
11748,
11,
445,
18156,
12,
39605,
12,
3672,
11,
403,
1484,
12,
49140,
11,
403,
1484,
12,
21992,
9517,
12,
11748,
11,
21992,
9... | 2.740053 | 377 |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
import logging
from einops import rearrange, reduce, repeat
from timm.models import resnet50, tv_resnet101, tv_resnet152
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import torchvision.models as models
_logger = logging.getLogger(__name__)
default_cfgs = {
# ResNet
'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'),
'resnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),
'resnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth',
interpolation='bicubic'),
'resnet101': _cfg(url='', interpolation='bicubic'),
'resnet101d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet152': _cfg(url='', interpolation='bicubic'),
'resnet200': _cfg(url='', interpolation='bicubic'),
}
class ConvActionModule(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
@register_model
@register_model
@register_model
'''
@register_model
def action_tf_efficientnetv2_m_in21k(pretrained=False, **kwargs):
num_features = 2048
model_kwargs = dict(num_features=num_features, **kwargs)
model = ConvActionModule(backbone=None, **model_kwargs)
backbone = action_tf_efficientnetv2_m_in21k(pretrained=pretrained)
print (backone)
model.backbone = backbone
model.default_cfg = backbone.default_cfga
return model
'''
| [
2,
20368,
22369,
198,
2,
2451,
259,
3602,
16354,
198,
2,
15069,
357,
66,
8,
33448,
5413,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
22503,
416,
9033,
18258,
198,
2,
20368,
22369,
198,
198,... | 2.708302 | 1,337 |
from typing import Union, Iterable, Mapping, Any
import peewee
from backend.library.decorators.cache import unified
| [
6738,
19720,
1330,
4479,
11,
40806,
540,
11,
337,
5912,
11,
4377,
198,
11748,
613,
413,
1453,
198,
198,
6738,
30203,
13,
32016,
13,
12501,
273,
2024,
13,
23870,
1330,
22706,
628
] | 3.6875 | 32 |
import time
import random
import requests
from lxml import etree
headers = {
"UserAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
if __name__ == "__main__":
crawl_main()
| [
11748,
640,
201,
198,
11748,
4738,
201,
198,
11748,
7007,
201,
198,
6738,
300,
19875,
1330,
2123,
631,
201,
198,
201,
198,
201,
198,
50145,
796,
1391,
201,
198,
220,
220,
220,
366,
12982,
36772,
1298,
366,
44,
8590,
5049,
14,
20,
13... | 2.246032 | 126 |
#!/usr/bin/env python
"""End to end tests for lib.flows.general.grep."""
from grr.endtoend_tests import base
from grr.lib import aff4
from grr.lib.rdfvalues import client as rdf_client
class TestSearchFiles(base.AutomatedTest):
"""Test SearchFileContent."""
platforms = ["Linux"]
flow = "SearchFileContent"
test_output_path = "analysis/SearchFiles/testing"
args = {"output": test_output_path,
"paths": ["/bin/ls*"],
"also_download": True}
class TestSearchFilesGrep(base.AutomatedTest):
"""Test SearchFileContent with grep."""
platforms = ["Linux"]
flow = "SearchFileContent"
test_output_path = "analysis/SearchFilesGrep/testing"
args = {"output": test_output_path,
"paths": ["/bin/ls*"],
"grep": rdf_client.BareGrepSpec(literal="ELF"),
"also_download": True}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
12915,
284,
886,
5254,
329,
9195,
13,
44041,
13,
24622,
13,
70,
7856,
526,
15931,
628,
198,
6738,
1036,
81,
13,
437,
1462,
437,
62,
41989,
1330,
2779,
198,
6738,
1036,
81,
13,... | 2.684887 | 311 |
from django.contrib import admin
# from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# from django.contrib.auth.models import User
# Register your models here.
from .models import LessonPeriods,DailyAttendance
admin.site.register(LessonPeriods)
admin.site.register(DailyAttendance)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
2,
422,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
28482,
1330,
11787,
46787,
355,
7308,
12982,
46787,
198,
2,
422,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
... | 3.311111 | 90 |
import logging
from odoo import http
from odoo.exceptions import except_orm, UserError, Warning
from odoo.http import request, serialize_exception as _serialize_exception, content_disposition
from odoo import api, fields, models, tools, _
import datetime
from urllib.parse import urlencode, quote as quote
import urllib.parse
import requests
import re
import json
from functools import reduce
_logger = logging.getLogger(__name__)
try:
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
| [
11748,
18931,
198,
6738,
16298,
2238,
1330,
2638,
198,
6738,
16298,
2238,
13,
1069,
11755,
1330,
2845,
62,
579,
11,
11787,
12331,
11,
15932,
198,
6738,
16298,
2238,
13,
4023,
1330,
2581,
11,
11389,
1096,
62,
1069,
4516,
355,
4808,
46911... | 2.362776 | 634 |
import opensees as ops
ops.wipe()
ops.uniaxialMaterial("Elastic", 1, 1000.);
ops.testUniaxialMaterial(1);
for strain in [0.01, 0.02, 0.03, 0.04, 0.05]:
ops.setStrain(strain);
print("strain: ", str(ops.getStrain()), " stress: ", str(ops.getStress()), " tangent: ", str(ops.getTangent()));
ops.uniaxialMaterial("Elastic", 2, 1000.);
ops.uniaxialMaterial("Parallel", 3, 1, 2);
ops.testUniaxialMaterial(3);
for strain in [0.01, 0.02, 0.03, 0.04, 0.05]:
ops.setStrain(strain);
print("strain: ", str(ops.getStrain()), " stress: ", str(ops.getStress()), " tangent: ", str(ops.getTangent()));
| [
11748,
1034,
1072,
274,
355,
39628,
198,
198,
2840,
13,
86,
3757,
3419,
198,
198,
2840,
13,
39934,
87,
498,
17518,
7203,
9527,
3477,
1600,
352,
11,
8576,
13,
1776,
198,
2840,
13,
9288,
3118,
544,
87,
498,
17518,
7,
16,
1776,
198,
... | 2.339768 | 259 |
"""pieces of thing for inspiration
"""
import itertools
from dataclasses import dataclass
from itertools import takewhile as itertools_takewhile
from atypes import Slab
from creek import Creek
from creek.util import to_iterator
from i2 import MultiObj, FuncFanout, ContextFanout, Pipe
from typing import Callable, Iterable, Iterator, Any, Mapping, Dict
from i2 import Pipe
from know.util import (
Name,
SlabService,
iterate,
FiltFunc,
iterate_dict_values,
always_false,
always_true,
StreamId,
Stream,
SlabCallback,
)
from taped import chunk_indices
always: FiltFunc
Hunker: HunkerType
# TODO: Make smart default for stop_condition. If finite iterable, use any_value_is_none?
no_more_data = type('no_more_data', (), {})
# class DictZip:
# def __init__(self, *unnamed, takewhile=None, **named):
# self.multi_iterator = MultiIterator(*unnamed, **named)
# self.objects = self.multi_iterator.objects
# self.takewhile = takewhile
#
# def __iter__(self):
# while True:
# x = next(self.multi_iterator)
# if not self.takewhile(x):
# break
# yield x
#
# class MultiIterable:
# def __init__(self, *unnamed, **named):
# self.multi_iterator = MultiIterator(*unnamed, **named)
# self.objects = self.multi_iterator.objects
#
# def __iter__(self):
# while True:
# yield next(self.multi_iterator)
#
# def takewhile(self, predicate=None):
# """itertools.takewhile applied to self, with a bit of syntactic sugar
# There's nothing to stop the iteration"""
# if predicate is None:
# predicate = lambda x: True # always true
# return itertools_takewhile(predicate, self)
class _MultiIterator(MultiObj):
"""Helper class for DictZip"""
StopCondition = Callable[[Any], bool]
# TODO: Make smart default for stop_condition. If finite iterable, use any_value_is_none?
# TODO: Default consumer(s) (e.g. data-safe prints?)
# TODO: Default slabs? (iterate through
@dataclass
@dataclass
apply = Pipe(map, tuple)
class MultiIterable:
"""Join several iterables together.
>>> from know.util import any_value_is_none
>>> from functools import partial
>>>
>>> any_value_is_none = lambda d: any(d[k] is None for k in d)
>>> mk_multi_iterable = partial(MultiIterable, stop_condition=any_value_is_none)
>>> mi = mk_multi_iterable(lets='abc', nums=[1, 2, 3, 4])
>>> list(mi)
[{'lets': 'a', 'nums': 1}, {'lets': 'b', 'nums': 2}, {'lets': 'c', 'nums': 3}]
>>> mi = MultiIterable(
... x=[5, 4, 3, 2, 1], y=[1, 2, 3, 4, 5],
... stop_condition=lambda d: d['x'] == d['y']
... )
>>> list(mi)
[{'x': 5, 'y': 1}, {'x': 4, 'y': 2}]
"""
def takewhile(self, predicate=None):
"""itertools.takewhile applied to self, with a bit of syntactic sugar
There's nothing to stop the iteration"""
if predicate is None:
predicate = lambda x: True # always true
return itertools.takewhile(predicate, self)
@dataclass
# TODO: Weird subclassing. Not the Creek init. Consider factory or delegation
| [
37811,
34154,
286,
1517,
329,
12141,
198,
198,
37811,
198,
11748,
340,
861,
10141,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
340,
861,
10141,
1330,
1011,
4514,
355,
340,
861,
10141,
62,
20657,
4514,
198,
198,
6738,... | 2.495726 | 1,287 |
# Generated by Django 3.1.4 on 2021-01-16 09:02
import datetime
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
486,
12,
1433,
7769,
25,
2999,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.972222 | 36 |
import datetime
from snippets.utils.datetime import utcnow
from vars.models import SiteConfig
CACHE_TIMEOUT = datetime.timedelta(0, 30)
site_config = SiteConfigs()
| [
11748,
4818,
8079,
198,
198,
6738,
45114,
13,
26791,
13,
19608,
8079,
1330,
3384,
66,
2197,
198,
6738,
410,
945,
13,
27530,
1330,
14413,
16934,
628,
198,
34,
2246,
13909,
62,
34694,
12425,
796,
4818,
8079,
13,
16514,
276,
12514,
7,
15... | 2.982456 | 57 |
import seaborn as sns
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from data_transformer import transform_neural_to_normal
from analysis_functions import calculate_angle_from_history, calculate_winning_pattern_from_distances
from analysis_functions import calculate_patterns_timings
def set_text(ax, coordinate_from, coordinate_to, fontsize=25, color='black'):
"""
Set text in an axis
:param ax: The axis
:param coordinate_from: From pattern
:param coordinate_to: To pattern
:param fontsize: The fontsize
:return:
"""
message = str(coordinate_from) + '->' + str(coordinate_to)
ax.text(coordinate_from, coordinate_to, message, ha='center', va='center',
rotation=315, fontsize=fontsize, color=color)
def hinton(matrix, max_weight=None, ax=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
def plot_winning_pattern(manager, ax=None, separators=False, remove=0):
"""
Plots the winning pattern for the sequences
:param manager: A network manager instance
:param ax: an axis instance
:return:
"""
n_patterns = manager.nn.minicolumns
T_total = manager.T_total
# Get the angles
angles = calculate_angle_from_history(manager)
winning = calculate_winning_pattern_from_distances(angles) + 1 # Get them in the color bounds
timings = calculate_patterns_timings(winning, manager.dt, remove)
winners = [x[0] for x in timings]
pattern_times = [x[2] + 0.5 * x[1] for x in timings]
# 0.5 is for half of the time that the pattern lasts ( that is x[1])
start_times = [x[2] for x in timings]
# Filter the data
angles[angles < 0.1] = 0
filter = np.arange(1, angles.shape[1] + 1)
angles = angles * filter
# Add a column of zeros and of the winners to the stack
zeros = np.zeros_like(winning)
angles = np.column_stack((angles, zeros, winning))
# Plot
with sns.axes_style("whitegrid", {'axes.grid': False}):
if ax is None:
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
fig = ax.figure
cmap = matplotlib.cm.Paired
cmap.set_under('white')
extent = [0, n_patterns + 2, T_total, 0]
im = ax.imshow(angles, aspect='auto', interpolation='None', cmap=cmap, vmax=filter[-1], vmin=0.9, extent=extent)
ax.set_title('Sequence of patterns')
ax.set_xlabel('Patterns')
ax.set_ylabel('Time')
# Put labels in both axis
ax.tick_params(labeltop=False, labelright=False)
# Add seperator
ax.axvline(n_patterns, color='k', linewidth=2)
ax.axvline(n_patterns + 1, color='k', linewidth=2)
ax.axvspan(n_patterns, n_patterns + 1, facecolor='gray', alpha=0.3)
# Add the sequence as a text in a column
x_min = n_patterns * 1.0/ (n_patterns + 2)
x_max = (n_patterns + 1) * 1.0 / (n_patterns + 2)
for winning_pattern, time, start_time in zip(winners, pattern_times, start_times):
ax.text(n_patterns + 0.5, time, str(winning_pattern), va='center', ha='center')
if separators:
ax.axhline(y=start_time, xmin=x_min, xmax=x_max, linewidth=2, color='black')
# Colorbar
bounds = np.arange(0.5, n_patterns + 1.5, 1)
ticks = np.arange(1, n_patterns + 1, 1)
# Set the ticks positions
ax.set_xticks(bounds)
# Set the strings in those ticks positions
strings = [str(int(x + 1)) for x in bounds[:-1]]
strings.append('Winner')
ax.xaxis.set_major_formatter(plt.FixedFormatter(strings))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.12, 0.05, 0.79])
fig.colorbar(im, cax=cbar_ax, boundaries=bounds, cmap=cmap, ticks=ticks, spacing='proportional')
| [
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
2164,
2340,
43106,
355,
50000,
431... | 2.335193 | 1,972 |
import os
import datetime
from dateutil.tz import tzlocal
import h5py as h5
import numpy as np
from warp import getselfe, getphi, getb, geta
class FieldDiagnostic(object):
"""
Common functionality for field diagnostic classes
Parameters:
solver: A solver object containing fields to be output.
top: The object representing Warp's top package.
w3d: The object representing Warp's w3d package.
comm_world: Object representing an MPI communicator.
period (int): Sets the period in steps of data writeout by the diagnostic.
Defaults to writeout on every step if not set.
write_dir (str): Relative path to place data output of the diagnostic.
Defaults to 'diags/fields/electric' for electric fields/potentials, and 'diags/fields/magnetic'
for magnetic fields/vector potentials if not set.
"""
class ElectrostaticFields(FieldDiagnostic):
"""
Test
Produce an HDF5 file with electric fields and potential .
File tree:
/data/meshes
/mesh
/x
/y
/z
Note that the coordinates will be replaced as appropriate for different
solver geometries (e.g. xyz -> rtz for RZgeom).
/phi
/E
/x
/y
/z
"""
class MagnetostaticFields(FieldDiagnostic):
"""
Produce an HDF5 file with magnetic fields and vector potential.
File tree:
/data/meshes/
/mesh
/x
/y
/z
Note that the coordinates will be replaced as appropriate for different
solver geometries (e.g. xyz -> rtz for RZgeom).
/vector_potential
/x
/y
/z
/B
/x
/y
/z
"""
| [
11748,
28686,
198,
198,
11748,
4818,
8079,
198,
6738,
3128,
22602,
13,
22877,
1330,
256,
89,
12001,
198,
11748,
289,
20,
9078,
355,
289,
20,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
25825,
1330,
651,
944,
68,
11,
651,
34846,
11,... | 2.059129 | 964 |
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from typing import NoReturn, Optional
from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys
from signedjson.types import VerifyKey
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
13130,
383,
24936,
13,
2398,
5693,
327,
13,
40,
13,
34,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
... | 3.54918 | 244 |
import os
import json
import h5py
import random
import numpy as np
from config_parser.config import PATHS, DEBUG
from data_loader.base import CacheLoader
from data_loader.image import image2array
H5_PATH = PATHS["h5_path"]
JSON_PATH = PATHS["json_path"]
SHOEPRINT_DIR = PATHS["shoeprint_dir"]
SAMPLE_DIR = PATHS["sample_dir"]
SHOEPRINT_DIR_TEST = PATHS["shoeprint_test_dir"]
DETERMINE_FILE = PATHS["determine_file"]
DETERMINE_FILE_TEST = PATHS["determine_test_file"]
@CacheLoader(name="sample", debug=DEBUG)
def get_sample_arrays(augment):
""" 获取样本文件结构,将样本图片预处理成所需格式
``` python
[
[<img1_array1>, <img1_array2>, ...],
[<img2_array1>, <img2_array2>, ...],
...
],
{
<type_id>: {
"img_indices": [<img1_index>, <img2_index>, <img3_index>, ...],
},
...
},
```
"""
sample_map = {}
sample_arrays = []
types = os.listdir(SAMPLE_DIR)
index = 0
assert types, "样本图库文件夹为空!"
for i, type_id in enumerate(types):
print("get_sample_arrays {}/{} ".format(i, len(types)), end='\r')
type_dir = os.path.join(SAMPLE_DIR, type_id)
img_path = os.path.join(type_dir, os.listdir(type_dir)[0])
sample_map[type_id] = {}
img_array = image2array(img_path, augment)
sample_map[type_id]["img_indices"] = [index + j for j in range(len(img_array))]
index += len(img_array)
sample_arrays.extend(img_array)
assert len(sample_arrays) == index
return sample_arrays, sample_map
@CacheLoader(name="shoeprint", debug=DEBUG)
def get_shoeprint_arrays(augment, sample_length, action_type="train"):
""" 获取鞋印文件结构,将鞋印图片预处理成所需格式追加在 sample_arrays 后,并将数据分类为训练类型、开发类型
之所以不整体打乱,是因为验证集与训练集、开发集是与验证集在不同的样式中,
所以开发集理应与训练集也在不同的样式中
``` python
[
[<img1_array1>, <img1_array2>, ...],
[<img2_array1>, <img2_array2>, ...],
...
],
{
<name>: {
"type_id": <xxxxxxxx>,
"img_indices": [<img1_index>, <img2_index>, <img3_index>, ...],
"set_type": "train/dev/test"
},
...
}
{
<type_id1>: [<name1>, <name2>, ...],
<type_id2>: [<name1>, <name2>, ...],
...
}
```
"""
shoeprint_map = {}
shoeprint_arrays = []
type_map = {}
shoeprint_base_dir = SHOEPRINT_DIR if action_type == "train" else SHOEPRINT_DIR_TEST
types = os.listdir(shoeprint_base_dir)
type_counter = {"train": set(), "dev": set(), "test": set()}
index = sample_length
assert types, "鞋印图库文件夹为空!"
for i, type_id in enumerate(types):
print("get_shoeprint_arrays {}/{} ".format(i, len(types)), end='\r')
if action_type == "train":
set_type = "train" if random.random() < 0.95 else "dev"
else:
set_type = "test"
type_dir = os.path.join(shoeprint_base_dir, type_id)
type_map[type_id] = []
for filename in os.listdir(type_dir):
img_path = os.path.join(type_dir, filename)
img_array = image2array(img_path, augment)
shoeprint_map[filename] = {}
shoeprint_map[filename]["type_id"] = type_id
shoeprint_map[filename]["img_indices"] = [index + j for j in range(len(img_array))]
shoeprint_map[filename]["set_type"] = set_type
shoeprint_arrays.extend(img_array)
index += len(img_array)
type_counter[set_type].add(type_id)
type_map[type_id].append(filename)
if action_type == "train":
print("训练数据共 {} 类,开发数据共 {} 类".format(len(type_counter["train"]), len(type_counter["dev"])))
else:
print("测试数据共 {} 类".format(len(type_counter["test"])))
assert len(shoeprint_arrays) == index - sample_length
return shoeprint_arrays, shoeprint_map, type_map
@CacheLoader(name="determine", debug=DEBUG)
def get_determine_scope(action_type="train"):
""" 读取待判定范围文件,并构造成字典型
``` python
{
<name>: [
<P>, <N1>, <N2>, <N3>, ... // 注意, P 不一定在最前面,而且这里记录的是 type_id
],
...
}
```
"""
determine_scope = {}
determine_scope_file = DETERMINE_FILE if action_type == "train" else DETERMINE_FILE_TEST
with open(determine_scope_file, 'r') as f:
for line in f:
line_items = line.split('\t')
for i in range(len(line_items)):
line_items[i] = line_items[i].strip()
determine_scope[line_items[0]] = line_items[1:]
return determine_scope
@CacheLoader(name="class_indices", debug=DEBUG)
def get_indices(sample_map, shoeprint_map, type_map):
""" 将所有 indices 组织在一起
``` python
[
[
[<idx01>, <idx02>], # 某一个
[<idx01>, <idx02>],
...
], # 某一类
...
]
```
"""
indices = []
for i, type_id in enumerate(sample_map):
print("get_indices {}/{} ".format(i, len(sample_map)), end='\r')
class_indices = []
class_indices.append(sample_map[type_id]["img_indices"])
if type_id in type_map:
for pos_name in type_map[type_id]:
if shoeprint_map[pos_name]["set_type"] == "train":
class_indices.append(shoeprint_map[pos_name]["img_indices"])
indices.append(class_indices)
return indices
@CacheLoader(name="test_data_set", debug=DEBUG)
def test_data_import(augment=[], action_type="test"):
""" 构造测试数据
``` python
img_arrays
{
"train": [
{
"name": <name>,
"index": <idx>,
"scope_indices": [<idx01>, <idx02>, ...],
"label": <correct_idx>
},
...
],
"dev": ...,
"test": ...
}
```
"""
determine_scope = get_determine_scope(action_type=action_type)
sample_arrays, sample_map = get_sample_arrays(augment=[])
shoeprint_arrays, shoeprint_map, _ = get_shoeprint_arrays(
augment=augment, sample_length=len(sample_arrays), action_type=action_type)
img_arrays = np.concatenate((sample_arrays, shoeprint_arrays))
test_data_map = {"train": [], "dev": [], "test": []}
print("sample {} shoeprint {} ".format(len(sample_arrays), len(shoeprint_arrays)))
scope_length = len(determine_scope[list(determine_scope.keys())[0]])
imgs_num = len(determine_scope)
for i, origin_name in enumerate(determine_scope):
print("get_test_data ({}) {}/{} ".format(action_type, i, imgs_num), end='\r')
if action_type == "test":
assert origin_name in shoeprint_map
else:
if origin_name not in shoeprint_map:
print(origin_name)
continue
set_type = shoeprint_map[origin_name]["set_type"]
type_id = shoeprint_map[origin_name]["type_id"]
item = {}
item["name"] = origin_name
item["indices"] = shoeprint_map[origin_name]["img_indices"]
item["scope_indices"] = []
item["label"] = determine_scope[origin_name].index(type_id)
for j in range(scope_length):
item["scope_indices"].append(sample_map[determine_scope[origin_name][j]]["img_indices"][0])
test_data_map[set_type].append(item)
return img_arrays, test_data_map, len(sample_arrays)
def data_import(augment=[]):
""" 导入数据集, 分为训练集、开发集
``` h5
{
"img_arrays": [<img01>, <img02>, ...] # 每个都是 (H, W, 1)
}
```
"""
data_set = {}
if not os.path.exists(H5_PATH) or not os.path.exists(JSON_PATH):
print("未发现处理好的数据文件,正在处理...")
determine_scope = get_determine_scope(action_type="train")
sample_arrays, sample_map = get_sample_arrays(augment)
shoeprint_arrays, shoeprint_map, type_map = get_shoeprint_arrays(
augment, sample_length=len(sample_arrays), action_type="train")
img_arrays = np.concatenate((sample_arrays, shoeprint_arrays))
indices = get_indices(sample_map, shoeprint_map, type_map)
data_set["img_arrays"] = img_arrays
data_set["indices"] = indices
h5f = h5py.File(H5_PATH, 'w')
h5f["img_arrays"] = data_set["img_arrays"]
h5f.close()
with open(JSON_PATH, 'w', encoding="utf8") as f:
json.dump(data_set["indices"], f, indent=2)
else:
print("发现处理好的数据文件,正在读取...")
h5f = h5py.File(H5_PATH, 'r')
data_set["img_arrays"] = h5f["img_arrays"][: ]
h5f.close()
with open(JSON_PATH, 'r', encoding="utf8") as f:
data_set["indices"] = json.load(f)
print("成功加载数据")
return data_set
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
289,
20,
9078,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
4566,
62,
48610,
13,
11250,
1330,
28748,
7998,
11,
16959,
198,
6738,
1366,
62,
29356,
13,
8692,
1330,
3408... | 1.858961 | 4,637 |
from __future__ import annotations
from . import debug
from typing import Coroutine, Tuple, Type, Callable, TypeVar, Optional, List, Any, Dict
from types import FunctionType
import abc
APP_VERSION = 3004000
TDF_MAGIC = b"TDF$"
_T = TypeVar("_T")
_TCLS = TypeVar("_TCLS", bound=type)
_RT = TypeVar("_RT")
_F = TypeVar("_F", bound=Callable[..., Any])
class override(object): # nocov
"""
To use inside a class decorated with @extend_class\n
Any attributes decorated with @override will be replaced
"""
@staticmethod
class extend_class(object): # nocov
"""
Extend a class, all attributes will be added to its parents\n
This won't override attributes that are already existed, please refer to @override or @extend_override_class to do this
"""
@staticmethod
@staticmethod
class extend_override_class(extend_class):
"""
Extend a class, all attributes will be added to its parents\n
If those attributes are already existed, they will be replaced by the new one
"""
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
764,
1330,
14257,
198,
198,
6738,
19720,
1330,
2744,
28399,
11,
309,
29291,
11,
5994,
11,
4889,
540,
11,
5994,
19852,
11,
32233,
11,
7343,
11,
4377,
11,
360,
713,
198,
6738,
3858,
... | 3.05 | 340 |
"""
Cram tests
"""
import logging
import os
import six
from tasks.util.workunit import get_refspec_after_overrides
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run
from teuthology.config import config as teuth_config
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run all cram tests from the specified paths on the specified
clients. Each client runs tests in parallel.
Limitations:
Tests must have a .t suffix. Tests with duplicate names will
overwrite each other, so only the last one will run.
For example::
tasks:
- ceph:
- cram:
clients:
client.0:
- qa/test.t
- qa/test2.t]
client.1: [qa/test.t]
branch: foo
You can also run a list of cram tests on all clients::
tasks:
- ceph:
- cram:
clients:
all: [qa/test.t]
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, dict)
assert 'clients' in config and isinstance(config['clients'], dict), \
'configuration must contain a dictionary of clients'
clients = teuthology.replace_all_with_clients(ctx.cluster,
config['clients'])
testdir = teuthology.get_testdir(ctx)
overrides = ctx.config.get('overrides', {})
refspec = get_refspec_after_overrides(config, overrides)
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
try:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'mkdir', '--', client_dir,
run.Raw('&&'),
'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw('&&'),
'{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
'install', 'cram==0.6',
],
)
clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
remote.run(args=refspec.clone(git_url, clone_dir))
for test in tests:
assert test.endswith('.t'), 'tests must end in .t'
remote.run(
args=[
'cp', '--', os.path.join(clone_dir, test), client_dir,
],
)
with parallel() as p:
for role in clients.keys():
p.spawn(_run_tests, ctx, role)
finally:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# remove test files unless they failed
for test_file in test_files:
abs_file = os.path.join(client_dir, test_file)
remote.run(
args=[
'test', '-f', abs_file + '.err',
run.Raw('||'),
'rm', '-f', '--', abs_file,
],
)
# ignore failure since more than one client may
# be run on a host, and the client dir should be
# non-empty if the test failed
remote.run(
args=[
'rm', '-rf', '--',
'{tdir}/virtualenv'.format(tdir=testdir),
clone_dir,
run.Raw(';'),
'rmdir', '--ignore-fail-on-non-empty', client_dir,
],
)
def _run_tests(ctx, role):
"""
For each role, check to make sure it's a client, then run the cram on that client
:param ctx: Context
:param role: Roles
"""
assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
testdir = teuthology.get_testdir(ctx)
log.info('Running tests for %s...', role)
remote.run(
args=[
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
run.Raw('PATH=$PATH:/usr/sbin'),
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
'-v', '--',
run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
],
logger=log.getChild(role),
)
| [
37811,
198,
34,
859,
5254,
198,
37811,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
2237,
198,
198,
6738,
8861,
13,
22602,
13,
1818,
20850,
1330,
651,
62,
5420,
16684,
62,
8499,
62,
2502,
81,
1460,
198,
198,
6738,
573,
1071,... | 1.93264 | 2,598 |
# This code calculates the number of unique words in each review:
# from pathos.multiprocessing import ProcessingPool as Pool
import multiprocessing
from multiprocessing.pool import Pool
from tqdm import tqdm
import time
# import timing
import re # import the library for regular expressions
from nltk.tokenize import sent_tokenize, word_tokenize
import sqlite3
import shutil
def get_rarepairs():
""" This code finds the list of rare word pairs
"""
connn = sqlite3.connect('Liars7_unique1.sqlite')
# Get the cursor, which is used to traverse the database, line by line
currr = conn.cursor()
sqlstr = 'SELECT Word_pair, TF FROM [Review word pairs]'
wordpairs_rare = []
for row in currr.execute(sqlstr):
wordpair = row[0]
TF = row[1]
if TF < 2: # This number is highly corpus specific (27 for Liars, 28 for Mott?), 6.3 for Liars Pilot, 20.2 for Liars 7 (reviews only), 2.78 for pairs Liars 7, 2.98 for Ott, 3.90 for yelpCHIhotels, 3.49 for kaggle21k
wordpairs_rare.append(wordpair)
return wordpairs_rare
def process_row(arg):
"""
this function receives a single row of a table
and returns a pair (id, depth) for a given row
"""
wordpairs_rare, row = arg
reviewtext = row[0]
wordpairs = dict() # Initializes an empty dictionary where we will keep track of all wordforms in the whole corpus of reviews and how many times their occurence values
sentences = sent_tokenize(reviewtext)
for s in sentences:
words = word_tokenize(s)
for i in range(len(words) - 2 + 1):
key = tuple(words[i:i+2])
if ',' in key or '.' in key or ':' in key or '!' in key or '?' in key or ';' in key:
continue
else:
wordpairs[key] = wordpairs.get(key, 0) + 1
unique_pairs = 0
for wp in wordpairs:
wp_str = ' '.join(wp)
if wp_str in wordpairs_rare:
unique_pairs = unique_pairs + wordpairs[wp]
return (unique_pairs, reviewtext)
def record_answers(curr, answers):
"""
this function receives cursor to sql (cur) and list of answers List[(id, depth)]
and records answers to the sql
for now, this is single process code
"""
for answer in answers:
unique_pairs, reviewtext = answer
curr.execute('UPDATE Reviews SET Uniqpairs1 = ? WHERE Review_cleaned = ?', (unique_pairs,reviewtext, ))
if __name__ == '__main__':
conn = sqlite3.connect('Liars7_unique1.sqlite')
# Get the cursor, which is used to traverse the database, line by line
cur = conn.cursor()
shutil.copyfile('Liars7_unique1.sqlite', 'Am_kg_w.sqlite')
conn_w = sqlite3.connect('Am_kg_w.sqlite') # The database to be updated
cur_w = conn_w.cursor()
try:
cur_w.execute('''ALTER TABLE Reviews ADD Uniqpairs1 INTEGER NOT NULL DEFAULT 0''') # DEFAULT 0 was removed from the sql string
except:
print("The column 'Uniqpairs1' exists already")
pass # handle the error
wordpairs_rare = get_rarepairs()
sqlstr = 'SELECT Review_cleaned FROM Reviews' # Select query that instructs over what we will be iterating
args = [(wordpairs_rare, row) for row in cur.execute(sqlstr)] # read rows from sql
print("start computing..")
t0 = time.time()
n_processes = multiprocessing.cpu_count()
if n_processes == 1:
print("single process")
answers = [process_row(arg) for arg in args] # single process each row in rows
else:
print(f"pool process with {n_processes} threads")
# we call initializer function = set_wordnet so that each worker receives separate wn object
with Pool(processes=n_processes) as pool:
answers = list(tqdm(pool.imap(process_row, args), total = len(args)))
print(f"finished computing in {time.time() - t0} seconds...")
t0 = time.time()
print("start recording...")
record_answers(cur_w, answers) # recording answers
print(f"finished recording in {time.time() - t0} seconds")
conn_w.commit()
cur_w.close()
conn_w.close()
cur.close()
conn.close()
shutil.copyfile('Am_kg_w.sqlite', 'Liars7_uniquepairs1.sqlite')
| [
2,
770,
2438,
43707,
262,
1271,
286,
3748,
2456,
287,
1123,
2423,
25,
198,
198,
2,
422,
3108,
418,
13,
16680,
541,
305,
919,
278,
1330,
28403,
27201,
355,
19850,
198,
11748,
18540,
305,
919,
278,
198,
6738,
18540,
305,
919,
278,
13,... | 2.596178 | 1,622 |
from flask import render_template
from flask import request
from flaskexample import app
from sqlalchemy import create_engine
import pandas as pd
import psycopg2
import yaml
from a_Model import ModelIt
from flaskexample.support_functions import formatted_query
ymlfile = open("../configs.yml", 'r')
cfg = yaml.load(ymlfile)
ymlfile.close()
dbname = cfg['dbname']
user = cfg['username']
host = 'localhost'
db = create_engine('postgres://%s%s/%s' % (user, host, dbname))
con = None
con = psycopg2.connect(database=dbname, user=user)
@app.route('/')
@app.route('/index')
@app.route('/db')
@app.route('/ny_bills')
@app.route('/ny_bill_input')
@app.route('/us_bill_input')
@app.route('/ny_bills_output')
@app.route('/us_bills_output')
@app.route('/lda_topics')
| [
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
42903,
1330,
2581,
198,
6738,
781,
292,
365,
87,
1403,
1330,
598,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
17331,
22163,
70,
... | 2.623729 | 295 |
from bempy import context_blocks
context_blocks('cssreset', locals())
| [
6738,
307,
3149,
88,
1330,
4732,
62,
27372,
198,
198,
22866,
62,
27372,
10786,
25471,
42503,
3256,
17205,
28955,
198
] | 3.55 | 20 |
#coding:utf-8
#
# id: functional.tabloid.dbp_2146_distinct_not_in
# title: Common SQL. Check correctness of the results
# decription:
# tracker_id:
# min_versions: ['2.5']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=.*', '')]
init_script_1 = """"""
db_1 = db_factory(from_backup='tabloid-dbp-2146.fbk', init=init_script_1)
test_script_1 = """
set list on;
with
eset
as(
select tbi, count(distinct ari) as cnt
from pdata u
where (
select count(distinct ari)
from pdata where tbi=u.tbi
) > 2
group by tbi having sum(cv)=16*16-1
)
,wset
as(
select ari
from pdata
where tbi in (
select tbi from pdata group by tbi
having sum(cv)=16*16-1
)
group by ari having sum(cv)=1000-235
)
,q1 as(
select distinct pa.id ari, pt.id tbi, p.cnt
from pdata u
join eset p on p.tbi=u.tbi
join parea pa on pa.id=u.ari
join ptube pt on pt.id=u.tbi
join wset b on b.ari=u.ari
)
,q2 as (
select
a.ari
,a.tbi
,b.cnt
from
(
select distinct a.ari, b.tbi
from
(
select ari
from pdata
where tbi not in (
select tbi
from pdata
group by tbi
having sum(cv) <> 16*16-1
)
group by ari
having 1000 - sum(cv) = 235
) a
, pdata b
where a.ari = b.ari
) a,
(
select tbi, count(distinct ari) cnt
from pdata group by tbi
having count(distinct ari) > 2
) b
where a.tbi = b.tbi
)
select ari,tbi,cnt
from q1 natural join q2
order by 1,2,3
;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ARI 6
TBI 10
CNT 3
"""
@pytest.mark.version('>=2.5')
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
2,
198,
2,
4686,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
10345,
13,
8658,
75,
1868,
13,
9945,
79,
62,
17,
20964,
62,
17080,
4612,
62,
1662,
62,
259,
198,
2,
3670,
25,
220,
2... | 1.695876 | 1,358 |
from HubSale import HubSale
from pprint import pprint
client_id = ""
client_secret = ""
hubsale = HubSale(client_id, client_secret)
code, message = hubsale.connect()
if code:
print("Success!!!")
else:
print('Oops,', message) | [
6738,
14699,
50,
1000,
1330,
14699,
50,
1000,
198,
6738,
279,
4798,
220,
1330,
279,
4798,
198,
198,
16366,
62,
312,
220,
220,
220,
220,
796,
13538,
198,
16366,
62,
21078,
796,
13538,
198,
198,
40140,
21378,
220,
220,
220,
220,
220,
... | 2.608696 | 92 |
""""""
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a name score.
For example, when the list is sorted into alphabetical order,
COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list.
So, COLIN would obtain a score of 938 × 53 = 49714.
What is the total of all the name scores in the file?
"""
def total_score(filename):
with open(filename, mode='r') as f:
names = [n[1:-1] for n in f.readline().split(',')]
names.sort()
return sum((1 + i) * (sum(ord(letter) - ord('A') + 1 for letter in name)) for i, name in enumerate(names))
if __name__ == "__main__":
filename = "22.txt"
print(total_score(filename))
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a name score.
For example, when the list is sorted into alphabetical order,
COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list.
So, COLIN would obtain a score of 938 × 53 = 49714.
What is the total of all the name scores in the file?
"""
def total_score(filename):
with open(filename, mode='r') as f:
names = [n[1:-1] for n in f.readline().split(',')]
names.sort()
return sum((1 + i) * (sum(ord(letter) - ord('A') + 1 for letter in name)) for i, name in enumerate(names))
if __name__ == "__main__":
filename = "22.txt"
print(total_score(filename))
| [
15931,
15931,
15931,
198,
12814,
3891,
13,
14116,
357,
3506,
3904,
290,
705,
16928,
7502,
14,
21745,
1081,
986,
33809,
257,
6337,
42,
2420,
2393,
7268,
625,
1936,
12,
400,
29910,
717,
3891,
11,
198,
27471,
416,
29407,
340,
656,
24830,
... | 3.041876 | 597 |
from sklearn.compose import ColumnTransformer
from sklearn.utils.validation import check_array, check_is_fitted
from scipy import sparse
from collections import namedtuple
import numpy as np
import pandas as pd
class TSColumnTransformer(ColumnTransformer):
"""Time Series compatible ColumnTransformer.
Allow usage of hcrystalball wrappers and index based transformers.
See also: `sklearn.compose.ColumnTransformer`
Returns
-------
pandas.DataFrame
Data transformed on given column
Raises
------
ValueError
If `remainder=='passthrough'` is set. Use `passthrough` as an identity estimator
If sparse output is requested, but not all columns are numeric
"""
@property
def remainder(self):
"""Access to original remainder"""
return self._remainder_original
@remainder.setter
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
check_is_fitted(self, "transformers_")
# gather column names generated by transformers to defined structure
# and solve name duplicities in more sophisticated way
Columns = namedtuple("Columns", "col_name trans_name trans_index passthrough")
feature_tuples = []
for index, (name, trans, apply_cols, _) in enumerate(self._iter(fitted=True)):
if trans == "passthrough":
col_tuple = Columns(
col_name=apply_cols,
trans_name=name,
trans_index=index,
passthrough=True,
)
elif trans == "drop":
continue
elif hasattr(trans, "get_feature_names"):
col_tuple = Columns(
col_name=trans.get_feature_names(),
trans_name=name,
trans_index=index,
passthrough=False,
)
else:
# TODO: for transformers that reduce/inflate dimensions,
# this might cause unwanted behavior
# Temporary fix for PCA
if hasattr(trans, "n_components"):
if trans.n_components != len(apply_cols):
apply_cols = [name + "_" + str(i) for i in range(trans.n_components)]
col_tuple = Columns(
col_name=apply_cols,
trans_name=name,
trans_index=index,
passthrough=False,
)
feature_tuples.append(col_tuple)
# make sure passthrough column names have precendece over other transformers
# when duplicate colum names occur
df = (
pd.DataFrame(feature_tuples)
.explode("col_name")
.reset_index(drop=True)
.sort_values("passthrough", ascending=False)
)
duplicates = df.duplicated(subset=["col_name"])
df.loc[duplicates, "col_name"] += "_" + df.loc[duplicates, "trans_name"]
feature_names = df.sort_index()["col_name"].tolist()
return feature_names
def _hstack(self, Xs):
"""Stack Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer and returning pandas.DataFrame
version of data at the end.
Parameters
----------
Xs : List
List of numpy arrays, sparse arrays, or DataFrames
Returns
-------
pandas.DataFrame
Stacked data with correct column names
Raises
------
ValueError
Raises ValueError when columns are not numeric for sparse output
"""
if self.sparse_output_:
try:
# since all columns should be numeric before stacking them
# in a sparse matrix, `check_array` is used for the
# dtype conversion if necessary.
converted_Xs = [check_array(X, accept_sparse=True, force_all_finite=False) for X in Xs]
except ValueError:
raise ValueError(
"For a sparse output, all columns should" " be a numeric or convertible to a numeric."
)
return pd.DataFrame(sparse.hstack(converted_Xs).tocsr(), columns=self.get_feature_names())
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
# addition, that turns nparray to dataframe with correct column names
return pd.DataFrame(np.hstack(Xs), columns=self.get_feature_names())
def transform(self, X):
"""Run index aware transform
Parameters
----------
X : pandas.DataFrame
Input features.
Returns
-------
pandas.DataFrame
Transformed data by given transformer on given column
"""
df = super().transform(X)
df.index = X.index
return df
def fit_transform(self, X, y=None):
    """Fit and transform ``X`` while preserving its original row index.

    Delegates to the parent transformer and then restores the index of
    the input, which the parent implementation discards.

    Parameters
    ----------
    X : pandas.DataFrame
        Input features.
    y : pandas.Series or numpy.array, optional
        Target values.

    Returns
    -------
    pandas.DataFrame
        Transformed data, re-indexed to match ``X``.
    """
    fitted = super().fit_transform(X, y)
    # The parent class loses the original index; copy it back.
    fitted.index = X.index
    return fitted
| [
6738,
1341,
35720,
13,
785,
3455,
1330,
29201,
8291,
16354,
198,
6738,
1341,
35720,
13,
26791,
13,
12102,
341,
1330,
2198,
62,
18747,
11,
2198,
62,
271,
62,
38631,
198,
6738,
629,
541,
88,
1330,
29877,
198,
6738,
17268,
1330,
3706,
83... | 2.177494 | 2,586 |
"""
Structural subsystem
--------------------
"""
import numpy as np
from .subsystem import Subsystem
class SubsystemStructural(Subsystem):
    """
    Abstract base class for all structural subsystems.
    """

    @property
    def conductance_point_average(self):
        r"""
        Average point conductance of a structural component.

        .. math:: \overline{G} = \frac{1}{4} M \overline{\delta f}

        See Lyon, page 149, equation 8.5.2 as well as page 200.
        """
        # M is the component mass; \delta f the average modal frequency spacing.
        return 0.25 * self.component.mass * self.average_frequency_spacing

    @property
    def resistance_point_average(self):
        r"""
        Average point resistance, the reciprocal of
        :attr:`conductance_point_average`.
        """
        return 1.0 / self.conductance_point_average

    @property
    def velocity(self):
        r"""
        Vibrational velocity :math:`v`.

        .. math:: v = \sqrt{\frac{E}{m}}

        Craik, equation 3.11, page 55.
        """
        return np.sqrt(self.energy / self.component.mass)

    @property
    def velocity_level(self):
        r"""
        Velocity level :math:`L_v`.

        :rtype: :class:`numpy.ndarray`

        The structural velocity level is calculated as

        .. math:: L_v = 20 \log_{10}{\left( \frac{v}{v_0} \right) }

        .. seealso:: :attr:`seapy.system.System.reference_velocity`
        """
        return 20.0 * np.log10(self.velocity / self.system.reference_velocity)
| [
37811,
198,
44909,
1523,
39335,
198,
19351,
198,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
7266,
10057,
1330,
3834,
10057,
628,
198,
4871,
3834,
10057,
44909,
1523,
7,
7004,
10057,
2599,
198,
220,
220,
220,
37227,
... | 2.202663 | 676 |
# Generated by Django 3.0.4 on 2020-04-20 22:13
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3023,
12,
1238,
2534,
25,
1485,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for irtrans, a Hindi <-> Urdu transliteration tool."""

from __future__ import print_function

import os
import sys
import warnings

from setuptools import setup, Extension
from Cython.Distutils import build_ext

dist_dir = os.path.dirname(os.path.abspath(__file__))
# Decompress the bundled model files in place (-k keeps the .gz originals).
os.system("gunzip -kf %s/irtrans/models/* 2> /dev/null" % dist_dir)

try:
    import py2exe
except ImportError:
    # Only fatal when a py2exe build was actually requested.
    if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
        print("Cannot import py2exe", file=sys.stderr)
        exit(1)

# Options used only for the Windows (py2exe) build.
py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe'],
}
py2exe_console = [{
    "script": "./irtrans/__main__.py",
    "dest_base": "irtrans",
}]
py2exe_params = {
    'console': py2exe_console,
    'options': {"py2exe": py2exe_options},
    'zipfile': None
}

if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
    params = py2exe_params
else:
    # Regular build: ship docs as data files and install a console script.
    files_spec = [
        ('share/doc/irtrans', ['README.rst'])
    ]
    root = os.path.dirname(os.path.abspath(__file__))
    data_files = []
    for dirname, files in files_spec:
        resfiles = []
        for fn in files:
            if not os.path.exists(fn):
                warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
            else:
                resfiles.append(fn)
        data_files.append((dirname, resfiles))
    params = {
        'data_files': data_files,
    }
    params['entry_points'] = {'console_scripts': ['irtrans = irtrans:main']}

# Get the package version: executing version.py defines __version__ here.
# BUG FIX: both reads below previously used bare open(...).read(), leaking
# the file handles; context managers close them deterministically.
with open('irtrans/version.py') as version_file:
    exec(compile(version_file.read(), 'irtrans/version.py', 'exec'))

with open('README.rst', 'rb') as readme_file:
    long_description = readme_file.read().decode('utf8')

setup(
    name="irtrans",
    version=__version__,
    description="Transliteration Tool: Hindi to Urdu transliterator and vice-versa",
    long_description=long_description,
    keywords=['Language Transliteration', 'Computational Linguistics',
              'Indic', 'Roman'],
    author=['Riyaz Ahmad', 'Irshad Ahmad'],
    author_email='irshad.bhat@research.iiit.ac.in',
    maintainer='Irshad Ahmad',
    maintainer_email='irshad.bhat@research.iiit.ac.in',
    license="MIT",
    url="https://github.com/irshadbhat/irtrans",
    package_dir={"irtrans": "irtrans"},
    packages=['irtrans', 'irtrans._utils', 'irtrans._decode'],
    package_data={'irtrans': ['models/*.npy']},
    classifiers=[
        "Topic :: Text Processing :: Linguistic",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Operating System :: Unix"
    ],
    cmdclass={'build_ext': build_ext},
    ext_modules=[
        Extension("irtrans._decode.viterbi", ["irtrans/_decode/viterbi.pyx"]),
    ],
    install_requires=["cython", "numpy", "scipy"],
    # requires=["cython", "numpy", "scipy"],
    **params
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
14601,
198,
... | 2.380558 | 1,327 |
import os
import time
import json
import random
import logging.config
import networkx as nx
from pathlib import Path
import matplotlib.pyplot as plt
from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution
| [
11748,
28686,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
4738,
198,
11748,
18931,
13,
11250,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,... | 3.427419 | 124 |
# Created on Jan 12 2015
# Module metadata and constants for parsing YCSB-on-Redis benchmark output.

import statistics
import math
import os

__author__ = "Jinglei Ren"
__copyright__ = "Copyright (c) 2015 Jinglei Ren"
__email__ = "jinglei@ren.systems"

# Name of the results file produced by a ycsb/redis benchmark run;
# presumably consumed by code later in this module — confirm.
RESULTS_FILE = 'ycsb-redis.results'
| [
2,
15622,
319,
2365,
1105,
1853,
198,
198,
11748,
7869,
198,
11748,
10688,
198,
11748,
28686,
198,
198,
834,
9800,
834,
796,
366,
41,
17697,
72,
7152,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
357,
66,
8,
1853,
449,
17697,
72,... | 2.805195 | 77 |
from oracle_conn import run_select as oc_select, run_insert as oc_insert, run_sql as oc_sql, new_conn as oc_conn
from mysql_connection import run_select as sql_select,run_sql,new_conn as sql_conn
from datetime import datetime
import re
# Get country code from senior based on country name
# Get city code from senior based on city name
# Get state code from senior based on state name
# Get district code from senior based on district name
# Get deficiency id from senior based on carreiras's code, using desc as value
# Map candidate data from the senior format to the carreiras format
# Sanitize a value, stripping characters that are invalid for database insertion
# Build the ON DUPLICATE KEY update clause for the database
# Update the candidate's data in carreiras
# Add a "synchronized" status in the carreiras database for this candidate's application to this job
# Change the exporter status for candidates that have been exported
# senior has no autoincrement on the PK, so the current maximum value is fetched here and incremented by 1 before inserting
# Insert/update the candidate in senior
if __name__ == "__main__":
    # Open the main connections (MySQL for carreiras, Oracle for senior).
    main_sql_conn = sql_conn()
    main_oc_conn = oc_conn()
    # Fetch the date of the last active export sync.
    last_sync = sql_select("SELECT last_sync FROM senior_sync WHERE type='export' AND active=1", main_sql_conn)
    # BUG FIX: identity comparison with `is None` instead of `== None`.
    if last_sync[0]['last_sync'] is None:
        # No sync recorded yet: fall back to a fixed initial date.
        last_sync[0]['last_sync'] = datetime.strptime('2021-05-01', '%Y-%m-%d')
    # Candidates that were manually added to the export list.
    candidates_carreiras_avulsos = sql_select("SELECT candidates.* FROM candidates JOIN exportables ON candidates.id=exportables.candidate_id WHERE exportables.status=0 AND candidates.senior_num_can IS NULL", main_sql_conn)
    candidates_senior_avulsos = carreiras_to_senior_candidate(candidates_carreiras_avulsos)
    export_candidates_to_senior(candidates_senior_avulsos, main_oc_conn)
    update_exportable(candidates_senior_avulsos)
    # carreiras candidates applied to active jobs whose last senior sync is
    # older than the most recent update of the candidate or the application.
    candidates_carreiras = sql_select("SELECT DISTINCT candidates.*,group_concat(subscribed_has_states.subscribed_id) as subscriptions FROM candidates JOIN subscribed ON subscribed.candidate_id=candidates.id JOIN subscribed_has_states ON subscribed_has_states.subscribed_id=subscribed.id LEFT JOIN subscribed_has_states AS denied_states ON denied_states.subscribed_id=subscribed.id AND denied_states.state_id IN (5,2) LEFT JOIN states ON states.id=subscribed_has_states.state_id WHERE candidates.senior_num_can IS NULL AND states.sync_to_senior=1 AND denied_states.id IS NULL AND (candidates.last_senior_synced<=candidates.updated_at OR candidates.last_senior_synced<=subscribed.updated_at OR candidates.last_senior_synced<=subscribed_has_states.updated_at OR candidates.last_senior_synced IS NULL) GROUP BY candidates.id", main_sql_conn)
    candidates_senior = carreiras_to_senior_candidate(candidates_carreiras)
    export_candidates_to_senior(candidates_senior, main_oc_conn)
    add_carreiras_subscribed_state(candidates_carreiras, main_sql_conn)
    #test=oc_select("SELECT NOMCAN,COUNT(NUMCAN) as CONNTA from R122CEX GROUP BY NOMCAN ORDER BY CONNTA DESC FETCH NEXT 3 ROWS ONLY ",main_oc_conn)
    #test=oc_select("select * from R122CEX WHERE NOMCAN LIKE '%KARL%'",main_oc_conn)
    #candidates_senior=oc_select("select * from R122CEX ORDER BY NUMCAN DESC FETCH NEXT 1 ROWS ONLY",main_oc_conn)
    #print(test)
    # Candidates
    #candidate_senior=oc_select("SELECT * from R122CEX WHERE DATINC >= '"+last_sync[0]['last_sync'].strftime('%Y-%m-%d')+"' ",main_oc_conn)
    #candidates_carreiras=senior_to_carreiras_candidate(candidate_senior)
    #import_candidates(candidates_carreiras,main_sql_conn)
    # Jobs
    #job_senior=oc_select("SELECT * FROM R126RQU WHERE DATRQU>='"+last_sync[0]['last_sync'].strftime('%Y-%m-%d')+"' AND SITRQU IN('0','1')",main_oc_conn)
    #full_job=senior_fill_job_data(job_senior,main_oc_conn)
    #jobs_carreiras=senior_to_carreiras_job(full_job)
    #import_jobs(jobs_carreiras,main_sql_conn)
    ## Update the synchronizer bookkeeping
    #update_senior_sync(main_sql_conn)
    # Close the main connections.
    main_oc_conn.close()
    main_sql_conn.close()
| [
6738,
393,
6008,
62,
37043,
1330,
1057,
62,
19738,
355,
267,
66,
62,
19738,
11,
1057,
62,
28463,
355,
267,
66,
62,
28463,
11,
1057,
62,
25410,
355,
267,
66,
62,
25410,
11,
649,
62,
37043,
355,
267,
66,
62,
37043,
198,
6738,
48761,... | 2.686875 | 1,600 |
import json
import os
import sys
from django.conf.global_settings import * # noqa
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
import dbservice as project_module
"""Base settings shared by all environments"""
# =============================================================================
# Generic Django project settings
# =============================================================================
# Development defaults; presumably overridden per environment — confirm.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# SITE_ID = 1

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = False
USE_I18N = False
USE_L10N = False

LANGUAGE_CODE = 'en'
LANGUAGES = (
    ('en', 'English'),
)

# Accepted input formats, based on Django REST Framework interpretation of ISO 8601
DATE_INPUT_FORMATS = (
    '%Y',  # '2006'
    '%Y-%m',  # '2006-10'
    '%Y-%m-%d',  # '2006-10-25'
)

TIME_INPUT_FORMATS = (
    '%H:%M',  # '14:30'
    '%H:%M:%S',  # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
)

DATETIME_INPUT_FORMATS = (
    '%Y%m%dT%H:%M:%S.%f',  # '20061025T14:30:59.000200'
    '%Y%m%dT%H:%M:%S',  # '20061025T14:30:59'
    '%Y%m%dT%H:%M',  # '20061025T14:30'
    '%Y%m%dT%H',  # '20061025T14'
    '%Y%m%d',  # '20061025'
    '%Y-%m-%dT%H:%M:%S.%f',  # '2006-10-25T14:30:59.00200'
    '%Y-%m-%dT%H:%M:%S',  # '2006-10-25T14:30:59'
    '%Y-%m-%dT%H:%M',  # '2006-10-25T14:30'
    '%Y-%m-%dT%H',  # '2006-10-25T14'
    '%Y-%m-%d',  # '2006-10-25'
    '%m/%d/%YT%H:%M:%S.%f',  # '10/25/2006T14:30:59.000200'
    '%m/%d/%YT%H:%M:%S',  # '10/25/2006T14:30:59'
    '%m/%d/%YT%H:%M',  # '10/25/2006T14:30'
    '%m/%d/%YT%H',  # '10/25/2006T14'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%yT%H:%M:%S.%f',  # '10/25/06T14:30:59.000200'
    '%m/%d/%yT%H:%M:%S',  # '10/25/06T14:30:59'
    '%m/%d/%yT%H:%M',  # '10/25/06T14:30'
    '%m/%d/%yT%H',  # '10/25/06T14'
    '%m/%d/%y',  # '10/25/06'
)

# Local apps first, then third-party, then django.contrib.
INSTALLED_APPS = (
    'dbservice.apps.users',
    'dbservice.apps.private',
    'dbservice.apps.homes',
    'rest_framework',
    'rest_framework_jwt',
    'django_filters',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    # 'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    # 'django.contrib.admindocs',
)
# =============================================================================
# Calculation of directories relative to the project module location
# =============================================================================
# Derive filesystem locations from the installed project module so the
# settings work regardless of where the checkout lives.
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
LOGS_DIR = os.path.join(PROJECT_DIR, os.pardir, 'logs')
PYTHON_BIN = os.path.dirname(sys.executable)
VE_PATH = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))

# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
if os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
    # We're running with a virtualenv python executable.
    VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif VE_PATH and os.path.exists(os.path.join(VE_PATH, 'bin',
                                             'activate_this.py')):
    # We're running in [virtualenv_root]/src/[project_name].
    VAR_ROOT = os.path.join(VE_PATH, 'var')
else:
    # Set the variable root to a path in the project which is
    # ignored by the repository.
    VAR_ROOT = os.path.join(PROJECT_DIR, 'var')

# Ensure the writable directories exist before anything logs or uploads.
if not os.path.exists(VAR_ROOT):
    os.mkdir(VAR_ROOT)
if not os.path.exists(LOGS_DIR):
    os.mkdir(LOGS_DIR)
# =============================================================================
# Logging
# =============================================================================
# Disable Django's own logging setup; the dict below is applied manually.
LOGGING_CONFIG = None
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%Y-%m-%d %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
        'simple_sql': {
            'format': '[%(asctime)s] duration(sec): %(duration).6f|sql: %(sql)s|params: %(params)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': os.path.join(LOGS_DIR, 'general_debug.log'),
            'formatter': 'verbose'
        },
        'file_database': {
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'simple_sql',
            'filename': os.path.join(LOGS_DIR, 'debug_database.log'),
            'level': 'DEBUG',
            'maxBytes': 1024 * 1000 * 10,
            'backupCount': 3
        },
        'email_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True,
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['email_admins'],
            'level': 'ERROR',
            # BUG FIX: key was misspelled 'propogate', which dictConfig
            # silently ignores; 'propagate' is the key logging honours.
            'propagate': True
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'email_admins', 'file'],
            'propagate': True,
        },
        'django.db.backends': {
            'level': 'DEBUG',
            'handlers': ['file_database'],
            'propagate': False,
        }
    }
}

import logging.config
logging.config.dictConfig(LOGGING)
# =============================================================================
# Project URLS and media settings
# =============================================================================
ROOT_URLCONF = 'dbservice.urls'

LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'

STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
# Collected static files and user uploads live under the writable VAR_ROOT.
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, 'static'),
)

# =============================================================================
# Templates
# =============================================================================

TEMPLATE_DIRS = (
    os.path.join(PROJECT_DIR, 'templates'),
)

TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.request',
)

# =============================================================================
# Middleware
# =============================================================================

MIDDLEWARE_CLASSES += (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# =============================================================================
# Auth / security
# =============================================================================

AUTHENTICATION_BACKENDS += (
)

# =============================================================================
# Miscellaneous project settings
# =============================================================================

# Custom user model (see dbservice.apps.users).
AUTH_USER_MODEL = 'users.User'

# =============================================================================
# Third party app settings
# =============================================================================

REST_FRAMEWORK = {
    'FILTER_BACKEND': 'rest_framework.filters.DjangoFilterBackend',
    'PAGINATE_BY': 20,
    'PAGINATE_BY_PARAM': 'page_size',
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_THROTTLE_CLASSES': (
        'rest_framework.throttling.UserRateThrottle',
        'rest_framework.throttling.ScopedRateThrottle',
    ),
    'DEFAULT_THROTTLE_RATES': {
        'measurements': '100/minute',
        'user': '10000/minute',
    }
}

JWT_AUTH = {
    # To simplify things we turn off token expiration. We can turn this on and
    # write token refresh mechanisms later.
    'JWT_VERIFY_EXPIRATION': False,
}

# SECRETS ##############################

# Lazily-loaded secrets cache and the file it is read from.
_secrets = None
_secrets_filename = Path('~', 'dbservice.json').expand()
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
42625,
14208,
13,
10414,
13,
20541,
62,
33692,
1330,
1635,
220,
1303,
645,
20402,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
... | 2.319401 | 3,876 |
import unittest
import numpy as np
import pandas as pd
import numpy.testing as np_testing
import pandas.testing as pd_testing
import os
import import_ipynb
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Activation
from tensorflow import random
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.preprocessing import StandardScaler
if __name__ == '__main__':
    # Discover and run the tests in this module when executed directly.
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
13,
33407,
355,
45941,
62,
33407,
198,
11748,
19798,
292,
13,
33407,
355,
279,
67,
62,
33407,
198,
11748,
28686,
1... | 3.331288 | 163 |
# This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
# NOTE: this is a Jinja2 template (see the {{ ... }} / {% ... %} markers);
# it is rendered into the final sentry configuration by deployment tooling.
from sentry.conf.server import *

import os.path

CONF_ROOT = os.path.dirname(__file__)

DATABASES = {
    'default': {
        'ENGINE': 'sentry.db.postgres',
        'NAME': '{{ sentry_db_name }}',
        'USER': '{{ sentry_db_user }}',
        'PASSWORD': '{{ sentry_db_password }}',
        'HOST': '{{ sentry_db_host }}',
        'PORT': '{{ sentry_db_port }}',
        'AUTOCOMMIT': True,
        'ATOMIC_REQUESTS': False,
    }
}

# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True

# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings

###########
# General #
###########

# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True

DEBUG = False

#########
# Cache #
#########

# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.

# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
#   pip install python-memcached
#
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': ['127.0.0.1:11211'],
#     }
# }

# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'

#########
# Queue #
#########

# See https://docs.sentry.io/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.

BROKER_URL = '{{ sentry_broker_url }}'

###############
# Rate Limits #
###############

# Rate limits apply to notification handlers and are enforced per-project
# automatically.

SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'

##################
# Update Buffers #
##################

# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)

SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'

##########
# Quotas #
##########

# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.

SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'

########
# TSDB #
########

# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.

SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'

###########
# Digests #
###########

# The digest backend powers notification summaries.

SENTRY_DIGESTS = 'sentry.digests.backends.redis.RedisBackend'

##############
# Web Server #
##############

# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and uncomment the following settings
{% if sentry_behind_ssl_proxy %}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
{% endif %}

# If you're not hosting at the root of your web server,
# you need to uncomment and set it to the path where Sentry is hosted.
# FORCE_SCRIPT_NAME = '/sentry'

SENTRY_WEB_HOST = '{{ sentry_web_host }}'
SENTRY_WEB_PORT = {{ sentry_web_port }}
SENTRY_WEB_OPTIONS = {
    # 'workers': 3,  # the number of web workers
    # 'protocol': 'uwsgi',  # Enable uwsgi protocol instead of http
}

SENTRY_FEATURES["auth:register"] = {{ sentry_auth_register }}

{{ sentry_extra_conf_py }}
| [
2,
770,
2393,
318,
655,
11361,
11,
351,
257,
3638,
286,
37770,
543,
1724,
198,
2,
345,
460,
16955,
290,
25393,
6460,
284,
534,
11954,
2695,
13,
198,
6738,
1908,
563,
13,
10414,
13,
15388,
1330,
1635,
198,
198,
11748,
28686,
13,
6978... | 2.925191 | 1,310 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import sys
from text_stoppers import *
from text_validators import *
from random import choice
class MarkovGenerator(object):
    """A Markov chain text generator"""

    def open_and_read_file(self, filename):
        """Read the file at ``filename`` and build chains from its text.

        Takes a string that is a file path, opens the file, and feeds the
        file's contents (one string of text) to :meth:`make_chains`.
        """
        # Context manager closes the handle even if read() raises.
        with codecs.open(filename, encoding='utf-8') as f:
            text = f.read()
        self.make_chains(text)

    def make_chains(self, text_string, n=2):
        """Take input text as string; build a dictionary of Markov chains.

        A chain maps an n-gram — a tuple of ``n`` consecutive words — to the
        list of words that follow that n-gram in the input text. ``n`` is
        usually 2 or 3; if not specified, bigrams are generated.

        For example::

            chains = make_chains("hi there mary hi there juanita")

        Each bigram with a follower becomes a key in chains::

            sorted(chains.keys())
            [('hi', 'there'), ('mary', 'hi'), ('there', 'mary')]

        Each value lists every observed following word::

            chains[('hi', 'there')]
            ['mary', 'juanita']
        """
        self.chains = {}
        words = text_string.split()
        for i in range(len(words) - n):
            # BUG FIX: the key was previously hard-coded to two words even
            # though the follower was taken n words ahead, breaking n != 2.
            ngram = tuple(words[i:i + n])
            next_word = words[i + n]
            self.chains.setdefault(ngram, [])
            self.chains[ngram].append(next_word)

    def make_text(self):
        """Take dictionary of markov chains; return random text.

        Starts from a randomly chosen valid, capitalized bigram and follows
        the chain until it dead-ends, a stopper condition fires, or the
        character limit is reached.
        """
        char_limit = 280
        words = []
        while True:
            # list(...) keeps this Python 3 compatible (dict views are not
            # sequences); print() form likewise.
            link = choice(list(self.chains.keys()))
            first, second = link[0], link[1]
            print('Checking words: ', first, second)
            # Is the first word an acceptable POS? Are the words valid?
            if all([is_valid_p_o_s(first), is_valid_word(second),
                    is_valid_word(first)]):
                # BUG FIX: only keep the words once the opening bigram is
                # valid; previously every failed attempt leaked into the text.
                words = [first.capitalize(), second]
                break
        while link in self.chains:
            # Keep looping until we have a key that isn't in the chains,
            # a stopper condition is met, or the char limit is reached.
            # If the picked word is invalid, choose a new one.
            next_word = choice(self.chains[link])
            if is_valid_word(next_word):
                words.append(next_word)
                # Should we stop here?
                if stop_text(next_word, words):
                    break
                if len(" ".join(words)) >= char_limit:
                    # Enforce the 280-char limit (documented but previously
                    # never implemented).
                    break
                link = (link[1], next_word)  # create new ngram
        return " ".join(words)
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this module as shown here —
    # confirm it exists (or is imported) before running this as a script.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
40481,
82,
198,
11748,
25064,
198,
6738,
2420,
62,
301,
37186,
1330,
1635,
198,
6738,
2420,
62,
12102,
2024,
133... | 2.242159 | 1,371 |
from typing import Any, Callable, Dict
import datetime
import os
from blossom_wrapper import BlossomAPI
from praw import Reddit
# Load configuration regardless of if bugsnag is setup correctly
try:
    import bugsnag
except ImportError:
    # If loading from setup.py or bugsnag isn't installed, we
    # don't want to bomb out completely
    # bugsnag = None disables the conditional configure() call below.
    bugsnag = None

import pkg_resources
# Sentinel distinguishing "not cached yet" from a cached value of None.
_missing = object()


# @see https://stackoverflow.com/a/17487613/1236035
class cached_property(object):
    """A decorator that converts a function into a lazy property. The
    function wrapped is called the first time to retrieve the result
    and then that calculated result is used the next time you access
    the value::

        class Foo(object):

            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to
    work.
    """

    # implementation detail: this property is implemented as non-data
    # descriptor. non-data descriptors are only invoked if there is no
    # entry with the same name in the instance's __dict__. this allows
    # us to completely get rid of the access function call overhead. If
    # one choses to invoke __get__ by hand the property will still work
    # as expected because the lookup logic is replicated in __get__ for
    # manual invocation.
    #
    # BUG FIX: the descriptor had no implementation at all (only the
    # docstring survived), so decorated methods were unusable. The
    # standard recipe referenced above is restored below.

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = obj.__dict__.get(self.__name__, _missing)
        if value is _missing:
            # First access: compute once and cache on the instance; the
            # cached entry then shadows this non-data descriptor.
            value = self.func(obj)
            obj.__dict__[self.__name__] = value
        return value
class Config(object):
    """
    A singleton object for checking global configuration from
    anywhere in the application
    """

    # API key for later overwriting based on contents of filesystem
    bugsnag_api_key: str = None
    debug_mode: bool = False

    # Global flag to enable/disable placing the triggers
    # for the OCR bot
    OCR: bool = True
    # Delay between OCR operations; units unknown from here — TODO confirm.
    ocr_delay: int = None

    # Name of the bot
    name: str = None
    bot_version: str = '0.0.0'  # this should get overwritten by the bot process

    blossom: BlossomAPI = None  # API client for the Blossom service
    r: Reddit = None  # PRAW Reddit instance
    me: Dict = None  # blossom object of transcribot
    # Presumably a "never scanned yet" sentinel default — confirm.
    last_post_scan_time: datetime.datetime = datetime.datetime(1970, 1, 1, 1, 1, 1)

    # NOTE(review): this decorator is not followed by a function in the
    # source as provided — the decorated method appears to have been lost,
    # and as written the module will not parse. Restore the missing method.
    @cached_property
# Read the bugsnag API key from disk, falling back to the environment.
# BUG FIX: the key file was opened without being closed; a context
# manager releases the handle deterministically.
try:
    with open('bugsnag.key') as key_file:
        Config.bugsnag_api_key = key_file.readline().strip()
except OSError:
    Config.bugsnag_api_key = os.environ.get('BUGSNAG_API_KEY', None)

if bugsnag and Config.bugsnag_api_key:
    # Only configure crash reporting when both the library and a key exist.
    bugsnag.configure(
        api_key=Config.bugsnag_api_key,
        app_version=pkg_resources.get_distribution('tor_ocr').version
    )

# ----- Compatibility -----
# Module-level singleton used by the rest of the application.
config = Config()
| [
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
6738,
698,
25548,
62,
48553,
1330,
35544,
17614,
198,
6738,
279,
1831,
1330,
10750,
198,
198,
2,
8778,
8398,
7692,
286,
611,
11... | 2.936269 | 863 |
# -*- coding: utf-8 -*-
# Packaging metadata for pipsy (update/security notices for installed packages).
from setuptools import setup, find_packages

setup(
    name='pipsy',
    version="0.1.2",
    description='Shows updates for installed packages',
    long_description='Shows available updates and security notices for installed packages',
    author='Donovan Schönknecht',
    author_email='don@tpyo.net',
    url='https://github.com/tpyo/pipsy',
    packages=['pipsy'],
    license='MIT',
    include_package_data=True,
    install_requires=[
        'pip>=9.0.1',
        'changelogs>=0.9.0',
        'packaging>=16.8',
    ],
    # Installs the `pipsy` console command pointing at pipsy.main.
    entry_points={
        "console_scripts": [
            "pipsy=pipsy:main",
        ],
    },
    extras_require={
        'testing': ['pytest', 'mock'],
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Build Tools",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: PyPy"
    ],
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
79,
541,
1837,
3256,
198,
220,
220,
220,
2196,
2625... | 2.409178 | 523 |
import librosa
import numpy as np
from vad.acoustics.transforms.transform import Transform
from vad.data_models.audio_data import AudioData
| [
11748,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
410,
324,
13,
330,
23968,
873,
13,
7645,
23914,
13,
35636,
1330,
26981,
198,
6738,
410,
324,
13,
7890,
62,
27530,
13,
24051,
62,
7890,
1330,
13491,
6601,
628
] | 3.380952 | 42 |
import traceback
import os
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import SGDRegressor
from machine_learner.utils import repository
DIR_PATH = os.path.join('machine_learner', 'trained_models', 'pllaregression')
| [
11748,
12854,
1891,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
1855,
11518,
3351,
36213,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
26147,
7707,
1533,
44292,
198,
6738,
4... | 3.333333 | 81 |
"""submission table
Revision ID: 143f81c3bba3
Revises:
Create Date: 2019-02-25 21:25:17.891423
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '143f81c3bba3'
down_revision = None
branch_labels = None
depends_on = None
| [
37811,
7266,
3411,
3084,
198,
198,
18009,
1166,
4522,
25,
24356,
69,
6659,
66,
18,
65,
7012,
18,
198,
18009,
2696,
25,
220,
198,
16447,
7536,
25,
13130,
12,
2999,
12,
1495,
2310,
25,
1495,
25,
1558,
13,
4531,
1415,
1954,
198,
198,
... | 2.661157 | 121 |
import os

from pymongo import MongoClient
from pymongo.errors import BulkWriteError, DuplicateKeyError
USERNAME = os.environ.get('USERNAME')
PASSWORD = os.environ.get('PASSWORD')
CLUSTER_INFO = os.environ.get('CLUSTER_INFO')
DB_NAME = os.environ.get('DB_NAME')
COLLECTION_NAME = os.environ.get('COLLECTION_NAME')
cluster = MongoClient(f'mongodb+srv://{USERNAME}:{PASSWORD}{CLUSTER_INFO}')
database = cluster[DB_NAME]
collection = database[COLLECTION_NAME]
# add a document to the collection
try:
post = {
'_id': 0,
"username": 'user1',
'activity': 'using mongodb'
}
collection.insert_one(post)
except:
pass # document of the same id already exists
# adding many documents to the collection
try:
posts = [{'_id': 1, "username": 'user1', 'activity': 'using python'},
{'_id': 2, "username": 'user1', 'activity': 'using nosql-practice'}]
collection.insert_many([posts[0], posts[1]])
except:
pass # document of the same id already exists
# searching for a document in the collection that meets a certain criteria
results = collection.find({'username': 'user1'})
# print all the attributes of the results that holds information for the user_name: user1
for result in results:
print(f"user id: {result['_id']}")
print(f"username: {result['username']}")
print(f"activity: {result['activity']}")
# getting all the current entries in the db:
view_db()
# Delete document from the db :
# collection.delete_one({"_id": 1})
print("collection after deleting id:1")
# view_db()
# delete many documents from the db that match a given criteria:
# collection.delete_many({"username": 'user1'})
print("Collection all users with username: user1")
# view_db()
# Update documents in collection
collection.update_many({"_id": 0}, {'$set': {"username": 'User1'}})
view_db()
# Update many documents in collection
collection.update_many({"username": 'user1'}, {'$set': {"username": 'User1'}})
view_db()
collection.update_many({"username": 'User1'}, {'$set': {"location": 'Germany'}})
view_db()
# count documents in db
documents_count = collection.count_documents({})
print(documents_count)
#
collection.update_one({"_id": 0}, {'$set': {"age": 15}})
collection.update_one({"_id": 1}, {'$set': {"age": 22}})
collection.update_one({"_id": 2}, {'$set': {"age": 30}})
view_db()
# displays all users with an age greater than or equal to 21?
users = collection.find({'age': {'$gt': 21}})
print('users with an age greater than or equal to 21 are: ')
print(users)
for user in users:
print(user)
| [
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
11748,
28686,
198,
198,
29904,
20608,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
29904,
20608,
11537,
198,
47924,
54,
12532,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
47924,
54,
12532,
... | 2.851598 | 876 |
from datetime import datetime
from django.test import TestCase
from django.utils.dateparse import parse_datetime
from restclients.exceptions import DataFailureException
from restclients.models.bridge import BridgeUser, BridgeCustomField,\
BridgeUserRole
from restclients.test import fdao_pws_override
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
26791,
13,
4475,
29572,
1330,
21136,
62,
19608,
8079,
198,
6738,
1334,
565,
2334,
13,
1069,
11755,
1330,
6060,
50015,
... | 3.642857 | 84 |
"""Version of str(timedelta) which is not English specific."""
def duration_string(duration):
    """Format a timedelta as ``[D ]HH:MM:SS[.ffffff]`` with no English words.

    ``duration.days`` is prefixed only when non-zero, and the microsecond
    fraction is appended only when non-zero.
    """
    minutes, secs = divmod(duration.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    result = f'{hours:02d}:{minutes:02d}:{secs:02d}'
    if duration.days:
        result = f'{duration.days} {result}'
    if duration.microseconds:
        result += f'.{duration.microseconds:06d}'
    return result
| [
37811,
14815,
286,
965,
7,
16514,
276,
12514,
8,
543,
318,
407,
3594,
2176,
526,
15931,
201,
201,
201,
4299,
9478,
62,
8841,
7,
32257,
2599,
201,
220,
220,
220,
1528,
796,
9478,
13,
12545,
201,
220,
220,
220,
4201,
796,
9478,
13,
... | 2.731579 | 190 |
"""
The dependency-decomposition based unique measure partial information decomposition.
"""
from ...multivariate import coinformation
from ..pid import BaseUniquePID
from ...profiles import DependencyDecomposition
__all__ = (
'PID_dep',
'PID_RA',
)
class PID_dep(BaseUniquePID):
    """
    The dependency partial information decomposition of James et al.,
    built on the dependency-decomposition lattice.
    """
    _name = "I_dep"

    @staticmethod
    def _measure(d, sources, target, maxiter=None):
        """
        Unique information of each source about the target, computed as
        min(delta(I(sources : target))) with delta taken over the
        dependency decomposition.

        Parameters
        ----------
        d : Distribution
            The distribution to compute i_dep for.
        sources : iterable of iterables
            The source variables.
        target : iterable
            The target variable.
        maxiter : int, optional
            Iteration cap forwarded to the dependency decomposition.

        Returns
        -------
        idep : dict
            The value of I_dep for each individual source.
        """
        # Mutual information between the two sources (jointly) and the target;
        # after coalescing, variable order is [source0, source1, target].
        mi = {'I': lambda dist: coinformation(dist, [[0, 1], [2]])}
        # Constraints tying source 0 (resp. source 1) to the target.
        constraints = (frozenset({frozenset({0, 2})}),
                       frozenset({frozenset({1, 2})}))
        result = {}
        if len(sources) == 2:
            joint = d.coalesce(sources + (target,))
            decomp = DependencyDecomposition(joint, measures=mi, maxiter=maxiter)
            for src, constraint in zip(sources, constraints):
                result[src] = min(decomp.delta(edge, 'I')
                                  for edge in decomp.edges(constraint))
        else:
            # More than two sources: pit each source against the union of the
            # others so the two-variable machinery above can be reused.
            for src in sources:
                rest = sum((v for v in sources if v != src), ())
                joint = d.coalesce([src, rest, target])
                decomp = DependencyDecomposition(joint, measures=mi, maxiter=maxiter)
                result[src] = min(decomp.delta(edge, 'I')
                                  for edge in decomp.edges(constraints[0]))
        return result
return uniques
class PID_RA(BaseUniquePID):
    """
    The "reproducibility analysis" partial information decomposition,
    derived from the work of Zwick.
    """
    _name = "I_RA"

    @staticmethod
    def _measure(d, sources, target, maxiter=None):
        """
        Unique information of each source, computed as the change in
        I[sources : target] when the corresponding source-target constraint
        is dropped from the full constraint set.

        Parameters
        ----------
        d : Distribution
            The distribution to compute i_RA for.
        sources : iterable of iterables
            The source variables.
        target : iterable
            The target variable.
        maxiter : int, optional
            Iteration cap forwarded to the dependency decomposition.

        Returns
        -------
        ira : dict
            The value of I_RA for each individual source.
        """
        # After coalescing, variable order is [source0, source1, target].
        mi = {'I': lambda dist: coinformation(dist, [[0, 1], [2]])}
        per_source = (frozenset({frozenset({0, 2})}),
                      frozenset({frozenset({1, 2})}))
        # All pairwise constraints: source-source plus both source-target.
        full = frozenset({frozenset({0, 1})}) | per_source[0] | per_source[1]
        result = {}
        if len(sources) == 2:
            joint = d.coalesce(sources + (target,))
            decomp = DependencyDecomposition(joint, measures=mi, maxiter=maxiter)
            for src, constraint in zip(sources, per_source):
                result[src] = decomp.delta((full, full - constraint), 'I')
        else:
            # More than two sources: pit each source against the union of the
            # others so the two-variable machinery above can be reused.
            for src in sources:
                rest = sum((v for v in sources if v != src), ())
                joint = d.coalesce([src, rest, target])
                decomp = DependencyDecomposition(joint, measures=mi, maxiter=maxiter)
                result[src] = decomp.delta((full, full - per_source[0]), 'I')
        return result
return uniques
class PID_dep_a(BaseUniquePID):
    """
    The dependency partial information decomposition, as defined by James at al.
    Notes
    -----
    This alternative method behaves oddly with three or more sources.
    """
    _name = "I_dep_a"
    @staticmethod
    def _measure(d, sources, target):  # pragma: no cover
        """
        This computes unique information as min(delta(I(sources : target))) where delta
        is taken over the dependency decomposition.
        Parameters
        ----------
        d : Distribution
            The distribution to compute i_dep_a for.
        sources : iterable of iterables
            The source variables.
        target : iterable
            The target variable.
        Returns
        -------
        idepa : dict
            The value of I_dep_a for each individual source.
        """
        # Map each variable group (the source tuples plus the target tuple
        # itself) to its position in the coalesced distribution.
        var_to_index = {var: i for i, var in enumerate(sources + (target,))}
        # Reorder the distribution so variable i sits at position i.
        d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
        invars = [var_to_index[var] for var in sources]
        # NOTE(review): indexes with (var,) for each element of `target`,
        # but var_to_index was keyed on the whole `target` tuple -- this
        # only resolves when target is a single-variable tuple; verify.
        outvar = [var_to_index[(var,)] for var in target]
        # Mutual information between all sources jointly and the target.
        measure = {'I': lambda d: coinformation(d, [invars, outvar])}
        dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
        uniques = {}
        for source in sources:
            # Constrain the pair (this source, target) and take the minimal
            # drop in I over the lattice edges touching that constraint.
            constraint = frozenset((frozenset((var_to_index[source], var_to_index[target])),))
            u = min(dd.delta(edge, 'I') for edge in dd.edges(constraint))
            uniques[source] = u
        return uniques
class PID_dep_b(BaseUniquePID):
    """
    The reduced dependency partial information decomposition, as defined by James at al.
    Notes
    -----
    This decomposition is known to be inconsistent.
    """
    _name = "I_dep_b"
    @staticmethod
    def _measure(d, sources, target):  # pragma: no cover
        """
        This computes unique information as min(delta(I(sources : target))) where delta
        is taken over a restricted dependency decomposition which never constrains dependencies
        among the sources.
        Parameters
        ----------
        d : Distribution
            The distribution to compute i_dep_b for.
        sources : iterable of iterables
            The source variables.
        target : iterable
            The target variable.
        Returns
        -------
        idepb : dict
            The value of I_dep_b for each individual source.
        """
        # Map each variable group (the source tuples plus the target tuple
        # itself) to its position in the coalesced distribution.
        var_to_index = {var: i for i, var in enumerate(sources + (target,))}
        target_index = var_to_index[target]
        d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
        invars = [var_to_index[var] for var in sources]
        # NOTE(review): indexes with (var,) per element of `target`, though
        # var_to_index was keyed on the whole tuple -- only resolves for a
        # single-variable target; verify (same pattern as PID_dep_a).
        outvar = [var_to_index[(var,)] for var in target]
        measure = {'I': lambda d: coinformation(d, [invars, outvar])}
        dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
        uniques = {}
        for source in sources:
            constraint = frozenset((frozenset((var_to_index[source], target_index)),))
            # Keep only "BROJA-style" edges: every multi-variable dependency
            # in the edge's upper node must strictly contain the target.
            broja_style = lambda edge: all({target_index} < set(_) for _ in edge[0] if len(_) > 1)
            edge_set = (edge for edge in dd.edges(constraint) if broja_style(edge))
            u = min(dd.delta(edge, 'I') for edge in edge_set)
            uniques[source] = u
        return uniques
class PID_dep_c(BaseUniquePID):
    """
    The reduced dependency partial information decomposition, as defined by James at al.
    Notes
    -----
    This decomposition can result in subadditive redundancy.
    """
    _name = "I_dep_c"
    @staticmethod
    def _measure(d, sources, target):  # pragma: no cover
        """
        This computes unique information as min(delta(I(sources : target))) where delta
        is taken over a restricted dependency decomposition which never constrains dependencies
        among the sources.
        Parameters
        ----------
        d : Distribution
            The distribution to compute i_dep_c for.
        sources : iterable of iterables
            The source variables.
        target : iterable
            The target variable.
        Returns
        -------
        idepc : dict
            The value of I_dep_c for each individual source.
        """
        # Map each variable group (the source tuples plus the target tuple
        # itself) to its position in the coalesced distribution.
        var_to_index = {var: i for i, var in enumerate(sources + (target,))}
        d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
        invars = [var_to_index[var] for var in sources]
        # NOTE(review): indexes with (var,) per element of `target`, though
        # var_to_index was keyed on the whole tuple -- only resolves for a
        # single-variable target; verify (same pattern as PID_dep_a).
        outvar = [var_to_index[(var,)] for var in target]
        measure = {'I': lambda d: coinformation(d, [invars, outvar])}
        dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
        uniques = {}
        for source in sources:
            constraint = frozenset((frozenset((var_to_index[source], var_to_index[target])),))
            # Restrict to edges whose upper node still contains the full
            # source-source dependency (never constrain among the sources).
            edge_set = (edge for edge in dd.edges(constraint) if tuple(invars) in edge[0])
            u = min(dd.delta(edge, 'I') for edge in edge_set)
            uniques[source] = u
        return uniques
| [
37811,
198,
464,
20203,
12,
12501,
296,
9150,
1912,
3748,
3953,
13027,
1321,
26969,
9150,
13,
198,
37811,
198,
198,
6738,
2644,
16680,
42524,
1330,
10752,
1161,
198,
6738,
11485,
35317,
1330,
7308,
40257,
47,
2389,
198,
6738,
2644,
5577,
... | 2.284366 | 3,921 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for capalyzer (parsing utilities for the MetaSUB CAP)."""

import setuptools

# NOTE: the original file had a stray bare `setuptools` expression statement
# here -- a no-op leftover that has been removed.
setuptools.setup(
    name='capalyzer',
    version='2.16.0',
    description="Parsing functionality for the metasub CAP",
    author="David C. Danko",
    author_email='dcdanko@gmail.com',
    url='https://github.com/dcdanko/capalyzer',
    packages=setuptools.find_packages(),
    package_dir={'capalyzer': 'capalyzer'},
    install_requires=[
        'click',
        'pandas',
        'scipy',
        'numpy',
        'umap-learn',
    ],
    entry_points={
        # Exposes the `capalyzer` console command.
        'console_scripts': [
            'capalyzer=capalyzer.cli:main'
        ]
    },
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
    ],
    # Bundle the reference data files the parser needs at runtime.
    package_data={'capalyzer': [
        'packet_parser/ncbi_tree/*.dmp.gz',
        'packet_parser/microbe-directory.csv',
    ]},
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
900,
37623,
10141,
198,
198,
2617,
37623,
10141,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
220,... | 2.163311 | 447 |
import toml
import os
import datetime
import itertools
# Load the terrain-correction settings (TOML) relative to the current
# working directory.
cwd = os.getcwd()
path = os.path.join(cwd,'terrain-corrector','terrain','settings','config.toml')
config = toml.load(path)
# Inject every config key as a module-level name.  At module scope locals()
# is the module's globals(), so this works here -- but it hides where names
# like EAST_BOUNDS / NORTH_BOUNDS / ELEV_BOUNDS come from.
locals().update(config)
#...and a few more expressive variations
# Flatten the three (min, max) pairs into one 6-element list and unpack it.
BOUNDS = [*EAST_BOUNDS, *NORTH_BOUNDS, *ELEV_BOUNDS]
EAST_MIN, EAST_MAX, NORTH_MIN, NORTH_MAX, ELEV_MIN, ELEV_MAX = BOUNDS
# Sorted list of topography data files
TOPO_PATH = os.path.join(cwd, 'terrain-corrector', 'data', 'topo')
TOPO_LIST = [file for file in os.listdir(TOPO_PATH)]
TOPO_LIST.sort()
# ...and a sorted list of timeseries data files
TIME_PATH = os.path.join(cwd, 'terrain-corrector', 'data', 'time')
TIME_LIST = [file for file in os.listdir(TIME_PATH)]
TIME_LIST.sort()
# The two filesets must pair up one-to-one.
assert (len(TOPO_LIST)==len(TIME_LIST)), 'Fileset lengths unequal.'
# Parse the leading YYYYMMDD of each topo filename into an aware UTC datetime.
ENABLED_DATES = [datetime.datetime(
    int(fn[0:4]), int(fn[4:6]), int(fn[6:8]), 0, 0, 0, 0,
    tzinfo=datetime.timezone.utc) for fn in TOPO_LIST]
# ...and an index mapping 'YYYY-MM-DD' strings to positions in ENABLED_DATES.
DATE_DICT={date.strftime("%Y-%m-%d"):i for i,date in enumerate(ENABLED_DATES)}
11748,
284,
4029,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
11748,
340,
861,
10141,
198,
198,
2,
2220,
4566,
13,
39532,
75,
198,
66,
16993,
796,
28686,
13,
1136,
66,
16993,
3419,
198,
6978,
796,
28686,
13,
6978,
13,
22179,
7,
... | 2.565217 | 460 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: CozmoAnim
import flatbuffers
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
198,
198,
2,
25745,
25,
1766,
89,
5908,
35320,
198,
198,
11748,
6228,
36873,
364,
198
] | 3.896552 | 29 |
#:copyright: Copyright 2009-2010 by the Vesper team, see AUTHORS.
#:license: Dual licenced under the GPL or Apache2 licences, see LICENSE.
"""
vesper.app
==========
This module defines the framework used by Vesper to bootstrap a running server from
a given configuration.
"""
import os, os.path, sys, traceback, re
from optparse import OptionParser
import itertools
from vesper import utils
from vesper.utils import MRUCache
from vesper.utils.Uri import UriToOsPath
from vesper.data import base, DataStore, transactions, store
from vesper.data.transaction_processor import TransactionProcessor
from vesper.backports import *
from vesper import VesperError
# Python 2: prefer the C-accelerated cStringIO implementation when present,
# falling back to the pure-Python StringIO module otherwise.
try:
    import cStringIO
    StringIO = cStringIO
except ImportError:
    import StringIO
import logging
# Default log level for this module; global logging configuration is left to
# the host application (the commented-out lines show the one-shot alternative).
DEFAULT_LOGLEVEL = logging.INFO
#logging.BASIC_FORMAT = "%(asctime)s %(levelname)s %(name)s:%(message)s"
#logging.root.setLevel(DEFAULT_LOGLEVEL)
#logging.basicConfig()
log = logging.getLogger("app")
# DynaExceptionFactory synthesizes exception classes in this module's
# namespace from the names below (e.g. CmdArgError is raised by argsToKw).
# NOTE(review): exact name-mangling rules live in vesper.utils -- verify there.
_defexception = utils.DynaExceptionFactory(__name__)
_defexception('CmdArgError')
_defexception('unusable namespace error')
_defexception('not authorized')
class DoNotHandleException(Exception):
    '''
    Marker exception type: RequestProcessor.doActions() will not invoke
    error-handler actions on exceptions derived from this class.
    '''
############################################################
##Helper classes and functions
############################################################
class Requestor(object):
    '''
    Requestor is a helper class that allows python code to invoke a
    vesper request as if it were a function call.
    Usage:
        response = __requestor__.requestname(**kw)
    where kw holds the optional request parameters.
    An AttributeError exception is raised if the server does not
    recognize the request.
    NOTE(review): the dispatch implementation (e.g. a __getattr__) is not
    visible in this excerpt of the file -- verify against the full source.
    '''
#the trailing __ so you can have requests named 'invoke' without conflicting
def defaultActionCacheKeyPredicateFactory(action, cacheKeyPredicate):
    '''
    Build a cache-key predicate bound to one particular Action instance.

    The returned callable prefixes the key produced by `cacheKeyPredicate`
    with the action's id so that keys coming from different actions can
    never collide.
    '''
    # Hold only the id, never the action itself, so the closure does not
    # keep the action object alive (avoids memory leaks).
    bound_id = id(action)

    def keyfunc(kw, retVal):
        return (bound_id, cacheKeyPredicate(kw, retVal))
    return keyfunc
class Action(object):
    '''
    The Action class encapsulates a step in the request processing pipeline.
    An Action has two parts, one or more match expressions and an action
    function that is invoked if the request metadata matches one of the
    match expressions. The action function returns a value which is passed
    onto the next Action in the sequence.
    '''
    # Sentinel value an action can return to signal "content not modified".
    NotModified = ('notmodified',)
    def __init__(self, action,
            cachePredicate=notCacheableKeyPredicate,
            sideEffectsPredicate=None, sideEffectsFunc=None,
            isValueCacheableCalc=defaultActionValueCacheableCalc,
            cachePredicateFactory=defaultActionCacheKeyPredicateFactory,
            debug=False):
        '''
        action must be a function with this signature:
        def action(kw, retVal) where:
        kw is the dictionary of metadata associated with the request
        retVal was the return value of the last action invoked in the in action sequence or None
        '''
        self.action = action
        # Bind the cache-key predicate to this instance so its keys cannot
        # collide with another action's (see defaultActionCacheKeyPredicateFactory).
        self.cacheKeyPredicate = cachePredicateFactory(self, cachePredicate)
        self.cachePredicateFactory = cachePredicateFactory
        self.sideEffectsPredicate = sideEffectsPredicate
        self.sideEffectsFunc = sideEffectsFunc
        self.isValueCacheableCalc = isValueCacheableCalc
        self.debug = debug
def assignAttrs(obj, configDict, varlist, default):
    '''
    Copy each name in `varlist` from `configDict` onto `obj` as an
    attribute, falling back to (a copy of) `default` for missing keys.

    When `default` is not None, every assigned value must be an instance
    of default's type; otherwise VesperError is raised.
    '''
    import copy
    for attr in varlist:
        try:
            fallback = copy.copy(default)
        except TypeError:
            # Some non-mutable objects (e.g. functions) refuse copying;
            # sharing them is safe, so use the original.
            fallback = default
        value = configDict.get(attr, fallback)
        if default is not None and not isinstance(value, type(default)):
            raise VesperError('config variable %s (of type %s)'
                'must be compatible with type %s'
                % (attr, type(value), type(default)))
        setattr(obj, attr, value)
############################################################
## main class
############################################################
#################################################
##command line handling
#################################################
def argsToKw(argv):
    '''
    Parse a command-line style argument list into a keyword dict.
    Handles:
      -fvalue      -> {'f': 'value'}   (short flag with glued value)
      --name=value -> {'name': 'value'}
      -x / --name  -> {'x': True}      (bare flag)
      -x value     -> {'x': 'value'}   (flag followed by a non-flag token)
    Raises CmdArgError when a token does not start with '-'.
    Note: Python 2 iterator protocol (i.next()); iteration ends via
    StopIteration from any of the next() calls.
    '''
    kw = {}
    i = iter(argv)
    try:
        arg = i.next()
        while 1:
            if arg[0] != '-':
                raise CmdArgError('missing arg')
            longArg = arg[:2] == '--'
            name = arg.lstrip('-')
            if not longArg and len(name) > 1:
                #e.g. -fname
                kw[name[0]] = name[1:]
                arg = i.next()
            elif longArg and '=' in name:
                name, val = name.split('=', 1)
                kw[name] = val
                arg = i.next()
            else:
                # bare flag: True unless the following token is a value
                kw[name] = True
                arg = i.next()
                if arg[0] != '-':
                    kw[name] = arg
                    arg = i.next()
    except StopIteration: pass
    return kw
# One-time initialization of the module's mutable config globals
# (_current_config, _current_configpath, ...); the function is defined
# earlier in this file (not visible in this excerpt).
_initConfigState()
def _normpath(basedir, path):
"""
return an absolute path given a basedir and a path fragment. If `path` is already absolute
it will be returned unchanged.
"""
if os.path.isabs(path):
return path
else:
tmp = os.path.normpath(os.path.join(basedir, path))
#assert os.path.isabs(tmp), 'not abs path %s, from %s + %s' % (tmp,basedir,path)
return tmp
def _get_module_path(modulename):
    "for a modulename like 'vesper.web.admin' return a tuple (absolute_module_path, is_directory)"
    # NOTE: relies on the Python 2-era `imp` module (removed in Python 3.12).
    import sys, imp
    if modulename == "__main__":
        # The main script is not importable by name; use its __file__ instead.
        m = sys.modules[modulename]
        assert hasattr(m, '__file__'), "__main__ module missing __file__ attribute"
        path = _normpath(os.getcwd(), m.__file__)
        return (path, False)
    else:
        # Walk the dotted name one component at a time, narrowing the search
        # path to the package located in the previous step.
        parts = modulename.split('.')
        parts.reverse()
        path = None
        while parts:
            part = parts.pop()
            f = None
            try:
                f, path, descr = imp.find_module(part, path and [path] or None)
            finally:
                if f: f.close()
        # descr[-1] is imp's module-type code; PKG_DIRECTORY means a package.
        return (path, descr[-1] == imp.PKG_DIRECTORY)
def _importApp(baseapp):
    '''
    Executes the given app config file. If `createApp()` was
    called during execution of the config file, the `_current_config`
    global will be set to the app configuration returned by `createApp()`.
    `baseapp` may be either a file path or a dotted module name.
    Note: Python 2 module (uses execfile()/reload()).
    '''
    baseglobals = utils.attrdict(Action=Action, createApp=createApp)
    #assuming the baseapp file calls createApp(), it will set _current_config
    if os.path.exists(baseapp):
        #set this global so we can resolve relative paths against the location
        #of the config file they appear in
        _current_configpath.append( os.path.dirname(os.path.abspath(baseapp)) )
        execfile(baseapp, baseglobals)
    else:
        # baseapp is a module name rather than a file path
        (path, isdir) = _get_module_path(baseapp)
        # print "_get_module_path for:" + str(baseapp) + " --> path:" + str(path) + " isdir:" + str(isdir)
        assert path
        #set this global so we can resolve relative paths against the location
        #of the config file they appear in
        _current_configpath.append( os.path.abspath(path) )
        basemod = sys.modules.get(baseapp)
        if basemod:
            # re-execute on repeat imports so createApp() runs again
            reload(basemod)
        else:
            __import__(baseapp, baseglobals)
    _current_configpath.pop()
def createApp(derivedapp=None, baseapp=None, static_path=(), template_path=(), actions=None, **config):
    '''
    Create a new `AppConfig`.
    :param derivedapp: is the name of the module that is extending the app. (Usually just pass `__name__`)
    :param baseapp: is either a module name or a file path to the Python script that defines an app.
    This file should have a call to :func:`createApp` in it
    :param static_path: list or string prepended to the static resource path of the app.
    :param template_path: list or string prepended to the template resource path of the app.
    :param actions: actions map of the app, will updates the base app's `action` dictionary.
    :param config: Any other keyword arguments will override config values set by the base app
    '''
    global _current_config
    # Locate the directory of the module that is deriving the app.
    if derivedapp:
        (derived_path, isdir) = _get_module_path(derivedapp)
        if not isdir:
            derived_path = os.path.dirname(derived_path)
    else:
        derived_path = None
    if baseapp:
        assert isinstance(baseapp, (str, unicode))
        #sets _current_config if the baseapp calls createApp
        _importApp(baseapp)
    else:
        _current_config = AppConfig()
    #config variables that shouldn't be simply overwritten should be specified
    #as an explicit function argument so they're not overwritten by this line:
    _current_config.update(config)
    # Merge the derived app's actions over the base app's; when the base app
    # defined actions and none were passed here, the base's are kept as-is.
    if 'actions' in _current_config:
        if actions:
            _current_config.actions.update(actions)
    else:
        _current_config.actions = actions or {}
    # Resolve resource paths relative to the most recent config file, falling
    # back to the deriving module's directory.
    basedir = _current_configpath[-1] or derived_path
    if basedir is not None:
        if not os.path.isdir(basedir):
            basedir = os.path.dirname(basedir)
        # NOTE(review): addToPath is not defined in this excerpt -- it appears
        # to be a nested helper stripped from this copy; verify in the full
        # source that it prepends the given entries to the named config path.
        addToPath(static_path, 'static_path')
        addToPath(template_path, 'template_path')
    #storage_template_path should be relative to the app config
    #that sets it, not the final (most derived) app
    for configdict in itertools.chain([_current_config, config.get('storeDefaults')],
                            (config.get('stores') or {}).values()):
        if not configdict:
            continue
        storage_template_path = configdict.get('storage_template_path')
        if storage_template_path:
            abspath = _normpath(basedir, storage_template_path)
            configdict['storage_template_path'] = abspath
    return _current_config
| [
2,
25,
22163,
4766,
25,
15069,
3717,
12,
10333,
416,
262,
36448,
525,
1074,
11,
766,
37195,
20673,
13,
198,
2,
25,
43085,
25,
20446,
3476,
5864,
739,
262,
38644,
393,
24843,
17,
45475,
11,
766,
38559,
24290,
13,
198,
37811,
198,
220... | 2.58256 | 4,094 |
# Python 2 script: projects the dataset onto its first two principal
# components, visualizes it, and reports reconstruction quality.
from step_02_data_analyzer import data, target
from step_05_classify_data import t
from sklearn.decomposition import PCA
from pylab import plot, show, figure, subplot
# Fit a 2-component PCA and project the samples onto that plane.
pca = PCA(n_components=2)
pcad = pca.fit_transform(data)
figure()
# Top panel: PCA plane, one colour per iris species label.
subplot(211)
plot(pcad[target=='setosa',0],pcad[target=='setosa',1],'bo')
plot(pcad[target=='versicolor',0],pcad[target=='versicolor',1],'ro')
plot(pcad[target=='virginica',0],pcad[target=='virginica',1],'go')
# Fraction of variance retained by each component, and the fraction lost.
print pca.explained_variance_ratio_
print 1-sum(pca.explained_variance_ratio_)
# Map the 2-D projection back to the original feature space.
data_inv = pca.inverse_transform(pcad)
# Bottom panel: two original features, coloured by the labels in t.
subplot(212)
plot(data[t==1,0],data[t==1,2],'bo')
plot(data[t==2,0],data[t==2,2],'ro')
plot(data[t==3,0],data[t==3,2],'go')
show()
# Aggregate reconstruction residual of the 2-component approximation.
print abs(sum(sum(data - data_inv)))
| [
6738,
2239,
62,
2999,
62,
7890,
62,
38200,
9107,
1330,
1366,
11,
2496,
198,
6738,
2239,
62,
2713,
62,
4871,
1958,
62,
7890,
1330,
256,
198,
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
6738,
279,
2645,
397,
1330,
71... | 2.318612 | 317 |
# -*- Mode: Python; tab-width: 4 -*-
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""access
Basic access to object services.
"""
import exceptions
import random as _random
import time
import sys
import os
import socket
from gogreen import coro
import pyinfo
import error
# Sentinel used where no token value is supplied.
BLANK_TOKEN = None
# Keys under which per-coroutine trace data / level / call source are stored
# (see coro.pop_local/get_local usage in trace_dump below).
CORO_LOCAL_TDATA = 'access-trace-data'
CORO_LOCAL_TCTRL = 'access-trace-ctrl'
CORO_LOCAL_SOURCE = 'access-call-source'
# Default retry count -- presumably the default for SplitAccess._retry;
# the wiring is not visible in this excerpt.
DEFAULT_RETRY = 2
# All-None template meaning "no trace filtering"; see trace_limit() and
# has_trace_limits().
ACCESS_TRACE_LIMITS = {'obj' : None, 'cmd' : None, 'vids' : None }
# Trace verbosity levels; enable_trace() documents what each level emits.
ACCESS_TRACE_OFF = 0
ACCESS_TRACE_TERSE = 10
ACCESS_TRACE_INFO = 20
ACCESS_TRACE_DEBUG = 30
ACCESS_TRACE_VERBOSE = 40
#
# set local request parameter to use service DB slaves if possible.
#
#
# Public trace data/info function(s)
#
def trace_dump(clear = True):
    '''trace_dump
    Dump to stdout current trace data at the current trace level. An optional
    clear parameter to reset or preserve the trace data. (default: True)
    See Also: enable_trace()
    '''
    # Fetch (and optionally reset) the coroutine-local trace table and level.
    if clear:
        tdata = coro.pop_local(CORO_LOCAL_TDATA, {})
        tlevl = _clr_trace_local()
    else:
        tdata = coro.get_local(CORO_LOCAL_TDATA, {})
        tlevl = _get_trace_local()
    if not tlevl:
        return None
    # tdata maps access-object -> {command -> (elapsed, id count, call count)}
    total, idcnt, count = 0, 0, 0
    for obj, data in tdata.items():
        for cmd, (elapse, ids, cnt) in data.items():
            total += elapse
            count += cnt
            idcnt += ids
            # Above TERSE, emit one summary line per object/command pair.
            if tlevl > ACCESS_TRACE_TERSE:
                print 'Access | %0.4f | %4d | %4d | Summary (%s.%s)' % (
                    elapse, cnt, ids, obj, cmd)
    if not total:
        return None
    lmt = has_trace_limits()
    if lmt is None:
        lmt = ''
    else:
        lmt = 'limit: %s' % lmt
    # Grand total across all objects/commands, flagging any active limits.
    print 'Access | %.4f | %4d | %4d | Summary (TOTAL) %s' % (
        total, count, idcnt, lmt)
def enable_trace(value, clear = True):
    '''enable_trace
    Set trace level.
    0  - No debug info.
    10 - Info about each access call is saved.  A call to trace_dump() will
         produce a single line summary (number of access calls, number of
         IDs, and total time spent in access calls) of the saved data.
    20 - A call to trace_dump() will dump a line for each object/command
         with the same information as the 'total' result above.
    30 - For each call to access echo the call parameters and elapse time
         to stdout.
    40 - When echoing access calls to stdout include the call stack.
    Note: Each level will produce all the data that the previous level
          produces, plus the additional documented data.
    constants:
      ACCESS_TRACE_OFF     = 0
      ACCESS_TRACE_TERSE   = 10
      ACCESS_TRACE_INFO    = 20
      ACCESS_TRACE_DEBUG   = 30
      ACCESS_TRACE_VERBOSE = 40
    Optional parameters:
      clear - Reset/Clear the local override when trace_dump is called with
              the clear parameter set to true. (default: True)
    See Also: trace_dump()
    '''
    _set_trace_local(value, clear)
    # Returns the now-effective local level paired with the requested value.
    return _get_trace_local(), value
#
# Private/Local trace data/info support function(s)
#
def sum_tuple(*tuples):
    """Element-wise sum of the given tuples, returned as a tuple."""
    return tuple(sum(column) for column in zip(*tuples))
def _execute_trace_decorator():
    '''_execute_trace_decorator
    trace decorator explicitly for the execute function.
    '''
    # NOTE(review): `function` is not defined in this excerpt; the inner
    # wrapper def appears to have been stripped from this copy of the file.
    # As written this would raise NameError when called -- verify against
    # the original source.
    return function
def _complete_trace_decorator():
    '''_complete_trace_decorator
    trace decorator explicitly for *Access complete(s) methods.
    '''
    # NOTE(review): `function` is not defined in this excerpt; the inner
    # wrapper def appears to have been stripped from this copy of the file.
    # As written this would raise NameError when called -- verify against
    # the original source.
    return function
# Take a private copy so trace_limit() mutations do not clobber the
# ACCESS_TRACE_LIMITS defaults (the original assignment aliased the
# constant dict, so setting a limit silently rewrote the defaults).
_trace_limits = dict(ACCESS_TRACE_LIMITS)
def trace_limit(obj=None, cmd=None, vids=None):
    '''Restrict access tracing to calls matching the supplied signature.

    A None component (the default) matches any value for that component.

    obj:  only trace calls on this access object.
    cmd:  only trace calls against this command.  The cmd value needs to
          make sense together with obj; an unrelated cmd effectively
          traces nothing.
    vids: only trace calls for these vid(s); may be a list.  Because calls
          can carry lists of vids, the limit matches when ANY vid in the
          call appears in the limit.
    '''
    for field, value in (('obj', obj), ('cmd', cmd), ('vids', vids)):
        _trace_limits[field] = value
def has_trace_limits():
    '''Return a short string naming which trace limits are enabled.

    The string contains:
      o: when an obj limit is enabled
      c: when a cmd limit is enabled
      v: when a vids limit is enabled
    e.g. 'oc' means obj and cmd limits, 'ov' means obj and vids limits.
    Returns None when no limits are enabled.
    '''
    flags = [key[0] for key, value in _trace_limits.items() if value is not None]
    return ''.join(flags) or None
#
# exception propagation
#
def _unwrap(result):
'''_unwrap
Given a result fetched over RPC from service.base, check for an
envelope, and if present processes the result. When the result
is in an envelope; on success the result is removed and returned
to the called, on failure an appropriate exception is raised.
'''
if not isinstance(result, dict):
return result
if not result.get('envl', False):
return result
if not result.get('rc', 0):
return result.get('result', result)
raise error.lookup(result.get('rc'), *result.get('args', ()))
@_execute_trace_decorator()
class SplitAccess(object):
    '''SplitAccess
    Provide a notifier/publisher wrapper which splits access calls into
    two component parts; 1) an initiation component which will send the
    request and return tracking sequence number(s) for the request(s),
    2) a completion component which will wait for and return responses
    for the request sequence numbers which are provided.
    Examples:
        split = SplitAccess(notifier)
        s1 = access.test.ping(split, 0, data1)
        s2 = access.test.ping(split, 0, data2)
        [r1, r2] = split.complete([s1, s2])
        s1 = access.test.ping(split, 0, data1)
        s2 = access.test.ping(split, 0, data2)
        r1, r2 = split.completes(s1, s2)
        s1 = access.test.ping(split, 0, data1)
        s2 = access.test.ping(split, 0, data2)
        [s3, s4] = access.test.ping(split, [0, 1], data)
        r1, r2, [r3, r4] = split.completes(s1, s2, [s3, s4])
    '''
    #
    # straight wrap for publish
    #
    #
    # RPC start
    #
    #
    # RPC reap
    #
    @_complete_trace_decorator()
    def complete(self, seq_list, **kwargs):
        '''complete
        Given a list of one or more sequence numbers, produced by
        access requests for RPC(s), wait for the request completion
        and return the result(s) as a list in the same order as the
        requested sequence numbers.
        Examples:
            [r1, r2, r3, r4] = self.complete([s1, s2, s3, s4])
        Optional arguments are passed to the underlying publisher RPC
        completion function:
            timeout - No single socket receive should take longer than
                      timeout seconds. (float or int are valid as well
                      as None which denotes infinite/no timeout)
        See Also: rpc(), rpcs()
        '''
        if not seq_list:
            return None
        # One initial receive pass plus self._retry reissues for requests
        # which came back with an empty reply.
        retry = self._retry + 1
        # results is positional (same order as seq_list); pending maps an
        # outstanding sequence number back to its position in results.
        results = map(lambda i: None, xrange(len(seq_list)))
        pending = dict(zip(seq_list, xrange(len(seq_list))))
        errored = {}
        if len(results) != len(pending):
            # Fix: interpolate the message; previously the format string
            # and its arguments were passed as separate, unformatted
            # exception arguments.
            raise ValueError(
                'length mismatch. duplicates? <%d:%d>' % (
                    len(results),
                    len(pending)))
        while retry and pending:
            retry -= 1
            result = self._notifier.rpcs_recv(pending.keys(), **kwargs)
            for seq, req, rep in result:
                pos = pending.pop(seq)
                # Any reply for this position supersedes a previously
                # recorded error from an earlier reissue.
                errored.pop(pos, None)
                results[pos] = rep
                if rep or not req:
                    continue
                # Empty reply but the request is known: reissue it and
                # track the new sequence number under the same position.
                seq = self.rpc(*req, **kwargs).pop()
                if not seq:
                    continue
                pending[seq] = pos
                errored[pos] = (req, rep)
        #
        # If no errors are present, and by extension nothing is left
        # pending, then return the results to the user.
        #
        if not errored:
            return map(lambda i: _unwrap(i[0]), results)
        #
        # All remaining cases are errors which will be reported as a
        # dictionary of sequence IDs mapped to the request tuple.
        #
        errored = dict(map(
            lambda i: (seq_list[i[0]], i[1]),
            errored.iteritems()))
        #
        # Determine if any sequence ID(s) provided are reported to
        # have a local error. (either network error, or invalid
        # sequence number)
        #
        if filter(lambda i: i[1] is None, errored.itervalues()):
            raise SplitClientError(seq_list, results, errored)
        #
        # If any sequence has an empty response, meaning no service
        # was available to respond, then raise an error for the
        # entire request. (default behavior, if other behaviour is
        # desired add a non-default mode to control it)
        #
        raise SplitServerError(seq_list, results, errored)
    # not decorated to prevent trace from doubled counting
    def completes(self, *args, **kwargs):
        '''completes
        Given sequence number(s), produced by access requests for
        RPC(s), wait for the request completion and return the result
        in the same order as the arguments were presented. The sequence
        number arguments can be presented as either individual arguments
        and/or lists/tuples of sequence numbers.
        Examples:
            r1 = self.completes(s1)
            r1, r2, = self.completes(s1, s2)
            r1, r2, (r3, r4) = self.completes(s1, s2, (s3, s4))
        Optional arguments are passed to the underlying publisher RPC
        completion function:
            timeout - No single socket receive should take longer than
                      timeout seconds. (float or int are valid as well
                      as None which denotes infinite/no timeout)
        See Also: rpc(), rpcs()
        '''
        if not args:
            return None
        # Flatten the mixed scalar/list arguments, complete them in one
        # batch, then rebuild the original argument structure.
        flat = _flatten(args)
        result = _fatten(args, dict(zip(flat, self.complete(flat, **kwargs))))
        if len(args) == 1:
            return result.pop()
        else:
            return result
    @_complete_trace_decorator()
    def any(self, seq_set):
        '''any
        EXPERIMENTAL
        Given a set of sequence numbers, produced by access requests for
        RPC(s), return a tuple of any one sequence number and the matching
        response. Also remove the sequence number from the given set.
        Note: No retries, minimal testing.
        '''
        if not seq_set:
            raise ValueError('empty sequence set', seq_set)
        seq, req, rep = self._notifier.rpcs_pop(seq_set)
        if rep is None:
            # None reply denotes a local/transport failure.
            raise ClientError(seq, req, rep)
        if not rep:
            # Empty reply denotes that no service handled the request.
            raise ServerError(seq, req, rep)
        seq_set.remove(seq)
        return (seq, _unwrap(rep.pop()))
    def clear(self, seqs):
        '''clear
        Clear any request/response state associated with a set or list
        of sequence numbers
        '''
        return self._notifier.rpcs_clear(seqs)
| [
2,
532,
9,
12,
10363,
25,
11361,
26,
7400,
12,
10394,
25,
604,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
5075,
12,
10333,
37651,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
... | 2.495408 | 5,226 |
import random as r

# NOTE(review): ``main`` is not defined in this fragment — presumably defined
# elsewhere in the original file; the ``random`` import is also unused here.
if __name__ == "__main__":
	main()
11748,
4738,
355,
374,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
12417,
3419
] | 2.7 | 20 |
# -*- coding:utf-8 -*-
import json
import time
import random
import hashlib
import urllib
import urllib2
if __name__ == '__main__':
    # NOTE(review): send_request() is not visible in this fragment —
    # presumably defined elsewhere; it appears to return a raw JSON string
    # from a translation API.
    html = send_request()
    dict_obj = json.loads(html)
    # The translated text
    print(dict_obj["translateResult"][0][0]["tgt"])
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
12234,
8019,
198,
11748,
2956,
297,
571,
198,
11748,
2956,
297,
571,
17,
628,
198,
361,
11593,
3672,
834,
6624,
... | 2.294643 | 112 |
# NOTE(review): 'Desktop.chess-nn' is not a valid Python module path (hyphens
# are not allowed in identifiers), so these imports cannot work as written —
# the package would need to be renamed (e.g. chess_nn) or loaded differently.
from Desktop.chess-nn.minimax_lookup import *
from Desktop.chess-nn.evl_conv_3 import model_fn
with tf.device('/gpu:0'):
    # Load the pretrained evaluation CNN from its checkpoint directory.
    evl_conv_temp = tf.estimator.Estimator(
        model_fn = model_fn, model_dir = './DNN/evl_conv_5')
player_side = input("Please choose your side(b/w): ")
difficulty = input("Choose Difficulty(1-10): ")
board = chess.Board()
#done? implement a-b pruning for memory saving
#need to consider lose or winning senarios
# Number of moves from board_a to board_b
turn_dict = {'b':False , 'w':True}
#prob of win+draw
# NOTE(review): ``sess`` is never created in this fragment — presumably a
# tf.Session opened in code that was cut from this excerpt.
sess.close()
| [
6738,
27850,
13,
2395,
824,
12,
20471,
13,
1084,
320,
897,
62,
5460,
929,
1330,
1635,
198,
6738,
27850,
13,
2395,
824,
12,
20471,
13,
1990,
75,
62,
42946,
62,
18,
1330,
2746,
62,
22184,
628,
628,
198,
198,
4480,
48700,
13,
25202,
... | 2.581818 | 220 |
import libnum
import hashlib
import random
# Public parameters of this toy Schnorr-style identification scheme.
# NOTE(review): n is small and not obviously prime-order — demo values only.
n=8269
g=11
password = "Hello"
# Secret x derived from the password: first 8 hex digits of SHA-256, mod n.
x = int(hashlib.sha256(password.encode()).hexdigest()[:8], 16) % n
print('\n======Phase 4: Peggy recieves c and calculate r=v-cx, sends r to Victor==================')
c = input("c= ")
v = input("v= ")
# Prover's response r = v - c*x, reduced modulo the group order (n-1).
r = (int(v) - int(c) * x) % (n-1)
print('r=v-cx =\t\t',r)
11748,
9195,
22510,
198,
11748,
12234,
8019,
198,
11748,
4738,
198,
77,
28,
23,
26276,
198,
70,
28,
1157,
198,
198,
28712,
796,
366,
15496,
1,
198,
87,
796,
493,
7,
17831,
8019,
13,
26270,
11645,
7,
28712,
13,
268,
8189,
3419,
737,
... | 2.356643 | 143 |
import sys
from unittest.mock import MagicMock
import pytest
import pytorch_lightning as pl
import torch
from transformers import AutoTokenizer
from lightning_transformers.core.nlp import HFBackboneConfig
from lightning_transformers.task.nlp.masked_language_modeling import (
MaskedLanguageModelingDataModule,
MaskedLanguageModelingTransformer,
)
from lightning_transformers.task.nlp.masked_language_modeling.config import MaskedLanguageModelingDataConfig
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
| [
11748,
25064,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
198,
198,
11748,
12972,
9288,
198,
11748,
12972,
13165,
354,
62,
2971,
768,
355,
458,
198,
11748,
28034,
198,
6738,
6121,
364,
1330,
11160,
30642,
7509,
198,
19... | 3.447853 | 163 |