function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def dst(self, dt):
return ZERO | sprinkler/rainmachine-developer-resources | [
26,
38,
26,
4,
1436776616
] |
def rmYMDToTimestamp(year, month, day):
    """Convert a (year, month, day) calendar date to a local-time UNIX timestamp.

    Years beyond the Y2K38 horizon are clamped so the result still fits a
    32-bit time_t on embedded platforms.

    :param year: calendar year
    :param month: calendar month (1-12)
    :param day: calendar day of month
    :return: integer UNIX timestamp of local midnight for that date
    """
    if year > Y2K38_MAX_YEAR:  # Y2K38: clamp to the largest representable year
        year = Y2K38_MAX_YEAR
    # time.mktime is portable; strftime("%s") is a glibc extension that is
    # unavailable on Windows (the original only fell back to mktime there).
    return int(time.mktime(datetime(year, month, day).timetuple()))
26,
38,
26,
4,
1436776616
] |
def rmTimestampToDate(timestamp):
    """Return the local datetime for *timestamp*, clamped to the Y2K38 limit."""
    clamped = min(timestamp, Y2K38_MAX_TIMESTAMP)  # Y2K38 guard
    return datetime.fromtimestamp(clamped)
26,
38,
26,
4,
1436776616
] |
def rmCurrentTimestampToDateAsString(format = None):
    """Return the current local time formatted as a string.

    :param format: optional strftime format; falls back to '%Y-%m-%d %H:%M:%S'.
    :return: formatted date/time string for the current whole second
    """
    timestamp = int(time.time())
    # Single code path: use the default format when none is given instead of
    # duplicating the fromtimestamp/strftime call across two branches.
    return datetime.fromtimestamp(timestamp).strftime(format or '%Y-%m-%d %H:%M:%S')
26,
38,
26,
4,
1436776616
] |
def rmTimestampFromDateAsString(dateString, format):
    """Parse *dateString* with *format* and return a local-time UNIX timestamp.

    :param dateString: date/time text to parse
    :param format: strftime/strptime format describing *dateString*
    :return: integer UNIX timestamp (local timezone)
    """
    # time.mktime is portable; strftime("%s") is a glibc-only extension that
    # raises/returns garbage on Windows.
    return int(time.mktime(datetime.strptime(dateString, format).timetuple()))
26,
38,
26,
4,
1436776616
] |
def rmTimestampFromUTCDateAsString(dateString, format):
    """Parse *dateString* (interpreted as UTC) and return a UNIX timestamp."""
    epoch = datetime.utcfromtimestamp(0)
    parsed = datetime.strptime(dateString, format)
    return int((parsed - epoch).total_seconds())
26,
38,
26,
4,
1436776616
] |
def rmTimestampToYearMonthDay(timestamp):
    """Return the (year, month, day) tuple of *timestamp* in local time."""
    moment = datetime.fromtimestamp(timestamp)
    return moment.year, moment.month, moment.day
26,
38,
26,
4,
1436776616
] |
def rmNormalizeTimestamp(timestamp):
    """Round-trip *timestamp* through local calendar time, returning whole seconds.

    Normalizes float/fractional timestamps to an integer second count.
    """
    # time.mktime(...) is the portable equivalent of strftime('%s'), which is
    # a glibc-only extension.
    return int(time.mktime(datetime.fromtimestamp(timestamp).timetuple()))
26,
38,
26,
4,
1436776616
] |
def rmNowDateTime():
    """Return the current local date and time as a naive datetime object."""
    return datetime.now()
26,
38,
26,
4,
1436776616
] |
def rmCurrentDayTimestamp():
    """Return the timestamp of local midnight of the current day."""
    now = int(time.time())
    return rmGetStartOfDay(now)
26,
38,
26,
4,
1436776616
] |
def rmGetStartOfDay(timestamp):
    """Return the timestamp of local midnight for the day containing *timestamp*."""
    # Renamed the local away from the builtin name 'tuple', and switched the
    # glibc-only strftime("%s") for the portable time.mktime.
    day = datetime.fromtimestamp(timestamp).timetuple()
    return int(time.mktime(datetime(day.tm_year, day.tm_mon, day.tm_mday).timetuple()))
26,
38,
26,
4,
1436776616
] |
def rmTimestampIsLeapYear(timestamp):
    """Return True if *timestamp* falls (in local time) within a leap year.

    :param timestamp: UNIX timestamp
    :return: bool, True for Gregorian leap years
    """
    year = datetime.fromtimestamp(timestamp).year
    # Gregorian rule: every 4th year, except centuries unless divisible by 400.
    # (Replaces the old if/elif cascade and the commented-out datetime probe.)
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
26,
38,
26,
4,
1436776616
] |
def rmDayRange(startDayTimestamp, numDays):
    """Return abs(numDays) day timestamps starting from *startDayTimestamp*.

    A positive count walks forward one day at a time, a negative count walks
    backwards; the start day is always the first entry. An empty list is
    returned for a zero count.
    """
    base = datetime.fromtimestamp(startDayTimestamp)
    step = 1 if numDays >= 0 else -1
    return [
        int(time.mktime((base + timedelta(days=step * offset)).timetuple()))
        for offset in range(abs(numDays))
    ]
26,
38,
26,
4,
1436776616
] |
def rmGetNumberOfDaysBetweenTimestamps(startTimestamp, endTimestamp):
    """Return the number of whole days from *startTimestamp* to *endTimestamp*.

    Negative when the end precedes the start (timedelta.days semantics).
    """
    span = datetime.fromtimestamp(endTimestamp) - datetime.fromtimestamp(startTimestamp)
    return span.days
26,
38,
26,
4,
1436776616
] |
def computeSuntransitAndDayLenghtForDayTs(ts, lat, lon, elevation):
    """Compute the solar transit and sunrise/sunset hour angle for one day.

    :param ts: timestamp anywhere within the day of interest (snapped to the
        UTC start of day before use)
    :param lat: observer latitude in degrees
    :param lon: observer longitude in degrees
    :param elevation: observer elevation (units per computeHourAngle's contract)
    :return: (Jtr, w0) — the solar-transit Julian date and the hour angle w0
        used by callers to derive sunrise/sunset and day length.
    """
    ts = rmGetStartOfDayUtc(ts)
    n = julianDayFromTimestamp(ts)
    J = __computeMeanSolarNoon(n, lon)      # mean solar noon at this longitude
    M = __computeSolarMeanAnomay(J)         # solar mean anomaly (degrees)
    C = __equationOfTheCenter(M)            # equation-of-the-center correction
    L = __computeEclipticLongitude(M, C)    # ecliptic longitude (degrees)
    Jtr = computeSolarTransit(J, M, L)
    delta = __computeSinSunDeclination(L)   # sine of the solar declination
    w0 = computeHourAngle(lat, delta, elevation)
    return Jtr, w0
26,
38,
26,
4,
1436776616
] |
def rmGetSunriseTimestampForDayTimestamp(ts, lat, lon, elevation):
    """Return the sunrise timestamp (UTC seconds) for the day containing *ts*.

    Returns *ts* unchanged when latitude or longitude is unknown so callers
    always get a usable timestamp.
    """
    if lat is None or lon is None:
        log.debug("Latitude or longitude is not set. Returning same timestamp")
        return ts
    # NOTE(review): longitude sign is flipped here — presumably the transit
    # formulas use the opposite longitude convention; confirm against them.
    Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
    Jrise = Jtr-w0/360  # transit minus the hour angle converted to day fraction
    tsJrise = julianDayToUTC(Jrise)
    return tsJrise
26,
38,
26,
4,
1436776616
] |
def julianDayToUTC(JD):
    """Convert a Julian date to seconds since the UNIX epoch (1970-01-01 UTC)."""
    # 2440587.5 is the Julian date of the UNIX epoch; 86400 seconds per day.
    days_since_epoch = JD - 2440587.5
    return days_since_epoch * 86400
26,
38,
26,
4,
1436776616
] |
def __sina(degree):
    """Return the sine of an angle given in degrees."""
    # math.radians uses full-precision pi instead of the truncated literal
    # 3.14159265359 the original hard-coded.
    from math import radians, sin
    return sin(radians(degree))
26,
38,
26,
4,
1436776616
] |
def __asina(x):
    """Return asin(x) in degrees, saturating out-of-domain inputs to +/-90."""
    from math import asin, degrees
    if abs(x) > 1:
        # asin is undefined outside [-1, 1]; clamp instead of raising so
        # callers feeding slightly-out-of-range values keep working.
        return -90. if x < 0 else 90.
    # math.degrees uses full-precision pi instead of the truncated literal.
    return degrees(asin(x))
26,
38,
26,
4,
1436776616
] |
def __computeSolarMeanAnomay(solarNoon):
    """Return the solar mean anomaly in degrees for the given mean solar noon."""
    anomaly = 357.5291 + 0.98560028 * solarNoon
    return anomaly % 360
26,
38,
26,
4,
1436776616
] |
def __computeEclipticLongitude(solarMeanAnomaly, eqCenter):
    """Return the ecliptic longitude in degrees, wrapped to [0, 360)."""
    # Fixed angular offsets (180 + 102.9372) are applied before wrapping.
    return (solarMeanAnomaly + eqCenter + 180 + 102.9372) % 360
26,
38,
26,
4,
1436776616
] |
def __computeSinSunDeclination(L):
    """Return the sine of the sun's declination for ecliptic longitude L (degrees)."""
    # 23.439 degrees is the obliquity constant used throughout this module.
    return __sina(L) * __sina(23.439)
26,
38,
26,
4,
1436776616
] |
def rmNTPFetch(server = "pool.ntp.org", withRequestDrift = False):
    """Query an NTP server and return its time as a UNIX timestamp, or None.

    :param server: NTP host to query (UDP port 123)
    :param withRequestDrift: if True, add half the request round-trip time to
        compensate for network latency
    :return: UNIX timestamp (int, or float when drift-corrected), or None on
        any failure.
    """
    import struct
    from socket import socket, AF_INET, SOCK_DGRAM
    requestPacket = b'\x1b' + 47 * b'\0'  # minimal NTP client request header
    startTime = time.time()
    try:
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.settimeout(5)
    except Exception:
        log.error("NTPFetch: Can't create socket")
        return None
    try:
        sock.sendto(requestPacket, (server, 123))
        data, ip = sock.recvfrom(1024)
    except Exception:
        # Timeouts/unreachable hosts are routine in the field; fail quietly.
        return None
    finally:
        sock.close()  # the original leaked the socket on every call
    try:
        if data:
            timestamp = struct.unpack('!12I', data)[10]
            # NTP counts seconds from 1900-01-01; UNIX from 1970-01-01.
            timestamp -= 2208988800
            # http://stackoverflow.com/questions/1599060/how-can-i-get-an-accurate-utc-time-with-python
            if withRequestDrift:
                reqTime = time.time() - startTime
                timestamp += reqTime / 2
            return timestamp
    except Exception:
        log.error("NTPFetch: Conversion failed.")
    return None
26,
38,
26,
4,
1436776616
] |
def __init__(self, fallback = True):
    """Initialize the monotonic time provider.

    :param fallback: if True, allow falling back to non-monotonic wall-clock
        time when a monotonic source is unavailable.
    """
    self.fallback = fallback
    self.clock_gettime = None  # native clock handle; presumably set by monotonicInit() — confirm
    self.get = None            # resolved getter callable; presumably set by monotonicInit() — confirm
    self.monotonicInit()
26,
38,
26,
4,
1436776616
] |
def monotonicFallback(self, asSeconds = True):
    """Return wall-clock time, truncated to whole seconds when *asSeconds*."""
    now = time.time()
    return int(now) if asSeconds else now
26,
38,
26,
4,
1436776616
] |
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object

    Adapted from scipy.

    :param domain: documentation domain; only 'py' is handled
    :param info: dict with 'module' and 'fullname' of the documented object
    :return: GitHub source URL with a line-range anchor, or None when the
        object cannot be located inside the sksurv package.
    """
    import sksurv
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return None
    try:
        fn = inspect.getsourcefile(obj)
    except TypeError:
        fn = None
    if fn is None and hasattr(obj, '__module__'):
        # Fall back to the defining module's file (e.g. for decorated objects).
        fn = inspect.getsourcefile(sys.modules[obj.__module__])
    if fn is None:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except ValueError:
        lineno = None
    if lineno:
        linespec = '#L%d-L%d' % (lineno, lineno + len(source) - 1)
    else:
        linespec = ''
    startdir = Path(sksurv.__file__).parent.parent.absolute()
    if not fn.startswith(str(startdir)):  # not in sksurv
        return None
    fn = '/'.join(Path(fn).relative_to(startdir).parts)
    if fn.startswith('sksurv/'):
        # Dev builds tagged …devN+g<hash> link to that commit; other dev
        # builds link to master; releases link to their v<release> tag.
        m = re.match(r'^.*dev[0-9]+\+g([a-f0-9]+)$', release)
        if m:
            branch = m.group(1)
        elif 'dev' in release:
            branch = 'master'
        else:
            branch = 'v{}'.format(release)
        # Bug fix: the format string was missing the {filename} placeholder,
        # so the 'filename' kwarg was silently ignored and the URL was broken.
        return 'https://github.com/sebp/scikit-survival/blob/{branch}/{filename}{linespec}'.format(
            branch=branch,
            filename=fn,
            linespec=linespec
        )
    else:
        return None
906,
185,
906,
27,
1482790553
] |
def preprocess_cell(self, cell, resources, index):
    """Rewrite matched '(dir/file)' links in markdown cells to .rst links.

    The replacement URL is made relative to the notebook's location below
    self.DOC_DIR so it resolves from the rendered document tree. Non-markdown
    cells pass through unchanged.
    """
    # path of notebook directory, relative to conf.py
    nb_path = Path(resources['metadata']['path']).relative_to(self.DOC_DIR)
    up_to_root = [os.pardir] * len(nb_path.parts)
    if cell.cell_type == 'markdown':
        text = cell.source
        substitutions = []
        for match in self.URL_PATTERN.finditer(text):
            segments = up_to_root[:]
            segments.append(match.group(1))
            new_link = '({}/{}.rst)'.format("/".join(segments), match.group(2))
            substitutions.append((match.group(0), new_link))
        for old, new in substitutions:
            text = text.replace(old, new)
        cell.source = text
    return cell, resources
906,
185,
906,
27,
1482790553
] |
def __getattr__(cls, name):
    """Fabricate attributes for a mocked module.

    File/path dunders get a harmless placeholder; capitalized names are
    treated as classes and get a fresh dummy type; anything else yields
    another mock module.
    """
    if name in ('__file__', '__path__'):
        return '/dev/null'
    first = name[0]
    if first == first.upper() and first != "_":
        # Not very good, we assume Uppercase names are classes...
        mocktype = type(name, (), {})
        mocktype.__module__ = __name__
        return mocktype
    return MockModule()
906,
185,
906,
27,
1482790553
] |
def __init__(self, machine):
    """
    Default scan telemetry constructor
    :param machine: Scanned machine
    """
    super(ScanTelem, self).__init__()
    # Machine that was scanned; carried as part of this telemetry record.
    self.machine = machine
6098,
725,
6098,
196,
1440919371
] |
def setUp(self):
    """Create a tenant client plus teacher/student users and a quest-map fixture."""
    self.client = TenantClient(self.tenant)
    # need a teacher and a student with known password so tests can log in as each, or could use force_login()?
    self.test_password = "password"
    # need a teacher before students can be created or the profile creation will fail when trying to notify
    self.test_teacher = User.objects.create_user('test_teacher', password=self.test_password, is_staff=True)
    self.test_student1 = User.objects.create_user('test_student', password=self.test_password)
    self.map = baker.make('djcytoscape.CytoScape')
13,
18,
13,
234,
1436242433
] |
def test_all_page_status_codes_for_students(self):
    """A logged-in student can view map pages; admin-only views redirect away."""
    success = self.client.login(username=self.test_student1.username, password=self.test_password)
    self.assertTrue(success)
    self.assert200('djcytoscape:index')
    self.assert200('djcytoscape:quest_map_personalized', args=[self.map.id, self.test_student1.id])
    # need to build interlinked maps to test this. Do in own test
    # self.assert200('djcytoscape:quest_map_interlink', args=[1, 1, 1])
    self.assert200('djcytoscape:list')
    self.assert200('djcytoscape:primary')
    self.assert200('djcytoscape:quest_map', args=[self.map.id])
    # Management views should bounce non-staff users to the admin login.
    self.assertRedirectsAdmin('djcytoscape:update', args=[self.map.id])
    self.assertRedirectsAdmin('djcytoscape:delete', args=[self.map.id])
    self.assertRedirectsAdmin('djcytoscape:regenerate', args=[self.map.id])
    self.assertRedirectsAdmin('djcytoscape:regenerate_all')
    self.assertRedirectsAdmin('djcytoscape:generate_map', kwargs={'quest_id': 1, 'scape_id': 1})
    self.assertRedirectsAdmin('djcytoscape:generate_unseeded')
13,
18,
13,
234,
1436242433
] |
def test_x_axis_labels(self):
    """_x_axis_labels maps graph width to {column: time label}, scaling units."""
    # Width sweep at a fixed five-second interval.
    test_inputs = {
        0: {},
        7: {},
        10: {5: '25s'},
        15: {5: '25s', 10: '50'},
        20: {5: '25s', 10: '50', 15: '1m'},
        25: {5: '25s', 10: '50', 15: '1m', 20: '1.6'},
        45: {5: '25s', 10: '50', 15: '1m', 20: '1.6', 25: '2.0', 30: '2.5', 35: '2.9', 40: '3.3'},
        80: {10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'},  # spaced more since wide
    }
    for width, expected in test_inputs.items():
        self.assertEqual(expected, nyx.panel.graph._x_axis_labels(nyx.panel.graph.Interval.FIVE_SECONDS, width))
    # Interval sweep at a fixed width of 80 cells.
    test_inputs = {
        nyx.panel.graph.Interval.EACH_SECOND: {
            10: '10s', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1m', 70: '1.1'
        }, nyx.panel.graph.Interval.FIVE_SECONDS: {
            10: '50s', 20: '1m', 30: '2.5', 40: '3.3', 50: '4.1', 60: '5.0', 70: '5.8'
        }, nyx.panel.graph.Interval.THIRTY_SECONDS: {
            10: '5m', 20: '10', 30: '15', 40: '20', 50: '25', 60: '30', 70: '35'
        }, nyx.panel.graph.Interval.MINUTELY: {
            10: '10m', 20: '20', 30: '30', 40: '40', 50: '50', 60: '1h', 70: '1.1'
        }, nyx.panel.graph.Interval.FIFTEEN_MINUTE: {
            10: '2h', 20: '5', 30: '7', 40: '10', 50: '12', 60: '15', 70: '17'
        }, nyx.panel.graph.Interval.THIRTY_MINUTE: {
            10: '5h', 20: '10', 30: '15', 40: '20', 50: '1d', 60: '1.2', 70: '1.4'
        }, nyx.panel.graph.Interval.HOURLY: {
            10: '10h', 20: '20', 30: '1d', 40: '1.6', 50: '2.0', 60: '2.5', 70: '2.9'
        }, nyx.panel.graph.Interval.DAILY: {
            10: '10d', 20: '20', 30: '30', 40: '40', 50: '50', 60: '60', 70: '70'
        },
    }
    for interval, expected in test_inputs.items():
        self.assertEqual(expected, nyx.panel.graph._x_axis_labels(interval, 80))
8,
3,
8,
4,
1457028601
] |
def test_draw_subgraph_blank(self, tor_controller_mock):
    """With no bandwidth data the subgraph renders as the blank baseline."""
    tor_controller_mock().get_info.return_value = None
    data = nyx.panel.graph.BandwidthStats()
    rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
    self.assertEqual(EXPECTED_BLANK_GRAPH, rendered.content)
8,
3,
8,
4,
1457028601
] |
def test_draw_subgraph(self, tor_controller_mock):
    """Seeded read/write samples render as the expected bandwidth subgraph."""
    tor_controller_mock().get_info.return_value = '543,543 421,421 551,551 710,710 200,200 175,175 188,188 250,250 377,377'
    data = nyx.panel.graph.BandwidthStats()
    rendered = test.render(nyx.panel.graph._draw_subgraph, data.primary, 0, 30, 7, nyx.panel.graph.Bounds.LOCAL_MAX, nyx.panel.graph.Interval.EACH_SECOND, nyx.curses.Color.CYAN, '*')
    self.assertEqual(EXPECTED_GRAPH, rendered.content)
8,
3,
8,
4,
1457028601
] |
def test_draw_accounting_stats(self, tor_controller_mock):
    """Accounting stats render as the expected panel text while tor is alive."""
    tor_controller_mock().is_alive.return_value = True
    accounting_stat = stem.control.AccountingStats(
        1410723598.276578,
        'awake',
        datetime.datetime(2014, 9, 14, 19, 41),
        62,
        4837, 102944, 107781,
        2050, 7440, 9490,
    )
    rendered = test.render(nyx.panel.graph._draw_accounting_stats, 0, accounting_stat)
    self.assertEqual(EXPECTED_ACCOUNTING, rendered.content)
8,
3,
8,
4,
1457028601
] |
def get_first_noncomment_line(filename: str) -> Optional[str]:
    """Return the first non-comment line of *filename*, or None if there is none."""
    with open(filename) as f:
        for line in gen_noncomment_lines(f):
            return line
    return None
12,
5,
12,
5,
1425998885
] |
def main() -> None:
    """
    Command-line entry point.
    """
    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(
        description="Hash IDs in bulk, using a cryptographic hash function.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'infile', type=str,
        help="Input file, or '-' for stdin. "
             "Use one line per thing to be hashed. "
             "Comments (marked with '#') and blank lines are ignored. "
             "Lines have whitespace stripped left and right.")
    parser.add_argument(
        '--outfile', type=str, default="-",
        help="Output file, or '-' for stdout. "
             "One line will be written for every input line. "
             "Blank lines will be written for commented or blank input.")
    parser.add_argument(
        '--key', type=str,
        help="Secret key for hasher (warning: may be visible in process list; "
             "see also --keyfile)")
    parser.add_argument(
        '--keyfile', type=str,
        help="File whose first noncomment line contains the secret key for "
             "the hasher. (It will be whitespace-stripped right and left.)")
    parser.add_argument(
        '--method', choices=[HashMethods.HMAC_MD5,
                             HashMethods.HMAC_SHA256,
                             HashMethods.HMAC_SHA512],
        default=HashMethods.HMAC_MD5,
        help="Hash method")
    parser.add_argument(
        '--keepid', action="store_true",
        help="Produce CSV output with (hash,id) rather than just the hash")
    parser.add_argument(
        '--verbose', '-v', action="store_true",
        help="Be verbose (NB will write key to stderr)")
    args = parser.parse_args()
    main_only_quicksetup_rootlogger(logging.DEBUG if args.verbose
                                    else logging.INFO)
    # Exactly one key source must be supplied: inline --key XOR --keyfile.
    assert bool(args.key) != bool(args.keyfile), (
        "Specify either --key or --keyfile (and not both)."
    )
    if args.keyfile:
        key = get_first_noncomment_line(args.keyfile)
        assert key, f"No key found in keyfile: {args.keyfile}"
    else:
        key = args.key
    # Delegate the actual hashing work to the library function.
    bulk_hash(
        input_filename=args.infile,
        output_filename=args.outfile,
        hash_method=args.method,
        key=key,
        keep_id=args.keepid,
    )
12,
5,
12,
5,
1425998885
] |
def debug_print(*s):
    """
    Print message to console in debugging mode
    """
    if muv_props.DEBUG:
        # Unpack the varargs so multiple arguments print space-separated
        # instead of as a tuple repr like ("msg",).
        print(*s)
69,
30,
69,
37,
1461884765
] |
def redraw_all_areas():
    """
    Redraw all areas
    """
    # Tag every area of the current screen so the UI repaints it on the next
    # redraw cycle.
    for area in bpy.context.screen.areas:
        area.tag_redraw()
69,
30,
69,
37,
1461884765
] |
def test_schema(self):
    """The meson plugin's JSON schema exposes exactly the expected options."""
    schema = MesonPlugin.get_schema()
    self.assertThat(
        schema,
        Equals(
            {
                "$schema": "http://json-schema.org/draft-04/schema#",
                "additionalProperties": False,
                "properties": {
                    "meson-parameters": {
                        "default": [],
                        "items": {"type": "string"},
                        "type": "array",
                        "uniqueItems": True,
                    },
                    "meson-version": {"default": "", "type": "string"},
                },
                "required": ["source"],
                "type": "object",
            }
        ),
    )
1092,
435,
1092,
66,
1446139395
] |
def test_get_build_environment(self):
    """The meson plugin contributes no extra build environment variables."""
    plugin = MesonPlugin(part_name="my-part", options=lambda: None)
    self.assertThat(plugin.get_build_environment(), Equals(dict()))
1092,
435,
1092,
66,
1446139395
] |
def trap(self, height):
    """
    Compute the total rain water trapped between the bars of *height*.

    :type height: List[int]
    :rtype: int
    """
    n = len(height)
    # Water above bar i is bounded by the lower of the tallest bar to its
    # left and to its right, minus the bar's own height. First pass records
    # the running left maximum; the right-to-left pass folds in the right
    # maximum and accumulates the trapped volume.
    max_left = [0] * n
    leftmax = 0
    for i in range(n):
        if height[i] > leftmax:
            leftmax = height[i]
        max_left[i] = leftmax
    rightmax = 0
    res = 0
    for i in range(n - 1, -1, -1):
        if height[i] > rightmax:
            rightmax = height[i]
        res += min(max_left[i], rightmax) - height[i]
    return res
1,
1,
1,
2,
1474399542
] |
def test_main(in_sas_id, in_parset_path, in_file_sizes_path, in_f0seqnr_sizes_path):
    # NOTE(review): this snippet appears truncated — only the result flag is
    # initialized here; the validation steps presumably follow in the full
    # source. Confirm against the complete file before relying on it.
    result = True
1,
2,
1,
2,
1461226603
] |
def parse_arguments(argv):
    """Extract (sas_id, parset_path, file_sizes_path, f0seqnr_sizes_path) from argv.

    argv[0] is the program name; the four positional arguments follow it.
    """
    return (int(argv[1]), argv[2], argv[3], argv[4])
1,
2,
1,
2,
1461226603
] |
def __init__(self, user, redirect_to, *args, **kwargs):
    """Build the default-branch chooser for *user*.

    Superusers may pick any branch; other users only the branches they
    organize. Also seeds the organizer id and redirect target as initial
    form data.
    """
    super(DefaultBranchForm, self).__init__(*args, **kwargs)
    if user.is_superuser:
        branches = Branch.objects.all()
    else:
        branches = Branch.objects.filter(pk__in=user.branches_organized.all)
    choices = [(o.id, unicode(o.title)) for o in branches]
    self.fields['default_branch'] = forms.ChoiceField(choices=choices)
    # Pre-select the user's current default branch when one is set.
    if user.default_branch:
        self.initial['default_branch'] = user.default_branch.pk
    self.initial['organizer_id'] = user.pk
    self.initial['redirect_to'] = redirect_to
16,
5,
16,
31,
1344286300
] |
def label_from_instance(self, obj):
    """Render a choice label like 'Monday, Jan 01 7:30pm at Venue' in local time."""
    from django.utils import timezone
    tz = timezone.get_current_timezone()
    local_start = obj.start_time.astimezone(tz)
    date_part = local_start.strftime('%A, %b %d')
    time_part = local_start.strftime('%I:%M%p').lstrip('0').lower()
    if obj.venue is None:
        return "%s %s" % (date_part, time_part)
    return "%s %s at %s" % (date_part, time_part, obj.venue)
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Set friendly required-field messages and default the site field."""
    super(BranchForm, self).__init__(*args, **kwargs)
    self.fields['city'].error_messages['required'] = _(
        "Please enter a city")
    self.fields['country'].error_messages['required'] = _(
        "Please enter a country")
    # New branches belong to the current site by default.
    self.initial['site'] = Site.objects.get_current()
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Attach friendly 'required' error messages to the profile fields."""
    super(ModelForm, self).__init__(*args, **kwargs)
    required_messages = (
        ('fullname', _("Please enter your name")),
        ('email', _("Please enter your email")),
        ('bio', _("Please tell us about yourself")),
        ('phone', _("Please enter phone number")),
    )
    for field_name, message in required_messages:
        self.fields[field_name].error_messages['required'] = message
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Attach friendly 'required' error messages to the organizer-application fields."""
    super(TeacherForm, self).__init__(*args, **kwargs)
    self.fields['fullname'].error_messages['required'] = _(
        "Please enter your name")
    self.fields['email'].error_messages['required'] = _(
        "Please enter your email")
    self.fields['names_of_co_organizers'].error_messages['required'] = _(
        "Please enter the names of at least one or two more organizers")
    self.fields['bio'].error_messages['required'] = _(
        "Please tell us about why you would like to open a Trade School in your area")
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Attach friendly 'required' error messages to the class-submission fields."""
    super(ModelForm, self).__init__(*args, **kwargs)
    required_messages = (
        ('title', _("Please enter a class title")),
        ('description', _("Please enter a class description")),
        ('max_students', _("Please enter the maximum number of students in your class")),
    )
    for field_name, message in required_messages:
        self.fields[field_name].error_messages['required'] = message
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Tag the title widget with a CSS class and set its required-error message."""
    super(ModelForm, self).__init__(*args, **kwargs)
    self.fields['title'].widget.attrs['class'] = 'barter_item'
    self.fields['title'].error_messages['required'] = _(
        "Barter item cannot be blank")
16,
5,
16,
31,
1344286300
] |
def __init__(self, branch, *args, **kwargs):
    """Store the associated branch before the standard formset initialization.

    :param branch: branch object this formset is bound to.
    """
    self.branch = branch
    super(BaseBarterItemFormSet, self).__init__(*args, **kwargs)
16,
5,
16,
31,
1344286300
] |
def __init__(self, course, *args, **kwargs):
    """Restrict the items choices to the given course's barter items.

    At least one item must be selected; the empty choice is removed.
    """
    super(RegistrationForm, self).__init__(*args, **kwargs)
    self.fields['items'].queryset = BarterItem.objects.filter(
        course=course)
    self.fields['items'].error_messages['required'] = _(
        "Please select at least one item")
    self.fields['items'].empty_label = None
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Attach friendly 'required' error messages to the student registration fields."""
    super(StudentForm, self).__init__(*args, **kwargs)
    self.fields['fullname'].error_messages['required'] = _(
        "Please enter your name")
    self.fields['email'].error_messages['required'] = _(
        "Please enter your email")
    self.fields['phone'].error_messages['required'] = _(
        "Please enter your phone number")
16,
5,
16,
31,
1344286300
] |
def __init__(self, *args, **kwargs):
    """Set a friendly 'required' error message on the feedback content field."""
    super(FeedbackForm, self).__init__(*args, **kwargs)
    self.fields['content'].error_messages['required'] = _(
        "Please enter your feedback")
16,
5,
16,
31,
1344286300
] |
def __init__(self, selected_individuals, fill_creator):
    """Initialize the creation operation.

    :param selected_individuals: individuals carried into this creation
    :param fill_creator: creator used to fill the remaining population slots
    """
    BaseCreation.__init__(self)
    self.__fill_creator = fill_creator
    self.__selected_individuals = selected_individuals
    self.__individuals = []  # accumulated results; starts empty
11,
6,
11,
2,
1446331881
] |
def lookup_pimlico_versions():
    """Return the names of all tagged Pimlico releases.

    Exits the process with an error message if the GitHub API is unreachable.
    The list is reversed from the API's order — presumably so the oldest tag
    comes first; confirm against callers.
    """
    # Use Github API to find all tagged releases
    tag_api_url = "%s/repos/markgw/pimlico/tags" % GITHUB_API
    try:
        tag_response = urlopen(tag_api_url).read().decode("utf-8")
    except Exception as e:
        print("Could not fetch Pimlico release tags from {}: {}".format(tag_api_url, e))
        sys.exit(1)
    tag_data = json.loads(tag_response)
    return [tag["name"] for tag in reversed(tag_data)]
6,
1,
6,
13,
1475829569
] |
def find_config_value(config_path, key, start_in_pipeline=False):
    """Search a config file (and its %% INCLUDE files) for *key*'s value.

    :param config_path: path to the config file (read as UTF-8)
    :param key: key to look up within the [pipeline] section
    :param start_in_pipeline: treat the start of the file as already inside
        the pipeline section (used when following include directives)
    :return: the stripped value string, or None when the key is not found.
    """
    with open(config_path, "r", encoding="utf-8") as f:
        in_pipeline = start_in_pipeline
        for line in f:
            line = line.strip("\n ")
            if in_pipeline and line:
                # Look for the required key in the pipeline section
                # NOTE(review): this branch takes precedence while inside the
                # pipeline section, so later section headings and include
                # directives are consumed as ordinary key lines — confirm
                # this is intended.
                line_key, __, line_value = line.partition("=")
                if line_key.strip() == key:
                    return line_value.strip()
            elif line.startswith("["):
                # Section heading
                # Start looking for keys if we're in the pipeline section
                in_pipeline = line.strip("[]") == "pipeline"
            elif line.upper().startswith("%% INCLUDE"):
                # Found include directive: follow into the included file
                filename = line[10:].strip()  # 10 == len("%% INCLUDE")
                # Get filename relative to current config file
                filename = os.path.join(os.path.dirname(config_path), filename)
                found_value = find_config_value(filename, key, start_in_pipeline=in_pipeline)
                if found_value is not None:
                    return found_value
    # Didn't find the key anywhere
    return
6,
1,
6,
13,
1475829569
] |
def tar_dirname(tar_path):
    """Return the name of the directory at the top of a .tar.gz archive.

    :raises ValueError: if the archive's first member is not a directory.
    """
    with tarfile.open(tar_path, "r:gz") as tar:
        first_member = tar.next()
        if first_member.isdir():
            return first_member.name
    raise ValueError("downloaded tar file was expected to contain a directory, but didn't")
6,
1,
6,
13,
1475829569
] |
def bootstrap(config_file, git=False):
    """Fetch the Pimlico source matching the release named in *config_file*.

    :param config_file: pipeline config whose "release" key pins the version
    :param git: if truthy, clone the Git repository instead of downloading a
        release tarball; a string value names the branch to clone.
    """
    current_dir = os.path.abspath(os.path.dirname(__file__))
    branch_name = git if type(git) is str else "master"
    branch_url = "{}{}/".format(RAW_URL, branch_name)
    if os.path.exists(os.path.join(current_dir, "pimlico")):
        print("Pimlico source directory already exists: delete it if you want to fetch again")
        sys.exit(1)
    # Check the config file to find the version of Pimlico we need
    version = find_config_value(config_file, "release")
    if version is None:
        print("Could not find Pimlico release in config file %s" % config_file)
        sys.exit(1)
    major_version = int(version.partition(".")[0])
    print("Config file requires Pimlico version {}".format(version))
    available_releases = lookup_pimlico_versions()
    bleeding_edge = lookup_bleeding_edge(branch_url)
    tags = available_releases
    # If the bleeding edge version is compatible (same major version) just use that
    if int(bleeding_edge.lstrip("v").partition(".")[0]) == major_version:
        print("Bleeding edge ({}) is compatible".format(bleeding_edge))
        fetch_release = "master"
    else:
        if git:
            print("Error: tried to clone the Git repo instead of fetching a release, but config file is not "
                  "compatible with latest Pimlico version")
            sys.exit(1)
        # Find the latest release that has the same major version
        compatible_tags = [t for t in tags if int(t.lstrip("v").partition(".")[0]) == major_version]
        fetch_release = compatible_tags[-1]
        print("Fetching latest release of major version {}, which is {}".format(major_version, fetch_release))
    if git:
        # Clone the latest version of the code from the Git repository
        # Allow the git kwarg to name a branch to clone
        if type(git) is str:
            args = "--branch {} ".format(git)
        else:
            args = ""
        print("Cloning git repository ({})".format("{} branch".format(git) if type(git) is str else "master"))
        import subprocess
        subprocess.check_call("git clone {}{}".format(args, GIT_URL), shell=True)
    else:
        archive_url = "%s%s.tar.gz" % (DOWNLOAD_URL, fetch_release)
        print("Downloading Pimlico source code from {}".format(archive_url))
        tar_download_path = os.path.join(current_dir, "archive.tar.gz")
        with open(tar_download_path, "wb") as archive_file:
            archive_file.write(urlopen(archive_url).read())
        print("Extracting source code")
        extracted_dirname = tar_dirname(tar_download_path)
        extract(tar_download_path)
        # Extracted source code: remove the archive
        os.remove(tar_download_path)
        # Normalize the extracted directory name to the fixed "pimlico/" path.
        os.rename(os.path.join(current_dir, extracted_dirname), os.path.join(current_dir, "pimlico"))
    print("Pimlico source (%s) is now available in directory pimlico/" % fetch_release)
    # Create symlink to pimlico.sh, so it's easier to run
    print("Creating symlink pimlico.sh for running Pimlico")
    symlink(os.path.join("pimlico", "bin", "pimlico.sh"), "pimlico.sh")
6,
1,
6,
13,
1475829569
] |
def __init__(self):
    """Assemble the combined menu/speller/robot decision state machine.

    State ids are flat integers: 0 is the main menu, the speller's states
    follow from id 1, then the robot's states. state/letters/actions are
    parallel per-state lists with one entry per decision.
    """
    self.number_of_decisions = 8
    speller = config_speller_8.Config()
    robot = config_robot_8.Config()
    self.state = []    # state[i][j]: next state id for decision j in state i
    self.actions = []  # actions[i][j]: action string run on that decision
    self.letters = []  # letters[i][j]: label displayed for that decision
    #MENU
    menu_state = 0
    self.letters.append([u"Speller",u"Robot"
        ,"Switch", "SSVEP", #u"High SSVEP",u"Low SSVEP"
        u"", u"", u"", u""])
    self.actions.append([
        "",
        "start_robot_feedback()",
        "transform_scenario('switch')", #restart_scenario('"+self._high_ssvep_scenario()+"')",
        "transform_scenario('ssvep')", #restart_scenario('"+self._low_ssvep_scenario()+"')",
        "", "", "", ""])
    self.state.append([0]*self.number_of_decisions)
    self._setup_menu()
    zero_state = 1
    #SPELLER
    speller_state = zero_state
    for i, s in enumerate(speller.state):
        # Re-base the speller's state ids into the combined id space.
        self.state.append([x+speller_state for x in s])
        self.actions.append(speller.actions[i])
        self.letters.append(speller.letters[i])
    self.state[zero_state][-1] = 0 #GOTO MENU
    self.actions[zero_state][-1] = "clear()"
    zero_state += len(speller.state)
    #ROBOT
    robot_state = zero_state
    for i, s in enumerate(robot.state):
        # Re-base the robot's state ids into the combined id space.
        self.state.append([x+robot_state for x in s])
        self.actions.append(robot.actions[i])
        self.letters.append(robot.letters[i])
    self.state[zero_state][-1] = 0 #GOTO MENU
    self.actions[zero_state][-1] = "stop_robot_feedback()"
    zero_state += len(robot.state)
    # Wire the menu's first two decisions to the sub-machines' entry states.
    self.state[menu_state][0] = speller_state
    self.state[menu_state][1] = robot_state
    self.number_of_states = zero_state
    self.states_configs = ['state', 'letters', 'actions', 'letters_solver', 'actions_solver']
    self.other_configs = []
    self.letters_solver = self.number_of_states * [self.number_of_decisions * [""]]
    self.actions_solver = self.number_of_states * [self.number_of_decisions * [""]]
12,
9,
12,
3,
1407695699
] |
def __init__(self):
    """Build the main window: settings, logging, cutter widget and chrome."""
    super(MainWindow, self).__init__()
    self.video, self.resizeTimer = '', 0
    self.parse_cmdline()
    self.init_settings()
    self.init_logger()
    self.init_scale()
    self.init_cutter()
    self.setWindowTitle(qApp.applicationName())
    self.setContentsMargins(0, 0, 0, 0)
    self.statusBar().showMessage('Ready')
    self.statusBar().setStyleSheet('border: none; padding: 0; margin: 0;')
    self.setAcceptDrops(True)
    self.show()
    # Taskbar progress is only wired up on Windows versions that support it.
    if sys.platform == 'win32' and TaskbarProgress.isValidWinVer():
        self.win_taskbar_button = QWinTaskbarButton(self)
        self.win_taskbar_button.setWindow(self.windowHandle())
        self.win_taskbar_button.progress().setVisible(True)
        self.win_taskbar_button.progress().setValue(0)
    self.console.setGeometry(int(self.x() - (self.width() / 2)), self.y() + int(self.height() / 3), 750, 300)
    # With no file on the command line, fall back to the autosaved temp project.
    if not self.video and os.path.isfile(os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)):
        self.video = os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE)
    if self.video:
        self.file_opener(self.video)
1224,
120,
1224,
257,
1472670250
] |
def file_opener(self, filename: str) -> None:
    """Open a media file or a VidCutter project (.vcp / temp autosave).

    :param filename: path to the media file or project file to load.
    """
    try:
        if QFileInfo(filename).suffix() == 'vcp':
            self.cutter.openProject(project_file=filename)
            # The autosaved temp project is one-shot: remove it once loaded.
            if filename == os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE):
                os.remove(os.path.join(QDir.tempPath(), MainWindow.TEMP_PROJECT_FILE))
        else:
            self.cutter.loadMedia(filename)
    except (FileNotFoundError, PermissionError) as e:
        # Bug fix: sys.exc_info()[0] is the exception *class*, not a string;
        # QMessageBox.critical requires a str message, so format the error.
        QMessageBox.critical(self, 'Error loading file', str(e))
        logging.exception('Error loading file')
        qApp.restoreOverrideCursor()
        self.restart()
1224,
120,
1224,
257,
1472670250
] |
def get_size(mode: str='NORMAL') -> QSize:
    """Return the window dimensions for the given size preset.

    :param mode: one of 'LOW', 'NORMAL' or 'HIGH'.
    :raises KeyError: for an unrecognized mode.
    """
    if mode == 'LOW':
        return QSize(800, 425)
    if mode == 'NORMAL':
        return QSize(930, 680)
    if mode == 'HIGH':
        return QSize(1850, 1300)
    raise KeyError(mode)
1224,
120,
1224,
257,
1472670250
] |
def init_settings(self) -> None:
    """Locate/create the INI settings file and restore persisted UI state."""
    try:
        settings_path = self.get_app_config_path()
    except AttributeError:
        # Helper unavailable: fall back to conventional per-platform paths.
        if sys.platform == 'win32':
            settings_path = os.path.join(QDir.homePath(), 'AppData', 'Local', qApp.applicationName().lower())
        elif sys.platform == 'darwin':
            settings_path = os.path.join(QDir.homePath(), 'Library', 'Preferences',
                                         qApp.applicationName().lower())
        else:
            settings_path = os.path.join(QDir.homePath(), '.config', qApp.applicationName().lower())
    os.makedirs(settings_path, exist_ok=True)
    settings_file = '{}.ini'.format(qApp.applicationName().lower())
    self.settings = QSettings(os.path.join(settings_path, settings_file), QSettings.IniFormat)
    # Restore saved window geometry/state when present.
    if self.settings.value('geometry') is not None:
        self.restoreGeometry(self.settings.value('geometry'))
    if self.settings.value('windowState') is not None:
        self.restoreState(self.settings.value('windowState'))
    self.theme = self.settings.value('theme', 'light', type=str)
    self.startupvol = self.settings.value('volume', 100, type=int)
    self.verboseLogs = self.settings.value('verboseLogs', 'off', type=str) in {'on', 'true'}
1224,
120,
1224,
257,
1472670250
] |
def log_uncaught_exceptions(cls, exc, tb) -> None:
    """Log an unhandled exception (sys.excepthook-compatible signature)."""
    formatted_tb = ''.join(traceback.format_tb(tb))
    logging.critical(formatted_tb)
    logging.critical('{0}: {1}'.format(cls, exc))
1224,
120,
1224,
257,
1472670250
] |
def init_cutter(self) -> None:
    """Build the central VideoCutter widget, wire its error signal to the
    window's handler and install the application icon."""
    cutter = VideoCutter(self)
    cutter.errorOccurred.connect(self.errorHandler)
    self.cutter = cutter
    self.setCentralWidget(cutter)
    qApp.setWindowIcon(VideoCutter.getAppIcon(encoded=False))
1224,
120,
1224,
257,
1472670250
] |
def get_bitness() -> int:
    """Report the interpreter's pointer width in bits (32 or 64)."""
    from struct import calcsize
    pointer_bytes = calcsize('P')
    return 8 * pointer_bytes
1224,
120,
1224,
257,
1472670250
] |
def reboot(self) -> None:
    """Persist state and exit the event loop with the restart code.

    Saves the open project (only when media is loaded) and the settings,
    then exits with EXIT_CODE_REBOOT so the launcher relaunches the app.
    """
    if self.cutter.mediaAvailable:
        self.cutter.saveProject(reboot=True)
    self.save_settings()
    qApp.exit(MainWindow.EXIT_CODE_REBOOT)
1224,
120,
1224,
257,
1472670250
] |
def lock_gui(self, locked: bool=True) -> None:
    """Toggle the busy state of the window.

    While locked the UI is disabled and a wait cursor is shown; unlocking
    restores interactivity. Pending Qt events are pumped either way.
    """
    if not locked:
        self.setEnabled(True)
        self.cutter.cliplist.setEnabled(True)
        qApp.restoreOverrideCursor()
    else:
        qApp.setOverrideCursor(Qt.WaitCursor)
        self.cutter.cliplist.setEnabled(False)
        self.setEnabled(False)
    qApp.processEvents()
1224,
120,
1224,
257,
1472670250
] |
def flatpak(self) -> bool:
    """True when running as a Flatpak bundle (Linux, installed under /app/)."""
    if not sys.platform.startswith('linux'):
        return False
    return QFileInfo(__file__).absolutePath().startswith('/app/')
1224,
120,
1224,
257,
1472670250
] |
def get_path(path: str=None, override: bool=False) -> str:
    """Resolve *path* to a usable location.

    By default the path is mapped into the Qt resource system (':path').
    With override=True it is resolved on disk, relative to the PyInstaller
    bundle directory when frozen, else to the script's directory.
    """
    if not override:
        return ':{}'.format(path)
    if getattr(sys, 'frozen', False) and getattr(sys, '_MEIPASS', False):
        # noinspection PyProtectedMember, PyUnresolvedReferences
        return os.path.join(sys._MEIPASS, path)
    base = os.path.dirname(os.path.realpath(sys.argv[0]))
    return os.path.join(base, path)
1224,
120,
1224,
257,
1472670250
] |
def errorHandler(self, msg: str, title: str=None) -> None:
    """Restore the normal cursor, show a modal error dialog (default title
    'An error occurred') and log the message at ERROR level."""
    qApp.restoreOverrideCursor()
    dialog_title = 'An error occurred' if title is None else title
    QMessageBox.critical(self, dialog_title, msg, QMessageBox.Ok)
    logging.error(msg)
1224,
120,
1224,
257,
1472670250
] |
def cleanup():
    """Best-effort removal of the app's temporary working folder."""
    shutil.rmtree(MainWindow.WORKING_FOLDER, ignore_errors=True)
1224,
120,
1224,
257,
1472670250
] |
def mousePressEvent(self, event: QMouseEvent) -> None:
    """On a left click with media loaded: clear the clip selection, drop
    focus from the time/frame counters and dismiss any notification
    popup (popup errors are deliberately swallowed)."""
    if event.button() == Qt.LeftButton and self.cutter.mediaAvailable:
        self.cutter.cliplist.clearSelection()
        self.cutter.timeCounter.clearFocus()
        self.cutter.frameCounter.clearFocus()
        # noinspection PyBroadException
        try:
            notify = getattr(self.cutter, 'notify', None)
            if notify is not None:
                notify.close()
        except BaseException:
            pass
    event.accept()
1224,
120,
1224,
257,
1472670250
] |
def dropEvent(self, event: QDropEvent) -> None:
    """Open the first dropped file via file_opener and accept the drop."""
    filename = event.mimeData().urls()[0].toLocalFile()
    self.file_opener(filename)
    event.accept()
1224,
120,
1224,
257,
1472670250
] |
def timerEvent(self, event: QTimerEvent) -> None:
    """Debounced resize handler: reload the seek-slider thumbnails once,
    then cancel the one-shot timer.

    AttributeError is swallowed because the cutter/slider may not exist
    yet while the window is still being constructed.
    """
    try:
        self.cutter.seekSlider.reloadThumbs()
        self.killTimer(self.resizeTimer)
        self.resizeTimer = 0
    except AttributeError:
        pass
1224,
120,
1224,
257,
1472670250
] |
def main():
    """Application entry point.

    Configures Qt attributes (HiDPI, shared GL contexts), builds the
    single-instance application and main window, runs the event loop,
    and relaunches the process when the special reboot code is returned.
    """
    qt_set_sequence_auto_mnemonic(False)
    if hasattr(Qt, 'AA_EnableHighDpiScaling'):
        QGuiApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    if hasattr(Qt, 'AA_Use96Dpi'):
        QGuiApplication.setAttribute(Qt.AA_Use96Dpi, True)
    if hasattr(Qt, 'AA_ShareOpenGLContexts'):
        surface = QSurfaceFormat()
        surface.setDepthBufferSize(24)
        QSurfaceFormat.setDefaultFormat(surface)
        QGuiApplication.setAttribute(Qt.AA_ShareOpenGLContexts, True)
    app = SingleApplication(vidcutter.__appid__, sys.argv)
    app.setApplicationName(vidcutter.__appname__)
    app.setApplicationVersion(vidcutter.__version__)
    app.setDesktopFileName(vidcutter.__desktopid__)
    app.setOrganizationDomain(vidcutter.__domain__)
    app.setQuitOnLastWindowClosed(True)
    window = MainWindow()
    window.stylename = app.style().objectName().lower()
    app.setActivationWindow(window)
    app.messageReceived.connect(window.file_opener)
    app.aboutToQuit.connect(MainWindow.cleanup)
    status = app.exec_()
    if status == MainWindow.EXIT_CODE_REBOOT:
        # Relaunch ourselves; on Windows close the player widget first.
        if sys.platform == 'win32':
            if hasattr(window.cutter, 'mpvWidget'):
                window.close()
            QProcess.startDetached('"{}"'.format(qApp.applicationFilePath()))
        else:
            QProcess.startDetached(' '.join(sys.argv))
    sys.exit(status)
1224,
120,
1224,
257,
1472670250
] |
def index_path(tree_name):
    """Return the configured on-disk index directory for *tree_name*.

    Raises KeyError when the tree is not present in the config.
    """
    return config['trees'][tree_name]['index_path']
169,
56,
169,
5,
1426482198
] |
def parse_path_filter(filter):
    """Translate a glob-ish path filter into a regular-expression string.

    Supported syntax: '*' matches within one path segment, '**' matches
    across segments, '?' matches a single character, and '{a,b}' expands
    to an alternation. Literal '(', ')', '|' and '.' are escaped.
    """
    for ch in ('(', ')', '|', '.'):
        filter = filter.replace(ch, '\\' + ch)

    def expand_star(m):
        # '**' crosses path separators, a single '*' does not.
        return '[^/]*' if m.group(0) == '*' else '.*'
    filter = re.sub(r'\*\*|\*', expand_star, filter)
    filter = filter.replace('?', '.')

    def expand_braces(m):
        return '(' + '|'.join(m.group(1).split(',')) + ')'
    filter = re.sub('{([^}]*)}', expand_braces, filter)
    return filter
169,
56,
169,
5,
1426482198
] |
def parse_search(searchString):
    """Split a search string into a dict of query components.

    Recognized prefixes: path:, pathre:, context:, symbol:, re:, text:
    and id:. Any other token starts a 'default' regex-escaped text
    search. The re:/text:/default cases consume the remainder of the
    string (note the break).
    """
    result = {}
    pieces = searchString.split(' ')
    for i, piece in enumerate(pieces):
        if piece.startswith('path:'):
            result['pathre'] = parse_path_filter(piece[len('path:'):])
        elif piece.startswith('pathre:'):
            result['pathre'] = piece[len('pathre:'):]
        elif piece.startswith('context:'):
            # Clamp the requested context to the range [0, 10].
            try:
                requested = int(piece[len('context:'):])  # may raise
                result['context_lines'] = min(10, max(0, requested))
            except:
                pass
        elif piece.startswith('symbol:'):
            result['symbol'] = ' '.join(pieces[i:])[len('symbol:'):].strip().replace('.', '#')
        elif piece.startswith('re:'):
            result['re'] = (' '.join(pieces[i:]))[len('re:'):]
            break
        elif piece.startswith('text:'):
            result['re'] = escape_regex((' '.join(pieces[i:]))[len('text:'):])
            break
        elif piece.startswith('id:'):
            result['id'] = piece[len('id:'):]
        else:
            result['default'] = escape_regex(' '.join(pieces[i:]))
            break
    return result
169,
56,
169,
5,
1426482198
] |
def __init__(self):
    """Initialize empty search-result accumulators."""
    self.results = []            # batches appended via add_results()
    self.qualified_results = []  # presumably filled by add_qualified_results() — verify
    self.pathre = None           # optional path-filter regex; usage not visible here — TODO confirm
    self.compiled = {}           # pathkind -> qkind -> path mapping consumed by sort_compiled()
169,
56,
169,
5,
1426482198
] |
def add_results(self, results):
    """Append one batch of results to the accumulator."""
    self.results.append(results)
169,
56,
169,
5,
1426482198
] |
def categorize_path(self, path):
    '''
    Classify *path* into one of the top-level result groups:
    'generated', 'thirdparty', 'test' or 'normal'.

    These are hardcoded heuristics that probably could be better defined
    in the `config.json` metadata, with a means for trees like gecko to
    leverage in-tree build meta-information (moz.build, mochitest.ini
    files, etc.).
    '''
    def looks_like_test(p):
        # '/unit/' and '/androidTest/' are the only markers that do not
        # contain the lowercase substring 'test', so check them first.
        if '/unit/' in p or '/androidTest/' in p:
            return True
        # Cheap rejection: every remaining marker contains 'test'.
        if 'test' not in p:
            return False
        markers = ('/test/', '/tests/', '/mochitest/', 'testing/',
                   '/jsapi-tests/', '/reftests/', '/reftest/',
                   '/crashtests/', '/crashtest/',
                   '/googletest/', '/gtest/', '/gtests/', '/imptests/')
        return any(m in p for m in markers)

    if '__GENERATED__' in path:
        return 'generated'
    if path.startswith('third_party/'):
        return "thirdparty"
    if looks_like_test(path):
        return 'test'
    return 'normal'
169,
56,
169,
5,
1426482198
] |
def sort_compiled(self):
    '''
    Traverse the `compiled` state in `path_precedences` order, and then
    its "qkind" children in their inherent order (which is derived from
    the use of `key_precedences` by `get()`), transforming and
    propagating the results, applying a `max_count` result limit.

    Additional transformations that are performed:
    - result de-duplication so that a given (path, line) tuple can only
      be emitted once. Because of the intentional order of
      `key_precedences` this means that semantic matches should preclude
      their results from being duplicated in the more naive text search
      results.
    - line_modifier's bounds fixups as mentioned in `compile_result` are
      applied, which helps the bolding logic in the display logic on the
      (web) client.
    '''
    count = 0
    # (path, lno) tuples already emitted — used for de-duplication.
    line_hash = {}
    result = collections.OrderedDict()
    for pathkind in self.path_precedences:
        for qkind in self.compiled.get(pathkind, []):
            paths = list(self.compiled[pathkind][qkind].keys())
            paths.sort()
            for path in paths:
                # see `compile_result` docs for line_modifier above.
                (lines, line_modifier) = self.compiled[pathkind][qkind][path]
                lines.sort(key=lambda l: l['lno'])
                lines_out = []
                for line in lines:
                    lno = line['lno']
                    key = (path, lno)
                    if key in line_hash:
                        continue
                    line_hash[key] = True
                    if line_modifier:
                        line_modifier(line)
                    lines_out.append(line)
                    count += 1
                    if count == self.max_count:
                        break
                # 'Files' entries are kept even when every line was a dup.
                if lines_out or qkind == 'Files':
                    l = result.setdefault(pathkind, collections.OrderedDict()).setdefault(qkind, [])
                    l.append({'path': path, 'lines': lines_out})
                # The limit has to be re-checked at every nesting level to
                # fully unwind the loops once it is reached.
                if count == self.max_count:
                    break
            if count == self.max_count:
                break
        if count == self.max_count:
            break
    return result
169,
56,
169,
5,
1426482198
] |
def search_files(tree_name, path):
    """Grep the tree's repo/objdir file lists for *path* (case-insensitive
    extended regex) and return up to 1000 {'path', 'lines'} results."""
    index = index_path(tree_name)
    pathFile = os.path.join(index, 'repo-files')
    objdirFile = os.path.join(index, 'objdir-files')
    try:
        # LC_CTYPE=C makes grep much faster than a UTF-8 locale.
        output = subprocess.check_output(
            ['grep', '-Eih', path, pathFile, objdirFile],
            env={'LC_CTYPE': 'C'}, universal_newlines=True)
    except subprocess.CalledProcessError:
        return []
    matches = output.strip().split('\n')
    return [{'path': f, 'lines': []} for f in matches][:1000]
169,
56,
169,
5,
1426482198
] |
def identifier_search(search, tree_name, needle, complete, fold_case):
    """Look up *needle* as a (possibly qualified) identifier and feed the
    cross-reference hits into *search* as qualified results.

    Prefix searches with a final segment shorter than 3 characters are
    rejected to avoid flooding the results; at most ~500 identifiers are
    expanded.
    """
    needle = re.sub(r'\\(.)', r'\1', needle)  # strip escaping backslashes
    segments = re.split(r'\.|::', needle)
    if not complete and len(segments[-1]) < 3:
        return {}

    def line_modifier(line):
        # Shrink the highlight bounds to just the prefix the user typed:
        # searching "foo::bar" and matching "foo::bartab" should only
        # bold "bar".
        if 'bounds' in line:
            (start, end) = line['bounds']
            line['bounds'] = [start, start + len(segments[-1])]

    ids = identifiers.lookup(tree_name, needle, complete, fold_case)
    for (i, (qualified, sym)) in enumerate(ids):
        if i > 500:
            break
        q = demangle(sym)
        if q == sym:
            q = qualified
        results = crossrefs.lookup(tree_name, sym)
        search.add_qualified_results(q, results, line_modifier)
169,
56,
169,
5,
1426482198
] |
def do_GET(self):
    """Serve a GET request in a forked child process.

    The parent waits for the child under a 15-second SIGALRM watchdog:
    on timeout the child is SIGKILLed, and a timed-out or non-zero child
    exit produces a 504. The child runs process_request() and always
    terminates via os._exit so it never falls back into the server loop.

    NOTE(review): the alarm is never cancelled (signal.alarm(0)) after a
    fast child exits, so a stale SIGALRM could fire later — verify.
    """
    pid = os.fork()
    if pid:
        # Parent process
        log('request(handled by %d) %s', pid, self.path)
        # One-element list so the nested handler can mutate the flag.
        timedOut = [False]
        def handler(signum, frame):
            log('timeout %d, killing', pid)
            timedOut[0] = True
            os.kill(pid, signal.SIGKILL)
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(15)
        t = time.time()
        while True:
            try:
                (pid2, status) = os.waitpid(pid, 0)
                break
            except OSError as e:
                # waitpid can be interrupted by the SIGALRM; retry.
                if e.errno != errno.EINTR: raise e
        failed = timedOut[0]
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            log('error pid %d - %f', pid, time.time() - t)
            failed = True
        else:
            log('finish pid %d - %f', pid, time.time() - t)
        if failed:
            self.send_response(504)
            self.end_headers()
    else:
        # Child process
        try:
            self.process_request()
            os._exit(0)
        except:
            e = traceback.format_exc()
            log('exception\n%s', e)
            os._exit(1)
169,
56,
169,
5,
1426482198
] |
def process_request(self):
    """Route a GET request.

    /<tree>/search renders search results (HTML template or raw JSON,
    depending on the Accept header); /<tree>/define redirects (301) to
    the symbol's first definition; anything else is served as a static
    file by the base handler.
    """
    url = six.moves.urllib.parse.urlparse(self.path)
    # Drop empty components caused by duplicate/trailing slashes.
    path_elts = [elt for elt in url.path.split('/') if elt != '']
    if len(path_elts) >= 2 and path_elts[1] == 'search':
        tree_name = path_elts[0]
        query = six.moves.urllib.parse.parse_qs(url.query)
        j = get_json_search_results(tree_name, query)
        if 'json' in self.headers.get('Accept', ''):
            self.generateJson(j)
        else:
            # Neutralize sequences that could escape the inline <script>
            # context when the JSON is embedded in the HTML template.
            j = j.replace("</", "<\\/").replace("<script", "<\\script").replace("<!", "<\\!")
            template = os.path.join(index_path(tree_name), 'templates/search.html')
            self.generateWithTemplate({'{{BODY}}': j, '{{TITLE}}': 'Search'}, template)
    elif len(path_elts) >= 2 and path_elts[1] == 'define':
        tree_name = path_elts[0]
        query = six.moves.urllib.parse.parse_qs(url.query)
        symbol = query['q'][0]
        results = crossrefs.lookup(tree_name, symbol)
        definition = results['Definitions'][0]
        lineno = definition['lines'][0]['lno']
        target = '/' + tree_name + '/source/' + definition['path'] + '#' + str(lineno)
        self.send_response(301)
        self.send_header("Location", target)
        self.end_headers()
    else:
        return six.moves.SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
169,
56,
169,
5,
1426482198
] |
def generateWithTemplate(self, replacements, templateFile):
    """Render *templateFile* by substituting every placeholder in
    *replacements* ({placeholder: value}) and send the result as a
    200 text/html response.

    Fix: the template is now read via a context manager so the file
    handle is closed deterministically instead of leaking until GC.
    """
    with open(templateFile) as fh:
        output = fh.read()
    for (k, v) in replacements.items():
        output = output.replace(k, v)
    databytes = output.encode('utf-8')
    self.send_response(200)
    self.send_header("Vary", "Accept")
    self.send_header("Content-type", "text/html;charset=utf-8")
    self.send_header("Content-Length", str(len(databytes)))
    self.end_headers()
    self.wfile.write(databytes)
169,
56,
169,
5,
1426482198
] |
def handle_spider_item(self, data, url_object):
    """Immediately process the spider item (no queueing or batching)."""
    return self.process(data, url_object)
12,
1,
12,
1,
1405072144
] |
def process(self, data, url_object):
    """Process XML data.

    The document is converted to JSON and handed to the text processor.
    If the XML is not well-formed (ExpatError), it is treated as HTML by
    deferring to the parent class.
    """
    logging.info("Process XML %s" % url_object.url)
    try:
        data = json.dumps(xmltodict.parse(data))
        return self.text_processor.process(data, url_object)
    except ExpatError:
        return super(XmlProcessor, self).process(data, url_object)
12,
1,
12,
1,
1405072144
] |
def main():
    """Demo entry point: build the people list, then run both reports."""
    PEOPLE = insert_people()
    sum_salary_all(PEOPLE)
    list_people_by_city(PEOPLE)
2,
12,
2,
3,
1472811971
] |
def sum_salary_all(list_people):
    """Run sum_salary_single for every person in *list_people*."""
    for person in list_people:
        sum_salary_single(person)
2,
12,
2,
3,
1472811971
] |
def list_people_by_city(list_people):
    """Return a new list of the people, sorted.

    Bug fix: the original assigned ``list_people.sort()`` — which sorts
    in place and returns None — to an unused local, so the function had
    no usable result. It now returns a new sorted list and leaves the
    input untouched.

    NOTE(review): sorting by a city attribute (e.g. ``key=lambda p:
    p.city``) is probably the real intent — confirm the person record's
    shape and add the key.
    """
    return sorted(list_people)
2,
12,
2,
3,
1472811971
] |
def setUp(self):
    """Create the users, warehouses, cycle-count rules, location, cycle
    count and product shared by the stock_cycle_count tests."""
    super(TestStockCycleCount, self).setUp()
    # Model shortcuts:
    self.res_users_model = self.env["res.users"]
    self.cycle_count_model = self.env["stock.cycle.count"]
    self.stock_cycle_count_rule_model = self.env["stock.cycle.count.rule"]
    self.inventory_model = self.env["stock.inventory"]
    self.stock_location_model = self.env["stock.location"]
    self.stock_move_model = self.env["stock.move"]
    self.stock_warehouse_model = self.env["stock.warehouse"]
    self.product_model = self.env["product.product"]
    self.quant_model = self.env["stock.quant"]
    self.move_model = self.env["stock.move"]
    # Shared records:
    self.company = self.env.ref("base.main_company")
    self.partner = self.env.ref("base.res_partner_1")
    self.g_stock_manager = self.env.ref("stock.group_stock_manager")
    self.g_stock_user = self.env.ref("stock.group_stock_user")
    # Create users:
    self.manager = self._create_user(
        "user_1", [self.g_stock_manager], self.company
    ).id
    self.user = self._create_user("user_2", [self.g_stock_user], self.company).id
    # Create warehouses:
    self.big_wh = self.stock_warehouse_model.create(
        {"name": "BIG", "code": "B", "cycle_count_planning_horizon": 30}
    )
    self.small_wh = self.stock_warehouse_model.create(
        {"name": "SMALL", "code": "S"}
    )
    # Create rules (one of each rule_type):
    self.rule_periodic = self._create_stock_cycle_count_rule_periodic(
        self.manager, "rule_1", [2, 7]
    )
    self.rule_turnover = self._create_stock_cycle_count_rule_turnover(
        self.manager, "rule_2", [100]
    )
    self.rule_accuracy = self._create_stock_cycle_count_rule_accuracy(
        self.manager, "rule_3", [5], self.big_wh.view_location_id.ids
    )
    self.zero_rule = self._create_stock_cycle_count_rule_zero(
        self.manager, "rule_4"
    )
    # Configure warehouses: attach all four rules to the big warehouse.
    self.rule_ids = [
        self.rule_periodic.id,
        self.rule_turnover.id,
        self.rule_accuracy.id,
        self.zero_rule.id,
    ]
    self.big_wh.write({"cycle_count_rule_ids": [(6, 0, self.rule_ids)]})
    # Create a location:
    self.count_loc = self.stock_location_model.create(
        {"name": "Place", "usage": "production"}
    )
    self.stock_location_model._parent_store_compute()
    # Create a cycle count (as the manager user):
    self.cycle_count_1 = self.cycle_count_model.with_user(self.manager).create(
        {
            "name": "Test cycle count",
            "cycle_count_rule_id": self.rule_periodic.id,
            "location_id": self.count_loc.id,
        }
    )
    # Create a product:
    self.product1 = self.product_model.create(
        {"name": "Test Product 1", "type": "product", "default_code": "PROD1"}
    )
248,
611,
248,
91,
1402934883
] |
def _create_stock_cycle_count_rule_periodic(self, uid, name, values):
    """Create a 'periodic' cycle-count rule as *uid*.

    *values* is [qty_per_period, count_period].
    """
    vals = {
        "name": name,
        "rule_type": "periodic",
        "periodic_qty_per_period": values[0],
        "periodic_count_period": values[1],
    }
    return self.stock_cycle_count_rule_model.with_user(uid).create(vals)
248,
611,
248,
91,
1402934883
] |
def _create_stock_cycle_count_rule_accuracy(self, uid, name, values, zone_ids):
    """Create an 'accuracy' cycle-count rule as *uid*, restricted to the
    given location ids. *values* is [accuracy_threshold].
    """
    vals = {
        "name": name,
        "rule_type": "accuracy",
        "accuracy_threshold": values[0],
        "apply_in": "location",
        "location_ids": [(6, 0, zone_ids)],
    }
    return self.stock_cycle_count_rule_model.with_user(uid).create(vals)
248,
611,
248,
91,
1402934883
] |
def test_cycle_count_planner(self):
    """Tests creation of cycle counts.

    Exercises the warehouse cron planner: pre-existing draft counts get
    their deadline updated, new counts are planned for rule locations,
    and a zero-confirmation count is created when a location runs out
    of stock.
    """
    # Common rules:
    wh = self.big_wh
    locs = self.stock_location_model
    for rule in self.big_wh.cycle_count_rule_ids:
        locs += wh._search_cycle_count_locations(rule)
    locs = locs.exists()  # remove duplicated locations.
    counts = self.cycle_count_model.search([("location_id", "in", locs.ids)])
    self.assertFalse(counts, "Existing cycle counts before execute planner.")
    date_pre_existing_cc = datetime.today() + timedelta(days=30)
    loc = locs.filtered(lambda l: l.usage != "view")[0]
    # Draft count whose deadline the cron is expected to reschedule.
    pre_existing_count = self.cycle_count_model.create(
        {
            "name": "To be cancelled when running cron job.",
            "cycle_count_rule_id": self.rule_periodic.id,
            "location_id": loc.id,
            "date_deadline": date_pre_existing_cc,
        }
    )
    self.assertEqual(
        pre_existing_count.state, "draft", "Testing data not generated properly."
    )
    date = datetime.today() - timedelta(days=1)
    self.inventory_model.create(
        {
            "name": "Pre-existing inventory",
            "location_ids": [(4, loc.id)],
            "date": date,
        }
    )
    # Seed stock so a move into `loc` can be reserved and done.
    self.quant_model.create(
        {
            "product_id": self.product1.id,
            "location_id": self.count_loc.id,
            "quantity": 1.0,
        }
    )
    move1 = self.stock_move_model.create(
        {
            "name": "Pre-existing move",
            "product_id": self.product1.id,
            "product_uom_qty": 1.0,
            "product_uom": self.product1.uom_id.id,
            "location_id": self.count_loc.id,
            "location_dest_id": loc.id,
        }
    )
    move1._action_confirm()
    move1._action_assign()
    move1.move_line_ids[0].qty_done = 1.0
    move1._action_done()
    wh.cron_cycle_count()
    self.assertNotEqual(
        pre_existing_count.date_deadline,
        date_pre_existing_cc,
        "Date of pre-existing cycle counts has not been " "updated.",
    )
    counts = self.cycle_count_model.search([("location_id", "in", locs.ids)])
    self.assertTrue(counts, "Cycle counts not planned")
    # Zero-confirmations: none expected while the location has stock.
    count = self.cycle_count_model.search(
        [
            ("location_id", "=", loc.id),
            ("cycle_count_rule_id", "=", self.zero_rule.id),
        ]
    )
    self.assertFalse(count, "Unexpected zero confirmation.")
    # Empty the location to trigger the zero-confirmation rule.
    move2 = self.move_model.create(
        {
            "name": "make the locations to run out of stock.",
            "product_id": self.product1.id,
            "product_uom_qty": 1.0,
            "product_uom": self.product1.uom_id.id,
            "location_id": loc.id,
            "location_dest_id": self.count_loc.id,
        }
    )
    move2._action_confirm()
    move2._action_assign()
    move2.move_line_ids[0].qty_done = 1.0
    move2._action_done()
    count = self.cycle_count_model.search(
        [
            ("location_id", "=", loc.id),
            ("cycle_count_rule_id", "=", self.zero_rule.id),
        ]
    )
    self.assertTrue(count, "Zero confirmation not being created.")
248,
611,
248,
91,
1402934883
] |
def test_view_methods(self):
    """Tests the methods used to handle views.

    Covers the inventory-adjustment smart button counter, rule
    description computation, and the location onchange that derives the
    warehouse for accuracy rules.
    """
    self.cycle_count_1.action_create_inventory_adjustment()
    self.cycle_count_1.sudo().action_view_inventory()
    inv_count = self.cycle_count_1.inventory_adj_count
    self.assertEqual(inv_count, 1, "View method failing.")
    rules = [
        self.rule_periodic,
        self.rule_turnover,
        self.rule_accuracy,
        self.zero_rule,
    ]
    # Every rule type must produce a human-readable description.
    for r in rules:
        r._compute_rule_description()
        self.assertTrue(r.rule_description, "No description provided")
    self.rule_accuracy._onchange_locaton_ids()
    self.assertEqual(
        self.rule_accuracy.warehouse_ids.ids,
        self.big_wh.ids,
        "Rules defined for zones are not getting the right " "warehouse.",
    )
248,
611,
248,
91,
1402934883
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.