Dataset columns (per-record fields, in order):

commit        stringlengths   min 40   max 40
subject       stringlengths   min 1    max 3.25k
old_file      stringlengths   min 4    max 311
new_file      stringlengths   min 4    max 311
old_contents  stringlengths   min 0    max 26.3k
lang          stringclasses   3 values
proba         float64         min 0    max 1
diff          stringlengths   min 0    max 7.82k
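A minimal sketch of reading records with this schema, assuming the dump is exported as JSON Lines with the field names above; the filename commits.jsonl and the export format are assumptions, not part of the original dump:

import json

# Each JSONL row is one record: a commit hash, its subject, the touched
# file paths, the pre-change file contents, a language tag, a float64
# score in [0, 1], and a compact encoded diff.
with open("commits.jsonl") as fh:  # hypothetical export filename
    for line in fh:
        row = json.loads(line)
        print(row["commit"][:8], row["lang"], row["subject"])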
56ff187a56d7831325bf88fc68124f7b9bca2564
fix #33, reload on tsconfig.json save, fixfix
lib/tsconfiglint/tsconfigglobexpand.py
lib/tsconfiglint/tsconfigglobexpand.py
# coding=utf8

import os
import glob
import json
from subprocess import Popen, PIPE, TimeoutExpired

from .TsconfigLinter import check_tsconfig
# from pathlib import Path # not avalilable in python 3.3 (only 3.4)
from ..utils.debug import Debug
from ..utils.pathutils import get_expandglob_path, default_node_path
from ..utils.osutils import get_kwargs
from ..utils.disabling import set_tsglobexpansion_disabled, set_tsglobexpansion_enabled, is_tsglobexpansion_disabled

# Expanding is now done via javascript expandglob.js
# There are too many differences between the glob implementations, and pathutils
# are not available for now
# Second reason for pure js solution: python shuffles the dicts keys in each iteration
#
# [1] https://github.com/TypeStrong/atom-typescript/issues/172
# [2] https://github.com/TypeStrong/atom-typescript/blob/master/docs/tsconfig.md
# [3] https://github.com/TypeStrong/atom-typescript/blob/master/lib/main/tsconfig/tsconfig.ts
# [4] https://github.com/anodynos/node-glob-expand/blob/master/source/code/expand.coffee


def expand_filesglob(linter):
    """ This mimics the filesGlob behaviour of atom-typescript [2]
        If tsconfig.json has no HardErrors, it executes bin/expandglob.js
        and reloads linter.view from disk.
        This operates on the file contents, so the file should have been saved before.
        Returns immediately if not linted or linter is None
        Returns True if the filesGlob has been expanded
        Returns False if there was a linter error, so no expansion has been done """
    # Expanding?
    if is_tsglobexpansion_disabled():
        return False
    if linter is None or not linter or not linter.linted:
        return False
    if len(linter.harderrors) > 0:
        return False
    if len(linter.softerrors) != linter.numerrors:
        return False
    if linter.content == "":
        return False
    if "filesGlob" not in linter.tsconfig:
        return False

    # Expand!
    project_dir = os.path.dirname(linter.view.file_name())
    file_list = _expand_globs_with_javascript(project_dir, linter)
    Debug("tsconfig.json", "fileGlobs expaned")

    # reload file
    linter.view.run_command("revert")

    # lint again, so the soft errors are still displayed
    check_tsconfig(linter.view)
    return True


def _expand_globs_with_javascript(project_dir, linter):
    """ use the nodejs script bin/expandglob.js to expand the glob entries
        in the already saved file tsconfig.json in project_dir.
        Returns the files list, but also CHANGES THE DISK CONTENTS.
        We use javascript here, because python shuffles the dicts keys
        in each iteration """
    try:
        node_path = None
        if "ArcticTypescript" in linter.tsconfig:
            if "node_path" in linter.tsconfig["ArcticTypescript"]:
                node_path = linter.tsconfig["ArcticTypescript"]["node_path"]
        node_path = default_node_path(node_path)
        expandglob_path = get_expandglob_path()
        cwd = os.path.abspath(project_dir)
        cmdline = [node_path, expandglob_path]
        kwargs = get_kwargs()
        try:
            # EXECUTE
            expand_glob_process = Popen(cmdline, stdin=PIPE, stdout=PIPE,
                                        cwd=cwd, **kwargs)
            # FETCH and terminate
            result_str, _err = expand_glob_process.communicate(timeout=10)
            # PARSE
            result = json.loads(str(result_str.decode('UTF-8')))
            # CHECK (only for error Display)
            if "error" in result and isinstance(result['error'], str):
                Debug('error', 'expandglob.js: %s' % result['error'])
            #elif "error" in result and "files" in result \
            #        and result['error'] == False:
            #    return result['files']
        except TimeoutExpired:
            expand_glob_process.kill()
            outs, errs = expand_glob_process.communicate()
            Debug('notify', 'expandglob.js: Timeout')
        except Exception as e:
            Debug('notify', 'expandglob: %s' % e)
    except FileNotFoundError:
        Debug('error', "\n".join(["Could not find nodejs.",
            "I have tried this path: %s" % node_path,
            "Please install nodejs and/or set node_path in the project or plugin settings to the actual executable.",
            "If you are on windows and just have installed node, you first need to logout and login again."]))
    return None
Python
0
@@ -1961,28 +1961,72 @@ return -Fals +True # Should reopen project, so return True her e%0A%0A # Exp
c09f4567d220517ff4f55475d153580885402f95
Parse thousands correctly in the statements. Closes #1984
modules/citibank/parser.py
modules/citibank/parser.py
# -*- coding: utf-8 -*-

# Copyright(C) 2014      Oleg Plakhotniuk
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from weboob.capabilities.bank import Transaction
from weboob.tools.capabilities.bank.transactions import \
    AmericanTransaction as AmTr
from weboob.tools.date import closest_date
from weboob.tools.pdf import decompress_pdf
from weboob.tools.tokenizer import ReTokenizer

import datetime
import re


def clean_label(text):
    """
    Web view and statements use different label formatting.
    User shouldn't be able to see the difference, so we need to make
    labels from both sources look the same.
    """
    for pattern in [r' \d+\.\d+ +POUND STERLING',
                    u'Subject to Foreign Fee', u'Description']:
        text = re.sub(pattern, u'', text, re.UNICODE)
    return re.sub(r' +', u' ', text.strip().upper(), re.UNICODE)


def formatted(read_func):
    """
    Reads boilerplate PDF formatting around the data of interest.
    """
    def wrapped(self, pos):
        startPos = pos
        pos, ws = self.read_whitespace(pos)
        pos, bt = self.read_layout_bt(pos)
        pos, tf = self.read_layout_tf(pos)
        pos, tm = self.read_layout_tm(pos)
        pos, data = read_func(self, pos)
        pos, et = self.read_layout_et(pos)
        if ws is None or bt is None or tf is None \
                or tm is None or data is None or et is None:
            return startPos, None
        else:
            return pos, data
    return wrapped


class StatementParser(object):
    """
    Each "read_*" method takes position as its argument,
    and returns next token position if read was successful,
    or the same position if it was not.
    """
    LEX = [
        ('date_range', r'^\((\d{2}/\d{2}/\d{2})-(\d{2}/\d{2}/\d{2})\) Tj$'),
        ('amount', r'^\((-?\$\d+\.\d{2})\) Tj$'),
        ('date', r'^\((\d{2}/\d{2})\) Tj$'),
        ('text', r'^\((.*)\) Tj$'),
        ('layout_tf', r'^.* Tf$'),
        ('layout_tm', r'^' + (6*r'([^ ]+) ') + r'Tm$'),
        ('layout_bt', r'^BT$'),
        ('layout_et', r'^ET$'),
        ('whitespace', r'^$')
    ]

    def __init__(self, pdf):
        self._pdf = decompress_pdf(pdf)
        self._tok = ReTokenizer(self._pdf, '\n', self.LEX)

    def read_transactions(self):
        # Read statement dates range.
        date_from, date_to = self.read_first_date_range()

        # Read transactions.
        pos = 0
        while not self._tok.tok(pos).is_eof():
            pos, trans = self.read_transaction(pos, date_from, date_to)
            if trans:
                yield trans
            else:
                pos += 1

    def read_first_date_range(self):
        pos = 0
        while not self._tok.tok(pos).is_eof():
            pos, date_range = self.read_date_range(pos)
            if date_range is not None:
                return date_range
            else:
                pos += 1

    def read_date_range(self, pos):
        t = self._tok.tok(pos)
        if t.is_date_range():
            return (pos+1, [datetime.datetime.strptime(v, '%m/%d/%y')
                            for v in t.value()])
        else:
            return (pos, None)

    def read_transaction(self, pos, date_from, date_to):
        startPos = pos
        pos, tdate = self.read_date(pos)
        pos, pdate = self.read_date(pos)
        # Early check to call read_multiline_desc() only when needed.
        if tdate is None:
            return startPos, None
        pos, desc = self.read_multiline_desc(pos)
        pos, amount = self.read_amount(pos)
        if desc is None or amount is None:
            return startPos, None
        else:
            # Sometimes one date is missing.
            pdate = pdate or tdate
            tdate = closest_date(tdate, date_from, date_to)
            pdate = closest_date(pdate, date_from, date_to)

            trans = Transaction()
            trans.date = tdate
            trans.rdate = pdate
            trans.type = Transaction.TYPE_UNKNOWN
            trans.raw = desc
            trans.label = desc
            trans.amount = -amount
            return pos, trans

    def read_multiline_desc(self, pos):
        """
        Read transaction description which can span over multiple lines.
        Amount must always follow the multiline description.
        But multiline description might be split by page break.
        After reading first line of the description, we skip everything
        which is not an amount and which has different horizontal offset
        than the first read line.
        """
        startPos = pos
        descs = []
        xofs = None
        while not self._tok.tok(pos).is_eof():
            pos, desc_tm = self.read_text(pos)
            if desc_tm is None:
                if not descs:
                    break
                prev_pos = pos
                pos, amount = self.read_amount(pos)
                if amount is not None:
                    pos = prev_pos
                    break
                pos += 1
            else:
                desc, tm = desc_tm
                if xofs is None:
                    _, _, _, _, xofs, _ = tm
                _, _, _, _, xofs_new, _ = tm
                if xofs == xofs_new:
                    descs.append(desc)
                else:
                    pos += 1
        if descs:
            return pos, clean_label(' '.join(descs))
        else:
            return startPos, None

    def __getattr__(self, name):
        if name.startswith('read_'):
            return lambda pos: self._tok.simple_read(name[5:], pos)
        raise AttributeError()

    @formatted
    def read_date(self, pos):
        return self._tok.simple_read('date', pos,
            lambda v: datetime.datetime.strptime(v, '%m/%d'))

    @formatted
    def read_amount(self, pos):
        return self._tok.simple_read('amount', pos, AmTr.decimal_amount)

    def read_text(self, pos):
        startPos = pos
        pos, ws = self.read_whitespace(pos)
        pos, bt = self.read_layout_bt(pos)
        pos, tf = self.read_layout_tf(pos)
        pos, tm = self.read_layout_tm(pos)
        pos, text = self._tok.simple_read('text', pos,
                                          lambda v: unicode(v, errors='ignore'))
        pos, et = self.read_layout_et(pos)
        if ws is None or bt is None or tf is None \
                or tm is None or text is None or et is None:
            return startPos, None
        else:
            return pos, (text, tm)
Python
0.999997
@@ -2445,16 +2445,25 @@ (-?%5C$%5Cd+ +(,%5Cd%7B3%7D)* %5C.%5Cd%7B2%7D) @@ -6497,16 +6497,64 @@ t', pos, +%0A lambda xs: AmTr.de @@ -6565,16 +6565,23 @@ l_amount +(xs%5B0%5D) )%0A%0A d
164262400dbb265a3363eb9f1415284b391c079c
Remove duplicate call
fc.py
fc.py
#!/usr/bin/python3
#Fork compiler toolchain script

import argparse
import re, os
from sys import exit

def main():
    #Parse arguments
    parser = argparse.ArgumentParser(description='Fork toolchain command line parser...')
    parser.add_argument('-v',action='store_true',help='Use valgrind')
    parser.add_argument('-c',action='store_true',help='Compile and link static binary')
    parser.add_argument('files',metavar='filename',type=str,nargs='+',help='files to process')
    regex_delete = re.compile("(^\s*//.*)|(^\s*$)")
    args = parser.parse_args()
    files = args.files

    #Check that parser exists
    if not os.path.exists("./parser"):
        print("Parser binary not found in current directory.")
        exit(1)

    #Preprocessing
    temp_files = [x + '.wrapper_tmp_file' for x in files]
    for file_in,file_out in zip(files,temp_files):
        try:
            f_in = open(file_in,'r')
            f_out = open(file_out,'w')
        except (FileNotFoundError,PermissionError) as e:
            print(e)
            exit(2)
        for line in f_in:
            if not regex_delete.match(line):
                f_out.write(line)
        f_in.close()
        f_out.close()

    #Build temp_files
    for file in temp_files:
        if args.v:
            print("Please ignore GC_INIT() uninitialized memory.")
            os.system("valgrind --vgdb=no ./parser {}".format(file))
        else:
            basename = file[0:-20]
            os.system("./parser {0}".format(file,basename))
            if args.c:
                os.system("""echo "./parser {0} 3>&1 1>&2 2>&3 | tee {1}.ll" | bash """.format(file,basename))
                print("Attemping to compile and link IR statically.")
                print("Compile LLVM IR to local architecture assembly...")
                os.system("llvm/build/Release+Asserts/bin/llc -O2 {0}.ll; echo ; cat {0}.s".format(basename))
                print("\nInvoking GCC assembler for static compilation...")
                os.system("gcc -c {0}.s -o {0}.o".format(basename))
                print("Linking executable...")
                os.system("g++ -std=c++11 -fomit-frame-pointer -rdynamic -fvisibility-inlines-hidden -fno-exceptions -fno-rtti -fPIC -ffunction-sections -fdata-sections -Wl,-rpath=. -o {0}.bin {0}.o lib.o".format(basename))

    #Postprocessing
    for file in temp_files:
        os.remove(file)

if __name__=='__main__':
    print('Running Fork Compiler...')
    main()
Python
0.000004
@@ -1325,62 +1325,8 @@ 20%5D%0A - os.system(%22./parser %7B0%7D%22.format(file,basename))%0A
1e14d68c86e0cacb9bedc51884081ef0a1cfdcdc
Fix for trevis
isitdown/config.py
isitdown/config.py
import os

basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    DEBUG = False
    TESTING = False
    DATABASE_URI = os.environ["ISITDOWN_DATABASE_URI"] or 'sqlite:///' + os.path.join(basedir, 'app.db')
    SECRET_KEY = os.environ['ISITDOWN_SECRET_KEY'] or 'you-will-never-guess'
    SQLALCHEMY_DATABASE_URI = str(DATABASE_URI)
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False
    BACKOFF_API_CALL_TIME = 30 * 1e3  # ms


class DevelopmentConfig(Config):
    DEBUG = True
    BACKOFF_API_CALL_TIME = 2 * 1e3  # ms


class TestingConfig(Config):
    TESTING = True
    BACKOFF_API_CALL_TIME = 0  # ms
Python
0
@@ -147,17 +147,21 @@ .environ -%5B +.get( %22ISITDOW @@ -175,20 +175,17 @@ ASE_URI%22 -%5D or +, 'sqlite @@ -223,16 +223,17 @@ app.db') +) %0A SEC @@ -256,9 +256,13 @@ iron -%5B +.get( 'ISI @@ -282,12 +282,9 @@ KEY' -%5D or +, 'yo @@ -302,16 +302,17 @@ r-guess' +) %0A SQL
16d87a91bcd6eb5cdb23d6aeb45e48ca7baf181a
remove unnecessary import
createdb.py
createdb.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask('app')
app.config.from_object('config')
db = SQLAlchemy(app)

from app import models

db.create_all()
Python
0.000037
@@ -138,34 +138,8 @@ p)%0A%0A -%0Afrom app import models%0A%0A%0A db.c
ee1b02d7327eeeeb65115c705a0df1ffd7c82034
Make random election view a bit more random
democracy_club/apps/everyelection/views.py
democracy_club/apps/everyelection/views.py
from django.db.models import Count
from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView, UpdateView
from django.core.urlresolvers import reverse

from braces.views import LoginRequiredMixin

from .models import AuthorityElection, AuthorityElectionPosition
from .forms import AuthorityAreaForm


class RandomAuthority(RedirectView):
    permanent = False

    def get_redirect_url(self, *args, **kwargs):
        authority_election = AuthorityElection.objects.annotate(
            position_count=Count('authorityelectionposition')
        ).order_by('position_count').first()

        return reverse('everyelection:authority', kwargs={
            'pk': authority_election.pk})


class AuthorityEdit(LoginRequiredMixin, UpdateView):
    template_name = "everyelection/authority.html"
    form_class = AuthorityAreaForm
    model = AuthorityElection

    def get_success_url(self):
        return reverse('everyelection:random_election')

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def post(self, *args, **kwargs):
        if 'skipped_form' in self.request.POST:
            form = AuthorityElectionSkippedForm({
                'user': self.request.user.pk,
                'authority_election': self.get_object().pk,
                'notes': self.request.POST['notes'],
            })
            if form.is_valid():
                form.save()
                url = reverse('everyelection:random_election')
                return redirect(url)
        return super().post(*args, **kwargs)

    def get_context_data(self, **kwargs):
        kwargs['elections_researched'] = \
            AuthorityElectionPosition.objects.filter(
                user=self.request.user)\
            .values('authority_election')\
            .distinct().count()

        kwargs['areas_researched'] = AuthorityElectionPosition.objects.filter(
            user=self.request.user
        ).count()

        kwargs['skip_form'] = AuthorityElectionSkippedForm()
        return super().get_context_data(**kwargs)
Python
0.00002
@@ -1,20 +1,35 @@ +import random%0A%0A from django.db.model @@ -192,16 +192,54 @@ reverse +%0Afrom django.shortcuts import redirect %0A%0Afrom b @@ -378,16 +378,46 @@ AreaForm +, AuthorityElectionSkippedForm %0A%0A%0Aclass @@ -546,19 +546,25 @@ election +s = +list( Authorit @@ -694,15 +694,167 @@ t'). -first() +values_list('election_id', flat=True))%0A half = authority_elections%5B0:int(len(authority_elections)/2)%5D%0A authority_election = random.choice(half)%0A%0A %0A @@ -945,19 +945,16 @@ election -.pk %7D)%0A%0A%0Acla
2549a66b6785d5a0ed0658a4f375a21c486792df
Raise explicit exception on no type match
sifr/util.py
sifr/util.py
import datetime

from dateutil import parser
import six


def normalize_time(t):
    try:
        if isinstance(t, datetime.datetime):
            return t
        elif isinstance(t, datetime.date):
            return datetime.datetime(t.year, t.month, t.day)
        elif isinstance(t, (int, float)):
            return datetime.datetime.fromtimestamp(t)
        elif isinstance(t, six.string_types):
            return parser.parse(t)
        else:
            raise
    except:  # noqa
        raise TypeError(
            "time must be represented as either a timestamp (int,float), "
            "a datetime.datetime or datetime.date object, "
            "or an iso-8601 formatted string"
        )
Python
0.000006
@@ -460,16 +460,26 @@ raise + TypeError %0A exc
62ede23b0e13ab907b3eab620193921de29e162b
Bump version to 4.3.2b1
platformio/__init__.py
platformio/__init__.py
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = (4, 3, "2a4")
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio"
__description__ = (
    "A new generation ecosystem for embedded development. "
    "Cross-platform IDE and Unified Debugger. "
    "Static Code Analyzer and Remote Unit Testing. "
    "Multi-platform and Multi-architecture Build System. "
    "Firmware File Explorer and Memory Inspection. "
    "Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
    "RISC-V, FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3"
)
__url__ = "https://platformio.org"

__author__ = "PlatformIO"
__email__ = "contact@platformio.org"

__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"

__apiurl__ = "https://api.platformio.org"
__pioaccount_api__ = "https://api.accounts.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
Python
0.000001
@@ -627,10 +627,10 @@ , %222 -a4 +b1 %22)%0A_
d4af985eb8786f6531e319e837fcc8d7b3e33ece
Bump version to 4.0.0a5
platformio/__init__.py
platformio/__init__.py
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = (4, 0, "0a4")
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio"
__description__ = (
    "An open source ecosystem for IoT development. "
    "Cross-platform IDE and unified debugger. "
    "Remote unit testing and firmware updates. "
    "Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
    "FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"

__author__ = "PlatformIO"
__email__ = "contact@platformio.org"

__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"

__apiurl__ = "https://api.platformio.org"
Python
0
@@ -624,17 +624,17 @@ , 0, %220a -4 +5 %22)%0A__ver
454cd8d7847074988bf967f6240d59f25bdf310e
Bump version to 5.0.2rc1
platformio/__init__.py
platformio/__init__.py
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

VERSION = (5, 0, "2b5")
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio"
__description__ = (
    "A professional collaborative platform for embedded development. "
    "Cross-platform IDE and Unified Debugger. "
    "Static Code Analyzer and Remote Unit Testing. "
    "Multi-platform and Multi-architecture Build System. "
    "Firmware File Explorer and Memory Inspection. "
    "IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
    "STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
    "MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
    "STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"

__author__ = "PlatformIO"
__email__ = "contact@platformio.org"

__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"

__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = [
    "https://api.registry.platformio.org",
    "https://api.registry.ns1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"

__default_requests_timeout__ = (10, None)  # (connect, read)

__core_packages__ = {
    "contrib-piohome": "~3.3.1",
    "contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
    "tool-unity": "~1.20500.0",
    "tool-scons": "~2.20501.7" if sys.version_info.major == 2 else "~4.40001.0",
    "tool-cppcheck": "~1.210.0",
    "tool-clangtidy": "~1.100000.0",
    "tool-pvs-studio": "~7.9.0",
}

__check_internet_hosts__ = [
    "185.199.110.153",  # Github.com
    "88.198.170.159",  # platformio.org
    "github.com",
    "platformio.org",
]
Python
0
@@ -639,10 +639,11 @@ , %222 -b5 +rc1 %22)%0A_
dc14bd73623f453d8ef272a9cd46df3733fcfad9
Bump version to 6.1.5rc1
platformio/__init__.py
platformio/__init__.py
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

VERSION = (6, 1, "5a4")
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio"
__description__ = (
    "A professional collaborative platform for embedded development. "
    "Cross-platform IDE and Unified Debugger. "
    "Static Code Analyzer and Remote Unit Testing. "
    "Multi-platform and Multi-architecture Build System. "
    "Firmware File Explorer and Memory Inspection. "
    "IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
    "STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
    "MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
    "STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"

__author__ = "PlatformIO Labs"
__email__ = "contact@piolabs.com"

__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"

__accounts_api__ = "https://api.accounts.platformio.org"
__registry_mirror_hosts__ = [
    "registry.platformio.org",
    "registry.nm1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"

__core_packages__ = {
    "contrib-piohome": "~3.4.2",
    "contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
    "tool-scons": "~4.40400.0",
    "tool-cppcheck": "~1.270.0",
    "tool-clangtidy": "~1.120001.0",
    "tool-pvs-studio": "~7.18.0",
}

__check_internet_hosts__ = [
    "185.199.110.153",  # Github.com
    "88.198.170.159",  # platformio.org
    "github.com",
] + __registry_mirror_hosts__
Python
0
@@ -639,10 +639,11 @@ , %225 -a4 +rc1 %22)%0A_
3329728b0e21fb384c9e2cd7c6a5ac7254bc58bb
Check modified time before deleting
cax/tasks/clear.py
cax/tasks/clear.py
"""Logic for pruning data The requirement to prune data can occur in a few cases. Time outs on transfers or failed checksums are an obvious case. We also use this section for clearing the DAQ buffer copy. """ import datetime import os import shutil from cax import config from cax.task import Task from cax.tasks import checksum class RetryStalledTransfer(checksum.CompareChecksums): """Alert if stale transfer. Inherits from the checksum task since we use checksums to know when we can delete data. """ # Do not overload this routine from checksum inheritance. each_run = Task.each_run def has_untriggered(self): for data_doc in self.run_doc['data']: if data_doc['type'] == 'untriggered': return True return False def each_location(self, data_doc): if 'host' not in data_doc or data_doc['host'] != config.get_hostname(): return # Skip places where we can't locally access data if 'creation_time' not in data_doc: self.log.warning("No creation time for %s" % str(data_doc)) return # How long has transfer been ongoing time_mod = datetime.fromtimestamp(os.stat(data_doc['location']).st_mtime) time_made = data_doc['creation_time'] difference = datetime.datetime.utcnow() - max(time_mod, time_made) if data_doc["status"] == "transferred": return # Transfer went fine self.log.debug(difference) if difference > datetime.timedelta(hours=2): # If stale transfer self.give_error("Transfer %s from run %d (%s) lasting more than " "one hour" % (data_doc['type'], self.run_doc['number'], self.run_doc['name'])) if difference > datetime.timedelta(hours=6) or \ data_doc["status"] == 'error': # If stale transfer self.give_error("Transfer lasting more than six hours " "or errored, retry.") self.log.info("Deleting %s" % data_doc['location']) if os.path.isdir(data_doc['location']): try: shutil.rmtree(data_doc['location']) except FileNotFoundError: self.log.warning("FileNotFoundError within %s" % data_doc['location']) self.log.info('Deleted, notify run database.') elif os.path.isfile(data_doc['location']): os.remove(data_doc['location']) else: self.log.error('did not exist, notify run database.') if config.DATABASE_LOG == True: resp = self.collection.update({'_id': self.run_doc['_id']}, {'$pull': {'data': data_doc}}) self.log.error('Removed from run database.') self.log.debug(resp) class RetryBadChecksumTransfer(checksum.CompareChecksums): """Alert if stale transfer. Inherits from the checksum task since we use checksums to know when we can delete data. """ # Do not overload this routine from checksum inheritance. each_run = Task.each_run def each_location(self, data_doc): if 'host' not in data_doc or data_doc['host'] != config.get_hostname(): return # Skip places where we can't locally access data if data_doc["status"] != "transferred": return if data_doc['checksum'] != self.get_main_checksum(**data_doc): self.give_error("Bad checksum") if self.check(warn=False) > 1: self.log.info("Deleting %s" % data_doc['location']) if os.path.isdir(data_doc['location']): shutil.rmtree(data_doc['location']) self.log.error('Deleted, notify run database.') elif os.path.isfile(data_doc['location']): os.remove(data_doc['location']) else: self.log.error('did not exist, notify run database.') if config.DATABASE_LOG == True: resp = self.collection.update({'_id': self.run_doc['_id']}, {'$pull': {'data': data_doc}}) self.log.error('Removed from run database.') self.log.debug(resp)
Python
0
@@ -1181,34 +1181,16 @@ _mod - = datetime.fromtimestamp( +ified = os.s @@ -1223,16 +1223,86 @@ st_mtime +%0A time_modified = datetime.datetime.fromtimestamp(time_modified )%0A @@ -1403,16 +1403,21 @@ time_mod +ified ,%0A
1800e98f0570bfd1029d9df881fb144fdd943e72
Remove leftover debug print from Melnor (#78870)
tests/components/melnor/test_number.py
tests/components/melnor/test_number.py
"""Test the Melnor sensors.""" from __future__ import annotations from .conftest import ( mock_config_entry, patch_async_ble_device_from_address, patch_async_register_callback, patch_melnor_device, ) async def test_manual_watering_minutes(hass): """Test the manual watering switch.""" entry = mock_config_entry(hass) with patch_async_ble_device_from_address(), patch_melnor_device() as device_patch, patch_async_register_callback(): device = device_patch.return_value assert await hass.config_entries.async_setup(entry.entry_id) await hass.async_block_till_done() number = hass.states.get("number.zone_1_manual_minutes") print(number) assert number.state == "0" assert number.attributes["max"] == 360 assert number.attributes["min"] == 1 assert number.attributes["step"] == 1.0 assert number.attributes["icon"] == "mdi:timer-cog-outline" assert device.zone1.manual_watering_minutes == 0 await hass.services.async_call( "number", "set_value", {"entity_id": "number.zone_1_manual_minutes", "value": 10}, blocking=True, ) number = hass.states.get("number.zone_1_manual_minutes") assert number.state == "10" assert device.zone1.manual_watering_minutes == 10
Python
0
@@ -688,30 +688,8 @@ %22)%0A%0A - print(number)%0A
9379504dbdb57c5754fbc984bcf72cc032a9d626
support loading configs by filename
simulator.py
simulator.py
#!/bin/bash -

# If you have PyPy 1.6+ in a directory called pypy alongside pox.py, we
# use it.
# Otherwise, we try to use a Python interpreter called python2.7, which
# is a good idea if you're using Python from MacPorts, for example.
# We fall back to just "python" and hope that works.

''''echo -n
export OPT="-O"
export FLG=""
if [[ "$(basename $0)" == "debug-pox.py" ]]; then
  export OPT=""
  export FLG="--debug"
fi

if [ -x pypy/bin/pypy ]; then
  exec pypy/bin/pypy $OPT "$0" $FLG "$@"
fi

if [ "$(type -P python2.7)" != "" ]; then
  exec python2.7 $OPT "$0" $FLG "$@"
fi

exec python $OPT "$0" $FLG "$@"
'''

from sts.procutils import kill_procs
from sts.topology import FatTree, BufferedPatchPanel
from sts.control_flow import Fuzzer
from sts.invariant_checker import InvariantChecker
from sts.simulation import Simulation
from pox.lib.recoco.recoco import Scheduler
from sts.snapshot import PoxSnapshotService, FloodlightSnapshotService

import signal
import sys
import string
import subprocess
import time
import argparse
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("sts")

description = """
Run a simulation.
Example usage:

$ %s -c config.fat_tree
""" % (sys.argv[0])

parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description=description)

parser.add_argument('-c', '--config', default='config.fat_tree',
                    help='''experiment config module in the config/ '''
                         '''subdirectory, e.g. config.fat_tree''')

args = parser.parse_args()
config = __import__(args.config, globals(), locals(), ["*"])

# For instrumenting the controller
if hasattr(config, 'controllers'):
  controller_configs = config.controllers
else:
  raise RuntimeError("Must specify controllers in config file")

# For forwarding packets
if hasattr(config, 'patch_panel_class'):
  patch_panel_class = config.patch_panel_class
else:
  # We default to a BufferedPatchPanel
  patch_panel_class = BufferedPatchPanel

# For tracking the edges and vertices in our network
if hasattr(config, 'topology_class'):
  topology_class = config.topology_class
else:
  # We default to a FatTree
  topology_class = FatTree

# For constructing the topology object
if hasattr(config, 'topology_params'):
  topology_params = config.topology_params
else:
  # We default to no parameters
  topology_params = ""

# For controlling the simulation
if hasattr(config, 'control_flow'):
  simulator = config.control_flow
else:
  # We default to a Fuzzer
  simulator = Fuzzer()

# For snapshotting the controller
# Read from config what controller we are using
# TODO(cs): move this demultiplexing to a factory method in snapshotservice.py
if controller_configs != [] and controller_configs[0].name == "pox":
  snapshotService = PoxSnapshotService()
elif controller_configs != [] and controller_configs[0].name == "floodlight":
  snapshotService = FloodlightSnapshotService()
else:
  # We default snapshotService to POX
  snapshotService = PoxSnapshotService()

simulator.set_invariant_checker(InvariantChecker(snapshotService))

# For injecting dataplane packets into the simulated network
if hasattr(config, 'dataplane_trace') and config.dataplane_trace:
  dataplane_trace_path = config.dataplane_trace
else:
  # We default to no dataplane trace
  dataplane_trace_path = None

def handle_int(signal, frame):
  print >> sys.stderr, "Caught signal %d, stopping sdndebug" % signal
  if simulation is not None:
    simulation.clean_up()
  sys.exit(0)

signal.signal(signal.SIGINT, handle_int)
signal.signal(signal.SIGTERM, handle_int)

simulation = None

try:
  simulation = Simulation(controller_configs, topology_class,
                          topology_params, patch_panel_class,
                          dataplane_trace_path=dataplane_trace_path,
                          controller_sync_callback=simulator.get_sync_callback())
  simulator.simulate(simulation)
finally:
  if simulation is not None:
    simulation.clean_up()
Python
0
@@ -1614,16 +1614,100 @@ _args()%0A +if args.config.endswith('.py'):%0A args.config = args.config%5B:-3%5D.replace(%22/%22, %22.%22)%0A%0A config =
b813ace93206ae39e2b0f2e3bba990297bf1909d
fix bug when area is unknown
modules/leboncoin/pages.py
modules/leboncoin/pages.py
# -*- coding: utf-8 -*-

# Copyright(C) 2014      Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from decimal import Decimal

from weboob.browser.pages import HTMLPage, pagination
from weboob.browser.elements import ItemElement, ListElement, method
from weboob.browser.filters.standard import CleanText, Regexp, CleanDecimal, Env, DateTime, BrowserURL
from weboob.browser.filters.html import Attr, Link
from weboob.capabilities.housing import City, Housing, HousingPhoto

from datetime import date, timedelta
from weboob.tools.date import DATE_TRANSLATE_FR, LinearDateGuesser


class CityListPage(HTMLPage):
    @method
    class get_cities(ListElement):
        item_xpath = '//li'

        class item(ItemElement):
            klass = City

            obj_id = CleanText('./span[@class="zipcode"]')
            obj_name = CleanText('./span[@class="city"]')


class HousingListPage(HTMLPage):
    def get_area_min(self, asked_area):
        return self.find_select_value(asked_area, '//select[@id="sqs"]/option')

    def get_area_max(self, asked_area):
        return self.find_select_value(asked_area, '//select[@id="sqe"]/option')

    def get_rooms_min(self, asked_rooms):
        return self.find_select_value(asked_rooms, '//select[@id="rooms_ros"]/option')

    # def get_rooms_max(self, asked_rooms):
    #     return self.find_select_value(asked_rooms, '//select[@id="roe"]/option')

    def get_cost_min(self, asked_cost):
        return self.find_select_value(asked_cost, '//select[@id="ps"]/option')

    def get_cost_max(self, asked_cost):
        return self.find_select_value(asked_cost, '//select[@id="pe"]/option')

    def find_select_value(self, ref_value, selector):
        select = {}
        for item in self.doc.xpath(selector):
            if item.attrib['value']:
                select[CleanDecimal('.')(item)] = CleanDecimal('./@value')(item)

        select_keys = select.keys()
        select_keys.sort()
        for select_value in select_keys:
            if select_value >= ref_value:
                return select[select_value]
        return select[select_keys[-1]]

    @pagination
    @method
    class get_housing_list(ListElement):
        item_xpath = '//div[@class="list-lbc"]/a'

        def next_page(self):
            return Link('//li[@class="page"]/a[contains(text(),"Page suivante")]')(self)

        class item(ItemElement):
            klass = Housing

            obj_id = Regexp(Link('.'),
                            'http://www.leboncoin.fr/(ventes_immobilieres|locations)/(.*).htm',
                            '\\2')
            obj_title = CleanText('./div[@class="lbc"]/div/div[@class="title"]')
            obj_cost = CleanDecimal('./div[@class="lbc"]/div/div[@class="price"]',
                                    replace_dots=(',', '.'),
                                    default=Decimal(0))
            obj_currency = Regexp(CleanText('./div[@class="lbc"]/div/div[@class="price"]'),
                                  '.*([%s%s%s])' % (u'€', u'$', u'£'),
                                  default=u'€')
            obj_text = CleanText('./div[@class="lbc"]/div[@class="detail"]')

            def obj_date(self):
                _date = CleanText('./div[@class="lbc"]/div[@class="date"]',
                                  replace=[('Aujourd\'hui', str(date.today())),
                                           ('Hier', str((date.today() - timedelta(1))))])(self)
                for fr, en in DATE_TRANSLATE_FR:
                    _date = fr.sub(en, _date)
                self.env['tmp'] = _date
                return DateTime(Env('tmp'), LinearDateGuesser())(self)

            def obj_photos(self):
                photos = []
                url = Attr('./div[@class="lbc"]/div[@class="image"]/div/img',
                           'src', default=None)(self)
                if url:
                    photos.append(HousingPhoto(url))
                return photos


class HousingPage(HTMLPage):
    @method
    class get_housing(ItemElement):
        klass = Housing

        def parse(self, el):
            details = dict()
            for tr in el.xpath('//div[@class="floatLeft"]/table/tr'):
                if 'Ville' in CleanText('./th')(tr):
                    self.env['location'] = CleanText('./td')(tr)
                else:
                    details['%s' % CleanText('./th', replace=[(':', '')])(tr)] = CleanText('./td')(tr)

            for tr in el.xpath('//div[@class="lbcParams criterias"]/table/tr'):
                if 'Surface' in CleanText('./th')(tr):
                    self.env['area'] = CleanDecimal(Regexp(CleanText('./td'), '(.*)m.*'),
                                                    replace_dots=(',', '.'))(tr)
                else:
                    key = '%s' % CleanText('./th', replace=[(':', '')])(tr)
                    if 'GES' in key or 'Classe' in key:
                        details[key] = CleanText('./td/noscript/a')(tr)
                    else:
                        details[key] = CleanText('./td')(tr)
            self.env['details'] = details

        obj_id = Env('_id')
        obj_title = CleanText('//h2[@id="ad_subject"]')
        obj_cost = CleanDecimal('//span[@class="price"]',
                                replace_dots=(',', '.'),
                                default=Decimal(0))
        obj_currency = Regexp(CleanText('//span[@class="price"]'),
                              '.*([%s%s%s])' % (u'€', u'$', u'£'))
        obj_text = CleanText('//div[@class="content"]')
        obj_location = Env('location')
        obj_details = Env('details')
        obj_area = Env('area')
        obj_url = BrowserURL('housing', _id=Env('_id'))

        def obj_date(self):
            _date = Regexp(CleanText('//div[@class="upload_by"]',
                                     replace=[(u'à', '')]),
                           '.*- Mise en ligne le (.*).')(self)
            for fr, en in DATE_TRANSLATE_FR:
                _date = fr.sub(en, _date)
            self.env['tmp'] = _date
            return DateTime(Env('tmp'), LinearDateGuesser())(self)

        def obj_photos(self):
            photos = []
            for img in self.el.xpath('//div[@id="thumbs_carousel"]/a/span'):
                url = CleanText(Regexp(Attr('.', 'style', default=''),
                                       "background-image: url\('(.*)'\);",
                                       default=''),
                                replace=[('thumbs', 'images')],
                                default='')(img)
                if url:
                    photos.append(HousingPhoto(url))
            return photos
Python
0
@@ -1096,16 +1096,66 @@ ngPhoto%0A +from weboob.capabilities.base import NotAvailable%0A from dat @@ -4654,24 +4654,72 @@ ls = dict()%0A + self.env%5B'location'%5D = NotAvailable%0A @@ -5012,32 +5012,76 @@ xt('./td')(tr)%0A%0A + self.env%5B'area'%5D = NotAvailable%0A for
ae6025a4be24637e57c0545aa860d96a0447aa89
Raise any exceptions from ended listeners in workers
channels/worker.py
channels/worker.py
import asyncio

from asgiref.server import StatelessServer


class Worker(StatelessServer):
    """
    ASGI protocol server that surfaces events sent to specific channels
    on the channel layer into a single application instance.
    """

    def __init__(self, application, channels, channel_layer, max_applications=1000):
        super().__init__(application, max_applications)
        self.channels = channels
        self.channel_layer = channel_layer
        if self.channel_layer is None:
            raise ValueError("Channel layer is not valid")

    async def handle(self):
        """
        Listens on all the provided channels and handles the messages.
        """
        # For each channel, launch its own listening coroutine
        listeners = []
        for channel in self.channels:
            listeners.append(asyncio.ensure_future(
                self.listener(channel)
            ))
        # Wait for them all to exit
        await asyncio.wait(listeners)

    async def listener(self, channel):
        """
        Single-channel listener
        """
        while True:
            message = await self.channel_layer.receive(channel)
            if not message.get("type", None):
                raise ValueError("Worker received message with no type.")
            # Make a scope and get an application instance for it
            scope = {"type": "channel", "channel": channel}
            instance_queue = self.get_or_create_application_instance(channel, scope)
            # Run the message into the app
            await instance_queue.put(message)
Python
0
@@ -977,16 +977,148 @@ steners) +%0A # See if any of the listeners had an error (e.g. channel layer error)%0A %5Blistener.result() for listener in listeners%5D %0A%0A as
38c2e06b676aced578722e82a0668eda53f668d8
Implement Action Runner * Resolving merge conflicts
st2actionrunnercontroller/st2actionrunnercontroller/controllers/liveactions.py
st2actionrunnercontroller/st2actionrunnercontroller/controllers/liveactions.py
import httplib

from pecan import (abort, expose, request, response)
from pecan.rest import RestController

import uuid

from wsme import types as wstypes
import wsmeext.pecan as wsme_pecan

from st2common import log as logging
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.api.actionrunner import LiveActionAPI
from st2common.persistence.action import ActionExecution
from st2common.persistence.actionrunner import LiveAction

LOG = logging.getLogger(__name__)


class LiveActionsController(RestController):
    """
        Implements the RESTful web endpoint that handles
        the lifecycle of ActionRunners in the system.
    """

    _liveaction_apis = {}

    def __init__(self):
        api = LiveActionAPI()
        api.id = '12345'
        api.name = 'test/echo'
        api.description = 'A test/echo action'
        api.action_execution_id = 'some id'
        self._liveaction_apis['12345'] = api

        api = LiveActionAPI()
        api.id = '78901'
        api.name = 'test/hello'
        api.description = 'A test/hello action'
        api.action_execution_id = 'some other id'
        self._liveaction_apis['78901'] = api

    def get_actionexecution_by_id(self, id):
        """
            Get ActionExecution by id.
            On error, raise ST2ObjectNotFoundError.
        """
        # TODO: Maybe lookup should be done via HTTP interface. Handle via direct DB call
        #       for now.
        try:
            actionexecution = ActionExecution.get_by_id(id)
        except (ValueError, ValidationError) as e:
            LOG.error('Database lookup for actionexecution with id="%s" resulted in exception: %s', id, e)
            raise StackStormDBObjectNotFoundError('Unable to find actionexecution with id="%s"', id)

        return actionexecution

    @wsme_pecan.wsexpose(LiveActionAPI, wstypes.text)
    def get_one(self, id):
        """
            List LiveAction by id.

            Handle:
                GET /liveactions/1
        """
        # TODO: test/handle object not found.
        return {'liveaction': id}

    @wsme_pecan.wsexpose([LiveActionAPI])
    def get_all(self):
        """
            List all liveactions.

            Handles requests:
                GET /liveactions/
        """
        LOG.info('GET all /liveactions/')

#        liveaction_apis = self._liveaction_apis

#        liveaction_api = LiveActionAPI()
#        liveaction_api.id = str(uuid.uuid4())
#        liveaction_api.action_name = u'test/echo'

#        self._liveaction_apis.append(self.create_liveaction('test/echo', {}, {}))

        # TODO: Implement list comprehension to transform the in-memory objects into API objects
        # liveaction_apis = [liveaction_api for (id, liveaction_api) in self._liveaction_apis.items()]
        liveaction_apis = self._liveaction_apis.values()

        for api in liveaction_apis:
            LOG.debug('    %s', str(api))

        LOG.debug('GET all /liveactions/ client_result=%s', self._liveaction_apis)
        return self._liveaction_apis

    """
    def create_liveaction(self, action_name, runner_parameters={}, action_parameters={}):
        # Note: action name, action runner parameters and action parameters are all
        #       fields in the ActionExecutionDB object.
        liveaction_api = LiveActionAPI()
        liveaction_api.id = str(uuid.uuid4())
        liveaction_api.action_name = str.encode(action_name)
        liveaction_api.runner_parameters = runner_parameters
        liveaction_api.action_parameters = action_parameters
        return liveaction_api
    """

    #@wsme_pecan.wsexpose(LiveActionAPI, body=LiveActionAPI, status_code=httplib.CREATED)
    @expose('json')
    def post(self, **kwargs):
        """
            Create a new LiveAction.

            Handles requests:
                POST /liveactions/
        """
        LOG.info('POST /liveactions/ with liveaction data=%s', kwargs)

        actionexecution_id = str(kwargs['action_execution_id'])
        actionexecution_db = None
        LOG.info('POST /liveactions/ received action_execution_id: %s', actionexecution_id)

        LOG.info('POST /liveactions/ attempting to obtain action_execution from database.')
        try:
            actionexecution_db = self.get_actionexecution_by_id(actionexecution_id)
        except StackStormDBObjectNotFoundError, e:
            LOG.error(e.msg)
            # TODO: Is there a more appropriate status code?
            abort(httplib.BAD_REQUEST)

        LOG.info('POST /liveactions/ obtained action execution object from database. Object is %s', actionexecution_db)
        LOG.info('ae name %s', actionexecution_db.name)
        LOG.debug('Got ActionExecution.... now launch action command.')

        abort(httplib.NOT_IMPLEMENTED)

    @expose('json')
    def put(self, id, **kwargs):
        """
            Update not supported for LiveActions.

            Handles requests:
                POST /liveactions/1?_method=put
                PUT /liveactions/1
        """
        abort(httplib.METHOD_NOT_ALLOWED)

    @wsme_pecan.wsexpose(None, wstypes.text)
    def delete(self, id):
        """
            Delete a LiveAction.

            Handles requests:
                POST /liveactions/1?_method=delete
                DELETE /liveactions/1
        """
        abort(httplib.NOT_IMPLEMENTED)
Python
0
@@ -2341,283 +2341,8 @@ ')%0A%0A -# liveaction_apis = self._liveaction_apis%0A%0A# liveaction_api = LiveActionAPI()%0A# liveaction_api.id = str(uuid.uuid4())%0A# liveaction_api.action_name = u'test/echo'%0A%0A# self._liveaction_apis.append(self.create_liveaction('test/echo', %7B%7D, %7B%7D))%0A%0A @@ -2810,102 +2810,8 @@ %22%22%22%0A - def create_liveaction(self, action_name, runner_parameters=%7B%7D, action_parameters=%7B%7D):%0A @@ -2882,28 +2882,24 @@ ers are all%0A - # fields @@ -2932,16 +2932,106 @@ object.%0A + def create_liveaction(self, action_name, runner_parameters=%7B%7D, action_parameters=%7B%7D):%0A @@ -3469,32 +3469,159 @@ elf, **kwargs):%0A +# @wsme_pecan.wsexpose(LiveActionAPI, body=LiveActionAPI, status_code=httplib.CREATED)%0A# def post(self, liveaction_api):%0A %22%22%22%0A
6149cef01900d6710efa322ef934965c5f1379f8
Fix pylint
xapi/utils.py
xapi/utils.py
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore

from django.contrib.auth.models import User
from django.test.client import RequestFactory

from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from courseware.courses import get_course_by_id, get_course_about_section
from courseware.module_render import get_module


def get_request_for_user(user):
    """Create a request object for user."""
    request = RequestFactory()
    request.user = user
    request.COOKIES = {}
    request.META = {}
    request.is_secure = lambda: True
    request.get_host = lambda: "edx.org"
    request.method = 'GET'
    return request


def get_course_key(course_id):
    course_key = ""
    try:
        course_key = CourseKey.from_string(course_id)
    except InvalidKeyError:
        course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    return course_key


def get_course(course_id):
    course_key = get_course_key(course_id)
    course = get_course_by_id(course_key)
    return course


def get_course_title(course_id):
    course_key = get_course_key(course_id)
    title = CourseOverview.get_from_id(course_key).display_name
    return title


def get_course_description(course_id, user_id):
    course = get_course(course_id)
    user = User.objects.get(user_id)
    request = get_request_for_user(user)
    module = get_module(user, request, course.location.replace(category='about', name="short_description"), [])
    return module.data


def get_usage_key(course_id, module_id):
    """
    Get the usage key of sequential block
    Can be :
    i4x://test/TEST101/sequential/45b889d710424143aa7f13e7c4bc0446
    or
    block-v1:ORG+TEST101+RUNCODE+type@sequential+block@45b889d710424143aa7f13e7c4bc0446
    depending on modulestore
    """
    course_key = CourseKey.from_string(course_id)
    items = modulestore().get_items(course_key, qualifiers={'name': module_id})
    return items[0].location.to_deprecated_string()
Python
0.000099
@@ -366,82 +366,8 @@ iew%0A -from courseware.courses import get_course_by_id, get_course_about_section%0A from
0b707c2e3a4d00aef5dad683c535498cb8bc1e21
use the default date formatter for logging
xutils/log.py
xutils/log.py
# -*- coding: utf-8 -*-

import os
import os.path
import logging
from logging.handlers import RotatingFileHandler


def init(logger=None, level="INFO", file=None, handler_cls=None, process=False,
         max_count=30, propagate=True, file_config=None, dict_config=None):
    root = logging.getLogger()
    if not logger:
        logger = root

    # Initialize the argument logger with the arguments, level and log_file.
    if logger:
        fmt = ("%(asctime)s - %(process)d - %(pathname)s - %(funcName)s - "
               "%(lineno)d - %(levelname)s - %(message)s")
        datefmt = "%Y-%m-%d %H:%M:%S"
        formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)

        level = getattr(logging, level.upper())

        if file:
            if process:
                filename, ext = os.path.splitext(file)
                file = "{0}.{1}{2}".format(filename, os.getpid(), ext)
            if handler_cls:
                handler = handler_cls(file, max_count)
            else:
                handler = RotatingFileHandler(file, maxBytes=1024**3,
                                              backupCount=max_count)
        else:
            handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(formatter)

        root.setLevel(level)
        root.addHandler(handler)

        loggers = logger if isinstance(logger, (list, tuple)) else [logger]
        for logger in loggers:
            if logger is root:
                continue
            logger.propagate = propagate
            logger.setLevel(level)
            logger.addHandler(handler)

    # Initialize logging by the configuration file, file_config.
    if file_config:
        logging.config.fileConfig(file_config, disable_existing_loggers=False)

    # Initialize logging by the dict configuration, dict_config.
    if dict_config and hasattr(logging.config, "dictConfig"):
        logging.config.dictConfig(dict_config)
Python
0
@@ -570,46 +570,8 @@ s%22)%0A - datefmt = %22%25Y-%25m-%25d %25H:%25M:%25S%22%0A @@ -615,25 +615,8 @@ =fmt -, datefmt=datefmt )%0A%0A
7d8380a65523e7d6ef77d39eaf125823e3e10812
Bump version to 5.0.0dev0
chardet/version.py
chardet/version.py
""" This module exists only to simplify retrieving the version number of chardet from within setup.py and from chardet subpackages. :author: Dan Blanchard (dan.blanchard@gmail.com) """ __version__ = "4.0.0" VERSION = __version__.split('.')
Python
0
@@ -199,13 +199,17 @@ = %22 -4 +5 .0.0 +dev0 %22%0AVE
4cb36450aa4ddabe0a6fa48300dc37edc053dd13
fix bug in isOpen for reports
modules/queue/ModReport.py
modules/queue/ModReport.py
"""Queue item for basic analysis by irwin""" from collections import namedtuple from datetime import datetime, timedelta import pymongo class ModReport(namedtuple('ModReport', ['id', 'processed', 'created'])): @staticmethod def new(userId): return ModReport( id=userId, processed=False, created=datetime.now()) class ModReportBSONHandler: @staticmethod def reads(bson): return ModReport( id=bson['_id'], processed=bson['processed'], created=bson['created']) @staticmethod def writes(modReport): return { '_id': modReport.id, 'processed': modReport.processed, 'created': modReport.created, 'updated': datetime.now() } class ModReportDB(namedtuple('ModReportDB', ['modReportColl'])): def write(self, modReport): self.modReportColl.update_one( {'_id': modReport.id}, {'$set': ModReportBSONHandler.writes(modReport)}, upsert=True) def close(self, userId): self.modReportColl.update_one( {'_id': userId}, {'$set': {'processed': True, 'updated': datetime.now()}}, upsert=False) def isOpen(self, userId): modReportBSON = self.modReportColl.find_one({'_id': userId}) processed = False if modReportBSON is not None: processed = modReportBSON['processed'] return processed def allOpen(self, limit=None): return [ModReportBSONHandler.reads(bson) for bson in self.modReportColl.find({'processed': False}, batch_size=limit)] def oldestUnprocessed(self): modReportBSON = self.modReportColl.find_one_and_update( filter={'processed': False, 'updated': {'$lt': datetime.now() - timedelta(days=2)}}, update={'$set': {'updated': datetime.now()}}, sort=[('updated', pymongo.ASCENDING)]) return None if modReportBSON is None else ModReportBSONHandler.reads(modReportBSON)
Python
0.000014
@@ -1363,20 +1363,19 @@ essed = -Fals +Tru e%0A @@ -1472,16 +1472,20 @@ return +not processe
6326cfa9ad5cb203eeade0c5875a005e06bbe932
fix isort test
tests/test_core_base_multi_executor.py
tests/test_core_base_multi_executor.py
import pytest
from datetime import timedelta


@pytest.fixture
def config_yaml(unused_port):
    return """
    e:
      cls: aioworkers.core.base.MultiExecutorEntity
      executors:
        get: 1
        put: 1
        none: none
        x: null
    """


async def test_multiexecutor(context):
    assert await context.e.run_in_executor('get', timedelta, days=1)
    assert await context.e.run_in_executor('none', timedelta, hours=2)
    assert await context.e.run_in_executor('put', timedelta, minutes=1)
    assert await context.e.run_in_executor('x', timedelta, seconds=1)
Python
0
@@ -1,18 +1,4 @@ -import pytest%0A from @@ -25,16 +25,31 @@ edelta%0A%0A +import pytest%0A%0A %0A@pytest
a07ec2940d5fb4121e9c570dc74e38d67ab4bb00
fix test
tests/test_ec2/test_security_groups.py
tests/test_ec2/test_security_groups.py
import boto
from boto.exception import EC2ResponseError
from sure import expect

from moto import mock_ec2


@mock_ec2
def test_create_and_describe_security_group():
    conn = boto.connect_ec2('the_key', 'the_secret')
    security_group = conn.create_security_group('test security group', 'this is a test security group')
    expect(security_group).name.should.equal('test security group')
    security_group.description.should.equal('this is a test security group')

    # Trying to create another group with the same name should throw an error
    conn.create_security_group.when.called_with('test security group', 'this is a test security group').should.throw(EC2ResponseError)

    all_groups = conn.get_all_security_groups()
    all_groups.should.have.length_of(1)
    all_groups[0].name.should.equal('test security group')


@mock_ec2
def test_deleting_security_groups():
    conn = boto.connect_ec2('the_key', 'the_secret')
    security_group1 = conn.create_security_group('test1', 'test1')
    conn.create_security_group('test2', 'test2')

    conn.get_all_security_groups().should.have.length_of(2)

    # Deleting a group that doesn't exist should throw an error
    conn.delete_security_group.when.called_with('foobar').should.throw(EC2ResponseError)

    # Delete by name
    conn.delete_security_group('test2')
    conn.get_all_security_groups().should.have.length_of(1)

    # Delete by group id
    conn.delete_security_group(security_group1.id)
    conn.get_all_security_groups().should.have.length_of(0)


@mock_ec2
def test_authorize_ip_range_and_revoke():
    conn = boto.connect_ec2('the_key', 'the_secret')
    security_group = conn.create_security_group('test', 'test')

    success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32")
    assert success.should.be.true

    security_group = conn.get_all_security_groups()[0]
    int(security_group.rules[0].to_port).should.equal(2222)
    security_group.rules[0].grants[0].cidr_ip.should.equal("123.123.123.123/32")

    # Wrong Cidr should throw error
    security_group.revoke.when.called_with(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError)

    # Actually revoke
    security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32")

    security_group = conn.get_all_security_groups()[0]
    security_group.rules.should.have.length_of(0)


@mock_ec2
def test_authorize_other_group_and_revoke():
    conn = boto.connect_ec2('the_key', 'the_secret')
    security_group = conn.create_security_group('test', 'test')
    other_security_group = conn.create_security_group('other', 'other')
    wrong_group = conn.create_security_group('wrong', 'wrong')

    success = security_group.authorize(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group)
    assert success.should.be.true

    security_group = [group for group in conn.get_all_security_groups() if group.name == 'test'][0]
    int(security_group.rules[0].to_port).should.equal(2222)
    security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id)

    # Wrong source group should throw error
    security_group.revoke.when.called_with(ip_protocol="tcp", from_port="22", to_port="2222", src_group=wrong_group).should.throw(EC2ResponseError)

    # Actually revoke
    security_group.revoke(ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group)

    security_group = [group for group in conn.get_all_security_groups() if group.name == 'test'][0]
    security_group.rules.should.have.length_of(0)
Python
0.000002
@@ -346,14 +346,14 @@ roup -) .name +) .sho
94260ec953de44ae9a4108b964a9a607b0457148
fix new search index
muckrock/news/search_indexes.py
muckrock/news/search_indexes.py
""" Search Index for the news application """ from haystack.indexes import SearchIndex, CharField, DateTimeField from haystack import site from muckrock.news.models import Article class ArticleIndex(SearchIndex): """Search index for news articles""" text = CharField(document=True, use_template=True) author = CharField(model_attr='author') pub_date = DateTimeField(model_attr='pub_date') def get_queryset(self): """Used when the entire index for model is updated.""" return Article.objects.get_published() site.register(Article, ArticleIndex)
Python
0.000075
@@ -315,16 +315,17 @@ author +s = CharF @@ -347,16 +347,17 @@ ='author +s ')%0A p @@ -491,24 +491,56 @@ updated.%22%22%22%0A + # pylint: disable=R0201%0A retu
b0b6080a17fa170c822463098fbed94823b26ec1
Remove unsupported sort key "user"
senlinclient/v1/event.py
senlinclient/v1/event.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Clustering v1 event action implementations"""

import logging

from openstack import exceptions as sdk_exc
from osc_lib.command import command
from osc_lib import exceptions as exc
from osc_lib import utils

from senlinclient.common.i18n import _
from senlinclient.common import utils as senlin_utils


class ListEvent(command.Lister):
    """List events."""

    log = logging.getLogger(__name__ + ".ListEvent")

    def get_parser(self, prog_name):
        parser = super(ListEvent, self).get_parser(prog_name)
        parser.add_argument(
            '--filters',
            metavar='<"key1=value1;key2=value2...">',
            help=_("Filter parameters to apply on returned events. "
                   "This can be specified multiple times, or once with "
                   "parameters separated by a semicolon. The valid filter "
                   "keys are: ['level', 'otype', 'oid' ,'cluster_id', "
                   "'oname', 'action']"),
            action='append'
        )
        parser.add_argument(
            '--limit',
            metavar='<limit>',
            help=_('Limit the number of events returned')
        )
        parser.add_argument(
            '--marker',
            metavar='<id>',
            help=_('Only return events that appear after the given event ID')
        )
        parser.add_argument(
            '--sort',
            metavar='<key>[:<direction>]',
            help=_("Sorting option which is a string containing a list of "
                   "keys separated by commas. Each key can be optionally "
                   "appended by a sort direction (:asc or :desc). The valid "
                   "sort keys are: ['timestamp', 'level', 'otype', "
                   "'oname', 'user', 'action', 'status']")
        )
        parser.add_argument(
            '--global-project',
            default=False,
            action="store_true",
            help=_('Whether events from all projects should be listed. '
                   ' Default to False. Setting this to True may demand '
                   'for an admin privilege')
        )
        parser.add_argument(
            '--full-id',
            default=False,
            action="store_true",
            help=_('Print full IDs in list')
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        senlin_client = self.app.client_manager.clustering

        columns = ['id', 'generated_at', 'obj_type', 'obj_id', 'obj_name',
                   'action', 'status', 'status_reason', 'level']
        queries = {
            'sort': parsed_args.sort,
            'limit': parsed_args.limit,
            'marker': parsed_args.marker,
            'global_project': parsed_args.global_project,
        }
        if parsed_args.filters:
            queries.update(senlin_utils.format_parameters(parsed_args.filters))

        formatters = {}
        if not parsed_args.full_id:
            formatters['id'] = lambda x: x[:8]
            formatters['obj_id'] = lambda x: x[:8] if x else ''

        events = senlin_client.events(**queries)
        return (columns,
                (utils.get_item_properties(e, columns, formatters=formatters)
                 for e in events))


class ShowEvent(command.ShowOne):
    """Describe the event."""

    log = logging.getLogger(__name__ + ".ShowEvent")

    def get_parser(self, prog_name):
        parser = super(ShowEvent, self).get_parser(prog_name)
        parser.add_argument(
            'event',
            metavar='<event>',
            help=_('ID of event to display details for')
        )
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        senlin_client = self.app.client_manager.clustering
        try:
            event = senlin_client.get_event(parsed_args.event)
        except sdk_exc.ResourceNotFound:
            raise exc.CommandError(_("Event not found: %s") % parsed_args.event)
        data = event.to_dict()
        columns = sorted(data.keys())
        return columns, utils.get_dict_properties(data, columns)
Python
0.000003
@@ -2280,16 +2280,8 @@ me', - 'user', 'ac
2bafdb04dc4e04c5a2cf9136135dfa130ac6e78b
read full buffer, not byte by byte
serialSniffer/sniffer.py
serialSniffer/sniffer.py
from serial import Serial
from concurrent.futures import ThreadPoolExecutor


class Sniffer(object):
    """
    TODO: write docstring
    """
    def __init__(self, virtual_comm='COM7', physical_comm='COM1'):
        self.virtual_comm = Serial(virtual_comm)
        self.physical_comm = Serial(physical_comm)
        self.pool = ThreadPoolExecutor(4)
        self.v_bytes = []
        self.p_bytes = []
        self.pool.submit(self.read_physical)
        self.pool.submit(self.read_virtual)

    def read_physical(self):
        """
        TODO: write docstring
        """
        while True:
            p_byte = self.physical_comm.read()
            self.virtual_comm.write(p_byte)
            self.p_bytes.append(p_byte)
            # TODO: store the data somewhere

    def read_virtual(self):
        """
        TODO: write docstring
        """
        while True:
            v_byte = self.virtual_comm.read()
            self.physical_comm.write(v_byte)
            self.v_bytes.append(v_byte)
Python
0.000058
@@ -607,22 +607,88 @@ -p_byte +n = self.physical_comm.inWaiting()%0A if n:%0A msg = self. @@ -712,32 +712,36 @@ d()%0A + + self.virtual_com @@ -748,22 +748,19 @@ m.write( -p_byte +msg )%0A @@ -757,32 +757,36 @@ sg)%0A + + self.p_bytes.app @@ -789,22 +789,19 @@ .append( -p_byte +msg )%0A @@ -956,22 +956,87 @@ -v_byte +n = self.virtual_comm.inWaiting()%0A if n:%0A msg = self. @@ -1059,32 +1059,36 @@ d()%0A + + self.physical_co @@ -1096,22 +1096,19 @@ m.write( -v_byte +msg )%0A @@ -1105,32 +1105,36 @@ sg)%0A + + self.v_bytes.app @@ -1137,16 +1137,13 @@ .append( -v_byte +msg )%0A
acc6efa7c375a5f423dc6ba0a511f354b9474e1e
revert forcing for awdio
timeside/tools/waveform_batch_awdio.py
timeside/tools/waveform_batch_awdio.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2010 Guillaume Pellerin <yomguy@parisson.com>

# This file is part of TimeSide.

# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.

# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.

# Author: Guillaume Pellerin <yomguy@parisson.com>

version = '0.2'

import os
import sys
import timeside
from logger import Logger


class GrapherScheme:

    def __init__(self):
        self.color = 255
        self.color_scheme = {
            'waveform': [
                # Four (R,G,B) tuples for three main color channels for the spectral centroid method
                (self.color,self.color,self.color)
                # (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,0,0)
            ],
            'spectrogram': [
                (0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
                (224,224,44), (255,60,30), (255,255,255)
            ]}

        # Width of the image
        self.width = 572

        # Height of the image
        self.height = 74

        # Background color
        self.bg_color = None

        # Force computation. By default, the class doesn't overwrite existing image files.
        self.force = True


class Media2Waveform:

    def __init__(self, media_dir, img_dir, log_file):
        self.root_dir = os.path.join(os.path.dirname(__file__), media_dir)
        self.img_dir = os.path.join(os.path.dirname(__file__), img_dir)
        self.scheme = GrapherScheme()
        self.width = self.scheme.width
        self.height = self.scheme.height
        self.bg_color = self.scheme.bg_color
        self.color_scheme = self.scheme.color_scheme
        self.force = self.scheme.force
        self.logger = Logger(log_file)

        self.media_list = self.get_media_list()
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)
        self.path_dict = self.get_path_dict()

    def get_media_list(self):
        media_list = []
        for root, dirs, files in os.walk(self.root_dir):
            if root:
                for file in files:
                    ext = file.split('.')[-1]
                    media_list.append(root+os.sep+file)
        return media_list

    def get_path_dict(self):
        path_dict = {}
        for media in self.media_list:
            filename = media.split(os.sep)[-1]
            name, ext = os.path.splitext(filename)
            path_dict[media] = self.img_dir + os.sep + name + '.png'
        return path_dict

    def process(self):
        for audio, image in self.path_dict.iteritems():
            if not os.path.exists(image) or self.force:
                mess = 'Processing ' + audio
                self.logger.write_info(mess)
                pipe = PipeWaveform()
                waveform = pipe.process(audio, self.width, self.height,
                                        self.bg_color, self.color_scheme)
                if os.path.exists(image):
                    os.remove(image)
                mess = 'Rendering ' + image
                self.logger.write_info(mess)
                waveform.render(output=image)
                mess = 'frames per pixel = ' + str(waveform.graph.samples_per_pixel)
                self.logger.write_info(mess)
                waveform.release()


class PipeWaveform:

    def process(self, audio, width, height, bg_color, color_scheme):
        decoder = timeside.decoder.FileDecoder(audio)
        waveform = timeside.grapher.WaveformAwdio(width=width, height=height,
                                                  bg_color=bg_color,
                                                  color_scheme=color_scheme)
        (decoder | waveform).run()
        return waveform


if __name__ == '__main__':
    if len(sys.argv) <= 2:
        print """
        Usage : python waveform_batch /path/to/media_dir /path/to/img_dir

        Dependencies : timeside, python, python-numpy, python-gst0.10, gstreamer0.10-plugins-base

        See http://code.google.com/p/timeside/ for more information.
        """
    else:
        media_dir = sys.argv[-3]
        img_dir = sys.argv[-2]
        log_file = sys.argv[-1]
        m = Media2Waveform(media_dir, img_dir, log_file)
        m.process()
Python
0
@@ -1726,21 +1726,14 @@ e = -True%0A +False%0A %0A%0Acl
dc63540318b1bfa055762b0c70990e4954fa9057
Make -t (--timestamp-results) a boolean flag
simulator.py
simulator.py
#!/usr/bin/env python2.7
#
# Copyright 2011-2013 Colin Scott
# Copyright 2011-2013 Andreas Wundsam
# Copyright 2012-2013 Sam Whitlock
# Copyright 2012-2012 Kyriakos Zarifis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

if sys.version_info < (2, 7):
  raise RuntimeError('''Must use python 2.7 or greater. '''
                     '''See http://www.python.org/download/releases/2.7.4/''')

from sts.util.procutils import kill_procs
from sts.control_flow.fuzzer import Fuzzer
from sts.simulation_state import SimulationConfig
import sts.experiments.setup as experiment_setup
import sts.experiments.lifecycle as exp_lifecycle

import signal
import argparse
import logging
import logging.config

description = """
Run a simulation.
Example usage:

$ %s -c config.fat_tree
""" % (sys.argv[0])

parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description=description)

parser.add_argument('-c', '--config', default='config.fuzz_pox_fattree',
                    help='''experiment config module in the config/ '''
                         '''subdirectory, e.g. config.fuzz_pox_mesh''')

parser.add_argument('-v', '--verbose', action="count", default=0,
                    help='''increase verbosity''')

parser.add_argument('-L', '--log-config', metavar="FILE", dest="log_config",
                    help='''choose a python log configuration file''')

parser.add_argument('-n', '--exp-name', dest="exp_name", default=None,
                    help='''experiment name (determines result directory name)''')

parser.add_argument('-t', '--timestamp-results', dest="timestamp_results",
                    default=None, nargs=1, action="store",
                    type=lambda s: s.lower() in ('y', 'yes', 'on', 't', 'true', '1', 'yeay', 'ja', 'jepp'),
                    help='''whether to append a timestamp to the result directory name''')

parser.add_argument('-p', '--publish', action="store_true", default=False,
                    help='''publish experiment results to git''')

args = parser.parse_args()

# Allow configs to be specified as paths as well as module names
if args.config.endswith('.py'):
  args.config = args.config[:-3].replace("/", ".")

try:
  config = __import__(args.config, globals(), locals(), ["*"])
except ImportError as e:
  try:
    # module path might not have been specified. Try again with path prepended
    config = __import__("config.%s" % args.config, globals(), locals(), ["*"])
  except ImportError:
    raise e

# Set up the experiment results directories
experiment_setup.setup_experiment(args, config)

# Simulator controls the simulation
if hasattr(config, 'control_flow'):
  simulator = config.control_flow
else:
  # We default to a Fuzzer
  simulator = Fuzzer(SimulationConfig())

# Set an interrupt handler
def handle_int(signal, frame):
  import os
  from sts.util.rpc_forker import LocalForker
  sys.stderr.write("Caught signal %d, stopping sdndebug (pid %d)\n" % (signal, os.getpid()))
  if (simulator.simulation_cfg.current_simulation is not None):
    simulator.simulation_cfg.current_simulation.clean_up()
  # kill fork()ed procs
  LocalForker.kill_all()
  sys.exit(13)

signal.signal(signal.SIGINT, handle_int)
signal.signal(signal.SIGTERM, handle_int)
signal.signal(signal.SIGQUIT, handle_int)

# Start the simulation
try:
  # First tell simulator where to log
  simulator.init_results(config.results_dir)
  # Now start the simulation
  simulation = simulator.simulate()
finally:
  if (simulator.simulation_cfg.current_simulation is not None):
    simulator.simulation_cfg.current_simulation.clean_up()
  if args.publish:
    exp_lifecycle.publish_results(config.exp_name, config.results_dir)

sys.exit(simulation.exit_code)
Python
0.015864
@@ -2248,21 +2248,13 @@ ult= -None, nargs=1 +False , ac @@ -2268,117 +2268,14 @@ tore -%22,%0A type=lambda s: s.lower() in ('y', 'yes', 'on', 't', 'true', '1', 'yeay', 'ja', 'jepp') +_true%22 ,%0A
a21d2408dc9ed1696cea17e3cff3adc6dcdea815
Add ability to punish XP farm users.
joku/cogs/debug.py
joku/cogs/debug.py
""" Debug cog. """ import inspect import pprint import traceback import discord from discord.ext import commands from discord.ext.commands import Context from joku.bot import Jokusoramame from joku.checks import is_owner class Debug(object): def __init__(self, bot: Jokusoramame): self.bot = bot @commands.command(pass_context=True) @commands.check(is_owner) async def eval(self, ctx, *, cmd): try: d = eval(cmd) if inspect.isawaitable(d): d = await d except Exception: await ctx.bot.say(''.join(traceback.format_exc())) return await ctx.bot.say("`" + repr(d) + "`") @commands.group() @commands.check(is_owner) async def debug(self): """ Debug command to inspect the bot. Only usable by the owner. """ @debug.command(pass_context=True) async def reloadall(self, ctx): """ Reloads all modules. """ for extension in ctx.bot.extensions.copy(): ctx.bot.unload_extension(extension) ctx.bot.logger.info("Reloaded {}.".format(extension)) ctx.bot.load_extension(extension) await ctx.bot.say("Reloaded all.") @debug.command(pass_context=True) async def reload(self, ctx, module: str): try: ctx.bot.unload_extension(module) ctx.bot.load_extension(module) except Exception as e: await ctx.bot.say(e) else: await ctx.bot.say("Reloaded `{}`.".format(module)) @debug.group() async def rdb(self): """ Command group to inspect the RethinkDB status. """ @rdb.command(pass_context=True) async def inspect(self, ctx, *, user: discord.Member): obb = await ctx.bot.rethinkdb.create_or_get_user(user) p = pprint.pformat(obb) await ctx.bot.say("```json\n{}\n```".format(p)) @rdb.command(pass_context=True) async def info(self, ctx): """ Gets data about the RethinkDB cluster. """ data = await ctx.bot.rethinkdb.get_info() tmp = { "server": data["server_info"]["name"], "server_id": data["server_info"]["id"], "jobs": len(data["jobs"]), "clients": data["stats"]["query_engine"]["client_connections"] } await ctx.bot.say("""**RethinkDB stats**: **Connected to server** `{server}` (`{server_id}`). There are `{jobs}` job(s) across `{clients}` clients. """.format(**tmp)) def setup(bot): bot.add_cog(Debug(bot))
Python
0
@@ -1567,24 +1567,390 @@ t(module))%0A%0A + @debug.command(pass_context=True)%0A async def punish(self, ctx: Context, user: discord.User):%0A %22%22%22%0A Punishes a user.%0A%0A Sets their EXP to a very large negative number.%0A %22%22%22%0A await ctx.bot.rethinkdb.update_user_xp(user, xp=-3.4756738956329854e+307)%0A await ctx.bot.say(%22:skull: User %7B%7D has been punished.%22.format(user))%0A%0A @debug.g
0334ef016eae57934e3e1497f655d76aaa51c632
Remove now unnecessary apiUrl placeholder.
server/guiserver/apps.py
server/guiserver/apps.py
# This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Juju GUI server applications."""

import time

from pyramid.config import Configurator
from tornado import web
from tornado.options import options
from tornado.wsgi import WSGIContainer

from guiserver import (
    auth,
    handlers,
    utils,
)
from guiserver.bundles.base import Deployer

from jujugui import make_application


# Define the template to use for building the WebSocket URL.
WEBSOCKET_URL_TEMPLATE = '/api/$server/$port/$uuid'


def server():
    """Return the main server application.

    The server app is responsible for serving the WebSocket connection, the
    Juju GUI static files and the main index file for dynamic URLs.
    """
    # Set up the bundle deployer.
    deployer = Deployer(options.apiurl, options.apiversion,
                        options.charmworldurl)
    # Set up handlers.
    server_handlers = []
    if options.sandbox:
        # Sandbox mode.
        server_handlers.append(
            (r'^/ws(?:/.*)?$', handlers.SandboxHandler, {}))
    else:
        # Real environment.
        tokens = auth.AuthenticationTokenHandler()
        websocket_handler_options = {
            # The Juju API backend url.
            'apiurl': options.apiurl,
            # The backend to use for user authentication.
            'auth_backend': auth.get_backend(options.apiversion),
            # The Juju deployer to use for importing bundles.
            'deployer': deployer,
            # The tokens collection for authentication token requests.
            'tokens': tokens,
            # The WebSocket URL template.
            'ws_url_template': WEBSOCKET_URL_TEMPLATE,
        }
        juju_proxy_handler_options = {
            'target_url': utils.ws_to_http(options.apiurl),
            'charmworld_url': options.charmworldurl,
        }
        server_handlers.extend([
            # Handle WebSocket connections.
            (r'^/ws(?:/.*)?$', handlers.WebSocketHandler,
             websocket_handler_options),
            # Handle connections to the juju-core HTTPS server.
            # The juju-core HTTPS and WebSocket servers share the same URL.
            (r'^/juju-core/(.*)', handlers.JujuProxyHandler,
             juju_proxy_handler_options),
        ])
    if options.testsroot:
        params = {'path': options.testsroot, 'default_filename': 'index.html'}
        server_handlers.append(
            # Serve the Juju GUI tests.
            (r'^/test/(.*)', web.StaticFileHandler, params),
        )
    info_handler_options = {
        'apiurl': options.apiurl,
        'apiversion': options.apiversion,
        'deployer': deployer,
        'sandbox': options.sandbox,
        'start_time': int(time.time()),
    }
    if options.sandbox:
        apiUrl = ''
    else:
        apiUrl = options.apiurl
    wsgi_settings = {
        'jujugui.sandbox': options.sandbox,
        'jujugui.raw': options.jujuguidebug,
        'jujugui.combine': not options.jujuguidebug,
        'jujugui.apiAddress': apiUrl,
        'jujugui.socketTemplate': WEBSOCKET_URL_TEMPLATE,
        'jujugui.jujuCoreVersion': options.jujuversion,
        'jujugui.jem_url': options.jemlocation,
        'jujugui.uuid': options.uuid,
        'jujugui.interactive_login': options.interactivelogin,
        'jujugui.gzip': options.gzip,
    }
    if options.password:
        wsgi_settings['jujugui.password'] = options.password
    config = Configurator(settings=wsgi_settings)
    wsgi_app = WSGIContainer(make_application(config))
    server_handlers.extend([
        # Handle GUI server info.
        (r'^/gui-server-info', handlers.InfoHandler, info_handler_options),
        (r".*", web.FallbackHandler, dict(fallback=wsgi_app))
    ])
    return web.Application(server_handlers, debug=options.debug)


def redirector():
    """Return the redirector application.

    The redirector app is responsible for redirecting HTTP traffic to HTTPS.
    """
    return web.Application([
        # Redirect all HTTP traffic to HTTPS.
        (r'.*', handlers.HttpsRedirectHandler),
    ], debug=options.debug)
Python
0
@@ -3487,94 +3487,8 @@ %7D%0A - if options.sandbox:%0A apiUrl = ''%0A else:%0A apiUrl = options.apiurl%0A @@ -3681,12 +3681,20 @@ s': +options. api -U +u rl,%0A
f996f81b09e328cfe3533ed92c1c3efca8c12a2c
Remove autocomplete endpoint since nominatim does not autocomplete
server/rest/twofishes.py
server/rest/twofishes.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

###############################################################################
#  Copyright Kitware Inc.
#
#  Licensed under the Apache License, Version 2.0 ( the "License" );
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
###############################################################################

import json
import StringIO

import geojson
import requests
from shapely.wkt import loads

from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from girder.utility.model_importer import ModelImporter
from girder.plugins.minerva.rest.geojson_dataset import GeojsonDataset
from girder.plugins.minerva.utility.minerva_utility import findDatasetFolder


class TwoFishes(Resource):
    """Resource that handles geocoding related operations"""

    def __init__(self):
        super(TwoFishes, self).__init__()
        self.resourceName = 'minerva_geocoder'
        self.route('GET', ('autocomplete',), self.autocomplete)
        self.route('GET', ('geojson',), self.getGeojson)
        self.route('POST', ('geojson',), self.postGeojson)

    @staticmethod
    def getWktFromTwoFishes(twofishes, location):
        """Gets wkt from twofishes for a given location"""
        r = requests.get(twofishes,
                         params={'query': location,
                                 'responseIncludes': 'WKT_GEOMETRY'})
        wkt = r.json()['interpretations'][0]['feature']['geometry']['wktGeometry']
        return wkt

    @staticmethod
    def createGeometryFromWkt(wkt):
        """Creates a shapely geometry from wkt"""
        return loads(wkt)

    @staticmethod
    def createGeojson(twofishes, locations):
        """Create geojson for given locations and twofishes url"""
        geoms = []
        for i in locations:
            wkt = TwoFishes.getWktFromTwoFishes(twofishes, i)
            geom = TwoFishes.createGeometryFromWkt(wkt)
            for g in geom:
                geoms.append(geojson.Feature(geometry=g,
                                             properties={'location': i}))
        multiPoly = geojson.FeatureCollection(geoms)
        return multiPoly

    def createMinervaDataset(self, geojsonString, name):
        """Creates a dataset from a geojson string"""
        output = StringIO.StringIO(json.dumps(geojsonString))
        outputSize = output.len
        user = self.getCurrentUser()
        datasetFolder = findDatasetFolder(user, user, create=True)
        itemModel = ModelImporter.model('item')
        uploadModel = ModelImporter.model('upload')
        item = itemModel.createItem(name, user, datasetFolder)
        geojsonFile = uploadModel.uploadFromFile(output, outputSize, name,
                                                 'item', item, user)
        GeojsonDataset().createGeojsonDataset(itemId=geojsonFile['itemId'],
                                              params={})
        return geojsonFile

    @access.public
    def autocomplete(self, params):
        r = requests.get(params['twofishes'],
                         params={'autocomplete': True,
                                 'query': params['location'],
                                 'maxInterpretations': 10,
                                 'autocompleteBias': None})

        return [i['feature']['matchedName'] for i in r.json()['interpretations']]

    autocomplete.description = (
        Description('Autocomplete result for a given location name')
        .param('twofishes', 'Twofishes url')
        .param('location', 'Location name to autocomplete')
    )

    @access.public
    def getGeojson(self, params):
        locations = json.loads(params['locations'])
        geojson = TwoFishes.createGeojson(params['twofishes'], locations)
        return geojson

    getGeojson.description = (
        Description('Create a geojson string from multiple locations')
        .param('twofishes', 'Twofishes url')
        .param('locations', 'List of locations', dataType='list')
    )

    @access.public
    def postGeojson(self, params):
        twofishes = params['twofishes']
        try:
            locationInfo = json.loads(params['locations'])
            geojson = TwoFishes.createGeojson(twofishes, locationInfo)
        except ValueError:
            locationInfo = params['locations']
            geojson = TwoFishes.createGeojson(twofishes, locationInfo)
        minervaDataset = self.createMinervaDataset(geojson, params['name'])
        return minervaDataset

    postGeojson.description = (
        Description('Create a minerva dataset from the search result/results')
        .param('twofishes', 'Twofishes url')
        .param('locations', 'Location name or list of locations to get a geojson')
        .param('name', 'Name for the geojson dataset')
    )
Python
0.001396
@@ -1395,72 +1395,8 @@ er'%0A - self.route('GET', ('autocomplete',), self.autocomplete)%0A @@ -3345,643 +3345,8 @@ le%0A%0A - @access.public%0A def autocomplete(self, params):%0A r = requests.get(params%5B'twofishes'%5D,%0A params=%7B'autocomplete': True,%0A 'query': params%5B'location'%5D,%0A 'maxInterpretations': 10,%0A 'autocompleteBias': None%7D)%0A%0A return %5Bi%5B'feature'%5D%5B'matchedName'%5D for i in r.json()%5B'interpretations'%5D%5D%0A%0A autocomplete.description = (%0A Description('Autocomplete result for a given location name')%0A .param('twofishes', 'Twofishes url')%0A .param('location', 'Location name to autocomplete')%0A )%0A%0A
cf60bcf1a06998bc2980cf793a6b7ffe086b9369
Factor out test cases into common function
server/test-querylang.py
server/test-querylang.py
"""Test Python utilities related to query language processing.""" import itertools import json import unittest import querylang as ql data = [ {'age': 19}, {'age': 21}, {'age': 23} ] class TestQueryLanguage(unittest.TestCase): """Test suite.""" def test_operator_expressions(self): """Test operator expressions.""" asts = None with open('test/operator-ast-baselines.json') as f: asts = json.load(f) baseline = [ True, False, False, True, True, False, False, False, True, False, True, True, False, True, False, True, False, True ] funcs = map(ql.astToFunction, asts) for ((f, a), d), b in zip(itertools.product(zip(funcs, asts), data), baseline): self.assertEqual(f(d), b, '%s on %s should be %s' % (a, d, not b)) def test_conjunction_expressions(self): """Test conjunction expressions.""" asts = None with open('test/conjunction-ast-baselines.json') as f: asts = json.load(f) baseline = [ False, True, False ] funcs = map(ql.astToFunction, asts) for ((f, a), d), b in zip(itertools.product(zip(funcs, asts), data), baseline): self.assertEqual(f(d), b, '%s on %s should be %s' % (a, d, not b)) if __name__ == '__main__': unittest.main()
Python
0.999999
@@ -259,25 +259,24 @@ ite.%22%22%22%0A -%0A def test_ope @@ -271,21 +271,11 @@ def -test_operator +run _exp @@ -291,46 +291,113 @@ self -):%0A %22%22%22Test operator expression +, ast_file, data, baseline):%0A %22%22%22Test several ASTs on a sequence of data against a set of baseline s.%22%22 @@ -440,42 +440,16 @@ pen( -'test/operator-ast-baselines.json' +ast_file ) as @@ -489,228 +489,8 @@ f)%0A%0A - baseline = %5B%0A True, False, False,%0A True, True, False,%0A False, False, True,%0A False, True, True,%0A False, True, False,%0A True, False, True%0A %5D%0A%0A @@ -711,27 +711,24 @@ ef test_ -conjunction +operator _express @@ -755,27 +755,24 @@ %22%22%22Test -conjunction +operator express @@ -792,55 +792,43 @@ -asts = None%0A with open('test/conjunction +self.run_expressions('test/operator -ast @@ -843,23 +843,25 @@ es.json' -) as f: +, data, %5B %0A @@ -869,49 +869,90 @@ -asts = json.load(f)%0A%0A baseline = %5B +True, False, False,%0A True, True, False,%0A False, False, True, %0A @@ -973,21 +973,21 @@ , True, -False +True, %0A @@ -991,141 +991,115 @@ -%5D%0A%0A - funcs = map(ql.astToFunction, asts)%0A%0A for ((f, a), d), b in zip(itertools.product(zip(funcs, asts), data), baseline +False, True, False,%0A True, False, True%0A %5D)%0A%0A def test_conjunction_expressions(self ):%0A @@ -1107,79 +1107,140 @@ + - self.assertEqual(f(d), b, '%25s on %25s should be %25s' %25 (a, d, not b) +%22%22%22Test conjunction expressions.%22%22%22%0A self.run_expressions('test/conjunction-ast-baselines.json', data, %5BFalse, True, False%5D )%0A%0A%0A
eac867faba8d4653fa580ee0c2bd708ff83b13ee
Remove test workaround
server/tests/test_api.py
server/tests/test_api.py
import json

from server.tests.base import BaseTestCase
from server.models import db, Lecturer, Course, Lecture, Comment


class GetCommentsApiTest(BaseTestCase):
    def setUp(self):
        super(GetCommentsApiTest, self).setUp()

        simon = Lecturer('Simon', 'McCallum')
        db.session.add(simon)

        imt3601 = Course('IMT3601 - Game Programming', simon)
        db.session.add(imt3601)

        imt3601_l1 = Lecture('Lecture 1', imt3601)
        db.session.add(imt3601_l1)

        imt3601_l1_c1 = Comment('This is boring', imt3601_l1)
        imt3601_l1_c2 = Comment('This is fun!', imt3601_l1)
        db.session.add(imt3601_l1_c1)
        db.session.add(imt3601_l1_c2)

        db.session.commit()

    def test_all(self):
        self._test_success()
        self._test_lecture_not_found()
        self._test_list()
        self._test_content()

    def _test_success(self):
        rv = self.app.get('/api/0/lectures/1/comments')
        assert rv.status_code == 200

    def _test_lecture_not_found(self):
        rv = self.app.get('/api/0/lectures/2/comments')
        assert rv.status_code == 404

    def _test_list(self):
        rv = self.app.get('/api/0/lectures/1/comments')
        assert rv.headers['Content-Type'] == 'application/json'
        response = json.loads(rv.data.decode('utf-8'))
        assert len(response['comments']) == 2

    def _test_content(self):
        rv = self.app.get('/api/0/lectures/1/comments')
        assert rv.headers['Content-Type'] == 'application/json'
        response = json.loads(rv.data.decode('utf-8'))
        assert response['comments'][0]['content'] == 'This is boring'
Python
0
@@ -724,157 +724,8 @@ def -test_all(self):%0A self._test_success()%0A self._test_lecture_not_found()%0A self._test_list()%0A self._test_content()%0A%0A def _ test @@ -732,32 +732,32 @@ _success(self):%0A + rv = sel @@ -838,25 +838,24 @@ 00%0A%0A def -_ test_lecture @@ -970,25 +970,24 @@ 04%0A%0A def -_ test_list(se @@ -1214,24 +1214,24 @@ ) == 2%0A%0A + def _test_co @@ -1222,17 +1222,16 @@ def -_ test_con
4e1653e1d00c1f4001e37de3cc650dc11d4b6ca5
fix stack inspection in render_to_response to work on windows
lib/rapidsms/utils/render_to_response.py
lib/rapidsms/utils/render_to_response.py
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4


import os
import sys
import inspect


def render_to_response(req, template_name, dictionary=None, **kwargs):
    """
    Wrap django.shortcuts.render_to_response, to avoid having to include
    the same stuff in every view. TODO: moar.
    """

    # delay imports until this function is called, so this module can be
    # imported before django is finished being configured
    import django.shortcuts
    import django.template

    # find the view which this function was called from
    view = _extract_callback()
    module = sys.modules[view.__module__]

    # if the module containing 'view' contains a __context__ function,
    # call it and add the output (a dict or None) to the view context
    if hasattr(module, "__context__"):
        app_dict = module.__context__(req, view)
        if app_dict is not None:

            # merge backwards, so the view can overwrite values from the
            # app without iterating and checking them individually
            tmp = dictionary or {}
            dictionary = app_dict
            dictionary.update(tmp)

    # unless a context instance was specified, default to RequestContext
    # to get all of the TEMPLATE_CONTEXT_PROCESSORS working. (this is a
    # really crappy part of django, which makes app reuse difficult.)
    if "context_instance" not in kwargs:
        kwargs["context_instance"] =\
            django.template.RequestContext(req)

    # pass along the combined dicts to the original function
    return django.shortcuts.render_to_response(
        template_name, dictionary, **kwargs)


def _extract_callback():
    """
    Extract and return the view function which is handling this request
    (by inspecting the stack!), or Raise Exception if called outside of
    a request.
    """

    # find the filename of the django.core.handlers.base module, which
    # calls the view (in get_response:92 (of django 1.1)). split the
    # filename, since it's always the source (.py file) the stack, but
    # module (.pyc file) in sys.modules
    handler_module = sys.modules["django.core.handlers.base"]
    root, ext = os.path.splitext(handler_module.__file__)

    # iterate the stack, searching for django.core.handlers.base. when
    # we find it, extract the 'callback' variable, which contains the
    # original view function that this request was dispatched to
    for tpl in inspect.stack(0):
        try:
            frame, filename = tpl[0:2]
            if os.path.splitext(filename)[0] == root:
                return frame.f_locals['callback']

        # release the frame object to avoid gc cycles, as advised by:
        # http://docs.python.org/library/inspect.html#the-interpreter-stack
        finally:
            del frame

    # if we haven't returned yet, we're probably being called from
    # somewhere other than a request. this makes no sense, so explode
    raise Exception("Couldn't find django.core.handlers.base in the stack.")
Python
0
@@ -2506,16 +2506,33 @@ if +os.path.normcase( os.path. @@ -2556,16 +2556,35 @@ )%5B0%5D +) == +os.path.normcase( root +) :%0A
ff1a07783b0d284871a1f6195579768aab1c806b
Handle multiple results with cached geolocation better
molly/geolocation/utils.py
molly/geolocation/utils.py
from functools import wraps from django.conf import settings from django.contrib.gis.geos import Point from molly.conf import get_app from models import Geocode __all__ = ['geocode', 'reverse_geocode'] def _cached(getargsfunc): def g(f): @wraps(f) def h(*args, **kwargs): args = getargsfunc(*args, **kwargs) app = get_app('molly.geolocation', args.pop('local_name', None)) try: return Geocode.recent.get(local_name=app.local_name, **args).results except Geocode.DoesNotExist: pass except Geocode.MultipleObjectsReturned: Geocode.recent.filter(local_name=app.local_name, **args).delete() results = f(providers=app.providers, **args) i = 0 while i < len(results): loc, name = Point(results[i]['location'], srid=4326).transform(settings.SRID, clone=True), results[i]['name'] if any((r['name'] == name and Point(r['location'], srid=4326).transform(settings.SRID, clone=True).distance(loc) < 100) for r in results[:i]): results[i:i+1] = [] else: i += 1 if hasattr(app, 'prefer_results_near'): point = Point(app.prefer_results_near[:2], srid=4326).transform(settings.SRID, clone=True) distance = app.prefer_results_near[2] filtered_results = [ result for result in results if Point(result['location'], srid=4326).transform(settings.SRID, clone=True).distance(point) <= distance] if filtered_results: results = filtered_results geocode, _ = Geocode.objects.get_or_create(local_name = app.local_name, **args) geocode.results = results geocode.save() return results return h return g @_cached(lambda query,local_name=None:{'query':query, 'local_name':local_name}) def geocode(query, providers): results = [] for provider in providers: results += provider.geocode(query) return results @_cached(lambda lon,lat,local_name=None:{'lon': lon, 'lat':lat, 'local_name':local_name}) def reverse_geocode(lon, lat, providers): results = [] for provider in providers: results += provider.reverse_geocode(lon, lat) return results
Python
0
@@ -1714,16 +1714,37 @@ esults%0A%0A + try:%0A @@ -1819,16 +1819,314 @@ l_name,%0A + **args)%0A except Geocode.MultipleObjectsReturned:%0A Geocode.objects.filter(local_name = app.local_name, **args).delete()%0A geocode, _ = Geocode.objects.get_or_create(local_name = app.local_name,%0A
c4f73ea6b3598f3b2e9d8326a6f5d69b73178aa2
Change default node log level to 'info'
ocradmin/lib/nodetree/node.py
ocradmin/lib/nodetree/node.py
""" Base class for OCR nodes. """ import logging FORMAT = '%(levelname)-5s %(module)s: %(message)s' logging.basicConfig(format=FORMAT) LOGGER = logging.getLogger("Node") LOGGER.setLevel(logging.DEBUG) import cache class NodeError(Exception): def __init__(self, node, msg): super(NodeError, self).__init__(msg) self.node = node self.msg = msg class UnsetParameterError(NodeError): pass class ValidationError(NodeError): pass class InvalidParameterError(NodeError): pass class InputOutOfRange(NodeError): pass class CircularDagError(NodeError): pass def noop_abort_func(*args): """ A function for nodes to call that signals that they should abort. By default it does nothing. """ return False def noop_progress_func(*args): """ A function for nodes to call that reports on their progress. By default it does nothing. """ pass class Node(object): """ Node object. Evaluates some input and return the output. """ name = "Base::None" description = "Base node" arity = 1 # number of inputs passthrough = 0 # input to pass through if node ignored stage = "general" _parameters = [ ] def __init__(self, label=None, abort_func=None, cacher=None, progress_func=None, logger=None, ignored=False): """ Initialise a node. """ self.abort_func = abort_func if abort_func is not None \ else noop_abort_func self.logger = logger if logger is not None \ else LOGGER self.progress_func = progress_func if progress_func is not None \ else noop_progress_func self._cacher = cacher if cacher is not None \ else cache.BasicCacher(logger=self.logger) self._params = {} self.label = label self._parents = [] self._inputs = [None for n in range(self.arity)] self._inputdata = [None for n in range(self.arity)] self.logger.debug("Initialised %s with cacher: %s" % (self.label, self._cacher)) self.ignored = ignored @classmethod def parameters(cls): return cls._parameters def set_param(self, param, name): """ Set a parameter. """ self._params[param] = name def _set_p(self, p, v): """ Set a parameter internally. """ pass def _eval(self): """ Perform actual processing. """ pass def add_parent(self, n): """ Add a parent node. """ if self == n: raise CircularDagError(self, "added as parent to self") if not n in self._parents: self._parents.append(n) def has_parents(self): """ Check if the node is a terminal node or if there's a tree further down. """ return bool(len(self._parents)) def set_input(self, num, n): """ Set an input. num: 0-based input number node: input node """ if num > len(self._inputs) - 1: raise InputOutOfRange(self, "Input '%d'" % num) if n is not None: n.add_parent(self) self._inputs[num] = n def mark_dirty(self): """ Tell the node it needs to reevaluate. """ self.logger.debug("%s marked dirty", self) for parent in self._parents: parent.mark_dirty() self._cacher.clear_cache() def set_cache(self, cache): """ Set the cache on a node, preventing it from eval'ing its inputs. """ self._cacher.set_cache(self, cache) def eval_input(self, num): """ Eval an input node. """ if self._inputs[num] is not None: return self._inputs[num].eval() def eval_inputs(self): """ Eval all inputs and store the data in self._inputdata. """ for i in range(len(self._inputs)): self._inputdata[i] = self.eval_input(i) def get_input_data(self, num): """ Fetch data for a given input, eval'ing it if necessary. """ if self._inputdata[num] is None: self._inputdata[num] = self.eval_input(num) return self._inputdata[num] return self._inputdata[num] def validate(self): """ Check params are present and correct. 
""" if self.arity > 0: for n in self._inputs: if n is not None: n.validate() self._validate() def _validate(self): pass def hash_value(self): """ Get a representation of this node's current state. This is a data structure the node type, it's parameters, and it's children's hash_values. """ # if ignore, return the hash of the # passthrough input if self.arity > 0 and self.ignored: return self._inputs[self.passthrough].hash_value() def makesafe(val): if isinstance(val, unicode): return val.encode() elif isinstance(val, float): return str(val) return val return dict( name=self.name.encode(), params=[[makesafe(v) for v in p] for p \ in self._params.iteritems()], children=[n.hash_value() for n in self._inputs \ if n is not None] ) def null_data(self): """ What we return when ignored. """ if self.arity > 0: return self.eval_input(self.passthrough) def first_active(self): """ Get the first node in the tree that is active. If not ignored this is 'self'. """ if self.arity > 0 and self.ignored: return self._inputs[self.passthrough].first_active() return self def eval(self): """ Eval the node. """ if self.ignored: self.logger.debug("Ignoring node: %s", self) return self.null_data() self.validate() for p, v in self._params.iteritems(): self.logger.debug("Set Param %s.%s -> %s", self, p, v) self._set_p(p, v) if self._cacher.has_cache(self): self.logger.debug("%s returning cached input", self) return self._cacher.get_cache(self) self.eval_inputs() self.logger.debug("Evaluating '%s' Node", self) data = self._eval() self._cacher.set_cache(self, data) return data def __repr__(self): return "<%s: %s: %s" % (self.__class__.__name__, self.name, self.label) def __str__(self): return "%s<%s>" % (self.label, self.name)
Python
0
@@ -191,13 +191,12 @@ ing. -DEBUG +INFO )%0A%0Ai
cf8cba155edb7f1d27fce7b20aaafce044415a22
Update 35b556aef8ef_add_news_flash_table.py
alembic/versions/35b556aef8ef_add_news_flash_table.py
alembic/versions/35b556aef8ef_add_news_flash_table.py
"""add new table Revision ID: 35b556aef8ef Revises: 3c8ad66233c0 Create Date: 2018-12-10 11:31:29.518909 """ # revision identifiers, used by Alembic. revision = '35b556aef8ef' down_revision = '423a7ea74c0a' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('news_flash', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('accident', sa.Boolean(), nullable=True), sa.Column('author', sa.Text(), nullable=True), sa.Column('date', sa.TIMESTAMP(), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('lat', sa.Float(), nullable=True), sa.Column('link', sa.Text(), nullable=True), sa.Column('lon', sa.Float(), nullable=True), sa.Column('title', sa.Text(), nullable=True), sa.Column('source', sa.Text(), nullable=True), sa.Column('location', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id') ) def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('news_flash') ### end Alembic commands ###
Python
0
@@ -50,20 +50,20 @@ es: -3c8ad66233 +423a7ea74 c0 +a %0ACre
5f12bac216f5380e55dc65ccfcd16e369c575dac
Use redis on localhost
app.py
app.py
from flask import Flask
from redis import Redis
import os

app = Flask(__name__)
redis = Redis(host= "redis", port=6379)


@app.route('/')
def hello():
    redis.incr('hits')
    return 'This page has been seen {0} times' . format (redis.get( 'hits' ))

if __name__ == "__main__":
    app.run(host= "0.0.0.0", debug=True)
Python
0.000001
@@ -95,21 +95,25 @@ (host= %22 -redis +localhost %22, port=
6bce13a7c0896c9f19b1a9187460467464d80a4c
enable cross domain origin
app.py
app.py
#!/usr/bin/env python

import urllib
import requests
import json
import os

from datetime import date, timedelta
from flask import Flask
from flask import request, Response
from flask import make_response
from geopy.geocoders import Nominatim

geolocator = Nominatim()

# Flask app should start in global layout
app = Flask(__name__)

@app.route('/webhook', methods=['POST'])
def webhook():
    req = request.get_json(silent=True, force=True)
    res = processHumanAPIRequest(req)
    res = json.dumps(res, indent=4)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r

def processHumanAPIRequest(req):
    # username = '31c54b1e5bb56c1e8c4a49e22df3254d20eaed3f:'
    # password = None
    # clientID = "b7cfb75957a179cc624695699ce5a136a01a9196"
    # yesterday = date.today() - timedelta(1)
    # url = "https://api.humanapi.co/v1/apps/"+ clientID +"/users/activities/summaries?updated_since=" + yesterday.strftime("%Y%m%d%H%M%S%Z" )+"&source=moves&limit=1"
    # r = requests.get(url, auth=(username, password))
    # print r.json()

    activityurl = "https://api.humanapi.co/v1/human/activities/summaries?access_token="
    locationurl = "https://api.humanapi.co/v1/human/locations?access_token="

    daskalov_access_token = os.environ['HUMANAPI_ACCESS_TOKEN_DASKALOV']
    daskalov_activity_url = activityurl + daskalov_access_token + "&source=moves&limit=1"
    daskalov_location_url = locationurl + daskalov_access_token + "&source=moves&limit=1"
    daskalov_activity = urllib.urlopen(daskalov_activity_url).read()
    daskalov_location = urllib.urlopen(daskalov_location_url).read()
    daskalov_activity_data = json.loads(daskalov_activity)
    daskalov_location_data = json.loads(daskalov_location)
    daskalov_data = parseHumanData(daskalov_activity_data, daskalov_location_data)

    ari_access_token = os.environ['HUMANAPI_ACCESS_TOKEN_ARI']
    ari_activity_url = activityurl + ari_access_token + "&source=moves&limit=1"
    ari_location_url = locationurl + ari_access_token + "&source=moves&limit=1"
    ari_activity = urllib.urlopen(ari_activity_url).read()
    ari_location = urllib.urlopen(ari_location_url).read()
    ari_activity_data = json.loads(ari_activity)
    ari_location_data = json.loads(ari_location)
    ari_data = parseHumanData(ari_activity_data, ari_location_data)

    res = makeWebhookResult(ari_data,daskalov_data)
    return res

def parseHumanData(activity_data,location_data):
    steps = activity_data[0]["steps"]
    calories = activity_data[0]["calories"]
    lat = str(location_data[0]["location"]["lat"])
    lon = str(location_data[0]["location"]["lon"])
    coord = lat + "," + lon
    location = geolocator.reverse(coord, exactly_one=True)
    address = location.raw['address']
    city = address.get('city', '')
    state = address.get('state', '')
    data = [steps, calories,city, state]
    return data

def makeWebhookResult(ari_data, daskalov_data):
    return {
        "Ari": {
            "steps": ari_data[0],
            "calories": ari_data[1],
            "city": ari_data[2],
            "state": ari_data[3]
        },
        "Alexandra": {
            "steps": ari_data[0],
            "calories": ari_data[1],
            "city": ari_data[2],
            "state": ari_data[3]
        },
        "Daskalov": {
            "steps": daskalov_data[0],
            "calories": daskalov_data[1],
            "city": daskalov_data[2],
            "state": daskalov_data[3]
        },
        "Nadim": {
            "steps": ari_data[0],
            "calories": ari_data[1],
            "city": ari_data[2],
            "state": ari_data[3]
        },
        "Imran": {
            "steps": ari_data[0],
            "calories": ari_data[1],
            "city": ari_data[2],
            "state": ari_data[3]
        }
    }

if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print "Starting app on port %d" % port
    app.run(debug=False, port=port, host='0.0.0.0')
Python
0
@@ -369,16 +369,41 @@ POST'%5D)%0A +@crossdomain(origin='*')%0A def webh @@ -627,16 +627,16 @@ turn r%0A%0A + def proc @@ -664,442 +664,8 @@ q):%0A -%0A # username = '31c54b1e5bb56c1e8c4a49e22df3254d20eaed3f:'%0A # password = None%0A # clientID = %22b7cfb75957a179cc624695699ce5a136a01a9196%22%0A # yesterday = date.today() - timedelta(1)%0A # url = %22https://api.humanapi.co/v1/apps/%22+ clientID +%22/users/activities/summaries?updated_since=%22 + yesterday.strftime(%22%25Y%25m%25d%25H%25M%25S%25Z%22 )+%22&source=moves&limit=1%22%0A # r = requests.get(url, auth=(username, password))%0A # print r.json()%0A%0A
a1c558027cd17eec69a2babb786e35b147ffae6b
add start.html
app.py
app.py
#!flask/bin/python
from flask import request, render_template, Flask
import os, sys, json
import requests
app = Flask(__name__)
from random import randint
from telesign.messaging import MessagingClient
from telesign.voice import VoiceClient
from flask import request

@app.route('/')
def index():
    return render_template('start.html')

@app.route('/profile')
def index():
	

@app.route('/send')
def send():
    args = request.args
    customer_id = "2C1097F6-3917-4A53-9D38-C45A3C8ADD2B"
    api_key = "FTgHUVjcPWvgzCvtksi2v+tMLTAXbh5LLVEl1Wcl4NAtszxElZL4HS/ZwJqJufRkEmRpwUTwULxsZgL2c649vQ=="
    phone_number = "14084299128"

    message = args['msg']
    message_type = "ARN"

    messaging = MessagingClient(customer_id, api_key)
    response2 = messaging.message(phone_number, message, message_type)

    voice = VoiceClient(customer_id, api_key)
    response1 = voice.call(phone_number, message, message_type)
    return "success"

if __name__ == '__main__':
    app.secret_key = os.urandom(12)
    #app.run(debug=True)
Python
0.000002
@@ -338,48 +338,9 @@ l')%0A -%0A@app.route('/profile')%0Adef index():%0A%09%0A%0A +%09 %0A@ap
3b8c55fd13b314dd737b9d50ab0cce8a9d178bb9
save os_driver
occinet/api/openstack_driver.py
occinet/api/openstack_driver.py
# -*- coding: utf-8 -*-

# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import copy
import json
import os

import six.moves.urllib.parse as urlparse
import webob.exc

from occinet.drivers import request


class OpenStackNet(request.BaseHelper):
    """Class to interact with the nova API."""

    @staticmethod
    def tenant_from_req(req):
        try:
            return req.environ["keystone.token_auth"].user.project_id
        except AttributeError:
            return req.environ["keystone.token_info"]["token"]["project"]["id"]

    def _get_index_req(self, req):
        tenant_id = self.tenant_from_req(req)
        path = "/%s/servers" % tenant_id
        return self._get_req(req, path=path, method="GET")
Python
0.000003
@@ -832,11 +832,14 @@ he n -ova +eutron API @@ -888,24 +888,216 @@ m_req(req):%0A + %22%22%22Return the tenant id%0A This method retrieve a list of network to which the tenant has access.%0A :param req: the original request%0A :returns: tenant Id%0A %22%22%22%0A try: @@ -1310,24 +1310,242 @@ self, req):%0A + %22%22%22Return a new Request object to interact with OpenStack.%0A This method retrieve a request ready to list networks%0A :param req: the original request%0A%0A :returns: request modified%0A %22%22%22%0A tena @@ -1599,17 +1599,18 @@ = %22/ -%25s/server +networks?%25 s%22 %25 @@ -1679,8 +1679,368 @@ =%22GET%22)%0A +%0A def index(self, req):%0A %22%22%22Get a list of servers for a tenant.%0A This method retrieve a list of network to which the tenant has access.%0A :param req: the incoming request%0A %22%22%22%0A os_req = self._get_index_req(req)%0A response = os_req.get_response(self.app)%0A return self.get_from_response(response, %22networks%22, %5B%5D)
be3fc40d021ee9a43ca524bcafdcdad7896de9a5
Add remaining completed locales to prod settings.
mrburns/settings/server.py
mrburns/settings/server.py
import os
import socket

from django.utils.translation import ugettext_lazy as _

from .base import *  # noqa


SERVER_ENV = os.getenv('DJANGO_SERVER_ENV')
SECRET_KEY = os.getenv('SECRET_KEY')
DEBUG = TEMPLATE_DEBUG = False

ALLOWED_HOSTS = [
    # the server's IP (for monitors)
    socket.gethostbyname(socket.gethostname()),
]
if SERVER_ENV == 'prod':
    ALLOWED_HOSTS.extend([
        'webwewant.mozilla.org',
        'glow.cdn.mozilla.net',
        'glow-origin.cdn.mozilla.net',
    ])
    STATIC_URL = 'https://glow.cdn.mozilla.net/static/'
    LANGUAGES = (
        ('de', _('German')),
        ('en', _('English')),
        ('es', _('Spanish')),
        ('fr', _('French')),
        ('he', _('Hebrew')),
        ('hu', _('Hungarian')),
        ('it', _('Italian')),
        ('ja', _('Japanese')),
        ('ko', _('Korean')),
        ('nl', _('Dutch')),
        ('pt-br', _('Brazilian Portuguese')),
        ('zh-cn', _('Simplified Chinese')),
        ('zh-tw', _('Traditional Chinese')),
    )
elif SERVER_ENV == 'dev':
    ALLOWED_HOSTS.append('webwewant.allizom.org')

CACHES = {
    # DB 1 is for the site cache
    'default': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': 'unix:/var/run/redis/redis.sock:1',
        'OPTIONS': {
            'PARSER_CLASS': 'redis.connection.HiredisParser',
        }
    },
    # DB 0 is for the glow data
    'smithers': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': 'unix:/var/run/redis/redis.sock:0',
        'OPTIONS': {
            'PARSER_CLASS': 'redis.connection.HiredisParser',
        }
    }
}
DJANGO_REDIS_IGNORE_EXCEPTIONS = False
ENABLE_REDIS = True

# Sentry
INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
RAVEN_CONFIG = {
    'dsn': os.getenv('SENTRY_DSN'),
}
Python
0
@@ -561,16 +561,44 @@ GES = (%0A + ('cs', _('Czech')),%0A @@ -760,32 +760,65 @@ ('Hungarian')),%0A + ('id', _('Indonesian')),%0A ('it', _ @@ -905,67 +905,312 @@ (' -nl', _('Dutch')),%0A ('pt-br', _('Brazilian Portuguese +lt', _('Lithuanian')),%0A ('nl', _('Dutch')),%0A ('pl', _('Polish')),%0A ('pt-br', _('Brazilian Portuguese')),%0A ('ro', _('Romanian')),%0A ('ru', _('Russian')),%0A ('sk', _('Slovak')),%0A ('sl', _('Slovenian')),%0A ('sq', _('Albanian')),%0A ('sr', _('Serbian ')),
9320c7dfcc7d890f9a5ad805d9b4d38f6c0795e5
Add SPL filter for Floating IPs
src/nodeconductor_openstack/filters.py
src/nodeconductor_openstack/filters.py
import django_filters

from nodeconductor.core import filters as core_filters
from nodeconductor.structure import filters as structure_filters

from . import models


class OpenStackServiceProjectLinkFilter(structure_filters.BaseServiceProjectLinkFilter):
    service = core_filters.URLFilter(view_name='openstack-detail', name='service__uuid')


class InstanceFilter(structure_filters.BaseResourceFilter):

    class Meta(structure_filters.BaseResourceFilter.Meta):
        model = models.Instance
        order_by = structure_filters.BaseResourceFilter.Meta.order_by + [
            'ram',
            '-ram',
            'cores',
            '-cores',
            'system_volume_size',
            '-system_volume_size',
            'data_volume_size',
            '-data_volume_size',
        ]
        order_by_mapping = dict(
            # Backwards compatibility
            project__customer__name='service_project_link__project__customer__name',
            project__name='service_project_link__project__name',
            project__project_groups__name='service_project_link__project__project_groups__name',
            **structure_filters.BaseResourceFilter.Meta.order_by_mapping
        )


class SecurityGroupFilter(django_filters.FilterSet):
    name = django_filters.CharFilter(
        name='name',
        lookup_type='icontains',
    )
    description = django_filters.CharFilter(
        name='description',
        lookup_type='icontains',
    )
    service = django_filters.CharFilter(
        name='service_project_link__service__uuid',
    )
    project = django_filters.CharFilter(
        name='service_project_link__project__uuid',
    )
    settings_uuid = django_filters.CharFilter(
        name='service_project_link__service__settings__uuid'
    )
    service_project_link = core_filters.URLFilter(
        view_name='openstack-spl-detail',
        name='service_project_link__pk',
        lookup_field='pk',
    )
    state = core_filters.StateFilter()

    class Meta(object):
        model = models.SecurityGroup
        fields = [
            'name',
            'description',
            'service',
            'project',
            'service_project_link',
            'state',
        ]


class FloatingIPFilter(django_filters.FilterSet):
    project = django_filters.CharFilter(
        name='service_project_link__project__uuid',
    )
    service = django_filters.CharFilter(
        name='service_project_link__service__uuid',
    )

    class Meta(object):
        model = models.FloatingIP
        fields = [
            'project',
            'service',
            'status',
        ]


class FlavorFilter(structure_filters.ServicePropertySettingsFilter):

    class Meta(structure_filters.ServicePropertySettingsFilter.Meta):
        model = models.Flavor
        fields = dict({
            'cores': ['exact', 'gte', 'lte'],
            'ram': ['exact', 'gte', 'lte'],
            'disk': ['exact', 'gte', 'lte'],
        }, **{field: ['exact'] for field in structure_filters.ServicePropertySettingsFilter.Meta.fields})
        order_by = [
            'cores',
            '-cores',
            'ram',
            '-ram',
            'disk',
            '-disk',
        ]


class BackupScheduleFilter(django_filters.FilterSet):
    description = django_filters.CharFilter(
        lookup_type='icontains',
    )

    class Meta(object):
        model = models.BackupSchedule
        fields = (
            'description',
        )


class BackupFilter(django_filters.FilterSet):
    description = django_filters.CharFilter(
        lookup_type='icontains',
    )
    instance = django_filters.CharFilter(
        name='instance__uuid',
    )
    project = django_filters.CharFilter(
        name='instance__service_project_link__project__uuid',
    )

    class Meta(object):
        model = models.Backup
        fields = (
            'description',
            'instance',
            'project',
        )
Python
0
@@ -2459,32 +2459,199 @@ ce__uuid',%0A ) +%0A service_project_link = core_filters.URLFilter(%0A view_name='openstack-spl-detail',%0A name='service_project_link__pk',%0A lookup_field='pk',%0A ) %0A%0A class Meta @@ -2777,24 +2777,60 @@ 'status',%0A + 'service_project_link',%0A %5D%0A%0A%0A
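The diff fields in these records are Google diff-match-patch patch text: each hunk carries character offsets into old_contents, with newlines URL-encoded as %0A. A minimal sketch of applying one, assuming the diff-match-patch PyPI package; the variable names old_contents and diff_text are illustrative, not part of the record:

from diff_match_patch import diff_match_patch  # pip install diff-match-patch

dmp = diff_match_patch()
patches = dmp.patch_fromText(diff_text)  # parse the record's diff field
new_contents, results = dmp.patch_apply(patches, old_contents)
assert all(results)  # every hunk applied cleanly

Decoded this way, the hunks above add a service_project_link URLFilter to FloatingIPFilter and list it in Meta.fields, matching the commit subject.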
6dcc92797e8aa8661adfb67fc1f0aaa00de99709
fix cancellation context
drivers/azure_shell/driver.py
drivers/azure_shell/driver.py
import jsonpickle

from cloudshell.cp.azure.azure_shell import AzureShell
from cloudshell.shell.core.resource_driver_interface import ResourceDriverInterface


class AzureShellDriver(ResourceDriverInterface):
    def __init__(self):
        """
        ctor must be without arguments, it is created with reflection at run time
        """
        self.deployments = dict()
        self.deployments['Azure VM From Marketplace'] = self.deploy_vm
        self.deployments['Azure VM From Custom Image'] = self.deploy_vm_from_custom_image
        self.azure_shell = AzureShell()

    def Deploy(self, context, request=None, cancelation_context=None):
        app_request = jsonpickle.decode(request)
        deployment_name = app_request['DeploymentServiceName']
        if deployment_name in self.deployments.keys():
            deploy_method = self.deployments[deployment_name]
            return deploy_method(context,request,cancelation_context)
        else:
            raise Exception('Could not find the deployment')

    def initialize(self, context):
        pass

    def cleanup(self):
        pass

    def deploy_vm(self, context, request, cancellation_context):
        return self.azure_shell.deploy_azure_vm(command_context=context,
                                                deployment_request=request,
                                                cancellation_context=cancellation_context)

    def deploy_vm_from_custom_image(self, context, request, cancellation_context):
        return self.azure_shell.deploy_vm_from_custom_image(command_context=context,
                                                            deployment_request=request,
                                                            cancellation_context=cancellation_context)

    def PowerOn(self, context, ports):
        return self.azure_shell.power_on_vm(context)

    def PowerOff(self, context, ports):
        return self.azure_shell.power_off_vm(context)

    def PowerCycle(self, context, ports, delay):
        pass

    def remote_refresh_ip(self, context, ports, cancellation_context):
        return self.azure_shell.refresh_ip(context)

    def destroy_vm_only(self, context, ports):
        self.azure_shell.delete_azure_vm(command_context=context)

    def PrepareConnectivity(self, context, request, cancellation_context):
        return self.azure_shell.prepare_connectivity(context, request, cancellation_context)

    def CleanupConnectivity(self, context, request):
        return self.azure_shell.cleanup_connectivity(command_context=context)

    def GetApplicationPorts(self, context, ports):
        return self.azure_shell.get_application_ports(command_context=context)

    def get_inventory(self, context):
        return self.azure_shell.get_inventory(command_context=context)

    def GetAccessKey(self, context, ports):
        return self.azure_shell.get_access_key(context)
Python
0.000001
@@ -612,24 +612,25 @@ None, cancel +l ation_contex @@ -922,16 +922,17 @@ t,cancel +l ation_co
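Decoded, the two hunks above only correct a spelling in the keyword argument; reconstruction of the affected lines after the patch:

    def Deploy(self, context, request=None, cancellation_context=None):
        ...
            return deploy_method(context,request,cancellation_context)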
74c15f510e30645212848da128b88e55c5c011c4
remove redundant duplicate tests from node_network_limited
test/functional/node_network_limited.py
test/functional/node_network_limited.py
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_BITCOIN_CASH, NetworkThread, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class P2PIgnoreInv(P2PInterface):
    def on_inv(self, message):
        # The node will send us invs for other blocks. Ignore them.
        pass


class NodeNetworkLimitedTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-prune=550']]

    def get_signalled_service_flags(self):
        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
        NetworkThread().start()
        node.wait_for_verack()
        services = node.nServices
        self.nodes[0].disconnect_p2ps()
        node.wait_for_disconnect()
        return services

    def try_get_block_via_getdata(self, blockhash, must_disconnect):
        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
        NetworkThread().start()
        node.wait_for_verack()

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, int(blockhash, 16)))
        node.send_message(getdata_request)

        if (must_disconnect):
            # Ensure we get disconnected
            node.wait_for_disconnect(5)
        else:
            # check if the peer sends us the requested block
            node.wait_for_block(int(blockhash, 16), 3)
            self.nodes[0].disconnect_p2ps()
            node.wait_for_disconnect()

    def run_test(self):
        # NODE_BLOOM & NODE_BITCOIN_CASH & NODE_NETWORK_LIMITED must now be signaled
        assert_equal(self.get_signalled_service_flags(),
                     NODE_BLOOM | NODE_BITCOIN_CASH | NODE_NETWORK_LIMITED)

        # Now mine some blocks over the NODE_NETWORK_LIMITED + 2(racy buffer ext.) target
        firstblock = self.nodes[0].generate(1)[0]
        blocks = self.nodes[0].generate(292)
        block_within_limited_range = blocks[-1]

        # Make sure we can max retrive block at tip-288
        # requesting block at height 2 (tip-289) must fail (ignored)
        # first block must lead to disconnect
        self.try_get_block_via_getdata(firstblock, True)
        # last block in valid range
        self.try_get_block_via_getdata(blocks[1], False)
        # first block outside of the 288+2 limit
        self.try_get_block_via_getdata(blocks[0], True)

        # NODE_NETWORK_LIMITED must still be signaled after restart
        self.restart_node(0)
        assert_equal(self.get_signalled_service_flags(),
                     NODE_BLOOM | NODE_BITCOIN_CASH | NODE_NETWORK_LIMITED)

        # Test the RPC service flags
        assert_equal(int(self.nodes[0].getnetworkinfo()[
            'localservices'], 16), NODE_BLOOM | NODE_BITCOIN_CASH | NODE_NETWORK_LIMITED)

        # getdata a block above the NODE_NETWORK_LIMITED threshold must be possible
        self.try_get_block_via_getdata(block_within_limited_range, False)

        # getdata a block below the NODE_NETWORK_LIMITED threshold must be ignored
        self.try_get_block_via_getdata(firstblock, True)


if __name__ == '__main__':
    NodeNetworkLimitedTest().main()
Python
0.000014
@@ -2053,230 +2053,326 @@ # -Now mine some blocks over the NODE_NETWORK_LIMITED + 2(racy buffer ext.) target%0A firstblock = self.nodes%5B0%5D.generate(1)%5B0%5D%0A blocks = self.nodes%5B0%5D.generate(292)%0A block_within_limited_range = blocks%5B-1%5D +Test the RPC service flags%0A assert_equal(int(self.nodes%5B0%5D.getnetworkinfo()%5B%0A 'localservices'%5D, 16), NODE_BLOOM %7C NODE_BITCOIN_CASH %7C NODE_NETWORK_LIMITED)%0A%0A # Now mine some blocks over the NODE_NETWORK_LIMITED + 2(racy buffer ext.) target%0A blocks = self.nodes%5B0%5D.generate(292) %0A%0A @@ -2498,111 +2498,8 @@ ed)%0A - # first block must lead to disconnect%0A self.try_get_block_via_getdata(firstblock, True)%0A @@ -2697,733 +2697,8 @@ e)%0A%0A - # NODE_NETWORK_LIMITED must still be signaled after restart%0A self.restart_node(0)%0A assert_equal(self.get_signalled_service_flags(),%0A NODE_BLOOM %7C NODE_BITCOIN_CASH %7C NODE_NETWORK_LIMITED)%0A%0A # Test the RPC service flags%0A assert_equal(int(self.nodes%5B0%5D.getnetworkinfo()%5B%0A 'localservices'%5D, 16), NODE_BLOOM %7C NODE_BITCOIN_CASH %7C NODE_NETWORK_LIMITED)%0A%0A # getdata a block above the NODE_NETWORK_LIMITED threshold must be possible%0A self.try_get_block_via_getdata(block_within_limited_range, False)%0A%0A # getdata a block below the NODE_NETWORK_LIMITED threshold must be ignored%0A self.try_get_block_via_getdata(firstblock, True)%0A%0A %0Aif
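Decoded, the three hunks above move the RPC flag check up and drop the duplicate restart and firstblock probes; reconstruction of run_test after the patch:

    def run_test(self):
        # NODE_BLOOM & NODE_BITCOIN_CASH & NODE_NETWORK_LIMITED must now be signaled
        assert_equal(self.get_signalled_service_flags(),
                     NODE_BLOOM | NODE_BITCOIN_CASH | NODE_NETWORK_LIMITED)

        # Test the RPC service flags
        assert_equal(int(self.nodes[0].getnetworkinfo()[
            'localservices'], 16), NODE_BLOOM | NODE_BITCOIN_CASH | NODE_NETWORK_LIMITED)

        # Now mine some blocks over the NODE_NETWORK_LIMITED + 2(racy buffer ext.) target
        blocks = self.nodes[0].generate(292)

        # Make sure we can max retrive block at tip-288
        # requesting block at height 2 (tip-289) must fail (ignored)
        # last block in valid range
        self.try_get_block_via_getdata(blocks[1], False)
        # first block outside of the 288+2 limit
        self.try_get_block_via_getdata(blocks[0], True)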
abdb2743ed214ea3b6a72b4163be2d09a8354bd0
Remove aliases of rpc backend for messaging
smaug/rpc.py
smaug/rpc.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    'init',
    'cleanup',
    'set_defaults',
    'add_extra_exmods',
    'clear_extra_exmods',
    'get_allowed_exmods',
    'RequestContextSerializer',
    'get_client',
    'get_server',
    'get_notifier',
    'TRANSPORT_ALIASES',
]

from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils

import smaug.context
import smaug.exception


CONF = cfg.CONF
TRANSPORT = None
NOTIFIER = None

ALLOWED_EXMODS = [
    smaug.exception.__name__,
]
EXTRA_EXMODS = []

TRANSPORT_ALIASES = {}


def init(conf):
    if initialized():
        return

    global TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf,
                                        allowed_remote_exmods=exmods,
                                        aliases=TRANSPORT_ALIASES)

    serializer = RequestContextSerializer(JsonPayloadSerializer())
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)


def initialized():
    return None not in [TRANSPORT, NOTIFIER]


def cleanup():
    global TRANSPORT, NOTIFIER
    assert TRANSPORT is not None
    assert NOTIFIER is not None
    TRANSPORT.cleanup()
    TRANSPORT = NOTIFIER = None


def set_defaults(control_exchange):
    messaging.set_transport_defaults(control_exchange)


def add_extra_exmods(*args):
    EXTRA_EXMODS.extend(args)


def clear_extra_exmods():
    del EXTRA_EXMODS[:]


def get_allowed_exmods():
    return ALLOWED_EXMODS + EXTRA_EXMODS


class JsonPayloadSerializer(messaging.NoOpSerializer):
    @staticmethod
    def serialize_entity(context, entity):
        return jsonutils.to_primitive(entity, convert_instances=True)


class RequestContextSerializer(messaging.Serializer):

    def __init__(self, base):
        self._base = base

    def serialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.serialize_entity(context, entity)

    def deserialize_entity(self, context, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(context, entity)

    def serialize_context(self, context):
        _context = context.to_dict()
        return _context

    def deserialize_context(self, context):
        return smaug.context.RequestContext.from_dict(context)


def get_client(target, version_cap=None, serializer=None):
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.RPCClient(TRANSPORT,
                               target,
                               version_cap=version_cap,
                               serializer=serializer)


def get_server(target, endpoints, serializer=None):
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.get_rpc_server(TRANSPORT,
                                    target,
                                    endpoints,
                                    executor='eventlet',
                                    serializer=serializer)


def get_notifier(service=None, host=None, publisher_id=None):
    assert NOTIFIER is not None
    if not publisher_id:
        publisher_id = "%s.%s" % (service, host or CONF.host)
    return NOTIFIER.prepare(publisher_id=publisher_id)
Python
0.000001
@@ -793,33 +793,8 @@ r',%0A - 'TRANSPORT_ALIASES',%0A %5D%0A%0Af @@ -1066,32 +1066,8 @@ %5B%5D%0A%0A -TRANSPORT_ALIASES = %7B%7D%0A%0A %0Adef @@ -1228,16 +1228,16 @@ t(conf,%0A + @@ -1300,75 +1300,8 @@ mods -,%0A aliases=TRANSPORT_ALIASES )%0A%0A
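Decoded, the hunks drop 'TRANSPORT_ALIASES' from __all__, delete the module-level TRANSPORT_ALIASES = {} constant, and shorten the transport setup; reconstruction of init() after the patch:

def init(conf):
    if initialized():
        return

    global TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf,
                                        allowed_remote_exmods=exmods)

    serializer = RequestContextSerializer(JsonPayloadSerializer())
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)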
782c5a1b747e77f0c751a690af01694ea553c272
Reorder imports
nasman/snapshots/models.py
nasman/snapshots/models.py
from datetime import datetime
from pathlib import Path

from django.db import models
from django.utils.timezone import get_default_timezone_name

from djorm_pgfulltext.fields import VectorField
from djorm_pgfulltext.models import SearchManager
import magic
import pytz
from sitetree.models import TreeItemBase, TreeBase
from fontawesome.fields import IconField


class PathField(models.TextField):
    description = 'A path on a filesystem.'

    def to_python(self, value):
        if value is None:
            return value
        return Path(value)

    def from_db_value(self, value, expression, connection, context):
        if value is None:
            return value
        return Path(value)


class File(models.Model):
    """
    Model representing a file/directory/etc on the filesystem
    """
    full_path = PathField('full path')
    dirname = models.TextField(
        'dirname',
        db_index=True
    )
    name = models.TextField('name', db_index=True)
    snapshot_name = models.TextField('snapshot', db_index=True)
    directory = models.BooleanField(default=False)
    mime_type = models.ForeignKey(
        'IconMapping',
        verbose_name='mime-type',
        on_delete=models.SET_NULL,
        null=True
    )
    magic = models.CharField('magic', blank=True, max_length=255)
    modified = models.DateTimeField('modified')
    size = models.IntegerField('size', blank=True, null=True)
    search_index = VectorField()

    objects = SearchManager(
        fields=('name', 'dirname', 'snapshot_name', 'magic'),
        config='pg_catalog.english',
        search_field='search_index',
        auto_update_search_field=True
    )

    class Meta:
        app_label = 'snapshots'

    def clean_fields(self, exclude=None):
        path_field = self._meta.get_field('full_path')
        self.full_path = path_field.clean(self.full_path, self)
        self.dirname = str(self.full_path.parent)
        self.name = str(self.full_path.name)
        self.directory = self.full_path.is_dir()
        try:
            mime_type = magic.from_file(
                str(self.full_path), mime=True).decode('utf-8')
            self.magic = magic.from_file(
                str(self.full_path)).decode('utf-8')
        except magic.MagicException:
            icon = None
            self.magic = ''
        else:
            icon, _ = IconMapping.objects.get_or_create(
                mime_type=mime_type
            )
        self.mime_type = icon
        mtime = datetime.fromtimestamp(self.full_path.lstat().st_mtime)
        mtime = pytz.timezone(get_default_timezone_name()).localize(mtime)
        self.modified = mtime
        self.size = self.full_path.lstat().st_size
        super(File, self).clean_fields(exclude)

    def __unicode__(self):
        return self.full_path

    @property
    def extension(self):
        """
        The file extension of this file
        :rtype: str
        """
        return self.full_path.suffix


class IconMapping(models.Model):
    """
    Model to manage icon mapping to mimetypes
    """
    ICON_CHOICES = (
        ('fa-file-o', 'default'),
        ('fa-file-archive-o', 'archive'),
        ('fa-file-audio-o', 'audio'),
        ('fa-file-code-o', 'code'),
        ('fa-file-excel-o', 'excel'),
        ('fa-file-image-o', 'image'),
        ('fa-file-pdf-o', 'pdf'),
        ('fa-file-powerpoint-o', 'powerpoint'),
        ('fa-file-text-o', 'text'),
        ('fa-file-video-o', 'video'),
        ('fa-file-word-o', 'word'),
        ('fa-folder-open-o', 'directory')
    )
    icon = models.CharField(
        'icon',
        max_length=25,
        choices=ICON_CHOICES,
        default='fa-file-o'
    )
    mime_type = models.CharField(
        'mime-type',
        max_length=255,
        primary_key=True,
        db_index=True
    )

    class Meta:
        app_label = 'snapshots'

    def save(self, **kwargs):
        icon_mapping = {x[1]: x[0] for x in self.ICON_CHOICES}
        if self.icon == 'fa-file-o':
            major = self.mime_type.split('/')[0]
            if major in icon_mapping:
                self.icon = icon_mapping[major]
        super(IconMapping, self).save(**kwargs)

    def __str__(self):
        return self.mime_type


class NasmanTree(TreeBase):
    pass


class NasmanTreeItem(TreeItemBase):
    icon = IconField(
        'icon',
    )
Python
0
@@ -236,16 +236,57 @@ Manager%0A +from fontawesome.fields import IconField%0A import m @@ -358,49 +358,8 @@ se%0A%0A -from fontawesome.fields import IconField%0A %0A%0Acl
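Decoded, the two hunks move one import so the block after the patch reads:

from djorm_pgfulltext.fields import VectorField
from djorm_pgfulltext.models import SearchManager
from fontawesome.fields import IconField
import magic
import pytz
from sitetree.models import TreeItemBase, TreeBase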
6e3395f78a67a58a75d9e95923e5442ee8193d52
Change path to production
newsman/config/settings.py
newsman/config/settings.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
config.py contains most CONSTANTS in the project
"""

# @author chengdujin
# @contact chengdujin@gmail.com
# @created Jan 17, 2013

import sys
reload(sys)
sys.setdefaultencoding('UTF-8')

# SERVICES
import logging

# mongodb client
from pymongo.connection import Connection
from pymongo.database import Database
from pymongo.collection import Collection
from pymongo.errors import CollectionInvalid
con = Connection('127.0.0.1:27017')
db = Database(con, 'news')

# redis rclient
import redis
from redis import ConnectionError
rclient = redis.StrictRedis(host='10.240.35.40', port=6379)
#rclient = redis.StrictRedis(host='127.0.0.1')

# htmlparser to do unescaping
from HTMLParser import HTMLParser
hparser = HTMLParser()

# CONSTANTS
PUBLIC = 'http://mobile-global.baidu.com/news/%s' # hk01-hao123-mob01/mob02
#PUBLIC = 'http://180.76.2.34/%s' # hk01-hao123-mob00
#PUBLIC = 'http://54.251.107.116/%s' # AWS singapore
#PUBLIC = 'http://54.232.81.44/%s' # AWS sao paolo
#PUBLIC = 'http://54.248.227.71/%s' # AWS tokyo
#LOCAL = '/home/work/%s' # official server prefix
#LOCAL = '/home/ubuntu/%s' # AWS server prefix
LOCAL = '/home/jinyuan/Downloads/%s' # local server prefix

# code base folder for updating
CODE_BASE = LOCAL % 'newsman'

# logging settings
LOG_FORMAT = "%(levelname)-8s %(asctime)-25s %(lineno)-3d:%(filename)-16s %(message)s"
# critical, error, warning, info, debug, notset
logging.basicConfig(format=LOG_FORMAT)
logger = logging.getLogger('news-logger')
logger.setLevel(logging.WARNING)

# paths for generating transcoded files, mp3 and images
TRANSCODED_LOCAL_DIR = LOCAL % 'STATIC/news/ts/'
TRANSCODED_PUBLIC_DIR = PUBLIC % 'ts/'
IMAGES_LOCAL_DIR = LOCAL % 'STATIC/news/img/'
IMAGES_PUBLIC_DIR = PUBLIC % 'img/'
MEDIA_LOCAL_DIR = LOCAL % 'STATIC/news/mid/'
MEDIA_PUBLIC_DIR = PUBLIC % 'mid/'

# path for generating temporary files (used in mp3 download)
MEDIA_TEMP_LOCAL_DIR = LOCAL % 'STATIC/news/tmp/'

# templates for new page
NEWS_TEMPLATE_1 = LOCAL % 'STATIC/news/templates/news1.html'
NEWS_TEMPLATE_2 = LOCAL % 'STATIC/news/templates/news2.html'
NEWS_TEMPLATE_3 = LOCAL % 'STATIC/news/templates/news3.html'
NEWS_TEMPLATE_ARABIC = LOCAL % 'STATIC/news/templates/index_arabic.html'

# uck transcoding web service url
UCK_TRANSCODING = 'http://gate.baidu.com/tc?m=8&from=bdpc_browser&src='
UCK_TRANSCODING_NEW = 'http://m.baidu.com/openapp?/webapp?debug=1&from=bd_international&onlyspdebug=1&structpage&siteType=7&nextpage=1&siteappid=1071361&src='
LOGO_PUBLIC_PREFIX = 'http://mobile-global.baidu.com/logos/'

# meta info for a new page
TRANSCODED_ENCODING = '<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>\n'

# words on 'opening origial page' button
TRANSCODING_BTN_EN = 'Original page'
TRANSCODING_BTN_PT = 'Página original'
TRANSCODING_BTN_JA = '元のページ'
TRANSCODING_BTN_IN = 'Laman Asli'
TRANSCODING_BTN_TH = 'หน้าเดิม'
TRANSCODING_BTN_AR = 'ﺎﻠﻤﺻﺩﺭ'
TRANSCODING_BTN_ZH = '查看原始网页'

# hot news title
HOTNEWS_TITLE_EN = 'Hot News'
HOTNEWS_TITLE_PT = 'Notícias Quentes'
HOTNEWS_TITLE_JA = '人気ニュース'
HOTNEWS_TITLE_IN = 'Berita Terbaru'
HOTNEWS_TITLE_TH = 'ข่าวฮิต'
HOTNEWS_TITLE_AR = 'أخبار عاجلة'
HOTNEWS_TITLE_ZH = '头条'

# expirations
DATABASE_REMOVAL_DAYS = 365
FEED_UPDATE_DAYS = 2
MEMORY_EXPIRATION_DAYS = 20

# database names for feeds
FEED_REGISTRAR = 'feeds'

# settings used in summarizing
PARAGRAPH_CRITERIA = 40
SUMMARY_LENGTH_LIMIT = 500

# request connection timeouts
UCK_TIMEOUT = 40 # 14 seconds timeout
GOOGLE_TTS_TIMEOUT = 120 # 2 minutes timeout

# supported languages
LANGUAGES = ['en', 'th', 'in', 'ja', 'pt', 'ar', 'zh']

# supported countries, in code
COUNTRIES = ['AU', 'BR', 'CA', 'CN', 'EG', 'FR', 'GB', 'HK', 'ID', 'IN', 'JP', 'KR', 'TH', 'TR', 'TW', 'US', 'VN']

# sizes for generating images
MIN_IMAGE_SIZE = 150, 150
THUMBNAIL_STYLE = 1.4
THUMBNAIL_LANDSCAPE_SIZE_HIGH = 600, 226
THUMBNAIL_LANDSCAPE_SIZE_NORMAL = 450, 169
THUMBNAIL_LANDSCAPE_SIZE_LOW = 230, 85
THUMBNAIL_PORTRAIT_SIZE_HIGH = 310, 400
THUMBNAIL_PORTRAIT_SIZE_NORMAL = 175, 210
THUMBNAIL_PORTRAIT_SIZE_LOW = 90, 110
CATEGORY_IMAGE_SIZE = 300, 200
HOT_IMAGE_SIZE = 600, 226
Python
0
@@ -1124,17 +1124,16 @@ S tokyo%0A -# LOCAL = @@ -1272,16 +1272,17 @@ prefix%0A +# LOCAL =
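Decoded, the two hunks toggle the comment markers so the production prefix is active; the three LOCAL lines after the patch read:

LOCAL = '/home/work/%s' # official server prefix
#LOCAL = '/home/ubuntu/%s' # AWS server prefix
#LOCAL = '/home/jinyuan/Downloads/%s' # local server prefix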
01dcc7c3a973e5c363b4789bda86f871d5b10b59
Add opaque context argument to merge() to pass to merge callbacks
edgedb/lang/common/algos/topological.py
edgedb/lang/common/algos/topological.py
##
# Copyright (c) 2008-2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##


from collections import defaultdict, OrderedDict

from semantix.utils.datastructures import OrderedSet


class UnresolvedReferenceError(Exception):
    pass


class CycleError(Exception):
    pass


def sort(graph, return_record=False, root_only=False):
    adj = defaultdict(OrderedSet)
    radj = defaultdict(OrderedSet)

    for item_name, item in graph.items():
        if "merge" in item:
            for merge in item["merge"]:
                if merge in graph:
                    adj[item_name].add(merge)
                    radj[merge].add(item_name)
                else:
                    raise UnresolvedReferenceError("reference to an undefined item %s in %s" \
                                                   % (merge, item_name))

        if "deps" in item:
            for dep in item["deps"]:
                if dep in graph:
                    adj[item_name].add(dep)
                    radj[dep].add(item_name)
                else:
                    raise UnresolvedReferenceError("reference to an undefined item %s in %s" \
                                                   % (dep, item_name))

    visiting = set()
    visited = set()
    sorted = []

    def visit(item):
        if item in visiting:
            raise CycleError("detected cycle on vertex {!r}".format(item))
        if item not in visited:
            visiting.add(item)
            for n in adj[item]:
                visit(n)
            sorted.append(item)
            visiting.remove(item)
            visited.add(item)

    if root_only:
        items = set(graph) - set(radj)
    else:
        items = graph

    for item in items:
        visit(item)

    if return_record:
        return ((item, graph[item]) for item in sorted)
    else:
        return (graph[item]["item"] for item in sorted)


def normalize(graph, merger):
    merged = OrderedDict()

    for name, item in sort(graph, return_record=True):
        merge = item.get("merge")
        if merge:
            for m in merge:
                merger(item["item"], merged[m])

        merged.setdefault(name, item["item"])

    return merged.values()
Python
0
@@ -1927,16 +1927,30 @@ , merger +, context=None ):%0A m @@ -1970,16 +1970,88 @@ edDict() +%0A merger_kwargs = %7B'context': context%7D if context is not None else %7B%7D %0A%0A fo @@ -2225,16 +2225,33 @@ erged%5Bm%5D +, **merger_kwargs )%0A%0A
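Decoded, normalize() after the patch (a reconstruction) threads an opaque context through to the merge callback:

def normalize(graph, merger, context=None):
    merged = OrderedDict()
    merger_kwargs = {'context': context} if context is not None else {}

    for name, item in sort(graph, return_record=True):
        merge = item.get("merge")
        if merge:
            for m in merge:
                merger(item["item"], merged[m], **merger_kwargs)

        merged.setdefault(name, item["item"])

    return merged.values()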
aa097dcacb33fc77a5c9471b65b0f914f0484276
Fix lambda definition so it works in all versions of Python
lingvo/tasks/lm/tools/download_lm1b.py
lingvo/tasks/lm/tools/download_lm1b.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Downloads and processes lm1b dataset (http://www.statmt.org/lm-benchmark)."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import glob
import os

import tensorflow as tf

tf.flags.DEFINE_string("outdir", "/tmp/lm1b", "The output directory.")
tf.flags.DEFINE_integer(
    "count_cutoff", 3, "Ignore tokens that appear fewer than "
    "this amount of times when creating the vocab file.")

FLAGS = tf.flags.FLAGS


def main(_):
  basename = "1-billion-word-language-modeling-benchmark-r13output"
  fname = basename + ".tar.gz"
  url = "http://www.statmt.org/lm-benchmark/" + fname
  sha256hash = "01ba60381110baf7f189dfd2b8374de371e8c9a340835793f190bdae9e90a34e"
  tf.keras.utils.get_file(
      fname, url, file_hash=sha256hash, cache_subdir=FLAGS.outdir, extract=True)

  tf.logging.info("Generating vocab file. This may take a few minutes.")
  vocab = {}
  for fname in glob.glob(
      os.path.join(FLAGS.outdir, basename,
                   "training-monolingual.tokenized.shuffled", "news.en*")):
    with open(fname) as f:
      for line in f:
        for w in line.split():
          vocab[w] = vocab.get(w, 0) + 1

  with open(os.path.join(FLAGS.outdir, basename, "vocab.txt"), "w") as f:
    f.write("<epsilon>\t0\n<S>\t1\n</S>\t2\n<UNK>\t3\n")
    id = 4
    for k, v in sorted(vocab.items(), key=lambda (k, v): (-v, k)):
      if v < FLAGS.count_cutoff:
        break
      f.write("%s\t%d\n" % (k, id))
      id += 1


if __name__ == "__main__":
  tf.app.run(main)
Python
0.999985
@@ -2069,14 +2069,12 @@ bda -( k, v -) : (-
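Decoded, the hunk drops the parenthesized tuple parameter (Python 2-only syntax) from the sort key; the line after the patch reads:

    for k, v in sorted(vocab.items(), key=lambda k, v: (-v, k)):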
736d25a3363dd3abcd06df10f97ebee5f49026fc
make bulk deletion work
localtv/subsite/admin/sources_views.py
localtv/subsite/admin/sources_views.py
import re

from django.contrib.auth.models import User
from django.core.paginator import Paginator, InvalidPage
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext

from localtv.decorators import get_sitelocation, require_site_admin
from localtv import models
from localtv.util import sort_header
from localtv.subsite.admin import forms

VIDEO_SERVICE_TITLES = (
    re.compile(r'Uploads by (.+)'),
    re.compile(r"Vimeo / (.+)'s uploaded videos")
    )

## -------------------
## Source administration
## -------------------

class MockQueryset(object):

    def __init__(self, objects):
        self.objects = objects
        self.ordered = True

    def _clone(self):
        return self

    def __len__(self):
        return len(self.objects)

    def __iter__(self):
        return iter(self.objects)

    def __getitem__(self, k):
        return self.objects[k]


@require_site_admin
@get_sitelocation
def manage_sources(request, sitelocation=None):
    sort = request.GET.get('sort', 'name__lower')
    headers = [
        sort_header('name__lower', 'Source', sort),
        {'label': 'Categories'},
        {'label': 'User Attribution'},
        sort_header('type', 'Type', sort),
        sort_header('auto_approve', 'Auto Approve', sort)
        ]

    feeds = models.Feed.objects.filter(
        site=sitelocation.site,
        status=models.FEED_STATUS_ACTIVE).extra(select={
            'name__lower': 'LOWER(name)'}).order_by('name__lower')

    searches = models.SavedSearch.objects.filter(
        site=sitelocation.site).extra(select={
            'name__lower': 'LOWER(query_string)'}).order_by(
        'name__lower')

    search_string = request.GET.get('q', '')
    if search_string:
        feeds = feeds.filter(Q(feed_url__icontains=search_string) |
                             Q(name__icontains=search_string) |
                             Q(webpage__icontains=search_string) |
                             Q(description__icontains=search_string))
        searches = searches.filter(query_string__icontains=search_string)

    category = request.GET.get('category')
    if category:
        category = get_object_or_404(models.Category, pk=category)
        feeds = feeds.filter(auto_categories=category)
        searches = searches.filter(auto_categories=category)

    author = request.GET.get('author')
    if author:
        author = get_object_or_404(User, pk=author)
        feeds = feeds.filter(auto_authors=author)
        searches = searches.filter(auto_authors=author)

    source_filter = request.GET.get('filter')
    if source_filter == 'search':
        queryset = searches
    elif source_filter in ('feed', 'user'):
        q = Q(feed_url__iregex=models.VIDEO_SERVICE_REGEXES[0][1])
        for service, regexp in models.VIDEO_SERVICE_REGEXES[1:]:
            q = q | Q(feed_url__iregex=regexp)
        if source_filter == 'user':
            queryset = feeds.filter(q)
        else:
            queryset = feeds.exclude(q)
    else:
        feeds_list = [(feed.name.lower(), feed) for feed in feeds]
        searches_list = [(search.query_string.lower(), search)
                         for search in searches]
        queryset = [l[1] for l in sorted(feeds_list + searches_list)]

    paginator = Paginator(queryset, 15)
    try:
        page = paginator.page(int(request.GET.get('page', 1)))
    except InvalidPage:
        raise Http404

    if request.method == 'POST':
        formset = forms.SourceFormset(request.POST,
                                      queryset=MockQueryset(page.object_list))
        if formset.is_valid():
            bulk_edits = formset.extra_forms[0].cleaned_data
            for key in list(bulk_edits.keys()): # get the list because we'll be
                                                # changing the dictionary
                if bulk_edits[key] in ['', None]:
                    del bulk_edits[key]
            bulk_action = request.POST.get('bulk_action', '')
            if bulk_action:
                bulk_edits['action'] = bulk_action
            if bulk_edits:
                for form in formset.initial_forms:
                    if not form.cleaned_data['bulk']:
                        continue
                    for key, value in bulk_edits.items():
                        if key == 'action': # do something to the video
                            if value == 'remove':
                                form.fields['DELETE'] = True
                        else:
                            form.cleaned_data[key] = value
                formset.forms = formset.initial_forms # get rid of the extra bulk
                                                      # edit form
            formset.save()
            return HttpResponseRedirect(request.get_full_path())
    else:
        formset = forms.SourceFormset(queryset=MockQueryset(page.object_list))

    return render_to_response('localtv/subsite/admin/manage_sources.html',
                              {'page_obj': page,
                               'paginator': paginator,
                               'headers': headers,
                               'search_string': search_string,
                               'source_filter': source_filter,
                               'categories': models.Category.objects.filter(
                                   site=sitelocation.site),
                               'users': User.objects.all(),
                               'formset': formset},
                              context_instance=RequestContext(request))
Python
0.000001
@@ -4057,16 +4057,46 @@ ts%5Bkey%5D%0A + to_delete = set()%0A @@ -4609,36 +4609,36 @@ -form.fields%5B'DELETE'%5D = True +to_delete.add(form.instance) %0A @@ -4891,16 +4891,89 @@ .save()%0A + for instance in to_delete:%0A instance.delete()%0A
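Decoded, the hunks collect instances marked for removal instead of toggling a form field, then delete them after the formset is saved; reconstruction of the changed region (ellipses mark unchanged code):

            to_delete = set()
            ...
                        if key == 'action': # do something to the video
                            if value == 'remove':
                                to_delete.add(form.instance)
            ...
            formset.save()
            for instance in to_delete:
                instance.delete()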
0045135f8de8994ae260922dd5c02164f431f382
Add possibility to remove input
atlas/prodjob/views.py
atlas/prodjob/views.py
import json
import requests
import logging
# import os
import re

from django.http import HttpResponse
from django.shortcuts import render
from django.conf import settings

import atlas.deftcore.api.client as deft

_logger = logging.getLogger('prodtaskwebui')

_deft_client = deft.Client(settings.DEFT_AUTH_USER, settings.DEFT_AUTH_KEY)

_deft_job_actions = {
    'kill_jobs': 'kill_job',
    'set_debug_jobs': 'set_job_debug_mode',
    'reassign_jobs': 'reassign_jobs',
}


def request_jobs(request):
    params_for_bigpanda = ''
    request_path = request.META['QUERY_STRING']
    if request_path:
        params_for_bigpanda = "http://bigpanda.cern.ch/jobs/?" + request_path
    return render(request, 'prodjob/_job_table.html', {
        'params_for_bigpanda': params_for_bigpanda
    })


def jobs_action(request, action):
    """
    :type request: object
    """
    user = request.user.username
    is_superuser = request.user.is_superuser
    data = json.loads(request.body)
    jobs= data.get('jobs')
    args= data.get('parameters', [])
    fin_res=[]
    result = dict(owner=user, job=None, task=None, action=action, args=args,
                  status=None, accepted=False, registered=False,
                  exception=None, exception_source=None)
    if not is_superuser:
        result['exception'] = "Permission denied"
        return HttpResponse(json.dumps(result))
    #do actions here
    for job in jobs:
        result.update(_do_deft_job_action(user, job['taskid'], job['pandaid'], action, *args))
        fin_res.append(result)
    return HttpResponse(json.dumps(fin_res))


def get_jobs(request):
    result = ''
    try:
        url = json.loads(request.body)[0]
        _logger.info("Get jobs from bigpanda for: %s" % url)
        url=re.sub('&display_limit.*(\d+)','',url)
        url = url.replace('https','http')
        if 'json' not in url:
            if url[-1]=='&':
                url += '&'
            else:
                url += '&json'
        headers = {'content-type': 'application/json', 'accept': 'application/json'}
        resp = requests.get(url, headers=headers)
        data = resp.json()['jobs']
        result = json.dumps(data)
    except Exception, e:
        _logger.error("Problem during reading job info from bigpanda:%s" % str(e))
    return HttpResponse(result)


def get_jobs_from_url(url):
    url=re.sub('&display_limit.*(\d+)','',url)
    url = url.replace('https','http')
    if 'json' not in url:
        if url[-1]=='&':
            url += '&'
        else:
            url += '&json'
    headers = {'content-type': 'application/json', 'accept': 'application/json'};
    resp = requests.get(url, headers=headers)
    data = resp.json()['jobs']
    return data


def get_job_from_id(id):
    url = "https://bigpanda.cern.ch/job?pandaid=%s"%(str(id))
    url = url.replace('https','http')
    if 'json' not in url:
        if url[-1]=='&':
            url += '&'
        else:
            url += '&json'
    headers = {'content-type': 'application/json', 'accept': 'application/json'}
    resp = requests.get(url, headers=headers)
    data = resp.json()
    return data


def get_outputs_for_jobs(panda_ids):
    result = {}
    for id in panda_ids:
        result[id] = []
        job=get_job_from_id(id)
        for file in job['files']:
            result[id].append(file['lfn'])
    return result


def _do_deft_job_action(owner, task_id, job_id, action, *args):
    """
    Perform task action using DEFT API
    :param owner: username form which task action will be performed
    :param task_id: task ID
    :param action: action name
    :param args: additional arguments for the action (if needed)
    :return: dictionary with action execution details
    """
    result = dict(owner=owner, job=job_id, task=task_id, action=action, args=args,
                  status=None, accepted=False, registered=False,
                  exception=None, exception_source=None)
    try:
        func = getattr(_deft_client, _deft_job_actions[action])
    except AttributeError as e:
        result.update(exception=str(e))
        return result
    try:
        request_id = func(owner, task_id, job_id, *args)
    except Exception as e:
        result.update(exception=str(e),
                      exception_source=_deft_client.__class__.__name__)
        return result
    result['accepted'] = True
    try:
        status = _deft_client.get_status(request_id)
    except Exception as e:
        result.update(exception=str(e),
                      exception_source=_deft_client.__class__.__name__)
        return result
    result.update(registered=True, status=status)
    return result
Python
0.000001
@@ -2175,16 +2175,33 @@ s(data)%0A + del resp%0A exce
48ea534cdd8cd818255715028a414661c044af15
handle empty build queue case — thanks paltman for the report
manoria_project/apps/manoria/models.py
manoria_project/apps/manoria/models.py
import datetime
import random

from django.db import models

from django.contrib.auth.models import User


class Player(models.Model):

    user = models.ForeignKey(User, related_name="players")
    name = models.CharField(max_length=20, unique=True)

    # @@@ points

    def __unicode__(self):
        return self.name


class Continent(models.Model):

    name = models.CharField(max_length=20)

    def __unicode__(self):
        return self.name


class Settlement(models.Model):

    name = models.CharField(max_length=20)
    kind = models.CharField(
        max_length=15,
        choices=[
            ("homestead", "Homestead"),
            ("hamlet", "Hamlet"),
            ("village", "Village"),
            ("town", "Town"),
        ],
        default="homestead",
    )
    player = models.ForeignKey(Player, related_name="settlements")
    continent = models.ForeignKey(Continent)

    # location on continent
    x = models.IntegerField()
    y = models.IntegerField()

    # @@@ points

    def __unicode__(self):
        return u"%s (%s)" % (self.name, self.player)

    def place(self, commit=True):
        # @@@ need to test if continent is full otherwise an infinite loop
        # will occur
        y = None
        while y is None:
            x = random.randint(1, 10)
            S = set(range(1, 11)) - set([s.y for s in Settlement.objects.filter(x=x)])
            if S:
                y = random.choice(list(S))
        self.x = x
        self.y = y
        if commit:
            self.save()

    def build_queue(self):
        queue = SettlementBuilding.objects.filter(
            settlement=self,
            construction_end__gt=datetime.datetime.now()
        )
        queue = queue.order_by("construction_start")
        return queue

    def buildings(self):
        return SettlementBuilding.objects.filter(
            settlement=self,
            construction_end__lte=datetime.datetime.now()
        )


class ResourceKind(models.Model):

    name = models.CharField(max_length=25)

    def __unicode__(self):
        return self.name


class BaseResourceCount(models.Model):

    count = models.IntegerField(default=0)
    timestamp = models.DateTimeField(default=datetime.datetime.now)
    rate = models.DecimalField(max_digits=7, decimal_places=1)
    limit = models.IntegerField(default=0)

    class Meta:
        abstract = True

    def current(self):
        change = datetime.datetime.now() - self.timestamp
        amount = int(self.count + float(self.rate) * (change.days * 86400 + change.seconds) / 3600.0)
        if self.limit == 0:
            return max(0, amount)
        else:
            return min(max(0, amount), self.limit)


class PlayerResourceCount(BaseResourceCount):

    kind = models.ForeignKey(ResourceKind)
    player = models.ForeignKey(Player, related_name="resource_counts")

    class Meta:
        unique_together = [("kind", "player")]

    def __unicode__(self):
        return u"%s (%s)" % (self.kind, self.player)


class SettlementResourceCount(BaseResourceCount):

    kind = models.ForeignKey(ResourceKind)
    settlement = models.ForeignKey(Settlement, related_name="resource_counts")

    class Meta:
        unique_together = [("kind", "settlement")]

    def __unicode__(self):
        return u"%s (%s)" % (self.kind, self.settlement)


class BuildingKind(models.Model):

    name = models.CharField(max_length=30)

    def __unicode__(self):
        return self.name


class SettlementBuilding(models.Model):

    kind = models.ForeignKey(BuildingKind)
    settlement = models.ForeignKey(Settlement)

    # location in settlement
    x = models.IntegerField()
    y = models.IntegerField()

    # build queue
    construction_start = models.DateTimeField(default=datetime.datetime.now)
    construction_end = models.DateTimeField(default=datetime.datetime.now)

    class Meta:
        unique_together = [("settlement", "x", "y")]

    def __unicode__(self):
        return u"%s on %s" % (self.kind, self.settlement)

    def build(self, commit=True):
        oldest = self.settlement.build_queue().reverse()[0]
        # @@@ hard-coded two minute build times
        self.construction_start = oldest.construction_end
        self.construction_end = self.construction_start + datetime.timedelta(minutes=2)
        if commit:
            self.save()
Python
0
@@ -4150,32 +4150,49 @@ , commit=True):%0A + try:%0A oldest = @@ -4235,16 +4235,69 @@ se()%5B0%5D%0A + except IndexError:%0A oldest = None%0A @@ -4345,16 +4345,39 @@ d times%0A + if oldest:%0A @@ -4426,16 +4426,92 @@ ion_end%0A + else:%0A self.construction_start = datetime.datetime.now()%0A
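Decoded, the hunks guard the queue lookup and fall back to the current time when the queue is empty; reconstruction of build() after the patch:

    def build(self, commit=True):
        try:
            oldest = self.settlement.build_queue().reverse()[0]
        except IndexError:
            oldest = None
        # @@@ hard-coded two minute build times
        if oldest:
            self.construction_start = oldest.construction_end
        else:
            self.construction_start = datetime.datetime.now()
        self.construction_end = self.construction_start + datetime.timedelta(minutes=2)
        if commit:
            self.save()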
f4f8b4cba1fbba9f814092af41ed601598743aad
use the mesh generator
CasingSimulations/Run.py
CasingSimulations/Run.py
import time
import numpy as np
import scipy.sparse as sp
import os
import json
from scipy.constants import mu_0

import discretize
import properties
from discretize import utils
from pymatsolver import Pardiso
from SimPEG.EM import FDEM
from SimPEG import Utils, Maps

from .Model import PhysicalProperties, CasingParameters
from . import Mesh
from . import Sources


class BaseSimulation(properties.HasProperties):
    """
    Base class wrapper to run an EM Forward Simulation
    :param CasingSimulations.CasingParameters cp: casing parameters object
    :param CasingSimulations.Mesh mesh: a CasingSimulation mesh object
    """

    formulation = properties.StringChoice(
        "Formulation of the problem to solve [e, b, h, j]",
        default='h',
        choices=['e', 'b', 'h', 'j']
    )

    directory = properties.String(
        "working directory",
        default='.'
    )

    cp_filename = properties.String(
        "filename for the casing properties",
        default='casingParameters.json'
    )

    mesh_filename = properties.String(
        "filename for the mesh",
        default='meshParameters.json'
    )

    fields_filename = properties.String(
        "filename for the fields",
        default='fields.npy'
    )

    def __init__(self, cp, mesh, src, **kwargs):
        # if cp is a string, it is a filename, load in the json and create the
        # CasingParameters object
        if isinstance(cp, str):
            with open(cp, 'r') as outfile:
                cp = CasingParameters.deserialize(
                    json.load(outfile)
                )
        self.cp = cp

        # if cp is a string, it is a filename, load in the json and create the
        # CasingParameters object
        if isinstance(mesh, str):
            with open(mesh, 'r') as outfile:
                mesh = Mesh.deserialize(
                    json.load(outfile)
                )
        self.mesh = mesh

        # if src is a string, create a source of that type
        if isinstance(src, str):
            src = getattr(Sources, src)(
                self.mesh.mesh, self.cp
            )
        self.src = src

        # set keyword arguments
        Utils.setKwargs(self, **kwargs)

        # if the working directory does not exsist, create it
        if not os.path.isdir(self.directory):
            os.mkdir(self.directory)


class SimulationFDEM(BaseSimulation):
    """
    A wrapper to run an FDEM Forward Simulation
    :param CasingSimulations.CasingParameters cp: casing parameters object
    :param CasingSimulations.Mesh mesh: a CasingSimulation mesh object
    """

    def __init__(self, cp, mesh, src, **kwargs):
        super(SimulationFDEM, self).__init__(cp, mesh, src, **kwargs)

    def run(self):
        """
        Run the forward simulation
        """
        # ----------------- Validate Parameters ----------------- #
        print('Validating parameters...')

        # Casing Parameters
        self.cp.validate()
        self.cp.save(directory=self.directory, filename=self.cp_filename)
        print(' Saved casing properties: {}')
        print(' skin depths in casing: {}'.format(
            self.cp.skin_depth(sigma=self.cp.sigma_casing, mu=self.cp.mur_casing*mu_0)
        ))
        print(' casing thickness: {}'.format(
            self.cp.casing_t
        ))
        print(' skin depths in background: {}'.format(self.cp.skin_depth()))

        # Mesh Parameters
        self.mesh.validate()
        self.mesh.save(directory=self.directory, filename=self.cp_filename)
        print(' Saved Mesh Parameters')

        sim_mesh = self.mesh.mesh  # grab the discretize mesh off of the mesh object
        print(' max x: {}, min z: {}, max z: {}'.format(
            sim_mesh.vectorNx.max(),
            sim_mesh.vectorNz.min(),
            sim_mesh.vectorNz.max()
        ))

        # Source (only validation, no saving, can be re-created from cp)
        self.src.validate()
        print(' Using {} sources'.format(len(self.src.srcList)))
        print('... parameters valid\n')

        # ----------------- Set up the simulation ----------------- #
        physprops = PhysicalProperties(sim_mesh, self.cp)
        prb = getattr(FDEM, 'Problem3D_{}'.format(self.formulation))(
            sim_mesh,
            sigmaMap=physprops.wires.sigma,
            muMap=physprops.wires.mu,
            Solver=Pardiso
        )
        survey = FDEM.Survey(self.src.srcList)
        prb.pair(survey)

        # ----------------- Run the the simulation ----------------- #
        print('Starting Simulation')
        t = time.time()
        fields = prb.fields(physprops.model)
        np.save(
            '/'.join([self.directory, self.fields_filename]),
            fields[:, '{}Solution'.format(self.formulation)]
        )
        print('Elapsed time : {}'.format(time.time()-t))

        self._fields = fields

        return fields

    def fields(self):
        """
        fields from the forward simulation
        """
        if getattr(self, '_fields', None) is None:
            self._fields = self.run()
        return self._fields
Python
0.000001
@@ -320,24 +320,28 @@ eters%0Afrom . +Mesh import Mesh @@ -340,16 +340,25 @@ ort Mesh +Generator %0Afrom . @@ -1844,16 +1844,25 @@ h = Mesh +Generator .deseria
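Decoded, the hunks retarget the import and the deserialize call; after the patch (ellipsis marks unchanged code):

from .Mesh import MeshGenerator
...
                mesh = MeshGenerator.deserialize(
                    json.load(outfile)
                )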
6b652761794a47c35dfa30f035279f2090cb84a9
Update at 2017-07-23 19-24-55
data.py
data.py
import json
import random
from pathlib import Path

import numpy as np
import scipy.misc
from moviepy.editor import VideoFileClip
from tqdm import tqdm


class ImageNpzCreator(object):
    '''Create `image_train.npz`, `image_val.npz` at target_dir
    from image extracted from video_dirs
    '''

    def __init__(self, n_train=100, n_val=20, fps=1, target_dir=Path('./npz/')):
        self.n_train = n_train
        self.n_val = n_val
        self.fps = fps
        self.target_dir = target_dir

    def extract_data(self, video_dir):
        video = VideoFileClip(str(video_dir / 'video.mp4'))
        info = json.load((video_dir / 'info.json').open())
        n_frames = int(video.duration) * self.fps

        label = np.zeros(n_frames, dtype=np.uint8)
        for s, e in zip(info['starts'], info['ends']):
            fs = round(s * self.fps)
            fe = round(e * self.fps)
            label[fs:fe + 1] = 1

        data = [(video, f, label[f]) for f in range(n_frames)]
        return data

    def gen_npz(self, data, name):
        n = len(data)
        xs = np.zeros((n, 224, 224, 3), dtype=np.float32)
        ys = np.zeros((n, 1), dtype=np.uint8)
        for i, (video, f, y) in enumerate(tqdm(data)):
            img = video.get_frame(f / self.fps)
            xs[i] = scipy.misc.imresize(img, (224, 224))
            ys[i] = y

        npz_path = self.target_dir / '{}.npz'.format(name)
        np.savez(npz_path, xs=xs, ys=ys)
        del xs, ys

    def fit(self, video_dirs):
        train, val = [], []
        for video_dir in video_dirs:
            data = self.extract_data(video_dir)
            pivot = round(
                (self.n_train) / (self.n_train + self.n_val) * len(data))
            train.extend(data[:pivot])
            val.extend(data[pivot:])

        train = random.sample(train, k=self.n_train)
        val = random.sample(val, k=self.n_val)

        self.target_dir.mkdir(exist_ok=True, parents=True)
        self.gen_npz(train, 'image_train')
        self.gen_npz(val, 'image_val')


class WindowNpzCreator(object):
    '''Create `window_train.npz`, `window_val.npz` at target_dir
    from windows extracted from video_dirs
    '''

    def __init__(self, n_train=None, n_val=None, fps=1,
                 timesteps=5, overlap=4, target_dir=None):
        self.n_train = n_train or 100
        self.n_val = n_val or 20
        self.fps = fps
        self.timesteps = timesteps
        self.overlap = overlap
        self.target_dir = target_dir or Path('./npz/')

    def extract_windows(self, video_dir):
        video = VideoFileClip(str(video_dir / 'video.mp4'))
        info = json.load((video_dir / 'info.json').open())
        n_frames = int(video.duration) * self.fps
        timesteps = self.timesteps
        overlap = self.overlap

        label = np.zeros(n_frames, dtype=np.uint8)
        for s, e in zip(info['starts'], info['ends']):
            fs = round(s * self.fps)
            fe = round(e * self.fps)
            label[fs:fe + 1] = 1

        windows = [(video, f - timesteps, f, label[f - 1])
                   for f in range(timesteps, n_frames, timesteps - overlap)]
        print(video_dir, int(video.duration), len(windows))
        return windows

    def gen_npz(self, windows, name):
        n = len(windows)
        xs = np.zeros((n, self.timesteps, 224, 224, 3), dtype=np.float32)
        ys = np.zeros(n, dtype=np.uint8)
        for i, (video, s, e, y) in enumerate(tqdm(windows)):
            for j in range(e - s):
                img = video.get_frame((s + j) / self.fps)
                xs[i][j] = scipy.misc.imresize(img, (224, 224))
            ys[i] = y

        npz_path = self.target_dir / '{}.npz'.format(name)
        np.savez(npz_path, xs=xs, ys=ys)
        del xs, ys

    def fit(self, video_dirs):
        train, val = [], []
        for video_dir in video_dirs:
            windows = self.extract_windows(video_dir)
            random.shuffle(windows)
            pivot = round(
                (self.n_train) / (self.n_train + self.n_val) * len(windows))
            train.extend(windows[:pivot])
            val.extend(windows[pivot:])

        train = random.sample(train, k=self.n_train)
        val = random.sample(val, k=self.n_val)

        self.target_dir.mkdir(exist_ok=True, parents=True)
        self.gen_npz(train, 'window_train')
        self.gen_npz(val, 'window_val')


def main():
    dataset = Path('~/tthl-dataset/').expanduser()
    video_dirs = sorted(dataset.glob('video*/'))

    # gen = ImageNpzCreator(n_train=10000, n_val=2000, fps=3)
    # gen.fit(video_dirs)

    gen = WindowNpzCreator(
        n_train=10000, n_val=2000, fps=3, timesteps=6, overlap=3)
    gen.fit(video_dirs)


if __name__ == '__main__':
    main()
Python
0
@@ -3726,32 +3726,143 @@ ys%5Bi%5D = y%0A%0A + n_1 = np.count_nonzero(ys)%0A print('%7B%7D: %7B%7D / %7B%7D = %7B%7D'.format(name, n_1, len(ys), n_1 / len(ys)))%0A npz_path
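Decoded, the hunk lands in WindowNpzCreator.gen_npz (the 3726 offset falls in the second gen_npz) and prints the label balance before saving; reconstruction of the loop tail after the patch:

            ys[i] = y

        n_1 = np.count_nonzero(ys)
        print('{}: {} / {} = {}'.format(name, n_1, len(ys), n_1 / len(ys)))

        npz_path = self.target_dir / '{}.npz'.format(name)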
890ddb3507c89177d561eb20afb86c922bf52bf8
Correct import in demo
demo.py
demo.py
#!/usr/bin/env python3
from flask import Flask, redirect, request

from resumable import rebuild, split

app = Flask(__name__)


def form(action, contents):
    return '''
    <meta name="viewport"
          content="width=device-width, initial-scale=1, maximum-scale=1">
    <form action="{}" method=post>
        {}
        <button type=submit>Submit</button>
    </form>
    '''.format(action, contents)


# for the purposes of this demo, we will explicitly pass request
# and response (this is not needed in flask)
@rebuild
def controller(_):
    page = form('/c/welcomed', '<input name="name"/>')
    response = value(page, 'welcomed')

    page = form(
        '/c/my_name',
        '''
        <label>
            Hi, {}, my name is
            <input name="my_name"/>
        </label>
        '''.format(response.form['name'])
    )
    response = value(page, 'my_name')

    return value('Sweet, my name is {}!'.format(response.form['my_name']))


@app.route('/c/<name>', methods=['POST', 'GET'])
def router(name):
    return controller[name](request)


@app.route('/')
def index():
    return redirect('/c/controller')


if __name__ == '__main__':
    app.run(debug=True)
Python
0.000002
@@ -96,13 +96,13 @@ ld, -split +value %0A%0Aap
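Decoded, the hunk swaps one imported name so the demo imports the helper it actually calls:

from resumable import rebuild, value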
8a365e7b50350c66650c623dfc9b0d37c170ee90
fix for no reply-to header
ForgeDiscussion/forgediscussion/model/forum.py
ForgeDiscussion/forgediscussion/model/forum.py
import urllib
import re
from itertools import chain

from ming import schema
from ming.utils import LazyProperty
from ming.orm.mapped_class import MappedClass
from ming.orm.property import FieldProperty, RelationProperty, ForeignIdProperty

from allura import model as M
from allura.lib import utils

config = utils.ConfigProxy(
    common_suffix='forgemail.domain')

class Forum(M.Discussion):
    class __mongometa__:
        name='forum'
    type_s = 'Discussion'

    parent_id = FieldProperty(schema.ObjectId, if_missing=None)
    threads = RelationProperty('ForumThread')
    posts = RelationProperty('ForumPost')
    deleted = FieldProperty(bool, if_missing=False)

    @classmethod
    def attachment_class(cls):
        return ForumAttachment

    @classmethod
    def thread_class(cls):
        return ForumThread

    @LazyProperty
    def threads(self):
        threads = self.thread_class().query.find(dict(discussion_id=self._id)).all()
        sorted_threads = chain(
            (t for t in threads if 'Announcement' in t.flags),
            (t for t in threads if 'Sticky' in t.flags and 'Announcement' not in t.flags),
            (t for t in threads if 'Sticky' not in t.flags and 'Announcement' not in t.flags))
        return list(sorted_threads)

    @property
    def parent(self):
        return Forum.query.get(_id=self.parent_id)

    @property
    def subforums(self):
        return Forum.query.find(dict(parent_id=self._id)).all()

    @property
    def email_address(self):
        domain = '.'.join(reversed(self.app.url[1:-1].split('/'))).replace('_', '-')
        return '%s@%s%s' % (self.shortname.replace('/', '.'), domain, config.common_suffix)

    @LazyProperty
    def announcements(self):
        return self.thread_class().query.find(dict(
            app_config_id=self.app_config_id,
            flags='Announcement')).all()

    def breadcrumbs(self):
        if self.parent:
            l = self.parent.breadcrumbs()
        else:
            l = []
        return l + [(self.name, self.url())]

    def url(self):
        return urllib.quote(self.app.url + self.shortname + '/')

    def delete(self):
        # Delete the subforums
        for sf in self.subforums:
            sf.delete()
        super(Forum, self).delete()

    def get_discussion_thread(self, data=None):
        # If the data is a reply, use the parent's thread
        subject = '[no subject]'
        parent_id = None
        if data is not None:
            parent_id = data.get('in_reply_to', [ None ])[0]
            subject = data['headers'].get('Subject', subject)
        if parent_id is not None:
            parent = self.post_class().query.get(_id=parent_id)
            if parent: return parent.thread
        # Otherwise it's a new thread
        return self.thread_class()(discussion_id=self._id,subject=subject)

    @property
    def discussion_thread(self):
        return None

    @property
    def icon(self):
        return ForumFile.query.get(forum_id=self._id)

class ForumFile(M.File):
    forum_id=FieldProperty(schema.ObjectId)

class ForumThread(M.Thread):
    class __mongometa__:
        name='forum_thread'
    type_s = 'Thread'

    discussion_id = ForeignIdProperty(Forum)
    first_post_id = ForeignIdProperty('ForumPost')
    flags = FieldProperty([str])

    discussion = RelationProperty(Forum)
    posts = RelationProperty('ForumPost')
    first_post = RelationProperty('ForumPost', via='first_post_id')

    @classmethod
    def attachment_class(cls):
        return ForumAttachment

    def primary(self, primary_class):
        return self

    def post(self, subject, text, message_id=None, parent_id=None, **kw):
        post = super(ForumThread, self).post(text, message_id=message_id, parent_id=parent_id)
        if subject:
            post.subject = subject
        return post

    def set_forum(self, new_forum):
        self.post_class().query.update(
            dict(discussion_id=self.discussion_id, thread_id=self._id),
            {'$set':dict(discussion_id=new_forum._id)})
        self.attachment_class().query.update(
            {'discussion_id':self.discussion_id, 'thread_id':self._id},
            {'$set':dict(discussion_id=new_forum._id)})
        self.discussion_id = new_forum._id

class ForumPostHistory(M.PostHistory):
    class __mongometa__:
        name='post_history'

    artifact_id = ForeignIdProperty('ForumPost')

class ForumPost(M.Post):
    class __mongometa__:
        name='forum_post'
        history_class = ForumPostHistory
    type_s = 'Post'

    subject = FieldProperty(str)
    discussion_id = ForeignIdProperty(Forum)
    thread_id = ForeignIdProperty(ForumThread)

    discussion = RelationProperty(Forum)
    thread = RelationProperty(ForumThread)

    @classmethod
    def attachment_class(cls):
        return ForumAttachment

    @property
    def email_address(self):
        return self.discussion.email_address

    def primary(self, primary_class):
        return self

    def promote(self):
        '''Make the post its own thread head'''
        thd = self.thread_class()(
            discussion_id=self.discussion_id,
            subject=self.subject,
            first_post_id=self._id)
        self.move(thd, None)
        return thd

    def move(self, thread, new_parent_id):
        # Add a placeholder to note the move
        placeholder = self.thread.post(
            subject='Discussion moved',
            text='',
            parent_id=self.parent_id)
        placeholder.slug = self.slug
        placeholder.full_slug = self.full_slug
        placeholder.approve()
        if new_parent_id:
            parent = self.post_class().query.get(_id=new_parent_id)
        else:
            parent = None
        # Set the thread ID on my replies and attachments
        old_slug = self.slug + '/', self.full_slug + '/'
        reply_re = re.compile(self.slug + '/.*')
        self.slug, self.full_slug = self.make_slugs(parent=parent, timestamp=self.timestamp)
        placeholder.text = 'Discussion moved to [here](%s#post-%s)' % (
            thread.url(), self.slug)
        new_slug = self.slug + '/', self.full_slug + '/'
        self.discussion_id=thread.discussion_id
        self.thread_id=thread._id
        self.parent_id=new_parent_id
        self.text = 'Discussion moved from [here](%s#post-%s)\n\n%s' % (
            placeholder.thread.url(), placeholder.slug, self.text)
        reply_tree = self.query.find(dict(slug=reply_re)).all()
        for post in reply_tree:
            post.slug = new_slug[0] + post.slug[len(old_slug[0]):]
            post.full_slug = new_slug[1] + post.slug[len(old_slug[1]):]
            post.discussion_id=self.discussion_id
            post.thread_id=self.thread_id
        for post in [ self ] + reply_tree:
            for att in post.attachments:
                att.discussion_id=self.discussion_id
                att.thread_id=self.thread_id

class ForumAttachment(M.DiscussionAttachment):
    DiscussionClass=Forum
    ThreadClass=ForumThread
    PostClass=ForumPost

    class __mongometa__:
        polymorphic_identity='ForumAttachment'

    attachment_type=FieldProperty(str, if_missing='ForumAttachment')

MappedClass.compile_all()
Python
0
@@ -1453,24 +1453,16 @@ ).all()%0A - %0A @pr @@ -2492,16 +2492,17 @@ nt_id = +( data.get @@ -2519,17 +2519,18 @@ _to' -, +) or %5B - None - %5D)%5B0 @@ -5448,17 +5448,16 @@ text='', - %0A
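Decoded, the main hunk makes the in_reply_to lookup safe when the header is absent or empty (the other two hunks only strip trailing whitespace); the line after the patch reads:

            parent_id = (data.get('in_reply_to') or [None])[0]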
9a1cb05ffea340423ffd46eda018655f75e8b878
Fix test_basic_wiring_operations
src/wirecloud/platform/wiring/tests.py
src/wirecloud/platform/wiring/tests.py
# -*- coding: utf-8 -*-

# Copyright 2012 Universidad Politécnica de Madrid

# This file is part of Wirecloud.

# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.

import time

from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.db import transaction
from django.test import TransactionTestCase, Client
from django.utils import simplejson

from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait

from wirecloud.commons.test import WirecloudSeleniumTestCase, widget_operation
from wirecloud.platform.workspace.models import Workspace


# Avoid nose to repeat these tests (they are run through wirecloud/tests.py)
__test__ = False


class WiringTestCase(TransactionTestCase):

    fixtures = ['test_data']

    def setUp(self):
        super(WiringTestCase, self).setUp()

        self.user = User.objects.get(username='test')

        workspace = Workspace.objects.get(id=1)
        self.workspace_id = workspace.pk

        workspace.wiringStatus = simplejson.dumps({
            'operators': [],
            'connections': [],
        })
        workspace.save()
        transaction.commit()

        self.wiring_url = reverse('wirecloud.workspace_wiring', kwargs={'workspace_id': self.workspace_id})

    def test_basic_wiring_operations(self):
        client = Client()
        client.login(username='test', password='test')

        data = simplejson.dumps({
            'operators': [],
            'connections': [
                {
                    'source': {
                        'type': 'iwidget',
                        'id': 1,
                        'endpoint': 'event',
                    },
                    'target': {
                        'type': 'iwidget',
                        'id': 1,
                        'endpoint': 'slot',
                    },
                },
            ],
        })
        response = client.put(self.wiring_url, data, content_type='application/json')

        self.assertEquals(response.status_code, 204)
    test_basic_wiring_operations.tags = ('fiware-ut-6',)

    def test_wiring_modification_fails_with_incorrect_user(self):
        client = Client()
        client.login(username='test2', password='test')

        data = simplejson.dumps({
            'operators': [],
            'connections': [],
        })
        response = client.put(self.wiring_url, data, content_type='application/json')

        self.assertEquals(response.status_code, 403)
    test_wiring_modification_fails_with_incorrect_user.tags = ('fiware-ut-6',)

    def test_read_only_connections_cannot_be_deleted(self):
        workspace = Workspace.objects.get(id=1)
        workspace.wiringStatus = simplejson.dumps({
            'operators': [],
            'connections': [
                {
                    'readOnly': True,
                    'source': {
                        'type': 'iwidget',
                        'id': 1,
                        'endpoint': 'event',
                    },
                    'target': {
                        'type': 'iwidget',
                        'id': 1,
                        'endpoint': 'slot',
                    },
                },
            ],
        })
        workspace.save()

        client = Client()
        client.login(username='test', password='test')

        data = simplejson.dumps({
            'operators': [],
            'connections': [],
        })
        response = client.put(self.wiring_url, data, content_type='application/json')

        self.assertEquals(response.status_code, 403)


class WiringSeleniumTestCase(WirecloudSeleniumTestCase):

    tags = ('fiware-ut-6',)

    def get_iwidget_anchor(self, iwidget, endpoint):
        return self.driver.execute_script('''
            var wiringEditor = LayoutManagerFactory.getInstance().viewsByName["wiring"];
            return LayoutManagerFactory.getInstance().viewsByName["wiring"].iwidgets[%(iwidget)d].getAnchor("%(endpoint)s").wrapperElement;
        ''' % {"iwidget": iwidget, "endpoint": endpoint}
        )

    def test_basic_wiring_operations(self):
        self.login()

        self.add_widget_to_mashup('Test', 'Test (1)')
        self.add_widget_to_mashup('Test', 'Test (2)')
        self.add_widget_to_mashup('Test', 'Test (3)')

        self.change_main_view('wiring')
        time.sleep(2)
        grid = self.driver.find_element_by_xpath("//*[contains(@class, 'container center_container grid')]")

        source = self.driver.find_element_by_xpath("//*[contains(@class, 'container iwidget')]//*[text()='Test (1)']")
        ActionChains(self.driver).click_and_hold(source).move_to_element(grid).move_by_offset(-40, -40).release(None).perform()

        source = self.driver.find_element_by_xpath("//*[contains(@class, 'container iwidget')]//*[text()='Test (2)']")
        ActionChains(self.driver).click_and_hold(source).move_to_element(grid).move_by_offset(40, 40).release(None).perform()
        time.sleep(0.2)

        source = self.get_iwidget_anchor(1, 'event')
        target = self.get_iwidget_anchor(2, 'slot')
        ActionChains(self.driver).drag_and_drop(source, target).perform()

        self.change_main_view('workspace')
        time.sleep(0.2)

        with widget_operation(self.driver, 1):
            text_input = self.driver.find_element_by_tag_name('input')
            self.fill_form_input(text_input, 'hello world!!')
            # Work around hang when using Firefox Driver
            self.driver.execute_script('sendEvent();')
            #self.driver.find_element_by_id('b1').click()

        time.sleep(0.2)

        with widget_operation(self.driver, 2):
            try:
                WebDriverWait(self.driver, timeout=30).until(lambda driver: driver.find_element_by_id('wiringOut').text == 'hello world!!')
            except:
                pass

            text_div = self.driver.find_element_by_id('wiringOut')
            self.assertEqual(text_div.text, 'hello world!!')

        with widget_operation(self.driver, 3):
            text_div = self.driver.find_element_by_id('wiringOut')
            self.assertEqual(text_div.text, '')

        with widget_operation(self.driver, 1):
            text_div = self.driver.find_element_by_id('wiringOut')
            self.assertEqual(text_div.text, '')
Python
0.000017
@@ -4798,24 +4798,33 @@ hup('Test', +new_name= 'Test (1)')%0A @@ -4861,24 +4861,33 @@ hup('Test', +new_name= 'Test (2)')%0A @@ -4928,16 +4928,25 @@ 'Test', +new_name= 'Test (3
b5a583267cd2fe623575a35fa0e12138f8730804
Add comment for the adjust_settings_php() function explaining how it should revert databases after a failed build.
drupal/AdjustConfiguration.py
drupal/AdjustConfiguration.py
from fabric.api import *
from fabric.contrib.files import *
import Revert
# Adjust settings.php. Copy the relevant file based on the branch, delete the rest.
@task
@roles('app_all')
def adjust_settings_php(repo, branch, build, buildtype, alias, site):
    # In some cases it seems jenkins loses write permissions to the site directory
    # Let's make sure!
    sudo("chmod -R 775 /var/www/%s_%s_%s/www/sites/%s" % (repo, branch, build, site))

    # Check there is a settings.inc file, there are no cases where there should not be!
    if run("stat /var/www/config/%s_%s.settings.inc" % (alias, branch)):
        with settings(warn_only=True):
            if run("stat /var/www/%s_%s_%s/www/sites/%s/settings.php" % (repo, branch, build, site)).succeeded:
                run("mv /var/www/%s_%s_%s/www/sites/%s/settings.php /var/www/%s_%s_%s/www/sites/%s/unused.settings.php" % (repo, branch, build, site, repo, branch, build, site))
            if run("ln -s /var/www/config/%s_%s.settings.inc /var/www/%s_%s_%s/www/sites/%s/settings.php" % (alias, branch, repo, branch, build, site)).failed:
                raise SystemExit("######## Couldn't symlink in settings.inc file! Aborting build.")
    else:
        raise SystemExit("######## Couldn't find any settings.inc! This site probably failed its initial build and needs fixing. Aborting early! TIP: Add a /var/www/config/%s_%s.settings.inc file manually and do a file_exists() check for /var/www/%s_%s_%s/www/sites/%s/%s.settings.php and if it exists, include it. Then symlink that to /var/www/%s_%s_%s/www/sites/%s/settings.php." % (alias, branch, repo, branch, build, site, buildtype, repo, branch, build, site))

    with settings(warn_only=True):
        # Let's make sure we're checking for $buildtype.settings.php.
        # If so, we'll update the build number - if not, we'll add the check to the bottom of the file.
        settings_file = "/var/www/config/%s_%s.settings.inc" % (alias, branch)
        if run('grep "\$file = \'\/var\/www\/%s" %s' % (repo, settings_file)).return_code == 0:
            print "===> %s already has a file_exists() check. We need to replace the build number so the newer %s.settings.php file is used." % (settings_file, buildtype)
            sudo('sed -i.bak "s:/var/www/.\+_.\+_build_[0-9]\+/.\+/.\+\.settings\.php:/var/www/%s_%s_%s/www/sites/%s/%s.settings.php:g" %s' % (repo, branch, build, site, buildtype, settings_file))
        else:
            append_string = """$file = '/var/www/%s_%s_%s/www/sites/%s/%s.settings.php';
if (file_exists($file)) {
  include($file);
}""" % (repo, branch, build, site, buildtype)
            append(settings_file, append_string, use_sudo=True)
            print "===> %s did not have a file_exists() check, so it was appended to the bottom of the file." % settings_file

# Adjust shared files symlink
@task
@roles('app_all')
def adjust_files_symlink(repo, branch, build, alias, site):
    print "===> Setting the symlink for files"
    sudo("ln -s /var/www/shared/%s_%s_files/ /var/www/%s_%s_%s/www/sites/%s/files" % (alias, branch, repo, branch, build, site))

# If we have a drushrc.php file in the site that reflects this branch, copy that into place
@task
@roles('app_all')
def adjust_drushrc_php(repo, branch, build, site):
    with settings(warn_only=True):
        print "===> Copying %s.drushrc.php to drushrc.php if it exists" % branch
        if run("stat /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php" % (repo, branch, build, site, branch)).failed:
            print "===> Couldn't find /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php, so moving on..." % (repo, branch, build, site, branch)
        else:
            if sudo("cp /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php /var/www/%s_%s_%s/www/sites/%s/drushrc.php" % (repo, branch, build, site, branch, repo, branch, build, site)).failed:
                print "####### Could not copy /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php to /var/www/%s_%s_%s/www/sites/%s/drushrc.php. Continuing with build, but perhaps have a look into why the file couldn't be copied." % (repo, branch, build, site, branch, repo, branch, build, site)
            else:
                print "===> Copied /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php to /var/www/%s_%s_%s/www/sites/%s/drushrc.php" % (repo, branch, build, site, branch, repo, branch, build, site)
Python
0
@@ -153,16 +153,358 @@ e rest.%0A +# Failures here should be reverting the build entirely. If it fails to find settings.inc, or symlink in the file, the build will fail and the site being deployed and all sites that have been deployed will remain offline. All sites that have been deployed should have their databases reverted, as they could have had database updates applied.%0A @task%0A@r
2f789441eefbca50bf9c47dc5beb0d00cd8ce6e4
Update support server invite
help.py
help.py
from typing import Mapping, Optional

from discord.ext.commands import Cog, Command, MinimalHelpCommand


class BHelp(MinimalHelpCommand):
    async def send_bot_help(self, mapping: Mapping[Optional[Cog], list[Command]]):
        await super().send_bot_help(mapping)
        if ctx := self.context:
            await ctx.send(
                "Join the support server for more help: discord.gg/a3kHCRs9Q8"
            )

    def add_subcommand_formatting(self, command: Command) -> None:
        fmt = "{0} \N{EN DASH} {1}" if command.short_doc else "{0}"
        assert self.paginator is not None
        self.paginator.add_line(
            fmt.format(
                self.get_command_signature(command),
                command.short_doc,
            )
        )
Python
0
@@ -391,18 +391,18 @@ .gg/ -a3kHCRs9Q8 +HKmAadu5sP %22%0A
5f6a890e678014cdb5f3d7e08098007b517ac816
Remove check of command output.
start_jupyter_cm/tests/test_command.py
start_jupyter_cm/tests/test_command.py
import subprocess
import os
import sys

import pytest

from subprocess import PIPE

from start_jupyter_cm.utils import get_environment_label
from start_jupyter_cm.gnome import SPATH


def isadmin():
    try:
        # only windows users with admin privileges can read the C:\windows\temp
        os.listdir(os.path.join([os.environ.get('SystemRoot', 'C:\\windows'), 'temp']))
        return True
    except:
        # We don't have admin right
        return False


@pytest.mark.parametrize("action", ['add', 'remove'])
def test_run_command(action):
    call = ["start_jupyter_cm"]
    if action == 'remove':
        call.append("--remove")

    # https://stackoverflow.com/questions/53209127/subprocess-unexpected-keyword-argument-capture-output
    output = subprocess.run(call, stdout=PIPE, stderr=PIPE)
    assert output.returncode == 0

    env_label = get_environment_label()

    if sys.platform.startswith("linux"):
        for terminal in ["qtconsole", "notebook"]:
            script_path = os.path.join(SPATH, "Jupyter %s here%s" % (
                terminal, env_label))
            script_exist = os.path.exists(script_path)
            if action == "add":
                assert script_exist
            else:
                assert not script_exist
        output_string_list = output.stdout.decode().splitlines()
        print(output_string_list)
        # If running from a conda environment, it should have the name of the
        # environemnt in brackend if not running from base environment
        out = "created" if action == "add" else "removed"
        expected_out = ['Jupyter qtconsole here%s %s.' % (env_label, out),
                        'Jupyter notebook here%s %s.' % (env_label, out),
                        ]
        if env_label != "":
            expected_out.insert(0, "Using conda environment: %s" %
                                os.environ["CONDA_DEFAULT_ENV"])
        assert output_string_list == expected_out

    elif sys.platform == "win32":
        import winreg
        if isadmin:
            h_key_base = winreg.HKEY_LOCAL_MACHINE
        else:
            h_key_base = winreg.HKEY_CURRENT_USER
        for terminal in ["qtconsole", "notebook"]:
            key = r'Software\Classes\Directory\shell\jupyter_%s_here%s\Command' % (
                terminal, env_label.replace(" ", "_"))
            if action == "add":
                # Check if we can open the key to test if the key is present.
                registry_key = winreg.OpenKey(h_key_base, key)
                winreg.CloseKey(registry_key)
            else:
                with pytest.raises(FileNotFoundError):
                    # If the key have been properly removed, we will expect
                    # a `FileNotFoundError` to be raised
                    winreg.OpenKey(h_key_base, key)
Python
0.000001
@@ -1293,699 +1293,8 @@ xist -%0A output_string_list = output.stdout.decode().splitlines()%0A print(output_string_list)%0A # If running from a conda environment, it should have the name of the%0A # environemnt in brackend if not running from base environment%0A out = %22created%22 if action == %22add%22 else %22removed%22%0A expected_out = %5B'Jupyter qtconsole here%25s %25s.' %25 (env_label, out),%0A 'Jupyter notebook here%25s %25s.' %25 (env_label, out),%0A %5D%0A if env_label != %22%22:%0A expected_out.insert(0, %22Using conda environment: %25s%22 %25%0A os.environ%5B%22CONDA_DEFAULT_ENV%22%5D)%0A assert output_string_list == expected_out %0A%0A
f90d96d94257f6da24ed47e86d585a93541ca812
Add haplotypes entrypoint
avedata/api/genomes.py
avedata/api/genomes.py
from ..db import get_db
from ..sequence import get_chrominfo
from ..sequence import get_reference
from ..features import get_genes
from ..features import get_annotations
from ..features import get_featuretypes
from ..variants import get_accessions_list


def get(genome_id):
    genome_info = {
        'genome_id': genome_id,
        'chromosomes': chromosomes(genome_id),
        'feature_types': featuretypes(genome_id),
        'accessions': accession_list(genome_id),
        'reference': two_bit_uri(genome_id),
        'gene_track': gene_track_uri(genome_id)
    }
    return genome_info


def chromosomes(genome_id):
    """Fetch fasta file name for this genome
    open file with pyfaidx
    get list of chromosomes and fetch their
    'chrom_id': len
    retun [{'chrom_id': lenght},]
    """
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='sequence'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()[0]
    chrominfo = get_chrominfo(filename)
    return chrominfo


def featuretypes(genome_id):
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='features'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()[0]
    return get_featuretypes(filename)


def accession_list(genome_id):
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='variants'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()[0]
    return get_accessions_list(filename)


def reference(genome_id, chrom_id, start_position, end_position):
    """Fetch reference sequence of genomic region"""
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='sequence'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()['filename']
    return get_reference(filename, chrom_id, start_position, end_position)


def two_bit_uri(genome_id):
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='2bit'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()['filename']
    return filename


def gene_track_uri(genome_id):
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='bigbed'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()['filename']
    return filename


def genes(genome_id, chrom_id, start_position, end_position):
    """Fetch all gene annototion information for particular location.
    Return list of dicts with gff information."""
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='features'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()['filename']
    return get_genes(filename, chrom_id, start_position, end_position)


def features(genome_id, chrom_id, start_position, end_position):
    """Fetch genomic features of selected genomes
    Return list of genomic features.
    """
    db = get_db()
    query = """SELECT filename
               FROM metadata
               WHERE genome=? AND datatype='features'"""
    cursor = db.cursor()
    cursor.execute(query, (genome_id, ))
    filename = cursor.fetchone()['filename']
    return get_annotations(filename, chrom_id, start_position, end_position)


def haplotypes(genome_id, chrom_id, start_position, end_position):
    raise NotImplementedError()


def gene_search(genome_id, query):
    raise NotImplementedError()


def feature_search(genome_id, query):
    raise NotImplementedError()
Python
0.000046
@@ -769,16 +769,17 @@ retu +r n %5B%7B'chr @@ -794,10 +794,10 @@ leng -h t +h %7D,%5D%0A @@ -3801,42 +3801,569 @@ tion -):%0A raise NotImplementedError() +, accessions):%0A %22%22%22%0A Calculate haplotypes for chosen region and set of accessions.%0A %22%22%22%0A db = get_db()%0A query = %22%22%22SELECT filename%0A FROM metadata%0A WHERE genome=? AND datatype='variants'%22%22%22%0A cursor = db.cursor()%0A cursor.execute(query, (genome_id, ))%0A filename = cursor.fetchone()%5B'filename'%5D%0A%0A%0A%0A haplotypes_list = %7B%0A hierarchy: %7B%7D,%0A heplotypes: %5B%7B'haplotype_id': '',%0A 'accessions': %5B%5D,%0A 'variants': %5B%5D%7D,%0A 'sequence'%5D%0A %7D%0A return %0A%0A%0Ad
af642b214fa63a6e8f0dfe7633b5f19bb6b6d028
Set environment in cache manager instance
novaimagebuilder/BaseOS.py
novaimagebuilder/BaseOS.py
# encoding: utf-8

# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from CacheManager import CacheManager
from StackEnvironment import StackEnvironment
from SyslinuxHelper import SyslinuxHelper
import inspect
import logging


class BaseOS(object):
    """
    @param osinfo_dict:
    @param install_type:
    @param install_media_location:
    @param install_config:
    @param install_script:
    """

    def __init__(self, osinfo_dict, install_type, install_media_location, install_config, install_script = None):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.env = StackEnvironment()
        self.cache = CacheManager()
        self.syslinux = SyslinuxHelper()
        self.osinfo_dict = osinfo_dict
        self.install_type = install_type
        self.install_media_location = install_media_location
        self.install_config = install_config
        self.install_script = install_script
        self.iso_volume_delete = False

    # Subclasses can pull in the above and then do OS specific tasks to fill in missing
    # information and determine if the resulting install is possible

    def os_ver_arch(self):
        """
        @return:
        """
        return self.osinfo_dict['shortid'] + "-" + self.install_config['arch']

    def prepare_install_instance(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def start_install_instance(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def update_status(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def wants_iso_content(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def iso_content_dict(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def url_content_dict(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def abort(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))

    def cleanup(self):
        """
        @return:
        """
        raise NotImplementedError("Function (%s) not implemented" % (inspect.stack()[0][3]))
Python
0.000001
@@ -1214,24 +1214,58 @@ heManager()%0A + self.cache.env = self.env%0A self
8a704533abcf277906cdf57c8cdca55effd542e1
Add positive test case for cloudtrail deadman
tests/alerts/test_cloudtrail_deadman.py
tests/alerts/test_cloudtrail_deadman.py
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase

from alert_test_suite import AlertTestSuite


class TestAlertCloudtrailDeadman(AlertTestSuite):
    alert_filename = "cloudtrail_deadman"

    # This event is the default positive event that will cause the
    # alert to trigger
    default_event = {
        "_type": "cloudtrail",
        "_source": {
            "eventName": "somename"
        }
    }

    # This alert is the expected result from running this task
    default_alert = {
        "category": "deadman",
        "severity": "ERROR",
        "summary": 'No cloudtrail events found the last hour',
        "tags": ['cloudtrail', 'aws'],
    }

    test_cases = [
        PositiveAlertTestCase(
            description="Positive test case with good event",
            events=[
                {
                    "_source": {
                        "utctimestamp": AlertTestSuite.subtract_from_timestamp_lambda({'hours': 2}),
                    }
                }
            ],
            expected_alert=default_alert
        ),

        # PositiveAlertTestCase(
        #     description="Positive test case with an event with somewhat old timestamp",
        #     events=[
        #         {
        #             "_source": {
        #                 "utctimestamp": AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 29})
        #             }
        #         }
        #     ],
        #     expected_alert=default_alert
        # ),

        # NegativeAlertTestCase(
        #     description="Negative test case with bad event type",
        #     events=[
        #         {
        #             "_type": "event",
        #         }
        #     ],
        # ),

        # NegativeAlertTestCase(
        #     description="Negative test case with bad eventName",
        #     events=[
        #         {
        #             "_source": {
        #                 "eventName": "Badeventname",
        #             }
        #         }
        #     ],
        # ),

        NegativeAlertTestCase(
            description="Negative test case with old timestamp",
            events=[
                {
                    "_source": {
                        "utctimestamp": AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 30})
                    }
                }
            ],
        ),
    ]
Python
0.00005
@@ -1122,18 +1122,16 @@ %0A - # Positiv @@ -1146,38 +1146,36 @@ stCase(%0A -# - description=%22Pos @@ -1166,33 +1166,32 @@ description=%22Pos -i tive test case w @@ -1198,44 +1198,22 @@ ith -an +bad event -with somewhat old timestamp +type %22,%0A @@ -1210,34 +1210,32 @@ t type%22,%0A - # events=%5B%0A @@ -1236,33 +1236,32 @@ s=%5B%0A -# %7B%0A # @@ -1240,33 +1240,32 @@ - %7B%0A # @@ -1249,34 +1249,32 @@ %7B%0A - # %22_s @@ -1276,133 +1276,27 @@ %22_ -source%22: %7B%0A # %22utctimestamp%22: AlertTestSuite.subtract_from_timestamp_lambda(%7B'minutes': 29%7D)%0A # +type%22: %22event%22,%0A @@ -1313,38 +1313,16 @@ %0A - # %7D%0A # %5D,%0A @@ -1329,22 +1329,20 @@ -# - expected @@ -1373,553 +1373,8 @@ - # ),%0A%0A # NegativeAlertTestCase(%0A # description=%22Negative test case with bad event type%22,%0A # events=%5B%0A # %7B%0A # %22_type%22: %22event%22,%0A # %7D%0A # %5D,%0A # ),%0A%0A # NegativeAlertTestCase(%0A # description=%22Negative test case with bad eventName%22,%0A # events=%5B%0A # %7B%0A # %22_source%22: %7B%0A # %22eventName%22: %22Badeventname%22,%0A # %7D%0A # %7D%0A # %5D,%0A # ),%0A
44d14b0ab6bc4f37a266e8dfc9eae77d706014af
Allow creation of actors through a sponsor
rt.py
rt.py
import queue
import threading


def indiviual_loop(queue, actor):
    while True:
        message = queue.get()
        actor.behavior(message)


def global_loop(queue):
    while True:
        actor, message = queue.get()
        actor.behavior(message)


class EventLoop(object):
    loop = None

    def __init__(self):
        self.queue = queue.Queue()
        self.thread = threading.Thread(
            target=global_loop,
            args=(self.queue,),
            name='global-loop')
        self.thread.start()

    def schedule(self, message, target):
        self.queue.put((target, message))

    @classmethod
    def get_loop(cls):
        if cls.loop is None:
            cls.loop = cls()
        return cls.loop


class AbstractActor(object):
    def __call__(self, message):
        self._put(message)

    def _put(self, message):
        raise NotImplementedError()

    def _ensure_loop(self):
        pass

    @classmethod
    def create(cls, *args):
        actor = cls(*args)
        actor._ensure_loop()
        return actor


class ActorOwnLoop(AbstractActor):
    def _put(self, message):
        self.queue.put(message)

    def _ensure_loop(self):
        self.queue = queue.Queue()
        self.dispatcher = threading.Thread(
            target=indiviual_loop,
            args=(self.queue, self),
            name=self._thread_name())
        self.dispatcher.start()

    def _thread_name(self):
        return '{}-{}'.format(
            self.__class__.__name__,
            hex(id(self)))


class ActorGlobalLoop(AbstractActor):
    def _put(self, message):
        self.loop.schedule(message, self)

    def _ensure_loop(self):
        self.loop = EventLoop.get_loop()


Actor = ActorGlobalLoop
Python
0
@@ -1006,19 +1006,181 @@ s, *args +, **kwargs ):%0A + sponsor = kwargs.pop('sponsor', None)%0A if sponsor is not None:%0A return sponsor.create(cls, *args, **kwargs)%0A else:%0A @@ -1194,24 +1194,28 @@ cls(*args)%0A + acto @@ -1231,16 +1231,20 @@ _loop()%0A +
ad22bd13bfc8b9263b08d7e7188da45d945a685d
improve function
Dijkstra-algorithm/DA.py
Dijkstra-algorithm/DA.py
class PathGraph():
    pointSet = []
    pathLen = {}
    # {point:{shortMark:Bool,val:{p1:length,p2:length...},...}}

    def __init__(self, points):
        self.pointSet = points
        for p in self.pointSet:
            self.pathLen[p] = {"shortMark": False, "val": {p: 0}}

    def add_path(self, start, to, length):
        self.pathLen[start]["val"][to] = length
        self.pathLen[start]["shortest"] = False

    def path(self, start, to):
        try:
            return self.pathLen[start]["val"][to]
        except:
            print("No value")
            return None

    def shortest(self, start, to):
        tempVar = self.path(start, to)
        if tempVar is not None:
            if self.pathLen[start]["shortest"]:
                print("{0}".format(tempVar))
                return tempVar
            else:
                print("value is {0}, but not sure shortest or not"
                      .format(tempVar))
                return tempVar
        else:
            print("No value")
            return None


def Dijkstra(g, start):
    tempSet = set(g.pointSet)
    while len(tempSet) != 0:
        for v in sorted(a.pathLen[start]["val"].items(), key=lambda x: x[1]):
            if v[0] in tempSet:
                thisPoint = v[0]
                break
        for (k, v) in g.pathLen[thisPoint]["val"].items():
            sumValue = g.path(start, thisPoint) + g.path(thisPoint, k)
            try:
                if sumValue < g.path(start, k):
                    g.pathLen[start]["val"][k] = sumValue
            except:
                g.add_path(start, k, sumValue)
        tempSet.remove(thisPoint)

# if __name__ == "__main__":
qset = ["s", "t", "y", "x", "z"]
a = PathGraph(qset)
a.add_path("s", "t", 10)
a.add_path("s", "y", 5)
a.add_path("t", "y", 2)
a.add_path("t", "x", 1)
a.add_path("x", "z", 4)
a.add_path("y", "t", 3)
a.add_path("y", "x", 9)
a.add_path("y", "z", 2)
a.add_path("z", "x", 6)
a.add_path("z", "s", 7)
Python
0.000196
@@ -431,34 +431,105 @@ (self, start, to -): +=None):%0A if not to:%0A return self.pathLen%5Bstart%5D%5B%22val%22%5D%0A %0A try:%0A @@ -1217,33 +1217,23 @@ d(a.path -Len%5B +( start -%5D%5B%22val%22%5D +) .items() @@ -1370,20 +1370,17 @@ n g.path -Len%5B +( thisPoin @@ -1380,24 +1380,17 @@ hisPoint -%5D%5B%22val%22%5D +) .items() @@ -1553,33 +1553,23 @@ g.path -Len%5B +( start -%5D%5B%22val%22%5D +) %5Bk%5D = su
6933340547a91f1e51a3405ebf42d91e8be78c92
add test + remove typo
examples/decoding/plot_ems_filtering.py
examples/decoding/plot_ems_filtering.py
""" ============================================== Compute effect-matched-spatial filtering (EMS) ============================================== This example computes the EMS to reconstruct the time course of the experimental effect as described in: Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing multi-sensor data to a single time course that reveals experimental effects", BMC Neuroscience 2013, 14:122 """ # Author: Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) print(__doc__) import os.path as op import numpy as np import mne from mne import fiff from mne.datasets import sample from mne.epochs import combine_event_ids data_path = sample.data_path() # Set parameters raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' event_ids = {'AudL': 1, 'VisL': 2} tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = fiff.Raw(raw_fname, preload=True) raw.filter(1, 45) events = mne.read_events(event_fname) # Set up pick list: EEG + STI 014 - bad channels (modify to your needs) include = [] # or stim channels ['STI 014'] raw.info['bads'] += ['EEG 053'] # bads + 1 more # pick EEG channels picks = fiff.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, include=include, exclude='bads') # Read epochs reject = dict(grad=4000e-13, eog=150e-6) # reject = dict(mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks, baseline=None, reject=reject) # Let's equalize the trial counts in each condition epochs.equalize_event_counts(['AudL', 'VisL'], copy=False) # Now let's combine some conditions picks2 = fiff.pick_types(epochs.info, meg='grad', exclude='bads') data = epochs.get_data()[:, picks2, :] # the matlab routine expects n_sensors, n_times, n_epochs data2 = np.transpose(data, [1, 2, 0]) # # create bool indices conditions = [epochs.events[:, 2] == 1, epochs.events[:, 2] == 2] # # matlab io functions don't deal with bool values # # so we need tom make a detour via int conditions = [c.astype(int) for c in conditions] ############################################################################### # Now it's time for some hacking ... 
from scipy import io io.savemat('epochs_data.mat', {'data': data2, 'conditions': conditions}) var_name1, var_name2 = 'surrogates', 'spatial_filter' my_pwd = op.abspath(op.curdir) # expand path # this requires # https://gist.github.com/dengemann/640d202f84befff1545d # in the local directory my_matlab_code = """ disp('reading data ...'); epochs = load('epochs_data.mat'); conditions = boolean(epochs.conditions'); disp('computing trial surrogates'); [{0}, {1}] = ems_ncond(epochs.data, conditions); disp('saving results ...'); save('{pwd}/{0}.mat', '{0}'); save('{pwd}/{1}.mat', '{1}'); quit; """.format(var_name1, var_name2, pwd=my_pwd).strip('\n').replace('\n', '') run_matlab = ['matlab', '-nojvm', '-nodesktop', '-nodisplay', '-r'] run_matlab.append(my_matlab_code) from subprocess import Popen, PIPE process = Popen(run_matlab, stdin=PIPE, stdout=None, shell=False) process.communicate() # call and quit matlab surrogates = io.loadmat(var_name1 + '.mat')[var_name1] spatial_filter = io.loadmat(var_name2 + '.mat')[var_name2] from mne.decoding import compute_ems iter_comparisons = [ (surrogates, spatial_filter), compute_ems(data, conditions) ] import matplotlib.pyplot as plt for ii, (tsurrogate, sfilter) in enumerate(iter_comparisons): lang = 'python' if ii > 0 else 'matlab' order = epochs.events[:, 2].argsort() times = epochs.times * 1e3 plt.figure() plt.title('single surrogate trial - %s' % lang) plt.imshow(surrogates[order], origin='lower', aspect='auto', extent=[times[0], times[-1], 1, len(epochs)]) plt.xlabel('Time (ms)') plt.ylabel('Trials (reordered by condition)') plt.savefig('fig-%s-1.png' % lang) plt.figure() plt.title('Average EMS signal - %s' % lang) for key, value in epochs.event_id.items(): ems_ave = surrogates[epochs.events[:, 2] == value] ems_ave /= 4e-11 plt.plot(times, ems_ave.mean(0), label=key) plt.xlabel('Time (ms)') plt.ylabel('fT/cm') plt.legend(loc='best') plt.savefig('fig-%s-2.png' % lang) # visualize spatial filter evoked = epochs.average() evoked.data = spatial_filter evoked.plot_topomap(ch_type='grad', title=lang) plt.savefig('fig-%s-3.png' % lang)
Python
0
@@ -3417,98 +3417,335 @@ ms%0A%0A -iter_comparisons = %5B%0A (surrogates, spatial_filter),%0A compute_ems(data, conditions +surrogates_py, spatial_filter_py = compute_ems(data, conditions)%0A%0Aiter_comparisons = %5B%0A (surrogates, spatial_filter),%0A (surrogates_py, spatial_filter_py)%0A%5D%0A%0Afrom numpy.testing import asser_array_almost_equal%0A%0Aasser_array_almost_equal(surrogates, surrogates_py)%0Aasser_array_almost_equal(spatial_filter, spatial_filter_py )%0A -%5D %0A%0Aim @@ -4041,16 +4041,17 @@ .imshow( +t surrogat @@ -4051,17 +4051,16 @@ urrogate -s %5Border%5D, @@ -4400,16 +4400,17 @@ s_ave = +t surrogat @@ -4410,17 +4410,16 @@ urrogate -s %5Bepochs. @@ -4717,23 +4717,16 @@ data = s -patial_ filter%0A
759c1083ae63c854ba2ff8e7834ffbe19b5f6da7
Version bump.
sphinx_me.py
sphinx_me.py
#!/usr/bin/env python

from __future__ import with_statement
from datetime import datetime
from os.path import abspath, dirname, exists, join, isdir, splitext
from os import chdir, getcwd, listdir, mkdir, sep
from subprocess import Popen, PIPE
import sys


__version__ = "0.1"


def install():
    """
    Main entry point for running sphinx_me as a script. Creates a docs
    directory in the current directory and adds the required files for
    generating Sphinx docs from the project's README file - a conf
    module that calls setup_conf() from this module, and an index file
    that includes the project's README.
    """
    for name in listdir(getcwd()):
        if splitext(name)[0].upper() == "README":
            readme = name
            break
    else:
        print
        print "ABORT: No README file in the current directory."
        return
    docs_path = join(getcwd(), "docs")
    if not isdir(docs_path):
        mkdir(docs_path)
    with open(join(docs_path, "index.rst"), "w") as f:
        f.write(".. include:: ../%s" % readme)
    with open(join(docs_path, "conf.py"), "w") as f:
        f.write("# This file is automatically generated via sphinx-me\n")
        f.write("from sphinx_me import setup_conf; setup_conf(globals())\n")
    print
    print "SUCCESS: Sphinx docs layout created in %s" % docs_path
    try:
        import sphinx
    except ImportError:
        print
        print "Sphinx not installed. Not building docs."
    else:
        build_path = join(docs_path, "build")
        Popen(["sphinx-build", docs_path, build_path]).wait()
        print
        print "Docs built in %s" % build_path


def get_version(module):
    """
    Attempts to read a version attribute from the given module that
    could be specified via several different names and formats.
    """
    version_names = ["__version__", "get_version", "version"]
    version_names.extend([name.upper() for name in version_names])
    for name in version_names:
        try:
            version = getattr(module, name)
        except AttributeError:
            continue
        if callable(version):
            version = version()
        try:
            version = ".".join([unicode(i) for i in version.__iter__()])
        except AttributeError:
            pass
        return version


def get_setup_attribute(attribute, setup_path):
    """
    Runs the project's setup.py script in a process with an arg that
    will print out the value for a particular attribute such as author
    or version, and returns the value.
    """
    args = ["python", setup_path, "--%s" % attribute]
    return Popen(args, stdout=PIPE).communicate[0].strip()


def setup_conf(conf_globals):
    """
    Setup function that is called from within the project's
    docs/conf.py module that takes the conf module's globals() and
    assigns the values that can be automatically determined from the
    current project, such as project name, package name, version and
    author.
    """
    project_path = abspath(join(dirname(conf_globals["__file__"]), ".."))
    chdir(project_path)
    sys.path.insert(0, project_path)
    authors_file = "AUTHORS"
    version = None
    author = None
    setup = "setup.py"
    setup_path = join(project_path, setup)
    ignore = (setup,)
    # First try and get the author and version from setup.py
    if exists(setup_path):
        try:
            import setuptools
        except ImportError:
            pass
        else:
            version = get_setup_attribute("version", setup_path)
            if version == "0.0.0":
                version = None
            author = get_setup_attribute("author", setup_path)
            if author == "UNKNOWN":
                author = None
    # Iterate through each of the files in the project's directory,
    # looking for an AUTHORS file for the project's author, or
    # importable packages/modules for the version.
    for name in listdir(project_path):
        path = join(project_path, name)
        if name.upper() == authors_file:
            with open(path, "r") as f:
                for line in f.readlines():
                    line = line.strip("*- \n\r\t")
                    if line:
                        author = line
                        break
        elif name not in ignore and (isdir(path) or splitext(name)[1] == ".py"):
            try:
                module = __import__(name)
            except (ImportError, ValueError):
                continue
            if not version:
                version = get_version(module)
            if version and not author:
                try:
                    author = getattr(module, "__author__")
                except AttributeError:
                    pass
    # Ask for any values that couldn't be found.
    if not version:
        version = raw_input("No version number found, please enter one: ")
    if not author:
        author = raw_input("No author found, please enter one: ")
        with open(join(project_path, authors_file), "w") as f:
            f.write(author)
    # Inject the minimum required names into the conf module.
    settings = {
        "version": version,
        "release": version,
        "project": project_path.rstrip(sep).split(sep)[-1],
        "master_doc": "index",
        "copyright": u"%s, %s" % (datetime.now().year, author),
    }
    pad = max([len(k) for k in settings.keys()]) + 3
    print
    print "sphinx-me using the following values:"
    print
    print "\n".join([(k + ":").ljust(pad) + v for k, v in settings.items()])
    print
    conf_globals.update(settings)


if __name__ == "__main__":
    install()
Python
0
@@ -268,16 +268,18 @@ _ = %220.1 +.1 %22%0A%0A%0Adef
2571031f95987ff1aa1ccb79acf2258cf6022804
Add Read class
object_detector/file_io.py
object_detector/file_io.py
#-*- coding: utf-8 -*-

import glob
import os
import commentjson as json

# Todo : doctest have to be added
def read_json(filename):
    """load json file as dict object

    Parameters
    ----------
    filename : str
        filename of json file

    Returns
    ----------
    conf : dict
        dictionary containing contents of json file

    Examples
    --------
    """

    conf = json.loads(open(filename).read())
    return conf

# Todo : doctest have to be added
def list_files(directory, pattern="*.*", recursive_option=True):
    """list files in a directory matched in defined pattern.

    Parameters
    ----------
    directory : str
        filename of json file
    pattern : str
        regular expression for file matching
    recursive_option : boolean
        option for searching subdirectories. If this option is True,
        function searches all subdirectories recursively.

    Returns
    ----------
    conf : dict
        dictionary containing contents of json file

    Examples
    --------
    """

    if recursive_option == True:
        dirs = [path for path, _, _ in os.walk(directory)]
    else:
        dirs = [directory]

    files = []
    for dir_ in dirs:
        for p in glob.glob(os.path.join(dir_, pattern)):
            files.append(p)

    return files

if __name__ == "__main__":
    import doctest
    doctest.testmod()
Python
0
@@ -19,16 +19,28 @@ -*-%0D%0A%0D%0A +import abc%0D%0A import g @@ -87,53 +87,252 @@ on%0D%0A -%0D%0A# Todo : doctest have to be added%0D%0A +from scipy import io%0D%0A%0D%0Aclass ReadFile(object):%0D%0A __metaclass__ = abc.ABCMeta%0D%0A %0D%0A def __init__(self):%0D%0A pass%0D%0A %0D%0A @abc.abstractmethod%0D%0A def read(self, filename):%0D%0A pass%0D%0A%0D%0Aclass ReadJson(ReadFile):%0D%0A def read _jso @@ -331,14 +331,15 @@ read -_json( +(self, file @@ -342,24 +342,28 @@ filename):%0D%0A + %22%22%22load @@ -386,24 +386,28 @@ t object%0D%0A%0D%0A + Paramete @@ -406,32 +406,36 @@ Parameters%0D%0A + + ----------%0D%0A @@ -426,24 +426,28 @@ ----------%0D%0A + filename @@ -454,32 +454,36 @@ : str%0D%0A + filename of json @@ -485,26 +485,34 @@ json file%0D%0A -%0D%0A + %0D%0A Returns%0D @@ -504,32 +504,36 @@ Returns%0D%0A + ----------%0D%0A @@ -524,32 +524,36 @@ ----------%0D%0A + conf : dict%0D @@ -545,32 +545,36 @@ conf : dict%0D%0A + dictiona @@ -602,34 +602,42 @@ s of json file%0D%0A -%0D%0A + %0D%0A Examples%0D%0A @@ -630,32 +630,36 @@ Examples%0D%0A + + --------%0D%0A %22%22 @@ -660,25 +660,31 @@ + + %22%22%22%0D%0A -%0D%0A -conf = + return jso @@ -719,24 +719,423 @@ ))%0D%0A - return conf +%0D%0Aclass ReadMat(ReadFile):%0D%0A def read(self, filename):%0D%0A %22%22%22load json file as dict object%0D%0A%0D%0A Parameters%0D%0A ----------%0D%0A filename : str%0D%0A filename of json file%0D%0A %0D%0A Returns%0D%0A ----------%0D%0A conf : dict%0D%0A dictionary containing contents of json file%0D%0A %0D%0A Examples%0D%0A --------%0D%0A %22%22%22%0D%0A return io.loadmat(filename)%0D%0A %0D%0A%0D%0A
1667b7450a79f5014770dfb04a0c7544e45d8519
Create indices on the timeseries database
emission/core/get_database.py
emission/core/get_database.py
from pymongo import MongoClient
import os
import json

def get_mode_db():
    current_db = MongoClient().Stage_database
    Modes=current_db.Stage_Modes
    return Modes

def get_moves_db():
    current_db = MongoClient('localhost').Stage_database
    MovesAuth=current_db.Stage_user_moves_access
    return MovesAuth

def get_section_db():
    current_db=MongoClient('localhost').Stage_database
    Sections=current_db.Stage_Sections
    return Sections

def get_trip_db():
    current_db=MongoClient().Stage_database
    Trips=current_db.Stage_Trips
    return Trips

def get_profile_db():
    current_db=MongoClient().Stage_database
    Profiles=current_db.Stage_Profiles
    return Profiles

"""
def get_routeDistanceMatrix_db():
    current_db=MongoClient().Stage_database
    routeDistanceMatrix=current_db.Stage_routeDistanceMatrix
    return routeDistanceMatrix
"""

def get_routeDistanceMatrix_db(user_id, method):
    if not os.path.exists('routeDistanceMatrices'):
        os.makedirs('routeDistanceMatrices')

    routeDistanceMatrix = {}
    if not os.path.exists('routeDistanceMatrices/' + user_id + '_' + method + '_routeDistanceMatrix.json'):
        data = {}
        f = open('routeDistanceMatrices/' + user_id + '_' + method + '_routeDistanceMatrix.json', 'w+')
        f.write(json.dumps({}))
        f.close()
    else:
        f = open('routeDistanceMatrices/' + user_id + '_' + method + '_routeDistanceMatrix.json', 'r')
        routeDistanceMatrix = json.loads(f.read())
    return routeDistanceMatrix

def update_routeDistanceMatrix_db(user_id, method, updatedMatrix):
    f = open('routeDistanceMatrices/' + user_id + '_' + method + '_routeDistanceMatrix.json', 'w+')
    f.write(json.dumps(updatedMatrix))
    f.close()

def get_client_db():
    current_db=MongoClient().Stage_database
    Clients = current_db.Stage_clients
    return Clients

def get_routeCluster_db():
    current_db=MongoClient().Stage_database
    routeCluster=current_db.Stage_routeCluster
    return routeCluster

def get_groundClusters_db():
    current_db=MongoClient().Stage_database
    groundClusters=current_db.Stage_groundClusters
    return groundClusters

def get_pending_signup_db():
    current_db=MongoClient().Stage_database
    Pending_signups = current_db.Stage_pending_signups
    return Pending_signups

def get_worktime_db():
    current_db=MongoClient().Stage_database
    Worktimes=current_db.Stage_Worktime
    return Worktimes

def get_uuid_db():
    current_db=MongoClient().Stage_database
    UUIDs = current_db.Stage_uuids
    return UUIDs

def get_client_stats_db():
    current_db=MongoClient().Stage_database
    ClientStats = current_db.Stage_client_stats
    return ClientStats

def get_server_stats_db():
    current_db=MongoClient().Stage_database
    ServerStats = current_db.Stage_server_stats
    return ServerStats

def get_result_stats_db():
    current_db=MongoClient().Stage_database
    ResultStats = current_db.Stage_result_stats
    return ResultStats

def get_db():
    current_db=MongoClient('localhost').Stage_database
    return current_db

def get_test_db():
    current_db=MongoClient().Test2
    Trips=current_db.Test_Trips
    return Trips

def get_transit_db():
    current_db = MongoClient().Stage_database
    Transits=current_db.Stage_Transits
    return Transits

def get_utility_model_db():
    current_db = MongoClient().Stage_database
    Utility_Models = current_db.Stage_utility_models
    return Utility_Models

def get_alternatives_db():
    current_db = MongoClient().Stage_database
    Alternative_trips=current_db.Stage_alternative_trips
    return Alternative_trips

def get_perturbed_trips_db():
    current_db = MongoClient().Stage_database
    Perturbed_trips=current_db.Stage_alternative_trips
    return Perturbed_trips

def get_usercache_db():
    current_db = MongoClient().Stage_database
    UserCache = current_db.Stage_usercache
    return UserCache

def get_timeseries_db():
    current_db = MongoClient().Stage_database
    TimeSeries = current_db.Stage_timeseries
    return TimeSeries

def get_timeseries_error_db():
    current_db = MongoClient().Stage_database
    TimeSeriesError = current_db.Stage_timeseries_error
    return TimeSeriesError

def get_pipeline_state_db():
    current_db = MongoClient().Stage_database
    PipelineState = current_db.Stage_pipeline_state
    return PipelineState

def get_place_db():
    current_db = MongoClient().Stage_database
    Places = current_db.Stage_place
    return Places

def get_trip_new_db():
    current_db = MongoClient().Stage_database
    Trips = current_db.Stage_trip_new
    return Trips

def get_stop_db():
    current_db = MongoClient().Stage_database
    Stops = current_db.Stage_stop
    return Stops

def get_section_new_db():
    current_db = MongoClient().Stage_database
    Sections = current_db.Stage_section_new
    return Sections

def get_fake_trips_db():
    current_db = MongoClient().Stage_database
    FakeTrips = current_db.Stage_fake_trips
    return FakeTrips

def get_fake_sections_db():
    current_db = MongoClient().Stage_database
    FakeSections = current_db.Stage_fake_sections
    return FakeSections
Python
0
@@ -26,16 +26,32 @@ Client%0D%0A +import pymongo%0D%0A import o @@ -4196,24 +4196,706 @@ timeseries%0D%0A + TimeSeries.create_index(%5B(%22user_id%22, pymongo.HASHED)%5D)%0D%0A TimeSeries.create_index(%5B(%22metadata.key%22, pymongo.HASHED)%5D)%0D%0A TimeSeries.create_index(%5B(%22metadata.write_ts%22, pymongo.DESCENDING)%5D)%0D%0A TimeSeries.create_index(%5B(%22data.ts%22, pymongo.DESCENDING)%5D, sparse=True)%0D%0A TimeSeries.create_index(%5B(%22data.start_ts%22, pymongo.DESCENDING)%5D, sparse=True)%0D%0A TimeSeries.create_index(%5B(%22data.end_ts%22, pymongo.DESCENDING)%5D, sparse=True)%0D%0A TimeSeries.create_index(%5B(%22data.enter_ts%22, pymongo.DESCENDING)%5D, sparse=True)%0D%0A TimeSeries.create_index(%5B(%22data.exit_ts%22, pymongo.DESCENDING)%5D, sparse=True)%0D%0A TimeSeries.create_index(%5B(%22data.loc%22, pymongo.GEOSPHERE)%5D, sparse=True)%0D%0A return T
a906f16490c916db6d1db182f0b477fa8c3bb03c
remove commented code
onadata/libs/data/query.py
onadata/libs/data/query.py
from django.conf import settings
from django.db import connection
from django.utils.translation import ugettext as _

from onadata.libs.utils.common_tags import SUBMISSION_TIME


def _count_group(field, name, xform):
    if using_postgres:
        result = _postgres_count_group(field, name, xform)
    else:
        raise Exception("Unsupported Database")

    return result


def _dictfetchall(cursor):
    "Returns all rows from a cursor as a dict"
    desc = cursor.description

    return [
        dict(zip([col[0] for col in desc], row))
        for row in cursor.fetchall()
    ]


def _execute_query(query, to_dict=True):
    cursor = connection.cursor()
    cursor.execute(query)

    return _dictfetchall(cursor) if to_dict else cursor


def _get_fields_of_type(xform, types):
    k = []
    dd = xform.data_dictionary()
    survey_elements = flatten(
        [dd.get_survey_elements_of_type(t) for t in types])

    for element in survey_elements:
        name = element.get_abbreviated_xpath()
        k.append(name)

    return k


def _json_query(field):
    return "json->>'%s'" % field


def _postgres_count_group(field, name, xform):
    string_args = _query_args(field, name, xform)
    if is_date_field(xform, field):
        string_args['json'] = "to_char(to_date(%(json)s, 'YYYY-MM-DD'), 'YYYY"\
            "-MM-DD')" % string_args

    return "SELECT %(json)s AS %(name)s, COUNT(*) AS count FROM "\
           "%(table)s WHERE %(restrict_field)s=%(restrict_value)s "\
           "GROUP BY %(json)s" % string_args


def _postgres_select_key(field, name, xform):
    string_args = _query_args(field, name, xform)

    return "SELECT %(json)s AS %(name)s FROM %(table)s WHERE "\
           "%(restrict_field)s=%(restrict_value)s" % string_args


def _query_args(field, name, xform):
    return {
        'table': 'odk_logger_instance',
        'json': _json_query(field),
        'name': name,
        'restrict_field': 'xform_id',
        'restrict_value': xform.pk}


def _select_key(field, name, xform):
    if using_postgres:
        result = _postgres_select_key(field, name, xform)
    else:
        raise Exception("Unsupported Database")

    return result


def flatten(l):
    return [item for sublist in l for item in sublist]


def get_date_fields(xform):
    """List of date field names for specified xform"""
    return [SUBMISSION_TIME] + _get_fields_of_type(xform, ['date'])


def get_field_records(field, xform):
    result = _execute_query(_select_key(field, field, xform),
                            to_dict=False)

    return [float(i[0]) for i in result]


def get_form_submissions_grouped_by_field(xform, field, name=None):
    """Number of submissions grouped by field"""
    if not name:
        name = field

    result = _execute_query(_count_group(field, name, xform))

    # if we have a single None result, the field doesnt exist
    if len(result) == 1 and result[0][name] is None:
        raise ValueError(_(u"Field '%s' does not exist." % field))
    #elif len(result) > 0 and result[0][name] is None:
    #    # strip out the first result if it has a count of 0 and value of None
    #    result = result[1:]

    return result


def get_numeric_fields(xform):
    """List of numeric field names for specified xform"""
    return _get_fields_of_type(xform, ['decimal', 'integer'])


def is_date_field(xform, field):
    return field in get_date_fields(xform)


@property
def using_postgres():
    return settings.DATABASES[
        'default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2'
Python
0
@@ -3014,171 +3014,8 @@ ld)) -%0A #elif len(result) %3E 0 and result%5B0%5D%5Bname%5D is None:%0A # # strip out the first result if it has a count of 0 and value of None%0A # result = result%5B1:%5D %0A%0A
912ac42445c6b040346bd255f5628ce1713cf03b
Add board view
openacademy/__openerp__.py
openacademy/__openerp__.py
# -*- coding: utf-8 -*-
{
    'name': "Open Academy",

    'summary': """Manage trainings""",

    'author': "Vauxoo",
    'website': "http://www.vauxoo.com",

    # Categories can be used to filter modules in modules listing
    # Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
    # for the full list
    'category': 'Test',
    'version': '0.1',

    # any module necessary for this one to work correctly
    'depends': ['base'],

    # always loaded
    'data': [
        'view/openacademy_course_view.xml',
        'view/openacademy_session_view.xml',
        'view/partner_view.xml',
        'workflow/openacademy_session_workflow.xml',
        'security/security.xml',
        'security/ir.model.access.csv',
        'report/openacademy_session_report.xml',
    ],
    # only loaded in demonstration mode
    'demo': [
        'demo/openacademy_course_demo.xml',
    ],
    'installable':True,
    'auto_install':False,
}
Python
0
@@ -466,16 +466,24 @@ %5B'base' +,'board' %5D,%0A%0A @@ -805,24 +805,69 @@ eport.xml',%0A + 'view/openacademy_session_board.xml'%0A %5D,%0A #
d14c446c7a4a2729045e59a07892a4391796dfae
Exclude metadata and events when requesting execution info
valohai_cli/commands/execution/info.py
valohai_cli/commands/execution/info.py
import click

from valohai_cli.ctx import get_project
from valohai_cli.messages import print_table
from valohai_cli.utils import humanize_identifier

ignored_keys = {
    'commit',
    'counter',
    'ctime',
    'events',
    'id',
    'inputs',
    'metadata',
    'outputs',
    'parameters',
    'project',
    'url',
    'urls',
    'environment',
}


@click.command()
@click.argument('counter')
def info(counter):
    """
    Show execution info.
    """
    execution = get_project(require=True).get_execution_from_counter(counter=counter)
    data = dict((humanize_identifier(key), str(value)) for (key, value) in execution.items() if key not in ignored_keys)
    data['project name'] = execution['project']['name']
    data['environment name'] = execution['environment']['name']
    print_table(data)
    print()
    print_table(
        {input['name']: '; '.join(input['urls']) for input in execution.get('inputs', ())},
        headers=('input', 'URLs'),
    )
    print()
    print_table(
        execution.get('parameters', {}),
        headers=('parameter', 'value'),
    )
    print()
Python
0
@@ -202,16 +202,35 @@ ctime',%0A + 'environment',%0A 'eve @@ -231,24 +231,24 @@ 'events',%0A - 'id',%0A @@ -319,24 +319,36 @@ 'project',%0A + 'tags',%0A 'url',%0A @@ -358,35 +358,16 @@ 'urls',%0A - 'environment',%0A %7D%0A%0A%0A@cli @@ -535,16 +535,25 @@ counter( +%0A counter= @@ -559,16 +559,92 @@ =counter +,%0A params=%7B%0A 'exclude': 'metadata,events',%0A %7D,%0A )%0A da
a6d6628552f94fd2ecd7413345fb8e4cc7bbf888
Declare static url in example settings
valuenetwork/local_settings_example.py
valuenetwork/local_settings_example.py
""" You want a local_settings.py file in the same directory as settings.py. settings.py will import it, if it exists and local_settings will override settings for the setting with the same name. You also want your localsettings.py to be different on a development machine and a server, in ways that will be mentioned below. Note: don't use this local_settings_example.py. It is internally inconsistent to show some choices. Create your own local_settings.py file to fit your own needs. """ #for a development machine DEBUG = True #for a server DEBUG = False TEMPLATE_DEBUG = DEBUG #this is nice for development DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'valuenetwork.sqlite' } } #for a server, you want a real database DATABASES = { 'default': { 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', or 'oracle'. 'NAME': '', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', # Set to empty string for default. } } # valueaccounting settings can be overridden USE_WORK_NOW = False SUBSTITUTABLE_DEFAULT = False #example: Greece MAP_LATITUDE = 38.2749497 MAP_LONGITUDE = 23.8102717 MAP_ZOOM = 6 #and you can override any other settings in settings.py
Python
0.00001
@@ -1211,16 +1211,41 @@ %7D%0A%7D%0A%0A +STATIC_URL = %22/static/%22%0A%0A # valuea
0986de86f00e932fc9f58ae4ab253a5e12e55454
Change default verbose
vd.py
vd.py
#!/usr/bin/env python
# Copyright (C) 2012 Nathan Charles
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

def usage():
    """Prints usage options when called with no arguments or with invalid
    arguments
    """
    print """usage: [options]
    -a current
    -l length
    -p phase [default 1]
    -v voltage [default 240]
    -s size
    -h help
    """

import ee

def vd(a,l,size= None,v = 240, pf=-1, tAmb=30, percent=1, material='CU', \
        c='STEEL',verbose = False):
    oc = a * 1.25
    ocp = ee.ocpSize(oc)
    #print "OCP Size: %s" % ocp
    #egc = ee.findEGC(ocp,material)
    vdrop = v * percent/100.0
    #ratio = ee.CMIL[ee.conductorAmpacity(a,material).size]*1.0/ee.CMIL[ee.findEGC(ocp)]
    if size:
        conductor = ee.conductor(size,material)
        conductor = ee.checkAmpacity(conductor, ocp, tAmb)
        vdrop = conductor.vd(a,l, v = v, pf=pf, tAmb=tAmb,c=c)
        vdp=(vdrop * 100/v)
        if verbose:
            print "Percent drop: %s%%" % round(vdp,2)
        #print "EGC Size: %s" % incEGC(conductor,egc,ratio)
        return conductor
    else:
        if verbose:
            print "Allowed Voltage drop: %sV" % vdrop
        sets = 0
        conductor = None
        while conductor is None:
            sets += 1
            for s in ee.CONDUCTOR_STANDARD_SIZES:
                #print s, material
                conductor = ee.conductor(s,material)
                #print conductor
                if conductor.vd(a*1.0/sets,l, v = v, pf=pf, tAmb=tAmb,c=c) < vdrop:
                    break
                else:
                    conductor = None
        if sets > 1:
            print "%s sets of %s" % (sets, conductor)
            #print "EGC Size: %s" % incEGC(conductor,egc,ratio)
            return [conductor for i in range(sets)]
        else:
            if verbose:
                print "Conductor %s" % conductor
            conductor = ee.checkAmpacity(conductor, ocp/sets, tAmb)
            #print "EGC Size: %s %s" % ( incEGC(conductor,egc,ratio),'CU'#conductor.material)
            if verbose:
                print "Drop: %s V" % round(conductor.vd(a*1.0/sets,l, v = v, pf=pf, tAmb=tAmb,c=c),2)
            return conductor

if __name__ == "__main__":
    import argparse
    import sys

    parser = argparse.ArgumentParser(description='Voltage Drop/Rise Calculator')
    parser.add_argument('-a', '--aluminum',action='store_true')
    parser.add_argument('-c', '--current',required=True,help="accepts basic math") #evaluated to allow command line math
    parser.add_argument('-d', '--drop',type=float,default=1,help="Voltage Drop/Rise in percent")
    parser.add_argument('-v', '--voltage',type=float,default=240)
    parser.add_argument('-f', '--powerfactor',default="-1")
    parser.add_argument('-l', '--length',required=True,help="accepts basic math")#evaluated to allow command line math
    parser.add_argument('-p', '--conduit',type=str,default='PVC')
    parser.add_argument('-s', '--size',type=str,help="wire size")
    parser.add_argument('-t', '--temp',type=float,help="ambient",default=30)
    #parser.print_help()
    args = vars(parser.parse_args())
    material = 'CU'
    if args['aluminum']:
        material = 'AL'
    #solve(args['current'],args['length'],args['voltage'],args['drop'])
    #print args
    try:
        #start program
        #vd(args)
        #vd(current,length,size= None,v = 240, pf="-1",temp = 75,percent = 1,material = 'CU', c = 'PVC')
        DC = 'DC'
        dc = 'DC'
        vd(eval(args['current']),eval(args['length']),args['size'],
                args['voltage'],eval(args['powerfactor']),args['temp'],
                args['drop'],material=material,c = args['conduit'])
    except (KeyboardInterrupt, SystemExit):
        sys.exit(1)
    except:
        raise
Python
0.000001
@@ -1069,12 +1069,11 @@ e = -Fals +Tru e):%0A
e10b0e89ae8e4a74ee6159214668a5e8761ff767
Add timestamps.
loadkit/core/manifest.py
loadkit/core/manifest.py
import json
import collections

from loadkit.util import json_default, json_hook


class Manifest(dict):
    """ A manifest has metadata on a package. """

    def __init__(self, key):
        self.key = key
        self.reload()

    def reload(self):
        if self.key.exists():
            self.update(json.load(self.key, object_hook=json_hook))
        else:
            self.update({'resources': {}})

    def save(self):
        content = json.dumps(self, default=json_default, indent=2)
        self.key.set_contents_from_string(content)

    def __repr__(self):
        return '<Manifest(%r)>' % self.key


class ResourceMetaData(collections.MutableMapping):
    """ Metadata for a resource is derived from the main manifest. """

    def __init__(self, resource):
        self.resource = resource
        self.manifest = resource.package.manifest
        if not isinstance(self.manifest.get('resources'), dict):
            self.manifest['resources'] = {}
        existing = self.manifest['resources'].get(self.resource.path)
        if not isinstance(existing, dict):
            self.manifest['resources'][self.resource.path] = {}

    def __getitem__(self, key):
        return self.manifest['resources'][self.resource.path][key]

    def __setitem__(self, key, value):
        self.manifest['resources'][self.resource.path][key] = value

    def __delitem__(self, key):
        del self.manifest['resources'][self.resource.path][key]

    def __iter__(self):
        return iter(self.manifest['resources'][self.resource.path])

    def __len__(self):
        return len(self.manifest['resources'][self.resource.path])

    def __keytransform__(self, key):
        return key

    def save(self):
        self.resource.package.save()

    def __repr__(self):
        return '<ResourceMetaData(%r)>' % self.resource.path
Python
0.000064
@@ -24,16 +24,46 @@ ections%0A +from datetime import datetime%0A %0A%0Afrom l @@ -389,16 +389,67 @@ else:%0A + self%5B'created_at'%5D = datetime.utcnow()%0A @@ -496,32 +496,79 @@ def save(self):%0A + self%5B'updated_at'%5D = datetime.utcnow()%0A content @@ -1264,17 +1264,203 @@ ath%5D = %7B -%7D +%0A 'created_at': datetime.utcnow()%0A %7D%0A%0A def touch(self):%0A self.manifest%5B'resources'%5D%5Bself.resource.path%5D%5B'updated_at'%5D = %5C%0A datetime.utcnow() %0A%0A de @@ -1659,16 +1659,37 @@ = value +%0A self.touch() %0A%0A de @@ -1769,32 +1769,53 @@ ource.path%5D%5Bkey%5D +%0A self.touch() %0A%0A def __iter @@ -2041,16 +2041,16 @@ rn key%0A%0A - def @@ -2053,32 +2053,53 @@ def save(self):%0A + self.touch()%0A self.res
bdc77257df8e050db968537006ddc36f1853c60a
Fix CMSSW dataset for ntuplizing.
lobster/cmssw/dataset.py
lobster/cmssw/dataset.py
import logging
import math
import os
import pickle
import re

import requests
from retrying import retry
import xdg.BaseDirectory

from lobster.core.dataset import DatasetInfo
from lobster.util import Configurable

from dbs.apis.dbsClient import DbsApi
from WMCore.Credential.Proxy import Proxy
from WMCore.DataStructs.LumiList import LumiList

logger = logging.getLogger('lobster.cmssw.dataset')


class DASWrapper(DbsApi):

    @retry(stop_max_attempt_number=10)
    def listFileLumis(self, *args, **kwargs):
        return super(DASWrapper, self).listFileLumis(*args, **kwargs)

    @retry(stop_max_attempt_number=10)
    def listFileSummaries(self, *args, **kwargs):
        return super(DASWrapper, self).listFileSummaries(*args, **kwargs)

    @retry(stop_max_attempt_number=10)
    def listFiles(self, *args, **kwargs):
        return super(DASWrapper, self).listFiles(*args, **kwargs)

    @retry(stop_max_attempt_number=10)
    def listBlocks(self, *args, **kwargs):
        return super(DASWrapper, self).listBlocks(*args, **kwargs)


class Cache(object):

    def __init__(self):
        self.cachedir = xdg.BaseDirectory.save_cache_path('lobster')

    def cache(self, name, baseinfo, dataset):
        logger.debug("writing dataset '{}' to cache".format(name))
        cache = os.path.join(self.cachedir, name.replace('/', ':')) + '.pkl'
        with open(cache, 'wb') as fd:
            pickle.dump((baseinfo, dataset), fd)

    def cached(self, name, baseinfo):
        cache = os.path.join(self.cachedir, name.replace('/', ':')) + '.pkl'
        try:
            with open(cache, 'rb') as fd:
                info, dset = pickle.load(fd)
                if baseinfo == info:
                    logger.debug("retrieved dataset '{}' from cache".format(name))
                    return dset
                return None
        except Exception:
            return None


class Dataset(Configurable):
    """
    Specification for processing a dataset stored in DBS.

    Parameters
    ----------
    dataset : str
        The full dataset name as in DBS.
    lumis_per_task : int
        How many luminosity sections to process in one task. May be
        modified by Lobster to match the user-specified task runtime.
    events_per_task : int
        Adjust `lumis_per_task` to contain as many luminosity sections
        to process the specified amount of events.
    lumi_mask : str
        The URL or filename of a JSON luminosity section mask, as
        customary in CMS.
    file_based : bool
        Process whole files instead of single luminosity sections.
    dbs_instance : str
        Which DBS instance to query for the `dataset`.
    """
    _mutable = {}

    __apis = {}
    __dsets = {}
    __cache = Cache()

    def __init__(self, dataset, lumis_per_task=25, events_per_task=None, lumi_mask=None, file_based=False, dbs_instance='global'):
        self.dataset = dataset
        self.lumi_mask = lumi_mask
        self.lumis_per_task = lumis_per_task
        self.events_per_task = events_per_task
        self.file_based = file_based
        self.dbs_url = 'https://cmsweb.cern.ch/dbs/prod/{0}/DBSReader'.format(dbs_instance)

        self.total_units = 0

    def __get_mask(self, url):
        if not re.match(r'https?://', url):
            return url

        fn = os.path.basename(url)
        cached = os.path.join(Dataset.__cache.cachedir, fn)
        if not os.path.isfile(cached):
            r = requests.get(url)
            if not r.ok:
                raise IOError("unable to retrieve '{0}'".format(url))
            with open(cached, 'w') as f:
                f.write(r.text)
        return cached

    def validate(self):
        if self.dataset in Dataset.__dsets:
            return True
        if self.lumi_mask:
            self.lumi_mask = self.__get_mask(self.lumi_mask)
        cred = Proxy({'logger': logging.getLogger("WMCore")})
        dbs = DASWrapper(self.dbs_url, ca_info=cred.getProxyFilename())
        baseinfo = dbs.listFileSummaries(dataset=self.dataset)
        if baseinfo is None or (len(baseinfo) == 1 and baseinfo[0] is None):
            return False
        return True

    def get_info(self):
        if self.dataset not in Dataset.__dsets:
            if self.lumi_mask:
                self.lumi_mask = self.__get_mask(self.lumi_mask)
            res = self.query_database(
                self.dataset, self.lumi_mask, self.file_based)

            if self.events_per_task:
                res.tasksize = int(
                    math.ceil(self.events_per_task / float(res.total_events) * res.total_units))
            else:
                res.tasksize = self.lumis_per_task
            Dataset.__dsets[self.dataset] = res

        self.total_units = Dataset.__dsets[self.dataset].total_units
        return Dataset.__dsets[self.dataset]

    def query_database(self, dataset, mask, file_based):
        cred = Proxy({'logger': logging.getLogger("WMCore")})
        dbs = DASWrapper(self.dbs_url, ca_info=cred.getProxyFilename())

        baseinfo = dbs.listFileSummaries(dataset=dataset)
        if baseinfo is None or (len(baseinfo) == 1 and baseinfo[0] is None):
            raise ValueError('unable to retrive information for dataset {}'.format(dataset))

        result = self.__cache.cached(dataset, baseinfo)
        if result:
            return result

        result = DatasetInfo()
        result.total_events = sum([info['num_event'] for info in baseinfo])

        for info in dbs.listFiles(dataset=dataset, detail=True):
            fn = info['logical_file_name']
            result.files[fn].events = info['event_count']
            result.files[fn].size = info['file_size']

        if file_based:
            for info in dbs.listFiles(dataset=dataset):
                fn = info['logical_file_name']
                result.files[fn].lumis = [(-2, -2)]
        else:
            blocks = dbs.listBlocks(dataset=dataset)
            if mask:
                unmasked_lumis = LumiList(filename=mask)
            for block in blocks:
                runs = dbs.listFileLumis(block_name=block['block_name'])
                for run in runs:
                    fn = run['logical_file_name']
                    for lumi in run['lumi_section_num']:
                        if not mask or ((run['run_num'], lumi) in unmasked_lumis):
                            result.files[fn].lumis.append((run['run_num'], lumi))
                        elif mask and ((run['run_num'], lumi) not in unmasked_lumis):
                            result.masked_units += 1

        result.unmasked_units = sum([len(f.lumis) for f in result.files.values()])
        result.total_units = result.unmasked_units + result.masked_units

        self.__cache.cache(dataset, baseinfo, result)

        return result
Python
0
@@ -3141,19 +3141,24 @@ elf.dbs_ -url +instance = 'http @@ -3980,35 +3980,40 @@ rapper(self.dbs_ -url +instance , ca_info=cred.g @@ -5046,19 +5046,24 @@ elf.dbs_ -url +instance , ca_inf
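The DASWrapper class in this record hardens each DBS call with the retrying library's @retry decorator, repeating up to ten times on any exception. A self-contained sketch of that behaviour, assuming the retrying package is installed:

from retrying import retry

attempts = {'n': 0}


@retry(stop_max_attempt_number=10, wait_fixed=100)
def flaky_query():
    # Fails twice, then succeeds; @retry re-invokes it transparently.
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise IOError('transient DBS hiccup')
    return 'ok'


assert flaky_query() == 'ok'
assert attempts['n'] == 3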
ef9e6743f5639b982af8b16cee01c1bb22c6dd59
Fix bugzilla + tw2.
fedoracommunity/widgets/package/bugs.py
fedoracommunity/widgets/package/bugs.py
import tw2.core as twc
import datetime

from fedoracommunity.widgets.grid import Grid
from fedoracommunity.connectors.api import get_connector


class BugStatsWidget(twc.Widget):
    template = "mako:fedoracommunity.widgets.package.templates.bugs_stats_widget"
    id = twc.Param(default='bugs_widget')
    kwds = twc.Param(default=None)
    product = twc.Param(default='Fedora')
    version = twc.Param(default='rawhide')
    epel_version = twc.Param(default='el6')
    package = twc.Param(default=None)
    num_open = twc.Param(default='-')
    num_new_this_week = twc.Param(default='')
    num_closed_this_week = twc.Param(default='')

    bz_prefix = "https://bugzilla.redhat.com/buglist.cgi"
    status_open_string = "bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED"
    status_closed_string = "bug_status=CLOSED"

    base_query_string = twc.Variable(default='')
    open_query_string = twc.Variable(default='')
    closed_query_string = twc.Variable(default='')

    def prepare(self):
        super(BugStatsWidget, self).prepare()

        def to_query_string(query):
            return "&".join([
                "{key}={value}".format(key=key, value=value)
                for key, value in query.items()
            ])

        self.base_query_string = to_query_string({
            "query_format": "advanced",
            "product": self.product,
            "component": self.package,
        })
        self.open_query_string = to_query_string({
            "chfieldto": "Now",
            "chfield": "[Bug creation]",
            "chfieldfrom": datetime.datetime.now().isoformat().split('T')[0],
        })
        self.closed_query_string = to_query_string({
            "chfieldto": "Now",
            "chfield": "bug_status",
            "chfieldvalue": "CLOSED",
            "chfieldfrom": datetime.datetime.now().isoformat().split('T')[0],
        })


class BugsGrid(Grid):
    resource = 'bugzilla'
    resource_path = 'query_bugs'
    release_table = twc.Param()
    package = twc.Param()
    template = "mako:fedoracommunity.widgets.package.templates.bugs_table_widget"

    def prepare(self):
        # Signify that we haven't loaded any bugs yet.
        self.total_rows = -1

        releases = []
        self.filters = {'package': self.package}
        pkgdb = get_connector('pkgdb')
        collections = pkgdb.get_collection_table(active_only=True)
        for id, collection in collections.items():
            name = collection['name']
            ver = collection['version']
            label = "%s %s" % (name, ver)
            value = str(ver)
            if ver == 'devel':
                name = 'Rawhide'
                ver = 9999999
                label = 'Rawhide'
                value = 'rawhide'
            if name in ('Fedora', 'Rawhide', 'Fedora EPEL'):
                releases.append({'label': label, 'value': value, 'version': ver})

        def _sort(a,b):
            return cmp(int(b['version']), int(a['version']))
        releases.sort(_sort)
        self.release_table = releases

        super(BugsGrid, self).prepare()


class BugsWidget(twc.Widget):
    bug_stats = BugStatsWidget
    bug_grid = BugsGrid
    kwds = twc.Param()
    package = twc.Param()
    template = "mako:fedoracommunity.widgets.package.templates.bugs"

    def prepare(self):
        super(BugsWidget, self).prepare()
        self.package = self.kwds['package_name']
        self.main_package = self.kwds.get('subpackage_of', '')
        if not self.main_package:
            self.main_package = self.package

        # This is here so you can hit packages/kernel/bugs/all
        if self.args == ['all']:
            self.children[1].rows_per_page = 100000
Python
0
@@ -464,46 +464,8 @@ 6')%0A - package = twc.Param(default=None)%0A @@ -1950,34 +1950,8 @@ m()%0A - package = twc.Param()%0A @@ -3131,34 +3131,8 @@ m()%0A - package = twc.Param()%0A
618a27676476b90c650b1ed875b489775fa82068
Accept time strings for new keyword
src/SnmpLibrary/traps.py
src/SnmpLibrary/traps.py
# Copyright 2015 Kontron Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import warnings
import functools

from robot import utils

with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    from pysnmp.carrier.asynsock.dispatch import AsynsockDispatcher
    from pysnmp.carrier.asynsock.dgram import udp
    from pysnmp.proto.api import decodeMessageVersion, v2c, protoVersion2c
    from pyasn1.codec.ber import decoder


class StopListener(Exception):
    pass


def _generic_trap_filter(domain, sock, pdu, **kwargs):
    snmpTrapOID = (1, 3, 6, 1, 6, 3, 1, 1, 4, 1, 0)

    if 'host' in kwargs and kwargs['oid']:
        if sock[0] != kwargs['host']:
            return False

    for oid, val in v2c.apiPDU.getVarBindList(pdu):
        if 'oid' in kwargs and kwargs['oid']:
            if oid == snmpTrapOID:
                if val[0][0][2] != v2c.ObjectIdentifier(kwargs['oid']):
                    return False

    return True


def _trap_receiver(trap_filter, host, port, timeout):
    started = time.time()

    def _trap_timer_cb(now):
        if now - started > timeout:
            raise AssertionError('No matching trap received in %s.'
                                 % utils.secs_to_timestr(timeout))

    def _trap_receiver_cb(transport, domain, sock, msg):
        if decodeMessageVersion(msg) != protoVersion2c:
            raise RuntimeError('Only SNMP v2c traps are supported.')

        req, msg = decoder.decode(msg, asn1Spec=v2c.Message())
        pdu = v2c.apiMessage.getPDU(req)

        # ignore any non trap PDUs
        if not pdu.isSameTypeWith(v2c.TrapPDU()):
            return

        if trap_filter(domain, sock, pdu):
            raise StopListener()

        # Stop the receiver if the trap we are looking for was received.
        if False:
            raise StopListener()

    dispatcher = AsynsockDispatcher()
    dispatcher.registerRecvCbFun(_trap_receiver_cb)
    dispatcher.registerTimerCbFun(_trap_timer_cb)
    transport = udp.UdpSocketTransport().openServerMode((host, port))
    dispatcher.registerTransport(udp.domainName, transport)

    # we'll never finish, except through an exception
    dispatcher.jobStarted(1)
    try:
        dispatcher.runDispatcher()
    except StopListener:
        pass
    finally:
        dispatcher.closeDispatcher()


class _Traps:
    def __init__(self):
        self._trap_filters = dict()

    def new_trap_filter(self, name, host=None, oid=None):
        """Defines a new SNMP trap filter.

        At the moment, you can only filter on the sending host and on
        the trap OID.
        """
        trap_filter = functools.partial(_generic_trap_filter,
                                        host=host,
                                        oid=self._parse_oid(oid))
        self._trap_filters[name] = trap_filter

    def wait_until_trap_is_received(self, trap_filter_name, timeout=5.0,
                                    host='0.0.0.0', port=1620):
        """Wait until the first matching trap is received."""
        if trap_filter_name not in self._trap_filters:
            raise RuntimeError('Trap filter "%s" not found.' % trap_filter_name)
        trap_filter = self._trap_filters[trap_filter_name]
        _trap_receiver(trap_filter, host, port, timeout)
Python
0.999913
@@ -3612,16 +3612,17 @@ r_name)%0A +%0A @@ -3671,16 +3671,65 @@ er_name%5D +%0A timeout = utils.timestr_to_secs(timeout) %0A%0A
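The one-line fix normalizes the keyword's timeout with Robot Framework's time-string parser before the receiver does arithmetic on it. A quick illustration of the accepted spellings, using the same robot.utils module the file already imports:

from robot import utils

# Robot Framework accepts several spellings for a timeout; converting
# to seconds once keeps the later "now - started > timeout" comparison valid.
for value in ('5', '5.0', '10s', '1 minute 30 seconds'):
    print('{} -> {}'.format(value, utils.timestr_to_secs(value)))
# e.g. '1 minute 30 seconds' -> 90.0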
493e9314e5c37a2198c583e50c8d313015b2e8a5
Update failing line in tests
vinaigrette/tests/test_makemessages.py
vinaigrette/tests/test_makemessages.py
import polib
import os

from django.core import management
from django.test import TestCase
from django.utils import translation


class TestVinaigretteMakemessages(TestCase):
    def setUp(self):
        self.popath = 'locale/fr/LC_MESSAGES/django.po'
        self.mopath = 'locale/fr/LC_MESSAGES/django.mo'
        self.popathen = 'locale/en/LC_MESSAGES/django.po'
        self.mopathen = 'locale/en/LC_MESSAGES/django.mo'

    def tearDown(self):
        if os.path.exists(self.popath):
            os.remove(self.popath)
        if os.path.exists(self.mopath):
            os.remove(self.mopath)
        if os.path.exists(self.popathen):
            os.remove(self.popathen)
        if os.path.exists(self.mopathen):
            os.remove(self.mopathen)

    def test_happy_path(self):
        management.call_command('makemessages', locale=('fr',))
        pofile = polib.pofile(self.popath)
        expected = {
            u'Vinaigrette': [(u'dressings.Dressing/name', u'1')],
            u'Ranch': [(u'dressings.Dressing/name', u'2')],
            u'Thousand Island': [(u'dressings.Dressing/name', u'3')],
        }
        actual = {poentry.msgid: poentry.occurrences for poentry in pofile}
        self.assertDictContainsSubset(expected, actual)

    def test_same_result_after_compilemessages(self):
        """
        Ensures that makemessages is not affected by a current compiled
        message file.
        """
        # makemessages without translations being active
        with translation.override(None):
            management.call_command('makemessages', locale=('en',))
        if os.path.exists(self.mopathen):
            os.remove(self.mopathen)

        # Fill some translations
        pofileen = polib.pofile(self.popathen)
        for entry in pofileen:
            if entry.msgid == u'Vinaigrette':
                entry.msgstr = u'blub'
        pofileen.save(self.popathen)

        # compilemessages
        management.call_command('compilemessages', locale=('en',))

        # Reload the catalog (clear catalog, then re-activate; rather hacky).
        translation.trans_real.gettext_module._translations.clear()
        translation.trans_real._translations.clear()
        translation._trans._translations.clear()
        translation._trans.catalog()._catalog.clear()
        translation.activate('en')

        # makemessages with translations being active
        management.call_command('makemessages', locale=('en',))

        # Did it work?
        expected = {
            u'Vinaigrette': ('blub', [(u'dressings.Dressing/name', u'1')]),
            u'Ranch': ('', [(u'dressings.Dressing/name', u'2')]),
            u'Thousand Island': ('', [(u'dressings.Dressing/name', u'3')]),
        }
        pofileen2 = polib.pofile(self.popathen)
        actual = {}
        for poentry in pofileen2:
            if not poentry.obsolete:
                actual[poentry.msgid] = (poentry.msgstr, poentry.occurrences)
        for k, v in expected.items():
            self.assertEqual(v, actual.get(k, None))
Python
0
@@ -2296,16 +2296,26 @@ catalog. +_catalogs. clear()%0A
78285953627e4e70e2abc28aa4897bade1d4babf
Update __init__.py
loggerloader/__init__.py
loggerloader/__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os

try:
    from loggerloader.loader import *
except ImportError:
    from .loggerloader import *

__version__ = '0.4.3'
__author__ = 'Paul Inkenbrandt'
__name__ = 'loggerloader'

__all__ = ['new_trans_imp','well_baro_merge','fcl','wellimport','simp_imp_well','WaterElevation',
           'table_to_pandas_dataframe','HeaderTable','PullOutsideBaro']
Python
0.000072
@@ -184,22 +184,16 @@ from . -logger loader i
83f99100bc0227ba5fc91efb7b1d797ce1735491
use ismlmux for smooth-streaming
flumotion/component/muxers/fmp4/fmp4.py
flumotion/component/muxers/fmp4/fmp4.py
# -*- Mode: Python; test-case-name: flumotion.muxers.mpegts.mpegts -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2009,2010 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# flumotion-fragmented-streaming - Flumotion Advanced fragmented streaming
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.

import sys

import gst
import gobject

from flumotion.component import feedcomponent
from flumotion.component.component import moods
from flumotion.common import gstreamer, messages, documentation
from flumotion.common.i18n import N_, gettexter
from flumotion.component.consumers.applestreamer import mpegtssegmenter

T_ = gettexter()


class FMP4(feedcomponent.MuxerComponent):
    checkTimestamp = True
    DEFAULT_FRAGMENT_DURATION=5000

    def do_check(self):
        exists = gstreamer.element_factory_exists('mp4mux')
        if not exists:
            m = messages.Error(T_(N_(
                "%s is missing. Make sure your %s "
                "installation is complete."),
                'mp4mux', 'mp4mux'))
            documentation.messageAddGStreamerInstall(m)
            self.debug(m)
            self.addMessage(m)
            return

        v = gstreamer.get_plugin_version('qtmux')
        # The mpegtsmuxer does not use the delta unit flag to mark keyframes
        # until gst-plugin-bad-0.10.18. Patched versions in the platform
        # will be numberer using minor=10 to check if the plugin has been
        # patched
        if v <= (0, 10, 19, 0) and v[3] != 11:
            m = messages.Warning(
                T_(N_("Versions up to and including %s of the '%s' "
                      "GStreamer plug-in are not suitable for "
                      "smooth streaming.\n"),
                   '0.10.19', 'qtmux'))
            self.addMessage(m)

    def get_muxer_string(self, props):
        muxer = 'mp4mux name=muxer fragment-duration=%d ' \
                'movie-timescale=10000000 trak-timescale=10000000 streamable=1' % \
                props.get('fragment-duration', self.DEFAULT_FRAGMENT_DURATION)
        return muxer

    def configure_pipeline(self, pipeline, properties):
        feedcomponent.MuxerComponent.configure_pipeline(self, pipeline,
                                                        properties)
Python
0.000001
@@ -1131,19 +1131,20 @@ exists(' -mp4 +isml mux')%0A @@ -1345,21 +1345,23 @@ ' -mp4 +isml mux', ' -mp4 +isml mux' @@ -2183,11 +2183,12 @@ = ' -mp4 +isml mux
66d1bce2cb497954749b211a26fd00ae4db6f7e7
Remove random bit of code
foodsaving/conversations/serializers.py
foodsaving/conversations/serializers.py
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import PermissionDenied

from foodsaving.conversations.models import Conversation, ConversationMessage


class ConversationSerializer(serializers.ModelSerializer):
    class Meta:
        model = Conversation
        fields = [
            'id',
            'participants',
            'created_at'
        ]

    def retrieve(self, validated_data):
        user = self.context['request'].user
        return ConversationMessage.objects.create(author=user, **validated_data)


class ConversationMessageSerializer(serializers.ModelSerializer):
    class Meta:
        model = ConversationMessage
        fields = [
            'id',
            'author',
            'content',
            'conversation',
            'created_at'
        ]


class CreateConversationMessageSerializer(serializers.ModelSerializer):
    class Meta:
        model = ConversationMessage
        fields = [
            'id',
            'author',
            'content',
            'conversation'
        ]
        extra_kwargs = {
            'author': {
                'read_only': True
            }
        }

    def validate_conversation(self, conversation):
        if self.context['request'].user not in conversation.participants.all():
            raise PermissionDenied(_('You are not in this conversation'))
        return conversation

    def create(self, validated_data):
        user = self.context['request'].user
        return ConversationMessage.objects.create(author=user, **validated_data)
Python
0
@@ -433,174 +433,8 @@ %5D%0A%0A - def retrieve(self, validated_data):%0A user = self.context%5B'request'%5D.user%0A return ConversationMessage.objects.create(author=user, **validated_data)%0A%0A %0Acla
efa61b2948703eaee4ae86910f75276a1cc39ccc
update to MVP
MVP.py
MVP.py
import json
Python
0
@@ -6,8 +6,849 @@ rt json%0A +import requests%0Aimport time%0Aimport urrlib2%0A%0A%0A%0A%0A%0Adef getTemp(int turbine):%0A string url ='https://turbine-farm.run.aws-usw02-pr.ice.predix.io/api/turbines/' + turbine + '/sensors/temperature'%0A float temperature = urrlib2.urlopen(url).read()%0A return voltage%0A%0Adef getVoltage(int turbine):%0A string url ='https://turbine-farm.run.aws-usw02-pr.ice.predix.io/api/turbines/' + turbine + '/sensors/voltage'%0A float voltage = urrlib2.urlopen(url).read()%0A return voltage%0A%0Awhile resp.status_code = 200%0A #print voltage of each turbine%0A for i in %5B1, 2, 3%5D:%0A print('Voltage for turbine %7B0%7D is %7B1%7D.'.format(i, getVoltage(i)))%0A #print temperature of each turbine%0A for i in %5B1, 2, 3%5D:%0A print('Temperature for turbine %7B0%7D is %7B1%7D.'.format(i, getTemp(i)))%0A ## wait 2 seconds before printing again%0A time.sleep(2)%0A%0A%0A
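Note that the code this diff adds is not valid Python as committed: it declares C-style types (def getTemp(int turbine):, string url = ...), misspells urllib2 as urrlib2, and uses = instead of == in the while condition. A runnable sketch of the intended polling loop against the same (hypothetical) turbine-farm endpoints could look like:

import time

import requests

BASE = 'https://turbine-farm.run.aws-usw02-pr.ice.predix.io/api/turbines'


def read_sensor(turbine, sensor):
    # One GET per turbine/sensor pair, e.g. .../turbines/1/sensors/voltage
    response = requests.get('{0}/{1}/sensors/{2}'.format(BASE, turbine, sensor))
    response.raise_for_status()
    return response.text


while True:
    for turbine in (1, 2, 3):
        print('Voltage for turbine {0} is {1}.'.format(
            turbine, read_sensor(turbine, 'voltage')))
        print('Temperature for turbine {0} is {1}.'.format(
            turbine, read_sensor(turbine, 'temperature')))
    # wait 2 seconds before printing again
    time.sleep(2)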
4854ca20caa50204d847630c8bf95a0bba399bdb
Fix typo introduced in commit:86a6174
web/blueprints/dormitories/__init__.py
web/blueprints/dormitories/__init__.py
# -*- coding: utf-8 -*-
# Copyright (c) 2014 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
"""
    web.blueprints.dormitories
    ~~~~~~~~~~~~~~

    This module defines view functions for /dormitories

    :copyright: (c) 2012 by AG DSN.
"""

from datetime import datetime
from flask import Blueprint, flash, redirect, render_template, url_for
from flask.ext.login import current_user
from pycroft import lib
from pycroft.helpers import dormitory
from pycroft.lib.dormitory import create_dormitory, create_room, delete_room
from pycroft.model.session import session
from pycroft.model.dormitory import Room, Dormitory
from web.blueprints.navigation import BlueprintNavigation
from web.blueprints.dormitories.forms import RoomForm, DormitoryForm, \
    RoomLogEntry
from web.blueprints.access import BlueprintAccess

bp = Blueprint('dormitories', __name__, )
access = BlueprintAccess(bp, ['dormitories_show'])
nav = BlueprintNavigation(bp, "Wohnheime", blueprint_access=access)


@bp.route('/')
@nav.navigate(u"Wohnheime")
# careful with permissions here, redirects!
def overview():
    dormitories_list = Dormitory.q.all()
    dormitories_list = dormitory.sort_dormitories(dormitories_list)
    return render_template('dormitories/overview.html',
        dormitories=dormitories_list)


@bp.route('/show/<dormitory_id>')
@access.require('dormitories_show')
def dormitory_show(dormitory_id):
    dormitory = Dormitory.q.get(dormitory_id)
    rooms_list = dormitory.rooms
    return render_template('dormitories/dormitory_show.html',
        page_title=u"Wohnheim " + dormitory.short_name,
        rooms=rooms_list)


@bp.route('/create', methods=['GET', 'POST'])
@nav.navigate(u"Neues Wohnheim")
@access.require('dormitories_change')
def dormitory_create():
    form = DormitoryForm()
    if form.validate_on_submit():
        create_dormitory(short_name=form.short_name.data,
            street=form.street.data, number=form.number.data)
        flash(u'Wohnheim angelegt', 'success')
        return redirect(url_for('.dormitories'))
    return render_template('dormitories/dormitory_create.html', form=form)


@bp.route('/room/delete/<room_id>')
@access.require('dormitories_change')
def room_delete(room_id):
    delete_room(room_id)
    flash(u'Raum gelöscht', 'success')
    return redirect(url_for('.overview'))


@bp.route('/room/show/<room_id>', methods=['GET', 'POST'])
@access.require('dormitories_show')
def room_show(room_id):
    room = Room.q.get(room_id)
    form = RoomLogEntry()

    if form.validate_on_submit():
        lib.logging.create_room_log_entry(message=form.message.data,
            timestamp=datetime.utcnow(), author=current_user, room=room)
        flash(u'Kommentar hinzugefügt', 'success')

    room_log_list = room.room_log_entries[::-1]

    return render_template('dormitories/room_show.html',
        page_title=u"Raum " + str(room.dormitory.short_name) + u" " + \
            str(room.level) + u"-" + str(room.number),
        room=room, room_log=room_log_list, form=form)


@bp.route('/room/create', methods=['GET', 'POST'])
@nav.navigate(u"Neuer Raum")
@access.require('dormitories_change')
def room_create():
    form = RoomForm()
    if form.validate_on_submit():
        room = create_room(
            number=form.number.data, level=form.level.data,
            inhabitable=form.inhabitable.data,
            dormitory=form.dormitory_id.data)
        flash(u'Raum angelegt', 'success')
        return redirect(url_for('.room_show', room_id=room.id))
    return render_template('dormitories/dormitory_create.html', form=form)


# ToDo: Review this!
@bp.route('/levels/<int:dormitory_id>')
@access.require('dormitories_show')
def dormitory_levels(dormitory_id):
    dormitory = Dormitory.q.get(dormitory_id)
    rooms_list = Room.q.filter_by(
        dormitory_id=dormitory_id).order_by(Room.level).distinct()
    levels_list = [room.level for room in rooms_list]
    levels_list = list(set(levels_list))

    return render_template('dormitories/levels.html',
        levels=levels_list, dormitory_id=dormitory_id, dormitory=dormitory,
        page_title=u"Etagen Wohnheim {}".format(dormitory.short_name))


# ToDo: Review this!
@bp.route('/levels/<int:dormitory_id>/rooms/<int:level>')
@access.require('dormitories_show')
def dormitory_level_rooms(dormitory_id, level):
    dormitory = Dormitory.q.get(dormitory_id)
    rooms_list = Room.q.filter_by(
        dormitory_id=dormitory_id, level=level).order_by(Room.number)
    level_l0 = "{:%02d}".format(level)

    #TODO depending on, whether a user is living in the room, the room is
    # a link to the user. If there is more then one user, the room is
    # duplicated

    return render_template('dormitories/rooms.html',
        rooms=rooms_list, level=level_l0, dormitory=dormitory,
        page_title=u"Zimmer der Etage {:d} des Wohnheims {}".format(level,
            dormitory.short_name))
Python
0.000005
@@ -4763,9 +4763,8 @@ %22%7B: -%25 02d%7D
23f68acc3856178da682b09c401ee81436b84ee9
Fix datasetname
src/auspex/filters/io.py
src/auspex/filters/io.py
# Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0

__all__ = ['WriteToFile', 'DataBuffer']

import os
import sys
if sys.platform == 'win32' or 'NOFORKING' in os.environ:
    import threading as mp
    from threading import Thread as Process
    from threading import Event
    from queue import Queue
else:
    import multiprocessing as mp
    from multiprocessing import Process, Event
    from multiprocessing import Queue

from auspex.data_format import AuspexDataContainer

import itertools
import contextlib
import queue
import numpy as np
import os.path
import os, psutil
import time
import datetime
from shutil import copyfile
import cProfile

from .filter import Filter
from auspex.parameter import Parameter, FilenameParameter, BoolParameter
from auspex.stream import InputConnector, OutputConnector
from auspex.log import logger
import auspex.config as config


class WriteToFile(Filter):
    """Writes data to file using the Auspex container type, which is a simple
    directory structure with subdirectories, binary datafiles, and json meta
    files that store the axis descriptors and other information."""

    sink = InputConnector()
    filename = FilenameParameter()
    groupname = Parameter(default='main')

    def __init__(self, filename=None, groupname=None, datasetname='data', **kwargs):
        super(WriteToFile, self).__init__(**kwargs)
        if filename:
            self.filename.value = filename
        if groupname:
            self.groupname.value = groupname
        if datasetname:
            self.datasetname = datasetname

        self.ret_queue = None # MP queue For returning data

    def final_init(self):
        assert self.filename.value, "Filename never supplied to writer."
        assert self.groupname.value, "Groupname never supplied to writer."
        assert self.datasetname, "Dataset name never supplied to writer."

        self.descriptor = self.sink.input_streams[0].descriptor
        self.container = AuspexDataContainer(self.filename.value)
        self.group = self.container.new_group(self.groupname.value)
        self.mmap = self.container.new_dataset(self.groupname.value,
                                               self.datasetname,
                                               self.descriptor)

        self.w_idx = 0
        self.points_taken = 0

    def get_data_while_running(self, return_queue):
        """Return data to the main thread or user as requested. Use a MP queue to transmit."""
        assert not self.done.is_set(), Exception("Experiment is over and filter done. Please use get_data")
        self.return_queue.put(np.array(self.mmap))

    def get_data(self):
        assert self.done.is_set(), Exception("Experiment is still running. Please use get_data_while_running")
        container = AuspexDataContainer(self.filename.value)
        return container.open_dataset(self.groupname.value, self.datasetname)

    def process_data(self, data):
        # Write the data
        self.mmap[self.w_idx:self.w_idx+data.size] = data
        self.w_idx += data.size
        self.points_taken = self.w_idx


class DataBuffer(Filter):
    """Writes data to IO."""

    sink = InputConnector()

    def __init__(self, **kwargs):
        super(DataBuffer, self).__init__(**kwargs)
        self._final_buffer = Queue()
        self._temp_buffer = Queue()
        self._get_buffer = Event()
        self.final_buffer = None

    def final_init(self):
        self.w_idx = 0
        self.points_taken = 0
        self.descriptor = self.sink.input_streams[0].descriptor
        self.buff = np.empty(self.descriptor.expected_num_points(),
                             dtype=self.descriptor.dtype)

    def checkin(self):
        if self._get_buffer.is_set():
            self._temp_buffer.put(self.buff)
            self._get_buffer.clear()

    def process_data(self, data):
        # Write the data
        self.buff[self.w_idx:self.w_idx+data.size] = data
        self.w_idx += data.size
        self.points_taken = self.w_idx

    def main(self):
        super(DataBuffer, self).main()
        self._final_buffer.put(self.buff)

    def get_data(self):
        if self.done.is_set():
            if self.final_buffer is None:
                self.final_buffer = self._final_buffer.get()
                time.sleep(0.05)
            return np.reshape(self.final_buffer, self.descriptor.dims()), self.descriptor
        else:
            self._get_buffer.set()
            temp_buffer = self._temp_buffer.get()
            time.sleep(0.05)
            return np.reshape(temp_buffer, self.descriptor.dims()), self.descriptor
Python
0.000942
@@ -1452,16 +1452,60 @@ ='main') +%0A datasetname = Parameter(default='data') %0A%0A de @@ -1564,22 +1564,20 @@ setname= -'data' +None , **kwar @@ -1817,16 +1817,22 @@ asetname +.value = datas @@ -2105,16 +2105,22 @@ asetname +.value , %22Datas @@ -2450,16 +2450,22 @@ asetname +.value , self.d
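The fix promotes datasetname from a plain attribute to a Parameter, so every later access goes through .value. A minimal stand-in (not auspex's real Parameter class, which carries more metadata) shows why the patched __init__ assigns self.datasetname.value rather than rebinding the attribute:

class Parameter(object):
    """Minimal stand-in: the real auspex Parameter carries more metadata,
    but the diff only relies on the .value attribute modelled here."""

    def __init__(self, default=None):
        self.value = default


class Writer(object):
    # Declared once at class level, as in the patched WriteToFile.
    datasetname = Parameter(default='data')

    def __init__(self, datasetname=None):
        if datasetname:
            # Assign through .value; rebinding self.datasetname would
            # replace the Parameter object instead of updating it.
            self.datasetname.value = datasetname


w = Writer('scan1')
print(w.datasetname.value)  # 'scan1'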
111c30f5530c7b5abd785b7640e1790e12f36439
Print config file name along with config errors
git-keeper-server/gkeepserver/gkeepd.py
git-keeper-server/gkeepserver/gkeepd.py
# Copyright 2016, 2017 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
Main entry point for gkeepd, the git-keeper server process.

Spawns a number of threads:

logger - GkeepdLoggerThread for logging runtime information
email_sender - EmailSenderThread for sending rate-limited emails
log_poller - LogPollingThread for watching student and faculty logs for
events
handler_assigner - EventHandlerAssignerThread for creating event handlers from
log events
submission_test_threads - list of SubmissionTestThread objects which run tests
"""

import argparse
from time import sleep
import fcntl
import sys
from queue import Queue
from signal import signal, SIGINT, SIGTERM

from gkeepcore.version import __version__ as core_version
from gkeepserver.check_config import check_config
from gkeepserver.check_system import check_system
from gkeepserver.database import db
from gkeepserver.email_sender_thread import email_sender
from gkeepserver.event_handler_assigner import EventHandlerAssignerThread
from gkeepserver.event_handler_thread import EventHandlerThread
from gkeepserver.event_handlers.handler_registry import event_handlers_by_type
from gkeepserver.gkeepd_logger import gkeepd_logger as logger
from gkeepserver.info_update_thread import info_updater
from gkeepserver.local_log_file_reader import LocalLogFileReader
from gkeepserver.log_polling import log_poller
from gkeepserver.server_configuration import config, ServerConfigurationError
from gkeepserver.submission_test_thread import SubmissionTestThread
from gkeepserver.version import __version__ as server_version

# switched to True by the signal handler on SIGINT or SIGTERM
shutdown_flag = False


def signal_handler(signum, frame):
    """
    Handle SIGINT and SIGTERM signals.

    The main loop keeps looping while shutdown_flag is False. This will
    switch it to True so the server shuts down.

    :param signum: unused
    :param frame: unused
    """

    global shutdown_flag
    shutdown_flag = True


def verify_core_version_match():
    """
    Exits with a non-zero exit code if the gkeepserver version does not match
    the gkeepcore version.
    """

    if server_version != core_version:
        error = 'git-keeper-server and git-keeper-core versions must match.\n'
        error += 'server version: {}\n'.format(server_version)
        error += 'core version: {}'.format(core_version)
        sys.exit(error)


def main():
    """
    Entry point of the gkeepd process.

    If gkeepd is run with the --version or -v flags, it will print the
    current version and exit.
    """

    verify_core_version_match()

    description = ('gkeepd, the git-keeper server, version {}'
                   .format(server_version))
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-v', '--version', action='store_true',
                        help='Print gkeepd version')
    parser.add_argument('-c', '--check', action='store_true',
                        help='Validate config and send test email to admins')
    args = parser.parse_args()

    if args.version:
        print('gkeepd version {}'.format(server_version))
        sys.exit(0)

    if args.check:
        try:
            check_config()
            sys.exit(0)
        except Exception as e:
            print(e)
            sys.exit(1)

    # setup signal handling
    global shutdown_flag
    signal(SIGINT, signal_handler)
    signal(SIGTERM, signal_handler)

    # do not run if there are errors in the configuration file
    try:
        config.parse()
    except ServerConfigurationError as e:
        sys.exit(e)

    # prevent multiple instances
    try:
        fp = open(config.lock_file_path, 'w')
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        error_message = ('Could not lock {}, gkeepd may already be running'
                         .format(config.lock_file_path))
        sys.exit(error_message)

    # initialize and start system logger
    logger.initialize(config.log_file_path, log_level=config.log_level)
    logger.start()
    logger.log_info('--- Starting gkeepd version {}---'.format(server_version))

    db.connect(config.db_path)

    # check for fatal errors in the system state, and correct correctable
    # issues including new faculty members
    try:
        check_system()
    except Exception as e:
        logger.log_error(str(e))
        logger.log_info('Shutting down')
        logger.shutdown()
        sys.exit(1)

    # start the info refresher thread and refresh the info for each faculty
    info_updater.start()
    for faculty in db.get_all_faculty():
        info_updater.enqueue_full_scan(faculty.username)

    # queues for thread communication
    new_log_event_queue = Queue()
    event_handler_queue = Queue()

    # the handler assigner creates event handlers for the event handler thread
    # to call upon
    handler_assigner = EventHandlerAssignerThread(new_log_event_queue,
                                                  event_handler_queue,
                                                  event_handlers_by_type,
                                                  logger)

    # the event handler thread handles events created by the assigner
    event_handler_thread = EventHandlerThread(event_handler_queue, logger)

    # the log poller detects new events and passes them to the handler assigner
    log_poller.initialize(new_log_event_queue, LocalLogFileReader, logger)

    # start the rest of the threads
    email_sender.start()

    submission_test_threads = []
    for count in range(config.test_thread_count):
        # thread is automatically started by the constructor
        submission_test_threads.append(SubmissionTestThread())

    event_handler_thread.start()
    handler_assigner.start()
    log_poller.start()

    logger.log_info('Server is running')

    # spin until shutdown
    while not shutdown_flag:
        sleep(0.1)

    print('Shutting down. Waiting for threads to finish ... ', end='')
    # flush so it prints immediately despite no newline
    sys.stdout.flush()

    logger.log_info('Shutting down threads')

    # shut down the pipeline in this order so that no new log events are lost
    log_poller.shutdown()
    handler_assigner.shutdown()
    event_handler_thread.shutdown()

    for thread in submission_test_threads:
        thread.shutdown()

    info_updater.shutdown()
    email_sender.shutdown()

    logger.log_info('Shutting down gkeepd')
    logger.shutdown()

    print('done')


if __name__ == '__main__':
    main()
Python
0
@@ -4214,24 +4214,88 @@ Error as e:%0A + error = 'Error in %7B%7D%5Cn%7B%7D'.format(config.config_path, e)%0A sys. @@ -4300,16 +4300,20 @@ s.exit(e +rror )%0A%0A #
7437f6275b3da51281269defe01877484ed505c9
remove hardcoded test string
main/tests/test_index.py
main/tests/test_index.py
from django.test import TestCase
from django.urls import reverse
from django.utils.translation import activate


class IndexViewsTest(TestCase):
    def test_index(self):
        """
        The index view.
        """
        activate('en')
        url = reverse('main:index')
        response = self.client.get(url)
        self.assertContains(response, 'OK!Thess')

    def test_about(self):
        """
        The about view.
        """
        activate('en')
        url = reverse('main:about')
        response = self.client.get(url)
        self.assertContains(response, 'OK!Thess')
        self.assertContains(response, 'Τι κάνουμε')

    def test_contact(self):
        """
        The contact view.
        """
        activate('en')
        url = reverse('main:contact')
        response = self.client.get(url)
        self.assertContains(response, 'OK!Thess')
Python
0.999978
@@ -588,60 +588,8 @@ ss') -%0A self.assertContains(response, '%CE%A4%CE%B9 %CE%BA%CE%AC%CE%BD%CE%BF%CF%85%CE%BC%CE%B5') %0A%0A
a99a8ea2b1e7d3a0d9274f451aeb79c03bdbb0b9
Update version.py
manager_utils/version.py
manager_utils/version.py
__version__ = '0.7.0'
Python
0.000001
@@ -12,11 +12,11 @@ = '0.7. -0 +1 '%0A
b298c28fbdcfc680ff105255158961f930a60d9e
Change name of test file
api.py
api.py
#!/usr/bin/env python
'''
Flask app that provides a RESTful API to the multiscanner.

Proposed supported operations:
GET / ---> Test functionality. {'Message': 'True'}
GET /api/v1/tasks/list ---> Receive list of tasks in multiscanner
GET /api/v1/tasks/<task_id> ---> receive report in JSON format
GET /api/v1/tasks/delete/<task_id> ----> delete task_id
POST /api/v1/tasks/create ---> POST file and receive report id
'''
from flask import Flask, jsonify, make_response, request, abort

TASKS = [
    {'id': 1, 'report': {"/tmp/example.log":{"MD5":"53f43f9591749b8cae536ff13e48d6de","SHA256":"815d310bdbc8684c1163b62f583dbaffb2df74b9104e2aadabf8f8491bafab66","libmagic":"ASCII text"}}},
    {'id': 2, 'report': {"/opt/qtip/grep_in_mem.py":{"MD5":"96b47da202ddba8d7a6b91fecbf89a41","SHA256":"26d11f0ea5cc77a59b6e47deee859440f26d2d14440beb712dbac8550d35ef1f","libmagic":"a /bin/python script text executable"}}},
]

TASK_NOT_FOUND = {'Message': 'No task with that ID not found!'}
INVALID_REQUEST = {'Message': 'Invalid request parameters'}

HTTP_OK = 200
HTTP_CREATED = 201
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404

app = Flask(__name__)


@app.errorhandler(HTTP_BAD_REQUEST)
def invalid_request(error):
    return make_response(jsonify(INVALID_REQUEST), HTTP_BAD_REQUEST)


@app.errorhandler(HTTP_NOT_FOUND)
def not_found(error):
    return make_response(jsonify(TASK_NOT_FOUND), HTTP_NOT_FOUND)


@app.route('/')
def index():
    return jsonify({'Message': 'True'})


@app.route('/api/v1/tasks/list/', methods=['GET'])
def task_list():
    return jsonify({'tasks': TASKS})


@app.route('/api/v1/tasks/list/<int:task_id>', methods=['GET'])
def get_task(task_id):
    task = [task for task in TASKS if task['id'] == task_id]
    if len(task) == 0:
        abort(HTTP_NOT_FOUND)
    return jsonify({'Message': task[0]})


@app.route('/api/v1/tasks/delete/<int:task_id>', methods=['GET'])
def delete_task(task_id):
    task = [task for task in TASKS if task['id'] == task_id]
    if len(task) == 0:
        abort(HTTP_NOT_FOUND)
    TASKS.remove(task[0])
    return jsonify({'Message': 'Deleted'})


@app.route('/api/v1/tasks/create/', methods=['POST'])
def create_task():
    if not request.json or not 'report' in request.json:
        abort(HTTP_BAD_REQUEST)
    task = {
        'id': TASKS[-1]['id'] + 1,
        'report': request.json['report'],
    }
    TASKS.append(task)
    return jsonify({'Message': 'Added'}), HTTP_CREATED


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=True)
Python
0.000001
@@ -715,13 +715,8 @@ opt/ -qtip/ grep
22e65c38ec3e0995e8b07436ef05493ac1347b1f
Check ping on api backend
api.py
api.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests

from bottle import route, run, response, request


def jsonp(request, dictionary):
    if request.query.callback:
        return "%s(%s)" % (request.query.callback, dictionary)
    return dictionary


@route('/<path:path>')
def index(path):
    response.set_header('Access-Control-Allow-Origin', '*')
    response.set_header('charset', 'UTF-8')
    j = "&".join("%s=%s" % tup for tup in request.GET.items())
    method = request.query._method
    r = getattr(requests, method)
    re = r('http://127.0.0.1:8098/{}?{}'.format(path, j))
    print re

    if request.query.callback:
        response.content_type = "application/javascript"
    d = re.text
    if method in ['delete']:
        d = {}
        return jsonp(request, d)
    response.content_type = 'application/json'
    return re.text


run(host='127.0.0.1', port=8889)
Python
0.000001
@@ -54,16 +54,28 @@ requests +%0Aimport json %0A%0Afrom b @@ -608,21 +608,36 @@ j))%0A +%0A -print re%0A +d = json.dumps(re.text) %0A @@ -721,24 +721,25 @@ script%22%0A +%0A d = re.t @@ -734,19 +734,140 @@ -d = re.text +if path == 'ping' and re.text == %22OK%22:%0A d = %7B%22ping%22: %22OK%22%7D%0A elif(path == 'ping'):%0A d = %7B%22ping%22: %22OFF%22%7D%0A %0A @@ -915,16 +915,17 @@ d = %7B%7D%0A +%0A @@ -949,16 +949,17 @@ est, d)%0A +%0A resp @@ -1008,23 +1008,17 @@ return -re.text +d %0A%0A%0Arun(h
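The added branch maps the backend's bare OK body for /ping onto a small JSON document the frontend can consume. A condensed, testable sketch of just that translation (hypothetical helper name, with the bottle plumbing stubbed out):

import json


def ping_payload(path, backend_text):
    # Bare "OK" from the backend means the service is up; anything
    # else on the ping path reports it as down.
    if path == 'ping':
        status = 'OK' if backend_text == 'OK' else 'OFF'
        return json.dumps({'ping': status})
    return json.dumps(backend_text)


assert json.loads(ping_payload('ping', 'OK')) == {'ping': 'OK'}
assert json.loads(ping_payload('ping', 'error')) == {'ping': 'OFF'}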
b01cdae6ed0e41934d95cd3191e6cd5046d52fdb
fix for migration where no favorites exist
wooey/migrations/0019_userfile_data.py
wooey/migrations/0019_userfile_data.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations
from django.apps import apps
from django.contrib.contenttypes.management import update_contenttypes


def update_all_contenttypes(**kwargs):
    # from http://stackoverflow.com/questions/29550102/importerror-cannot-import-name-update-all-contenttypes
    for app_config in apps.get_app_configs():
        update_contenttypes(app_config, **kwargs)


def gen_userfiles(apps, schema_editor):
    WooeyFile = apps.get_model('wooey', 'WooeyFile')
    UserFile = apps.get_model('wooey', 'UserFile')
    Favorite = apps.get_model('wooey', 'Favorite')
    update_all_contenttypes()
    ContentType = apps.get_model("contenttypes", "ContentType")
    ctype = ContentType.objects.get(model='wooeyfile')
    new_ctype = ContentType.objects.get(model='userfile')
    import os
    checksums = {}
    to_delete = []
    for obj in WooeyFile.objects.all():
        checksum = obj.checksum
        first_file = checksums.get(checksum, obj)
        user_file = UserFile(filename=os.path.split(obj.filepath.name)[1],
                             job=obj.job, parameter=obj.parameter,
                             system_file=first_file)
        user_file.save()
        Favorite.objects.filter(content_type=ctype,
                                object_id=obj.id).update(content_object=first_file,
                                                         content_type=new_ctype)
        if first_file != obj:
            to_delete.append(obj.pk)

    # remove redundant wooeyfiles
    WooeyFile.objects.filter(pk__in=to_delete).delete()


class Migration(migrations.Migration):

    dependencies = [
        ('wooey', '0018_userfile'),
    ]

    operations = [
        migrations.RunPython(gen_userfiles),
    ]
Python
0
@@ -1213,16 +1213,28 @@ %0A + favorites = Favorit @@ -1287,16 +1287,69 @@ =obj.id) +%0A if favorites.count():%0A favorites .update(
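The fix only runs the bulk update when the favorites queryset is non-empty. A sketch of the guarded re-pointing (a hypothetical helper; it writes the concrete content_type/object_id columns, while the migration itself also passes the virtual content_object field):

def repoint_favorites(Favorite, old_ctype, new_ctype, old_id, new_id):
    # The fix: skip the update entirely when no favorite rows match,
    # so the data migration cannot trip over an empty queryset.
    favorites = Favorite.objects.filter(content_type=old_ctype,
                                        object_id=old_id)
    if favorites.count():
        favorites.update(content_type=new_ctype, object_id=new_id)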
919e141a367ce7dc05d55958f3a5f7eda5f4232a
sort languages by code
mediasnakebooks/views.py
mediasnakebooks/views.py
import os
import re
import time
import json

from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponseForbidden, StreamingHttpResponse
from django.core.cache import cache
from django.views.decorators.cache import cache_page
from django.views.decorators.cache import cache_control
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required

from mediasnakebooks.models import Ebook, Word, Language
from mediasnakebooks.epubtools import open_epub
from mediasnakebooks.tokenize import tokenize
from mediasnakebooks._stardict import Stardict


@login_required
def index(request):
    books = Ebook.objects.order_by('author', 'title').all()
    context = {'books': books}
    return render(request, "mediasnakebooks/index.html", context)


def _get_epub(id, pos):
    try:
        ebook = Ebook.objects.get(pk=id)
    except Ebook.DoesNotExist:
        raise Http404

    epub = open_epub(ebook.filename)
    chapters = epub.chapters()

    try:
        pos = int(pos)
        chapter = chapters[pos]
    except (IndexError, ValueError):
        raise Http404

    paragraphs = epub.get(chapter)

    return ebook, epub, chapters, paragraphs, pos


@login_required
def ebook(request, id, pos):
    ebook, epub, chapters, paragraphs, pos = _get_epub(id, pos)

    languages = [dict(code=lang.code, dict_url=lang.dict_url)
                 for lang in Language.objects.all()]

    context = {
        'ebook': ebook,
        'paragraphs': paragraphs,
        'chapters': chapters,
        'pos': pos,
        'next': pos + 1 if pos + 1 < len(chapters) else None,
        'prev': pos - 1 if pos > 0 else None,
        'languages': languages,
    }

    return render(request, "mediasnakebooks/ebook.html", context)


@login_required
@cache_control(private=True, max_age=30*60)
@cache_page(30*60)
def tokens(request, id, pos, language):
    ebook, epub, chapters, paragraphs, pos = _get_epub(id, pos)
    words, html = tokenize(paragraphs, language)
    html = re.sub(u' </span>', u'</span> ', html)
    html = re.sub(u'(<span[^>]*>) ', ur' \1', html)
    content = json.dumps(dict(html=html, words=words))
    return HttpResponse(content, content_type="application/json")


@login_required
def word_dict(request, language, word):
    try:
        lang = Language.objects.get(code=language)
    except Language.DoesNotExist:
        raise Http404

    text = u"<No dictionary -- see admin page>"
    error = True

    if lang.stardict is not None:
        try:
            sd = Stardict(lang.stardict)
            text = u"\n\n".join(sd.lookup(word))
            if text:
                error = False
            else:
                text = u"<No results for '%s'>" % (word,)
        except IOError:
            text = u"<Stardict dictionary file not found!>"

    content = json.dumps(dict(text=text, error=error))
    return HttpResponse(content, content_type="application/json")


@login_required
def words(request, language):
    if request.method != 'POST':
        raise Http404

    try:
        lang = Language.objects.get(code=language)
    except Language.DoesNotExist:
        raise Http404

    words = request.POST.getlist('words[]')
    known = [5]*len(words)

    chunksize = 50
    for j in range(0, len(words), chunksize):
        word_objs = Word.objects.filter(base_form__in=words[j:j+chunksize],
                                        language=lang)
        for w in word_objs:
            j = words.index(w.base_form)
            known[j] = w.known

    content = json.dumps(dict(words=words, known=known))
    return HttpResponse(content, content_type="application/json")


@login_required
def word_adjust(request, language, word):
    if request.method != 'POST':
        raise Http404

    try:
        lang = Language.objects.get(code=language)
    except Language.DoesNotExist:
        raise Http404

    try:
        level = int(request.POST['level'])
        if level < 0 or level > 5:
            raise ValueError
    except ValueError:
        return HttpResponse("400 Bad request", status=400)

    word, created = Word.objects.get_or_create(base_form=word,
                                               language=lang)
    word.known = level
    word.save()

    content = json.dumps(dict(word=word.base_form, level=level))
    return HttpResponse(content, content_type="application/json")
Python
0.999933
@@ -1434,16 +1434,33 @@ objects. +order_by('code'). all()%5D%0A%0A
0f181735205fde964af301a19805879b134e1bba
Fix typo
main.py
main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib2 import Request, urlopen, HTTPError, URLError
from csv_converter import CsvConverter
from stock_updater import StockUpdater
import sqlite3
import mysql.connector
import os
import json
import database_helper

with open("config.json") as f:
    config = json.load(f)

request = Request(config["source"]["url"])

try:
    response = urlopen(request)
except HTTPError as e:
    print "The server returned error {}".format(e.code)
    exit
except URLError as e:
    print "Failed to reach server: {}".format(e.reason)
    exit

converter = CsvConverter("")
converter.setSourceColumns(
    config["source"]["product_code_column"],
    config["source"]["quantity_column"])
converter.read_csv(response)

if config["database_connection"]["type"] == "sqlite3":
    conn = sqlite3.connect(config["database_connection"]["database"])
elif config["database_connection"]["type"] == "mysql":
    conn = mysql.connector.connect(
        user=config["database_connection"]["username"],
        password=config["password"]["password"],
        host=config["database_connection"]["host"],
        database=config["database_connection"]["database"])
else:
    raise "Please, define database"

if config["testing"]:
    database_helper.initialize(conn)
    database_helper.add_test_products(conn)

updater = StockUpdater(conn)
updater.set_perform_check_product(config["database_connection"]["check_products"])
updater.set_destination_colums(
    config["database_connection"]["product_code_column"],
    config["database_connection"]["quantity_column"])
updater.set_table(config["database_connection"]["products_table"])
updater.set_items(converter.rows)
updater.update()
Python
0.999999
@@ -291,20 +291,16 @@ %22) as f: - %0A con @@ -1046,24 +1046,35 @@ config%5B%22 -password +database_connection %22%5D%5B%22pass
8ed53f96a6a780bcf6fa4b8da9f8fba16f9fec17
Remove redundant imports
main.py
main.py
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/

"""tvnamer - Automagical TV episode renamer

Uses data from www.thetvdb.com (via tvdb_api) to rename TV episode files from
"some.show.name.s01e01.blah.avi" to "Some Show Name - [01x01] - The First.avi"
"""

from optparse import OptionParser

from tvdb_api import Tvdb

from utils import (Config, FileFinder, FileParser, Renamer, warn,
getEpisodeName)
from tvnamer_exceptions import (ShowNotFound, SeasonNotFound,
EpisodeNotFound, EpisodeNameNotFound, UserAbort, UserAbort, InvalidPath,
NoValidFilesFoundError, InvalidFilename, NoValidFilesFoundError,
InvalidConfigFile, InvalidConfigFile, NoValidFilesFoundError, UserAbort)


def processFile(tvdb_instance, episode):
    """Gets episode name, prompts user for input
    """
    print "# Processing %s" % (episode.filename)
    try:
        correctedSeriesName, epName = getEpisodeName(tvdb_instance, episode)
    except (DataRetrievalError, ShowNotFound), errormsg:
        warn(errormsg)
    except (SeasonNotFound, EpisodeNotFound, EpisodeNameNotFound), errormsg:
        # Show was found, so use corrected series name
        warn(errormsg)
        episode.seriesname = correctedSeriesName
    else:
        episode.seriesname = correctedSeriesName
        episode.episodename = epName

    cnamer = Renamer(episode.fullpath)
    newName = episode.generateFilename()

    print "#" * 20
    print "# Old filename: %s" % episode.filename
    print "# New filename: %s" % newName

    if Config['alwaysrename']:
        cnamer.newName(newName)
        return

    ans = None
    while ans not in ['y', 'n', 'a', 'q', '']:
        print "Rename?"
        print "([y]/n/a/q)",
        try:
            ans = raw_input().strip()
        except KeyboardInterrupt, errormsg:
            print "\n", errormsg
            raise UserAbort(errormsg)

    if len(ans) == 0:
        print "Renaming (default)"
        cnamer.newName(newName)
    elif ans == "a":
        print "Always renaming"
        Config['alwaysrename'] = True
        cnamer.newName(newName)
    elif ans == "q":
        print "Quitting"
        raise UserAbort("User exited with q")
    elif ans == "y":
        print "Renaming"
        cnamer.newName(newName)
    elif ans == "n":
        print "Skipping"
    else:
        print "Invalid input, skipping"


def findFiles(paths):
    """Takes an array of paths, returns all files found
    """
    valid_files = []
    for cfile in paths:
        cur = FileFinder(cfile, recursive = Config['recursive'])
        try:
            valid_files.extend(cur.findFiles())
        except InvalidPath:
            warn("Invalid path: %s" % cfile)

    if len(valid_files) == 0:
        raise NoValidFilesFoundError()

    # Remove duplicate files (all paths from FileFinder are absolute)
    valid_files = list(set(valid_files))

    return valid_files


def tvnamer(paths):
    """Main tvnamer function, takes an array of paths, does stuff.
    """
    print "####################"
    print "# Starting tvnamer"

    episodes_found = []

    for cfile in findFiles(paths):
        parser = FileParser(cfile)
        try:
            episode = parser.parse()
        except InvalidFilename:
            warn("Invalid filename %s" % cfile)
        else:
            episodes_found.append(episode)

    if len(episodes_found) == 0:
        raise NoValidFilesFoundError()

    print "# Found %d episodes" % len(episodes_found)

    tvdb_instance = Tvdb(interactive=not Config['selectfirst'],
        debug = Config['verbose'])

    for episode in episodes_found:
        processFile(tvdb_instance, episode)

    print "# Done"


def main():
    """Parses command line arguments, displays errors from tvnamer in terminal
    """
    opter = OptionParser()
    opter.add_option(
        "-c", "--config",
        dest="config", help = "Override the config file path")
    opter.add_option(
        "-s", "--save",
        dest="saveconfig", help = "Save (default) config to file")
    opter.add_option(
        "-v", "--verbose",
        default=False, dest="verbose", action="store_true",
        help="show debugging information")
    opter.add_option(
        "-r", "--recursive",
        default = False, dest="recursive", action="store_true",
        help="Descend more than one level directories supplied as arguments")
    opter.add_option(
        "-a", "--always",
        default = False, dest="alwaysrename", action="store_true",
        help="always renames files (but still prompts for correct series). Can be set at runtime with the 'a' prompt-option")
    opter.add_option(
        "-f", "--selectfirst",
        default = False, dest="selectfirst", action="store_true",
        help="select first series search result (instead of showing the select-series interface")
    opter.add_option(
        "-b", "--batch",
        default = False, dest="batch", action="store_true",
        help="rename without human intervention, selects first series and always renames, same as --always and --selectfirst")

    opts, args = opter.parse_args()

    if opts.config is not None:
        print "Loading config from: %s" % (opts.config)
        try:
            Config.loadConfig(opts.config)
        except InvalidConfigFile:
            warn("Invalid config file %s - using default configuration" % (
                opts.config))
            Config.useDefaultConfig()

    if opts.saveconfig is not None:
        print "Saving current config to %s" % (opts.saveconfig)
        try:
            Config.saveConfig(opts.saveconfig)
        except InvalidConfigFile:
            opter.error("Could not save config to %s" % opts.saveconfig)
        else:
            print "Done, exiting"
            opter.exit(0)

    if opts.batch:
        opts.selectfirst = True
        opts.alwaysrename = True

    Config['verbose'] = opts.verbose
    Config['recursive'] = opts.recursive
    Config['alwaysrename'] = opts.alwaysrename
    Config['selectfirst'] = opts.selectfirst

    if len(args) == 0:
        opter.error("No filenames or directories supplied")

    try:
        tvnamer(paths = args)
    except NoValidFilesFoundError:
        opter.error("No valid files were supplied")
    except UserAbort, errormsg:
        opter.error(errormsg)

if __name__ == '__main__':
    main()
Python
0.867839
@@ -653,27 +653,16 @@ erAbort, - UserAbort, Invalid @@ -666,17 +666,17 @@ lidPath, -%0A + NoValidF @@ -690,17 +690,17 @@ ndError, - +%0A InvalidF @@ -727,25 +727,25 @@ sFoundError, -%0A + InvalidConfi @@ -769,17 +769,17 @@ figFile, - +%0A NoValidF @@ -794,25 +794,34 @@ dError, -UserAb +DataRetrievalErr or -t )%0A%0A%0Adef
345c886dec98bc6d104a975c7fdb871d19543e06
Add unique ID and device info to Nest camera (#16846)
homeassistant/components/camera/nest.py
homeassistant/components/camera/nest.py
""" Support for Nest Cameras. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/camera.nest/ """ import logging from datetime import timedelta import requests from homeassistant.components import nest from homeassistant.components.camera import (PLATFORM_SCHEMA, Camera, SUPPORT_ON_OFF) from homeassistant.util.dt import utcnow _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['nest'] NEST_BRAND = 'Nest' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({}) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up a Nest Cam. No longer in use. """ async def async_setup_entry(hass, entry, async_add_entities): """Set up a Nest sensor based on a config entry.""" camera_devices = \ await hass.async_add_job(hass.data[nest.DATA_NEST].cameras) cameras = [NestCamera(structure, device) for structure, device in camera_devices] async_add_entities(cameras, True) class NestCamera(Camera): """Representation of a Nest Camera.""" def __init__(self, structure, device): """Initialize a Nest Camera.""" super(NestCamera, self).__init__() self.structure = structure self.device = device self._location = None self._name = None self._online = None self._is_streaming = None self._is_video_history_enabled = False # Default to non-NestAware subscribed, but will be fixed during update self._time_between_snapshots = timedelta(seconds=30) self._last_image = None self._next_snapshot_at = None @property def name(self): """Return the name of the nest, if any.""" return self._name @property def should_poll(self): """Nest camera should poll periodically.""" return True @property def is_recording(self): """Return true if the device is recording.""" return self._is_streaming @property def brand(self): """Return the brand of the camera.""" return NEST_BRAND @property def supported_features(self): """Nest Cam support turn on and off.""" return SUPPORT_ON_OFF @property def is_on(self): """Return true if on.""" return self._online and self._is_streaming def turn_off(self): """Turn off camera.""" _LOGGER.debug('Turn off camera %s', self._name) # Calling Nest API in is_streaming setter. # device.is_streaming would not immediately change until the process # finished in Nest Cam. self.device.is_streaming = False def turn_on(self): """Turn on camera.""" if not self._online: _LOGGER.error('Camera %s is offline.', self._name) return _LOGGER.debug('Turn on camera %s', self._name) # Calling Nest API in is_streaming setter. # device.is_streaming would not immediately change until the process # finished in Nest Cam. 
self.device.is_streaming = True def update(self): """Cache value from Python-nest.""" self._location = self.device.where self._name = self.device.name self._online = self.device.online self._is_streaming = self.device.is_streaming self._is_video_history_enabled = self.device.is_video_history_enabled if self._is_video_history_enabled: # NestAware allowed 10/min self._time_between_snapshots = timedelta(seconds=6) else: # Otherwise, 2/min self._time_between_snapshots = timedelta(seconds=30) def _ready_for_snapshot(self, now): return (self._next_snapshot_at is None or now > self._next_snapshot_at) def camera_image(self): """Return a still image response from the camera.""" now = utcnow() if self._ready_for_snapshot(now): url = self.device.snapshot_url try: response = requests.get(url) except requests.exceptions.RequestException as error: _LOGGER.error("Error getting camera image: %s", error) return None self._next_snapshot_at = now + self._time_between_snapshots self._last_image = response.content return self._last_image
Python
0
@@ -1790,16 +1790,465 @@ ._name%0A%0A + @property%0A def unique_id(self):%0A %22%22%22Return the serial number.%22%22%22%0A return self.device.device_id%0A%0A @property%0A def device_info(self):%0A %22%22%22Return information about the device.%22%22%22%0A return %7B%0A 'identifiers': %7B%0A (nest.DOMAIN, self.device.device_id)%0A %7D,%0A 'name': self.device.name_long,%0A 'manufacturer': 'Nest Labs',%0A 'model': %22Camera%22,%0A %7D%0A%0A @pro
ba754c507a6d172053fb75d062a7035e73e79779
Add --dbdir flag to wrapupdater
mesonwrap/wrapupdater.py
mesonwrap/wrapupdater.py
#!/usr/bin/env python

# Copyright 2015 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from mesonwrap import wrapdb, wrapcreator


class WrapUpdater:

    def __init__(self, dbdir='.'):
        self.dbdir = dbdir
        self.db = wrapdb.WrapDatabase(self.dbdir, True)

    def close(self):
        self.db.close()

    def update_db(self, project_name, repo_url, branch):
        wrap = wrapcreator.make_wrap(project_name, repo_url, branch)
        self.db.insert(project_name, branch, wrap.revision, wrap.wrap, wrap.zip)


def main(prog, args):
    parser = argparse.ArgumentParser(prog)
    parser.add_argument('project')
    parser.add_argument('repo_url')
    parser.add_argument('branch')
    args = parser.parse_args(args)
    m = WrapUpdater()
    m.update_db(args.project, args.repo_url, args.branch)
Python
0
@@ -1146,16 +1146,64 @@ r(prog)%0A + parser.add_argument('--dbdir', default='.')%0A pars @@ -1354,16 +1354,32 @@ Updater( +dbdir=args.dbdir )%0A m.
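For readability, here is the `main` function after this diff is applied — a sketch assembled from the two hunks above (contexts `r(prog)` and `Updater(`), with the surrounding lines taken from old_contents and whitespace reconstructed:

# Post-commit main(), decoded from the hunks above; indentation reconstructed.
def main(prog, args):
    parser = argparse.ArgumentParser(prog)
    parser.add_argument('--dbdir', default='.')
    parser.add_argument('project')
    parser.add_argument('repo_url')
    parser.add_argument('branch')
    args = parser.parse_args(args)
    m = WrapUpdater(dbdir=args.dbdir)
    m.update_db(args.project, args.repo_url, args.branch)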
7e066e11ad54da5fba99dcf503dd0d82d536587b
Remove left-over
homeassistant/components/sensor/rest.py
homeassistant/components/sensor/rest.py
""" homeassistant.components.sensor.rest ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The rest sensor will consume JSON responses sent by an exposed REST API. Configuration: To use the rest sensor you will need to add something like the following to your configuration.yaml file. sensor: platform: arest name: REST sensor resource: http://IP_ADDRESS/ENDPOINT variable: temperature unit: '°C' Variables: name *Optional The name of the sensor. Default is 'REST Sensor'. resource *Required The full URL of the REST service/endpoint that provide the JSON response. variable *Required The name of the variable inside the JSON response you want to monitor. unit *Optional Defines the units of measurement of the sensor, if any. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.rest.html """ import logging from requests import get, exceptions from datetime import timedelta from homeassistant.util import Throttle from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "REST Sensor" # Return cached results if last scan was less then this time ago MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60) # pylint: disable=unused-variable def setup_platform(hass, config, add_devices, discovery_info=None): """ Get the REST sensor. """ resource = config.get('resource', None) try: response = get(resource, timeout=10) except exceptions.MissingSchema: _LOGGER.error("Missing resource or schema in configuration. " "Add http:// to your URL.") return False except exceptions.ConnectionError: _LOGGER.error("No route to resource/endpoint. " "Please check the URL in the configuration file.") return False rest = RestData(resource) dev = [] add_devices([RestSensor(rest, config.get('name', DEFAULT_NAME), config.get('variable'), config.get('unit'))]) add_devices(dev) class RestSensor(Entity): """ Implements a REST sensor. """ def __init__(self, rest, name, variable, unit_of_measurement): self.rest = rest self._name = name self._variable = variable self._state = 'n/a' self._unit_of_measurement = unit_of_measurement self.update() @property def name(self): """ The name of the sensor. """ return self._name @property def unit_of_measurement(self): """ Unit the value is expressed in. """ return self._unit_of_measurement @property def state(self): """ Returns the state of the device. """ return self._state def update(self): """ Gets the latest data from REST API and updates the state. """ self.rest.update() value = self.rest.data if 'error' in value: self._state = value['error'] else: try: self._state = value[self._variable] except KeyError: _LOGGER.error('Variable "%s" not found in response: "%s".', self._variable, value) self._state = 'N/A' # pylint: disable=too-few-public-methods class RestData(object): """ Class for handling the data retrieval. """ def __init__(self, resource): self.resource = resource self.data = dict() @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """ Gets the latest data from REST service. """ try: response = get(self.resource, timeout=10) if 'error' in self.data: del self.data['error'] self.data = response.json() except exceptions.ConnectionError: _LOGGER.error("No route to resource/endpoint.") self.data['error'] = 'N/A'
Python
0.000001
@@ -1863,21 +1863,8 @@ e)%0A%0A - dev = %5B%5D%0A @@ -2061,29 +2061,8 @@ )%5D)%0A - add_devices(dev)%0A %0A%0Acl
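Decoded, the two deletions above remove the unused `dev = []` list and the redundant `add_devices(dev)` call (the "left-over" of the subject line), leaving setup_platform to register its sensor in a single call; whitespace below is reconstructed:

    # setup_platform tail after this commit; decoded from the hunks above.
    rest = RestData(resource)

    add_devices([RestSensor(rest,
                            config.get('name', DEFAULT_NAME),
                            config.get('variable'),
                            config.get('unit'))])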
281fbe1dfac1f7a93f51622578b55075977a9fa5
Update wilight for new fan entity model (#45869)
homeassistant/components/wilight/fan.py
homeassistant/components/wilight/fan.py
"""Support for WiLight Fan.""" from pywilight.const import ( DOMAIN, FAN_V1, ITEM_FAN, WL_DIRECTION_FORWARD, WL_DIRECTION_OFF, WL_DIRECTION_REVERSE, WL_SPEED_HIGH, WL_SPEED_LOW, WL_SPEED_MEDIUM, ) from homeassistant.components.fan import ( DIRECTION_FORWARD, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SUPPORT_DIRECTION, SUPPORT_SET_SPEED, FanEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from . import WiLightDevice SUPPORTED_SPEEDS = [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH] SUPPORTED_FEATURES = SUPPORT_SET_SPEED | SUPPORT_DIRECTION async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities ): """Set up WiLight lights from a config entry.""" parent = hass.data[DOMAIN][entry.entry_id] # Handle a discovered WiLight device. entities = [] for item in parent.api.items: if item["type"] != ITEM_FAN: continue index = item["index"] item_name = item["name"] if item["sub_type"] != FAN_V1: continue entity = WiLightFan(parent.api, index, item_name) entities.append(entity) async_add_entities(entities) class WiLightFan(WiLightDevice, FanEntity): """Representation of a WiLights fan.""" def __init__(self, api_device, index, item_name): """Initialize the device.""" super().__init__(api_device, index, item_name) # Initialize the WiLights fan. self._direction = WL_DIRECTION_FORWARD @property def supported_features(self): """Flag supported features.""" return SUPPORTED_FEATURES @property def icon(self): """Return the icon of device based on its type.""" return "mdi:fan" @property def is_on(self): """Return true if device is on.""" return self._status.get("direction", WL_DIRECTION_OFF) != WL_DIRECTION_OFF @property def speed(self) -> str: """Return the current speed.""" return self._status.get("speed", SPEED_HIGH) @property def speed_list(self) -> list: """Get the list of available speeds.""" return SUPPORTED_SPEEDS @property def current_direction(self) -> str: """Return the current direction of the fan.""" if "direction" in self._status: if self._status["direction"] != WL_DIRECTION_OFF: self._direction = self._status["direction"] return self._direction # # The fan entity model has changed to use percentages and preset_modes # instead of speeds. # # Please review # https://developers.home-assistant.io/docs/core/entity/fan/ # async def async_turn_on( self, speed: str = None, percentage: int = None, preset_mode: str = None, **kwargs, ) -> None: """Turn on the fan.""" if speed is None: await self._client.set_fan_direction(self._index, self._direction) else: await self.async_set_speed(speed) async def async_set_speed(self, speed: str): """Set the speed of the fan.""" wl_speed = WL_SPEED_HIGH if speed == SPEED_LOW: wl_speed = WL_SPEED_LOW if speed == SPEED_MEDIUM: wl_speed = WL_SPEED_MEDIUM await self._client.set_fan_speed(self._index, wl_speed) async def async_set_direction(self, direction: str): """Set the direction of the fan.""" wl_direction = WL_DIRECTION_REVERSE if direction == DIRECTION_FORWARD: wl_direction = WL_DIRECTION_FORWARD await self._client.set_fan_direction(self._index, wl_direction) async def async_turn_off(self, **kwargs): """Turn the fan off.""" await self._client.set_fan_direction(self._index, WL_DIRECTION_OFF)
Python
0
@@ -298,57 +298,8 @@ RD,%0A - SPEED_HIGH,%0A SPEED_LOW,%0A SPEED_MEDIUM,%0A @@ -454,16 +454,136 @@ ssistant +%0Afrom homeassistant.util.percentage import (%0A ordered_list_item_to_percentage,%0A percentage_to_ordered_list_item,%0A) %0A%0Afrom . @@ -605,25 +605,33 @@ Device%0A%0A -SUPPORTED +ORDERED_NAMED_FAN _SPEEDS @@ -633,16 +633,19 @@ EEDS = %5B +WL_ SPEED_LO @@ -647,16 +647,19 @@ ED_LOW, +WL_ SPEED_ME @@ -664,16 +664,19 @@ MEDIUM, +WL_ SPEED_HI @@ -2086,21 +2086,26 @@ def -s pe +rcentag e -d (self) - @@ -2146,16 +2146,27 @@ nt speed + percentage .%22%22%22%0A @@ -2162,38 +2162,42 @@ age.%22%22%22%0A -return +wl_speed = self._status.ge @@ -2209,150 +2209,145 @@ eed%22 -, SPEED_HIGH)%0A%0A @property%0A def speed_list(self) -%3E list:%0A %22%22%22Get the list of available speeds.%22%22%22%0A return SUPPORTED_SPEEDS +)%0A if wl_speed is None:%0A return None%0A return ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS, wl_speed) %0A%0A @@ -2651,211 +2651,8 @@ on%0A%0A - #%0A # The fan entity model has changed to use percentages and preset_modes%0A # instead of speeds.%0A #%0A # Please review%0A # https://developers.home-assistant.io/docs/core/entity/fan/%0A #%0A @@ -2857,21 +2857,26 @@ if -s pe +rcentag e -d is None @@ -3007,19 +3007,29 @@ set_ -s pe -ed(speed +rcentage(percentage )%0A%0A @@ -3055,30 +3055,40 @@ set_ -s pe +rcentag e -d (self, -s pe -ed: str +rcentage: int ):%0A @@ -3130,41 +3130,8 @@ %22%22%22%0A - wl_speed = WL_SPEED_HIGH%0A @@ -3141,136 +3141,212 @@ if -s pe -ed == SPEED_LOW:%0A wl_speed = WL_SPEED_LOW%0A if speed == SPEED_MEDIUM:%0A wl_speed = WL_SPE +rcentage == 0:%0A await self._client.set_fan_direction(self._index, WL_DIRECTION_OFF)%0A return%0A wl_speed = percentage_to_ordered_list_item(ORDER ED_ +NA MED -IUM +_FAN_SPEEDS, percentage) %0A
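Because the word-level hunks above are dense, here is what they decode to: the named-speed imports are dropped in favor of the percentage helpers, `SUPPORTED_SPEEDS` becomes `ORDERED_NAMED_FAN_SPEEDS` over the `WL_*` constants, the explanatory comment block is removed, `async_turn_on` now routes a given percentage to `async_set_percentage(percentage)`, and the speed property/setter pair becomes a percentage pair. The excerpts below are read directly from the hunks at their original indentation; only the whitespace is reconstructed:

# Decoded excerpts of the post-commit file; indentation reconstructed.
from homeassistant.util.percentage import (
    ordered_list_item_to_percentage,
    percentage_to_ordered_list_item,
)

ORDERED_NAMED_FAN_SPEEDS = [WL_SPEED_LOW, WL_SPEED_MEDIUM, WL_SPEED_HIGH]

    @property
    def percentage(self):
        """Return the current speed percentage."""
        wl_speed = self._status.get("speed")
        if wl_speed is None:
            return None
        return ordered_list_item_to_percentage(ORDERED_NAMED_FAN_SPEEDS, wl_speed)

    async def async_set_percentage(self, percentage: int):
        """Set the speed of the fan."""
        if percentage == 0:
            await self._client.set_fan_direction(self._index, WL_DIRECTION_OFF)
            return
        wl_speed = percentage_to_ordered_list_item(ORDERED_NAMED_FAN_SPEEDS, percentage)
        await self._client.set_fan_speed(self._index, wl_speed)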
7e15cbb885a5d959df34fc24fcabea5b5ee8433a
Fix pep8 fail (lines too long)
mezzanine/pages/views.py
mezzanine/pages/views.py
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404

from mezzanine.conf import settings
from mezzanine.pages import page_processors
from mezzanine.pages.models import Page
from mezzanine.utils.urls import home_slug
from mezzanine.utils.views import render


page_processors.autodiscover()


def get_id(s):
    id = s.split("_")[-1]
    if id == 'null':
        return None
    return id


def admin_page_ordering(request):
    """
    Updates the ordering of pages via AJAX from within the admin.
    """
    page = get_object_or_404(Page, id=get_id(request.POST['id']))
    old_parent_id = page.parent_id
    new_parent_id = get_id(request.POST['parent_id'])
    if page.parent_id != new_parent_id:
        page.parent_id = new_parent_id
        page.save()
        page.reset_slugs()
    for i, old_sibling in enumerate(Page.objects.filter(parent_id=old_parent_id).order_by('_order')):
        Page.objects.filter(id=old_sibling.id).update(_order=i)
    for i, new_sibling in enumerate(request.POST.getlist('siblings[]')):
        Page.objects.filter(id=get_id(new_sibling)).update(_order=i)
    return HttpResponse("ok")
admin_page_ordering = staff_member_required(admin_page_ordering)


def page(request, slug, template=u"pages/page.html", extra_context=None):
    """
    Select a template for a page and render it. The ``extra_context``
    arg will include a ``page`` object that's added via
    ``mezzanine.pages.middleware.PageMiddleware``. The page is loaded
    via the middleware so that other apps with urlpatterns that match
    the current page can include a page in their template context.
    The urlpattern that maps to this view is a catch-all pattern, in
    which case the page instance will be None, so raise a 404 then.

    For template selection, a list of possible templates is built up
    based on the current page. This list is order from most granular
    match, starting with a custom template for the exact page, then
    adding templates based on the page's parent page, that could be
    used for sections of a site (eg all children of the parent).
    Finally at the broadest level, a template for the page's content
    type (it's model class) is checked for, and then if none of these
    templates match, the default pages/page.html is used.
    """
    page_middleware = "mezzanine.pages.middleware.PageMiddleware"
    if page_middleware not in settings.MIDDLEWARE_CLASSES:
        raise ImproperlyConfigured(page_middleware + " is missing from " +
                                   "settings.MIDDLEWARE_CLASSES")

    extra_context = extra_context or {}
    try:
        page = extra_context["page"]
    except KeyError:
        raise Http404

    # Check for a template name matching the page's slug. If the homepage
    # is configured as a page instance, the template "pages/index.html" is
    # used, since the slug "/" won't match a template name.
    template_name = unicode(slug) if slug != home_slug() else "index"
    templates = [u"pages/%s.html" % template_name]
    if page.content_model is not None:
        templates.append(u"pages/%s/%s.html" % (template_name,
                                                page.content_model))
    for parent in page.get_ascendants():
        parent_template_name = unicode(parent.slug)
        # Check for a template matching the page's content model.
        if page.content_model is not None:
            templates.append(u"pages/%s/%s.html" % (parent_template_name,
                                                    page.content_model))
    # Check for a template matching the page's content model.
    if page.content_model is not None:
        templates.append(u"pages/%s.html" % page.content_model)
    templates.append(template)
    return render(request, templates, extra_context)
Python
0
@@ -962,23 +962,16 @@ %0A - for i, old_sib @@ -978,85 +978,151 @@ ling - in enumerate(Page.objects.filter(parent_id=old_parent_id).order_by('_order') +s = Page.objects.order_by('_order').filter(%0A parent_id=old_parent_id,%0A )%0A for i, old_sibling in enumerate(old_siblings ):%0A
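As best these hunks can be decoded, the over-long queryset line in admin_page_ordering is split into a named `old_siblings` queryset plus a short loop; the sketch below is assembled from the added text in the second hunk, with indentation reconstructed:

    # Replacement loop decoded from the hunks above; layout reconstructed.
    old_siblings = Page.objects.order_by('_order').filter(
        parent_id=old_parent_id,
    )
    for i, old_sibling in enumerate(old_siblings):
        Page.objects.filter(id=old_sibling.id).update(_order=i)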