commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
28518572a472f28c20ba60cdf536c51c7e9a7796 | use metrics_file variable | lenxeon/graph-index,douban/graph-index,douban/graph-index,lenxeon/graph-index | config.py | config.py | graphite_url = 'http://dori.intra.douban.com:8800'
listen_host = '0.0.0.0'
listen_port = 8808
debug = False
metrics_file = 'metrics.json'
| graphite_url = 'http://dori.intra.douban.com:8800'
listen_host = '0.0.0.0'
listen_port = 8808
debug = False
| mit | Python |
5f8ce25447905650343be76f73eba8287196a352 | change blank | holytortoise/abwreservierung,holytortoise/abwreservierung,holytortoise/abwreservierung,holytortoise/abwreservierung | src/reservierung/forms.py | src/reservierung/forms.py | from django import forms
import datetime
from .models import Reservierung,Raum
# Create the form class
class ReservierungForm(forms.Form):
choice = ()
model = Reservierung()
choice = model.create_choice()
reserviertFür = forms.CharField(max_length=40,required=False)
reservierterRaum = forms.ChoiceField(choice)
reservierungsGrund = forms.CharField(max_length=40)
anfangsDatum = forms.DateField()
endDatum = forms.DateField()
anfangsZeit = forms.TimeField(help_text='HH:mm')
endZeit = forms.TimeField(help_text='HH:mm')
täglich = forms.BooleanField(required=False)
def clean(self):
cleaned_data = super(ReservierungForm, self).clean()
anfangsdatum = cleaned_data.get("anfangsDatum")
enddatum = cleaned_data.get("endDatum")
anfangszeit = cleaned_data.get("anfangsZeit")
endzeit = cleaned_data.get("endZeit")
if anfangsdatum and enddatum:
# Only do something if both fields are valid so far.
if anfangsdatum < datetime.date.today():
raise forms.ValidationError("Anfangsdatum kann nicht in der Vergangenheit liegen.")
if anfangsdatum == datetime.date.today():
if anfangszeit < datetime.datetime.now().time():
raise forms.ValidationError("Anfangszeit kann nicht in der Vergangenheit liegen")
if anfangsdatum > enddatum:
raise forms.ValidationError("Enddatum kann nicht vor Anfangsdatum sein")
if anfangsdatum == enddatum:
if anfangszeit > endzeit:
raise forms.ValidationError("Anfangs Zeit kann nicht nach der End Zeit liegen")
if anfangszeit == endzeit:
raise forms.ValidationError("Anfangs und End Zeit können nicht gleich sein")
| from django import forms
import datetime
from .models import Reservierung,Raum
# Create the form class
class ReservierungForm(forms.Form):
choice = ()
model = Reservierung()
choice = model.create_choice()
reserviertFür = forms.CharField(max_length=40,blank=True)
reservierterRaum = forms.ChoiceField(choice)
reservierungsGrund = forms.CharField(max_length=40)
anfangsDatum = forms.DateField()
endDatum = forms.DateField()
anfangsZeit = forms.TimeField(help_text='HH:mm')
endZeit = forms.TimeField(help_text='HH:mm')
täglich = forms.BooleanField(required=False)
def clean(self):
cleaned_data = super(ReservierungForm, self).clean()
anfangsdatum = cleaned_data.get("anfangsDatum")
enddatum = cleaned_data.get("endDatum")
anfangszeit = cleaned_data.get("anfangsZeit")
endzeit = cleaned_data.get("endZeit")
if anfangsdatum and enddatum:
# Only do something if both fields are valid so far.
if anfangsdatum < datetime.date.today():
raise forms.ValidationError("Anfangsdatum kann nicht in der Vergangenheit liegen.")
if anfangsdatum == datetime.date.today():
if anfangszeit < datetime.datetime.now().time():
raise forms.ValidationError("Anfangszeit kann nicht in der Vergangenheit liegen")
if anfangsdatum > enddatum:
raise forms.ValidationError("Enddatum kann nicht vor Anfangsdatum sein")
if anfangsdatum == enddatum:
if anfangszeit > endzeit:
raise forms.ValidationError("Anfangs Zeit kann nicht nach der End Zeit liegen")
if anfangszeit == endzeit:
raise forms.ValidationError("Anfangs und End Zeit können nicht gleich sein")
| mit | Python |
f9548f93e3e6154c3207b0d7b6f3e741ae5287a2 | bump to 0.15.1 | arq5x/gemini,bw2/gemini,heuermh/gemini,heuermh/gemini,bgruening/gemini,xuzetan/gemini,bpow/gemini,bpow/gemini,bpow/gemini,brentp/gemini,arq5x/gemini,udp3f/gemini,brentp/gemini,udp3f/gemini,bw2/gemini,bw2/gemini,bgruening/gemini,arq5x/gemini,bgruening/gemini,bpow/gemini,brentp/gemini,xuzetan/gemini,brentp/gemini,xuzetan/gemini,udp3f/gemini,xuzetan/gemini,arq5x/gemini,bgruening/gemini,bw2/gemini,heuermh/gemini,heuermh/gemini,udp3f/gemini | gemini/version.py | gemini/version.py | __version__="0.15.1"
| __version__="0.15.0"
| mit | Python |
f114141aa1311bae4301e158cf2e6db43fe7391a | Bump version to 2.0.0b1 | vstconsulting/polemarch,vstconsulting/polemarch,vstconsulting/polemarch,vstconsulting/polemarch | polemarch/__init__.py | polemarch/__init__.py | """
### Polemarch is ansible based service for orchestration infrastructure.
* [Documentation](http://polemarch.readthedocs.io/)
* [Issue Tracker](https://gitlab.com/vstconsulting/polemarch/issues)
* [Source Code](https://gitlab.com/vstconsulting/polemarch)
"""
import os
import warnings
try:
from vstutils.environment import prepare_environment, cmd_execution
except ImportError:
warnings.warn('"vstutils" was not installed', ImportWarning)
prepare_environment = lambda *args, **kwargs: ()
cmd_execution = prepare_environment
default_settings = {
# ansible specific environment variables
"ANSIBLE_HOST_KEY_CHECKING": 'False',
"ANSIBLE_FORCE_COLOR": "true",
# celery specific
"C_FORCE_ROOT": "true",
# django settings module
"DJANGO_SETTINGS_MODULE": os.getenv(
"DJANGO_SETTINGS_MODULE", 'polemarch.main.settings'
),
# VSTUTILS settings
"VST_PROJECT": os.getenv("VST_PROJECT", 'polemarch'),
"VST_ROOT_URLCONF": os.getenv("VST_ROOT_URLCONF", 'vstutils.urls'),
}
__version__ = "2.0.0b1"
prepare_environment(**default_settings)
| """
### Polemarch is ansible based service for orchestration infrastructure.
* [Documentation](http://polemarch.readthedocs.io/)
* [Issue Tracker](https://gitlab.com/vstconsulting/polemarch/issues)
* [Source Code](https://gitlab.com/vstconsulting/polemarch)
"""
import os
import warnings
try:
from vstutils.environment import prepare_environment, cmd_execution
except ImportError:
warnings.warn('"vstutils" was not installed', ImportWarning)
prepare_environment = lambda *args, **kwargs: ()
cmd_execution = prepare_environment
default_settings = {
# ansible specific environment variables
"ANSIBLE_HOST_KEY_CHECKING": 'False',
"ANSIBLE_FORCE_COLOR": "true",
# celery specific
"C_FORCE_ROOT": "true",
# django settings module
"DJANGO_SETTINGS_MODULE": os.getenv(
"DJANGO_SETTINGS_MODULE", 'polemarch.main.settings'
),
# VSTUTILS settings
"VST_PROJECT": os.getenv("VST_PROJECT", 'polemarch'),
"VST_ROOT_URLCONF": os.getenv("VST_ROOT_URLCONF", 'vstutils.urls'),
}
__version__ = "1.8.5"
prepare_environment(**default_settings)
| agpl-3.0 | Python |
0c1976007c5f34d14bcc01f2fab9dee7c23e6381 | bump version to 0.5.0b | bpow/gemini,arq5x/gemini,bw2/gemini,udp3f/gemini,bgruening/gemini,bpow/gemini,brentp/gemini,heuermh/gemini,brentp/gemini,bw2/gemini,udp3f/gemini,xuzetan/gemini,xuzetan/gemini,bgruening/gemini,arq5x/gemini,heuermh/gemini,bpow/gemini,heuermh/gemini,brentp/gemini,xuzetan/gemini,arq5x/gemini,xuzetan/gemini,bgruening/gemini,bgruening/gemini,arq5x/gemini,udp3f/gemini,brentp/gemini,bw2/gemini,bpow/gemini,udp3f/gemini,heuermh/gemini,bw2/gemini | gemini/version.py | gemini/version.py | __version__="0.5.0b"
| __version__="0.4.0b"
| mit | Python |
245b32775db0de95d4feb54533a423748bddf7a0 | Bump version number | rkhleics/police-api-client-python | police_api/version.py | police_api/version.py | __version__ = '1.0.2'
| __version__ = '1.0.2dev'
| mit | Python |
50ead4fe13eec7ad9760f0f577212beb8e8a51be | Use a queryset to display only kind=page | mysociety/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,ken-muturi/pombola,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,hzj123/56th,ken-muturi/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,geoffkilpin/pombola | pombola/info/views.py | pombola/info/views.py | from django.views.generic import DetailView
from models import InfoPage
class InfoPageView(DetailView):
"""Show the page for the given slug"""
model = InfoPage
queryset = InfoPage.objects.filter(kind=InfoPage.KIND_PAGE)
| from django.views.generic import DetailView
from models import InfoPage
class InfoPageView(DetailView):
"""Show the page, or 'index' if no slug"""
model = InfoPage
| agpl-3.0 | Python |
3fa0f7d3a6f52eb9f7cb7b32dc4e3760d1b50f62 | Update __init__.py | morganmeliment/Conway-Life,morganmeliment/Conway-Life | ggame/__init__.py | ggame/__init__.py | from ggame.ggame import *
from ggame.pyinput import *
| from ggame.ggame import *
| mit | Python |
3441b61821f88b0e04655953fb49fe56c950c852 | Use colors from config utils. | probcomp/cgpm,probcomp/cgpm | gpmcc/utils/plots.py | gpmcc/utils/plots.py | # -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2014 Baxter S. Eaves Jr,
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from gpmcc.utils.config import colors
_plot_layout = {
1: (1,1),
2: (2,1),
3: (3,1),
4: (2,2),
5: (3,2),
6: (3,2),
7: (4,2),
8: (4,2),
9: (3,3),
10: (5,2),
11: (4,3),
12: (4,3),
13: (5,3),
14: (5,3),
15: (5,3),
16: (4,4),
17: (6,3),
18: (6,3),
19: (5,4),
20: (5,4),
21: (7,3),
22: (6,4),
23: (6,4),
24: (6,4),
}
def get_state_plot_layout(n_cols):
layout = dict()
layout['plots_x'] = _plot_layout[n_cols][0]
layout['plots_y'] = _plot_layout[n_cols][1]
layout['plot_inches_x'] = 13/6. * layout['plots_x']
layout['plot_inches_y'] = 6. * layout['plots_y']
layout['border_color'] = colors()
return layout
| # -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2014 Baxter S. Eaves Jr,
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
_colors = ['black', 'red', 'blue', 'green', 'yellow', 'orange', 'purple',
'pink']
_plot_layout = {
1: (1,1),
2: (2,1),
3: (3,1),
4: (2,2),
5: (3,2),
6: (3,2),
7: (4,2),
8: (4,2),
9: (3,3),
10: (5,2),
11: (4,3),
12: (4,3),
13: (5,3),
14: (5,3),
15: (5,3),
16: (4,4),
17: (6,3),
18: (6,3),
19: (5,4),
20: (5,4),
21: (7,3),
22: (6,4),
23: (6,4),
24: (6,4),
}
def get_state_plot_layout(n_cols):
layout = dict()
layout['plots_x'] = _plot_layout[n_cols][0]
layout['plots_y'] = _plot_layout[n_cols][1]
layout['plot_inches_x'] = 13/6. * layout['plots_x']
layout['plot_inches_y'] = 6. * layout['plots_y']
layout['border_color'] = _colors
return layout
| apache-2.0 | Python |
a9b61963d6c1fe6c85c4cafaed47a17f4f5ca589 | Add Version, Author and License info | aurainfosec/burp-multi-browser-highlighting | multi-browser.py | multi-browser.py | # Author: Emmanuel Law
# Version: 1.0
# License: MIT License
from burp import IBurpExtender
from burp import IHttpListener
from burp import IProxyListener
from burp import IInterceptedProxyMessage
from burp import IContextMenuFactory
from javax.swing import JMenuItem
from java.awt.event import ActionListener
from java.io import PrintWriter
class BurpExtender(IBurpExtender,IProxyListener, IContextMenuFactory,ActionListener):
def registerExtenderCallbacks( self, callbacks):
# keep a reference to our callbacks and helper object
self._callbacks=callbacks
self._helpers=callbacks.getHelpers()
self.stdout = PrintWriter(callbacks.getStdout(), True)
# Keep Track of Browsers
self._browser={}
# Colors for different browsers
self.colors=["red", "pink", "green","magenta","cyan", "gray", "yellow"]
self._callbacks.setExtensionName("Multi-Browser Highlighting")
self.isEnabled=False
#IExtensionHelpers helpers = callbacks.getHelpers()
callbacks.registerProxyListener(self)
callbacks.registerContextMenuFactory(self)
return
def processProxyMessage(self, messageIsRequest, message):
if self.isEnabled == False:
return
# self._stdout.println(("Proxy request to " if messageIsRequest else "Proxy response from ") + message.getMessageInfo().getHttpService().toString())
if messageIsRequest == False:
return
browser_agent=None
headers=self._helpers.analyzeRequest(message.getMessageInfo()).getHeaders()
for x in headers:
if x.lower().startswith("user-agent:"):
browser_agent=x
break
if browser_agent not in self._browser:
self._browser[browser_agent]={"id":len(self._browser)+1, "agent":browser_agent, "color":self.colors.pop()}
self.stdout.println(self._browser[browser_agent]["agent"])
message.getMessageInfo().setHighlight(self._browser[browser_agent]["color"])
def createMenuItems(self, invocation):
if invocation.getInvocationContext() == invocation.CONTEXT_PROXY_HISTORY:
mymenu=[]
if self.isEnabled:
item=JMenuItem("Multi-Browser Highlight (Running): Click to Disable ")
else:
item=JMenuItem("Multi-Browser Highlight (Stopped): Click to Enable ")
item.addActionListener(self)
mymenu.append(item)
return mymenu
else:
return None
def actionPerformed(self, actionEvent):
self.isEnabled= not self.isEnabled
| from burp import IBurpExtender
from burp import IHttpListener
from burp import IProxyListener
from burp import IInterceptedProxyMessage
from burp import IContextMenuFactory
from javax.swing import JMenuItem
from java.awt.event import ActionListener
from java.io import PrintWriter
class BurpExtender(IBurpExtender,IProxyListener, IContextMenuFactory,ActionListener):
def registerExtenderCallbacks( self, callbacks):
# keep a reference to our callbacks and helper object
self._callbacks=callbacks
self._helpers=callbacks.getHelpers()
self.stdout = PrintWriter(callbacks.getStdout(), True)
# Keep Track of Browsers
self._browser={}
# Colors for different browsers
self.colors=["red", "pink", "green","magenta","cyan", "gray", "yellow"]
self._callbacks.setExtensionName("Multi-Browser Highlighting")
self.isEnabled=False
#IExtensionHelpers helpers = callbacks.getHelpers()
callbacks.registerProxyListener(self)
callbacks.registerContextMenuFactory(self)
return
def processProxyMessage(self, messageIsRequest, message):
if self.isEnabled == False:
return
# self._stdout.println(("Proxy request to " if messageIsRequest else "Proxy response from ") + message.getMessageInfo().getHttpService().toString())
if messageIsRequest == False:
return
browser_agent=None
headers=self._helpers.analyzeRequest(message.getMessageInfo()).getHeaders()
for x in headers:
if x.lower().startswith("user-agent:"):
browser_agent=x
break
if browser_agent not in self._browser:
self._browser[browser_agent]={"id":len(self._browser)+1, "agent":browser_agent, "color":self.colors.pop()}
self.stdout.println(self._browser[browser_agent]["agent"])
message.getMessageInfo().setHighlight(self._browser[browser_agent]["color"])
def createMenuItems(self, invocation):
if invocation.getInvocationContext() == invocation.CONTEXT_PROXY_HISTORY:
mymenu=[]
if self.isEnabled:
item=JMenuItem("Multi-Browser Highlight (Running): Click to Disable ")
else:
item=JMenuItem("Multi-Browser Highlight (Stopped): Click to Enable ")
item.addActionListener(self)
mymenu.append(item)
return mymenu
else:
return None
def actionPerformed(self, actionEvent):
self.isEnabled= not self.isEnabled
| mit | Python |
28de4cfc42fc45d8b0ad019b8334ba79ab0e14bd | Make it work properly. | Motoko11/MotoBot | desutest.py | desutest.py | from motobot import IRCBot, IRCLevel
import desutest as this
import threading
import traceback
def worker():
this.bot.run()
def main():
this.bot.load_plugins('plugins')
this.bot.load_database('desutest.json')
this.bot.join('#MotoChan')
thread = threading.Thread(target=worker)
thread.start()
running = True
while running:
try:
msg = input()
if msg.startswith(':'):
this.bot.load_plugins('plugins')
else:
this.bot.send(msg)
except KeyboardInterrupt:
running = False
this.bot.disconnect()
except:
traceback.print_exc()
if __name__ == '__main__':
main()
else:
bot = IRCBot('desutest', 'irc.rizon.net', command_prefix='!')
| from motobot import IRCBot, IRCLevel
import desutest as this
import threading
import traceback
def worker():
this.bot.run()
def main():
IRCBot.load_plugins('plugins')
this.bot.load_database('desutest.json')
this.bot.join('#MotoChan')
thread = threading.Thread(target=worker)
thread.start()
running = True
while running:
try:
msg = input()
if msg.startswith(':'):
IRCBot.load_plugins('plugins')
else:
this.bot.send(msg)
except KeyboardInterrupt:
running = False
this.bot.disconnect()
except:
traceback.print_exc()
if __name__ == '__main__':
main()
else:
bot = IRCBot('desutest', 'irc.rizon.net', command_prefix='!')
| mit | Python |
db87621ec04d599da8b62bb3569d83ac365dca3e | use latest Odoo address, instead of (old) stored ones | OCA/l10n-portugal,OCA/l10n-portugal | l10n_pt_account_invoicexpress/models/res_partner.py | l10n_pt_account_invoicexpress/models/res_partner.py | # Copyright (C) 2021 Open Source Integrators
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import models
class ResPartner(models.Model):
_inherit = "res.partner"
def _prepare_invoicexpress_vals(self):
self.ensure_one()
vals = {
"name": self.name,
"email": self.email,
"address": ", ".join(filter(None, [self.street, self.street2])),
"city": self.city,
"postal_code": self.zip,
"country": self.country_id.invoicexpress_name,
"language": self.lang[:2],
"fiscal_id": self.vat,
"website": self.website,
"phone": self.phone,
}
return {k: v for k, v in vals.items() if v}
| # Copyright (C) 2021 Open Source Integrators
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import models
class ResPartner(models.Model):
_inherit = "res.partner"
def _prepare_invoicexpress_vals(self):
self.ensure_one()
vals = {
"name": self.name,
"code": self.vat or "ODOO-{}".format(self.id),
"email": self.email,
"address": ", ".join(filter(None, [self.street, self.street2])),
"city": self.city,
"postal_code": self.zip,
"country": self.country_id.invoicexpress_name,
"language": self.lang[:2],
"fiscal_id": self.vat,
"website": self.website,
"phone": self.phone,
}
return {k: v for k, v in vals.items() if v}
| agpl-3.0 | Python |
140f4d85ce07d8c2fad5918dd3146c9a1678d705 | fix : remove misc comment | bas524/linuxdependency,bas524/linuxdependency | nautilus-python/extensions/linuxdependency.py | nautilus-python/extensions/linuxdependency.py |
import os, os.path
import urllib
from gi.repository import Nautilus, GObject, GConf
QLDD_KEY = '/usr/bin/Qldd'
class OpenQlddExtension(Nautilus.MenuProvider, GObject.GObject):
def __init__(self):
pass
def _open_qldd(self, file):
filename = urllib.unquote(file.get_uri()[7:])
qldd = QLDD_KEY
os.system('%s %s &' % (qldd, filename))
def menu_activate_cb(self, menu, file):
self._open_qldd(file)
def get_file_items(self, window, files):
if len(files) != 1:
return
file = files[0]
if file.is_directory() or file.get_uri_scheme() != 'file':
return
item = Nautilus.MenuItem(name='NautilusPython::openqldd_file_item',
label='View dependecy' ,
tip='View dependecy of %s' % file.get_name())
item.connect('activate', self.menu_activate_cb, file)
return item,
| # This example is contributed by Martin Enlund
import os, os.path
import urllib
from gi.repository import Nautilus, GObject, GConf
QLDD_KEY = '/usr/bin/Qldd'
class OpenQlddExtension(Nautilus.MenuProvider, GObject.GObject):
def __init__(self):
pass
def _open_qldd(self, file):
filename = urllib.unquote(file.get_uri()[7:])
qldd = QLDD_KEY
os.system('%s %s &' % (qldd, filename))
def menu_activate_cb(self, menu, file):
self._open_qldd(file)
def get_file_items(self, window, files):
if len(files) != 1:
return
file = files[0]
if file.is_directory() or file.get_uri_scheme() != 'file':
return
item = Nautilus.MenuItem(name='NautilusPython::openqldd_file_item',
label='View dependecy' ,
tip='View dependecy of %s' % file.get_name())
item.connect('activate', self.menu_activate_cb, file)
return item,
| apache-2.0 | Python |
fe998861aadf2e1964badc90a8881a0329eca439 | Update paramsweep.py with new Python package name | petebachant/UNH-RVAT-turbinesFoam,petebachant/UNH-RVAT-turbinesFoam | paramsweep.py | paramsweep.py | #!/usr/bin/env python
"""
Run multiple simulations varying a single parameter.
"""
import foampy
from foampy.dictionaries import replace_value
import numpy as np
from subprocess import call
import os
import pandas as pd
from py_unh_rvat_turbinesfoam import processing as pr
def zero_tsr_fluc():
"""Set TSR fluctuation amplitude to zero."""
replace_value("system/fvOptions", "tsrAmplitude", 0.0)
def set_tsr(val):
"""Set mean tip speed ratio."""
print("Setting TSR to", val)
replace_value("system/fvOptions", "tipSpeedRatio", val)
def log_perf(param="tsr", append=True):
"""Log performance to file."""
if not os.path.isdir("processed"):
os.mkdir("processed")
fpath = "processed/{}_sweep.csv".format(param)
if append and os.path.isfile(fpath):
df = pd.read_csv(fpath)
else:
df = pd.DataFrame(columns=["tsr", "cp", "cd"])
df = df.append(pr.calc_perf(t1=3.0), ignore_index=True)
df.to_csv(fpath, index=False)
def tsr_sweep(start=0.4, stop=3.4, step=0.5, append=False):
"""Run over multiple TSRs. `stop` will be included."""
if not append and os.path.isfile("processed/tsr_sweep.csv"):
os.remove("processed/tsr_sweep.csv")
tsrs = np.arange(start, stop + 0.5*step, step)
zero_tsr_fluc()
cp = []
cd = []
for tsr in tsrs:
set_tsr(tsr)
if tsr == tsrs[0]:
call("./Allclean")
call("./Allrun")
else:
call("pimpleFoam | tee log.pimpleFoam", shell=True)
os.rename("log.pimpleFoam", "log.pimpleFoam." + str(tsr))
log_perf(append=True)
# Checkout original fvOptions
call(["git", "checkout", "system/fvOptions"])
if __name__ == "__main__":
tsr_sweep(0.4, 3.4, 0.5, append=False)
| #!/usr/bin/env python
"""
Run multiple simulations varying a single parameter.
"""
import foampy
from foampy.dictionaries import replace_value
import numpy as np
from subprocess import call
import os
import pandas as pd
from modules import processing as pr
def zero_tsr_fluc():
"""Set TSR fluctuation amplitude to zero."""
replace_value("system/fvOptions", "tsrAmplitude", 0.0)
def set_tsr(val):
"""Set mean tip speed ratio."""
print("Setting TSR to", val)
replace_value("system/fvOptions", "tipSpeedRatio", val)
def log_perf(param="tsr", append=True):
"""Log performance to file."""
if not os.path.isdir("processed"):
os.mkdir("processed")
fpath = "processed/{}_sweep.csv".format(param)
if append and os.path.isfile(fpath):
df = pd.read_csv(fpath)
else:
df = pd.DataFrame(columns=["tsr", "cp", "cd"])
df = df.append(pr.calc_perf(t1=3.0), ignore_index=True)
df.to_csv(fpath, index=False)
def tsr_sweep(start=0.4, stop=3.4, step=0.5, append=False):
"""Run over multiple TSRs. `stop` will be included."""
if not append and os.path.isfile("processed/tsr_sweep.csv"):
os.remove("processed/tsr_sweep.csv")
tsrs = np.arange(start, stop + 0.5*step, step)
zero_tsr_fluc()
cp = []
cd = []
for tsr in tsrs:
set_tsr(tsr)
if tsr == tsrs[0]:
call("./Allclean")
call("./Allrun")
else:
call("pimpleFoam | tee log.pimpleFoam", shell=True)
os.rename("log.pimpleFoam", "log.pimpleFoam." + str(tsr))
log_perf(append=True)
# Checkout original fvOptions
call(["git", "checkout", "system/fvOptions"])
if __name__ == "__main__":
tsr_sweep(0.4, 3.4, 0.5, append=False)
| mit | Python |
d8f6092b023660540c69ffb233716f98eb98208c | make sure the datadog port is an int | DXCanas/content-curation,jonboiser/content-curation,jonboiser/content-curation,jayoshih/content-curation,aronasorman/content-curation,DXCanas/content-curation,jonboiser/content-curation,fle-internal/content-curation,fle-internal/content-curation,jayoshih/content-curation,jayoshih/content-curation,DXCanas/content-curation,DXCanas/content-curation,aronasorman/content-curation,jonboiser/content-curation,aronasorman/content-curation,jayoshih/content-curation,fle-internal/content-curation,fle-internal/content-curation | contentcuration/contentcuration/production_settings.py | contentcuration/contentcuration/production_settings.py | import os
from .settings import *
SITE_ID = 3
STORAGE_ROOT = "/contentworkshop_content/storage/"
DB_ROOT = "/contentworkshop_content/databases/"
STATIC_ROOT = "/contentworkshop_static/"
MEDIA_ROOT = STORAGE_ROOT
SITE_ID = 3
SESSION_ENGINE = "django.contrib.sessions.backends.db"
if os.getenv("USE_DATADOG"):
INSTALLED_APPS = (
"ddtrace.contrib.django",
) + INSTALLED_APPS
MIDDLEWARE_CLASSES = (
'ddtrace.contrib.django.TraceMiddleware',
) + MIDDLEWARE_CLASSES
DATADOG_TRACE = {
'DEFAULT_SERVICE': 'contentworkshop',
'AGENT_PORT': int(os.getenv("DATADOG_STATSD_PORT") or 8126),
'AGENT_HOSTNAME': os.getenv("DATADOG_STATSD_HOSTNAME"),
'TAGS': {'env': 'production'},
}
DATABASES = {
'default': {
'ENGINE':
'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv("DB_CREDENTIALS_DBNAME"),
'USER': os.getenv("DB_CREDENTIALS_USER"),
'PASSWORD': os.getenv("DB_CREDENTIALS_PASSWORD"),
'HOST': os.getenv("DB_CREDENTIALS_HOST"),
'PORT': int(os.getenv("DB_CREDENTIALS_PORT")),
},
'export_staging': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'export_staging.sqlite3')
}
}
# email settings
EMAIL_BACKEND = "postmark.backends.PostmarkBackend"
POSTMARK_API_KEY = os.getenv("EMAIL_CREDENTIALS_POSTMARK_API_KEY")
| import os
from .settings import *
SITE_ID = 3
STORAGE_ROOT = "/contentworkshop_content/storage/"
DB_ROOT = "/contentworkshop_content/databases/"
STATIC_ROOT = "/contentworkshop_static/"
MEDIA_ROOT = STORAGE_ROOT
SITE_ID = 3
SESSION_ENGINE = "django.contrib.sessions.backends.db"
if os.getenv("USE_DATADOG"):
INSTALLED_APPS = (
"ddtrace.contrib.django",
) + INSTALLED_APPS
MIDDLEWARE_CLASSES = (
'ddtrace.contrib.django.TraceMiddleware',
) + MIDDLEWARE_CLASSES
DATADOG_TRACE = {
'DEFAULT_SERVICE': 'contentworkshop',
'AGENT_PORT': os.getenv("DATADOG_STATSD_PORT"),
'AGENT_HOSTNAME': os.getenv("DATADOG_STATSD_HOSTNAME"),
'TAGS': {'env': 'production'},
}
DATABASES = {
'default': {
'ENGINE':
'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv("DB_CREDENTIALS_DBNAME"),
'USER': os.getenv("DB_CREDENTIALS_USER"),
'PASSWORD': os.getenv("DB_CREDENTIALS_PASSWORD"),
'HOST': os.getenv("DB_CREDENTIALS_HOST"),
'PORT': int(os.getenv("DB_CREDENTIALS_PORT")),
},
'export_staging': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'export_staging.sqlite3')
}
}
# email settings
EMAIL_BACKEND = "postmark.backends.PostmarkBackend"
POSTMARK_API_KEY = os.getenv("EMAIL_CREDENTIALS_POSTMARK_API_KEY")
| mit | Python |
0ec52bf3d0764b110f70e677e2ec9ede8dcbfda1 | increase coverage | netkraken/minion,netkraken/minion | src/unittest/python/netkraken_tests.py | src/unittest/python/netkraken_tests.py | from __future__ import print_function
from datetime import datetime
import unittest
from mock import patch
import netkraken
from testhelper import myAssertDictEqual
class NetKrakenTests(unittest.TestCase):
def setUp(self):
netkraken.settings["stagedir"] = "/stage"
netkraken.settings["finaldir"] = "/final"
def test_get_timestamp(self):
self.assertEquals(("minute", "2042-12-12T12:12"),
netkraken.get_timestamp("2042-12-12T12:12"))
self.assertEquals(("minute", "2042-12-12T12:12"),
netkraken.get_timestamp("///foo///2042-12-12T12:12"))
self.assertRaises(Exception, netkraken.get_timestamp, "no-valid-date")
def test_get_higher_timestamp(self):
self.assertEquals(("hour", "2042-12-12T12"),
netkraken.get_higher_timestamp("///foo///2042-12-12T12:12"))
self.assertEquals(("day", "2042-12-12"),
netkraken.get_higher_timestamp("///foo///2042-12-12T12"))
@patch("netkraken.get_current_datetime")
def test_get_current_timestrings(self, now_mock):
now_mock.return_value = datetime(2042, 12, 12, 12, 12)
myAssertDictEqual({'day': '2042-12-12', 'hour': '2042-12-12T12', 'minute': '2042-12-12T12:12'},
netkraken.get_current_timestrings())
self.assertEqual("/stage/2042-12-12T12:12", netkraken.get_current_stage_filename())
def test_get_final_filename_with_full_path(self):
self.assertEqual("/final/aha", netkraken.get_final_filename("/stage/aha"))
def test_get_higher_timestamp_failed(self):
self.assertEqual((None, None), netkraken.get_higher_timestamp("2042-12-13"))
if __name__ == "__main__":
unittest.main()
| from __future__ import print_function
from datetime import datetime
import unittest
from mock import patch
import netkraken
from testhelper import myAssertDictEqual
class NetKrakenTests(unittest.TestCase):
def test_get_timestamp(self):
self.assertEquals(("minute", "2042-12-12T12:12"),
netkraken.get_timestamp("2042-12-12T12:12"))
self.assertEquals(("minute", "2042-12-12T12:12"),
netkraken.get_timestamp("///foo///2042-12-12T12:12"))
self.assertRaises(Exception, netkraken.get_timestamp, "no-valid-date")
def test_get_higher_timestamp(self):
self.assertEquals(("hour", "2042-12-12T12"),
netkraken.get_higher_timestamp("///foo///2042-12-12T12:12"))
self.assertEquals(("day", "2042-12-12"),
netkraken.get_higher_timestamp("///foo///2042-12-12T12"))
@patch("netkraken.get_current_datetime")
def test_get_current_timestrings(self, now_mock):
now_mock.return_value = datetime(2042, 12, 12, 12, 12)
myAssertDictEqual({'day': '2042-12-12', 'hour': '2042-12-12T12', 'minute': '2042-12-12T12:12'},
netkraken.get_current_timestrings())
if __name__ == "__main__":
unittest.main()
| apache-2.0 | Python |
00c1cf47fd012b1cbd965a43347615184e404dbe | Fix error in data/geo/airports/download.py (#719) | mbudiu-vmw/hiero,mbudiu-vmw/hiero,mbudiu-vmw/hiero,mbudiu-vmw/hiero,mbudiu-vmw/hiero | data/geo/airports/download.py | data/geo/airports/download.py | #!/usr/bin/env python3
# Copyright (c) 2020 VMware Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Downloads the US states map
# pylint: disable=invalid-name,missing-docstring
import os
import subprocess
def execute_command(command):
    """Run *command* through the shell, aborting the script on failure."""
    # Echo the command so the log shows exactly what was executed.
    print(command)
    status = subprocess.call(command, shell=True)
    if status != 0:
        print("Exit code returned:", status)
        exit(status)
def main():
    """Download the airports shapefile archive, unpack it, and delete the zip."""
    base_url = "https://mapcruzin.com/fcc-wireless-shapefiles/"
    archive = "airports.zip"
    execute_command("wget -q " + base_url + archive)
    execute_command("unzip -o " + archive)
    os.unlink(archive)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
# Copyright (c) 2020 VMware Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Downloads the US states map
# pylint: disable=invalid-name,missing-docstring
import os
import subprocess
def execute_command(command):
    """Executes the specified command using a shell"""
    # Echo the command so the log shows exactly what was executed.
    print(command)
    exitcode = subprocess.call(command, shell=True)
    if exitcode != 0:
        # Abort the whole script on the first failing step.
        print("Exit code returned:", exitcode)
        exit(exitcode)
def main():
    """Download the airports shapefile archive, unpack it, and delete the zip.

    Fix: ``site`` used to contain the archive name itself, so appending
    ``file`` built ".../airports.zipairports.zip" and the download failed.
    Keep the directory URL and the file name separate instead.
    """
    site = "https://mapcruzin.com/fcc-wireless-shapefiles/"
    file = "airports.zip"
    execute_command("wget -q " + site + file)
    execute_command("unzip -o " + file)
    os.unlink(file)
if __name__ == "__main__":
main()
| apache-2.0 | Python |
0826548cdc2c75db0ddda128124ec443a1a1b29f | set invalid positions to -1 | nathbo/GO_DILab,nathbo/GO_DILab | src/learn/dev_yu/MovePredictionBot.py | src/learn/dev_yu/MovePredictionBot.py | import os
from os.path import dirname, abspath
import numpy as np
import math
from src import Utils
from src.play.model.Board import EMPTY, BLACK, WHITE
from src.play.model.Move import Move
BLACK_VAL = -1.35
WHITE_VAL = 1.25
EMPTY_VAL = 0.25
class MovePredictionBot:
    """Go bot that picks moves with a pre-trained Keras move-prediction model."""

    def __init__(self):
        project_dir = dirname(dirname(dirname(dirname(abspath(__file__)))))
        Utils.set_keras_backend('theano')
        import keras
        model_path = os.path.join(project_dir, 'src/learn/dev_yu/model.h5')
        self.model = keras.models.load_model(model_path)

    @staticmethod
    def replace_entry(entry):
        """Map a board constant to the numeric encoding the model was trained on."""
        # Compare by value, not identity: ``is`` against these constants only
        # worked by accident of interning and breaks for e.g. numpy scalars.
        if entry == EMPTY:
            return EMPTY_VAL
        if entry == BLACK:
            return BLACK_VAL
        if entry == WHITE:
            return WHITE_VAL

    def flatten_matrix(self, m, color):
        """Flatten board matrix *m* into the model's input vector.

        The acting player's colour is appended as the final feature.
        """
        ls = m.tolist()
        ls = [self.replace_entry(entry) for row in ls for entry in row]
        if color == BLACK:
            ls.append(BLACK_VAL)
        else:
            ls.append(WHITE_VAL)
        return ls

    def genmove(self, color, game) -> Move:
        """Return the model's highest-scoring legal move (or a pass)."""
        input_board = self.flatten_matrix(game.board, color)
        pred = self.model.predict(np.array([input_board]).reshape(1, -1))
        max_idx = np.argmax(pred)
        # ``np.argmax`` returns a numpy integer, so the old ``max_idx is 81``
        # identity test was always False; compare by value. Index 81 is the
        # network's pass output.
        if max_idx == 81:
            return Move(is_pass=True)
        board = pred[0][0:81]
        # Set all invalid locations to -1 so they can never win the argmax
        # (assumes model scores are non-negative -- TODO confirm).
        for move in game.get_invalid_locations(color):
            flat_idx = move.to_flat_idx(game.size)
            board[flat_idx] = -1
        max_idx = np.argmax(board)
        row = int(math.floor(max_idx / game.size))
        col = int(max_idx % game.size)
        return Move(col=col, row=row)
| import os
from os.path import dirname, abspath
import numpy as np
import math
from src import Utils
from src.play.model.Board import EMPTY, BLACK, WHITE
from src.play.model.Move import Move
BLACK_VAL = -1.35
WHITE_VAL = 1.25
EMPTY_VAL = 0.25
class MovePredictionBot:
    """Go bot that picks moves with a pre-trained Keras move-prediction model."""

    def __init__(self):
        project_dir = dirname(dirname(dirname(dirname(abspath(__file__)))))
        Utils.set_keras_backend('theano')
        import keras
        model_path = os.path.join(project_dir, 'src/learn/dev_yu/model.h5')
        self.model = keras.models.load_model(model_path)

    @staticmethod
    def replace_entry(entry):
        """Map a board constant to the numeric encoding the model was trained on."""
        # Compare by value, not identity: ``is`` against these constants only
        # worked by accident of interning and breaks for e.g. numpy scalars.
        if entry == EMPTY:
            return EMPTY_VAL
        if entry == BLACK:
            return BLACK_VAL
        if entry == WHITE:
            return WHITE_VAL

    def flatten_matrix(self, m, color):
        """Flatten board matrix *m* into the model's input vector.

        The acting player's colour is appended as the final feature.
        """
        ls = m.tolist()
        ls = [self.replace_entry(entry) for row in ls for entry in row]
        if color == BLACK:
            ls.append(BLACK_VAL)
        else:
            ls.append(WHITE_VAL)
        return ls

    def genmove(self, color, game) -> Move:
        """Return the model's highest-scoring legal move (or a pass)."""
        input_board = self.flatten_matrix(game.board, color)
        pred = self.model.predict(np.array([input_board]).reshape(1, -1))
        max_idx = np.argmax(pred)
        # ``np.argmax`` returns a numpy integer, so the old ``max_idx is 81``
        # identity test was always False; compare by value. Index 81 is the
        # network's pass output.
        if max_idx == 81:
            return Move(is_pass=True)
        board = pred[0][0:81]
        # Fix: invalid locations were zeroed, but a 0 score can still tie
        # with (and win over) legal zero-probability moves. Use -1 so an
        # invalid cell can never be the argmax.
        for move in game.get_invalid_locations(color):
            flat_idx = move.to_flat_idx(game.size)
            board[flat_idx] = -1
        max_idx = np.argmax(board)
        row = int(math.floor(max_idx / game.size))
        col = int(max_idx % game.size)
        return Move(col=col, row=row)
| mit | Python |
7cbcee5dd6488316789fbad6f8d93fc929d560b7 | Update users/admin.py | moodpulse/l2,moodpulse/l2,moodpulse/l2,moodpulse/l2,moodpulse/l2 | users/admin.py | users/admin.py | from django.contrib import admin
from .models import DoctorProfile, AssignmentTemplates, AssignmentResearches, Speciality, Position, AvailableResearchByGroup, DistrictResearchLimitAssign, GroupHideMainMenuButtons
class DocAdmin(admin.ModelAdmin):
    """Admin options for doctor profiles: filter by department/speciality,
    search and link through the doctor's full name (``fio``)."""
    list_filter = (
        'podrazdeleniye',
        'specialities',
        'user__is_staff',
    )
    list_display = (
        'fio',
        'podrazdeleniye',
    )
    list_display_links = ('fio',)
    search_fields = ('fio',)
    # Dual-pane selectors for the large many-to-many relations.
    filter_horizontal = ('white_list_monitoring', 'black_list_monitoring', 'disabled_fin_source')
class ResDistrictResearchLimitAssign(admin.ModelAdmin):
    """Admin list view for per-district research limit assignments."""
    list_display = ('district_group', 'research', 'type_period_limit', 'limit_count')
    list_display_links = ('district_group', 'research', 'type_period_limit', 'limit_count')
class ResGroupHideMainMenuButtons(admin.ModelAdmin):
    """Admin list view for per-group hidden main-menu buttons."""
    list_display = (
        'title_buttons',
        'group',
    )
    list_display_links = (
        'title_buttons',
        'group',
    )
admin.site.register(DoctorProfile, DocAdmin) # Активация редактирования профилей врачей в админке
admin.site.register(AssignmentTemplates)
admin.site.register(AssignmentResearches)
admin.site.register(Speciality)
admin.site.register(Position)
admin.site.register(AvailableResearchByGroup)
admin.site.register(DistrictResearchLimitAssign, ResDistrictResearchLimitAssign)
admin.site.register(GroupHideMainMenuButtons, ResGroupHideMainMenuButtons)
| from django.contrib import admin
from .models import DoctorProfile, AssignmentTemplates, AssignmentResearches, Speciality, Position, AvailableResearchByGroup, DistrictResearchLimitAssign, GroupHideMainMenuButtons
class DocAdmin(admin.ModelAdmin):
list_filter = (
'podrazdeleniye',
'specialities',
'user__is_staff',
)
list_display = (
'fio',
'podrazdeleniye',
)
list_display_links = ('fio',)
search_fields = ('fio',)
filter_horizontal = ('white_list_monitoring', 'black_list_monitoring', 'disabled_fin_source')
class ResDistrictResearchLimitAssign(admin.ModelAdmin):
list_display = ('district_group', 'research', 'type_period_limit', 'limit_count')
list_display_links = ('district_group', 'research', 'type_period_limit', 'limit_count')
class ResGroupHideMainMenuButtons(admin.ModelAdmin):
list_display = ('title_buttons', 'group',)
list_display_links = ('title_buttons', 'group',)
admin.site.register(DoctorProfile, DocAdmin) # Активация редактирования профилей врачей в админке
admin.site.register(AssignmentTemplates)
admin.site.register(AssignmentResearches)
admin.site.register(Speciality)
admin.site.register(Position)
admin.site.register(AvailableResearchByGroup)
admin.site.register(DistrictResearchLimitAssign, ResDistrictResearchLimitAssign)
admin.site.register(GroupHideMainMenuButtons, ResGroupHideMainMenuButtons)
| mit | Python |
06384c5e55498945d5d18cb72ae56fbd63413cd5 | Add GCI programs to program map | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | app/soc/modules/gci/callback.py | app/soc/modules/gci/callback.py | # Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the GCI Callback.
"""
__authors__ = [
'"Madhusudan C.S." <madhusudancs@gmail.com>',
'"Daniel Hans" <dhans@google.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.modules.gci.tasks.updates import role_conversion
class Callback(object):
  """Callback object that handles interaction between the core.
  """

  # Core API version this callback targets.
  API_VERSION = 1

  def __init__(self, core):
    """Initializes a new Callback object for the specified core.
    """
    self.core = core
    # View instances, populated by registerViews().
    self.views = []

  def registerViews(self):
    """Instantiates all view objects.
    """
    # Deferred imports: the view modules are only loaded when this
    # module's callback is actually activated.
    from soc.modules.gci.views import bulk_create
    from soc.modules.gci.views import dashboard
    from soc.modules.gci.views import homepage
    from soc.modules.gci.views import org_app
    from soc.modules.gci.views import task
    self.views.append(bulk_create.BulkCreate())
    self.views.append(dashboard.Dashboard())
    self.views.append(homepage.Homepage())
    self.views.append(org_app.GCIOrgAppEditPage())
    self.views.append(org_app.GCIOrgAppPreviewPage())
    self.views.append(org_app.GCIOrgAppShowPage())
    self.views.append(org_app.GCIOrgAppTakePage())
    self.views.append(task.TaskViewPage())
    # Google Appengine Tasks
    from soc.modules.gci.tasks.bulk_create import BulkCreateTask
    self.views.append(BulkCreateTask())

  def registerWithSitemap(self):
    """Called by the server when sitemap entries should be registered.
    """
    self.core.requireUniqueService('registerWithSitemap')
    # Redesigned view registration
    for view in self.views:
      self.core.registerSitemapEntry(view.djangoURLPatterns())
    self.core.registerSitemapEntry(role_conversion.getDjangoURLPatterns())

  def registerWithProgramMap(self):
    """Called by the server when program_map entries should be registered.

    Builds a ('GCI Programs', [(key, name), ...]) entry from all stored
    GCIProgram entities and registers it with the core.
    """
    self.core.requireUniqueService('registerWithProgramMap')
    from soc.modules.gci.models.program import GCIProgram
    program_entities = GCIProgram.all().fetch(1000)
    # NOTE(review): ``map`` shadows the builtin; kept as-is in this
    # documentation-only pass.
    map = ('GCI Programs', [
        (str(e.key()), e.name) for e in program_entities])
    self.core.registerProgramEntry(map)
| # Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the GCI Callback.
"""
__authors__ = [
'"Madhusudan C.S." <madhusudancs@gmail.com>',
'"Daniel Hans" <dhans@google.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.modules.gci.tasks.updates import role_conversion
class Callback(object):
"""Callback object that handles interaction between the core.
"""
API_VERSION = 1
def __init__(self, core):
"""Initializes a new Callback object for the specified core.
"""
self.core = core
self.views = []
def registerViews(self):
"""Instantiates all view objects.
"""
from soc.modules.gci.views import bulk_create
from soc.modules.gci.views import dashboard
from soc.modules.gci.views import homepage
from soc.modules.gci.views import org_app
from soc.modules.gci.views import task
self.views.append(bulk_create.BulkCreate())
self.views.append(dashboard.Dashboard())
self.views.append(homepage.Homepage())
self.views.append(org_app.GCIOrgAppEditPage())
self.views.append(org_app.GCIOrgAppPreviewPage())
self.views.append(org_app.GCIOrgAppShowPage())
self.views.append(org_app.GCIOrgAppTakePage())
self.views.append(task.TaskViewPage())
# Google Appengine Tasks
from soc.modules.gci.tasks.bulk_create import BulkCreateTask
self.views.append(BulkCreateTask())
def registerWithSitemap(self):
"""Called by the server when sitemap entries should be registered.
"""
self.core.requireUniqueService('registerWithSitemap')
# Redesigned view registration
for view in self.views:
self.core.registerSitemapEntry(view.djangoURLPatterns())
self.core.registerSitemapEntry(role_conversion.getDjangoURLPatterns())
| apache-2.0 | Python |
7d114d01a7721f93e8b8adbe9a61ac379dd0f393 | Move variables outside for loop | open-synergy/social,open-synergy/social,acsone/social,acsone/social,open-synergy/social,acsone/social | mail_template_multi_report/models/email_template.py | mail_template_multi_report/models/email_template.py | # -*- coding: utf-8 -*-
# © 2016 Savoir-faire Linux
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import base64
from openerp import api, fields, models
from openerp.tools.safe_eval import safe_eval
class EmailTemplate(models.Model):
    """Extends ``email.template`` so extra reports can be attached per email."""
    _inherit = 'email.template'

    # Additional reports to render and attach alongside the template's own.
    report_line_ids = fields.One2many(
        'email.template.report.line', 'template_id', string='Other Reports')

    @api.model
    def generate_email_batch(self, template_id, res_ids, fields=None):
        """Generate the emails for *res_ids*, attaching every configured
        extra report whose (optional) condition renders truthy.

        Returns the parent method's result dict with ``(filename,
        base64_pdf)`` pairs appended to each record's ``attachments``.
        """
        results = super(EmailTemplate, self).generate_email_batch(
            template_id, res_ids, fields=fields)
        template = self.browse(template_id)
        # The attachment extension is loop-invariant; computed once here.
        report_ext = '.pdf'
        for report_line in template.report_line_ids:
            records = self.env[template.model_id.model].browse(res_ids)
            for rec in records:
                condition = report_line.condition
                if condition and condition.strip():
                    # Render the condition template and evaluate it; skip
                    # this record when it does not hold.
                    condition_result = self.render_template(
                        condition, template.model, rec.id)
                    if not condition_result or not safe_eval(condition_result):
                        continue
                report_name = self.render_template(
                    report_line.report_name, template.model, rec.id)
                report = report_line.report_template_id
                report_service = report.report_name
                result = self.env['report'].get_pdf(rec, report_service)
                result = base64.b64encode(result)
                if not report_name:
                    report_name = 'report.' + report_service
                if not report_name.endswith(report_ext):
                    report_name += report_ext
                results[rec.id].setdefault('attachments', [])
                results[rec.id]['attachments'].append((report_name, result))
        return results
| # -*- coding: utf-8 -*-
# © 2016 Savoir-faire Linux
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import base64
from openerp import api, fields, models
from openerp.tools.safe_eval import safe_eval
class EmailTemplate(models.Model):
    """Extends ``email.template`` so extra reports can be attached per email."""
    _inherit = 'email.template'

    # Additional reports to render and attach alongside the template's own.
    report_line_ids = fields.One2many(
        'email.template.report.line', 'template_id', string='Other Reports')

    @api.model
    def generate_email_batch(self, template_id, res_ids, fields=None):
        """Generate the emails for *res_ids*, attaching every configured
        extra report whose (optional) condition renders truthy.

        Returns the parent method's result dict with ``(filename,
        base64_pdf)`` pairs appended to each record's ``attachments``.

        Improvement: the attachment extension was rebuilt from a constant
        ``report_format`` inside the inner loop on every record; it is
        loop-invariant, so it is now computed once up front.
        """
        results = super(EmailTemplate, self).generate_email_batch(
            template_id, res_ids, fields=fields)
        template = self.browse(template_id)
        report_ext = '.pdf'
        for report_line in template.report_line_ids:
            records = self.env[template.model_id.model].browse(res_ids)
            for rec in records:
                condition = report_line.condition
                if condition and condition.strip():
                    # Render the condition template and evaluate it; skip
                    # this record when it does not hold.
                    condition_result = self.render_template(
                        condition, template.model, rec.id)
                    if not condition_result or not safe_eval(condition_result):
                        continue
                report_name = self.render_template(
                    report_line.report_name, template.model, rec.id)
                report = report_line.report_template_id
                report_service = report.report_name
                result = self.env['report'].get_pdf(rec, report_service)
                result = base64.b64encode(result)
                if not report_name:
                    report_name = 'report.' + report_service
                if not report_name.endswith(report_ext):
                    report_name += report_ext
                results[rec.id].setdefault('attachments', [])
                results[rec.id]['attachments'].append((report_name, result))
        return results
| agpl-3.0 | Python |
e87ee0f670731a203d34e53d5befa30b374d06b9 | Remove whitespace | Gjum/SpockBot,luken/SpockBot,SpockBotMC/SpockBot,gamingrobot/SpockBot,nickelpro/SpockBot | spock/plugins/helpers/start.py | spock/plugins/helpers/start.py | """
This plugin creates a convenient start() method and attaches it directly
to the client. More complex bots will likely want to create their own
initialization plugin, so StartPlugin stays out of the way unless you
call the start() method. However, the start() method is very convenient
for demos and tutorials, and illustrates the basic steps for initializing
a bot.
"""
from spock.mcp import mcdata
from spock.plugins.base import PluginBase
class StartPlugin(PluginBase):
    """Attaches a convenience ``start()`` entry point directly to the client.

    Handles session authentication on ``event_start`` and performs the
    Minecraft handshake + Login Start once the socket connects.
    """
    requires = ('Event', 'Net', 'Auth')
    events = {
        'event_start': 'start_session',
        'connect': 'handshake_and_login_start',
    }
    defaults = {
        'username': 'Bot',
        'password': None,
        'host': 'localhost',
        'port': 25565,
    }

    def __init__(self, ploader, settings):
        """Expose ``start`` on the plugin loader so callers can use it."""
        super(StartPlugin, self).__init__(ploader, settings)
        setattr(ploader, 'start', self.start)

    def start(self, host=None, port=None):
        """Record the target host/port (falling back to settings) and run
        the event loop."""
        self.host = host or self.settings['host']
        self.port = port or self.settings['port']
        self.event.event_loop()

    def start_session(self, _, __):
        """Authenticate; on success, open the TCP connection."""
        session = self.auth.start_session(
            self.settings['username'],
            self.settings['password']
        )
        if 'error' not in session:
            self.net.connect(self.host, self.port)

    def handshake_and_login_start(self, _, __):
        """Send the protocol handshake followed by the Login Start packet."""
        handshake = {
            'protocol_version': mcdata.MC_PROTOCOL_VERSION,
            'host': self.net.host,
            'port': self.net.port,
            'next_state': mcdata.LOGIN_STATE
        }
        self.net.push_packet('HANDSHAKE>Handshake', handshake)
        self.net.push_packet('LOGIN>Login Start', {'name': self.auth.username})
| """
This plugin creates a convenient start() method and attaches it directly
to the client. More complex bots will likely want to create their own
initialization plugin, so StartPlugin stays out of the way unless you
call the start() method. However, the start() method is very convenient
for demos and tutorials, and illustrates the basic steps for initializing
a bot.
"""
from spock.mcp import mcdata
from spock.plugins.base import PluginBase
class StartPlugin(PluginBase):
requires = ('Event', 'Net', 'Auth')
events = {
'event_start': 'start_session',
'connect': 'handshake_and_login_start',
}
defaults = {
'username': 'Bot',
'password': None,
'host': 'localhost',
'port': 25565,
}
def __init__(self, ploader, settings):
super(StartPlugin, self).__init__(ploader, settings)
setattr(ploader, 'start', self.start)
def start(self, host=None, port=None):
self.host = host if host else self.settings['host']
self.port = port if port else self.settings['port']
self.event.event_loop()
def start_session(self, _, __):
if 'error' not in self.auth.start_session(
self.settings['username'],
self.settings['password']
):
self.net.connect(self.host, self.port)
def handshake_and_login_start(self, _, __):
self.net.push_packet('HANDSHAKE>Handshake', {
'protocol_version': mcdata.MC_PROTOCOL_VERSION,
'host': self.net.host,
'port': self.net.port,
'next_state': mcdata.LOGIN_STATE
})
self.net.push_packet('LOGIN>Login Start', {'name': self.auth.username})
| mit | Python |
528edba420089249bd58c0621e06225db84e223f | Add missing translation on logging contrib app | jeanmask/opps,jeanmask/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,opps/opps,YACOWS/opps,opps/opps,opps/opps,williamroot/opps,YACOWS/opps,opps/opps | opps/contrib/logging/models.py | opps/contrib/logging/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from opps.core.models import NotUserPublishable
class Logging(NotUserPublishable):
    """Audit-log entry recording a user action within an application."""
    # The acting user; nullable so system-generated events can be logged too.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True, blank=True,
        verbose_name=_(u'User')
    )
    application = models.CharField(
        _(u"Application"),
        max_length=75,
        null=True, blank=True,
        db_index=True)
    action = models.CharField(
        _(u"Action"),
        max_length=50,
        null=True, blank=True,
        db_index=True)
    text = models.TextField(
        _(u"Text"),
        null=True, blank=True,
        db_index=True)

    def save(self, *args, **kwargs):
        """Persist the entry, always marking it as published."""
        # Log entries bypass the usual draft/publish workflow.
        self.published = True
        super(Logging, self).save(*args, **kwargs)

    class Meta:
        verbose_name = _(u'Logging')
        verbose_name_plural = _(u'Loggings')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from opps.core.models import NotUserPublishable
class Logging(NotUserPublishable):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True, blank=True,
)
application = models.CharField(
_(u"Application"),
max_length=75,
null=True, blank=True,
db_index=True)
action = models.CharField(
_(u"Action"),
max_length=50,
null=True, blank=True,
db_index=True)
text = models.TextField(
_(u"Text"),
null=True, blank=True,
db_index=True)
def save(self, *args, **kwargs):
self.published = True
super(Logging, self).save(*args, **kwargs)
| mit | Python |
229fbef2849965bd75005e418a491971feb803ca | support default loc scale for logpdf too | pyro-ppl/numpyro | numpyro/distributions/distribution.py | numpyro/distributions/distribution.py | import scipy.stats as sp
from jax import lax
from jax.numpy.lax_numpy import _promote_args
import jax.numpy as np
from numpy.random import mtrand
class jax_continuous(sp.rv_continuous):
    """scipy ``rv_continuous`` subclass adapted to JAX arrays and PRNG keys."""

    def rvs(self, *args, **kwargs):
        """Draw samples; ``random_state`` must be a JAX PRNG key."""
        rng = kwargs.pop('random_state')
        if rng is None:
            rng = self.random_state
        # assert that rng is PRNGKey and not mtrand.RandomState object from numpy.
        assert not isinstance(rng, mtrand.RandomState)
        size = kwargs.get('size', None)
        args = list(args)
        # NOTE(review): ``args.pop()`` as the ``dict.get`` default is
        # evaluated eagerly, so a trailing positional argument is consumed
        # even when the keyword IS supplied -- confirm callers never mix
        # positional loc/scale with the keyword form.
        scale = kwargs.get('scale', args.pop() if len(args) > 0 else 1)
        loc = kwargs.get('loc', args.pop() if len(args) > 0 else 0)
        loc, scale, *args = _promote_args("rvs", loc, scale, *args)
        if not size:
            # Implicit sample shape = broadcast of all parameter shapes.
            shapes = [np.shape(arg) for arg in args] + [np.shape(loc), np.shape(scale)]
            size = lax.broadcast_shapes(*shapes)
        else:
            args = [np.reshape(arg, size) for arg in args]
        self._random_state = rng
        self._size = size
        vals = self._rvs(*args)
        # Affine-transform the standardized samples.
        return vals * scale + loc

    def logpdf(self, x, *args, **kwargs):
        """Log-density at *x*; trailing positional args are (loc, scale),
        defaulting to 0 and 1 when omitted."""
        args = list(args)
        scale = kwargs.get('scale', args.pop() if len(args) > 0 else 1)
        loc = kwargs.get('loc', args.pop() if len(args) > 0 else 0)
        loc, scale, *args = _promote_args(self.logpdf, loc, scale, *args)
        # Standardize, then apply the change-of-variables correction.
        x = (x - loc) / scale
        return self._logpdf(x) - np.log(scale)
| import scipy.stats as sp
from jax import lax
from jax.numpy.lax_numpy import _promote_args
import jax.numpy as np
from numpy.random import mtrand
class jax_continuous(sp.rv_continuous):
    """scipy ``rv_continuous`` subclass adapted to JAX arrays and PRNG keys."""

    def rvs(self, *args, **kwargs):
        """Draw samples; ``random_state`` must be a JAX PRNG key."""
        rng = kwargs.pop('random_state')
        if rng is None:
            rng = self.random_state
        # assert that rng is PRNGKey and not mtrand.RandomState object from numpy.
        assert not isinstance(rng, mtrand.RandomState)
        size = kwargs.get('size', None)
        args = list(args)
        # Consume trailing positional args as (loc, scale) only when the
        # keyword is absent: passing ``args.pop()`` as the ``dict.get``
        # default evaluated it eagerly and swallowed a positional argument
        # even when the keyword was supplied.
        if 'scale' in kwargs:
            scale = kwargs['scale']
        else:
            scale = args.pop() if args else 1
        if 'loc' in kwargs:
            loc = kwargs['loc']
        else:
            loc = args.pop() if args else 0
        loc, scale, *args = _promote_args("rvs", loc, scale, *args)
        if not size:
            # Implicit sample shape = broadcast of all parameter shapes.
            shapes = [np.shape(arg) for arg in args] + [np.shape(loc), np.shape(scale)]
            size = lax.broadcast_shapes(*shapes)
        else:
            args = [np.reshape(arg, size) for arg in args]
        self._random_state = rng
        self._size = size
        vals = self._rvs(*args)
        return vals * scale + loc

    def logpdf(self, x, *args, **kwargs):
        """Log-density at *x*; loc/scale default to 0/1 when omitted.

        Fix: the previous version called ``args.pop()`` with no guard, so
        ``logpdf(x)`` without loc/scale raised IndexError.
        """
        args = list(args)
        if 'scale' in kwargs:
            scale = kwargs['scale']
        else:
            scale = args.pop() if args else 1
        if 'loc' in kwargs:
            loc = kwargs['loc']
        else:
            loc = args.pop() if args else 0
        loc, scale, *args = _promote_args(self.logpdf, loc, scale, *args)
        # Standardize, then apply the change-of-variables correction.
        x = (x - loc) / scale
        return self._logpdf(x) - np.log(scale)
| apache-2.0 | Python |
411decbdb193b28bb3060e02e81bfa29483e85a9 | Remove debug code from staticgen views. | mishbahr/staticgen-demo,mishbahr/staticgen-demo,mishbahr/staticgen-demo | staticgen_demo/blog/staticgen_views.py | staticgen_demo/blog/staticgen_views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from staticgen.staticgen_pool import staticgen_pool
from staticgen.staticgen_views import StaticgenView
from .models import Post
class BlogPostListView(StaticgenView):
    """Registers paginated, i18n-aware URLs for the blog post list page."""
    is_paginated = True
    i18n = True

    def items(self):
        # A single named URL; pagination expands it into per-page URLs.
        return ('blog:posts_list', )
class BlogPostDetailView(StaticgenView):
    """Registers i18n-aware detail URLs for every blog post."""
    i18n = True

    def items(self):
        # Each Post instance resolves to its own detail URL.
        return Post.objects.all()
staticgen_pool.register(BlogPostListView)
staticgen_pool.register(BlogPostDetailView)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from staticgen.staticgen_pool import staticgen_pool
from staticgen.staticgen_views import StaticgenView
from .models import Post
class BlogPostListView(StaticgenView):
    """Registers paginated, i18n-aware URLs for the blog post list page."""
    is_paginated = True
    i18n = True

    def items(self):
        # A single named URL; pagination expands it into per-page URLs.
        return ('blog:posts_list', )

    def _get_paginator(self, url):
        """Return ``(paginator, is_paginated)`` from the rendered response.

        Falls back to ``(None, False)`` when the page does not render with
        status 200 or exposes no paginator in its context.

        Fix: removed leftover debug ``print`` statements (Python-2 syntax,
        a SyntaxError under Python 3) that also leaked response internals
        to stdout on every generated page.
        """
        response = self.client.get(url)
        if not response.status_code == 200:
            pass
        else:
            context = {}
            if hasattr(response, 'context_data'):
                context = response.context_data
            elif hasattr(response, 'context'):
                context = response.context
            try:
                return context['paginator'], context['is_paginated']
            except KeyError:
                pass
        return None, False
class BlogPostDetailView(StaticgenView):
    """Registers i18n-aware detail URLs for every blog post."""
    i18n = True

    def items(self):
        # Each Post instance resolves to its own detail URL.
        return Post.objects.all()
staticgen_pool.register(BlogPostListView)
staticgen_pool.register(BlogPostDetailView)
| bsd-3-clause | Python |
2ba5b77b31cec9ccbaca262ce44df9ba175f4485 | Refactor ElectronicAttenuator to use @property | DavidLutton/EngineeringProject | engineering_project/Instrument/ElectronicAttenuator.py | engineering_project/Instrument/ElectronicAttenuator.py | #!/usr/bin/env python3
import time
import logging
# from scipy.interpolate import UnivariateSpline
# import numpy as np
from Instrument.GenericInstrument import GenericInstrument as GenericInstrument
class ElectronicAttenuator(GenericInstrument):
    """Base class for programmable electronic attenuator drivers."""
    def __init__(self, instrument):
        super().__init__(instrument)

    def __repr__(self):
        return("{}, {}".format(__class__, self.instrument))
class Marconi2187(ElectronicAttenuator):
    """Driver for the Marconi Instruments 2187 programmable attenuator."""

    def __init__(self, instrument, logger=None):
        super().__init__(instrument)
        # self.log = logging.getLogger(__name__)
        self.log.info('Creating {} for {}'.format(str(__class__.__name__), self.instrument))
        # self.log.info('Creating an instance of\t' + str(__class__))
        # Guard against talking to the wrong instrument on the bus.
        assert self.IDN.startswith("MARCONI INSTRUMENTS,2187,")

    def __repr__(self):
        return("{}, {}".format(__class__, self.instrument))

    @property
    def attenuation(self):
        """Attenuation in dB, read back from the instrument."""
        return(float(self.query("ATTN?")))

    @attenuation.setter
    def attenuation(self, attenuation):
        # Fix: the setter parameter was named ``frequency`` while the body
        # formatted ``attenuation``, raising NameError on every assignment.
        self.write("ATTN {0:.1f}DB".format(attenuation))
| #!/usr/bin/env python3
import time
import logging
# from scipy.interpolate import UnivariateSpline
# import numpy as np
from Instrument.GenericInstrument import GenericInstrument as GenericInstrument
class ElectronicAttenuator(GenericInstrument):
def __init__(self, instrument):
super().__init__(instrument)
def __repr__(self):
return("{}, {}".format(__class__, self.instrument))
class Marconi2187(ElectronicAttenuator):
def __init__(self, instrument, logger=None):
super().__init__(instrument)
# self.log = logging.getLogger(__name__)
self.log.info('Creating {} for {}'.format(str(__class__.__name__), self.instrument))
# self.log.info('Creating an instance of\t' + str(__class__))
assert self.IDN.startswith("MARCONI INSTRUMENTS,2187,")
self.attenuation = float(self.query("ATTN?"))
def __repr__(self):
return("{}, {}".format(__class__, self.instrument))
def set(self, attenuation):
if attenuation != self.attenuation:
self.write("ATTN {0:.1f}DB".format(attenuation))
self.attenuation = attenuation
| mit | Python |
45df0a7370456b285f7116cc3d1bc05d683f31d1 | Update ElectronicAttenuator.MarconiInstruments2187 with preset value | DavidLutton/EngineeringProject | engineering_project/Instrument/ElectronicAttenuator.py | engineering_project/Instrument/ElectronicAttenuator.py | #!/usr/bin/env python3
"""."""
# import time
# import logging
# from scipy.interpolate import UnivariateSpline
# import numpy as np
try:
from Instrument.GenericInstrument import GenericInstrument
from Instrument.IEEE488 import IEEE488
from Instrument.SCPI import SCPI
except ImportError:
from GenericInstrument import GenericInstrument
from IEEE488 import IEEE488
from SCPI import SCPI
class ElectronicAttenuator(GenericInstrument):
"""Parent class for ElectronicAttenuators."""
def __init__(self, instrument):
"""."""
super().__init__(instrument)
def __repr__(self):
"""."""
return"{}, {}".format(__class__, self.instrument)
class MarconiInstruments2187(ElectronicAttenuator, IEEE488):
    """Marconi 2187 - DC-20GHz 1W max N-type.

    .. figure:: images/ElectronicAttenuator/MarconiInstruments2187.jpg
    """

    # Unit suffixes accepted by the instrument's command set.
    units = {
        'dB': 'DB',
    }

    def __init__(self, instrument):
        """Attach to *instrument* and verify its identity string."""
        super().__init__(instrument)
        # self.log = logging.getLogger(__name__)
        self.log.info('Creating {} for {}'.format(str(__class__.__name__), self.instrument))
        # self.log.info('Creating an instance of\t' + str(__class__))
        # Guard against talking to the wrong instrument on the bus.
        assert self.IDN.startswith("MARCONI INSTRUMENTS,2187,")

    def __repr__(self):
        """."""
        return "{}, {}".format(__class__, self.instrument)

    @property
    def attenuation(self):
        """Attenuation of instrument."""
        return float(self.query("ATTN?"))

    # @validsteps(3,4,5,6)
    @attenuation.setter
    def attenuation(self, attenuation):
        self.write("ATTN {0:.1f}{1}".format(attenuation, self.units['dB']))

    def preset(self):
        """Reset the attenuator to a known state.

        NOTE(review): 144 looks like the instrument's maximum attenuation
        in dB -- confirm against the 2187 manual.
        """
        self.attenuation = 144
REGISTER = {
"MARCONI INSTRUMENTS,2187,": MarconiInstruments2187,
}
| #!/usr/bin/env python3
"""."""
# import time
# import logging
# from scipy.interpolate import UnivariateSpline
# import numpy as np
try:
from Instrument.GenericInstrument import GenericInstrument
from Instrument.IEEE488 import IEEE488
from Instrument.SCPI import SCPI
except ImportError:
from GenericInstrument import GenericInstrument
from IEEE488 import IEEE488
from SCPI import SCPI
class ElectronicAttenuator(GenericInstrument):
"""Parent class for ElectronicAttenuators."""
def __init__(self, instrument):
"""."""
super().__init__(instrument)
def __repr__(self):
"""."""
return"{}, {}".format(__class__, self.instrument)
class MarconiInstruments2187(ElectronicAttenuator, IEEE488):
"""Marconi 2187 - DC-20GHz 1W max N-type.
.. figure:: images/ElectronicAttenuator/MarconiInstruments2187.jpg
"""
units = {
'dB': 'DB',
}
def __init__(self, instrument):
"""."""
super().__init__(instrument)
# self.log = logging.getLogger(__name__)
self.log.info('Creating {} for {}'.format(str(__class__.__name__), self.instrument))
# self.log.info('Creating an instance of\t' + str(__class__))
assert self.IDN.startswith("MARCONI INSTRUMENTS,2187,")
def __repr__(self):
"""."""
return "{}, {}".format(__class__, self.instrument)
@property
def attenuation(self):
"""Attenuation of instrument."""
return float(self.query("ATTN?"))
@attenuation.setter
# @validsteps(3,4,5,6)
def attenuation(self, attenuation):
self.write("ATTN {0:.1f}{1}".format(attenuation, self.units['dB']))
REGISTER = {
"MARCONI INSTRUMENTS,2187,": MarconiInstruments2187,
}
| mit | Python |
15be37bfad04ff6ed3514c24e323059b04ccc92e | Add shallow copy comment | bowen0701/algorithms_data_structures | lc0040_combination_sum_ii.py | lc0040_combination_sum_ii.py | """Leetcode 40. Combination Sum II
Medium
URL: https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
- All numbers (including target) will be positive integers.
- The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
class SolutionBacktrack(object):
    """Backtracking solver for LeetCode 40 (Combination Sum II)."""

    def _backtrack(self, result, temp, start, target, candidates):
        """Extend *temp* with candidates from index *start* onward, appending
        every combination that sums exactly to *target* onto *result*.

        Candidates must already be sorted so equal values are adjacent.
        """
        if target < 0:
            return None
        if target == 0:
            # Record a shallow copy; *temp* keeps mutating as we backtrack.
            result.append(list(temp))
            return None
        previous = None
        for idx in range(start, len(candidates)):
            cand = candidates[idx]
            # Skip a value already tried at this depth to avoid duplicate
            # combinations (safe because the list is sorted).
            if cand == previous:
                continue
            previous = cand
            temp.append(cand)
            self._backtrack(result, temp, idx + 1, target - cand, candidates)
            temp.pop()

    def combinationSum2(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]

        Apply backtracking with sorting to avoid duplicates.

        Time complexity: O(2^n).
        Space complexity: O(k).
        """
        # In-place sort groups duplicates so the dedupe check works.
        candidates.sort()
        combos = []
        self._backtrack(combos, [], 0, target, candidates)
        return combos
def main():
# Output: [[1, 7],[1, 2, 5],[2, 6],[1, 1, 6]]
# candidates = [10,1,2,7,6,1,5]
# target = 8
# print SolutionBacktrack().combinationSum2(candidates, target)
# Output: [[1, 2, 2],[5]]
candidates = [2,5,2,1,2]
target = 5
print SolutionBacktrack().combinationSum2(candidates, target)
if __name__ == '__main__':
main()
| """Leetcode 40. Combination Sum II
Medium
URL: https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
- All numbers (including target) will be positive integers.
- The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
class SolutionBacktrack(object):
def _backtrack(self, result, temp, start, target, candidates):
if target < 0:
return None
if target == 0:
result.append(temp[:])
return None
for i in range(start, len(candidates)):
if i == start or candidates[i] != candidates[i - 1]:
temp.append(candidates[i])
self._backtrack(result, temp, i + 1, target - candidates[i], candidates)
temp.pop()
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
Apply backtracking with sorting to avoid duplicates.
Time complexity: O(2^n).
Space complexity: O(k).
"""
# Sort candidates to avoid duplicates.
candidates.sort()
result = []
temp = []
start = 0
self._backtrack(result, temp, start, target, candidates)
return result
def main():
# Output: [[1, 7],[1, 2, 5],[2, 6],[1, 1, 6]]
# candidates = [10,1,2,7,6,1,5]
# target = 8
# print SolutionBacktrack().combinationSum2(candidates, target)
# Output: [[1, 2, 2],[5]]
candidates = [2,5,2,1,2]
target = 5
print SolutionBacktrack().combinationSum2(candidates, target)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
2553f9863a6539e320cb4bbc87cff4e8aef8d9dc | Complete DFS recur udpate sol | bowen0701/algorithms_data_structures | lc0695_max_area_of_island.py | lc0695_max_area_of_island.py | """Leetcode 695. Max Area of Island
Medium
URL: https://leetcode.com/problems/max-area-of-island/
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's
(representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array.
(If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11,
because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
"""
class SolutionDFSRecurUpdate(object):
def _dfs(self, r, c, grid):
# Check if visit out of boundary.
if r < 0 or r >= len(grid) or c < 0 or c >= len(grid[0]):
return 0
# Check if the cell is 0 or visited.
if grid[r][c] == 0:
return 0
# Update grid to mark visit.
grid[r][c] = 0
area = 1
# Visit 4 directions to accumulate area.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_neighbor, c_neighbor in dirs:
area += self._dfs(r_neighbor, c_neighbor, grid)
return area
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(m*n).
"""
if not grid or not grid[0]:
return 0
max_area = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == 1:
area = self._dfs(r, c, grid)
max_area = max(max_area, area)
return max_area
def main():
# Output: 6
grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
print SolutionDFSRecurUpdate().maxAreaOfIsland(grid)
# Output: 0.
grid = [[0,0,0,0,0,0,0,0]]
print SolutionDFSRecurUpdate().maxAreaOfIsland(grid)
if __name__ == '__main__':
main()
| """Leetcode 695. Max Area of Island
Medium
URL: https://leetcode.com/problems/max-area-of-island/
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's
(representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array.
(If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11,
because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
"""
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
16766d1fae67d1a949e6fb27c3147a882d964957 | Revise docstring and main() | bowen0701/algorithms_data_structures | lc206_reverse_linked_list.py | lc206_reverse_linked_list.py | """206. Reverse Linked List
Easy
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively.
Could you implement both?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class SolutionIter(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
Time complexity: O(n).
Space complexity: O(1).
"""
previous = None
while head:
current = ListNode(head.val)
current.next = previous
previous = current
head = head.next
return previous
class SolutionRecur(object):
def reverseList(self, head, previous=None):
"""
:type head: ListNode
:rtype: ListNode
Time complexity: O(n).
Space complexity: O(1).
"""
if not head:
return previous
current = ListNode(head.val)
current.next = previous
head = head.next
previous = current
return self.reverseList(head, previous)
def main():
# 1->2->3->4->5->NULL
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(4)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
# 5->4->3->2->1->NULL
# Should be 5.
print SolutionIter().reverseList(node1).val
print SolutionRecur().reverseList(node1).val
# Ans: 4.
print SolutionIter().reverseList(node1).next.val
# Ans: 3.
print SolutionIter().reverseList(node1).next.next.val
if __name__ == '__main__':
main()
| """206. Reverse Linked List
Easy
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class SolutionIter(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
Time complexity: O(n).
Space complexity: O(1).
"""
previous = None
while head:
current = ListNode(head.val)
current.next = previous
previous = current
head = head.next
return previous
class SolutionRecur(object):
def reverseList(self, head, previous=None):
"""
:type head: ListNode
:rtype: ListNode
Time complexity: O(n).
Space complexity: O(1).
"""
if not head:
return previous
current = ListNode(head.val)
current.next = previous
head = head.next
previous = current
return self.reverseList(head, previous)
def main():
# 1->2->3->4->5->NULL
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(4)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
# 5->4->3->2->1->NULL
print SolutionIter().reverseList(node1).val
print SolutionRecur().reverseList(node1).val
# Shoule be 4.
print SolutionRecur().reverseList(node1).next.val
# Shoule be 3.
print SolutionRecur().reverseList(node1).next.next.val
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
83dc0283f4b31da76cf6ea1dcb102b11c6dc2a91 | Rearrange tests so that test_zero is only things that don't work with ask() | bukzor/sympy,kumarkrishna/sympy,yukoba/sympy,Vishluck/sympy,souravsingh/sympy,madan96/sympy,shikil/sympy,wanglongqi/sympy,Davidjohnwilson/sympy,oliverlee/sympy,VaibhavAgarwalVA/sympy,Shaswat27/sympy,oliverlee/sympy,atsao72/sympy,Titan-C/sympy,skidzo/sympy,MechCoder/sympy,hargup/sympy,chaffra/sympy,moble/sympy,atreyv/sympy,jerli/sympy,drufat/sympy,wyom/sympy,madan96/sympy,Vishluck/sympy,jerli/sympy,rahuldan/sympy,saurabhjn76/sympy,Curious72/sympy,oliverlee/sympy,kaushik94/sympy,wyom/sympy,shikil/sympy,sampadsaha5/sympy,chaffra/sympy,hargup/sympy,rahuldan/sympy,ga7g08/sympy,garvitr/sympy,jaimahajan1997/sympy,emon10005/sympy,atreyv/sympy,VaibhavAgarwalVA/sympy,abhiii5459/sympy,jaimahajan1997/sympy,aktech/sympy,pandeyadarsh/sympy,Designist/sympy,postvakje/sympy,skidzo/sympy,abloomston/sympy,aktech/sympy,garvitr/sympy,wanglongqi/sympy,farhaanbukhsh/sympy,grevutiu-gabriel/sympy,mcdaniel67/sympy,kevalds51/sympy,lindsayad/sympy,garvitr/sympy,kumarkrishna/sympy,ChristinaZografou/sympy,ChristinaZografou/sympy,Gadal/sympy,aktech/sympy,maniteja123/sympy,Davidjohnwilson/sympy,kaichogami/sympy,pandeyadarsh/sympy,sampadsaha5/sympy,sampadsaha5/sympy,bukzor/sympy,drufat/sympy,farhaanbukhsh/sympy,jaimahajan1997/sympy,abhiii5459/sympy,asm666/sympy,farhaanbukhsh/sympy,Designist/sympy,shikil/sympy,Shaswat27/sympy,ga7g08/sympy,kevalds51/sympy,VaibhavAgarwalVA/sympy,pandeyadarsh/sympy,mcdaniel67/sympy,MechCoder/sympy,debugger22/sympy,kaushik94/sympy,Designist/sympy,iamutkarshtiwari/sympy,ga7g08/sympy,ahhda/sympy,toolforger/sympy,kumarkrishna/sympy,Davidjohnwilson/sympy,Gadal/sympy,Curious72/sympy,yukoba/sympy,iamutkarshtiwari/sympy,mafiya69/sympy,saurabhjn76/sympy,ahhda/sympy,Curious72/sympy,souravsingh/sympy,lindsayad/sympy,yashsharan/sympy,MechCoder/sympy,cswiercz/sympy,kaichogami/sympy,yashsharan/sympy,yukoba/sympy,Titan-C/sympy,rahuldan/sympy,grevutiu-gabriel/sympy,
jbbskinny/sympy,madan96/sympy,Titan-C/sympy,emon10005/sympy,hargup/sympy,AkademieOlympia/sympy,atsao72/sympy,yashsharan/sympy,toolforger/sympy,debugger22/sympy,emon10005/sympy,lindsayad/sympy,sahmed95/sympy,kaushik94/sympy,ahhda/sympy,abhiii5459/sympy,jbbskinny/sympy,wyom/sympy,cswiercz/sympy,chaffra/sympy,postvakje/sympy,mafiya69/sympy,saurabhjn76/sympy,atsao72/sympy,Vishluck/sympy,wanglongqi/sympy,Arafatk/sympy,abloomston/sympy,ChristinaZografou/sympy,Arafatk/sympy,asm666/sympy,abloomston/sympy,moble/sympy,Shaswat27/sympy,mcdaniel67/sympy,skidzo/sympy,jbbskinny/sympy,drufat/sympy,Arafatk/sympy,atreyv/sympy,AkademieOlympia/sympy,maniteja123/sympy,Gadal/sympy,mafiya69/sympy,sahmed95/sympy,moble/sympy,kaichogami/sympy,AkademieOlympia/sympy,jerli/sympy,souravsingh/sympy,sahmed95/sympy,cswiercz/sympy,iamutkarshtiwari/sympy,maniteja123/sympy,toolforger/sympy,grevutiu-gabriel/sympy,kevalds51/sympy,debugger22/sympy,postvakje/sympy,bukzor/sympy,asm666/sympy | sympy/assumptions/tests/test_newask.py | sympy/assumptions/tests/test_newask.py | from sympy.assumptions.newask import newask
from sympy import symbols, Q, assuming, Implies
from sympy.utilities.pytest import raises
x, y = symbols('x y')
def test_newask():
# No relevant facts
assert newask(Q.real(x), Q.real(x)) is True
assert newask(Q.real(x), ~Q.real(x)) is False
assert newask(Q.real(x)) is None
assert newask(Q.real(x), Q.positive(x)) is True
assert newask(Q.positive(x), Q.real(x)) is None
assert newask(Q.real(x), ~Q.positive(x)) is None
assert newask(Q.positive(x), ~Q.real(x)) is False
raises(ValueError, lambda: newask(Q.real(x), Q.real(x) & ~Q.real(x)))
with assuming(Q.positive(x)):
assert newask(Q.real(x)) is True
assert newask(~Q.positive(x)) is False
raises(ValueError, lambda: newask(Q.real(x), ~Q.positive(x)))
assert newask(Q.zero(x), Q.nonzero(x)) is False
assert newask(Q.positive(x), Q.zero(x)) is False
assert newask(Q.real(x), Q.zero(x)) is True
assert newask(Q.zero(x), Q.zero(x*y)) is None
assert newask(Q.zero(x*y), Q.zero(x))
def test_zero():
"""
Everything in this test doesn't work with ask, and most things would be
very difficult or impossible to make work under the current handlers
model.
"""
assert newask(Q.zero(x) | Q.zero(y), Q.zero(x*y)) is True
assert newask(Implies(Q.zero(x), Q.zero(x*y))) is True
# This one in particular requires computing the fixed-point of the
# relevant facts, because going from Q.nonzero(x*y) -> ~Q.zero(x*y) and
# Q.zero(x*y) -> Equivalent(Q.zero(x*y), Q.zero(x) | Q.zero(y)) takes two
# steps.
assert newask(Q.zero(x) | Q.zero(y), Q.nonzero(x*y)) is False
| from sympy.assumptions.newask import newask
from sympy import symbols, Q, assuming, Implies
from sympy.utilities.pytest import raises
x, y = symbols('x y')
def test_newask():
# No relevant facts
assert newask(Q.real(x), Q.real(x)) is True
assert newask(Q.real(x), ~Q.real(x)) is False
assert newask(Q.real(x)) is None
assert newask(Q.real(x), Q.positive(x)) is True
assert newask(Q.positive(x), Q.real(x)) is None
assert newask(Q.real(x), ~Q.positive(x)) is None
assert newask(Q.positive(x), ~Q.real(x)) is False
raises(ValueError, lambda: newask(Q.real(x), Q.real(x) & ~Q.real(x)))
with assuming(Q.positive(x)):
assert newask(Q.real(x)) is True
assert newask(~Q.positive(x)) is False
raises(ValueError, lambda: newask(Q.real(x), ~Q.positive(x)))
assert newask(Q.zero(x), Q.nonzero(x)) is False
assert newask(Q.positive(x), Q.zero(x)) is False
assert newask(Q.real(x), Q.zero(x)) is True
def test_zero():
"""
Everything in this test doesn't work with ask, and most things would be
very difficult or impossible to make work under the current handlers
model.
"""
assert newask(Q.zero(x), Q.zero(x*y)) is None
assert newask(Q.zero(x) | Q.zero(y), Q.zero(x*y)) is True
assert newask(Implies(Q.zero(x), Q.zero(x*y))) is True
# This one in particular requires computing the fixed-point of the
# relevant facts, because going from Q.nonzero(x*y) -> ~Q.zero(x*y) and
# Q.zero(x*y) -> Equivalent(Q.zero(x*y), Q.zero(x) | Q.zero(y)) takes two
# steps.
assert newask(Q.zero(x) | Q.zero(y), Q.nonzero(x*y)) is False
| bsd-3-clause | Python |
e2a3a54228822c92aed07e2c8c0c49baaf1a509f | fix test modes | gdsfactory/gdsfactory,gdsfactory/gdsfactory | gdsfactory/simulation/gtidy3d/tests/test_modes.py | gdsfactory/simulation/gtidy3d/tests/test_modes.py | import numpy as np
import gdsfactory.simulation.gtidy3d as gt
from gdsfactory.simulation.gtidy3d.modes import Waveguide, group_index, si, sio2
nm = 1e-3
def test_neff_cached():
c = Waveguide(
wavelength=1.55,
wg_width=0.5,
wg_thickness=0.22,
slab_thickness=0.0,
ncore=si,
nclad=sio2,
)
c.compute_modes()
n0 = abs(c.neffs[0])
assert np.isclose(n0, 2.46586, rtol=0.01), n0
def test_neff_no_cache():
c = Waveguide(
wavelength=1.55,
wg_width=0.5,
wg_thickness=0.22,
slab_thickness=0.0,
ncore=si,
nclad=sio2,
cache=False,
)
c.compute_modes()
n0 = abs(c.neffs[0])
assert np.isclose(n0, 2.46586, rtol=0.01), n0
def test_ng_no_cache():
wg_settings = dict(
wavelength=1.55,
wg_width=0.5,
wg_thickness=0.22,
slab_thickness=0.0,
ncore=si,
nclad=sio2,
cache=False,
)
ng = group_index(**wg_settings)
assert np.isclose(ng, 4.169, rtol=0.01), ng
def test_sweep_width(dataframe_regression) -> None:
df = gt.modes.sweep_width(
width1=200 * nm,
width2=1000 * nm,
steps=1,
wavelength=1.55,
wg_thickness=220 * nm,
slab_thickness=0 * nm,
ncore=gt.modes.si,
nclad=gt.modes.sio2,
cache=False,
)
if dataframe_regression:
dataframe_regression.check(df, default_tolerance=dict(atol=1e-3, rtol=1e-3))
if __name__ == "__main__":
test_ng_no_cache()
| import numpy as np
import gdsfactory.simulation.gtidy3d as gt
from gdsfactory.simulation.gtidy3d.modes import Waveguide, group_index, si, sio2
nm = 1e-3
def test_neff_cached():
c = Waveguide(
wavelength=1.55,
wg_width=0.5,
wg_thickness=0.22,
slab_thickness=0.0,
ncore=si,
nclad=sio2,
)
c.compute_modes()
n0 = abs(c.neffs[0])
assert np.isclose(n0, 2.46586, rtol=0.01), n0
def test_neff_no_cache():
c = Waveguide(
wavelength=1.55,
wg_width=0.5,
wg_thickness=0.22,
slab_thickness=0.0,
ncore=si,
nclad=sio2,
cache=None,
)
c.compute_modes()
n0 = abs(c.neffs[0])
assert np.isclose(n0, 2.46586, rtol=0.01), n0
def test_ng_no_cache():
wg_settings = dict(
wavelength=1.55,
wg_width=0.5,
wg_thickness=0.22,
slab_thickness=0.0,
ncore=si,
nclad=sio2,
cache=None,
)
ng = group_index(**wg_settings)
assert np.isclose(ng, 4.169, rtol=0.01), ng
def test_sweep_width(dataframe_regression) -> None:
df = gt.modes.sweep_width(
width1=200 * nm,
width2=1000 * nm,
steps=1,
wavelength=1.55,
wg_thickness=220 * nm,
slab_thickness=0 * nm,
ncore=gt.modes.si,
nclad=gt.modes.sio2,
cache=None,
)
if dataframe_regression:
dataframe_regression.check(df, default_tolerance=dict(atol=1e-3, rtol=1e-3))
if __name__ == "__main__":
test_ng_no_cache()
| mit | Python |
1882edc39ad1b98e432445233cc90e91284522ee | Rename spider class name to more specific | fpagnoux/berniemetrics,Rumel/berniemetrics,dpxxdp/berniemetrics,Rumel/berniemetrics,fpagnoux/berniemetrics,Rumel/berniemetrics,fpagnoux/berniemetrics,dpxxdp/berniemetrics,Rumel/berniemetrics,dpxxdp/berniemetrics,fpagnoux/berniemetrics,dpxxdp/berniemetrics | private/realclearpolitics-scraper/realclearpolitics/spiders/spider.py | private/realclearpolitics-scraper/realclearpolitics/spiders/spider.py | import scrapy
from realclearpolitics.items import TableItem
class RcpSpider(scrapy.Spider):
name = "realclearpoliticsSpider"
start_urls = []
def __init__(self, url):
self.url = url
def start_requests(self):
return [scrapy.FormRequest(self.url,
callback=self.parse)]
def parse(self, response):
table = response.css('.data').pop()
legend = table.css('tr')[0]
fieldNames = legend.css('th::text').extract()
nb_fields = len(fieldNames)
items = []
contentLines = table.css('tr')[1::]
for line in contentLines:
item = TableItem()
values = line.css('td::text, td span::text, td a::text').extract()
for i in range(nb_fields):
item[fieldNames[i]] = values[i]
items.append(item)
return items
| import scrapy
from realclearpolitics.items import TableItem
class spider(scrapy.Spider):
name = "realclearpoliticsSpider"
start_urls = []
def __init__(self, url):
self.url = url
def start_requests(self):
return [scrapy.FormRequest(self.url,
callback=self.parse)]
def parse(self, response):
table = response.css('.data').pop()
legend = table.css('tr')[0]
fieldNames = legend.css('th::text').extract()
nb_fields = len(fieldNames)
items = []
contentLines = table.css('tr')[1::]
for line in contentLines:
item = TableItem()
values = line.css('td::text, td span::text, td a::text').extract()
for i in range(nb_fields):
item[fieldNames[i]] = values[i]
items.append(item)
return items
| mit | Python |
757d17d4bd4b5803febd2f70e3edc8949dcc77a8 | Add the version number | yuzie007/ph_unfolder,yuzie007/upho | ph_unfolder/phonon/__init__.py | ph_unfolder/phonon/__init__.py | __version__ = '0.2.0'
| mit | Python | |
030af1a74ebf95ca6f255c1830cabcb75c2e32dc | Use create_or_resolve_commit in pipelines | valohai/valohai-cli | valohai_cli/commands/pipeline/run/run.py | valohai_cli/commands/pipeline/run/run.py | import contextlib
from typing import Optional
import click
from click import Context
from valohai_yaml.objs import Config, Pipeline
from valohai_cli.api import request
from valohai_cli.commands.pipeline.run.utils import build_edges, build_nodes, match_pipeline
from valohai_cli.ctx import get_project
from valohai_cli.messages import success
from valohai_cli.utils.commits import create_or_resolve_commit
@click.command(
context_settings=dict(ignore_unknown_options=True),
add_help_option=False
)
@click.argument('name', required=False, metavar='PIPELINE-NAME')
@click.option('--commit', '-c', default=None, metavar='SHA', help='The commit to use. Defaults to the current HEAD.')
@click.option('--title', '-c', default=None, help='The optional title of the pipeline run.')
@click.pass_context
def run(ctx: Context, name: Optional[str], commit: Optional[str], title: Optional[str]) -> None:
"""
Start a pipeline run.
"""
# Having to explicitly compare to `--help` is slightly weird, but it's because of the nested command thing.
if name == '--help' or not name:
click.echo(ctx.get_help(), color=ctx.color)
print_pipeline_list(ctx, commit)
ctx.exit()
return
project = get_project(require=True)
assert project
commit = create_or_resolve_commit(project, commit=commit, adhoc=False)
config = project.get_config()
matched_pipeline = match_pipeline(config, name)
pipeline = config.pipelines[matched_pipeline]
start_pipeline(config, pipeline, project.id, commit, title)
def print_pipeline_list(ctx: Context, commit: Optional[str]) -> None:
with contextlib.suppress(Exception): # If we fail to extract the pipeline list, it's not that big of a deal.
project = get_project(require=True)
assert project
config = project.get_config(commit_identifier=commit)
if config.pipelines:
click.secho('\nThese pipelines are available in the selected commit:\n', color=ctx.color, bold=True)
for pipeline_name in sorted(config.pipelines):
click.echo(f' * {pipeline_name}', color=ctx.color)
def start_pipeline(
config: Config,
pipeline: Pipeline,
project_id: str,
commit: str,
title: Optional[str] = None,
) -> None:
edges = build_edges(pipeline)
nodes = build_nodes(commit, config, pipeline)
payload = {
"edges": edges,
"nodes": nodes,
"project": project_id,
"title": title or pipeline.name,
}
resp = request(
method='post',
url='/api/v0/pipelines/',
json=payload,
).json()
success(f"Pipeline ={resp.get('counter')} queued. See {resp.get('urls').get('display')}")
| import contextlib
from typing import Optional
import click
from click import Context
from valohai_yaml.objs import Config, Pipeline
from valohai_cli.api import request
from valohai_cli.commands.pipeline.run.utils import build_edges, build_nodes, match_pipeline
from valohai_cli.ctx import get_project
from valohai_cli.messages import success
@click.command(
context_settings=dict(ignore_unknown_options=True),
add_help_option=False
)
@click.argument('name', required=False, metavar='PIPELINE-NAME')
@click.option('--commit', '-c', default=None, metavar='SHA', help='The commit to use. Defaults to the current HEAD.')
@click.option('--title', '-c', default=None, help='The optional title of the pipeline run.')
@click.pass_context
def run(ctx: Context, name: Optional[str], commit: Optional[str], title: Optional[str]) -> None:
"""
Start a pipeline run.
"""
# Having to explicitly compare to `--help` is slightly weird, but it's because of the nested command thing.
if name == '--help' or not name:
click.echo(ctx.get_help(), color=ctx.color)
print_pipeline_list(ctx, commit)
ctx.exit()
return
project = get_project(require=True)
assert project
commit = commit or project.resolve_commit()['identifier']
config = project.get_config()
matched_pipeline = match_pipeline(config, name)
pipeline = config.pipelines[matched_pipeline]
start_pipeline(config, pipeline, project.id, commit, title)
def print_pipeline_list(ctx: Context, commit: Optional[str]) -> None:
with contextlib.suppress(Exception): # If we fail to extract the pipeline list, it's not that big of a deal.
project = get_project(require=True)
assert project
config = project.get_config(commit_identifier=commit)
if config.pipelines:
click.secho('\nThese pipelines are available in the selected commit:\n', color=ctx.color, bold=True)
for pipeline_name in sorted(config.pipelines):
click.echo(f' * {pipeline_name}', color=ctx.color)
def start_pipeline(
config: Config,
pipeline: Pipeline,
project_id: str,
commit: str,
title: Optional[str] = None,
) -> None:
edges = build_edges(pipeline)
nodes = build_nodes(commit, config, pipeline)
payload = {
"edges": edges,
"nodes": nodes,
"project": project_id,
"title": title or pipeline.name,
}
resp = request(
method='post',
url='/api/v0/pipelines/',
json=payload,
).json()
success(f"Pipeline ={resp.get('counter')} queued. See {resp.get('urls').get('display')}")
| mit | Python |
673d6cecfaeb0e919f30997f793ee2bb18e399ee | Fix V2 hypervisor server schema attribute | jaspreetw/tempest,openstack/tempest,Vaidyanath/tempest,vedujoshi/tempest,NexusIS/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,tonyli71/tempest,hayderimran7/tempest,xbezdick/tempest,akash1808/tempest,roopali8/tempest,tudorvio/tempest,alinbalutoiu/tempest,flyingfish007/tempest,manasi24/jiocloud-tempest-qatempest,flyingfish007/tempest,izadorozhna/tempest,afaheem88/tempest_neutron,queria/my-tempest,pczerkas/tempest,afaheem88/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,yamt/tempest,sebrandon1/tempest,bigswitch/tempest,masayukig/tempest,Tesora/tesora-tempest,manasi24/jiocloud-tempest-qatempest,hpcloud-mon/tempest,bigswitch/tempest,ebagdasa/tempest,openstack/tempest,neerja28/Tempest,izadorozhna/tempest,Tesora/tesora-tempest,NexusIS/tempest,jamielennox/tempest,eggmaster/tempest,roopali8/tempest,rzarzynski/tempest,yamt/tempest,queria/my-tempest,rzarzynski/tempest,vedujoshi/tempest,manasi24/tempest,redhat-cip/tempest,Juniper/tempest,varunarya10/tempest,redhat-cip/tempest,hpcloud-mon/tempest,rakeshmi/tempest,masayukig/tempest,JioCloud/tempest,Juniper/tempest,Juraci/tempest,cisco-openstack/tempest,dkalashnik/tempest,LIS/lis-tempest,rakeshmi/tempest,CiscoSystems/tempest,dkalashnik/tempest,nunogt/tempest,Lilywei123/tempest,tudorvio/tempest,tonyli71/tempest,pandeyop/tempest,danielmellado/tempest,neerja28/Tempest,Juraci/tempest,LIS/lis-tempest,JioCloud/tempest,danielmellado/tempest,zsoltdudas/lis-tempest,pczerkas/tempest,zsoltdudas/lis-tempest,eggmaster/tempest,manasi24/tempest,jamielennox/tempest,sebrandon1/tempest,afaheem88/tempest,varunarya10/tempest,afaheem88/tempest_neutron,Lilywei123/tempest,cisco-openstack/tempest,nunogt/tempest,pandeyop/tempest,hayderimran7/tempest,Vaidyanath/tempest,alinbalutoiu/tempest,ebagdasa/tempest,akash1808/tempest,xbezdick/tempest,jaspreetw/tempest,CiscoSystems/tempest | tempest/api_schema/response/compute/v2/hypervisors.py | 
tempest/api_schema/response/compute/v2/hypervisors.py | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute import hypervisors
hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_detail)
# Defining extra attributes for V3 show hypervisor schema
hypervisors_servers['response_body']['properties']['hypervisors']['items'][
'properties']['servers'] = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'uuid': {'type': 'string'},
'name': {'type': 'string'}
}
}
}
# In V2 API, if there is no servers (VM) on the Hypervisor host then 'servers'
# attribute will not be present in response body So it is not 'required'.
| # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute import hypervisors
hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_detail)
# Defining extra attributes for V3 show hypervisor schema
hypervisors_servers['response_body']['properties']['hypervisors']['items'][
'properties']['servers'] = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
# NOTE: Now the type of 'id' is integer,
# but here allows 'string' also because we
# will be able to change it to 'uuid' in
# the future.
'id': {'type': ['integer', 'string']},
'name': {'type': 'string'}
}
}
}
# In V2 API, if there is no servers (VM) on the Hypervisor host then 'servers'
# attribute will not be present in response body So it is not 'required'.
| apache-2.0 | Python |
db31d216d1cff540be8382617445a95c1113f69b | fix unicode literal in v4.test_convert | ipython/ipython,ipython/ipython | IPython/nbformat/v4/tests/test_convert.py | IPython/nbformat/v4/tests/test_convert.py | # -*- coding: utf-8 -*-
import copy
import nose.tools as nt
from IPython.nbformat import validate
from .. import convert
from . import nbexamples
from IPython.nbformat.v3.tests import nbexamples as v3examples
from IPython.nbformat import v3, v4
def test_upgrade_notebook():
nb03 = copy.deepcopy(v3examples.nb0)
validate(nb03)
nb04 = convert.upgrade(nb03)
validate(nb04)
def test_downgrade_notebook():
nb04 = copy.deepcopy(nbexamples.nb0)
validate(nb04)
nb03 = convert.downgrade(nb04)
validate(nb03)
def test_upgrade_heading():
v3h = v3.new_heading_cell
v4m = v4.new_markdown_cell
for v3cell, expected in [
(
v3h(source='foo', level=1),
v4m(source='# foo'),
),
(
v3h(source='foo\nbar\nmulti-line\n', level=4),
v4m(source='#### foo bar multi-line'),
),
(
v3h(source=u'ünìcö∂e–cønvërsioñ', level=4),
v4m(source=u'#### ünìcö∂e–cønvërsioñ'),
),
]:
upgraded = convert.upgrade_cell(v3cell)
nt.assert_equal(upgraded, expected)
def test_downgrade_heading():
v3h = v3.new_heading_cell
v4m = v4.new_markdown_cell
v3m = lambda source: v3.new_text_cell('markdown', source)
for v4cell, expected in [
(
v4m(source='# foo'),
v3h(source='foo', level=1),
),
(
v4m(source='#foo'),
v3h(source='foo', level=1),
),
(
v4m(source='#\tfoo'),
v3h(source='foo', level=1),
),
(
v4m(source='# \t foo'),
v3h(source='foo', level=1),
),
(
v4m(source='# foo\nbar'),
v3m(source='# foo\nbar'),
),
]:
downgraded = convert.downgrade_cell(v4cell)
nt.assert_equal(downgraded, expected)
| # -*- coding: utf-8 -*-
import copy
import nose.tools as nt
from IPython.nbformat import validate
from .. import convert
from . import nbexamples
from IPython.nbformat.v3.tests import nbexamples as v3examples
from IPython.nbformat import v3, v4
def test_upgrade_notebook():
nb03 = copy.deepcopy(v3examples.nb0)
validate(nb03)
nb04 = convert.upgrade(nb03)
validate(nb04)
def test_downgrade_notebook():
nb04 = copy.deepcopy(nbexamples.nb0)
validate(nb04)
nb03 = convert.downgrade(nb04)
validate(nb03)
def test_upgrade_heading():
v3h = v3.new_heading_cell
v4m = v4.new_markdown_cell
for v3cell, expected in [
(
v3h(source='foo', level=1),
v4m(source='# foo'),
),
(
v3h(source='foo\nbar\nmulti-line\n', level=4),
v4m(source='#### foo bar multi-line'),
),
(
v3h(source='ünìcö∂e–cønvërsioñ', level=4),
v4m(source=u'#### ünìcö∂e–cønvërsioñ'),
),
]:
upgraded = convert.upgrade_cell(v3cell)
nt.assert_equal(upgraded, expected)
def test_downgrade_heading():
v3h = v3.new_heading_cell
v4m = v4.new_markdown_cell
v3m = lambda source: v3.new_text_cell('markdown', source)
for v4cell, expected in [
(
v4m(source='# foo'),
v3h(source='foo', level=1),
),
(
v4m(source='#foo'),
v3h(source='foo', level=1),
),
(
v4m(source='#\tfoo'),
v3h(source='foo', level=1),
),
(
v4m(source='# \t foo'),
v3h(source='foo', level=1),
),
(
v4m(source='# foo\nbar'),
v3m(source='# foo\nbar'),
),
]:
downgraded = convert.downgrade_cell(v4cell)
nt.assert_equal(downgraded, expected)
| bsd-3-clause | Python |
19fae1fc2703aa24f9d26ad1426e1ac3c1acd232 | Add alternative solution for 'Keyboard Row' problem | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/keyboard-row.py | Python/keyboard-row.py | # Time: O(n)
# Space: O(1)
# Given a List of words, return the words that can be typed
# using letters of alphabet on only one row's of American keyboard like the image below.
#
# Example 1:
# Input: ["Hello", "Alaska", "Dad", "Peace"]
# Output: ["Alaska", "Dad"]
# Note:
# You may use one character in the keyboard more than once.
# You may assume the input string will only contain letters of alphabet.
class Solution(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
rows = [set(['q', 'w', 'e', 'r', 't', 'y','u', 'i', 'o', 'p']),
set(['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l']),
set(['z', 'x', 'c', 'v', 'b' ,'n', 'm'])]
result = []
for word in words:
k = 0
for i in xrange(len(rows)):
if word[0].lower() in rows[i]:
k = i
break
for c in word:
if c.lower() not in rows[k]:
break
else:
result.append(word)
return result
class Solution2(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
keyboard_rows = ['qwertyuiop', 'asdfghjkl', 'zxcvbnm']
single_row_words = []
for word in words:
for row in keyboard_rows:
if all(letter in row for letter in word.lower()):
single_row_words.append(word)
return single_row_words
| # Time: O(n)
# Space: O(1)
# Given a List of words, return the words that can be typed
# using letters of alphabet on only one row's of American keyboard like the image below.
#
# Example 1:
# Input: ["Hello", "Alaska", "Dad", "Peace"]
# Output: ["Alaska", "Dad"]
# Note:
# You may use one character in the keyboard more than once.
# You may assume the input string will only contain letters of alphabet.
class Solution(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
rows = [set(['q', 'w', 'e', 'r', 't', 'y','u', 'i', 'o', 'p']),
set(['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l']),
set(['z', 'x', 'c', 'v', 'b' ,'n', 'm'])]
result = []
for word in words:
k = 0
for i in xrange(len(rows)):
if word[0].lower() in rows[i]:
k = i
break
for c in word:
if c.lower() not in rows[k]:
break
else:
result.append(word)
return result
| mit | Python |
10abb657bf3ff008785820ffd16d26a27c16dd5e | Increase code readability | kirnap/algorithms-in-python | count_number_of_inversions.py | count_number_of_inversions.py | """
This is for an educational purposes
This is a good practice for intermediate python learners.
The definition of the problem: Suppose you are given a list of n elements with an arbitrary order and
you are asked to find the number of inversions in a given list by using Divide & Conquer Paradigm in computer science
Here is the definition of inversion: Number of pairs(i, j) of list indices i < j and List[i] > List[j]
for example it is given (2,3,5,6,1) the inversions are: (2,1) and (6,1)
Please note that you need a pen a piece of paper first to understand the problem and algorithm which also is explained
in https://class.coursera.org/algo-004/lecture/16?s=e
I highly recommend first to understand your algorithm in the paper and then get into the code!
"""
def sort_and_count(iterable):
"""
This function recursively calls itself to break up the list into two parts and then recursively recover them
the actual algorithm is implemented in this function
"""
if len(iterable) == 1:
return iterable, 0
else:
(sorted_left, left_inversion_number) = sort_and_count(iterable[:(len(iterable) / 2)])
(sorted_right, right_inversion_number) = sort_and_count(iterable[len(iterable) / 2:])
(result, split_inversion_number) = merge_and_count(sorted_left,sorted_right)
return result,left_inversion_number + right_inversion_number + split_inversion_number
def find_inversions(iterable):
"""
Since sort_and_count function returns a tuple and I just need the number of inversions first part is enough for
me
List:parameter
Number:return
"""
return sort_and_count(iterable)[1]
def merge_and_count(first, second):
"""
It is explained during lecture that while implementing merge part there is a single path that
counts the inversions automatically if the second part of the
"""
i = 0
j = 0
ret = []
number_of_inversion = 0
while len(ret) != len(first) + len(second):
if i == len(first):
ret += second[j:]
elif j == len(second):
ret += first[i:]
else:
if first[i] < second[j]:
ret.append(first[i])
i += 1
else:
ret.append(second[j])
j += 1
number_of_inversion += len(first) - i
return ret,number_of_inversion
| """
This is for an educational purposes
This is a good practice for intermediate python learners.
The definition of the problem: Suppose you are given a list of n elements with an arbitrary order and
you are asked to find the number of inversions in a given list by using Divide & Conquer Paradigm in computer science
Here is the definition of inversion: Number of pairs(i, j) of list indices i < j and List[i] > List[j]
for example it is given (2,3,5,6,1) the inversions are: (2,1) and (6,1)
Please note that you need a pen a piece of paper first to understand the problem and algorithm which also is explained
in https://class.coursera.org/algo-004/lecture/16?s=e
I highly recommend first to understand your algorithm in the paper and then get into the code!
"""
def sort_and_count(iterable):
"""
This function recursively calls itself to break up the list into two parts and then recursively recover them
the actual algorithm is implemented in this function
"""
if len(iterable) == 1:
return iterable, 0
else:
(sorted_left, left_inversion_number) = sort_and_count(iterable[:(len(iterable) / 2)])
(sorted_right, right_inversion_number) = sort_and_count(iterable[len(iterable) / 2:])
(result, split_inversion_number) = merge_and_count(sorted_left,sorted_right)
return result,left_inversion_number + right_inversion_number + split_inversion_number
def find_inversions(iterable):
"""
Since sort_and_count function returns a tuple and I just need the number of inversions first part is enough for
me
List:parameter
Number:return
"""
return sort_and_count(iterable)[1]
def merge_and_count(first, second):
"""
It is explained during lecture that while implementing merge part there is a single path that
counts the inversions automatically if the second part of the
"""
i = 0
j = 0
ret = []
number_of_inversion = 0
while len(ret) != len(first) + len(second):
if i == len(first):
ret += second[j:]
elif j == len(second):
ret += first[i:]
else:
if first[i] < second[j]:
ret.append(first[i])
i += 1
else:
ret.append(second[j])
j += 1
number_of_inversion += len(first) - i
return ret,number_of_inversion
| mit | Python |
69c6759625c8faacd2ce5194c9845349c61dda7f | Use os_helper to fix ImportError in Python 3.10 | python/importlib_metadata | tests/py39compat.py | tests/py39compat.py | try:
from test.support.os_helper import FS_NONASCII
except ImportError:
from test.support import FS_NONASCII # noqa
| try:
from test.support.os_helpers import FS_NONASCII
except ImportError:
from test.support import FS_NONASCII # noqa
| apache-2.0 | Python |
68f4d0351cb02260f942ab9f4cda0d81e43bb6a6 | Resolve PR comments | zyantific/zydis,zyantific/zydis | tests/regression.py | tests/regression.py | import os
import sys
import shlex
from subprocess import Popen, PIPE
import argparse
TEST_CASE_DIRECTORY = "./cases"
def get_exitcode_stdout_stderr(cmd):
"""
Executes an external command and returns the exitcode, stdout and stderr.
"""
args = shlex.split(cmd)
proc = Popen(args, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
exitcode = proc.returncode
return exitcode, out, err
parser = argparse.ArgumentParser(description="Regression testing.")
parser.add_argument(dest="operation", choices=["test", "rebase"])
parser.add_argument(dest="zydis_info_path", type=str)
args = parser.parse_args()
has_failed = False
for case in os.listdir(TEST_CASE_DIRECTORY):
if not case.endswith(".in"):
continue
path = os.path.join(TEST_CASE_DIRECTORY, case)
print(path)
with open(path, mode="r") as f:
payload = f.read()
exitcode, out, err = get_exitcode_stdout_stderr(f"{args.zydis_info_path} {payload}")
pre, ext = os.path.splitext(case)
path = os.path.join(TEST_CASE_DIRECTORY, pre + ".out")
if args.operation == "rebase":
with open(path, mode="wb") as f:
f.write(out)
continue
try:
with open(path, mode="rb") as f:
s = f.read()
if s != out:
print(f"FAILED: '{case}' [{payload}]")
has_failed = True
except FileNotFoundError:
print(f"FAILED: '{case}' [Output file missing]")
has_failed = True
sys.exit(-1 if has_failed else 0) | import os
import sys
import shlex
from subprocess import Popen, PIPE
import argparse
TEST_CASE_DIRECTORY = "./cases"
def get_exitcode_stdout_stderr(cmd):
"""
Executes an external command and returns the exitcode, stdout and stderr.
"""
args = shlex.split(cmd)
proc = Popen(args, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
exitcode = proc.returncode
return exitcode, out, err
parser = argparse.ArgumentParser(description="Regression testing.")
parser.add_argument(dest="operation", choices=["test", "rebase"])
parser.add_argument(dest="zydis_info_path", type=str)
args = parser.parse_args()
has_failed = False
for case in os.listdir(TEST_CASE_DIRECTORY):
if case.endswith(".in"):
path = os.path.join(TEST_CASE_DIRECTORY, case)
print(path)
with open(path, mode="r") as f:
payload = f.read()
exitcode, out, err = get_exitcode_stdout_stderr(f"{args.zydis_info_path} {payload}")
pre, ext = os.path.splitext(case)
path = os.path.join(TEST_CASE_DIRECTORY, pre + ".out")
if args.operation == "rebase":
with open(path, mode="wb") as f:
f.write(out)
continue
try:
with open(path, mode="rb") as f:
s = f.read()
if s != out:
print(f"FAILED: '{case}' [{payload}]")
has_failed = True
except:
print(f"FAILED: '{case}' [Output file missing]")
has_failed = True
sys.exit(-1 if has_failed else 0) | mit | Python |
4be78b535424f53c1fa313d2a5d483e729b0497b | test data cleanup | alexandrul-ci/robotframework,stasiek/robotframework,alexandrul-ci/robotframework,un33k/robotframework,suvarnaraju/robotframework,dkentw/robotframework,Colorfulstan/robotframework,moto-timo/robotframework,Colorfulstan/robotframework,moto-timo/robotframework,snyderr/robotframework,kyle1986/robortframe,yonglehou/robotframework,SivagnanamCiena/robotframework,jaloren/robotframework,kyle1986/robortframe,yahman72/robotframework,jaloren/robotframework,jaloren/robotframework,snyderr/robotframework,eric-stanley/robotframework,un33k/robotframework,rwarren14/robotframework,robotframework/robotframework,xiaokeng/robotframework,xiaokeng/robotframework,rwarren14/robotframework,jorik041/robotframework,userzimmermann/robotframework,HelioGuilherme66/robotframework,kurtdawg24/robotframework,yonglehou/robotframework,stasiek/robotframework,edbrannin/robotframework,fingeronthebutton/robotframework,robotframework/robotframework,joongh/robotframework,jaloren/robotframework,xiaokeng/robotframework,jorik041/robotframework,xiaokeng/robotframework,kurtdawg24/robotframework,nmrao/robotframework,synsun/robotframework,rwarren14/robotframework,un33k/robotframework,edbrannin/robotframework,un33k/robotframework,nmrao/robotframework,ashishdeshpande/robotframework,kyle1986/robortframe,moto-timo/robotframework,moto-timo/robotframework,synsun/robotframework,dkentw/robotframework,dkentw/robotframework,edbrannin/robotframework,userzimmermann/robotframework,dkentw/robotframework,wojciechtanski/robotframework,wojciechtanski/robotframework,SivagnanamCiena/robotframework,yahman72/robotframework,snyderr/robotframework,SivagnanamCiena/robotframework,ChrisHirsch/robotframework,JackNokia/robotframework,moto-timo/robotframework,JackNokia/robotframework,userzimmermann/robotframework,nmrao/robotframework,jorik041/robotframework,kyle1986/robortframe,Colorfulstan/robotframework,jorik041/robotframework,HelioGuilherme66/robotframework,wojciechtanski/robotfra
mework,suvarnaraju/robotframework,jorik041/robotframework,alexandrul-ci/robotframework,kurtdawg24/robotframework,suvarnaraju/robotframework,eric-stanley/robotframework,ChrisHirsch/robotframework,suvarnaraju/robotframework,JackNokia/robotframework,synsun/robotframework,Colorfulstan/robotframework,un33k/robotframework,SivagnanamCiena/robotframework,stasiek/robotframework,kurtdawg24/robotframework,yonglehou/robotframework,jaloren/robotframework,Colorfulstan/robotframework,snyderr/robotframework,joongh/robotframework,wojciechtanski/robotframework,fingeronthebutton/robotframework,yahman72/robotframework,edbrannin/robotframework,alexandrul-ci/robotframework,dkentw/robotframework,suvarnaraju/robotframework,rwarren14/robotframework,synsun/robotframework,fingeronthebutton/robotframework,userzimmermann/robotframework,ashishdeshpande/robotframework,stasiek/robotframework,joongh/robotframework,nmrao/robotframework,yonglehou/robotframework,HelioGuilherme66/robotframework,ChrisHirsch/robotframework,eric-stanley/robotframework,edbrannin/robotframework,rwarren14/robotframework,snyderr/robotframework,yahman72/robotframework,kurtdawg24/robotframework,xiaokeng/robotframework,JackNokia/robotframework,yonglehou/robotframework,ChrisHirsch/robotframework,JackNokia/robotframework,fingeronthebutton/robotframework,joongh/robotframework,stasiek/robotframework,userzimmermann/robotframework,robotframework/robotframework,wojciechtanski/robotframework,eric-stanley/robotframework,joongh/robotframework,alexandrul-ci/robotframework,nmrao/robotframework,kyle1986/robortframe,fingeronthebutton/robotframework,yahman72/robotframework,ChrisHirsch/robotframework,ashishdeshpande/robotframework,ashishdeshpande/robotframework,synsun/robotframework,ashishdeshpande/robotframework,SivagnanamCiena/robotframework | tools/remoteserver/test/atest/arguments.py | tools/remoteserver/test/atest/arguments.py | class MyObject:
def __init__(self, index=0):
self.index = index
def __str__(self):
return '<MyObject%s>' % (self.index or '')
UNICODE = (u'Hyv\u00E4\u00E4 y\u00F6t\u00E4. '
u'\u0421\u043F\u0430\u0441\u0438\u0431\u043E!')
LIST_WITH_OBJECTS = [MyObject(1), MyObject(2)]
NESTED_LIST = [[True, False], [[1, None, MyObject(), {}]]]
NESTED_TUPLE = ((True, False), [(1, None, MyObject(), {})])
DICT_WITH_OBJECTS = {'As value': MyObject(1), MyObject(2): 'As key'}
NESTED_DICT = {1: {None: False},
2: {'A': {'n': None},
'B': {'o': MyObject(), 'e': {}}}}
| # Can be used in the test data like ${MyObject()} or ${MyObject(1)}
class MyObject:
def __init__(self, index=''):
self.index = index
def __str__(self):
return '<MyObject%s>' % self.index
UNICODE = (u'Hyv\u00E4\u00E4 y\u00F6t\u00E4. '
u'\u0421\u043F\u0430\u0441\u0438\u0431\u043E!')
LIST_WITH_OBJECTS = [MyObject(1), MyObject(2)]
NESTED_LIST = [ [True, False], [[1, None, MyObject(), {}]] ]
NESTED_TUPLE = ( (True, False), [(1, None, MyObject(), {})] )
DICT_WITH_OBJECTS = {'As value': MyObject(1), MyObject(2): 'As key'}
NESTED_DICT = { 1: {None: False},
2: {'A': {'n': None},
'B': {'o': MyObject(), 'e': {}}} }
| apache-2.0 | Python |
9d3d6c0b058e7c9e1c386c3726305596f086e594 | Improve command string of convert_to_json (#3477) | facebookresearch/ParlAI,facebookresearch/ParlAI,facebookresearch/ParlAI,facebookresearch/ParlAI,facebookresearch/ParlAI | parlai/scripts/convert_data_to_json_format.py | parlai/scripts/convert_data_to_json_format.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Converts data used in a task to json format. (Same as "Conversation" class; ie, for use
in ACUTE-eval)
Specify the task with `-t`. By default, this code will save to a file with prefix "tmp".
To change the prefix, set `--world-logs`.
"""
from parlai.core.script import register_script
from parlai.scripts.eval_model import EvalModel
@register_script('convert_to_json', hidden=True)
class DumpDataToConversations(EvalModel):
@classmethod
def setup_args(cls):
parser = EvalModel.setup_args()
parser.description = 'Convert data to json format'
parser.set_defaults(model="repeat_label")
parser.set_defaults(world_logs="tmp")
return parser
if __name__ == '__main__':
DumpDataToConversations.main()
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Converts data used in a task to json format. (Same as "Conversation" class; ie, for use
in ACUTE-eval)
Specify the task with `-t`. By default, this code will save to a file with prefix "tmp".
To change the prefix, set `--world-logs`.
"""
from parlai.core.script import register_script
from parlai.scripts.eval_model import EvalModel
@register_script('dump_data_to_conversations')
class DumpDataToConversations(EvalModel):
@classmethod
def setup_args(cls):
parser = EvalModel.setup_args()
parser.set_defaults(model="repeat_label")
parser.set_defaults(world_logs="tmp")
return parser
if __name__ == '__main__':
DumpDataToConversations.main()
| mit | Python |
f5d9a361ed4eb6862956604334c636191c7b2241 | Fix future only jobs for api. | cshields/satnogs-network,cshields/satnogs-network,cshields/satnogs-network,cshields/satnogs-network | network/api/views.py | network/api/views.py | from django.utils.timezone import now
import django_filters
from rest_framework import viewsets, mixins
from network.api.perms import StationOwnerCanEditPermission
from network.api import serializers
from network.base.models import (Antenna, Data, Observation, Satellite,
Station, Transponder)
class AntennaView(viewsets.ModelViewSet):
queryset = Antenna.objects.all()
serializer_class = serializers.AntennaSerializer
class StationView(viewsets.ModelViewSet):
queryset = Station.objects.all()
serializer_class = serializers.StationSerializer
class SatelliteView(viewsets.ModelViewSet):
queryset = Satellite.objects.all()
serializer_class = serializers.SatelliteSerializer
class TransponderView(viewsets.ModelViewSet):
queryset = Transponder.objects.all()
serializer_class = serializers.TransponderSerializer
class ObservationView(viewsets.ModelViewSet):
queryset = Observation.objects.all()
serializer_class = serializers.ObservationSerializer
class DataFilter(django_filters.FilterSet):
class Meta:
model = Data
fields = ['start', 'end', 'ground_station']
class DataView(viewsets.ReadOnlyModelViewSet, mixins.UpdateModelMixin):
queryset = Data.objects.all()
serializer_class = serializers.DataSerializer
filter_class = DataFilter
permission_classes = [
StationOwnerCanEditPermission
]
def get_queryset(self):
payload = self.request.QUERY_PARAMS.get('payload', None)
if payload == '':
return self.queryset.filter(payload='')
return super(DataView, self).get_queryset()
class JobView(viewsets.ReadOnlyModelViewSet):
queryset = Data.objects.filter(payload='')
serializer_class = serializers.JobSerializer
filter_class = DataFilter
filter_fields = ('ground_station')
def get_queryset(self):
return self.queryset.filter(start__gte=now())
| import django_filters
from rest_framework import viewsets, mixins
from network.api.perms import StationOwnerCanEditPermission
from network.api import serializers
from network.base.models import (Antenna, Data, Observation, Satellite,
Station, Transponder)
class AntennaView(viewsets.ModelViewSet):
queryset = Antenna.objects.all()
serializer_class = serializers.AntennaSerializer
class StationView(viewsets.ModelViewSet):
queryset = Station.objects.all()
serializer_class = serializers.StationSerializer
class SatelliteView(viewsets.ModelViewSet):
queryset = Satellite.objects.all()
serializer_class = serializers.SatelliteSerializer
class TransponderView(viewsets.ModelViewSet):
queryset = Transponder.objects.all()
serializer_class = serializers.TransponderSerializer
class ObservationView(viewsets.ModelViewSet):
queryset = Observation.objects.all()
serializer_class = serializers.ObservationSerializer
class DataFilter(django_filters.FilterSet):
class Meta:
model = Data
fields = ['start', 'end', 'ground_station']
class DataView(viewsets.ReadOnlyModelViewSet, mixins.UpdateModelMixin):
queryset = Data.objects.all()
serializer_class = serializers.DataSerializer
filter_class = DataFilter
permission_classes = [
StationOwnerCanEditPermission
]
def get_queryset(self):
payload = self.request.QUERY_PARAMS.get('payload', None)
if payload == '':
return self.queryset.filter(payload='')
return super(DataView, self).get_queryset()
class JobView(viewsets.ReadOnlyModelViewSet):
queryset = Data.objects.filter(payload='')
serializer_class = serializers.JobSerializer
filter_class = DataFilter
filter_fields = ('ground_station')
| agpl-3.0 | Python |
af3e10c1f717eefca0227736404dbe240687e6e8 | use an absolute scale | xiph/rav1e,xiph/rav1e | tools/draw-importances.py | tools/draw-importances.py | #!/usr/bin/env python3
import struct
import sys
from os.path import splitext
import numpy as np
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
# Renders block importances output by ContextInner::compute_lookahead_data().
# Usage:
# cargo run --features=dump_lookahead_data <input.y4m> -o /dev/null
# draw-importances.py <i-hres.png> <i-imps.bin>
# will output i-imps.png.
with open(sys.argv[2], 'rb') as f:
contents = f.read()
rows, cols = struct.unpack('qq', contents[:16])
imps = np.frombuffer(contents[16:], dtype=np.float32).reshape((rows, cols))
# Use a fixed scale where anything >= 10 cannot be distinguished
# to allow visually comparing multiple pictures
max_imp = 10 ## Replace by `np.max(imps)` for relative scaling
frame_size_multiplier = 4
frame = Image.open(sys.argv[1])
frame = frame.resize((frame.width * frame_size_multiplier, frame.height * frame_size_multiplier))
frame = frame.convert(mode='RGB')
mv_original_block_size = 8 // 2 # The importances are in 8×8 blocks, but we use half-resolution images.
mv_block_size = mv_original_block_size * frame_size_multiplier
draw = ImageDraw.Draw(frame, mode='RGBA')
# Draw the grid.
for i in range(0, frame.width, mv_block_size):
draw.line(((i, 0), (i, frame.height)), fill=(0, 0, 0, 255))
for i in range(0, frame.height, mv_block_size):
draw.line(((0, i), (frame.width, i)), fill=(0, 0, 0, 255))
# Draw the importances.
if max_imp > 0:
for y in range(rows):
for x in range(cols):
imp = imps[y, x]
top_left = (x * mv_block_size, y * mv_block_size)
bottom_right = (top_left[0] + mv_block_size, top_left[1] + mv_block_size)
draw.rectangle((top_left, bottom_right), fill=(int(imp / max_imp * 255), 0, 0, 128))
fig = plt.figure(figsize=(frame.width, frame.height), dpi=1)
fig.figimage(frame, cmap='gray')
plt.savefig(splitext(sys.argv[2])[0] + '.png', bbox_inches='tight')
| #!/usr/bin/env python3
import struct
import sys
from os.path import splitext
import numpy as np
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
# Renders block importances output by ContextInner::compute_lookahead_data().
# Usage:
# cargo run --features=dump_lookahead_data <input.y4m> -o /dev/null
# draw-importances.py <i-hres.png> <i-imps.bin>
# will output i-imps.png.
with open(sys.argv[2], 'rb') as f:
contents = f.read()
rows, cols = struct.unpack('qq', contents[:16])
imps = np.frombuffer(contents[16:], dtype=np.float32).reshape((rows, cols))
max_imp = np.max(imps)
frame_size_multiplier = 4
frame = Image.open(sys.argv[1])
frame = frame.resize((frame.width * frame_size_multiplier, frame.height * frame_size_multiplier))
frame = frame.convert(mode='RGB')
mv_original_block_size = 8 // 2 # The importances are in 8×8 blocks, but we use half-resolution images.
mv_block_size = mv_original_block_size * frame_size_multiplier
draw = ImageDraw.Draw(frame, mode='RGBA')
# Draw the grid.
for i in range(0, frame.width, mv_block_size):
draw.line(((i, 0), (i, frame.height)), fill=(0, 0, 0, 255))
for i in range(0, frame.height, mv_block_size):
draw.line(((0, i), (frame.width, i)), fill=(0, 0, 0, 255))
# Draw the importances.
if max_imp > 0:
for y in range(rows):
for x in range(cols):
imp = imps[y, x]
top_left = (x * mv_block_size, y * mv_block_size)
bottom_right = (top_left[0] + mv_block_size, top_left[1] + mv_block_size)
draw.rectangle((top_left, bottom_right), fill=(int(imp / max_imp * 255), 0, 0, 128))
fig = plt.figure(figsize=(frame.width, frame.height), dpi=1)
fig.figimage(frame, cmap='gray')
plt.savefig(splitext(sys.argv[2])[0] + '.png', bbox_inches='tight')
| bsd-2-clause | Python |
25cf3528b96fb93c6622b8b9c31d4de8cddd5fa8 | comment explaining why we are sending a date | ecohealthalliance/EpiTator | annotator/jvm_nlp_annotator.py | annotator/jvm_nlp_annotator.py | #!/usr/bin/env python
"""Annotator to add NLP annotations from REST calls to a webservice"""
import requests
from annotator import *
class JVMNLPAnnotator():
default_base_url = 'http://localhost:8080'
annotate_path = '/annotate/getNLPAnnotations'
def __init__(self, tiers, base_url=None):
"""Specify a list of tiers that we want to transfer from the service
result to the AnnoDoc. Specify base_url if it differs from the default.
"""
if base_url is not None:
self.base_url = base_url
else:
self.base_url = self.default_base_url
self.tiers = tiers
def annotate(self, doc):
"""Annotate a document by taking the text and sending it to the
anootation server.
"""
data = {'text': doc.text}
# Stanford SUTime can use a reference date to canonicalize relative
# dates like "tomorrow." If we have a doc.date for this document,
# send it along and the jvm-nlp will not attempt to find a reference
# date in the beginning of the document.
if doc.date:
data['referenceDate'] = doc.date.strftime('%Y-%m-%d')
request = requests.post(self.base_url + self.annotate_path, data)
spans = []
for tier in self.tiers:
for request_span in request.json()['tiers'][tier]['spans']:
span = AnnoSpan(request_span['start'],
request_span['stop'],
doc,
label=request_span['label'])
span.type = request_span['type']
spans.append(span)
doc.tiers[tier] = AnnoTier(spans)
return doc
| #!/usr/bin/env python
"""Annotator to add NLP annotations from REST calls to a webservice"""
import requests
from annotator import *
class JVMNLPAnnotator():
default_base_url = 'http://localhost:8080'
annotate_path = '/annotate/getNLPAnnotations'
def __init__(self, tiers, base_url=None):
"""Specify a list of tiers that we want to transfer from the service
result to the AnnoDoc. Specify base_url if it differs from the default.
"""
if base_url is not None:
self.base_url = base_url
else:
self.base_url = self.default_base_url
self.tiers = tiers
def annotate(self, doc):
"""Annotate a document by taking the text and sending it to the
anootation server.
"""
data = {'text': doc.text}
if doc.date:
data['referenceDate'] = doc.date.strftime('%Y-%m-%d')
request = requests.post(self.base_url + self.annotate_path, data)
spans = []
for tier in self.tiers:
for request_span in request.json()['tiers'][tier]['spans']:
span = AnnoSpan(request_span['start'],
request_span['stop'],
doc,
label=request_span['label'])
span.type = request_span['type']
spans.append(span)
doc.tiers[tier] = AnnoTier(spans)
return doc
| apache-2.0 | Python |
c33c5795ccaf8d4dbc1470cd3c32d9fdddd980bb | Correct unit tests for Actor-related exceptions | hypatia-software-org/hypatia-engine,lillian-lemmer/hypatia,hypatia-software-org/hypatia-engine,lillian-lemmer/hypatia | tests/test_actor.py | tests/test_actor.py | # This module is part of Hypatia and is released under the
# MIT license: http://opensource.org/licenses/MIT
"""py.test unit testing for hypatia/actor.py
Run py.test on this module to assert hypatia.actor
is completely functional.
"""
import os
import pygame
import pytest
from hypatia import actor
from hypatia import physics
from hypatia import constants
from hypatia import sprites
try:
os.chdir('demo')
except OSError:
pass
class TestActor(object):
"""A grouping of tests for the actor.Actor class.
"""
def test_blah(self):
pass
def test_no_response():
"""Test the exception class.
See Also:
* actor.NoActorResponse
* actor.NoResponseReason
* actor.ActorCannotTalk
"""
# If the response reason is invalid a typeerror should be raised
with pytest.raises(TypeError):
raise actor.NoActorResponse(2)
# Give NoResponse a valid reason and see if it raises NoResponse
with pytest.raises(actor.NoActorResponse):
raise actor.NoActorResponse(actor.NoResponseReason.no_say_text)
# Make sure the reason attribute is accessible and is set
# to the supplied and valid reason.
try:
raise actor.NoActorResponse(actor.NoResponseReason.no_say_text)
except actor.NoActorResponse as no_response:
assert no_response.reason == actor.NoResponseReason.no_say_text
def test_actor():
"""Test actor.Actor class.
This is bad and outdated and bad.
"""
walkabout = sprites.Walkabout('debug')
velocity = physics.Velocity(10, 10)
an_actor = actor.Actor(walkabout=walkabout,
say_text='Hello, world!',
velocity=velocity)
| # This module is part of Hypatia and is released under the
# MIT license: http://opensource.org/licenses/MIT
"""py.test unit testing for hypatia/actor.py
Run py.test on this module to assert hypatia.actor
is completely functional.
"""
import os
import pygame
import pytest
from hypatia import actor
from hypatia import physics
from hypatia import constants
from hypatia import sprites
try:
os.chdir('demo')
except OSError:
pass
class TestActor(object):
"""A grouping of tests for the actor.Actor class.
"""
def test_blah(self):
pass
def test_no_response():
"""Test the exception class.
See Also:
* actor.NoActorResponse
* actor.NoResponseReason
* actor.ActorCannotTalk
"""
# If the response reason is invalid a typeerror should be raised
with pytest.raises(TypeError):
raise actor.NoResponseReason(2)
# Give NoResponse a valid reason and see if it raises NoResponse
with pytest.raises(actor.NoResponse):
raise actor.NoResponseReason(actor.NoResponseReason.no_say_text)
# Make sure the reason attribute is accessible and is set
# to the supplied and valid reason.
try:
raise actor.NoResponseReason(actor.NoResponseReason.no_say_text)
except actor.NoResponseReason as no_response:
assert no_response.reason == actor.NoResponseReason.no_say_text
def test_actor():
"""Test actor.Actor class.
This is bad and outdated and bad.
"""
walkabout = sprites.Walkabout('debug')
velocity = physics.Velocity(10, 10)
an_actor = actor.Actor(walkabout=walkabout,
say_text='Hello, world!',
velocity=velocity)
| mit | Python |
fee0e9484d7824d767fa31b8c3358a1af88d2c2d | Fix test_check | shkurak/hangman | tests/test_check.py | tests/test_check.py | from hangman import check
def test_check():
assert check('hello', set(['h', 'e', 'l', 'o']))
| from hangman import check
def test_check():
assert check('hello', set('h', 'e', 'l', 'o'))
| mit | Python |
f35a015657bdcf3a149b31026eebe454004fd7ef | Add more tests | sigmavirus24/curryer | tests/test_curry.py | tests/test_curry.py | import pytest
from currypy import curry
class TestCurry:
def test_curry_as_decorator(self):
"""Ensure that currypy.curry can be used as a decorator"""
@curry
def func():
pass
assert func.curried is False
def test_curry_refuses_None(self):
"""Ensure that currypy.curry refuses None"""
with pytest.raises(TypeError):
curry(None)
def test_curries_when_given_parameters(self):
@curry
def add(a, b):
return a + b
assert add(1).curried is True
def test_evaluates_when_given_enough_parameters(self):
@curry
def add(a, b):
return a + b
assert add(1)(2) == 3
assert add(1, 2) == 3
| import pytest
from currypy import curry
class TestCurry:
def test_curry_as_decorator(self):
"""Ensure that currypy.curry can be used as a decorator"""
@curry
def func():
pass
assert func.curried is False
def test_curry_refuses_None(self):
with pytest.raises(ValueError):
curry(None)
| bsd-3-clause | Python |
80b4bae5903d4c3ba8c440d4067f6bd443f261f4 | Improve watermark functionality (3) | jiss-software/jiss-rendering-service,jiss-software/jiss-rendering-service | handler/Watermark.py | handler/Watermark.py | import core
import tornado
import uuid
import time
from utils import open_remote_image, add_watermark, open_image
class WatermarkHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request watermark generation for remote file')
name = '/tmp/%s.png' % str(uuid.uuid4())
proportion = self.request.headers.get('proportion')
text = self.request.headers.get('X-Jiss-Text')
add_watermark(open_remote_image(self.get_query_argument('url')), name, text, proportion)
self.response_file(name)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.logger.info('Request watermark generation for request file')
proportion = self.request.headers.get('proportion')
text = self.request.headers.get('X-Jiss-Text')
for item in self.request.files.values():
for file_info in item:
name = '/tmp/%s-%s.pdf' % (time.time(), file_info['filename'])
add_watermark(open_image(file_info['body']), name, text, proportion)
self.response_file(name)
return
| import core
import tornado
import uuid
import time
import urllib
from utils import open_remote_image, add_watermark, open_image
class WatermarkHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request watermark generation for remote file')
name = '/tmp/%s.png' % str(uuid.uuid4())
proportion = self.get_query_argument('proportion', default=1.5)
text = urllib.unquote(self.get_query_argument('text', default="Test")).decode('utf8')
add_watermark(open_remote_image(self.get_query_argument('url')), name, text, proportion)
self.response_file(name)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.logger.info('Request watermark generation for request file')
proportion = self.get_query_argument('proportion', default=1.5)
text = urllib.unquote(self.get_query_argument('text', default="Test")).decode('utf8')
for item in self.request.files.values():
for file_info in item:
name = '/tmp/%s-%s.pdf' % (time.time(), file_info['filename'])
add_watermark(open_image(file_info['body']), name, text, proportion)
self.response_file(name)
return
| apache-2.0 | Python |
15ef4b48962d64f9999f8efbdae394e2e7e0b6b5 | Fix migration | crowdresearch/daemo,crowdresearch/crowdsource-platform,aginzberg/crowdsource-platform,aginzberg/crowdsource-platform,shirishgoyal/crowdsource-platform,shirishgoyal/crowdsource-platform,crowdresearch/crowdsource-platform,crowdresearch/crowdsource-platform,shirishgoyal/crowdsource-platform,crowdresearch/daemo,crowdresearch/daemo,crowdresearch/crowdsource-platform,aginzberg/crowdsource-platform,crowdresearch/daemo,shirishgoyal/crowdsource-platform,aginzberg/crowdsource-platform | crowdsourcing/migrations/0012_auto_20160107_0521.py | crowdsourcing/migrations/0012_auto_20160107_0521.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-07 05:21
from __future__ import unicode_literals
from django.db import migrations
def create_system_financial_account(apps, schema_editor):
# We can't import the FinancialAccount model directly as it may be a newer
# version than this migration expects. We use the historical version.
account = apps.get_model("crowdsourcing", "FinancialAccount")
account.objects.get_or_create(is_system=True, type="paypal_deposit")
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0011_auto_20151221_1618'),
]
operations = [
migrations.RunPython(create_system_financial_account),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-07 05:21
from __future__ import unicode_literals
from django.db import migrations
def create_system_financial_account(apps):
# We can't import the FinancialAccount model directly as it may be a newer
# version than this migration expects. We use the historical version.
account = apps.get_model("crowdsourcing", "FinancialAccount")
account.objects.get_or_create(is_system=True, type="paypal_deposit")
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0011_auto_20151221_1618'),
]
operations = [
migrations.RunPython(create_system_financial_account),
]
| mit | Python |
711a58c888a9695c37b924a575fcf156ec9864bd | Correct error message | gogoair/foremast,gogoair/foremast | src/foremast/securitygroup/create_securitygroup.py | src/foremast/securitygroup/create_securitygroup.py | """Create Security Groups for Spinnaker Pipelines."""
import logging
import os
import requests
from ..consts import API_URL, HEADERS
from ..exceptions import (SpinnakerSecurityGroupCreationFailed,
SpinnakerTaskError)
from ..utils import check_task, get_template, get_vpc_id
class SpinnakerSecurityGroup:
"""Manipulate Spinnaker Security Groups.
Args:
app_name: Str of application name add Security Group to.
"""
def __init__(self, app_info):
self.log = logging.getLogger(__name__)
self.here = os.path.dirname(os.path.realpath(__file__))
self.app_info = app_info
self.app_name = app_info['app']
def create_security_group(self):
"""Sends a POST to spinnaker to create a new security group."""
url = "{0}/applications/{1}/tasks".format(API_URL, self.app_name)
self.app_info['vpc'] = get_vpc_id(self.app_info['env'],
self.app_info['region'])
secgroup_json = get_template(
template_file='securitygroup_template.json',
**self.app_info)
response = requests.post(url, data=secgroup_json, headers=HEADERS)
assert response.ok, ('Failed Security Group request for {0}: '
'{1}').format(self.app_name, response.text)
try:
check_task(response.json(), self.app_name)
except SpinnakerTaskError as error:
logging.error('Failed to create Security Group for %s: %s',
self.app_name, response.text)
raise SpinnakerSecurityGroupCreationFailed(error)
logging.info('Successfully created %s security group', self.app_name)
return True
| """Create Security Groups for Spinnaker Pipelines."""
import logging
import os
import requests
from ..consts import API_URL, HEADERS
from ..exceptions import (SpinnakerSecurityGroupCreationFailed,
SpinnakerTaskError)
from ..utils import check_task, get_template, get_vpc_id
class SpinnakerSecurityGroup:
"""Manipulate Spinnaker Security Groups.
Args:
app_name: Str of application name add Security Group to.
"""
def __init__(self, app_info):
self.log = logging.getLogger(__name__)
self.here = os.path.dirname(os.path.realpath(__file__))
self.app_info = app_info
self.app_name = app_info['app']
def create_security_group(self):
"""Sends a POST to spinnaker to create a new security group."""
url = "{0}/applications/{1}/tasks".format(API_URL, self.app_name)
self.app_info['vpc'] = get_vpc_id(self.app_info['env'],
self.app_info['region'])
secgroup_json = get_template(
template_file='securitygroup_template.json',
**self.app_info)
response = requests.post(url, data=secgroup_json, headers=HEADERS)
assert response.ok, ('Failed to create Security Group for {0}: '
'{1}').format(self.app_name, response.text)
try:
check_task(response.json(), self.app_name)
except SpinnakerTaskError as error:
logging.error('Failed to create Security Group for %s: %s',
self.app_name, response.text)
raise SpinnakerSecurityGroupCreationFailed(error)
logging.info('Successfully created %s security group', self.app_name)
return True
| apache-2.0 | Python |
c58a4829918f275941e96fed880f65da7f0474a5 | Create database_mappings directory | RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline | luigi/tasks/export/ftp/id_mapping/database_mappings.py | luigi/tasks/export/ftp/id_mapping/database_mappings.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import luigi
from tasks.config import export
from rnacentral.export.ftp.id_mapping import split_by_database
from .id_mapping import IdMapping
class DatabaseSpecificMappings(luigi.Task):
def requires(self):
return IdMapping()
def output(self):
return luigi.LocalTarget(export().database_mappings('ena.tsv'))
def run(self):
with self.requires().output().open('r') as raw:
os.makedirs(export().database_mappings())
split_by_database(raw, export().database_mappings())
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import luigi
from tasks.config import export
from rnacentral.export.ftp.id_mapping import split_by_database
from .id_mapping import IdMapping
class DatabaseSpecificMappings(luigi.Task):
def requires(self):
return IdMapping()
def output(self):
return luigi.LocalTarget(export().database_mappings('ena.tsv'))
def run(self):
with self.requires().output().open('r') as raw:
split_by_database(raw, export().database_mappings())
| apache-2.0 | Python |
7acb1962b6119a9895e8847b128e6694632ae15c | test char and string parameter methods | SCIP-Interfaces/PySCIPOpt,SCIP-Interfaces/PySCIPOpt | tests/test_model.py | tests/test_model.py | from pyscipopt import Model
def test_model():
# create solver instance
s = Model()
# test parameter methods
pric = s.getParam('lp/pricing')
s.setParam('lp/pricing', 'q')
assert 'q' == s.getParam('lp/pricing')
s.setParam('lp/pricing', pric)
s.setParam('visual/vbcfilename', 'vbcfile')
assert 'vbcfile' == s.getParam('visual/vbcfilename')
s.setParam('visual/vbcfilename', '-')
# add some variables
x = s.addVar("x", vtype = 'C', obj = 1.0)
y = s.addVar("y", vtype = 'C', obj = 2.0)
assert x.getObj() == 1.0
assert y.getObj() == 2.0
s.setObjective(4.0 * y + 10.5, clear = False)
assert x.getObj() == 1.0
assert y.getObj() == 4.0
assert s.getObjoffset() == 10.5
# add some constraint
c = s.addCons(x + 2 * y >= 1.0)
assert c.isLinear()
s.chgLhs(c, 5.0)
s.chgRhs(c, 6.0)
assert s.getLhs(c) == 5.0
assert s.getRhs(c) == 6.0
# solve problem
s.optimize()
solution = s.getBestSol()
# print solution
assert (s.getVal(x) == s.getSolVal(solution, x))
assert (s.getVal(y) == s.getSolVal(solution, y))
assert round(s.getVal(x)) == 5.0
assert round(s.getVal(y)) == 0.0
assert s.getSlack(c, solution) == 0.0
assert s.getSlack(c, solution, 'lhs') == 0.0
assert s.getSlack(c, solution, 'rhs') == 1.0
assert s.getActivity(c, solution) == 5.0
s.writeProblem('model')
s.writeProblem('model.lp')
s.freeProb()
s = Model()
x = s.addVar("x", vtype = 'C', obj = 1.0)
y = s.addVar("y", vtype = 'C', obj = 2.0)
c = s.addCons(x + 2 * y <= 1.0)
s.setMaximize()
s.delCons(c)
s.optimize()
assert s.getStatus() == 'unbounded'
if __name__ == "__main__":
test_model()
| from pyscipopt import Model
def test_model():
# create solver instance
s = Model()
# add some variables
x = s.addVar("x", vtype = 'C', obj = 1.0)
y = s.addVar("y", vtype = 'C', obj = 2.0)
assert x.getObj() == 1.0
assert y.getObj() == 2.0
s.setObjective(4.0 * y + 10.5, clear = False)
assert x.getObj() == 1.0
assert y.getObj() == 4.0
assert s.getObjoffset() == 10.5
# add some constraint
c = s.addCons(x + 2 * y >= 1.0)
assert c.isLinear()
s.chgLhs(c, 5.0)
s.chgRhs(c, 6.0)
assert s.getLhs(c) == 5.0
assert s.getRhs(c) == 6.0
# solve problem
s.optimize()
solution = s.getBestSol()
# print solution
assert (s.getVal(x) == s.getSolVal(solution, x))
assert (s.getVal(y) == s.getSolVal(solution, y))
assert round(s.getVal(x)) == 5.0
assert round(s.getVal(y)) == 0.0
assert s.getSlack(c, solution) == 0.0
assert s.getSlack(c, solution, 'lhs') == 0.0
assert s.getSlack(c, solution, 'rhs') == 1.0
assert s.getActivity(c, solution) == 5.0
s.writeProblem('model')
s.writeProblem('model.lp')
s.freeProb()
s = Model()
x = s.addVar("x", vtype = 'C', obj = 1.0)
y = s.addVar("y", vtype = 'C', obj = 2.0)
c = s.addCons(x + 2 * y <= 1.0)
s.setMaximize()
s.delCons(c)
s.optimize()
assert s.getStatus() == 'unbounded'
if __name__ == "__main__":
test_model()
| mit | Python |
1f560a21e0b901f9f0c786f864111a0e92951f5d | fix test | tfmorris/dedupe,01-/dedupe,nmiranda/dedupe,neozhangthe1/dedupe,dedupeio/dedupe,datamade/dedupe,pombredanne/dedupe,nmiranda/dedupe,tfmorris/dedupe,01-/dedupe,dedupeio/dedupe,datamade/dedupe,davidkunio/dedupe,pombredanne/dedupe,davidkunio/dedupe,neozhangthe1/dedupe | tests/test_price.py | tests/test_price.py | import unittest
import dedupe
from dedupe.variables.price import PriceType
class TestPrice(unittest.TestCase):
def test_comparator(self) :
assert PriceType.comparator(1, 10) == 1
assert PriceType.comparator(10, 1) == 1
| import unittest
import dedupe
from dedupe.variables.fieldclasses import PriceType
class TestPrice(unittest.TestCase):
def test_comparator(self) :
assert PriceType.comparator(1, 10) == 1
assert PriceType.comparator(10, 1) == 1
| mit | Python |
9ade5ad4da22d8bb200edaf7b4137522c19525f0 | Set module category | OCA/carrier-delivery | delivery_carrier_label_postlogistics/__openerp__.py | delivery_carrier_label_postlogistics/__openerp__.py | # -*- coding: utf-8 -*-
# © 2013-2016 Yannick Vaucher (Camptocamp SA)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{'name': 'PostLogistics Labels WebService',
'version': '9.0.1.1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'Delivery',
'complexity': 'normal',
'depends': ['base_delivery_carrier_label'],
'website': 'http://www.camptocamp.com/',
'data': ['data/res_partner.xml',
'data/delivery.xml',
'views/delivery.xml',
'views/postlogistics_license.xml',
'views/res_config.xml',
'security/ir.model.access.csv',
],
'tests': [],
'installable': True,
'auto_install': False,
'application': True,
'external_dependencies': {
'python': ['suds'],
}
}
| # -*- coding: utf-8 -*-
# © 2013-2016 Yannick Vaucher (Camptocamp SA)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{'name': 'PostLogistics Labels WebService',
'version': '9.0.1.1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'maintainer': 'Camptocamp',
'license': 'AGPL-3',
'category': 'version',
'complexity': 'normal',
'depends': ['base_delivery_carrier_label'],
'website': 'http://www.camptocamp.com/',
'data': ['data/res_partner.xml',
'data/delivery.xml',
'views/delivery.xml',
'views/postlogistics_license.xml',
'views/res_config.xml',
'security/ir.model.access.csv',
],
'tests': [],
'installable': True,
'auto_install': False,
'application': True,
'external_dependencies': {
'python': ['suds'],
}
}
| agpl-3.0 | Python |
bf4d51ea0b00e091728ecf8d4d51aa295448a416 | fix error exit when doing spack patch without parameters, same as spack stage | krafczyk/spack,lgarren/spack,LLNL/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,tmerrick1/spack,LLNL/spack,LLNL/spack,krafczyk/spack,TheTimmy/spack,TheTimmy/spack,lgarren/spack,mfherbst/spack,iulian787/spack,tmerrick1/spack,skosukhin/spack,iulian787/spack,mfherbst/spack,lgarren/spack,matthiasdiener/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,skosukhin/spack,LLNL/spack,TheTimmy/spack,lgarren/spack,skosukhin/spack,TheTimmy/spack,iulian787/spack,iulian787/spack,matthiasdiener/spack,lgarren/spack,skosukhin/spack,tmerrick1/spack,EmreAtes/spack,EmreAtes/spack,krafczyk/spack,matthiasdiener/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,EmreAtes/spack,TheTimmy/spack,iulian787/spack | lib/spack/spack/cmd/patch.py | lib/spack/spack/cmd/patch.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import llnl.util.tty as tty
import spack.cmd
import spack
description="Patch expanded archive sources in preparation for install"
def setup_parser(subparser):
subparser.add_argument(
'-n', '--no-checksum', action='store_true', dest='no_checksum',
help="Do not check downloaded packages against checksum")
subparser.add_argument(
'packages', nargs=argparse.REMAINDER, help="specs of packages to stage")
def patch(parser, args):
if not args.packages:
tty.die("patch requires at least one package argument")
if args.no_checksum:
spack.do_checksum = False
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
package = spack.repo.get(spec)
package.do_patch()
| ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import spack.cmd
import spack
description="Patch expanded archive sources in preparation for install"
def setup_parser(subparser):
subparser.add_argument(
'-n', '--no-checksum', action='store_true', dest='no_checksum',
help="Do not check downloaded packages against checksum")
subparser.add_argument(
'packages', nargs=argparse.REMAINDER, help="specs of packages to stage")
def patch(parser, args):
if not args.packages:
tty.die("patch requires at least one package argument")
if args.no_checksum:
spack.do_checksum = False
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
package = spack.repo.get(spec)
package.do_patch()
| lgpl-2.1 | Python |
677c2859c75fbafe4ee5e1fb2b90d7e8da881120 | Update csrf test to be compatible with Django 1.8 | LabD/django-postcode-lookup | tests/test_views.py | tests/test_views.py | from django.middleware import csrf
from freezegun import freeze_time
from pretend import stub
from rest_framework.test import APIRequestFactory
from django_postcode_lookup import views
from django_postcode_lookup.backends.base import PostcodeLookupResult
def test_valid_api_key():
rf = APIRequestFactory(enforce_csrf_checks=True)
params = {
'postcode': '3531 WR',
'number': '1',
}
request = rf.post('/', data=params, format='json')
csrf.rotate_token(request)
request.COOKIES['csrftoken'] = request.META['CSRF_COOKIE']
request.META['HTTP_X_CSRFTOKEN'] = request.META['CSRF_COOKIE']
views.PostcodeLookupView.backend = stub(
lookup=lambda postcode, number: PostcodeLookupResult(
postcode='3531 WR',
number='1',
city='UTRECHT',
street='Niasstraat'))
view = views.PostcodeLookupView.as_view()
response = view(request)
assert response.status_code == 200, response.rendered_content
assert response.data == {
'street': 'Niasstraat',
'number': '1',
'postcode': '3531 WR',
'city': 'UTRECHT',
}
def test_missing_csrf_key():
rf = APIRequestFactory(enforce_csrf_checks=True)
with freeze_time('2016-01-01 12:00'):
params = {
'postcode': '3531 WR',
'number': '1',
}
request = rf.post('/', data=params, format='json')
views.PostcodeLookupView.backend = stub(
lookup=lambda postcode, number: PostcodeLookupResult(
postcode='3531 WR',
number='1',
city='UTRECHT',
street='Niasstraat'),
validate_api_key=True)
view = views.PostcodeLookupView.as_view()
response = view(request)
assert response.status_code == 403
assert response.data == {
'detail': 'CSRF Failed: CSRF cookie not set.'
}
| from django.middleware.csrf import get_token as get_csrf_token
from freezegun import freeze_time
from pretend import stub
from rest_framework.test import APIRequestFactory
from django_postcode_lookup import views
from django_postcode_lookup.backends.base import PostcodeLookupResult
def test_valid_api_key():
rf = APIRequestFactory(enforce_csrf_checks=True)
params = {
'postcode': '3531 WR',
'number': '1',
}
request = rf.post('/', data=params, format='json')
csrf_token = get_csrf_token(request)
request.COOKIES['csrftoken'] = csrf_token
request.META['HTTP_X_CSRFTOKEN'] = csrf_token
views.PostcodeLookupView.backend = stub(
lookup=lambda postcode, number: PostcodeLookupResult(
postcode='3531 WR',
number='1',
city='UTRECHT',
street='Niasstraat'))
view = views.PostcodeLookupView.as_view()
response = view(request)
assert response.status_code == 200, response.rendered_content
assert response.data == {
'street': 'Niasstraat',
'number': '1',
'postcode': '3531 WR',
'city': 'UTRECHT',
}
def test_missing_csrf_key():
rf = APIRequestFactory(enforce_csrf_checks=True)
with freeze_time('2016-01-01 12:00'):
params = {
'postcode': '3531 WR',
'number': '1',
}
request = rf.post('/', data=params, format='json')
views.PostcodeLookupView.backend = stub(
lookup=lambda postcode, number: PostcodeLookupResult(
postcode='3531 WR',
number='1',
city='UTRECHT',
street='Niasstraat'),
validate_api_key=True)
view = views.PostcodeLookupView.as_view()
response = view(request)
assert response.status_code == 403
assert response.data == {
'detail': 'CSRF Failed: CSRF cookie not set.'
}
| mit | Python |
8cfd48449262caa19b0795d4b6c1c537372d0782 | add forward referencing | saurabh6790/frappe,yashodhank/frappe,almeidapaulopt/frappe,mhbu50/frappe,almeidapaulopt/frappe,saurabh6790/frappe,yashodhank/frappe,frappe/frappe,frappe/frappe,StrellaGroup/frappe,StrellaGroup/frappe,mhbu50/frappe,frappe/frappe,mhbu50/frappe,almeidapaulopt/frappe,almeidapaulopt/frappe,yashodhank/frappe,StrellaGroup/frappe,mhbu50/frappe,yashodhank/frappe,saurabh6790/frappe,saurabh6790/frappe | frappe/core/page/background_jobs/background_jobs.py | frappe/core/page/background_jobs/background_jobs.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from typing import TYPE_CHECKING, Dict, List
from rq import Queue, Worker
import frappe
from frappe import _
from frappe.utils import cint, convert_utc_to_user_timezone, format_datetime
from frappe.utils.background_jobs import get_redis_conn
from frappe.utils.scheduler import is_scheduler_inactive
if TYPE_CHECKING:
from rq.job import Job
COLORS = {
'queued': 'orange',
'failed': 'red',
'started': 'blue',
'finished': 'green'
}
@frappe.whitelist()
def get_info(show_failed=False) -> List[Dict]:
conn = get_redis_conn()
queues = Queue.all(conn)
workers = Worker.all(conn)
jobs = []
def add_job(job: 'Job', name: str) -> None:
if job.kwargs.get('site') == frappe.local.site:
job_info = {
'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')
or job.kwargs.get('kwargs', {}).get('job_type')
or str(job.kwargs.get('job_name')),
'status': job.get_status(),
'queue': name,
'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
'color': COLORS[job.get_status()]
}
if job.exc_info:
job_info['exc_info'] = job.exc_info
jobs.append(job_info)
# show worker jobs
for worker in workers:
job = worker.get_current_job()
if job:
add_job(job, worker.name)
# show active queued jobs
for queue in queues:
if queue.name != 'failed':
for job in queue.jobs:
add_job(job, queue.name)
# show failed jobs, if requested
if cint(show_failed):
for queue in queues:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
add_job(job, queue.name)
return jobs
@frappe.whitelist()
def remove_failed_jobs():
conn = get_redis_conn()
queues = Queue.all(conn)
for queue in queues:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
fail_registry.remove(job, delete_job=True)
@frappe.whitelist()
def get_scheduler_status():
if is_scheduler_inactive():
return [_("Inactive"), "red"]
return [_("Active"), "green"]
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from typing import TYPE_CHECKING, Dict, List
from rq import Queue, Worker
import frappe
from frappe import _
from frappe.utils import cint, convert_utc_to_user_timezone, format_datetime
from frappe.utils.background_jobs import get_redis_conn
from frappe.utils.scheduler import is_scheduler_inactive
if TYPE_CHECKING:
from rq.job import Job
COLORS = {
'queued': 'orange',
'failed': 'red',
'started': 'blue',
'finished': 'green'
}
@frappe.whitelist()
def get_info(show_failed: bool = False) -> List[Dict]:
conn = get_redis_conn()
queues = Queue.all(conn)
workers = Worker.all(conn)
jobs = []
def add_job(job: Job, name: str) -> None:
if job.kwargs.get('site') == frappe.local.site:
job_info = {
'job_name': job.kwargs.get('kwargs', {}).get('playbook_method')
or job.kwargs.get('kwargs', {}).get('job_type')
or str(job.kwargs.get('job_name')),
'status': job.get_status(),
'queue': name,
'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
'color': COLORS[job.get_status()]
}
if job.exc_info:
job_info['exc_info'] = job.exc_info
jobs.append(job_info)
# show worker jobs
for worker in workers:
job = worker.get_current_job()
if job:
add_job(job, worker.name)
# show active queued jobs
for queue in queues:
if queue.name != 'failed':
for job in queue.jobs:
add_job(job, queue.name)
# show failed jobs, if requested
if cint(show_failed):
for queue in queues:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
add_job(job, queue.name)
return jobs
@frappe.whitelist()
def remove_failed_jobs() -> None:
conn = get_redis_conn()
queues = Queue.all(conn)
for queue in queues:
fail_registry = queue.failed_job_registry
for job_id in fail_registry.get_job_ids():
job = queue.fetch_job(job_id)
fail_registry.remove(job, delete_job=True)
@frappe.whitelist()
def get_scheduler_status() -> None:
if is_scheduler_inactive():
return [_("Inactive"), "red"]
return [_("Active"), "green"]
| mit | Python |
b465ccc9b26bdf6f06cfc3f72df2cbdb01c597e4 | fix sentry logging in conjunction with celery | schacki/cookiecutter-django,luzfcb/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,jondelmil/cookiecutter-django,stepanovsh/project_template,mjhea0/cookiecutter-django,webspired/cookiecutter-django,webspired/cookiecutter-django,drxos/cookiecutter-django-dokku,ingenioustechie/cookiecutter-django-openshift,stepanovsh/project_template,crdoconnor/cookiecutter-django,aleprovencio/cookiecutter-django,nunchaks/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,webyneter/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,ovidner/cookiecutter-django,mjhea0/cookiecutter-django,webyneter/cookiecutter-django,bopo/cookiecutter-django,mjhea0/cookiecutter-django,stepanovsh/project_template,hackebrot/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,hairychris/cookiecutter-django,gappsexperts/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,ryankanno/cookiecutter-django,topwebmaster/cookiecutter-django,ad-m/cookiecutter-django,ryankanno/cookiecutter-django,ovidner/cookiecutter-django,gappsexperts/cookiecutter-django,thisjustin/cookiecutter-django,nunchaks/cookiecutter-django,schacki/cookiecutter-django,andresgz/cookiecutter-django,kappataumu/cookiecutter-django,andresgz/cookiecutter-django,ovidner/cookiecutter-django,asyncee/cookiecutter-django,pydanny/cookiecutter-django,hackebrot/cookiecutter-django,luzfcb/cookiecutter-django,ryankanno/cookiecutter-django,ddiazpinto/cookiecutter-django,trungdong/cookiecutter-django,mistalaba/cookiecutter-django,webyneter/cookiecutter-django,andresgz/cookiecutter-django,kappataumu/cookiecutter-django,crdoconnor/cookiecutter-django,stepanovsh/project_template,crdoconnor/cookiecutter-django,mistalaba/cookiecutter-django,ddiazpinto/cookiecutter-django,calculuscowboy/cookiecutter-django,ddiazpinto/cookiecutter-django,stepanovsh/project_template,bopo/cookiecutter-django,ad-m/cookiecutter-djan
go,webyneter/cookiecutter-django,bopo/cookiecutter-django,gappsexperts/cookiecutter-django,aleprovencio/cookiecutter-django,hairychris/cookiecutter-django,trungdong/cookiecutter-django,luzfcb/cookiecutter-django,yunti/cookiecutter-django,luzfcb/cookiecutter-django,yunti/cookiecutter-django,calculuscowboy/cookiecutter-django,HandyCodeJob/hcj-django-temp,ad-m/cookiecutter-django,drxos/cookiecutter-django-dokku,topwebmaster/cookiecutter-django,asyncee/cookiecutter-django,hairychris/cookiecutter-django,asyncee/cookiecutter-django,asyncee/cookiecutter-django,pydanny/cookiecutter-django,yunti/cookiecutter-django,hackebrot/cookiecutter-django,drxos/cookiecutter-django-dokku,nunchaks/cookiecutter-django,thisjustin/cookiecutter-django,mjhea0/cookiecutter-django,kappataumu/cookiecutter-django,mistalaba/cookiecutter-django,schacki/cookiecutter-django,gappsexperts/cookiecutter-django,trungdong/cookiecutter-django,hairychris/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,hackebrot/cookiecutter-django,trungdong/cookiecutter-django,drxos/cookiecutter-django-dokku,mistalaba/cookiecutter-django,schacki/cookiecutter-django,ddiazpinto/cookiecutter-django,andresgz/cookiecutter-django,jondelmil/cookiecutter-django,jondelmil/cookiecutter-django,Parbhat/cookiecutter-django-foundation,ad-m/cookiecutter-django,topwebmaster/cookiecutter-django,HandyCodeJob/hcj-django-temp,calculuscowboy/cookiecutter-django,yunti/cookiecutter-django,Parbhat/cookiecutter-django-foundation,ingenioustechie/cookiecutter-django-openshift,nunchaks/cookiecutter-django,kappataumu/cookiecutter-django,jondelmil/cookiecutter-django,aleprovencio/cookiecutter-django,crdoconnor/cookiecutter-django,Parbhat/cookiecutter-django-foundation,aleprovencio/cookiecutter-django,pydanny/cookiecutter-django,webspired/cookiecutter-django,webspired/cookiecutter-django,calculuscowboy/cookiecutter-django,thisjustin/cookiecutter-django,pydanny/cookiecutter-django,topwebmaster/cookiecutter-django,bopo/cookiecutter-django,rya
nkanno/cookiecutter-django,Parbhat/cookiecutter-django-foundation,thisjustin/cookiecutter-django,HandyCodeJob/hcj-django-temp,HandyCodeJob/hcj-django-temp,ovidner/cookiecutter-django | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/taskapp/celery.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/taskapp/celery.py | {% if cookiecutter.use_celery == "y" %}
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
{% if cookiecutter.use_sentry == "y" -%}
from raven import Client
from raven.contrib.celery import register_signal
{%- endif %}
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # pragma: no cover
app = Celery('{{cookiecutter.repo_name}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.repo_name}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
{% if cookiecutter.use_sentry == "y" -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
client = Client(dsn=settings.RAVEN_CONFIG['dsn'])
register_signal(client)
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| {% if cookiecutter.use_celery == "y" %}
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # pragma: no cover
app = Celery('{{cookiecutter.repo_name}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.repo_name}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| bsd-3-clause | Python |
462a24e3e78d3bd6ba7346f8e14fdfa18f726378 | Switch test_misc to pytest | ericdill/databroker,ericdill/databroker | databroker/tests/test_misc.py | databroker/tests/test_misc.py | import time as ttime
import uuid
import numpy as np
import pytest
from filestore.test.utils import fs_setup, fs_teardown
from metadatastore.commands import insert_run_start
from metadatastore.test.utils import mds_setup, mds_teardown
from numpy.testing.utils import assert_array_equal
from databroker import DataBroker as db
from databroker.pims_readers import Images, get_images
from ..examples.sample_data import image_and_scalar
from ..utils.diagnostics import watermark
@pytest.fixture(scope='module')
def image_uid():
rs = insert_run_start(time=ttime.time(), scan_id=105,
owner='stepper', beamline_id='example',
uid=str(uuid.uuid4()), cat='meow')
image_and_scalar.run(run_start_uid=rs)
return rs
def setup_module(module):
mds_setup()
fs_setup()
def teardown_module(module):
mds_teardown()
fs_teardown()
def test_watermark():
result = watermark()
assert result
def test_pims_images_old_api(image_uid):
header = db[image_uid]
images = Images(header, 'img')
images[:5] # smoke test
assert images.pixel_type == np.float64
assert_array_equal(images.frame_shape, images[0].shape)
assert len(images) == image_and_scalar.num1
def test_pims_images(image_uid):
header = db[image_uid]
images = get_images(header, 'img')
images[:5] # smoke test
assert images.pixel_type == np.float64
assert_array_equal(images.frame_shape, images[0].shape)
assert len(images) == image_and_scalar.num1
| from ..utils.diagnostics import watermark
from databroker.pims_readers import Images, get_images
from databroker import DataBroker as db
from ..examples.sample_data import image_and_scalar
from metadatastore.test.utils import mds_setup, mds_teardown
from filestore.test.utils import fs_setup, fs_teardown
import numpy as np
from nose.tools import assert_equal
from numpy.testing.utils import assert_array_equal
def test_watermark():
watermark()
def test_pims_images_old_api():
header = db[-1]
images = Images(header, 'img')
images[:5] # smoke test
assert_equal(images.pixel_type, np.float64)
assert_array_equal(images.frame_shape, images[0].shape)
assert_equal(len(images), image_and_scalar.num1)
def test_pims_images():
header = db[-1]
images = get_images(header, 'img')
images[:5] # smoke test
assert_equal(images.pixel_type, np.float64)
assert_array_equal(images.frame_shape, images[0].shape)
assert_equal(len(images), image_and_scalar.num1)
def setup_module():
mds_setup()
fs_setup()
image_and_scalar.run()
def teardown_module():
mds_teardown()
fs_teardown()
| bsd-3-clause | Python |
27b04ec5b8f425330280743f7dc064c74063497e | Fix collection test | VirusTotal/vt-graph-api,VirusTotal/vt-graph-api | tests/test_create_collection.py | tests/test_create_collection.py | """Test create collection from graph."""
import pytest
import vt_graph_api
import vt_graph_api.errors
test_graph = vt_graph_api.VTGraph(
"Dummy api key", verbose=False, private=False, name="Graph test",
user_editors=["agfernandez"], group_viewers=["virustotal"])
def test_create_collection(mocker):
m = mocker.Mock(status_code=200, json=mocker.Mock(return_value={
'data': {'id': 'new_collection'}
}))
mocker.patch("requests.post", return_value=m)
test_graph.add_node("virustotal.com", "domain")
collection_url = test_graph.create_collection()
assert collection_url == "https://www.virustotal.com/gui/collection/new_collection"
)
def test_create_collection_fails(mocker):
m = mocker.Mock(status_code=400, json=mocker.Mock({}))
mocker.patch("requests.post", return_value=m)
test_graph.add_node("virustotal.com", "domain")
with pytest.raises(vt_graph_api.errors.CreateCollectionError):
test_graph.create_collection()
| """Test create collection from graph."""
import pytest
import vt_graph_api
import vt_graph_api.errors
test_graph = vt_graph_api.VTGraph(
"Dummy api key", verbose=False, private=False, name="Graph test",
user_editors=["agfernandez"], group_viewers=["virustotal"])
def test_create_collection(mocker):
m = mocker.Mock(status_code=200, json=mocker.Mock(return_value={
'data': {'id': 'new_collection'}
}))
mocker.patch("requests.post", return_value=m)
test_graph.add_node("virustotal.com", "domain")
collection_url = test_graph.create_collection()
assert collection_url == ("https://www.virustotal.com/gui/collection/"
"{collection_url}".format(collection_url=collection_url))
def test_create_collection_fails(mocker):
m = mocker.Mock(status_code=400, json=mocker.Mock({}))
mocker.patch("requests.post", return_value=m)
test_graph.add_node("virustotal.com", "domain")
with pytest.raises(vt_graph_api.errors.CreateCollectionError):
test_graph.create_collection()
| apache-2.0 | Python |
de59cc4ec06d3666fc9b8e32272b723ed37501fb | Update docx_parser.py | deanmalmgren/textract,deanmalmgren/textract,deanmalmgren/textract | textract/parsers/docx_parser.py | textract/parsers/docx_parser.py | import docx
from .utils import BaseParser
class Parser(BaseParser):
"""Extract text from docx file using python-docx.
"""
def extract(self, filename, **kwargs):
text = ""
document = docx.Document(filename)
# Extract text from root paragraphs
text += '\n\n'.join([
paragraph.text for paragraph in document.paragraphs
])
# Recursively extract text from root tables
for table in document.tables:
text += self._parse_table(table, text)
return text
def _parse_table(self, table, text):
for row in table.rows:
for cell in row.cells:
# For every cell in every row of the table, extract text from child paragraphs
text += '\n\n'.join([
paragraph.text for paragraph in cell
])
# Then recursively extract text from child tables
for table in cell.tables:
text += _parse_table(table, text)
return text
| import docx
from .utils import BaseParser
class Parser(BaseParser):
"""Extract text from docx file using python-docx.
"""
def extract(self, filename, **kwargs):
text = ""
document = docx.Document(filename)
# Extract text from root paragraphs
text += '\n\n'.join([
paragraph.text for paragraph in document.paragraphs
])
# Recursively extract text from root tables
for table in document.tables:
text += self._parse_table(table, text)
return text
def _parse_table(self, table, text):
for row in table.rows:
for cell in row.cells:
# For every cell in every row of the table, extract text from child paragraphs
text += '\n\n'.join([
paragraph.text for paragraph in cell
])
# Then recursively extract text from child tables
for table in cell.tables:
text += _parse_table(table, text)
return text
| mit | Python |
6de1083784d8a73e234dd14cabd17e7ee5852949 | Add missing import to utility python script | dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk | tools/clean_output_directory.py | tools/clean_output_directory.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
import shutil
import sys
import subprocess
import utils
def Main():
build_root = utils.GetBuildRoot(utils.GuessOS())
print 'Deleting %s' % build_root
if sys.platform != 'win32':
shutil.rmtree(build_root, ignore_errors=True)
else:
# Intentionally ignore return value since a directory might be in use.
subprocess.call(['rmdir', '/Q', '/S', build_root],
env=os.environ.copy(),
shell=True)
return 0
if __name__ == '__main__':
sys.exit(Main())
| #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
import shutil
import sys
import utils
def Main():
build_root = utils.GetBuildRoot(utils.GuessOS())
print 'Deleting %s' % build_root
if sys.platform != 'win32':
shutil.rmtree(build_root, ignore_errors=True)
else:
# Intentionally ignore return value since a directory might be in use.
subprocess.call(['rmdir', '/Q', '/S', build_root],
env=os.environ.copy(),
shell=True)
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause | Python |
39cc0a83e47caf2ba51b679d4f082381d0a6a6d6 | Fix chunk | muddyfish/PYKE,muddyfish/PYKE | node/floor_divide.py | node/floor_divide.py | #!/usr/bin/env python
from nodes import Node
import math
class FloorDiv(Node):
char = "f"
args = 2
results = 1
@Node.test_func([3,2], [1])
@Node.test_func([6,-3], [-2])
def func(self, a:Node.number,b:Node.number):
"""a/b. Rounds down, returns an int."""
return a//b
@Node.test_func(["test", "e"], [["t", "e", "st"]])
def partition(self, string:str, sep:str):
"""Split the string at the first occurrence of sep,
return a 3-list containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found,
return a 3-list containing the string itself,
followed by two empty strings."""
return [list(string.partition(sep))]
@Node.test_func(["134", 1], [["134"]])
@Node.test_func(["1234", 2], [["12", "34"]])
@Node.test_func(["1234", 3], [["1", "2", "34"]])
@Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
def chunk(self, inp:Node.indexable, num:int):
"""Return inp seperated into num groups"""
rtn = []
size = len(inp)//num
for i in range(0, num*size, size):
rtn.append(inp[i:i+size])
if len(rtn) != num:
rtn.append(inp[i+size:])
else:
rtn[-1] += inp[i+size:]
return [rtn] | #!/usr/bin/env python
from nodes import Node
class FloorDiv(Node):
char = "f"
args = 2
results = 1
@Node.test_func([3,2], [1])
@Node.test_func([6,-3], [-2])
def func(self, a:Node.number,b:Node.number):
"""a/b. Rounds down, returns an int."""
return a//b
@Node.test_func(["test", "e"], [["t", "e", "st"]])
def partition(self, string:str, sep:str):
"""Split the string at the first occurrence of sep,
return a 3-list containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found,
return a 3-list containing the string itself,
followed by two empty strings."""
return [list(string.partition(sep))]
@Node.test_func(["134", 1], [["134"]])
@Node.test_func(["1234", 2], [["12", "34"]])
@Node.test_func(["1234", 3], [["1", "2", "34"]])
@Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
def chunk(self, inp:Node.indexable, num:int):
"""Return inp seperated into num groups"""
rtn = []
last = 0
size = len(inp)//num
for i in range(size, len(inp), size):
rtn.append(inp[last:i])
last = i
if len(rtn) != num:
rtn.append(inp[last:])
else:
rtn[-1] += inp[last:]
if len(rtn):
if isinstance(inp, str):
rtn[-1] = "".join(rtn[-1])
else:
rtn[-1] = type(inp)(rtn[-1])
return [rtn] | mit | Python |
e3cae2fb35199383febf9834af272118a39a16f3 | Add solution to Lessson2.08-Handling_Attributes | krzyste/ud032,krzyste/ud032 | Lesson_2_Data_in_More_Complex_Formats/08-Handling_Attributes/authors.py | Lesson_2_Data_in_More_Complex_Formats/08-Handling_Attributes/authors.py | #!/usr/bin/env python
# Your task here is to extract data from xml on authors of an article
# and add it to a list, one item for an author.
# See the provided data structure for the expected format.
# The tags for first name, surname and email should map directly
# to the dictionary keys
import xml.etree.ElementTree as ET
article_file = "exampleResearchArticle.xml"
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def get_authors(root):
authors = []
for author in root.findall('./fm/bibl/aug/au'):
data = {
"fnm": None,
"snm": None,
"email": None,
"insr": []
}
for elem in author:
for key in data.iterkeys():
if elem.tag == key:
if elem.tag == "insr":
data[key].append(elem.attrib["iid"])
else:
data[key] = elem.text
authors.append(data)
return authors
def test():
solution = [{'insr': ['I1'], 'fnm': 'Omer', 'snm': 'Mei-Dan', 'email': 'omer@extremegate.com'},
{'insr': ['I2'], 'fnm': 'Mike', 'snm': 'Carmont', 'email': 'mcarmont@hotmail.com'},
{'insr': ['I3', 'I4'], 'fnm': 'Lior', 'snm': 'Laver', 'email': 'laver17@gmail.com'},
{'insr': ['I3'], 'fnm': 'Meir', 'snm': 'Nyska', 'email': 'nyska@internet-zahav.net'},
{'insr': ['I8'], 'fnm': 'Hagay', 'snm': 'Kammar', 'email': 'kammarh@gmail.com'},
{'insr': ['I3', 'I5'], 'fnm': 'Gideon', 'snm': 'Mann', 'email': 'gideon.mann.md@gmail.com'},
{'insr': ['I6'], 'fnm': 'Barnaby', 'snm': 'Clarck', 'email': 'barns.nz@gmail.com'},
{'insr': ['I7'], 'fnm': 'Eugene', 'snm': 'Kots', 'email': 'eukots@gmail.com'}]
root = get_root(article_file)
data = get_authors(root)
assert data[0] == solution[0]
assert data[1]["insr"] == solution[1]["insr"]
test() | #!/usr/bin/env python
# Your task here is to extract data from xml on authors of an article
# and add it to a list, one item for an author.
# See the provided data structure for the expected format.
# The tags for first name, surname and email should map directly
# to the dictionary keys, but you have to extract the attributes from the "insr" tag
# and add them to the list for the dictionary key "insr"
import xml.etree.ElementTree as ET
article_file = "exampleResearchArticle.xml"
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def get_authors(root):
authors = []
for author in root.findall('./fm/bibl/aug/au'):
data = {
"fnm": None,
"snm": None,
"email": None,
"insr": []
}
# YOUR CODE HERE
authors.append(data)
return authors
def test():
solution = [{'insr': ['I1'], 'fnm': 'Omer', 'snm': 'Mei-Dan', 'email': 'omer@extremegate.com'},
{'insr': ['I2'], 'fnm': 'Mike', 'snm': 'Carmont', 'email': 'mcarmont@hotmail.com'},
{'insr': ['I3', 'I4'], 'fnm': 'Lior', 'snm': 'Laver', 'email': 'laver17@gmail.com'},
{'insr': ['I3'], 'fnm': 'Meir', 'snm': 'Nyska', 'email': 'nyska@internet-zahav.net'},
{'insr': ['I8'], 'fnm': 'Hagay', 'snm': 'Kammar', 'email': 'kammarh@gmail.com'},
{'insr': ['I3', 'I5'], 'fnm': 'Gideon', 'snm': 'Mann', 'email': 'gideon.mann.md@gmail.com'},
{'insr': ['I6'], 'fnm': 'Barnaby', 'snm': 'Clarck', 'email': 'barns.nz@gmail.com'},
{'insr': ['I7'], 'fnm': 'Eugene', 'snm': 'Kots', 'email': 'eukots@gmail.com'}]
root = get_root(article_file)
data = get_authors(root)
assert data[0] == solution[0]
assert data[1]["insr"] == solution[1]["insr"]
test() | agpl-3.0 | Python |
f879f82ad0393a770ed50043c70ee1dd4a12daaa | Rewrite "test_tissue_corr_computation" as a pytest | genenetwork/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2 | wqflask/tests/unit/wqflask/correlation/test_correlation_functions.py | wqflask/tests/unit/wqflask/correlation/test_correlation_functions.py | """module contains tests for correlation functions"""
import unittest
from unittest import mock
from wqflask.correlation.correlation_functions import get_trait_symbol_and_tissue_values
from wqflask.correlation.correlation_functions import cal_zero_order_corr_for_tiss
def test_tissue_corr_computation(mocker):
"""Test for cal_zero_order_corr_for_tiss"""
primary_values = [9.288, 9.313, 8.988, 9.660, 8.21]
target_values = [9.586, 8.498, 9.362, 8.820, 8.786]
_m = mocker.patch(("wqflask.correlation.correlation_functions."
"compute_corr_coeff_p_value"),
return_value=(0.51, 0.7))
results = cal_zero_order_corr_for_tiss(primary_values, target_values)
_m.assert_called_once_with(
primary_values=primary_values, target_values=target_values,
corr_method="pearson")
assert len(results) == 3
@mock.patch("wqflask.correlation.correlation_functions.MrnaAssayTissueData")
def test_get_trait_symbol_and_tissue_values(self, mock_class):
"""test for getting trait symbol and tissue_values"""
mock_class_instance = mock_class.return_value
mock_class_instance.gene_symbols = ["k1", "k2", "k3"]
mock_class_instance.get_symbol_values_pairs.return_value = {
"k1": ["v1", "v2", "v3"], "k2": ["v2", "v3"], "k3": ["k3"]}
results = get_trait_symbol_and_tissue_values(
symbol_list=["k1", "k2", "k3"])
mock_class.assert_called_with(gene_symbols=['k1', 'k2', 'k3'])
self.assertEqual({"k1": ["v1", "v2", "v3"], "k2": [
"v2", "v3"], "k3": ["k3"]}, results)
| """module contains tests for correlation functions"""
import unittest
from unittest import mock
from wqflask.correlation.correlation_functions import get_trait_symbol_and_tissue_values
from wqflask.correlation.correlation_functions import cal_zero_order_corr_for_tiss
class TestCorrelationFunctions(unittest.TestCase):
"""test for correlation helper functions"""
@mock.patch("wqflask.correlation.correlation_functions.compute_corr_coeff_p_value")
def test_tissue_corr_computation(self, mock_tiss_corr_computation):
"""test for cal_zero_order_corr_for_tiss"""
primary_values = [9.288, 9.313, 8.988, 9.660, 8.21]
target_values = [9.586, 8.498, 9.362, 8.820, 8.786]
mock_tiss_corr_computation.return_value = (0.51, 0.7)
results = cal_zero_order_corr_for_tiss(primary_values, target_values)
mock_tiss_corr_computation.assert_called_once_with(
primary_values=primary_values, target_values=target_values,
corr_method="pearson")
self.assertEqual(len(results), 3)
@mock.patch("wqflask.correlation.correlation_functions.MrnaAssayTissueData")
def test_get_trait_symbol_and_tissue_values(self, mock_class):
"""test for getting trait symbol and tissue_values"""
mock_class_instance = mock_class.return_value
mock_class_instance.gene_symbols = ["k1", "k2", "k3"]
mock_class_instance.get_symbol_values_pairs.return_value = {
"k1": ["v1", "v2", "v3"], "k2": ["v2", "v3"], "k3": ["k3"]}
results = get_trait_symbol_and_tissue_values(
symbol_list=["k1", "k2", "k3"])
mock_class.assert_called_with(gene_symbols=['k1', 'k2', 'k3'])
self.assertEqual({"k1": ["v1", "v2", "v3"], "k2": [
"v2", "v3"], "k3": ["k3"]}, results)
| agpl-3.0 | Python |
59a68102c728b94c07b742eeb44fe39788237720 | Change not to use functions starts with an underscore | wkentaro/chainer,wkentaro/chainer,jnishi/chainer,rezoo/chainer,aonotas/chainer,AlpacaDB/chainer,sinhrks/chainer,cupy/cupy,benob/chainer,kashif/chainer,sinhrks/chainer,cupy/cupy,keisuke-umezawa/chainer,cemoody/chainer,delta2323/chainer,benob/chainer,ktnyt/chainer,anaruse/chainer,chainer/chainer,chainer/chainer,chainer/chainer,okuta/chainer,wkentaro/chainer,ronekko/chainer,ktnyt/chainer,okuta/chainer,kiyukuta/chainer,hvy/chainer,jnishi/chainer,keisuke-umezawa/chainer,wkentaro/chainer,kikusu/chainer,cupy/cupy,niboshi/chainer,hvy/chainer,niboshi/chainer,ktnyt/chainer,jnishi/chainer,ktnyt/chainer,jnishi/chainer,tkerola/chainer,cupy/cupy,niboshi/chainer,keisuke-umezawa/chainer,okuta/chainer,niboshi/chainer,ysekky/chainer,kikusu/chainer,okuta/chainer,hvy/chainer,AlpacaDB/chainer,pfnet/chainer,hvy/chainer,chainer/chainer,keisuke-umezawa/chainer | tests/chainer_tests/functions_tests/math_tests/test_batch_l2_norm_squared.py | tests/chainer_tests/functions_tests/math_tests/test_batch_l2_norm_squared.py | import unittest
import numpy as np
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
def _as_two_dim(x):
if x.ndim == 2:
return x
return x.reshape((len(x), -1))
@testing.parameterize(
{'shape': (4, 3, 5)},
{'shape': (4, 15)},
)
class TestBatchL2NormSquared(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
self.gy = np.random.uniform(-1, 1, self.shape[0]).astype(np.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.batch_l2_norm_squared(x)
self.assertEqual(y.data.dtype, np.float32)
y_data = cuda.to_cpu(y.data)
x_two_dim = _as_two_dim(self.x)
y_expect = np.empty(len(self.x))
for n in six.moves.range(len(self.x)):
y_expect[n] = sum(map(lambda x: x * x, x_two_dim[n]))
gradient_check.assert_allclose(y_expect, y_data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
functions.BatchL2NormSquared(), x_data, y_grad, eps=1)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestBatchL2NormSquaredTypeError(unittest.TestCase):
def test_invalid_shape(self):
x = chainer.Variable(np.zeros((4,), dtype=np.float32))
with self.assertRaises(type_check.InvalidType):
chainer.functions.batch_l2_norm_squared(x)
testing.run_module(__name__, __file__)
| import unittest
import numpy as np
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer.functions.math.batch_l2_norm_squared import _as_two_dim
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
@testing.parameterize(
{'shape': (4, 3, 5)},
{'shape': (4, 15)},
)
class TestBatchL2NormSquared(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
self.gy = np.random.uniform(-1, 1, self.shape[0]).astype(np.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.batch_l2_norm_squared(x)
self.assertEqual(y.data.dtype, np.float32)
y_data = cuda.to_cpu(y.data)
x_two_dim = _as_two_dim(self.x)
y_expect = np.empty(len(self.x))
for n in six.moves.range(len(self.x)):
y_expect[n] = sum(map(lambda x: x * x, x_two_dim[n]))
gradient_check.assert_allclose(y_expect, y_data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
functions.BatchL2NormSquared(), x_data, y_grad, eps=1)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestBatchL2NormSquaredTypeError(unittest.TestCase):
def test_invalid_shape(self):
x = chainer.Variable(np.zeros((4,), dtype=np.float32))
with self.assertRaises(type_check.InvalidType):
chainer.functions.batch_l2_norm_squared(x)
testing.run_module(__name__, __file__)
| mit | Python |
4b1902ee97dcb7972f800396e8954d99df446362 | Update P05_seleniumBrowser removed redundant parenthesis | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter11/P05_seleniumBrowser.py | books/AutomateTheBoringStuffWithPython/Chapter11/P05_seleniumBrowser.py | # This program uses selenium to parse and interact with websites
#
# Note:
# - geckodriver is needed for Linux
# - download from https://github.com/mozilla/geckodriver/releases
# - place in /usr/local/bin
# - more info https://github.com/SeleniumHQ/selenium/blob/master/py/docs/source/index.rst
from selenium import webdriver
# Starting a Selenium-Controlled Browser
browser = webdriver.Firefox()
print(type(browser))
browser.get("http://inventwithpython.com")
# Finding Elements on the Page
try:
elem = browser.find_element_by_class_name("card-img-top")
print("Found <%s> element with that class name!" % elem.tag_name)
except:
print("Was not able to find an element with that name.")
| # This program uses selenium to parse and interact with websites
#
# Note:
# - geckodriver is needed for Linux
# - download from https://github.com/mozilla/geckodriver/releases
# - place in /usr/local/bin
# - more info https://github.com/SeleniumHQ/selenium/blob/master/py/docs/source/index.rst
from selenium import webdriver
# Starting a Selenium-Controlled Browser
browser = webdriver.Firefox()
print(type(browser))
browser.get("http://inventwithpython.com")
# Finding Elements on the Page
try:
elem = browser.find_element_by_class_name("card-img-top")
print("Found <%s> element with that class name!" % (elem.tag_name))
except:
print("Was not able to find an element with that name.")
| mit | Python |
13c968f9f345f58775750f1f83ca7881cee2755a | Use Tracking DB Service URL rather than localhost in the DB connection string. | llevar/germline-regenotyper,llevar/germline-regenotyper | bootstrap/conf/salt/state/run-tracking-db/scripts/import_sample_data.py | bootstrap/conf/salt/state/run-tracking-db/scripts/import_sample_data.py | import pandas as pd
import sys
df = pd.read_csv(sys.argv[1])
df.columns = [c.lower() for c in df.columns]
from sqlalchemy import create_engine
engine = create_engine('postgresql://pcawg_admin:pcawg@run-tracking-db.service.consul:5432/germline_genotype_tracking')
df.to_sql("pcawg_samples", engine) | import pandas as pd
import sys
df = pd.read_csv(sys.argv[1])
df.columns = [c.lower() for c in df.columns]
from sqlalchemy import create_engine
engine = create_engine('postgresql://pcawg_admin:pcawg@localhost:5432/germline_genotype_tracking')
df.to_sql("pcawg_samples", engine) | mit | Python |
fedff2e76d8d96f1ea407f7a3a48aa8dc7a7e50a | Make x axis label samples for now, though eventually should have a date option | justinccdev/opensimulator-tools,justinccdev/opensimulator-tools,justinccdev/opensimulator-tools,justinccdev/opensimulator-tools | analysis/opensimulator-stats-analyzer/src/ostagraph.py | analysis/opensimulator-stats-analyzer/src/ostagraph.py | #!/usr/bin/python
import argparse
import matplotlib.pyplot as plt
from pylab import *
from osta.osta import *
############
### MAIN ###
############
parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument(
'--select',
help = "Select the full name of a stat to graph (e.g. \"scene.Keynote 1.RootAgents\")")
parser.add_argument(
'--out',
help = "Path to output the graph rather the interactively display. Filename extension determines graphics type (e.g. \"graph.jpg\")",
default = argparse.SUPPRESS)
parser.add_argument(
'statsLogPath',
help = "Path to the stats log file.",
metavar = "stats-log-path")
opts = parser.parse_args()
osta = Osta()
osta.parse(opts.statsLogPath)
stat = osta.getStat(opts.select)
if not stat == None:
plt.plot(stat['abs']['values'])
plt.title(stat['fullName'])
plt.xlabel("samples")
plt.ylabel(stat['name'])
if 'out' in opts:
savefig(opts.out)
else:
plt.show()
else:
print "No such stat as %s" % (opts.select) | #!/usr/bin/python
import argparse
import matplotlib.pyplot as plt
from pylab import *
from osta.osta import *
############
### MAIN ###
############
parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument(
'--select',
help = "Select the full name of a stat to graph (e.g. \"scene.Keynote 1.RootAgents\")")
parser.add_argument(
'--out',
help = "Path to output the graph rather the interactively display. Filename extension determines graphics type (e.g. \"graph.jpg\")",
default = argparse.SUPPRESS)
parser.add_argument(
'statsLogPath',
help = "Path to the stats log file.",
metavar = "stats-log-path")
opts = parser.parse_args()
osta = Osta()
osta.parse(opts.statsLogPath)
stat = osta.getStat(opts.select)
if not stat == None:
plt.plot(stat['abs']['values'])
plt.title(stat['fullName'])
plt.ylabel(stat['name'])
if 'out' in opts:
savefig(opts.out)
else:
plt.show()
else:
print "No such stat as %s" % (opts.select) | bsd-3-clause | Python |
bf17fdf1ee348dfe7f170f6c4477535bc9c959cc | update software parameters | cytomine/Cytomine-python-datamining,cytomine/Cytomine-python-datamining | cytomine-datamining/algorithms/sldc/examples/with_pyxit/add_software.py | cytomine-datamining/algorithms/sldc/examples/with_pyxit/add_software.py | import os
import tempfile
if __name__ == "__main__":
import cytomine
# Connect to cytomine, edit connection values
cytomine_host = "demo.cytomine.be"
cytomine_public_key = "XXX" # to complete
cytomine_private_key = "XXX" # to complete
id_project = -1 # to complete
# Connection to Cytomine Core
conn = cytomine.Cytomine(
cytomine_host,
cytomine_public_key,
cytomine_private_key,
base_path='/api/',
working_path=os.path.join(tempfile.gettempdir(), "cytomine"),
verbose=True
)
# define software parameter template
software = conn.add_software("Demo_SLDC_Workflow_With_Pyxit", "pyxitSuggestedTermJobService", "ValidateAnnotation")
conn.add_software_parameter("cytomine_id_software", software.id, "Number", 0, True, 1, True)
conn.add_software_parameter("cytomine_id_project", software.id, "Number", 0, True, 100, True)
conn.add_software_parameter("cytomine_id_image", software.id, "Number", 0, True, 200, True)
conn.add_software_parameter("n_jobs", software.id, "Number", 1, True, 300, False)
conn.add_software_parameter("tile_overlap", software.id, "Number", 10, True, 600, False)
conn.add_software_parameter("tile_width", software.id, "Number", 768, True, 700, False)
conn.add_software_parameter("tile_height", software.id, "Number", 768, True, 800, False)
conn.add_software_parameter("pyxit_model_path", software.id, "Number", "", True, 900, False)
conn.add_software_parameter("min_area", software.id, "Number", 500, True, 1100, False)
conn.add_software_parameter("threshold", software.id, "Number", 215, True, 1200, False)
conn.add_software_parameter("rseed", software.id, "Number", 0, True, 1300, False)
conn.add_software_parameter("working_path", software.id, "Number", "", True, 1400, False)
# add software to a given project
addSoftwareProject = conn.add_software_project(id_project, software.id)
| import os
import tempfile
if __name__ == "__main__":
import cytomine
# Connect to cytomine, edit connection values
cytomine_host = "demo.cytomine.be"
cytomine_public_key = "XXX" # to complete
cytomine_private_key = "XXX" # to complete
id_project = -1 # to complete
# Connection to Cytomine Core
conn = cytomine.Cytomine(
cytomine_host,
cytomine_public_key,
cytomine_private_key,
base_path='/api/',
working_path=os.path.join(tempfile.gettempdir(), "cytomine"),
verbose=True
)
# define software parameter template
software = conn.add_software("Demo_SLDC_Workflow_With_Pyxit", "pyxitSuggestedTermJobService", "ValidateAnnotation")
conn.add_software_parameter("cytomine_id_software", software.id, "Number", 0, True, 1, True)
conn.add_software_parameter("cytomine_id_project", software.id, "Number", 0, True, 100, True)
conn.add_software_parameter("cytomine_id_image", software.id, "Number", 0, True, 200, True)
conn.add_software_parameter("n_jobs", software.id, "Number", 1, True, 300, False)
conn.add_software_parameter("min_area", software.id, "Number", 12, True, 400, False)
conn.add_software_parameter("threshold", software.id, "Number", 140, True, 500, False)
conn.add_software_parameter("sldc_tile_overlap", software.id, "Number", 10, True, 600, False)
conn.add_software_parameter("sldc_tile_width", software.id, "Number", 768, True, 700, False)
conn.add_software_parameter("sldc_tile_height", software.id, "Number", 768, True, 800, False)
conn.add_software_parameter("pyxit_model_path", software.id, "Number", "", True, 900, False)
conn.add_software_parameter("n_jobs", software.id, "Number", 1, True, 1000, False)
conn.add_software_parameter("min_area", software.id, "Number", 500, True, 1100, False)
conn.add_software_parameter("threshold", software.id, "Number", 215, True, 1200, False)
conn.add_software_parameter("rseed", software.id, "Number", 0, True, 1300, False)
conn.add_software_parameter("working_path", software.id, "Number", "", True, 1400, False)
# add software to a given project
addSoftwareProject = conn.add_software_project(id_project, software.id)
| apache-2.0 | Python |
114b3f3403e970943618e7096b0b898b8aa5589f | Remove verbose keywork for pip install | wheeler-microfluidics/microdrop | microdrop/core_plugins/electrode_controller_plugin/on_plugin_install.py | microdrop/core_plugins/electrode_controller_plugin/on_plugin_install.py | from datetime import datetime
import logging
from path_helpers import path
from pip_helpers import install
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info(str(datetime.now()))
requirements_file = path(__file__).parent.joinpath('requirements.txt')
if requirements_file.exists():
logging.info(install(['-U', '-r', requirements_file]))
| from datetime import datetime
import logging
from path_helpers import path
from pip_helpers import install
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info(str(datetime.now()))
requirements_file = path(__file__).parent.joinpath('requirements.txt')
if requirements_file.exists():
logging.info(install(['-U', '-r', requirements_file], verbose=True)) | bsd-3-clause | Python |
b2664b825c5be2b0582ff755df421c5d026947b8 | Update LearnLockerConnection.py | GeneralizedLearningUtilities/SuperGLU,GeneralizedLearningUtilities/SuperGLU,GeneralizedLearningUtilities/SuperGLU,GeneralizedLearningUtilities/SuperGLU,GeneralizedLearningUtilities/SuperGLU | python_module/SuperGLU/Services/LoggingService/LearnLockerConnection.py | python_module/SuperGLU/Services/LoggingService/LearnLockerConnection.py | '''
Created on May 31, 2018
This service will forward logging messages to LearnLocker as well as log them to a file.
@author: auerbach, Alicia Tsai
'''
from SuperGLU.Core.MessagingGateway import BaseService
from SuperGLU.Services.LoggingService.Constants import XAPI_LOG_VERB
import requests
import uuid
import json
class LearnLockerConnection(BaseService):
def __init__(self, gateway, url, key):
super(LearnLockerConnection, self).__init__(gateway=gateway)
self._url = url
self._key = key
self.logFile = open("log.txt", 'w')
self.errorLog = open("errorLog.txt", "w")
def receiveMessage(self, msg):
super(LearnLockerConnection, self).receiveMessage(msg)
if msg.getVerb() == XAPI_LOG_VERB:
statementAsJson = msg.getResult()
headerDict = {'Authorization' : self._key,
'X-Experience-API-Version': '1.0.3',
'Content-Type' : 'application/json'
}
# --- quick fix for invalid xAPI statement to avoid bad request ----- #
# these should be fixed in xAPI_Learn_Logger
statement = json.loads(statementAsJson)
statement['context']['extensions'] = {}
statement['object']['id'] = "http://example.com/activities/solo-hang-gliding"
statement['actor'].pop('openid', None)
# ------------------------------------------------------
response = requests.put(url=self._url + '/data/xAPI/statements?statementId=' + str(uuid.uuid4()), data=json.dumps(statement), headers=headerDict)
# log bad request message into errorLog file
if str(response) == "<Response [400]>":
print('Warning: ', str(response), response.text)
self.errorLog.write(response.text)
self.errorLog.write("\n")
# write xAPI statement to log file
self.logFile.write(statementAsJson)
self.logFile.write("\n")
| '''
Created on May 31, 2018
This service will forward logging messages to LearnLocker as well as log them to a file.
@author: auerbach
'''
from SuperGLU.Core.MessagingGateway import BaseService
from SuperGLU.Services.LoggingService.Constants import XAPI_LOG_VERB
import requests
import uuid
import json
class LearnLockerConnection(BaseService):
def __init__(self, gateway, url, key):
super(LearnLockerConnection, self).__init__(gateway=gateway)
self._url = url
self._key = key
self.logFile = open("log.txt", 'w')
self.errorLog = open("errorLog.txt", "w")
def receiveMessage(self, msg):
super(LearnLockerConnection, self).receiveMessage(msg)
if msg.getVerb() == XAPI_LOG_VERB:
statementAsJson = msg.getResult()
headerDict = {'Authorization' : self._key,
'X-Experience-API-Version': '1.0.3',
'Content-Type' : 'application/json'
}
# --- quick fix for invalid xAPI statement to avoid bad request ----- #
# these should be fixed in xAPI_Learn_Logger
statement = json.loads(statementAsJson)
statement['context']['extensions'] = {}
statement['object']['id'] = "http://example.com/activities/solo-hang-gliding"
statement['actor'].pop('openid', None)
# ------------------------------------------------------
response = requests.put(url=self._url + '/data/xAPI/statements?statementId=' + str(uuid.uuid4()), data=json.dumps(statement), headers=headerDict)
# log bad request message
if str(response) == "<Response [400]>":
print('Warning: ', str(response), response.text)
self.errorLog.write(response.text)
self.errorLog.write("\n")
# write xAPI statement to log file
self.logFile.write(statementAsJson)
self.logFile.write("\n")
| mit | Python |
b6f94c9684e8d58f0ba79849c9303108a8ac3bbd | Mark this test as XFail while I investigate the issue | apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb | packages/Python/lldbsuite/test/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | packages/Python/lldbsuite/test/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | # TestSwiftPOSysTypes.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
import lldbsuite.test.lldbinline as lldbinline
import lldbsuite.test.lldbtest as lldbtest
lldbinline.MakeInlineTest(__file__, globals(), decorators=[lldbtest.skipUnlessDarwin,lldbtest.expectedFailureDarwin])
| # TestSwiftPOSysTypes.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
import lldbsuite.test.lldbinline as lldbinline
import lldbsuite.test.lldbtest as lldbtest
lldbinline.MakeInlineTest(__file__, globals(), decorators=lldbtest.skipUnlessDarwin)
| apache-2.0 | Python |
c5b7380e46805d8239a0309b06353fe61530e7ec | add docstring to __init__ for module | neurodata/ndio,jhuapl-boss/intern,neurodata/ndio,openconnectome/ndio,neurodata/ndio | ndio/__init__.py | ndio/__init__.py | """
A Python library for open neuroscience data access and manipulation.
"""
version = "1.0.0"
def check_version():
"""
Tells you if you have an old version of ndio.
"""
import requests
r = requests.get('https://pypi.python.org/pypi/ndio/json').json()
r = r['info']['version']
if r != version:
print("A newer version of ndio is available. " +
"'pip install -U ndio' to update.")
return r
| version = "1.0.0"
def check_version():
"""
Tells you if you have an old version of ndio.
"""
import requests
r = requests.get('https://pypi.python.org/pypi/ndio/json').json()
r = r['info']['version']
if r != version:
print("A newer version of ndio is available. " +
"'pip install -U ndio' to update.")
return r
| apache-2.0 | Python |
eb0135828b2384f2242b3e50b244f131c59d8dda | update Mid Devon import script for parl.2017-06-08 | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_mid_devon.py | polling_stations/apps/data_collection/management/commands/import_mid_devon.py | from data_collection.management.commands import BaseXpressDCCsvInconsistentPostcodesImporter
class Command(BaseXpressDCCsvInconsistentPostcodesImporter):
council_id = 'E07000042'
addresses_name = 'parl.2017-06-08/Version 1/Mid Devon Democracy_Club__08June2017-2.tsv'
stations_name = 'parl.2017-06-08/Version 1/Mid Devon Democracy_Club__08June2017-2.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000042'
addresses_name = 'MidDevon_Democracy_Club__04May2017.tsv'
stations_name = 'MidDevon_Democracy_Club__04May2017.tsv'
elections = [
'local.devon.2017-05-04',
'parl.2017-06-08'
]
csv_delimiter = '\t'
| bsd-3-clause | Python |
d5733fe73e91d01f4203742616d8c6a54fd3b91d | add test for histogram ill-defined data | timsnyder/bokeh,schoolie/bokeh,timsnyder/bokeh,timsnyder/bokeh,DuCorey/bokeh,phobson/bokeh,jakirkham/bokeh,stonebig/bokeh,clairetang6/bokeh,schoolie/bokeh,aiguofer/bokeh,draperjames/bokeh,phobson/bokeh,aavanian/bokeh,quasiben/bokeh,mindriot101/bokeh,stonebig/bokeh,Karel-van-de-Plassche/bokeh,draperjames/bokeh,DuCorey/bokeh,ptitjano/bokeh,bokeh/bokeh,aiguofer/bokeh,quasiben/bokeh,rs2/bokeh,aiguofer/bokeh,percyfal/bokeh,justacec/bokeh,percyfal/bokeh,bokeh/bokeh,mindriot101/bokeh,clairetang6/bokeh,jakirkham/bokeh,philippjfr/bokeh,aavanian/bokeh,ericmjl/bokeh,justacec/bokeh,schoolie/bokeh,DuCorey/bokeh,aiguofer/bokeh,bokeh/bokeh,clairetang6/bokeh,percyfal/bokeh,draperjames/bokeh,azjps/bokeh,phobson/bokeh,ptitjano/bokeh,ptitjano/bokeh,Karel-van-de-Plassche/bokeh,bokeh/bokeh,dennisobrien/bokeh,philippjfr/bokeh,jakirkham/bokeh,ericmjl/bokeh,jakirkham/bokeh,DuCorey/bokeh,ericmjl/bokeh,philippjfr/bokeh,quasiben/bokeh,schoolie/bokeh,phobson/bokeh,Karel-van-de-Plassche/bokeh,mindriot101/bokeh,ptitjano/bokeh,aavanian/bokeh,timsnyder/bokeh,aavanian/bokeh,DuCorey/bokeh,timsnyder/bokeh,azjps/bokeh,phobson/bokeh,azjps/bokeh,dennisobrien/bokeh,justacec/bokeh,draperjames/bokeh,azjps/bokeh,justacec/bokeh,stonebig/bokeh,rs2/bokeh,ericmjl/bokeh,schoolie/bokeh,rs2/bokeh,philippjfr/bokeh,Karel-van-de-Plassche/bokeh,ericmjl/bokeh,dennisobrien/bokeh,clairetang6/bokeh,draperjames/bokeh,aavanian/bokeh,dennisobrien/bokeh,dennisobrien/bokeh,stonebig/bokeh,aiguofer/bokeh,bokeh/bokeh,mindriot101/bokeh,jakirkham/bokeh,rs2/bokeh,percyfal/bokeh,Karel-van-de-Plassche/bokeh,ptitjano/bokeh,rs2/bokeh,philippjfr/bokeh,percyfal/bokeh,azjps/bokeh | bokeh/charts/tests/test_stats.py | bokeh/charts/tests/test_stats.py | import pytest
from bokeh.charts.stats import Bins, Histogram
from bokeh.models import ColumnDataSource
import pandas as pd
@pytest.fixture
def ds(test_data):
return ColumnDataSource(test_data.auto_data)
def test_explicit_bin_count(ds):
b = Bins(source=ds, column='mpg', bin_count=2)
assert len(b.bins) == 2
def test_auto_bin_count(ds):
b = Bins(source=ds, column='mpg')
assert len(b.bins) == 12
# this should test it still matches
# http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram
# with iterables with the same value
b = Bins(values=[5,5,5,5,5], bin_count=None)
assert len(b.bins) == 3
def test_bin_labeling(ds):
Bins(source=ds, column='cyl', bin_count=2)
assert len(pd.Series(ds.data['cyl_bin']).drop_duplicates()) == 2
def test_histogram_wo_density():
values = list(range(10))
h = Histogram(values=values, bin_count=3)
assert len(h.bins) == 3
assert [b.label[0] for b in h.bins] == ['[0.0, 3.0]', '(3.0, 6.0]', '(6.0, 9.0]']
assert [b.values[0] for b in h.bins] == [3, 3, 4]
def test_histogram_w_density():
values = list(range(10))
h = Histogram(values=values, bin_count=3, density=True)
assert len(h.bins) == 3
assert [b.label[0] for b in h.bins] == ['[0.0, 3.0]', '(3.0, 6.0]', '(6.0, 9.0]']
assert [b.values[0] for b in h.bins] == [0.1, 0.1, 0.13333333333333333]
def test_histogram_ill_defined_data():
# See e.g. #3660
for x in (-21, -0.001, 0, 0.001, 21):
values = [x, x]
h = Histogram(values=values)
assert len(h.bins) <= 3
assert len(h.bins) >= 1
assert sum([b.value for b in h.bins]) == 2
| import pytest
from bokeh.charts.stats import Bins, Histogram
from bokeh.models import ColumnDataSource
import pandas as pd
@pytest.fixture
def ds(test_data):
return ColumnDataSource(test_data.auto_data)
def test_explicit_bin_count(ds):
b = Bins(source=ds, column='mpg', bin_count=2)
assert len(b.bins) == 2
def test_auto_bin_count(ds):
b = Bins(source=ds, column='mpg')
assert len(b.bins) == 12
# this should test it still matches
# http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram
# with iterables with the same value
b = Bins(values=[5,5,5,5,5], bin_count=None)
assert len(b.bins) == 3
def test_bin_labeling(ds):
Bins(source=ds, column='cyl', bin_count=2)
assert len(pd.Series(ds.data['cyl_bin']).drop_duplicates()) == 2
def test_histogram_wo_density():
values = list(range(10))
h = Histogram(values=values, bin_count=3)
assert len(h.bins) == 3
assert [b.label[0] for b in h.bins] == ['[0.0, 3.0]', '(3.0, 6.0]', '(6.0, 9.0]']
assert [b.values[0] for b in h.bins] == [3, 3, 4]
def test_histogram_w_density():
values = list(range(10))
h = Histogram(values=values, bin_count=3, density=True)
assert len(h.bins) == 3
assert [b.label[0] for b in h.bins] == ['[0.0, 3.0]', '(3.0, 6.0]', '(6.0, 9.0]']
assert [b.values[0] for b in h.bins] == [0.1, 0.1, 0.13333333333333333] | bsd-3-clause | Python |
2eb39ef414d96697e32b168213e4269c957bcba1 | Add 2D maxpool operation | wiseodd/hipsternet,wiseodd/hipsternet | hipsternet/utils.py | hipsternet/utils.py | import numpy as np
def exp_running_avg(running, new, gamma=.9):
return gamma * running + (1. - gamma) * new
def accuracy(y_true, y_pred):
return np.mean(y_pred == y_true)
def onehot(labels):
y = np.zeros([labels.size, np.max(labels) + 1])
y[range(labels.size), labels] = 1.
return y
def conv_2d(X, kernel, stride=1, padding=1):
if not is_square(X) or not is_square(kernel):
raise Exception('Image and kernel must be a square matrix!')
X_pad = zeropad_image(X, padding)
m = X.shape[0]
w = X_pad.shape[0]
k = kernel.shape[0]
out_dim = (m - k + 2 * padding) / stride + 1
if not out_dim.is_integer():
raise Exception('Convolution parameters invalid! Please check the input, kernel, stride, and padding size!')
out = np.zeros(shape=[out_dim, out_dim])
for i, ii in enumerate(range(0, w - k + 1, stride)):
for j, jj in enumerate(range(0, w - k + 1, stride)):
out[i, j] = np.sum(X_pad[ii:ii + k, jj:jj + k] * kernel)
return out
def maxpool_2d(X, k=2, stride=2):
if not is_square(X):
raise Exception('Image must be a square matrix!')
m = X.shape[0]
out_dim = (m - k) / stride + 1
out = np.zeros(shape=[out_dim, out_dim])
cache = np.zeros_like(X, dtype=bool)
for i, ii in enumerate(range(0, m - k + 1, stride)):
for j, jj in enumerate(range(0, m - k + 1, stride)):
patch = X[ii:ii + k, jj:jj + k]
x, y = np.unravel_index(np.argmax(patch), patch.shape)
out[i, j] = patch[x, y]
cache[ii + x, jj + y] = 1
return out, cache
def zeropad_image(X, pad=1):
m, n = X.shape
X_pad = np.zeros(shape=[m + 2 * pad, n + 2 * pad])
X_pad[pad:m + pad, pad:n + pad] = X
return X_pad
def is_square(X):
return X.shape[0] == X.shape[1]
| import numpy as np
def exp_running_avg(running, new, gamma=.9):
return gamma * running + (1. - gamma) * new
def accuracy(y_true, y_pred):
return np.mean(y_pred == y_true)
def onehot(labels):
y = np.zeros([labels.size, np.max(labels) + 1])
y[range(labels.size), labels] = 1.
return y
def conv_2d(X, kernel, stride=1, padding=1):
if not is_square(X) or not is_square(kernel):
raise Exception('Image and kernel must be a square matrix!')
X_pad = zeropad_image(X, padding)
m = X.shape[0]
w = X_pad.shape[0]
k = kernel.shape[0]
out_dim = (m - k + 2 * padding) / stride + 1
if not out_dim.is_integer():
raise Exception('Convolution parameters invalid! Please check the input, kernel, stride, and padding size!')
out = np.zeros(shape=[out_dim, out_dim])
for i, ii in enumerate(range(0, w - k + 1, stride)):
for j, jj in enumerate(range(0, w - k + 1, stride)):
out[i, j] = np.sum(X_pad[ii:ii + k, jj:jj + k] * kernel)
return out
def zeropad_image(X, pad=1):
m, n = X.shape
X_pad = np.zeros(shape=[m + 2 * pad, n + 2 * pad])
X_pad[pad:m + pad, pad:n + pad] = X
return X_pad
def is_square(X):
return X.shape[0] == X.shape[1]
| unlicense | Python |
5881829f3ccf5d3fe853e34302b9e38b0ba01d4a | Bump version | futurecolors/django-httplog,anmekin/django-httplog,Crystalnix/django-httplog | httplog/__init__.py | httplog/__init__.py | __version__ = '0.2.0'
| __version__ = '0.1.0' | bsd-3-clause | Python |
35417f9cadf3e4d2f49ea90e28b9a9f62104c23b | Remove commented alternative method for database type detection. | pansapiens/mytardis,pansapiens/mytardis,pansapiens/mytardis,pansapiens/mytardis | tardis/tardis_portal/migrations/0008_string_value_partial_index_postgres.py | tardis/tardis_portal/migrations/0008_string_value_partial_index_postgres.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, connection
from tardis.tardis_portal.models import ExperimentParameter, DatasetParameter, \
DatafileParameter, InstrumentParameter
def _generate_index_migrations():
max_length = 256
if hasattr(connection, 'vendor') and 'postgresql' not in connection.vendor:
return []
string_value_tables = [
ExperimentParameter.objects.model._meta.db_table,
DatasetParameter.objects.model._meta.db_table,
DatafileParameter.objects.model._meta.db_table,
InstrumentParameter.objects.model._meta.db_table,
]
create_template = "CREATE INDEX %s ON %s(string_value) " \
"WHERE char_length(string_value) <= %s;"
operations = []
for table_name in string_value_tables:
index_name = table_name + "_string_value"
ops = [
migrations.RunSQL(
"DROP INDEX IF EXISTS %s;" % index_name,
reverse_sql=create_template % (index_name, table_name, max_length)
),
migrations.RunSQL(
create_template % (index_name, table_name, max_length),
reverse_sql="DROP INDEX IF EXISTS %s;" % index_name
),
]
operations.extend(ops)
return operations
class Migration(migrations.Migration):
dependencies = [
('tardis_portal', '0007_remove_parameter_string_value_index'),
]
operations = _generate_index_migrations()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, connection
from tardis.tardis_portal.models import ExperimentParameter, DatasetParameter, \
DatafileParameter, InstrumentParameter
from tardis import settings
def _generate_index_migrations():
max_length = 256
# if 'postgres' not in settings.DATABASES['default']['ENGINE'].lower():
# return []
if hasattr(connection, 'vendor') and 'postgresql' not in connection.vendor:
return []
string_value_tables = [
ExperimentParameter.objects.model._meta.db_table,
DatasetParameter.objects.model._meta.db_table,
DatafileParameter.objects.model._meta.db_table,
InstrumentParameter.objects.model._meta.db_table,
]
create_template = "CREATE INDEX %s ON %s(string_value) " \
"WHERE char_length(string_value) <= %s;"
operations = []
for table_name in string_value_tables:
index_name = table_name + "_string_value"
ops = [
migrations.RunSQL(
"DROP INDEX IF EXISTS %s;" % index_name,
reverse_sql=create_template % (index_name, table_name, max_length)
),
migrations.RunSQL(
create_template % (index_name, table_name, max_length),
reverse_sql="DROP INDEX IF EXISTS %s;" % index_name
),
]
operations.extend(ops)
return operations
class Migration(migrations.Migration):
dependencies = [
('tardis_portal', '0007_remove_parameter_string_value_index'),
]
operations = _generate_index_migrations()
| bsd-3-clause | Python |
88db4f0b363adfd12c8adaa912b30d3fc316d75c | remove test directory export | peastman/deepchem,lilleswing/deepchem,peastman/deepchem,miaecle/deepchem,deepchem/deepchem,lilleswing/deepchem,lilleswing/deepchem,miaecle/deepchem,miaecle/deepchem,deepchem/deepchem | deepchem/data/__init__.py | deepchem/data/__init__.py | """
Gathers all datasets in one place for convenient imports
"""
# TODO(rbharath): Get rid of * import
from deepchem.data.datasets import pad_features
from deepchem.data.datasets import pad_batch
from deepchem.data.datasets import Dataset
from deepchem.data.datasets import NumpyDataset
from deepchem.data.datasets import DiskDataset
from deepchem.data.datasets import ImageDataset
from deepchem.data.datasets import sparsify_features
from deepchem.data.datasets import densify_features
from deepchem.data.supports import *
from deepchem.data.data_loader import DataLoader
from deepchem.data.data_loader import CSVLoader
from deepchem.data.data_loader import UserCSVLoader
from deepchem.data.data_loader import SDFLoader
from deepchem.data.data_loader import FASTALoader
from deepchem.data.data_loader import ImageLoader
| """
Gathers all datasets in one place for convenient imports
"""
# TODO(rbharath): Get rid of * import
from deepchem.data.datasets import pad_features
from deepchem.data.datasets import pad_batch
from deepchem.data.datasets import Dataset
from deepchem.data.datasets import NumpyDataset
from deepchem.data.datasets import DiskDataset
from deepchem.data.datasets import ImageDataset
from deepchem.data.datasets import sparsify_features
from deepchem.data.datasets import densify_features
from deepchem.data.supports import *
from deepchem.data.data_loader import DataLoader
from deepchem.data.data_loader import CSVLoader
from deepchem.data.data_loader import UserCSVLoader
from deepchem.data.data_loader import SDFLoader
from deepchem.data.data_loader import FASTALoader
from deepchem.data.data_loader import ImageLoader
import deepchem.data.tests
| mit | Python |
a35c2c4e3d332c8ee581608317c4496722fb9b77 | Update wording on path of the image. | us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite | us_ignite/advertising/models.py | us_ignite/advertising/models.py | from django.db import models
from django_extensions.db.fields import (
AutoSlugField, CreationDateTimeField, ModificationDateTimeField)
from us_ignite.advertising import managers
class Advert(models.Model):
PUBLISHED = 1
DRAFT = 2
REMOVED = 3
STATUS_CHOICES = (
(PUBLISHED, 'Published'),
(DRAFT, 'Draft'),
(REMOVED, 'Removed'),
)
name = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='name')
status = models.IntegerField(choices=STATUS_CHOICES, default=DRAFT)
url = models.URLField(max_length=500)
image = models.ImageField(upload_to="featured")
is_featured = models.BooleanField(
default=False, help_text='Marking this Advert as featured will publish'
' it and show it on the site.')
created = CreationDateTimeField()
modified = ModificationDateTimeField()
# managers:
objects = models.Manager()
published = managers.AdvertPublishedManager()
class Meta:
ordering = ('-is_featured', '-created')
def __unicode__(self):
return self.name
| from django.db import models
from django_extensions.db.fields import (
AutoSlugField, CreationDateTimeField, ModificationDateTimeField)
from us_ignite.advertising import managers
class Advert(models.Model):
PUBLISHED = 1
DRAFT = 2
REMOVED = 3
STATUS_CHOICES = (
(PUBLISHED, 'Published'),
(DRAFT, 'Draft'),
(REMOVED, 'Removed'),
)
name = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='name')
status = models.IntegerField(choices=STATUS_CHOICES, default=DRAFT)
url = models.URLField(max_length=500)
image = models.ImageField(upload_to="ads")
is_featured = models.BooleanField(
default=False, help_text='Marking this Advert as featured will publish'
' it and show it on the site.')
created = CreationDateTimeField()
modified = ModificationDateTimeField()
# managers:
objects = models.Manager()
published = managers.AdvertPublishedManager()
class Meta:
ordering = ('-is_featured', '-created')
def __unicode__(self):
return self.name
| bsd-3-clause | Python |
983da7ed60aab7debce772f6e1181f29f1e321ac | fix anonymous user exclusion | Fresnoy/kart,Fresnoy/kart | people/api.py | people/api.py | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from common.api import WebsiteResource
from .models import Artist, Staff, Organization
# django-guardian anonymous user
try:
ANONYMOUS_USER_NAME = settings.ANONYMOUS_USER_NAME
except AttributeError:
ANONYMOUS_USER_NAME = "AnonymousUser"
class UserResource(ModelResource):
class Meta:
queryset = User.objects.exclude(username=ANONYMOUS_USER_NAME) # Exclude anonymous user
detail_uri_name = 'username'
resource_name = 'people/user'
fields = ['username', 'first_name', 'last_name', 'id', ]
filtering = {
'first_name': ALL,
'last_name': ALL
}
def dehydrate(self, bundle):
if hasattr(bundle.obj, 'profile'):
bundle.data['photo'] = bundle.obj.profile.photo
bundle.data['birthdate'] = bundle.obj.profile.birthdate
bundle.data['birthplace'] = bundle.obj.profile.birthplace
bundle.data['cursus'] = bundle.obj.profile.cursus
bundle.data['gender'] = bundle.obj.profile.gender
# Nationality : country code separated by commas
bundle.data['nationality'] = bundle.obj.profile.nationality
bundle.data['homeland_country'] = bundle.obj.profile.homeland_country
bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country
return bundle
class ArtistResource(ModelResource):
class Meta:
queryset = Artist.objects.all()
resource_name = 'people/artist'
filtering = {
'user': ALL_WITH_RELATIONS,
'resource_uri': ALL
}
fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en',
'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile']
websites = fields.ToManyField(WebsiteResource, 'websites', full=True)
user = fields.ForeignKey(UserResource, 'user', full=True)
artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks',
full=False, null=True, use_in=['detail'])
class StaffResource(ModelResource):
class Meta:
queryset = Staff.objects.all()
resource_name = 'people/staff'
fields = ('user',)
user = fields.ForeignKey(UserResource, 'user', full=True)
class OrganizationResource(ModelResource):
class Meta:
queryset = Organization.objects.all()
resource_name = 'people/organization'
| # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from common.api import WebsiteResource
from .models import Artist, Staff, Organization
class UserResource(ModelResource):
class Meta:
queryset = User.objects.exclude(pk=-1) # Exclude anonymous user
detail_uri_name = 'username'
resource_name = 'people/user'
fields = ['username', 'first_name', 'last_name', 'id', ]
filtering = {
'first_name': ALL,
'last_name': ALL
}
def dehydrate(self, bundle):
if hasattr(bundle.obj, 'profile'):
bundle.data['photo'] = bundle.obj.profile.photo
bundle.data['birthdate'] = bundle.obj.profile.birthdate
bundle.data['birthplace'] = bundle.obj.profile.birthplace
bundle.data['cursus'] = bundle.obj.profile.cursus
bundle.data['gender'] = bundle.obj.profile.gender
# Nationality : country code separated by commas
bundle.data['nationality'] = bundle.obj.profile.nationality
bundle.data['homeland_country'] = bundle.obj.profile.homeland_country
bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country
return bundle
class ArtistResource(ModelResource):
class Meta:
queryset = Artist.objects.all()
resource_name = 'people/artist'
filtering = {
'user': ALL_WITH_RELATIONS,
'resource_uri': ALL
}
fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en',
'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile']
websites = fields.ToManyField(WebsiteResource, 'websites', full=True)
user = fields.ForeignKey(UserResource, 'user', full=True)
artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks',
full=False, null=True, use_in=['detail'])
class StaffResource(ModelResource):
class Meta:
queryset = Staff.objects.all()
resource_name = 'people/staff'
fields = ('user',)
user = fields.ForeignKey(UserResource, 'user', full=True)
class OrganizationResource(ModelResource):
class Meta:
queryset = Organization.objects.all()
resource_name = 'people/organization'
| agpl-3.0 | Python |
343dffe048bdbc52ec4c42838bc45cf984bb72cc | Update arlo-snapshot.py | jeffreydwalter/arlo | examples/arlo-snapshot.py | examples/arlo-snapshot.py | from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices.
devices = arlo.GetDevices()
# Filter on device type to only get the cameras.
# This will return an array which includes all of the cameras and their associated metadata.
cameras = [ device for device in devices if device['deviceType'] == 'camera' ]
# Starting recording with a camera.
arlo.StartRecording(cameras[0]['parentId'], cameras[0]['deviceId'], cameras[0]['xCloudId'], cameras[0]['properties']['olsonTimeZone']);
# Wait for 4 seconds while the camera records. (There are probably better ways to do this, but you get the idea.)
time.sleep(4)
# Stop recording.
arlo.StopRecording(cameras[0]['parentId'], cameras[0]['deviceId'], cameras[0]['xCloudId'], cameras[0]['properties']['olsonTimeZone']);
# Take the snapshot.
arlo.TakeSnapshot(cameras[0]['parentId'], cameras[0]['deviceId'], cameras[0]['xCloudId'], cameras[0]['properties']['olsonTimeZone']);
except Exception as e:
print (e)
| from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices.
devices = arlo.GetDevices()
# Filter on device type to only get the basestation.
# This will return an array which includes all of the basestation's associated metadata.
basestation = [ device for device in devices if device['deviceType'] == 'basestation' ]
# Filter on device type to only get the cameras.
# This will return an array which includes all of the cameras and their associated metadata.
cameras = [ device for device in devices if device['deviceType'] == 'camera' ]
# Starting recording with a camera.
arlo.StartRecording(basestation[0]['deviceId'], cameras[0]['deviceId'], basestation[0]['xCloudId'], cameras[0]['properties']['olsonTimeZone']);
# Wait for 4 seconds while the camera records. (There are probably better ways to do this, but you get the idea.)
time.sleep(4)
# Stop recording.
arlo.StopRecording(basestation[0]['deviceId'], cameras[0]['deviceId'], basestation[0]['xCloudId'], cameras[0]['properties']['olsonTimeZone']);
# Take the snapshot.
arlo.TakeSnapshot(basestation[0]['deviceId'], cameras[0]['deviceId'], basestation[0]['xCloudId'], cameras[0]['properties']['olsonTimeZone']);
except Exception as e:
print (e)
| apache-2.0 | Python |
bd7346626568b57878298ff7b26f205afc8a9eec | rename options to config | spaam/svtplay-dl,olof/svtplay-dl,spaam/svtplay-dl,olof/svtplay-dl | lib/svtplay_dl/fetcher/http.py | lib/svtplay_dl/fetcher/http.py | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
from svtplay_dl.utils.output import ETA, progressbar, output
from svtplay_dl.fetcher import VideoRetriever
class HTTP(VideoRetriever):
@property
def name(self):
return "http"
def download(self):
""" Get the stream from HTTP """
self.output_extention = "mp4" # this might be wrong..
data = self.http.request("get", self.url, stream=True)
try:
total_size = data.headers['content-length']
except KeyError:
total_size = 0
total_size = int(total_size)
bytes_so_far = 0
file_d = output(self.output, self.config, "mp4")
if file_d is None:
return
eta = ETA(total_size)
for i in data.iter_content(8192):
bytes_so_far += len(i)
file_d.write(i)
if not self.config.get("silent"):
eta.update(bytes_so_far)
progressbar(total_size, bytes_so_far, ''.join(["ETA: ", str(eta)]))
file_d.close()
self.finished = True
| # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
from svtplay_dl.utils.output import ETA, progressbar, output
from svtplay_dl.fetcher import VideoRetriever
class HTTP(VideoRetriever):
@property
def name(self):
return "http"
def download(self):
""" Get the stream from HTTP """
self.output_extention = "mp4" # this might be wrong..
data = self.http.request("get", self.url, stream=True)
try:
total_size = data.headers['content-length']
except KeyError:
total_size = 0
total_size = int(total_size)
bytes_so_far = 0
file_d = output(self.output, self.config, "mp4")
if file_d is None:
return
eta = ETA(total_size)
for i in data.iter_content(8192):
bytes_so_far += len(i)
file_d.write(i)
if not self.options.get("silent"):
eta.update(bytes_so_far)
progressbar(total_size, bytes_so_far, ''.join(["ETA: ", str(eta)]))
file_d.close()
self.finished = True
| mit | Python |
872899bdc52dfb2b55132a2db847603e31bf53ea | bump version | markope/AutobahnPython,iffy/AutobahnPython,jvdm/AutobahnPython,inirudebwoy/AutobahnPython,dash-dash/AutobahnPython,nucular/AutobahnPython,oberstet/autobahn-python,schoonc/AutobahnPython,crossbario/autobahn-python,crossbario/autobahn-python,bencharb/AutobahnPython,tomwire/AutobahnPython,iffy/AutobahnPython,tavendo/AutobahnPython,Jenselme/AutobahnPython,dash-dash/AutobahnPython,iffy/AutobahnPython,RyanHope/AutobahnPython,Geoion/AutobahnPython,crossbario/autobahn-python,dash-dash/AutobahnPython,oberstet/autobahn-python,dash-dash/AutobahnPython,hzruandd/AutobahnPython,iffy/AutobahnPython,oberstet/autobahn-python,meejah/AutobahnPython,ttimon7/AutobahnPython | autobahn/autobahn/__init__.py | autobahn/autobahn/__init__.py | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__version__ = "0.9.3"
version = __version__ # backward compat.
| ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__version__ = "0.9.2"
version = __version__ # backward compat.
| mit | Python |
3cb14b8bdaf020cc417312516d6bfaf126152823 | Use 1804-style deprovisioning for all versions >= 18.04 (#1483) | rjschwei/WALinuxAgent,rjschwei/WALinuxAgent,Azure/WALinuxAgent,Azure/WALinuxAgent | azurelinuxagent/pa/deprovision/factory.py | azurelinuxagent/pa/deprovision/factory.py | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME
from .default import DeprovisionHandler
from .arch import ArchDeprovisionHandler
from .clearlinux import ClearLinuxDeprovisionHandler
from .coreos import CoreOSDeprovisionHandler
from .ubuntu import UbuntuDeprovisionHandler, Ubuntu1804DeprovisionHandler
from distutils.version import LooseVersion as Version
def get_deprovision_handler(distro_name=DISTRO_NAME,
distro_version=DISTRO_VERSION,
distro_full_name=DISTRO_FULL_NAME):
if distro_name == "arch":
return ArchDeprovisionHandler()
if distro_name == "ubuntu":
if Version(distro_version) >= Version('18.04'):
return Ubuntu1804DeprovisionHandler()
else:
return UbuntuDeprovisionHandler()
if distro_name == "coreos":
return CoreOSDeprovisionHandler()
if "Clear Linux" in distro_full_name:
return ClearLinuxDeprovisionHandler()
return DeprovisionHandler()
| # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME
from .default import DeprovisionHandler
from .arch import ArchDeprovisionHandler
from .clearlinux import ClearLinuxDeprovisionHandler
from .coreos import CoreOSDeprovisionHandler
from .ubuntu import UbuntuDeprovisionHandler, Ubuntu1804DeprovisionHandler
from distutils.version import LooseVersion as Version
def get_deprovision_handler(distro_name=DISTRO_NAME,
distro_version=DISTRO_VERSION,
distro_full_name=DISTRO_FULL_NAME):
if distro_name == "arch":
return ArchDeprovisionHandler()
if distro_name == "ubuntu":
if Version(distro_version) in [Version('18.04')]:
return Ubuntu1804DeprovisionHandler()
else:
return UbuntuDeprovisionHandler()
if distro_name == "coreos":
return CoreOSDeprovisionHandler()
if "Clear Linux" in distro_full_name:
return ClearLinuxDeprovisionHandler()
return DeprovisionHandler()
| apache-2.0 | Python |
1bd7410cc21c6aa9c2ad8fa11fa98d1ad3015f10 | send hls url to hlsparse | dalgr/svtplay-dl,iwconfig/svtplay-dl,olof/svtplay-dl,spaam/svtplay-dl,selepo/svtplay-dl,leakim/svtplay-dl,spaam/svtplay-dl,dalgr/svtplay-dl,iwconfig/svtplay-dl,qnorsten/svtplay-dl,selepo/svtplay-dl,olof/svtplay-dl,leakim/svtplay-dl,leakim/svtplay-dl,qnorsten/svtplay-dl | lib/svtplay_dl/service/dbtv.py | lib/svtplay_dl/service/dbtv.py | from __future__ import absolute_import
import re
import json
import copy
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import urlparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.fetcher.hls import HLS, hlsparse
from svtplay_dl.log import log
class Dbtv(Service, OpenGraphThumbMixin):
supported_domains = ['dbtv.no']
def get(self, options):
data = self.get_urldata()
if self.exclude(options):
return
parse = urlparse(self.url)
vidoid = parse.path[parse.path.rfind("/")+1:]
match = re.search(r'JSONdata = ({.*});', data)
if not match:
log.error("Cant find json data")
return
janson = json.loads(match.group(1))
playlist = janson["playlist"]
for i in playlist:
if i["brightcoveId"] == vidoid:
if i["HLSURL"]:
streams = hlsparse(i["HLSURL"], self.http.get(i["HLSURL"]).text)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n)
for n in i["renditions"]:
if n["container"] == "MP4":
yield HTTP(copy.copy(options), n["URL"], int(n["rate"])/1000)
| from __future__ import absolute_import
import re
import json
import copy
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import urlparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.fetcher.hls import HLS, hlsparse
from svtplay_dl.log import log
class Dbtv(Service, OpenGraphThumbMixin):
supported_domains = ['dbtv.no']
def get(self, options):
data = self.get_urldata()
if self.exclude(options):
return
parse = urlparse(self.url)
vidoid = parse.path[parse.path.rfind("/")+1:]
match = re.search(r'JSONdata = ({.*});', data)
if not match:
log.error("Cant find json data")
return
janson = json.loads(match.group(1))
playlist = janson["playlist"]
for i in playlist:
if i["brightcoveId"] == vidoid:
if i["HLSURL"]:
streams = hlsparse(self.http.get(i["HLSURL"]).text)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n)
for n in i["renditions"]:
if n["container"] == "MP4":
yield HTTP(copy.copy(options), n["URL"], int(n["rate"])/1000)
| mit | Python |
aaa88075d4cf799509584de439f207476e092584 | Use alternate import for RTD. | ohsu-qin/qiprofile-rest,ohsu-qin/qirest | doc/conf.py | doc/conf.py | import os
import sys
try:
import qirest
except ImportError:
# A ReadTheDocs build does not install qirest. In that case,
# load the module directly.
src_dir = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(src_dir)
import qirest
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
autoclass_content = "both"
autodoc_default_flags= ['members', 'show-inheritance']
source_suffix = '.rst'
master_doc = 'index'
project = u'qirest'
copyright = u'2014, OHSU Knight Cancer Institute. This software is not intended for clinical use'
pygments_style = 'sphinx'
htmlhelp_basename = 'qirestdoc'
html_title = "qirest"
def skip(app, what, name, obj, skip, options):
return False if name == "__init__" else skip
def setup(app):
app.connect("autodoc-skip-member", skip)
| import os
import sys
import qirest
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
autoclass_content = "both"
autodoc_default_flags= ['members', 'show-inheritance']
source_suffix = '.rst'
master_doc = 'index'
project = u'qirest'
copyright = u'2014, OHSU Knight Cancer Institute. This software is not intended for clinical use'
pygments_style = 'sphinx'
htmlhelp_basename = 'qirestdoc'
html_title = "qirest"
def skip(app, what, name, obj, skip, options):
return False if name == "__init__" else skip
def setup(app):
app.connect("autodoc-skip-member", skip)
| bsd-2-clause | Python |
3c9d6c6db3d8f8f46047993a1541b92c8a1b1a9b | fix bug | BillBillBillBill/Take-out,BillBillBillBill/Take-out,BillBillBillBill/Take-out,BillBillBillBill/Take-out | backend/takeout/bussiness/models/store.py | backend/takeout/bussiness/models/store.py | # coding: utf-8
from django.db import models
from bussiness.models.seller import Seller
from lib.models.image import ImageStore
from lib.utils.misc import get_timestamp_from_datetime
class Store(models.Model):
BAN_STATUS = (
("Y", "yes"),
("N", "no")
)
name = models.CharField(max_length=20)
address = models.CharField(max_length=200)
phone = models.CharField(max_length=13)
announcement = models.CharField(max_length=200)
description = models.CharField(max_length=200)
is_banned = models.CharField(max_length=1, choices=BAN_STATUS, default="N")
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True, auto_now_add=True)
image_ids = models.CharField(max_length=500, null=True)
owner = models.OneToOneField(Seller, related_name='store')
def to_string(self):
data = {
"id": self.id,
"name": self.name,
"address": self.address,
"phone": self.phone,
"announcement": self.announcement,
"description": self.description,
"owner": self.owner.to_detail_string(),
"order_review_list": []
}
total_star = 0
all_order_reviews = self.order_reviews.all()
for order_review in all_order_reviews:
order_review = order_review.to_string()
data['order_review_list'].append(order_review)
total_star += order_review.get("star", 5)
order_reviews_length = len(all_order_reviews)
if order_reviews_length != 0:
data["average_star"] = total_star / float(len(all_order_reviews))
else:
data["average_star"] = 5
data["total_orders_num"] = len(self.orders.all())
if self.image_ids:
data['images'] = ImageStore.get_by_ids(self.image_ids)
else:
data['images'] = []
return data
def to_detail_string(self):
return self.to_string()
| # coding: utf-8
from django.db import models
from bussiness.models.seller import Seller
from lib.models.image import ImageStore
from lib.utils.misc import get_timestamp_from_datetime
class Store(models.Model):
BAN_STATUS = (
("Y", "yes"),
("N", "no")
)
name = models.CharField(max_length=20)
address = models.CharField(max_length=200)
phone = models.CharField(max_length=13)
announcement = models.CharField(max_length=200)
description = models.CharField(max_length=200)
is_banned = models.CharField(max_length=1, choices=BAN_STATUS, default="N")
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True, auto_now_add=True)
image_ids = models.CharField(max_length=500, null=True)
owner = models.OneToOneField(Seller, related_name='store')
def to_string(self):
data = {
"id": self.id,
"name": self.name,
"address": self.address,
"phone": self.phone,
"announcement": self.announcement,
"description": self.description,
"owner": self.owner.to_detail_string(),
"order_review_list": []
}
total_star = 0
all_order_reviews = self.order_reviews.all()
for order_review in all_order_reviews:
order_review = order_review.to_string()
data['order_review_list'].append(order_review)
total_star += order_review.get("star", 5)
data["average_star"] = total_star / len(all_order_reviews)
data["total_orders_num"] = len(self.orders.all())
if self.image_ids:
data['images'] = ImageStore.get_by_ids(self.image_ids)
else:
data['images'] = []
return data
def to_detail_string(self):
return self.to_string()
| mit | Python |
d9b8943563120a3d3606ffb070318eea1768415c | update reference implementation to reflect new kwargs and python-requests api usage | amlweems/python-paddingoracle,mwielgoszewski/python-paddingoracle | example.py | example.py | # -*- coding: utf-8 -*-
from paddingoracle import BadPaddingException, PaddingOracle
from base64 import b64encode, b64decode
from urllib import quote, unquote
import requests
import socket
import time
class PadBuster(PaddingOracle):
def __init__(self, **kwargs):
super(PadBuster, self).__init__(**kwargs)
self.session = requests.Session()
def oracle(self, data, **kwargs):
somecookie = quote(b64encode(data))
self.session.cookies['somecookie'] = somecookie
while 1:
try:
response = self.session.get('http://www.example.com/',
stream=False, timeout=5, verify=False)
break
except (socket.error, requests.exceptions.RequestException):
time.sleep(2)
continue
self.history.append(response)
if response.ok:
logging.debug('No padding exception raised on %r', somecookie)
return
raise BadPaddingException
if __name__ == '__main__':
import logging
import sys
if not sys.argv[1:]:
print 'Usage: %s <somecookie value>' % (sys.argv[0], )
sys.exit(1)
logging.basicConfig(level=logging.DEBUG)
encrypted_cookie = b64decode(unquote(sys.argv[1]))
padbuster = PadBuster()
cookie = padbuster.decrypt(encrypted_cookie, block_size=8, iv=bytearray(8))
print('Decrypted somecookie: %s => %r' % (sys.argv[1], cookie))
| # -*- coding: utf-8 -*-
from paddingoracle import BadPaddingException, PaddingOracle
from base64 import b64encode, b64decode
from urllib import quote, unquote
import requests
import socket
import time
class PadBuster(PaddingOracle):
def __init__(self, **kwargs):
super(PadBuster, self).__init__(**kwargs)
self.session = requests.session(prefetch=True, timeout=5, verify=False)
def oracle(self, data):
somecookie = quote(b64encode(data))
self.session.cookies['somecookie'] = somecookie
while 1:
try:
response = self.session.get('http://www.example.com/')
break
except (socket.error, requests.exceptions.SSLError):
time.sleep(2)
continue
self.history.append(response)
if response.ok:
logging.debug('No padding exception raised on %r', somecookie)
return
raise BadPaddingException
if __name__ == '__main__':
import logging
import sys
if not sys.argv[1:]:
print 'Usage: %s <somecookie value>' % (sys.argv[0], )
sys.exit(1)
logging.basicConfig(level=logging.DEBUG)
encrypted_cookie = b64decode(unquote(sys.argv[1]))
padbuster = PadBuster()
cookie = padbuster.decrypt(encrypted_cookie, block_size=8, iv=bytearray(8))
print('Decrypted somecookie: %s => %r' % (sys.argv[1], cookie))
| bsd-2-clause | Python |
f85fb38926893643205de6ce05127ca77713b58c | Reorder qfunction | davidrobles/mlnd-capstone-code | capstone/algorithms/qlearning.py | capstone/algorithms/qlearning.py | import random
from capstone.player import RandPlayer
class RandomPolicy(object):
def action(self, env, vf=None, qf=None):
return random.choice(env.actions(env.cur_state()))
class QLearning(object):
def __init__(self, env, policy=RandomPolicy(), qf={}, alpha=0.1,
gamma=0.99, n_episodes=1000):
self.env = env
self.policy = policy
self.qf = qf
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
def max_q_value(self, state):
actions = self.env.actions(state)
if not actions:
return 0
best_value = -100000
for next_action in actions:
temp_value = self.qf.get((state, next_action), random.random() - 0.5)
if temp_value > best_value:
best_value = temp_value
return best_value
def learn(self):
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
while not self.env.is_terminal():
print('Step {}'.format(step))
state = self.env.cur_state()
action = self.policy.action(self.env, self.qf)
reward, next_state = self.env.do_action(action)
max_q_value = self.max_q_value(next_state)
q_value = self.qf.get((state, action), random.random() - 0.5)
update_value = reward + (self.gamma * max_q_value) - q_value
self.qf[(state, action)] = q_value + (self.alpha * update_value)
step += 1
print('Results:')
for (state, action), value in self.qf.iteritems():
print('State:\n\n{}'.format(state))
print('Action:\n\n{}\n'.format(action))
print('Value:\n\n{}'.format(value))
print('*' * 60)
| import random
from capstone.player import RandPlayer
class RandomPolicy(object):
def action(self, env, vf=None, qf=None):
return random.choice(env.actions(env.cur_state()))
class QLearning(object):
def __init__(self, env, policy=RandomPolicy(), qf={}, alpha=0.1,
gamma=0.99, n_episodes=1000):
self.env = env
self.policy = policy
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
self.qf = qf
def max_q_value(self, state):
actions = self.env.actions(state)
if not actions:
return 0
best_value = -100000
for next_action in actions:
temp_value = self.qf.get((state, next_action), random.random() - 0.5)
if temp_value > best_value:
best_value = temp_value
return best_value
def learn(self):
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
while not self.env.is_terminal():
print('Step {}'.format(step))
state = self.env.cur_state()
action = self.policy.action(self.env, self.qf)
reward, next_state = self.env.do_action(action)
max_q_value = self.max_q_value(next_state)
q_value = self.qf.get((state, action), random.random() - 0.5)
update_value = reward + (self.gamma * max_q_value) - q_value
self.qf[(state, action)] = q_value + (self.alpha * update_value)
step += 1
print('Results:')
for (state, action), value in self.qf.iteritems():
print('State:\n\n{}'.format(state))
print('Action:\n\n{}\n'.format(action))
print('Value:\n\n{}'.format(value))
print('*' * 60)
| mit | Python |
5353bb9ea8d0224d02960cdaf50ae94880d17b07 | Update sample | fabaff/python-mystrom | example.py | example.py | """
Copyright (c) 2015-2016 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
import pymystrom
plug = pymystrom.MyStromPlug('10.100.0.137')
# Preserve state
STATE_ON = plug.get_relay_state()
# Switch relay on if the plug is currently off
if not STATE_ON:
print('Relay will be switched on.')
plug.set_relay_on()
# Wait a few seconds to get a reading of the power consumption
print('Waiting for a couple of seconds...')
time.sleep(10)
# Get the new state of the switch
print('Relay state: ', plug.get_relay_state())
print('Power consumption:', plug.get_consumption())
# Switch relay off if it was off.
if not STATE_ON:
plug.set_relay_off()
| """
Copyright (c) 2015-2016 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import time
import pymystrom
plug = pymystrom.MyStromPlug('10.100.0.137')
# Switch relay on
plug.set_relay_on()
# Get the state of the switch
print('Relay state: ', plug.get_relay_state())
print('Power consumption:', plug.get_consumption())
# Switch relay off
time.sleep(10)
plug.set_relay_off()
| mit | Python |
a43c609253c757e31e0b18f9d365d6e6b0fecfe7 | Fix path to example Spark fixture | xq262144/hue,lumig242/Hue-Integration-with-CDAP,Peddle/hue,cloudera/hue,lumig242/Hue-Integration-with-CDAP,Peddle/hue,MobinRanjbar/hue,cloudera/hue,xq262144/hue,cloudera/hue,fangxingli/hue,kawamon/hue,fangxingli/hue,fangxingli/hue,todaychi/hue,kawamon/hue,MobinRanjbar/hue,Peddle/hue,cloudera/hue,cloudera/hue,kawamon/hue,kawamon/hue,jjmleiro/hue,jayceyxc/hue,todaychi/hue,jjmleiro/hue,kawamon/hue,xq262144/hue,xq262144/hue,cloudera/hue,xq262144/hue,cloudera/hue,cloudera/hue,lumig242/Hue-Integration-with-CDAP,todaychi/hue,xq262144/hue,Peddle/hue,cloudera/hue,jayceyxc/hue,jjmleiro/hue,lumig242/Hue-Integration-with-CDAP,kawamon/hue,Peddle/hue,Peddle/hue,jayceyxc/hue,jayceyxc/hue,MobinRanjbar/hue,fangxingli/hue,jayceyxc/hue,kawamon/hue,jayceyxc/hue,lumig242/Hue-Integration-with-CDAP,MobinRanjbar/hue,jjmleiro/hue,MobinRanjbar/hue,fangxingli/hue,kawamon/hue,kawamon/hue,Peddle/hue,jayceyxc/hue,jjmleiro/hue,lumig242/Hue-Integration-with-CDAP,jjmleiro/hue,todaychi/hue,kawamon/hue,todaychi/hue,cloudera/hue,cloudera/hue,kawamon/hue,MobinRanjbar/hue,kawamon/hue,lumig242/Hue-Integration-with-CDAP,cloudera/hue,cloudera/hue,todaychi/hue,kawamon/hue,fangxingli/hue,cloudera/hue,Peddle/hue,xq262144/hue,cloudera/hue,jjmleiro/hue,cloudera/hue,kawamon/hue,jjmleiro/hue,cloudera/hue,fangxingli/hue,MobinRanjbar/hue,xq262144/hue,kawamon/hue,kawamon/hue,jjmleiro/hue,todaychi/hue,MobinRanjbar/hue,xq262144/hue,kawamon/hue,jayceyxc/hue,todaychi/hue,Peddle/hue,lumig242/Hue-Integration-with-CDAP,jayceyxc/hue,lumig242/Hue-Integration-with-CDAP,fangxingli/hue,kawamon/hue,cloudera/hue,todaychi/hue | desktop/libs/notebook/src/notebook/management/commands/notebook_setup.py | desktop/libs/notebook/src/notebook/management/commands/notebook_setup.py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pwd
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import BaseCommand
from desktop.models import Document, Document2, SAMPLE_USER_OWNERS
from useradmin.models import install_sample_user
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
args = '<user>'
help = 'Install examples but do not overwrite them.'
def handle(self, *args, **options):
if not options.get('user'):
user = User.objects.get(username=pwd.getpwuid(os.getuid()).pw_name)
else:
user = options['user']
if not Document2.objects.filter(type='notebook', owner__username__in=SAMPLE_USER_OWNERS).exists():
install_sample_user()
management.call_command('loaddata', 'initial_notebook_examples.json', verbosity=2)
Document.objects.sync()
from beeswax.management.commands.beeswax_install_examples import Command
app_name = 'beeswax'
Command().handle(app_name=app_name, user=user, tables='tables.json')
| #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pwd
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import BaseCommand
from desktop.models import Document, Document2, SAMPLE_USER_OWNERS
from useradmin.models import install_sample_user
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
args = '<user>'
help = 'Install examples but do not overwrite them.'
def handle(self, *args, **options):
if not options.get('user'):
user = User.objects.get(username=pwd.getpwuid(os.getuid()).pw_name)
else:
user = options['user']
if not Document2.objects.filter(type='notebook', owner__username__in=SAMPLE_USER_OWNERS).exists():
install_sample_user()
management.call_command('loaddata', 'desktop/libs/notebook/src/notebook/fixtures/initial_notebook_examples.json', verbosity=2)
Document.objects.sync()
from beeswax.management.commands.beeswax_install_examples import Command
app_name = 'beeswax'
Command().handle(app_name=app_name, user=user, tables='tables.json')
| apache-2.0 | Python |
61c2f4e61e152cf69593dfc55c987cb7180e1a5f | enable logging by default | delimited0/generals2,toshima/generalsio | example.py | example.py | #!/bin/python
import generals
import logging
logging.basicConfig(level=logging.DEBUG)
# 1v1
g = generals.Generals('your userid', 'your username', '1v1')
# ffa
# g = generals.Generals('your userid', 'your username', 'ffa')
# private game
# g = generals.Generals('your userid', 'your username', 'private', 'your gameid')
# 2v2 game
# g = generals.Generals('your userid', 'your username', 'team')
for update in g.get_updates():
# get position of your general
pi = update['player_index']
y, x = update['generals'][pi]
# move units from general to arbitrary square
for dy, dx in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
if (0 <= y+dy < update['rows'] and 0 <= x+dx < update['cols']
and update['tile_grid'][y+dy][x+dx] != generals.MOUNTAIN):
g.move(y, x, y+dy, x+dx)
break
| #!/bin/python
import generals
# 1v1
g = generals.Generals('your userid', 'your username', '1v1')
# ffa
# g = generals.Generals('your userid', 'your username', 'ffa')
# private game
# g = generals.Generals('your userid', 'your username', 'private', 'your gameid')
# 2v2 game
# g = generals.Generals('your userid', 'your username', 'team')
for update in g.get_updates():
# get position of your general
pi = update['player_index']
y, x = update['generals'][pi]
# move units from general to arbitrary square
for dy, dx in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
if (0 <= y+dy < update['rows'] and 0 <= x+dx < update['cols']
and update['tile_grid'][y+dy][x+dx] != generals.MOUNTAIN):
g.move(y, x, y+dy, x+dx)
break
| epl-1.0 | Python |
2a59f53886af32d735a02eb3e7849b6d1a23f13e | Fix some bugs with the api. | johnbachman/indra,bgyori/indra,johnbachman/indra,johnbachman/belpy,sorgerlab/belpy,johnbachman/belpy,johnbachman/belpy,bgyori/indra,sorgerlab/indra,sorgerlab/belpy,sorgerlab/indra,pvtodorov/indra,bgyori/indra,pvtodorov/indra,johnbachman/indra,pvtodorov/indra,sorgerlab/indra,pvtodorov/indra,sorgerlab/belpy | indra_db_api/api.py | indra_db_api/api.py | from flask import Flask, request, abort, jsonify
from indra.db.util import get_statements_by_gene_role_type
app = Flask(__name__)
@app.route('/statements', methods=['GET'])
def get_statments():
"""Get some statements constrained by query."""
json_req = request.get_json()
if not json_req:
abort(400)
obj = json_req.get('object')
act = json_req.get('action')
sub = json_req.get('subject')
if sub is None and obj is None:
abort(400)
stmts = []
if sub is not None:
stmts = get_statements_by_gene_role_type(agent_id=sub,
role='SUBJECT',
stmt_type=act)
if obj is not None:
stmts = [s for s in stmts if len(s.agent_list()) > 1
and s.agent_list()[1].name == obj]
elif obj is not None:
stmts = get_statements_by_gene_role_type(agent_id=obj,
role='OBJECT',
stmt_type=act)
return jsonify([stmt.to_json() for stmt in stmts])
if __name__ == '__main__':
app.run() | from flask import Flask, request, abort, jsonify
from indra.db.util import get_statements_by_gene_role_type
app = Flask(__name__)
@app.route('/statements', methods=['GET'])
def get_statments():
"""Get some statements constrained by query."""
json_req = request.get_json()
if not json_req:
abort(400)
obj = json_req.get('object')
act = json_req.get('action')
sub = json_req.get('subject')
if sub is None and obj is None:
abort(400)
stmts = []
if sub is not None:
stmts = get_statements_by_gene_role_type(agent_id=sub,
role='SUBJECT',
stmt_type=act)
if obj is not None:
stmts = [s for s in stmts if s.agent_list()[1].name == obj]
elif obj is not None:
stmts = get_statements_by_gene_role_type(agent_id=obj,
role='OBJECT',
stmt_type=act)
return jsonify([stmt.to_json() for stmt in stmts]) | bsd-2-clause | Python |
610eab797018e3047f5973d1fd195862e6bfcd49 | Fix gunicorn_config tests | SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui | swh/web/tests/test_gunicorn_config.py | swh/web/tests/test_gunicorn_config.py | # Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
from unittest.mock import patch
import swh.web.gunicorn_config as gunicorn_config
def test_post_fork_default():
with patch('sentry_sdk.init') as sentry_sdk_init:
gunicorn_config.post_fork(None, None)
sentry_sdk_init.assert_not_called()
def test_post_fork_with_dsn_env():
    """With SWH_SENTRY_DSN set, post_fork() initialises Sentry with defaults."""
    sentinel_integration = object()  # compared by identity in the call args
    with patch('swh.web.gunicorn_config.DjangoIntegration',
               new=lambda: sentinel_integration), \
            patch('sentry_sdk.init') as init_mock, \
            patch.dict(os.environ, {'SWH_SENTRY_DSN': 'test_dsn'}):
        gunicorn_config.post_fork(None, None)
    init_mock.assert_called_once_with(
        dsn='test_dsn',
        environment=None,
        integrations=[sentinel_integration],
        debug=False,
        release=None,
    )
def test_post_fork_debug():
    """SWH_SENTRY_DEBUG=1 must be forwarded as debug=True to sentry_sdk.init."""
    sentinel_integration = object()  # compared by identity in the call args
    env = {'SWH_SENTRY_DSN': 'test_dsn', 'SWH_SENTRY_DEBUG': '1'}
    with patch('swh.web.gunicorn_config.DjangoIntegration',
               new=lambda: sentinel_integration), \
            patch('sentry_sdk.init') as init_mock, \
            patch.dict(os.environ, env):
        gunicorn_config.post_fork(None, None)
    init_mock.assert_called_once_with(
        dsn='test_dsn',
        environment=None,
        integrations=[sentinel_integration],
        debug=True,
        release=None,
    )
| # Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import os
from unittest.mock import patch
import swh.web.gunicorn_config as gunicorn_config
def test_post_fork_default():
    """post_fork() must not touch Sentry when no DSN is configured."""
    with patch('sentry_sdk.init') as mocked_init:
        gunicorn_config.post_fork(None, None)
    assert not mocked_init.called
def test_post_fork_with_dsn_env():
    """Setting SWH_SENTRY_DSN makes post_fork() initialise Sentry."""
    django_integration = object()  # unique object to check for equality
    with patch('swh.web.gunicorn_config.DjangoIntegration',
               new=lambda: django_integration):
        with patch('sentry_sdk.init') as sentry_sdk_init:
            with patch.dict(os.environ, {'SWH_SENTRY_DSN': 'test_dsn'}):
                gunicorn_config.post_fork(None, None)
    sentry_sdk_init.assert_called_once_with(
        dsn='test_dsn',
        # post_fork also forwards the sentry environment (None when
        # SWH_SENTRY_ENVIRONMENT is unset); without this kwarg the
        # assert_called_once_with check fails against the actual call.
        environment=None,
        integrations=[django_integration],
        debug=False,
        release=None,
        )
def test_post_fork_debug():
    """SWH_SENTRY_DEBUG=1 must turn on Sentry debug mode."""
    django_integration = object()  # unique object to check for equality
    with patch('swh.web.gunicorn_config.DjangoIntegration',
               new=lambda: django_integration):
        with patch('sentry_sdk.init') as sentry_sdk_init:
            with patch.dict(os.environ, {'SWH_SENTRY_DSN': 'test_dsn',
                                         'SWH_SENTRY_DEBUG': '1'}):
                gunicorn_config.post_fork(None, None)
    sentry_sdk_init.assert_called_once_with(
        dsn='test_dsn',
        # post_fork also forwards the sentry environment (None when
        # SWH_SENTRY_ENVIRONMENT is unset); the assertion must include it.
        environment=None,
        integrations=[django_integration],
        debug=True,
        release=None,
        )
| agpl-3.0 | Python |
58e3c68924246ce1278e7b65d53b9bbbdc631e03 | Reorder imports | Ghostkeeper/Luna | plugins/configuration/configurationtype/__init__.py | plugins/configuration/configurationtype/__init__.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Provides a way to configure the application to the user's liking, and store this
configuration persistently for the user.
The plug-in registers an API that allows storing of user configuration and later
reading that configuration back.
"""
import configurationtype.configuration #The API for other plug-ins to use configuration with.
import luna.plugins
def metadata():
	"""
	Provides the metadata for the ConfigurationType plug-in.

	This gives human-readable information on the plug-in, dependency
	resolution information, and registers the "configuration" plug-in type
	with the plug-in system.

	:return: Dictionary of metadata.
	"""
	plugin_type = { #This is a "plug-in type" plug-in.
		"type_name": "configuration",
		"api": configurationtype.configuration,
		"validate_metadata": validate_metadata
	}
	return {
		"name": "Configuration Type",
		"description": "Defines a type of plug-in that stores a configuration for the application persistently.",
		"version": 1,
		"dependencies": {},
		"type": plugin_type
	}
def validate_metadata(metadata):
"""
Validates whether the specified metadata is valid for configuration
plug-ins.
Configuration's metadata must have a ``configuration`` field, which must
have a ``name`` entry and an ``instance`` entry. The ``instance`` entry must
implement ``__getattr__``, ``serialise`` and ``deserialise``.
:param metadata: The metadata to validate.
:raises luna.plugins.MetadataValidationError: The metadata was invalid.
"""
if "configuration" not in metadata:
raise luna.plugins.MetadataValidationError("This is not a configuration plug-in.")
try:
if "name" not in metadata["configuration"]:
raise luna.plugins.MetadataValidationError("The configuration plug-in doesn't specify a name.")
if "instance" not in metadata["configuration"]:
raise luna.plugins.MetadataValidationError("The configuration plug-in doesn't specify an instance to keep track of the configuration.")
except TypeError:
raise luna.plugins.MetadataValidationError("The configuration metadata entry is not a dictionary.")
instance_attributes = set(dir(metadata["configuration"]["instance"]))
required_functions = {"__getattr__", "serialise", "deserialise"}
if required_functions > instance_attributes: #Instance is not implementing all required functions.
raise luna.plugins.MetadataValidationError("The configuration instance doesn't implement the required functions {functions}.".format(functions=", ".join(required_functions - instance_attributes))) | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Provides a way to configure the application to the user's liking, and store this
configuration persistently for the user.
The plug-in registers an API that allows storing of user configuration and later
reading that configuration back.
"""
import luna.plugins
import configurationtype.configuration #The API for other plug-ins to use configuration with.
def metadata():
	"""
	Returns the plug-in metadata for the ConfigurationType plug-in.

	The metadata describes the plug-in for humans, lists its dependencies,
	and tells the plug-in system that it introduces the "configuration"
	plug-in type.

	:return: Dictionary of metadata.
	"""
	plugin_type = {} #This is a "plug-in type" plug-in.
	plugin_type["type_name"] = "configuration"
	plugin_type["api"] = configurationtype.configuration
	plugin_type["validate_metadata"] = validate_metadata
	result = {}
	result["name"] = "Configuration Type"
	result["description"] = "Defines a type of plug-in that stores a configuration for the application persistently."
	result["version"] = 1
	result["dependencies"] = {}
	result["type"] = plugin_type
	return result
def validate_metadata(metadata):
"""
Validates whether the specified metadata is valid for configuration
plug-ins.
Configuration's metadata must have a ``configuration`` field, which must
have a ``name`` entry and an ``instance`` entry. The ``instance`` entry must
implement ``__getattr__``, ``serialise`` and ``deserialise``.
:param metadata: The metadata to validate.
:raises luna.plugins.MetadataValidationError: The metadata was invalid.
"""
if "configuration" not in metadata:
raise luna.plugins.MetadataValidationError("This is not a configuration plug-in.")
try:
if "name" not in metadata["configuration"]:
raise luna.plugins.MetadataValidationError("The configuration plug-in doesn't specify a name.")
if "instance" not in metadata["configuration"]:
raise luna.plugins.MetadataValidationError("The configuration plug-in doesn't specify an instance to keep track of the configuration.")
except TypeError:
raise luna.plugins.MetadataValidationError("The configuration metadata entry is not a dictionary.")
instance_attributes = set(dir(metadata["configuration"]["instance"]))
required_functions = {"__getattr__", "serialise", "deserialise"}
if required_functions > instance_attributes: #Instance is not implementing all required functions.
raise luna.plugins.MetadataValidationError("The configuration instance doesn't implement the required functions {functions}.".format(functions=", ".join(required_functions - instance_attributes))) | cc0-1.0 | Python |
4d621d515dfa8daa1a9653a09a94013d0e4f717e | Update __init__.py | wolfiex/ipython-dev-reload,wolfiex/ipython-dev-reload | build/lib/ipyReload/__init__.py | build/lib/ipyReload/__init__.py | import time,os,functools,re
from concurrent.futures import ThreadPoolExecutor
from IPython import get_ipython
ipython = get_ipython()
global future,filename,finish,executor,altfn
altfn=False
def CheckUpdate(x):
    """Block until the file named ``x`` changes, then return its new mtime.

    Publishes the path through the module-global ``filename`` so the
    done-callback knows which file to reload. Polls the modification
    time once per second.
    """
    global filename
    filename = x
    modtime = os.path.getmtime(filename)
    # (removed the unused ``x=0`` local, which clobbered the parameter and
    # was never read)
    while True:
        time.sleep(1)
        if os.path.getmtime(filename) != modtime:
            return os.path.getmtime(filename)
def callback(x):
global filename,finish
if not finish:
print '\033[34m Reloading \033[00m' + filename
try:
if callable(altfn):
altfn()
else:
ipython.magic("run " + filename)
except:None
watch(filename)
def watch (filename,call=False):
    """Start watching ``filename`` and reload it whenever it changes.

    filename -- path of the script to watch; must exist.
    call -- optional callable invoked on each change instead of re-running
            the file with %run.

    Re-invoked from callback() after every reload, so each cycle submits a
    fresh CheckUpdate poller on a fresh single-worker executor.
    """
    global future,finish,executor,altfn
    if not os.path.exists(filename):
        print 'Failed to start: invalid filename'
        return None
    #One worker is enough: there is only ever one CheckUpdate poller.
    executor = ThreadPoolExecutor(max_workers=1)
    finish = False
    future = executor.submit(CheckUpdate,filename)
    if callable(call):
        altfn=call
    #callback() fires when CheckUpdate returns, i.e. when the file changed.
    future.add_done_callback(callback)
    # NOTE(review): each re-arm builds a new ThreadPoolExecutor without
    # shutting down the previous one -- appears to rely on the old worker
    # having already returned. Confirm this does not leak threads.
def kill():
    """Stop the watcher: flag shutdown, nudge the file, release the executor.

    The watcher thread only wakes when the file's mtime changes, so a
    newline is appended to the watched file to force one last CheckUpdate
    return; callback() then sees ``finish`` set and does not re-arm.
    Also turns IPython autocall back off (counterpart of start()).
    """
    global finish,executor
    #Order matters: finish must be set BEFORE the write below, so the
    #triggered callback() sees it and skips the reload/re-arm.
    finish = True
    #add a newline to trigger last update before kill
    with open(filename, "a") as myfile:
        myfile.write("\n")
    executor.shutdown(False) # non-blocking
    print '\033[34m Shutdown invoked \033[00m' + filename
    ipython.magic("autocall 0")
def hist(glob = 'run *'):
    """Pick a command matching ``glob`` from IPython history and evaluate it.

    Shows up to the 7 most recent matching entries, asks for an index on
    stdin and evaluates the chosen entry.
    """
    #Deliberately unmatchable pattern -- presumably only forces a history
    #sync in the current session. TODO confirm.
    ipython.magic("history -g hereisarangompattern]\]';]")
    # HACK: shells out to a fresh ipython to read the persisted history.
    # SECURITY(review): ``glob`` is interpolated into the shell command
    # unescaped (injection risk), and the chosen entry is passed to eval().
    histvar = os.popen("ipython -c 'history -g %s'"%glob).read()
    matches = re.findall(r'\d+/\d+:\s+(.*)\n',histvar)
    matches = [i for i in reversed(matches)][:7] #newest first, capped at 7
    print 'Select command to run from history'
    for i in enumerate(matches):
        print i
    return eval( matches[ int(raw_input('Enter Selection:\n').strip())])
def start():
    """Turn on IPython autocall (counterpart of the "autocall 0" in kill())."""
    ipython.magic("autocall 1")
| import time,os,functools,re
from concurrent.futures import ThreadPoolExecutor
from IPython import get_ipython
ipython = get_ipython()
global future,filename,finish,executor,altfn
altfn=False
def CheckUpdate(x):
    """Poll the file named ``x`` once per second; return its mtime on change.

    Blocks until the modification time differs from the one sampled at
    entry. Also publishes the path through the module-global ``filename``
    for the done-callback.
    """
    global filename
    filename = x
    modtime = os.path.getmtime(filename)
    # Dropped the dead ``x=0`` assignment: it overwrote the parameter and
    # was never read afterwards.
    while True:
        time.sleep(1)
        if os.path.getmtime(filename) != modtime:
            return os.path.getmtime(filename)
def callback(x):
global filename,finish
if not finish:
print '\033[34m Reloading \033[00m' + filename
try:
if callable(altfn):
altfn()
else:
ipython.magic("run " + filename)
except:None
watch(filename)
def watch (filename,call=False):
    """Start watching ``filename``; reload it (or invoke ``call``) on change.

    filename -- path of the script to watch; must exist.
    call -- optional callable used instead of re-running the file.

    Re-invoked from callback() after every reload, so each cycle gets a
    fresh single-worker executor running one CheckUpdate poller.
    """
    global future,finish,executor,altfn
    if not os.path.exists(filename):
        print 'Failed to start: invalid filename'
        return None
    #Single worker: only one CheckUpdate poller exists at a time.
    executor = ThreadPoolExecutor(max_workers=1)
    finish = False
    future = executor.submit(CheckUpdate,filename)
    if callable(call):
        altfn=call
    #callback() fires when CheckUpdate returns, i.e. on a file change.
    future.add_done_callback(callback)
    # NOTE(review): every re-arm creates a new ThreadPoolExecutor without
    # shutting down the previous one -- verify this does not accumulate.
def kill():
    """Stop the watcher and disable autocall.

    The poller only wakes on an mtime change, so a newline is appended to
    the watched file to force one final CheckUpdate return; callback()
    then observes ``finish`` and does not re-arm.
    """
    global finish,executor
    #``finish`` must be set BEFORE the write below so the triggered
    #callback() skips the reload.
    finish = True
    #add a newline to trigger last update before kill
    with open(filename, "a") as myfile:
        myfile.write("\n")
    executor.shutdown(False) # non-blocking
    print '\033[34m Shutdown invoked \033[00m' + filename
    ipython.magic("autocall 0")
def hist(glob = 'run *'):
ipython.magic("history -g hereisarangompattern]\]';]")
histvar = os.popen("ipython -c 'history -g %s'"%glob).read()
matches = re.findall(r'\d+/\d+:\s+(.*)\n',histvar)
matches = [i for i in reversed(matches)][:7]
print 'Select command to run from history'
for i in enumerate(matches):
print i
return eval( matches[ int(input('Enter Selection:\n').strip())])
def start():
    """Turn on IPython autocall (counterpart of the "autocall 0" in kill())."""
    ipython.magic("autocall 1")
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.