commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
11281719a3f1b0e18d986906a312c68c9d84e57f | add missing dependency for tr_barcode | gurneyalex/stock-logistics-barcode,damdam-s/stock-logistics-barcode | tr_barcode/__openerp__.py | tr_barcode/__openerp__.py | # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.tech-receptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
{
'name': 'TR Barcode',
'version': '1.0',
'category': 'Generic Modules',
'description': """
Presentation:
This module adds the menu Barcode used to generate and configuration barcodes.
""",
'author': 'Tech-Receptives Solutions Pvt. Ltd.',
'website': 'http://www.techreceptives.com',
'depends': [
"base",
'stock',
],
'init_xml': [],
'update_xml': [
"tr_barcode_installer.xml",
"tr_barcode_view.xml",
"wizard/tr_barcode_wizard.xml",
"security/ir.model.access.csv",
],
'demo_xml': [],
"images" : ['images/Barcode configuration.png','images/Barcode.png'],
'test': [
],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.tech-receptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
{
'name': 'TR Barcode',
'version': '1.0',
'category': 'Generic Modules',
'description': """
Presentation:
This module adds the menu Barcode used to generate and configuration barcodes.
""",
'author': 'Tech-Receptives Solutions Pvt. Ltd.',
'website': 'http://www.techreceptives.com',
'depends': [
"base",
],
'init_xml': [],
'update_xml': [
"tr_barcode_installer.xml",
"tr_barcode_view.xml",
"wizard/tr_barcode_wizard.xml",
"security/ir.model.access.csv",
],
'demo_xml': [],
"images" : ['images/Barcode configuration.png','images/Barcode.png'],
'test': [
],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
af8b9fdeb144279b35abcf7de9f0538d88ac7a5d | Add field mapping to tuid provider | d120/pyophase,d120/pyophase,d120/pyophase,d120/pyophase | tuid_provider/provider.py | tuid_provider/provider.py | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth_cas.providers import CASProvider
class TUIDAccount(ProviderAccount):
pass
class TUIDProvider(CASProvider):
id = 'tuid' # Choose an identifier for your provider
name = 'TU-ID CAS Provider' # Verbose name of your provider
account_class = TUIDAccount
def extract_uid(self, data):
return str(data[1]['cn'])
def extract_common_fields(self, data):
return dict(username=data[1]['cn'],
email=data[1].get('mail', ''),
first_name=data[1].get('givenName', ''),
last_name=data[1].get('surname', ''), )
providers.registry.register(TUIDProvider)
| from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth_cas.providers import CASProvider
class TUIDAccount(ProviderAccount):
pass
class TUIDProvider(CASProvider):
id = 'tuid' # Choose an identifier for your provider
name = 'TU-ID CAS Provider' # Verbose name of your provider
account_class = TUIDAccount
providers.registry.register(TUIDProvider)
| agpl-3.0 | Python |
7bdfa64277889ac1db6b1bba11829b8e61a2445d | Increment version to 0.4.2 | dgwartney-io/import-io-api-python,dgwartney-io/import-io-api-python | importio2/version.py | importio2/version.py | #
# Copyright 2017 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.4.2'
| #
# Copyright 2017 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.4.1'
| apache-2.0 | Python |
158e3b384a3368c94f3fd4c231bbe688b6628e1a | add use flag for gdbm | sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary | build/use.py | build/use.py | #
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
"""
Provides the build configuration as special dictionaries that directly
export their namespaces.
Should read, or be provided, some sort of configuration information
relative to the build being done. For now, we'll intialize a static
configuration sufficient to build.
@var Use: Set of flags defined for this build, with their boolean status
@type Use: UseClass
@var Arch: Set of architectures defined for this build, with their boolean status
@type Arch: UseClass
"""
class UseClass(dict):
"""
Immutable dictionary
"""
def __init__(self, d):
self.freeze = 0
self.update(d)
self.freeze = 1
def __setitem__(self, key, value):
if self.freeze:
raise TypeError, 'cannot modify immutable dictionary FIXME reference'
dict.__setitem__(self, key, value)
def __getattr__(self, attr):
return self[attr]
Use = UseClass({
'pcre': True,
'gcj': True,
'gnat': False,
'selinux': False,
'pam': True,
'dietlibc': False,
'bootstrap': False,
'python': True, # XXX should this even be an option?
# temporarily disabled until we build appropriate packages
'perl': False,
'tcl': False,
'tk': False,
'X': False,
'gtk': False,
'gnome': False,
'kde': False,
'readline': False,
'ssl': False,
'slang': False,
'netpbm': False,
'nptl': False,
'gdbm': False,
})
Arch = UseClass({
'i386': True,
'i486': True,
'i586': True,
'i686': True,
'x86_64': False,
'sparc': False,
'sparc64': False,
'ppc64': False,
'ia64': False,
's390': False,
's390x': False,
})
| #
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
"""
Provides the build configuration as special dictionaries that directly
export their namespaces.
Should read, or be provided, some sort of configuration information
relative to the build being done. For now, we'll intialize a static
configuration sufficient to build.
@var Use: Set of flags defined for this build, with their boolean status
@type Use: UseClass
@var Arch: Set of architectures defined for this build, with their boolean status
@type Arch: UseClass
"""
class UseClass(dict):
"""
Immutable dictionary
"""
def __init__(self, d):
self.freeze = 0
self.update(d)
self.freeze = 1
def __setitem__(self, key, value):
if self.freeze:
raise TypeError, 'cannot modify immutable dictionary FIXME reference'
dict.__setitem__(self, key, value)
def __getattr__(self, attr):
return self[attr]
Use = UseClass({
'pcre': True,
'gcj': True,
'gnat': False,
'selinux': False,
'pam': True,
'dietlibc': False,
'bootstrap': False,
'python': True, # XXX should this even be an option?
# temporarily disabled until we build appropriate packages
'perl': False,
'tcl': False,
'tk': False,
'X': False,
'gtk': False,
'gnome': False,
'kde': False,
'readline': False,
'ssl': False,
'slang': False,
'netpbm': False,
'nptl': False,
})
Arch = UseClass({
'i386': True,
'i486': True,
'i586': True,
'i686': True,
'x86_64': False,
'sparc': False,
'sparc64': False,
'ppc64': False,
'ia64': False,
's390': False,
's390x': False,
})
| apache-2.0 | Python |
a19efd8fdaa4fa2f03b608b7cbac330b2cd53c76 | Reduce tweet period to 300 seconds. | kkwteh/twinyewest | twinsies/onewordtweets.py | twinsies/onewordtweets.py |
import tweepy
import json
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from datetime import datetime
from memory_profiler import profile
import os
CONSUMER_KEY = os.environ['OWT_API_KEY']
CONSUMER_SECRET = os.environ['OWT_API_SECRET']
ACCESS_TOKEN = os.environ['OWT_ACCESS_TOKEN']
ACCESS_TOKEN_SECRET = os.environ['OWT_ACCESS_TOKEN_SECRET']
TWEET_PERIOD_SECONDS = 300
last_updated = {'value': datetime(1999,1,1).timestamp()}
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tweet = None
def on_data(self, data):
if (datetime.now().timestamp() - last_updated['value']) > TWEET_PERIOD_SECONDS:
tweet_dict = json.loads(data)
words = tweet_dict['text'].strip().split() if 'text' in tweet_dict else []
if (len(words) == 2 and words[1].startswith('https') and 'media' in tweet_dict['entities']
and not tweet_dict['possibly_sensitive']):
print('tweet found')
self.tweet = json.loads(data)
if self.tweet:
print('retweeting')
twitter_api().retweet(self.tweet['id'])
self.tweet = None
last_updated['value'] = datetime.now().timestamp()
return True
def on_error(self, status):
print(status)
@profile
def twitter_api():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
return tweepy.API(auth)
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
stream = Stream(auth, l)
letters = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
stream.filter(track=letters, languages=['en'])
|
import tweepy
import json
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from datetime import datetime
from memory_profiler import profile
import os
CONSUMER_KEY = os.environ['OWT_API_KEY']
CONSUMER_SECRET = os.environ['OWT_API_SECRET']
ACCESS_TOKEN = os.environ['OWT_ACCESS_TOKEN']
ACCESS_TOKEN_SECRET = os.environ['OWT_ACCESS_TOKEN_SECRET']
last_updated = {'value': datetime(1999,1,1).timestamp()}
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tweet = None
def on_data(self, data):
if (datetime.now().timestamp() - last_updated['value']) > 900:
tweet_dict = json.loads(data)
words = tweet_dict['text'].strip().split() if 'text' in tweet_dict else []
if (len(words) == 2 and words[1].startswith('https') and 'media' in tweet_dict['entities']
and not tweet_dict['possibly_sensitive']):
print('tweet found')
self.tweet = json.loads(data)
if self.tweet:
print('retweeting')
twitter_api().retweet(self.tweet['id'])
self.tweet = None
last_updated['value'] = datetime.now().timestamp()
return True
def on_error(self, status):
print(status)
@profile
def twitter_api():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
return tweepy.API(auth)
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
stream = Stream(auth, l)
letters = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
stream.filter(track=letters, languages=['en'])
| mit | Python |
fe0847f11b6aac70013eaf4aaf4360dad3e18892 | Replace if-statement in cc_upload for phone check with str.replace() | deadlyraptor/reels | cc_upload.py | cc_upload.py | import os
import requests
import xlrd
import constantcontact as cc
from credentials import general_list_id, after_hours_list_id
# Assumes directory with the workbook is relative to script's location.
directory = 'workbooks/'
workbook = ''
for dirpath, dirnames, filenames in os.walk(directory):
for files in filenames:
workbook = (dirpath + files)
# Preps the workbook.
wb = xlrd.open_workbook(workbook)
sh = wb.sheet_by_index(0)
total_rows = sh.nrows
first_row = 6 # skips the first six rows as they are irrelevant.
# These lists will hold customer data in dictionaries.
general_contacts = []
after_hours_contacts = []
# This creates a list with all of the After Hours films in the workbook.
films = []
weeks = int(input('How many weeks are in the workbook? '))
for week in range(0, weeks):
film = input('The After Hours film for week {} was: '.format(week + 1))
films.append(film)
def append_contacts(contacts):
'''Populates the contact and address dictionaries and then appends them to
a contacts list.
Arguments:
contacts = The list the dictionary will be appended to.
'''
contact['email_addresses'] = [row_values[3]]
contact['first_name'] = row_values[2].title()
contact['last_name'] = row_values[1].title()
contact['home_phone'] = row_values[16].replace('No Primary Phone', '')
contact['addresses'] = [address]
address['line1'] = row_values[11]
address['line2'] = row_values[12]
address['city'] = row_values[13].title()
address['state_code'] = row_values[14]
address['postal_code'] = row_values[15]
contacts.append(contact)
# Loops over the workbook and appends the dictionaries created by calling
# append_contacts into the corresponding lists.
for row in range(first_row, total_rows):
contact = {}
address = {}
row_values = sh.row_values(row)
opt_in = row_values[5]
film_title = row_values[20]
if not opt_in:
continue
elif opt_in and film_title in films:
append_contacts(after_hours_contacts)
elif opt_in and film_title not in films:
append_contacts(general_contacts)
# General list.
general_payload = cc.create_payload(general_contacts, [general_list_id])
activity = cc.add_contacts(general_payload)
status_report = cc.get_status(activity)
cc.poll_activity(status_report)
# After Hours list.
after_hours_payload = cc.create_payload(after_hours_contacts,
[after_hours_list_id])
activity = cc.add_contacts(after_hours_payload)
status_report = cc.get_status(activity)
cc.poll_activity(status_report)
| import os
import requests
import xlrd
import constantcontact as cc
from credentials import general_list_id, after_hours_list_id
# Assumes directory with the workbook is relative to script's location.
directory = 'workbooks/'
workbook = ''
for dirpath, dirnames, filenames in os.walk(directory):
for files in filenames:
workbook = (dirpath + files)
# Preps the workbook.
wb = xlrd.open_workbook(workbook)
sh = wb.sheet_by_index(0)
total_rows = sh.nrows
first_row = 6 # skips the first six rows as they are irrelevant.
# These lists will hold customer data in dictionaries.
general_contacts = []
after_hours_contacts = []
# This creates a list with all of the After Hours films in the workbook.
films = []
weeks = int(input('How many weeks are in the workbook? '))
for week in range(0, weeks):
film = input('The After Hours film for week {} was: '.format(week + 1))
films.append(film)
def append_contacts(contacts):
'''Populates the contact and address dictionaries and then appends them to
a contacts list.
Arguments:
contacts = The list the dictionary will be appended to.
'''
contact['email_addresses'] = [row_values[3]]
contact['first_name'] = row_values[2].title()
contact['last_name'] = row_values[1].title()
if row_values[16] == 'No Primary Phone':
contact['home_phone'] = ''
else:
contact['home_phone'] = row_values[16]
contact['addresses'] = [address]
address['line1'] = row_values[11]
address['line2'] = row_values[12]
address['city'] = row_values[13].title()
address['state_code'] = row_values[14]
address['postal_code'] = row_values[15]
contacts.append(contact)
# Loops over the workbook and appends the dictionaries created by calling
# append_contacts into the corresponding lists.
for row in range(first_row, total_rows):
contact = {}
address = {}
row_values = sh.row_values(row)
opt_in = row_values[5]
film_title = row_values[20]
if not opt_in:
continue
elif opt_in and film_title in films:
append_contacts(after_hours_contacts)
elif opt_in and film_title not in films:
append_contacts(general_contacts)
# General list.
general_payload = cc.create_payload(general_contacts, [general_list_id])
activity = cc.add_contacts(general_payload)
status_report = cc.get_status(activity)
cc.poll_activity(status_report)
# After Hours list.
after_hours_payload = cc.create_payload(after_hours_contacts,
[after_hours_list_id])
activity = cc.add_contacts(after_hours_payload)
status_report = cc.get_status(activity)
cc.poll_activity(status_report)
| mit | Python |
b7836720994530678875a724be9a6742c9a3dd02 | patch manage.py for eventlet | mbanje/ureport_uganda,unicefuganda/ureport,unicefuganda/ureport,mbanje/ureport_uganda,unicefuganda/ureport | ureport_project/manage.py | ureport_project/manage.py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.core.management import execute_manager
import settings
import eventlet
import eventlet.debug
import os
#patch for eventlet
os.environ["EVENTLET_NOPATCH"] = 'True'
eventlet.monkey_patch()
eventlet.debug.hub_prevent_multiple_readers(False)
if __name__ == "__main__":
execute_manager(settings)
| #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.core.management import execute_manager
import settings
if __name__ == "__main__":
execute_manager(settings)
| bsd-3-clause | Python |
2c62a20305117b8d9b6c98ad048dea48c9b52efc | use python built in for updating a dict | revsys/django-fack,howiworkdaily/django-faq,revsys/django-fack | faq/views.py | faq/views.py | from django.views.generic.list_detail import object_detail, object_list
from models import Question
import datetime
def question_detail(request, slug, template_name='faq/question_detail.html', extra_context={}):
"""
Displays an invidual question.
"""
return object_detail(
request,
template_name = template_name,
extra_context = extra_context,
slug = slug,
slug_field = 'slug',
queryset = Question.objects.active(),
)
def question_list( request, template_name='faq/question_list.html', extra_context={}):
'''
Displays a list of all the questions.
'''
#NOTE:
#this below is NOT NEEDED really so I would remove but I think it's a good example
#for people to see how they could "extend" their existing extra_context using a parameter value to
#allow developers to make their app more reusable
#we set the below dict value and then allow the user to pass along their own
#if they we then populate the user supplied extra_context using the update method
extra = { 'created_on': datetime.datetime.now() }
extra.update( extra_context )
return object_list(
request,
template_name = template_name,
extra_context = extra,
queryset = Question.objects.active(),
)
| from django.views.generic.list_detail import object_detail, object_list
from models import Question
import datetime
def dict_to_dict(origin, new):
"""
A utility method to add items from one dictionary to an already existing dictionary.
Primary use is for extra_context population.
"""
for key, value in new.items():
if callable(value):
origin[key] = value()
else:
origin[key] = value
def question_detail(request, slug, template_name='faq/question_detail.html', extra_context={}):
"""
Displays an invidual question.
"""
return object_detail(
request,
template_name = template_name,
extra_context = extra_context,
slug = slug,
slug_field = 'slug',
queryset = Question.objects.active(),
)
def question_list( request, template_name='faq/question_list.html', extra_context={}):
'''
Displays a list of all the questions.
'''
#NOTE:
#this below is NOT NEEDED really so I would remove but I think it's a good example
#for people to see how they could "extend" their existing extra_context using a parameter value to
#allow developers to make their app more reusable
#we set the below dict value and then allow the user to pass along their own
#if they we then populate the user supplied extra_context using the dict_to_dict method
extra = { 'created_on': datetime.datetime.now() }
if extra_context:
dict_to_dict( extra, extra_context )
return object_list(
request,
template_name = template_name,
extra_context = extra,
queryset = Question.objects.active(),
)
| bsd-3-clause | Python |
506a5821b57672a11d4aaf03735a8ce3a72a92f8 | save command | PJB3005/MoMMI,PJB3005/MoMMI,PJB3005/MoMMI | MoMMI/Modules/bot_administration.py | MoMMI/Modules/bot_administration.py | import asyncio
from typing import Match
import aiohttp
from discord import Message
from MoMMI.commands import command
from MoMMI.master import master
from MoMMI.server import MChannel
from MoMMI.role import MRoleType
@command("reload", "reload", roles=[MRoleType.OWNER])
async def reload(channel: MChannel, match: Match, message: Message):
errors = await master.reload_modules()
if errors:
await master.client.add_reaction(message, "🤒")
else:
await master.client.add_reaction(message, "👌")
@command("modules", "modules", roles=[MRoleType.OWNER])
async def modules(channel: MChannel, match: Match, message: Message):
msg = "```"
for module in channel.server.master.modules.values():
msg += f"{module.name}:\n"
for handler in module.handlers.values():
msg += f"* {handler.name} ({type(handler)})\n"
msg += "```"
await channel.send(msg)
@command("shutdown", "shutdown", roles=[MRoleType.OWNER])
async def shutdown_command(channel: MChannel, match: Match, message: Message):
await channel.send("Shutting down!")
# Ensure future instead of awaiting to prevent code calling us exploding.
asyncio.ensure_future(channel.server.master.shutdown())
@command("name", r"name\s+(.+)", roles=[MRoleType.OWNER])
async def name_command(channel: MChannel, match: Match, message: Message):
await master.client.edit_profile(username=match.group(1))
@command("nick", r"nick\s+(.+)", roles=[MRoleType.OWNER])
async def nick_command(channel: MChannel, match: Match, message: Message):
member = message.server.get_member(master.client.user.id)
await master.client.change_nickname(member, match.group(1))
@command("avatar", r"avatar", roles=[MRoleType.OWNER])
async def avatar_command(channel: MChannel, match: Match, message: Message):
attachment = message.attachments[0]["url"]
async with aiohttp.ClientSession() as session:
async with session.get(attachment) as request:
data = await request.read()
await master.client.edit_profile(avatar=data)
@command("save", r"save", roles=[MRoleType.OWNER])
async def avatar_command(channel: MChannel, match: Match, message: Message):
for server in master.servers.values():
await server.save_all_storages()
| import asyncio
from typing import Match
import aiohttp
from discord import Message
from MoMMI.commands import command
from MoMMI.master import master
from MoMMI.server import MChannel
from MoMMI.role import MRoleType
@command("reload", "reload", roles=[MRoleType.OWNER])
async def reload(channel: MChannel, match: Match, message: Message):
errors = await master.reload_modules()
if errors:
await master.client.add_reaction(message, "🤒")
else:
await master.client.add_reaction(message, "👌")
@command("modules", "modules", roles=[MRoleType.OWNER])
async def modules(channel: MChannel, match: Match, message: Message):
msg = "```"
for module in channel.server.master.modules.values():
msg += f"{module.name}:\n"
for handler in module.handlers.values():
msg += f"* {handler.name} ({type(handler)})\n"
msg += "```"
await channel.send(msg)
@command("shutdown", "shutdown", roles=[MRoleType.OWNER])
async def shutdown_command(channel: MChannel, match: Match, message: Message):
await channel.send("Shutting down!")
# Ensure future instead of awaiting to prevent code calling us exploding.
asyncio.ensure_future(channel.server.master.shutdown())
@command("name", r"name\s+(.+)", roles=[MRoleType.OWNER])
async def name_command(channel: MChannel, match: Match, message: Message):
await master.client.edit_profile(username=match.group(1))
@command("nick", r"nick\s+(.+)", roles=[MRoleType.OWNER])
async def nick_command(channel: MChannel, match: Match, message: Message):
member = message.server.get_member(master.client.user.id)
await master.client.change_nickname(member, match.group(1))
@command("avatar", r"avatar", roles=[MRoleType.OWNER])
async def avatar_command(channel: MChannel, match: Match, message: Message):
attachment = message.attachments[0]["url"]
async with aiohttp.ClientSession() as session:
async with session.get(attachment) as request:
data = await request.read()
await master.client.edit_profile(avatar=data)
| mit | Python |
8f0a4fd16a9365aa2de9b75583cc9200860e2ed1 | update secret_key | zheng-zy/flask_weixin,zheng-zy/flask_weixin,zheng-zy/flask_weixin,zheng-zy/flask_weixin | __init__.py | __init__.py | #!usr/bin/env python
# coding=utf-8
# Created by zhezhiyong@163.com on 2017/3/22.
import logging
from flask import Flask
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
logger = logging.getLogger()
app = Flask(__name__)
app.config['WECHAT_APPID'] = 'wx3b602e650c2c8dda'
app.config['WECHAT_SECRET'] = '12e75aabd90ab2e034941f61f0c8d0aa'
app.config['WECHAT_TOKEN'] = 'token'
app.config['DEBUG'] = True
app.secret_key = '9c66bc144457b9ba8c63f17f4ad93356' # AppSecret
| #!usr/bin/env python
# coding=utf-8
# Created by zhezhiyong@163.com on 2017/3/22.
import logging
from flask import Flask
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
logger = logging.getLogger()
app = Flask(__name__)
app.config['WECHAT_APPID'] = 'wx3b602e650c2c8dda'
app.config['WECHAT_SECRET'] = '12e75aabd90ab2e034941f61f0c8d0aa'
app.config['WECHAT_TOKEN'] = 'token'
app.config['DEBUG'] = True
app.secret_key = 'QCgz5h5kaixpc7Kb5gYyx7JXwYFRQlf439Bp9us4zZW' # AppSecret
| apache-2.0 | Python |
2be45cb0eb4e92543b5c3975796f73f96f059aa8 | return data | eivl/fsfflan-innslipp,eivl/fsfflan-innslipp,eivl/fsfflan-innslipp | __init__.py | __init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*
from flask import Flask, render_template, send_from_directory
from flask.ext.mysql import MySQL
from mysecret import DATABASE_USER, DATABASE_PASS, DATABASE_HOST
from werkzeug import generate_password_hash, check_password_hash
app = Flask(__name__)
@app.route('/')
def homepage():
return "Hei på deg!, Flask virker som det skal.."
@app.route('/m2')
def m2():
return render_template('main.html')
@app.route('/db')
def db():
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = DATABASE_USER
app.config['MYSQL_DATABASE_PASSWORD'] = DATABASE_PASS
app.config['MYSQL_DATABASE_DB'] = 'opascree_lan-seats'
app.config['MYSQL_DATABASE_HOST'] = DATABASE_HOST
mysql.init_app(app)
conn = mysql.connect()
cursor = conn.cursor()
query_string = "SELECT * from tickets"
cursor.execute(query_string)
data = cursor.fetchall()
db.close()
return data
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*
from flask import Flask, render_template, send_from_directory
from flask.ext.mysql import MySQL
from mysecret import DATABASE_USER, DATABASE_PASS, DATABASE_HOST
from werkzeug import generate_password_hash, check_password_hash
app = Flask(__name__)
@app.route('/')
def homepage():
return "Hei på deg!, Flask virker som det skal.."
@app.route('/m2')
def m2():
return render_template('main.html')
@app.route('/db')
def db():
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = DATABASE_USER
app.config['MYSQL_DATABASE_PASSWORD'] = DATABASE_PASS
app.config['MYSQL_DATABASE_DB'] = 'opascree_lan-seats'
app.config['MYSQL_DATABASE_HOST'] = DATABASE_HOST
mysql.init_app(app)
conn = mysql.connect()
cursor = conn.cursor()
query_string = "SELECT * from tickets"
cursor.execute(query_string)
data = cursor.fetchall()
db.close()
return render_template(data)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
| mit | Python |
d705799b745a2de8ff404476485310a390aea9b2 | Switch to Python 3. | brendancsmith/cohort-facebook,brendancsmith/cohort-facebook | __main__.py | __main__.py | #!/usr/bin/env python3
import facebook
import os
from getpass import getpass
PRINGUS_DINGUS = '1475782379372580'
def get_token():
# Read the FACEBOOK_TOKEN environment variable,
# or ask for it if none is set.
# On Mac OS X, this can be temporarily set via:
# launchctl setenv FACEBOOK_TOKEN <user access token here>
token = os.environ.get('FACEBOOK_TOKEN')
if not token:
token = getpass('Facebook User Access Token: ')
return token
def main():
graph = facebook.GraphAPI(get_token())
pringusDingus = graph.get_object(PRINGUS_DINGUS)
print(pringusDingus['comments'])
if __name__ == '__main__':
main()
| import facebook
import os
from getpass import getpass
PRINGUS_DINGUS = '1475782379372580'
def get_token():
# Read the FACEBOOK_TOKEN environment variable,
# or ask for it if none is set.
# On Mac OS X, this can be temporarily set via:
# launchctl setenv FACEBOOK_TOKEN <user access token here>
token = os.environ.get('FACEBOOK_TOKEN')
if not token:
token = getpass('Facebook User Access Token: ')
return token
def main():
graph = facebook.GraphAPI(get_token())
pringusDingus = graph.get_object(PRINGUS_DINGUS)
print pringusDingus['comments']
if __name__ == '__main__':
main()
| mit | Python |
5cefd2c2b37763fe7756913757d9730a2e092de0 | Update hello.py | amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning,amitsaha/learning | Python/pip_editable/sample/hello.py | Python/pip_editable/sample/hello.py | def main():
return 'Hello called'
| def main():
return 'Hello called again.'
| unlicense | Python |
e9fbd541b3fcb84c6d2de9ba1de53886730a67fa | Add new changes to th branch | nvthanh1/Skypybot | common.py | common.py | import os
def get_project_path():
root_dir = os.path.abspath(os.path.dirname(__file__))
return root_dir
def get_yourproject_path():
root_dir = os.path.abspath(os.path.dirname(__file__))
return root_dir
| import os
def get_project_path():
root_dir = os.path.abspath(os.path.dirname(__file__))
return root_dir | mit | Python |
5ddea6f1280fa2420f5266e01704bceed7ce3afd | update concat.py | inada-s/autocut | concat.py | concat.py | # coding: UTF-8
import os
import subprocess
from datetime import datetime
import sys
def main():
os.chdir(os.path.dirname(os.sys.argv[0]))
print os.getcwd()
movie_list = os.sys.argv[1:]
print movie_list
for i in xrange(len(movie_list)):
movie_list[i] = os.path.abspath(movie_list[i])
with open("concat_tmp.txt", "w") as f:
for movie in movie_list:
print >>f, r"file '%s'" % movie
datename = datetime.now().strftime("%Y%m%d-%H%M%S")
name, ext = os.path.splitext(movie_list[0])
command = [
'ffmpeg',
'-f', 'concat',
'-i', 'concat_tmp.txt',
'-codec', 'copy',
os.path.join(os.path.dirname(movie_list[0]), "concat_" + datename + ext)
]
print command
with open("concat.log", "wb") as logfile:
p = subprocess.call(' '.join(command), stderr=logfile)
print "Press Enter Key To Exit."
raw_input()
if __name__ == '__main__':
main()
#ffmpeg -i concat:"201507120933390_001.ts|201507120933390_002.ts|201507120933390_003.ts|201507120933390_004.ts|201507120933390_005.ts|201507120933390_006.ts|201507120933390_007.ts|201507120933390_008.ts|201507120933390_009.ts|201507120933390_010.ts|201507120933390_011.ts|201507120933390_012.ts|201507120933390_013.ts|201507120933390_014.ts|201507120933390_015.ts|201507120933390_016.ts|201507120933390_017.ts|201507120933390_018.ts|201507120933390_019.ts|201507120933390_020.ts|201507120933390_021.ts|201507120933390_022.ts|201507120933390_023.ts" -c:v copy -c:a copy concat.ts
| # coding: UTF-8
import os
import subprocess
def main():
movie_list = os.sys.argv[1:]
command = [
'ffmpeg', '-i',
r'concat:"' + '|'.join(movie_list) + r'"',
'-c:v', 'copy',
'-c:a', 'copy',
'concat_' + movie_list[0]
]
p = subprocess.call(' '.join(command), shell=True)
if __name__ == '__main__':
main()
#ffmpeg -i concat:"201507120933390_001.ts|201507120933390_002.ts|201507120933390_003.ts|201507120933390_004.ts|201507120933390_005.ts|201507120933390_006.ts|201507120933390_007.ts|201507120933390_008.ts|201507120933390_009.ts|201507120933390_010.ts|201507120933390_011.ts|201507120933390_012.ts|201507120933390_013.ts|201507120933390_014.ts|201507120933390_015.ts|201507120933390_016.ts|201507120933390_017.ts|201507120933390_018.ts|201507120933390_019.ts|201507120933390_020.ts|201507120933390_021.ts|201507120933390_022.ts|201507120933390_023.ts" -c:v copy -c:a copy concat.ts
| mit | Python |
ae0ac504e01667df8729fa062f7806c61c9d2433 | Correct HOME expansion. | graag/lgogwebui,graag/lgogwebui,graag/lgogwebui | config.py | config.py | #!/usr/bin/env python3
import os
#: Path to lgogdownloader config
lgog_config = os.path.expanduser(os.environ.get("LGOG_CONFIG", "~/.config/lgogdownloader"))
#: Path to lgogdownloader cache
lgog_cache = os.path.expanduser(os.environ.get("LGOG_CACHE", "~/.cache/lgogdownloader"))
#: Path to GOG game library
lgog_library = os.path.expanduser(os.environ.get("GOG_DIR", "~/GOG"))
| #!/usr/bin/env python3
import os
#: Path to lgogdownloader config
lgog_config = os.path.realpath(os.environ.get("LGOG_CONFIG", "~/.config/lgogdownloader"))
#: Path to lgogdownloader cache
lgog_cache = os.path.realpath(os.environ.get("LGOG_CACHE", "~/.cache/lgogdownloader"))
#: Path to GOG game library
lgog_library = os.path.realpath(os.environ.get("GOG_DIR", "~/GOG"))
| bsd-2-clause | Python |
30cc6bf4daae62b4c5c7c6f0096820f4877de711 | Fix daemon | PanDAWMS/panda-bigmon-atlas,PanDAWMS/panda-bigmon-atlas,PanDAWMS/panda-bigmon-atlas,PanDAWMS/panda-bigmon-atlas | daemon.py | daemon.py | import argparse
import logging
import logging.handlers
import os
import time
from daemonize import Daemonize
pid = '/var/log/prodtasklog/prodtask-messaging.pid'
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'atlas.settings')
import django
django.setup()
from atlas.messaging.manager import start_processing, start_bunch
from atlas.settings.messaging import TEST_CONFIG, IDDS_PRODUCTION_CONFIG
if args.is_test:
config = TEST_CONFIG['connection']
logger.info('Starting messaging waiting for test')
start_processing(TEST_CONFIG['queue'], 'atlas.special_workflows.views.idds_recive_message', config)
while True:
time.sleep(10)
else:
config = IDDS_PRODUCTION_CONFIG['connection']
logger.info('Starting messaging waiting for idds')
start_bunch(IDDS_PRODUCTION_CONFIG['queue'], 'atlas.special_workflows.views.idds_recive_message', config)
if __name__ == "__main__":
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'atlas.settings')
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/lib')
import django
parser = argparse.ArgumentParser()
parser.add_argument(
'-t',
'--test',
action='store_true',
dest='is_test',
default=False,
help='run on test queue'
)
args = parser.parse_args()
django.setup()
logger = logging.getLogger('prodtask_messaging')
logger.info('Starting the daemon for messaging')
daemon = Daemonize(
app='prodtask messaging daemon',
pid=pid,
action=main,
verbose=True,
logger=logger,
)
daemon.start()
logger.info('The daemon ended gracefully') | import argparse
import logging
import logging.handlers
import os
import time
from daemonize import Daemonize
pid = '/tmp/prodtask-messaging.pid'
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'atlas.settings')
import django
django.setup()
from atlas.messaging.manager import start_processing, start_bunch
from atlas.settings.messaging import TEST_CONFIG, IDDS_PRODUCTION_CONFIG
if args.is_test:
config = TEST_CONFIG['connection']
logger.info('Starting messaging waiting for test')
start_processing(TEST_CONFIG['queue'], 'atlas.special_workflows.views.idds_recive_message', config)
while True:
time.sleep(10)
else:
config = IDDS_PRODUCTION_CONFIG['connection']
logger.info('Starting messaging waiting for idds')
start_bunch(IDDS_PRODUCTION_CONFIG['queue'], 'atlas.special_workflows.views.idds_recive_message', config)
if __name__ == "__main__":
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'atlas.settings')
os.environ.setdefault('LD_LIBRARY_PATH', '/usr/local/lib')
import django
parser = argparse.ArgumentParser()
parser.add_argument(
'-t',
'--test',
action='store_true',
dest='is_test',
default=False,
help='run on test queue'
)
args = parser.parse_args()
django.setup()
logger = logging.getLogger('prodtask_messaging')
logger.info('Starting the daemon for messaging')
daemon = Daemonize(
app='prodtask messaging daemon',
pid=pid,
action=main,
verbose=True,
logger=logger,
)
daemon.start()
logger.info('The daemon ended gracefully') | apache-2.0 | Python |
3fd04af45c5979cf28672d5ad115e256fc702f64 | bump version | andyfangdz/django-asyncmailer,andyfangdz/django-asyncmailer | asyncmailer/__init__.py | asyncmailer/__init__.py | # -*- coding: utf-8 -*-
__version__ = '1.0' # pragma: no cover
| # -*- coding: utf-8 -*-
__version__ = '0.1' # pragma: no cover
| mit | Python |
d483c00875ce59ce4bdcee0119b2d8d3ded959ea | use get_processor in test_vamp_simple_host | Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide | tests/test_vamp_simple_host.py | tests/test_vamp_simple_host.py | #! /usr/bin/env python
import unittest
from unit_timeside import TestRunner
from timeside.core.tools.test_samples import samples
from timeside.core import get_processor
FileDecoder = get_processor('file_decoder')
try:
VampSimpleHost = get_processor('vamp_simple_host')
except:
VampSimpleHost = None
@unittest.skipIf(not VampSimpleHost, 'vamp-simple-host library is not available')
class TestVampsimpleHost(unittest.TestCase):
def setUp(self):
self.analyzer = VampSimpleHost()
def testOnC4_scale(self):
"runs on C4_scale"
self.source = samples["C4_scale.wav"]
def tearDown(self):
decoder = FileDecoder(self.source)
(decoder | self.analyzer).run()
results = self.analyzer.results
print(results.keys())
#print(results)
#print(results.to_yaml())
#print(results.to_json())
#print(results.to_xml())
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
| #! /usr/bin/env python
import unittest
from unit_timeside import TestRunner
from timeside.plugins.decoder.file import FileDecoder
from timeside.core import get_processor
from timeside.core import _WITH_VAMP
from timeside.core.tools.test_samples import samples
@unittest.skipIf(not _WITH_VAMP, 'vamp-simple-host library is not available')
class TestVampsimpleHost(unittest.TestCase):
def setUp(self):
self.analyzer = get_processor('vamp_simple_host')()
def testOnC4_scale(self):
"runs on C4_scale"
self.source = samples["C4_scale.wav"]
def tearDown(self):
decoder = FileDecoder(self.source)
(decoder | self.analyzer).run()
results = self.analyzer.results
print(results.keys())
#print(results)
#print(results.to_yaml())
#print(results.to_json())
#print(results.to_xml())
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
| agpl-3.0 | Python |
42a54411634ef395ca3c2dfbfeefd7cea2d55101 | Update to v3 of the auth0 client for the exception handling | bretth/auth0plus | auth0plus/exceptions.py | auth0plus/exceptions.py |
try:
from auth0.v3.exceptions import Auth0Error
except ImportError: # pragma: no cover
class Auth0Error(Exception):
def __init__(self, status_code, error_code, message):
self.status_code = status_code
self.error_code = error_code
self.message = message
def __str__(self):
return '%s: %s' % (self.status_code, self.message)
class MultipleObjectsReturned(Exception):
pass
class ObjectDoesNotExist(Exception):
pass
class DoesNotExist(ObjectDoesNotExist):
pass
class UnimplementedException(Exception):
pass
|
try:
from auth0.v2.exceptions import Auth0Error
except ImportError: # pragma: no cover
class Auth0Error(Exception):
def __init__(self, status_code, error_code, message):
self.status_code = status_code
self.error_code = error_code
self.message = message
def __str__(self):
return '%s: %s' % (self.status_code, self.message)
class MultipleObjectsReturned(Exception):
pass
class ObjectDoesNotExist(Exception):
pass
class DoesNotExist(ObjectDoesNotExist):
pass
class UnimplementedException(Exception):
pass
| isc | Python |
86258f714ecd3649873f744f60f2d796d304d4c1 | Fix indentation in `query_rfc_is_also()` | lafrenierejm/ietf-cli | ietf/utility/query_is_also.py | ietf/utility/query_is_also.py | from ietf.sql.bcp import Bcp
from ietf.sql.rfc import IsAlso, Rfc
from ietf.utility.query_doc import (query_bcp, query_fyi, query_rfc,
query_std,)
from ietf.xml.enum import DocumentType
def query_bcp_is_also(Session, number):
docs = [] # List of docs to return
rfcs = Session.query(Rfc).join(IsAlso).\
filter(IsAlso.doc_type == DocumentType.BCP).\
filter(IsAlso.doc_id == number).\
all() # Returns a list
docs.extend(rfcs) # Add rfcs to list
for rfc in rfcs:
rfc_aliases = query_rfc_is_also(Session, rfc.id)
for doc in rfc_aliases:
if (not isinstance(doc, Bcp)) or (doc.id != number):
docs.append(doc)
return docs
def query_rfc_is_also(Session, number):
"""Return aliases for RFC `number`."""
# Lookup RFC `number`
rfc = query_rfc(Session, number)
#
aliases = []
if rfc and rfc.is_also:
for alias in rfc.is_also:
alias_type = alias.doc_type
alias_id = alias.doc_id
if alias_type is DocumentType.RFC:
aliases.append(query_rfc(Session, alias_id))
elif alias_type is DocumentType.STD:
aliases.append(query_std(Session, alias_id))
elif alias_type is DocumentType.BCP:
aliases.append(query_bcp(Session, alias_id))
elif alias_type is DocumentType.FYI:
aliases.append(query_fyi(Session, alias_id))
else:
aliases.append(alias)
return aliases
| from ietf.sql.bcp import Bcp
from ietf.sql.rfc import IsAlso, Rfc
from ietf.utility.query_doc import (query_bcp, query_fyi, query_rfc,
query_std,)
from ietf.xml.enum import DocumentType
def query_bcp_is_also(Session, number):
docs = [] # List of docs to return
rfcs = Session.query(Rfc).join(IsAlso).\
filter(IsAlso.doc_type == DocumentType.BCP).\
filter(IsAlso.doc_id == number).\
all() # Returns a list
docs.extend(rfcs) # Add rfcs to list
for rfc in rfcs:
rfc_aliases = query_rfc_is_also(Session, rfc.id)
for doc in rfc_aliases:
if (not isinstance(doc, Bcp)) or (doc.id != number):
docs.append(doc)
return docs
def query_rfc_is_also(Session, number):
"""Return aliases for RFC `number`."""
# Lookup RFC `number`
rfc = query_rfc(Session, number)
#
aliases = []
if rfc and rfc.is_also:
for alias in rfc.is_also:
alias_type = alias.doc_type
alias_id = alias.doc_id
if alias_type is DocumentType.RFC:
aliases.append(query_rfc(Session, alias_id))
elif alias_type is DocumentType.STD:
aliases.append(query_std(Session, alias_id))
elif alias_type is DocumentType.BCP:
aliases.append(query_bcp(Session, alias_id))
elif alias_type is DocumentType.FYI:
aliases.append(query_fyi(Session, alias_id))
else:
aliases.append(alias)
return aliases
| isc | Python |
3a983c2a53dee0c56ba67b248bea2e1cd0cf1cd6 | Make strategy test proper URL (https, %s) | esarafianou/rupture,dimriou/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,esarafianou/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture,dionyziz/rupture,dimkarakostas/rupture,dionyziz/rupture,dimkarakostas/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture | backend/breach/tests.py | backend/breach/tests.py | from django.test import TestCase
from mock import patch
from breach.models import SampleSet, Victim, Target, Round
from breach.strategy import Strategy
from breach.analyzer import decide_next_world_state
@patch('sniffer.Sniffer')
class RuptureTestCase(TestCase):
def setUp(self):
target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='0123456789'
)
self.victim = Victim.objects.create(
target=target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/'
)
round = Round.objects.create(
victim=self.victim,
amount=1,
knownsecret='testsecret',
knownalphabet='01'
)
self.samplesets = [
SampleSet.objects.create(
round=round,
candidatealphabet='0',
data='bigbigbigbigbigbig'
),
SampleSet.objects.create(
round=round,
candidatealphabet='1',
data='small'
)
]
class StrategyTestCase(RuptureTestCase):
@patch('breach.strategy.Sniffer')
def test_first_round(self, Sniffer):
strategy = Strategy(self.victim)
strategy.get_work()
def test_same_round_same_batch(self):
pass
def test_same_round_different_batch(self):
pass
def test_advance_round(self):
pass
class AnalyzerTestCase(RuptureTestCase):
def test_decide(self):
decision = decide_next_world_state(self.samplesets)
state = decision['state']
confidence = decision['confidence']
self.assertEqual(state['knownsecret'], 'testsecret1')
self.assertEqual(state['knownalphabet'], '0123456789')
| from django.test import TestCase
from mock import patch
from breach.models import SampleSet, Victim, Target, Round
from breach.strategy import Strategy
from breach.analyzer import decide_next_world_state
@patch('sniffer.Sniffer')
class RuptureTestCase(TestCase):
def setUp(self):
target = Target.objects.create(
endpoint='http://di.uoa.gr/',
prefix='test',
alphabet='0123456789'
)
self.victim = Victim.objects.create(
target=target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/'
)
round = Round.objects.create(
victim=self.victim,
amount=1,
knownsecret='testsecret',
knownalphabet='01'
)
self.samplesets = [
SampleSet.objects.create(
round=round,
candidatealphabet='0',
data='bigbigbigbigbigbig'
),
SampleSet.objects.create(
round=round,
candidatealphabet='1',
data='small'
)
]
class StrategyTestCase(RuptureTestCase):
@patch('breach.strategy.Sniffer')
def test_first_round(self, Sniffer):
strategy = Strategy(self.victim)
strategy.get_work()
def test_same_round_same_batch(self):
pass
def test_same_round_different_batch(self):
pass
def test_advance_round(self):
pass
class AnalyzerTestCase(RuptureTestCase):
def test_decide(self):
decision = decide_next_world_state(self.samplesets)
state = decision['state']
confidence = decision['confidence']
self.assertEqual(state['knownsecret'], 'testsecret1')
self.assertEqual(state['knownalphabet'], '0123456789')
| mit | Python |
ef9216ba4650d4e04b8c92c7e3d7b49edd1fe92d | Remove deprecated comment in breach views | esarafianou/rupture,dimriou/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dionyziz/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dionyziz/rupture,esarafianou/rupture | backend/breach/views.py | backend/breach/views.py | from django.http import Http404, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from breach.strategy import Strategy
from breach.models import Victim
import json
def get_work(request, victim_id=0):
assert(victim_id)
try:
victim = Victim.objects.get(pk=victim_id)
except:
raise Http404('Victim not found')
strategy = Strategy(victim)
new_work = strategy.get_work()
return JsonResponse(new_work)
@csrf_exempt
def work_completed(request, victim_id=0):
assert(victim_id)
realtime_parameters = json.loads(request.body.decode('utf-8'))
assert('success' in realtime_parameters)
success = realtime_parameters['success']
try:
victim = Victim.objects.get(pk=victim_id)
except:
raise Http404('Victim not found')
strategy = Strategy(victim)
victory = strategy.work_completed(success)
return JsonResponse({
'victory': victory
})
| from django.http import Http404, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from breach.strategy import Strategy
from breach.models import Victim
import json
def get_work(request, victim_id=0):
assert(victim_id)
try:
victim = Victim.objects.get(pk=victim_id)
except:
raise Http404('Victim not found')
strategy = Strategy(victim)
# Example work structure:
# return {'url': 'https://www.dimkarakostas.com/?breach-test',
# 'amount': 10,
# 'timeout': 0}
new_work = strategy.get_work()
return JsonResponse(new_work)
@csrf_exempt
def work_completed(request, victim_id=0):
assert(victim_id)
realtime_parameters = json.loads(request.body.decode('utf-8'))
assert('success' in realtime_parameters)
success = realtime_parameters['success']
try:
victim = Victim.objects.get(pk=victim_id)
except:
raise Http404('Victim not found')
strategy = Strategy(victim)
victory = strategy.work_completed(success)
return JsonResponse({
'victory': victory
})
| mit | Python |
a4fdaefacbceccd63ab41a62ea53cfcbed352bac | Update version.py | CGATOxford/UMI-tools | umi_tools/version.py | umi_tools/version.py | __version__ = "0.5.4"
| __version__ = "0.5.3"
| mit | Python |
d2fbdf9ad14891c68f5590e43c54383658b5a122 | Fix indentation/line continuation problem introduced with style fix | craigbruce/awacs,cloudtools/awacs | tools/gen.py | tools/gen.py | #!/usr/bin/env python
#
# Generate Actions from AWS static configuration
#
import json
import os
import urllib2
from slimit.parser import Parser
from slimit.visitors import nodevisitor
from slimit.visitors.ecmavisitor import ECMAVisitor
from slimit import ast
aws_url = \
"https://awsiamconsole.s3.amazonaws.com/iam/assets/js/bundles/policies.js"
header = """\
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action
"""
basedir = 'generated'
response = urllib2.urlopen(aws_url)
config = response.read()
class JSONVisitor(ECMAVisitor):
def visit_Identifier(self, node):
return '"%s"' % node.value
def visit_Number(self, node):
return '"%s"' % node.value
def visit_UnaryOp(self, node):
s = self.visit(node.value)
if node.op == '!' and s == 0:
return '"true"'
else:
return s
visitor = JSONVisitor()
parser = Parser()
tree = parser.parse(config)
flag = False
policy_editor_config = ""
for node in nodevisitor.visit(tree):
if (isinstance(node, ast.Identifier)
and node.value == 'PolicyEditorConfig'):
flag = True
elif flag:
policy_editor_config = visitor.visit(node)
break
d = json.loads(policy_editor_config)
try:
os.mkdir(basedir)
except OSError:
pass
for serviceName, serviceValue in d['serviceMap'].items():
prefix = serviceValue['StringPrefix']
filename = prefix
# Handle prefix such as "directconnect:"
if prefix[-1] == ':':
filename = prefix[:-1]
filename = ''.join([basedir, "/", filename, ".py"])
with open(filename, "a") as fp:
fp.write(header)
fp.write("service_name = '%s'\n" % (serviceName,))
fp.write("prefix = '%s'\n" % (prefix,))
fp.write("\n")
for action in serviceValue['Actions']:
# Wrap lines for pep8
if len(action) > 25:
format = "%s = \\\n Action(prefix, '%s')\n"
else:
format = "%s = Action(prefix, '%s')\n"
fp.write(format % (action, action))
| #!/usr/bin/env python
#
# Generate Actions from AWS static configuration
#
import json
import os
import urllib2
from slimit.parser import Parser
from slimit.visitors import nodevisitor
from slimit.visitors.ecmavisitor import ECMAVisitor
from slimit import ast
aws_url =
"https://awsiamconsole.s3.amazonaws.com/iam/assets/js/bundles/policies.js"
header = """\
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action
"""
basedir = 'generated'
response = urllib2.urlopen(aws_url)
config = response.read()
class JSONVisitor(ECMAVisitor):
def visit_Identifier(self, node):
return '"%s"' % node.value
def visit_Number(self, node):
return '"%s"' % node.value
def visit_UnaryOp(self, node):
s = self.visit(node.value)
if node.op == '!' and s == 0:
return '"true"'
else:
return s
visitor = JSONVisitor()
parser = Parser()
tree = parser.parse(config)
flag = False
policy_editor_config = ""
for node in nodevisitor.visit(tree):
if (isinstance(node, ast.Identifier)
and node.value == 'PolicyEditorConfig'):
flag = True
elif flag:
policy_editor_config = visitor.visit(node)
break
d = json.loads(policy_editor_config)
try:
os.mkdir(basedir)
except OSError:
pass
for serviceName, serviceValue in d['serviceMap'].items():
prefix = serviceValue['StringPrefix']
filename = prefix
# Handle prefix such as "directconnect:"
if prefix[-1] == ':':
filename = prefix[:-1]
filename = ''.join([basedir, "/", filename, ".py"])
with open(filename, "a") as fp:
fp.write(header)
fp.write("service_name = '%s'\n" % (serviceName,))
fp.write("prefix = '%s'\n" % (prefix,))
fp.write("\n")
for action in serviceValue['Actions']:
# Wrap lines for pep8
if len(action) > 25:
format = "%s = \\\n Action(prefix, '%s')\n"
else:
format = "%s = Action(prefix, '%s')\n"
fp.write(format % (action, action))
| bsd-2-clause | Python |
e1c657ab19990a034eca381c67cf74f4ffe05425 | add newline after transformation | KingPixil/ice,KingPixil/ice | transform.py | transform.py | import png
import src.loader
import src.seed
import src.pixel
# Intro
print("\x1b[36mIce\x1b[0m Transforming Image 💡")
# Seed
seedText = src.seed.generateSeed()
# Read
f = open("somecoolwaterfalls.png", "rb")
r = png.Reader(f)
info = r.read()
width = info[0]
height = info[1]
data = list(info[2])
alpha = info[3]["alpha"]
skip = 4 if alpha else 3
f.close()
# Transform
transformSlope = 2.0 / 255.0
transformYInt = -1.0
secondSlope = 2.0 / float(width + height - 2)
secondYInt = -1.0
pixel = src.pixel.generatePixel()
pixelOperationC1 = pixel[0]
pixelOperationC2 = pixel[1]
pixelOperationC3 = pixel[2]
pixelColor = pixel[3]
for rowIndex, row in enumerate(data):
row = list(map(float, row))
for colIndex in range(0, len(row), skip):
second = ((rowIndex + (colIndex / skip)) * secondSlope) + secondYInt
r = (row[colIndex] * transformSlope) + transformYInt
r = pixelOperationC1.compute(r, second)
g = (row[colIndex + 1] * transformSlope) + transformYInt
g = pixelOperationC2.compute(g, second)
b = (row[colIndex + 2] * transformSlope) + transformYInt
b = pixelOperationC3.compute(b, second)
r, g, b = pixelColor(r, g, b)
row[colIndex] = r
row[colIndex + 1] = g
row[colIndex + 2] = b
data[rowIndex] = row
src.loader.load(rowIndex / (height - 1))
# Write
f = open("transform.png", "wb")
w = png.Writer(width, height, alpha=alpha)
w.write(f, data)
f.close()
# Success
print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨\n")
| import png
import src.loader
import src.seed
import src.pixel
# Intro
print("\x1b[36mIce\x1b[0m Transforming Image 💡")
# Seed
seedText = src.seed.generateSeed()
# Read
f = open("art.png", "rb")
r = png.Reader(f)
info = r.read()
width = info[0]
height = info[1]
data = list(info[2])
alpha = info[3]["alpha"]
skip = 4 if alpha else 3
f.close()
# Transform
transformSlope = 2.0 / 255.0
transformYInt = -1.0
secondSlope = 2.0 / float(width + height - 2)
secondYInt = -1.0
pixel = src.pixel.generatePixel()
pixelOperationC1 = pixel[0]
pixelOperationC2 = pixel[1]
pixelOperationC3 = pixel[2]
pixelColor = pixel[3]
for rowIndex, row in enumerate(data):
row = list(map(float, row))
for colIndex in range(0, len(row), skip):
second = ((rowIndex + (colIndex / skip)) * secondSlope) + secondYInt
r = (row[colIndex] * transformSlope) + transformYInt
r = pixelOperationC1.compute(r, second)
g = (row[colIndex + 1] * transformSlope) + transformYInt
g = pixelOperationC2.compute(g, second)
b = (row[colIndex + 2] * transformSlope) + transformYInt
b = pixelOperationC3.compute(b, second)
r, g, b = pixelColor(r, g, b)
row[colIndex] = r
row[colIndex + 1] = g
row[colIndex + 2] = b
data[rowIndex] = row
src.loader.load(rowIndex / (height - 1))
# Write
f = open("transform.png", "wb")
w = png.Writer(width, height, alpha=alpha)
w.write(f, data)
f.close()
# Success
print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨")
| mit | Python |
a69c482fded8bf84c0228b904cc9bd667d63f909 | update docstrings | peastman/deepchem,deepchem/deepchem,deepchem/deepchem,peastman/deepchem | deepchem/feat/molecule_featurizers/roberta_tokenizer.py | deepchem/feat/molecule_featurizers/roberta_tokenizer.py | from deepchem.feat import MolecularFeaturizer
from transformers import RobertaTokenizerFast
from deepchem.utils.typing import RDKitMol
from typing import Dict, List
class RobertaFeaturizer(RobertaTokenizerFast, MolecularFeaturizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
return
def _featurize(self, mol: RDKitMol, **kwargs) -> List[List[int]]:
"""Calculate encoding using HuggingFace's RobertaTokenizerFast
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
encoding: List
List containing two lists; the `input_ids` and the `attention_mask`
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
smiles_string = Chem.MolToSmiles(mol)
# the encoding is natively a dictionary with keys 'input_ids' and 'attention_mask'
# -> make this a list of two arrays to allow np to handle it
encoding = list(self(smiles_string, **kwargs).values())
return encoding
def __call__(self, *args, **kwargs) -> Dict[str, List[int]]:
return super().__call__(*args, **kwargs)
| from deepchem.feat import MolecularFeaturizer
from transformers import RobertaTokenizerFast
from deepchem.utils.typing import RDKitMol
class RobertaFeaturizer(RobertaTokenizerFast, MolecularFeaturizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
return
def _featurize(self, mol: RDKitMol, **kwargs):
"""Calculate encoding using HuggingFace's RobertaTokenizerFast
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
1D array of RDKit descriptors for `mol`. The length is 881.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
smiles_string = Chem.MolToSmiles(mol)
# the encoding is natively a dictionary with keys 'input_ids' and 'attention_mask'
# -> make this a list of two arrays to allow np to handle it
encoding = list(self(smiles_string, **kwargs).values())
return encoding
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
| mit | Python |
f6b3950a956116570a4bc2d9310555805a8b29a2 | Enable result backend | UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine | upol_search_engine/celery_app.py | upol_search_engine/celery_app.py | # from __future__ import absolute_import
from celery import Celery
from kombu import Exchange, Queue
from upol_search_engine import settings
class Config(object):
broker_url = 'amqp://guest:guest@localhost:5672//'
result_backend = 'amqp://guest:guest@localhost:5672//'
task_queues = (
Queue(
'crawler',
exchange=Exchange('crawler'),
routing_key='crawler'
),
Queue(
'feeder',
exchange=Exchange('feeder'),
routing_key='feeder'
),
)
enable_utc = False
timezone = 'Europe/Prague'
include = ['upol_search_engine.upol_crawler.tasks']
# worker_hijack_root_logger = False
log_file = settings.CONFIG.get('Settings', 'log_dir')
task_acks_late = True
app = Celery('celery_app')
app.config_from_object(Config)
if __name__ == '__main__':
app.start()
| # from __future__ import absolute_import
from celery import Celery
from kombu import Exchange, Queue
from upol_search_engine import settings
class Config(object):
broker_url = 'amqp://guest:guest@localhost:5672//'
task_queues = (
Queue(
'crawler',
exchange=Exchange('crawler'),
routing_key='crawler'
),
Queue(
'feeder',
exchange=Exchange('feeder'),
routing_key='feeder'
),
)
enable_utc = False
timezone = 'Europe/Prague'
include = ['upol_search_engine.upol_crawler.tasks']
# worker_hijack_root_logger = False
log_file = settings.CONFIG.get('Settings', 'log_dir')
task_acks_late = True
app = Celery('celery_app')
app.config_from_object(Config)
if __name__ == '__main__':
app.start()
| mit | Python |
64f04ee05de166d0c9197bd17eb661f76862804b | Fix version character replacement. | Emberwalker/LibFTB | libftb/ftb.py | libftb/ftb.py | __author__ = 'Arkan'
import urllib.request
import os.path
import time
import xml.etree.ElementTree as ET
import libftb.internal.parser as parser
CDN_ROOT = "http://ftb.cursecdn.com/FTB2"
def get_packs():
root = __get_or_create_cache()
return parser.packs_xml_to_dict(root)
def get_pack_url(pack_dict, version=None, server=False):
"""
:type version str
:type server bool
"""
pack_id = pack_dict['id']
if not version:
version = pack_dict['version']
version = version.replace('.', '_')
print("Getting URL for ID {} ({}) (server: {})".format(pack_id, version, server))
if server:
file = pack_dict['server_url']
else:
file = pack_dict['url']
return __get_pack_url(pack_id, version, file)
def __get_or_create_cache():
if not (os.path.isfile('modpacks.xml') and (time.time() - os.path.getmtime('modpacks.xml')) < 3600):
print("Updating local copy of modpacks.xml...")
urllib.request.urlretrieve(__get_static_url('modpacks.xml'), 'modpacks.xml')
return ET.parse('modpacks.xml').getroot()
def __get_static_url(file):
return "{}/static/{}".format(CDN_ROOT, file)
def __get_pack_url(pack, version, file):
return "{}/modpacks/{}/{}/{}".format(CDN_ROOT, pack, version, file) | __author__ = 'Arkan'
import urllib.request
import os.path
import time
import xml.etree.ElementTree as ET
import libftb.internal.parser as parser
CDN_ROOT = "http://ftb.cursecdn.com/FTB2"
def get_packs():
root = __get_or_create_cache()
return parser.packs_xml_to_dict(root)
def get_pack_url(pack_dict, version=None, server=False):
"""
:type version str
:type server bool
"""
pack_id = pack_dict['id']
if not version:
version = pack_dict['version']
version.replace('.', '_')
print("Getting URL for ID {} ({}) (server: {})".format(pack_id, version, server))
if server:
file = pack_dict['server_url']
else:
file = pack_dict['url']
return __get_pack_url(pack_id, version, file)
def __get_or_create_cache():
if not (os.path.isfile('modpacks.xml') and (time.time() - os.path.getmtime('modpacks.xml')) < 3600):
print("Updating local copy of modpacks.xml...")
urllib.request.urlretrieve(__get_static_url('modpacks.xml'), 'modpacks.xml')
return ET.parse('modpacks.xml').getroot()
def __get_static_url(file):
return "{}/static/{}".format(CDN_ROOT, file)
def __get_pack_url(pack, version, file):
return "{}/modpacks/{}/{}/{}".format(CDN_ROOT, pack, version, file) | mit | Python |
9e8a7538e7242167572f888d99d2903464fc5f6e | Make variables class members | handrake/brainfuck | brainfuck.py | brainfuck.py | import sys
from getch import getch
commands = '><+-.,[]'
class BrainfuckInterpreter:
def __init__(self):
self.i = 0
self.p = 0
self.cells = [0]
@staticmethod
def find_matching_paren(source, c):
paren = 0
d = {'[':']', ']':'['}
for k in range(len(source)):
if source[k]==c:
paren += 1
elif source[k]==d[c]:
if paren == 0:
return k
paren -= 1
return -1
def eval(self, source):
while self.i != len(source):
c = source[self.i]
if c == '>':
if self.p == len(self.cells)-1:
self.cells.append(0)
self.p += 1
elif c == '<':
if self.p != 0:
self.p -= 1
elif c == '+':
self.cells[self.p] += 1
elif c == '-':
self.cells[self.p] -= 1
elif c == '.':
sys.stdout.write(chr(self.cells[self.p]))
sys.stdout.flush()
elif c == ',':
self.cells[self.p] = ord(getch())
elif c == '[' and self.cells[self.p] == 0:
self.i += self.find_matching_paren(source[self.i+1:], c)
elif c == ']' and self.cells[self.p] != 0:
self.i -= self.find_matching_paren(source[self.i-1::-1], c) + 1
self.i += 1
def main():
source = ''
while 1:
line = input("brainfuck>> ")
if line == '':break
source += line
source = ''.join([c for c in source if c in commands])
interpreter = BrainfuckInterpreter()
interpreter.eval(source)
if __name__ == "__main__":
main()
| import sys
from getch import getch
commands = '><+-.,[]'
class BrainfuckInterpreter:
@staticmethod
def find_matching_paren(source, c):
paren = 0
d = {'[':']', ']':'['}
for k in range(len(source)):
if source[k]==c:
paren += 1
elif source[k]==d[c]:
if paren == 0:
return k
paren -= 1
return -1
def eval(self, source):
i = 0
p = 0
cells = [0]
while i != len(source):
c = source[i]
if c == '>':
if p == len(cells)-1:
cells.append(0)
p += 1
elif c == '<':
if p != 0:
p -= 1
elif c == '+':
cells[p] += 1
elif c == '-':
cells[p] -= 1
elif c == '.':
sys.stdout.write(chr(cells[p]))
sys.stdout.flush()
elif c == ',':
cells[p] = ord(getch())
elif c == '[' and cells[p] == 0:
i += self.find_matching_paren(source[i+1:], c)
elif c == ']' and cells[p] != 0:
i -= self.find_matching_paren(source[i-1::-1], c) + 1
i += 1
def main():
source = ''
while 1:
line = input("brainfuck>> ")
if line == '':break
source += line
source = ''.join([c for c in source if c in commands])
interpreter = BrainfuckInterpreter()
interpreter.eval(source)
if __name__ == "__main__":
main()
| bsd-3-clause | Python |
59f29688d45916be81206201dd314ce140b8a154 | Improve handling of non-ASCII chars (fixes #3). | andialbrecht/runsqlrun | rsr/cmd.py | rsr/cmd.py | import os
import signal
import sys
from argparse import ArgumentParser
from gi.repository import Gio, GLib
from rsr import __version__
from rsr.app import Application
parser = ArgumentParser(prog='runsqlrun', description='Run SQL statements')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + __version__)
# See issue3. Unfortunately this needs to be done before opening
# any Oracle connection.
os.environ.setdefault('NLS_LANG', '.AL32UTF8')
def main():
parser.parse_args()
signal.signal(signal.SIGINT, signal.SIG_DFL)
GLib.set_application_name('RunSQLRun')
GLib.set_prgname('runsqlrun')
resource = Gio.resource_load('data/runsqlrun.gresource')
Gio.Resource._register(resource)
app = Application()
sys.exit(app.run(sys.argv))
| import signal
import sys
from argparse import ArgumentParser
from gi.repository import Gio, GLib
from rsr import __version__
from rsr.app import Application
parser = ArgumentParser(prog='runsqlrun', description='Run SQL statements')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + __version__)
def main():
parser.parse_args()
signal.signal(signal.SIGINT, signal.SIG_DFL)
GLib.set_application_name('RunSQLRun')
GLib.set_prgname('runsqlrun')
resource = Gio.resource_load('data/runsqlrun.gresource')
Gio.Resource._register(resource)
app = Application()
sys.exit(app.run(sys.argv))
| mit | Python |
c8085fda4afba86ca102c29019302e3dd778031b | Return creds in HTML | everett-toews/rackspace-user-management | rum/rum.py | rum/rum.py | import threading
from flask import Flask
from users import users
api = Flask(__name__)
lock = threading.Lock()
user_num = 0
@api.route('/')
def index():
return 'Rackspace User Management'
@api.route('/user')
def get_user():
global user_num
with lock:
if user_num < len(users):
html = "<pre>\n"
html += "export OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/\n"
html += "export OS_REGION_NAME=IAD\n"
html += "export OS_USERNAME={}\n".format(users[user_num].username)
html += "export OS_PROJECT_NAME={}\n".format(users[user_num].account_num)
html += "export OS_PASSWORD={}\n".format(users[user_num].password)
html += "export OS_API_KEY={}\n".format(users[user_num].api_key)
html += "</pre>\n"
user_num += 1
else:
html = "No More Creds\n"
return html
@api.route('/reset')
def reset_users():
global user_num
with lock:
user_num = 0
return "More Creds\n"
if __name__ == '__main__':
api.run(debug=True)
| import threading
from flask import Flask
from users import users
api = Flask(__name__)
lock = threading.Lock()
user_num = 0
@api.route('/')
def index():
return 'Rackspace User Management'
@api.route('/user')
def get_user():
global user_num
with lock:
if user_num < len(users):
bash = "export OS_REGION_NAME=ORD\n"
bash += "export OS_USERNAME={}\n".format(users[user_num].username)
bash += "export OS_API_KEY={}\n".format(users[user_num].api_key)
bash += "export NAME=machine{0:02d}\n".format(user_num)
bash += "export PATH=$PATH:.\n"
user_num += 1
else:
bash = "No More Creds\n"
return bash
@api.route('/reset')
def reset_users():
global user_num
with lock:
user_num = 0
return "More Creds\n"
if __name__ == '__main__':
api.run(debug=True)
| mit | Python |
07f1a36b30c35dee40a82ae1a6b6ab4635f7cf39 | Move constant | kbd/setup,kbd/setup,kbd/setup,kbd/setup,kbd/setup | bootstrap.py | bootstrap.py | #!/usr/bin/env python
"""Bootstrap the setup tool.
This assumes Python is installed on the target os, but not specifically Python3.
What this does (only intended for Mac atm):
* installs Homebrew
* Homebrew installs a core set of packages (git and python3)
* git check out the project into ~/setup
* run
- setup (will restart os functions to reflect new settings)
- setup brew
- setup packages
* tell the user to restart terminal to get new everything
You should be able to run this with curl | python shenanigans.
"""
import os
import subprocess
REPO_URL = 'https://github.com/kbd/setup.git'
HOMEBREW_INSTALL_CMD = '/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"'
SETUP_PATH = os.path.expanduser('~/setup')
SETUP_EXE = os.path.join(SETUP_PATH, 'HOME/bin/setup')
def main():
print("Installing Homebrew")
if not subprocess.call(['which', 'brew']):
print("Homebrew is installed")
else:
subprocess.check_call(HOMEBREW_INSTALL_CMD, shell=True, executable='/bin/bash')
print("Installing dependencies")
for cmd in 'git', 'python':
subprocess.check_call("brew install {0} || brew upgrade {0}".format(cmd), shell=True)
subprocess.check_call(['pip3', 'install', '--upgrade', 'click']) # required for 'setup'
if os.path.exists(SETUP_PATH):
print("Setup location already exists, updating")
subprocess.check_call(['git', 'pull'], cwd=SETUP_PATH)
else:
print("Checking out setup repo")
subprocess.check_call(['git', 'clone', REPO_URL], cwd=os.path.dirname(SETUP_PATH))
print("Installing all the things")
# add to path because bootstrapping
os.environ['PATH'] = ':'.join([
os.path.dirname(SETUP_EXE), # add repo bin dir to path, symlinks not yet run
os.path.expanduser('~/bin'),
os.environ['PATH']
])
subprocess.check_call([SETUP_EXE, 'init'])
print("Done installing all the things. Restart your terminal.")
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""Bootstrap the setup tool.
This assumes Python is installed on the target os, but not specifically Python3.
What this does (only intended for Mac atm):
* installs Homebrew
* Homebrew installs a core set of packages (git and python3)
* git check out the project into ~/setup
* run
- setup (will restart os functions to reflect new settings)
- setup brew
- setup packages
* tell the user to restart terminal to get new everything
You should be able to run this with curl | python shenanigans.
"""
import os
import subprocess
REPO_URL = 'https://github.com/kbd/setup.git'
HOMEBREW_INSTALL_CMD = '/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"'
SETUP_PATH = os.path.expanduser('~/setup')
def main():
print("Installing Homebrew")
if not subprocess.call(['which', 'brew']):
print("Homebrew is installed")
else:
subprocess.check_call(HOMEBREW_INSTALL_CMD, shell=True, executable='/bin/bash')
print("Installing dependencies")
for cmd in 'git', 'python':
subprocess.check_call("brew install {0} || brew upgrade {0}".format(cmd), shell=True)
subprocess.check_call(['pip3', 'install', '--upgrade', 'click']) # required for 'setup'
if os.path.exists(SETUP_PATH):
print("Setup location already exists, updating")
subprocess.check_call(['git', 'pull'], cwd=SETUP_PATH)
else:
print("Checking out setup repo")
subprocess.check_call(['git', 'clone', REPO_URL], cwd=os.path.dirname(SETUP_PATH))
print("Installing all the things")
setup_exe = os.path.join(SETUP_PATH, 'HOME/bin/setup')
# add to path because bootstrapping
os.environ['PATH'] = ':'.join([
os.path.dirname(setup_exe), # add repo bin dir to path, symlinks not yet run
os.path.expanduser('~/bin'),
os.environ['PATH']
])
subprocess.check_call([setup_exe, 'init'])
print("Done installing all the things. Restart your terminal.")
if __name__ == '__main__':
main()
| mit | Python |
9336c9383eb3cd0c9b92e6524cccb2b7aa4a3ea6 | fix model inheritence | amboycharlie/Child-Friendly-LCMS,django-leonardo/django-leonardo,amboycharlie/Child-Friendly-LCMS,amboycharlie/Child-Friendly-LCMS,django-leonardo/django-leonardo,django-leonardo/django-leonardo,amboycharlie/Child-Friendly-LCMS,django-leonardo/django-leonardo | leonardo/module/nav/mixins.py | leonardo/module/nav/mixins.py |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
ORIENTATION_CHOICES = (
('vertical', 'Vertical'),
('horizon', 'Horizon'),
)
class NavigationWidgetMixin(models.Model):
display_active = models.NullBooleanField(
verbose_name=_('Display active'), default=True)
display_in_nav = models.NullBooleanField(
verbose_name=_('Display in navigation'), default=True)
orientation = models.CharField(
verbose_name=_("Orientation"), max_length=20,
choices=ORIENTATION_CHOICES, default='horizontal')
class Meta:
abstract = True
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
ORIENTATION_CHOICES = (
('vertical', 'Vertical'),
('horizon', 'Horizon'),
)
class NavigationWidgetMixin(object):
display_active = models.NullBooleanField(
verbose_name=_('Display active'), default=True)
display_in_nav = models.NullBooleanField(
verbose_name=_('Display in navigation'), default=True)
orientation = models.CharField(
verbose_name=_("Orientation"), max_length=20,
choices=ORIENTATION_CHOICES, default='horizontal')
| apache-2.0 | Python |
f752df6eece63a9ab6b806c0f24779f88e19ccd9 | Use test site in item tests | veeti/vasara | vasara/tests/test_item.py | vasara/tests/test_item.py | from unittest import TestCase
from vasara.item import Item
from vasara.tests.common import build_test_site
TEST_ITEM = """---
{
"name": "Test",
"list": [1, 2, 3]
}
---
Hello, world! This is the actual content."""
class TestItem(TestCase):
def setUp(self):
self.site = build_test_site()
self.item = Item(key="test", site=self.site, raw=TEST_ITEM)
def test_content_matcher(self):
"""Tests that all metadata and actual content is properly parsed from input."""
self.assertEqual(self.item.metadata["name"], "Test")
self.assertEqual(self.item.metadata["list"], [1, 2, 3])
self.assertEqual(self.item.raw_content, "Hello, world! This is the actual content.")
def test_filter_not_twice(self):
"""Ensures that items don't filter themselves twice."""
def increment_filter(item):
if "counter" in item.metadata:
item.metadata["counter"] += 1
else:
item.metadata["counter"] = 1
self.item.filters.append(increment_filter)
self.item.filter()
self.item.filter() # Shouldn't do anything!
self.assertEqual(1, self.item.metadata["counter"])
def test_content_property(self):
"""Ensures that the item is filtered if the content property is retrieved."""
def replacer_filter(item):
item.filtered_content = "Unit Testing!"
self.item.filters.append(replacer_filter)
self.assertEqual("Unit Testing!", self.item.content)
def test_templater_property(self):
"""Ensure that the item is templated if the templated property is retrieved."""
def templater(item):
return "Hello, test_templater_property!"
self.item.templater = templater
self.assertEqual("Hello, test_templater_property!", self.item.templated)
def test_pretty_route(self):
"""Ensures that the pretty_route property displays a "prettified" route."""
self.item.route = "test/index.html"
self.assertEqual("test/", self.item.pretty_route) | from unittest import TestCase
from vasara.item import Item
TEST_ITEM = """---
{
"name": "Test",
"list": [1, 2, 3]
}
---
Hello, world! This is the actual content."""
class TestItem(TestCase):
def setUp(self):
self.site = None # TODO
self.item = Item(key="test", site=self.site, raw=TEST_ITEM)
def test_content_matcher(self):
"""Tests that all metadata and actual content is properly parsed from input."""
self.assertEqual(self.item.metadata["name"], "Test")
self.assertEqual(self.item.metadata["list"], [1, 2, 3])
self.assertEqual(self.item.raw_content, "Hello, world! This is the actual content.")
def test_filter_not_twice(self):
"""Ensures that items don't filter themselves twice."""
def increment_filter(item):
if "counter" in item.metadata:
item.metadata["counter"] += 1
else:
item.metadata["counter"] = 1
self.item.filters.append(increment_filter)
self.item.filter()
self.item.filter() # Shouldn't do anything!
self.assertEqual(1, self.item.metadata["counter"])
def test_content_property(self):
"""Ensures that the item is filtered if the content property is retrieved."""
def replacer_filter(item):
item.filtered_content = "Unit Testing!"
self.item.filters.append(replacer_filter)
self.assertEqual("Unit Testing!", self.item.content)
def test_templater_property(self):
"""Ensure that the item is templated if the templated property is retrieved."""
def templater(item):
return "Hello, test_templater_property!"
self.item.templater = templater
self.assertEqual("Hello, test_templater_property!", self.item.templated)
def test_pretty_route(self):
"""Ensures that the pretty_route property displays a "prettified" route."""
self.item.route = "test/index.html"
self.assertEqual("test/", self.item.pretty_route) | mit | Python |
f916fe1715ab891ded093c9adb9a6d0a4d2b30eb | add more complex example | mrocklin/unification | unification/tests/test_match.py | unification/tests/test_match.py | from unification.match import *
from unification.utils import raises
from unification.core import var
def inc(x):
return x + 1
def dec(x):
return x - 1
def add(x, y):
return x + y
def mul(x, y):
return x * y
def foo(*args):
return args
def test_simple():
d = Dispatcher('d')
d.add((1,), inc)
d.add((10,), dec)
assert d(1) == 2
assert d(10) == 9
def test_complex():
d = Dispatcher('d')
x = var('x')
y = var('y')
d.add((x,), inc)
d.add((x, 1), add)
d.add((x, x), mul)
d.add((x, (x, x)), foo)
assert d(1) == 2
assert d(2) == 3
assert d(2, 1) == 3
assert d(10, 10) == 100
assert d(10, (10, 10)) == (10, (10, 10))
assert raises(NotImplementedError, lambda : d(1, 2))
def test_raises_error():
d = Dispatcher('d')
assert raises(NotImplementedError, lambda : d(1, 2, 3))
def test_register():
d = Dispatcher('d')
@d.register(1)
def f(x):
return 10
@d.register(2)
def f(x):
return 20
assert d(1) == 10
assert d(2) == 20
| from unification.match import *
from unification.utils import raises
def inc(x):
return x + 1
def dec(x):
return x - 1
def test_simple():
d = Dispatcher('d')
d.add((1,), inc)
d.add((10,), dec)
assert d(1) == 2
assert d(10) == 9
def test_raises_error():
d = Dispatcher('d')
assert raises(NotImplementedError, lambda : d(1, 2, 3))
def test_register():
d = Dispatcher('d')
@d.register(1)
def f(x):
return 10
@d.register(2)
def f(x):
return 20
assert d(1) == 10
assert d(2) == 20
| bsd-3-clause | Python |
73e7b6b4c9393c60a4d30ec9def7513e4ac0e2f2 | add --skip | sspickle/sci-comp-notebooks | buildPDFs.py | buildPDFs.py | """
Build pdfs from student notebooks. You need a 'report_rubric.pdf' in the same directory as this file.
"""
import sys
import os
import glob
if len(sys.argv)>1:
paths=sys.argv[1:]
else:
paths=[os.curdir]
files = []
skiprubric = False
for path in paths:
if path.startswith('--skip'):
skiprubric = True
continue
if os.path.isdir(path):
files += glob.glob(os.path.join(path,'*.ipynb'))
elif os.path.isfile(path):
files += [path]
rubricPath = os.path.join(os.path.dirname(sys.argv[0]),'report_rubric.pdf')
print("Files:",files)
for fname in files:
fpath, fsrc = os.path.split(fname)
fRoot = os.path.splitext(fsrc)[0]
fPDF = os.path.join(fpath, fRoot + '.pdf')
fDest = '.'.join([fRoot,'out','pdf'])
if not os.path.exists(fDest):
cmd = "jupyter nbconvert --to PDF %s" % fname
print("executing:", cmd)
result = os.system(cmd)
if not result:
if skiprubric:
cmd = 'mv "%s" "%s"' % (fPDF, fDest)
else:
cmd = 'gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile="%s" "%s" "%s"' % (fDest, rubricPath, fPDF)
print("executing:", cmd)
result = os.system(cmd)
if not result:
print("Complete!", fDest)
if not skiprubric:
os.unlink(fPDF)
else:
print("Ack")
else:
print("Ack Ack!")
else:
print("%s already exists" % fDest)
| """
Build pdfs from student notebooks. You need a 'report_rubric.pdf' in the same directory as this file.
"""
import sys
import os
import glob
if len(sys.argv)>1:
paths=sys.argv[1:]
else:
paths=[os.curdir]
files = []
for path in paths:
if os.path.isdir(path):
files += glob.glob(os.path.join(path,'*.ipynb'))
elif os.path.isfile(path):
files += [path]
rubricPath = os.path.join(os.path.dirname(sys.argv[0]),'report_rubric.pdf')
for fname in files:
fRoot = os.path.splitext(os.path.split(fname)[1])[0]
fPDF = fRoot + '.pdf'
fDest = '.'.join([fRoot,'out','pdf'])
if not os.path.exists(fDest):
cmd = "jupyter nbconvert --to PDF %s" % fname
print("executing:", cmd)
result = os.system(cmd)
if not result:
cmd = 'gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=%s %s %s' % (fDest, rubricPath, fPDF)
print("executing:", cmd)
result = os.system(cmd)
if not result:
print("Complete!", fDest)
os.unlink(fPDF)
else:
print("Ack")
else:
print("Ack Ack!")
else:
print("%s already exists" % fDest)
| mit | Python |
ffa8b40530c1be118a230312141cfb8ee4165bae | Add utils for shortcutting error responses | igboyes/virtool,virtool/virtool,virtool/virtool,igboyes/virtool | virtool/handlers/utils.py | virtool/handlers/utils.py | from aiohttp import web
async def unpack_json_request(req):
return req.app["db"], await req.json()
def not_found():
return web.json_response({"message": "Not found"}, status=404)
def requires_login():
return web.json_response({"message": "Requires login"}, status=400)
def invalid_input(errors):
return web.json_response({"message": "Invalid input", "errors": errors}, status=422)
| async def unpack_json_request(req):
return req.app["db"], await req.json()
| mit | Python |
6cdd22b746f901c1d8de5356f78452e491411829 | use 6 inputs | yasokada/python-160423_footInput | footInput.py | footInput.py | #!/usr/bin/env python
'''
v0.5 2016 Apr 23
- use 6 inputs
v0.4 2016 Apr 23
- add UDP_procCommand()
v0.3 2016 Apr 23
- add UDP_recvData()
- add UDP_setup()
- add GPIO_setup()
v0.2 2016 Apr 23
- define main()
- change interval to 10 msec base for UDP comm
v0.1 2016 Apr 23
- can check 5 GPIO input
'''
import RPi.GPIO as GPIO
import socket
import time
import os
ins = [40, 38, 36, 32, 26, 24]
vals = range(6)
def GPIO_setup():
GPIO.setmode(GPIO.BOARD)
for idx in range(6):
GPIO.setup(ins[idx], GPIO.IN, pull_up_down=GPIO.PUD_UP)
def UDP_setup():
# incoming data string port
datip="" # INADDR_ANY
datport = 7002
datsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
datsock.bind((datip, datport))
datsock.setblocking(0)
return datsock
def UDP_recvData(datsock, rcvdat):
address = ""
try:
data,address = datsock.recvfrom(100)
except socket.error:
pass
else:
rcvdat = rcvdat + data
return rcvdat, True, address
return rcvdat, False, address
def UDP_procCommand(rcvdat, datsock, rcvadr):
if "foot" not in rcvdat:
return
ret = "foot"
for idx in range(6):
if vals[idx]==GPIO.HIGH:
ret = ret + ",1"
else:
ret = ret + ",0"
ret = ret + "\n"
datsock.sendto(ret, rcvadr)
def main():
GPIO_setup()
datsock = UDP_setup()
cnt=0
rcvdat = ""
while True:
cnt=cnt+1
rcvdat,rcvd,rcvadr = UDP_recvData(datsock, rcvdat)
time.sleep(0.01)
if rcvd == True and "\n" in rcvdat:
UDP_procCommand(rcvdat, datsock, rcvadr)
rcvdat = ""
if cnt < 30: # 300msec
continue
cnt=0
for idx in range(6):
vals[idx]=GPIO.input(ins[idx])
print vals[idx],
print
if __name__ == '__main__':
main()
| #!/usr/bin/env python
'''
v0.4 2016 Apr 23
- add UDP_procCommand()
v0.3 2016 Apr 23
- add UDP_recvData()
- add UDP_setup()
- add GPIO_setup()
v0.2 2016 Apr 23
- define main()
- change interval to 10 msec base for UDP comm
v0.1 2016 Apr 23
- can check 5 GPIO input
'''
import RPi.GPIO as GPIO
import socket
import time
import os
ins = [40, 38, 36, 32, 26]
vals = range(5)
def GPIO_setup():
GPIO.setmode(GPIO.BOARD)
for idx in range(5):
GPIO.setup(ins[idx], GPIO.IN, pull_up_down=GPIO.PUD_UP)
def UDP_setup():
# incoming data string port
datip="" # INADDR_ANY
datport = 7002
datsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
datsock.bind((datip, datport))
datsock.setblocking(0)
return datsock
def UDP_recvData(datsock, rcvdat):
address = ""
try:
data,address = datsock.recvfrom(100)
except socket.error:
pass
else:
rcvdat = rcvdat + data
return rcvdat, True, address
return rcvdat, False, address
def UDP_procCommand(rcvdat, datsock, rcvadr):
if "foot" not in rcvdat:
return
ret = "foot"
for idx in range(5):
if vals[idx]==GPIO.HIGH:
ret = ret + ",1"
else:
ret = ret + ",0"
ret = ret + "\n"
datsock.sendto(ret, rcvadr)
def main():
GPIO_setup()
datsock = UDP_setup()
cnt=0
rcvdat = ""
while True:
cnt=cnt+1
rcvdat,rcvd,rcvadr = UDP_recvData(datsock, rcvdat)
time.sleep(0.01)
if rcvd == True and "\n" in rcvdat:
UDP_procCommand(rcvdat, datsock, rcvadr)
rcvdat = ""
if cnt < 30: # 300msec
continue
cnt=0
for idx in range(5):
vals[idx]=GPIO.input(ins[idx])
print vals[idx],
print
if __name__ == '__main__':
main()
| mit | Python |
b19380e26e005d4ab71baaac1ee0158a82f64cf3 | fix encode | lffsantos/itindublin.github.io,lffsantos/itindublin.github.io,lffsantos/itindublin.github.io,ITinDublin/ITinDublin.github.io,ITinDublin/ITinDublin.github.io,ITinDublin/ITinDublin.github.io,lffsantos/itindublin.github.io,ITinDublin/ITinDublin.github.io | functions.py | functions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
import io
import hashlib
import functools
import os
import posixpath
import random
def GET_WIKI_PAGES():
content = 'content/pages/wiki'
wiki_pages = []
for page in os.listdir(content):
ler_arquivo = io.open(content+'/'+page, 'r', encoding='utf-8')
titulo = ler_arquivo.readline()
extensao = page.split(".")[1]
url = page.split(".")[0]
if extensao == 'md':
titulo = titulo.replace("Title:", " ").strip()
ler_arquivo.close()
wiki_pages.append((url, titulo))
return wiki_pages
def GET_AVATAR(autor, membros):
if autor in membros:
if 'github' in membros[autor]:
formatter = "https://avatars.githubusercontent.com/{}?size=250"
username = membros[autor]['github']
elif 'email' in membros[autor]:
formatter = "http://www.gravatar.com/avatar/{}?s=250"
username = hashlib.md5(membros[autor]['email'].strip().lower().encode("utf-8")).hexdigest()
elif 'twitter' in membros[autor]:
formatter = "http://avatars.io/twitter/{}"
username = membros[autor]['twitter']
if username.startswith("@"):
username = username[1:]
else:
formatter = "/theme/img/{}"
username = "default_avatar.png"
else:
formatter = "/theme/img/{}"
username = "default_avatar.gif"
return formatter.format(username)
def GET_ARTICLE_IMAGE(article, root):
if hasattr(article, 'image'):
img = article.image
if img.startswith('/'):
img = img[1:]
return img
if not root:
return ""
base = os.path.join('content', root)
banners = map(functools.partial(os.path.join, root), os.walk(base).next()[2])
random.seed(article.date)
return random.choice(banners)
def GET_ARTICLE_AT_GITHUB(article, repo, branch):
base = posixpath.relpath(article.source_path, os.getcwd())
return posixpath.join(repo, 'tree/', branch, base)
def GET_LINK(link):
if link.startswith('http://') or link.startswith('https://'):
return link
else:
return '/' + link | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
import hashlib
import functools
import os
import posixpath
import random
def GET_WIKI_PAGES():
content = 'content/pages/wiki'
wiki_pages = []
for page in os.listdir(content):
ler_arquivo = open(content+'/'+page, 'r', encoding='utf-8')
titulo = ler_arquivo.readline()
extensao = page.split(".")[1]
url = page.split(".")[0]
if extensao == 'md':
titulo = titulo.replace("Title:", " ").strip()
ler_arquivo.close()
wiki_pages.append((url, titulo))
return wiki_pages
def GET_AVATAR(autor, membros):
if autor in membros:
if 'github' in membros[autor]:
formatter = "https://avatars.githubusercontent.com/{}?size=250"
username = membros[autor]['github']
elif 'email' in membros[autor]:
formatter = "http://www.gravatar.com/avatar/{}?s=250"
username = hashlib.md5(membros[autor]['email'].strip().lower().encode("utf-8")).hexdigest()
elif 'twitter' in membros[autor]:
formatter = "http://avatars.io/twitter/{}"
username = membros[autor]['twitter']
if username.startswith("@"):
username = username[1:]
else:
formatter = "/theme/img/{}"
username = "default_avatar.png"
else:
formatter = "/theme/img/{}"
username = "default_avatar.gif"
return formatter.format(username)
def GET_ARTICLE_IMAGE(article, root):
if hasattr(article, 'image'):
img = article.image
if img.startswith('/'):
img = img[1:]
return img
if not root:
return ""
base = os.path.join('content', root)
banners = map(functools.partial(os.path.join, root), os.walk(base).next()[2])
random.seed(article.date)
return random.choice(banners)
def GET_ARTICLE_AT_GITHUB(article, repo, branch):
base = posixpath.relpath(article.source_path, os.getcwd())
return posixpath.join(repo, 'tree/', branch, base)
def GET_LINK(link):
if link.startswith('http://') or link.startswith('https://'):
return link
else:
return '/' + link | mit | Python |
a592dcdf1126867588753c2503a469f8cebb492a | Write out constructors and interfaces. | nanaze/jsdoctor,Prachigarg1/Prachi,Prachigarg1/Prachi,Prachigarg1/Prachi,nanaze/jsdoctor,nanaze/jsdoctor | generator.py | generator.py | from xml.dom import minidom
import symboltypes
def GenerateDocs(namespace_map):
for namespace, symbols in namespace_map.iteritems():
filepath = '%s.html' % namespace
doc = _GenerateDocument(namespace, symbols)
content = doc.documentElement.toprettyxml(indent=' ')
yield filepath, content
def _MakeTextNode(content):
text = minidom.Text()
text.data = content
return text
def _MakeHeader(content=None):
return _MakeElement('h2', content)
def _MakeElement(tagname, content=None):
element = minidom.Element(tagname)
if content:
element.appendChild(_MakeTextNode(content))
return element
def _GetSymbolsOfType(symbols, type):
return [symbol for symbol in symbols if symbol.type == type]
def _GenerateDocument(namespace, symbols):
doc = minidom.getDOMImplementation().createDocument(None, 'html', None)
body = doc.createElement('body')
doc.documentElement.appendChild(body)
for elem in _GenerateContent(namespace, symbols):
body.appendChild(elem)
return doc
def _GenerateContent(namespace, symbols):
node_list = minidom.NodeList()
sorted_symbols = sorted(symbols, key= lambda symbol: symbol.identifier)
# Constructor
constructor_symbols = _GetSymbolsOfType(
sorted_symbols, symboltypes.CONSTRUCTOR)
if constructor_symbols:
node_list.append(_MakeElement('h2', 'Constructor'))
for symbol in constructor_symbols:
node_list.append(_MakeElement('h3', symbol.identifier))
for section in symbol.comment.description_sections:
node_list.append(_MakeElement('p', section))
# Interface
interface_symbols = _GetSymbolsOfType(
sorted_symbols, symboltypes.INTERFACE)
if interface_symbols:
node_list.append(_MakeElement('h2', 'Interface'))
for symbol in interface_symbols:
node_list.append(_MakeElement('h3', symbol.identifier))
for section in symbol.comment.description_sections:
node_list.append(_MakeElement('p', section))
return node_list
| from xml.dom import minidom
def GenerateDocs(namespace_map):
for namespace, symbols in namespace_map.iteritems():
filepath = '%s.html' % namespace
doc = _GenerateDocument(namespace, symbols)
content = doc.documentElement.toprettyxml(indent=' ')
yield filepath, content
def _MakeTextNode(content):
text = minidom.Text()
text.data = content
return text
def _GenerateDocument(namespace, symbols):
doc = minidom.getDOMImplementation().createDocument(None, 'html', None)
body = doc.createElement('body')
doc.documentElement.appendChild(body)
for elem in _GenerateContent(namespace, symbols):
body.appendChild(elem)
return doc
def _GenerateContent(namespace, symbols):
node_list = minidom.NodeList()
sorted_symbols = sorted(symbols, key= lambda symbol: symbol.identifier)
for symbol in sorted_symbols:
header = minidom.Element('h2')
header.appendChild(_MakeTextNode(symbol.identifier))
node_list.append(header)
comment = minidom.Element('p')
comment.appendChild(_MakeTextNode(symbol.comment.text))
node_list.append(comment)
return node_list
| apache-2.0 | Python |
53ad3866b8dfbd012748e4ad7d7ed7025d491bd0 | REVERT remove application id validation | mauriceyap/ccm-assistant | src/alexa-main.py | src/alexa-main.py | import handlers.events as events
APPLICATION_ID = "amzn1.ask.skill.dd677950-cade-4805-b1f1-ce2e3a3569f0"
def lambda_handler(event, context):
# Make sure only this Alexa skill can use this function
if event['session']['application']['applicationId'] != APPLICATION_ID:
raise ValueError("Invalid Application ID")
if event['session']['new']:
events.on_session_started({'requestId': event['request']['requestId']},
event['session'])
request_type = event['request']['type']
if request_type == "LaunchRequest":
return events.on_launch(event['request'], event['session'])
elif request_type == "IntentRequest":
return events.on_intent(event['request'], event['session'])
elif request_type == "SessionEndedRequest":
return events.on_session_ended(event['request'], event['session'])
| import handlers.events as events
APPLICATION_ID = "amzn1.ask.skill.dd677950-cade-4805-b1f1-ce2e3a3569f0"
def lambda_handler(event, context):
if event['session']['new']:
events.on_session_started({'requestId': event['request']['requestId']},
event['session'])
request_type = event['request']['type']
if request_type == "LaunchRequest":
return events.on_launch(event['request'], event['session'])
elif request_type == "IntentRequest":
return events.on_intent(event['request'], event['session'])
elif request_type == "SessionEndedRequest":
return events.on_session_ended(event['request'], event['session'])
| mit | Python |
c1f826d5a807c50c49d56620934f83db9e514192 | Add (ugly) simplfiier for IterativeLoops | ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang | thinglang/parser/simplifier.py | thinglang/parser/simplifier.py | from thinglang.lexer.tokens.base import LexicalIdentifier
from thinglang.parser.symbols import Transient
from thinglang.parser.symbols.arithmetic import ArithmeticOperation
from thinglang.parser.symbols.base import AssignmentOperation
from thinglang.parser.symbols.functions import MethodCall, Access, ArgumentList
from thinglang.parser.symbols.logic import IterativeLoop, Loop
from thinglang.parser.symbols.types import CastOperation
from thinglang.utils.tree_utils import TreeTraversal, inspects
from thinglang.utils.union_types import POTENTIALLY_OBTAINABLE
class Simplifier(TreeTraversal):
@inspects(IterativeLoop)
def unwrap_iterative_loops(self, node):
generator_id, generator_assignment = self.create_transient(node.generator, node, LexicalIdentifier('Range'))
initial_assignment = AssignmentOperation.create(node.name, MethodCall([Access([generator_id, LexicalIdentifier('next')]), ArgumentList()]), LexicalIdentifier('number')).contextify(node.parent)
iterative_assignment = AssignmentOperation.create(node.name, MethodCall(
[Access([generator_id, LexicalIdentifier('next')]), ArgumentList()]))
node.insert_before(generator_assignment)
node.insert_before(initial_assignment)
node.children.append(iterative_assignment)
loop = Loop([None, node.name]).contextify(node.parent).populate(node.children)
node.insert_before(loop)
node.remove()
@inspects(predicate=lambda x: isinstance(getattr(x, 'value', None), POTENTIALLY_OBTAINABLE))
def inspect_obtainable_operations(self, node):
return self.unwrap_method_calls(node.value, node)
def unwrap_method_calls(self, method_call, node, parent_call=None):
if not isinstance(method_call, POTENTIALLY_OBTAINABLE):
return
for argument in method_call.arguments:
if isinstance(argument, (MethodCall, CastOperation)):
self.unwrap_method_calls(argument, node, parent_call=method_call)
if isinstance(argument, ArithmeticOperation):
for x in argument.arguments:
self.unwrap_method_calls(x, node, parent_call=argument)
if parent_call is not None:
id, assignment = self.create_transient(method_call, node)
node.insert_before(assignment)
parent_call.replace(method_call, id)
@classmethod
def is_compound(cls, node):
if not node:
return False
return (isinstance(node, MethodCall) and any(isinstance(arg, MethodCall) for arg in node.arguments.value)) or \
(isinstance(node, AssignmentOperation) and cls.is_compound(node.value))
@staticmethod
def create_transient(value, parent, type=None):
local_id = Transient().contextify(parent.context)
return local_id, AssignmentOperation([type, local_id, None, value]).contextify(parent.parent)
| from thinglang.parser.symbols import Transient
from thinglang.parser.symbols.arithmetic import ArithmeticOperation
from thinglang.parser.symbols.base import AssignmentOperation
from thinglang.parser.symbols.functions import MethodCall
from thinglang.parser.symbols.types import CastOperation
from thinglang.utils.tree_utils import TreeTraversal, inspects
from thinglang.utils.union_types import POTENTIALLY_OBTAINABLE
class Simplifier(TreeTraversal):
@inspects(predicate=lambda x: isinstance(getattr(x, 'value', None), POTENTIALLY_OBTAINABLE))
def inspect_obtainable_operations(self, node):
return self.unwrap_method_calls(node.value, node)
def unwrap_method_calls(self, method_call, node, parent_call=None):
if not isinstance(method_call, POTENTIALLY_OBTAINABLE):
return
for argument in method_call.arguments:
if isinstance(argument, (MethodCall, CastOperation)):
self.unwrap_method_calls(argument, node, parent_call=method_call)
if isinstance(argument, ArithmeticOperation):
for x in argument.arguments:
self.unwrap_method_calls(x, node, parent_call=argument)
if parent_call is not None:
id, assignment = self.create_transient(method_call, node)
node.insert_before(assignment)
parent_call.replace(method_call, id)
@classmethod
def is_compound(cls, node):
if not node:
return False
return (isinstance(node, MethodCall) and any(isinstance(arg, MethodCall) for arg in node.arguments.value)) or \
(isinstance(node, AssignmentOperation) and cls.is_compound(node.value))
@staticmethod
def create_transient(value, parent, type=None):
local_id = Transient().contextify(parent.context)
return local_id, AssignmentOperation([type, local_id, None, value]).contextify(parent.parent)
| mit | Python |
d36f16e804180c402f32a7e0e29d822592254a68 | Integrate LLVM at llvm/llvm-project@54cc7de4bc01 | gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tenso
rflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "54cc7de4bc01e6178213e4487d6ab49b809ba2b0"
LLVM_SHA256 = "a939c493893b2d27ea87d0d28efc79f33cdee4c56ac5954bc19ca2222a29af9d"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = ["//third_party/llvm:macos_build_fix.patch", "//third_party/llvm:fix_ppc64le.patch", "//third_party/llvm:disable_typeid_check.patch"],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "077f90315bec29443784a1bb2c55f3d7fc2eab64"
LLVM_SHA256 = "b8204098753a27847e6084c95d7aa62083cc752ba29899b55058e38ea3f1c4f6"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = ["//third_party/llvm:macos_build_fix.patch", "//third_party/llvm:fix_ppc64le.patch", "//third_party/llvm:disable_typeid_check.patch"],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
0b8eec703df52d94ca683cc962110c81951f5900 | Integrate LLVM at llvm/llvm-project@72f89556413f | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "72f89556413f9c1fb1fb9138d6d53e13499ce545"
LLVM_SHA256 = "8b2a612f73a8dc54c76a2b40debc5b2464a55c91dac0f3b6c607009ba637311b"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "e07a7040d9c6dbdd71e55d1153bfe1357079e511"
LLVM_SHA256 = "004147901d4725b45eaa724b2cb511989eb2d031146f876ba1b4327b5a5e053f"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
77abacb3887c35712ac47dd7685cfa2ce33c3695 | Add domain to __init__ | kolanos/vaporize | vaporize/__init__.py | vaporize/__init__.py | __title__ = 'vaporize'
__author__ = 'Michael Lavers'
__version__ = '0.1.0'
__license__ = 'MIT'
__copyright__ = 'Copyright 2012 Michael Lavers'
from . import domain, flavor, image, ipgroup, loadbalancer, server
from .core import connect
| __title__ = 'vaporize'
__author__ = 'Michael Lavers'
__version__ = '0.1.0'
__license__ = 'MIT'
__copyright__ = 'Copyright 2012 Michael Lavers'
from . import flavor, image, ipgroup, loadbalancer, server
from .core import connect
| mit | Python |
5b58b88784771e1e8f39c21872fd109a09eec141 | Integrate LLVM at llvm/llvm-project@f116107f2d93 | google/tsl,google/tsl,google/tsl | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "f116107f2d9342dabf8f1ccecb3f33f9b85d9b48"
LLVM_SHA256 = "0db5a61f8d1d0435d278fd19c7a0a47dc8170a99fa365993718d3912417b5940"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "65644125beb76066171ce586e13bdff918140c0e"
LLVM_SHA256 = "3e3afa5504c3de889a56a1678d016300dc6c45611c28a199adb487d4fc50653f"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
c4262eb68366c705aec08fdc0b20b54d58dcdb19 | Integrate LLVM at llvm/llvm-project@caea37b37e6a | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "caea37b37e6aa8b0c1bb21526ad2d216b46a4b10"
LLVM_SHA256 = "91c38b69bfb9cd03b7bbe967d7d75ab7c6a8d289378180b1f30948a8432ec39e"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "91d3f73937b603b168a2be40f57a81efcc37da86"
LLVM_SHA256 = "500fe116e7bfc8c50d3ac873211b3700d479d710d9f2ac8171bfa903d085ba5c"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| apache-2.0 | Python |
1a28370b26c23d9d7c9399896ea5eba23bec029f | Integrate LLVM at llvm/llvm-project@4821508d4db7 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "4821508d4db75a535d02b8938f81fac6de66cc26"
LLVM_SHA256 = "4138bb3f83bfb822f3ce795cd7e3e7a7018709d8af4e7b54194cbd81931cc93c"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "6f258c0fd34cf4001ffa08c61f6e4e0f1254c50f"
LLVM_SHA256 = "9464c454c66b0c33f0e431fd24c5e35cd5db02c4983377b88d386cd0ad94130e"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
c9d54bd013f60f580942b0b93c94168ec772f49e | Integrate LLVM at llvm/llvm-project@892260d7f352 | paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tenso
rflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "892260d7f3526c429047b645f22401635a7df368"
LLVM_SHA256 = "20c04584b188e4f39e88e607453f62b76faf7430dd770e2a9bf6b56c6c0822b2"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "eda9fdc493e5f06c59f29f520255ada23901b3b4"
LLVM_SHA256 = "1db526f68751d8ad0258a7692d7c0ad997e1ed33262fb771f8c5fb1a5a25bb1b"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
b163018fa478e792b66da6e5de71f87ad24858b6 | Integrate LLVM at llvm/llvm-project@c6013f71a455 | Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,karllessard/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow
/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "c6013f71a4555f6d9ef9c60e6bc4376ad63f1c47"
LLVM_SHA256 = "644a1f9db6e55ba28fba1e03fe6c2d28514d47e1e02210b4b281868d7a7af70c"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:BUILD.bazel",
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "43d6991c2a4cc2ac374e68c029634f2b59ffdfdf"
LLVM_SHA256 = "6be97e134eab943941bbb06ad0c714070dc24cb4418a104813c1e9a2ca6655f7"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:BUILD.bazel",
)
| apache-2.0 | Python |
2d602aaa83cac1fa3397ceefb39d36dd814d1165 | Integrate LLVM at llvm/llvm-project@93f54fae9dda | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "93f54fae9dda7d5d89711eb97f1ee44de6ce1ff7"
LLVM_SHA256 = "02e569a27548632ba083addbc9223bc5246e433c9eda72746c947dc17a5b0117"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "49cbf4cd85a9ae6b53947fb8cf39ccfb56becc57"
LLVM_SHA256 = "b48235a81cf29ad0e25f5029602eb2dfa092bd0c3a0e4ca785da595415fe29e2"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| apache-2.0 | Python |
d574bf6871219e7c1232d0f39a145878a23b8cd8 | Integrate LLVM at llvm/llvm-project@f79214d1e1fd | tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experime
ntal_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "f79214d1e1fd79f684b53a8a1d34efeb37a33a28"
LLVM_SHA256 = "5e36dc179188c453034b47c205704f645b874f33a81899fd4713b0f07b2a229d"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "540a13652fda8b91b62b73fb9ae1e34879e8e36c"
LLVM_SHA256 = "928551015dbc884488d7cda367989bc65f6ec702233cd61148426854d34d7080"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
bcc7475b0028a7d07fd0c2398742f09f61377949 | Integrate LLVM at llvm/llvm-project@1f06398e96d4 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "1f06398e96d4508d22f42b760f70eb5d4e7b1dc9"
LLVM_SHA256 = "84bd02a75966a1af45bf4556484817f9c4621a045686f7d1f32b034dc1c784c0"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "6edbdf80cac119f8f30d2ae6fa2972d9e778510b"
LLVM_SHA256 = "dd7ca82116dbfc1ddd7159425965318dfbe7a39e5f150a19880f7f40c6118001"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
5cd674ec764be72bb3e94c0b56fdf733a4a1c885 | Remove Audio Already Done error | Harmon758/Harmonbot,Harmon758/Harmonbot | Discord/utilities/errors.py | Discord/utilities/errors.py |
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
|
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
class AudioAlreadyDone(AudioError):
'''Audio Already Done playing'''
pass
| mit | Python |
dd3a8bfa195f07e60d6c7c8c53d86d2a9405bf9d | Update test.py | madhurilalitha/Python-Projects | EntityExtractor/src/test.py | EntityExtractor/src/test.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Lalitha Madhuri Putchala on Dec 10 2017
"""
import unittest
from entity_main import Entity
from docx import Document
import docx2txt
class TestEntity(unittest.TestCase):
'''The below function verifies the basic sanity functionality of the program
by validating the word count in the document before and after the highlighting
of the text'''
def test_sanity(self):
et = Entity()
len_before_highlight = len(et.raw_data)
et.highlight_address_fields()
et.highlight_contact_details()
et.highlight_dates()
et.tag_person_entities()
et.save_document()
# load the new document with highlighted text
new_raw_data = docx2txt.process('Contract_Output.docx')
len_after_highlight = len(new_raw_data)
self.assertEqual(len_before_highlight, len_after_highlight)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Lalitha Madhuri Putchala on Dec 10 2017
"""
import unittest
from entity_main import Entity
from docx import Document
import docx2txt
class TestEntity(unittest.TestCase):
'''The below function verifies the basic sanity functionality of the program
by validating the word count in the document before and after the highlighting
of the text'''
def test_sanity(self):
et = Entity()
len_before_highlight = len(et.raw_data)
et.highlight_address_fields()
et.highlight_contact_details()
et.highlight_dates()
et.tag_person_entities()
et.save_document()
# load the new document with highlighted text
new_raw_data = docx2txt.process('Contract_Output.docx')
len_after_highlight = len(new_raw_data)
self.assertEqual(len_before_highlight, len_after_highlight)
if __name__ == '__main__':
unittest.main()
| mit | Python |
c9bf706c0b7394b2bb5b9268d1c699aef97dfb14 | fix __openerp__.py | OCA/connector,OCA/connector | connector_base_product/__openerp__.py | connector_base_product/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: David BEAL, Copyright Akretion, 2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Connector Base Product',
'version': '1.0',
'author': 'Openerp Connector Core Editors',
'website': 'http://openerp-connector.com',
'license': 'AGPL-3',
'category': 'Connector',
'description': """
Connector Base Product
======================
Add 'Connector' tab to product view
""",
'depends': [
'connector',
'product',
],
'data': [
'product_view.xml'
],
'installable': True,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Connector Base Product',
'version': '1.0',
'author': 'Openerp Connector Core Editors',
'website': 'http://openerp-connector.com',
'license': 'AGPL-3',
'category': 'Generic Modules',
'description': """
Connector Base Product
========================
Add 'Connector' tab to product view
""",
'depends': [
'connector',
],
'data': [
'product_view.xml'
],
'installable': True,
'application': True,
}
| agpl-3.0 | Python |
92d8f60188b6c9df57c1a250ce92efe8755c7c81 | replace slug display in admin with full slug | noxan/django-mini-cms | cms/admin.py | cms/admin.py | from django.contrib import admin
from models import Page
class PageAdmin(admin.ModelAdmin):
model = Page
list_display = ('headline', 'parent', 'get_full_slug', 'public')
admin.site.register(Page, PageAdmin)
| from django.contrib import admin
from models import Page
class PageAdmin(admin.ModelAdmin):
model = Page
list_display = ('headline', 'parent', 'slug', 'public')
admin.site.register(Page, PageAdmin)
| bsd-3-clause | Python |
2838e70c20bb177b85ba0f6b96b80889e6c97de7 | Change form to form(s) in logging statement | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/users/management/commands/gdpr_scrub_user_from_forms.py | corehq/apps/users/management/commands/gdpr_scrub_user_from_forms.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from io import StringIO
from lxml import etree
import sys
import six
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Scrubs the username from all forms associated with the given user"
def add_arguments(self, parser):
parser.add_argument('username')
parser.add_argument('domain')
def handle(self, username, domain, **options):
this_form_accessor = FormAccessors(domain=domain)
user = CommCareUser.get_by_username(username)
if not user:
logger.info("User {} not found.".format(username))
sys.exit(1)
user_id = user._id
form_ids = this_form_accessor.get_form_ids_for_user(user_id)
new_username = "Redacted User (GDPR)"
input_response = raw_input("Update {} form(s) for user {} in domain {}? (y/n): ".format(len(form_ids), username, domain))
if input_response == "y":
for form_data in this_form_accessor.iter_forms(form_ids):
form_attachment_xml_new = self.update_form_data(form_data, new_username)
this_form_accessor.modify_attachment_xml_and_metadata(form_data, form_attachment_xml_new)
logging.info("Updated {} form(s) for user {} in domain {}".format(len(form_ids), username, domain))
elif input_response == "n":
logging.info("No forms updated, exiting.")
else:
logging.info("Command not recognized. Exiting.")
@staticmethod
def update_form_data(form_data, new_username):
form_attachment_xml = form_data.get_attachment("form.xml")
xml_elem = etree.parse(StringIO(six.text_type(form_attachment_xml)))
id_elem = xml_elem.find("{http://openrosa.org/jr/xforms}meta").find(
"{http://openrosa.org/jr/xforms}username")
id_elem.text = new_username
new_form_attachment_xml = etree.tostring(xml_elem, pretty_print=True).decode("UTF-8")
return new_form_attachment_xml
| from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from corehq.apps.users.models import CommCareUser
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from io import StringIO
from lxml import etree
import sys
import six
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Scrubs the username from all forms associated with the given user"
def add_arguments(self, parser):
parser.add_argument('username')
parser.add_argument('domain')
def handle(self, username, domain, **options):
this_form_accessor = FormAccessors(domain=domain)
user = CommCareUser.get_by_username(username)
if not user:
logger.info("User {} not found.".format(username))
sys.exit(1)
user_id = user._id
form_ids = this_form_accessor.get_form_ids_for_user(user_id)
new_username = "Redacted User (GDPR)"
input_response = raw_input("Update {} forms for user {} in domain {}? (y/n): ".format(len(form_ids), username, domain))
if input_response == "y":
for form_data in this_form_accessor.iter_forms(form_ids):
form_attachment_xml_new = self.update_form_data(form_data, new_username)
this_form_accessor.modify_attachment_xml_and_metadata(form_data, form_attachment_xml_new)
logging.info("Updated {} form(s) for user {} in domain {}".format(len(form_ids), username, domain))
elif input_response == "n":
logging.info("No forms updated, exiting.")
else:
logging.info("Command not recognized. Exiting.")
@staticmethod
def update_form_data(form_data, new_username):
form_attachment_xml = form_data.get_attachment("form.xml")
xml_elem = etree.parse(StringIO(six.text_type(form_attachment_xml)))
id_elem = xml_elem.find("{http://openrosa.org/jr/xforms}meta").find(
"{http://openrosa.org/jr/xforms}username")
id_elem.text = new_username
new_form_attachment_xml = etree.tostring(xml_elem, pretty_print=True).decode("UTF-8")
return new_form_attachment_xml
| bsd-3-clause | Python |
199e09408c252c703cdc6668e0bd4593c3642b55 | Allow to pass different codecs | torbenbrodt/smarthome,torbenbrodt/smarthome,torbenbrodt/smarthome,torbenbrodt/smarthome | cast/cast.py | cast/cast.py | # pip install -r requirements.txt
from __future__ import print_function
from ConfigParser import ConfigParser
from os.path import expanduser
import time
import pychromecast
import argparse
parser = argparse.ArgumentParser(description='Chromecast toolset.')
parser.add_argument('--url', help='url of the video')
parser.add_argument('--seekforward', type=int, help='Skip by n seconds')
parser.add_argument('--seekback', type=int, help='Rewind by n seconds')
parser.add_argument('--stop', help='Send the STOP command.', nargs='?')
parser.add_argument('--status', help='Print status information.', nargs='?')
parser.add_argument('--pause', help='Send the PAUSE command.')
parser.add_argument('--play', help='Send the PLAY command.')
parser.add_argument('--skip', help='Skips rest of the media.')
parser.add_argument('--rewind', help='Starts playing the media from the beginning.')
parser.add_argument('--codec', help='Codec, e.g. video/mp4')
args = parser.parse_args()
config = ConfigParser()
config.read(expanduser('~/assistant-helper/smarthome.ini'))
CHROMECAST_HOST = config.get('cast', 'chromecast_ip')
if CHROMECAST_HOST:
cast = pychromecast.Chromecast(CHROMECAST_HOST)
else:
chromecasts = pychromecast.get_chromecasts()
cast = next(cc for cc in chromecasts if cc.device.friendly_name == "WohnzimmerTV")
# Wait for cast device to be ready
cast.wait()
print("wait for chromecast")
mc = cast.media_controller
if args.url:
if args.codec:
codec = args.codec
else:
codec = 'video/mp4'
mc.play_media(args.url, codec)
mc.block_until_active()
if args.play:
mc.play()
if args.pause:
mc.pause()
if args.seekforward:
mc.seek(mc.status.current_time + args.seekforward);
if args.seekback:
mc.seek(mc.status.current_time - args.seekback);
if args.stop:
mc.stop()
if args.pause:
mc.pause()
if args.skip:
mc.skip()
if args.rewind:
mc.rewind()
if args.status:
print(cast.device)
print(cast.status)
print(mc.status)
| # pip install -r requirements.txt
from __future__ import print_function
from ConfigParser import ConfigParser
from os.path import expanduser
import time
import pychromecast
import argparse
parser = argparse.ArgumentParser(description='Chromecast toolset.')
parser.add_argument('--url', help='url of the video')
parser.add_argument('--seekforward', type=int, help='Skip by n seconds')
parser.add_argument('--seekback', type=int, help='Rewind by n seconds')
parser.add_argument('--stop', help='Send the STOP command.', nargs='?')
parser.add_argument('--status', help='Print status information.', nargs='?')
parser.add_argument('--pause', help='Send the PAUSE command.')
parser.add_argument('--play', help='Send the PLAY command.')
parser.add_argument('--skip', help='Skips rest of the media.')
parser.add_argument('--rewind', help='Starts playing the media from the beginning.')
args = parser.parse_args()
config = ConfigParser()
config.read(expanduser('~/assistant-helper/smarthome.ini'))
CHROMECAST_HOST = config.get('cast', 'chromecast_ip')
if CHROMECAST_HOST:
cast = pychromecast.Chromecast(CHROMECAST_HOST)
else:
chromecasts = pychromecast.get_chromecasts()
cast = next(cc for cc in chromecasts if cc.device.friendly_name == "WohnzimmerTV")
# Wait for cast device to be ready
cast.wait()
print("wait for chromecast")
mc = cast.media_controller
if args.url:
mc.play_media(args.url, 'video/mp4')
mc.block_until_active()
if args.play:
mc.play()
if args.pause:
mc.pause()
if args.seekforward:
mc.seek(mc.status.current_time + args.seekforward);
if args.seekback:
mc.seek(mc.status.current_time - args.seekback);
if args.stop:
mc.stop()
if args.pause:
mc.pause()
if args.skip:
mc.skip()
if args.rewind:
mc.rewind()
if args.status:
print(cast.device)
print(cast.status)
print(mc.status)
| apache-2.0 | Python |
737d069c57c3cb2d6305f8e5d1f7d88402ef1327 | Add support for v2 endpoints in config. | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | go/apps/jsbox/definition.py | go/apps/jsbox/definition.py | import json
from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
raw_js_config = app_config.get("config", {}).get("value", {})
try:
js_config = json.loads(raw_js_config)
except Exception:
return []
endpoints = set()
# vumi-jssandbox-toolkit v2 endpoints
try:
endpoints.update(js_config["endpoints"].keys())
except Exception:
pass
# vumi-jssandbox-toolkit v1 endpoints
try:
pool, tag = js_config["sms_tag"]
endpoints.add("%s:%s" % (pool, tag))
except Exception:
pass
return sorted(endpoints)
| import json
from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
raw_js_config = app_config.get("config", {}).get("value", {})
try:
js_config = json.loads(raw_js_config)
pool, tag = js_config.get("sms_tag")
except Exception:
return []
return ["%s:%s" % (pool, tag)]
| bsd-3-clause | Python |
2d81c83545703359ab13652997bdffffc2b0f62b | Fix TOC | tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples | lite/tools/build_model_maker_api_docs.py | lite/tools/build_model_maker_api_docs.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate python docs for tf.lite.
# How to run
```
python build_docs.py --output_dir=/path/to/output
```
"""
import pathlib
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import tensorflow_examples
import tflite_model_maker
import yaml
class OrderedDumper(yaml.dumper.Dumper):
pass
def _dict_representer(dumper, data):
"""Force yaml to output dictionaries in order, not alphabetically."""
return dumper.represent_dict(data.items())
OrderedDumper.add_representer(dict, _dict_representer)
flags.DEFINE_string('output_dir', '/tmp/mm_api/',
'The path to output the files to')
flags.DEFINE_string('code_url_prefix',
'https://github.com/tensorflow/examples/blob/master/',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', '/', 'Path prefix in the _toc.yaml')
FLAGS = flags.FLAGS
def main(_):
doc_generator = generate_lib.DocGenerator(
root_title='TensorFlow Lite Model Maker',
py_modules=[('tflite_model_maker', tflite_model_maker)],
code_url_prefix=FLAGS.code_url_prefix,
# Since model_maker imports from tensorflow_examples 'it'
# needs to use the tensorflow_examples path as the base_dir
# otherwise no docs are generated because they're in an 'external'
# module
base_dir=str(pathlib.Path(tensorflow_examples.__file__).parent),
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[public_api.explicit_package_contents_filter])
doc_generator.build(output_dir=FLAGS.output_dir)
toc_file = pathlib.Path(FLAGS.output_dir) / 'tflite_model_maker/_toc.yaml'
toc = yaml.safe_load(toc_file.read_text())
## Nest all sub-modules under the root module instead of beside it.
#
# Before:
#
# mm
# mm.compat
# mm.configs
#
# After:
#
# mm
# compat
# configs
# The first item of the toc is the root module.
mm = toc['toc'][0]
mm['status'] = 'experimental'
# Shorten the title, and insert each sub-modules into the root module's
# "section"
sub_sections = mm['section']
# The remaining items are the submodules
for section in toc['toc'][1:]:
section['title'] = section['title'].replace('tflite_model_maker.', '')
sub_sections.append(section)
# replace the list of (sub)modules with the root module.
toc['toc'] = [mm]
with toc_file.open('w') as f:
yaml.dump(toc, f, Dumper=OrderedDumper)
if __name__ == '__main__':
app.run(main)
| # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate python docs for tf.lite.
# How to run
```
python build_docs.py --output_dir=/path/to/output
```
"""
import pathlib
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import tensorflow_examples
import tflite_model_maker
flags.DEFINE_string('output_dir', '/tmp/mm_api/',
'The path to output the files to')
flags.DEFINE_string('code_url_prefix',
'https://github.com/tensorflow/examples/blob/master/',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', '/', 'Path prefix in the _toc.yaml')
FLAGS = flags.FLAGS
def main(_):
doc_generator = generate_lib.DocGenerator(
root_title='TensorFlow Lite Model Maker',
py_modules=[('tflite_model_maker', tflite_model_maker)],
code_url_prefix=FLAGS.code_url_prefix,
# Since model_maker imports from tensorflow_examples 'it'
# needs to use the tensorflow_examples path as the base_dir
# otherwise no docs are generated because they're in an 'external'
# module
base_dir=str(pathlib.Path(tensorflow_examples.__file__).parent),
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[public_api.explicit_package_contents_filter])
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | Python |
b595f5c67cd9ed99233df3524b41f6ca972dc22d | change default | kiyukuta/string_recorder,kiyukuta/string_recorder | string_recorder/__init__.py | string_recorder/__init__.py | from string_recorder.string_recorder_pil import StringRecorder
| from string_recorder.string_recorder import StringRecorder
| mit | Python |
5b27973ef242ef949b292f03e8320c31ac1a62a1 | Disable testing when dev scope isn't set. | amarburg/apriltags,amarburg/apriltags,amarburg/apriltags,amarburg/apriltags,amarburg/apriltags | conanfile.py | conanfile.py | from conans import ConanFile, CMake
class ApriltagsConan(ConanFile):
name = "apriltags"
version = "master"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
options = {"opencv_dir": "ANY",
"use_openmp": [True,False],
"shared": [True,False]}
default_options = "opencv_dir=''", "shared=True", "use_openmp=False"
exports = '*'
def config(self):
if self.scope.dev and self.scope.build_tests:
self.requires( "gtest/1.8.0@lasote/stable" )
self.options["gtest"].shared = False
def imports(self):
self.copy("*.dll", dst="bin", src="bin") # From bin to bin
self.copy("*.dylib*", dst="bin", src="lib") # From lib to bin
def build(self):
cmake = CMake(self.settings)
cmake_opts = "-DUSE_CONAN:BOOL=TRUE "
cmake_opts += "-DUSE_OPENMP:BOOL=%s " % (self.options.use_openmp)
cmake_opts += "-DOpenCV_DIR:PATH=%s " % (self.options.opencv_dir) if self.options.opencv_dir else ""
if not self.scope.dev:
cmake_opts += "-DBUILD_PERF_TESTS:BOOL=FALSE -DBUILD_UNIT_TESTS:BOOL=FALSE "
self.run('cmake "%s" %s %s' % (self.conanfile_directory,
cmake.command_line, cmake_opts))
self.run('cmake --build . %s' % (cmake.build_config))
def package(self):
self.copy("*.h", dst="")
#if self.options.shared:
if self.settings.os == "Macos":
self.copy(pattern="*.dylib", dst="lib", keep_path=False)
else:
self.copy(pattern="*.so*", dst="lib", src="lib", keep_path=False)
#else:
# self.copy(pattern="*.a", dst="lib", src="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["apriltags"]
| from conans import ConanFile, CMake
class ApriltagsConan(ConanFile):
name = "apriltags"
version = "master"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
options = {"opencv_dir": "ANY",
"use_openmp": [True,False],
"shared": [True,False]}
default_options = "opencv_dir=''", "shared=True", "use_openmp=False"
exports = '*'
def config(self):
if self.scope.dev and self.scope.build_tests:
self.requires( "gtest/1.8.0@lasote/stable" )
self.options["gtest"].shared = False
def imports(self):
self.copy("*.dll", dst="bin", src="bin") # From bin to bin
self.copy("*.dylib*", dst="bin", src="lib") # From lib to bin
def build(self):
cmake = CMake(self.settings)
cmake_opts = "-DUSE_CONAN:BOOL=TRUE "
cmake_opts += "-DUSE_OPENMP:BOOL=%s " % (self.options.use_openmp)
cmake_opts += "-DOpenCV_DIR:PATH=%s " % (self.options.opencv_dir) if self.options.opencv_dir else ""
self.run('cmake "%s" %s %s' % (self.conanfile_directory,
cmake.command_line, cmake_opts))
self.run('cmake --build . %s' % (cmake.build_config))
def package(self):
self.copy("*.h", dst="")
#if self.options.shared:
if self.settings.os == "Macos":
self.copy(pattern="*.dylib", dst="lib", keep_path=False)
else:
self.copy(pattern="*.so*", dst="lib", src="lib", keep_path=False)
#else:
# self.copy(pattern="*.a", dst="lib", src="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["apriltags"]
| lgpl-2.1 | Python |
72793a2fe309e296e37b39174178825db93f5641 | Bump version: 0.0.3 -> 0.0.4 | polysquare/cmake-unit | conanfile.py | conanfile.py | from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.4"
class CMakeUnitConan(ConanFile):
name = "cmake-unit"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-call-function/master@smspillaz/cmake-call-function",
"cmake-opt-arg-parsing/master@smspillaz/cmake-opt-arg-parsing",
"cmake-forward-cache/master@smspillaz/cmake-forward-cache",
"cmake-spacify-list/master@smspillaz/cmake-spacify-list",
"cmake-forward-arguments/master"
"@smspillaz/cmake-forward-arguments")
url = "http://github.com/polysquare/cmake-unit"
license = "MIT"
def source(self):
zip_name = "cmake-unit.zip"
download("https://github.com/polysquare/"
"cmake-unit/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/cmake-unit",
src="cmake-unit-" + VERSION,
keep_path=True)
| from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.3"
class CMakeUnitConan(ConanFile):
name = "cmake-unit"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"cmake-call-function/master@smspillaz/cmake-call-function",
"cmake-opt-arg-parsing/master@smspillaz/cmake-opt-arg-parsing",
"cmake-forward-cache/master@smspillaz/cmake-forward-cache",
"cmake-spacify-list/master@smspillaz/cmake-spacify-list",
"cmake-forward-arguments/master"
"@smspillaz/cmake-forward-arguments")
url = "http://github.com/polysquare/cmake-unit"
license = "MIT"
def source(self):
zip_name = "cmake-unit.zip"
download("https://github.com/polysquare/"
"cmake-unit/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="*.cmake",
dst="cmake/cmake-unit",
src="cmake-unit-" + VERSION,
keep_path=True)
| mit | Python |
e57aef65510e367fadd4776e2ed4cd247c43e8c1 | update conanfile to export pakage | NTNU-IHB/FMI4cpp,NTNU-IHB/FMI4cpp,NTNU-IHB/FMI4cpp | conanfile.py | conanfile.py |
from conans import ConanFile, CMake
class FMI4cppConan(ConanFile):
name = "FMI4cpp"
version = "0.5.4"
license = "MIT"
author = "Lars Ivar Hatledal [larsivarhatledal@gmail.com]"
url = "https://github.com/NTNU-IHB/FMI4cpp"
description = "FMI 2.0 implementation written in modern C++."
topics = ("FMI", "co-simulation", "model-exchange", "cpp17")
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
exports_sources = "include/*", "src/*", "cmake/*", "CMakeLists.txt"
requires = (
"boost/1.66.0@conan/stable",
"libzip/1.5.1@bincrafters/stable",
"spdlog/1.3.1@bincrafters/stable"
)
options = {"curl": [True, False]}
default_options = (
"curl=False",
"boost:shared=True",
"libzip:shared=True"
)
def configure(self):
if self.options.curl:
self.options["libcurl"].shared=True
def requirements(self):
if self.options.curl:
self.requires("OpenSSL/1.0.2o@conan/stable")
self.requires("libcurl/7.61.1@bincrafters/stable")
def build(self):
cmake = CMake(self)
cmake.definitions["FMI4CPP_WITH_ODEINT"] = "ON"
cmake.definitions["FMI4CPP_USING_CONAN"] = "ON"
cmake.definitions["FMI4CPP_BUILD_TOOL"] = "OFF"
cmake.definitions["FMI4CPP_USING_VCPKG"] = "OFF"
cmake.definitions["FMI4CPP_BUILD_TESTS"] = "OFF"
cmake.definitions["FMI4CPP_BUILD_EXAMPLES"] = "OFF"
cmake.configure()
cmake.build()
def package_info(self):
self.cpp_info.libs = ["fmi4cpp"]
|
from conans import ConanFile, CMake
class FMI4cppConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "virtualrunenv"
requires = (
"boost/1.66.0@conan/stable",
"libzip/1.5.1@bincrafters/stable",
"spdlog/1.3.1@bincrafters/stable"
)
options = {"curl": [True, False]}
default_options = (
"curl=False",
"boost:shared=True",
"libzip:shared=True"
)
def configure(self):
if self.options.curl:
self.options["libcurl"].shared=True
def requirements(self):
if self.options.curl:
self.requires("OpenSSL/1.0.2o@conan/stable")
self.requires("libcurl/7.61.1@bincrafters/stable")
| mit | Python |
676638ede872decf0d037a7edaf13b0b9bad047a | Fix ceilometer-test-event.py script | openstack/ceilometer,idegtiarov/ceilometer,ityaptin/ceilometer,openstack/ceilometer,ityaptin/ceilometer,idegtiarov/ceilometer | tools/ceilometer-test-event.py | tools/ceilometer-test-event.py | #!/usr/bin/env python
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool help you debug your event definitions.
Feed it a list of test notifications in json format, and it will show
you what events will be generated.
"""
import json
import sys
from oslo_config import cfg
from stevedore import extension
from ceilometer.event import converter
from ceilometer import service
cfg.CONF.register_cli_opts([
cfg.StrOpt('input-file',
short='i',
help='File to read test notifications from.'
' (Containing a json list of notifications.)'
' defaults to stdin.'),
cfg.StrOpt('output-file',
short='o',
help='File to write results to. Defaults to stdout.'),
])
TYPES = {1: 'text',
2: 'int',
3: 'float',
4: 'datetime'}
service.prepare_service()
output_file = cfg.CONF.output_file
input_file = cfg.CONF.input_file
if output_file is None:
out = sys.stdout
else:
out = open(output_file, 'w')
if input_file is None:
notifications = json.load(sys.stdin)
else:
with open(input_file, 'r') as f:
notifications = json.load(f)
out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file)
out.write("Notifications tested: %s\n" % len(notifications))
event_converter = converter.setup_events(
extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin'))
for notification in notifications:
event = event_converter.to_event(notification)
if event is None:
out.write("Dropped notification: %s\n" %
notification['message_id'])
continue
out.write("Event: %s at %s\n" % (event.event_type, event.generated))
for trait in event.traits:
dtype = TYPES[trait.dtype]
out.write(" Trait: name: %s, type: %s, value: %s\n" % (
trait.name, dtype, trait.value))
| #!/usr/bin/env python
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool help you debug your event definitions.
Feed it a list of test notifications in json format, and it will show
you what events will be generated.
"""
import json
import sys
from oslo_config import cfg
from stevedore import extension
from ceilometer.event import converter
from ceilometer import service
cfg.CONF.register_cli_opts([
cfg.StrOpt('input-file',
short='i',
help='File to read test notifications from.'
' (Containing a json list of notifications.)'
' defaults to stdin.'),
cfg.StrOpt('output-file',
short='o',
help='File to write results to. Defaults to stdout.'),
])
TYPES = {1: 'text',
2: 'int',
3: 'float',
4: 'datetime'}
service.prepare_service()
config_file = converter.get_config_file()
output_file = cfg.CONF.output_file
input_file = cfg.CONF.input_file
if output_file is None:
out = sys.stdout
else:
out = open(output_file, 'w')
if input_file is None:
notifications = json.load(sys.stdin)
else:
with open(input_file, 'r') as f:
notifications = json.load(f)
out.write("Definitions file: %s\n" % config_file)
out.write("Notifications tested: %s\n" % len(notifications))
event_converter = converter.setup_events(
extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin'))
for notification in notifications:
event = event_converter.to_event(notification)
if event is None:
out.write("Dropped notification: %s\n" %
notification['message_id'])
continue
out.write("Event: %s at %s\n" % (event.event_name, event.generated))
for trait in event.traits:
dtype = TYPES[trait.dtype]
out.write(" Trait: name: %s, type: %s, value: %s\n" % (
trait.name, dtype, trait.value))
| apache-2.0 | Python |
cef1a4ff0da96741a560889de73f8c4529ad9404 | remove empty line | demianw/dipy,jyeatman/dipy,rfdougherty/dipy,sinkpoint/dipy,JohnGriffiths/dipy,samuelstjean/dipy,oesteban/dipy,matthieudumont/dipy,rfdougherty/dipy,nilgoyyou/dipy,FrancoisRheaultUS/dipy,nilgoyyou/dipy,mdesco/dipy,Messaoud-Boudjada/dipy,StongeEtienne/dipy,jyeatman/dipy,villalonreina/dipy,Messaoud-Boudjada/dipy,samuelstjean/dipy,sinkpoint/dipy,samuelstjean/dipy,beni55/dipy,beni55/dipy,JohnGriffiths/dipy,demianw/dipy,oesteban/dipy,villalonreina/dipy,matthieudumont/dipy,FrancoisRheaultUS/dipy,StongeEtienne/dipy,mdesco/dipy | doc/examples/reconst_shore.py | doc/examples/reconst_shore.py | """
====================================================
Continuous and analytical diffusion signal modelling
====================================================
We show how to model the diffusion signal as a linear combination
of continuous functions from the SHORE basis (Ozarslan et al. ISMRM 2009).
We also compute analytically the ODF.
First import the necessary modules:
"""
#from dipy.data import three_shells_voxels, two_shells_voxels, get_sphere
from dipy.reconst.shore import ShoreModel
from dipy.reconst.shm import sh_to_sf
from dipy.viz import fvtk
from dipy.data import fetch_isbi2013_2shell, read_isbi2013_2shell, get_sphere
from dipy.core.gradients import gradient_table
"""
Download and read the data for this tutorial.
two_shells_voxels() provides data from the ISBI HARDI contest 2013 acquired
for two shells at b-values 1500 and 2500.
The six parameters of these two functions define the ROI where to reconstruct
the data. They respectively correspond to (xmin,xmax,ymin,ymax,zmin,zmax)
with x,y and the three axis defining the spatial positions of the voxels.
"""
fetch_isbi2013_2shell()
img, gtab=read_isbi2013_2shell()
data = img.get_data()
data_small=data[10:40,10:40,25]
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data contains the voxel data and gtab contains a GradientTable
object (gradient information e.g. b-values). For example to read the b-values
it is possible to write print(gtab.bvals).
Instantiate the SHORE Model.
radial_order is the radial order of the SHORE basis.
zeta is the scale factor of the SHORE basis.
lambdaN and lambdaN are the radial and angular regularization constants, respectively.
For details regarding these four parameters see (Cheng J. et al, MICCAI workshop 2011) and
(Merlet S. et al, Medical Image Analysis 2013).
"""
radial_order = 6
zeta = 700
lambdaN=1e-8
lambdaL=1e-8
asm = ShoreModel(gtab, radial_order=radial_order, zeta=zeta, lambdaN=lambdaN, lambdaL=lambdaL)
"""
Fit the SHORE model to the data
"""
asmfit = asm.fit(data_small)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Compute the ODF
"""
odf = asmfit.odf(sphere)
print('odf.shape (%d, %d, %d)' % odf.shape)
"""
Display the ODFs
"""
r = fvtk.ren()
fvtk.add(r, fvtk.sphere_funcs(odf, sphere, colormap='jet'))
fvtk.show(r)
fvtk.record(r, n_frames=1, out_path='odfs.png', size=(600, 600))
"""
.. include:: ../links_names.inc
"""
| """
====================================================
Continuous and analytical diffusion signal modelling
====================================================
We show how to model the diffusion signal as a linear combination
of continuous functions from the SHORE basis (Ozarslan et al. ISMRM 2009).
We also compute analytically the ODF.
First import the necessary modules:
"""
#from dipy.data import three_shells_voxels, two_shells_voxels, get_sphere
from dipy.reconst.shore import ShoreModel
from dipy.reconst.shm import sh_to_sf
from dipy.viz import fvtk
from dipy.data import fetch_isbi2013_2shell, read_isbi2013_2shell, get_sphere
from dipy.core.gradients import gradient_table
"""
Download and read the data for this tutorial.
two_shells_voxels() provides data from the ISBI HARDI contest 2013 acquired
for two shells at b-values 1500 and 2500.
The six parameters of these two functions define the ROI where to reconstruct
the data. They respectively correspond to (xmin,xmax,ymin,ymax,zmin,zmax)
with x,y and the three axis defining the spatial positions of the voxels.
"""
fetch_isbi2013_2shell()
img, gtab=read_isbi2013_2shell()
data = img.get_data()
data_small=data[10:40,10:40,25]
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data contains the voxel data and gtab contains a GradientTable
object (gradient information e.g. b-values). For example to read the b-values
it is possible to write print(gtab.bvals).
Instantiate the SHORE Model.
radial_order is the radial order of the SHORE basis.
zeta is the scale factor of the SHORE basis.
lambdaN and lambdaN are the radial and angular regularization constants, respectively.
For details regarding these four parameters see (Cheng J. et al, MICCAI workshop 2011) and
(Merlet S. et al, Medical Image Analysis 2013).
"""
radial_order = 6
zeta = 700
lambdaN=1e-8
lambdaL=1e-8
asm = ShoreModel(gtab, radial_order=radial_order, zeta=zeta, lambdaN=lambdaN, lambdaL=lambdaL)
"""
Fit the SHORE model to the data
"""
asmfit = asm.fit(data_small)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Compute the ODF
"""
odf = asmfit.odf(sphere)
print('odf.shape (%d, %d, %d)' % odf.shape)
"""
Display the ODFs
"""
r = fvtk.ren()
fvtk.add(r, fvtk.sphere_funcs(odf, sphere, colormap='jet'))
fvtk.show(r)
"""
.. include:: ../links_names.inc
"""
| bsd-3-clause | Python |
c97e5cf11fc21e2ef4ee04779a424e4d6a2b96ae | Add CustomizeBrowserOptions method to Metric base class | mogoweb/chromium-crosswalk,Just-D/chromium-1,M4sse/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,M4sse/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,ltilve/chromium,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,dushu1203/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,jaruba/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,jaruba/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,krieger-od/nwjs
_chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,axinging/chromium-crosswalk,ondra-novak/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,anirudhSK/chromium,M4sse/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,Jonekee/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,patrickm/chromium.src,Just-D/chromium-1,dushu1203/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,jaruba/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,patrickm/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,ltilve/chromium,Just-D
/chromium-1,Jonekee/chromium.src,anirudhSK/chromium,markYoungH/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,Chilledheart/chromium,mogoweb/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,ltilve/chromium,patrickm/chromium.src,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,patrickm/chromium.src,M4sse/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,anirudhSK/chromium,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,ltilve/chromium,dednal/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,mogoweb/chromium-crosswalk,anirudhSK/chromium,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,dednal/chromium.src,jaruba/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,dednal/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,dushu1203/chromium.src,ltilve/chromium,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.s
rc,littlstar/chromium.src,chuan9/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,dednal/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,ltilve/chromium,patrickm/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src | tools/perf/metrics/__init__.py | tools/perf/metrics/__init__.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Metric(object):
"""Base class for all the metrics that are used by telemetry measurements.
The Metric class represents a way of measuring something. Metrics are
helper classes used by PageMeasurements. Each PageMeasurement may use
multiple metrics; each metric should be focussed on collecting data
about one thing.
"""
def CustomizeBrowserOptions(self, options):
"""Add browser options that are required by this metric.
Some metrics do not have any special browser options that need
to be added, and they do not need to override this method; by
default, no browser options are added.
To add options here, call options.AppendExtraBrowserArg(arg).
"""
pass
def Start(self, page, tab):
"""Start collecting data for this metric."""
raise NotImplementedError()
def Stop(self, page, tab):
"""Stop collecting data for this metric (if applicable)."""
raise NotImplementedError()
def AddResults(self, tab, results):
"""Add the data collected into the results object for a measurement.
Metrics may implement AddResults to provide a common way to add results
to the PageMeasurementResults in PageMeasurement.AddMeasurement --
results should be added with results.Add(trace_name, unit, value).
"""
raise NotImplementedError()
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Metric(object):
"""Base class for all the metrics that are used by telemetry measurements.
The Metric class represents a way of measuring something. Metrics are
helper classes used by PageMeasurements. Each PageMeasurement may use
multiple metrics; each metric should be focussed on collecting data
about one thing.
"""
def Start(self, page, tab):
"""Start collecting data for this metric."""
raise NotImplementedError()
def Stop(self, page, tab):
"""Stop collecting data for this metric (if applicable)."""
raise NotImplementedError()
def AddResults(self, tab, results):
"""Add the data collected into the results object for a measurement.
Metrics may implement AddResults to provide a common way to add results
to the PageMeasurementResults in PageMeasurement.AddMeasurement --
results should be added with results.Add(trace_name, unit, value).
"""
raise NotImplementedError()
| bsd-3-clause | Python |
bafb9e91958a122ebd8ee623c8c623c72617f1aa | Fix scanning class. | lnls-fac/apsuite | apsuite/optimization/scanning.py | apsuite/optimization/scanning.py | """Multidimensional Simple Scan method for Minimization."""
import numpy as _np
from .base import OptimizeParams as _OptimizeParams, Optimize as _Optimize
class SimpleScanParams(_OptimizeParams):
"""."""
def __init__(self):
"""."""
super().__init__()
self.number_of_steps = 10
def __str__(self):
"""."""
stg = self._TMPD.format('number_of_steps', self.number_of_steps)
stg += super().__str__(self)
return stg
def to_dict(self):
"""."""
dic = super().to_dict()
dic['number_of_steps'] = self.number_of_steps
def from_dict(self, dic):
"""."""
super().from_dict(dic)
self.number_of_steps = dic.get('number_of_steps', self.number_of_steps)
class SimpleScan(_Optimize):
"""."""
def __init__(self, use_thread=True):
"""."""
super().__init__(
self, params=SimpleScanParams(), use_thread=use_thread)
self.params = SimpleScanParams()
def set_limits(self, upper=None, lower=None):
"""."""
self._upper_limits = upper
self._lower_limits = lower
def _optimize(self):
"""."""
num_pts = self.params.number_of_steps
num_dims = self.params.initial_position.shape[-1]
shape = num_dims * (num_pts, )
size = num_pts**num_dims
low = self.params.limit_lower
high = self.params.limit_upper
delta = high - low
self.best_objfuncs = _np.zeros(size, dtype=float)
self.best_positions = _np.zeros((size, num_dims), dtype=float)
for i in range(size):
ivec = _np.unravel_index(i, shape)
pos = low + (delta * ivec)/(num_pts - 1)
self.best_positions[i] = pos
self.best_objfuncs[i] = self._objective_func(pos)
| """Multidimensional Simple Scan method for Minimization."""
from threading import Thread as _Thread
import numpy as _np
class SimpleScan:
"""."""
def __init__(self):
"""."""
self._position = _np.array([])
self._delta = _np.array([])
self._curr_dim = 0
self._stop = False
def set_limits(self, upper=None, lower=None):
"""."""
self._upper_limits = upper
self._lower_limits = lower
self.ndim = len(upper)
def _optimize(self, npoints):
"""."""
self._delta = _np.zeros(npoints)
func = _np.zeros(self._ndim)
best = _np.zeros(self._ndim)
for i in range(self._ndim):
self._delta = _np.linspace(
self._lower_limits[i], self._upper_limits[i], npoints)
self._curr_dim = i
func[i], best[i] = self.calc_obj_fun()
self._position[i] = best[i]
print('Best result is: ' + str(best))
print('Figure of merit is: ' + str(_np.min(func)))
| mit | Python |
8d15f88e8c1e400a5f28622aa16e18587aea8a3c | Handle absent email key | pennersr/django-allauth,rsalmaso/django-allauth,rsalmaso/django-allauth,pennersr/django-allauth,pennersr/django-allauth,rsalmaso/django-allauth | allauth/socialaccount/providers/amazon/provider.py | allauth/socialaccount/providers/amazon/provider.py | from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class AmazonAccount(ProviderAccount):
def to_str(self):
return self.account.extra_data.get("name", super(AmazonAccount, self).to_str())
class AmazonProvider(OAuth2Provider):
id = "amazon"
name = "Amazon"
account_class = AmazonAccount
def get_default_scope(self):
return ["profile"]
def extract_uid(self, data):
return str(data["user_id"])
def extract_common_fields(self, data):
# Hackish way of splitting the fullname.
# Asumes no middlenames.
name = data.get("name", "")
first_name, last_name = name, ""
if name and " " in name:
first_name, last_name = name.split(" ", 1)
return dict(email=data.get("email", ""), last_name=last_name, first_name=first_name)
provider_classes = [AmazonProvider]
| from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class AmazonAccount(ProviderAccount):
def to_str(self):
return self.account.extra_data.get("name", super(AmazonAccount, self).to_str())
class AmazonProvider(OAuth2Provider):
id = "amazon"
name = "Amazon"
account_class = AmazonAccount
def get_default_scope(self):
return ["profile"]
def extract_uid(self, data):
return str(data["user_id"])
def extract_common_fields(self, data):
# Hackish way of splitting the fullname.
# Asumes no middlenames.
name = data.get("name", "")
first_name, last_name = name, ""
if name and " " in name:
first_name, last_name = name.split(" ", 1)
return dict(email=data["email"], last_name=last_name, first_name=first_name)
provider_classes = [AmazonProvider]
| mit | Python |
59a1047d4105b94181a75b1d496469f0efe2989f | add admin action test | inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree | InvenTree/plugin/test_api.py | InvenTree/plugin/test_api.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import reverse
from InvenTree.api_tester import InvenTreeAPITestCase
class PluginDetailAPITest(InvenTreeAPITestCase):
"""
Tests the plugin AP I endpoints
"""
roles = [
'admin.add',
'admin.view',
'admin.change',
'admin.delete',
]
def setUp(self):
self.MSG_NO_PKG = 'Either packagenmae of url must be provided'
self.PKG_NAME = 'minimal'
super().setUp()
def test_plugin_install(self):
"""
Test the plugin install command
"""
url = reverse('api-plugin-install')
# valid - Pypi
data = self.post(url, {
'confirm': True,
'packagename': self.PKG_NAME
}, expected_code=201).data
self.assertEqual(data['success'], True)
# invalid tries
# no input
self.post(url, {}, expected_code=400)
# no package info
data = self.post(url, {
'confirm': True,
}, expected_code=400).data
self.assertEqual(data['url'][0].title().upper(), self.MSG_NO_PKG.upper())
self.assertEqual(data['packagename'][0].title().upper(), self.MSG_NO_PKG.upper())
# not confirmed
self.post(url, {
'packagename': self.PKG_NAME
}, expected_code=400).data
data = self.post(url, {
'packagename': self.PKG_NAME,
'confirm': False,
}, expected_code=400).data
self.assertEqual(data['confirm'][0].title().upper(), 'Installation not confirmed'.upper())
def test_admin_action(self):
"""
Test the PluginConfig action commands
"""
from plugin.models import PluginConfig
from plugin import plugin_reg
url = reverse('admin:plugin_pluginconfig_changelist')
fixtures = PluginConfig.objects.all()
# check if plugins were registered -> in some test setups the startup has no db access
if not fixtures:
plugin_reg.reload_plugins()
fixtures = PluginConfig.objects.all()
print([str(a) for a in fixtures])
fixtures = fixtures.first()
# deactivate plugin
self.post(url, {
'action': 'plugin_deactivate',
'_selected_action': [f.pk for f in fixtures],
}, expected_code=200)
# deactivate plugin - deactivate again -> nothing will hapen but the nothing 'changed' function is triggered
self.post(url, {
'action': 'plugin_deactivate',
'_selected_action': [f.pk for f in fixtures],
}, expected_code=200)
# activate plugin
self.post(url, {
'action': 'plugin_activate',
'_selected_action': [f.pk for f in fixtures],
}, expected_code=200)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import reverse
from InvenTree.api_tester import InvenTreeAPITestCase
class PluginDetailAPITest(InvenTreeAPITestCase):
"""
Tests the plugin AP I endpoints
"""
roles = [
'admin.add',
'admin.view',
'admin.change',
'admin.delete',
]
def setUp(self):
self.MSG_NO_PKG = 'Either packagenmae of url must be provided'
self.PKG_NAME = 'minimal'
super().setUp()
def test_plugin_install(self):
"""
Test the plugin install command
"""
url = reverse('api-plugin-install')
# valid - Pypi
data = self.post(url, {
'confirm': True,
'packagename': self.PKG_NAME
}, expected_code=201).data
self.assertEqual(data['success'], True)
# invalid tries
# no input
self.post(url, {}, expected_code=400)
# no package info
data = self.post(url, {
'confirm': True,
}, expected_code=400).data
self.assertEqual(data['url'][0].title().upper(), self.MSG_NO_PKG.upper())
self.assertEqual(data['packagename'][0].title().upper(), self.MSG_NO_PKG.upper())
# not confirmed
self.post(url, {
'packagename': self.PKG_NAME
}, expected_code=400).data
data = self.post(url, {
'packagename': self.PKG_NAME,
'confirm': False,
}, expected_code=400).data
self.assertEqual(data['confirm'][0].title().upper(), 'Installation not confirmed'.upper())
| mit | Python |
f9c7a911411429972929bb4372b370192bd4cf8a | Update crossfilter to gray/blue scheme | altair-viz/altair,jakevdp/altair | altair/examples/interactive_layered_crossfilter.py | altair/examples/interactive_layered_crossfilter.py | """
Interactive Crossfilter
=======================
This example shows a multi-panel view of the same data, where you can interactively
select a portion of the data in any of the panels to highlight that portion in any
of the other panels.
"""
# category: interactive charts
import altair as alt
from vega_datasets import data
source = alt.UrlData(
data.flights_2k.url,
format={'parse': {'date': 'date'}}
)
brush = alt.selection(type='interval', encodings=['x'])
# Define the base chart, with the common parts of the
# background and highlights
base = alt.Chart().mark_bar().encode(
x=alt.X(alt.repeat('column'), type='quantitative', bin=alt.Bin(maxbins=20)),
y='count()'
).properties(
width=160,
height=130
)
# gray background with selection
background = base.encode(
color=alt.value('#ddd')
).add_selection(brush)
# blue highlights on the transformed data
highlight = base.transform_filter(brush)
# layer the two charts & repeat
alt.layer(
background,
highlight,
data=source
).transform_calculate(
"time",
"hours(datum.date)"
).repeat(column=["distance", "delay", "time"])
| """
Interactive Crossfilter
=======================
This example shows a multi-panel view of the same data, where you can interactively
select a portion of the data in any of the panels to highlight that portion in any
of the other panels.
"""
# category: interactive charts
import altair as alt
from vega_datasets import data
source = alt.UrlData(
data.flights_2k.url,
format={'parse': {'date': 'date'}}
)
brush = alt.selection(type='interval', encodings=['x'])
# Define the base chart, with the common parts of the
# background and highlights
base = alt.Chart().mark_bar().encode(
x=alt.X(alt.repeat('column'), type='quantitative', bin=alt.Bin(maxbins=20)),
y='count()'
).properties(
width=160,
height=130
)
# blue background with selection
background = base.add_selection(brush)
# yellow highlights on the transformed data
highlight = base.encode(
color=alt.value('goldenrod')
).transform_filter(brush)
# layer the two charts & repeat
alt.layer(
background,
highlight,
data=source
).transform_calculate(
"time",
"hours(datum.date)"
).repeat(column=["distance", "delay", "time"])
| bsd-3-clause | Python |
1a52a28c5864f82edd19febba14b861d5f3efef1 | fix gibbs ensemble unit test | joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue | hoomd/hpmc/test-py/gibbs.py | hoomd/hpmc/test-py/gibbs.py | from __future__ import division
from hoomd import *
from hoomd import hpmc
import unittest
import math
# this script needs to be run on two ranks
# initialize with one rank per partitions
context.initialize(args="--nrank=1")
class gibbs_ensemble_test(unittest.TestCase):
    """Smoke test for a two-box Gibbs-ensemble HPMC simulation of hard
    spheres.  Must be run with exactly two MPI partitions (one per box)."""

    def setUp(self):
        """Build a simple-cubic lattice of hard 'A' spheres at packing fraction 0.2."""
        p = comm.get_partition()
        phi=0.2
        # Lattice constant for unit-diameter spheres on a simple cubic
        # lattice at packing fraction phi: phi = (pi/6) / a**3.
        a = (1/6*math.pi / phi)**(1/3)
        unitcell=lattice.sc(a=a, type_name='A')
        self.system = init.create_lattice(unitcell=unitcell, n=5)
        # 'B' is the small species inserted via the fugacity machinery below.
        self.system.particles.types.add('B')
        # Offset the seed by the partition index so the two boxes decorrelate.
        self.mc = hpmc.integrate.sphere(seed=123+p)
        self.mc.set_params(d=0.1)
    def tearDown(self):
        """Release the integrator and system and reset the execution context."""
        del self.mc
        del self.system
        context.initialize()
    def test_spheres(self):
        """Run a short Gibbs-ensemble simulation with 'A' particle transfers."""
        # within two-phase region of hard spheres phase diagram
        q=0.8
        etap=0.7
        ntrial = 20  # NOTE(review): unused; set_params below passes the literal 20
        p = comm.get_partition()  # NOTE(review): unused in this method
        # Reservoir number density giving packing fraction etap for spheres
        # of diameter q: nR = etap / ((pi/6) * q**3).
        nR = etap/(math.pi/6.0*math.pow(q,3.0))
        self.mc.set_fugacity('B',nR)
        self.mc.shape_param.set('A', diameter=1.0)
        self.mc.shape_param.set('B', diameter=q)
        # needs to be run with 2 partitions
        muvt=hpmc.update.muvt(mc=self.mc,seed=456,ngibbs=2,transfer_types=['A'])
        muvt.set_params(n_trial=20)
        muvt.set_params(dV=0.01)
        muvt.set_params(move_ratio=.01)
        run(100)
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| from __future__ import division
from hoomd import *
from hoomd import hpmc
import unittest
import math
# this script needs to be run on two ranks
# initialize with one rank per partitions
context.initialize(args="--nrank=1")
class gibbs_ensemble_test(unittest.TestCase):
def setUp(self):
p = comm.get_partition()
phi=0.2
a = (1/6*math.pi / phi)**(1/3)
unitcell=lattice.sc(a=a, type_name='A')
self.system = init.create_lattice(unitcell=unitcell, n=5)
self.system.particles.types.add('B')
self.mc = hpmc.integrate.sphere(seed=123+p,implicit=True)
self.mc.set_params(d=0.1)
def tearDown(self):
del self.mc
del self.system
context.initialize()
def test_spheres(self):
# within two-phase region of hard spheres phase diagram
q=0.8
etap=0.7
ntrial = 20
p = comm.get_partition()
nR = etap/(math.pi/6.0*math.pow(q,3.0))
self.mc.set_fugacity('B',nR)
self.mc.shape_param.set('A', diameter=1.0)
self.mc.shape_param.set('B', diameter=q)
# needs to be run with 2 partitions
muvt=hpmc.update.muvt(mc=self.mc,seed=456,ngibbs=2,transfer_types=['A'])
muvt.set_params(n_trial=20)
muvt.set_params(dV=0.01)
muvt.set_params(move_ratio=.01)
run(100)
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| bsd-3-clause | Python |
d1911e9c545a88c5924d7d631d278e1c225edcf8 | Add log message about unknown couch exception | openprocurement/openprocurement.auction,openprocurement/openprocurement.auction | openprocurement/auction/helpers/couch.py | openprocurement/auction/helpers/couch.py | import socket
from random import sample
from urlparse import urlparse
from couchdb import Server, Session
from time import sleep
import sys
import logging
CONSTANT_IS_TRUE = True
LOGGER = logging.getLogger(__name__)
def couchdb_dns_query_settings(server_url, database_name):
    """Resolve the CouchDB hostname and return a handle to `database_name`
    from the first resolved IP address that accepts a connection.

    Resolved IPs are tried in random order without replacement; raises
    Exception when every address fails.
    """
    parsed_url = urlparse(server_url)
    # Resolve every A record for the host.  NOTE(review): port 80 is
    # hard-coded for the getaddrinfo() lookup even when `server_url` carries
    # a different port -- the port only seeds the lookup and the connection
    # below keeps the URL's own port, but confirm this is intentional.
    all_ips = set([str(i[4][0]) for i in socket.getaddrinfo(urlparse(server_url).hostname, 80)])
    while all_ips:
        # Pick one remaining IP uniformly at random and remove it.
        selected_ip = set(sample(all_ips, 1))
        all_ips -= selected_ip
        # NOTE(review): str.replace swaps *every* occurrence of the hostname
        # substring in the URL, not just the authority part -- verify no URL
        # used here repeats the hostname elsewhere (e.g. in the path).
        couch_url = server_url.replace(parsed_url.hostname, selected_ip.pop())
        try:
            server = Server(couch_url, session=Session(retry_delays=range(10)))
            return server[database_name]
        except socket.error:
            continue
    raise Exception("No route to any couchdb server")
def iterview(server_url, database_name, view_name, sleep_seconds=10, wrapper=None, **options):
    """Iterate the rows in a view, fetching rows in batches and yielding
    one row at a time.  This generator never terminates: when the view is
    exhausted it sleeps for `sleep_seconds` and polls again.

    Since the view's rows are fetched in batches any rows emitted for
    documents added, changed or deleted between requests may be missed or
    repeated.

    :param server_url: URL of the CouchDB server
    :param database_name: name of the database that holds the view
    :param view_name: the name of the view; for custom views, use the format
                      ``design_docid/viewname``, that is, the document ID of
                      the design document and the name of the view, separated
                      by a slash.
    :param sleep_seconds: seconds to wait before polling again when no new
                          rows are available
    :param wrapper: an optional callable that should be used to wrap the
                    result rows
    :param options: optional query string parameters
    :return: row generator
    """
    database = couchdb_dns_query_settings(server_url, database_name)
    start_key = 0
    options['start_key'] = start_key
    options['limit'] = 1000
    while True:
        try:
            rows = list(database.view(view_name, wrapper, **options))
        except socket.error:
            # Connection to the current node failed: re-resolve the server
            # and restart the scan from the first key.
            options['start_key'] = 0
            database = couchdb_dns_query_settings(server_url, database_name)
            continue
        except Exception:
            # logger.exception records the type, message and traceback; then
            # re-raise the *original* exception instead of masking it with a
            # new, message-less `Exception` as the previous code did.
            LOGGER.exception('Couch error while reading view %s', view_name)
            raise
        if rows:
            for row in rows:
                start_key = row['key']
                yield row
        else:
            # No new rows yet: back off before polling again.
            sleep(sleep_seconds)
        # Resume after the last key seen (assumes integer, monotonically
        # increasing keys -- TODO confirm against the view's emit()).
        options['start_key'] = start_key + 1
| import socket
from random import sample
from urlparse import urlparse
from couchdb import Server, Session
from time import sleep
CONSTANT_IS_TRUE = True
def couchdb_dns_query_settings(server_url, database_name):
parsed_url = urlparse(server_url)
all_ips = set([str(i[4][0]) for i in socket.getaddrinfo(urlparse(server_url).hostname, 80)])
while all_ips:
selected_ip = set(sample(all_ips, 1))
all_ips -= selected_ip
couch_url = server_url.replace(parsed_url.hostname, selected_ip.pop())
try:
server = Server(couch_url, session=Session(retry_delays=range(10)))
return server[database_name]
except socket.error:
continue
raise Exception("No route to any couchdb server")
def iterview(server_url, database_name, view_name, sleep_seconds=10, wrapper=None, **options):
"""Iterate the rows in a view, fetching rows in batches and yielding
one row at a time.
Since the view's rows are fetched in batches any rows emitted for
documents added, changed or deleted between requests may be missed or
repeated.
:param name: the name of the view; for custom views, use the format
``design_docid/viewname``, that is, the document ID of the
design document and the name of the view, separated by a
slash.
:param batch: number of rows to fetch per HTTP request.
:param wrapper: an optional callable that should be used to wrap the
result rows
:param options: optional query string parameters
:return: row generator
"""
database = couchdb_dns_query_settings(server_url, database_name)
start_key = 0
options['start_key'] = start_key
options['limit'] = 1000
while CONSTANT_IS_TRUE:
try:
rows = list(database.view(view_name, wrapper, **options))
except socket.error:
options['start_key'] = 0
database = couchdb_dns_query_settings(server_url, database_name)
continue
if len(rows) != 0:
for row in rows:
start_key = row['key']
yield row
else:
sleep(sleep_seconds)
options['start_key'] = (start_key + 1)
| apache-2.0 | Python |
3e312180c1d3a0df2a5ba193cb75961166e92cc6 | Add SignedCertificateTimestamp namedtuple | theno/ctutlz,theno/ctutlz | ctutlz/sct/scrape/tls_extension_18.py | ctutlz/sct/scrape/tls_extension_18.py | import collections
from utlz import flo
from utlz import StructContext
_SctListEntry = collections.namedtuple(
typename='SctListEntry',
field_names=[
'sct_len',
'sct_der',
]
)
_TlsExtension18 = collections.namedtuple(
typename='TlsExtension18',
field_names=[
'tls_extension_type',
'tls_extension_len',
'signed_certificate_timestamp_list_len',
'sct_list',
]
)
def TlsExtension18(extension_18_der):
    """Parse the DER bytes of TLS extension 18 (signed_certificate_timestamp,
    RFC 6962 section 3.3) into a `_TlsExtension18` namedtuple.

    All integer fields are big-endian unsigned 16-bit values ('!H'); the rest
    of the payload is a sequence of length-prefixed SCTs.
    """
    with StructContext(extension_18_der) as struct:
        data_dict = {
            'tls_extension_type': struct.read('!H'),
            'tls_extension_len': struct.read('!H'),
            'signed_certificate_timestamp_list_len': struct.read('!H'),
        }
        sct_list = []
        # Consume length-prefixed SCT entries until the buffer is exhausted.
        while struct.offset < struct.length:
            sct_len = struct.read('!H')
            # `flo` interpolates sct_len into the format string, e.g. '!42s'.
            sct_der = struct.read(flo('!{sct_len}s'))
            sct_list.append(_SctListEntry(sct_len, sct_der))
        return _TlsExtension18(sct_list=sct_list, **data_dict)
_SignedCertificateTimestampList = collections.namedtuple(
typename='SignedCertificateTimestampList',
field_names=[
'signed_certificate_timestamp_list_len',
'sct_list',
]
)
def SignedCertificateTimestampList(sctlist):
    """Parse a serialized SignedCertificateTimestampList (RFC 6962
    section 3.3) into a `_SignedCertificateTimestampList` namedtuple.

    The input starts directly at the list length -- there is no TLS extension
    type/length header in front of it.
    """
    with StructContext(sctlist) as struct:
        data_dict = {
            'signed_certificate_timestamp_list_len': struct.read('!H'),
        }
        sct_list = []
        # Consume length-prefixed SCT entries until the buffer is exhausted.
        while struct.offset < struct.length:
            sct_len = struct.read('!H')
            # `flo` interpolates sct_len into the format string, e.g. '!42s'.
            sct_der = struct.read(flo('!{sct_len}s'))
            sct_list.append(_SctListEntry(sct_len, sct_der))
        return _SignedCertificateTimestampList(sct_list=sct_list, **data_dict)
| import collections
from utlz import flo
from utlz import StructContext
_SctListEntry = collections.namedtuple(
typename='SctListEntry',
field_names=[
'sct_len',
'sct_der',
]
)
_TlsExtension18 = collections.namedtuple(
typename='TlsExtension18',
field_names=[
'tls_extension_type',
'tls_extension_len',
'signed_certificate_timestamp_list_len',
'sct_list',
]
)
def TlsExtension18(extension_18_der):
with StructContext(extension_18_der) as struct:
data_dict = {
'tls_extension_type': struct.read('!H'),
'tls_extension_len': struct.read('!H'),
'signed_certificate_timestamp_list_len': struct.read('!H'),
}
sct_list = []
while struct.offset < struct.length:
sct_len = struct.read('!H')
sct_der = struct.read(flo('!{sct_len}s'))
sct_list.append(_SctListEntry(sct_len, sct_der))
return _TlsExtension18(sct_list=sct_list, **data_dict)
| mit | Python |
97e81e53ac068927f41e4045bc05002831d2c408 | fix url patterns | tpeek/Copy-n-Haste,tpeek/Copy-n-Haste,kaka0525/Copy-n-Haste,kaka0525/Copy-n-Haste,tpeek/Copy-n-Haste,kaka0525/Copy-n-Haste | CopyHaste/CopyHaste/urls.py | CopyHaste/CopyHaste/urls.py | from django.conf.urls import include, url
from django.contrib import admin
from .views import home_view
# URL routing table (Django >= 1.8 style: a plain list of url() patterns;
# the deprecated patterns() wrapper is no longer used).
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),  # Django admin site
    url(r'^$', home_view, name='homepage'),  # landing page
    url(r'^accounts/', include('registration.backends.default.urls')),  # django-registration auth flows
    url(r'^profile/', include('cnh_profile.urls')),  # user profile app
]
| from django.conf.urls import patterns, include, url
from django.contrib import admin
from .views import home_view
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^$', home_view, name='homepage'),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^profile/', include('cnh_profile.urls')),
)
| mit | Python |
4f3376d2803a4f158d44b59811e9f953895db0b3 | Fix Line constructor | MorganR/gaussian-processes,MorganR/gaussian-processes | data/line.py | data/line.py | # This class will be used to generate random straight lines
import random
def generate_line(d_min, d_max):
random.seed()
d = random.randint(d_min, d_max)
theta = random.randint(0, 359)
return Line(d, theta)
class Line:
"""Represents a straight line as distance 'd' from the origin and angle 'theta' from the x-axis"""
def __init__(self, d=0, theta=0):
self.d = d
self.theta = theta
| # This class will be used to generate random straight lines
import random
def generate_line(d_min, d_max):
random.seed()
d = random.randint(d_min, d_max)
theta = random.randint(0, 359)
return Line(d, theta)
class Line:
"""Represents a straight line as distance 'd' from the origin and angle 'theta' from the x-axis"""
def __init__(self):
self.d = 0
self.theta = 0
def __init__(self, d, theta):
self.d = d
self.theta = theta
| mit | Python |
d564a5402b4818b3d87639a0a6d245460df73e5a | Add sphinx.ext.autosectionlabel extension | JonathonReinhart/scuba,JonathonReinhart/scuba,JonathonReinhart/scuba | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Scuba'
copyright = '2021, Jonathon Reinhart'
author = 'Jonathon Reinhart'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosectionlabel',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Scuba'
copyright = '2021, Jonathon Reinhart'
author = 'Jonathon Reinhart'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | mit | Python |
c2abb41906a6e1fca8194175f7f59f120d868166 | remove timezonefinder import | MrMinimal64/timezonefinder,MrMinimal64/timezonefinder | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import re
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, os.path.join(project_root, 'src'))
def get_version(package):
    """
    Return the package version as listed in ``__version__`` in ``__init__.py``.

    :param package: directory name of the package, relative to ``project_root``
    :raises RuntimeError: if no ``__version__`` assignment can be found
    """
    init_path = os.path.join(project_root, package, '__init__.py')
    # Context manager closes the handle promptly; the original leaked it.
    with open(init_path) as init_file:
        init_py = init_file.read()
    match = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py)
    if match is None:
        # Fail with a clear message instead of AttributeError on None.
        raise RuntimeError('No __version__ found in {0}'.format(init_path))
    return match.group(1)
# import timezonefinder # needed for auto document, ATTENTION: must then be installed during online build!
# -- Project information -----------------------------------------------------
project = 'timezonefinder'
copyright = '2019, Jannik Michelfeit'
author = 'Jannik Michelfeit'
# The full version, including alpha/beta/rc tags.
release = get_version('timezonefinder')
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.autodoc', # automatically document with docstring
'sphinx.ext.viewcode',
# 'sphinx.ext.intersphinx', # to auto link to other online documentations
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import re
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, os.path.join(project_root, 'src'))
def get_version(package):
    """
    Return the package version as listed in ``__version__`` in ``__init__.py``.

    :param package: directory name of the package, relative to ``project_root``
    :raises RuntimeError: if no ``__version__`` assignment can be found
    """
    init_path = os.path.join(project_root, package, '__init__.py')
    # Context manager closes the handle promptly; the original leaked it.
    with open(init_path) as init_file:
        init_py = init_file.read()
    match = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py)
    if match is None:
        # Fail with a clear message instead of AttributeError on None.
        raise RuntimeError('No __version__ found in {0}'.format(init_path))
    return match.group(1)
import timezonefinder # needed for auto document
# -- Project information -----------------------------------------------------
project = 'timezonefinder'
copyright = '2019, Jannik Michelfeit'
author = 'Jannik Michelfeit'
# The full version, including alpha/beta/rc tags.
release = get_version('timezonefinder')
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.autodoc', # automatically document with docstring
'sphinx.ext.viewcode',
# 'sphinx.ext.intersphinx', # to auto link to other online documentations
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| mit | Python |
d1896b357d4840c3c2c46291df06ae223b97115e | Prepare official version bump | troeger/opensubmit,troeger/opensubmit,troeger/opensubmit,troeger/opensubmit,troeger/opensubmit | docs/conf.py | docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('../executor/'))
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.githubpages']
source_suffix = '.rst'
master_doc = 'index'
project = 'OpenSubmit'
version = '0.7.3'
release = '0.7.3'
copyright = u'2018, Peter Tröger'
author = u'Peter Tröger'
language = "en"
exclude_patterns = ['formats', 'Thumbs.db', '.DS_Store', 'modules.rst']
pygments_style = 'sphinx'
todo_include_todos = True
html_theme = "sphinx_rtd_theme"
html_favicon = '../web/opensubmit/static/images/favicon.ico'
html_logo = '../web/opensubmit/static/images/favicon-96x96.png'
napoleon_google_docstring = True
napoleon_numpy_docstring = False
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('../executor/'))
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.githubpages']
source_suffix = '.rst'
master_doc = 'index'
project = 'OpenSubmit'
copyright = u'2018, Peter Tröger'
author = u'Peter Tröger'
language = "en"
exclude_patterns = ['formats', 'Thumbs.db', '.DS_Store', 'modules.rst']
pygments_style = 'sphinx'
todo_include_todos = True
html_theme = "sphinx_rtd_theme"
html_favicon = '../web/opensubmit/static/images/favicon.ico'
html_logo = '../web/opensubmit/static/images/favicon-96x96.png'
napoleon_google_docstring = True
napoleon_numpy_docstring = False
| agpl-3.0 | Python |
de8f7df034e5313d94e84ebc4951335eec0667c4 | Complete version of exercise 2 and questions. | alyhashahrukh/inf1340_2015_asst1 | exercise2.py | exercise2.py | #!/usr/bin/env python
""" Assignment 1, Exercise 2, INF1340, Fall, 2015. Name that shape.
This module contains one function name_that_shape(). It prompts the user
to input the number of sides in a shape and outputs the name of the shape.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
# Assignment Outline
# Write a program that determines the name of a shape from its number of sides. Read the number of
# sides from the user and then report the appropriate name as part of a meaningful message. Your program
# should support shapes with anywhere from 3 up to (and including) 10 sides. If the input something other
# than the numbers 3 to 10 then your program should display 'Error'. Your code should be easily readable.
# The function should include comments, proper variable naming and produce messages in case of errors.
# Add your code and test cases to the starter file, exercise2.py, that has been provided. Do not
# change the names of the functions in the file.
def name_that_shape():
    """
    Prompt for a number of sides and print the name of the corresponding
    regular polygon.

    Inputs: an integer number of sides, read from standard input.
    Expected Outputs: "This is a/an <shape>." for 3 to 10 sides.
    Errors: prints "Error!" for any integer outside 3..10 (the original
        if/elif chain silently printed nothing for 1, 2 and negative
        values); raises ValueError if the input is not an integer at all.
    """
    # Map each supported side count to its exact output sentence.
    messages = {
        3: "This is a triangle.",
        4: "This is a square.",
        5: "This is a pentagon.",
        6: "This is a hexagon.",
        7: "This is a heptagon.",
        8: "This is an octagon.",
        9: "This is a nonagon.",
        10: "This is a decagon.",
    }
    side = int(input("Please enter the number of sides:"))
    # Any value outside the table (0, 1, 2, negatives, > 10) is an error.
    print(messages.get(side, "Error!"))
name_that_shape()
# QUESTIONS
# Does "meaningful message" mean a proper, full sentence?
# Does the error statement need to be phrased as a sentence?
# Does the comment block above need to be filled out or not?
""" Assignment 1, Exercise 2, INF1340, Fall, 2015. Name that shape.
This module contains one function name_that_shape(). It prompts the user
to input the number of sides in a shape and outputs the name of the shape.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
# Assignment Outline
# Write a program that determines the name of a shape from its number of sides. Read the number of
# sides from the user and then report the appropriate name as part of a meaningful message. Your program
# should support shapes with anywhere from 3 up to (and including) 10 sides. If the input something other
# than the numbers 3 to 10 then your program should display 'Error'. Your code should be easily readable.
# The function should include comments, proper variable naming and produce messages in case of errors.
# Add your code and test cases to the starter file, exercise2.py, that has been provided. Do not
# change the names of the functions in the file.
def name_that_shape():
"""
For a given number of sides in a regular polygon, returns the shape name
Inputs:
Expected Outputs:
Errors:
"""
print("Error")
name_that_shape() | mit | Python |
801f7103c860af740cafabeb84e9e594d32db11e | Update docstrings in fsictools module | ChrisThoung/fsic | fsictools.py | fsictools.py | # -*- coding: utf-8 -*-
"""
fsictools
=========
Supporting tools for FSIC-based economic models. See the individual docstrings
for dependencies additional to those of `fsic`.
"""
# Version number keeps track with the main `fsic` module
from fsic import __version__
import re
from typing import List
from fsic import BaseModel, Symbol
def symbols_to_dataframe(symbols: List[Symbol]) -> 'DataFrame':
    """Build a `pandas` DataFrame from `symbols`: one row per symbol, one
    column per namedtuple field. **Requires `pandas`**."""
    from pandas import DataFrame
    # Each Symbol namedtuple becomes one record (an ordered field dict).
    records = [symbol._asdict() for symbol in symbols]
    return DataFrame(records)
def model_to_dataframe(model: BaseModel) -> 'DataFrame':
    """Tabulate the model's variable values along with its per-period
    solution information (`status`, `iterations`) as a `pandas` DataFrame.
    **Requires `pandas`**."""
    from pandas import DataFrame
    # One row per period in the model's span, one column per variable.
    frame = DataFrame(model.values.T, index=model.span, columns=model.names)
    # Append the solver diagnostics as two extra columns.
    for column, values in (('status', model.status), ('iterations', model.iterations)):
        frame[column] = values
    return frame
| # -*- coding: utf-8 -*-
"""
fsictools
=========
Supporting tools for FSIC-based economic models.
"""
# Version number keeps track with the main `fsic` module
from fsic import __version__
from typing import List
from fsic import BaseModel, Symbol
def symbols_to_dataframe(symbols: List[Symbol]) -> 'DataFrame':
"""Convert the list of symbols to a `pandas` DataFrame."""
from pandas import DataFrame
return DataFrame([s._asdict() for s in symbols])
def model_to_dataframe(model: BaseModel) -> 'DataFrame':
"""Return the values and solution information from the model as a `pandas` DataFrame."""
from pandas import DataFrame
df = DataFrame(model.values.T, index=model.span, columns=model.names)
df['status'] = model.status
df['iterations'] = model.iterations
return df
| mit | Python |
10645b8d26f883312f0411f2b0bd79345556a5e2 | correct syntax for hasattr | Sage-Bionetworks/nbviewer,franblas/beaker-sharing-server,iamjakob/nbviewer,AlfiyaZi/nbviewer,christophelec/nbviewer,AlfiyaZi/nbviewer,bollwyvl/nbviewer,Jay-Oh-eN/nbviewer,thomasyu888/nbviewer,twosigma/beaker-sharing-server,roxyboy/nbviewer,Sage-Bionetworks/nbviewer,iamjakob/nbviewer,franblas/beaker-sharing-server,twosigma/beaker-sharing-server,thomasyu888/nbviewer,roxyboy/nbviewer,christophelec/nbviewer,twosigma/beaker-sharing-server,twosigma/beaker-sharing-server,imsparsh/nbviewer,Jay-Oh-eN/nbviewer,franblas/beaker-sharing-server,franblas/beaker-sharing-server,bollwyvl/nbviewer,imsparsh/nbviewer | githubapp.py | githubapp.py | import os
import base64
from flask import Flask , request, render_template
import nbconvert.nbconvert as nbconvert
import requests
from nbformat import current as nbformat
from flask import Flask, redirect, abort
import re
import github as gh
from gist import render_content
app = Flask(__name__)
github = gh.Github()
@app.route('/')
def render_url():
return 'you are at root'
@app.route('/<user>/')
def user(user):
return github.get_user(user).name
@app.route('/<user>/<repo>/')
def repo(user,repo):
return github.get_user(user).get_repo(repo).url
@app.route('/<user>/<repo>/<tree>/<branch>/<path:subfile>')
def file(user, repo, tree, branch, subfile):
    """Serve `subfile` from `user`'s `repo`.

    The `tree` and `branch` URL components are currently ignored: the path
    is always resolved against the repository's master branch.

    Returns the rendered notebook for a blob, a plain-text listing of entry
    paths if the path resolves to a directory (tree), or a 404 if the path
    does not exist.
    """
    user = github.get_user(user)
    repo = user.get_repo(repo)
    master = repo.master_branch
    # Find the branch object matching the repository's default branch name.
    branch = [b for b in repo.get_branches() if b.name == master][0]
    # (The original also fetched the head tree and built an unused message
    # string here; both were dead code, and the tree fetch was an extra
    # GitHub API round-trip.)
    e = rwt(repo, branch.commit.sha, subfile.strip('/').split('/'))
    if e is None:
        # The requested path does not exist in the tree.
        abort(404)
    if hasattr(e, 'type') and e.type == 'blob':
        blob = repo.get_git_blob(e.sha)
        return render_content(base64.decodestring(blob.content))
    else:
        return '\n'.join([n.path for n in e.tree])
#recursively walk tree....
def rwt(repo, sha, path):
    """Recursively walk the git tree at `sha`, following the components of
    `path` (a list of names).  Returns the tree when `path` is empty, the
    matching entry (blob or leaf) otherwise, or None when a component does
    not exist.
    """
    tree = repo.get_git_tree(sha)
    if not path:
        return tree
    head, tail = path[0], path[1:]
    for entry in tree.tree:
        if entry.path != head:
            continue
        if entry.type == 'tree':
            # Descend one level with the remaining path components.
            return rwt(repo, entry.sha, tail)
        return entry
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
debug = os.path.exists('.debug')
if debug :
print 'DEBUG MODE IS ACTIVATED !!!'
else :
print 'debug is not activated'
app.run(host='0.0.0.0', port=port, debug=debug)
| import os
import base64
from flask import Flask , request, render_template
import nbconvert.nbconvert as nbconvert
import requests
from nbformat import current as nbformat
from flask import Flask, redirect, abort
import re
import github as gh
from gist import render_content
app = Flask(__name__)
github = gh.Github()
@app.route('/')
def render_url():
return 'you are at root'
@app.route('/<user>/')
def user(user):
return github.get_user(user).name
@app.route('/<user>/<repo>/')
def repo(user,repo):
return github.get_user(user).get_repo(repo).url
@app.route('/<user>/<repo>/<tree>/<branch>/<path:subfile>')
def file(user,repo,tree,branch, subfile):
#we don't care about tree or branch now...
base = "You are trying to access the file : %(file)s, from the %(repo)s repository of %(name)s"
user = github.get_user(user)
repo = user.get_repo(repo)
master = repo.master_branch
branch = [b for b in repo.get_branches() if b.name == master][0]
headtree = repo.get_git_tree(branch.commit.sha)
formated = base % { 'name':user.name,'repo':repo.url, 'file':subfile}
e = rwt(repo, branch.commit.sha, subfile.strip('/').split('/'))
if e.hasattr('type') and e.type == 'blob' :
f = repo.get_git_blob(e.sha)
return render_content(base64.decodestring(f.content))
else :
return '\n'.join([n.path for n in e.tree])
#recursively walk tree....
def rwt(repo,sha,path):
tree = repo.get_git_tree(sha)
if len(path)==0:
return tree
subpath = path[1:]
key = path[0]
nodes = tree.tree
for n in nodes :
if n.path == key:
if n.type == 'tree':
return rwt(repo, n.sha, subpath)
else :
return n
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
debug = os.path.exists('.debug')
if debug :
print 'DEBUG MODE IS ACTIVATED !!!'
else :
print 'debug is not activated'
app.run(host='0.0.0.0', port=port, debug=debug)
| bsd-3-clause | Python |
8f326164a1f8503b357eadcbba7a29c75caebf2e | Fix xml feed extraction. | fake-name/ReadableWebProxy,fake-name/ReadableWebProxy,fake-name/ReadableWebProxy,fake-name/ReadableWebProxy | WebMirror/processor/XmlProcessor.py | WebMirror/processor/XmlProcessor.py |
from . import ProcessorBase
import bs4
class XmlProcessor(ProcessorBase.PageProcessor):
wanted_mimetypes = ['text/xml', 'application/xml']
want_priority = 40
loggerPath = "Main.Text.XmlProcessor"
# def __init__(self, pageUrl, loggerPath, content, pbLut, **kwargs):
def __init__(self, baseUrls, pageUrl, pgContent, loggerPath, relinkable, **kwargs):
'''
I'm assuming that pastebin content doesn't have any links, because lazy, mostly.
'''
self.loggerPath = (loggerPath+".XmlProcessor") if not self.loggerPath.endswith(".XmlProcessor") else self.loggerPath
self.pageUrl = pageUrl
self.content = pgContent
self.urlLut = {}
# Methods to allow the child-class to modify the content at various points.
def extractTitle(self, content, url):
return "XML Blob"
# Process a Google-Doc resource page.
# This call does a set of operations to permute and clean a google doc page.
def extractContent(self):
title = self.extractTitle(self.content, self.pageUrl)
procContent = bs4.BeautifulSoup(self.content, "xml")
procContent = "<pre>" + procContent.prettify() + "</pre>"
self.log.info("Processed title: '%s'", title)
ret = {}
# No links here
ret['plainLinks'] = []
ret['rsrcLinks'] = []
ret['title'] = title
ret['contents'] = procContent
return ret
def test():
print("Test mode!")
import WebRequest
import logSetup
logSetup.initLogging()
if __name__ == "__main__":
test()
|
from . import ProcessorBase
import bs4
class XmlProcessor(ProcessorBase.PageProcessor):
wanted_mimetypes = ['text/xml', 'application/xml']
want_priority = 50
loggerPath = "Main.Text.XmlProcessor"
# def __init__(self, pageUrl, loggerPath, content, pbLut, **kwargs):
def __init__(self, baseUrls, pageUrl, pgContent, loggerPath, relinkable, **kwargs):
'''
I'm assuming that pastebin content doesn't have any links, because lazy, mostly.
'''
self.loggerPath = (loggerPath+".XmlProcessor") if not self.loggerPath.endswith(".XmlProcessor") else self.loggerPath
self.pageUrl = pageUrl
self.content = pgContent
self.urlLut = {}
# Methods to allow the child-class to modify the content at various points.
def extractTitle(self, content, url):
return "XML Blob"
# Process a Google-Doc resource page.
# This call does a set of operations to permute and clean a google doc page.
def extractContent(self):
title = self.extractTitle(self.content, self.pageUrl)
procContent = bs4.BeautifulSoup(self.content, "xml")
procContent = "<pre>" + procContent.prettify() + "</pre>"
self.log.info("Processed title: '%s'", title)
ret = {}
# No links here
ret['plainLinks'] = []
ret['rsrcLinks'] = []
ret['title'] = title
ret['contents'] = procContent
return ret
def test():
print("Test mode!")
import WebRequest
import logSetup
logSetup.initLogging()
if __name__ == "__main__":
test()
| bsd-3-clause | Python |
3c776540b0774679af6a27bea1b9958a6769e48f | update worker.py | ojengwa/gfe | scraper.py | scraper.py | import re
from dateutil.parser import parse
from xml.sax.saxutils import unescape
import requests
import bs4
from celery import Celery
from app import app, db
from models import Result
base_scraper_url = app.config.get('SCRAPER_BASE_URL')
base_url = app.config.get('BASE_URL')
base_dict = {}
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
@celery.task
def test():
print('Hello world, sample tasks.')
def clean_html(html):
return ' '.join(re.findall(r'\S+', html))
def get_rows(url):
page = requests.get(url)
rows = []
if page.ok:
content = clean_html(page.content)
tree = bs4.BeautifulSoup(content, 'lxml')
table = tree.find('table', class_='data')
rows = table.find_all('tr')
return rows
def get_article(url):
page = requests.get(url)
if page.ok:
content = clean_html(page.content)
tree = bs4.BeautifulSoup(content, 'lxml')
return str(tree.html)
return None
@celery.task
def scraper():
base_rows = get_rows(base_scraper_url)
base_for_today = base_rows[0]
base_datacells = base_for_today.find_all('td')
today_url_cell = base_datacells[0].find('a')
base_dict['url'] = unescape(base_url + today_url_cell.attrs['href'])
base_dict['date'] = parse(today_url_cell.text)
todays_rows = get_rows(base_dict.get('url'))
for row in todays_rows:
datacells = row.find_all('td')
doc = {
'url': base_dict.get('url'),
'date': base_dict.get('date'),
'source': datacells[1].text,
'agency': datacells[2].text,
'fsg': datacells[3].text,
'title': datacells[4].text,
'keywords': datacells[5].text,
'url_2': unescape(
base_url + datacells[4].find('a').attrs.get('href')),
}
article = get_article(doc.get('url_2'))
if article:
doc['description'] = article
result = Result(**doc)
db.session.add(result)
db.session.commit()
return base_rows
test.apply_async()
scraper.apply_async()
| import re
from dateutil.parser import parse
from xml.sax.saxutils import unescape
import requests
import bs4
from app import app, db
from models import Result
base_scraper_url = app.config.get('SCRAPER_BASE_URL')
base_url = app.config.get('BASE_URL')
base_dict = {}
def clean_html(html):
return ' '.join(re.findall(r'\S+', html))
def get_rows(url):
page = requests.get(url)
rows = []
if page.ok:
content = clean_html(page.content)
tree = bs4.BeautifulSoup(content, 'lxml')
table = tree.find('table', class_='data')
rows = table.find_all('tr')
return rows
def get_article(url):
page = requests.get(url)
if page.ok:
content = clean_html(page.content)
tree = bs4.BeautifulSoup(content, 'lxml')
return tree
print(dir(tree), 'skjskjksj')
return None
def scraper():
base_rows = get_rows(base_scraper_url)
base_for_today = base_rows[0]
base_datacells = base_for_today.find_all('td')
today_url_cell = base_datacells[0].find('a')
base_dict['url'] = unescape(base_url + today_url_cell.attrs['href'])
base_dict['date'] = parse(today_url_cell.text)
todays_rows = get_rows(base_dict.get('url'))
for row in todays_rows:
datacells = row.find_all('td')
doc = {
'url': base_dict.get('url'),
'date': base_dict.get('date'),
'source': datacells[1].text,
'agency': datacells[2].text,
'fsg': datacells[3].text,
'title': datacells[4].text,
'keywords': datacells[5].text,
'url_2': unescape(
base_url + datacells[4].find('a').attrs.get('href')),
}
article = get_article(doc.get('url_2'))
if article:
doc['description'] = article.string
result = Result(**doc)
db.session.add(result)
db.session.commit()
return base_rows
| mit | Python |
f0f20089bc1c68c6547156f4f7b20116a5128dc7 | Update RegisterHandler.py | emeric254/gala-stri-website,emeric254/gala-stri-website,emeric254/gala-stri-website | Handlers/RegisterHandler.py | Handlers/RegisterHandler.py | # -*- coding: utf-8 -*-
import logging
from tornado import escape
from Handlers.BaseHandler import BaseHandler
from Tools import PostgreSQL, VerifyFields
logger = logging.getLogger(__name__)
class RegisterHandler(BaseHandler):
"""handle / endpoint"""
def get(self):
"""Serve Get and return main page"""
self.render('register.html')
def post(self):
"""Get user completed form and verify it before save it"""
prenom = escape.xhtml_escape(self.get_body_argument('prenom'))[:64]
nom = escape.xhtml_escape(self.get_body_argument('nom'))[:64]
courriel = escape.xhtml_escape(self.get_body_argument('courriel'))[:96]
genre = escape.xhtml_escape(self.get_body_argument('genre'))[:16]
promotion = int(escape.xhtml_escape(self.get_body_argument('promotion'))[:4])
prenom_accompagnateurs = self.get_body_arguments('accompagnateurs-prenom')
nom_accompagnateurs = self.get_body_arguments('accompagnateurs-nom')
accompagnateurs = []
size = len(prenom_accompagnateurs)
if size == len(nom_accompagnateurs):
for i in range(0, size):
a_prenom = escape.xhtml_escape(prenom_accompagnateurs[i])
a_nom = escape.xhtml_escape(nom_accompagnateurs[i])
if not a_prenom or not a_nom:
self.send_error(status_code=400)
return
accompagnateurs.append((a_prenom, a_nom))
if VerifyFields.verify_all(prenom, nom, courriel, genre, promotion, accompagnateurs):
if PostgreSQL.insert_inscrit(prenom, nom, genre, courriel, promotion, accompagnateurs):
self.render('registered.html')
return
self.send_error(status_code=400)
| # -*- coding: utf-8 -*-
import logging
from Handlers.BaseHandler import BaseHandler
from Tools import PostgreSQL, VerifyFields
logger = logging.getLogger(__name__)
class RegisterHandler(BaseHandler):
"""handle / endpoint"""
def get(self):
"""Serve Get and return main page"""
self.render('register.html')
def post(self):
"""Get user completed form and verify it before save it"""
prenom = self.get_body_argument('prenom')
nom = self.get_body_argument('nom')
courriel = self.get_body_argument('courriel')
genre = self.get_body_argument('genre')
promotion = int(self.get_body_argument('promotion'))
if VerifyFields.verify_all(prenom, nom, courriel, genre, promotion):
PostgreSQL.insert_inscrit(prenom, nom, genre, courriel, promotion)
self.render('registered.html')
else:
self.send_error(status_code=400)
| mit | Python |
e71c5a488106c3eecf7deb9099797e714e44b498 | Change status detection | UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine,UPOLSearch/UPOL-Search-Engine | upol_search_engine/__main__.py | upol_search_engine/__main__.py | from time import sleep
from upol_search_engine.upol_crawler import tasks
def main():
crawler_settings = {'limit_domain': 'trnecka.inf.upol.cz',
'max_depth': 10,
'connect_max_timeout': 3.05,
'read_max_timeout': 10,
'frequency_per_server': 0.5,
'blacklist': ""}
seed = "http://trnecka.inf.upol.cz"
feeder = tasks.feeder_task.delay(
crawler_settings=crawler_settings,
seed=seed,
batch_size=300,
delay_between_feeding=5)
while 'RUNNING' in feeder.status or 'PENDING' in feeder.status:
print('running')
sleep(5)
print('done')
if __name__ == "__main__":
main()
| from time import sleep
from upol_search_engine.upol_crawler import tasks
def main():
crawler_settings = {'limit_domain': 'trnecka.inf.upol.cz',
'max_depth': 10,
'connect_max_timeout': 3.05,
'read_max_timeout': 10,
'frequency_per_server': 0.5,
'blacklist': ""}
seed = "http://trnecka.inf.upol.cz"
feeder = tasks.feeder_task.delay(
crawler_settings=crawler_settings,
seed=seed,
batch_size=300,
delay_between_feeding=5)
while 'RUNNING' in feeder.status:
print('running')
sleep(5)
print('done')
if __name__ == "__main__":
main()
| mit | Python |
764913ba1b2e492eb1d966ab30c91229afab1893 | Update config variable name | mattstibbs/blockbuster-server,mattstibbs/blockbuster-server | blockbuster/example_config_files/example_config.py | blockbuster/example_config_files/example_config.py | # General Settings
timerestriction = False
debug_mode = True
log_directory = './logs'
# Email Settings
# emailtype = "Gmail"
emailtype = "Console"
# SMS Settings
# outboundsmstype = "WebService"
outboundsmstype = "Console"
# Twilio Auth Keys
account_sid = "twilio sid here"
auth_token = "auth token here"
# SMS Services Auth
spsms_basic_auth = 'basic auth header here'
spsms_host = 'host here'
spsms_url = 'url here'
# Postgres Connection Details
pg_host = 'localhost'
pg_dbname = 'blockbuster'
pg_user = 'blockbuster'
pg_passwd = 'blockbuster'
# Proxy Details
proxy_user = ''
proxy_pass = ''
proxy_host = ''
proxy_port = 8080
# Testing
test_to_number = ''
test_from_number = ''
# Pushover Keys
pushover_app_token = "pushover_token"
# Email Configuration
smtp_server = 'smtp.gmail.com:587'
mail_username = ''
mail_fromaddr = mail_username
mail_password = ''
mail_monitoring_addr = ''
# API Variables
api_username = "username here"
api_passphrase = "passphrase here"
# New Number
return_number = "+440000111222" | # General Settings
timerestriction = False
debug_mode = True
log_directory = './logs'
# Email Settings
# emailtype = "Gmail"
emailtype = "Console"
# SMS Settings
# outboundsmstype = "WebService"
outboundsmstype = "Console"
# Twilio Auth Keys
account_sid = "twilio sid here"
auth_token = "auth token here"
# SMS Services Auth
basic_auth = 'basic auth header here'
spsms_host = 'host here'
spsms_url = 'url here'
# Postgres Connection Details
pg_host = 'localhost'
pg_dbname = 'blockbuster'
pg_user = 'blockbuster'
pg_passwd = 'blockbuster'
# Proxy Details
proxy_user = ''
proxy_pass = ''
proxy_host = ''
proxy_port = 8080
# Testing
test_to_number = ''
test_from_number = ''
# Pushover Keys
pushover_app_token = "pushover_token"
# Email Configuration
smtp_server = 'smtp.gmail.com:587'
mail_username = ''
mail_fromaddr = mail_username
mail_password = ''
mail_monitoring_addr = ''
# API Variables
api_username = "username here"
api_passphrase = "passphrase here"
# New Number
return_number = "+440000111222" | mit | Python |
f5bd8b400e93e33666212a3558c05741ebef037d | Implement consume_token method | randomic/aniauth-tdd,randomic/aniauth-tdd | accounts/token.py | accounts/token.py | """module containing generator for login tokens
"""
import base64
from django.core.signing import TimestampSigner
class LoginTokenGenerator(object):
"""Generator for the timestamp signed tokens used for logging in.
"""
def __init__(self):
self.signer = TimestampSigner(
salt='aniauth-tdd.accounts.token.LoginTokenGenerator')
def create_token(self, email):
"""Return a login token for the provided email address.
"""
return base64.urlsafe_b64encode(
self.signer.sign(email).encode()
).decode()
def consume_token(self, token, max_age=600):
"""Extract the email provided the token isn't older than max_age.
"""
return self.signer.unsign(
base64.urlsafe_b64decode(token.encode()), max_age
)
| """module containing generator for login tokens
"""
import base64
from django.core.signing import TimestampSigner
class LoginTokenGenerator(object):
"""Generator for the timestamp signed tokens used for logging in.
"""
def __init__(self):
self.signer = TimestampSigner(
salt='aniauth-tdd.accounts.token.LoginTokenGenerator')
def create_token(self, email):
"""Return a login token for the provided email address.
"""
return base64.urlsafe_b64encode(
self.signer.sign(email).encode()
).decode()
| mit | Python |
6064db3000f2aeec66a775345d22b8a2b421497f | Fix gzip test for Python 2.6 | tbabej/astropy,bsipocz/astropy,lpsinger/astropy,MSeifert04/astropy,StuartLittlefair/astropy,larrybradley/astropy,DougBurke/astropy,stargaser/astropy,pllim/astropy,stargaser/astropy,MSeifert04/astropy,tbabej/astropy,lpsinger/astropy,joergdietrich/astropy,astropy/astropy,joergdietrich/astropy,dhomeier/astropy,kelle/astropy,kelle/astropy,saimn/astropy,MSeifert04/astropy,bsipocz/astropy,funbaker/astropy,kelle/astropy,pllim/astropy,dhomeier/astropy,StuartLittlefair/astropy,pllim/astropy,dhomeier/astropy,MSeifert04/astropy,larrybradley/astropy,mhvk/astropy,funbaker/astropy,StuartLittlefair/astropy,stargaser/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,astropy/astropy,AustereCuriosity/astropy,tbabej/astropy,DougBurke/astropy,AustereCuriosity/astropy,funbaker/astropy,pllim/astropy,lpsinger/astropy,larrybradley/astropy,funbaker/astropy,dhomeier/astropy,mhvk/astropy,joergdietrich/astropy,saimn/astropy,kelle/astropy,lpsinger/astropy,stargaser/astropy,mhvk/astropy,bsipocz/astropy,StuartLittlefair/astropy,dhomeier/astropy,saimn/astropy,mhvk/astropy,joergdietrich/astropy,astropy/astropy,AustereCuriosity/astropy,AustereCuriosity/astropy,astropy/astropy,aleksandr-bakanov/astropy,joergdietrich/astropy,mhvk/astropy,aleksandr-bakanov/astropy,saimn/astropy,larrybradley/astropy,larrybradley/astropy,tbabej/astropy,pllim/astropy,bsipocz/astropy,aleksandr-bakanov/astropy,DougBurke/astropy,astropy/astropy,kelle/astropy,lpsinger/astropy,AustereCuriosity/astropy,tbabej/astropy,saimn/astropy,DougBurke/astropy | astropy/utils/tests/test_gzip.py | astropy/utils/tests/test_gzip.py | import io
import os
from ...tests.helper import pytest
from .. import gzip
pytestmark = pytest.mark.skipif("sys.version_info < (3,0)")
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
| import io
import os
from ...tests.helper import pytest
from .. import gzip
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
| bsd-3-clause | Python |
1ec6da18d7906246380401c13496fdef3c80a27a | Address PR comments | ciena/afkak,ciena/afkak | afkak/protocol.py | afkak/protocol.py | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Cyan, Inc.
from __future__ import absolute_import
import logging
from twisted.internet.error import ConnectionDone
from twisted.protocols.basic import Int32StringReceiver
from twisted.python.failure import Failure
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class KafkaProtocol(Int32StringReceiver):
"""
Very thin wrapper around the Int32StringReceiver
Simply knows to call its factory.handleResponse()
method with the string received by stringReceived() and
to cleanup the factory reference when the connection is lost
"""
factory = None
closing = False # set by factory so we know to expect connectionLost
MAX_LENGTH = 4 * 1024 * 1024
def stringReceived(self, string):
self.factory.handleResponse(string)
def connectionLost(self, reason=None):
# If we are closing, or if the connection was cleanly closed (as
# Kafka brokers will do after 10 minutes of idle connection) we log
# only at debug level. Other connection close reasons when not
# shutting down will cause a warning log.
if self.closing or reason is None or reason.check(ConnectionDone):
log.debug("Connection to Kafka Broker closed: %r Closing: %r",
reason, self.closing)
else:
log.warning("Lost Connection to Kafka Broker: %r", reason)
self.factory = None
def lengthLimitExceeded(self, length):
log.error("KafkaProtocol Max Length: %d exceeded: %d",
self.MAX_LENGTH, length)
self.transport.loseConnection()
| # -*- coding: utf-8 -*-
# Copyright (C) 2015 Cyan, Inc.
from __future__ import absolute_import
import logging
from twisted.internet.error import ConnectionDone
from twisted.protocols.basic import Int32StringReceiver
from twisted.python.failure import Failure
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class KafkaProtocol(Int32StringReceiver):
"""
Very thin wrapper around the Int32StringReceiver
Simply knows to call its factory.handleResponse()
method with the string received by stringReceived() and
to cleanup the factory reference when the connection is lost
"""
factory = None
closing = False # set by factory so we know to expect connectionLost
MAX_LENGTH = 4 * 1024 * 1024
CLEAN_CLOSE = Failure(ConnectionDone())
def stringReceived(self, string):
self.factory.handleResponse(string)
def connectionLost(self, reason=CLEAN_CLOSE):
# If we are closing, or if the connection was cleanly closed (as
# Kafka brokers will do after 10 minutes of idle connection) we log
# only at debug level. Other connection close reasons when not
# shutting down will cause a warning log.
if self.closing or reason.check(ConnectionDone):
log.debug("Connection to Kafka Broker closed: %r Closing: %r",
reason, self.closing)
else:
log.warning("Lost Connection to Kafka Broker: %r", reason)
self.factory = None
def lengthLimitExceeded(self, length):
log.error("KafkaProtocol Max Length: %d exceeded: %d",
self.MAX_LENGTH, length)
self.transport.loseConnection()
| apache-2.0 | Python |
7869d1004dd58263926515ad196d5423b77ee251 | Fix verify_gravatars after downgrade to Python 2.7 | SoPR/horas,SoPR/horas,SoPR/horas,SoPR/horas | apps/profiles/management/commands/verify_gravatars.py | apps/profiles/management/commands/verify_gravatars.py | import datetime
import urllib2
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from apps.profiles.models import User
class Command(BaseCommand):
help = 'Tries to verify gravatar for recently joined users.'
def handle(self, *args, **options):
users_verified = []
users_not_verified = []
users = User.objects.filter(
date_joined__gte=now() - datetime.timedelta(hours=24))
for user in users:
try:
request = urllib2.Request(user.gravatar_url)
urllib2.urlopen(request)
user.is_gravatar_verified = True
user.save()
users_verified.append(user)
except urllib2.HTTPError:
users_not_verified.append(user)
self.stdout.write('Verified {} users.\nUnverifed {} users.'.format(
len(users_verified), len(users_not_verified)))
| import datetime
import urllib
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from apps.profiles.models import User
class Command(BaseCommand):
help = 'Tries to verify gravatar for recently joined users.'
def handle(self, *args, **options):
users_verified = []
users_not_verified = []
users = User.objects.filter(
date_joined__gte=now() - datetime.timedelta(hours=24))
for user in users:
try:
urllib.request.urlopen(user.gravatar_url)
user.is_gravatar_verified = True
user.save()
users_verified.append(user)
except urllib.error.HTTPError:
users_not_verified.append(user)
self.stdout.write('Verified {} users.\nUnverifed {} users.'.format(
len(users_verified), len(users_not_verified)))
| mit | Python |
f09a04158ecea7c0d4ef7f34319e3a94e1e01340 | Fix seo description | apihackers/wapps,apihackers/wapps,apihackers/wapps,apihackers/wapps | wapps/templatetags/seo.py | wapps/templatetags/seo.py | import jinja2
from django_jinja import library
from jinja2.ext import Extension
from ..models import IdentitySettings
class Metadata(object):
'''
Extract metadata from a Page object
'''
def __init__(self, context, **kwargs):
self.context = context
self.kwargs = kwargs
self.page = context['page']
self.request = context['request']
self.site = self.request.site
self.identity = IdentitySettings.for_site(self.site)
@property
def title(self):
if self.kwargs.get('title'):
return self.kwargs['title']
else:
return self.page.seo_title or self.page.title
@property
def full_title(self):
if self.identity.name:
return ' | '.join((self.title, self.identity.name))
elif self.context.get('WAGTAIL_SITE_NAME'):
return ' | '.join((self.title, self.context['WAGTAIL_SITE_NAME']))
else:
return self.title
@property
def description(self):
if self.kwargs.get('description'):
return self.kwargs['description']
elif getattr(self.page, 'search_description', None):
return self.page.search_description
elif getattr(self.page, 'description', None):
return self.page.description
else:
return self.identity.description
@property
def image(self):
if self.kwargs.get('image'):
return self.kwargs['image']
elif getattr(self.page, 'image', None):
return self.site.root_url + self.page.image.get_rendition('original').url
elif self.identity.logo:
return self.site.root_url + self.identity.logo.get_rendition('original').url
@property
def tags(self):
tags = set(self.identity.tags.all())
if self.kwargs.get('tags'):
tags.update(self.kwargs['tags'])
if getattr(self.page, 'tags', None):
tags.update(self.page.tags.all())
return tags
@library.global_function
@jinja2.contextfunction
def page_meta(context, **kwargs):
return Metadata(context, **kwargs)
| import jinja2
from django_jinja import library
from jinja2.ext import Extension
from ..models import IdentitySettings
class Metadata(object):
'''
Extract metadata from a Page object
'''
def __init__(self, context, **kwargs):
self.context = context
self.kwargs = kwargs
self.page = context['page']
self.request = context['request']
self.site = self.request.site
self.identity = IdentitySettings.for_site(self.site)
@property
def title(self):
if self.kwargs.get('title'):
return self.kwargs['title']
else:
return self.page.seo_title or self.page.title
@property
def full_title(self):
if self.identity.name:
return ' | '.join((self.title, self.identity.name))
elif self.context.get('WAGTAIL_SITE_NAME'):
return ' | '.join((self.title, self.context['WAGTAIL_SITE_NAME']))
else:
return self.title
@property
def description(self):
if self.kwargs.get('description'):
return self.kwargs['description']
elif getattr(self.page, 'seo_description', None):
return self.page.seo_description
elif getattr(self.page, 'description', None):
return self.page.description
else:
return self.identity.description
@property
def image(self):
if self.kwargs.get('image'):
return self.kwargs['image']
elif getattr(self.page, 'image', None):
return self.site.root_url + self.page.image.get_rendition('original').url
elif self.identity.logo:
return self.site.root_url + self.identity.logo.get_rendition('original').url
@property
def tags(self):
tags = set(self.identity.tags.all())
if self.kwargs.get('tags'):
tags.update(self.kwargs['tags'])
if getattr(self.page, 'tags', None):
tags.update(self.page.tags.all())
return tags
@library.global_function
@jinja2.contextfunction
def page_meta(context, **kwargs):
return Metadata(context, **kwargs)
| mit | Python |
4f7cd1c6f48fec9be34af049f3b91f0e895a36f1 | Bump version to 0.1.1 | gst/amqpy,veegee/amqpy | amqpy/__init__.py | amqpy/__init__.py | VERSION = (0, 1, 1)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'veegee'
__maintainer__ = 'veegee'
__contact__ = 'veegee@veegee.org'
__homepage__ = 'http://github.com/veegee/amqpy'
__docformat__ = 'restructuredtext'
from .message import Message
from .channel import Channel
from .connection import Connection
from .exceptions import (
AMQPError,
ConnectionError,
RecoverableConnectionError,
IrrecoverableConnectionError,
ChannelError,
RecoverableChannelError,
IrrecoverableChannelError,
ConsumerCancelled,
ContentTooLarge,
NoConsumers,
ConnectionForced,
InvalidPath,
AccessRefused,
NotFound,
ResourceLocked,
PreconditionFailed,
FrameError,
FrameSyntaxError,
InvalidCommand,
ChannelNotOpen,
UnexpectedFrame,
ResourceError,
NotAllowed,
AMQPNotImplementedError,
InternalError,
error_for_code,
__all__ as _all_exceptions,
)
__all__ = ['Connection', 'Channel', 'Message'] + _all_exceptions
| VERSION = (0, 1, 0)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'veegee'
__maintainer__ = 'veegee'
__contact__ = 'veegee@veegee.org'
__homepage__ = 'http://github.com/veegee/amqpy'
__docformat__ = 'restructuredtext'
from .message import Message
from .channel import Channel
from .connection import Connection
from .exceptions import (
AMQPError,
ConnectionError,
RecoverableConnectionError,
IrrecoverableConnectionError,
ChannelError,
RecoverableChannelError,
IrrecoverableChannelError,
ConsumerCancelled,
ContentTooLarge,
NoConsumers,
ConnectionForced,
InvalidPath,
AccessRefused,
NotFound,
ResourceLocked,
PreconditionFailed,
FrameError,
FrameSyntaxError,
InvalidCommand,
ChannelNotOpen,
UnexpectedFrame,
ResourceError,
NotAllowed,
AMQPNotImplementedError,
InternalError,
error_for_code,
__all__ as _all_exceptions,
)
__all__ = ['Connection', 'Channel', 'Message'] + _all_exceptions
| mit | Python |
a1fb04e1ba961149eaa5ced9e2017cd0c865373f | Bump version | veegee/amqpy | amqpy/__init__.py | amqpy/__init__.py | from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
VERSION = (0, 12, 2)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'veegee'
__maintainer__ = 'veegee'
__contact__ = 'veegee@veegee.org'
__homepage__ = 'http://github.com/veegee/amqpy'
__docformat__ = 'restructuredtext'
from .connection import Connection
from .channel import Channel
from .message import Message
from .consumer import AbstractConsumer
from .spec import basic_return_t, queue_declare_ok_t, method_t
from .exceptions import (
Timeout,
AMQPError,
AMQPConnectionError,
RecoverableConnectionError,
IrrecoverableConnectionError,
ChannelError,
RecoverableChannelError,
IrrecoverableChannelError,
ConsumerCancelled,
ContentTooLarge,
NoConsumers,
ConnectionForced,
InvalidPath,
AccessRefused,
NotFound,
ResourceLocked,
PreconditionFailed,
FrameError,
FrameSyntaxError,
InvalidCommand,
ChannelNotOpen,
UnexpectedFrame,
ResourceError,
NotAllowed,
AMQPNotImplementedError,
InternalError,
error_for_code,
__all__ as _all_exceptions,
)
__all__ = ['Connection', 'Channel', 'Message', 'AbstractConsumer',
'basic_return_t', 'queue_declare_ok_t', 'method_t'] + _all_exceptions
| from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
VERSION = (0, 12, 1)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'veegee'
__maintainer__ = 'veegee'
__contact__ = 'veegee@veegee.org'
__homepage__ = 'http://github.com/veegee/amqpy'
__docformat__ = 'restructuredtext'
from .connection import Connection
from .channel import Channel
from .message import Message
from .consumer import AbstractConsumer
from .spec import basic_return_t, queue_declare_ok_t, method_t
from .exceptions import (
Timeout,
AMQPError,
AMQPConnectionError,
RecoverableConnectionError,
IrrecoverableConnectionError,
ChannelError,
RecoverableChannelError,
IrrecoverableChannelError,
ConsumerCancelled,
ContentTooLarge,
NoConsumers,
ConnectionForced,
InvalidPath,
AccessRefused,
NotFound,
ResourceLocked,
PreconditionFailed,
FrameError,
FrameSyntaxError,
InvalidCommand,
ChannelNotOpen,
UnexpectedFrame,
ResourceError,
NotAllowed,
AMQPNotImplementedError,
InternalError,
error_for_code,
__all__ as _all_exceptions,
)
__all__ = ['Connection', 'Channel', 'Message', 'AbstractConsumer',
'basic_return_t', 'queue_declare_ok_t', 'method_t'] + _all_exceptions
| mit | Python |
c762009732f2e09a90982b1b429fad6a79a7ef40 | Fix exception handling of default fields loader | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks | backend/globaleaks/db/appdata.py | backend/globaleaks/db/appdata.py | # -*- coding: UTF-8
# datainit.py: database initialization
# ******************
import json
import os
from globaleaks import models
from globaleaks.orm import transact
#from globaleaks.handlers.submission import db_assign_submission_sequence
from globaleaks.handlers.admin.field import db_create_field
from globaleaks.rest import errors
from globaleaks.settings import GLSettings
def load_appdata():
if os.path.exists(GLSettings.appdata_file):
with file(GLSettings.appdata_file, 'r') as f:
json_string = f.read()
appdata_dict = json.loads(json_string)
return appdata_dict
raise errors.InternalServerError("Unable to load application data")
def load_default_fields(store):
    """Create the default field models from the JSON files in
    GLSettings.fields_path.

    Args:
        store: the database store used to create the field models.

    Raises:
        errors.InternalServerError: wrapping any error raised while reading,
            parsing, or creating a field.
    """
    try:
        if os.path.exists(GLSettings.fields_path):
            for fname in os.listdir(GLSettings.fields_path):
                fpath = os.path.join(GLSettings.fields_path, fname)
                # Use the portable 'open' builtin instead of the python2-only 'file'.
                with open(fpath, 'r') as f:
                    json_string = f.read()
                    field_dict = json.loads(json_string)
                    db_create_field(store, field_dict, None)
    except Exception as e:
        raise errors.InternalServerError("Unable to load default fields: %s" % e)
def db_init_appdata(store):
    """Replace any stored ApplicationData with a freshly loaded copy.

    Args:
        store: the database store.

    Returns:
        The application data dictionary that was stored.
    """
    appdata = load_appdata()
    # Remove the previous appdata before inserting the new copy.
    store.find(models.ApplicationData).remove()
    store.add(models.ApplicationData(appdata))
    return appdata
@transact
def init_appdata(store):
    """Transactional wrapper around db_init_appdata."""
    return db_init_appdata(store)
| # -*- coding: UTF-8
# datainit.py: database initialization
# ******************
import json
import os
from globaleaks import models
from globaleaks.orm import transact
#from globaleaks.handlers.submission import db_assign_submission_sequence
from globaleaks.handlers.admin.field import db_create_field
from globaleaks.rest import errors
from globaleaks.settings import GLSettings
def load_appdata():
    """Load the application data JSON file from GLSettings.appdata_file.

    Returns:
        dict: the parsed application data.

    Raises:
        errors.InternalServerError: if the appdata file does not exist.
    """
    if os.path.exists(GLSettings.appdata_file):
        # Use the portable 'open' builtin instead of the python2-only 'file'.
        with open(GLSettings.appdata_file, 'r') as f:
            json_string = f.read()
            appdata_dict = json.loads(json_string)
            return appdata_dict

    raise errors.InternalServerError("Unable to load application data")
def load_default_fields(store):
    """Create the default field models from the JSON files found in
    GLSettings.fields_path.

    Args:
        store: the database store used to create the field models.

    Raises:
        errors.InternalServerError: if GLSettings.fields_path does not exist.
    """
    if os.path.exists(GLSettings.fields_path):
        for fname in os.listdir(GLSettings.fields_path):
            fpath = os.path.join(GLSettings.fields_path, fname)
            # Use the portable 'open' builtin instead of the python2-only 'file'.
            with open(fpath, 'r') as f:
                json_string = f.read()
                field_dict = json.loads(json_string)
                db_create_field(store, field_dict, None)
        return

    raise errors.InternalServerError("Unable to load default fields")
def db_init_appdata(store):
    """Drop the stored ApplicationData and insert a freshly loaded copy.

    Args:
        store: the database store.

    Returns:
        The newly stored application data dictionary.
    """
    new_appdata = load_appdata()
    # Clear out the previously stored copy first.
    store.find(models.ApplicationData).remove()
    store.add(models.ApplicationData(new_appdata))
    return new_appdata
@transact
def init_appdata(store):
    """Transactional wrapper around db_init_appdata."""
    return db_init_appdata(store)
| agpl-3.0 | Python |
99b7db8defaa98feaa64cd3ba3d7f104ebcc43e5 | Change repr(irNull) to output 'irNull' instead of the db-representation | kata198/indexedredis,kata198/indexedredis | IndexedRedis/fields/null.py | IndexedRedis/fields/null.py | # Copyright (c) 2014, 2015, 2016, 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.
#
# null - The irNull singleton and IRNullType
#
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
import sys
# Names exported via 'from IndexedRedis.fields.null import *'.
__all__ = ('IR_NULL_STR', 'IR_NULL_BYTES', 'IR_NULL_UNICODE', 'IR_NULL_STRINGS', 'IRNullType', 'irNull')
# py2/py3 compatibility shim: python3 has no 'unicode' builtin, alias it to str.
try:
	unicode
except NameError:
	unicode = str
# Canonical serialized representations of a null, in each string flavour.
IR_NULL_STR = 'IRNullType()'
IR_NULL_BYTES = b'IRNullType()'
IR_NULL_UNICODE = u'IRNullType()'
# Representations checked when detecting a stored null value.
if sys.version_info.major >= 3:
	IR_NULL_STRINGS = (IR_NULL_STR, IR_NULL_BYTES)
else:
	# On python2, comparing the str and unicode forms generates a unicode
	# warning (a condition we shouldn't hit anyway), so only the str form
	# is included there.
	# IR_NULL_STRINGS = (IR_NULL_STR, IR_NULL_UNICODE)
	IR_NULL_STRINGS = (IR_NULL_STR, )
# TODO: May be indexed as empty string on types that would str the value. empty string != null
# Base-class selection: there is an odd "feature" of python 2.7 where the
# __eq__ method is NOT called for
#   u'' == irNull
# (though it is in all other forms, including: irNull == u'')
# when IRNullType extends str. When it extends unicode it works as expected,
# hence IrNullBaseType is unicode on python2 and str on python3.
if unicode == str:
	IrNullBaseType = str
else:
	IrNullBaseType = unicode
class IRNullType(IrNullBaseType):
	'''
	Represents NULL for every field type other than plain strings (which
	have no NULL). A value of this type compares equal ONLY to other
	IRNullType values: it does not equal '', False, or None.

	You probably shouldn't ever need to use this directly; use the shared
	module-level instance, "irNull", instead.
	'''

	def __new__(self, val=''):
		'''
		Any supplied value is ignored; an IRNullType is always empty.
		'''
		return IrNullBaseType.__new__(self, '')

	def __eq__(self, otherVal):
		# Equal only to IRNullType instances (and subclasses thereof).
		return isinstance(otherVal, IRNullType)

	def __ne__(self, otherVal):
		return not isinstance(otherVal, IRNullType)

	def __str__(self):
		return ''

	def __bool__(self):
		# python3 truthiness: null is always falsy.
		return False

	def __nonzero__(self):
		# python2 counterpart of __bool__.
		return False

	def __repr__(self):
		return 'irNull'
# For all fields which have a type, a null value is returned as irNull.
# IRNullType('') != str(''), so nulls can be filtered out of results, e.g.:
#   myObjs = MyModel.objects.all()
#   notNullMyFieldObjs = myObjs.filter(myField__ne=irNull)
# NOTE: the former 'global irNull' statement was removed; 'global' is a no-op
# at module scope, where assignment already creates a module-level global.
irNull = IRNullType()
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
| # Copyright (c) 2014, 2015, 2016, 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.
#
# null - The irNull singleton and IRNullType
#
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
import sys
# Names exported via 'from IndexedRedis.fields.null import *'.
__all__ = ('IR_NULL_STR', 'IR_NULL_BYTES', 'IR_NULL_UNICODE', 'IR_NULL_STRINGS', 'IRNullType', 'irNull')
# py2/py3 compatibility shim: python3 has no 'unicode' builtin, alias it to str.
try:
	unicode
except NameError:
	unicode = str
# Canonical serialized representations of a null, in each string flavour.
IR_NULL_STR = 'IRNullType()'
IR_NULL_BYTES = b'IRNullType()'
IR_NULL_UNICODE = u'IRNullType()'
# Representations checked when detecting a stored null value.
if sys.version_info.major >= 3:
	IR_NULL_STRINGS = (IR_NULL_STR, IR_NULL_BYTES)
else:
	# On python2, comparing the str and unicode forms generates a unicode
	# warning (a condition we shouldn't hit anyway), so only the str form
	# is included there.
	# IR_NULL_STRINGS = (IR_NULL_STR, IR_NULL_UNICODE)
	IR_NULL_STRINGS = (IR_NULL_STR, )
# TODO: May be indexed as empty string on types that would str the value. empty string != null
# Base-class selection: there is an odd "feature" of python 2.7 where the
# __eq__ method is NOT called for
#   u'' == irNull
# (though it is in all other forms, including: irNull == u'')
# when IRNullType extends str. When it extends unicode it works as expected,
# hence IrNullBaseType is unicode on python2 and str on python3.
if unicode == str:
	IrNullBaseType = str
else:
	IrNullBaseType = unicode
class IRNullType(IrNullBaseType):
	'''
	The type used to represent NULL for anything except strings (strings
	have no NULL). Values of this type compare equal only to other
	IRNullType values -- '' does not equal IRNullType(), and even False
	does not equal irNull.

	You probably shouldn't ever need to use this directly; use the shared
	module-level instance, "irNull", instead.
	'''

	def __new__(self, val=''):
		'''
		The value argument is discarded: a null is always the empty string.
		'''
		return IrNullBaseType.__new__(self, '')

	def __eq__(self, otherVal):
		return issubclass(otherVal.__class__, IRNullType)

	def __ne__(self, otherVal):
		# Defined in terms of __eq__ so the two can never disagree.
		return not self.__eq__(otherVal)

	def __str__(self):
		return ''

	def __bool__(self):
		# python3 truthiness: null is always falsy.
		return False

	def __nonzero__(self):
		# python2 counterpart of __bool__.
		return False

	def __repr__(self):
		return IR_NULL_STR
# For all fields which have a type, a null value is returned as irNull.
# IRNullType('') != str(''), so nulls can be filtered out of results, e.g.:
#   myObjs = MyModel.objects.all()
#   notNullMyFieldObjs = myObjs.filter(myField__ne=irNull)
# NOTE: the former 'global irNull' statement was removed; 'global' is a no-op
# at module scope, where assignment already creates a module-level global.
irNull = IRNullType()
# vim:set ts=8 shiftwidth=8 softtabstop=8 noexpandtab :
| lgpl-2.1 | Python |
6c5b2183479ff6d6f68ef6e2a71fba4bc4fb6f60 | Move hard-coded panels into `fetch_panels` | nrejack/redi,nrejack/redi,nrejack/redi,nrejack/redi,nrejack/redi,nrejack/redi,nrejack/redi | config/preproc/preproc.py | config/preproc/preproc.py | #!/usr/bin/env python
# Contributors:
# Nicholas Rejack <nrejack@ufl.edu>
# Kevin Hanson <hansonks@gmail.com>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
import csv
import shutil
# Clinical-component-to-LOINC mapping file.
# NOTE(review): the 'componenet' misspelling is kept as-is -- presumably it
# matches the actual filename on disk; confirm before renaming.
clinical_component_to_loinc_path = 'clinical-componenet-to-loinc-mapping.xml'
# Raw lab-results CSV; rewritten in place by run_processing().
results_path = 'raw.txt'
# Column in the results file holding the subject identifier.
subject_id_column = 'STUDY_ID'
# Translation table configuration file.
translation_table_path = 'translationTable.xml'
def run_processing():
    """Top-level pipeline: load the raw results, group them by panel,
    filter them against each subject's consent date, and rewrite the
    results file in place."""
    rows = load(results_path)
    subject_ids = [row[subject_id_column] for row in rows]
    consent_dates = fetch_consent_dates(subject_ids)
    panels = fetch_panels('clinical-component-to-loinc.xml',
                          'translationTable.xml')
    # group_rows_by_panel yields a mapping such as:
    #   {'rna': [<csv_row>, <csv_row>, <csv_row>],
    #    'cbc': [],
    #    'NONE': [<csv_row>, <csv_row>]}
    grouped_by_panel = group_rows_by_panel(panels, rows)
    filtered = filter_old_labs(grouped_by_panel, consent_dates)
    save(rows.fieldnames, filtered, results_path)
def fetch_consent_dates(subject_ids):
    """Fetch each subject's consent date (stub -- not yet implemented).

    Args:
        subject_ids: list of subject identifiers from the results file.
    """
    raise NotImplementedError()
def fetch_panels(loinc_mapping, translation_table):
    """Return the mapping of panel name -> clinical component ids.

    The panel definitions are currently hard-coded; *loinc_mapping* and
    *translation_table* are accepted for the eventual file-driven version.
    """
    rna_ids = [1230, 3774, 1914, 4189, 6912, 1561675, 6860]
    cbc_ids = [1534435, 918, 1534444, 1577116, 1009, 1558101, 1539315, 913,
               999, 1577876]
    chem_ids = [1534098, 971, 1534081, 968, 1810650, 1526000, 1525870,
                1558221, 1534076]
    inr_ids = [1534098, 1810583]
    return {'rna': rna_ids, 'cbc': cbc_ids, 'chem': chem_ids, 'inr': inr_ids}
def filter_old_labs(rows, consent_dates):
    """Filter the grouped lab rows using *consent_dates*
    (stub -- not yet implemented)."""
    raise NotImplementedError()
def group_rows_by_panel(panels, rows):
    """Group result rows by panel name (stub -- not yet implemented)."""
    raise NotImplementedError()
def load(filepath):
    """Load the results CSV at *filepath* (stub -- not yet implemented).

    The commented-out sketch below would return a csv.DictReader, but note
    the underlying file would be closed when the 'with' block exits.
    """
    # with open(filepath) as fp:
    #     return csv.DictReader(fp)
    raise NotImplementedError()
def main():
    """Script entry point."""
    run_processing()
def save(headers, rows, path, backup=shutil.copy2, open_file=open):
    """Write *rows* (dicts keyed by *headers*) to *path* as CSV.

    A backup copy of the existing file is made first via *backup*; pass a
    falsy value to skip it. *open_file* is injectable for testing.
    """
    if backup:
        backup(path, path + '.bak')

    with open_file(path, 'w') as out:
        csv_writer = csv.DictWriter(out, fieldnames=headers)
        csv_writer.writeheader()
        for record in rows:
            csv_writer.writerow(record)
if __name__ == "__main__":
main() | #!/usr/bin/env python
# Contributors:
# Nicholas Rejack <nrejack@ufl.edu>
# Kevin Hanson <hansonks@gmail.com>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
import csv
import shutil
# Clinical-component-to-LOINC mapping file.
# NOTE(review): the 'componenet' misspelling is kept as-is -- presumably it
# matches the actual filename on disk; confirm before renaming.
clinical_component_to_loinc_path = 'clinical-componenet-to-loinc-mapping.xml'
# Raw lab-results CSV; rewritten in place by run_processing().
results_path = 'raw.txt'
# Column in the results file holding the subject identifier.
subject_id_column = 'STUDY_ID'
# Translation table configuration file.
translation_table_path = 'translationTable.xml'
def run_processing():
    """Top-level pipeline: load the raw results, group them by panel,
    filter them against each subject's consent date, and rewrite the
    results file in place."""
    rows = load(results_path)
    subject_ids = []
    for row in rows:
        subject_ids.append(row[subject_id_column])
    consent_dates = fetch_consent_dates(subject_ids)
    # panels = fetch_panels('clinical-component-to-loinc.xml','translationTable.xml')
    # BUG FIX: the original dict literal was missing the commas between its
    # entries, which is a SyntaxError.
    panels = {
        'rna': [1230, 3774, 1914, 4189, 6912, 1561675, 6860],
        'cbc': [1534435, 918, 1534444, 1577116, 1009, 1558101, 1539315, 913, 999, 1577876],
        'chem': [1534098, 971, 1534081, 968, 1810650, 1526000, 1525870, 1558221, 1534076],
        'inr': [1534098, 1810583]
    }
    grouped_by_panel = group_rows_by_panel(panels, rows)
    #grouped_by_panel = {
    #    'rna': [<csv_row>, <csv_row>, <csv_row>],
    #    'cbc': [],
    #    'NONE': [<csv_row>, <csv_row>]
    #}
    filtered = filter_old_labs(grouped_by_panel, consent_dates)
    save(rows.fieldnames, filtered, results_path)
def fetch_consent_dates(subject_ids):
    """Fetch each subject's consent date (stub -- not yet implemented).

    Args:
        subject_ids: list of subject identifiers from the results file.
    """
    raise NotImplementedError()
def fetch_panels(loinc_mapping, translation_table):
    """Build the panel-name -> component-id mapping from the given
    configuration files (stub -- not yet implemented)."""
    raise NotImplementedError()
def filter_old_labs(rows, consent_dates):
    """Filter the grouped lab rows using *consent_dates*
    (stub -- not yet implemented)."""
    raise NotImplementedError()
def group_rows_by_panel(panels, rows):
    """Group result rows by panel name (stub -- not yet implemented)."""
    raise NotImplementedError()
def load(filepath):
    """Load the results CSV at *filepath* (stub -- not yet implemented).

    The commented-out sketch below would return a csv.DictReader, but note
    the underlying file would be closed when the 'with' block exits.
    """
    # with open(filepath) as fp:
    #     return csv.DictReader(fp)
    raise NotImplementedError()
def main():
    """Script entry point."""
    run_processing()
def save(headers, rows, path, backup=shutil.copy2, open_file=open):
    """Write *rows* (dicts keyed by *headers*) to *path* as CSV.

    A backup copy of the existing file is made first via *backup*; pass a
    falsy value to skip it. *open_file* is injectable for testing.
    """
    if backup:
        backup(path, path + '.bak')
    with open_file(path, 'w') as fp:
        dict_writer = csv.DictWriter(fp, fieldnames=headers)
        dict_writer.writeheader()
        dict_writer.writerows(iter(rows))
if __name__ == "__main__":
main() | bsd-3-clause | Python |
cd09540f41ee34b360248aa4637b063c82c23e35 | add update_gemspec (#276) | googleapis/synthtool,googleapis/synthtool,googleapis/synthtool,googleapis/synthtool,googleapis/synthtool | synthtool/languages/ruby.py | synthtool/languages/ruby.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import re
from typing import Iterable, Union
import synthtool
# A filesystem path given either as a str or a pathlib.Path.
PathOrStr = Union[str, Path]
# One path or any iterable of paths.
ListOfPathsOrStrs = Iterable[PathOrStr]
def merge_gemspec(src: str, dest: str, path: Path):
    """Merge function for ruby gemspecs.

    Keeps the gem version and homepage lines from the destination and takes
    every other line from the newly generated source.

    Args:
        src: Source gemspec content from gapic
        dest: Destination gemspec content
        path: Destination gemspec path

    Returns:
        The merged gemspec content.
    """
    preserved_patterns = (
        r'^\s+gem.version\s*=\s*"[\d\.]+"$',
        r'^\s+gem.homepage\s*=\s*"[^"]+"$',
    )
    for pattern in preserved_patterns:
        regex = re.compile(pattern, flags=re.MULTILINE)
        match = regex.search(dest)
        if match:
            # Replace the first matching line in src with dest's line.
            src = regex.sub(match.group(0), src, count=1)
    return src
def update_gemspec(src: PathOrStr):
    """Updates the required ruby version and google-style dependency.

    Args:
        src: Source gemspec
    """
    # Pin the minimum supported ruby version.
    ruby_version_re = 'required_ruby_version[\\s=]*"([~><=\\s\\d\\.]*)"'
    synthtool.replace([src], ruby_version_re, 'required_ruby_version = ">= 2.4"')
    # Swap the rubocop dependency for google-style, then pin its version.
    synthtool.replace([src], "rubocop", "google-style")
    style_version_re = '"google-style"[,\\s]*"[~><=\\s\\d\\.]*"'
    synthtool.replace([src], style_version_re, '"google-style", "~> 1.24.0"')
def delete_method(sources: ListOfPathsOrStrs, method_name: str):
    """Deletes a Ruby method, including the leading comment if any.

    Args:
        sources: Source file or list of files
        method_name: Name of the method to delete
    """
    # Matches: optional comment lines, the 'def <name>' line, the indented
    # body, and the closing 'end' at the same indent (captured as group 2).
    pattern = f"\\n\\n(\\s+#[^\\n]*\\n)*\\n*(\\s+)def\\s+{method_name}[^\\n]+\\n+(\\2\\s\\s[^\\n]+\\n+)*\\2end\\n"
    synthtool.replace(sources, pattern, "\n")
| # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import re
from typing import Iterable, Union
import synthtool
# A filesystem path given either as a str or a pathlib.Path.
PathOrStr = Union[str, Path]
# One path or any iterable of paths.
ListOfPathsOrStrs = Iterable[PathOrStr]
def merge_gemspec(src: str, dest: str, path: Path):
    """Merge function for ruby gemspecs.

    Preserves the gem version and homepage fields from the destination, and
    copies the remaining fields from the newly generated source.

    Args:
        src: Source gemspec content from gapic
        dest: Destination gemspec content
        path: Destination gemspec path

    Returns:
        The merged gemspec content.
    """
    def carry_over(pattern, text):
        # Replace the first match in *text* with dest's matching line.
        regex = re.compile(pattern, flags=re.MULTILINE)
        found = regex.search(dest)
        if found is None:
            return text
        return regex.sub(found.group(0), text, count=1)

    src = carry_over(r'^\s+gem.version\s*=\s*"[\d\.]+"$', src)
    src = carry_over(r'^\s+gem.homepage\s*=\s*"[^"]+"$', src)
    return src
def delete_method(sources: ListOfPathsOrStrs, method_name: str):
    """Deletes a Ruby method, including the leading comment if any.

    Args:
        sources: Source file or list of files
        method_name: Name of the method to delete
    """
    # Group 2 captures the method's indentation so the body lines (indented
    # two spaces further) and the matching 'end' can be located.
    method_re = f"\\n\\n(\\s+#[^\\n]*\\n)*\\n*(\\s+)def\\s+{method_name}[^\\n]+\\n+(\\2\\s\\s[^\\n]+\\n+)*\\2end\\n"
    synthtool.replace(sources, method_re, "\n")
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.