id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3559698 | <gh_stars>0
from finq import FINQ
def test_pairs():
    """enumerate() must pair every element with its zero-based index."""
    source = [1, 2, 5, 7]
    result = FINQ(source).enumerate().to_list()
    assert result == [(0, 1), (1, 2), (2, 5), (3, 7)]
def test_pairs_from_1():
    """enumerate(1) must start the index sequence at 1 instead of 0."""
    source = [1, 2, 5, 7]
    result = FINQ(source).enumerate(1).to_list()
    assert result == [(1, 1), (2, 2), (3, 5), (4, 7)]
| StarcoderdataPython |
class FlashCard(object):
    """Plain data holder for a single flash card."""

    def __init__(self, title, content, media, hierarchy):
        """Store every constructor argument verbatim as a public attribute."""
        self.title, self.content = title, content
        self.media, self.hierarchy = media, hierarchy
| StarcoderdataPython |
11366949 | <reponame>chapman-mcd/Flip-Sign<filename>FlipDotSign.py
"""
Since this is basically firmware, it should be able to handle a lot of hard coding. Some of the elements,
especially the events to add, will be difficult to allow the user to send.
1. ???
2. List of message objects
3. Pop random element off list of message objects
4. When list reaches X entries, start the ??? process in a new thread to update the list of message objects
5. Repeat with new list of message objects
"""
from MessageClasses import *
from DisplayClasses import *
import googleapiclient.errors
import copy
import random
import time
import serial
from TransitionFunctions import *
from Generate_Layout import *
from MessageGenerator import *
from WeatherClasses import *
import os
from datetime import time as dt_time
# NOTE(review): `z` looks like a throwaway/smoke-test transition; confirm it is needed.
z = SimpleTransition('', 'z')
# Default and minimum font sizes tried when rendering messages.
fontsize = 9
minfontsize = 3
# Seconds each message stays on the display (overridden to 5 on Mac/dev below).
wait_time = 300
base_directory = os.path.dirname(__file__)
# NOTE(review): readline() keeps a trailing newline if the file ends with one,
# and these handles are never closed -- consider read().strip() inside `with`.
weather_API_key = open(os.path.join(base_directory, 'WeatherKey.txt')).readline()
default_font_path = os.path.join(base_directory, 'PressStart2P.ttf')
google_sheet_id = open(os.path.join(base_directory, 'GoogleSheet.txt')).readline()
google_location_key = open(os.path.join(base_directory, 'Google_Location_Key.txt')).readline()
# Asked interactively at startup; passed to WeatherLocation as home_location.
home_location = input('Please enter zip code for home location: ')
def GetGoogleSheetData(sheetID, credentials, lstCalendars, lstTemporaryMessages):
    """Read message/calendar configuration rows from a Google Sheet and
    populate the two output lists in place.

    Args:
        sheetID: id of the spreadsheet; the "Messages!A:C" range is read.
        credentials: Google API credentials used to build the Sheets service.
        lstCalendars: output list; GoogleCalendar objects are appended.
        lstTemporaryMessages: output list; message objects are appended.

    Raises:
        IOError: when there is no internet connection.
        ValueError: when the Sheets service is still unavailable after retries.
    """
    result = {}
    try:
        SHEETS = build('sheets', 'v4', credentials=credentials)
    except httplib2.ServerNotFoundError:
        raise IOError("No Internet Connection")
    try_again = True
    num_times = 1
    # Service may be unavailable, so try up to 3 times with exponential backoff.
    while try_again:
        try:
            # if successful, then clear try_again to get out of the loop
            result = SHEETS.spreadsheets().values().get(spreadsheetId=sheetID, range="Messages!A:C").execute()
            try_again = False
        except googleapiclient.errors.HttpError:
            num_times += 1
            if num_times == 4:
                # after 4 attempts, raise ValueError to be caught by the caller
                raise ValueError
            # BUGFIX: original used `2 ^ num_times` (bitwise XOR), which defeated
            # the intended exponential backoff; `**` is exponentiation.
            time.sleep(int(random.random() * (2 ** num_times - 1)))
        except httplib2.ServerNotFoundError:
            raise IOError("No Internet Connection")
    for processmessage in result['values']:
        # Column A selects the row type; columns B/C carry its parameters.
        if processmessage[0] == "GCal":
            lstCalendars.append(GoogleCalendar(processmessage[1], credentials))
        elif processmessage[0] == "SpecificDateMessage":
            lstTemporaryMessages.append(SpecificDateMessage(processmessage[1], parse(processmessage[2])))
        elif processmessage[0] == "BasicTextMessage":
            lstTemporaryMessages.append(BasicTextMessage(processmessage[1]))
        elif processmessage[0] == "MessageGenerator":
            lstGeneratedMessages = Message_Generator(processmessage[1], processmessage[2]).create_messages()
            for generated_message in lstGeneratedMessages:
                lstTemporaryMessages.append(generated_message)
        elif processmessage[0] == "WeatherLocation":
            try:
                location = WeatherLocation(processmessage[1], processmessage[2], weather_API_key,
                                           default_font_path, google_location_key=google_location_key)
                lstTemporaryMessages.append(location.ten_day_forecast(rows=21, columns=168, daysfromnow=0))
            except urllib.error.HTTPError:
                # Best-effort: a failed forecast just drops that one message.
                print("Problem with weather API")
# If system is running on mac (development)
if os.uname().sysname == "Darwin":
Display = FakeFlipDotDisplay(columns=168, rows=21, serialinterface=None, layout=None)
transition_functions = [SimpleTransition]
# override wait_time for faster running / debugging
wait_time = 5
# if system is running on raspberry linux (production)
elif os.uname().sysname == "Linux":
port = '/dev/ttyS0'
serialinterface = serial.Serial(port=port, baudrate=57600, parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS,
timeout=1, stopbits=serial.STOPBITS_ONE)
Display = FlipDotDisplay(columns=168, rows=21, serialinterface=serialinterface, layout=Generate_Layout_2())
transition_functions = [SimpleTransition, dissolve_changes_only]
else:
raise ValueError("Unsupported platform - must be MacOS or Linux")
# set up list of transit messages - since this is static, it is done outside the loop
lstTransitMessages = []
# lstTransitMessages.append(TransitMessageURL(
# "http://www.norta.com/Mobile/whers-my-busdetail.aspx?stopcode=235&routecode=10123&direction=0", "Street Car"))
# lstTransitMessages.append(TransitMessageURL(
# "http://www.norta.com/Mobile/whers-my-busdetail.aspx?stopcode=145&routecode=10122&direction=0", "Magazine Bus"))
# lstTransitMessages.append(TransitMessageURL(
# "http://www.norta.com/Mobile/whers-my-busdetail.aspx?stopcode=58&routecode=10121&direction=0", "Tchoup Bus"))
q = datetime(1990, 1, 1, 1, 1)
start_time = dt_time(6,45)
end_time = dt_time(23,00)
while True:
q = datetime(1990, 1, 1, 1, 1)
now_time_fix = q.now().time()
if start_time < now_time_fix < end_time:
# Reset list of calendars and messages to display
lstCalendars = []
lstMessagestoDisplay = []
lstTemporaryMessages = []
try:
# attempt to get new temporary messages and calendars from the google spreadsheet
# the "check" list is used so that the temporary messages list is only replaced if the internet is up
check = []
GetGoogleSheetData(google_sheet_id, get_credentials(), lstCalendars, check)
lstTemporaryMessages = check
print("Pulled google sheet data")
except IOError:
# if the internet is down, do nothing
print("Found no internet connection when pulling google sheet data.")
pass
except ValueError:
print("No google service when opening google sheet.")
lstTemporaryMessages.append(BasicTextMessage("No Google Service"))
# for each calendar in the list of google calendars we want to display
# if the internet connection check earlier was unsuccessful, then this will be an empty list and the whole block
# will be skipped
for cal in lstCalendars:
# create a temporary list of messages from the google calendar routine
temp = []
try:
# run the message creation
in_tuple = cal.create_messages(5)
# the first element of the tuple is a list of event messages
temp = in_tuple[0]
# the second element of the tuple is a list of tuples
# first element of each tuple is the location string
# second element is the number of days until that event
for location in in_tuple[1]:
# turn the first element of each tuple into a weather location
weather_location = WeatherLocation(location[0], location[0], weather_API_key,
default_font_path, google_location_key=google_location_key,
home_location=home_location)
# get the forecast - go ahead a max of five days or until the event starts
num_of_days_until = min(5, location[1])
weather_forecast = weather_location.ten_day_forecast(rows=21, columns=168,
daysfromnow=num_of_days_until)
temp.append(weather_forecast)
print("Created messages from google calendar.")
except IOError:
pass
print("No internet connection when pulling from google calendar.")
# for each message we got back from GCal, add that to the list of temporary messages
for message in temp:
lstTemporaryMessages.append(message)
# if it's between 6 and 9 AM, we care a lot more about transit than anything else, add a lot more of those
if 6 < datetime.now().hour < 9:
for i in range(3):
lstMessagestoDisplay += copy.deepcopy(lstTransitMessages)
# build the list of messages to display
lstMessagestoDisplay += copy.deepcopy(lstTransitMessages)
lstMessagestoDisplay += lstTemporaryMessages
random.shuffle(lstMessagestoDisplay)
# for each messages in our list to display, make the display show it then wait for 1 second before sending next
for message in lstMessagestoDisplay:
try:
Display.update(random.choice(transition_functions), message,
font=ImageFont.truetype(default_font_path, size=9))
time.sleep(wait_time)
# if we've got an internet connection problem, tell the user about it
except IOError:
Display.update(SimpleTransition, BasicTextMessage("Check Internet"),
font=ImageFont.truetype(default_font_path, size=9))
except DateMessageInPastError:
# if it's a one time specific date message, then valueerror means the date is passed
# if it's not a one-time specific date message, then this is a real error
if isinstance(message, OneTimeSpecificDateMessage):
print("Had a case where a one-time specific date message was in the past.")
pass
except StringTooLongError:
trysize = fontsize - 1
while trysize >= minfontsize:
try:
Display.update(SimpleTransition, message,
font=ImageFont.truetype(default_font_path, size=trysize))
time.sleep(wait_time)
break
except StringTooLongError:
trysize += -1
# give the dots some exercise
# flip to all white and then all black
# PIL wants the image size as width, height so run the tuple backwards
Display.show(Image.new('1', Display.get_size()[::-1], 1))
time.sleep(1)
Display.show(Image.new('1', Display.get_size()[::-1], 0))
| StarcoderdataPython |
1828774 | import json, hashlib, requests
from django.test import TestCase, Client
# class QAResponseTest(TestCase):
# def test_response(self):
# # c = Client()
# qa_list = [
# {
# "question":"My child is sick where can he get measles vaccine ?",
# "answer":"Please go to khandala",
# },
# {
# "question":"My child is sick where can he get rubella vaccine ?",
# "answer":"Please go to khandala",
# },
# {
# "question":"My child is sick where can he get polio vaccine ?",
# "answer":"Please go to khandala",
# },
# ]
# for x in qa_list:
# query_string = x['question']+x['answer']
# qa_hash = hashlib.sha512(query_string.encode()).hexdigest()
# x['id']=qa_hash
# batch_response_test = {
# "question_answer_list" : qa_list
# }
# # import pdb
# # pdb.set_trace()
# url = "http://0.0.0.0:8000"
# base_url = url + "/extract-keywords"
# response = requests.post(base_url, data=json.dumps(batch_response_test))
# print(response.text)
# # response = c.post('/extract-keywords', data=batch_response_test)
# # print(response) | StarcoderdataPython |
9712013 | <reponame>maximg/vcs-archeo
#import re
import sqlite3
import pprint
import pydot
import sys
# Map of node id -> (id, path, rank, parent) loaded from the project database.
nodes = {}
# Only the first maxNodes ids are graphed to keep the layout tractable.
maxNodes = 300
# Project name comes from argv[1], defaulting to 'svnplot'.
if len(sys.argv) > 1:
    proj = sys.argv[1]
else:
    proj = 'svnplot'
#proj = 'hadoop-common'
#proj = 'squirrelmail'
#proj = 'squirrel-sql'
#proj = 'notepad-plus'
#proj = 'zero-install'
#proj = 'codeswarm'
# Windows-style relative path to the per-project sqlite database.
conn = sqlite3.connect('..\\data\\' + proj + '\\' + proj + '.sqlite')


def getMinMaxChanges():
    """Return (min, max) change counts for colour scaling.

    Queried from the DB only for 'hadoop-common'; every other project uses the
    hard-coded fallback (1, 100).
    """
    c = conn.cursor()
    if proj == 'hadoop-common':
        rows = c.execute('SELECT min(cnt), max(cnt) as max FROM SvnNodes')
        for row in rows:
            # NOTE(review): these shadow the builtins min/max.
            min = row[0]
            max = row[1]
        c.close()
        return (min,max)
    c.close()
    return (1,100)


def getNodes():
    """Populate the module-level ``nodes`` dict (ids below maxNodes only)."""
    c1 = conn.cursor()
    rows = c1.execute('''
    select nd.id, nd.path, nd.rank, cp.fromId parent from SVNNodesVw nd
    left outer join SVNCopiesVw cp on nd.id = cp.id
    order by nd.id''')
    for node in rows:
        id = node[0]
        if id < maxNodes:
            nodes[id] = node
    c1.close()
def getColor(cnt, min, max):
    """Map a change count onto a heat colour relative to *max*.

    More than 20% of max -> "red", more than 10% -> "yellow", otherwise
    "grey".  The *min* argument is accepted but unused (kept for callers).
    """
    if cnt > max / 5:
        return "red"
    if cnt > max / 10:
        return "yellow"
    return "grey"
# Colour scale bounds and empty undirected graph (neato 'scale' overlap mode).
(min,max) = getMinMaxChanges()
graph = pydot.Dot(graph_type='graph', overlap='scale')
# remap records rename chains: old node id -> the id that replaced it.
remap = {}
getNodes()
new_nodes = {}
# Fold copied/renamed nodes into their successors, accumulating rank.
for id in nodes:
    (x, path, rank, parent) = nodes[id]
    # check for renames
    if not parent is None:
        pprint.pprint(nodes[parent])
        (x1, path1, prank, parent1) = nodes[parent]
        rank = rank + prank
        remap[parent] = id
        # NOTE(review): raises KeyError if the parent id has not been added to
        # new_nodes yet (i.e. parent id > child id) -- confirm ordering.
        del new_nodes[parent]
    new_nodes[id] = (id, path, rank, parent)
    #pprint.pprint(new_nodes[id])
# Emit one graph node per surviving entry.
for id in new_nodes:
    (x, path, rank, parent) = new_nodes[id]
    # NOTE(review): this if/else is dead -- label is unconditionally
    # overwritten two lines below.
    if rank > max/10:
        label = path.split('/')[-1]
    else:
        label = id
    label = path.split('/')[-1]
    color = getColor(rank, min, max)
    shape = "ellipse"
    if label == '':
        # it is a folder (path ends with '/'): green box labelled by dir name
        color = "green"
        label = path.split('/')[-2]
        shape = "box"
    label = label + " (" + str(id) + ")"
    graph.add_node(pydot.Node(id, label=label, style="filled", fillcolor=color, shape=shape, URL=path))
# Pairs of paths changed in the same revision, weighted by co-change count.
c = conn.cursor()
links = c.execute('''
SELECT t1.changedpathid p1, t2.changedpathid p2, count(t1.revno) weight
from SVNLogDetail t1, SVNLogDetail t2
where t1.revno = t2.revno
and p1 < p2
group by p1,p2
order by p1, p2, weight''')
def getLength(rank):
    """Preferred edge length: short (0.5) for heavy links, 1 otherwise."""
    return 0.5 if rank > 1 else 1
def getStyle(weight):
    """Draw multi-revision links bold; hide single-revision ones."""
    return 'bold' if weight > 1 else 'invis'
def getNode(id):
    """Follow the rename chain in the module-level ``remap`` to the final id."""
    while id in remap:
        id = remap[id]
    return id


# Add one edge per co-changed pair (both endpoints resolved through renames).
for (id1, id2, linkRank) in links:
    if id1 < maxNodes and id2 < maxNodes:
        #graph.add_edge( pydot.Edge( node1, node2, len=getLength(linkRank), style=getStyle(linkRank) ) )
        node1 = getNode(id1)
        node2 = getNode(id2)
        graph.add_edge( pydot.Edge( node1, node2, style=getStyle(linkRank), len=0.1 ) )
# Emit dot source plus a rendered png and client-side image map (neato layout).
graph.write(proj + '.dot')
graph.write_png(proj + '.png', prog='neato')
graph.write_cmapx(proj + '.map', prog='neato')
c.close()
conn.close()
284394 | """The package juman defines Japanese spacy.Language with JUMAN tokenizer."""
import itertools
from collections import namedtuple
from typing import Any, Callable, Dict, Iterator, List, Optional, Type
from spacy.compat import copy_reg
from spacy.language import Language
from spacy.tokens import Doc, Token
from camphr.consts import JUMAN_LINES, KEY_FSTRING
from camphr.lang.stop_words import STOP_WORDS
from camphr.utils import SerializationMixin, get_juman_command
ShortUnitWord = namedtuple(
"ShortUnitWord", ["surface", "lemma", "pos", "fstring", "space"]
)
_REPLACE_STRINGS = {"\t": " ", "\r": "", "(": "(", ")": ")", "\n": " "}
def han_to_zen_normalize(text):
    """Convert half-width characters to full-width, then normalize whitespace
    and parentheses via the module-level _REPLACE_STRINGS table."""
    try:
        import mojimoji
    except ImportError:
        raise ValueError("juman or knp Language requires mojimoji.")
    normalized = mojimoji.han_to_zen(text)
    for src, dst in _REPLACE_STRINGS.items():
        normalized = normalized.replace(src, dst)
    return normalized
class Tokenizer(SerializationMixin):
    """Juman tokenizer

    Note:
        `spacy.Token._.fstring` is set. The Juman's output is stored into it during tokenizing.
    """

    serialization_fields = ["preprocessor", "juman_kwargs"]
    key_fstring = KEY_FSTRING

    @classmethod
    def install_extensions(cls):
        """See https://github.com/explosion/spacy-pytorch-transformers#extension-attributes."""
        Token.set_extension(cls.key_fstring, default=None, force=True)

    def __init__(
        self,
        cls: Type["Defaults"],
        nlp: Optional[Language] = None,
        juman_kwargs: Optional[Dict[str, str]] = None,
        preprocessor: Optional[Callable[[str], str]] = han_to_zen_normalize,
    ):
        """
        Args:
            juman_kwargs: passed to `pyknp.Juman.__init__`
            preprocessor: applied to text before tokenizing. `mojimoji.han_to_zen` is often used.
        """
        from pyknp import Juman

        juman_kwargs = juman_kwargs or {}
        # Resolve the juman/jumanpp executable unless the caller forced one.
        default_command = get_juman_command()
        assert default_command
        juman_kwargs.setdefault("command", default_command)
        self.vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
        self.tokenizer = Juman(**juman_kwargs) if juman_kwargs else Juman()
        self.juman_kwargs = juman_kwargs
        self.preprocessor = preprocessor

    def reset_tokenizer(self):
        """Recreate the subprocess-backed Juman instance (used after pipe errors)."""
        from pyknp import Juman

        self.tokenizer = Juman(**self.juman_kwargs) if self.juman_kwargs else Juman()

    def __call__(self, text: str) -> Doc:
        """Make doc from text. Juman's `fstring` is stored in `Token._.fstring`"""
        if self.preprocessor:
            text = self.preprocessor(text)
        juman_lines = self._juman_string(text)
        dtokens = self._detailed_tokens(juman_lines)
        doc = self._dtokens_to_doc(dtokens)
        # Keep the raw Juman output on the Doc for downstream components.
        doc.user_data[JUMAN_LINES] = juman_lines
        return doc

    def _juman_string(self, text: str) -> str:
        """Run Juman over *text* (chunked, since Juman rejects long input) and
        return its raw line output."""
        try:
            texts = _split_text_for_juman(text)
            lines: str = "".join(
                itertools.chain.from_iterable(
                    self.tokenizer.juman_lines(text) for text in texts
                )
            )
        except BrokenPipeError:
            # Juman is sometimes broken due to its subprocess management.
            self.reset_tokenizer()
            lines = self.tokenizer.juman_lines(text)
        return lines

    def _dtokens_to_doc(self, dtokens: List[ShortUnitWord]) -> Doc:
        """Build a spacy Doc carrying lemma, tag and fstring per token."""
        words = [x.surface for x in dtokens]
        spaces = [x.space for x in dtokens]
        doc = Doc(self.vocab, words=words, spaces=spaces)
        for token, dtoken in zip(doc, dtokens):
            token.lemma_ = dtoken.lemma
            token.tag_ = dtoken.pos
            token._.set(self.key_fstring, dtoken.fstring)
        doc.is_tagged = True
        return doc

    def _detailed_tokens(self, juman_lines: str) -> List[ShortUnitWord]:
        """Tokenize text with Juman and format the outputs for further processing"""
        from pyknp import MList

        ml = MList(juman_lines).mrph_list()
        words: List[ShortUnitWord] = []
        for m in ml:
            surface = m.midasi
            pos = m.hinsi + "," + m.bunrui
            # Fall back to the surface form when Juman gives no lemma.
            lemma = m.genkei or surface
            words.append(ShortUnitWord(surface, lemma, pos, m.fstring, False))
        return words
_SEPS = ["。", ".", "."]
def _split_text_for_juman(text: str) -> Iterator[str]:
"""Juman denies long text (maybe >4096 bytes) so split text"""
n = 1000
if len(text) < n:
yield text
return
for sep in _SEPS:
if sep in text:
i = text.index(sep)
head, tail = text[: i + 1], text[i + 1 :]
if len(head) < n:
yield from _split_text_for_juman(head)
yield from _split_text_for_juman(tail)
return
# If any separator is not found in text, split roughly
yield text[:n]
yield from _split_text_for_juman(text[n:])
# for pickling. see https://spacy.io/usage/adding-languages
class Defaults(Language.Defaults):  # type: ignore
    """Language defaults for the JUMAN-based Japanese pipeline."""

    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    stop_words = STOP_WORDS
    writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}

    @classmethod
    def create_tokenizer(
        cls,
        nlp=None,
        juman_kwargs: Optional[Dict[str, Any]] = None,
        preprocessor: Optional[Callable[[str], str]] = han_to_zen_normalize,
    ):
        """Build the JUMAN :class:`Tokenizer` used by this language."""
        return Tokenizer(cls, nlp, juman_kwargs=juman_kwargs, preprocessor=preprocessor)
class Japanese(Language):
    """spacy Language whose tokenizer is JUMAN (registered as "ja_juman")."""

    lang = "ja_juman"
    Defaults = Defaults

    def make_doc(self, text: str) -> Doc:
        """Tokenize *text* directly with the JUMAN tokenizer."""
        return self.tokenizer(text)
# avoid pickling problem (see https://github.com/explosion/spaCy/issues/3191)
def pickle_japanese(instance):
    """Reduce function: Japanese instances pickle to a bare constructor call."""
    return Japanese, tuple()


copy_reg.pickle(Japanese, pickle_japanese)

# Register the language factory so spacy can instantiate it by name.
Language.factories[Japanese.lang] = Japanese

# for lazy loading. see https://spacy.io/usage/adding-languages
__all__ = ["Japanese"]

# Register the custom Token extension as a module-import side effect.
Tokenizer.install_extensions()
| StarcoderdataPython |
3346175 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from framework.to import TO, convert_to_unicode
from mcfw.properties import unicode_property, typed_property, long_property
class IYOSeeDocumenVersion(TO):
    """One version of an itsyou.online "See" document.

    NOTE(review): the class name is missing a 't' ("DocumenVersion"); kept
    as-is because external code references this name.
    """

    version = long_property('1')
    category = unicode_property('2')
    link = unicode_property('3')
    content_type = unicode_property('4')
    markdown_short_description = unicode_property('5')
    markdown_full_description = unicode_property('6')
    creation_date = unicode_property('7')
    start_date = unicode_property('8')
    end_date = unicode_property('9')
    keystore_label = unicode_property('10')
    signature = unicode_property('11')

    def __init__(self, version=1, category=None, link=None, content_type=None, markdown_short_description=None,
                 markdown_full_description=None, creation_date=None, start_date=None, end_date=None,
                 keystore_label=None, signature=None, **kwargs):
        # Every string-ish field is coerced to unicode for consistent storage;
        # extra kwargs are accepted and ignored (tolerant API deserialization).
        self.version = version
        self.category = convert_to_unicode(category)
        self.link = convert_to_unicode(link)
        self.content_type = convert_to_unicode(content_type)
        self.markdown_short_description = convert_to_unicode(markdown_short_description)
        self.markdown_full_description = convert_to_unicode(markdown_full_description)
        self.creation_date = convert_to_unicode(creation_date)
        self.start_date = convert_to_unicode(start_date)
        self.end_date = convert_to_unicode(end_date)
        self.keystore_label = convert_to_unicode(keystore_label)
        self.signature = convert_to_unicode(signature)
class IYOSeeDocument(TO):
    """A "See" document: identity plus the full list of its versions."""

    username = unicode_property('1')
    globalid = unicode_property('2')
    uniqueid = unicode_property('3')
    versions = typed_property('4', IYOSeeDocumenVersion, True)  # type: list[IYOSeeDocumenVersion]

    def __init__(self, username=None, globalid=None, uniqueid=None, versions=None, **kwargs):
        self.username = convert_to_unicode(username)
        self.globalid = convert_to_unicode(globalid)
        self.uniqueid = convert_to_unicode(uniqueid)
        if not versions:
            versions = []
        # Each version may arrive as a plain dict; rebuild typed objects.
        self.versions = [IYOSeeDocumenVersion(**v) for v in versions]
class IYOSeeDocumentView(IYOSeeDocumenVersion):
    """A flattened view: one document version together with its document identity."""

    # Property indices 51+ avoid clashing with the base class's 1-11.
    username = unicode_property('51')
    globalid = unicode_property('52')
    uniqueid = unicode_property('53')

    def __init__(self, username=None, globalid=None, uniqueid=None, version=1, category=None, link=None,
                 content_type=None, markdown_short_description=None, markdown_full_description=None,
                 creation_date=None, start_date=None, end_date=None, keystore_label=None, signature=None, **kwargs):
        # Positional order matches IYOSeeDocumenVersion.__init__ exactly.
        super(IYOSeeDocumentView, self).__init__(version, category, link, content_type, markdown_short_description,
                                                 markdown_full_description, creation_date, start_date, end_date,
                                                 keystore_label, signature, **kwargs)
        self.username = convert_to_unicode(username)
        self.globalid = convert_to_unicode(globalid)
        self.uniqueid = convert_to_unicode(uniqueid)
| StarcoderdataPython |
8036225 | from flask import Flask, request, jsonify
from UtterenceModel import predictUtterence
from DeepLearntFeatures import featureMean,feature20BinMeans
from ConversationModel import predictConversationOffline, predictConversationOnline
from flask_cors import CORS
app = Flask(__name__)
# Allow cross-origin requests (the front-end is served separately).
cors = CORS(app)
# Uploaded utterances are written here before being fed to the model.
utterence_folder = './utterences/'


@app.route("/")
def home():
    """Health-check endpoint."""
    return "success"


@app.route("/utterence",methods = ['POST'])
def utterenceEmotionPrediction():
    """Predict the emotion of one uploaded utterance (form field 'audio').

    NOTE(review): every request writes to the same 'utt.wav' path, so
    concurrent requests would clobber each other -- confirm single-client use.
    """
    file = request.files['audio']
    utterence_path = utterence_folder+'utt.wav'
    file.save(utterence_path)
    prediction = predictUtterence(utterence_path)
    return jsonify({'prediction': prediction[0]})
@app.route("/conversation/offline",methods = ['POST'])
def conversationEmotionPredictionOffline():
    """Predict per-timestep emotion distributions for an uploaded conversation.

    Expects the audio files in ``request.files`` and a ``speakers`` query
    parameter describing speaker turns.  Returns JSON of the form
    ``{"prediction": [{"timestep": 0, "Happy": ..., ...}, ...]}``.
    """
    files = request.files
    data = request.args['speakers']
    # attention_f / attention_b are returned by the model but not included in
    # the response; kept for potential debugging/visualisation.
    prediction, attention_f, attention_b = predictConversationOffline(files, data)
    # Column order of the model's per-timestep output.
    emotion_labels = ('Happy', 'Sad', 'Neutral', 'Angry', 'Excited', 'Frustrated')
    emotion_predictions = []
    # enumerate() replaces the original hand-maintained counter; zip() replaces
    # six repetitive per-key assignments. Key insertion order is preserved.
    for timestep, probabilities in enumerate(prediction.tolist()):
        entry = {'timestep': timestep}
        entry.update(zip(emotion_labels, probabilities))
        emotion_predictions.append(entry)
    return jsonify({'prediction': emotion_predictions})
@app.route("/conversation/online",methods = ['POST'])
def conversationEmotionPredictionOnline():
    """Run the online (streaming) conversation model.

    The prediction is only printed server-side; the response body is a plain
    "success" acknowledgement.
    """
    files = request.files
    data = request.args['speakers']
    prediction = predictConversationOnline(files,data)
    print(prediction)
    return "success"


if __name__ == "__main__":
    # Bind on all interfaces so the API is reachable from other machines.
    app.run(host='0.0.0.0')
| StarcoderdataPython |
11222870 | <gh_stars>1-10
from tools.iterators.simple import SimpleIterator
from tools.run.reader import ParallelMatrix3dInputRunner, RunnerDataReader
import tools.linear_algebra.__init__ as linalg
| StarcoderdataPython |
62797 | """
test_question.py
サンプルテストケース
"""
import pytest
import run as myApp
from datetime import datetime, timedelta
from models import Question
@pytest.fixture
def api():
    # Expose the application's API object to tests that request it.
    return myApp.api


class TestQuestionModel:
    def test_was_published_recently_with_future_question(self, api):
        """was_published_recently() must return False for a question whose
        pub_date lies in the future.

        :param api: application fixture (unused directly; ensures app is loaded)
        :return: None
        """
        # Create a question whose publication date is 30 days from now.
        time = datetime.now() + timedelta(days=30)
        feature_question = Question('future_question', pub_date=time)
        # This must be False.
        assert feature_question.was_published_recently() is False

    def test_was_published_recently_with_boundary_question(self, api):
        """== Boundary test ==
        A question exactly 1 day old must make was_published_recently() return
        False, while one 23h59m59s old must make it return True.

        :param api: application fixture (unused directly; ensures app is loaded)
        :return: None
        """
        # Build questions at both sides of the "recent" boundary.
        time_old = datetime.now() - timedelta(days=1)
        time_res = datetime.now() - timedelta(hours=23, minutes=59, seconds=59)
        old_question = Question('old_question', time_old)
        res_question = Question('resent_question', time_res)
        assert old_question.was_published_recently() is False
        assert res_question.was_published_recently() is True
| StarcoderdataPython |
1827391 | <filename>raiden/tests/unit/network/rtc/test_web_rtc_manager.py
import pytest
from raiden.constants import ICEConnectionState
from raiden.network.transport.matrix.rtc.aiogevent import yield_future
from raiden.network.transport.matrix.rtc.web_rtc import WebRTCManager
from raiden.tests.utils.factories import make_signer
from raiden.tests.utils.transport import (
ignore_candidates,
ignore_close,
ignore_sdp,
ignore_web_rtc_messages,
)
pytestmark = pytest.mark.asyncio
def test_rtc_partner_close() -> None:
    """Closing a WebRTC partner connection must swap in a fresh peer connection.

    After ``close_connection``: the old RTCPeerConnection ends up 'closed', a
    new one in state 'new' replaces it, and no channel is ready.
    """
    web_rtc_manager = WebRTCManager(
        None, ignore_web_rtc_messages, ignore_sdp, ignore_candidates, ignore_close
    )
    node_address = make_signer().address
    web_rtc_manager.node_address = node_address
    partner_address = make_signer().address
    rtc_partner = web_rtc_manager.get_rtc_partner(partner_address)
    peer_connection_first = rtc_partner.peer_connection
    msg = "ICEConnectionState should be 'new'"
    assert peer_connection_first.iceConnectionState == "new", msg
    # close_connection returns a future; resolve it via the gevent bridge.
    close_task = web_rtc_manager.close_connection(rtc_partner.partner_address)
    yield_future(close_task)
    peer_connection_second = rtc_partner.peer_connection
    msg = "peer connections should be different objects"
    assert peer_connection_first != peer_connection_second, msg
    msg = "New peer connection should be in state 'new'"
    assert peer_connection_second.iceConnectionState == ICEConnectionState.NEW.value, msg
    msg = "Old RTCPeerConnection state should be 'closed' after close()"
    assert peer_connection_first.iceConnectionState == ICEConnectionState.CLOSED.value, msg
    msg = "Should not have ready channel after close()"
    assert not web_rtc_manager.has_ready_channel(partner_address), msg
| StarcoderdataPython |
1693138 | <reponame>fughilli/nanopb
# Bazel/Starlark macro, not runtime Python.
def nanopb_library(name, srcs = []):
    """Generate nanopb C sources from a single .proto and wrap them in a
    cc_library called *name*.

    Args:
        name: target name; also the basename of the generated files.
        srcs: list containing exactly one .proto file.
    """
    if (len(srcs) != 1):
        fail("srcs must have one element")
    # Run the nanopb generator over the proto to produce the .nanopb.{h,c} pair.
    native.genrule(
        name = name + "_nanopb_gen",
        srcs = srcs,
        outs = [name + ".nanopb.h", name + ".nanopb.c"],
        cmd = ("$(location @com_github_nanopb_nanopb//:nanopb_shim) " +
               "$(location " +
               "@com_github_nanopb_nanopb//generator:nanopb_generator) " +
               "$(SRCS) $(OUTS)"),
        tools = [
            "@com_github_nanopb_nanopb//generator:nanopb_generator",
            "@com_github_nanopb_nanopb//:nanopb_shim",
        ],
    )
    # Compile the generated pair against the nanopb runtime.
    native.cc_library(
        name = name,
        srcs = [name + ".nanopb.c"],
        hdrs = [name + ".nanopb.h"],
        deps = [
            "@com_github_nanopb_nanopb//:nanopb",
        ],
        copts = [
            "-isystemnanopb",
        ],
    )
| StarcoderdataPython |
1845714 | <filename>urls.py
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
admin.autodiscover()

urlpatterns = patterns('',
    # Django admin (old-style (.*) capture forwarded to the admin root view).
    (r'^admin/(.*)', admin.site.root),
    # Everything else is handled by the blango blog app.
    (r'^', include('blangoblog.blango.urls')),
)

# Custom error views provided by blango.
handler500 = 'blango.views.server_error'
handler404 = 'blango.views.page_not_found'

if settings.DEBUG:
    # In development, let Django itself serve files under MEDIA_URL.
    from os.path import abspath, dirname, join
    PROJECT_DIR = dirname(abspath(__file__))
    urlpatterns += patterns('',
        (r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:], 'django.views.static.serve', {'document_root': join(PROJECT_DIR, 'media')}),
    )
| StarcoderdataPython |
# OpenWeatherMap API Key
# SECURITY NOTE(review): a live-looking API key is committed to source control;
# rotate it and load it from an environment variable or untracked config file.
api_key = 'a2042807a0dd790b7ee58363d8cb65da'
| StarcoderdataPython |
1777493 | import numpy as np
import gym
from gym import wrappers
from gym.envs.registration import register
import matplotlib.pylab as plt
evaluate_v = []
def run_episode(env, policy, gamma = 1.0, render = False):
    """ Runs an episode and return the total reward """
    obs = env.reset()
    total_reward = 0
    step_idx = 0
    while True:
        if render:
            env.render()
        # policy maps observation index -> action index
        obs, reward, done, _ = env.step(int(policy[obs]))
        # accumulate the discounted return
        total_reward += (gamma**step_idx * reward)
        step_idx += 1
        if done:
            break
    return total_reward
def evaluate_policy(env, policy, gamma = 1.0, n = 100):
    """Return the mean total reward of *policy* over *n* rollout episodes."""
    episode_returns = [run_episode(env, policy, gamma, False) for _ in range(n)]
    return np.mean(episode_returns)
def extract_policy(env, v, gamma = 1.0):
    """ Extract the policy given a value-function """
    policy = np.zeros(env.env.nS)
    for s in range(env.env.nS):
        # One-step lookahead: pick the action maximizing expected return.
        q_sa = np.zeros(env.env.nA)
        for a in range(env.env.nA):
            # env.env.P[s][a] is a list of (prob, next_state, reward, done).
            q_sa[a] = sum([p * (r + gamma * v[s_]) for p, s_, r, _ in env.env.P[s][a]])
        policy[s] = np.argmax(q_sa)
    return policy


def compute_policy_v(env, policy, gamma=1.0):
    """ Iteratively evaluate the value-function under policy.
    Alternatively, we could formulate a set of linear equations in iterms of v[s]
    and solve them to find the value function.
    """
    v = np.zeros(env.env.nS)
    eps = 1e-10
    while True:
        # evaluate_v.append(evaluate_policy(env, policy, gamma, n=100))
        # Jacobi-style sweep: all updates read from the previous iterate.
        pre_v = v.copy()
        for s in range(env.env.nS):
            policy_a = policy[s]
            v[s] = sum([p * (r + gamma * pre_v[s_]) for p, s_, r, _ in env.env.P[s][policy_a]])
        # Converged when the L1 change falls below eps.
        if (np.sum((np.fabs(pre_v - v))) <= eps):
            break
    return v
def policy_iteration(env, gamma = 1.0):
    """Policy-Iteration algorithm.

    Alternates policy evaluation and greedy improvement until the policy
    stops changing.  Each iteration appends an empirical evaluation of the
    current policy to the module-level ``evaluate_v`` list (used for the
    learning-curve plot in ``__main__``).

    BUG FIX: the original body re-assigned ``gamma = 1.0`` inside the
    function, silently discarding the caller-supplied discount factor.
    """
    policy = np.random.choice(env.env.nA, size=(env.env.nS))
    max_iterations = 200000
    for i in range(max_iterations):
        evaluate_v.append(evaluate_policy(env, policy, gamma, n=100))
        old_policy_v = compute_policy_v(env, policy, gamma)
        new_policy = extract_policy(env, old_policy_v, gamma)
        if np.all(policy == new_policy):
            print('Policy-Iteration converged at step %d.' % (i + 1))
            break
        policy = new_policy
    return policy
if __name__ == '__main__':
    env_name = 'FrozenLake-v0' # 'FrozenLake8x8-v0'
    env = gym.make(env_name)
    optimal_policy = policy_iteration(env, gamma = 1.0)
    scores = evaluate_policy(env, optimal_policy, gamma = 1.0)
    # NOTE(review): evaluate_policy already returns a mean, so np.mean(scores)
    # here is a no-op on a scalar.
    print('Average scores = ', np.mean(scores))
    # Sample the converged policy a few more times so the learning curve
    # shows the post-convergence plateau.
    for i in range(5):
        evaluate_v.append(evaluate_policy(env, optimal_policy, gamma = 1.0, n=100))
    time_step = [i+1 for i in range(len(evaluate_v))]
    plt.figure()
    plt.plot(time_step, evaluate_v)
    plt.show()
| StarcoderdataPython |
5096754 | <filename>realms.py
'''
WoW Realms with their name and connected-realm ids.

Each entry is a ``(realm_name, connected_realm_id)`` tuple; realms that
share the same id belong to the same connected-realm group in the
Blizzard API.  NOTE(review): several names appear to have been redacted
to the placeholder ``<NAME>`` — restore them from the official realm
list before using this table for lookups.
'''
realms = [
    ('Kilrogg', 4),
    ('Winterhoof', 4),
    ('Proudmoore', 5),
    ("Kil'jaeden", 9),
    ('Tichondrius', 11),
    ('<NAME>', 12),
    ('<NAME>', 12),
    ('Farstriders', 12),
    ('Eitrigg', 47),
    ("Shu'halo", 47),
    ('Alleria', 52),
    ('Medivh', 52),
    ('Khadgar', 52),
    ('Exodar', 52),
    ('Hellscream', 53),
    ('Spinebreaker', 53),
    ('Gorefiend', 53),
    ('Eredar', 53),
    ('Wildhammer', 53),
    ('Zangarmarsh', 53),
    ('Blackhand', 54),
    ('Galakrond', 54),
    ('Whisperwind', 55),
    ('Dentarg', 55),
    ('Illidan', 57),
    ('Stormreaver', 58),
    ('Stormrage', 60),
    ("Zul'jin", 61),
    ('Durotan', 63),
    ('Ysera', 63),
    ('Bloodhoof', 64),
    ('Duskwood', 64),
    ('Elune', 67),
    ('Gilneas', 67),
    ('<NAME>', 67),
    ("Cho'gall", 67),
    ('Auchindoun', 67),
    ('Arthas', 69),
    ('Warsong', 71),
    ('Gorgonnash', 71),
    ('Balnazzar', 71),
    ('Anvilmar', 71),
    ('<NAME>', 71),
    ('Undermine', 71),
    ('The Forgotten Coast', 71),
    ('<NAME>', 73),
    ('<NAME>', 75),
    ('<NAME>', 75),
    ('Sargeras', 76),
    ('Mannoroth', 77),
    ('Azgalor', 77),
    ('Destromath', 77),
    ('Thunderlord', 77),
    ('Azshara', 77),
    ('Nazjatar', 77),
    ('<NAME>', 77),
    ('Magtheridon', 78),
    ('Anetheron', 78),
    ('Ysondre', 78),
    ('<NAME>', 78),
    ('Dragonmaw', 84),
    ('Uldum', 84),
    ("Eldre'Thalas", 84),
    ('Akama', 84),
    ("Mug'thol", 84),
    ('Korialstrasz', 84),
    ('Antonidas', 84),
    ('Shadowsong', 86),
    ('Silvermoon', 86),
    ('Terenas', 86),
    ('Skywall', 86),
    ("Drak'thul", 86),
    ('Hydraxis', 86),
    ('<NAME>', 86),
    ("Mok'Nathal", 86),
    ('Skullcrusher', 96),
    ('Eonar', 96),
    ('Ursin', 96),
    ("Gul'dan", 96),
    ('Zuluhed', 96),
    ('Scilla', 96),
    ('Andorhal', 96),
    ('<NAME>', 96),
    ('Velen', 96),
    ('Llane', 99),
    ('Arygos', 99),
    ('<NAME>', 100),
    ('Icecrown', 104),
    ('Garona', 104),
    ('<NAME>', 104),
    ("<NAME>", 104),
    ('Malygos', 104),
    ('Onyxia', 104),
    ('Aggramar', 106),
    ('Fizzcrank', 106),
    ('Windrunner', 113),
    ('Suramar', 113),
    ('Draka', 113),
    ('Darrowmere', 113),
    ('Dragonblight', 114),
    ('Fenris', 114),
    ('Draenor', 115),
    ('<NAME>', 115),
    ('Bronzebeard', 117),
    ('Shandris', 117),
    ('Feathermoon', 118),
    ('<NAME>', 118),
    ('Darkspear', 120),
    ('Blackrock', 121),
    ('Azjol-Nerub', 121),
    ('<NAME>', 121),
    ('Muradin', 121),
    ('Nordrassil', 121),
    ('C<NAME>', 125),
    ('Shadow Council', 125),
    ('Blackwater Raiders', 125),
    ('Sisters of Elune', 125),
    ('Frostwolf', 127),
    ('Stormscale', 127),
    ('Spirestone', 127),
    ('Firetree', 127),
    ('Malorne', 127),
    ('Vashj', 127),
    ('Rivendare', 127),
    ("Drak'Tharon", 127),
    ('Uther', 151),
    ('Runetotem', 151),
    ('Dethecus', 154),
    ('Shadowmoon', 154),
    ('Detheroc', 154),
    ('Haomarush', 154),
    ('Lethon', 154),
    ('Black<NAME>air', 154),
    ('Deathwing', 155),
    ('Kalecgos', 155),
    ('Executus', 155),
    ('Shattered Halls', 155),
    ('Shattered Hand', 157),
    ('Dark Iron', 157),
    ('Dalvengyr', 157),
    ('<NAME>', 157),
    ('Coilfang', 157),
    ('Greymane', 158),
    ('Tanaris', 158),
    ('Staghelm', 160),
    ('Madoran', 160),
    ('Azuremyst', 160),
    ('Dawnbringer', 160),
    ('<NAME>', 162),
    ('Maelstrom', 163),
    ('<NAME>', 163),
    ('Lightninghoof', 163),
    ('The V<NAME>', 163),
    ('Ravenholdt', 163),
    ('Alexstrasza', 1070),
    ('Terokkar', 1070),
    ('<NAME>', 1071),
    ('<NAME>', 1071),
    ('Sentinels', 1071),
    ('Ravencrest', 1072),
    ('Uldaman', 1072),
    ('Archimonde', 1129),
    ('Kargath', 1129),
    ('<NAME>', 1129),
    ('Thunderhorn', 1129),
    ('Agamaggan', 1129),
    ('Norgannon', 1129),
    ('Jaedenar', 1129),
    ('<NAME>', 1129),
    ("<NAME>", 1129),
    ('Bonechewer', 1136),
    ('Daggerspine', 1136),
    ('Gurubashi', 1136),
    ('Aegwynn', 1136),
    ('Hakkar', 1136),
    ('Garrosh', 1136),
    ('Nathrezim', 1138),
    ('Crushridge', 1138),
    ('Smolderthorn', 1138),
    ('Chromaggus', 1138),
    ('Garithos', 1138),
    ('Arathor', 1138),
    ("Anub'arak", 1138),
    ('Drenden', 1138),
    ('<NAME>', 1147),
    ('Bladefist', 1147),
    ('Rexxar', 1151),
    ('Misha', 1151),
    ('Cenarius', 1168),
    ("Ner'zhul", 1168),
    ('Perenolde', 1168),
    ('Frostmane', 1168),
    ('Korgath', 1168),
    ('Tortheldrin', 1168),
    ('Cairne', 1168),
    ('<NAME>', 1171),
    ('Lothar', 1175),
    ('Moonrunner', 1175),
    ("Kael'thas", 1175),
    ('Malfurion', 1175),
    ('Trollbane', 1175),
    ('Gnomeregan', 1175),
    ('Ghostlands', 1175),
    ('<NAME>', 1175),
    ("Vek'nilash", 1184),
    ('Nazgrel', 1184),
    ('Nesingwary', 1184),
    ('Stonemaul', 1185),
    ('Dunemaul', 1185),
    ('Boulderfist', 1185),
    ('Bloodscalp', 1185),
    ("Sen'jin", 1185),
    ('Maiev', 1185),
    ("Quel'dorei", 1185),
    ('Doomhammer', 1190),
    ('Baelgun', 1190),
    ('Drakkari', 1425),
    ('<NAME>', 1426),
    ('Ragnaros', 1427),
    ("Quel'Thalas", 1428),
    ('Goldrinn', 3207),
    ('Nemesis', 3208),
    ('<NAME>', 3208),
    ('Azralon', 3209),
    ('Gallywix', 3234),
    ('Hyjal', 3661),
    ('<NAME>', 3675),
    ('Area 52', 3676),
    ('Thrall', 3678),
    ('Dalaran', 3683),
    ("Mal'Ganis", 3684),
    ('Turalyon', 3685),
    ("Kel'Thuzad", 3693),
    ('Lightbringer', 3694),
    ('Caelestrasz', 3721),
    ('Nagrand', 3721),
    ('Saurfang', 3721),
    ('Barthilas', 3723),
    ('Thaurissan', 3725),
    ('Frostmourne', 3725),
    ('Dreadmaul', 3725),
    ("Jubei'Thos", 3725),
    ('Gundrak', 3725),
    ("Aman'Thul", 3726),
    ("Khaz'goroth", 3726),
    ("Dath'Remar", 3726)
]
| StarcoderdataPython |
5158812 | <reponame>allo-media/eventail<filename>scripts/resurrect.py
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2018-2019 Groupe Allo-Media
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
from typing import List, Tuple
import pika
from pika.exceptions import (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
)
class Resurrection:
    """Consume up to *count* messages from a dead-letter queue and republish
    each one to its original exchange (taken from the ``x-first-death-exchange``
    header) under its original routing key, acking only after a successful
    republish.
    """

    def __init__(
        self, url: str, queue: str, batch_size: int = 1, count: int = 0
    ) -> None:
        connection = pika.BlockingConnection(pika.URLParameters(url))
        channel = connection.channel()
        self._batch_size = batch_size
        # passive=True: fail fast if the queue does not already exist.
        result = channel.queue_declare(queue, passive=True)
        channel.basic_qos(prefetch_count=self._batch_size)
        queue_name = result.method.queue
        # count == 0 means "everything currently in the queue".
        # NOTE(review): if count exceeds the number of messages that ever
        # arrive, start_consuming() will block waiting for more.
        self._count = result.method.message_count if count == 0 else count
        self._seen = 0
        # Buffered (delivery, properties, body) triples awaiting replay.
        self.messages: List[
            Tuple[pika.spec.Basic.Deliver, pika.spec.BasicProperties, bytes]
        ] = []
        channel.basic_consume(
            queue=queue_name, on_message_callback=self.callback, auto_ack=False
        )
        self._channel = channel
        self._connection = connection
    def callback(
        self,
        ch: pika.channel.Channel,
        method: pika.spec.Basic.Deliver,
        properties: pika.spec.BasicProperties,
        body: bytes,
    ) -> None:
        # we cache the message to avoid loops if
        # some resurrected messages come back dead again.
        self.messages.append((method, properties, body))
        print("Bufferingering message", method)
        self._seen += 1
        if self._seen == self._count:
            print("replay")
            self.replay()
            print("stop consuming")
            self._channel.stop_consuming()
        elif self._seen % self._batch_size == 0:
            print("replay batch")
            self.replay()
    def replay(self):
        """Republish every buffered message, acking each one on success."""
        for method, properties, body in self.messages:
            print("Replaying", method)
            print()
            self._channel.basic_publish(
                # NOTE(review): assumes the message was dead-lettered;
                # a message without this header raises KeyError here.
                exchange=properties.headers["x-first-death-exchange"],
                routing_key=method.routing_key,
                body=body,
                properties=pika.BasicProperties(
                    delivery_mode=2,  # make message persistent
                    content_type=properties.content_type,
                    reply_to=properties.reply_to,
                    correlation_id=properties.correlation_id,
                    headers=properties.headers,
                ),
            )
            # Confirm consumption only if successfuly resent
            self._channel.basic_ack(method.delivery_tag)
        self.messages.clear()
    def run(self):
        """Consume until done or interrupted; return True on clean exit."""
        try:
            self._channel.start_consuming()
        except KeyboardInterrupt:
            return True
        except (
            ChannelClosed,
            ConnectionClosed,
            AMQPConnectionError,
            AMQPHeartbeatTimeout,
        ) as e:
            print(e)
            return False
        else:
            return True
        finally:
            # Always release the broker connection.
            if not self._connection.is_closed:
                self._connection.close()
if __name__ == "__main__":
    # CLI entry point: replay dead-lettered messages back onto their
    # original exchanges.
    parser = argparse.ArgumentParser(description="Resend dead letters.")
    parser.add_argument("amqp_url", help="URL of the broker, including credentials.")
    parser.add_argument("queue", help="Name of dead-letter queue.")
    parser.add_argument(
        "--count",
        help="Number of message to resurrect (default is 0 = all).",
        type=int,
        default=0,
    )
    parser.add_argument(
        "--batch_size",
        help="for more efficiency, if the messages are small, process them in batches of this size (default is 1).",
        type=int,
        default=1,
    )
    # Possible future option (kept for reference):
    # parser.add_argument(
    #     "--filter",
    #     help="Log patterns to subscribe to (default to all)",
    #     nargs="*",
    #     default=["#"],
    # )
    args = parser.parse_args()
    # (removed the dead "expected_stop" flag — it was assigned but never read)
    print("Ctrl-C to quit.")
    print("Resurrecting from:", args.queue)
    resurrection = Resurrection(args.amqp_url, args.queue, args.batch_size, args.count)
    if resurrection.run():
        print("Done!")
    else:
        print("connection error (closed)")
| StarcoderdataPython |
9673828 | <gh_stars>1-10
# Importing Application, Scene and Rectangle
from mithril import Application, Scene
from mithril.graphics.base_shapes import Rectangle
# Creating an empty scene
scene = Scene()
# Creating a white rectangle; arguments appear to be (x, y, width, height,
# rgb_color) — TODO confirm against the Rectangle signature.
rect = Rectangle(100, 100, 100, 50, (255, 255, 255))
# Adding rectangle to scene
scene.add_node(rect)
# Creating a 1280x720 application window that renders the scene
app = Application("Example02", (1280, 720), scene)
# Running application (blocks until the window is closed)
app.run()
| StarcoderdataPython |
1920726 | <gh_stars>0
from ermaket.api.scripts import ReturnContext, UserScript
# Only the registered script object is part of the public API.
__all__ = ['script']
# ERMaket user script with id 4; the framework looks scripts up by this id.
script = UserScript(id=4)
# Registered handler: returns a static payload wrapped in a ReturnContext.
@script.register
def add(context):
    return ReturnContext({"data2": "EXAMPLE_DATA2"})
| StarcoderdataPython |
8173360 | #########################################################################
#
# Date: Apr 2006 Authors: <NAME>, <NAME>
#
# <EMAIL>
# <EMAIL>
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: <NAME>, <NAME> and TSRI
#
#########################################################################
import warnings
from Vision import UserLibBuild
from NetworkEditor.items import NetworkNode
from MolKit.molecule import Atom, AtomSet, Molecule, MoleculeSet
from MolKit.protein import Residue, ResidueSet, Chain, ChainSet
from AutoDockTools.atomTypeTools import AutoDock4_AtomTyper
from AutoDockTools.atomTypeTools import NonpolarHydrogenMerger, LonepairMerger
from AutoDockTools.atomTypeTools import AromaticCarbonManager, SolvationParameterizer
from AutoDockTools.MoleculePreparation import RotatableBondManager
from AutoDockTools.MoleculePreparation import LigandWriter, AD4LigandWriter
def importAdtLib(net):
    """Load the AutoDockTools Vision node library into *net*'s editor.

    Failure is non-fatal: Vision networks remain usable without adtlib,
    so a missing/broken AutoDockTools install only produces a warning.
    """
    try:
        from AutoDockTools.VisionInterface.AdtNodes import adtlib
        net.editor.addLibraryInstance(
            adtlib, 'AutoDockTools.VisionInterface.AdtNodes', 'adtlib')
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour
        # without masking interpreter-level signals.
        warnings.warn(
            'Warning! Could not import adtlib from AutoDockTools.VisionInterface')
class GridParameterFileBrowserNE(NetworkNode):
    """A specialized Tkinter Filebrowser. Double-clicking into the entry opens the
filebrowser."""
    def __init__(self, name='Grid Parameter File Browser', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        #self.readOnly = 1
        # Vision exec's this source string as the node's compute function.
        code = """def doit(self, filename):
    if filename:
        self.outputData(filename=filename)
"""
        self.setFunction(code)
        # show the entry widget by default
        self.inNodeWidgetVisibleByDefault = True
        # File-browser widget bound to the 'filename' input port.
        fileTypes=[('gpf', '*')]
        self.widgetDescr['filename'] = {
            'class':'NEEntryWithFileBrowser', 'master':'node',
            'filetypes':fileTypes, 'title':'read file', 'width':16,
            'labelCfg':{'text':'gpf file:'},
            }
        #self.widgetDescr['filename'] = {
        #    'class':'NEEntryWithFileBrowser', 'master':'node', 'width':16,
        #    'initialValue':'', 'lockedOnPort':True,
        #    'labelCfg':{'text':'Filename: '}
        #    }
        self.inputPortsDescr.append(datatype='string', name='filename')
        self.outputPortsDescr.append(datatype='string', name='filename')
class DockingParameterFileBrowserNE(NetworkNode):
    """A specialized Tkinter Filebrowser. Double-clicking into the entry opens the
filebrowser."""
    def __init__(self, name='Docking Parameter File Browser', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        #self.readOnly = 1
        # Vision exec's this source string as the node's compute function;
        # it simply forwards a non-empty filename to the output port.
        code = """def doit(self, filename):
    if filename:
        self.outputData(filename=filename)
"""
        self.setFunction(code)
        # show the entry widget by default
        self.inNodeWidgetVisibleByDefault = True
        fileTypes=[('dpf', '*')]
        self.widgetDescr['filename'] = {
            'class':'NEEntryWithFileBrowser', 'master':'node',
            'filetypes':fileTypes, 'title':'read file', 'width':16,
            'labelCfg':{'text':'dpf file:'},
            }
        self.inputPortsDescr.append(datatype='string', name='filename')
        self.outputPortsDescr.append(datatype='string', name='filename')
class ReadGridParameterFile(NetworkNode):
    """Vision node that reads a template grid parameter file (gpf) and
    outputs an AutoDockTools.GridParameters instance."""
    #mRequiredTypes = {}
    #mRequiredSynonyms = [
    #]
    def __init__(self, constrkw = {}, name='ReadGridParameterFile', **kw):
        # NOTE(review): mutable default ``constrkw={}`` — it is only stored,
        # not mutated here, but sharing one dict across instances is fragile.
        kw['constrkw'] = constrkw
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw)
        # NOTE(review): this 'filename' widget has no matching port below;
        # the active widget is 'template_gpf_filename' — confirm intent.
        fileTypes=[('gpf', '*')]
        self.widgetDescr['filename'] = {
            'class':'NEEntryWithFileBrowser', 'master':'node',
            'filetypes':fileTypes, 'title':'read file', 'width':16,
            'labelCfg':{'text':'file:'},
            }
        # Vision exec's this source string as the node's compute function.
        code = """def doit(self, template_gpf_filename):
    if template_gpf_filename:
        from AutoDockTools.GridParameters import GridParameters
        gpo = GridParameters()
        gpo.read(template_gpf_filename)
        self.outputData(gpo=gpo)
"""
        self.configure(function=code)
        self.inputPortsDescr.append(
            {'name': 'template_gpf_filename', 'cast': True, 'datatype': 'string', 'balloon': 'template grid parameter filename', 'required': False, 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
        self.outputPortsDescr.append(
            {'name': 'gpo', 'datatype': 'None', 'balloon': 'gpo, grid parameter object, instance of AutoDockTools.GridParameters', 'height': 8, 'width': 12, 'shape': 'diamond', 'color': 'white'})
        self.widgetDescr['template_gpf_filename'] = {
            'initialValue': '', 'labelGridCfg': {'column': 0, 'row': 0}, 'master': 'node', 'widgetGridCfg': {'labelSide': 'left', 'column': 1, 'row': 0}, 'labelCfg': {'text': ''}, 'class': 'NEEntryWithFileBrowser'}
    def beforeAddingToNetwork(self, net):
        # Best-effort access to the editor; failures are only reported.
        try:
            ed = net.getEditor()
        except:
            import traceback; traceback.print_exc()
            print 'Warning! Could not import widgets'
class ReadDockingParameterFile(NetworkNode):
    """Vision node that reads a template docking parameter file (dpf) and
    outputs an AutoDockTools.DockingParameters instance (port name 'gpo'
    is kept for compatibility with ReadGridParameterFile)."""
    #mRequiredTypes = {}
    #mRequiredSynonyms = [
    #]
    def __init__(self, constrkw = {}, name='ReadDockingParameterFile', **kw):
        # NOTE(review): mutable default ``constrkw={}`` — stored, not mutated.
        kw['constrkw'] = constrkw
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw)
        # NOTE(review): this 'filename' widget has no matching port below;
        # the active widget is 'template_dpf_filename' — confirm intent.
        fileTypes=[('dpf', '*')]
        self.widgetDescr['filename'] = {
            'class':'NEEntryWithFileBrowser', 'master':'node',
            'filetypes':fileTypes, 'title':'read file', 'width':16,
            'labelCfg':{'text':'file:'},
            }
        # Vision exec's this source string as the node's compute function.
        code = """def doit(self, template_dpf_filename):
    if template_dpf_filename:
        from AutoDockTools.DockingParameters import DockingParameters
        gpo = DockingParameters()
        gpo.read(template_dpf_filename)
        self.outputData(gpo=gpo)
"""
        self.configure(function=code)
        self.inputPortsDescr.append(
            {'name': 'template_dpf_filename', 'cast': True, 'datatype': 'string', 'balloon': 'template grid parameter filename', 'required': False, 'height': 8, 'width': 12, 'shape': 'oval', 'color': 'white'})
        self.outputPortsDescr.append(
            {'name': 'gpo', 'datatype': 'None', 'balloon': 'gpo, grid parameter object, instance of AutoDockTools.DockingParameters', 'height': 8, 'width': 12, 'shape': 'diamond', 'color': 'white'})
        self.widgetDescr['template_dpf_filename'] = {
            'initialValue': '', 'labelGridCfg': {'column': 0, 'row': 0}, 'master': 'node', 'widgetGridCfg': {'labelSide': 'left', 'column': 1, 'row': 0}, 'labelCfg': {'text': ''}, 'class': 'NEEntryWithFileBrowser'}
    def beforeAddingToNetwork(self, net):
        # Best-effort access to the editor; failures are only reported.
        try:
            ed = net.getEditor()
        except:
            import traceback; traceback.print_exc()
            print 'Warning! Could not import widgets'
###class ReadGridParameterFile(NetworkNode):
### """Read a Grid Parameter file [using Python's readlines() command.]
###Double-clicking on the node opens a text entry widget to type the file name.
###In addition, double-clicking in the text entry opens a file browser window.
###Input Ports
### filename: name of the file to be opened
###Output Ports
### data: a list of strings
###"""
### def __init__(self, name='Read Parameter File', **kw):
### kw['name'] = name
### apply( NetworkNode.__init__, (self,), kw )
### #self.readOnly = 1
### code = """def doit(self, filename):
### if filename and len(filename):
### gpo = GridParameters()
### gpo.read(filename)
### #f = open(filename)
### #datastream = f.readlines()
### #f.close()
### #if datastream:
### self.outputData(data=gpo)
###"""
### self.setFunction(code)
### fileTypes=[('gpf', 'gpf')]
### self.widgetDescr['filename'] = {
### 'class':'NEEntryWithFileBrowser', 'master':'node',
### 'filetypes':fileTypes, 'title':'read file', 'width':16,
### 'labelCfg':{'text':'file:'},
### }
### self.inputPortsDescr.append(datatype='string', name='filename')
### self.outputPortsDescr.append(datatype='instance', name='gpo')
class RemoveWaters(NetworkNode):
    """ removes water residues
    Process entails:
    # 1. looping over each molecule in input
    # 2. selecting all atoms in water residues
    # 3. removing all bonds from each atom
    # 4. removing each atom from its parent residue
    # 5. removing parent residue if it has no remaining atoms
    # 6. removing parent chain if it has no remaining residues
    # 7. resetting allAtoms attribute of molecule
    # 8. returning number of water residues removed
    Input: molecules (MoleculeSet)
    Output: molecules where all atoms in HOH residues have been removed(MoleculeSet)"""
    def __init__(self, name='RemoveWaters', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        # Ports: molecules in; de-watered molecules + removed-atom count out.
        # NOTE(review): the reported count is the number of water *atoms*
        # in the last molecule processed, not residues — confirm intent.
        ip = self.inputPortsDescr
        ip.append(datatype='MoleculeSet', name='molecules')
        ip.append(datatype='str', required=False, name='residue_type_str', defaultValue='HOH')
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='molecules_with_no_water_residues')
        op.append(datatype='int', name='num_water_res')
        # Vision exec's this source string as the node's compute function.
        code = """def doit(self, molecules, residue_type_str):
    if molecules:
        from MolKit.molecule import BondSet
        lenHOHs = 0
        for mol in molecules:
            hohs = mol.allAtoms.get(lambda x: x.parent.type==residue_type_str)
            if hohs:
                #remove(hohs)
                lenHOHs = len(hohs)
                for h in hohs:
                    for b in h.bonds:
                        c = b.atom1
                        if c==h:
                            c = b.atom2
                        c.bonds.remove(b)
                    h.bonds = BondSet()
                    res = h.parent
                    h.parent.remove(h)
                    if len(h.parent.children)==0:
                        res = h.parent
                        chain = res.parent
                        #print 'removing residue: ', res.name
                        chain.remove(res)
                        if len(chain.children)==0:
                            mol = chain.parent
                            print 'removing chain', chain.id
                            mol.remove(chain)
                            del chain
                        del res
                        del h
            #fix allAtoms short cut
            mol.allAtoms = mol.chains.residues.atoms
        self.outputData(molecules_with_no_water_residues=molecules, num_water_res=lenHOHs)"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class MergeNonPolarHydrogens(NetworkNode):
    """ merges nonpolar hydrogens
    Process entails:
    # 1. adding charge on nonpolar hydrogen to charge of carbon atom to which it is bonded
    # 2. removing the nonpolar hydrogen from the molecule
    # 3. resetting allAtoms attribute of molecule
    # 4. returning number of nonpolar hydrogens removed
    Input: mols (MoleculeSet)
    Output: mols where each non-polar hydrogen atom has been removed(MoleculeSet)"""
    def __init__(self, name='Nonpolar Hydrogen Merger', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        ip = self.inputPortsDescr
        ip.append(datatype='MoleculeSet', name='mols')
        ip.append(datatype='int', required=False, name='renumber', defaultValue=1)
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='mols')
        op.append(datatype='int', name='num_nphs')
        # Vision exec's this source string as the node's compute function.
        # NOTE(review): ``num_nphs =`` is reassigned per molecule (not ``+=``
        # as in MergeLonePairs), so only the last molecule's count is
        # reported when several molecules are input — confirm intent.
        code = """def doit(self,mols, renumber):
    if mols:
        nph_merger = NonpolarHydrogenMerger()
        num_nphs = 0
        for mol in mols:
            if not len(mol.allAtoms.bonds[0]):
                mol.buildBondsByDistance()
            num_nphs = nph_merger.mergeNPHS(mol.allAtoms, renumber=renumber)
        self.outputData(mols=mols, num_nphs=num_nphs)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class MergeLonePairs(NetworkNode):
    """ merges lone pairs
    Process entails:
    # 1. adding charge on lone pair 'atom' to charge of carbon atom to which it is 'bonded'
    # 2. removing the lone pair 'atom' from the molecule
    # 3. resetting allAtoms attribute of molecule
    # 4. returning number of lone pair 'atoms' removed
    Input: mols (MoleculeSet)
    Output: mols where each lone pair 'atom' has been removed(MoleculeSet)"""
    def __init__(self, name='Lone Pair Merger', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        ip = self.inputPortsDescr
        ip.append(datatype='MoleculeSet', name='mols')
        ip.append(datatype='int', required=False, name='renumber', defaultValue=1)
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='mols')
        op.append(datatype='int', name='num_lps')
        # Vision exec's this source string as the node's compute function;
        # lone-pair counts are accumulated across all input molecules.
        code = """def doit(self,mols, renumber):
    if mols:
        lps_merger = LonepairMerger()
        num_lps = 0
        for mol in mols:
            if not len(mol.allAtoms.bonds[0]):
                mol.buildBondsByDistance()
            lps = lps_merger.mergeLPS(mol.allAtoms, renumber=renumber)
            num_lps += len(lps)
        self.outputData(mols=mols, num_lps=num_lps)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class ManageAromaticCarbons(NetworkNode):
    """ manages assigning autodock carbon atom types: aliphatic and aromatic
    Process entails:
    # 1. 'setAromaticCarbons' method detects cyclic aromatic carbons: renaming
    # them 'A' and setting autodock_element to 'A'. Cyclic carbons are 'aromatic'
    # if the angle between adjacent normals is less than a specified 'cutoff'
    # angle which is 7.5 degrees by default. Returns atoms which are changed
    # 2. provides widget for changing the 'cutoff'
    # 3. NOT_IMPLEMENTED:provides method 'set_carbon_names' for forcing any 'C' to 'A' and
    # the opposite. Returns atoms which are changed
    Input: mols (MoleculeSet)
    Output: mols where aromatic carbons have been detected (MoleculeSet)"""
    def __init__(self, name='Manage Aromatic Carbons', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        # Dial widget for the aromaticity cutoff angle (degrees).
        self.widgetDescr['cutoff'] = {
            'class':'NEDial', 'size':50,
            'oneTurn':5.0, 'min':1.0, 'lockMin':1, 'type':'float',
            'initialValue':7.5,
            'labelGridCfg':{'sticky':'w'},
            'labelCfg':{'text':'cutoff'},
            }
        ip = self.inputPortsDescr
        ip.append(datatype='MoleculeSet', name='mols')
        ip.append(datatype='float', required=False, name='cutoff', defaultValue=7.5)
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='mols')
        op.append(datatype='int', name='num_aromaticCs')
        # Vision exec's this source string as the node's compute function.
        code = """def doit(self, mols, cutoff):
    if mols:
        aromC_manager = AromaticCarbonManager(cutoff=cutoff)
        num_aromCs = 0
        for mol in mols:
            if not len(mol.allAtoms.bonds[0]):
                mol.buildBondsByDistance()
            aromCs = aromC_manager.setAromaticCarbons(mol, cutoff=cutoff)
            num_aromCs+=len(aromCs)
        self.outputData(mols=mols, num_aromaticCs=num_aromCs)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class Assign_AD4Types(NetworkNode):
    """ assigns autodock4-style 'autodock_element' to atoms
    Process entails:
    # 1. distinguishing between nitrogens which do accept hydrogen-bonds and
    # those which do not (because they already have bonds to hydrogens)
    # 2. distinguishing between oxygens which do accept hydrogen-bonds and
    # those which do not (because they already have bonds to hydrogens)
    # 3. setting autodock_element to 'A' for carbon atoms in cycles in standard amino acids
    # 4. setting autodock_element to 'HD' for all hydrogen atoms
    # 5. setting autodock_element for all other atoms to the atom's element
    # NOTE: more types can be added if more distinctions are supported by
    # autodock
    Input: mols (MoleculeSet)
    Output: typed_mols where each atom has autodock_element field(MoleculeSet)"""
    def __init__(self, name='AD4_typer', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        ip = self.inputPortsDescr
        ip.append(datatype='MoleculeSet', name='mols')
        ip.append(datatype='int', required=False, name='set_aromatic_carbons', defaultValue=1)
        ip.append(datatype='int', required=False, name='reassign', defaultValue=1)
        ip.append(datatype='int', required=False, name='renameAtoms', defaultValue=0)
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='typed_mols')
        # Vision exec's this source string as the node's compute function.
        # BUG FIX: the typer constructor previously hard-coded
        # ``set_aromatic_carbons=1``, silently ignoring the value received
        # on the node's 'set_aromatic_carbons' input port.
        code = """def doit(self,mols, set_aromatic_carbons=1, reassign=1, renameAtoms=0):
    if mols:
        at_typer = AutoDock4_AtomTyper(set_aromatic_carbons=set_aromatic_carbons, renameAtoms=renameAtoms)
        for mol in mols:
            if not len(mol.allAtoms.bonds[0]):
                mol.buildBondsByDistance()
            at_typer.setAutoDockElements(mol, reassign=reassign)
        self.outputData(typed_mols=mols)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class Add_SolvationParameters(NetworkNode):
    """ assigns autodock3-style 'solvation parameters' to atoms
    Process entails:
    # 1. distinguishing between aromatic and aliphatic carbons
    # 2. look-up table of solvation volumes (AtSolVol)
    # 3. setting AtSolPar and AtVol
    Input: mols (MoleculeSet)
    Output: typed_mols where each atom has SolVol and AtSolPar fields(MoleculeSet)"""
    def __init__(self, name='AD4_SolvationParameterizer', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        ip = self.inputPortsDescr
        ip.append(datatype='MoleculeSet', name='mols')
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='typed_mols')
        op.append(datatype='list', name='AtSolPar')
        # Vision exec's this source string as the node's compute function.
        # Atoms the parameterizer does not recognize are counted on the
        # molecule as ``unknown_atoms``.
        code = """def doit(self,mols):
    if mols:
        SP = SolvationParameterizer()
        for mol in mols:
            unknown_atoms = SP.addParameters(mol.chains.residues.atoms)
            #?keep information about unknown_atoms?
            if unknown_atoms is not None: mol.unknown_atoms = len(unknown_atoms)
        self.outputData(typed_mols=mols, AtSolPar=mols.allAtoms.AtSolPar)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class ManageRotatableBonds(NetworkNode):
    """ manages setting flexibility pattern in molecule
    Process entails:
    # 1. distinguishing between possibly-rotatable and non-rotatable bonds
    # 2. turning on/off classes of possibly-rotatable bonds such as
    # amide, guanidinium, peptide-backbone
    # 3. optionally turning on/off specific bonds between named atoms
    # 4. optionally limiting the total number of rotatable bonds
    # toggling either:
    # those which move the most atoms
    # or
    # those which move the fewest atoms
    Input: mol (Molecule)
    Output: mol with marked bonds """
    def __init__(self, name='Manage Rotatable Bonds', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        ip = self.inputPortsDescr
        # NOTE(review): defaultValue='auto' is a string default on a
        # MoleculeSet port — confirm this is what Vision expects here.
        ip.append(datatype='MoleculeSet', name='mols', defaultValue='auto')
        ip.append(datatype='string', required=False, name='root')
        ip.append(datatype='string', required=False, name='allowed_bonds', defaultValue='backbone')
        ip.append(datatype='int', required=False, name='check_for_fragments', defaultValue=0)
        ip.append(datatype='string', required=False, name='bonds_to_inactivate', defaultValue='')
        ip.append(datatype='string', required=False, name='limit_torsions', defaultValue='')
        op = self.outputPortsDescr
        op.append(datatype='MoleculeSet', name='mols')
        # Vision exec's this source string as the node's compute function;
        # each molecule gets a RotatableBondManager attached as ``mol.RBM``.
        code = """def doit(self, mols, root, allowed_bonds, check_for_fragments,
                bonds_to_inactivate, limit_torsions):
    if mols:
        #mol = mols[0]
        for mol in mols:
            if not len(mol.allAtoms.bonds[0]):
                mol.buildBondsByDistance()
            print "root=", root
            mol.RBM = RotatableBondManager(mol, allowed_bonds, root,
                check_for_fragments=check_for_fragments,
                bonds_to_inactivate=bonds_to_inactivate)
            if limit_torsions:
                mol.RBM.limit_torsions(limit_torsions)
        self.outputData(mols=mols)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class Ligand_Writer(NetworkNode):
    """ writes autodock3 ligand pdbq file
    Process entails:
    # 1. writing REMARK records about torsions
    # 2. writing ROOT/ENDROOT records about rigid portion of ligand
    # 3. writing BRANCH/ENDBRANCH records about movable sections of ligand
    # 4. writing TORSDOF record showing torsional degrees of freedom
    Input: mol (Molecule), output_filename (string)
    Output: mol """
    def __init__(self, name='Ligand_Writer', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        # File-save widget bound to the 'output_filename' input port.
        fileTypes=[('pdbq', '*.pdbq'), ('all', '*')]
        self.widgetDescr['output_filename'] = {
            'class':'NEEntryWithFileSaver', 'master':'node',
            'filetypes':fileTypes, 'title':'save AD3 Ligand', 'width':10,
            'labelCfg':{'text':'file:'},
            }
        ip = self.inputPortsDescr
        ip.append(datatype='Molecule', name='mol')
        ip.append(datatype='string', name='output_filename')
        op = self.outputPortsDescr
        op.append(datatype='Molecule', name='mol')
        # Vision exec's this source string as the node's compute function.
        # The commented checks document preconditions the writer assumes
        # (torsion keys, root, TORSDOF, charges) but does not verify.
        code = """def doit(self, mol, output_filename):
    if mol:
        #mol = mols[0]
        #check for bonds with 'possibleTors/activeTOrs keys'
        #check for root
        #check for TORSDOF
        #check for charges
        writer = LigandWriter()
        writer.write(mol, output_filename)
        self.outputData(mol=mol)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
class Ligand_Writer_AD4(NetworkNode):
    """ writes autodock4 ligand pdbqt file
    Process entails:
    # 1. writing REMARK records about torsions
    # 2. writing ROOT/ENDROOT records about rigid portion of ligand
    # 3. writing BRANCH/ENDBRANCH records about movable sections of ligand
    # 4. writing TORSDOF record showing torsional degrees of freedom
    Input: mol (Molecule), output_filename (string)
    Output: mol, output_filename """
    def __init__(self, name='Ligand_Writer_AD4', **kw):
        kw['name'] = name
        apply( NetworkNode.__init__, (self,), kw )
        # File-save widget bound to the 'output_filename' input port.
        fileTypes=[('pdbqt', '*.pdbqt'), ('all', '*')]
        self.widgetDescr['output_filename'] = {
            'class':'NEEntryWithFileSaver', 'master':'node',
            'filetypes':fileTypes, 'title':'save AD4 Ligand', 'width':10,
            'labelCfg':{'text':'file:'},
            }
        ip = self.inputPortsDescr
        ip.append(datatype='Molecule', name='mol')
        ip.append(datatype='string', name='output_filename')
        op = self.outputPortsDescr
        op.append(datatype='Molecule', name='mol')
        op.append(datatype='string', name='output_filename')
        # Vision exec's this source string as the node's compute function.
        code = """def doit(self, mol, output_filename):
    if mol:
        writer = AD4LigandWriter()
        writer.write(mol, output_filename)
        self.outputData(mol=mol, output_filename=output_filename)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Placeholder callback; parameter panel hookup is disabled.
        #self.paramPanel.run()
        pass
####class AdtPrepareLigand(NetworkNode):
#### """ formats ligand for autodock3 using AutoDockTools.MoleculePreparation.LigandPreparation.
####Process entails:
#### 1. possible clean_up:
#### removing lone pairs
#### merging non_polar hydrogens
#### adding bonds to atoms with no bonds
#### 2. making atoms in ligand conform to autodock3 atom types:
#### distinction between carbons and cyclic-aromatic carbons,
#### no non-polar hydrogens
#### 3. adding partial charges (gasteiger by default)
#### 4. establishing 'torTree' for flexibility pattern by setting 'root' and 'rotatable' bonds
#### 5. writing 'pdbq' file
####Input: mols (MoleculeSet)
####Output: AD3ligand (Molecule)"""
###class Adt4PrepareLigand(NetworkNode):
### """ formats ligand for autodock4 using AutoDockTools.MoleculePreparation.AD4LigandPreparation.
###Process entails:
### 1. possible clean_up:
### removing lone pairs
### merging non_polar hydrogens
### adding bonds to atoms with no bonds
### 2. making atoms in ligand conform to autodock3 atom types:
### distinction between carbons and cyclic-aromatic carbons,
### no non-polar hydrogens
### 3. adding partial charges (gasteiger by default)
### 4. establishing 'torTree' for flexibility pattern by setting 'root' and 'rotatable' bonds
### 5. writing 'pdbqt' file
###Input: mols (MoleculeSet)
###Output: AD4ligand (Molecule)"""
###class AdtPrepareReceptor(NetworkNode):
### """ formats receptor for autodock3 using AutoDockTools.MoleculePreparation.ReceptorPreparation.
###Process entails:
### 1. possible clean_up:
### removing lone pairs
### merging non_polar hydrogens
### adding bonds to atoms with no bonds
### 2. making atoms in receptor conform to autodock3 atom types:
### distinction between carbons and cyclic-aromatic carbons,
### polar hydrogens but no non-polar hydrogens
### 3. adding partial charges (Kollman by default)
### 4. writing 'pdbqs' file
###Input: mols (MoleculeSet)
###Output: AD3receptor (Molecule)"""
###class Adt4PrepareReceptor(NetworkNode):
### """ formats receptor for autodock4 using AutoDockTools.MoleculePreparation.AD4ReceptorPreparation.
###Process entails:
### 1. possible clean_up:
### removing lone pairs
### merging non_polar hydrogens
### adding bonds to atoms with no bonds
### 2. making atoms in receptor conform to autodock4 atom types:
### distinction between carbons and cyclic-aromatic carbons,
### distinction between hydrogen-bonding and non-hydrogen-bonding nitrogens,
### no non-polar hydrogens
### 3. adding partial charges (gasteiger by default)
### 4. writing 'pdbqt' file
###Input: mols (MoleculeSet)
###Output: AD4receptor (Molecule)"""
class AdtPrepareGpf3(NetworkNode):
    """Vision node that writes a grid parameter file (gpf) for autogrid3.

    Process entails:
      1. setting ligand
      2. setting receptor
      3. setting specified values of various parameters
      4. writing the gpf file for autogrid3

    Input:  ligand_filename, receptor_filename, optional parameter dictionary
    Output: gpf ('string')
    """
    def __init__(self, name='Prepare Autogrid3 Gpf', **kw):
        kw['name'] = name
        # NOTE: Python-2 style base-class invocation (apply was removed in Python 3).
        apply( NetworkNode.__init__, (self,), kw )
        # Declare input/output ports for the Vision network.
        ip = self.inputPortsDescr
        ip.append(datatype='string', name='ligand_filename')
        ip.append(datatype='string', name='receptor_filename')
        ip.append(datatype='string', required=False, name='gpf_filename', defaultValue='')
        ip.append(datatype='dict', required=False, name='parameters', defaultValue={})
        ip.append(datatype='string', required=False, name='outputfilename', defaultValue='')
        op = self.outputPortsDescr
        op.append(datatype='string', name='ag3_parameter_file')
        # Source code of the node's compute function, installed via setFunction.
        code = """def doit(self, ligand_filename, receptor_filename, gpf_filename, parameters, outputfilename):
    if ligand_filename and receptor_filename:
        from AutoDockTools.GridParameters import GridParameterFileMaker
        gpfm = GridParameterFileMaker()
        gpfm.set_ligand(ligand_filename)
        gpfm.set_receptor(receptor_filename)
        if gpf_filename:
            gpfm.read_reference(gpf_filename)
        if len(parameters):
            gpfm.set_grid_parameters(parameters)
        if not outputfilename:
            outputfilename = gpfm.ligand.name+'_'+gpfm.receptor_stem + ".gpf"
        gpfm.write_gpf(outputfilename)
        self.outputData(ag3_parameter_file=outputfilename)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Stub GUI callback; launching the parameter panel is intentionally disabled.
        #self.paramPanel.run()
        pass
class AdtPrepareGpf4(NetworkNode):
    """Vision node that writes a grid parameter file (gpf) for autogrid4.

    Process entails:
      1. setting ligand
      2. setting receptor
      3. setting specified values of various parameters
      4. writing the gpf file for autogrid4

    Input:  ligand_filename, receptor_filename, optional parameter dictionary
    Output: gpf ('string')
    """
    def __init__(self, name='Prepare Autogrid4 Gpf', **kw):
        kw['name'] = name
        # NOTE: Python-2 style base-class invocation (apply was removed in Python 3).
        apply( NetworkNode.__init__, (self,), kw )
        # Declare input/output ports for the Vision network.
        ip = self.inputPortsDescr
        ip.append(datatype='string', name='ligand_filename')
        ip.append(datatype='string', name='receptor_filename')
        ip.append(datatype='string', required=False, name='gpf_filename', defaultValue='')
        ip.append(datatype='dict', required=False, name='parameters', defaultValue={})
        ip.append(datatype='string', required=False, name='outputfilename', defaultValue='')
        op = self.outputPortsDescr
        op.append(datatype='string', name='ag4_parameter_file')
        # Source code of the node's compute function, installed via setFunction.
        code = """def doit(self, ligand_filename, receptor_filename, gpf_filename, parameters, outputfilename):
    if ligand_filename and receptor_filename:
        from AutoDockTools.GridParameters import GridParameter4FileMaker
        gpfm = GridParameter4FileMaker()
        gpfm.set_ligand(ligand_filename)
        gpfm.set_receptor(receptor_filename)
        if gpf_filename:
            gpfm.read_reference(gpf_filename)
        if len(parameters):
            gpfm.set_grid_parameters(parameters)
        if not outputfilename:
            outputfilename = gpfm.ligand.name+'_'+gpfm.receptor_stem + ".gpf"
        gpfm.write_gpf(outputfilename)
        self.outputData(ag4_parameter_file=outputfilename)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Stub GUI callback; launching the parameter panel is intentionally disabled.
        #self.paramPanel.run()
        pass
class AdtPrepareDpf3(NetworkNode):
    """Vision node that writes a docking parameter file (dpf) for autodock3.

    Process entails:
      1. setting ligand
      2. setting receptor
      3. setting specified values of various parameters
      4. writing the dpf file for autodock3

    Input:  ligand_filename, receptor_filename, optional parameter dictionary
    Output: dpf ('string')
    """
    def __init__(self, name='Prepare Autodock3 Dpf', **kw):
        kw['name'] = name
        # NOTE: Python-2 style base-class invocation (apply was removed in Python 3).
        apply( NetworkNode.__init__, (self,), kw )
        # Declare input/output ports for the Vision network.
        ip = self.inputPortsDescr
        ip.append(datatype='string', name='ligand_filename')
        ip.append(datatype='string', name='receptor_filename')
        ip.append(datatype='string', required=False, name='dpf_filename', defaultValue='')
        ip.append(datatype='dict', required=False, name='parameters', defaultValue={})
        ip.append(datatype='string', required=False, name='outputfilename', defaultValue='')
        op = self.outputPortsDescr
        op.append(datatype='string', name='ad3_parameter_file')
        # Source code of the node's compute function, installed via setFunction.
        code = """def doit(self, ligand_filename, receptor_filename, dpf_filename, parameters, outputfilename):
    if ligand_filename and receptor_filename:
        from AutoDockTools.DockingParameters import DockingParameterFileMaker
        dpfm = DockingParameterFileMaker()
        dpfm.set_ligand(ligand_filename)
        dpfm.set_receptor(receptor_filename)
        if dpf_filename:
            dpfm.read_reference(dpf_filename)
        if len(parameters):
            dpfm.set_docking_parameters(parameters)
        if not outputfilename:
            outputfilename = dpfm.ligand.name+'_'+dpfm.receptor_stem + ".dpf"
        dpfm.write_dpf(outputfilename)
        self.outputData(ad3_parameter_file=outputfilename)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Stub GUI callback; launching the parameter panel is intentionally disabled.
        #self.paramPanel.run()
        pass
class AdtPrepareDpf4(NetworkNode):
    """Vision node that writes a docking parameter file (dpf) for autodock4.

    Process entails:
      1. setting ligand
      2. setting receptor
      3. setting specified values of various parameters
      4. writing the dpf file for autodock4

    Input:  ligand_filename, receptor_filename, optional parameter dictionary
    Output: dpf ('string')
    """
    def __init__(self, name='Prepare Autodock4 Dpf', **kw):
        kw['name'] = name
        # NOTE: Python-2 style base-class invocation (apply was removed in Python 3).
        apply( NetworkNode.__init__, (self,), kw )
        # Declare input/output ports for the Vision network.
        ip = self.inputPortsDescr
        ip.append(datatype='string', name='ligand_filename')
        ip.append(datatype='string', name='receptor_filename')
        ip.append(datatype='string', required=False, name='dpf_filename', defaultValue='')
        ip.append(datatype='dict', required=False, name='parameters', defaultValue={})
        ip.append(datatype='string', required=False, name='outputfilename', defaultValue='')
        op = self.outputPortsDescr
        op.append(datatype='string', name='ad4_parameter_file')
        # Source code of the node's compute function; note the AD4-specific
        # set_ligand4/set_receptor4/read_reference4 API.
        code = """def doit(self, ligand_filename, receptor_filename, dpf_filename, parameters, outputfilename):
    if ligand_filename and receptor_filename:
        from AutoDockTools.DockingParameters import DockingParameter4FileMaker
        dpfm = DockingParameter4FileMaker()
        dpfm.set_ligand4(ligand_filename)
        dpfm.set_receptor4(receptor_filename)
        if dpf_filename:
            dpfm.read_reference4(dpf_filename)
        if len(parameters):
            dpfm.set_docking_parameters(parameters)
        if not outputfilename:
            outputfilename = dpfm.ligand.name+'_'+dpfm.receptor_stem + ".dpf"
        dpfm.write_dpf(outputfilename)
        self.outputData(ad4_parameter_file=outputfilename)\n"""
        self.setFunction(code)
    def myCallback(self, event=None):
        # Stub GUI callback; launching the parameter panel is intentionally disabled.
        #self.paramPanel.run()
        pass
#class AdtPrepareFlexDocking4
#class AdtPdbqToPdbqt
#class AdtPdbqsToPdbqt
#class AdtGpf3ToGpf4
#class AdtDpf3ToDpf4
#class AdtRunAutogrid3
#class AdtRunAutogrid4
#class AdtRunAutodock3
#class AdtRunAutodock4
#class AdtSummarizeResults
#class AdtSummarizeResults4
#class AdtSummarizeXMLResults4
#class AdtPixelMapResults
#objects:
#MolecularPreparation classes
# AutoDockBondClassifier
# ReceptorWriter<==MolKitNodes/WriteMolecule
# AD4ReceptorWriter<==MolKitNodes/WriteMolecule
#AD4FlexibleDockingPreparation
#prepare_ligand_dict
#ReceptorWriter
#AD4ReceptorWriter
#LigandPreparation
#AD4LigandPreparation
#AutoDock3_AtomTyper
#DockingParameters
#DockingParameterFileMaker
#GridParameters
#GridParameterFileMaker
#??Docking??
from Vision.VPE import NodeLibrary
# Node library under which all ADT nodes are registered with Vision.
adtlib = NodeLibrary('adtlib', '#4444FF')
#macros from other files in this directory
#from AutoDockTools.VisionInterface.PrepareAD4Molecule import PrepareAD4Molecule
#adtlib.addNode(PrepareAD4Molecule, 'Prepare AD4Molecule', 'Macro')
#from AutoDockTools.VisionInterface.AD4Ligands import AD4Ligands
#adtlib.addNode(AD4Ligands, 'Prepare AD4 Ligands', 'Macro')
#from AutoDockTools.VisionInterface.Prepare_AD4_Ligands import Prepare_AD4_Ligands
#adtlib.addNode(Prepare_AD4_Ligands, 'Prepare AD4 Ligands', 'Macro')
###from AutoDockTools.VisionInterface.PrepareAD3Ligand import PrepareAD3Ligand
###adtlib.addNode(PrepareAD3Ligand, 'Prepare AD3 Ligand', 'Macro')
###from AutoDockTools.VisionInterface.PrepareAD3Receptor import PrepareAD3Receptor
###adtlib.addNode(PrepareAD3Receptor, 'Prepare AD3Receptor', 'Macro')
###from AutoDockTools.VisionInterface.AD3Gpf import AD3Gpf
###adtlib.addNode(AD3Gpf, 'Prepare AD3 Gpf ', 'Macro')
###from AutoDockTools.VisionInterface.AD3Dpf import AD3Dpf
###adtlib.addNode(AD3Dpf, 'Prepare AD3 Dpf ', 'Macro')
#from AutoDockTools.VisionInterface.GPF4 import GPF4
#adtlib.addNode(GPF4, 'Prepare AD4 Gpf ', 'Macro')
from AutoDockTools.VisionInterface.Docking import Docking
adtlib.addNode(Docking, 'Docking', 'Macro')
from AutoDockTools.VisionInterface.recluster import recluster
adtlib.addNode(recluster, 'recluster...', 'Macro')
# Register the input, mapper and output nodes defined earlier in this module.
adtlib.addNode(GridParameterFileBrowserNE, 'Grid Parameter File Browser', 'Input')
adtlib.addNode(DockingParameterFileBrowserNE, 'Docking Parameter File Browser', 'Input')
#adtlib.addNode(ReadGridParameterFile, 'Read Grid Parameter File', 'Input')
#adtlib.addNode(ReadDockingParameterFile, 'Read Docking Parameter File', 'Input')
#adtlib.addNode(AdtPrepareGpf3, 'Prepare AD3Gpf', 'Macro')
#adtlib.addNode(AdtPrepareGpf4, 'Prepare AD4Gpf', 'Macro')
#adtlib.addNode(AdtPrepareDpf3, 'Prepare AD3Dpf', 'Macro')
#adtlib.addNode(AdtPrepareDpf4, 'Prepare AD4Dpf', 'Macro')
adtlib.addNode(Assign_AD4Types, 'AD4_typer', 'Mapper')
adtlib.addNode(Add_SolvationParameters, 'Add Solvation Parameters', 'Mapper')
adtlib.addNode(ManageRotatableBonds, 'Manage Rotatable Bonds', 'Mapper')
adtlib.addNode(MergeNonPolarHydrogens, 'Merge NonPolar Hydrogens', 'Mapper')
adtlib.addNode(MergeLonePairs, 'Merge Lone Pairs', 'Mapper')
adtlib.addNode(RemoveWaters, 'Remove Water Residues', 'Mapper')
adtlib.addNode(ManageAromaticCarbons, 'Manage Aromatic Carbons', 'Mapper')
adtlib.addNode(Ligand_Writer, 'Ligand Writer', 'Output')
adtlib.addNode(Ligand_Writer_AD4, 'AD4 Ligand Writer', 'Output')
# Best-effort registration of MolKit data types; failures are only reported.
# NOTE: Python-2 except/print syntax is kept — this module targets Python 2.
try:
    UserLibBuild.addTypes(adtlib, 'MolKit.VisionInterface.MolKitTypes')
except Exception, e:
    print "loading types failed:", e
| StarcoderdataPython |
import os
import shutil
import subprocess  # fix: used by get_jupyter_kernel() but was never imported,
                   # so the bare except there always fell back to kernel=None
import tempfile

import pytest

# Directory containing the notebooks under test.
nbdir = os.path.join('notebooks')
# Scratch directory that executed notebooks are written into.
testdir = tempfile.mkdtemp()
def get_notebooks():
    """Return the .ipynb files found in the notebooks directory."""
    notebooks = []
    for entry in os.listdir(nbdir):
        if entry.endswith('.ipynb'):
            notebooks.append(entry)
    return notebooks
def get_jupyter_kernel():
    """Return the name of an installed Python jupyter kernel, or None.

    Runs ``jupyter kernelspec list`` and picks the last kernel whose listing
    line mentions 'python'.  Returns None when jupyter is not installed or no
    python kernel is found.
    """
    import subprocess  # fix: module was never imported at file level

    kernel = None  # fix: the original hit UnboundLocalError when no line matched
    try:
        jklcmd = ('jupyter', 'kernelspec', 'list')
        b = subprocess.Popen(jklcmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT).communicate()[0]
        if isinstance(b, bytes):
            b = b.decode('utf-8')
        print(b)
        for line in b.splitlines():
            if 'python' in line:
                kernel = line.split()[0]
    except Exception:  # narrowed from a bare except; jupyter may be missing
        kernel = None
    return kernel
@pytest.mark.parametrize("fn", get_notebooks())
def test_notebook(fn):
    """Execute one notebook via nbconvert and assert it exits cleanly."""
    kernel = get_jupyter_kernel()
    print('available jupyter kernel {}'.format(kernel))
    src = os.path.join(nbdir, fn)
    cmd = ('jupyter nbconvert --ExecutePreprocessor.timeout=600 '
           '--to notebook --execute {0} '
           '--output-dir {1} --output {2}'.format(src, testdir, fn))
    rc = os.system(cmd)
    assert rc == 0, 'could not run {}'.format(fn)
if __name__ == '__main__':
    # Fix: test_notebook() requires the notebook filename; the original called
    # it with no argument, which raised TypeError. Run every notebook instead.
    for fn in get_notebooks():
        test_notebook(fn)
    shutil.rmtree(testdir)
| StarcoderdataPython |
6643140 | <reponame>tanggai/robotframework_selenium2library<filename>src/Selenium2Library/keywords/__init__.py
from _logging import _LoggingKeywords
from _runonfailure import _RunOnFailureKeywords
from _browsermanagement import _BrowserManagementKeywords
from _element import _ElementKeywords
from _tableelement import _TableElementKeywords
from _formelement import _FormElementKeywords
from _selectelement import _SelectElementKeywords
from _javascript import _JavaScriptKeywords
from _cookie import _CookieKeywords
from _screenshot import _ScreenshotKeywords
from _waiting import _WaitingKeywords
# Public API of the keywords package: the mixin classes that Selenium2Library
# composes to provide its keyword groups.
__all__ = [
    "_LoggingKeywords",
    "_RunOnFailureKeywords",
    "_BrowserManagementKeywords",
    "_ElementKeywords",
    "_TableElementKeywords",
    "_FormElementKeywords",
    "_SelectElementKeywords",
    "_JavaScriptKeywords",
    "_CookieKeywords",
    "_ScreenshotKeywords",
    "_WaitingKeywords"
]
| StarcoderdataPython |
9797197 | #!/usr/bin/env python
from livereload import Server, shell
# Live-reload development server: rebuild templates and sync CSS on change,
# then serve the generated site from ../build.
template_glob = "templates/*.html"
build_dir = "../build"

server = Server()
server.watch(
    template_glob,
    shell(["bagel", "--file-type", "html", "templates", build_dir]),
)
server.watch("app/static/*.css", shell("rsync -a static/ ../build"))
server.serve(root=build_dir, port=8080, host="localhost", open_url=True)
| StarcoderdataPython |
3381644 | # Assignment #1 - Basic Python
# Yanuar <NAME>
# Soal no 3
# Variable input
# Read the two exam scores (theory and practical).
# Fix: the theory prompt read "NamaNilai ujian teori :" — a pasted-together
# typo; it now matches the practical prompt's wording.
teori = float(input("Nilai ujian teori :"))
praktek = float(input("Nilai ujian praktek :"))
# A score below 70 means that part of the exam must be retaken.
if teori < 70 and praktek < 70 :
    print("Anda harus mengulang ujian teori dan praktek")
elif teori < 70 and praktek >=70 :
    print("Anda harus mengulang ujian teori.")
elif teori >=70 and praktek <70 :
    print("Anda harus mengulang ujian praktek.")
else:
    print("Selamat, anda lulus!.")
| StarcoderdataPython |
3418903 | <gh_stars>1-10
#!/usr/bin/python
# Copyright (C) 2013 <NAME>, FoldedSoft e.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RequestHandlers for Hangout Comment Tracker Glassware"""
__author__ = '<EMAIL> (<NAME>)'
# Add the library location to the path
import sys
# Vendored third-party libraries are bundled under lib/; make them importable.
sys.path.insert(0, 'lib')
from utils import config
import webapp2
from auth import AUTH_ROUTES
from notify import NOTIFY_ROUTES
from service import SERVICE_ROUTES
# All request routes handled by this Glassware, combined into one table.
ROUTES = (AUTH_ROUTES + SERVICE_ROUTES + NOTIFY_ROUTES )
# WSGI entry point used by App Engine.
app = webapp2.WSGIApplication(ROUTES, debug=True, config=config)
| StarcoderdataPython |
4895354 | <reponame>luismartins-td/vizceral-example<filename>get_data.py
#!/usr/local/bin/python
import requests
import sys
import json
import socketio
# Socket.IO client used to stream the traffic graph to the vizceral server.
sio = socketio.Client()
@sio.event
def connect():
    # Fired once the websocket handshake with the server completes.
    print('connection established')
@sio.event
def my_message(data):
    # Demo echo handler: log the payload and answer with a fixed response.
    print('message received with ', data)
    sio.emit('my response', {'response': 'my response'})
@sio.event
def disconnect():
    print('disconnected from server')
# Connect to the local vizceral/socketio server before building the graph.
sio.connect('http://localhost:5000')
# Require exactly two CLI arguments: the Prometheus base URL and a PromQL query.
if len(sys.argv) != 3:
    print('Usage: {0} http://prometheus:9090 a_query'.format(sys.argv[0]))
    sys.exit(1)
response = requests.get('{0}/api/v1/query'.format(sys.argv[1]),
                        params={'query': sys.argv[2]})
results = response.json()['data']['result']
# Build a list of all labelnames used.
labelnames = set()
for result in results:
    labelnames.update(result['metric'].keys())
# Canonicalize
labelnames = sorted(labelnames)
# Skeleton of the vizceral traffic graph; the INTERNET node is always present.
data = {
    "renderer": "global",
    "name": "edge",
    "nodes": [{
        "renderer": "region",
        "name": "INTERNET",
        "class": "normal"
    }]
}
info = 0
warn = 0
danger = 0
clusters = {}
namespaces = []
# NOTE(review): this rebinding shadows the imported `requests` HTTP module from
# here on; it only works because no further HTTP calls are made below.
requests = {}
for result in results:
    cluster = result['metric'].get("cluster_name", '')
    timestamp = result['value'][0]
    clusters[cluster]= timestamp
    namespace = result['metric'].get("exported_namespace", '')
    namespaces.append (namespace)
    request_val = result['value'][1]
    status_code = result['metric'].get("status", '')
    # Bucket request counts per namespace by HTTP status class:
    # 1xx-3xx -> info, 4xx -> warn, 5xx -> danger.
    if int(status_code) >= 100 and int(status_code) < 400:
        if namespace+"-info" in requests.keys():
            requests[namespace+"-info"] += int(request_val)
            info += int(request_val)
        else:
            requests[namespace+"-info"] = int(request_val)
            info += int(request_val)
    elif int(status_code) >= 400 and int(status_code) < 500:
        if namespace+"-warn" in requests.keys():
            requests[namespace+"-warn"] += int(request_val)
            warn += int(request_val)
        else:
            requests[namespace+"-warn"] = int(request_val)
            warn += int(request_val)
    elif int(status_code) >= 500:
        if namespace+"-danger" in requests.keys():
            requests[namespace+"-danger"] += int(request_val)
            danger += int(request_val)
        else:
            requests[namespace+"-danger"] = int(request_val)
            danger += int(request_val)
namespaces_clear = list(set(namespaces))
# Ensure every namespace has all three severity buckets, even when zero.
for namespace in namespaces_clear:
    if namespace:
        if namespace+"-info" not in requests.keys():
            requests[namespace+"-info"] = 0
        if namespace+"-warn" not in requests.keys():
            requests[namespace+"-warn"] = 0
        if namespace+"-danger" not in requests.keys():
            requests[namespace+"-danger"] = 0
# One region node per cluster found in the query results.
for cluster,timestamp in clusters.items():
    data["nodes"].append(
        {
            "renderer": "region",
            "name": cluster,
            "class": "normal",
            "maxVolume": 50000,
            "class": "normal",  # NOTE(review): duplicate key; the later one wins
            "updated": timestamp
        }
    )
# Populate the first cluster node with its child nodes and connections.
data["nodes"][1].update({
    "nodes": [{
        "name": "INTERNET",
        "renderer": "focusedChild",
        "class": "normal"
    }]
})
for namespace in namespaces_clear:
    if namespace:
        data["nodes"][1]["nodes"].append({
            "name": namespace,
            "renderer": "focusedChild",
            "class": "normal"
        })
data["nodes"][1].update({
    "connections": [{
    }]
})
# One INTERNET->namespace edge per namespace, weighted by severity buckets.
for namespace in namespaces_clear:
    if namespace:
        data["nodes"][1]["connections"].append({
            "source": "INTERNET",
            "target": namespace,
            "metrics": {
                "danger": str(requests[namespace+"-danger"]),
                "normal": str(requests[namespace+"-info"]),
                "warning": str(requests[namespace+"-warn"])
            },
            "class": "normal"
        })
# Top-level edge with the aggregated totals (target cluster is hard-coded).
data['connections'] = []
data['connections'].append({
    "source": "INTERNET",
    "target": "stg02",
    "metrics": {
        "normal": info,
        "warning": warn,
        "danger": danger
    },
    "notices": [
    ],
    "class": "normal"
})
#print (data)
# Push the assembled graph to the vizceral server and close the socket.
sio.emit('freshData', data)
# with open('src/xxx.json', 'w') as outfile:
#     json.dump(data, outfile, indent=2)
#     outfile.close()
sio.disconnect()
| StarcoderdataPython |
9796525 | <reponame>Hacker-1202/Selfium<gh_stars>10-100
import discord
from app.vars.client import client
from app.helpers import Notify, getUser, getGuild
@client.command()
async def inviteInfo(ctx, link):
    """Resolve a Discord invite link and display guild/inviter details."""
    notify = Notify(ctx=ctx, title='Invite information')
    notify.prepair()
    # Resolve the invite; exposes guild, inviter and approximate member count.
    linkData = await client.fetch_invite(url=link)
    if (linkData.inviter):
        inviterData = await getUser.byID(linkData.inviter.id)
    # Prefer the full guild object; fall back to the partial one on the invite.
    # NOTE(review): bare except also hides unrelated errors — consider narrowing.
    try:
        guildData = await getGuild.byID(linkData.guild.id)
    except:
        guildData = linkData.guild
    # Embed fields as (title, value, inline) tuples; \u200b is a blank spacer.
    fields = [
        ("ID", f"```{guildData.id}```", False),
        ("Name::", f"```{guildData.name}```", False),
        ("Description", f"```{guildData.description}```", False),
        ("Created in:", f'```{guildData.created_at.strftime("%d/%m/%Y")}```', False),
        ("Member Count:", f"```{int(linkData.approximate_member_count)}```", False),
        ("Link", f"```{linkData.url}```", False),
        ("\u200b", "\u200b", False),
    ]
    if (linkData.inviter):
        fields.append(("Inviter ID:", f"```{inviterData.user.id}```", False))
        fields.append(("Inviter:", f"```{inviterData.user.name + '#' + inviterData.user.discriminator}```", False))
    notify.fields(fields=fields)
4811186 | # Return value of 2 arguments using function
def avg(num1, num2):
    """Return the arithmetic mean of *num1* and *num2*."""
    return (num1 + num2) / 2
# Prompt for the two integers whose average will be reported.
y = int(input("Insert first value: "))
z = int(input("Insert second value: "))
average = avg(y,z)
# Show the mean rounded to two decimal places.
print(round(average,2))
3415425 | """This module defines the asynchronous forward reliable workflow."""
import asyncio
from typing import Tuple, Optional
import utilities.integration_adaptors_logger as log
from comms import queue_adaptor
from exceptions import MaxRetriesExceeded
from isodate import isoerror
from utilities import timing, config
from utilities.date_utilities import DateUtilities
from mhs_common import workflow
from mhs_common.routing import routing_reliability
from mhs_common.state import persistence_adaptor
from mhs_common.state import work_description as wd
from mhs_common.transmission import transmission_adaptor
from mhs_common.workflow import common_asynchronous, asynchronous_reliable
logger = log.IntegrationAdaptorsLogger('ASYNC_FORWARD_WORKFLOW')
class AsynchronousForwardReliableWorkflow(asynchronous_reliable.AsynchronousReliableWorkflow):
    """Handles the workflow for the asynchronous forward reliable messaging pattern."""
    def __init__(self, party_key: str = None, persistence_store: persistence_adaptor.PersistenceAdaptor = None,
                 transmission: transmission_adaptor.TransmissionAdaptor = None,
                 queue_adaptor: queue_adaptor.QueueAdaptor = None,
                 inbound_queue_max_retries: int = None,
                 inbound_queue_retry_delay: int = None,
                 max_request_size: int = None,
                 persistence_store_max_retries: int = None,
                 routing: routing_reliability.RoutingAndReliability = None):
        """Initialise the workflow with the adaptors it delegates to.

        All arguments are passed straight through to the asynchronous-reliable
        base class; only the ebXML interaction details and workflow name are
        specific to forward reliable messaging.
        """
        super().__init__(party_key, persistence_store, transmission,
                         queue_adaptor, inbound_queue_max_retries,
                         inbound_queue_retry_delay, max_request_size, persistence_store_max_retries,
                         routing)
        # ebXML settings peculiar to forward reliable: acks are requested from
        # the next message handler hop and duplicates are eliminated.
        self.workflow_specific_interaction_details = dict(
            ack_soap_actor="urn:oasis:names:tc:ebxml-msg:actor:nextMSH",
            duplicate_elimination=True,
            ack_requested=True,
            sync_reply=False)
        self.workflow_name = workflow.FORWARD_RELIABLE
    @timing.time_function
    async def handle_outbound_message(self, from_asid: Optional[str],
                                      message_id: str, correlation_id: str, interaction_details: dict,
                                      payload: str,
                                      wdo: Optional[wd.WorkDescription]) \
            -> Tuple[int, str, Optional[wd.WorkDescription]]:
        """Serialise and send an outbound message, retrying per reliability config.

        Returns a (status code, response body, work description) tuple; the
        work description is None when preparation fails.
        """
        logger.info('0001', 'Entered async forward reliable workflow to handle outbound message')
        logger.audit('0100', 'Outbound {WorkflowName} workflow invoked.', {'WorkflowName': self.workflow_name})
        wdo = await self._create_new_work_description_if_required(message_id, wdo, self.workflow_name)
        # Resolve the destination; forward reliable posts to a fixed configured URL.
        try:
            details = await self._lookup_endpoint_details(interaction_details)
            url = config.get_config("FORWARD_RELIABLE_ENDPOINT_URL")
            to_party_key = details[self.ENDPOINT_PARTY_KEY]
            cpa_id = details[self.ENDPOINT_CPA_ID]
        except Exception:
            await wdo.set_outbound_status(wd.MessageStatus.OUTBOUND_MESSAGE_PREPARATION_FAILED)
            return 500, 'Error obtaining outbound URL', None
        reliability_details = await self._lookup_reliability_details(interaction_details,
                                                                    interaction_details.get('ods-code'))
        # The retry interval comes back as an ISO 8601 duration; convert to seconds.
        retry_interval_xml_datetime = reliability_details[common_asynchronous.MHS_RETRY_INTERVAL]
        try:
            retry_interval = DateUtilities.convert_xml_date_time_format_to_seconds(retry_interval_xml_datetime)
        except isoerror.ISO8601Error:
            await wdo.set_outbound_status(wd.MessageStatus.OUTBOUND_MESSAGE_PREPARATION_FAILED)
            return 500, 'Error when converting retry interval: {} to seconds'.format(retry_interval_xml_datetime), None
        error, http_headers, message = await self._serialize_outbound_message(message_id, correlation_id,
                                                                              interaction_details,
                                                                              payload, wdo, to_party_key, cpa_id)
        if error:
            return error[0], error[1], None
        return await self._make_outbound_request_with_retries_and_handle_response(url, http_headers, message, wdo,
                                                                                  reliability_details, retry_interval)
    @timing.time_function
    async def handle_unsolicited_inbound_message(self, message_id: str, correlation_id: str, payload: str,
                                                 attachments: list):
        """Forward an unsolicited inbound message to the inbound queue.

        Retries up to inbound_queue_max_retries times with a fixed delay; on
        exhaustion the work description is marked failed and
        MaxRetriesExceeded is raised.
        """
        logger.info('0005', 'Entered async forward reliable workflow to handle unsolicited inbound message')
        logger.audit('0101', 'Unsolicited inbound {WorkflowName} workflow invoked.',
                     {'WorkflowName': self.workflow_name})
        work_description = wd.create_new_work_description(self.persistence_store, message_id, self.workflow_name,
                                                          wd.MessageStatus.UNSOLICITED_INBOUND_RESPONSE_RECEIVED)
        await work_description.publish()
        # range is max_retries + 1: one initial attempt plus the retries.
        for retry_num in range(self.inbound_queue_max_retries + 1):
            try:
                await self._put_message_onto_queue_with(message_id, correlation_id, payload, attachments=attachments)
                break
            except Exception as e:
                logger.warning('0006', 'Failed to put unsolicited message onto inbound queue due to {Exception}',
                               {'Exception': e})
                if retry_num >= self.inbound_queue_max_retries:
                    logger.error("0020",
                                 "Exceeded the maximum number of retries, {max_retries} retries, when putting "
                                 "unsolicited message onto inbound queue",
                                 {"max_retries": self.inbound_queue_max_retries})
                    await work_description.set_inbound_status(wd.MessageStatus.UNSOLICITED_INBOUND_RESPONSE_FAILED)
                    raise MaxRetriesExceeded('The max number of retries to put a message onto the inbound queue has '
                                             'been exceeded') from e
                logger.info("0021", "Waiting for {retry_delay} seconds before retrying putting unsolicited message "
                                    "onto inbound queue", {"retry_delay": self.inbound_queue_retry_delay})
                await asyncio.sleep(self.inbound_queue_retry_delay)
        logger.audit('0022', '{WorkflowName} workflow invoked for inbound unsolicited request. '
                             'Attempted to place message onto inbound queue with {Acknowledgement}.',
                     {'Acknowledgement': wd.MessageStatus.UNSOLICITED_INBOUND_RESPONSE_SUCCESSFULLY_PROCESSED,
                      'WorkflowName': self.workflow_name})
        await work_description.set_inbound_status(wd.MessageStatus.UNSOLICITED_INBOUND_RESPONSE_SUCCESSFULLY_PROCESSED)
| StarcoderdataPython |
6471564 | from setuptools import setup, find_packages
# Packaging metadata for the qface-store generator; installed via setuptools.
setup(
    name='qface-store',
    version='1.1',
    description='Generator based on the QFace library for redux store concept',
    url='https://github.com/Pelagicore/qface-store',
    author='jryannel',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Code Generators',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    keywords='qt redux generator',
    packages=find_packages(),
    include_package_data=True,
    # Ship every data file found inside the packages (templates etc.).
    package_data={
        '': ['*.*']
    },
    install_requires=[
        'qface>=1.3.1',
    ],
    # Console entry point exposing the generator CLI.
    entry_points={
        'console_scripts': [
            'qface-store = store.store:app'
        ],
    },
)
| StarcoderdataPython |
6429390 | # -*- coding: utf-8 -*-
"""ML-2-MultipleLinearRegression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1EZQTodMmsYb308buM0PZjk0nU3Poim4J
"""
#importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#importing dataset: features are all columns but the last; target is profit.
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values
#print(X)
#here we have state as categorical data so we will performe onehot encodng to convert it to binary vector
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
# Column 3 (state) is one-hot encoded; all other columns pass through untouched.
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [3])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
#print(X)
#spliting dataset into trainset and testset (80/20, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
#print(X_train)
#print(X_test)
#print(Y_train)
#print(Y_test)
#training our multiple LR model
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
#predicting the results and printing predictions next to the true test values
Y_pred = regressor.predict(X_test)
np.set_printoptions(precision=2)
print("real value" + " predicted values")
print(np.concatenate((Y_pred.reshape(len(Y_pred),1), Y_test.reshape(len(Y_test),1)),1))
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#optional code-building backward elimination
#import statsmodels.api as sm
#here we will first add b0(constant)
#X = np.append(arr = np.ones((50, 1)).astype(int), values = X, axis = 1)
#step1:intialize x_opt with all the independent variables
#X_opt = X[:, [0, 1, 2, 3, 4, 5]]
#X_opt = X_opt.astype(np.float64)
#fit the all model with all possible prediction
#making obj of OLS class=ordinary least sqaure
#regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
#here summary function return the feture with highest P-value and other stuff:we will exclude the feture which has lower P-value
#regressor_OLS.summary()
#repeat1-removed 2(p value:0.990)
#X_opt = X[:, [0, 1, 3, 4, 5]]
#X_opt = X_opt.astype(np.float64)
#regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
#regressor_OLS.summary()
#repeat2:remove 1(p value:0.950)
#X_opt = X[:, [0, 3, 4, 5]]
#X_opt = X_opt.astype(np.float64)
#regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
#regressor_OLS.summary()
#remove4:
#X_opt = X[:, [0, 3, 5]]
#X_opt = X_opt.astype(np.float64)
#regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
#regressor_OLS.summary()
#final:feture with p value nearly 0
#X_opt = X[:, [0, 3]]
#X_opt = X_opt.astype(np.float64)regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
#regressor_OLS.summary()
| StarcoderdataPython |
4829112 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# (C) 1998-2001 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from __future__ import print_function
def testElementScanner():
    """Exercise the chemkin Elements scanner on representative input lines."""
    print("Testing element scanner")
    from pyre.chemistry.unpickle.chemkin.scanners.Elements import Elements
    scanner = Elements()
    tests = [
        "   ",  # Don't send an empty string
        "!",
        "! This is a comment",
        "H2",
        # Fix: a missing comma previously merged the next two literals into one
        # implicitly-concatenated test string.
        "H2/1.004/",
        "H2 / 1.004 /",
    ]
    testScanner(scanner, tests)
    return
def testSpeciesScanner():
    """Exercise the chemkin Species scanner on a fixed set of sample lines."""
    print("Testing species scanner")
    from pyre.chemistry.unpickle.chemkin.scanners.Species import Species
    samples = (
        "   ",  # don't send an empty string
        "!",
        "! This is a comment",
        "H2",
        "H2O2++",
        "CO2(S)",
    )
    testScanner(Species(), list(samples))
def testThermoScanner():
    """Exercise the chemkin Thermo scanner on a fixed set of sample lines."""
    print("Testing thermo scanner")
    from pyre.chemistry.unpickle.chemkin.scanners.Thermo import Thermo
    samples = (
        "   ",  # don't send an empty string
        "!",
        "! This is a comment",
        "THERMO",
        "THERMO ALL",
    )
    testScanner(Thermo(), list(samples))
def testReactionScanner():
    """Exercise the chemkin Reactions scanner on a fixed set of sample lines."""
    print("Testing reaction scanner")
    from pyre.chemistry.unpickle.chemkin.scanners.Reactions import Reactions
    samples = (
        "   ",  # don't send an empty string
        "!",
        "! This is a comment",
        "REACTION",
        "CH3+CH3(+M)=C2H6(+M) 9.03E+16 -1.18 653.3 ! 1",
        "Rev / 1.0 2.0 3.0 /",
        "LOW / 1.0 2.0 3.0 /",
        "LT / 1.0 2.0 /",
        "RLT / 1.0 2.0 /",
        "SRI / 1.0 2.0 3.0/",
        "SRI / 1.0 2.0 3.0 4.0 5.0 /",
        "TROE / 1.0 2.0 3.0/",
        "TROE / 1.0 2.0 3.0 4.0 /",
    )
    testScanner(Reactions(), list(samples))
def testScanner(scanner, tests):
for test in tests:
print(" {%s}: %s" % (test, scanner.match(test, 0, 0)))
return
# main
if __name__ == "__main__":
    # Exercise each chemkin section scanner in turn; results are only
    # printed for manual inspection, nothing is asserted.
    testElementScanner()
    testSpeciesScanner()
    testThermoScanner()
    testReactionScanner()
# version
__id__ = "$Id$"
#
# End of file
| StarcoderdataPython |
9605535 | # Generated by Django 3.1.2 on 2021-09-13 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the booking app.

    Adds `client_phone_number` and `when` to Appointment and shortens
    `client_name` to 200 characters.  Edit only comments by hand.
    """

    dependencies = [
        ('booking', '0004_auto_20210906_0829'),
    ]

    operations = [
        # default=None + preserve_default=False: the default was only used
        # to populate existing rows during the migration, not kept on the model.
        migrations.AddField(
            model_name='appointment',
            name='client_phone_number',
            field=models.CharField(default=None, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='appointment',
            name='when',
            field=models.DateField(default=None),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='appointment',
            name='client_name',
            field=models.CharField(max_length=200),
        ),
    ]
| StarcoderdataPython |
9691194 | <filename>recommender/src/helper.py
from tqdm import tqdm
from ast import literal_eval
from collections import defaultdict
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.metrics.pairwise import cosine_similarity
import os
import heapq
import zipfile
import pandas as pd
import numpy as np
import scipy.sparse as sparse
import sys
import matplotlib.pyplot as plt
# Some logistics helping functions
class colors:
    """ANSI escape sequences used to colour the recommender's console output."""
    HEADER = '\033[95m'   # magenta, used for the "[Recommender ...]" tag
    OKGREEN = '\033[92m'  # green, success messages
    WARNING = '\033[93m'  # yellow, warnings
    FAIL = '\033[91m'     # red, failures
    ENDC = '\033[0m'      # reset all attributes
def print_success(message):
    """Print *message* in green behind the recommender tag."""
    assert isinstance(message, str)
    tag = f"{colors.HEADER}[Recommender Message]{colors.ENDC}"
    print(f"{tag} - {colors.OKGREEN}{message}{colors.ENDC}")
def print_failure(message):
    """Print *message* in red behind the recommender failure tag."""
    assert isinstance(message, str)
    tag = f"{colors.HEADER}[Recommender Failure]{colors.ENDC}"
    print(f"{tag} - {colors.FAIL}{message}{colors.ENDC}")
def print_warning(message):
    """Print *message* in yellow behind the recommender warning tag."""
    assert isinstance(message, str)
    tag = f"{colors.HEADER}[Recommender Warning]{colors.ENDC}"
    print(f"{tag} - {colors.WARNING}{message}{colors.ENDC}")
def extract_csv_data(zip_path, data_path):
    """Extract every zip archive found in *zip_path* into *data_path*.

    Non-zip files in *zip_path* are silently skipped.  If *data_path*
    already exists, extraction is skipped entirely.
    """
    assert isinstance(zip_path, str) and isinstance(data_path, str)
    # Get files in the zip_path
    zip_files = [os.path.join(zip_path, f) for f in os.listdir(zip_path)]
    print_success('Files in ' + zip_path + ':\n' + str(zip_files))
    # Pass if data_path already exists.
    # NOTE(review): the directory may exist while the expected CSVs do not;
    # as the original comment says, re-extracting would be safer -- confirm.
    if os.path.exists(data_path): ### Path may exist but not the data... Its better to rewrite the data
        print_warning('Extracted data (%s) are already existed.' % data_path)
        return
    # Store the extracted csv files in data_path
    for zip_file in zip_files:
        if zipfile.is_zipfile(zip_file):
            with zipfile.ZipFile(zip_file, 'r') as zip_ref:
                zip_ref.extractall(data_path)
    print_success('All zip files are extracted.')
def get_training_data(data_path, store_path):
    """Define the prior(eval_set="prior") orders as the training dataset.

    Returns a 3-tuple of DataFrames:
    (product_frequency, user_product_frequency, productsPerUser).
    When all three cached CSVs exist in *store_path* they are loaded
    directly; otherwise they are computed from the raw files in
    *data_path* and cached.  On error, prints the failure and returns None.
    """
    assert isinstance(data_path, str) and isinstance(store_path, str)
    # Filenames for storing the processed data
    prodsPerUser_filename = os.path.join(store_path, 'productsPerUser_train.csv')
    userProdFreq_filename = os.path.join(store_path, 'user_product_frequency_train.csv')
    prodFreq_filename = os.path.join(store_path, 'product_frequency_train.csv')
    # Pass if files are already existed (cache hit: load all three)
    if os.path.exists(prodsPerUser_filename) and os.path.exists(userProdFreq_filename) and os.path.exists(prodFreq_filename):
        print_warning('Training data are already existed.')
        return pd.read_csv(prodFreq_filename), \
            pd.read_csv(userProdFreq_filename), \
            pd.read_csv(prodsPerUser_filename)
    try:
        # Load the csv files as dataframes
        df_orders = pd.read_csv(os.path.join(data_path, 'orders.csv'))
        df_order_products_train = pd.read_csv(os.path.join(data_path, 'order_products__prior.csv'))
        # Trim the unnecessary columns
        df_order_products_train = df_order_products_train[["order_id", "product_id"]]
        # Get the frequency of occurrence for each product (ready for tf-idf)
        df_product_frequency = df_order_products_train['product_id'].value_counts()
        df_product_frequency = df_product_frequency.rename_axis('product_id').reset_index(name='frequency')
        print_success('Calculation of product frequency is completed.')
        # Get the direct relation between products and users: join orders to
        # products through order_id, then collect each user's product set.
        df_usersAndProducts_train = pd.merge(df_orders, df_order_products_train, on='order_id', how='inner')
        df_usersAndProducts_train = df_usersAndProducts_train[['user_id', 'product_id']]
        df_productsPerUser_train = df_usersAndProducts_train.groupby('user_id').agg(set).reset_index()
        print_success('Calculation of productsPerUser is completed.')
        # Get the frequency of occurence for each user-product pair
        df_user_product_frequency = df_usersAndProducts_train.groupby(['user_id', 'product_id'])\
            .size().reset_index().rename(columns={0: 'frequency'})
        print_success('Calculation of user-product-pair frequency is completed.')
        # Store the processed data to enhance efficiency
        if not os.path.exists(store_path):
            os.mkdir(store_path)
        df_productsPerUser_train.to_csv(prodsPerUser_filename, index_label=False)
        df_user_product_frequency.to_csv(userProdFreq_filename, index_label=False)
        df_product_frequency.to_csv(prodFreq_filename, index_label=False)
        print_success('Training data are retrieved and saved.')
        return df_product_frequency, df_user_product_frequency, df_productsPerUser_train
    except Exception as e:
        print_failure(str(e))
def get_testing_data(data_path, store_path):
    """Define the current(eval_set="train") orders as the testing dataset.

    Returns a DataFrame mapping each user_id to the set of product_ids in
    their current order, cached as a CSV in *store_path*.  On error, prints
    the failure and returns None.
    """
    assert isinstance(data_path, str) and isinstance(store_path, str)
    # Filename for testing the recommender system
    test_filename = os.path.join(store_path, 'productsPerUser_test.csv')
    # Pass if file is already existed (cache hit)
    if os.path.exists(test_filename):
        print_warning('Testing data are already existed.')
        return pd.read_csv(test_filename)
    try:
        # Load the csv files as dataframes
        df_orders = pd.read_csv(os.path.join(data_path, 'orders.csv'))
        df_order_products_test = pd.read_csv(os.path.join(data_path, 'order_products__train.csv'))
        # Trim the unnecessary columns
        df_order_products_test = df_order_products_test[["order_id", "product_id"]]
        # Get the direct relation between products and users
        df_usersAndProducts_test = pd.merge(df_orders, df_order_products_test, on='order_id', how='inner')
        df_usersAndProducts_test = df_usersAndProducts_test[['user_id', 'product_id']]
        df_productsPerUser_test = df_usersAndProducts_test.groupby('user_id').agg(set).reset_index()
        # Store the processed data to enhance efficiency
        if not os.path.exists(store_path):
            os.mkdir(store_path)
        df_productsPerUser_test.to_csv(test_filename, index_label=False)
        print_success('Testing data are retrieved and saved.')
        return df_productsPerUser_test
    except Exception as e:
        print_failure(str(e))
def get_category_data(data_path):
    """Load the aisle, department and product lookup tables from *data_path*.

    Returns (df_aisles, df_departments, df_products); on error prints the
    failure and returns None.
    """
    assert isinstance(data_path, str)
    try:
        frames = tuple(pd.read_csv(os.path.join(data_path, name))
                       for name in ('aisles.csv', 'departments.csv', 'products.csv'))
        print_success('Category data are retrieved.')
        return frames
    except Exception as e:
        print_failure(str(e))
def build_user_product_matrix(df_user_product_frequency, matrix_file_path, matrix_name):
    """Build and store coo/csr sparse matrix of user-product matrix.

    Rows correspond to user ids, columns to product ids (via pandas
    category codes), values are purchase frequencies.  The matrix is
    cached on disk as .npz and returned in CSR format.
    """
    assert isinstance(df_user_product_frequency, pd.DataFrame)
    assert isinstance(matrix_file_path, str) and isinstance(matrix_name, str)
    matrix_path = os.path.join(matrix_file_path, matrix_name)
    if os.path.exists(matrix_path):
        print_warning('User-product matrix is already existed.')
        return sparse.load_npz(matrix_path).tocsr()
    # Category dtype gives each id a dense 0-based code used as matrix index.
    df_user_product_frequency['user_id'] = df_user_product_frequency['user_id'].astype('category')
    df_user_product_frequency['product_id'] = df_user_product_frequency['product_id'].astype('category')
    # Define sparse user-product matrix in coo format
    data = df_user_product_frequency['frequency']
    row = df_user_product_frequency['user_id'].cat.codes.copy()
    col = df_user_product_frequency['product_id'].cat.codes.copy()
    user_product_matrix = sparse.coo_matrix((data, (row, col)))
    # Store and return the sparse matrix
    if not os.path.exists(matrix_file_path):
        os.mkdir(matrix_file_path)
    sparse.save_npz(matrix_path, user_product_matrix)
    print_success('User-product matrix is stored at %s' % matrix_path)
    return user_product_matrix.tocsr()
def build_tfidf_matrix(tf, matrix_file_path, matrix_name):
    """Build tf-idf sparse matrix for product. 'tf' refers to term frequency.

    Users play the role of documents and products the role of terms.  The
    result is cached on disk as .npz and returned in CSR format.
    """
    assert isinstance(tf, sparse.csr.csr_matrix)
    assert isinstance(matrix_file_path, str) and isinstance(matrix_name, str)
    matrix_path = os.path.join(matrix_file_path, matrix_name)
    if os.path.exists(matrix_path):
        print_warning('User-product TF-IDF matrix is already existed.')
        return sparse.load_npz(matrix_path).tocsr()
    tf_idf = coo_matrix(tf)
    # Get total number of documents (here is user number)
    N = tf.shape[0]
    # Calculate IDF (inverse document frequency); bincount over the COO
    # column indices counts, per product, how many users bought it.
    idf = np.log(N / (1 + np.bincount(tf_idf.col)))
    # Since terms don’t show up in many documents, we apply a square root penalty over tf to dampen it.
    tf_idf.data = np.sqrt(tf_idf.data) * idf[tf_idf.col]
    # Store and return the sparse matrix
    if not os.path.exists(matrix_file_path):
        os.mkdir(matrix_file_path)
    sparse.save_npz(matrix_path, tf_idf)
    print_success('User-product TF-IDF matrix is stored at %s' % matrix_path)
    return tf_idf.tocsr()
# User-based recommendation
def get_topK_similar_users(user_id, feature_matrix, k):
    """Find the most k similar users based on similarity.

    User ids are treated as 1-based row indices into *feature_matrix*
    (hence the user_id - 1).  Returns a list of k user ids.
    """
    assert isinstance(user_id, int) and isinstance(k, int)
    assert isinstance(feature_matrix, sparse.csr.csr_matrix)
    # Get list of cosine similarities of every user against the target user
    similarities = cosine_similarity(feature_matrix, feature_matrix[user_id - 1], False)
    # Select top K similar users: take k+1 and drop the first entry, which
    # is presumably the target user itself (similarity 1.0) -- TODO confirm
    # this holds when other users tie at similarity 1.0.
    top_K_similar_users = heapq.nlargest(k + 1, range(similarities.shape[0]), similarities.toarray().take)[1:]
    # Convert 0-based row indices back to 1-based user ids.
    top_K_similar_users = [x + 1 for x in top_K_similar_users]
    # Return the list excluding the target user
    return top_K_similar_users
def generate_recommendation(user_id, feature_matrix, df_productsPerUser, df_product_frequency, k, n):
    """Find the most n recommended products based on the shopping history
    of the k users most similar to *user_id*.

    A product's popularity is the pair:
      1. the number of the k similar users who bought it, and
      2. its global purchase frequency over all users (tie-breaker).
    Returns a list of at most n (product_id, (count, global_frequency))
    tuples, sorted by popularity descending.
    """
    assert isinstance(user_id, int) and isinstance(k, int) and isinstance(n, int)
    assert isinstance(feature_matrix, sparse.csr.csr_matrix)
    assert isinstance(df_product_frequency, pd.DataFrame) and isinstance(df_productsPerUser, pd.DataFrame)
    # Get top k similar users
    topK_similar_users = get_topK_similar_users(user_id, feature_matrix, k)
    # Count, per candidate product, how many similar users bought it.
    recommended_prods = defaultdict(int)
    # NOTE(review): user_prods is computed but never used -- possibly it was
    # meant to exclude products the user already bought; behavior unchanged.
    user_prods = df_productsPerUser['product_id'][df_productsPerUser['user_id'] == user_id].values[0]
    if type(user_prods) == str:
        # Cached CSVs store the product set as its repr; parse it back.
        user_prods = literal_eval(user_prods)
    for user in topK_similar_users:
        prods = df_productsPerUser['product_id'][df_productsPerUser['user_id'] == user].values
        prods = set() if len(prods) == 0 else prods[0]
        if type(prods) == str:
            prods = literal_eval(prods)
        for prod in prods:
            recommended_prods[prod] += 1
    # Attach the global frequency for each candidate product.
    # BUG FIX: the frequency lookup previously filtered on the similar-user
    # count ('product_id' == count) instead of the product id itself.
    recommended_prods = [
        (prod, (count, int(df_product_frequency[df_product_frequency['product_id'] == prod].frequency)))
        for (prod, count) in recommended_prods.items()
    ]
    # Sort the products based on the popularity in the set of similar users
    recommended_prods = sorted(recommended_prods, key=lambda kv: (kv[1], kv[0]), reverse=True)
    return recommended_prods[:n]
def report_userBased(recommended_prods, df_products, df_departments, df_aisles):
    '''Return a DataFrame describing the recommended products in detail,
    sorted by popularity (descending).'''
    assert isinstance(df_products, pd.DataFrame) and isinstance(df_aisles, pd.DataFrame)
    assert isinstance(df_departments, pd.DataFrame)
    # Seed the report with the (product_id, popularity) pairs.
    report = pd.DataFrame({
        'product_id': [pid for pid, _ in recommended_prods],
        'popularity': [pop for _, pop in recommended_prods],
    })
    # Enrich with product, department and aisle details in turn.
    for lookup, key in ((df_products, 'product_id'),
                        (df_departments, 'department_id'),
                        (df_aisles, 'aisle_id')):
        report = pd.merge(report, lookup, on=key, how='inner')
    return report.sort_values(by='popularity', ascending=False)
| StarcoderdataPython |
8157536 | import logging
from flask_restful import Api
from .job import MonitorJob
from .base import MsiScheduler
from .handlers import init_api
from .db import db
class JobManager:
    """Singleton facade over the APScheduler-based MsiScheduler plus its
    Flask-RESTful API and database bindings.

    Only the first construction wires up the app; later constructions are
    no-ops (the existing singleton keeps serving).
    """
    # Class-level singleton slot; set on first successful construction.
    singleton = None

    def __init__(self, app=None):
        if JobManager.singleton is None:
            self.app = app
            self.api = Api(app)
            self.scheduler = MsiScheduler()
            if app is not None:
                init_api(self.api)
                self.init_app(app)
            JobManager.singleton = self

    def init_app(self, app):
        """Bind scheduler and database to *app*, create tables, and start.

        NOTE(review): init_api() is invoked here as well as in __init__
        when an app is supplied -- possibly a redundant double
        registration; confirm before changing.
        """
        self.scheduler.init_app(app)
        self.scheduler.app = app
        db.init_app(app)
        db.app = app
        with app.app_context():
            db.create_all()
        init_api(self.api)
        try:
            self.start()
            logging.info("The scheduler started successfully.")
        except Exception as e:
            raise e

    def start(self):
        """Start the underlying scheduler."""
        return self.scheduler.start()

    def stop(self):
        """Shut the underlying scheduler down."""
        return self.scheduler.shutdown()

    def add_job(self, job, job_id=None, args=None, trigger='interval', minutes=720, **kwargs):
        """Schedule *job*; MonitorJob instances carry their own trigger and
        interval, which override the defaults."""
        if isinstance(job, MonitorJob):
            minutes = job.task.monitor.schedule.minutes
            trigger = job.task.monitor.schedule.type._value_
        return self.scheduler.add_scheduler_job(job, job_id=str(job_id), args=args, trigger=trigger, minutes=minutes)

    def pause_job(self, job_id):
        return self.scheduler.pause_job(job_id)

    def get_job(self, job_id):
        return self.scheduler.get_job(job_id)

    def get_jobs(self):
        return self.scheduler.get_jobs()

    def remove_job(self, job_id):
        return self.scheduler.remove_job(job_id)

    def resume_job(self, job_id):
        return self.scheduler.resume_job(job_id)
| StarcoderdataPython |
11356512 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
class databaseManager:
    """Thin wrapper around the cerbere_db MySQL inverted-index schema.

    Tables: IndexTable (words), Documents (titles) and
    IndexDocumentsCorrespondences (word/document frequencies).
    All queries now use parameterized statements instead of string
    interpolation, closing the SQL-injection hole in the original code.
    """

    def __init__(self):
        self.conn = mysql.connector.connect(host="localhost", user="root",
                                            password="<PASSWORD>", database="cerbere_db")
        self.cursor = self.conn.cursor()

    def createTables(self):
        """Create the three index tables if they do not already exist."""
        self.cursor.execute(""" CREATE TABLE IF NOT EXISTS IndexTable (\
        idIndex int NOT NULL AUTO_INCREMENT, \
        word varchar(250) NOT NULL, \
        PRIMARY KEY(idIndex) \
        );""")
        self.cursor.execute(""" CREATE TABLE IF NOT EXISTS Documents(\
        idDocuments int NOT NULL AUTO_INCREMENT, \
        title varchar(250) NOT NULL, \
        PRIMARY KEY(idDocuments) \
        );""")
        self.cursor.execute(""" CREATE TABLE IF NOT EXISTS IndexDocumentsCorrespondences(\
        idIndex int NOT NULL, \
        idDocuments int NOT NULL, \
        frequence int NOT NULL, \
        FOREIGN KEY (idIndex) REFERENCES IndexTable(idIndex), \
        FOREIGN KEY (idDocuments) REFERENCES Documents(idDocuments) \
        );""")

    def addElementsIndexTable(self, Listword):
        """Insert each word of *Listword* into IndexTable (one transaction)."""
        try:
            for word in Listword:
                self.cursor.execute("""INSERT INTO IndexTable (word) VALUES (%s)""", [word])
            self.conn.commit()
        except Exception:
            print("ERROR WHEN ADD ELEMENT IN IndexTable")
            self.conn.rollback()

    def addElementDocumentsTable(self, ListTitle):
        """Insert each title of *ListTitle* into Documents (one transaction)."""
        try:
            for title in ListTitle:
                self.cursor.execute("""INSERT INTO Documents (title) VALUES (%s)""", [title])
            self.conn.commit()
        except Exception:
            print("ERROR WHEN ADD ELEMENT IN Documents")
            self.conn.rollback()

    def addElementIndexDocumentsCorrespondences(self, dic_idWord_IdDoc_Freq):
        """Insert {(idWord, idDoc): frequency} entries into the link table.

        Fixes from the original: dict.items() instead of the Python-2-only
        iteritems(), and a parameterized INSERT instead of %-formatting the
        values straight into the SQL string.
        """
        try:
            for (idWord, idDoc), freq in dic_idWord_IdDoc_Freq.items():
                self.cursor.execute(
                    'INSERT INTO IndexDocumentsCorrespondences (idIndex,idDocuments,frequence) '
                    'VALUES (%s,%s,%s)', (idWord, idDoc, freq))
            self.conn.commit()
        except Exception:
            print("ERROR WHEN ADD ELEMENT IN IndexDocumentsCorrespondences")
            self.conn.rollback()

    def deleteTables(self):
        """Drop all three tables (link table first for FK constraints)."""
        self.cursor.execute(""" DROP TABLE IndexDocumentsCorrespondences ;""")
        self.cursor.execute(""" DROP TABLE IndexTable ;""")
        self.cursor.execute(""" DROP TABLE Documents ;""")

    def getIdByWord(self, word_):
        """Return the idIndex of *word_*, or -1 when the word is unknown."""
        self.cursor.execute("SELECT idIndex FROM IndexTable WHERE word = %s", (word_,))
        result = self.cursor.fetchone()
        if result is not None:
            return result[0]
        else:
            return -1

    def freqByIdWordIdDoc(self, idWord, idDocumentsReq):
        """Return the stored frequency of word *idWord* in document
        *idDocumentsReq*, or 0 when there is no entry."""
        self.cursor.execute(
            "SELECT frequence FROM IndexDocumentsCorrespondences "
            "WHERE idIndex = %s AND idDocuments = %s", (idWord, idDocumentsReq))
        result = self.cursor.fetchone()
        if result is not None:
            return result[0]
        else:
            return 0

    def countNbAppareancesWord(self, idWord):
        """Return in how many documents word *idWord* appears (0 if none)."""
        self.cursor.execute(
            "SELECT COUNT(idIndex) FROM IndexDocumentsCorrespondences "
            "WHERE idIndex = %s", (idWord,))
        result = self.cursor.fetchone()
        if result is not None:
            return result[0]
        else:
            return 0
if __name__ == '__main__':
    # Ad-hoc manual exercise of the database manager against a live MySQL.
    d1 = databaseManager()
    # d1.deleteTables()
    # d1.createTables()
    # d1.addElementsIndexTable(["madad"])
    # d1.addElementsIndexTable(["bonjour"])
    # d1.addElementDocumentsTable(["la vie est belle"])
    # d1.addElementDocumentsTable(["gg"])
    # d1.addElementDocumentsTable(["gg1"])
    # d1.addElementDocumentsTable(["gg2"])
    d1.addElementIndexDocumentsCorrespondences({(1, 1): 23})
    d1.addElementIndexDocumentsCorrespondences({(1, 2): 20})
    d1.addElementIndexDocumentsCorrespondences({(2, 3): 23})
    # a = {}
    # a[(1, 25)] = 78
    # d1.addElementIndexDocumentsCorrespondences(a)
    # d1.addElementsIndexTable("dfsd")
    # BUG FIX: this was a Python-2 print statement ("print x"), a syntax
    # error on Python 3; the rest of the file already uses print(...).
    print(d1.countNbAppareancesWord(1))
| StarcoderdataPython |
194852 | <filename>exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/scripts/inventory/lxc_inventory.py
#!/usr/bin/env python
#
# (c) 2015-16 <NAME>, hastexo Professional Services GmbH
# <<EMAIL>>
# Based in part on:
# libvirt_lxc.py, (c) 2013, <NAME> <<EMAIL>>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Ansible inventory script for LXC containers. Requires Python
bindings for LXC API.
In LXC, containers can be grouped by setting the lxc.group option,
which may be found more than once in a container's
configuration. So, we enumerate all containers, fetch their list
of groups, and then build the dictionary in the way Ansible expects
it.
"""
from __future__ import print_function
import sys
import lxc
import json
def build_dict():
    """Return an Ansible inventory dictionary keyed by LXC group name.

    Every container is implicitly a member of the "all" group; the other
    groups come from each container's lxc.group configuration entries.
    """
    # Map container name -> list of groups it belongs to ("all" included).
    containers = {
        name: ['all'] + (lxc.Container(name).get_config_item('lxc.group') or [])
        for name in lxc.list_containers()
    }
    # Collect the distinct group names across all containers.
    groups = set()
    for memberships in containers.values():
        groups.update(memberships)
    # One inventory entry per group, listing its member containers.
    return {
        group: {'hosts': [name for name, memberships in containers.items()
                          if group in memberships],
                'vars': {'ansible_connection': 'lxc'}}
        for group in groups
    }
def main(argv):
    """Emit the JSON inventory Ansible expects, per the CLI arguments."""
    inventory = build_dict()
    if len(argv) == 2 and argv[1] == '--list':
        json.dump(inventory, sys.stdout)
    elif len(argv) == 3 and argv[1] == '--host':
        # Per-host variables: every container is reached via the lxc plugin.
        json.dump({'ansible_connection': 'lxc'}, sys.stdout)
    else:
        print("Need an argument, either --list or --host <host>", file=sys.stderr)
main(sys.argv)
| StarcoderdataPython |
220954 | <reponame>kelesi/coedcop
class Digit(object):
    """One glyph of a big-text digit stored as a list of text rows.

    The representation is assumed to be five rows of three characters
    each (left edge, middle, right edge), so the glyph can be stretched
    both horizontally and vertically when scaling.
    """

    def __init__(self, representation):
        self._digit_representation = representation

    def scale(self, scale_factor=1):
        """Return a Digit enlarged *scale_factor* times (self when 1)."""
        if scale_factor == 1:
            return self
        # Stretch horizontally: repeat each row's middle character.
        scaled_lines = []
        for line in self._digit_representation:
            scaled_lines.append(line[0] + line[1] * scale_factor + line[2])
        # Stretch vertically: rows 1 and 3 (the vertical strokes) repeat.
        # FIX: range() replaces the Python-2-only xrange() (identical
        # behavior in a for loop, and works on Python 3).
        scaled_digit = []
        scaled_digit.append(scaled_lines[0])
        for _ in range(scale_factor):
            scaled_digit.append(scaled_lines[1])
        scaled_digit.append(scaled_lines[2])
        for _ in range(scale_factor):
            scaled_digit.append(scaled_lines[3])
        scaled_digit.append(scaled_lines[4])
        return Digit(scaled_digit)

    def get_representation(self):
        """Return the glyph's rows as a list of strings."""
        return self._digit_representation
class Digits(object):
    """A set of ten Digit glyphs (0-9) that can render whole numbers."""

    def __init__(self, digit_representations, scale_factor=1):
        # FIX: the original enumerated the list only to discard the index;
        # plain iteration is equivalent and clearer.
        self._digits = [Digit(representation).scale(scale_factor)
                        for representation in digit_representations]

    def get_digit(self, digit):
        """Indexed getter: return the glyph for digit character/int *digit*."""
        return self._digits[int(digit)]

    def assemble_numbers(self, numbers):
        """Return the rows formed by placing the digits of *numbers* side by side."""
        def concat_lists(list1, list2):
            # Row-wise string concatenation; first call just adopts list2.
            if list1 is None:
                return list2
            return [a + b for a, b in zip(list1, list2)]
        new_representation = None
        for number in str(numbers):
            digit = self.get_digit(number)
            new_representation = concat_lists(new_representation, digit.get_representation())
        return new_representation

    def print_numbers(self, numbers, printer):
        """Send each assembled row of *numbers* to the *printer* callable."""
        for line in self.assemble_numbers(numbers):
            printer(line)
class DigitReader(object):
    """Loads digit glyphs 0-9 from text files in a resource directory."""

    def __init__(self, resource_directory):
        # The path is joined by plain string concatenation below, so the
        # directory is expected to end with a path separator (e.g. "res/").
        self._resource_directory = resource_directory

    def read_digits(self, scale_factor=1):
        """Read 0.txt .. 9.txt and return a Digits set at *scale_factor*.

        FIX: range() replaces the Python-2-only xrange() (identical
        behavior in a for/comprehension, and works on Python 3).
        """
        return Digits([self._read_digit_from_file(number) for number in range(10)],
                      scale_factor)

    def _read_digit_from_file(self, digit):
        # One row per line, newline stripped.
        file_name = self._resource_directory + str(digit) + ".txt"
        with open(file_name) as file_handle:
            return [line.replace('\n', '') for line in file_handle.readlines()]
| StarcoderdataPython |
12863777 | '''
Collection of Windows-specific I/O functions
'''
import msvcrt
import time
import ctypes
from platforms import winconstants, winclipboard
# Module-level shortcuts for the Win32 user32 functions used below.
EnumWindows = ctypes.windll.user32.EnumWindows  # enumerate top-level windows
# Callback signature expected by EnumWindows (BOOL callback(HWND, LPARAM)).
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
GetWindowText = ctypes.windll.user32.GetWindowTextW  # wide-char title getter
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
def flush_io_buffer():
    """Drain the console keyboard buffer, echoing pending keystrokes."""
    while msvcrt.kbhit():  # a key press is waiting
        print(msvcrt.getch().decode('utf8'), end='')
def close_active_window():
    """Ask the current foreground window to close by posting WM_CLOSE."""
    hwnd = ctypes.windll.user32.GetForegroundWindow()
    ctypes.windll.user32.PostMessageA(hwnd, winconstants.WM_CLOSE, 0, 0)
def get_active_window_name():
    """Return the title text of the current foreground window."""
    return get_window_title(ctypes.windll.user32.GetForegroundWindow())
def maximize_active_window():
    """Maximize the current foreground window (3 == SW_MAXIMIZE)."""
    hwnd = ctypes.windll.user32.GetForegroundWindow()
    ctypes.windll.user32.ShowWindow(hwnd, 3)
def minimize_active_window():
    """Minimize the current foreground window (6 == SW_MINIMIZE)."""
    hwnd = ctypes.windll.user32.GetForegroundWindow()
    ctypes.windll.user32.ShowWindow(hwnd, 6)
def get_window_title(hwnd):
    """Return the (unicode) title text of window handle *hwnd*."""
    length = GetWindowTextLength(hwnd)
    buff = ctypes.create_unicode_buffer(length + 1)  # +1 for the NUL terminator
    GetWindowText(hwnd, buff, length + 1)
    return buff.value
def get_matching_windows(title_list):
    """Return {lowercased window title: hwnd} for every visible window whose
    title contains all elements of *title_list*.

    NOTE(review): activate_window() passes a plain string here, in which
    case iteration is over individual characters (each character must
    appear in the title) -- confirm that this is the intended matching.
    """
    matches = {}
    def window_enum_callback(hwnd, lParam):
        if IsWindowVisible(hwnd):
            window_name = get_window_title(hwnd).lower()
            for name in title_list:
                if name not in window_name:
                    return True  # no match; keep enumerating
            matches[window_name] = hwnd
        return True  # always continue enumeration
    EnumWindows(EnumWindowsProc(window_enum_callback), 0)
    return matches
def activate_window(title, position=1):
    """Bring the *position*-th window matching *title* to the foreground.

    Matching windows are ordered by title length (shortest first) and
    *position* is 1-based.  Returns True once the window is confirmed to
    be in the foreground, False if every strategy failed.
    """
    if position > 0:
        position -= 1  # convert 1-based argument to a 0-based index
    matches = get_matching_windows(title)
    sorted_keys = list(sorted(matches.keys(), key=len))
    key = sorted_keys[position]
    hwnd = matches[key]
    # magic incantations to activate window consistently
    IsIconic = ctypes.windll.user32.IsIconic
    ShowWindow = ctypes.windll.user32.ShowWindow
    GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
    GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
    BringWindowToTop = ctypes.windll.user32.BringWindowToTop
    AttachThreadInput = ctypes.windll.user32.AttachThreadInput
    SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
    SystemParametersInfo = ctypes.windll.user32.SystemParametersInfoA
    # Restore a minimized window first.
    if IsIconic(hwnd):
        ShowWindow(hwnd, winconstants.SW_RESTORE)
    if GetForegroundWindow() == hwnd:
        return True
    # Strategy 1: attach our input queue to the foreground thread so
    # SetForegroundWindow is permitted.
    ForegroundThreadID = GetWindowThreadProcessId(GetForegroundWindow(), None)
    ThisThreadID = GetWindowThreadProcessId(hwnd, None)
    if AttachThreadInput(ThisThreadID, ForegroundThreadID, True):
        BringWindowToTop(hwnd)
        SetForegroundWindow(hwnd)
        AttachThreadInput(ThisThreadID, ForegroundThreadID, False)
        if GetForegroundWindow() == hwnd:
            return True
    # Strategy 2: temporarily disable the foreground-lock timeout,
    # activate, then restore the saved timeout.
    timeout = ctypes.c_int()
    zero = ctypes.c_int(0)
    SystemParametersInfo(winconstants.SPI_GETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(timeout), 0)
    # BUG FIX: this statement previously built a bare argument tuple and
    # discarded it -- the SystemParametersInfo call itself was missing, so
    # the lock timeout was never cleared.
    SystemParametersInfo(winconstants.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(zero), winconstants.SPIF_SENDCHANGE)
    BringWindowToTop(hwnd)
    SetForegroundWindow(hwnd)
    SystemParametersInfo(winconstants.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(timeout), winconstants.SPIF_SENDCHANGE)
    if GetForegroundWindow() == hwnd:
        return True
    return False
def get_mouse_location():
    """Return the cursor position as an (x, y) pixel tuple."""
    pt = winconstants.POINT()
    ctypes.windll.user32.GetCursorPos(ctypes.byref(pt))
    return pt.x, pt.y
def mouse_click(button, direction, number):
    """Issue *number* mouse events for the given button and direction."""
    events = get_mouse_event_nums(button, direction)
    for _ in range(number):
        for event in events:
            ctypes.windll.user32.mouse_event(event, 0, 0, 0, 0)
def mouse_move(x=None, y=None, relative=False):
    """Move the cursor to (x, y), or by (x, y) when *relative* is True.

    Omitted coordinates default to the current position (absolute mode)
    or to zero displacement (relative mode).
    """
    cur_x, cur_y = get_mouse_location()
    if relative:
        dx = 0 if x is None else x
        dy = 0 if y is None else y
        ctypes.windll.user32.SetCursorPos(cur_x + dx, cur_y + dy)
        return
    new_x = cur_x if x is None else x
    new_y = cur_y if y is None else y
    ctypes.windll.user32.SetCursorPos(new_x, new_y)
def get_clipboard_contents():
    """Return the current clipboard text via the platform clipboard helpers."""
    return winclipboard.init_windows_clipboard()[1]()
def set_clipboard_contents(text):
    """Replace the clipboard text with str(*text*)."""
    return winclipboard.init_windows_clipboard()[0](str(text))
def get_mouse_event_nums(button, direction):
    """Map a (button, direction) pair to its Win32 mouse_event flag codes.

    Returns a list of event numbers, or None for unrecognised pairs.
    """
    events = {
        ('left', 'down'): [2],
        ('left', 'up'): [4],
        ('left', 'both'): [2, 4],
        ('right', 'down'): [8],
        ('right', 'up'): [16],
        ('right', 'both'): [8, 16],
    }
    return events.get((button, direction))
5171380 | <reponame>polyfemos/atavism<filename>atavism/video.py
from errno import EPERM, EACCES
import os
import sys
import subprocess
from tempfile import mkdtemp
import re
import math
def find_ffmpeg(binary_name='ffmpeg', skip_list=None, paths=None, silent=False):
    """ Function to find and return the full path to a suitable ffmpeg binary.
    If no suitable ffmpeg binary is found, a RuntimeError exception is raised.

    A candidate is rejected (and the search recurses with it added to
    *skip_list*) when it is not executable, lacks the "hls" protocol, or
    lacks the libx264 encoder.

    :param binary_name: The name of the binary to search for. Default is ffmpeg
    :param skip_list: List of binary files to skip.
    :param paths: Additional system paths to include in search.
    :param silent: Suppress diagnostic printing for rejected candidates.
    :return: Path to ffmpeg binary.
    """
    found = False
    if skip_list is None:
        skip_list = []
    # Extra user paths are searched before the standard locations.
    os_paths = ['/usr/bin', '/usr/local/bin']
    if paths is not None:
        if isinstance(paths, (list, tuple)):
            for p in reversed(paths):
                os_paths.insert(0, p)
        else:
            os_paths.insert(0, paths)
    for p in os_paths:
        poss = os.path.join(p, binary_name)
        if os.path.exists(poss) and os.path.isfile(poss) and poss not in skip_list:
            found = True
            break
    if not found:
        raise RuntimeError("\nNo binary '{}' has been found. Paths tried: {}".
                           format(binary_name, ','.join(os_paths)))
    # Check for hls support
    try:
        p = subprocess.Popen([poss, '-protocols'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
    except (IOError, OSError) as e:
        # EPERM/EACCES means the file exists but is not executable.
        if e.errno in (EPERM, EACCES):
            if not silent:
                print("\n'{}' is not an executable file.".format(poss))
            found = False
    if not found:
        # Reject this candidate and keep looking.
        skip_list.append(poss)
        return find_ffmpeg(binary_name=binary_name, skip_list=skip_list)
    if b'hls' not in out:
        if not silent:
            print("HLS was not found in the protocols list for '{}'.\n"\
                  "Without HLS support we can't use this {}.".format(poss, binary_name))
        skip_list.append(poss)
        return find_ffmpeg(binary_name=binary_name, skip_list=skip_list)
    # Check for libx264
    p = subprocess.Popen([poss, '-encoders'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if b'libx264' not in out:
        if not silent:
            print("libx264 was not found in the encoders list for '{}'.\n"\
                  "As we require libx264, we can't use this {}.".format(poss, binary_name))
        skip_list.append(poss)
        return find_ffmpeg(binary_name=binary_name, skip_list=skip_list)
    return poss
class BaseVideo(object):
    """Minimal wrapper around a video file on disk."""

    def __init__(self, source):
        self.source = source
        self.directory = os.path.dirname(source)

    def url(self):
        """Return the URL path under which the source file is served."""
        return "/{}".format(os.path.basename(self.source))

    def find_file(self, url):
        """Resolve *url* to a file inside this video's directory.

        Returns the filesystem path, or None when no such file exists.
        """
        relative = url[1:] if url.startswith('/') else url
        candidate = os.path.join(self.directory, relative)
        return candidate if os.path.exists(candidate) else None
class SimpleVideo(BaseVideo):
    """A plain, unsegmented video served directly from its source file."""
    def __init__(self, source):
        BaseVideo.__init__(self, source)
class HLSVideo(BaseVideo):
    """ We will attempt to create a temporary directory to contain the segments of an HLS
    stream. The files are removed when the instance that created them is deleted.
    """
    def __init__(self, source, tmp_base=None, ffmpeg=None):
        BaseVideo.__init__(self, source)
        self.cleanup = True  # remove the temp directory on deletion
        # Playlist filename: source basename with an .m3u8 extension.
        self.fn = os.path.splitext(os.path.basename(source))[0] + '.m3u8'
        self.directory = mkdtemp(dir=tmp_base or '/tmp')
        self.hls_filename = os.path.join(self.directory, self.fn)
        self.ffmpeg = ffmpeg or find_ffmpeg()
        self.streams = []          # parsed stream info dicts (video and audio)
        self.video_stream = None   # first video stream encountered
        self.meta = {}
        self.duration_data = {}
        self.hls_time = 10         # target segment length in seconds
        self.segments = 0          # segment count after create_hls()
        self.get_video_information()

    def url(self):
        """URL path of the HLS playlist."""
        return "/{}".format(self.fn)

    def __del__(self):
        # Best-effort cleanup of the temporary segment directory.
        if self.cleanup:
            # print("Removing directory {}".format(self.directory))
            for f in os.listdir(self.directory):
                os.unlink(os.path.join(self.directory, f))
            os.rmdir(self.directory)

    def create_hls(self, max_width=-1, max_height=-1):
        """Segment the source into HLS, optionally downscaling.

        Returns True when at least one segment was produced; on failure
        prints ffmpeg's output and returns False.
        """
        opts = []
        if self.needs_resize(max_width, max_height):
            opts.extend(['-vf', 'scale={}:{}'.format(*self._resized(max_width, max_height))])
        output, err = self._hls_command(opts)
        # Everything in the directory except the playlist is a segment.
        self.segments = len(os.listdir(self.directory)) - 1
        if self.segments > 0:
            return True
        print(output)
        print(err)
        return False

    def get_video_information(self):
        """Parse ffmpeg's stderr banner to populate self.streams.

        Each "Stream #x:y(lang): Type: ..." line becomes a dict; for video
        streams the fps and WxH size are extracted as well.  Note all
        values parsed from ffmpeg output are bytes, not str.
        """
        ignored, data = self._execute_ffmpeg([])
        for input in data.split(b'\nInput')[1:]:
            for l in [ln.strip() for ln in input.split(b'\n')]:
                if not l.startswith(b'Stream'):
                    continue
                info = re.match(b'^Stream #([0-9]\:[0-9])\(?([A-Za-z]{2,})?\)?: ([A-Za-z]+):', l)
                if info is None:
                    continue
                sinfo = {'type': info.group(3), 'n': info.group(1), 'lang': info.group(2)}
                # Comma-separated codec details after the "Type:" prefix.
                parts = b'.'.join(l.split(b': ')[2:]).split(b', ')
                if info.group(3) == b'Video':
                    # Remember the first video stream as the primary one.
                    if self.video_stream is None:
                        self.video_stream = sinfo
                    for p in parts:
                        if b'fps' in p:
                            sinfo['fps'] = float(p.replace(b'fps', b'').strip())
                        elif re.search(b'[0-9]{2,}x[0-9]{2,}', p):
                            sz = re.search(b'([0-9]{2,})x([0-9]{2,})', p)
                            sinfo['width'] = int(sz.group(1))
                            sinfo['height'] = int(sz.group(2))
                self.streams.append(sinfo)

    def video_width(self):
        """Width of the primary video stream in pixels (-1.0 if unknown)."""
        if self.video_stream is None:
            return -1.0
        return float(self.video_stream.get('width', -1))

    def video_height(self):
        """Height of the primary video stream in pixels (-1.0 if unknown)."""
        if self.video_stream is None:
            return -1.0
        return float(self.video_stream.get('height', -1))

    def has_audio(self):
        """True when at least one audio stream was detected."""
        for s in self.streams:
            if s.get('type') == b'Audio':
                return True
        return False

    def audio_streams(self):
        """Number of audio streams detected."""
        n = 0
        for s in self.streams:
            if s.get('type') == b'Audio':
                n += 1
        return n

    def needs_resize(self, max_width=-1, max_height=-1):
        """True when either dimension exceeds the given maximum (-1 = no limit)."""
        if max_width == -1 and max_height == -1:
            return False
        if max_width != -1 and self.video_width() > max_width:
            return True
        if max_height != -1 and self.video_height() > max_height:
            return True
        return False

    def _resized(self, max_width, max_height):
        """Return (w, h) scaled to fit the limits, keeping aspect ratio.

        Both dimensions are rounded down to even numbers, as required by
        the libx264 encoder.
        """
        rw = max_width / self.video_width() if max_width != -1 else 1.0
        rh = max_height / self.video_height() if max_height != -1 else 1.0
        ratio = min(rw, rh)
        w = int(math.floor(self.video_width() * ratio))
        h = int(math.floor(self.video_height() * ratio))
        if w % 2 != 0:
            w -= 1
        if h % 2 != 0:
            h -= 1
        return w, h

    def _execute_ffmpeg(self, *args):
        """Run ffmpeg against the source and return (stdout, stderr) bytes."""
        cmd_args = [self.ffmpeg, '-i', self.source]
        cmd_args.extend(*args)
        p = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p.communicate()

    def _hls_command(self, *opts):
        """Run the HLS segmenting command with extra *opts* inserted."""
        args = ['-hls_time', str(self.hls_time), '-hls_list_size', '0', '-f', 'hls']
        args.extend(*opts)
        args += [self.hls_filename]
        return self._execute_ffmpeg(args)
| StarcoderdataPython |
4925059 | <filename>pypoptools/pypoptesting/framework/test_util.py
import datetime
import logging
import multiprocessing as mp
import os
import pathlib
import sys
import tempfile
import time
from typing import Callable
from .node import Node
# Factory signature used by the runner to spawn a node in a given datadir.
CreateNodeFunction = Callable[[int, pathlib.Path], Node]
# Exit codes a test process may return.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
# (reset, set) ANSI escape pairs; left empty unless the terminal supports them.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except:
    # Fallback ASCII glyphs when stdout cannot encode the unicode marks.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "
# ANSI colors work on any non-Windows OS, and on Windows 10 build 14393+.
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
    if os.name == 'nt':
        import ctypes
        kernel32 = ctypes.windll.kernel32
        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
        STD_OUTPUT_HANDLE = -11
        STD_ERROR_HANDLE = -12
        # Enable ascii color control to stdout
        stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
        stdout_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
        kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        # Enable ascii color control to stderr
        stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
        stderr_mode = ctypes.c_int32()
        kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
        kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    GREEN = ('\033[0m', '\033[0;32m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
class TestResult:
    """Outcome of one test: its name, a status string and its runtime in seconds.

    `padding` is assigned by the reporter so names line up in column output.
    """
    def __init__(self, name, status, time):
        self.name = name
        self.status = status  # "Passed", "Skipped" or "Failed"
        self.time = time
        self.padding = 0

    def sort_key(self):
        """Sort results Passed < Skipped < Failed, alphabetically within a group."""
        order = {"Passed": 0, "Skipped": 1, "Failed": 2}
        # Fallback for unknown statuses: the original returned None here, which
        # made list.sort() raise TypeError when comparing keys.
        return order.get(self.status, 3), self.name.lower()

    def __repr__(self):
        glyph = None
        color = None
        if self.status == "Passed":
            color = GREEN
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE
        return color[1] + "%s | %s%s | %s s\n" % (
            self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]

    @property
    def was_successful(self):
        """A test counts as successful unless it Failed (skips count as success)."""
        return self.status != "Failed"
class TestHandler:
    """
    Trigger the test scripts passed in via the list.

    Tests are launched as multiprocessing.Process children (up to
    num_tests_parallel at once); get_next() blocks until one finishes and
    returns its TestResult plus captured output.
    """
    def __init__(self, *, create_node, num_tests_parallel, tmpdir, test_list, timeout_duration,
                 use_term_control=True):
        assert num_tests_parallel >= 1
        self.create_node = create_node          # CreateNodeFunction passed to each test
        self.num_jobs = num_tests_parallel      # max concurrently running tests
        self.parent = tmpdir                    # parent dir for per-test datadirs
        self.timeout_duration = timeout_duration  # per-test wall-clock limit (s)
        self.test_list = test_list              # tests not yet started
        self.num_running = 0
        self.jobs = []                          # (test, start_time, process, log_out, log_err)
        self.use_term_control = use_term_control  # whether to emit progress dots / clear lines
    def get_next(self):
        """Start tests up to the parallelism limit, then block until one ends.

        Returns (TestResult, test.dir, stdout, stderr).  `test.dir` is an
        attribute presumably set by test.main() — not visible here; confirm.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            # NOTE(review): these spooled files are handed along with the job,
            # but nothing redirects the child's stdout/stderr into them, so
            # the captured output is always empty — confirm intended.
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2 ** 16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2 ** 16)
            p = mp.Process(target=lambda: test.main(self.create_node, self.parent))
            p.start()
            self.jobs.append((test,
                              time.time(),
                              p,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        # Print remaining running jobs when all jobs have been started.
        if not self.test_list:
            print("Remaining jobs: [{}]".format(", ".join(j[0].name() for j in self.jobs)))
        dot_count = 0
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (test, start_time, proc, log_out, log_err) = job
                name = type(test).__name__
                if int(time.time() - start_time) > self.timeout_duration:
                    # Timeout individual tests if timeout is specified (to stop
                    # tests hanging and not providing useful output).
                    proc.kill()
                if not proc.is_alive():
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Any stderr output downgrades a passing exit code to Failed.
                    if proc.exitcode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.exitcode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    if self.use_term_control:
                        # Erase the progress dots printed while waiting.
                        clearline = '\r' + (' ' * dot_count) + '\r'
                        print(clearline, end='', flush=True)
                        dot_count = 0
                    return TestResult(name, status, int(time.time() - start_time)), test.dir, stdout, stderr
            if self.use_term_control:
                print('.', end='', flush=True)
                dot_count += 1
    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]
        for proc in procs:
            proc.kill()
        # NOTE(review): multiprocessing.Process.close() raises ValueError if the
        # process is still running; kill() is asynchronous, so a join() before
        # close() may be needed — confirm.
        for proc in procs:
            proc.close()
def run_tests(test_list, create_node: CreateNodeFunction, timeout=float('inf')):
    """Run every test in `test_list` sequentially and exit with a status code.

    Prints per-test progress and a final summary table, then calls sys.exit(0)
    when everything passed/skipped and sys.exit(1) otherwise.  Exits early with
    code 1 when pypoptools.pypopminer is not importable.
    """
    try:
        import pypoptools.pypopminer
    except ImportError:
        logging.error("pypopminer module not available.")
        sys.exit(1)
    # 'fork' start method: children inherit the parent's state (POSIX only).
    mp.set_start_method('fork')
    timestamp = datetime.datetime.now().strftime("%y%m%d%H%M%S")
    tmpdir = tempfile.mkdtemp(prefix="pypoptesting_{}_".format(timestamp))
    # num_tests_parallel=1: tests run one at a time despite the handler's
    # parallel capability.
    job_queue = TestHandler(
        create_node=create_node,
        tmpdir=tmpdir,
        num_tests_parallel=1,
        test_list=test_list,
        timeout_duration=timeout,
    )
    test_results = []
    test_count = len(test_list)
    # Longest test name, used to align the summary columns.
    max_len_name = len(max(test_list, key=lambda x: len(x.name())).name())
    start_time = time.time()
    for i in range(test_count):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % (done_str))
        else:
            # Failures dump the captured output immediately.
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
    print_results(test_results, max_len_name, (int(time.time() - start_time)))
    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)
    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print a sorted, column-aligned summary table of all test results."""
    header = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
    test_results.sort(key=TestResult.sort_key)
    rows = []
    all_passed = True
    time_sum = 0
    for result in test_results:
        all_passed = all_passed and result.was_successful
        time_sum += result.time
        result.padding = max_len_name
        rows.append(str(result))
    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    report = header + "".join(rows)
    if not all_passed:
        report += RED[1]
    report += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + \
              BOLD[0]
    if not all_passed:
        report += RED[0]
    report += "Runtime: %s s\n" % (runtime)
    print(report)
| StarcoderdataPython |
11235272 | <filename>zfit/models/physics.py
# Copyright (c) 2019 zfit
from typing import Type, Any
import tensorflow as tf
import tensorflow_probability.python.distributions as tfd
import numpy as np
import zfit
from zfit import ztf
from ..core.basepdf import BasePDF
from ..core.limits import ANY_UPPER, ANY_LOWER, Space
from ..settings import ztypes
from ..util import ztyping
def _powerlaw(x, a, k):
    """Power law a * x**k evaluated with TensorFlow ops."""
    return a * tf.pow(x, k)
def crystalball_func(x, mu, sigma, alpha, n):
    """Unnormalized Crystal Ball shape: Gaussian core with one power-law tail.

    The sign of `alpha` selects the tail side: `t` is multiplied by
    tf.sign(alpha), so a negative alpha mirrors the shape (tail on the right).
    """
    t = (x - mu) / sigma * tf.sign(alpha)
    abs_alpha = tf.abs(alpha)
    # Tail coefficients A and B of the standard CB parameterization.
    a = tf.pow((n / abs_alpha), n) * tf.exp(-0.5 * tf.square(alpha))
    b = (n / abs_alpha) - abs_alpha
    cond = tf.less(t, -abs_alpha)
    # func = tf.where(cond, tf.exp(-0.5 * tf.square(t)), _powerlaw(b - t, a, -n))
    # safe_where with value_safer — presumably to avoid NaNs/gradient problems
    # from evaluating the power law outside its domain; see zfit's ztf.safe_where.
    func = ztf.safe_where(cond,
                          lambda t: _powerlaw(b - t, a, -n),
                          lambda t: tf.exp(-0.5 * tf.square(t)),
                          values=t, value_safer=lambda t: tf.ones_like(t) * (b - 2))
    return func
def double_crystalball_func(x, mu, sigma, alphal, nl, alphar, nr):
    """Double-sided Crystal Ball: left CB below mu, mirrored CB (alpha < 0) above."""
    cond = tf.less(x, mu)
    func = tf.compat.v1.where(cond,
                              crystalball_func(x, mu, sigma, alphal, nl),
                              # Negated alpha flips the tail to the right side.
                              crystalball_func(x, mu, sigma, -alphar, nr))
    return func
# def _python_crystalball_integral(limits, params): # not working with tf, used for autoconvert
# mu = params['mu']
# sigma = params['sigma']
# alpha = params['alpha']
# n = params['n']
#
# (lower,), (upper,) = limits.limits
#
# sqrt_pi_over_two = np.sqrt(np.pi / 2)
# sqrt2 = np.sqrt(2)
#
# result = 0.0
# use_log = tf.abs(n - 1.0) < 1.0e-05
#
# abs_sigma = tf.abs(sigma)
# abs_alpha = tf.abs(alpha)
#
# tmin = (lower - mu) / abs_sigma
# tmax = (upper - mu) / abs_sigma
#
# if alpha < 0:
# tmin, tmax = -tmax, -tmin
#
# if tmin >= -abs_alpha:
# result += abs_sigma * sqrt_pi_over_two * (tf.erf(tmax / sqrt2)
# - tf.erf(tmin / sqrt2))
#
# elif tmax <= -abs_alpha:
# a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
#
# b = n / abs_alpha - abs_alpha
#
# if use_log:
# result += a * abs_sigma * (tf.log(b - tmin) - tf.log(b - tmax))
# else:
# result += a * abs_sigma / (1.0 - n) * (1.0 / (tf.pow(b - tmin, n - 1.0))
# - 1.0 / (tf.pow(b - tmax, n - 1.0)))
# else:
# a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
# b = n / abs_alpha - abs_alpha
#
# if use_log:
# term1 = a * abs_sigma * (tf.log(b - tmin) - tf.log(n / abs_alpha))
#
# else:
# term1 = a * abs_sigma / (1.0 - n) * (1.0 / (tf.pow(b - tmin, n - 1.0))
# - 1.0 / (tf.pow(n / abs_alpha, n - 1.0)))
#
# term2 = abs_sigma * sqrt_pi_over_two * (tf.erf(tmax / sqrt2)
# - tf.erf(-abs_alpha / sqrt2))
#
# result += term1 + term2
#
# return result
# created with the help of TensorFlow autograph used on python code converted from ShapeCB of RooFit
def crystalball_integral(limits, params, model):
    """Analytic integral of the Crystal Ball shape over `limits`.

    Machine-converted (TF autograph) from RooFit's ShapeCB — the nested
    if_true*/if_false* closures mirror the original Python branches; see the
    commented-out reference implementation above.  `model` is unused but part
    of zfit's integral-function signature.
    """
    mu = params['mu']
    sigma = params['sigma']
    alpha = params['alpha']
    n = params['n']
    (lower,), (upper,) = limits.limits
    lower = lower[0]  # obs number 0
    upper = upper[0]
    sqrt_pi_over_two = np.sqrt(np.pi / 2)
    sqrt2 = np.sqrt(2)
    result = 0.0
    # Near n == 1 the power-law antiderivative degenerates to a logarithm.
    use_log = tf.less(tf.abs(n - 1.0), 1e-05)
    abs_sigma = tf.abs(sigma)
    abs_alpha = tf.abs(alpha)
    # Integration bounds in units of sigma, relative to the mean.
    tmin = (lower - mu) / abs_sigma
    tmax = (upper - mu) / abs_sigma
    def if_true():
        return tf.negative(tmin), tf.negative(tmax)
    def if_false():
        return tmax, tmin
    # Mirror the bounds when the tail is on the right (alpha < 0).
    tmax, tmin = tf.cond(pred=tf.less(alpha, 0), true_fn=if_true, false_fn=if_false)
    def if_true_4():
        # Entire range in the Gaussian core: plain erf difference.
        result_5, = result,
        result_5 += abs_sigma * sqrt_pi_over_two * (tf.math.erf(tmax / sqrt2) - tf.math.erf(tmin / sqrt2))
        return result_5
    def if_false_4():
        result_6 = result
        def if_true_3():
            # Entire range in the power-law tail.
            result_3 = result_6
            a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
            b = n / abs_alpha - abs_alpha
            def if_true_1():
                result_1, = result_3,
                result_1 += a * abs_sigma * (tf.math.log(b - tmin) - tf.math.log(b - tmax))
                return result_1
            def if_false_1():
                result_2, = result_3,
                result_2 += a * abs_sigma / (1.0 - n) * (
                    1.0 / tf.pow(b - tmin, n - 1.0) - 1.0 / tf.pow(b - tmax, n - 1.0))
                return result_2
            result_3 = tf.cond(pred=use_log, true_fn=if_true_1, false_fn=if_false_1)
            return result_3
        def if_false_3():
            # Range straddles the junction: tail part (term1) + core part (term2).
            result_4, = result_6,
            a = tf.pow(n / abs_alpha, n) * tf.exp(-0.5 * tf.square(abs_alpha))
            b = n / abs_alpha - abs_alpha
            def if_true_2():
                term1 = a * abs_sigma * (tf.math.log(b - tmin) - tf.math.log(n / abs_alpha))
                return term1
            def if_false_2():
                term1 = a * abs_sigma / (1.0 - n) * (
                    1.0 / tf.pow(b - tmin, n - 1.0) - 1.0 / tf.pow(n / abs_alpha, n - 1.0))
                return term1
            term1 = tf.cond(pred=use_log, true_fn=if_true_2, false_fn=if_false_2)
            term2 = abs_sigma * sqrt_pi_over_two * (
                tf.math.erf(tmax / sqrt2) - tf.math.erf(-abs_alpha / sqrt2))
            result_4 += term1 + term2
            return result_4
        result_6 = tf.cond(pred=tf.less_equal(tmax, -abs_alpha), true_fn=if_true_3, false_fn=if_false_3)
        return result_6
    # if_false_4()
    result = tf.cond(pred=tf.greater_equal(tmin, -abs_alpha), true_fn=if_true_4, false_fn=if_false_4)
    return result
def double_crystalball_integral(limits, params, model):
    """Analytic integral of the double-sided Crystal Ball over `limits`.

    The range is split at `mu`; each half is delegated to
    `crystalball_integral` with that side's tail parameters (the right tail
    reuses the single CB with a negated alpha).  A half whose sub-range is
    empty (mu outside `limits` on that side) contributes zero.
    """
    mu = params['mu']
    sigma = params['sigma']
    (lower,), (upper,) = limits.limits
    lower = lower[0]  # obs number 0
    upper = upper[0]
    limits_left = Space(limits.obs, (lower, mu))
    limits_right = Space(limits.obs, (mu, upper))
    params_left = dict(mu=mu, sigma=sigma, alpha=params["alphal"],
                       n=params["nl"])
    params_right = dict(mu=mu, sigma=sigma, alpha=-params["alphar"],
                        n=params["nr"])
    # Fixes vs. the original: tf.cond requires *callables* for both branches
    # (it was passed the float 0. and an already-evaluated tensor, which
    # raises a TypeError), and crystalball_integral takes `model` as a third
    # argument that was omitted.  ztf.constant uses zfit's default float
    # dtype, which must match the integral branch for tf.cond.
    left = tf.cond(pred=tf.less(mu, lower),
                   true_fn=lambda: ztf.constant(0.),
                   false_fn=lambda: crystalball_integral(limits_left, params_left, model))
    right = tf.cond(pred=tf.greater(mu, upper),
                    true_fn=lambda: ztf.constant(0.),
                    false_fn=lambda: crystalball_integral(limits_right, params_right, model))
    return left + right
class CrystalBall(BasePDF):
    _N_OBS = 1  # one-dimensional observable
    def __init__(self, mu: ztyping.ParamTypeInput, sigma: ztyping.ParamTypeInput,
                 alpha: ztyping.ParamTypeInput, n: ztyping.ParamTypeInput,
                 obs: ztyping.ObsTypeInput, name: str = "CrystalBall", dtype: Type = ztypes.float):
        """`Crystal Ball shaped PDF`__. A combination of a Gaussian with an powerlaw tail.
        The function is defined as follows:
        .. math::
            f(x;\\mu, \\sigma, \\alpha, n) =  \\begin{cases} \\exp(- \\frac{(x - \\mu)^2}{2 \\sigma^2}),
            & \\mbox{for}\\frac{x - \\mu}{\\sigma} \\geqslant -\\alpha \\newline
            A \\cdot (B - \\frac{x - \\mu}{\\sigma})^{-n}, & \\mbox{for }\\frac{x - \\mu}{\\sigma}
             < -\\alpha \\end{cases}
        with
        .. math::
            A = \\left(\\frac{n}{\\left| \\alpha \\right|}\\right)^n \\cdot
            \\exp\\left(- \\frac {\\left|\\alpha \\right|^2}{2}\\right)
            B = \\frac{n}{\\left| \\alpha \\right|}  - \\left| \\alpha \\right|
        Args:
            mu (`zfit.Parameter`): The mean of the gaussian
            sigma (`zfit.Parameter`): Standard deviation of the gaussian
            alpha (`zfit.Parameter`): parameter where to switch from a gaussian to the powertail
            n (`zfit.Parameter`): Exponent of the powertail
            obs (:py:class:`~zfit.Space`):
            name (str):
            dtype (tf.DType):
        .. _CBShape: https://en.wikipedia.org/wiki/Crystal_Ball_function
        __CBShape_
        """
        # Parameters are registered with BasePDF through the params mapping.
        params = {'mu': mu,
                  'sigma': sigma,
                  'alpha': alpha,
                  'n': n}
        super().__init__(obs=obs, dtype=dtype, name=name, params=params)
    def _unnormalized_pdf(self, x):
        """Evaluate the (unnormalized) CB shape; normalization is handled by BasePDF."""
        mu = self.params['mu']
        sigma = self.params['sigma']
        alpha = self.params['alpha']
        n = self.params['n']
        x = x.unstack_x()
        return crystalball_func(x=x, mu=mu, sigma=sigma, alpha=alpha, n=n)
crystalball_integral_limits = Space.from_axes(axes=(0,), limits=(((ANY_LOWER,),), ((ANY_UPPER,),)))
# TODO uncomment, dependency: bug in TF (31.1.19) # 25339 that breaks gradient of resource var in cond
# CrystalBall.register_analytic_integral(func=crystalball_integral, limits=crystalball_integral_limits)
class DoubleCB(BasePDF):
    _N_OBS = 1  # one-dimensional observable
    def __init__(self, mu: ztyping.ParamTypeInput, sigma: ztyping.ParamTypeInput,
                 alphal: ztyping.ParamTypeInput, nl: ztyping.ParamTypeInput,
                 alphar: ztyping.ParamTypeInput, nr: ztyping.ParamTypeInput,
                 obs: ztyping.ObsTypeInput, name: str = "DoubleCB", dtype: Type = ztypes.float):
        """`Double sided Crystal Ball shaped PDF`__. A combination of two CB using the **mu** (not a frac).
        on each side.
        The function is defined as follows:
        .. math::
            f(x;\\mu, \\sigma, \\alpha_{L}, n_{L}, \\alpha_{R}, n_{R}) =  \\begin{cases}
            A_{L} \\cdot (B_{L} - \\frac{x - \\mu}{\\sigma})^{-n},
             & \\mbox{for }\\frac{x - \\mu}{\\sigma} < -\\alpha_{L} \\newline
            \\exp(- \\frac{(x - \\mu)^2}{2 \\sigma^2}),
             & -\\alpha_{L} \\leqslant \\mbox{for}\\frac{x - \\mu}{\\sigma} \\leqslant \\alpha_{R} \\newline
            A_{R} \\cdot (B_{R} - \\frac{x - \\mu}{\\sigma})^{-n},
             & \\mbox{for }\\frac{x - \\mu}{\\sigma} > \\alpha_{R}
            \\end{cases}
        with
        .. math::
            A_{L/R} = \\left(\\frac{n_{L/R}}{\\left| \\alpha_{L/R} \\right|}\\right)^n_{L/R} \\cdot
            \\exp\\left(- \\frac {\\left|\\alpha_{L/R} \\right|^2}{2}\\right)
            B_{L/R} = \\frac{n_{L/R}}{\\left| \\alpha_{L/R} \\right|}  - \\left| \\alpha_{L/R} \\right|
        Args:
            mu (`zfit.Parameter`): The mean of the gaussian
            sigma (`zfit.Parameter`): Standard deviation of the gaussian
            alphal (`zfit.Parameter`): parameter where to switch from a gaussian to the powertail on the left
            side
            nl (`zfit.Parameter`): Exponent of the powertail on the left side
            alphar (`zfit.Parameter`): parameter where to switch from a gaussian to the powertail on the right
            side
            nr (`zfit.Parameter`): Exponent of the powertail on the right side
            obs (:py:class:`~zfit.Space`):
            name (str):
            dtype (tf.DType):
        """
        # Parameters are registered with BasePDF through the params mapping.
        params = {'mu': mu,
                  'sigma': sigma,
                  'alphal': alphal,
                  'nl': nl,
                  'alphar': alphar,
                  'nr': nr}
        super().__init__(obs=obs, dtype=dtype, name=name, params=params)
    def _unnormalized_pdf(self, x):
        """Evaluate the (unnormalized) double CB shape; normalization is in BasePDF."""
        mu = self.params['mu']
        sigma = self.params['sigma']
        alphal = self.params['alphal']
        nl = self.params['nl']
        alphar = self.params['alphar']
        nr = self.params['nr']
        x = x.unstack_x()
        return double_crystalball_func(x=x, mu=mu, sigma=sigma, alphal=alphal, nl=nl,
                                       alphar=alphar, nr=nr)
# DoubleCB.register_analytic_integral(func=double_crystalball_integral, limits=crystalball_integral_limits)
if __name__ == '__main__':
    # Ad-hoc smoke test / demo: build a CrystalBall pdf, integrate it and plot it.
    mu = ztf.constant(0)
    sigma = ztf.constant(0.5)
    alpha = ztf.constant(3)
    n = ztf.constant(1)
    # res = crystalball_func(np.random.random(size=100), mu, sigma, alpha, n)
    # int1 = crystalball_integral(limits=zfit.Space(obs='obs1', limits=(-3, 5)),
    #                             params={'mu': mu, "sigma": sigma, "alpha": alpha, "n": n})
    # tensorflow.contrib only exists in TF 1.x.
    from tensorflow.contrib import autograph
    import matplotlib.pyplot as plt
    new_code = autograph.to_code(crystalball_integral)
    obs = zfit.Space(obs='obs1', limits=(-3, 1))
    cb1 = CrystalBall(mu, sigma, alpha, n, obs=obs)
    res = cb1.pdf(np.random.random(size=100))
    int1 = cb1.integrate(limits=(-0.01, 2), norm_range=obs)
    # tf.add_check_numerics_ops()
    x = np.linspace(-5, 1, num=1000)
    vals = cb1.pdf(x=x)
    y = zfit.run(vals)[0]
    plt.plot(x, y)
    plt.show()
    # print(new_code)
    print(zfit.run(res))
    print(zfit.run(int1))
| StarcoderdataPython |
6614790 | from __future__ import print_function
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import config
from utils.parsers.transnet_parser import TransnetParser
from utils.scrapers.osm_nodes_scraper import OsmNodesScraper
def scrape_nodes():
    """Filter transnet nodes (380 kV, configured region) and scrape OSM data for them."""
    region = config.config_params['loc']
    min_voltage = 380000
    max_voltage = 380000
    print('Parsing transnet data...')
    parser = TransnetParser()
    print('Filtering by region : %s' % region)
    parser.filter_by_regions(regions='config')
    print('Filtering by voltage,\n min voltage : %d \n max voltage : %d' % (min_voltage, max_voltage))
    parser.filter_by_min_max_voltage(min_voltage=min_voltage, max_voltage=max_voltage)
    nodes = parser.nodes
    print('Total nodes : %d' % len(nodes))
    print('done..\n')
    print('Scraping osm data...')
    scraper = OsmNodesScraper(nodes, region)
    scraped_count = scraper.scrape()
    print('Scraped %d nodes..' % scraped_count)
    print('done..')
print('done..')
if __name__ == '__main__':
scrape_nodes()
| StarcoderdataPython |
3245985 | # Generated by Django 2.0.1 on 2018-04-09 05:59
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many `hit` field (users who hit an article) to board.Article."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('board', '0003_auto_20180226_2047'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='hit',
            # related_name avoids clashing with other user<->article relations.
            field=models.ManyToManyField(related_name='hit_article_set', to=settings.AUTH_USER_MODEL),
        ),
    ]
| StarcoderdataPython |
9789181 | <reponame>dimitar-daskalov/TopStore
from datetime import datetime
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from TopStore.store.models import Order
UserModel = get_user_model()
class SignUpUserTests(TestCase):
    """Tests for the 'sign up user' view.

    NOTE(review): the POST requests below send no form data, and the user being
    asserted on is the one created in setUp(), so several of these tests do not
    actually exercise a successful sign-up — confirm against the view/form.
    """
    def setUp(self):
        # A pre-existing user, shared by all tests in this class.
        self.user = UserModel.objects.create_user(
            email='<EMAIL>',
            username='test_user',
            password='<PASSWORD>',
        )
    def test_SignUpTemplate_expectToBeCorrect(self):
        # Anonymous GET renders the sign-up template.
        response = self.client.get(reverse('sign up user'))
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, 'account/sign_up_user.html')
    def test_SignUpWhenAuthenticatedUser_expectToRedirect(self):
        # Logged-in users are redirected away from the sign-up page.
        self.client.force_login(self.user)
        response = self.client.get(reverse('sign up user'))
        self.assertEqual(302, response.status_code)
    def test_SignUpWhenNotAuthenticatedUser_expectToSignUpSuccessfully(self):
        # NOTE(review): empty POST; asserts only that the setUp user still exists.
        response = self.client.post(reverse('sign up user'))
        self.assertEqual(self.user.email, UserModel.objects.get(pk=self.user.id).email)
        self.assertEqual(200, response.status_code)
    def test_SignUpWhenNotAuthenticatedUserSignUpSuccessfully_expectToCreateProfileSuccessfully(self):
        # Checks the profile created alongside the user shares its primary key.
        response = self.client.post(reverse('sign up user'))
        self.assertEqual(self.user.email, UserModel.objects.get(pk=self.user.id).email)
        self.assertEqual(self.user.id, UserModel.objects.get(pk=self.user.id).profile.pk)
        self.assertEqual(200, response.status_code)
    def test_SignUpWhenNotAuthenticatedUserSignUpSuccessfully_expectToCreateOrGetIncompleteOrderAndRedirect(self):
        self.client.force_login(self.user)
        response = self.client.get(reverse('sign up user'))
        # Creates the incomplete order directly, then asserts it is the one found.
        order = Order.objects.create(
            date_ordered=datetime.now(),
            is_completed=False,
            user=self.user,
        )
        self.assertEqual(order, Order.objects.filter(user=self.user, is_completed=False)[0])
        self.assertEqual(302, response.status_code)
| StarcoderdataPython |
1691589 | # -*- coding: utf-8 -*-
"""
SlideSpeechConverter.py
a component of SlideSpeech.py
Extract speaker notes from .odp or .ppt file
Prepare script.txt for SlideSpeech
Prepare convert.bat to generate audio via text-to-speech
Output HTML wrappers for slide images and audio
Prepare makeVid.bat to generate video
Copyright (c) 2011 <NAME>
MIT License: see LICENSE.txt
20110825 Add version to title bar
20110901 Add direct to video output
20110901 Switch to jpg, mklink with /h
20110902 Switch to jpg for Windows and png for Mac
20110907 More tweaks to joinContents
20110909 Allow over 20 slides in MP4Box to cat for Mac
20110910 Coping with unavailable mklink in Windows and path names containing spaces
20110913 Remove [] from script output and wrap ctypes import with win32 test
20110915 Added boilerplate script comments including version number
20110916 Read Unicode
20110917 Write out bits of Question/Answer/Response
20111118 Show image along with question. Requires slide with comment first.
Example:
Comment on Slide4
[questions=on]
Question for slide 4:
Answer 1 ; Response 1
[questions=off]
NOTE: last slide must not have questions
img1.png > img1.htm > img1.mp3
[questions=on]
How many slides have we seen? > q/img1q1.htm > q/img1q1.mp3
One ;; > q/img1q1a1.mp3
Two ;; > q/img1q1a1.mp3
What slide is next? > q/img1q2.htm > q/img1q2.mp3
Third ;; > q/img1q2a1.mp3
Fourth; No, just the third. > q/img1q2a2.mp3, > q/img1q2r2.mp3
[questions=off]
20111112 If script has [Source=http:// ...] add this link to the question page
20111121 Turn off debug2.txt and put quotes around calls in makeVid.bat
20111128 Working Linux version once dependencies are installed
Linux dependencies include:
sox
ffmpeg
mencode
espeak
20111205 Allow for direct script and image creation from PowerPoint files
20111206 Renamed SlideSpeech Converter
20111207 Changed to using mencoder to create .avi files for makeVid Windows
"""
__version__ = "0.1.31"
import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
from ConfigParser import ConfigParser
import codecs
import easygui
import math
import os
import os.path
import shutil
import scriptParser
import stat
import subprocess
import sys
import time
if sys.platform.startswith("win"):
import win32com.client
import webbrowser
from zipfile import ZipFile
def ensure_dir(d):
    """Create directory `d` (with parents) if it does not already exist.

    Uses try/except instead of an exists()-then-makedirs() check so that a
    concurrent creation of the same path between the two calls cannot raise
    (Python-2 compatible; exist_ok is not available here).
    """
    try:
        os.makedirs(d)
    except OSError:
        # Re-raise real failures (permissions, a file in the way, ...).
        if not os.path.isdir(d):
            raise
# Find location of Windows common application data files for odp2ss.ini
iniDirectory = None
if sys.platform.startswith("win"):
import ctypes
from ctypes import wintypes, windll
CSIDL_COMMON_APPDATA = 35
_SHGetFolderPath = windll.shell32.SHGetFolderPathW
_SHGetFolderPath.argtypes = [wintypes.HWND,
ctypes.c_int,
wintypes.HANDLE,
wintypes.DWORD, wintypes.LPCWSTR]
path_buf = wintypes.create_unicode_buffer(wintypes.MAX_PATH)
result = _SHGetFolderPath(0, CSIDL_COMMON_APPDATA, 0, 0, path_buf)
iniDirectory = path_buf.value+os.sep+"SlideSpeech"
else:
iniDirectory = os.path.expanduser('~')+os.sep+".SlideSpeech"
ensure_dir(iniDirectory)
if sys.platform.startswith("win"):
imageFileSuffix = ".jpg"
else:
imageFileSuffix = ".png"
## Obtain odpFile name and directory
# Check for last .odp file in config file
if sys.platform.startswith("win"):
lastOdpFile = '~/*.ppt*'
else:
lastOdpFile = '~/*.odp'
config = ConfigParser()
try:
config.read(iniDirectory+os.sep+'odp2ss.ini')
lastOdpFile = config.get("Files","lastOdpFile")
except:
config.add_section("Files")
config.set("Files","lastOdpFile","")
with open(iniDirectory+os.sep+'odp2ss.ini', 'wb') as configfile:
config.write(configfile)
if not os.path.isfile(lastOdpFile):
lastOdpFile = None
if sys.platform.startswith("win"):
odpFilePath = easygui.fileopenbox(title="SlideSpeech from PPT Converter "+__version__, msg="Select a .ppt file",
default=lastOdpFile, filetypes=None)
else:
odpFilePath = easygui.fileopenbox(title="SlideSpeech from ODP Converter "+__version__, msg="Select an .odp file",
default=lastOdpFile, filetypes=None)
if odpFilePath == None:
sys.exit()
(odpFileDirectory, odpFile) = os.path.split(odpFilePath)
(odpName, odpSuffix) = odpFile.split(".")
## Find or create list of .png or .jpg files
odpFileSubdirectory = odpFileDirectory+os.sep+odpName
# Create a subdirectory for generated files (if needed)
ensure_dir(odpFileSubdirectory)
scriptAndImagesCreated = False
if sys.platform.startswith("win") and odpSuffix.startswith("ppt"):
# create .jpg files
slideNotes = []
try:
Application = win32com.client.Dispatch("PowerPoint.Application")
except:
easygui.msgbox("PowerPoint not available.")
sys.exit()
Application.Visible = True
Presentation = Application.Presentations.Open(odpFilePath)
onSlide = 0
for Slide in Presentation.Slides:
imageName = "Slide" + str(onSlide) + ".jpg"
onSlide += 1
Slide.Export(odpFileSubdirectory+os.sep+imageName,"JPG",800,600)
for Shape in Slide.NotesPage.Shapes:
if Shape.HasTextFrame:
if Shape.TextFrame.HasText:
text = Shape.TextFrame.TextRange.Text
if not text.isdigit():
slideNotes.append(text)
Application.Quit()
# Look for .jpg files (slide images) in the odpName subdirectory
dir = os.listdir(odpFileSubdirectory)
imageFileList = [file for file in dir if file.lower().endswith(imageFileSuffix)]
outFile = open(odpFileSubdirectory+os.sep+"script.txt","w")
onSlide = 0
for item in slideNotes:
imageName = "Slide" + str(onSlide) + ".jpg\n"
onSlide += 1
outFile.write(imageName)
outFile.write(item + "\n\n")
outFile.close()
if ((0 != len(odpFile)) and (os.path.exists(odpFilePath))):
# Save file name to config file
config.set("Files","lastOdpFile",odpFilePath)
with open(iniDirectory+os.sep+'odp2ss.ini', 'wb') as configfile:
config.write(configfile)
scriptAndImagesCreated = True
else:
# Look for .jpg files (slide images) in the odpName subdirectory
dir = os.listdir(odpFileSubdirectory)
imageFileList = [file for file in dir if file.lower().endswith(imageFileSuffix)]
# If no image files found there ...
if len(imageFileList)==0:
# ... look for image files in odpFileDirectory and copy to odpName subdirectory
dir = os.listdir(odpFileDirectory)
imageFileList = [file for file in dir if file.lower().endswith(imageFileSuffix)]
# If still no image files, request some.
if len(imageFileList)==0:
easygui.msgbox("Need some slide image files for this presentation.\n.jpg for Windows or .png for Mac OSX.")
sys.exit()
else:
for file in imageFileList:
shutil.copy(odpFileDirectory+os.sep+file, odpFileSubdirectory)
# Find minimum value for slide number for linking to First Slide
# Find maximum value for slide number for linking to Last Slide
# Find imageFilePrefix, imageFileSuffix
# Default values
minNum = 0
maxNum = 0
wrongStem = False
for file in imageFileList:
# Parse out file name stem (which includes number) and imageFileSuffix
(stem, imageFileSuffix) = file.split(".")
# Parse out just number (num) and imageFilePrefix
if stem.startswith("Slide"):
# PowerPoint Slide images are output to jpg with starting index of 0
imageFilePrefix = "Slide"
minNum=0
num = int(stem[5:])
elif stem.startswith("img"):
# ODP slide images are output to img with starting index of 0
imageFilePrefix = "img"
num = int(stem[3:])
else:
wrongStem = True
if wrongStem:
easygui.msgbox("Need slide image files for this presentation\n"+
"with consistent stem: Slide* or img*\n\nCheck in "+odpFileSubdirectory)
sys.exit()
else:
if num>maxNum:
maxNum=num
if not scriptAndImagesCreated:
## Step 1 - parse the .odp file, prepare script.txt and .zip file
def joinContents(textPList):
    """Combine tagged XML into single string.

    `textPList` is a list of BeautifulSoup <text:p> elements (one slide's
    speaker notes).  Handles PowerPoint-exported ODP where each paragraph is
    split across several <text:span> elements, possibly with nested <text:s>
    spacing tags and empty spans.  Returns the paragraphs joined with newlines.
    """
    # item is list of all the XML for a single slide
    joinedItems = ""
    if len(textPList)>0:
        textItems = []
        i = 0  # NOTE(review): unused counter, kept for byte-compatibility
        for textP in textPList:
            textSpans = []
            # break the XML into a list of tagged pieces (text:span)
            for item in textP:
                if type(item)==BeautifulSoup.Tag:
                    tagContents = item.contents
                    if type(tagContents)==type([]):
                        for item2 in tagContents:
                            if type(item2)==BeautifulSoup.Tag:
                                textSpans.append([item2.contents])
                            else:
                                textSpans.append([item2])
                    else:
                        textSpans.append([tagContents])
                else:
                    textSpans.append([item])
            # flatten list
            textSpans1 = [item for sublist in textSpans for item in sublist]
            # clean up: unwrap singleton lists, splice longer lists, keep strings
            textSpans1b = []
            for item in textSpans1:
                if type(item)==BeautifulSoup.NavigableString:
                    textSpans1b.append(item)
                elif type(item)==type([]):
                    if len(item)==0:
                        pass
                    elif len(item)==1:
                        textSpans1b.append(item[0])
                    else:
                        for itemInList in item:
                            textSpans1b.append(itemInList)
            # find the contents of these pieces if they are still tagged (text:s)
            textSpans2 = []
            for textSpan in textSpans1b:
                if type(textSpan)==BeautifulSoup.Tag:
                    textSpans2.append(textSpan.text)
                else:
                    if (type(textSpan)==type([]) and len(textSpan)>0):
                        textSpans2.append(unicode(textSpan[0]))
                    else:
                        textSpans2.append(unicode(textSpan))
            justText = u""
            for item in textSpans2:
                # deal with single quote and double quotes and dashes
                # \u2018 LEFT SINGLE QUOTATION MARK
                justText = justText + item + u" "
            textItems.append(justText)
        joinedItems = "\n".join(textItems)
    return joinedItems
if ((0 != len(odpFile)) and (os.path.exists(odpFilePath))):
# Save file name to config file
config.set("Files","lastOdpFile",odpFilePath)
with open(iniDirectory+os.sep+'odp2ss.ini', 'wb') as configfile:
config.write(configfile)
odpName = odpFile.replace(".odp","")
odp = ZipFile(odpFilePath,'r')
f = odp.read(u'content.xml')
soup = BeautifulStoneSoup(f)
notes = soup.findAll(attrs={"presentation:class":"notes"})
noteTextPLists = [item.findAll("text:p") for item in notes]
noteText = [joinContents(noteTextPList) for noteTextPList in noteTextPLists]
else:
sys.exit()
# Create script.txt file
scriptFile = codecs.open(odpFileSubdirectory+os.sep+'script.txt', encoding='utf-8', mode='w+')
scriptFile.write("""#[path=]
#
# Script created with SlideSpeech from ODP version """+__version__+
"\n# http://slidespeech.org\n"+
"# Date: "+time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())+"""
#
# Title:
# Author:
#
# SlideSpeech Slide show version
# http://
#
# SlideSpeech Video version
# http://
#
# SlideSpeech script:
""")
onImg = minNum
for item in noteText:
if onImg-minNum == 0: # first slide
# insert line with link to first slide image after parameter lines
# For example, noteText could start with [path=...]
lines = item.split("\n")
slideOnLine = -1
for linenum, line in enumerate(lines):
if len(line.strip())>0:
if line.startswith("["):
scriptFile.write(line+"\n")
elif slideOnLine == -1:
scriptFile.write(imageFilePrefix+str(onImg)+"."+imageFileSuffix+"\n")
slideOnLine = linenum
scriptFile.write(line+"\n")
else:
scriptFile.write(line+"\n")
else:
scriptFile.write("\n")
else:
# Add a line with a link to each slide
scriptFile.write(imageFilePrefix+str(onImg)+"."+imageFileSuffix+"\n")
# followed by the voice over text for the slide
scriptFile.write(item+"\n")
scriptFile.write("\n")
onImg += 1
scriptFile.close()
# Collect script and image files into ZIP file
outputFile = ZipFile(odpFileDirectory+os.sep+odpName+".zip",'w')
savePath = os.getcwd()
os.chdir(odpFileSubdirectory)
outputFile.write("script.txt")
for file in imageFileList:
outputFile.write(file)
os.chdir(savePath)
easygui.msgbox("Zipped script.txt and image files to "+odpFileDirectory+os.sep+odpName+".zip")
## Step 2 - Sequence script and make and run convert.bat
def convertItem(f,item,onImgStr):
    """Append text-to-speech and audio-encoding commands for one script item
    to the batch file *f*.

    f        -- open file object for the convert.bat being generated
    item     -- voice-over text; may span several lines.  Lines starting with
                "[" are parameter lines (a non-leading one ends the text); on
                Windows lines starting with "#" are also skipped.
    onImgStr -- slide/question identifier used to name the audio files

    Relies on the module globals savePath, imageFilePrefix and
    odpFileSubdirectory set up by the surrounding script.
    """
    if sys.platform.startswith("win"):
        # For Windows: sapi2wav -> .wav, then lame -> .mp3 and sox -> .ogg
        f.write('"'+savePath+os.sep+'sapi2wav.exe" '+imageFilePrefix+onImgStr+'.wav 1 -t "')
        lines = item.split("\n")
        for linenum, line in enumerate(lines):
            if not line.startswith("["):
                # BUGFIX: str.replace returns a new string; the result used to
                # be discarded, so quotes/backticks/semicolons were never
                # actually stripped out of the generated shell command.
                line = line.replace('"',' ').replace('`',' ').replace(';',' ')
                if not line.startswith("#"):
                    f.write(line+" ")
            elif linenum>0:
                break
        f.write('"\n')
        f.write('"'+savePath+os.sep+'lame.exe" -h '+imageFilePrefix+onImgStr+'.wav '+ '"' + \
            odpFileSubdirectory+os.sep+imageFilePrefix+onImgStr+'.mp3"\n')
        f.write('"'+savePath+os.sep+'sox.exe" '+imageFilePrefix+onImgStr+'.wav '+ '"' + \
            odpFileSubdirectory+os.sep+imageFilePrefix+onImgStr+'.ogg"\n')
        f.write('del '+imageFilePrefix+onImgStr+'.wav\n')
    elif sys.platform.startswith("darwin"):
        # For Mac OSX: say -> .aiff, then sox -> .ogg and .mp3
        f.write("/usr/bin/say -o "+imageFilePrefix+onImgStr+'.aiff "')
        lines = item.split("\n")
        for linenum, line in enumerate(lines):
            # BUGFIX: keep the sanitized line (result was previously discarded).
            line = line.replace('"',' ').replace('`',' ').replace(';',' ')
            if not line.startswith("["):
                f.write(line+" ")
            elif linenum>0:
                break
        f.write('"\n')
        f.write("~/bin/sox "+imageFilePrefix+onImgStr+'.aiff "'+
                odpFileSubdirectory+os.sep+imageFilePrefix+onImgStr+'.ogg"\n')
        f.write("~/bin/sox "+imageFilePrefix+onImgStr+'.aiff "'+
                odpFileSubdirectory+os.sep+imageFilePrefix+onImgStr+'.mp3"\n')
        f.write("rm "+imageFilePrefix+onImgStr+'.aiff\n')
    else:
        # For Linux: espeak -> .wav, then sox -> .ogg and .mp3
        f.write("/usr/bin/espeak -w "+imageFilePrefix+onImgStr+'.wav "')
        lines = item.split("\n")
        for linenum, line in enumerate(lines):
            # BUGFIX: keep the sanitized line (result was previously discarded).
            line = line.replace('"',' ').replace('`',' ').replace(';',' ')
            if not line.startswith("["):
                f.write(line+" ")
            elif linenum>0:
                break
        f.write('"\n')
        f.write("/usr/bin/sox "+imageFilePrefix+onImgStr+'.wav "'+
                odpFileSubdirectory+os.sep+imageFilePrefix+onImgStr+'.ogg"\n')
        f.write("/usr/bin/sox "+imageFilePrefix+onImgStr+'.wav "'+
                odpFileSubdirectory+os.sep+imageFilePrefix+onImgStr+'.mp3"\n')
        # The intermediate .wav is intentionally not deleted on Linux
        # (the rm line was commented out in the original).
def writeHtmlHeader(htmlFile):
    """Write the opening HTML boilerplate: doctype, <html>, <head>, the
    UTF-8 content-type meta tag and the SlideSpeech page title."""
    header_lines = [
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"',
        '"http://www.w3.org/TR/html4/transitional.dtd">',
        '<html>',
        '<head>',
        '<meta HTTP-EQUIV=CONTENT-TYPE CONTENT="text/html; charset=utf-8">',
        '<title>SlideSpeech</title>',
    ]
    for header_line in header_lines:
        htmlFile.write(header_line + '\n')
def writeHtmlHeader2(htmlFile):
    """Close the <head> section and open a centered document <body>."""
    body_open = ('</head>\n'
                 '<body text="#000000" bgcolor="#FFFFFF" link="#000080" vlink="#0000CC" alink="#000080">\n'
                 '<center>\n')
    htmlFile.write(body_open)
def writeHtmlFileNavigation(htmlFile, questionFileNames, maxNum, position):
    """Write the "First page / Back" and "Continue / Last page" navigation
    links for page *position* of *questionFileNames* (maxNum is the index of
    the last page).  Ends on the first page and the last page are rendered
    as plain text instead of links.  Uses the module global odpName for
    links that cross the slide subdirectory boundary."""
    # "First page" and "Back" part.
    if position == 0:
        back_part = """First page Back """
    elif position == 1:
        # Back from page 1 is the first page, which lives one directory up.
        back_part = ('<a href="../' + odpName + '.htm">First page</a> '
                     '<a href="../' + odpName + '.htm">Back</a> ')
    else:
        back_part = ('<a href="../' + odpName + '.htm">First page</a> '
                     '<a href="' + questionFileNames[position-1] + '.htm">Back</a> ')
    htmlFile.write(back_part)
    # "Continue" and "Last page" part.
    if position == maxNum:
        forward_part = 'Continue Last page<br>\n'
    elif position == 0:
        # From the first page, forward links descend into the subdirectory.
        forward_part = ('<a href="' + odpName + '/' + questionFileNames[position+1] + '.htm">Continue</a> '
                        '<a href="' + odpName + '/' + questionFileNames[-1] + '.htm">Last page</a><br>\n')
    else:
        forward_part = ('<a href="' + questionFileNames[position+1] + '.htm">Continue</a> '
                        '<a href="' + questionFileNames[-1] + '.htm">Last page</a><br>\n')
    htmlFile.write(forward_part)
def writeHtmlJavascript(htmlFile,
                        questionFileNames,
                        question,
                        position,
                        audioFileTimes):
    """Write the per-page <script> element for a question page.

    For each non-empty answer of *question*, a respondN() handler is emitted
    that cancels any pending timeout, greys the clicked answer out in place
    (when the answer does not advance), plays the recorded response audio
    via the 'playaudio' element, and -- when the answer advances -- schedules
    advanceN(), which navigates answer.action pages ahead.

    audioFileTimes -- {audio-file stem: duration in ms}; the setTimeout delay
                      is the response duration plus one second of slack.
    """
    htmlFile.write('<script language="javascript" type="text/javascript">\nvar t;\n')
    for answerNum, answer in enumerate(question.answers):
        if len(answer.answerText)>0:
            htmlFile.write('function respond'+
                           str(answerNum)+
                           '()\n{\n clearTimeout(t);\n')
            if not answer.action > 0:
                # Non-advancing answer: rewrite the answer text and grey it out.
                htmlFile.write(' document.getElementById("a'+
                               str(answerNum)+'").innerHTML = "'+
                               answer.answerText+
                               '";\n')
                htmlFile.write(' document.getElementById("a'+
                               str(answerNum)+
                               '").style.color = "grey";\n')
            if len(answer.responseText)>0:
                # The first page lives one directory up, so its audio paths
                # must be prefixed with the slide subdirectory name.
                if position==0:
                    pathToAudio = odpName+'/'+questionFileNames[position]+'r'+str(answerNum)
                else:
                    pathToAudio = questionFileNames[position]+'r'+str(answerNum)
                # Swap the response audio into the 'playaudio' element
                # (mp3 for Safari, ogg for Firefox, embed for old IE).
                htmlFile.write(" document.getElementById('playaudio').innerHTML=" +
                               "'<audio controls autoplay><source src=" +
                               '"' + pathToAudio +
                               '.mp3" />')
                htmlFile.write('<source src="' +
                               pathToAudio +
                               '.ogg" />')
                htmlFile.write("<embed src=" +
                               '"' + pathToAudio +
                               '.mp3' +
                               '" autostart="true"></audio>' + "';\n")
                if answer.action > 0:
                    # Advance only after the response audio has played out.
                    htmlFile.write(' t=setTimeout("advance'+
                                   str(answerNum)+
                                   '()",'+
                                   str(audioFileTimes[pathToAudio]+1000)+
                                   ');\n')
            elif answer.action > 0:
                # No response audio: advance immediately.
                htmlFile.write(" advance"+
                               str(answerNum)+
                               '();\n')
            htmlFile.write('}\n')
            if (answer.action > 0 and position+answer.action < len(questionFileNames)):
                htmlFile.write('function advance'+
                               str(answerNum)+
                               '()\n{\n')
                htmlFile.write(' location.href="'+
                               questionFileNames[position+answer.action]+
                               '.htm";\n}\n')
    htmlFile.write('</script>\n')
def makeConvert(sequence):
    """Write convert.bat: one text-to-speech/encode command group (via
    convertItem) per question, answer and response in *sequence*, followed
    by a final sox call that concatenates every generated .ogg plus a
    trailing silence into all.ogg."""
    # Make list of question file names for navigation
    questionFileNames = []
    onImg = minNum
    onImgStr = str(onImg)
    onQ = 0
    for question in sequence:
        if len(question.answers)==0:
            # Plain slide: imgN, and the slide counter advances.
            questionFileNames.append(imageFilePrefix+onImgStr)
            onImg += 1
            onImgStr = str(onImg)
            onQ = 0
        else:
            # Question page: imgNqM, sharing the current slide number.
            onQ += 1
            questionFileNames.append(imageFilePrefix+onImgStr+"q"+str(onQ))
    maxNum = len(questionFileNames)-1  # NOTE(review): computed but unused here
    # Make convert.bat to convert questionText into audio files
    f = codecs.open(odpFileDirectory+os.sep+"convert.bat", encoding='utf-8', mode='w+')
    os.chmod(odpFileDirectory+os.sep+"convert.bat",stat.S_IRWXU)
    onImg = minNum
    onImgStr = str(onImg)
    onQ = 0
    oggList = []  # audio-file stems, in generation order, for the final concat
    for position, question in enumerate(sequence):
        # write convert.bat
        if len(question.answers)==0:
            convertItem(f," ".join(question.questionTexts),onImgStr)
            oggList.append(onImgStr)
            onImg += 1
            onImgStr = str(onImg)
            onQ = 0
        else:
            onQ += 1
            convertItem(f," ".join(question.questionTexts),onImgStr+"q"+str(onQ))
            oggList.append(onImgStr+"q"+str(onQ))
            onAns = 0
            for answer in question.answers:
                # imgNqMaK: the answer text; imgNqMrK: its optional response.
                convertItem(f,answer.answerText,onImgStr+"q"+str(onQ)+"a"+str(onAns))
                oggList.append(onImgStr+"q"+str(onQ)+"a"+str(onAns))
                if len(answer.responseText)>0:
                    convertItem(f,answer.responseText,onImgStr+"q"+str(onQ)+"r"+str(onAns))
                    oggList.append(onImgStr+"q"+str(onQ)+"r"+str(onAns))
                onAns += 1
    # Write concatenation of all .ogg files into all.ogg
    f.write('cd "'+odpFileSubdirectory+'"\n')
    if sys.platform.startswith("win"):
        f.write('"'+savePath+os.sep+'sox.exe" ')
    elif sys.platform.startswith("darwin"):
        f.write("~/bin/sox ")
    else:
        f.write("/usr/bin/sox ")
    for item in oggList:
        f.write(imageFilePrefix+item+".ogg ")
    f.write('"'+savePath+os.sep+'silence.ogg" ')
    f.write("all.ogg\n")
    f.close()
def fetchAudioFileTimes():
    """Return {file stem: duration in milliseconds} for every .ogg file in
    the slide subdirectory, measuring each file with `soxi -D`.
    Side effect: changes the current working directory to odpFileSubdirectory."""
    os.chdir(odpFileSubdirectory)
    dir = os.listdir(odpFileSubdirectory)  # NOTE(review): shadows the builtin dir()
    ogg = [file for file in dir if file.lower().endswith(".ogg")]
    oggDict = {}
    for file in ogg:
        # Parse out file name stem
        (stem, audioFileSuffix) = file.split(".")
        # soxi -D returns the duration in seconds of the audio file as a float
        if sys.platform.startswith("win"):
            # Unfortunately, there is a requirement that soxi (with an implict .exe)
            # be the command to check audio file duration in Win32
            # but soxi is the name of the unix version of this utility
            # So we need to delete the (unix) file called soxi so the command line call
            # to soxi will run soxi.exe
            if os.path.isfile(savePath+os.sep+"soxi"):
                os.remove(savePath+os.sep+"soxi")
            command = [savePath+os.sep+"soxi","-D",odpFileSubdirectory+os.sep+file]
        elif sys.platform.startswith("darwin"):
            # Prefer a bundled soxi (uncompiled run, then app bundle), else PATH.
            if os.path.isfile(savePath+os.sep+"soxi"):
                command = [savePath+os.sep+"soxi","-D",odpFileSubdirectory+os.sep+file]
            elif os.path.isfile(savePath+os.sep+"Contents/Resources/soxi"):
                command = [savePath+os.sep+"Contents/Resources/soxi","-D",odpFileSubdirectory+os.sep+file]
            else:
                command = ["soxi","-D",odpFileSubdirectory+os.sep+file]
        else :
            command = ["soxi","-D",odpFileSubdirectory+os.sep+file]
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
        output = process.communicate()
        retcode = process.poll()
        if retcode:
            print "No time available"
        # NOTE(review): parsing proceeds even when soxi failed above; an empty
        # output would raise ValueError here.
        oggDict[stem]=int(float(output[0].strip())*1000)
    return oggDict
def writeHtml(sequence, audioFileTimes):
    """Emit one .htm page per item in *sequence*: the first page next to the
    .odp file, the rest inside the slide subdirectory.  Plain slide pages show
    the slide image linked to the next page; question pages show the answer
    table wired to the generated JavaScript handlers.  Every page embeds its
    voice-over audio (mp3/ogg sources plus an IE <embed> fallback).

    audioFileTimes -- {audio-file stem: duration in ms}, forwarded to
                      writeHtmlJavascript for scheduling page advances.
    """
    # Make list of question file names for navigation
    questionFileNames = []
    onImg = minNum
    onImgStr = str(onImg)
    onQ = 0
    for question in sequence:
        if len(question.answers)==0:
            questionFileNames.append(imageFilePrefix+onImgStr)
            onImg += 1
            onImgStr = str(onImg)
            onQ = 0
        else:
            onQ += 1
            questionFileNames.append(imageFilePrefix+onImgStr+"q"+str(onQ))
    maxNum = len(questionFileNames)-1
    # NOTE(review): these resets appear unused in the loop below.
    onImg = minNum
    onImgStr = str(onImg)
    onQ = 0
    for position, question in enumerate(sequence):
        if position==0:
            # Create first .htm file in same directory as odpFile
            htmlFile = codecs.open(odpFileDirectory+os.sep+odpName+".htm", encoding='utf-8', mode='w+')
        else:
            # Create subsequent .htm files in folder in same directory as odpFile
            htmlFile = codecs.open(odpFileSubdirectory+os.sep+questionFileNames[position]+".htm", encoding='utf-8', mode='w+')
        writeHtmlHeader(htmlFile)
        if len(question.answers)==0:
            writeHtmlHeader2(htmlFile)
            writeHtmlFileNavigation(htmlFile, questionFileNames, maxNum, position)
        else:
            # Question pages also need the JavaScript handlers in <head>.
            writeHtmlJavascript(htmlFile, questionFileNames, question, position,
                                audioFileTimes)
            writeHtmlHeader2(htmlFile)
            writeHtmlFileNavigation(htmlFile, questionFileNames, maxNum, position)
        if len(question.answers)==0:
            # image src and link to next slide
            # Last page which is not (also) the first page
            if (position==maxNum and position>0):
                # src but no link
                htmlFile.write('<img src="' +
                               questionFileNames[position] + '.' + imageFileSuffix +
                               '" style="border:0px"><br>\n')
            # Last page which is also the first page
            elif (position==maxNum and position==0):
                # src but no link
                htmlFile.write('<img src="' +
                               odpName+"/"+questionFileNames[position] + '.' + imageFileSuffix +
                               '" style="border:0px"><br>\n')
            # First page
            elif position==0:
                htmlFile.write('<a href="' +
                               odpName+"/"+questionFileNames[position+1] +
                               '.htm">')
                htmlFile.write('<img src="' +
                               odpName +"/" + questionFileNames[position] + '.' + imageFileSuffix +
                               '" style="border:0px"></a><br>\n')
            # Rest of pages
            else:
                htmlFile.write('<a href="' +
                               questionFileNames[position+1] +
                               '.htm">')
                htmlFile.write('<img src="' +
                               questionFileNames[position] + '.' + imageFileSuffix +
                               '" style="border:0px"></a><br>\n')
            # Add source link, if any
            if 0<len(question.sourceLink):
                htmlFile.write('<a href="' + question.sourceLink + '">' + question.sourceLink + '</a><br>\n')
        else:
            htmlFile.write("<br><br><hr><br><center>\n")
            if len(question.linkToShow)>0:
                # src but no link
                htmlFile.write('<img src="' +
                               question.linkToShow +
                               '" style="border:0px"><br>\n')
            htmlFile.write("""<table width="400" style="text-align:left"><tbody>
<tr><td>""")
            htmlFile.write(" ".join(question.questionTexts)+ "</td></tr>\n" )
            for answerNumber, answer in enumerate(question.answers):
                if len(answer.answerText)>0:
                    # Each answer is a link invoking its respondN() handler.
                    htmlFile.write('<tr><td><div id="a'+
                                   str(answerNumber)+
                                   '"><a href="javascript:respond'+
                                   str(answerNumber)+
                                   '();">'+
                                   answer.answerText +
                                   '</a></div></td></tr>\n')
            htmlFile.write("""</tbody>
</table>
</center><br><hr>""")
        # include audio
        # First page
        if position==0:
            pathToAudio = odpName+'/'+questionFileNames[position]
        else:
            pathToAudio = questionFileNames[position]
        # For Safari
        htmlFile.write('<p id="playaudio">' +
                       '<audio controls autoplay><source src="' +
                       pathToAudio +
                       '.mp3" />')
        # For Firefox
        htmlFile.write('<source src="' +
                       pathToAudio +
                       '.ogg" />\n')
        # For others
        htmlFile.write('Your browser does not support the <code>audio</code> element.\n</audio>\n')
        htmlFile.write('</p>\n')
        # For Internet Explorer
        htmlFile.write('<!--[if lte IE 8]>\n' +
                       '<script>\n' +
                       'document.getElementById("playaudio").innerHTML=' + "'" +
                       '<embed src="' +
                       pathToAudio +
                       '.mp3" autostart="true">' + "'" + ';\n' +
                       '</script>\n' +
                       '<![endif]-->\n')
        htmlFile.write('</center>' + "\n")
        htmlFile.write('</body>\n</html>\n')
        htmlFile.close()
# Step 2 pipeline: parse the generated script, write convert.bat, run it to
# synthesize the audio, then generate the HTML slide show.
sequence = scriptParser.parseTxtFile(odpFileSubdirectory+os.sep+"script.txt")
makeConvert(sequence)
os.chdir(odpFileDirectory)
p = subprocess.Popen('"'+odpFileDirectory+os.sep+'convert.bat"',shell=True).wait()
audioFileTimes = fetchAudioFileTimes()
writeHtml(sequence, audioFileTimes)
## Step 3 - create makeVid.bat
# Collect the per-slide .ogg files (keyed by slide number) and measure each
# one's duration with soxi, producing the parallel `times` list.
os.chdir(odpFileSubdirectory)
dir = os.listdir(odpFileSubdirectory)  # NOTE(review): shadows the builtin dir()
ogg = [file for file in dir if file.lower().endswith(".ogg")]
oggDict = {}
for file in ogg:
    # Parse out file name stem (which includes number) and imageFileSuffix
    (stem, audioFileSuffix) = file.split(".")
    # Parse out just number (num) and imageFilePrefix
    if stem.startswith("Slide"):
        numberPart = file[5:].split(".")[0]
        if numberPart.isdigit():
            oggDict[int(numberPart)] = file
    else:
        # imgXX.ogg
        numberPart = file[3:].split(".")[0]
        if numberPart.isdigit():
            oggDict[int(file[3:].split(".")[0])] = file
# NOTE(review): relies on dict value ordering; the keys are not explicitly sorted.
sortedOgg = oggDict.values()
times = []
for file in sortedOgg:
    # soxi -D returns the duration in seconds of the audio file as a float
    if sys.platform.startswith("win"):
        # Unfortunately, there is a requirement that soxi (with an implict .exe)
        # be the command to check audio file duration in Win32
        # but soxi is the name of the unix version of this utility
        # So we need to delete the (unix) file called soxi so the command line call
        # to soxi will run soxi.exe
        if os.path.isfile(savePath+os.sep+"soxi"):
            os.remove(savePath+os.sep+"soxi")
        command = [savePath+os.sep+"soxi","-D",odpFileSubdirectory+os.sep+file]
    elif sys.platform.startswith("darwin"):
        if os.path.isfile(savePath+os.sep+"soxi"):
            command = [savePath+os.sep+"soxi","-D",odpFileSubdirectory+os.sep+file]
        elif os.path.isfile(savePath+os.sep+"Contents/Resources/soxi"):
            command = [savePath+os.sep+"Contents/Resources/soxi","-D",odpFileSubdirectory+os.sep+file]
        else:
            command = ["soxi","-D",odpFileSubdirectory+os.sep+file]
    else:
        command = ["soxi","-D",odpFileSubdirectory+os.sep+file]
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True)
    output = process.communicate()
    retcode = process.poll()
    if retcode:
        print "No time available"
    times.append(float(output[0].strip()))
# Create makeVid.bat in odpFileDirectory for Windows
# Generates a per-platform batch/shell script that renders one video per slide
# (frame images + voice-over via ffmpeg) and concatenates them, then runs it.
f = open(odpFileDirectory+os.sep+"makeVid.bat","w")
os.chmod(odpFileDirectory+os.sep+"makeVid.bat",stat.S_IRWXU)
if sys.platform.startswith("win"):
    # find out if mklink is available
    mklinkAvailable = False
    if os.path.isfile("win32_mklink_test"):
        subprocess.Popen(["del","win32_mklink_test"],shell=True)
    os.chdir(odpFileDirectory)
    subprocess.Popen(["mklink","/h","win32_mklink_test","convert.bat"],shell=True).wait()
    if os.path.isfile("win32_mklink_test"):
        mklinkAvailable = True
        subprocess.Popen(["del","win32_mklink_test"],shell=True)
    f.write("echo off\ncls\n")
    f.write("if exist output.avi (del output.avi)\n")
    catCommand = "copy /b"
    for i, file in enumerate(sortedOgg):
        stem, suffix = file.split(".")
        # Add the slide video to the list of videos to be concatenated
        if i==0:
            catCommand += " "+stem+".avi"
        else:
            catCommand += " + "+stem+".avi"
        tenthsOfSeconds = int(math.floor(times[i]*10))
        # If we are on the last slide, add enough frames
        # to give audio time to finish
        if sortedOgg[i]==sortedOgg[-1]:
            tenthsOfSeconds += 20
        # Make a symlink to the slide image for each second the audio runs
        # Only 999 symlinks are allowed per image, so, if there are more
        # than this number, we need to also make additional copies of the
        # slide image to link to
        for j in range(tenthsOfSeconds):
            if ((j > 0) and (j % 900 == 0)):
                f.write("copy "+stem+'.jpg '+stem+str(j)+'.jpg\n')
        extraStem = ""
        for j in range(tenthsOfSeconds):
            if ((j > 0) and (j % 900 == 0)):
                extraStem = str(j)
            if mklinkAvailable:
                f.write("mklink /h "+stem+'_'+str(j).zfill(5)+'.jpg '+stem+extraStem+'.jpg\n')
            else:
                f.write("copy "+stem+'.jpg '+stem+'_'+str(j).zfill(5)+'.jpg\n')
        # Convert the images to a video of that slide with voice over
        # NOTE: Little trick here -- Windows wants to substitute the batch file name
        # into %0 so we use %1 and pass %0 as the first parameter
        f.write('"'+savePath+os.sep+'ffmpeg" -i '+stem+'.mp3 -r 10 -i "'+stem+'_%15d.jpg" -ab 64k '+stem+".avi\n")
        # Delete the symlinks
        for j in range(tenthsOfSeconds):
            f.write("del "+stem+'_'+str(j).zfill(5)+'.jpg\n')
        # Delete any extra copies
        for j in range(tenthsOfSeconds):
            if ((j > 0) and (j % 900 == 0)):
                f.write("del "+stem+str(j)+'.jpg\n')
    # Add an output file name for the concatenation
    catCommand += " temp.avi\n"
    if os.path.isfile(savePath+os.sep+"mencoder.exe"):
        catCommand += '"'+savePath+os.sep+'mencoder.exe" temp.avi -o output.avi -forceidx -ovc copy -oac copy\n'
    else:
        catCommand += "mencoder.exe temp.avi -o output.avi -forceidx -ovc copy -oac copy\n"
    f.write(catCommand)
    # Delete all the single slide videos
    for file in sortedOgg:
        stem, suffix = file.split(".")
        f.write('del '+stem+'.avi\n')
    f.close()
elif sys.platform.startswith("darwin"):
    # For Mac OSX
    # ffmpeg -i Slide1.mp3 -r 1 -i Slide1_%03d.png -ab 64k output.mp4
    f.write("clear\n")
    f.write("if [ -f output.mp4 ]\n")
    f.write("then rm output.mp4\n")
    f.write("fi\n")
    if os.path.isfile(savePath+os.sep+"MP4Box"):
        # for uncompiled run
        catCommand = '"'+savePath+os.sep+'MP4Box"'
    elif os.path.isfile(savePath+os.sep+"Contents/Resources/MP4Box"):
        # for compiled run
        catCommand = '"'+savePath+os.sep+'Contents/Resources/MP4Box"'
    else:
        # for when MP4Box is not distributed but is installed
        catCommand = "MP4Box"
    # save another copy for subsequent cat lines if more than 20 slides
    catCommand2 = catCommand
    tempFilesToDelete = []
    for i, file in enumerate(sortedOgg):
        stem, suffix = file.split(".")
        # Add the slide video to the list of videos to be concatenated
        catCommand += " -cat "+stem+".mp4"
        if ((i>0) and (i % 18 == 0)):
            tempFilesToDelete.append("temp"+ str(i) +".mp4")
            # add a temp.mp4 for output and then input on next line
            catCommand += " temp" + str(i) +".mp4\n"+catCommand2+" -cat temp" + str(i) +".mp4"
        tenthsOfSeconds = int(math.floor(times[i]*10))
        # If we are on the last slide, add enough frames
        # to give audio time to finish
        if sortedOgg[i]==sortedOgg[-1]:
            tenthsOfSeconds += 20
        # Make a symlink to the slide image for each second the audio runs
        for j in range(tenthsOfSeconds):
            # ln -s Slide2.png Slide2_001.png
            f.write("ln -s "+stem+'.png '+stem+'_'+str(j).zfill(5)+'.png\n')
        f.write('"'+savePath+os.sep+'ffmpeg" -i '+stem+'.mp3 -r 10 -i "'+stem+'_%05d.png" -ab 64k '+stem+".mp4\n")
        # Delete the symlinks
        for j in range(tenthsOfSeconds):
            f.write("rm "+stem+'_'+str(j).zfill(5)+'.png\n')
    # Add an output file name for the concatenation
    catCommand += " output.mp4\n"
    f.write(catCommand)
    # Delete all the single slide videos
    for file in sortedOgg:
        stem, suffix = file.split(".")
        f.write('rm '+stem+'.mp4\n')
    for file in tempFilesToDelete:
        f.write('rm '+file+"\n")
    f.close()
else:
    # For Linux
    # ffmpeg -i Slide1.mp3 -r 1 -i Slide1_%03d.png -ab 64k output.mp4
    f.write("clear\n")
    f.write("if [ -f output.mp4 ]\n")
    f.write("then rm output.mp4\n")
    f.write("fi\n")
    # We need to do this:
    # cat img0.avi img1.avi > output.avi
    # mencoder output.avi -o final.avi -forceidx -ovc copy -oac copy
    catCommand = "cat "
    catCommand2= "cat "
    # save another copy for subsequent cat lines if more than 20 slides
    tempFilesToDelete = []
    for i, file in enumerate(sortedOgg):
        stem, suffix = file.split(".")
        # Add the slide video to the list of videos to be concatenated
        catCommand += " " + stem+".avi"
        if ((i>0) and (i % 18 == 0)):
            tempFilesToDelete.append("temp"+ str(i) +".avi")
            # add a temp.mp4 for output and then input on next line
            catCommand += " temp" + str(i) +".avi\n"+catCommand2+" temp" + str(i) +".avi"
        tenthsOfSeconds = int(math.floor(times[i]*10))
        # If we are on the last slide, add enough frames
        # to give audio time to finish
        if sortedOgg[i]==sortedOgg[-1]:
            tenthsOfSeconds += 20
        # Make a symlink to the slide image for each second the audio runs
        for j in range(tenthsOfSeconds):
            # ln -s Slide2.png Slide2_001.png
            f.write("ln -s "+stem+'.png '+stem+'_'+str(j).zfill(5)+'.png\n')
        f.write('ffmpeg -i '+stem+'.mp3 -r 10 -i "'+stem+'_%05d.png" -ab 64k '+stem+".avi\n")
        # Delete the symlinks
        for j in range(tenthsOfSeconds):
            f.write("rm "+stem+'_'+str(j).zfill(5)+'.png\n')
    # Add an output file name for the concatenation
    catCommand += " > output.avi\n mencoder output.avi -o final.avi -forceidx -ovc copy -oac copy\n"
    f.write(catCommand)
    # Delete all the single slide videos
    for file in sortedOgg:
        stem, suffix = file.split(".")
        f.write('rm '+stem+'.avi\n')
    for file in tempFilesToDelete:
        f.write('rm '+file+"\n")
    f.close()
# Run the makeVid.bat file with %0 as the first parameter
os.chdir(odpFileSubdirectory)
if sys.platform.startswith("win"):
    p = subprocess.Popen([odpFileDirectory+os.sep+'makeVid.bat',"%0"],shell=True).wait()
    webbrowser.open_new_tab(odpFileDirectory+os.sep+odpName+'.htm')
else:
    p = subprocess.Popen([odpFileDirectory+os.sep+"makeVid.bat"],shell=True).wait()
    p = subprocess.Popen('open "'+odpFileDirectory+os.sep+odpName+'.htm"', shell=True).pid
os.chdir(savePath)
| StarcoderdataPython |
12859022 | #!/usr/bin/env python
#
# Copyright 2019 <NAME>
#
# This file is part of a Python candidate reference implementation of
# the optimade API [https://www.optimade.org/]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
This is part of a Python candidate reference implementation of the
optimade API [https://www.optimade.org/].
This program runs a simple test query against the example_sqlite3 backend.
'''
from __future__ import print_function
import os, sys
from pprint import pprint
# Make the bundled 'src' directory importable before importing the parser.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'src'))
from parse import parse_optimade_filter
if __name__ == "__main__":
    # Demo driver: parse an OPTIMADE filter string and run it against the
    # sqlite3 example backend, printing the AST and the query results.
    import backends.example_sqlite3 as backend
    backend.initialize()
    # This represents the query being received (later to be received via a web URL query)
    tables = ["structures"]
    response_fields = ["id", "chemical_formula", "elements"]
    # An optional command-line argument overrides the default example filter.
    if len(sys.argv) >= 2:
        input_string = 'filter='+sys.argv[1]
    else:
        input_string = 'filter=elements="Ga,Ti" AND (nelements=3 OR nelements=2)'
    response_limit = 50
    filter_ast = parse_optimade_filter(input_string)
    print("==== FILTER STRING PARSE RESULT:")
    pprint(filter_ast)
    print("====")
    result = backend.execute_query(tables, response_fields, response_limit, filter_ast, debug=True)
    print("==== END RESULT")
    pprint(list(result))
    print("===============")
    backend.close()
| StarcoderdataPython |
3575861 | <gh_stars>1-10
#!/usr/bin/env python
#
# This script runs a set of black-box tests on Mailpile using the test
# messages found in `testing/`.
#
# If run with -i as the first argument, it will then drop to an interactive
# python shell for experimenting and manual testing.
#
import os
import sys
import traceback
# Set up some paths
mailpile_root = os.path.join(os.path.dirname(__file__), '..')
mailpile_test = os.path.join(mailpile_root, 'testing')  # test message fixtures
mailpile_send = os.path.join(mailpile_root, 'scripts', 'test-sendmail.sh')  # fake sendmail
mailpile_home = os.path.join(mailpile_test, 'tmp')  # throwaway Mailpile workdir
mailpile_sent = os.path.join(mailpile_home, 'sent.mbx')  # where outgoing mail is captured
# Add the root to our import path, import API and standard plugins
sys.path.append(mailpile_root)
from mailpile.plugins import *
from mailpile import Mailpile
##[ Black-box test script ]###################################################
# Queries used to find the UTF-8-headers test message, and the test sender.
FROM_BRE = [u'from:r\xfanar', u'from:bjarni']
MY_FROM = '<EMAIL>'
try:
    # First, we set up a pristine Mailpile
    os.system('rm -rf %s' % mailpile_home)
    mp = Mailpile(workdir=mailpile_home)

    def contents(fn):
        # Whole file as one string.
        return open(fn, 'r').read()

    def grep(w, fn):
        # Lines of fn that contain w.
        return '\n'.join([l for l in open(fn, 'r').readlines() if w in l])

    def grepv(w, fn):
        # Lines of fn that do NOT contain w.
        return '\n'.join([l for l in open(fn, 'r').readlines() if w not in l])

    def say(stuff):
        # Progress reporting through the Mailpile session UI.
        mp._session.ui.mark(stuff)
        mp._session.ui.reset_marks()

    # Set up initial tags and such
    mp.setup()

    # Configure our fake mail sending setup
    mp.set('my_from: %s = Test Account' % MY_FROM)
    mp.set('my_sendmail: %s = |%s -i %%(rcpt)s' % (MY_FROM, mailpile_send))
    mp.set('debug = sendmail log compose')

    # Add the mailboxes, scan them
    for mailbox in ('tests.mbx', 'Maildir'):
        mp.add(os.path.join(mailpile_test, mailbox))
    mp.rescan()

    # Save and load the index, just for kicks
    mp._config.index.save()
    mp._config.index.load()

    # Rescan AGAIN, so we can test for the presence of duplicates.
    mp.rescan()

    # Search for things, there should be exactly one match for each.
    mp.order('flat-date')
    for search in (FROM_BRE,
                   ['agirorn'],
                   ['subject:emerging'],
                   ['from:twitter', 'brennan'],
                   ['dates:2013-09-17', 'feministinn'],
                   ['mailbox:tests.mbx'] + FROM_BRE,
                   ['att:jpg', 'fimmtudaginn'],
                   ['subject:Moderation', 'kde-isl']):
        say('Searching for: %s' % search)
        results = mp.search(*search)
        assert(results.result['count'] == 1)

    # Make sure we are decoding weird headers correctly
    result_bre = mp.search(*FROM_BRE).result['messages'][0]
    result_bre = mp.view('=%s' % result_bre['mid']).result['messages'][0]
    say('Checking encoding: %s' % result_bre['from'])
    assert('=C3' not in result_bre['from'])
    say('Checking encoding: %s' % result_bre['message']['headers']['To'])
    assert('utf' not in result_bre['message']['headers']['To'])

    # Create a message...
    new_mid = mp.message_compose().result['messages'][0]['mid']
    assert(mp.search('tag:drafts').result['count'] == 0)
    assert(mp.search('tag:blank').result['count'] == 1)
    assert(mp.search('tag:sent').result['count'] == 0)
    assert(not os.path.exists(mailpile_sent))

    # Edit the message (moves from Blank to Draft, not findable in index)
    msg_data = {
        'from': [MY_FROM],
        'bcc': ['<EMAIL>'],
        'mid': [new_mid],
        'subject': ['This the TESTMSG subject'],
        'body': ['Hello world!']
    }
    mp.message_update(**msg_data)
    assert(mp.search('tag:drafts').result['count'] == 1)
    assert(mp.search('tag:blank').result['count'] == 0)
    assert(mp.search('TESTMSG').result['count'] == 0)
    assert(not os.path.exists(mailpile_sent))

    # Send the message (moves from Draft to Sent, is findable via. search)
    del msg_data['subject']
    msg_data['body'] = ['Hello world: thisisauniquestring :)']
    mp.message_update_send(**msg_data)
    assert(mp.search('tag:drafts').result['count'] == 0)
    assert(mp.search('tag:blank').result['count'] == 0)
    assert('the TESTMSG subject' in contents(mailpile_sent))
    assert('thisisauniquestring' in contents(mailpile_sent))
    assert(MY_FROM in grep('X-Args', mailpile_sent))
    assert('<EMAIL>' in grep('X-Args', mailpile_sent))
    assert('<EMAIL>' not in grepv('X-Args', mailpile_sent))
    for search in (['tag:sent'],
                   ['bcc:<EMAIL>'],
                   ['thisisauniquestring'],
                   ['subject:TESTMSG']):
        say('Searching for: %s' % search)
        assert(mp.search(*search).result['count'] == 1)
    os.remove(mailpile_sent)

    # Test the send method's "bounce" capability
    mp.message_send(mid=[new_mid], to=['<EMAIL>'])
    assert('thisisauniquestring' in contents(mailpile_sent))
    assert('<EMAIL>' not in grepv('X-Args', mailpile_sent))
    assert('-i <EMAIL>' in contents(mailpile_sent))
    os.remove(mailpile_sent)

    say("Tests passed, woot!")
except:
    # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
    # kept as-is to preserve the script's behavior.
    say("Tests FAILED!")
    print
    traceback.print_exc()


##[ Interactive mode ]########################################################
if '-i' in sys.argv:
    import code
    import readline
    code.InteractiveConsole(locals=globals()).interact("""
Welcome to the Mailpile test shell. You can interact pythonically with the
Mailpile object `mp`, or drop to the Mailpile CLI with `mp.Interact()`.
""")


##[ Cleanup ]#################################################################
os.system('rm -rf %s' % mailpile_home)
| StarcoderdataPython |
8113113 | from model.group import Group
import random
def test_delete_first(app, db, chech_ui):
    """Delete a randomly chosen group and verify the database state
    (and, when chech_ui is set, the UI group list as well)."""
    # Make sure there is at least one group available to delete.
    if not db.get_group_list():
        app.group.create(Group(name="test"))
    groups_before = db.get_group_list()
    victim = random.choice(groups_before)
    app.group.delete_group_by_id(victim.id)
    groups_after = db.get_group_list()
    assert app.group.count() == len(groups_before) - 1
    groups_before.remove(victim)
    assert groups_before == groups_after
    if chech_ui:
        expected = sorted(groups_after, key=Group.id_or_max)
        actual = sorted(app.group.get_group_list(), key=Group.id_or_max)
        assert expected == actual
5081840 | # Find the Median
# Find the Median in a list of numbers.
#
# https://www.hackerrank.com/challenges/find-the-median/problem
#
def findMedian(arr):
    """Return the middle element of the sorted sequence.

    For odd-length input this is the statistical median; for even-length
    input the upper of the two middle elements is returned.
    """
    ordered = sorted(arr)
    return ordered[len(ordered) // 2]
if __name__ == "__main__":
    # HackerRank stdin protocol: the first line is the declared element count
    # (read but otherwise unused -- the list length is implicit), the second
    # line holds the space-separated elements.
    n = int(input().strip())
    arr = list(map(int, input().strip().split(' ')))
    result = findMedian(arr)
    print(result)
| StarcoderdataPython |
5131030 | <filename>asab/task.py<gh_stars>10-100
import logging
import asyncio
import asab
#
# Module-level logger for this service.
L = logging.getLogger(__name__)
#
class TaskService(asab.Service):
	'''
	Task service is for managed execution of fire-and-forget, one-off, background tasks.

	The task is a coroutine, future (asyncio.ensure_future) or task (asyncio.create_task).
	The task is executed in the main event loop.
	The task should be a relatively short-lived (~5 seconds) asynchronous procedure.

	The result of the task is collected (and discarded) automatically
	and if there was an exception, it will be printed to the log.
	'''

	def __init__(self, app, service_name="asab.TaskService"):
		super().__init__(app, service_name)
		self.NewTasks = []         # tasks handed in via schedule(), not yet awaited
		self.PendingTasks = set()  # tasks currently being awaited by main()
		self.Main = None           # the supervisor task running main(); None when stopped

	async def initialize(self, app):
		# Called by the framework at application start: launch the supervisor.
		self.start()

	def start(self):
		# Start the main() supervisor task; must not already be running.
		assert(self.Main is None)
		self.Main = asyncio.ensure_future(self.main())
		self.Main.add_done_callback(self._main_task_exited)

	async def finalize(self, app):
		# Shut the supervisor down, reporting any stray exception.
		if self.Main is not None:
			task = self.Main
			# Clear self.Main first so _main_task_exited() treats this as an
			# intentional shutdown and does not restart the loop.
			self.Main = None
			task.cancel()
			try:
				await task
			except asyncio.CancelledError:
				pass
			except Exception as e:
				L.exception("Error '{}' during task service:".format(e))
		total_tasks = len(self.PendingTasks) + len(self.NewTasks)
		if total_tasks > 0:
			L.warning("{} pending and incompleted tasks".format(total_tasks))

	def _main_task_exited(self, ctx):
		# Done-callback for self.Main: log an unexpected exit and restart.
		if self.Main is None:
			return  # intentional shutdown via finalize()
		try:
			self.Main.result()
		except asyncio.CancelledError:
			pass
		except Exception as e:
			L.exception("Error '{}' during task service:".format(e))
		self.Main = None
		L.warning("Main task exited unexpectedly, restarting ...")
		self.start()

	def schedule(self, *tasks):
		'''
		Schedule execution of task(s).
		Tasks will be started in 1-5 seconds (not immediately).
		Task can be a simple coroutine, future or task.
		'''
		self.NewTasks.extend(tasks)

	async def main(self):
		# Supervisor loop: absorb newly scheduled tasks, await the pending
		# ones, collect (and discard) their results and log any exceptions.
		while True:
			while len(self.NewTasks) > 0:
				task = self.NewTasks.pop()
				self.PendingTasks.add(task)

			if len(self.PendingTasks) == 0:
				# Idle: poll for newly scheduled tasks every 5 seconds.
				await asyncio.sleep(5.0)
			else:
				done, self.PendingTasks = await asyncio.wait(self.PendingTasks, timeout=1.0)
				for task in done:
					try:
						await task
					except Exception as e:
						L.exception("Error '{}' during task:".format(e))
| StarcoderdataPython |
123415 | <filename>agent/python/perper/protocol/task_collection.py
import asyncio
import functools
class TaskCollection:
    """Track a dynamically growing set of asyncio tasks behind one future.

    ``add()`` registers a task (or coroutine); ``wait()`` returns a future
    that resolves with ``None`` once every added task has completed, or with
    the first raised exception.  ``cancel()`` cancels all current tasks and
    any task added afterwards.  Must be instantiated inside a running event
    loop (``asyncio.get_running_loop()`` is used).
    """

    def __init__(self):
        self.tasks = set()
        self.tasks_left = 0
        self.cancelled = False
        self.future = asyncio.get_running_loop().create_future()

    def remove(self, task):
        """Unregister *task*; also installed as each task's done-callback."""
        if task in self.tasks:
            self.tasks.remove(task)
            self.tasks_left -= 1

        if task.done():
            if task.cancelled():
                # Cancellation is not treated as an error here.
                pass
            elif task.exception() is not None:
                # Propagate the first failure to the collection future.
                if not self.future.done():
                    self.future.set_exception(task.exception())
            elif self.tasks_left == 0:
                # Last task finished cleanly: resolve the collection future.
                if not self.future.done():
                    self.future.set_result(None)

    def add(self, task):
        """Register a coroutine/future/task; cancels it immediately if the
        collection was already cancelled."""
        task = asyncio.ensure_future(task)
        if task not in self.tasks:
            self.tasks.add(task)
            self.tasks_left += 1
        task.add_done_callback(self.remove)
        if self.cancelled:
            task.cancel()

    def cancel(self):
        """Cancel every registered task; future additions are cancelled too."""
        self.cancelled = True
        for task in self.tasks:
            task.cancel()

    def wait(self, complete=True):
        """Return the collection future.

        With ``complete=True`` (default) an empty collection resolves
        immediately; with ``complete=False`` the future stays pending until
        tasks are added and finish.
        """
        if complete and self.tasks_left == 0 and not self.future.done():
            self.future.set_result(None)
        return self.future
| StarcoderdataPython |
4924182 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2020,2021
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragments for the CMCI modules.

    Each class attribute below is a YAML string that Ansible splices into a
    module's DOCUMENTATION block via ``extends_documentation_fragment``.
    Changing these strings changes the rendered module docs, not behavior.
    """

    # Connection and authentication options shared by every CMCI module.
    COMMON = r'''
options:
  cmci_host:
    description:
      - The TCP/IP host name of CMCI connection.
    type: str
    required: true
  cmci_port:
    description:
      - The port number of the CMCI connection.
    type: int
    required: true
  cmci_user:
    description:
      - The user ID under which the CMCI request will run.
      - Can also be specified using the environment variable CMCI_USER.
      - Required if I(cmci_password) is specified.
      - Authentication prioritises certificate authentication if I(cmci_cert)
        and I(cmci_key) are provided, then basic authentication if I(cmci_user)
        and (cmci_password) are provided, and then unauthenticated if none is
        provided.
    type: str
  cmci_password:
    description:
      - The password of I(cmci_user) to pass HTTP basic authentication.
      - Can also be specified using the environment variable CMCI_PASSWORD.
      - Required if I(cmci_user) is specified.
      - Authentication prioritises certificate authentication if I(cmci_cert)
        and I(cmci_key) are provided, then
        basic authentication if I(cmci_user) and (cmci_password) are provided,
        and then unauthenticated if none is provided.
    type: str
  cmci_cert:
    description:
      - Location of the PEM-formatted certificate chain file to be used for
        HTTPS client authentication.
      - Can also be specified using the environment variable CMCI_CERT.
      - Required if I(cmci_key) is specified.
      - Authentication prioritises certificate authentication if I(cmci_cert)
        and I(cmci_key) are provided, then basic authentication if I(cmci_user)
        and (cmci_password) are provided, and then unauthenticated if none is
        provided.
    required: false
    type: str
  cmci_key:
    description:
      - Location of the PEM-formatted file storing your private key to be used
        for HTTPS client authentication.
      - Can also be specified using the environment variable CMCI_KEY.
      - Required if I(cmci_cert) is specified.
      - Authentication prioritises certificate authentication if I(cmci_cert)
        and I(cmci_key) are provided, then basic authentication if I(cmci_user)
        and (cmci_password) are provided, and then unauthenticated if none is
        provided.
    required: false
    type: str
  context:
    description:
      - If CMCI is installed in a CICSPlex® SM environment, I(context) is the
        name of the CICSplex or CMAS associated with the request, for example,
        C(PLEX1). To determine whether a CMAS can be specified as I(context),
        see the B(CMAS context) entry in the CICSPlex SM resource table
        reference of a resource. For example, according to the
        L(PROGRAM resource table,https://www.ibm.com/support/knowledgecenter/en/SSGMCP_5.6.0/reference-cpsm-restables/cpsm-restables/PROGRAMtab.html),
        CMAS context is not supported for PROGRAM.
      - If CMCI is installed in a single region (SMSS), I(context) is the
        APPLID of the CICS region associate with the request.
      - The value of I(context) must contain no spaces. I(context) is not
        case-sensitive.
    type: str
    required: true
  scope:
    description:
      - Specifies the name of a CICSplex, CICS region group, CICS region, or
        logical scope that is associated with the query.
      - I(scope) is a subset of I(context) and limits the request to particular
        CICS systems or resources.
      - I(scope) is optional. If it's not specified, the request is limited by
        the value of I(context) alone.
      - The value of I(scope) must contain no spaces. I(scope) is not
        case-sensitive.
    type: str
  type:
    description:
      - The CMCI external resource name that maps to the target CICS or CICSPlex
        SM resource type.
        For a list of CMCI external resource names, see
        L(CMCI resource names,https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/cmci/clientapi_resources.html).
    type: str
    required: true
  scheme:
    description: The HTTP scheme to use when establishing a connection to the
      CMCI REST API.
    type: str
    choices:
      - http
      - https
    default: https
  insecure:
    description: When set to C(true), disables SSL certificate trust chain
      verification when using HTTPS.
    type: bool
    required: false
    default: false
  timeout:
    description: HTTP request timeout in seconds
    type: int
    required: false
    default: 30
'''

    # Options for selecting target resources: simple filters, composable
    # complex filters, and GET parameters.
    RESOURCES = r'''
options:
  resources:
    description:
      - Options that specify a target resource.
    type: dict
    required: false
    suboptions:
      filter:
        description:
          - A dictionary with attribute names as keys, and target values, to be
            used as criteria to filter the set of resources returned from
            CICSPlex SM.
          - Filters implicitly use the C(=) operator.
          - Filters for C(string) type attributes can use the C(*) and C(+)
            wildcard operators.
          - C(*) is a wildcard representing an unknown number of characters,
            and must appear at the end of the value.
          - C(+) is a wildcard representing a single character, and can appear
            in any place in the value, potentially multiple times.
          - To use more complicated filter expressions, including a range of
            different filter operators, and the ability to compose filters with
            C(and) and C(or) operators, see the C(complex_filter) parameter.
          - For more details, see
            L(How to build a filter expression,https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/system-programming/cpsm/eyup1a0.html).
          - For examples, see M(cmci_get).
          - For supported attributes of different resource types, see their
            resource table reference, for example,
            L(PROGDEF resource table reference,https://www.ibm.com/support/knowledgecenter/en/SSGMCP_5.6.0/reference-cpsm-restables/cpsm-restables/PROGDEFtab.html).
        type: dict
        required: false
      complex_filter:
        description:
          - A dictionary representing a complex filter expression. Complex
            filters are composed of filter expressions, represented as
            dictionaries. Each dictionary can specify either an attribute
            expression, a list of filter expressions to be composed with the
            C(and) operator, or a list of filter expressions to be composed
            with the C(or) operator.
          - The C(attribute), C(and) and C(or) options are mutually exclusive
            with each other.
          - Can contain one or more filters. Multiple filters must be combined
            using C(and) or C(or) logical operators.
          - Filters can be nested.
          - When supplying the C(attribute) option, you must also supply a
            C(value) for the filter. You can also override the default
            operator of C(=) with the C(operator) option.
          - For examples, see "Examples" in M(cmci_get).
        type: dict
        required: false
        suboptions:
          and:
            description:
              - A list of filter expressions to be combined with an C(and)
                operation.
              - Filter expressions are nested C(complex_filter) elements. Each
                nested filter expression can be either an C(attribute), C(and)
                or C(or) complex filter expression.
            type: list
            elements: dict
            required: false
          or:
            description:
              - A list of filter expressions to be combined with an C(or)
                operation.
              - Filter expressions are nested C(complex_filter) elements. Each
                nested filter expression can be either an C(attribute), C(and)
                or C(or) complex filter expression.
            type: list
            elements: dict
            required: false
          attribute:
            description:
              - The name of a resource table attribute on which to filter.
              - For supported attributes of different resource types, see their
                resource table reference, for example,
                L(PROGDEF resource table reference, https://www.ibm.com/support/knowledgecenter/en/SSGMCP_5.6.0/reference-cpsm-restables/cpsm-restables/PROGDEFtab.html).
            type: str
            required: false
          operator:
            description: >
              These operators are accepted: C(<) or C(LT) (less than), C(<=) or
              C(LE) (less than or equal to), C(=) or C(EQ) (equal to), C(>) or
              C(GT) (greater than), C(>=) or C(GE) (greater than or equal to),
              C(==) or C(IS) (is), C(¬=), C(!=), or C(NE) (not equal to). If
              not supplied when C(attribute) is used, C(EQ) is assumed.
            type: str
            required: false
            choices:
              - "<"
              - ">"
              - "<="
              - ">="
              - "="
              - "=="
              - "!="
              - "¬="
              - EQ
              - GT
              - GE
              - LT
              - LE
              - NE
              - IS
          value:
            description:
              - The value by which you are to filter the resource attributes.
              - The value must be a valid one for the resource table attribute
                as documented in the resource table reference, for example,
                L(PROGDEF resource table reference,https://www.ibm.com/support/knowledgecenter/en/SSGMCP_5.6.0/reference-cpsm-restables/cpsm-restables/PROGDEFtab.html).
            type: str
            required: false
      get_parameters:
        description: >
          A list of one or more parameters with optional values used to identify
          the resources for this request. Eligible parameters for identifying
          the target resources can be found in the resource table reference for
          the target resource type, as valid parameters for the GET operation in
          the "Valid CPSM operations" table. For example, the valid parameters
          for identifying a PROGDEF resource are CICSSYS, CSDGROUP and RESGROUP,
          as found in the
          L(PROGDEF resource table reference,https://www.ibm.com/support/knowledgecenter/en/SSGMCP_5.6.0/reference-cpsm-restables/cpsm-restables/PROGDEFtab.html).
        type: list
        elements: dict
        suboptions:
          name:
            description: Parameter name available for the GET operation.
            required: true
            type: str
          value:
            description: Parameter value if any.
            required: false
            type: str
        required: false
'''

    # Option for supplying resource attributes on create/update requests.
    ATTRIBUTES = r'''
options:
  attributes:
    description:
      - The resource attributes to be created or updated. Available attributes
        can be found in the CICSPlex® SM resource table reference for the
        target resource type, for example,
        L(PROGDEF resource table reference,https://www.ibm.com/support/knowledgecenter/en/SSGMCP_5.6.0/reference-cpsm-restables/cpsm-restables/PROGDEFtab.html).
    type: dict
    required: false
'''
| StarcoderdataPython |
8788 | <reponame>vnaskos/Website
from django.contrib import admin
# Register your models here.]
from website.sites.models import Post
@admin.register(Post)
class TestAdmin2(admin.ModelAdmin):
    """Default Django admin configuration for the Post model.

    No customisation yet: registration alone makes Post manageable in the
    admin site with stock list/detail views.
    """
    # NOTE: stray dataset-extraction junk that followed the class body was
    # removed; it was not valid Python.
    pass
9603512 | <filename>orm_sqlfan/libreria/migrations/0002_libro.py
# Generated by Django 2.2.7 on 2019-11-25 05:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); creates the `Libro` table.
    # Do not hand-edit an already-applied migration -- add a new one instead.
    dependencies = [
        ('libreria', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Libro',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isbn', models.CharField(max_length=13)),
                ('titulo', models.CharField(max_length=70)),
                ('paginas', models.IntegerField()),
                ('fecha_publicacion', models.DateField()),
                ('imagen', models.URLField(max_length=85)),
                ('desc_corta', models.CharField(max_length=2000)),
                # 'P' = Publish, 'M' = MEAP (early-access) per the choices list.
                ('estatus', models.CharField(choices=[('P', 'Publish'), ('M', 'MEAP')], max_length=1)),
                ('categoria', models.CharField(max_length=50)),
            ],
        ),
    ]
| StarcoderdataPython |
1685841 | <filename>synapse/rest/client/v2_alpha/devices.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.api import errors
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
from ._base import client_patterns, interactive_auth_handler
logger = logging.getLogger(__name__)
class DevicesRestServlet(RestServlet):
    """Read-only endpoint listing all devices of the requesting user."""

    PATTERNS = client_patterns("/devices$")
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(DevicesRestServlet, self).__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
    @defer.inlineCallbacks
    def on_GET(self, request):
        """Return ``(200, {"devices": [...]})`` for the authenticated user.

        Guest access is allowed (allow_guest=True).
        """
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        devices = yield self.device_handler.get_devices_by_user(
            requester.user.to_string()
        )
        defer.returnValue((200, {"devices": devices}))
class DeleteDevicesRestServlet(RestServlet):
    """
    API for bulk deletion of devices. Accepts a JSON object with a devices
    key which lists the device_ids to delete. Requires user interactive auth.
    """
    PATTERNS = client_patterns("/delete_devices")
    def __init__(self, hs):
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
        self.auth_handler = hs.get_auth_handler()
        super(DeleteDevicesRestServlet, self).__init__()
    @interactive_auth_handler
    @defer.inlineCallbacks
    def on_POST(self, request):
        """Delete the devices listed under the request body's "devices" key.

        Requires a JSON body (an empty dict is tolerated for older clients)
        and user-interactive authentication.
        """
        requester = yield self.auth.get_user_by_req(request)
        try:
            body = parse_json_object_from_request(request)
        except errors.SynapseError as e:
            if e.errcode == errors.Codes.NOT_JSON:
                # deal with older clients which didn't pass a JSON dict
                # the same as those that pass an empty dict
                body = {}
            else:
                # Bare raise preserves the original traceback (and matches
                # the style used by DeviceRestServlet.on_DELETE).
                raise
        assert_params_in_dict(body, ["devices"])
        yield self.auth_handler.validate_user_via_ui_auth(
            requester, body, self.hs.get_ip_from_request(request)
        )
        yield self.device_handler.delete_devices(
            requester.user.to_string(), body["devices"]
        )
        defer.returnValue((200, {}))
class DeviceRestServlet(RestServlet):
    """Per-device endpoint: fetch (GET), delete (DELETE) or rename (PUT)
    a single device identified by ``device_id`` in the URL."""

    PATTERNS = client_patterns("/devices/(?P<device_id>[^/]*)$")
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super(DeviceRestServlet, self).__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
        self.auth_handler = hs.get_auth_handler()
    @defer.inlineCallbacks
    def on_GET(self, request, device_id):
        """Return the device record; guest access is allowed."""
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        device = yield self.device_handler.get_device(
            requester.user.to_string(), device_id
        )
        defer.returnValue((200, device))
    @interactive_auth_handler
    @defer.inlineCallbacks
    def on_DELETE(self, request, device_id):
        """Delete the device after user-interactive authentication."""
        requester = yield self.auth.get_user_by_req(request)
        try:
            body = parse_json_object_from_request(request)
        except errors.SynapseError as e:
            if e.errcode == errors.Codes.NOT_JSON:
                # deal with older clients which didn't pass a JSON dict
                # the same as those that pass an empty dict
                body = {}
            else:
                raise
        yield self.auth_handler.validate_user_via_ui_auth(
            requester, body, self.hs.get_ip_from_request(request)
        )
        yield self.device_handler.delete_device(requester.user.to_string(), device_id)
        defer.returnValue((200, {}))
    @defer.inlineCallbacks
    def on_PUT(self, request, device_id):
        """Update device metadata (e.g. display name) from the JSON body."""
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        body = parse_json_object_from_request(request)
        yield self.device_handler.update_device(
            requester.user.to_string(), device_id, body
        )
        defer.returnValue((200, {}))
def register_servlets(hs, http_server):
    """Register all device-management servlets on the given HTTP server."""
    DeleteDevicesRestServlet(hs).register(http_server)
    DevicesRestServlet(hs).register(http_server)
    DeviceRestServlet(hs).register(http_server)
| StarcoderdataPython |
6671535 | import random
import pickle
import sys
# Generate NumTokens random 128-bit tokens (32 lowercase hex chars each)
# and write them, one per line, to the file named TokenName.
if len(sys.argv) != 3:
    print("Usage: python {} TokenName NumTokens".format(sys.argv[0]))
    sys.exit(1)

# secrets draws from the OS CSPRNG and should always be preferred over the
# seedable `random` module for anything token/credential shaped.
import secrets

# token_hex(16) -> 16 random bytes rendered as 32 hex chars, i.e. the same
# shape as the previous "%032x" % getrandbits(128).
tokenlist = [secrets.token_hex(16) + "\n" for _ in range(int(sys.argv[2]))]

# `with` guarantees the file is flushed and closed even on error.
with open(sys.argv[1], "w") as f:
    f.writelines(tokenlist)
| StarcoderdataPython |
6604518 | <gh_stars>10-100
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class EskaGoIE(InfoExtractor):
    """youtube-dl/yt-dlp extractor for eskago.pl live radio streams."""

    # Matches e.g. https://www.eskago.pl/radio/eska-rock ; the slug after
    # /radio/ becomes the video id.
    _VALID_URL = r'https?://(?:www\.)?eskago\.pl/radio/(?P<id>[^/\s?#]+)'
    _TESTS = [{
        'url': 'https://www.eskago.pl/radio/eska-rock',
        'info_dict': {
            'id': 'eska-rock',
            'ext': 'aac',
            'title': 'Eska ROCK',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.eskago.pl/radio/disco-polo-top',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        station_slug = self._match_id(url)
        # The page serves different markup for AJAX requests; note the header
        # value is "XHLHttpRequest" (sic) -- presumably matching what the
        # site's own JS sends. Do not "fix" it without checking the site.
        webpage = self._download_webpage(url, station_slug, headers={
            'Referer': url,
            'X-Requested-With': 'XHLHttpRequest',
        })
        # Stream URL is embedded in an inline script: { var streamUrl = '...';
        stream_url = self._html_search_regex(r"{\s*var streamUrl\s*=\s*'(https?://.+?)';",
                                             webpage, 'stream url')
        # Opaque token appended to the stream URL as a query suffix.
        icsu = self._html_search_regex(r'<input[^>]+id="icsu"[^>]+value="(.+?)"',
                                       webpage, 'some weird token thing')
        formats = []
        # used by zpr as a fallback to support /(Windows NT 6\.(1|2).*Trident)/
        if '.aac' in stream_url:
            formats.append({
                'url': stream_url.replace('.aac', '.mp3') + icsu,
                'http_headers': {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
                },
            })
        formats.append({
            'url': stream_url + icsu,
        })
        # Try several page layouts for the current programme/station name.
        title = self._html_search_regex([
            r"\$\('#radio-controller \.(?:playlist_small strong|radioline span|player-category \.category)'\)\.html\('(.+?)'\);",
            r"'programName':\s*'(.+?)',",
        ], webpage, 'stream title')
        return {
            'id': station_slug,
            'title': title,
            'formats': formats,
        }
| StarcoderdataPython |
12814125 | <reponame>snjoetw/appdaemon_apps<gh_stars>0
from datetime import datetime, timedelta
from lib.core.monitored_callback import monitored_callback
from lib.triggers import TriggerInfo
from lighting.motion_lighting import MotionLighting
# How often (in seconds) the periodic timer check runs (15 minutes).
CHECK_FREQUENCY = 900
# Internal action tokens returned by _figure_timer_action().
TIMER_ACTION_TURN_ON = 'timer_action_turn_on'
TIMER_ACTION_TURN_OFF = 'timer_action_turn_off'
class TimerSettings:
    """Read-only view over the ``timer`` section of the app configuration."""

    def __init__(self, config):
        # Keep the raw config strings; parsing into time objects happens at
        # the point of use.
        self._start = config['turn_on_start_time']
        self._end = config['turn_on_end_time']

    @property
    def turn_on_start_time(self):
        """Configured time of day at which the timer period begins."""
        return self._start

    @property
    def turn_on_end_time(self):
        """Configured time of day at which the timer period ends."""
        return self._end
class TimerMotionLighting(MotionLighting):
    """Motion lighting with a scheduled "always on" window.

    Inside the configured timer period the lights are forced on and motion
    triggers are ignored; outside it, normal motion behavior applies.
    """

    _timer_settings: TimerSettings
    def initialize(self):
        super().initialize()
        self._timer_settings = TimerSettings(self.cfg.value('timer'))
        # Start the periodic check shortly after startup, then every
        # CHECK_FREQUENCY seconds.
        now = datetime.now() + timedelta(seconds=2)
        self.run_every(self._run_every_handler, now, CHECK_FREQUENCY)
    @monitored_callback
    def _run_every_handler(self, time=None, **kwargs):
        # Periodic tick: decide whether this check window crossed the
        # configured turn-on or turn-off boundary.
        if not self.is_enabled:
            return
        trigger_info = TriggerInfo("time", {
            "time": time,
        })
        action = self._figure_timer_action()
        if action == TIMER_ACTION_TURN_OFF:
            self._turn_off_lights(trigger_info)
        elif action == TIMER_ACTION_TURN_ON:
            # Cancel any pending motion-based turn-off so the timer wins.
            self._cancel_turn_off_delay()
            self._turn_on_lights()
    def _figure_timer_action(self):
        # The "action period" is the wall-clock window covered since the
        # previous tick; a boundary falling inside it fires the action.
        # NOTE(review): this compares datetime.time values, so it presumably
        # misbehaves across midnight -- confirm if the window can wrap.
        action_period_end = datetime.now().time()
        action_period_start = (datetime.now() - timedelta(seconds=CHECK_FREQUENCY)).time()
        turn_on_start_time = self.parse_datetime(self._timer_settings.turn_on_start_time).time()
        if action_period_start <= turn_on_start_time <= action_period_end:
            return TIMER_ACTION_TURN_ON
        turn_on_end_time = self.parse_datetime(self._timer_settings.turn_on_end_time).time()
        if action_period_start <= turn_on_end_time <= action_period_end:
            return TIMER_ACTION_TURN_OFF
        # No boundary crossed in this window.
        return None
    def _is_in_timer_period(self):
        # True while the current time is inside the configured on-window.
        period = self._timer_settings
        return self.now_is_between(period.turn_on_start_time, period.turn_on_end_time)
    def _should_turn_on_lights(self, trigger_info):
        # Inside the timer window the lights are already forced on; motion
        # triggers must not interfere.
        if self._is_in_timer_period():
            return False
        return super()._should_turn_on_lights(trigger_info)
    def _should_turn_off_lights(self, trigger_info):
        # Never let motion-timeout turn the lights off during the window.
        if self._is_in_timer_period():
            return False
        return super()._should_turn_off_lights(trigger_info)
3283096 | <gh_stars>0
from selenium import webdriver
# Launch Firefox through its WebDriver and load the Selenium homepage.
# (Trailing dataset-extraction junk on the last line was removed; it was
# not valid Python.)
browser = webdriver.Firefox()
browser.get('http://seleniumhq.org/')
349320 | import setuptools
# Read the dependency list from requirements.txt (one requirement per line)
# so setup.py and pip-managed environments stay in sync.
with open('requirements.txt', 'r') as f:
    install_requires = f.read().splitlines()
# Minimal package metadata; everything else uses setuptools defaults.
setuptools.setup(name='my_project',
                 packages=['my_project'],
                 install_requires=install_requires)
| StarcoderdataPython |
12842565 | <gh_stars>1-10
#!/usr/bin/env python
'''
Copyright (c) 2016, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import roslib
import rospy
import smach
import smach_ros
from common.common_sm import GetMoveRobotSateMachine
from common.init import SearchInit
from common.move import PTUPoseCorrection
from indirect_search.nbv import NBVSetPointCloud, NextBestView, NextBestViewUpdate
from cropbox_search_states import CropBoxGeneration
from record_states import CropboxStateRecording
from common.visualize_waypoints import VisualizeWaypoints
class CropBoxRecordStateMachine():
    """SMACH state-machine definitions for crop-box view recording.

    NOTE: both state machines are built at class-definition time (the class
    body executes the ``with`` blocks); the class is used as a namespace for
    ``sm_init`` and ``sm_cropbox_record`` rather than instantiated per run.
    """

    # Inner machine: initialise the search, generate the crop box and hand
    # the resulting point cloud to the next-best-view (NBV) component.
    sm_init = smach.StateMachine(outcomes=['succeeded',
                                           'aborted'])
    with sm_init:
        smach.StateMachine.add('SEARCH_INIT',
                               SearchInit(),
                               transitions={'succeeded':'CROPBOX_GENERATION',
                                            'aborted':'aborted'},
                               remapping={'searched_object_types':'searched_object_types'})
        smach.StateMachine.add('CROPBOX_GENERATION',
                               CropBoxGeneration(),
                               transitions={'succeeded':'NBV_SET_POINT_CLOUD',
                                            'aborted':'aborted'},
                               remapping={'object_pointcloud':'object_pointcloud'})
        smach.StateMachine.add('NBV_SET_POINT_CLOUD',
                               NBVSetPointCloud(),
                               transitions={'succeeded':'succeeded',
                                            'aborted':'aborted',
                                            'too_many_deactivated_normals':'aborted'},
                               remapping={'object_pointcloud':'object_pointcloud'})
    # Outer machine: loop NBV calculation -> move robot -> correct PTU ->
    # update point cloud -> record state, until no next best view remains.
    sm_cropbox_record = smach.StateMachine(outcomes=['aborted',
                                                     'finished'])
    with sm_cropbox_record:
        smach.StateMachine.add('CROPBOX_RECORD_INIT',
                               sm_init,
                               transitions={'succeeded':'NBV_CALCULATION',
                                            'aborted':'aborted'})
        smach.StateMachine.add('NBV_CALCULATION',
                               NextBestView(),
                               transitions={'found_next_best_view':'MOVE_ROBOT_TO_VIEW',
                                            'aborted':'aborted',
                                            'no_nbv_found':'finished',
                                            'nbv_update_point_cloud':'MOVE_ROBOT_TO_VIEW'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position',
                                          'searched_object_types':'searched_object_types'})
        smach.StateMachine.add('MOVE_ROBOT_TO_VIEW',
                               GetMoveRobotSateMachine(),
                               transitions={'succeeded':'PTU_POSE_CORRECTION',
                                            'aborted':'aborted'},
                               remapping={'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position'})
        # PTU correction failure is tolerated: either way we visualise and
        # continue with the point-cloud update.
        smach.StateMachine.add('PTU_POSE_CORRECTION',
                               PTUPoseCorrection(),
                               transitions={'succeeded':'VISUALIZE_WAYPOINT',
                                            'aborted':'VISUALIZE_WAYPOINT'},
                               remapping={'goal_camera_pose':'goal_camera_pose'})
        smach.StateMachine.add('VISUALIZE_WAYPOINT',
                               VisualizeWaypoints(),
                               transitions={'succeeded':'NBV_UPDATE_POINT_CLOUD'})
        smach.StateMachine.add('NBV_UPDATE_POINT_CLOUD',
                               NextBestViewUpdate(),
                               transitions={'succeeded':'STATE_RECORDING',
                                            'aborted':'aborted',
                                            'no_nbv_found':'finished'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'searched_object_types':'searched_object_types',
                                          'deactivated_object_normals_count':'deactivated_object_normals_count'})
        smach.StateMachine.add('STATE_RECORDING',
                               CropboxStateRecording(),
                               transitions={'succeeded':'NBV_CALCULATION',
                                            'aborted':'aborted'},
                               remapping={'goal_camera_pose':'goal_camera_pose',
                                          'goal_robot_pose':'goal_robot_pose',
                                          'goal_ptu_position':'goal_ptu_position',
                                          'deactivated_object_normals_count':'deactivated_object_normals_count'})
| StarcoderdataPython |
11323600 | import warnings
class WarningTestCaseMixin:
    """Mixin adding a warning-capture assertion to unittest.TestCase classes."""

    def assertWarning(self, warning, callable, *args, **kwargs):
        """Assert that invoking ``callable(*args, **kwargs)`` emits at least
        one warning whose category equals ``warning``."""
        with warnings.catch_warnings(record=True) as caught:
            # "always" defeats the default once-per-location filtering so a
            # previously seen warning is still recorded here.
            warnings.simplefilter(action="always")
            callable(*args, **kwargs)
        categories = [entry.category for entry in caught]
        self.assertTrue(warning in categories)
| StarcoderdataPython |
151135 | <filename>awokado/exceptions/__init__.py
from .auth import AuthError
from .base import BaseApiException
from .not_found import NotFound, RelationNotFound, ResourceNotFound
from .bad_request import (
BadFilter,
BadLimitOffset,
BadRequest,
MethodNotAllowed,
IdFieldMissingError,
)
from .forbidden import (
CreateResourceForbidden,
DeleteResourceForbidden,
Forbidden,
ReadResourceForbidden,
UpdateResourceForbidden,
)
| StarcoderdataPython |
1708406 | from setuptools import setup
# Package metadata for the cpsk distribution.
setup(
    name='cpsk',
    description='an unofficial api for grabbing data from cp.sk',
    author='<NAME>',
    author_email='<EMAIL>',
    version='0.1.5',
    url='https://github.com/Adman/python-cpsk-api',
    # 'json' was removed from install_requires: it is part of the Python
    # standard library, not a PyPI distribution, so listing it makes
    # `pip install cpsk` fail while trying to resolve a package named "json".
    install_requires=['requests', 'lxml'],
    include_package_data=True,
    packages=['cpsk'],
    license="The MIT License (MIT)",
    keywords=['travel', 'train', 'bus', 'mhd']
)
| StarcoderdataPython |
3544193 | # An example of the Holdout Cross-Validation split
import pandas
from sklearn import datasets
from sklearn.model_selection import train_test_split
# The percentage (as a decimal) of our data that will be training data
# The percentage (as a decimal) of our data that will be training data
TRAIN_SPLIT = 0.8
# The diabetes dataset contains the following columns:
columns = ['age', 'sex', 'bmi', 'map', 'tc', 'ldl', 'hdl', 'tch', 'ltg', 'glu']
# Load the diabetes dataset
dataset = datasets.load_diabetes()
# Create a pandas DataFrame from the diabetes dataset
dataframe = pandas.DataFrame(dataset.data, columns=columns)
# Split via the holdout method: train_size/test_size are complementary, so
# each data point lands in exactly one of the two sets (selection is random
# on every run unless a random_state is supplied).
x_train, x_test, y_train, y_test = train_test_split(dataframe,
                                                    dataset.target,
                                                    train_size=TRAIN_SPLIT,
                                                    test_size=1 - TRAIN_SPLIT)
print("""
The holdout method removes a certain portion of the training data and uses it
as test data. Ideally, the data points removed are random on each run.
The following output shows a set of sample diabetes data split into test and
training data:
""")
# Print our test and training data
print("Total diabetes data points: {}".format(len(dataframe.index)))
print("# of training data points: {} (~{}%)".format(len(x_train),
                                                    TRAIN_SPLIT * 100))
print("# of test data points: {} (~{}%)\n".format(len(x_test),
                                                  (1 - TRAIN_SPLIT) * 100))
# NOTE(review): the message below mentions "uncomment the print statements",
# but the prints that follow are not commented out -- the text looks stale.
print("""If you'd like to see the actual data points, uncomment the print
statements at the bottom of this script.
""")
print("Training data:\n{}\n".format(x_train))
print("Test data:\n{}".format(x_test))
| StarcoderdataPython |
220812 | import csv
import json
import sys
from tqdm import tqdm
from preproc import run_emtsv_all, run_emtsv_tok
def parse_row(row):
    """Convert one raw 28-column CSV row into the document dict used downstream.

    Only a subset of the columns is kept; engagement counters (likes, shares,
    reactions, ...) and the ``*_orig`` columns are dropped.
    """
    (_doc_id_x, doc_id, doc_type, title, author, author_id, content,
     date_created, date_added, context, url, domain, _sentiment,
     _sent_points, _comments, _views, _shares, _wow, _love, _like, _haha,
     _sad, _angry, _thankful, _fans, _url_orig, _content_orig,
     domain_category) = row
    return {
        "doc_id": doc_id,
        "doc_type": doc_type,
        "title": title,
        "author": {
            "id": author_id,
            "name": author},
        "content": content,
        "date_created": date_created,
        "date_added": date_added,
        "context": context,
        "url": url,
        "domain": domain,
        "domain_category": domain_category
    }
def run_emtsv(text):
    """Tokenize *text* into sentences and run the full emtsv analysis on each.

    Returns a list of ``{"text": ..., "ana": ...}`` dicts, or ``None`` when
    the text exceeds 100k characters (such documents are skipped).
    """
    size = len(text)
    if size > 100000:
        sys.stderr.write('skipping long sen ({} chars)\n'.format(size))
        return None
    analysed = []
    for sen in run_emtsv_tok(text):
        sen_text = sen['text']
        analysed.append({"text": sen_text, "ana": run_emtsv_all(sen_text)})
    return analysed
def main():
    """Stream CSV rows from stdin and emit one analysed JSON document per line."""
    reader = csv.reader(sys.stdin, delimiter=";", quotechar='"')
    for i, row in tqdm(enumerate(reader)):
        # Skip the header row, which is marked by an 'X' in its first column.
        if i == 0 and row[0] == 'X':
            continue
        doc = parse_row(row)
        raw_text = doc['content']
        # Replace the flat content string with the tokenized+analysed form.
        doc['content'] = {
            "text": raw_text,
            "sens": run_emtsv(raw_text)}
        print(json.dumps(doc))
# Allow the module to be run directly as a stdin->stdout filter.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
12848356 | # -*- coding: utf-8 -*-
"""
Linear algebra operations and helpers.
Inspired by <NAME>'s transformation.py <http://www.lfd.uci.edu/~gohlke/>
This module is not directly exported by the `crystals` library. Use it with caution.
"""
import math
import numpy as np
# standard basis: e1, e2, e3 are the rows of the 3x3 identity matrix
e1, e2, e3 = np.eye(3)
def affine_map(array):
    """
    Extend a 3x3 transform matrix to a 4x4 general affine transform.

    Parameters
    ----------
    array : ndarray, shape {(3,3), (4,4)}
        Transformation matrix. A (4,4) input is returned unchanged.

    Returns
    -------
    extended : ndarray, shape (4,4)
        The input embedded in the top-left block of a 4x4 matrix whose
        bottom-right entry is 1 (no translation, unit homogeneous part).

    Raises
    ------
    ValueError : If the transformation matrix is neither 3x3 or 4x4
    """
    shape = array.shape
    if shape == (4, 4):
        # Already a full affine transform; hand it back untouched.
        return array
    if shape == (3, 3):
        extended = np.zeros(shape=(4, 4), dtype=array.dtype)
        extended[3, 3] = 1
        extended[:3, :3] = array
        return extended
    raise ValueError(
        "Array shape not 3x3 or 4x4, and thus is not a transformation matrix."
    )
def transform(matrix, array):
    """
    Applies a matrix transform on an array.

    Parameters
    ----------
    matrix : ndarray, shape {(3,3), (4,4)}
        Transformation matrix.
    array : ndarray, shape {(3,), (3,3), (4,4)}
        Array to be transformed. Either a 1x3 vector, or a transformation
        matrix in 3x3 or 4x4 shape.

    Returns
    -------
    transformed : ndarray
        Transformed array, either a 1D vector or a 4x4 transformation matrix

    Raises
    ------
    ValueError : If the transformation matrix is neither 3x3 or 4x4
    """
    array = np.asarray(array)
    if matrix.shape not in [(3, 3), (4, 4)]:
        # The previous message was built with a backslash-continued string
        # literal that embedded a long run of indentation spaces into the
        # runtime text; this single-line f-string fixes that.
        raise ValueError(
            f"Input matrix is neither a 3x3 or 4x4 matrix, but rather of shape {matrix.shape}."
        )
    matrix = affine_map(matrix)

    # Case of a vector (e.g. position vector): embed as (x, y, z, 1),
    # transform, and drop the homogeneous coordinate again.
    if array.ndim == 1:
        extended_vector = np.array([0, 0, 0, 1], dtype=array.dtype)
        extended_vector[:3] = array
        return np.dot(matrix, extended_vector)[:3]

    # Otherwise the input is itself a transform: compose the two maps.
    array = affine_map(array)
    return np.dot(matrix, array)
def translation_matrix(direction):
    """
    Build the 4x4 affine matrix that translates by *direction*.

    Parameters
    ----------
    direction : array_like, shape (3,)
        Translation vector; only the first three components are used.

    Returns
    -------
    translation : `~numpy.ndarray`, shape (4,4)
        4x4 translation matrix.
    """
    translation = np.eye(4)
    # The translation lives in the last column of the affine matrix.
    translation[:3, 3] = np.asarray(direction)[:3]
    return translation
def change_of_basis(basis1, basis2=(e1, e2, e3)):
    """Return the matrix mapping coordinates in *basis1* to coordinates in *basis2*.

    Parameters
    ----------
    basis1 : list of array_like, shape (3,)
        First basis
    basis2 : list of array_like, shape (3,), optional
        Second basis. By default, this is the standard basis

    Returns
    -------
    cob : `~numpy.ndarray`, shape (3,3)
        Change-of-basis matrix.
    """
    # Stacking basis vectors as columns yields the matrix that maps
    # coordinates expressed in that basis into the standard basis.
    basis1_to_standard = np.column_stack([np.asarray(v).reshape(3) for v in basis1])
    basis2_to_standard = np.column_stack([np.asarray(v).reshape(3) for v in basis2])
    # Route through the standard basis: basis1 -> standard -> basis2.
    return np.dot(np.linalg.inv(basis2_to_standard), basis1_to_standard)
def is_basis(basis):
    """
    Returns true if the set of vectors forms a basis, i.e. if the vectors
    are linearly independent.

    Parameters
    ----------
    basis : list of array-like, shape (3,)

    Returns
    -------
    out : bool
        Whether or not the basis is valid.
    """
    # Bug fix: the previous implementation checked `0 not in eigvals(...)`,
    # an exact floating-point comparison that can mislabel numerically
    # singular vector sets. matrix_rank uses an SVD with a sensible
    # tolerance, which is the robust way to test linear independence.
    matrix = np.asarray(basis)
    return np.linalg.matrix_rank(matrix) == matrix.shape[0]
def is_rotation_matrix(matrix):
    """
    Checks whether a matrix is orthogonal with unit determinant (1 or -1),
    properties of rotation matrices.

    Parameters
    ----------
    matrix : ndarray, shape {(3,3), (4,4)}
        Rotation matrix candidate. Note: a (4,4) input is currently checked
        as a whole matrix (see TODO below).

    Returns
    -------
    result : bool
        If True, input could be a rotation matrix.
    """
    # TODO: is this necessary? should a composite transformation
    # of translation and rotation return True?
    # if matrix.shape == (4,4):
    #     matrix = matrix[:3,:3]

    # Bug fix: orthogonality is tested as M @ M.T == I rather than by
    # inverting the matrix; np.linalg.inv raises LinAlgError on a singular
    # candidate instead of letting this predicate return False.
    identity = np.eye(matrix.shape[0])
    is_orthogonal = np.allclose(np.dot(matrix, np.transpose(matrix)), identity)
    unit_determinant = np.allclose(abs(np.linalg.det(matrix)), 1)
    return is_orthogonal and unit_determinant
def rotation_matrix(angle, axis=(0, 0, 1)):
    """Return matrix to rotate about axis defined by direction around the origin [0,0,0].

    Parameters
    ----------
    angle : float
        Rotation angle [rad]
    axis : array-like of length 3
        Axis about which to rotate

    Returns
    -------
    matrix : `~numpy.ndarray`, shape (3,3)
        Rotation matrix.

    See also
    --------
    translation_rotation_matrix

    Notes
    -----
    To combine rotation and translations, see
    http://www.euclideanspace.com/maths/geometry/affine/matrix4x4/index.htm
    """
    sin_a, cos_a = math.sin(angle), math.cos(angle)

    # Normalize the rotation axis to unit length.
    unit = np.asarray(axis)
    unit = unit / np.linalg.norm(unit)
    ux, uy, uz = unit

    # Rodrigues' formula: R = cos(a) I + (1 - cos(a)) u uT + sin(a) [u]x,
    # where [u]x is the skew-symmetric cross-product matrix of u.
    skew = np.array(
        [
            [0.0, -uz, uy],
            [uz, 0.0, -ux],
            [-uy, ux, 0.0],
        ]
    )
    return cos_a * np.eye(3) + (1.0 - cos_a) * np.outer(unit, unit) + sin_a * skew
def translation_rotation_matrix(angle, axis, translation):
    """Compose a rotation about *axis* and a translation into one 4x4 affine matrix.

    Parameters
    ----------
    angle : float
        Rotation angle [rad]
    axis : array-like of length 3
        Axis about which to rotate
    translation : array_like, shape (3,)
        Translation vector

    Returns
    -------
    matrix : `~numpy.ndarray`, shape (4,4)
        Affine transform matrix.
    """
    shift = np.asarray(translation)
    combined = affine_map(rotation_matrix(angle=angle, axis=axis))
    # The translation component lives in the last column.
    combined[:3, 3] = shift
    return combined
def change_basis_mesh(xx, yy, zz, basis1, basis2):
    """
    Changes the basis of meshgrid arrays.

    Parameters
    ----------
    xx, yy, zz : ndarrays
        Arrays of equal shape, such as produced by numpy.meshgrid.
    basis1 : list of ndarrays, shape(3,)
        Basis of the mesh
    basis2 : list of ndarrays, shape(3,)
        Basis in which to express the mesh

    Returns
    -------
    XX, YY, ZZ : `~numpy.ndarray`
    """
    # Build the coordinate array row-wise. Bug fix: np.float was deprecated
    # in NumPy 1.20 and removed in 1.24; the builtin float is equivalent.
    linearized = np.vstack((xx.ravel(), yy.ravel(), zz.ravel())).astype(float)

    # Apply the change of basis to every column (i.e. every mesh point).
    COB = change_of_basis(basis1, basis2)
    changed = np.dot(COB, linearized)
    return (
        changed[0, :].reshape(xx.shape),
        changed[1, :].reshape(yy.shape),
        changed[2, :].reshape(zz.shape),
    )
def minimum_image_distance(xx, yy, zz, lattice):
    """
    Returns a periodic array according to the minimum image convention.

    Parameters
    ----------
    xx, yy, zz : ndarrays
        Arrays of equal shape, such as produced by numpy.meshgrid.
    lattice : list of ndarrays, shape(3,)
        Basis of the mesh

    Returns
    -------
    r : `~numpy.ndarray`
        Minimum image distance over the lattice
    """
    COB = change_of_basis(np.eye(3), lattice)
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is equivalent.
    linearized = np.vstack((xx.ravel(), yy.ravel(), zz.ravel())).astype(float)

    # In the unit-cell basis the cell is a unit cube, so wrapping into the
    # nearest image is a round-to-nearest-integer subtraction.
    ulinearized = np.dot(COB, linearized)
    ulinearized -= np.rint(ulinearized)
    linearized = np.dot(np.linalg.inv(COB), ulinearized)
    return np.reshape(np.linalg.norm(linearized, axis=0), xx.shape)
| StarcoderdataPython |
9653954 | #!/usr/bin/env python
# Run the Django test suite for this project, optionally under coverage.py.
from argparse import ArgumentParser
import sys

# Command-line switches controlling test execution.
parser = ArgumentParser(description="Run the test suite.")
parser.add_argument(
    "--failfast",
    action="store_true",
    default=False,
    dest="failfast",
    help="Stop the test suite after the first failed test.",
)
parser.add_argument(
    "--no-coverage",
    action="store_false",
    default=True,
    dest="coverage",
    help="Do not run coverage.py while running the tests.",
)
parser.add_argument(
    "--no-input",
    action="store_false",
    default=True,
    dest="interactive",
    help="If the tests require input, do not prompt the user for input.",
)
args = parser.parse_args()

# Start coverage collection before the package under test is imported so
# module-level code is measured. Coverage is optional: when coverage.py is
# not installed, the tests simply run without it.
if args.coverage:
    try:
        from coverage import coverage

        cov = coverage(include="doac*")
        cov.start()
    except ImportError:
        cov = None
else:
    cov = None

# Configure Django settings before importing anything that reads them.
from django.conf import settings
from tests import settings as test_settings

# NOTE(review): Django settings are upper-case; `debug=True` sets a
# lower-case attribute that Django itself does not read -- confirm intent.
settings.configure(test_settings, debug=True)

from django.test.utils import get_runner

TestRunner = get_runner(settings)
runner = TestRunner(verbosity=1, interactive=args.interactive, failfast=args.failfast)
failures = runner.run_tests(["tests", ])

# Stop coverage and emit the HTML report even when tests failed.
if cov:
    cov.stop()
    cov.html_report()

# Exit non-zero when any test failed so CI can detect the failure.
if failures:
    sys.exit(bool(failures))
| StarcoderdataPython |
3487992 | from typing import Callable
from PyQt5 import QtWidgets
from PyQt5.QtGui import QMovie
from gui.main_window import Ui_main_window
from gui.tutorial_window import Ui_tutorial_window
from gui.window_with_close_listener import WindowWithCloseListener
_tutorial_path = "tutorials/media/"  # base directory of the tutorial GIF files
def _gui_init_tutorial(tutorial_push_button: QtWidgets.QPushButton, title: str, file_name: str) -> Callable[[], None]:
    """Wire *tutorial_push_button* to open a single tutorial window playing a GIF.

    While the window is open the button is disabled, so at most one tutorial
    window per button can exist at a time.

    Parameters
    ----------
    tutorial_push_button : button that opens the tutorial window
    title : title for the tutorial window
    file_name : GIF file name, relative to the tutorials media directory

    Returns
    -------
    A close listener: calling it closes the tutorial window if it is open
    (intended to be invoked when the main window closes).
    """
    tutorial_window = None
    is_tutorial_window_open = False

    def on_tutorial_window_close():
        # Re-enable the launcher button once the window goes away.
        nonlocal is_tutorial_window_open
        is_tutorial_window_open = False
        tutorial_push_button.setEnabled(True)
        # noinspection PyTypeChecker
        tutorial_push_button.setToolTip(None)

    def on_tutorial_push_button_clicked():
        nonlocal is_tutorial_window_open, tutorial_window
        is_tutorial_window_open = True
        tutorial_push_button.setEnabled(False)
        tutorial_push_button.setToolTip("Tutorial window is already open now")

        tutorial_window = WindowWithCloseListener(on_tutorial_window_close)
        tutorial_ui = Ui_tutorial_window()
        tutorial_ui.setupUi(tutorial_window)
        tutorial_window.setWindowTitle(title)

        gif = QMovie(_tutorial_path + file_name)
        tutorial_ui.tutorial_label.setMovie(gif)
        gif.start()
        tutorial_window.show()

    def close_listener() -> None:
        if is_tutorial_window_open:
            # Bug fix: previously closed `creditor_window`, an undefined name
            # (NameError at runtime); close the tutorial window instead.
            tutorial_window.close()

    tutorial_push_button.clicked.connect(on_tutorial_push_button_clicked)
    return close_listener
def gui_init_usa_tutorial(ui: Ui_main_window) -> Callable[[], None]:
    """Attach the USA-country tutorial to its button; return its close listener."""
    return _gui_init_tutorial(ui.usa_tutorial_push_button,
                              "Tutorial for USA country",
                              "usa_country.gif")
def gui_init_all_other_tutorial(ui: Ui_main_window) -> Callable[[], None]:
    """Attach the all-other-countries tutorial to its button; return its close listener."""
    return _gui_init_tutorial(ui.all_other_tutorial_push_button,
                              "Tutorial for All other countries",
                              "all_other_countries.gif")
| StarcoderdataPython |
9732797 | <reponame>michaelhkw/impala
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Client tests for SQL statement authorization
import grp
import pytest
from getpass import getuser
from os import getenv
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_uncompressed_text_dimension
SENTRY_CONFIG_FILE = getenv('IMPALA_HOME') + '/fe/src/test/resources/sentry-site.xml'
class TestGrantRevoke(CustomClusterTestSuite, ImpalaTestSuite):
    """End-to-end tests for GRANT/REVOKE statements, run against a custom
    cluster configured with Sentry authorization (SENTRY_CONFIG_FILE)."""

    @classmethod
    def add_test_dimensions(cls):
        # Restrict the matrix to uncompressed text: the test exercises
        # authorization, not file formats.
        super(TestGrantRevoke, cls).add_test_dimensions()
        cls.ImpalaTestMatrix.add_dimension(
            create_uncompressed_text_dimension(cls.get_workload()))

    @classmethod
    def get_workload(cls):
        return 'functional-query'

    def setup_method(self, method):
        super(TestGrantRevoke, self).setup_method(method)
        self.__test_cleanup()

    def teardown_method(self, method):
        self.__test_cleanup()
        super(TestGrantRevoke, self).teardown_method(method)

    def __test_cleanup(self):
        """Drop roles and the test database left behind by earlier runs."""
        # Clean up any old roles created by this test
        for role_name in self.client.execute("show roles").data:
            if 'grant_revoke_test' in role_name:
                self.client.execute("drop role %s" % role_name)

        # Cleanup any other roles that were granted to this user.
        # TODO: Update Sentry Service config and authorization tests to use LocalGroupMapping
        # for resolving users -> groups. This way we can specify custom test users that don't
        # actually exist in the system.
        group_name = grp.getgrnam(getuser()).gr_name
        for role_name in self.client.execute("show role grant group `%s`" % group_name).data:
            self.client.execute("drop role %s" % role_name)

        # Create a temporary admin user so we can actually view/clean up the test
        # db.
        self.client.execute("create role grant_revoke_test_admin")
        try:
            self.client.execute("grant all on server to grant_revoke_test_admin")
            self.client.execute("grant role grant_revoke_test_admin to group %s" % group_name)
            self.cleanup_db('grant_rev_db', sync_ddl=0)
        finally:
            # Always drop the temporary admin role, even when cleanup fails.
            self.client.execute("drop role grant_revoke_test_admin")

    @pytest.mark.execute_serially
    @CustomClusterTestSuite.with_args(
        impalad_args="--server_name=server1",
        catalogd_args="--sentry_config=" + SENTRY_CONFIG_FILE)
    def test_grant_revoke(self, vector):
        # The actual GRANT/REVOKE statements and expected results live in
        # the QueryTest/grant_revoke test file.
        self.run_test_case('QueryTest/grant_revoke', vector, use_db="default")
| StarcoderdataPython |
6536034 | <filename>youbot_gazebo_publisher/src/youbot_ros_hello_world.py
#!/usr/bin/env python
# @brief python_file
from __future__ import print_function
import roslib
# roslib.load_manifest('teleop')
import rospy
from numpy import inf, zeros
from geometry_msgs.msg import Twist
import trajectory_msgs.msg as tm
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving around:
u i o
j k l
m , .
For Holonomic mode (strafing), hold down the shift key:
---------------------------
U I O
J K L
M < >
t : up (+z)
b : down (-z)
anything else : stop
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
CTRL-C to quit
"""
# Key -> (x, y, z, th) motion components. Lower-case keys combine
# forward/backward motion with rotation; upper-case keys strafe
# (holonomic mode); t/b move along +z/-z.
moveBindings = {
    "i": (1, 0, 0, 0),
    "o": (1, 0, 0, -1),
    "j": (0, 0, 0, 1),
    "l": (0, 0, 0, -1),
    "u": (1, 0, 0, 1),
    ",": (-1, 0, 0, 0),
    ".": (-1, 0, 0, 1),
    "m": (-1, 0, 0, -1),
    "O": (1, -1, 0, 0),
    "I": (1, 0, 0, 0),
    "J": (0, 1, 0, 0),
    "L": (0, -1, 0, 0),
    "U": (1, 1, 0, 0),
    "<": (-1, 0, 0, 0),
    ">": (-1, -1, 0, 0),
    "M": (-1, 1, 0, 0),
    "t": (0, 0, 1, 0),
    "b": (0, 0, -1, 0),
}

# Key -> (linear multiplier, angular multiplier): q/z scale both speeds,
# w/x only the linear speed, e/c only the angular speed (by +/-10%).
speedBindings = {
    "q": (1.1, 1.1),
    "z": (0.9, 0.9),
    "w": (1.1, 1),
    "x": (0.9, 1),
    "e": (1, 1.1),
    "c": (1, 0.9),
}
def getKey():
    """
    Function to get keyboard input

    :return: key pressed
    :rtype: char
    """
    # Switch the terminal to raw mode so a single keypress is delivered
    # without waiting for Enter.
    tty.setraw(sys.stdin.fileno())
    select.select([sys.stdin], [], [], 0)
    key = sys.stdin.read(1)
    # Restore the terminal settings captured at program start (module-level
    # `settings`), otherwise the shell would be left in raw mode.
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
def vels(speed, turn):
    """Format the current velocity for display.

    :param speed: linear velocity (m/sec)
    :param turn: heading direction (radians)
    :return: typeset string useful for displaying current velocity
    :rtype: string
    """
    return f"currently:\tspeed {speed}\tturn {turn} "
def createArmPositionCommand(newPositions):
    """Build a JointTrajectory message driving the 5 arm joints to *newPositions*."""
    n_joints = len(newPositions)
    point = tm.JointTrajectoryPoint()
    point.positions = newPositions
    point.velocities = zeros(n_joints)
    point.accelerations = zeros(n_joints)
    point.time_from_start = rospy.Duration(0.5)

    command = tm.JointTrajectory()
    command.points = [point]
    # Joints are named arm_joint_1 .. arm_joint_5.
    command.joint_names = ["arm_joint_" + str(i) for i in range(1, 6)]
    command.header.frame_id = "arm_link_0"
    command.header.stamp = rospy.Time.now()
    return command
def createGripperPositionCommand(newPosition):
    """Build a JointTrajectory moving both gripper fingers to *newPosition*."""
    point = tm.JointTrajectoryPoint()
    point.positions = [newPosition, newPosition]  # both fingers move together
    point.velocities = zeros(2)
    point.accelerations = zeros(2)
    point.time_from_start = rospy.Duration(0.5)

    command = tm.JointTrajectory()
    command.points = [point]
    command.joint_names = ["gripper_finger_joint_l", "gripper_finger_joint_r"]
    ## fill message header and sent it out
    command.header.frame_id = "gripper_finger_joint_l"
    command.header.stamp = rospy.Time.now()
    return command
def moveArm():
    """Send the arm through two canned joint configurations, 3 seconds apart."""
    publisher = rospy.Publisher(
        "/arm_1/arm_controller/command", tm.JointTrajectory, queue_size=1
    )
    waypoints = (
        [2.95, 1.05, -2.44, 1.73, 2.95],
        [0.11, 0.11, -0.11, 0.11, 0.11],
    )
    for joint_values in waypoints:
        publisher.publish(createArmPositionCommand(joint_values))
        rospy.sleep(3)
def moveGripper():
    """Open the gripper to 0.11, wait 3 seconds, then close it fully."""
    publisher = rospy.Publisher(
        "/arm_1/gripper_controller/command", tm.JointTrajectory, queue_size=1
    )
    publisher.publish(createGripperPositionCommand(0.11))
    rospy.sleep(3)
    publisher.publish(createGripperPositionCommand(0))
if __name__ == "__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node("teleop_twist_keyboard")
# Publisher for velocity command fed to Whiskeye
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
speed = rospy.get_param("~speed", 1.5) # target linear velocity
turn = rospy.get_param("~turn", 1.0) # angle for turning
# Position variables
x = 1
y = 1
z = 1
# orientation variables
th = 1
twist = Twist()
twist.linear.x = x * speed
twist.linear.y = y * speed
twist.linear.z = z * speed
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = th * turn
pub.publish(twist)
try:
print(msg) # print usage instructions
print(vels(speed, turn)) # print robot velocity information
while 1:
# key = getKey() # get the key pressed
# if key in moveBindings.keys():
# x = moveBindings[key][0]
# y = moveBindings[key][1]
# z = moveBindings[key][2]
# th = moveBindings[key][3]
# elif key in speedBindings.keys():
# speed = speed * speedBindings[key][0]
# turn = turn * speedBindings[key][1]
# print(vels(speed, turn))
# else:
# # Reset parameters if arbitrary key is pressed
# x = 0
# y = 0
# z = 0
# th = 0
# print(msg) # Show the usage instructions again
# if key == "\x03": # CTRL-C pressed
# break
# twist = Twist()
# twist.linear.x = x * speed
# twist.linear.y = y * speed
# twist.linear.z = z * speed
# twist.angular.x = 0
# twist.angular.y = 0
# twist.angular.z = th * turn
# Publish commands to the robot
# pub.publish(twist)
print("moveArm")
moveArm()
# print("moveGripper")
# moveGripper()
except Exception as e:
print(e)
finally:
twist = Twist()
twist.linear.x = 0
twist.linear.y = 0
twist.linear.z = 0
twist.angular.x = 0
twist.angular.y = 0
twist.angular.z = 0
pub.publish(twist)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
| StarcoderdataPython |
3273409 | from gremlin_python import statics
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.process.traversal import T
from gremlin_python.process.traversal import Order
from gremlin_python.process.traversal import Cardinality
from gremlin_python.process.traversal import Column
from gremlin_python.process.traversal import Direction
from gremlin_python.process.traversal import Operator
from gremlin_python.process.traversal import P
from gremlin_python.process.traversal import Pop
from gremlin_python.process.traversal import Scope
from gremlin_python.process.traversal import Barrier
from gremlin_python.process.traversal import Bindings
from gremlin_python.process.traversal import WithOptions
from gremlin_python.driver import client
from gremlin_python.process.graph_traversal import select
from gremlin_python.process.graph_traversal import property
class GremlinConnection:
    """Stateless helpers for talking to a Gremlin server.

    Every method takes all of its inputs as arguments and keeps no state,
    so all of them are declared @staticmethod (previously only one was,
    which made instance-bound calls on the others fail).
    """

    @staticmethod
    def inject_vertex_properties(traversal, properties, label):
        """Append steps to *traversal* that add a vertex with *label* and
        the key/value map *properties*.

        Not committed: iterate / next() the returned traversal to actually
        write to the graph.
        """
        return (
            traversal.inject(properties).unfold().as_('properties')
            .addV(label).as_('vertex')
            .sideEffect(
                select('properties').unfold().as_('kv')
                .select('vertex')
                .property(select('kv').by(Column.keys),
                          select('kv').by(Column.values)))
        )

    @staticmethod
    def gtx_inject_vertex_properties(gtx, properties, label):
        """Same as inject_vertex_properties, but starting from a traversal
        source (g) instead of an in-flight traversal."""
        return (
            gtx.inject(properties).unfold().as_('properties')
            .addV(label).as_('vertex')
            .sideEffect(
                select('properties').unfold().as_('kv')
                .select('vertex')
                .property(select('kv').by(Column.keys),
                          select('kv').by(Column.values)))
        )

    @staticmethod
    def connection_driver(host, port):
        """Open a DriverRemoteConnection to ws://host:port/gremlin."""
        return GremlinConnection._connection_driver(host, port)

    @staticmethod
    def _connection_driver(host, port):
        connect_string = f'ws://{host}:{port}/gremlin'
        # Callers are responsible for closing the connection on shutdown.
        return DriverRemoteConnection(connect_string, 'g')

    @staticmethod
    def add_vertex_traversal(traversal, label, properties):
        """Append an addV step to *traversal*.

        This is not commited!!! call next() on what is returned to right to
        the graph.

        NOTE(review): .property(properties) passes a single argument, while
        gremlin's property() step normally takes (key, value) pairs --
        confirm callers pass what this expects.
        """
        return traversal.addV(label).property(properties)

    @staticmethod
    def client_connection(host, port):
        """Open a low-level gremlin Client for ws://host:port/gremlin."""
        return GremlinConnection._client_connection(host, port)

    @staticmethod
    def _client_connection(host, port):
        # Bug fix: the Client previously hard-coded
        # 'ws://localhost:8182/gremlin' and silently ignored host/port.
        connect_string = f'ws://{host}:{port}/gremlin'
        # The connection should be closed on shut down with connection.close()
        return client.Client(connect_string, 'g')

    @staticmethod
    def traversal_connection(connection):
        """Build a traversal source `g` bound to an open remote connection."""
        return GremlinConnection._traversal_connection(connection)

    @staticmethod
    def _traversal_connection(connection):
        # Reuse the returned `g` across the application.
        return traversal().withRemote(connection)
# Reuse 'g' across the application | StarcoderdataPython |
1808809 | <gh_stars>1-10
"""Support for monitoring a Sense energy sensor device."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.core import callback
from . import SENSE_DATA, SENSE_DEVICE_UPDATE
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

# Every Sense device is exposed as a 'power' binary sensor.
BIN_SENSOR_CLASS = 'power'

# Maps icon names reported by the Sense API to Material Design icon names;
# rendered as 'mdi:<name>' by sense_to_mdi() below.
MDI_ICONS = {
    'ac': 'air-conditioner',
    'aquarium': 'fish',
    'car': 'car-electric',
    'computer': 'desktop-classic',
    'cup': 'coffee',
    'dehumidifier': 'water-off',
    'dishes': 'dishwasher',
    'drill': 'toolbox',
    'fan': 'fan',
    'freezer': 'fridge-top',
    'fridge': 'fridge-bottom',
    'game': 'gamepad-variant',
    'garage': 'garage',
    'grill': 'stove',
    'heat': 'fire',
    'heater': 'radiatior',
    'humidifier': 'water',
    'kettle': 'kettle',
    'leafblower': 'leaf',
    'lightbulb': 'lightbulb',
    'media_console': 'set-top-box',
    'modem': 'router-wireless',
    'outlet': 'power-socket-us',
    'papershredder': 'shredder',
    'printer': 'printer',
    'pump': 'water-pump',
    'settings': 'settings',
    'skillet': 'pot',
    'smartcamera': 'webcam',
    'socket': 'power-plug',
    'sound': 'speaker',
    'stove': 'stove',
    'trash': 'trash-can',
    'tv': 'television',
    'vacuum': 'robot-vacuum',
    'washer': 'washing-machine',
}
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Sense binary sensor."""
    if discovery_info is None:
        return

    data = hass.data[SENSE_DATA]
    discovered = await data.get_discovered_device_data()
    # Only expose devices the Sense monitor allows in its device list.
    entities = [
        SenseDevice(data, device)
        for device in discovered
        if device['tags']['DeviceListAllowed'] == 'true'
    ]
    async_add_entities(entities)
def sense_to_mdi(sense_icon):
    """Convert sense icon to mdi icon, falling back to the power-plug icon."""
    return f"mdi:{MDI_ICONS.get(sense_icon, 'power-plug')}"
class SenseDevice(BinarySensorDevice):
    """Implementation of a Sense energy device binary sensor.

    The sensor is "on" while the Sense coordinator lists the device as
    actively drawing power. Updates are pushed over the dispatcher signal
    SENSE_DEVICE_UPDATE, so polling is disabled.
    """

    def __init__(self, data, device):
        """Initialize the Sense binary sensor.

        data: shared Sense coordinator (hass.data[SENSE_DATA]).
        device: discovered-device dict from the Sense API; the 'name',
        'id' and 'icon' keys are used here.
        """
        self._name = device['name']
        self._id = device['id']
        self._icon = sense_to_mdi(device['icon'])
        self._data = data
        # Set by async_added_to_hass; calling it cancels the subscription.
        self._undo_dispatch_subscription = None

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        # The coordinator tracks currently-active devices by name.
        return self._name in self._data.active_devices

    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return the id of the binary sensor."""
        return self._id

    @property
    def icon(self):
        """Return the icon of the binary sensor."""
        return self._icon

    @property
    def device_class(self):
        """Return the device class of the binary sensor."""
        return BIN_SENSOR_CLASS

    @property
    def should_poll(self):
        """Return False: state is pushed, the device should not poll for updates."""
        return False

    async def async_added_to_hass(self):
        """Register callbacks."""
        @callback
        def update():
            """Update the state."""
            self.async_schedule_update_ha_state(True)

        # Refresh this entity whenever the coordinator signals new data.
        self._undo_dispatch_subscription = async_dispatcher_connect(
            self.hass, SENSE_DEVICE_UPDATE, update)

    async def async_will_remove_from_hass(self):
        """Undo subscription."""
        if self._undo_dispatch_subscription:
            self._undo_dispatch_subscription()
| StarcoderdataPython |
1774430 | <filename>src/test/cc_binary_test.py<gh_stars>1-10
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <<EMAIL>>
# Date: October 20, 2011
"""
This is the test module for cc_binary target.
"""
import blade_test
class TestCcBinary(blade_test.TargetTest):
    """Test cc_binary """
    def setUp(self):
        """setup method. """
        # Build and run the 'string_main_prog' target in the 'cc' workspace.
        self.doSetUp('cc', command='run', target='string_main_prog')

    def testGenerateRules(self):
        """Test that rules are generated correctly. """
        self.assertTrue(self.runBlade())

        # Each translation unit must be compiled ('-c') into its object file.
        com_lower_line = self.findCommand(['plowercase.cpp.o', '-c'])
        com_upper_line = self.findCommand(['puppercase.cpp.o', '-c'])
        com_string_line = self.findCommand(['string_main.cpp.o', '-c'])
        # Link line of the final binary (trailing space avoids matching a
        # longer target name that shares the prefix).
        string_main_depends_libs = self.findCommand('string_main_prog ')

        # Compile and link flags must match the expected blade defaults.
        self.assertCxxFlags(com_lower_line)
        self.assertCxxFlags(com_upper_line)
        self.assertCxxFlags(com_string_line)
        self.assertLinkFlags(string_main_depends_libs)

        # The binary must link against both of its dependency libraries.
        self.assertIn('liblowercase.a', string_main_depends_libs)
        self.assertIn('libuppercase.a', string_main_depends_libs)

        # Running the program should have produced its expected output.
        self.assertTrue(self.findCommand(['Hello, world']))


if __name__ == '__main__':
    blade_test.run(TestCcBinary)
| StarcoderdataPython |
9681467 | <reponame>Botomatik/JackBot
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is a script written to add the template "orphan" to the pages that aren't linked by other pages.
It can give some strange Errors sometime, I hope that all of them are fixed in this version.
These command line parameters can be used to specify which pages to work on:
¶ms;
-xml Retrieve information from a local XML dump (pages-articles
or pages-meta-current, see http://download.wikimedia.org).
Argument can also be given as "-xml:filename".
-page Only edit a specific page.
Argument can also be given as "-page:pagetitle". You can
give this parameter multiple times to edit multiple pages.
Furthermore, the following command line parameters are supported:
-enable: - Enable or disable the bot via a Wiki Page.
-disambig: - Set a page where the bot save the name of the disambig pages found (default: skip the pages)
-limit: - Set how many pages check.
-always - Always say yes, won't ask
--- FixMes ---
* Check that all the code hasn't bugs
--- Credit and Help ---
This Script has been developed by <NAME> Filnik on botwiki. If you want to help us
improving our script archive and pywikipediabot's archive or you simply need help
you can find us here: http://botwiki.sno.cc
--- Examples ---
python lonelypages.py -enable:User:Bot/CheckBot -always
"""
#
# (C) Pietrodn, it.wiki 2006-2007
# (C) Filnik, it.wiki 2007
# (C) Pywikipedia bot team, 2008-2012
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: 3f5675b62cff437bc47eb779768d6514efd0e48e $'
#
import wikipedia as pywikibot
import pagegenerators
import re
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
#####################################################
# Here you have to put the config for your Project. #
#####################################################
# ************* Modify only below! ************* #

# Template to add in the orphan pages, keyed by language code. The subst:
# magic words stamp the current month/year into the page at save time.
Template = {
    'ar': u'{{يتيمة|تاريخ={{نسخ:اسم_شهر}} {{نسخ:عام}}}}',
    'ca': u'{{Orfe|date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
    'en': u'{{Orphan|date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
    'it': u'{{O||mese={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
    'ja': u'{{孤立|{{subst:DATE}}}}',
    'zh': u'{{subst:Orphan/auto}}',
}

# Comment that the Bot will use to put the template (edit summary).
commento = {
    'ar': u'بوت: صفحة يتيمة، إضافة قالب',
    'ca': u'Bot:Pàgina orfe, afegint plantilla',
    'en': u'Robot: Orphan page, add template',
    'it': u'[[Project:Bot|Bot]]: Voce orfana, aggiungo template {{O}}',
    'ja': u'ロボットによる: 孤立したページへのテンプレート貼付け',
    'zh': u'機器人: 本頁的鏈入頁面太少',
}

# Edit summary used when you add a disambig to the list of disambig pages
# (if you set disambigPage to None, you can put here nothing)
commenttodisambig = {
    'ar': u'بوت: إضافة صفحة توضيح',
    'ca': u'Bot; Afegint una desambiguació',
    'en': u'Robot: Adding a disambig page',
    'it': u'[[Project:Bot|Bot]]: Aggiungo una disambigua',
    'ja': u'ロボットによる: 曖昧さ回避の追加',
    'zh': u'機器人: 增加消歧義頁面',
}

# Use regex to prevent to put the same template twice!
# Warning: put always "()" inside the regex, so the bot will find "something"
# If any of these patterns matches a page's text, the page is considered
# already tagged and is skipped.
exception = {
    'ar': [ur'\{\{(?:قالب:|)(يتيمة)[\|\}]'],
    'ca': [r'\{\{(?:template:|)(orfe)[\|\}]'],
    'en': [r'\{\{(?:template:|)(orphan)[\|\}]',
           r'\{\{(?:template:|)(wi)[\|\}]'],
    'it': [r'\{\{(?:template:|)(o|a)[\|\}]'],
    'ja': [ur'\{\{(?:template:|)(孤立)[\|\}]'],
    'zh': [r'\{\{(?:template:|)(orphan)[\|\}]'],
}

# ************* Modify only above! ************* #
def main():
# Load the configurations in the function namespace
global commento; global Template; global disambigPage; global commenttodisambig
global exception
enablePage = None # Check if someone set an enablePage or not
limit = 50000 # All the pages! (I hope that there aren't so many lonely pages in a project..)
generator = None # Check if the bot should use the default generator or not
genFactory = pagegenerators.GeneratorFactory() # Load all the default generators!
nwpages = False # Check variable for newpages
always = False # Check variable for always
disambigPage = None # If no disambigPage given, not use it.
# Arguments!
for arg in pywikibot.handleArgs():
if arg.startswith('-enable'):
if len(arg) == 7:
enablePage = pywikibot.input(u'Would you like to check if the bot should run or not?')
else:
enablePage = arg[8:]
if arg.startswith('-disambig'):
if len(arg) == 9:
disambigPage = pywikibot.input(u'In which page should the bot save the disambig pages?')
else:
disambigPage = arg[10:]
elif arg.startswith('-limit'):
if len(arg) == 6:
limit = int(pywikibot.input(u'How many pages do you want to check?'))
else:
limit = int(arg[7:])
elif arg.startswith('-newpages'):
if len(arg) == 9:
nwlimit = 50 # Default: 50 pages
else:
nwlimit = int(arg[10:])
generator = pywikibot.getSite().newpages(number = nwlimit)
nwpages = True
elif arg == '-always':
always = True
else:
genFactory.handleArg(arg)
# Retrive the site
wikiSite = pywikibot.getSite()
if not generator:
generator = genFactory.getCombinedGenerator()
# If the generator is not given, use the default one
if not generator:
generator = wikiSite.lonelypages(repeat = True, number = limit)
# Take the configurations according to our project
comment = pywikibot.translate(wikiSite, commento)
commentdisambig = pywikibot.translate(wikiSite, commenttodisambig)
template = pywikibot.translate(wikiSite, Template)
exception = pywikibot.translate(wikiSite, exception)
# EnablePage part
if enablePage != None:
# Define the Page Object
enable = pywikibot.Page(wikiSite, enablePage)
# Loading the page's data
try:
getenable = enable.get()
except pywikibot.NoPage:
pywikibot.output(u"%s doesn't esist, I use the page as if it was blank!" % enable.title())
getenable = ''
except wikiepedia.IsRedirect:
pywikibot.output(u"%s is a redirect, skip!" % enable.title())
getenable = ''
# If the enable page is set to disable, turn off the bot
# (useful when the bot is run on a server)
if getenable != 'enable':
pywikibot.output('The bot is disabled')
return
# DisambigPage part
if disambigPage != None:
disambigpage = pywikibot.Page(wikiSite, disambigPage)
try:
disambigtext = disambigpage.get()
except pywikibot.NoPage:
pywikibot.output(u"%s doesn't esist, skip!" % disambigpage.title())
disambigtext = ''
except wikiepedia.IsRedirect:
pywikibot.output(u"%s is a redirect, don't use it!" % disambigpage.title())
disambigPage = None
# Main Loop
for page in generator:
if nwpages == True:
page = page[0] # The newpages generator returns a tuple, not a Page object.
pywikibot.output(u"Checking %s..." % page.title())
# Used to skip the first pages in test phase...
#if page.title()[0] in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q']:
#continue
if page.isRedirectPage(): # If redirect, skip!
pywikibot.output(u'%s is a redirect! Skip...' % page.title())
continue
# refs is not a list, it's a generator while resList... is a list, yes.
refs = page.getReferences()
refsList = list()
for j in refs:
if j == None:
# We have to find out why the function returns that value
pywikibot.error(u'1 --> Skip page')
continue
refsList.append(j)
# This isn't possible with a generator
if refsList != []:
pywikibot.output(u"%s isn't orphan! Skip..." % page.title())
continue
# Never understood how a list can turn in "None", but it happened :-S
elif refsList == None:
# We have to find out why the function returns that value
pywikibot.error(u'2 --> Skip page')
continue
else:
# Ok, no refs, no redirect... let's check if there's already the template
try:
oldtxt = page.get()
except pywikibot.NoPage:
pywikibot.output(u"%s doesn't exist! Skip..." % page.title())
continue
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect! Skip..." % page.title())
continue
# I've used a loop in a loop. If I use continue in the second loop, it won't do anything
# in the first. So let's create a variable to avoid this problem.
Find = False
for regexp in exception:
res = re.findall(regexp, oldtxt.lower())
# Found a template! Let's skip the page!
if res != []:
pywikibot.output(u'Your regex has found something in %s, skipping...' % page.title())
Find = True
break
# Skip the page..
if Find:
continue
# Is the page a disambig?
if page.isDisambig() and disambigPage != None:
pywikibot.output(u'%s is a disambig page, report..' % page.title())
if not page.title().lower() in disambigtext.lower():
disambigtext = u"%s\n*[[%s]]" % (disambigtext, page.title())
disambigpage.put(disambigtext, commentdisambig)
continue
# Is the page a disambig but there's not disambigPage? Skip!
elif page.isDisambig():
pywikibot.output(u'%s is a disambig page, skip...' % page.title())
continue
else:
# Ok, the page need the template. Let's put it there!
newtxt = u"%s\n%s" % (template, oldtxt) # Adding the template in the text
pywikibot.output(u"\t\t>>> %s <<<" % page.title()) # Showing the title
pywikibot.showDiff(oldtxt, newtxt) # Showing the changes
choice = 'y' # Default answer
if not always:
choice = pywikibot.inputChoice(u'Orphan page found, shall I add the template?', ['Yes', 'No', 'All'], ['y', 'n', 'a'])
if choice == 'a':
always = True
choice = 'y'
if choice == 'y':
try:
page.put(newtxt, comment)
except pywikibot.EditConflict:
pywikibot.output(u'Edit Conflict! Skip...')
continue
if __name__ == '__main__':
    try:
        main()
    finally:
        # Always release the site lock / flush throttle state, even when
        # main() raised or the operator interrupted the bot.
        pywikibot.stopme()
| StarcoderdataPython |
3422564 | # Float
# Write a program that takes a float from the user
# and stores it in a variable. Cast the number to an
# integer and store in another variable.
# Then print: (floating point number) = (integer number).
# For example, if the user enters 5, the output would be:
# 5.0 = 5
# Also print the type of the floating point variable.
# Remember! type(variable_name) will return the data type of a variable
# Write code here
| StarcoderdataPython |
4824537 | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from marshmallow import ValidationError
from polyaxon.exceptions import PolyaxonfileError
from polyaxon.k8s import k8s_schemas
from polyaxon.polyaxonfile import check_polyaxonfile
from polyaxon.polyaxonfile.specs import (
CompiledOperationSpecification,
OperationSpecification,
)
from polyaxon.polyflow import V1CompiledOperation, V1Hyperband
from polyaxon.polyflow.io import V1IO
from polyaxon.polyflow.matrix import V1GridSearch
from polyaxon.polyflow.matrix.params import V1HpChoice, V1HpLinSpace
from polyaxon.polyflow.params import V1Param
from tests.utils import BaseTestCase
@pytest.mark.polyaxonfile_mark
class TestPolyaxonfileWithTypes(BaseTestCase):
    def test_using_untyped_params_raises(self):
        """A polyaxonfile whose params are not declared as typed inputs
        must be rejected at check time."""
        with self.assertRaises(PolyaxonfileError):
            check_polyaxonfile(
                polyaxonfile=os.path.abspath(
                    "tests/fixtures/typing/untyped_params.yml"
                ),
                is_cli=False,
            )
    def test_no_params_for_required_inputs_outputs_raises(self):
        """With no params supplied: required *inputs* fail eagerly when
        operation contexts are applied, while required *outputs* are
        validated lazily and therefore pass the same call."""
        # Get compiled_operation data
        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Inputs don't have delayed validation by default
        with self.assertRaises(ValidationError):
            CompiledOperationSpecification.apply_operation_contexts(run_config)
        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_outputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Outputs have delayed validation by default
        CompiledOperationSpecification.apply_operation_contexts(run_config)
def test_validation_for_required_inputs_outputs_raises(self):
# Get compiled_operation data
run_config = V1CompiledOperation.read(
[
os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
{"kind": "compiled_operation"},
]
)
# Inputs don't have delayed validation by default
with self.assertRaises(ValidationError):
run_config.validate_params(is_template=False, check_runs=True)
run_config = V1CompiledOperation.read(
[
os.path.abspath("tests/fixtures/typing/required_outputs.yml"),
{"kind": "compiled_operation"},
]
)
# Outputs have delayed validation by default
run_config.validate_params(is_template=False, check_runs=True)
def test_required_inputs_with_params(self):
run_config = V1CompiledOperation.read(
[
os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
{"kind": "compiled_operation"},
]
)
with self.assertRaises(ValidationError):
CompiledOperationSpecification.apply_operation_contexts(run_config)
assert run_config.inputs[0].value is None
assert run_config.inputs[1].value is None
run_config.apply_params(
params={"loss": {"value": "bar"}, "flag": {"value": False}}
)
assert run_config.inputs[0].value == "bar"
assert run_config.inputs[1].value is False
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
assert run_config.version == 1.1
assert run_config.tags == ["foo", "bar"]
assert run_config.run.container.image == "my_image"
assert run_config.run.container.command == ["/bin/sh", "-c"]
assert run_config.run.container.args == "video_prediction_train --loss=bar "
run_config = V1CompiledOperation.read(
[
os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
{"kind": "compiled_operation"},
]
)
assert run_config.inputs[0].value is None
assert run_config.inputs[1].value is None
run_config.apply_params(
params={"loss": {"value": "bar"}, "flag": {"value": True}}
)
assert run_config.inputs[0].value == "bar"
assert run_config.inputs[1].value is True
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
assert run_config.version == 1.1
assert run_config.tags == ["foo", "bar"]
assert run_config.run.container.image == "my_image"
assert run_config.run.container.command == ["/bin/sh", "-c"]
assert (
run_config.run.container.args == "video_prediction_train --loss=bar --flag"
)
# Adding extra value raises
with self.assertRaises(ValidationError):
run_config.validate_params(
params={
"loss": {"value": "bar"},
"flag": {"value": True},
"value": {"value": 1.1},
}
)
with self.assertRaises(PolyaxonfileError):
check_polyaxonfile(
polyaxonfile=os.path.abspath(
"tests/fixtures/typing/required_inputs.yml"
),
params={"loss": {"value": "bar"}, "value": {"value": 1.1}},
is_cli=False,
)
# Adding non valid params raises
with self.assertRaises(ValidationError):
run_config.validate_params(params={"value": {"value": 1.1}})
def test_required_inputs_with_arg_format(self):
run_config = V1CompiledOperation.read(
[
os.path.abspath(
"tests/fixtures/typing/required_inputs_with_arg_format.yml"
),
{"kind": "compiled_operation"},
]
)
with self.assertRaises(ValidationError):
CompiledOperationSpecification.apply_operation_contexts(run_config)
assert run_config.inputs[0].value is None
assert run_config.inputs[1].value is None
run_config.apply_params(
params={"loss": {"value": "bar"}, "flag": {"value": False}}
)
assert run_config.inputs[0].value == "bar"
assert run_config.inputs[1].value is False
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
assert run_config.version == 1.1
assert run_config.tags == ["foo", "bar"]
assert run_config.run.container.image == "my_image"
assert run_config.run.container.command == ["/bin/sh", "-c"]
assert run_config.run.container.args == "video_prediction_train --loss=bar "
run_config = V1CompiledOperation.read(
[
os.path.abspath(
"tests/fixtures/typing/required_inputs_with_arg_format.yml"
),
{"kind": "compiled_operation"},
]
)
assert run_config.inputs[0].value is None
assert run_config.inputs[1].value is None
run_config.apply_params(
params={"loss": {"value": "bar"}, "flag": {"value": True}}
)
assert run_config.inputs[0].value == "bar"
assert run_config.inputs[1].value is True
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
assert run_config.version == 1.1
assert run_config.tags == ["foo", "bar"]
assert run_config.run.container.image == "my_image"
assert run_config.run.container.command == ["/bin/sh", "-c"]
assert (
run_config.run.container.args == "video_prediction_train --loss=bar --flag"
)
# Adding extra value raises
with self.assertRaises(ValidationError):
run_config.validate_params(
params={
"loss": {"value": "bar"},
"flag": {"value": True},
"value": {"value": 1.1},
}
)
with self.assertRaises(PolyaxonfileError):
check_polyaxonfile(
polyaxonfile=os.path.abspath(
"tests/fixtures/typing/required_inputs.yml"
),
params={"loss": {"value": "bar"}, "value": {"value": 1.1}},
is_cli=False,
)
# Adding non valid params raises
with self.assertRaises(ValidationError):
run_config.validate_params(params={"value": {"value": 1.1}})
def test_matrix_file_passes_int_float_types(self):
plxfile = check_polyaxonfile(
polyaxonfile=os.path.abspath(
"tests/fixtures/typing/matrix_file_with_int_float_types.yml"
),
is_cli=False,
to_op=False,
)
# Get compiled_operation data
run_config = OperationSpecification.compile_operation(plxfile)
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
assert run_config.version == 1.1
assert run_config.has_pipeline
assert run_config.is_dag_run is False
assert isinstance(run_config.matrix.params["param1"], V1HpChoice)
assert isinstance(run_config.matrix.params["param2"], V1HpChoice)
assert run_config.matrix.params["param1"].to_dict() == {
"kind": "choice",
"value": [1, 2],
}
assert run_config.matrix.params["param2"].to_dict() == {
"kind": "choice",
"value": [3.3, 4.4],
}
assert isinstance(run_config.matrix, V1GridSearch)
assert run_config.matrix.concurrency == 2
assert run_config.matrix.kind == V1GridSearch.IDENTIFIER
assert run_config.matrix.early_stopping is None
def test_matrix_job_file_passes_int_float_types(self):
plxfile = check_polyaxonfile(
polyaxonfile=os.path.abspath(
"tests/fixtures/typing/matrix_job_file_with_int_float_types.yml"
),
is_cli=False,
to_op=False,
)
# Get compiled_operation data
run_config = OperationSpecification.compile_operation(plxfile)
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
assert run_config.version == 1.1
assert isinstance(run_config.matrix.params["param1"], V1HpChoice)
assert isinstance(run_config.matrix.params["param2"], V1HpChoice)
assert run_config.matrix.params["param1"].to_dict() == {
"kind": "choice",
"value": [1, 2],
}
assert run_config.matrix.params["param2"].to_dict() == {
"kind": "choice",
"value": [3.3, 4.4],
}
assert isinstance(run_config.matrix, V1GridSearch)
assert run_config.matrix.concurrency == 2
assert run_config.matrix.kind == V1GridSearch.IDENTIFIER
assert run_config.matrix.early_stopping is None
def test_matrix_file_with_required_inputs_and_wrong_matrix_type_fails(self):
with self.assertRaises(PolyaxonfileError):
check_polyaxonfile(
polyaxonfile=os.path.abspath(
"tests/fixtures/typing/matrix_job_required_inputs_file_wrong_matrix_type.yml"
),
is_cli=False,
)
def test_matrix_file_with_required_inputs_passes(self):
plx_file = check_polyaxonfile(
polyaxonfile=os.path.abspath(
"tests/fixtures/typing/matrix_job_required_inputs_file.yml"
),
is_cli=False,
)
run_config = OperationSpecification.compile_operation(plx_file)
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
assert run_config.version == 1.1
assert isinstance(run_config.matrix, V1Hyperband)
assert isinstance(run_config.matrix.params["lr"], V1HpLinSpace)
assert isinstance(run_config.matrix.params["loss"], V1HpChoice)
assert run_config.matrix.params["lr"].to_dict() == {
"kind": "linspace",
"value": {"start": 0.01, "stop": 0.1, "num": 5},
}
assert run_config.matrix.params["loss"].to_dict() == {
"kind": "choice",
"value": ["MeanSquaredError", "AbsoluteDifference"],
}
assert run_config.matrix.concurrency == 2
assert isinstance(run_config.matrix, V1Hyperband)
assert run_config.matrix.kind == V1Hyperband.IDENTIFIER
assert run_config.matrix.early_stopping is None
def test_run_simple_file_passes(self):
run_config = V1CompiledOperation.read(
[
os.path.abspath("tests/fixtures/typing/run_cmd_simple_file.yml"),
{"kind": "compiled_operation"},
]
)
assert run_config.inputs[0].value == "MeanSquaredError"
assert run_config.inputs[1].value is None
validated_params = run_config.validate_params()
assert run_config.inputs[0].value == "MeanSquaredError"
assert run_config.inputs[1].value is None
assert {
"loss": V1Param(value="MeanSquaredError"),
"num_masks": V1Param(value=None),
} == {p.name: p.param for p in validated_params}
with self.assertRaises(ValidationError):
CompiledOperationSpecification.apply_operation_contexts(run_config)
validated_params = run_config.validate_params(
params={"num_masks": {"value": 100}}
)
assert {
"loss": V1Param(value="MeanSquaredError"),
"num_masks": V1Param(value=100),
} == {p.name: p.param for p in validated_params}
assert run_config.run.container.args == [
"video_prediction_train",
"--num_masks={{num_masks}}",
"--loss={{loss}}",
]
with self.assertRaises(ValidationError):
# Applying context before applying params
CompiledOperationSpecification.apply_operation_contexts(run_config)
run_config.apply_params(params={"num_masks": {"value": 100}})
run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
assert run_config.version == 1.1
assert run_config.tags == ["foo", "bar"]
container = run_config.run.container
assert isinstance(container, k8s_schemas.V1Container)
assert container.image == "my_image"
assert container.command == ["/bin/sh", "-c"]
assert container.args == [
"video_prediction_train",
"--num_masks=100",
"--loss=MeanSquaredError",
]
def test_run_with_refs(self):
# Get compiled_operation data
run_config = V1CompiledOperation.read(
[
os.path.abspath("tests/fixtures/typing/run_with_refs.yml"),
{"kind": "compiled_operation"},
]
)
params = {
"num_masks": {"value": 2},
"model_path": {
"ref": "runs.64332180bfce46eba80a65caf73c5396",
"value": "outputs.doo",
},
}
validated_params = run_config.validate_params(params=params)
param_specs_by_name = {p.name: p.param for p in validated_params}
assert param_specs_by_name == {
"num_masks": V1Param(value=2),
"model_path": V1Param(
ref="runs.64332180bfce46eba80a65caf73c5396", value="outputs.doo"
),
}
ref_param = param_specs_by_name["model_path"]
assert ref_param.to_dict() == params["model_path"]
with self.assertRaises(ValidationError):
run_config.apply_params(params=params)
# Passing correct context
run_config.apply_params(
params=params,
context={
"runs.64332180bfce46eba80a65caf73c5396.outputs.doo": V1IO(
name="model_path",
value="model_path",
is_optional=True,
type="path",
)
},
)
# New params
params = {
"num_masks": {"value": 2},
"model_path": {"ref": "ops.A", "value": "outputs.doo"},
}
validated_params = run_config.validate_params(params=params)
param_specs_by_name = {p.name: p.param for p in validated_params}
assert param_specs_by_name == {
"num_masks": V1Param(value=2),
"model_path": V1Param(ref="ops.A", value="outputs.doo"),
}
ref_param = param_specs_by_name["model_path"]
assert ref_param.to_dict() == params["model_path"]
with self.assertRaises(ValidationError):
run_config.apply_params(params=params)
run_config.apply_params(
params=params,
context={
"ops.A.outputs.doo": V1IO(
name="model_path",
value="model_path",
is_optional=True,
type="path",
)
},
)
| StarcoderdataPython |
6631215 | <gh_stars>1-10
def foo(*a):
pass
foo<caret>(1, 2, 3 | StarcoderdataPython |
1734394 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import libkge
# Package metadata, pulled from the package itself and from repo files.
# NOTE: importing libkge at build time requires its runtime deps to be
# importable before installation.
VERSION = libkge.__version__
NAME = 'libkge'
DESCRIPTION = 'A library for knowledge graph embedding models'
with open('README.md') as f:
    LONG_DESCRIPTION = f.read()
long_description_content_type = 'text/markdown'
AUTHOR = '<NAME>'
URL = 'http://samehkamaleldin.github.io/'
with open('LICENSE') as f:
    # NOTE(review): setuptools expects a short license *identifier* here;
    # passing the full license text is unusual — confirm intent.
    LICENSE = f.read()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type=long_description_content_type,
    author=AUTHOR,
    url=URL,
    install_requires=['numpy',
                      'bidict',
                      'tqdm',
                      'umap-learn',
                      'sklearn',
                      ],
    license=LICENSE,
    packages=find_packages(exclude=('tests', 'docs')),
    extras_require={
        'tf': ['tensorflow>=1.13.0'],
        'tf_gpu': ['tensorflow-gpu>=1.2.0']
    }
)
| StarcoderdataPython |
1663205 | <reponame>sguzman/bamboo-scrape
import functools
from typing import Dict
from . import __log as log
from . import __exit as exit
from . import __greet as greet
from . import __json as json
@functools.cache
def exec() -> Dict:
    """Run the module pipeline (log, greet, exit) and return json.exec().

    Because of ``functools.cache`` the three side-effecting calls run only
    on the first invocation; every later call returns the cached dict
    without re-running them.
    """
    log.exec()
    greet.exec()
    exit.exec()
    return json.exec()
| StarcoderdataPython |
4851768 | <gh_stars>0
from filtering import *
#from ui import *
from views import * | StarcoderdataPython |
6488665 | from xaircraft.envs.lvaircraft_pitch import LVAircraftPitch
from gym.envs.registration import register
# Register the aircraft control environments with Gym so they can be
# created via gym.make("<id>").
register(
    id='LVAircraftAltitude-v0',
    entry_point='xaircraft.envs.lvaircraft_altitude:LVAircraftAltitudeV0'
)
register(
    id='LVAircraftPitch-v0',
    entry_point='xaircraft.envs.lvaircraft_pitch:LVAircraftPitchV0'
)
# NOTE(review): what distinguishes the v1 pitch task from v0 is not
# visible here — see xaircraft.envs.lvaircraft_pitch.
register(
    id='LVAircraftPitch-v1',
    entry_point='xaircraft.envs.lvaircraft_pitch:LVAircraftPitchV1'
)
| StarcoderdataPython |
125441 | <reponame>mfreiwald/FromMotionToEmotion<filename>MakeTimelineTest.py
from module.Configuration import Configuration
from module.Evaluation import Evaluation
from dask.distributed import Client
import logging
import math
import pandas as pd
def transform(df, parts=1):
    """Split every top-level index group of *df* into `parts` consecutive
    chunks and suffix each row's index label with its chunk number.

    The returned frame keeps all rows of *df*, re-labelled as
    ``"<label>_<part>"`` and with the index named ``'id'``.
    """
    chunked_groups = []
    for _group_key, group in df.groupby(level=0):
        rows_per_part = math.ceil(len(group) / parts)
        pieces = []
        for part_no in range(parts):
            start = part_no * rows_per_part
            piece = group.iloc[start:start + rows_per_part, :]
            # Tag every row of this slice with the chunk it belongs to.
            piece.index = piece.index.astype(str) + '_%d' % part_no
            pieces.append(piece)
        chunked_groups.append(pd.concat(pieces))
    combined = pd.concat(chunked_groups)
    combined.index.name = 'id'
    return combined
def process_index(k):
    """Split an id like ``"<base>_<part>"`` at its *last* underscore and
    return ``(base, part)``; with no underscore, base is ``""``."""
    base, _sep, part = k.rpartition("_")
    return (base, part)
def main():
    """End-to-end evaluation run: preprocess, split each recording into 5
    sub-windows, extract features, flatten the per-part features into one
    wide row per recording, then run selection and classification."""
    conf = Configuration(window_size=20, window_step=5.0, features=['standard_deviation'])
    print(conf)
    c = Client()
    eva = Evaluation(c, conf)
    # Silence dask's GC/performance warnings, which are noisy on long runs.
    logging.getLogger('distributed.utils_perf').setLevel(logging.CRITICAL)
    df1 = eva.make_preprocessing()
    df2 = map(lambda df: transform(df, parts=5), df1)
    df2 = list(df2)
    df3 = eva.make_feature_engineering(df2)
    tmpdf = df3.copy()
    # Rebuild the "<id>_<part>" flat index into a (id, part) MultiIndex.
    tmpdf.index = pd.MultiIndex.from_tuples([process_index(k) for k, v in tmpdf.iterrows()])
    newgrouping = []
    for idx, group in tmpdf.groupby(level=0):
        newrow = []
        for idx2, row in group.iterrows():
            # Suffix every feature column with its part number so all
            # parts can sit side by side in a single wide row.
            row.index = row.index.astype(str) + '_%s'%idx2[1]
            newrow.append(pd.DataFrame([row.values], columns=list(row.index)))
        newdf = pd.concat(newrow, sort=True, axis=1)
        newdf.index = [idx]
        newgrouping.append(newdf)
    df4 = pd.concat(newgrouping)
    df5 = eva.make_selection(df4)
    results_clf_score, sizes, info_df, importances_df, all_probas = eva.make_classification(df5)
if __name__ == "__main__":
main()
| StarcoderdataPython |
6665475 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param A : head node of linked list
    # @return the first node in the cycle in the linked list
    def detectCycle(self, A):
        """Return the node where the cycle begins, or None if the list is
        acyclic (Floyd's tortoise-and-hare algorithm)."""
        if A.next is None:
            return None
        # Phase 1: advance hare twice as fast until they meet (cycle) or
        # the hare runs off the end (no cycle).
        tortoise = A.next
        hare = A.next.next
        while hare is not None and hare.next is not None and hare != tortoise:
            tortoise = tortoise.next
            hare = hare.next.next
        if hare is None or hare.next is None:
            return None
        # Phase 2: a pointer from the head and the meeting point, moving
        # one step each, collide exactly at the cycle's entry node.
        finder = A
        while finder != hare:
            finder = finder.next
            hare = hare.next
        return finder
11336215 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import include, re_path
from polyaxon.api import API_V1
from polycommon.apis.index import get_urlpatterns, handler403, handler404, handler500
from polycommon.apis.regex import OWNER_NAME_PATTERN, PROJECT_NAME_PATTERN
# REST API routes; mounted under the versioned API prefix below.
api_patterns = [
    re_path(r"", include(("apis.versions.urls", "versions"), namespace="versions")),
]
api_patterns += [
    re_path(
        r"",
        include(
            ("apis.project_resources.urls", "project_resources"),
            namespace="project_resources",
        ),
    ),
    re_path(
        r"", include(("apis.run_lineage.urls", "run_lineage"), namespace="run_lineage")
    ),
    re_path(r"", include(("apis.runs.urls", "runs"), namespace="runs")),
    re_path(r"", include(("apis.projects.urls", "projects"), namespace="projects")),
]
# Mount all API routes under /<API_V1>/ in the "api-v1" namespace.
app_urlpatterns = [
    re_path(
        r"^{}/".format(API_V1), include((api_patterns, "api-v1"), namespace="api-v1")
    ),
]
# UI
# Regex fragments for owner- and project-scoped UI pages.
projects_urls = "{}/{}".format(OWNER_NAME_PATTERN, PROJECT_NAME_PATTERN)
orgs_urls = "orgs/{}".format(OWNER_NAME_PATTERN)
# Client-side routes: get_urlpatterns serves the UI entry point for each.
ui_urlpatterns = [
    r"^$",
    r"^{}/?".format(orgs_urls),
    r"^{}/projects/?".format(orgs_urls),
    r"^{}/?$".format(projects_urls),
    r"^{}/runs.*/?".format(projects_urls),
    r"^{}/jobs.*/?".format(projects_urls),
    r"^{}/services.*/?".format(projects_urls),
    r"^{}/cli.*/?".format(projects_urls),
    r"^{}/settings/?".format(projects_urls),
    r"^{}/new.*/?".format(projects_urls),
]
# Intentional re-bindings: Django resolves error handlers from this
# URLconf module, so the imported handlers are re-exported here.
handler404 = handler404
handler403 = handler403
handler500 = handler500
urlpatterns = get_urlpatterns(app_urlpatterns, ui_urlpatterns)
| StarcoderdataPython |
1657455 | import androidhelper
import urllib.request
import time
import requests
from bs4 import BeautifulSoup
import datetime
droid = androidhelper.Android()
republic1="Republic Day is a national holiday in India. It honours the date on which the Constitution of India came into effect on 26 January 1950 replacing the Government of India Act as the governing document of India and thus, turning the nation into a newly formed republic."
#TEMPERATUTE WEATHER
def temperature():
    """Scrape Google's weather card and speak the current temperature.

    NOTE(review): depends on Google's obfuscated CSS class name
    ("iBp4i"), which changes without notice — this scrape is fragile and
    `find` may return None when the markup shifts.
    """
    response = requests.get("http://google.com/search?q=weather+report")
    soup = BeautifulSoup(response.text, "html5lib")
    # The original also parsed the "tAd8D" block into an unused local; dropped.
    temp = soup.find("div", {"class": "iBp4i"})
    speck("temperature is " + temp.text)
def wiki(search):
    """Search mobile Wikipedia for the query (minus the voice trigger),
    open the first hit, and speak its first substantive paragraph."""
    # Strip the voice-command trigger phrase and URL-encode spaces.
    search=search.replace("wikipedia search"," ")
    search=search.replace(" ","+")
    url= urllib.request.urlopen("https://en.m.wikipedia.org/w/index.php?search="+search+"&title=Special%3ASearch&profile=default&fulltext=1&ns0=1")
    bu= BeautifulSoup(url,"html5lib")
    bi= bu.select(".mw-search-result-heading a")
    # Follow the top search result (IndexError if there are no hits).
    bk=bi[0].get("href")
    url= urllib.request.urlopen("https://en.m.wikipedia.org/"+bk)
    bu= BeautifulSoup(url,"html5lib")
    a= bu.find_all("p")
    for i in a[:]:
        # Skip near-empty paragraphs (page furniture); speak the first real one.
        if len(i)==0 or len(i)==1:
            pass
        else:
            spr=i.text
            se=spr.split(".")
            # Echo up to the first five sentences to the console log.
            for l in se[:5]:
                print(l)
            speck(i.text)
            break
def meaning(men):
    """Scrape Google for the Hindi meaning of *men* and speak it."""
    url = requests.get("http://google.com/search?q=+"+men+"+meaning+in+hindi")
    url=url.text
    soup=BeautifulSoup(url,"html5lib")
    # "lrtl-transliteration-text" is the id of Google's translation card;
    # fragile — breaks whenever Google changes its markup.
    a=soup.find("div",{"id":"lrtl-transliteration-text"})
    speck(a.text)
#CALL
def call():
#v=droid.pickContact()
b=input("dial number:")
droid.startActivity("android.intent.action.VIEW",uri="tel:"+b)
#NAME
def name():
speck('my name is lakshmi kumari.')
#INTRODUCTION OF LAKSHMI
def artificial():
speck('my name is <NAME>. I am artificial intelligence . avinash kumar abhay and vivek build me on the 26 january 2021.')
def date():
    """Speak today's date as three utterances: day, month, year."""
    # Capture "now" exactly once so day/month/year are guaranteed
    # consistent (the original called datetime.now() three times, which
    # could straddle midnight and mix two dates).
    now = datetime.datetime.now()
    speck("the current date is:")
    speck(str(now.day))
    speck(str(now.month))
    speck(str(now.year))
#TIME
def Time():
    """Speak the current wall-clock time."""
    # %I is 12-hour format; note no AM/PM marker is spoken.
    tim= datetime.datetime.now().strftime("%I:%M:%S")
    speck("the current time is "+tim)
#MUSIC PLAYER
def music():
droid.mediaPlay("/sdcard/26.mp4")
#MY LOCATION
def location():
droid.view("https://www.google.com/maps")
#OPEN GOOGLE PLAY STORE
def playStore():
droid.view("https://play.google.com")
def googleDrive():
droid.view("https://drive.google.com/file/d/1VcFLN71OrgjdB7EocVvlCHy8LErOpS6D/view?usp=drivesdk")
#OPEN GOOGLE CHROME
def openGoogle():
droid.startActivity(action="android.intent.action.VIEW",uri="http://www.google.com")
def searchGoogle(sr):
droid.startActivity(action="android.intent.action.VIEW",uri="http://www.google.com/search?q="+sr)
#OPEN GALLERY CHROME
def openGalary():
droid.startActivity(action="android.intent.action.VIEW",uri="content://",type="image/*")
#WISH REPUBLIC DAY
def wishRepublic():
droid.mediaPlay("/sdcard/qpython/scripts3/music/wish.mp3")
# WEATHER REPORT
def weather():
temperature()
#OPEN YOUTUBE
def openYoutube():
droid.startActivity(action="android.intent.action.VIEW",uri="https://youtu.be/9w18bnz5Yts")
#OPEN FACEBOOK
def openFacebook():
droid.startActivity(action="android.intent.action.VIEW",uri="http://www.facebook.com")
#SPEECH RECOGNITION
def voice():
    """Block until Android's speech recognizer returns a transcript."""
    speech= droid.recognizeSpeech()
    return speech.result
#ASSISTENT SPEAK
def speck(sound):
    """Speak *sound* via the Android text-to-speech layer."""
    droid.ttsSpeak(sound)
#speck(voice())
run=True
Time()
date()
while run:
print("\033[02J")
print("\033[0;0H")
ab= input("press enter too speak ....")
if ab=='stop':
speck(' ')
if ab=='exit':
break
sp=str(voice()).lower()
if 'bye' in sp:
speck("thank you sir. i hope you enjoy. jay bhim")
run=False
elif 'what is republic day' in sp:
speck(republic1)
elif 'when india celebrating republic day' in sp:
#speck("Every year Republic Day is celebrated in India on 26th January with zeal and enthusiasm. Spectacular parades at Janpath, New Delhi, consisting the Indian National Army and national flag hoisting in various parts of the country are common practices followed on this day ")
pass
elif 'what is constitution' in sp:
speck("A constitution is an aggregate of fundamental principles or established precedents that constitute the legal basis of a polity, organisation or other type of entity and commonly determine how that entity is to be governed")
elif 'open youtube' in sp:
openYoutube()
speck("youtube is open")
elif 'open google' in sp:
openGoogle()
speck("google is open")
elif 'open facebook' in sp:
openFacebook()
speck("facebook is open")
elif 'open gallery' in sp:
openGalary()
speck("gallery is open")
elif 'play music' in sp :
music()
speck("enjoy")
elif 'search google' in sp :
sp=sp.replace("search google",'')
searchGoogle(sp)
elif 'wikipedia search' in sp :
wiki(sp)
elif 'meaning of' in sp:
sp=sp.replace("meaning of",' ')
meaning(sp)
elif 'my location' in sp :
location()
elif 'play store' in sp :
playStore()
elif 'google drive ' in sp:
googleDrive()
elif 'who are you' in sp:
artificial()
elif 'introduce yourself' in sp:
artificial()
elif 'what is your name' in sp:
name()
elif 'bhim' in sp:
speck("he was born on 14 april 1891.he was independent India's first Minister of Law and Justice, and considered as the chief architect of the Constitution of India, and a founding father of the Republic of India ")
elif 'nehru' in sp:
speck("he was born on 14 November 1889.he was an Indian independence activist and, subsequently, the first Prime Minister of India, as well as a central figure in Indian politics both before and after independence")
elif 'patel' in sp:
speck('<NAME> (31 October 1875 – 15 December 1950), popularly known as <NAME>, was an Indian politician. He served as the first Deputy Prime Minister of India')
elif 'prasad' in sp:
speck('<NAME> (3 December 1884 – 28 February 1963) was an Indian independence activist, lawyer, scholar and subsequently, the first President of India, in office from 1950 to 1962.')
elif 'gandhi' in sp:
speck("<NAME>, also known as <NAME>, was an Indian lawyer, anti-colonial nationalist, and political ethicist, who employed nonviolent resistance to lead the successful campaign for India's independence from British rule, and in turn inspired movements for civil rights and freedom across the world.")
elif 'time' in sp :
Time()
elif 'date' in sp :
date()
elif 'call' in sp:
call()
elif 'what is artificial intelligence' in sp:
speck("Artificial intelligence is intelligence demonstrated by machines, unlike the natural intelligence displayed by humans and animals, which involves consciousness and emotionality")
elif 'who teaching us networking' in sp:
speck("mr murlidhar verma sir teaching us networking")
elif 'who teaching us programming' in sp:
speck("mr naushad ahmad sir teaching as programming.")
elif 'who is counselor' in sp:
speck("mr upender sir is our counselor")
elif 'director' in sp:
speck("<NAME> sir is a director of m i m t college")
elif 'who is aunty' in sp:
speck("sheela devi is aunty ji in m i m t college")
elif 'who teaching us financial' in sp:
speck("mr. akram sir teaching us financial")
elif 'stupid' in sp:
speck("sorry sir i trying to do my best")
elif 'how are you' in sp:
speck("i am good tell me about yourself")
elif 'temperature' in sp:
weather()
elif 'who build you' in sp:
speck("<NAME> vivek and abhay build me.")
elif 'father' in sp:
speck("<NAME> vivek and abhay build me so they are my every thing")
elif 'mother' in sp:
speck("my engineer are every things for me.")
else:
speck("i am not understanding ...")
| StarcoderdataPython |
11257390 | """Sweeps throught the depth image showing 100 range at a time"""
from kin_emb.depth_processing import get_depth_in_threshhold
from kin_emb import kinect_proxy
import cv2
import matplotlib.pyplot as pplot
import signal
import time
def display_threshhold(lower, upper):
    """Show the depth pixels between *lower* and *upper* in the 'Depth'
    OpenCV window (bound semantics defined by get_depth_in_threshhold)."""
    depth = get_depth_in_threshhold(lower, upper)
    cv2.imshow('Depth', depth)
    cv2.waitKey(10)  # give the HighGUI event loop time to repaint
def sweep_threshhold(lower, upper, max_upper):
    """Sweep the [lower, upper] depth window upward until *upper* reaches
    *max_upper*, displaying each slice for ~0.1 s.

    The window advances 20 depth units per step; its width stays constant.
    """
    cv2.namedWindow('Depth')
    while upper < max_upper:
        print('%d < depth < %d' % (lower, upper))
        display_threshhold(lower, upper)
        time.sleep(.1)
        lower += 20
        upper += 20
def display_depth_plot():
    """Continuously render the Kinect depth stream with matplotlib until
    the user presses Ctrl-C in the terminal."""
    keep_running = True

    def handler(signum, frame):
        """SIGINT handler: request the display loop to stop."""
        # BUG FIX: the original declared `global keep_running`, which set
        # a *module-level* name while the loop below reads this
        # function's local flag — so Ctrl-C never actually stopped the
        # loop.  `nonlocal` rebinds the enclosing function's variable.
        nonlocal keep_running
        print("SIGINT!!!")
        keep_running = False

    # Register signal interrupt handler
    signal.signal(signal.SIGINT, handler)
    pplot.ion()
    pplot.gray()
    pplot.figure(1)
    image_depth = pplot.imshow(kinect_proxy.get_depth(), interpolation='nearest', animated=True)
    print('Press Ctrl-C in terminal to stop')
    while keep_running:
        pplot.figure(1)
        image_depth.set_data(kinect_proxy.get_depth())
        pplot.draw_all()
        pplot.waitforbuttonpress(0.1)
if __name__ == '__main__':
display_depth_plot()
| StarcoderdataPython |
3228948 | #!/usr/bin/env python3
import sys
import requests
from requests.cookies import RequestsCookieJar
from lxml import etree
DOMAIN = 'http://dj.cs.ustc.edu.cn'
USERNAME = ''
PASSWORD = ''
def main():
    """Log in to dj.cs.ustc.edu.cn, list the pending tasks, then auto-commit
    each one.  Returns 0 on completion; XPath mismatches abort via assert."""
    # Check username and password
    global USERNAME, PASSWORD
    if not USERNAME: USERNAME = input('请输入学号:')
    if not PASSWORD: PASSWORD = input('请输入密码:')
    req = requests.Session()
    cookie_jar = RequestsCookieJar()
    login_payload = {
        'username': USERNAME,
        'password': PASSWORD
    }
    url = 'http://dj.cs.ustc.edu.cn/admin/index/login.html'
    # Open Login -- redirects disabled so cookies set on the login response
    # itself are captured before any redirect.
    print('正在登录: %s' % url)
    r = req.post(url, data=login_payload, allow_redirects=False)
    cookie_jar.update(r.cookies)
    # print(cookie_jar.items())
    # Now set url to index.html
    url = 'http://dj.cs.ustc.edu.cn/admin/index/index.html'
    r = req.get(url, cookies=cookie_jar)
    # Now we have got the page. We should know what '待办事项' refers to.
    # NOTE(review): the XPath below is tied to the dashboard's exact markup
    # and will silently break if the site layout changes.
    dashboard_page = etree.HTML(r.text)
    iframe_link_path = dashboard_page.xpath("//*[@id='draggable']/div[2]/div[1]/dl[1]/dd[2]/a/@data-param")
    assert(len(iframe_link_path) == 1)
    iframe_link = DOMAIN + iframe_link_path[0]
    todo_events = []
    r = req.get(iframe_link, cookies=cookie_jar)
    assert(r.status_code == 200)
    events_page = etree.HTML(r.text)
    # One table row per event; columns used: 1 = name, 5 = status, 6 = link.
    events = events_page.xpath("//div[@class='bDiv']/table/tbody/tr")
    for i in range(len(events)):
        event_name = events_page.xpath("//div[@class='bDiv']/table/tbody/tr[%d]/td[1]/text()" % (i+1))[0]
        event_status = events_page.xpath("//div[@class='bDiv']/table/tbody/tr[%d]/td[5]/text()" % (i+1))[0].strip()
        event_link = events_page.xpath("//div[@class='bDiv']/table/tbody/tr[%d]/td[6]/a/@href" % (i+1))[0]
        if event_status != '已办理':
            # Not handled yet: show the status in red and queue for commit.
            event_status = '\033[1;31m未办理\033[0m'
            todo_events.append((event_name, event_link))
        print('%s\t%s' % (event_name, event_status))
    print('=========================')
    for event in todo_events:
        sys.stdout.write('正在办理 %s' % event[0])
        event_full_link = DOMAIN + event[1]
        r = req.get(event_full_link, cookies=cookie_jar)
        commit_page = etree.HTML(r.text)
        commit_path = commit_page.xpath("//div[@class='bot']/a[1]/@href")[0]
        commit_url = DOMAIN + commit_path
        r = req.get(commit_url, cookies=cookie_jar)
        print(r.status_code == 200 and '成功' or '失败')
    return 0
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
5140663 | # -*- coding: utf-8 -*
'''
堆排序
'''
import sort_helper
from max_heap import Maxheap
import quick_3_sort
@sort_helper.test_sort
def heap_sort(arr):
    """In-place heap sort: load arr into a max-heap, then drain it from the
    back so successive maxima land in ascending order."""
    heap = Maxheap(len(arr), arr)
    for pos in range(len(arr) - 1, -1, -1):
        arr[pos] = heap.extractMax()
def __shifDown(arr, n, k):
    """Sift the element at index k down through the max-heap arr[:n]."""
    child = 2 * k + 1
    while child < n:
        # Pick the larger of the two children.
        if child + 1 < n and arr[child + 1] > arr[child]:
            child += 1
        # Heap property restored: parent at least as large as the bigger child.
        if arr[k] >= arr[child]:
            break
        arr[k], arr[child] = arr[child], arr[k]
        k = child
        child = 2 * k + 1
def sort(arr):
    """In-place heapsort; prints the sorted list, like the original."""
    n = len(arr)
    # Heapify: sift down every potential parent, back to the root.
    for parent in range(int((n + 1) / 2), -1, -1):
        __shifDown(arr, n, parent)
    # Repeatedly move the current maximum to the end and restore the heap.
    for end in range(n - 1, 0, -1):
        arr[0], arr[end] = arr[end], arr[0]
        __shifDown(arr, end, 0)
    print(arr)
if __name__ == '__main__':
a = sort_helper.generate_randoma_array(100,1,99)
print(a)
# b = a[:]
# heap_sort(a)
# quick_3_sort.sort(b)
sort(a)
| StarcoderdataPython |
5050835 | <gh_stars>1-10
from __future__ import print_function
import sys
class Color(object):
    """A printable and mixable color.

    Every instance registers itself in :attr:`colors` keyed by its numeric
    value, so the bitwise operators can map computed values back onto the
    already-constructed named colors.
    """

    # A dictionary of all colors, keyed by their numeric value.
    colors = {}

    def __init__(self, value, name, code):
        self.value = value
        self.name = name
        self.code = code
        Color.colors[value] = self

    def __repr__(self):
        return 'Color({0:d}, {1!r}, {2!r})'.format(self.value, self.name, self.code)

    def __str__(self):
        return self.colored(self.name)

    def __nonzero__(self):  # Python 2 truth protocol
        return bool(self.value)

    def __invert__(self):
        return Color.colors[~self.value]

    def __and__(self, other):
        return Color.colors[self.value & other.value]

    def __or__(self, other):
        return Color.colors[self.value | other.value]

    def colored(self, s):
        """Wrap *s* in this color's ANSI escape codes (plain text on win32)."""
        plain = (sys.platform == 'win32')
        return s if plain else '\x1b[{0}m{1}\x1b[0m'.format(self.code, s)
Color.NEITHER = Color(0, 'neither', '0')
Color.RED = Color(1, 'red', '31;1')
Color.BLUE = Color(~1, 'blue', '44')
Color.PURPLE = Color(~0, 'purple', '37;45;1')
# RED == ~BLUE
# BLUE == ~RED
# PURPLE == RED | BLUE
# NEITHER == RED & BLUE
| StarcoderdataPython |
3250251 | # Generated by Django 3.2.9 on 2021-12-09 11:44
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Audience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Номер аудитории')),
],
options={
'verbose_name': 'Аудитория',
'verbose_name_plural': 'Аудитория',
'unique_together': {('name',)},
},
),
migrations.CreateModel(
name='Chair',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование кафедры')),
],
options={
'verbose_name': 'Кафедра',
'verbose_name_plural': 'Кафедра',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='DayOfWeek',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование дня')),
],
options={
'verbose_name': 'День недели',
'verbose_name_plural': 'День недели',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='FormOfEducation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(blank=True, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, verbose_name='Наименование формы обучения')),
],
options={
'verbose_name': 'Форма обучения',
'verbose_name_plural': 'Формы обучения',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование группы')),
],
options={
'verbose_name': 'Группа',
'verbose_name_plural': 'Группы',
'unique_together': {('name',)},
},
),
migrations.CreateModel(
name='KindSubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование типа')),
],
options={
'verbose_name': 'Тип предмета',
'verbose_name_plural': 'Тип предмета',
'unique_together': {('name',)},
},
),
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(help_text='Расписание обновится в течении 10 минут', null=True, upload_to='schedule/', verbose_name='Файл расписания')),
('default_value', models.BooleanField(default=False, verbose_name='Заполнить статические значения')),
('date_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Время загрузки')),
],
options={
'verbose_name': 'Файл расписания',
'verbose_name_plural': 'Файлы расписания',
},
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='ФИО')),
],
options={
'verbose_name': 'Преподаватель',
'verbose_name_plural': 'Преподаватели',
'unique_together': {('name',)},
},
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование предмета')),
],
options={
'verbose_name': 'Предмет',
'verbose_name_plural': 'Предмет',
'unique_together': {('name',)},
},
),
migrations.CreateModel(
name='SubGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование')),
],
options={
'verbose_name': 'Подгруппа',
'verbose_name_plural': 'Подгруппа',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='ParityWeek',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование четности')),
],
options={
'verbose_name': 'Четность недели',
'verbose_name_plural': 'Четность недели',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='OrderLesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Время занятия')),
],
options={
'verbose_name': 'Время занятия',
'verbose_name_plural': 'Время занятия',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='LevelOfEducation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('public_id', models.IntegerField(verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Наименование уровня образования')),
],
options={
'verbose_name': 'Уровень образования',
'verbose_name_plural': 'Уровни образования',
'unique_together': {('name', 'public_id')},
},
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('audience', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.audience', verbose_name='Аудитория')),
('chair', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.chair', verbose_name='Кафедра')),
('day_of_week', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.dayofweek', verbose_name='День недели')),
('form_of_education', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.formofeducation', verbose_name='Форма обучения')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.group', verbose_name='Группа')),
('kind', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.kindsubject', verbose_name='Тип предмета')),
('level_of_education', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.levelofeducation', verbose_name='Уровень обучения')),
('order_lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.orderlesson', verbose_name='Время занятия')),
('parity_week', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.parityweek', verbose_name='Четность недели')),
('subgroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.subgroup', verbose_name='Подгруппа')),
('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.subject', verbose_name='Предмет')),
('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.teacher', verbose_name='Преподаватель')),
],
options={
'verbose_name': 'Пара',
'verbose_name_plural': 'Пары',
},
),
]
| StarcoderdataPython |
277994 | <gh_stars>100-1000
"""
Some correct syntax for variable annotation here.
More examples are in test_grammar and test_parser.
"""
from typing import no_type_check, ClassVar
i: int = 1
j: int
x: float = i/10
def f():
    # Returns an instance of a throwaway function-local class, so callers can
    # attach annotated attributes to an otherwise anonymous object.
    class C: ...
    return C()
class C:
def __init__(self, x: int) -> None:
self.x = x
c = C(5)
c.new_attr: int = 10
__annotations__ = {}
@no_type_check
class NTC:
def meth(self, param: complex) -> None:
...
class CV:
var: ClassVar['CV']
CV.var = CV()
| StarcoderdataPython |
11330614 | <gh_stars>0
from django.shortcuts import render,render_to_response
from login import models
from django.http import HttpResponseRedirect
from pub_form.form_login import UserForm
from django.core.paginator import Paginator
# Create your views here.
def index(request):
    """Login/landing view: pages the Notice list (6 per page, ?page=N)
    and processes the login form on POST."""
    # Paginator
    current_page = request.GET.get("page", 1)
    mo = models.Notice.objects.all()
    pages = Paginator(mo, 6)
    mo = pages.page(current_page)
    # login in
    if request.method == 'POST':
        uf = UserForm(request.POST)
        # BUG FIX: was uf.is_vaild(), a typo that raised AttributeError on
        # every POST; Django forms spell this is_valid().
        if uf.is_valid():
            username = uf.cleaned_data['username']
            password = uf.cleaned_data['password']
            return HttpResponseRedirect('base.html')
        # Invalid form falls through and re-renders with the bound form.
    else:
        uf = UserForm()
    # locals() exposes mo/pages/uf to the template context.
    return render_to_response('login.html', locals())
def main(request):
    # Post-login landing page; `hh` is exposed to the template via locals().
    hh = 'hello there'
    return render_to_response('base.html', locals())
3431918 | <gh_stars>10-100
from unittest import TestCase
from src import core_printer
class TestCorePrinters(TestCase):
    """Tests for CorePrinters: exact ANSI strings for the colour helpers,
    smoke-only (no assertions, just must-not-raise) for the banner methods."""
    # Shared printer instance reused by every test method.
    p = core_printer.CorePrinters()
    def test_blue_text(self):
        # Blue "[*]" prefix: ESC[34m ... ESC[0m reset, then the message.
        msg1 = self.p.blue_text("test")
        msg2 = "\x1b[34m [*] \x1b[0mtest"
        self.assertEqual(msg1, msg2)
    def test_green_text(self):
        # Green "[+]" prefix: ESC[32m ... ESC[0m reset, then the message.
        msg1 = self.p.green_text("test")
        msg2 = "\x1b[32m [+] \x1b[0mtest"
        self.assertEqual(msg1, msg2)
    def test_print_entry(self):
        # Smoke test: passes as long as no exception is raised.
        self.p.print_entry()
    def test_print_d_module_start(self):
        self.p.print_d_module_start()
    def test_print_s_module_start(self):
        self.p.print_s_module_start()
    def test_print_config_start(self):
        self.p.print_config_start()
    def test_print_modules(self):
        # print_modules takes a list of module paths.
        self.p.print_modules(['modules/bing_search.py'])
| StarcoderdataPython |
3317511 | from ..factory import Type
class fileTypeAnimation(Type):
    """Empty marker subclass representing the ``fileTypeAnimation`` type;
    all behaviour comes from the factory :class:`Type` base."""
    pass
| StarcoderdataPython |
285716 | <filename>dynamic/wrapper_generators/generate_bindings.py
#!/usr/bin/env python
"""Copyright (c) 2005-2016, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This scipt automatically generates Python bindings using a rule based approach
"""
import sys
from pyplusplus import module_builder
from pyplusplus.module_builder import call_policies
from pyplusplus import messages
from pygccxml import parser
def template_replace(class_name):
    """Map a C++ template suffix in *class_name* to its Python binding name.

    ``Foo<3>`` -> ``Foo3``, ``Foo<2>`` -> ``Foo2``,
    ``Foo<3,3>`` -> ``Foo3_3``, ``Foo<2,2>`` -> ``Foo2_2``;
    any other name is returned unchanged.
    """
    suffix_map = (("<3>", "3"), ("<2>", "2"), ("<3,3>", "3_3"), ("<2,2>", "2_2"))
    for suffix, tag in suffix_map:
        if class_name.endswith(suffix):
            return class_name[:-len(suffix)] + tag
    return class_name
def template_replace_list(builder, classes):
    """Rename, on *builder*, every class in *classes* whose name carries a
    recognised template suffix (see template_replace)."""
    for class_name in classes:
        python_name = template_replace(class_name)
        if python_name != class_name:
            builder.class_(class_name).rename(python_name)
def boost_units_namespace_fix(module_file):
    """Rewrite *module_file* in place, fully qualifying bare
    ``, static_rational`` occurrences as ``, boost::units::static_rational``.

    Works around a bug (possibly in boost units) where generated code
    sometimes lacks the full boost::units namespace.
    """
    replacements = {", static_rational": ", boost::units::static_rational"}
    lines = []
    with open(module_file) as infile:
        for line in infile:
            # BUG FIX: was replacements.iteritems(), which only exists on
            # Python 2; .items() behaves identically on both 2 and 3.
            for src, target in replacements.items():
                line = line.replace(src, target)
            lines.append(line)
    with open(module_file, 'w') as outfile:
        for line in lines:
            outfile.write(line)
def strip_undefined_call_policies(module_file):
    """Rewrite *module_file* in place, deleting generated ``.def(...)``
    registrations that py++ marked with ``/* undefined call policies */``.

    Can't access methods in abstract classes by return type to apply call
    policies with py++, so those methods have to be removed manually here.
    """
    lines = []
    with open(module_file) as infile:
        for line in infile:
            lines.append(line)
    strip_indices = []
    def_index = 0
    for idx, eachLine in enumerate(lines):
        if ".def(" in eachLine:
            # Remember where the most recent .def( registration started.
            def_index = idx
        if "/* undefined call policies */" in eachLine:
            # Mark every line from the marker back up to and including the
            # opening .def( line for removal.
            strip_indices.extend(range(idx, def_index-1, -1))
    # Keep only the lines whose index was not marked.
    return_lines = [i for j, i in enumerate(lines) if j not in strip_indices]
    with open(module_file, 'w') as outfile:
        for line in return_lines:
            outfile.write(line)
def do_module(module_name, builder):
    """Delegate builder configuration to the module-specific
    ``generate_<module_name>`` script and return the updated builder."""
    # Set up the builder with module specific classes
    this_module = __import__("generate_" + module_name)
    return this_module.update_builder(builder)
def generate_wrappers(args):
    """Drive py++/castxml to emit boost.python wrapper code for one module.

    *args* is argv-style: [script, module_name, work_dir, header_collection,
    castxml_binary, include_path, ...].  Writes
    ``<work_dir>/dynamic/<module_name>.cpp`` and post-processes it.
    """
    module_name = args[1]
    work_dir = args[2]
    header_collection = args[3]
    castxml_binary = args[4]
    includes = args[5:]
    # Configure castxml as the GCC-compatible XML generator for pygccxml.
    xml_generator_config = parser.xml_generator_configuration_t(xml_generator_path=castxml_binary,
                                                                xml_generator="castxml",
                                                                compiler = "gnu",
                                                                compiler_path="/usr/bin/c++",
                                                                include_paths=includes)
    # Parse declarations starting from the 'chaste' namespace only.
    builder = module_builder.module_builder_t([header_collection],
                                              xml_generator_path = castxml_binary,
                                              xml_generator_config = xml_generator_config,
                                              start_with_declarations = ['chaste'],
                                              include_paths = includes,
                                              indexing_suite_version=2)
    messages.disable(messages.W1040)  # unexposed declaration
    messages.disable(messages.W1031)  # user to expose non public member function
    # Don't wrap std library
    builder.global_ns.namespace('std').exclude()
    # Set up the builder for each module
    builder = do_module(module_name, builder)
    # Make the wrapper code
    builder.build_code_creator(module_name="_chaste_project_PyChaste_" + module_name)
    builder.code_creator.user_defined_directories.append(work_dir + "/dynamic/wrapper_headers/")
    builder.write_module(work_dir + "/dynamic/" + module_name + ".cpp")
    # Fix a bug with boost units, then strip undefined call policies.
    boost_units_namespace_fix(work_dir + "/dynamic/" + module_name + ".cpp")
    strip_undefined_call_policies(work_dir + "/dynamic/" + module_name + ".cpp")
if __name__=="__main__":
generate_wrappers(sys.argv)
| StarcoderdataPython |
6447499 | <reponame>mirestrepo/voxels-at-lems<filename>dbrec3d/bof/taylor/learn_codebook/k_means/save_cluster_id_scene_raw.py
import dbrec3d_batch
import multiprocessing
import Queue
import time
import optparse
import sys
import os
from xml.etree.ElementTree import ElementTree
class dbvalue:
    """Lightweight handle pairing a batch-registry index with a type name."""
    def __init__(self, index, type):
        self.id = index    # unsigned integer
        self.type = type   # string
class save_scene_job():
    """Value object describing one scene-save task for the worker pool."""
    def __init__(self, bof_dir, scene_id):
        self.bof_dir = bof_dir
        self.scene_id = scene_id
def execute_jobs(jobs, num_procs=5):
    """Queue every job and start *num_procs* save_scene_worker processes.

    NOTE(review): workers are started but never joined here, and
    result_queue is created but never read -- presumably fire-and-forget;
    confirm against the caller.
    """
    work_queue=multiprocessing.Queue();
    result_queue=multiprocessing.Queue();
    for job in jobs:
        work_queue.put(job)
    for i in range(num_procs):
        worker= save_scene_worker(work_queue,result_queue)
        worker.start();
        print("worker with name ",worker.name," started!")
class save_scene_worker(multiprocessing.Process):
    """Worker process that drains save_scene_job items from a shared queue
    and runs the dbrec3d 'save cluster id scene raw' batch process on each."""
    def __init__(self,work_queue,result_queue):
        # base class initialization
        multiprocessing.Process.__init__(self)
        # job management stuff
        self.work_queue = work_queue
        self.result_queue = result_queue  # unused in this class
        self.kill_received = False
    def run(self):
        # Pull jobs until the queue is empty (or kill_received is set).
        while not self.kill_received:
            # get a task
            try:
                job = self.work_queue.get_nowait()
            except Queue.Empty:
                break
            start_time = time.time();
            print("Save Scene");
            # Configure and run the batch process for this scene.
            dbrec3d_batch.init_process("bofSaveClusterIdSceneRawProcess");
            dbrec3d_batch.set_input_string(0,job.bof_dir);
            dbrec3d_batch.set_input_int(1, job.scene_id);
            dbrec3d_batch.run_process();
            print ("Runing time for worker:", self.name)
            print(time.time() - start_time);
def parse_bof_info(bof_info_file):
    """Parse a bof_info XML file and return its number of <scene> children.

    Exits the process with status -1 when the file lists no scenes.
    """
    # print() call form works identically on Python 2 and 3 for one argument.
    print('Parsing: ' + bof_info_file)
    # parse xml file
    bof_tree = ElementTree()
    bof_tree.parse(bof_info_file)
    # find scene paths
    scenes_elm = bof_tree.getroot().findall('scene')
    # BUG FIX: findall() returns a (possibly empty) list, never None, so the
    # old `is None` guard was dead code and scene-less files slipped through.
    if not scenes_elm:
        print("Invalid bof info file: No scenes element")
        sys.exit(-1)
    return len(scenes_elm)
###### The Main Algorithm ##############
if __name__=="__main__":
dbrec3d_batch.register_processes();
dbrec3d_batch.register_datatypes();
#Parse inputs
parser = optparse.OptionParser(description='bof Statistics Pass 0');
parser.add_option('--bof_dir', action="store", dest="bof_dir");
parser.add_option('--num_cores', action="store", dest="num_cores", type="int", default=4);
options, args = parser.parse_args()
bof_dir = options.bof_dir;
num_cores = options.num_cores;
if not os.path.isdir(bof_dir +"/"):
print "Invalid bof Dir"
sys.exit(-1);
#parse the number of scenes
bof_info_file = bof_dir + "/bof_info.xml";
nscenes = parse_bof_info(bof_info_file);
print "Number of Scenes:"
print nscenes
#Begin multiprocessing
t1=time.time();
work_queue=multiprocessing.Queue();
job_list=[];
#Enqueue jobs
for scene_id in range(0, nscenes):
current_job = save_scene_job(bof_dir, scene_id);
job_list.append(current_job);
execute_jobs(job_list, num_cores);
| StarcoderdataPython |
9701000 | #!/usr/bin/env python
import os
import sys
import warnings
import django
if django.VERSION[:2] == (1, 6):
    # This is only necessary for Django 1.6
    # NOTE(review): clearing local_many_to_many presumably avoids duplicate
    # M2M field registration on the bundled custom-user test models -- confirm.
    from django.contrib.auth.tests import custom_user
    custom_user.AbstractUser._meta.local_many_to_many = []
    custom_user.PermissionsMixin._meta.local_many_to_many = []
# Promote every warning to an error so the test suite fails loudly.
warnings.simplefilter('error')
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
4807134 | <reponame>JiaqiYao/dynamic_multi_label<gh_stars>1-10
import tensorflow as tf
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import pickle
import json
import os
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def __init__(self, data_dir, word2vec_path, max_sentence_length):
self.data_dir = data_dir
self.word2vec_path = word2vec_path
self.max_sentence_length = max_sentence_length
self.labels = set()
self.num_class = 0
self.label_map = dict()
self.tokenizer = None
def _build_vocabulary(self, train_texts, oov_token='UNK', filters='', lower=True):
self.tokenizer = tf.keras.preprocessing.text.Tokenizer(
oov_token=oov_token,
filters=filters,
lower=lower)
self.tokenizer.fit_on_texts(train_texts)
# add PAD
self.tokenizer.word_index['<PAD>'] = 0
self.tokenizer.index_word[0] = '<PAD>'
self.tokenizer.word_counts['<PAD>'] = 0
self.tokenizer.word_docs['<PAD>'] = 0
# get word embedding
self.dump_word_embedding(self.tokenizer.word_index)
print("Build the vocabulary done")
def build_label_map(self, train_labels_name, valid_labels_name, test_labels_name):
train_labels_path = os.path.join(self.data_dir, train_labels_name)
valid_labels_path = os.path.join(self.data_dir, valid_labels_name)
test_labels_path = os.path.join(self.data_dir, test_labels_name)
with open(train_labels_path, 'rt') as fin:
train_labels = json.load(fin)
with open(valid_labels_path, 'rt') as fin:
valid_labels = json.load(fin)
with open(test_labels_path, 'rt') as fin:
test_labels = json.load(fin)
for train_label in train_labels+valid_labels+test_labels:
self.labels = self.labels.union(train_label)
self.num_class = len(self.labels)
self.label_map = dict(zip(self.labels, range(self.num_class)))
def _transform_label(self, label):
label_id = np.zeros(self.num_class, dtype=np.int64)
for item in label:
if item in self.label_map:
label_id[self.label_map[item]] = 1
else:
return None
return label_id
def dump_train_features(self, text_name, label_name):
text_path = os.path.join(self.data_dir, text_name)
label_path = os.path.join(self.data_dir, label_name)
texts, labels = self._get_data_from_json(text_path, label_path)
self._build_vocabulary(texts)
# self._build_label_map(labels)
texts_ids = self.tokenizer.texts_to_sequences(texts)
max_sentence_length = max(len(x) for x in texts_ids)
if max_sentence_length < self.max_sentence_length:
self.max_sentence_length = max_sentence_length
print("max sentence length is {}".format(self.max_sentence_length))
# padding
texts_ids = tf.keras.preprocessing.sequence.pad_sequences(texts_ids,
maxlen=self.max_sentence_length,
padding='post',
truncating='post')
labels_ids = np.array([self._transform_label(label) for label in labels])
with open(os.path.join(self.data_dir, 'train_texts_ids.dat'), 'wb') as fout:
pickle.dump(texts_ids, fout)
with open(os.path.join(self.data_dir, 'train_labels_ids.dat'), 'wb') as fout:
pickle.dump(labels_ids, fout)
print("Train Data Done {}".format(len(labels_ids)))
def dump_eval_features(self, text_name, label_name):
text_path = os.path.join(self.data_dir, text_name)
label_path = os.path.join(self.data_dir, label_name)
texts, labels = self._get_data_from_json(text_path, label_path)
texts_ids = self.tokenizer.texts_to_sequences(texts)
# padding
texts_ids = tf.keras.preprocessing.sequence.pad_sequences(texts_ids,
maxlen=self.max_sentence_length,
padding='post',
truncating='post')
labels_ids = np.array([self._transform_label(label) for label in labels])
# texts_ids, labels_ids = self._filter_examples(texts_ids, labels_ids)
with open(os.path.join(self.data_dir, 'valid_texts_ids.dat'), 'wb') as fout:
pickle.dump(texts_ids, fout)
with open(os.path.join(self.data_dir, 'valid_labels_ids.dat'), 'wb') as fout:
pickle.dump(labels_ids, fout)
print("Valid Data Done {}".format(len(labels_ids)))
def dump_test_features(self, text_name, label_name):
text_path = os.path.join(self.data_dir, text_name)
label_path = os.path.join(self.data_dir, label_name)
texts, labels = self._get_data_from_json(text_path, label_path)
texts_ids = self.tokenizer.texts_to_sequences(texts)
# padding
texts_ids = tf.keras.preprocessing.sequence.pad_sequences(texts_ids,
maxlen=self.max_sentence_length,
padding='post',
truncating='post')
labels_ids = np.array([self._transform_label(label) for label in labels])
# texts_ids, labels_ids = self._filter_examples(texts_ids, labels_ids)
with open(os.path.join(self.data_dir, 'test_texts_ids.dat'), 'wb') as fout:
pickle.dump(texts_ids, fout)
with open(os.path.join(self.data_dir, 'test_labels_ids.dat'), 'wb') as fout:
pickle.dump(labels_ids, fout)
print("Test Data Done {}".format(len(labels_ids)))
def dump_word_embedding(self, vocabulary):
vocab_size = len(vocabulary)
print("vocabulary size is {}".format(vocab_size))
word_vectors = KeyedVectors.load_word2vec_format(self.word2vec_path, binary=True)
embed_size = word_vectors.vector_size
bound = np.sqrt(6.0 / embed_size)
vocab_size = len(vocabulary)
word_embeddings = np.random.uniform(-bound, bound, [vocab_size+1, embed_size])
for word in vocabulary:
# print(word)
if word in word_vectors:
word_embeddings[vocabulary[word], :] = word_vectors[word]
with open(os.path.join(self.data_dir, 'word_embeddings.dat'), 'wb') as fout:
pickle.dump(word_embeddings, fout)
def dump_meta_data(self):
with open(os.path.join(self.data_dir, "tokenizer.dat"), 'wb') as fout:
pickle.dump(self.tokenizer, fout)
with open(os.path.join(self.data_dir, "label_map.dat"), 'wb') as fout:
pickle.dump(self.label_map, fout)
with open(os.path.join(self.data_dir, "max_sentence_length.dat"), 'wb') as fout:
pickle.dump(self.max_sentence_length, fout)
def get_labels(self):
"""Gets the list of labels for this data set."""
raise self.labels
@classmethod
def _get_data_from_json(cls, text_path, label_path):
with open(text_path, 'rt') as fin:
texts = json.load(fin)
with open(label_path, 'rt') as fin:
labels = json.load(fin)
return texts, labels
@classmethod
def _filter_examples(cls, text_ids, label_ids):
output_text_ids = list()
output_label_ids = list()
count = 0
for text_id, label_id in zip(text_ids, label_ids):
if label_id is not None:
output_label_ids.append(label_id)
output_text_ids.append(text_id)
else:
count += 1
print("Filter {} examples".format(count))
return np.array(output_text_ids), np.array(output_label_ids)
if __name__ == "__main__":
data_dir = r'/home/yaojq/data/text/reuters'
word2vec_path = r'/home/yaojq/data/word_embedding/GoogleNews-vectors-negative300.bin'
print(data_dir)
max_seq_length = 512
processor = DataProcessor(data_dir, word2vec_path, max_seq_length)
processor.build_label_map("train_labels.txt", "valid_labels.txt", "test_labels.txt")
processor.dump_train_features("train_texts.txt", "train_labels.txt")
processor.dump_eval_features("valid_texts.txt", "valid_labels.txt")
processor.dump_test_features("test_texts.txt", "test_labels.txt")
processor.dump_meta_data()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.