id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
217801 | <reponame>ayushkalani/delightchat
# Django admin registration for the analytics app: expose the core chat
# models (accounts, users, conversations, channels) in the admin UI.
from django.contrib import admin
from analytics.models import Accounts, Users, Conversations, Channels
admin.site.register(Accounts)
admin.site.register(Users)
admin.site.register(Conversations)
admin.site.register(Channels) | StarcoderdataPython |
6662936 | #! /bin/env python
#
# Protein Engineering Analysis Tool Structure Analysis (PEATSA)
# Copyright (C) 2010 <NAME> & <NAME>
#
# Author: <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# <NAME>
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
'''Creates a job entry in the database using the provided information
The database information must be given in a configuration-file
'''
# Command-line tool (Python 2): registers a new PEATSA job in the web-app
# database from a PDB structure plus a mutation (or mutation-list) spec.
import optparse, os, sys
import PEATSA.WebApp as WebApp
import PEATSA.Core as Core
usage = "usage: %prog [options]"
parser = optparse.OptionParser(usage=usage, version="% 0.1", description=__doc__)
parser.add_option("-s", "--structure", dest="structureFile",
        help="The pdb file the job was run on.", metavar="STRUCTURE")
parser.add_option("-d", "--database-configuration",
        dest="configuration",
        help="Configuration file containing database information",
        metavar='CONF')
parser.add_option("-l", "--ligand",
        dest="ligandFile",
        help="A mol2 file of a ligand",
        metavar='LIGAND')
parser.add_option("" "--mutation",
        dest="mutation",
        help="The mutation run [optional]",
        metavar='MUTATION')
parser.add_option("" "--mutationList",
        dest="mutationList",
        help="The mutation list used [optional]",
        metavar='MUTATION')
(options, args) = parser.parse_args()
# Validate the required arguments before touching the database.
if options.structureFile is None:
    # NOTE(review): the message says "Job id" but the missing option is the
    # structure file - looks like a copy/paste slip; confirm before changing.
    print 'Job id must be provided'
    sys.exit(1)
else:
    # The job is named after the structure file (basename, no extension).
    structureName = os.path.split(options.structureFile)[1]
    structureName = os.path.splitext(structureName)[0]
if options.configuration is None:
    print 'A database configuration file must be supplied'
    sys.exit(1)
if options.mutation is None and options.mutationList is None:
    print 'One of mutation and mutationList must be supplied'
    sys.exit(1)
# Connect to the job database described by the configuration file.
configuration = Core.Environment.Configuration(filename=options.configuration)
connection = WebApp.UtilityFunctions.ConnectionFromConfiguration(configuration)
jobTable = configuration.get("DATABASE", "jobTable")
dataTable = configuration.get("DATABASE", "dataTable")
jobManager = WebApp.Data.JobManager(connection, jobTable=jobTable)
#Hack: Have to pass something for calculations ....
job = jobManager.createJob(structureName, calculations=['scan'], dataTable=dataTable)
try:
    job.setStructureFromFile(options.structureFile)
    if options.ligandFile is not None:
        job.setLigandFromFile(options.ligandFile)
    if options.mutationList is not None:
        mutationList = Core.Data.MutationListFile(filename=options.mutationList)
        job.setMutationListFile(mutationList)
    else:
        job.setMutation(options.mutation)
    # Print the new job id so calling scripts can capture it from stdout.
    print job.identification
except Exception, data:
    # On any failure remove the partially created job, then re-raise.
    # NOTE(review): '%s' % Exception prints the generic Exception class,
    # not the caught exception's type.
    print 'Encountered a %s exception' % Exception
    print 'Reason: %s' % data
    jobManager.deleteJob(job)
    raise
| StarcoderdataPython |
6627086 | import requests, time, csv
from bs4 import BeautifulSoup
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page="
# pageNumber is the page number on PCMag site to scrape
# write=True writes scraped URLs to CSV file in format phoneName|url
def getReviews(listPageRootUrl, pageNumber, write=True):
    # Returns a flat list alternating [name, url, name, url, ...] for one
    # listing page; only entries that carry a rating span are kept.
    pageList = []
    currentPage = requests.get(listPageRootUrl + str(pageNumber))
    soup = BeautifulSoup(currentPage.content, "html.parser")
    x = soup.find_all("div", class_="w-full flex flex-wrap md:flex-no-wrap py-4 border-b border-gray-lighter")
    for y in x:
        # The rating span distinguishes reviewed phones from other listings.
        z = y.find("span", class_="ml-1 mr-3")
        if z is not None:
            rowList = []
            k = y.find("h2", class_="text-base md:text-xl font-brand font-bold")
            link = "https://www.pcmag.com" + k.find("a")['href']
            p = k.find("a")['data-item']
            # Normalise the article title into a bare, lowercase phone name.
            phoneName = p.replace(" Review", "").lower().strip()
            if "(" in phoneName and ")" in phoneName:
                q = phoneName.split("(")
                phoneName = q[0].strip()
            if "+" in phoneName:
                phoneName = phoneName.replace("+", " plus")
            rowList.append(phoneName)
            rowList.append(link)
            pageList.append(phoneName)
            pageList.append(link)
            if write:
                writeCsvRow(rowList)
    return pageList
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page="
# pageNumber is the page number on PCMag site to start scraping reviews from
# timeSleep is time to sleep in seconds between making each request
# if any interruption occurs, function can be called with pageNumber = page after the last page scraped before interruption
# csv writer will append to csv file as if no interruption occured
def writeAllReviews(listPageRootUrl, pageNumber, timeSleep):
    # Walks listing pages until the site stops returning a 2xx response,
    # collecting every scraped (name, url) entry into one flat list.
    fullPhoneList = []
    startTime = time.time()
    # Enforce a minimum delay so we stay polite to the server.
    if timeSleep < 3:
        timeSleep = 5
    timeSleep = float(timeSleep)
    while requests.get(listPageRootUrl + str(pageNumber)).ok:
        pagePhoneList = getReviews(listPageRootUrl, pageNumber)
        for x in pagePhoneList:
            fullPhoneList.append(x)
        print("Reviews on page " + str(pageNumber) + ":")
        print(pagePhoneList)
        fancySleep(timeSleep)
        pageNumber += 1
    print("Reached end of reviews.")
    print("RUNTIME: " + str(time.time() - startTime) + " seconds.")
    return fullPhoneList
# appends one row to CSV
def writeCsvRow(rowList):
    """Append one row to PCMagURLs.csv using '|' as the delimiter.

    Fix: the original opened the file and never closed it, leaking one file
    handle per scraped phone; a context manager guarantees the handle is
    flushed and closed after every row.
    """
    with open("PCMagURLs.csv", "a+", encoding="utf8") as dataOutput:
        writer = csv.writer(dataOutput, delimiter='|', lineterminator="\r", quoting=csv.QUOTE_NONE)
        writer.writerow(rowList)
# for sleeping fancy
def fancySleep(timeSleep):
    """Sleep for timeSleep seconds total, printing a dotted progress line."""
    quarter = timeSleep / 4
    print("sleeping " + str(int(timeSleep)) + " seconds", end="", flush=True)
    # Three dots mark the first three quarters; the last dot ends the line.
    for step in range(3):
        time.sleep(quarter)
        if step < 2:
            print(" .", end="", flush=True)
        else:
            print(" .")
    time.sleep(quarter)
# Script entry point: scrape every listing page starting at page 1 with a
# 10 second delay between requests (runs at import time - no __main__ guard).
writeAllReviews("https://www.pcmag.com/categories/mobile-phones?page=", 1, 10)
| StarcoderdataPython |
6511706 | import tempfile
import gnupg
import pytest
from django.conf import settings
from snoop import emails, models, pgp
# Skip the whole module when no GPG binary is configured.
pytestmark = pytest.mark.skipif(not settings.SNOOP_GPG_BINARY,
                                reason="SNOOP_GPG_BINARY not set")
# Fixtures: a PGP-encrypted sample email plus the key pair that decrypts it.
PATH_HUSH_MAIL = 'eml-9-pgp/encrypted-hushmail-knockoff.eml'
HEIN_PRIVATE_KEY = 'eml-9-pgp/keys/hein-priv.gpg'
HEIN_PUBLIC_KEY = 'eml-9-pgp/keys/<KEY>'
@pytest.yield_fixture(autouse=True)
def patch_gpg_to_temp_dir(monkeypatch):
    # Build a throwaway GPG home with the test keys imported, and point the
    # snoop pgp module at it for the duration of each test.
    with tempfile.TemporaryDirectory() as tmp:
        gpg = gnupg.GPG(gnupghome=tmp, gpgbinary='gpg')
        for path in [HEIN_PRIVATE_KEY, HEIN_PUBLIC_KEY]:
            with open(settings.SNOOP_ROOT + "/" + path, 'rb') as f:
                gpg.import_keys(f.read())
        monkeypatch.setattr(pgp, 'GPG', gpg)
        monkeypatch.setattr(settings, 'SNOOP_GPG_HOME', '/tmp')
        yield
def create_email_doc(path, collection):
    # Build an in-memory Document; save() is stubbed so nothing hits the DB.
    doc = models.Document(
        path=path,
        content_type='message/rfc822',
        collection=collection,
    )
    doc.save = lambda *a, **k: None
    return doc
def parse_email(path, document_collection):
    # Convenience wrapper: parse the sample email at `path`.
    return emails.parse_email(create_email_doc(path, document_collection))
def open_email(path, document_collection):
    # Convenience wrapper: open the sample email at `path` without parsing.
    return emails.open_email(create_email_doc(path, document_collection))
def test_doc_flags(document_collection):
    # Parsing an encrypted mail should mark the document with the 'pgp' flag.
    doc = create_email_doc(PATH_HUSH_MAIL, document_collection)
    emails.parse_email(doc)
    assert doc.flags.get('pgp')
def test_header_data(document_collection):
    # Headers must still be readable from the encrypted message.
    data = parse_email(PATH_HUSH_MAIL, document_collection)
    assert data['subject'] == "Fwd: test email"
    assert data['date'] == '2016-08-10T15:00:00'
def test_attachments(document_collection):
    data = parse_email(PATH_HUSH_MAIL, document_collection)
    attach = data['attachments']
    assert len(attach) == 6
    email = open_email(PATH_HUSH_MAIL, document_collection)
    assert email.pgp
    # Part '3' is the encrypted body; it should decrypt transparently.
    with email.open_part('3') as f:
        text = f.read().decode()
        assert "This is GPG v1 speaking!" in text
        assert "Sent from my Android piece of !@#%." in text
| StarcoderdataPython |
12827576 | # coding : UTF-8
from operator import methodcaller
from test_image_embedding import *
from test_pipeline import *
from test_audio_embedding import *
def pipeline_register():
    """Return the names of every built-in pipeline covered by the smoke tests."""
    image_pipelines = [
        "image-embedding",
        "towhee/image-embedding-efficientnetb5",
        "towhee/image-embedding-efficientnetb7",
        "towhee/image-embedding-resnet101",
        "towhee/image-embedding-swinbase",
        "towhee/image-embedding-swinlarge",
        "towhee/image-embedding-vitlarge",
    ]
    audio_pipelines = [
        "towhee/audio-embedding-clmr",
        "towhee/audio-embedding-vggish",
    ]
    return image_pipelines + audio_pipelines
def pipeline_runner():
    """Run the invalid-pipeline checks once, then the valid-pipeline checks
    against every registered pipeline name.

    A check passes when its method returns None. Always returns True.
    """
    invalid_case = TestPipelineInvalid()
    for method in [m for m in dir(TestPipelineInvalid) if not m.startswith("__")]:
        print("Testing %s" % method)
        outcome = methodcaller(method)(invalid_case)
        if outcome == None:
            print("%s PASS" % method)
        else:
            print("%s FAIL" % method)
    for name in pipeline_register():
        # A fresh test object per pipeline keeps the runs independent.
        valid_case = TestPipelineValid()
        for method in [m for m in dir(TestPipelineValid) if not m.startswith("__")]:
            print("Testing %s:%s" % (method, name))
            outcome = methodcaller(method, name)(valid_case)
            if outcome == None:
                print("%s:%s PASS" % (method, name))
            else:
                print("%s:%s FAIL" % (method, name))
    return True
def image_class_pipeline_register():
    """Return (pipeline names, embedding sizes, skipped test names) for the
    image-embedding test suite.

    efficientnetb7 is left out because it exhausts memory on the test host,
    and the multi-thread / repeated-run cases are skipped for the same reason.
    """
    pipelines = [
        ("image-embedding", 2048),
        ("towhee/image-embedding-efficientnetb5", 2048),
        ("towhee/image-embedding-resnet101", 2048),
        ("towhee/image-embedding-resnet50", 2048),
        ("towhee/image-embedding-swinbase", 1024),
        ("towhee/image-embedding-swinlarge", 1536),
        ("towhee/image-embedding-vitlarge", 1024),
    ]
    pipeline_names = [name for name, _ in pipelines]
    embedding_sizes = [size for _, size in pipelines]
    skipped_cases = [
        "test_embedding_concurrent_multi_threads",
        "test_embedding_more_times",
        "test_embedding_avg_time",
    ]
    return pipeline_names, embedding_sizes, skipped_cases
def image_class_pipeline_runner():
    """Run every image-embedding test suite (invalid, valid, stress,
    performance) against every registered pipeline.

    A check passes when its method returns 1. Always returns True.
    """
    names, sizes, skipped = image_class_pipeline_register()

    def run_suite(suite_cls, pipeline, call_args):
        # Invoke every public, non-skipped test method and report PASS/FAIL.
        instance = suite_cls()
        for method in dir(suite_cls):
            if method in skipped or method.startswith("__"):
                continue
            print("Testing %s:%s" % (method, pipeline))
            outcome = methodcaller(method, *call_args)(instance)
            if outcome == 1:
                print("%s:%s PASS" % (method, pipeline))
            else:
                print("%s:%s FAIL" % (method, pipeline))

    for pipeline, size in zip(names, sizes):
        # The invalid suite takes only the pipeline name; the rest also take
        # the expected embedding size.
        run_suite(TestImageEmbeddingInvalid, pipeline, (pipeline,))
        run_suite(TestImageEmbeddingValid, pipeline, (pipeline, size))
        run_suite(TestImageEmbeddingStress, pipeline, (pipeline, size))
        run_suite(TestImageEmbeddingPerformance, pipeline, (pipeline, size))
    return True
def audio_class_pipeline_register():
    """Return (pipeline names, embedding sizes, skipped test names) for the
    audio-embedding suite.

    The clmr pipeline is excluded (memory shortage on the test host), as are
    the multi-thread / repeated-run cases.
    """
    pipeline_names = ["towhee/audio-embedding-vggish"]
    embedding_sizes = [128]
    skipped_cases = [
        "test_embedding_concurrent_multi_threads",
        "test_embedding_more_times",
        "test_embedding_avg_time",
    ]
    return pipeline_names, embedding_sizes, skipped_cases
def audio_class_pipeline_runner():
    """Run every audio-embedding test suite (invalid, valid, stress,
    performance) against every registered pipeline.

    A check passes when its method returns 1. Always returns True.
    """
    names, sizes, skipped = audio_class_pipeline_register()

    def run_suite(suite_cls, pipeline, call_args):
        # Invoke every public, non-skipped test method and report PASS/FAIL.
        instance = suite_cls()
        for method in dir(suite_cls):
            if method in skipped or method.startswith("__"):
                continue
            print("Testing %s:%s" % (method, pipeline))
            outcome = methodcaller(method, *call_args)(instance)
            if outcome == 1:
                print("%s:%s PASS" % (method, pipeline))
            else:
                print("%s:%s FAIL" % (method, pipeline))

    for pipeline, size in zip(names, sizes):
        # The invalid suite takes only the pipeline name; the rest also take
        # the expected embedding size.
        run_suite(TestAudioEmbeddingInvalid, pipeline, (pipeline,))
        run_suite(TestAudioEmbeddingValid, pipeline, (pipeline, size))
        run_suite(TestAudioEmbeddingStress, pipeline, (pipeline, size))
        run_suite(TestAudioEmbeddingPerformance, pipeline, (pipeline, size))
    return True
def test_caller():
    """Top-level driver: run every enabled suite in order.

    Audio suites are currently disabled pending issue 463.
    """
    pipeline_runner()
    image_class_pipeline_runner()
    # skip audio tests for issue 463
    # audio_class_pipeline_runner()
    return True
if __name__ == '__main__':
    test_caller()
| StarcoderdataPython |
6566295 | <gh_stars>0
# This file is part of gnome-tweak-tool.
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 <NAME>
#
# gnome-tweak-tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gnome-tweak-tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gnome-tweak-tool. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from gi.repository import Gtk, Gdk
from gtweak.tweakmodel import Tweak
from gtweak.widgets import ListBoxTweakGroup, Title, build_label_beside_widget
class _TestInfoTweak(Gtk.Box, Tweak):
    # Tweak row that shows a button beside its label, optionally decorated
    # with an info or warning icon (exercises build_label_beside_widget).
    def __init__(self, name, description, **options):
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
        Tweak.__init__(self, name, description, **options)
        build_label_beside_widget(
            name,
            Gtk.Button(options.get("_test_button_name",name)),
            info=options.get("_tweak_info"),
            warning=options.get("_tweak_warning"),
            hbox=self)
class _TestTweak(Gtk.Box, Tweak):
    # Minimal tweak row: just the name rendered as "... name ...".
    def __init__(self, name, description, **options):
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
        Tweak.__init__(self, name, description, **options)
        self.add(Gtk.Label("... " + name + " ..."))
class _TestButtonTweak(Gtk.Box, Tweak):
    # Tweak row with a clickable button that raises either an information
    # notification or a logout notification, depending on the options.
    def __init__(self, name, description, **options):
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
        Tweak.__init__(self, name, description, **options)
        widget = Gtk.Button(name)
        widget.connect("clicked", self._on_click)
        self.add(widget)
        self._need_action = options.get("_need_action")
        self._need_logout = options.get("_need_logout")
    def _on_click(self, sender):
        if self._need_action:
            self.notify_information(self.name)
        elif self._need_logout:
            self.notify_logout()
# CSS override keyed on the widget uids used below, to verify that tweak
# rows, titles and groups can be themed individually (red/blue/green).
css_provider = Gtk.CssProvider()
css_provider.load_from_data("""
.list-row.tweak#tweak-test-foo {
background-color: red;
}
.list-row.tweak.title#title-tweak-test {
background-color: blue;
}
.list.tweak-group#group-tweak-test {
background-color: green;
}
""")
screen = Gdk.Screen.get_default()
context = Gtk.StyleContext()
# Priority just above the user level so the test colours win.
context.add_provider_for_screen(
    screen,
    css_provider,
    1 + Gtk.STYLE_PROVIDER_PRIORITY_USER)
# Groups exported to the main window: a long scrolling group, a mixed group
# exercising info/warning/notification widgets, and a unicode stress test.
TWEAK_GROUPS = [
    ListBoxTweakGroup(
        "Test Many Settings",
        *[_TestTweak("name: " + str(d), "desc: " + str(d)) for d in range(10)],
        uid="group-tweak-test"),
    ListBoxTweakGroup(
        "Test Settings",
        _TestTweak("foo bar", "does foo bar", uid="tweak-test-foo"),
        _TestTweak("foo baz", "does foo baz"),
        _TestInfoTweak("long string "*10, "long description "*10, _test_button_name="short"),
        _TestInfoTweak("foo info", "info widget", _tweak_info="Information"),
        _TestInfoTweak("foo warning", "info widget", _tweak_warning="Warning"),
        Title("Test Notifications", "", uid="title-tweak-test"),
        _TestButtonTweak("Shows Information", "foo bar", _need_action=True),
        _TestButtonTweak("Needs Logout", "foo bar log", _need_logout=True)),
    ListBoxTweakGroup(
        "Unicode Test",
        Title("Words", "", uid="title-tweak-test"),
        *[_TestTweak( str(d), str(d)) for d in ["Muñoz",
                                                "Español",
                                                "größer",
                                                "jünger",
                                                "grün",
                                                "счастье",
                                                "سعادة"]]),
]
| StarcoderdataPython |
5110361 | <gh_stars>1-10
# Generated by Django 2.0.10 on 2019-02-15 16:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Introduces the AtriaCalendar model (owned by either an organization or
    # a user) and repoints AtriaEvent from a direct org link to a calendar.
    dependencies = [
        ('atriacalendar', '0007_auto_20190210_1240'),
    ]
    operations = [
        migrations.CreateModel(
            name='AtriaCalendar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('calendar_name', models.CharField(blank=True, max_length=40)),
                ('org_owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='atriacalendar.AtriaOrganization')),
                ('user_owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.RemoveField(
            model_name='atriaevent',
            name='org',
        ),
        migrations.AddField(
            model_name='atriaevent',
            name='calendar',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='atriacalendar.AtriaCalendar'),
        ),
    ]
| StarcoderdataPython |
3264813 | <gh_stars>0
from MachineHypermediaToolkit.server.http.HypermediaHttpServer import HypermediaHTTPServer, HypermediaHTTPRequestHandler
from MachineHypermediaToolkit.resource.HypermediaCollection import HypermediaCollection
import sys
def test(HandlerClass = HypermediaHTTPRequestHandler,
         ServerClass = HypermediaHTTPServer, protocol="HTTP/1.0"):
    """Test the HypermediaHTTP request handler class.
    This runs an HTTP server on port 8000 (or the first command line
    argument).
    """
    from functools import partial
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)
    HandlerClass.protocol_version = protocol
    # A single root collection handles routing for every incoming request.
    resourceBase = HypermediaCollection()
    httpd = ServerClass(server_address, \
        partial(HandlerClass, resourceBase.routeRequest))
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    # Blocks forever serving requests (Python 2 script).
    httpd.serve_forever()
if __name__ == '__main__':
    test()
| StarcoderdataPython |
4836617 | <filename>hanzo/warcvalid.py
#!/usr/bin/env python
"""warcvalid - check a warc is ok"""
from __future__ import print_function
import os
import sys
import sys
import os.path
from optparse import OptionParser
from .warctools import WarcRecord, expand_files
# Command-line definition: warcvalid [options] warc [warc ...]
parser = OptionParser(usage="%prog [options] warc warc warc")
parser.add_option("-l", "--limit", dest="limit")
parser.add_option("-I", "--input", dest="input_format")
parser.add_option("-L", "--log-level", dest="log_level")
parser.set_defaults(output_directory=None, limit=None, log_level="info")
def main(argv):
    """Validate each WARC file given on the command line.

    Returns 0 when every record in every file is well formed, -1 as soon as
    any file shows parse errors, record validation errors, or raises while
    being read (remaining files are still scanned, matching the original
    behaviour).

    Fixes over the original: every archive handle is closed (previously only
    the last one was, leaking one handle per extra input file),
    record.validate() is called once instead of twice per record, the unused
    `out` local is gone, and the "imput" typo in the usage error is fixed.
    """
    (options, input_files) = parser.parse_args(args=argv[1:])
    if len(input_files) < 1:
        parser.error("no input warc file(s)")
    correct = True
    fh = None
    try:
        for name in expand_files(input_files):
            # Close the archive from the previous iteration before opening
            # the next one.
            if fh:
                fh.close()
                fh = None
            fh = WarcRecord.open_archive(name, gzip="auto")
            for (offset, record, errors) in fh.read_records(limit=None):
                if errors:
                    print("warc errors at %s:%d" % (name, offset), file=sys.stderr)
                    print(errors, file=sys.stderr)
                    correct = False
                    break
                elif record is not None:
                    validation_errors = record.validate()  # ugh name, returns errors
                    if validation_errors:
                        print("warc errors at %s:%d" % (name, offset), file=sys.stderr)
                        print(validation_errors, file=sys.stderr)
                        correct = False
                        break
    except Exception as e:
        print("Exception: %s" % (str(e)), file=sys.stderr)
        correct = False
    finally:
        if fh:
            fh.close()
    if correct:
        return 0
    else:
        return -1  # failure code
def run():
    # Console entry point: exit the process with main()'s status code.
    sys.exit(main(sys.argv))
if __name__ == '__main__':
    run()
| StarcoderdataPython |
238790 | <reponame>4con/grpc-win-xp<filename>src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py<gh_stars>10-100
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for creating tests of implementations of the Face layer."""
# unittest is referenced from specification in this module.
import unittest # pylint: disable=unused-import
# test_interfaces is referenced from specification in this module.
from tests.unit.framework.interfaces.face import _blocking_invocation_inline_service
from tests.unit.framework.interfaces.face import _future_invocation_asynchronous_event_service
from tests.unit.framework.interfaces.face import _invocation
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
# The two invocation styles every Face implementation must support.
_TEST_CASE_SUPERCLASSES = (
    _blocking_invocation_inline_service.TestCase,
    _future_invocation_asynchronous_event_service.TestCase,
)
def test_cases(implementation):
    """Creates unittest.TestCase classes for a given Face layer implementation.
    Args:
      implementation: A test_interfaces.Implementation specifying creation and
        destruction of a given Face layer implementation.
    Returns:
      A sequence of subclasses of unittest.TestCase defining tests of the
        specified Face layer implementation.
    """
    # One synthesized class per (invoker constructor, superclass) pair; the
    # class name combines both so failures identify the exact combination.
    return [
        type(
            constructor.name() + base.NAME,
            (base,), {
                'implementation': implementation,
                'invoker_constructor': constructor,
                '__module__': implementation.__module__,
            })
        for constructor in _invocation.invoker_constructors()
        for base in _TEST_CASE_SUPERCLASSES
    ]
| StarcoderdataPython |
9637694 | from moonfire_tokenomics.data_types import Allocation, AllocationRecord, Blockchain, Category, CommonType, Sector, Token
# Static metadata record for the MBOX (Mobox) token: a capped-supply BSC
# gaming token with payment, governance and dividend roles. The month-0
# allocation shares sum to 1.0 (0.08 + 0.21 + 0.2 + 0.51).
mbox = Token(
    name="MBOX",
    project="Mobox",
    sector=Sector.GAMING,
    blockchain=[Blockchain.BSC],
    category=[Category.PAYMENT, Category.GOV, Category.DIVIDEND],
    capped=True,
    allocations=[
        Allocation(
            month=0,
            records=[
                AllocationRecord(type="Strategic Partners", common_type=CommonType.INVESTORS, share=0.08),
                AllocationRecord(type="Contributors", common_type=CommonType.ECOSYSTEM, share=0.21),
                AllocationRecord(type="Team", common_type=CommonType.TEAM, share=0.2),
                AllocationRecord(type="Community", common_type=CommonType.ECOSYSTEM, share=0.51),
            ],
        ),
    ],
    sources=[
        "https://research.binance.com/en/projects/mobox",
    ],
    year=2021,
)
| StarcoderdataPython |
8174835 | <reponame>matteo-rizzo/fc4-pytorch
import math
import torch
from torch import Tensor
from torch.nn.functional import normalize
from classes.core.Loss import Loss
class AngularLoss(Loss):
    """Mean angular error, in degrees, between predicted and target vectors
    (each row of `pred` / `label` compared along dim 1)."""

    def __init__(self, device: torch.device):
        super().__init__(device)

    def _compute(self, pred: Tensor, label: Tensor, safe_v: float = 0.999999) -> Tensor:
        # Cosine similarity of the L2-normalised rows, clamped slightly
        # inside [-1, 1] so acos stays finite and differentiable.
        cos_sim = torch.sum(normalize(pred, dim=1) * normalize(label, dim=1), dim=1)
        cos_sim = torch.clamp(cos_sim, -safe_v, safe_v)
        degrees = torch.acos(cos_sim) * (180 / math.pi)
        return torch.mean(degrees).to(self._device)
| StarcoderdataPython |
1829520 | """
10-15 一些关于学习正则的建议
"""
# 正则表达式可以帮助我们完成一些字符串内置函数所无法完成的功能,提高工作效率,善用正则表达式解决字符串相关的问题。
# 正则表达式并不是Python独有的,几乎所有主流语言都支持正则表达式。
# Python主要应用在爬虫以及数据处理与分析上。爬虫的核心在于抓取html,利用正则表达式分析html,最终提炼出需要的数据和信息。
# 建议:
# 常用的正则表达式,如电话号码的判断,邮箱的判断,这样常见的校验规则是可以使用别人写好的。
# 搜索常见正则表达式
# 如果用别人写好的正则表达式,可以花一点时间分析一下别人的正则表达式是怎么写的。
# 正则表达式功能强大,但是长时间不用,很多细节都会忘掉。
# 我们知道正则表达式很强大,但我们写代码的时候会有意识回避正则表达式,反而会过度依赖系统内置的字符串操作函数。
# 有意识地多使用正则表达式可以帮助我们学习正则表达式。
| StarcoderdataPython |
12843384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Attachment) on 2019-01-22.
# 2019, SMART Health IT.
from . import element
class Attachment(element.Element):
    """ Content in a format defined elsewhere.

    For referring to data content defined in other formats.

    (The docstrings in this block were reconstructed; the originals had been
    garbled into one character per line by a text-extraction step. The code
    itself is unchanged.)
    """

    resource_type = "Attachment"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.contentType = None
        """ Mime type of the content, with charset etc..
        Type `str`. """

        self.creation = None
        """ Date attachment was first created.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.data = None
        """ Data inline, base64ed.
        Type `str`. """

        self.hash = None
        """ Hash of the data (sha-1, base64ed).
        Type `str`. """

        self.language = None
        """ Human language of the content (BCP-47).
        Type `str`. """

        self.size = None
        """ Number of bytes of content (if url provided).
        Type `int`. """

        self.title = None
        """ Label to display in place of the data.
        Type `str`. """

        self.url = None
        """ Uri where the data can be found.
        Type `str`. """

        super(Attachment, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(Attachment, self).elementProperties()
        js.extend([
            ("contentType", "contentType", str, False, None, False),
            ("creation", "creation", fhirdate.FHIRDate, False, None, False),
            ("data", "data", str, False, None, False),
            ("hash", "hash", str, False, None, False),
            ("language", "language", str, False, None, False),
            ("size", "size", int, False, None, False),
            ("title", "title", str, False, None, False),
            ("url", "url", str, False, None, False),
        ])
        return js
import sys
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
| StarcoderdataPython |
3402776 | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(N):
    """Return the minimal perimeter of a rectangle with integer sides and
    area N (Codility MinPerimeterRectangle).

    Every divisor i <= sqrt(N) pairs with N // i; the pair closest to a
    square gives the smallest perimeter 2 * (i + N // i).

    Fixes over the original: the running minimum was initialised to 0 so
    its comparison could never fire (the function only returned the right
    answer by accident of iteration order), and the sqrt bound is replaced
    by the exact integer test i * i <= N, avoiding float precision issues
    for very large N.
    """
    best = float("inf")
    i = 1
    while i * i <= N:
        if N % i == 0:
            best = min(best, 2 * (i + N // i))
        i += 1
    return best
| StarcoderdataPython |
11344064 | from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_by_distance
def show_c():
    """Return (name, town, distance) tuples for the 10 monitoring stations
    closest to the Cambridge city centre coordinate (52.2053, 0.1218)."""
    stations = build_station_list()
    # stations_by_distance yields (station name, distance) pairs, nearest first.
    ranked = stations_by_distance(stations, (52.2053, 0.1218))
    nearest = []
    for entry in ranked[:10]:
        # Look the station back up by name to recover its town.
        matches = [(entry[0], s.town, entry[1]) for s in stations if s.name == entry[0]]
        nearest.append(matches[0])
    return nearest
print(show_c())
def show_f():
    """Return (name, town, distance) tuples for the 10 monitoring stations
    furthest from the Cambridge city centre coordinate (52.2053, 0.1218)."""
    stations = build_station_list()
    # stations_by_distance yields (station name, distance) pairs, nearest first.
    ranked = stations_by_distance(stations, (52.2053, 0.1218))
    furthest = []
    total = len(ranked)
    for idx in range(total - 10, total):
        entry = ranked[idx]
        # Look the station back up by name to recover its town.
        matches = [(entry[0], s.town, entry[1]) for s in stations if s.name == entry[0]]
        furthest.append(matches[0])
    return furthest
print(show_f()) | StarcoderdataPython |
6597978 | <filename>datamart/unit_tests/test_general_materializer.py<gh_stars>1-10
from datamart.materializers.general_materializer import GeneralMaterializer
import unittest
from datamart.utilities.utils import Utils
class TestGeneralMaterializer(unittest.TestCase):
    # Integration tests for GeneralMaterializer. Both cases fetch a live URL,
    # so they require network access (and the remote content staying stable).

    def setUp(self):
        self.general_materializer = GeneralMaterializer()

    @Utils.test_print
    def test_get_csv(self):
        # A remote CSV should round-trip through get() unchanged.
        mock_metadata = {
            "materialization": {
                "arguments": {
                    "url": "http://insight.dev.schoolwires.com/HelpAssets/C2Assets/C2Files/C2ImportFamRelSample.csv",
                    "file_type": "csv"
                }
            }
        }
        result = self.general_materializer.get(metadata=mock_metadata).to_csv(index=False)
        expected = "Parent Identifier,Student Identifier\n1001,1002\n1010,1020\n"
        self.assertEqual(result, expected)

    @Utils.test_print
    def test_get_html(self):
        # The first <table> on the remote page should be parsed into a frame.
        mock_metadata = {
            "materialization": {
                "arguments": {
                    "url": "https://www.w3schools.com/html/html_tables.asp",
                    "file_type": "html"
                }
            }
        }
        result = self.general_materializer.get(metadata=mock_metadata).to_csv(index=False)
        expected = """Company,Contact,Country
<NAME>,<NAME>,Germany
Centro comercial Moctezuma,Francisco Chang,Mexico
<NAME>,<NAME>,Austria
Island Trading,<NAME>,UK
<NAME>,<NAME>,Canada
Magazz<NAME>,<NAME>,Italy
"""
        self.assertEqual(result, expected)
| StarcoderdataPython |
3578596 | <filename>tests/test_pdf_reader.py
import unittest
import pytest
from extractor.pdf_reader import PDFReader
@pytest.mark.usefixtures("path_to_anthology")
class TestPDFReader(unittest.TestCase):
    # Exercises PDFReader over page ranges of the anthology PDF provided by
    # the path_to_anthology fixture (which sets self._path).

    def test_read_page(self):
        reader = PDFReader(self._path)
        text_stack = []
        for texts in reader.iterate_page_texts(start_page=27, end_page=50):
            text_stack.append(texts)
        # Inclusive page range: expect exactly one entry per page.
        self.assertEqual(len(text_stack), 50 - 27 + 1)
        self.assertTrue("Learning to Understand" in " ".join(text_stack[0]))
        self.assertTrue("DeFormer" in " ".join(text_stack[-1]))

    def test_read_schedule(self):
        reader = PDFReader(self._path)
        # Spot-check the first and last schedule pages while counting pages.
        count = 1
        for texts in reader.iterate_page_texts(start_page=74):
            if count == 1:
                self.assertGreater(len(texts), 0)
                self.assertTrue("Monday, July 6" in " ".join(texts))
            elif count == 272:
                self.assertGreater(len(texts), 0)
                self.assertTrue("Demo Session 5C" in " ".join(texts))
            count += 1
        self.assertEqual(count - 1, 272 - 74 + 1)
| StarcoderdataPython |
197991 | <reponame>ezequieljsosa/sndg-bio
#!/usr/bin/python
import sys
import os
import getopt
import time
#import mysql.connector
from random import randint
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from commands import getoutput
def help():
    # Print Spanish usage text: -i input contigs/scaffolds file (default
    # contigs.fasta), -t organism type (B=bacteria, A=archaea, default B),
    # -h this help message.  NOTE: shadows the builtin help(); kept as-is
    # because the script calls it by this name.
    # The continuation lines below are INSIDE the string literal and must
    # stay at column 0 so the printed text is not indented.
    print "Prediccion de rRNAs y tRNAs en una secuencia de nucleotidos.\n\
Opciones:\n\
-i Archivo de contigs o scaffolds. Default: contigs.fasta\n\
-t Tipo de organismo (B=bacteria, A=archaea). Default: B\n\
-h Imprime este mensaje de ayuda\n"
# Python 2 script: predicts rRNAs (RNAmmer) and tRNAs (tRNAscan-SE) for every
# contig in the input fasta, writes per-contig fasta outputs sorted by genomic
# position, and appends progress to a "<input>.log" file.
try:
    options, args = getopt.getopt(sys.argv[1:], "i:t:h")
except getopt.GetoptError as err:
    print str(err)
    sys.exit(2)

# Configurable parameters
params = {}
params["i"] = "contigs.fasta"  # default input contigs file
params["t"] = "B"  # organism type (B = bacteria, A = archaea)

# Apply any parameters that were given on the command line
for option, value in options:
    if option.startswith("-"): option = option[1:]
    if option in params.keys(): params[option] = value
    if option == "h":
        help()
        sys.exit()

log = params["i"] + ".log"
if os.path.isfile(log):
    # A log already exists: ask (in Spanish) whether to restart the pipeline.
    print "Ya hay un pipeline en proceso. Comenzar nuevamente (s/n)?"
    seguir = raw_input().strip()
    if seguir == 's':
        pass
    else:
        sys.exit()

now = time.strftime("%c")
log_step = open(log, "a")
print >> log_step, "[ %s ] Comenzando Pipeline desde Paso 1. Anotacion completa.\n" % now

# Check that the input fasta file exists (via `ls` rather than os.path)
if getoutput("ls '%s' " % params["i"]) != params["i"]:
    print ("Error: No se encuentra el archivo %s \nUse el parametro -h para obtener ayuda sobre el uso del script" % params["i"])
    sys.exit(2)

if params["t"] != "B" and params["t"] != "A":
    print ("Error: Debe ingresar un tipo correcto de organismo (A= Archaea; B= Bacteria)\nUse el parametro -h para obtener ayuda sobre el uso del script ")
    sys.exit(2)

# Map the organism type to RNAmmer's model name
if params["t"] == "B":
    tipornammer= "bac"
    print "Prediccion de RNAs para Bacteria"
if params["t"] == "A":
    tipornammer= "arc"
    print "Prediccion de RNAs para Archaea"

# Remove any leftover temp dir from a previous run
getoutput("rm " + "-r tmpbia/")
f_contigs = open(params["i"])

# Run the predictions, one contig/scaffold at a time
for seq_record in SeqIO.parse(f_contigs, "fasta"):
    print " " + seq_record.id + ".fna"
    getoutput("mkdir tmpbia")
    getoutput("mkdir '%s' " % (seq_record.id))
    fasta_file = open("%s/%s.fna" % (seq_record.id, seq_record.id), "w")
    SeqIO.write(seq_record, fasta_file, "fasta")
    fasta_file.close()
    # rRNA detection with RNAmmer (arc or bac model)
    getoutput("rnammer -S %s -m lsu,ssu,tsu -g 'tmpbia/rRNA_%s.gff' -f '%s/rRNA_%s.fna' ' %s/%s.fna' " % (tipornammer, seq_record.id, seq_record.id, seq_record.id, seq_record.id, seq_record.id) )
    # tRNA detection with tRNAscan-SE (-A archaeal or -B bacterial)
    getoutput("tRNAscan-SE -%s -o 'tmpbia/tRNA_%s.report1' -f 'tmpbia/tRNA_%s.report2' '%s/%s.fna' " % (params["t"], seq_record.id, seq_record.id, seq_record.id, seq_record.id) )
    # Build a fasta file from the tRNAscan-SE output
    tRNAreport1 = open("tmpbia/tRNA_%s.report1" % (seq_record.id) )
    tRNAs = list(tRNAreport1)
    tRNAreport1.close()
    getoutput("grep Seq 'tmpbia/tRNA_%s.report2' > 'tmpbia/tRNA_seqs_%s.report2'" % (seq_record.id, seq_record.id) )
    tRNAreport2 = open("tmpbia/tRNA_seqs_%s.report2" % seq_record.id )
    tRNAseqs = list(tRNAreport2)
    tRNAreport2.close()
    tRNAfasta = open("%s/tRNA_%s.fna" % (seq_record.id, seq_record.id), "w")
    i=0
    # Skip the 3 header lines of report1; each remaining line is one tRNA hit.
    # NOTE: `id` and `type` shadow builtins here (kept for byte-compatibility).
    for tRNA in tRNAs[3:]:
        id, num, beg, end, type, codon, intronb, introne, score = tRNA.strip().split("\t")[0:9]
        trnaseq_record = SeqRecord(Seq(tRNAseqs[i][5:-1]), id="tRNA_%s_%s-%s" % (id.strip(), beg.strip(), end.strip() ), description="/molecule=tRNA-%s /score=%s /anticodon=%s" % (type, score, codon))
        i+=1
        SeqIO.write(trnaseq_record, tRNAfasta, "fasta")
    tRNAfasta.close()
    # # Enmascarado de genes de RNA en el genoma
    # contigs = open(params["i"])
    # seq_iterator = SeqIO.parse(contigs, "fasta")
    # rRNAreport = open("rRNA_%s.gff" % (file_prefix) )
    # rRNAs = list(rRNAreport)
    # rRNAreport.close()
    # mask_contigs = open("mask_%s.fna" % input_name, "w")
    # # mask_contigs = open("mask_%s.coords" % input_name, "w")
    # for record in seq_iterator:
    # seq = list(record.seq)
    # #Enmascarado de rRNA
    # for rRNA in rRNAs[6:-1]:
    # seqname, source, feature, beg, end= rRNA.strip().split("\t")[:5]
    # if seqname == record.id:
    # beg, end = int(beg)+50, int(end)-50
    # seq[beg-1:end] = ["N"]*(end-beg+1)
    # #Enmascarado de tRNA
    # for tRNA in tRNAs[3:]:
    # id, num, beg, end= tRNA.strip().split("\t")[:4]
    # if id.strip() == record.id:
    # beg, end = int(beg), int(end)
    # if beg > end:
    # beg, end = end, beg
    # beg, end = beg+50, end-50
    # seq[beg-1:end] = ["N"]*(end-beg+1)
    # record.seq = Seq("".join(seq))
    # SeqIO.write(record, mask_contigs, "fasta")
    # contigs.close()
    # mask_contigs.close()
    # Sort the rRNA and tRNA fasta files by genomic start position
    rRNAs = open("%s/rRNA_%s.fna" % (seq_record.id, seq_record.id) )
    rRNA_list=list(SeqIO.parse(rRNAs,"fasta"))
    rRNAs.close()
    rRNA_list.sort(cmp=lambda x,y: cmp(int(x.id.strip().split("_")[-2].split("-")[0]), int(y.id.strip().split("_")[-2].split("-")[0])))
    ordered_rRNAs = open("%s/rRNA_%s.fna" % (seq_record.id, seq_record.id), "w")
    SeqIO.write(rRNA_list, ordered_rRNAs, "fasta")
    ordered_rRNAs.close()
    tRNAs = open("%s/tRNA_%s.fna" % (seq_record.id, seq_record.id) )
    tRNA_list=list(SeqIO.parse(tRNAs,"fasta"))
    tRNAs.close()
    tRNA_list.sort(cmp=lambda x,y: cmp(int(x.id.strip().split("_")[-1].split("-")[0]), int(y.id.strip().split("_")[-1].split("-")[0])))
    ordered_tRNAs = open("%s/tRNA_%s.fna" % (seq_record.id, seq_record.id), "w")
    SeqIO.write(tRNA_list, ordered_tRNAs, "fasta")
    ordered_tRNAs.close()
    ##Almacenado en DB MySql
    #conexion = mysql.connector.connect(user='gburguener', database='BIAGenome')
    #cursor = conexion.cursor()
    #for rRNA in rRNA_list:
    # #Insertar nuevo RNA
    # add_RNA = ("INSERT INTO RNA " "(idRNA, Tipo, Start, Stop, SecuenciaDNA, idContig) " "VALUES (%s, %s, %s, %s, %s, %s)")
    # datos_RNA = ('Geert', 'Vanderkelen', tomorrow, 'M', date(1977, 6, 14))
    # cursor.execute(add_RNA, datos_RNA)
    # print rRNA
    #conexion.commit()
    #cursor.close()
    #conexion.close()

f_contigs.close()
getoutput("rm "+ "-r tmpbia/")
now = time.strftime("%c")
print >> log_step, "[ %s ] Paso 1 RNApredictor corrido sobre archivo %s para un organismo de tipo %s\n" % (now, params["i"], params["t"])
log_step.close()
1989757 | <reponame>kenserr/cs165<gh_stars>0
from django.contrib import admin
from .models import passport_stat
# Expose passport_stat records in the Django admin site.
admin.site.register(passport_stat)
# Register your models here.
| StarcoderdataPython |
3565529 | """
pycmark.cli
~~~~~~~~~~~
command line tools for pycmark.
:copyright: Copyright 2017-2019 by <NAME>
:license: Apache License 2.0, see LICENSE for details.
"""
from docutils.core import publish_cmdline
from pycmark import CommonMarkParser
def md2html() -> None:
    """Console entry point: parse CommonMark input with CommonMarkParser and
    render it through docutils' HTML5 writer (I/O handled by publish_cmdline)."""
    publish_cmdline(parser=CommonMarkParser(), writer_name='html5')
| StarcoderdataPython |
5094008 | from sketcher import *
import random
# Generative-art script: fill a 600x600 canvas with a randomly seeded
# simplex-gradient ink, shuffle the image into 10x10 tiles, then show and
# save it under a random "coolNNNNN.png" name.
canvas = Canvas(600, 600)
# Three independent noise seeds and a random bright-ish base color.
noise_seeds = [random.randint(0,999) for i in range(0,3)]
color = [random.randint(150,250) for i in range(3)]
warp = Ink('simplex_gradient',color=color, c_var=70, noise_seeds=noise_seeds, noise_scale=0.001)
canvas.fill(warp)
img = shuffle_image(canvas.img, 10, 10)
img.show()
img.save('cool{}.png'.format(random.randint(1,99999)))
| StarcoderdataPython |
185838 | # Copyright 2021 Nokia
# Licensed under the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
from typing import Optional
from common.files import create_temp_dir
from common.logger import Logger
class Policy(object):
    """Value object pairing a policy document with its (optional) namespace.

    ``namespace`` may be None; ``policy`` holds the policy text/identifier.
    """

    def __init__(self, namespace: Optional[str], policy: str):
        self.policy = policy
        self.namespace = namespace
def create_policies_dir(args) -> str:
    """Create a temporary directory for policy files and log its path.

    Cleanup behaviour follows the ``--no-cleanup`` flag (the inverted flag is
    forwarded to ``create_temp_dir``).
    """
    cleanup_wanted = not args.no_cleanup
    path = create_temp_dir(cleanup_wanted)
    Logger.get_instance().debug('Created policies directory: ' + path)
    return path
11247472 | import ast
from niveristand import _errormessages
from niveristand.clientapi import realtimesequencedefinition
from niveristand.errors import TranslateError
def custom_stop_task(node, resources):
    """Translate a stop_task(...) call into the real-time sequence definition.

    Validates that the single argument is a plain variable name, then emits a
    stop-task instruction for that name into the current block.
    """
    _validate_restrictions(node)
    realtimesequencedefinition.add_stop_task(resources.get_current_block(), node.args[0].id)
def _validate_restrictions(node):
    """Ensure the stop_task call's first argument is a plain variable name.

    Raises TranslateError when the argument is anything other than an
    ``ast.Name`` (e.g. a literal or a call expression).
    """
    first_arg = node.args[0]
    if isinstance(first_arg, ast.Name):
        return
    raise TranslateError(_errormessages.invalid_taskname_for_stop_task)
| StarcoderdataPython |
3404479 | from persistence.repositories.course_rating_repository_postgres import CourseRatingRepositoryPostgres
from exceptions.http_error import NotFoundError
# Module-level repository singleton shared by the course-rating operations below.
crrp = CourseRatingRepositoryPostgres()
def delete_course_rating(db, course_id, rating_id):
    """Delete one rating from a course.

    Raises NotFoundError when no such rating exists for the course.
    """
    existing = crrp.get_course_rating(db, course_id, rating_id)
    if existing:
        crrp.delete_course_rating(db, existing)
        return
    raise NotFoundError("Rating {} in Course {}".format(rating_id, course_id))
| StarcoderdataPython |
6426821 | <reponame>avroshk/7100_Spring_16<gh_stars>1-10
from __future__ import division
import os, random, wave
import scipy.io.wavfile as wavfile
import numpy as np
from random import randint
# Python 2 script: builds numOutput synthetic multi-speaker wav files by
# concatenating randomly chosen utterances, and writes an annotation file
# mapping each output file to speaker IDs and their start timestamps.
numSpeakers = 4 ## In each file
numRepetitions = 2
numOutput = 200 ## to be generated
set = "F"  # NOTE(review): shadows builtin `set`; harmless here, but rename if extending.
outputFileName = "set"+set+"_"+"S"+str(numSpeakers)

#For future when using a podcast
#minLengthOfSpeech = 2; #seconds
#minLengthOfSpeech = 10; #seconds

root = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/data"
outputRoot = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset"
txtFile = open(outputRoot+"/"+"annotation"+outputFileName+".txt", "w")

#####--- Annotation format---####
## fileid,speaker1_ID,start_timestamp,speaker2_ID,start_timestamp,..... ##
##For example, ## 1,13,0,20,18.2,18,29.7,17,43.1
## for a set of 4 speakers
## for file ID 1
## speaker ID 13 starts speaking at 0 sec
## speaker ID 20 starts speaking at 18.2 sec
## speaker ID 18 starts speaking at 29.7 sec
## speaker ID 17 starts speaking at 43.1 sec

selected_folders = []
folders = []
files = []
list = []  # NOTE(review): shadows builtin `list`; holds the audio chunks per output file.
timestamp = 0

#Collect all folder names (corresponding to speakers)
for item in os.listdir(root):
    if not item.startswith('.'):
        folders.append(item)

for n in range(1,numOutput+1):
    print outputFileName+"_"+str(n)
    txtFile.write("{0}".format(n))
    #Randomly select speakers
    selected_folders = random.sample(folders,numSpeakers)
    print selected_folders
    # Optionally repeat a random subset of speakers up to numRepetitions times
    if numRepetitions > 0:
        num_speakers_to_be_repeated = randint(0,numSpeakers)
        selected_folders_to_be_repeated = random.sample(selected_folders,num_speakers_to_be_repeated)
        for folder in selected_folders_to_be_repeated:
            num_repeats = randint(0,numRepetitions-1)
            for i in range(0,num_repeats+1):
                selected_folders.append(folder)
    random.shuffle(selected_folders)
    #Iterate through selected speakers
    for folder in selected_folders:
        #Collect all samples spoken by a speaker
        for item in os.listdir(root+"/"+folder):
            if not item.startswith('.') and os.path.isfile(os.path.join(root+"/"+folder, item)):
                files.append(item)
        #Select a random speech (NOTE: `file` shadows the py2 builtin)
        file = random.choice(files)
        print root+"/"+folder+"/"+file
        #Annotate the speaker ID
        txtFile.write(",{0}".format(folder))
        #read the speech in the file
        rate,data=wavfile.read(root+"/"+folder+"/"+file)
        #downsample to 16 kHz
        #Annotate the timestamp
        txtFile.write(",{0}".format(timestamp))
        ##print len(data),rate, len(data)/rate, timestamp
        #calculate timestamp - when next speaker starts speaking
        timestamp = timestamp + len(data)/rate
        #add it to the list of speakers
        list.append(data)
        #empty the list of samples before moving onto next iteration
        files = []
    #write all samples by the speakers into one concatenated file
    wavfile.write(outputRoot+"/"+outputFileName+"_"+str(n)+".wav",rate,np.concatenate(list,axis=0))
    # Reset per-file state before the next output file
    timestamp = 0;
    list = []
    selected_folders = []
    txtFile.write("\n")

txtFile.close()
| StarcoderdataPython |
122630 | <reponame>hasithadkr7/udp_150
#!/usr/bin/python3
import datetime, re
# Pre-compiled format check: sign, two-digit hours, colon, two-digit minutes.
# Compiled once at import time instead of on every call.
_OFFSET_PATTERN = re.compile(r"[+-]\d\d:\d\d")


def getUTCOffset(utcOffset, default=False):
    """
    Get a timedelta instance for the given UTC offset string.

    The returned delta is the amount to ADD to a zoned timestamp to get back
    to UTC, so '+05:30' yields timedelta(hours=-5, minutes=-30) and '-05:30'
    yields timedelta(hours=5, minutes=30).

    :param string utcOffset: UTC offset in format of [+/-][HH]:[MM].
        Only the leading match is used; trailing characters are ignored
        (historical behavior, kept for backward compatibility).
    :param boolean default: If True then return a 00:00 time offset on
        invalid format. Otherwise return False on invalid format.
    """
    match = _OFFSET_PATTERN.match(utcOffset)
    if not match:
        if default:
            print("UTC_OFFSET :", utcOffset, " not in correct format. Using +00:00")
            return datetime.timedelta()
        return False
    utcOffset = match.group()
    # Positive zones are ahead of UTC (deduct), negative zones behind (add).
    sign = 1 if utcOffset[0] == "-" else -1
    hours_str, minutes_str = utcOffset[1:].split(':')
    return datetime.timedelta(hours=sign * int(hours_str),
                              minutes=sign * int(minutes_str))
| StarcoderdataPython |
186935 | # Front matter
##############
import os
from os import fdopen, remove
from tempfile import mkstemp
from shutil import move
import glob
import re
import time
import pandas as pd
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit, fsolve
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
from scipy.interpolate import spline
import math
import seaborn as sns
# Global plot styling: larger tick labels, seaborn 'ticks' style with
# inward-pointing ticks.
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
rc = {'lines.linewidth': 1,
      'axes.labelsize': 20,
      'axes.titlesize': 20,
      'legend.fontsize': 26,
      'xtick.direction': u'in',
      'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)

start_time = time.time()

# Input information
######################

# Path to get volumes from hcp-Fe Murphy dataset
EOSpath = '../../081_vD_Fe_PowerLaw/Results/input_values.csv'

# Path to get re-analyzed hcp-Fe Murphy PDOS
phoxpath = '../../050_phox_Fe_man/'

# Fei Gruneisen parameter variables
gamma0 = 1.74
q = 0.78

# From Dewaele 2006, agrees with Fei 2016 to third decimal place.
# Presumably the hcp unit-cell volume in A^3 (2 atoms) — see V0_check below.
V0 = 22.428
dV0 = 0.098

# Verification: recompute V0 from the Fei 2016 density (2 atoms per cell).
rho0 = 8.2695 # g/cc, Fei 2016 density
M = 55.845 # g/mol, for natural Fe
V0_ccpermol = M/rho0 # cm^3/mol
V0_check = V0_ccpermol*(2*10**24)/constants.N_A # A^3
print(V0_check)
# Functions
###########
def calcScalingParam(V, Vi, V0, gamma0, q):
    """Return the scaling factor xi relating a reference PDOS at volume Vi
    to volume V, using the volume-dependent Gruneisen parameter
    gamma(V) = gamma0 * (V/V0)**q.

    xi = exp( gamma(Vi) * ((V/Vi)**q - 1) / q ); xi == 1 when V == Vi.
    """
    gamma_i = gamma0 * (Vi / V0) ** q
    exponent = gamma_i * ((V / Vi) ** q - 1.0) / q
    return np.exp(exponent)
def scaleDOS(ref_folder, xi, dos_dict):
    """Scale the reference PDOS by xi and evaluate it on an energy grid.

    Returns a DataFrame with columns 'E' and 'DOS' (the scaled curve).

    NOTE(review): the target energy grid is taken from the module-global
    ``dos_df`` — i.e. the LAST PDOS loaded in the import loop above — not
    from ``dos_dict``. Confirm whether using a single common grid for every
    comparison is intended, or whether this should read dos_dict[folder].
    """
    dos_ref_df = dos_dict[ref_folder]
    # Interpolate the reference PDOS
    fdos = interp1d(dos_ref_df['E'], dos_ref_df['DOS'], kind='cubic')
    E_ref_min = min(dos_ref_df['E'])
    E_ref_max = max(dos_ref_df['E'])
    # Scale PDOS using xi
    # If xi > 1, we need to limit the energy range so we don't call the interpolated
    # reference function out of range
    E_min = max(E_ref_min/xi,min(dos_df['E']))
    E_max = min(E_ref_max/xi,max(dos_df['E']))
    dos_crop_df = dos_df[dos_df['E']>(E_min)]
    dos_crop_df = dos_crop_df[dos_crop_df['E']<(E_max)]
    # Energy-axis scaling by xi with amplitude renormalization by xi.
    dos_scaled = xi*fdos(xi*dos_crop_df['E'])
    # Save scaled PDOS
    dos_scaled_df = dos_crop_df[['E']].copy()
    dos_scaled_df['DOS'] = dos_scaled
    # dos_scaled_df.to_csv(ref_folder+'/scaledDOSdata/scaled2_'+folder+'.csv',index=False)
    return(dos_scaled_df)
def DOSsubplot(folder, ref_folder, number, ax):
    """Draw one measured PDOS onto ``ax``, offset vertically by 100 units per
    dataset; overlay the scaled reference curve unless this IS the reference.

    Reads the module-global ``dos_dict`` and ``dos_scaled_dict``.
    """
    offset = 100
    dos_df = dos_dict[folder]
    # ax.plot(dos_df['E'], dos_df['DOS']+offset*number,color='#1f77b4')
    # ax.fill_between(dos_df['E'], dos_df['DOS']-dos_df['dDOS']+offset*number,
    # dos_df['DOS']+dos_df['dDOS']+offset*number, facecolor='#1f77b4', alpha=0.3)
    # Measured PDOS with error bars, drawn behind the scaled overlay.
    ax.errorbar(dos_df['E'], dos_df['DOS']+offset*number,yerr=dos_df['dDOS'],
                marker='.',markersize=1.5,color='black',ecolor='darkgray',elinewidth=0.5,
                linestyle='none',zorder=-5)
    if folder != ref_folder:
        dos_scaled_df = dos_scaled_dict[folder]
        ax.plot(dos_scaled_df['E'], dos_scaled_df['DOS']+offset*number,color='red')
def plotScaledPDOS(ref_folder, folder_list, dos_dict, dos_scaled_dict):
    """Stack every PDOS (with scaled-reference overlays) in one tall figure
    and save it as <ref_folder>/Fei_scaledPDOS_Fe_narrow.pdf.

    NOTE(review): DOSsubplot reads the module-global dos_dict/dos_scaled_dict,
    so the identically named parameters here matter in name only.
    """
    fig, (ax) = plt.subplots(nrows = 1, ncols=1, figsize=(4,10))
    offsetnum = 0
    for folder in folder_list:
        DOSsubplot(folder,ref_folder,offsetnum,ax)
        offsetnum = offsetnum + 1
    ax.set_xlabel(r'Energy (meV)',fontsize = 16)
    ax.set_ylabel(r'PDOS $D(E,V)$',fontsize = 16)
    ax.set_xlim([0,85])
    ax.set_ylim(ymin=-10,ymax=1050)
    ax.xaxis.set_ticks([0,20,40,60,80])
    ax.tick_params(direction='in',left='off',top='on')
    ax.set_yticklabels([])
    fig.savefig(ref_folder+'/Fei_scaledPDOS_Fe_narrow.pdf', format='pdf', bbox_inches='tight')
    plt.close()
# Import data
#############
input_df = pd.read_csv(EOSpath, engine='python')
# # Only use hcp data (We measured the bcc phase, not Caitlin)
# input_df = input_df[input_df['Phase']=='hcp']
# # Data was out of order as imported. To fix that:
# input_df = input_df.sort_values('P')

# Load PDOS data
# Find the filepath of all .res NRIXS files in phox directories
respath_list = [filepath for filepath in glob.glob(phoxpath+'*/*.res')]

# Prep lists, dictionaries, and df to store input data in
folder_list = []
index_dict = dict()
dos_dict = dict()

# Collect folders, indices, paths, and input values
for respath in respath_list:
    # Determine filepaths for dos and in_psvl
    folder = re.findall('([A-Za-z0-9_]+)/[A-Za-z0-9_]+.res',respath)[0]
    index = re.findall('/([A-Za-z0-9_]+).res',respath)[0]
    dospath = phoxpath+folder+'/Output/'+index+'_dos.dat'
    # Check if each folder is hcp. Don't use it otherwise
    phase = input_df[input_df['Folder']==folder].iloc[-1]['Phase']
    if phase == 'hcp':
        # Import PDOS (NOTE: `dos_df` remains bound to the LAST file read —
        # scaleDOS() relies on this global; see its docstring)
        dos_df = pd.read_csv(dospath, sep='\s+', comment='@', header=None,
                             names = ['E','DOS','dDOS'])
        # Store to use PDOS later
        folder_list.append(folder)
        index_dict[folder] = index
        dos_dict[folder] = dos_df

# Sort folder_list by pressure (needed to plot in correct order)
sort_folder_df = pd.DataFrame(columns = ['Folder','P'])
for folder in folder_list:
    P = input_df[input_df['Folder']==folder].iloc[-1]['P']
    sort_folder_df = sort_folder_df.append(
        pd.DataFrame([[folder,P]],columns=sort_folder_df.columns))
sort_folder_df = sort_folder_df.sort_values('P')
folder_list = sort_folder_df['Folder'].values

# Plot scaled PDOS
##################
# Create a dataframe to store results in
results_df = pd.DataFrame(columns = ['Ref Folder','Ref Index','Vi','dVi',
                                     'Folder','Index','V','dV','V/Vi','xi'])
# for ref_folder in ['2009Oct_30GPa']:
for ref_folder in folder_list:
    print('Reference PDOS: '+ref_folder)
    # Check if a folder for the reference PDOS exists, and make one if not
    if not os.path.exists(ref_folder):
        os.makedirs(ref_folder)
    dos_ref_df = dos_dict[ref_folder]
    # What is the reference volume?
    Vi = input_df[input_df['Folder']==ref_folder].iloc[-1]['V']
    dVi = input_df[input_df['Folder']==ref_folder].iloc[-1]['dV']
    dos_scaled_dict = dict()
    # for folder in ['2011Feb_171GPa']:
    for folder in folder_list:
        print('\tScaling to '+folder)
        # What is the volume?
        V = input_df[input_df['Folder']==folder].iloc[-1]['V']
        dV = input_df[input_df['Folder']==folder].iloc[-1]['dV']
        V_Vi = V/Vi
        xi = calcScalingParam(V,Vi,V0,gamma0,q)
        dos_scaled_dict[folder] = scaleDOS(ref_folder,xi,dos_dict)
        results_df = results_df.append(pd.DataFrame([[
            ref_folder,index_dict[ref_folder],Vi,dVi,
            folder,index_dict[folder],V,dV,V_Vi,xi]],columns = results_df.columns))
    # Create plot of ref PDOS scaled to all other PDOS
    plotScaledPDOS(ref_folder,folder_list,dos_dict,dos_scaled_dict)
    # At this point in the nested loops, save results in case code crashes
    # Will be overwritten on each loop with updated results
    # (the 'dxi' key has no matching column and is ignored by DataFrame.round)
    results_df = results_df.round({'V/Vi':3,'xi':4,'dxi':4})
    results_df.to_csv('Results/Fei_scalingparameters.csv',index=False)
3580656 | <gh_stars>1-10
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
import torch_geometric.utils
from torch import Tensor, nn
from torch_geometric.nn.meta import MetaLayer
from torch_scatter import scatter_add, scatter_mean
from src.models.node_edge_blocks import *
class GraphNetworkBlock(MetaLayer):
    # Extension of torch_geometric.nn.meta.MetaLayer to support propagation of
    # hidden states for RNN-based node models: when `hidden` is given, it is
    # threaded through the node model and returned alongside the outputs.
    def __init__(self, edge_model=None, node_model=None, global_model=None):
        # NOTE(review): super(MetaLayer, self) deliberately skips
        # MetaLayer.__init__ and re-implements its body here — confirm this
        # stays in sync with the installed torch_geometric version.
        super(MetaLayer, self).__init__()
        self.edge_model = edge_model
        self.node_model = node_model
        self.global_model = global_model
        self.reset_parameters()

    def reset_parameters(self):
        # Reset every sub-model that knows how to reset itself.
        for item in [self.node_model, self.edge_model, self.global_model]:
            if hasattr(item, "reset_parameters"):
                item.reset_parameters()

    def forward(
        self,
        x: Tensor,
        edge_index: Tensor,
        edge_attr: Optional[Tensor] = None,
        u: Optional[Tensor] = None,
        batch: Optional[Tensor] = None,
        hidden=None,
    ):
        # Update order: edges first, then nodes, then the global attribute.
        row = edge_index[0]
        col = edge_index[1]
        if self.edge_model is not None:
            # Edge batch assignment follows the source node of each edge.
            btch = batch if batch is None else batch[row]
            # if hidden is None:
            edge_attr = self.edge_model(
                src=x[row],
                dest=x[col],
                edge_attr=edge_attr,
                u=u,
                batch=btch,
                edge_index=edge_index,
            )
        if self.node_model is not None:
            if hidden is None:
                x = self.node_model(x, edge_index, edge_attr, u, batch)
            else:
                # Recurrent node models consume and produce a hidden state.
                x, hidden = self.node_model(
                    x, edge_index, edge_attr, u, batch, hidden=hidden
                )
        if self.global_model is not None:
            u = self.global_model(x, edge_index, edge_attr, u, batch)
        # Hidden state is only returned when one was supplied.
        if hidden is None:
            return x, edge_attr, u
        else:
            return x, edge_attr, u, hidden

    def __repr__(self):
        return (
            "{}(\n"
            "  edge_model={},\n"
            "  node_model={},\n"
            "  global_model={}\n"
            ")"
        ).format(
            self.__class__.__name__, self.edge_model, self.node_model, self.global_model
        )
class NodeNN(nn.Module):
    """Node-wise two-stage MLP with a skip connection.

    Stage one maps raw node features to a latent vector; stage two maps the
    concatenation [raw features, latent] to ``out_features`` values per node.
    The extra forward() arguments exist only for interface parity with the
    graph models and are ignored.
    """

    def __init__(
        self,
        hidden_size: int = 64,
        node_features: int = 5,
        n_nodes: int = 10,
        dropout: float = 0.0,
        out_features: int = 7,
    ):
        super(NodeNN, self).__init__()
        self.hidden_size = hidden_size
        self.node_features = node_features
        self.n_nodes = n_nodes
        self.dropout = dropout
        self.out_features = out_features
        # Attribute names mlp_1/mlp_2 are kept for state_dict compatibility.
        self.mlp_1 = self._two_layer_mlp(node_features, hidden_size, hidden_size, dropout)
        self.mlp_2 = self._two_layer_mlp(
            node_features + hidden_size, hidden_size, self.out_features, dropout
        )

    @staticmethod
    def _two_layer_mlp(in_dim, mid_dim, out_dim, dropout):
        # Linear -> Dropout -> ReLU -> Linear, mirroring the original layout
        # (and therefore the original parameter-initialization order).
        return nn.Sequential(
            nn.Linear(in_features=in_dim, out_features=mid_dim),
            nn.Dropout(p=dropout),
            nn.ReLU(),
            nn.Linear(in_features=mid_dim, out_features=out_dim),
        )

    def forward(self, x, edge_index=None, edge_attr=None, batch=None, u=None):
        latent = self.mlp_1(x)
        # Skip connection: concatenate raw input with the latent encoding.
        combined = torch.cat([x, latent], dim=1)
        return self.mlp_2(combined)
class NodeRNN(nn.Module):
    # Node-wise recurrent MLP with skip connection: an RNN encodes each node's
    # feature history, and an MLP maps [raw features, history] to the output.
    # No edge information is used; the edge part of `hidden` is passed through.
    def __init__(
        self,
        hidden_size: int = 64,
        node_features: int = 5,
        dropout: float = 0.0,
        rnn_size: int = 20,
        num_layers: int = 1,
        rnn_type: str = "LSTM",
        out_features: int = 4,
    ):
        super(NodeRNN, self).__init__()
        self.hidden_size = hidden_size
        self.node_features = node_features
        self.rnn_size = rnn_size
        self.dropout = dropout
        self.out_features = out_features
        self.num_layers = num_layers
        # This model keeps no edge history (size 0), unlike RMPGNN below.
        self.rnn_edge_size = 0
        # Node history encoder.
        # Computes a node-wise representation which incorporates the nodes' respective histories.
        self.node_history_encoder = node_rnn_simple(
            node_features=node_features,
            edge_features=0,
            rnn_size=rnn_size,
            dropout=dropout,
            num_layers=num_layers,
            rnn_type=rnn_type,
        )
        # MLP mapping [raw features, history] to the per-node output.
        self.mlp = nn.Sequential(
            nn.Linear(in_features=rnn_size + node_features, out_features=hidden_size),
            nn.Dropout(p=dropout),
            nn.ReLU(),
            nn.Linear(in_features=hidden_size, out_features=out_features),
        )

    def forward(
        self, x, edge_index=None, edge_attr=None, batch=None, u=None, hidden=None
    ):
        # Discard hidden edge component since only node history is being encoded
        hidden_node = hidden[0]
        # Forward pass through the recurrent history encoder
        node_history, hidden_node = self.node_history_encoder(x=x, hidden=hidden_node)
        # Skip connection
        out = torch.cat([x, node_history], dim=1)
        # Second pass
        out = self.mlp(out)
        # Return outputs plus the (updated node, untouched edge) hidden pair.
        return out, (hidden_node, hidden[1])
class AttentionalGNN(nn.Module):
    """GAT-style model: an input attention layer, an optional middle attention
    layer with a skip connection across it, and an output attention layer.
    Multi-head outputs are concatenated, so intermediate widths are
    hidden_size * heads."""

    def __init__(
        self,
        hidden_size: int = 64,
        node_features: int = 5,
        dropout: float = 0.0,
        heads: int = 4,
        middle_gat: bool = False,
        out_features: int = 4,
        edge_features: int = 1,
        norm: bool = False,
    ):
        super(AttentionalGNN, self).__init__()
        self.hidden_size = hidden_size
        self.node_features = node_features
        self.dropout = dropout
        self.middle_gat = middle_gat
        self.GN1 = GraphNetworkBlock(
            node_model=node_gat_in(
                node_features=node_features,
                dropout=dropout,
                heads=heads,
                out_features=hidden_size,
                edge_features=edge_features,
                norm=norm,
            ),
        )
        if middle_gat:
            # Middle layer consumes the concatenated multi-head output of GN1.
            self.GN2 = GraphNetworkBlock(
                node_model=node_gat_in(
                    node_features=hidden_size * heads,
                    dropout=dropout,
                    heads=heads,
                    out_features=hidden_size,
                    edge_features=edge_features,
                    norm=norm,
                ),
            )
            # Apply skip connection across this layer. Compute new input size
            # (the local variable is doubled so GN3 sees [GN1 out, GN2 out]).
            hidden_size = hidden_size * 2
        self.GN3 = GraphNetworkBlock(
            node_model=node_gat_out(
                node_features=hidden_size * heads,
                dropout=dropout,
                heads=heads,
                out_features=out_features,
                edge_features=edge_features,
            ),
        )

    def forward(self, x, edge_index, edge_attr, batch=None, u=None):
        # Input GAT
        out, _, _ = self.GN1(
            x=x, edge_index=edge_index, edge_attr=edge_attr, u=u, batch=batch
        )
        if self.middle_gat:
            out_middle, _, _ = self.GN2(
                x=out, edge_index=edge_index, edge_attr=edge_attr, u=u, batch=batch
            )
            # Skip connection
            out = torch.cat([out, out_middle], dim=-1)
        out, _, _ = self.GN3(
            x=out, edge_index=edge_index, edge_attr=edge_attr, u=u, batch=batch
        )
        return out
class ConvolutionalGNN(nn.Module):
    """Single graph-convolution (GCN-style) node update wrapped in a
    GraphNetworkBlock; maps node_features to out_features in one pass."""

    def __init__(
        self,
        hidden_size: int = 64,
        node_features: int = 5,
        dropout: float = 0.0,
        skip: bool = False,
        out_features: int = 4,
        edge_features: int = 1,
    ):
        super(ConvolutionalGNN, self).__init__()
        self.hidden_size = hidden_size
        self.node_features = node_features
        self.dropout = dropout
        self.GN1 = GraphNetworkBlock(
            node_model=node_gcn(
                node_features=node_features,
                dropout=dropout,
                skip=skip,
                out_features=out_features,
                edge_features=edge_features,
                hidden_size=hidden_size,
            ),
        )

    def forward(self, x, edge_index, edge_attr, batch=None, u=None):
        # Single graph-convolution pass (original comment said "GAT" in error).
        out, _, _ = self.GN1(
            x=x, edge_index=edge_index, edge_attr=edge_attr, u=u, batch=batch
        )
        return out
class MPGNN(nn.Module):
    # Implementation of the 'forward GN model' from <https://arxiv.org/abs/1806.01242> by Battaglia et al.
    # Two GN blocks; the output of block 1 is (optionally graph-aggregated and)
    # concatenated with the raw inputs before block 2.
    def __init__(
        self,
        hidden_size: int = 64,
        node_features: int = 5,
        dropout: float = 0.0,
        edge_features: int = 0,
        latent_edge_features: int = 0,
        skip: bool = True,
        aggregate: bool = False,
        out_features: int = 4,
    ):
        super(MPGNN, self).__init__()
        self.hidden_size = hidden_size
        self.node_features = node_features
        self.dropout = dropout
        self.edge_features = edge_features
        self.latent_edge_features = latent_edge_features
        # Whether to perform a graph-wise node feature aggregation after the first GN block
        self.aggregate = aggregate
        # GN block with node and edge update functions
        self.GN1 = GraphNetworkBlock(
            edge_model=edge_mlp_1(
                node_features=node_features,
                edge_features=edge_features,
                hidden_size=hidden_size,
                dropout=dropout,
                latent_edge_features=latent_edge_features,
            ),
            node_model=node_mlp_1(
                hidden_size=hidden_size,
                node_features=node_features,
                dropout=dropout,
                edge_features=latent_edge_features,
            ),
        )
        # Determine dimensions of second GN block.
        # NOTE(review): forward() ALWAYS concatenates raw and latent features,
        # so constructing with skip=False makes GN2's input sizes inconsistent
        # with what forward() feeds it — confirm skip=False is ever used.
        GN2_node_input = node_features + hidden_size if skip else hidden_size
        GN2_edge_input = (
            edge_features + latent_edge_features if skip else latent_edge_features
        )
        self.GN2 = GraphNetworkBlock(
            edge_model=edge_mlp_1(
                node_features=GN2_node_input,
                edge_features=GN2_edge_input,
                hidden_size=hidden_size,
                dropout=dropout,
                latent_edge_features=latent_edge_features,
            ),
            node_model=node_mlp_out(
                hidden_size=hidden_size,
                node_features=GN2_node_input,
                dropout=dropout,
                edge_features=latent_edge_features,
                out_features=out_features,
            ),
        )

    def forward(self, x, edge_index, edge_attr, batch=None, u=None):
        # First block
        x_1, edge_attr_1, _ = self.GN1(
            x=x, edge_index=edge_index, edge_attr=edge_attr, u=u, batch=batch
        )
        # Concatenation of node and edge attributes
        if self.aggregate:
            # Sum node features per graph, then broadcast back to each node.
            x_1 = scatter_add(x_1, batch, dim=0)
            x_1 = torch.cat([x, x_1[batch]], dim=1)
        else:
            x_1 = torch.cat([x, x_1], dim=1)
        edge_attr_1 = torch.cat([edge_attr, edge_attr_1], dim=1)
        # Second block
        out, _, _ = self.GN2(
            x=x_1, edge_index=edge_index, edge_attr=edge_attr_1, u=u, batch=batch
        )
        return out
class RMPGNN(nn.Module):
    # Recurrent message-passing GNN: per-node feature history (RNN) plus a
    # per-node aggregated interaction history (messages fed into an edge RNN),
    # combined by an output MLP.
    def __init__(
        self,
        hidden_size: int = 64,
        dropout: float = 0.0,
        node_features: int = 5,
        edge_features: int = 0,
        num_layers: int = 1,
        rnn_size: int = 20,
        rnn_edge_size: int = 8,
        out_features: int = 4,
        rnn_type: str = "LSTM",
        latent_edge_features: int = 32,
    ):
        super(RMPGNN, self).__init__()
        self.num_layers = num_layers
        self.rnn_size = rnn_size
        self.rnn_edge_size = rnn_edge_size
        self.dropout = dropout
        # Node history encoder.
        # Computes a node-wise representation which incorporates the nodes' respective histories.
        self.node_history_encoder = node_rnn_simple(
            node_features=node_features,
            edge_features=0,
            rnn_size=rnn_size,
            dropout=dropout,
            num_layers=num_layers,
            rnn_type=rnn_type,
        )
        # GN-block to compute messages/interactions between nodes
        self.message_gn = GraphNetworkBlock(
            edge_model=edge_mlp_1(
                node_features=node_features,
                edge_features=edge_features,
                hidden_size=hidden_size,
                dropout=dropout,
                latent_edge_features=latent_edge_features,
            )
        )
        # Interaction history encoder.
        # Computes a node-wise history which incorporates influences from neighbouring nodes using messages.
        self.edge_history_encoder = edge_rnn_1(
            edge_features=latent_edge_features,
            rnn_size=rnn_edge_size,
            dropout=dropout,
            num_layers=num_layers,
            rnn_type=rnn_type,
        )
        # Output GN: maps the concatenated histories to per-node outputs.
        self.node_output = node_mlp_out_global(
            hidden_size=hidden_size,
            node_features=rnn_size + rnn_edge_size,
            dropout=dropout,
            out_features=out_features,
        )

    def forward(self, x, edge_index, edge_attr, u, hidden: tuple, batch=None):
        # x: [n_nodes, node_features]
        # edge_index: [2, n_edges]
        # edge_attr : [n_edges, edge_features]
        # u: [n_nodes, local_map_resolution]  (unused by this model)
        # Unpack hidden states
        h_node, h_edge = hidden
        # Encode node histories. Shape [n_nodes, rnn_size]
        node_history, h_node = self.node_history_encoder(
            x=x, edge_index=None, edge_attr=None, u=None, batch=None, hidden=h_node
        )
        # Compute messages. Shape [n_edges, latent_edge_features]
        _, messages, _ = self.message_gn(
            x=x, edge_index=edge_index, edge_attr=edge_attr, u=None, batch=None
        )
        # Aggregate and encode edge histories. Shape [n_nodes, rnn_edge_size]
        node_interaction_history, h_edge = self.edge_history_encoder(
            edge_attr=messages, hidden=h_edge, edge_index=edge_index, x_size=x.size(0)
        )
        # Concatenate. Shape [n_nodes, rnn_size+rnn_edge_size]
        full_node_representation = torch.cat(
            [node_history, node_interaction_history], dim=-1
        )
        # Final node update. [n_nodes, out_features]
        out = self.node_output(x=full_node_representation)
        return out, (h_node, h_edge)
class LocalRMPGNN(nn.Module):
    # Recurrent message-passing GNN with local map information: identical to
    # RMPGNN except that a per-node map encoding `u` (map_encoding_size wide)
    # is concatenated before the output MLP.
    def __init__(
        self,
        hidden_size: int = 64,
        dropout: float = 0.0,
        node_features: int = 5,
        edge_features: int = 0,
        num_layers: int = 1,
        rnn_size: int = 20,
        rnn_edge_size: int = 8,
        out_features: int = 4,
        rnn_type: str = "LSTM",
        latent_edge_features: int = 32,
        map_encoding_size: int = 32,
    ):
        super(LocalRMPGNN, self).__init__()
        self.num_layers = num_layers
        self.rnn_size = rnn_size
        self.rnn_edge_size = rnn_edge_size
        self.dropout = dropout
        # Node history encoder.
        # Computes a node-wise representation which incorporates the nodes' respective histories.
        self.node_history_encoder = node_rnn_simple(
            node_features=node_features,
            edge_features=0,
            rnn_size=rnn_size,
            dropout=dropout,
            num_layers=num_layers,
            rnn_type=rnn_type,
        )
        # GN-block to compute messages/interactions between nodes
        self.message_gn = GraphNetworkBlock(
            edge_model=edge_mlp_1(
                node_features=node_features,
                edge_features=edge_features,
                hidden_size=hidden_size,
                dropout=dropout,
                latent_edge_features=latent_edge_features,
            )
        )
        # Interaction history encoder.
        # Computes a node-wise history which incorporates influences from neighbouring nodes using messages.
        self.edge_history_encoder = edge_rnn_1(
            edge_features=latent_edge_features,
            rnn_size=rnn_edge_size,
            dropout=dropout,
            num_layers=num_layers,
            rnn_type=rnn_type,
        )
        # Output GN: input additionally carries the map encoding.
        self.node_output = node_mlp_out_global(
            hidden_size=hidden_size,
            node_features=rnn_size + rnn_edge_size + map_encoding_size,
            dropout=dropout,
            out_features=out_features,
        )

    def forward(self, x, edge_index, edge_attr, u, hidden: tuple, batch=None):
        # x: [n_nodes, node_features]
        # edge_index: [2, n_edges]
        # edge_attr : [n_edges, edge_features]
        # u: [n_nodes, local_map_resolution]
        # Unpack hidden states
        h_node, h_edge = hidden
        # Encode node histories. Shape [n_nodes, rnn_size]
        node_history, h_node = self.node_history_encoder(
            x=x, edge_index=None, edge_attr=None, u=None, batch=None, hidden=h_node
        )
        # Compute messages. Shape [n_edges, latent_edge_features]
        _, messages, _ = self.message_gn(
            x=x, edge_index=edge_index, edge_attr=edge_attr, u=None, batch=None
        )
        # Aggregate and encode edge histories. Shape [n_nodes, rnn_edge_size]
        node_interaction_history, h_edge = self.edge_history_encoder(
            edge_attr=messages, hidden=h_edge, edge_index=edge_index, x_size=x.size(0)
        )
        # Concatenate. Shape [n_nodes, rnn_size+rnn_edge_size+map_encoding_size]
        full_node_representation = torch.cat(
            [node_history, node_interaction_history, u], dim=-1
        )
        # Final node update. [n_nodes, out_features]
        out = self.node_output(x=full_node_representation)
        return out, (h_node, h_edge)
| StarcoderdataPython |
11328546 | import os
from flask import Flask, url_for, request, flash ,render_template , session , redirect , send_file
from flask_admin import Admin
from DatabaseHelper import DatabaseHelper
import model
from model import db as database
import myAdmin
from flask_uploads import UploadSet , IMAGES , configure_uploads
### Define variables and paths
basedir = os.path.abspath(os.path.dirname(__file__))
static_path = os.path.join(basedir, 'static')
user_images_path = os.path.join(static_path, 'user_images')    # uploaded profile pictures
certificates_path = os.path.join(static_path, 'certificates')  # per-user certificate images
database_path = os.path.join(basedir, 'my-database.sqlite')
# initiate flask app
app = Flask(__name__,)
# change the secret key
# NOTE(review): both secrets below are hard-coded placeholders; replace with
# real random values before any deployment.
app.secret_key = "secret key"
app.config['SECRET_KEY'] = 'my secret key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + database_path
app.config['FLASK_ADMIN_SWATCH'] = 'cerulean'
# helper object for sqlalchemy
helper = DatabaseHelper()
# configure uploads for uploading profile images
photos = UploadSet('photos', IMAGES)
app.config['UPLOADED_PHOTOS_DEST'] = user_images_path
configure_uploads(app, photos)
@app.route('/')
@app.route('/home')
def home():
    """Render the public landing page."""
    # The session is passed through so the template can adapt to login state.
    return render_template("home.html", session=session)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    On POST, validates the submitted email/password pair via the database
    helper and, on success, stores the user's id, username and image in the
    session before redirecting to the profile page.  Non-POST requests are
    bounced back to the home page (which hosts the form).
    """
    if request.method != 'POST':
        return redirect(url_for("home"))
    email = request.form["email"]
    password = request.form["password"]
    if not (email and password):
        # Fixed typo in the user-facing message ("fileds" -> "fields").
        flash("Fill all the fields!")
        return redirect(url_for("home"))
    # The helper signals a failed lookup by returning -1.
    data = helper.getUserData(email, password)
    if data == -1:
        flash("Email or password incorrect.")
        return redirect(url_for("home"))
    session["image"] = data["image"]
    session["username"] = data["username"]
    session["id"] = data["id"]
    return redirect(url_for("profile"))
@app.route('/sign_up', methods=['GET', 'POST'])
def sign_up():
    """Register a new user from the sign-up form.

    Validates that all fields are present and that both password fields
    match, then delegates the insert to the database helper, which returns
    -1 when the username or email is already taken.
    """
    if request.method != 'POST':
        return redirect(url_for("home"))
    full_name = request.form["full_name"]
    password = request.form["password"]
    re_password = request.form["re_password"]
    email = request.form["email"]
    username = request.form["username"]
    if not (full_name and password and re_password and email and username):
        # Fixed typo in the user-facing message ("fileds" -> "fields").
        flash("Fill all the fields!")
        return redirect(url_for("home"))
    if password != re_password:
        # Fixed grammar of the user-facing message.
        flash("Passwords do not match")
        return redirect(url_for("home"))
    data = helper.insertIntoUsers((full_name, username, email, password))
    if data == -1:
        flash("Username or email already taken !")
        return redirect(url_for("home"))
    flash("Sign up done !")
    return redirect(url_for('home'))
@app.route('/profile', methods=['GET', 'POST'])
def profile():
    """Show the logged-in user's profile page.

    GET renders the profile with the user's certificates; POST handles a
    profile-image upload, replacing any previously stored image file.
    """
    # Anonymous visitors are sent back to the home/login page.
    if not "id" in session:
        flash("You have to log in first !")
        return redirect(url_for("home"))
    if request.method == "POST":
        image = request.files["image"]
        # Prefix the stored name with the owner's id so that access control
        # in user_images() can authorize requests by filename.
        image_name = 'ID-%s-%s' % (session["id"], image.filename)
        image_name = photos.save(request.files['image'], name=image_name)
        if image_name:
            user = model.User.query.filter_by(id=session["id"]).first()
            # Delete the previous image file, if any.
            if user.image:
                try:
                    os.remove(os.path.join(user_images_path, user.image))
                except OSError:
                    pass  # already gone or not removable; not fatal
            try:
                # Bug fix: the original redundantly re-queried `user`
                # immediately after assigning the new image name.
                user.image = image_name
                database.session.commit()
                session["image"] = image_name
            except Exception:
                return "ERROR"
    # All certificates belonging to this user.
    certificates = model.Certificate.query.filter(
        model.Certificate.user_id == session["id"])
    n = certificates.count()
    # Fall back to a placeholder when no profile image was uploaded yet.
    if session["image"] is None:
        image = 'no_image.jpg'
    else:
        image = session["image"]
    return render_template("profile.html", username=session["username"],
                           image=image,
                           n=n,
                           certificates=certificates)
def send_image(path, filename):
    """Stream the image file *filename* from directory *path* to the client."""
    full_path = os.path.join(path, filename)
    return send_file(full_path, mimetype='image/gif')
@app.route('/user_images/<path:filename>')
def user_images(filename):
    """Serve a profile image, enforcing per-user access control.

    Each stored image is prefixed with "ID-<owner id>-" (see profile()), so a
    request is authorized by comparing that prefix against the session id.
    """
    # The placeholder image is public, and a logged-in admin may view anything.
    if "admin" in session or filename == "no_image.jpg":
        return send_image(user_images_path, filename)
    # Anonymous visitors may not view any user image.
    if not "id" in session:
        return "You don't have permission"
    # Security fix: the prefix must include the trailing '-'.  The original
    # compared against "ID-<id>" only, so user 1 could fetch images belonging
    # to users 10, 11, 100, ... ("ID-1" is a prefix of "ID-10-x.jpg").
    prefix = "ID-%s-" % session["id"]
    if filename.startswith(prefix):
        return send_image(user_images_path, filename)
    return "You don't have permission"
@app.route('/certificates/<path:filename>')
def certificates(filename):
    """Serve a certificate image with the same access rules as user_images()."""
    if "admin" in session:
        return send_image(certificates_path, filename)
    if not "id" in session:
        return "You don't have permission"
    # Security fix: include the trailing '-' so user 1 cannot match the
    # certificates of users 10, 11, 100, ... (same bug as in user_images()).
    prefix = "ID-%s-" % session["id"]
    if filename.startswith(prefix):
        return send_image(certificates_path, filename)
    return "You don't have permission"
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """Drop the user's session keys and return to the home page."""
    if "username" in session:
        for key in ("id", "username", "image"):
            session.pop(key, None)
    return redirect(url_for('home'))
# admin login
@app.route('/dashboard_login', methods=['GET', 'POST'])
def dashboard_login():
    """Render the admin login form and authenticate admin credentials."""
    if request.method == 'POST':
        email = request.form["email"]
        password = request.form["password"]
        if email == "<EMAIL>" and password == "<PASSWORD>":
            # Mark this session as an admin session.
            session["admin"] = True
            return redirect(url_for("admin.index"))
        flash("Admin email or password incorrect")
        return redirect(url_for('dashboard_login'))
    return render_template("dashboard_login.html")
@app.route('/admin_logout')
def admin_logout():
    """Revoke admin rights and go back to the dashboard login page."""
    # pop() with a default is a no-op when the key is absent.
    session.pop("admin", None)
    return redirect(url_for('dashboard_login'))
@app.errorhandler(404)
def page_not_found(e):
    """Render a plain 404 message with the correct HTTP status code.

    Bug fix: the original returned only the string, which Flask sends with
    a misleading 200 OK status; the handler must return 404 explicitly.
    """
    return "Page not found", 404
# Delete database and create new one
def build_sample_db(database):
    """Drop every table and recreate the schema from the current models.

    Destructive: all existing rows are lost.
    """
    database.drop_all()
    database.create_all()
    database.session.commit()
if __name__ == '__main__':
    # Bind the SQLAlchemy instance from model.py to this app.
    database.app = app
    database.init_app(app)
    # Flask-Admin dashboard with the custom (access-controlled) views.
    admin = Admin(app, index_view=myAdmin.MyHomeView())
    admin.add_view(myAdmin.UserView(model.User, database.session))
    admin.add_view(myAdmin.CertificateView(model.Certificate, database.session))
    admin.add_link(myAdmin.MenuLink(name='Logout', category='', url='/admin_logout'))
    # if the database does not exist, create it and seed two sample users
    if not os.path.exists(database_path):
        build_sample_db(database)
        usernames = [
            {"username": "davis", "email": "<EMAIL>", "password": "<PASSWORD>", "full_name": "<NAME>"},
            {"username": "irwin", "email": "<EMAIL>", "password": "<PASSWORD>", "full_name": "<NAME>"}]
        for u in usernames:
            user = model.User()
            user.username = u["username"]
            user.email = u["email"]
            # NOTE(review): the password appears to be stored as-is here (no
            # hashing visible) -- confirm against model/DatabaseHelper.
            user.password = u["password"]
            user.full_name = u["full_name"]
            database.session.add(user)
            database.session.commit()
    app.run(debug=True)
| StarcoderdataPython |
9760205 | """
Defines custom middleware for users and user management.
"""
import re
from django.conf import settings
from django.shortcuts import render
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from rest_framework.views import APIView
from .views import PublicView
class UserExpiryMiddleware(object):
    """
    Django Middleware to check for user expiry.
    Does not apply to django rest framework requests.
    See rest_addons for django rest framework support.
    """

    def __init__(self, get_response):
        # One-time configuration and initialization.
        self.get_response = get_response

    def __call__(self, request):
        # Runs for each request before the view (and later middleware).
        user = request.user
        if user.is_anonymous or not user.expired():
            # Normal flow: hand the request down the middleware chain.
            return self.get_response(request)
        # Expired account: build the error context (username is read before
        # logging out), then log out and render a 403 error page.
        context = {
            'title': 'Error: Unauthorized',
            'message': 'User {} is expired.'.format(user.username),
        }
        logout(request)
        return render(request, 'core/base_error.html', context=context, status=403)
class LoginRequiredMiddleware(object):
    """
    Middleware that makes all views require a login.
    To exempt a view from requiring a login, use @login_not_required or use the
    PublicView class.  Another method is to add PUBLIC_VIEWS or PUBLIC_PATHS to
    the settings to set specific views or paths as public.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization: resolve the configured
        # public views and compile the public path regexes once at startup.
        self.public_patterns = []
        self.public_views = []
        if hasattr(settings, 'PUBLIC_VIEWS'):
            for view_path in settings.PUBLIC_VIEWS:
                view = self.get_view(view_path)
                self.public_views.append(view)
        if hasattr(settings, 'PUBLIC_PATHS'):
            for public_path in settings.PUBLIC_PATHS:
                self.public_patterns.append(re.compile(public_path))

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response

    def get_view(self, view_path):
        """Import and return the view object named by dotted *view_path*."""
        i = view_path.rfind('.')
        module_path, view_name = view_path[:i], view_path[i + 1:]
        module = __import__(module_path, globals(), locals(), [view_name])
        return getattr(module, view_name)

    def matches_public_view(self, view):
        """Return True when *view* is one of the configured PUBLIC_VIEWS."""
        if self.public_views:
            for public_view in self.public_views:
                if view == public_view:
                    return True
        return False

    def matches_public_path(self, path):
        """Return True when *path* matches any configured PUBLIC_PATHS regex."""
        if self.public_patterns:
            for pattern in self.public_patterns:
                # NOTE: re.match anchors at the start of the path only.
                if pattern.match(path) is not None:
                    return True
        return False

    def process_view(self, request, view_func, view_args, view_kwargs):
        """Django view-middleware hook.

        Lets the request through (returns None) for authenticated users, DRF
        APIView-based views, PublicView views, and configured public
        views/paths; otherwise wraps the view in login_required and calls it.
        """
        if request.user.is_authenticated \
                or (hasattr(view_func, 'view_class') and issubclass(view_func.view_class, (APIView,))) \
                or (isinstance(view_func, PublicView)) \
                or (hasattr(view_func, 'view_class') and issubclass(view_func.view_class, PublicView)) \
                or self.matches_public_path(request.path) \
                or self.matches_public_view(view_func):
            return None
        else:
            return login_required(view_func)(request, *view_args, **view_kwargs)
class UserActivityMiddleware(object):
    """Middleware skeleton for recording user GET/POST activity.

    Requests are dispatched to a per-method handler; the handlers are
    placeholders for now, so the activity library stays empty.
    """

    def __init__(self, get_response):
        self.get_response = get_response
        # method -> authentication state -> (not yet populated) activity data
        self._library = {
            method: {"authenticated": {}, "not authenticated": {}}
            for method in ("GET", "POST")
        }

    def __call__(self, request):
        self._process_request(request)
        return self.get_response(request)

    def _process_request(self, request):
        # Dispatch on the HTTP method; an unknown method raises KeyError,
        # matching the original mapping-based behaviour.
        dispatch = {
            "GET": self._process_get_calls,
            "POST": self._process_post_calls,
        }
        dispatch[request.method](request, request.method,
                                 request.user.is_authenticated)

    def _process_get_calls(self, request, method, is_authenticated):
        pass  # placeholder

    def _process_post_calls(self, request, method, is_authenticated):
        pass  # placeholder
3480857 | <reponame>ThomasYeoLab/Standalone_He2022_MM<filename>stable_projects/predict_phenotypes/He2022_MM/cbig/He2022/CBIG_ukbb_dnn_haufe.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by <NAME> and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
"""
import os
import random
import argparse
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.utils.data
from torch.utils.data import DataLoader
from config import config
from CBIG_model_pytorch import ukbb_multi_task_dataset
def pred_y_train(x_train, y_train_dummy, index, device, args):
    '''pred y with training meta-set data

    Args:
        x_train (ndarray): training set of training meta-set FC data
        y_train_dummy (ndarray): dummy y data (the caller passes zeros; its
            second dimension fixes the width of the prediction array)
        index (int): optimal dnn epoch index for base training
        device (torch.device): the gpu that torch runs on
        args: args from command line

    Returns:
        ndarray: predicted y for haufe transform
    '''
    dset_train = ukbb_multi_task_dataset(x_train, y_train_dummy, True)
    # shuffle=False keeps predictions aligned with the input row order.
    trainloader = DataLoader(
        dset_train, batch_size=128, shuffle=False, num_workers=8)
    # Load the full model checkpoint saved at the chosen epoch of the base run.
    weight_path = os.path.join(
        args.out_dir, 'trained_model_ukbb',
        'dnn_model_save_base_cross_dataset',
        'CBIG_ukbb_dnn_run_0_epoch_' + str(index) + '.pkl_torch')
    net = torch.load(weight_path)
    net.train(False)  # evaluation mode (equivalent to net.eval())
    record_pred = np.zeros((0, y_train_dummy.shape[1]))
    for (x, _) in trainloader:
        x = x.to(device)
        outputs = net(x)
        record_pred = np.concatenate((record_pred, outputs.data.cpu().numpy()),
                                     axis=0)
    return np.squeeze(record_pred)
def get_haufe_ukbb_training(args):
    '''Get data for haufe transform on UK Biobank training meta-set

    Args:
        args: args from command line

    Returns:
        None (writes haufe_y_pred_train.npz into args.out_dir)
    '''
    print('\nArgument: ' + str(args))
    # set all the seeds (python, numpy, torch CPU/GPU) for reproducibility
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tuned_by = 'cod'
    os.makedirs(os.path.join(args.out_dir, 'tmp'), exist_ok=True)
    # load base prediction result
    npz = os.path.join(args.out_dir, 'dnn_across_dataset_base.npz')
    npz = np.load(npz)
    val_record = npz['val_' + tuned_by + '_record']
    # 3-epoch moving average of the mean validation metric, then pick the
    # best position; +1 presumably maps that position back to the saved
    # epoch numbering -- TODO confirm against the training script.
    temp = np.mean(val_record[0, :, :], axis=1)
    temp = np.convolve(temp, np.ones(3, dtype=int), 'valid') / 3
    index = np.nanargmax(temp)
    index = index + 1
    print('\nBest validation at index: ', index)
    # load original data
    npz = os.path.join(args.in_dir, 'ukbb_dnn_input_cross_dataset.npz')
    npz = np.load(npz, allow_pickle=True)
    tra_phe = npz['tra_phe']
    x_train_raw = npz['x_train_raw']
    y_train_raw = npz['y_train_raw']
    # Re-create an 80/20 split with the same seed (presumably matching the
    # split used during training -- verify) and keep the training portion.
    split_tra, _ = train_test_split(
        np.arange(x_train_raw.shape[0]), test_size=0.2, random_state=seed)
    x_train = x_train_raw[split_tra, :]
    y_train = y_train_raw[split_tra, :]
    # Targets are not needed for inference; only the shape is used.
    y_train_dummy = np.zeros(y_train.shape)
    y_pred_train = pred_y_train(x_train, y_train_dummy, index, device, args)
    npz_train_pred = os.path.join(args.out_dir, 'haufe_y_pred_train.npz')
    np.savez(
        npz_train_pred,
        y_pred_train=y_pred_train,
        x_train=x_train,
        tra_phe=tra_phe)
    return
def get_args():
    '''function to get args from command line and return the args

    Returns:
        argparse.Namespace: parsed args that could be used by other functions
        (the previous docstring incorrectly said argparse.ArgumentParser)
    '''
    parser = argparse.ArgumentParser()
    # general parameters
    parser.add_argument('--out_dir', '-o', type=str, default=config.OUT_DIR)
    parser.add_argument('--in_dir', type=str, default=config.IN_DIR)
    # NOTE(review): the config attribute really is spelled RAMDOM_SEED.
    parser.add_argument('--seed', type=int, default=config.RAMDOM_SEED)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--split', type=str, default='test')
    return parser.parse_args()
if __name__ == '__main__':
    # Entry point: parse CLI args, then run the haufe-transform extraction.
    get_haufe_ukbb_training(get_args())
| StarcoderdataPython |
265272 | <gh_stars>0
from abc import ABC, abstractmethod
from overrides import overrides
import networkx as nx
from task_answer import TaskAnswer
from test_parser import Parser
class Graph(ABC):
    """Abstract base class for the weight-redistribution task graph.

    Class-level attributes hold the shared problem instance: _n vertices,
    _X initial vertex weights, _Y target vertex weights, and _edges as
    (u, v) pairs.  _graph_impl is the backing graph object of the concrete
    subclass (e.g. a networkx.Graph in NxGraph).
    """
    _graph_impl = None
    _n = 0
    _X = None
    _Y = None
    _edges = None

    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def init_from_txt(self, filename):
        """Load the problem instance (n, X, Y, edges) from a text file."""
        pass

    @abstractmethod
    def _create_graph(self):
        """Build the backing graph from the class-level parameters."""
        pass

    @abstractmethod
    def print_graph(self, only_nodes=False):
        """Debug print of the graph's nodes (and optionally edges)."""
        pass

    @abstractmethod
    def interpretate_task_answer(self, answer: TaskAnswer) -> bool:
        """Apply an answer's steps; True when the target weights are reached."""
        pass

    @abstractmethod
    def _check_required_statement(self) -> bool:
        """Whether current vertex weights match the target weights."""
        pass

    @abstractmethod
    def get_neighbours(self, vertex: int) -> list:
        """Neighbours of *vertex*."""
        pass

    @abstractmethod
    def make_tree(self):
        """Reduce the graph to a spanning tree."""
        pass

    @abstractmethod
    def mark_vertex(self, vertex: int):
        """Flag *vertex* as processed."""
        pass

    @abstractmethod
    def get_unmarked_leaves(self) -> list:
        """Unmarked vertices with at most one unmarked neighbour."""
        pass

    @abstractmethod
    def get_unmarked_neigbors(self, vertex: int) -> list:
        """All unmarked neighbours of *vertex*."""
        pass

    @abstractmethod
    def is_mark(self, vertex: int) -> bool:
        """Whether *vertex* has been marked."""
        pass

    @classmethod
    def set_params(cls, n, X, Y, edges):
        """Store the problem parameters after (placeholder) validation."""
        if cls._verify_params(n, X, Y, edges):
            cls._n = n
            cls._X = X
            cls._Y = Y
            cls._edges = edges
        else:
            raise Exception("bad params")

    @classmethod
    def _verify_params(cls, n, X, Y, edges):
        # TODO: real validation -- currently accepts any parameters.
        return True

    @classmethod
    def get_n(cls):
        return cls._n

    @classmethod
    def get_x(cls):
        return cls._X

    @classmethod
    def get_y(cls):
        return cls._Y
class NxGraph(Graph):
    """Graph implementation backed by a networkx.Graph.

    Vertex state lives in node attributes: "weight" (the value being
    redistributed) and "mark" (visited flag used by the solver).

    NOTE(review): uses the ``G.node[...]`` attribute accessor, which was
    removed in networkx 2.4 -- confirm the pinned networkx version.
    """

    @overrides
    def __init__(self, **kwargs):
        self._graph_impl = nx.Graph()
        super().__init__(**kwargs)

    @overrides
    def init_from_txt(self, filename: str):
        """Parse *filename* with Parser, store the parameters, build the graph."""
        with open(filename, "r") as f:
            parser = Parser()
            parser.init(f)
            n = parser.get_n()
            X = parser.get_x()
            Y = parser.get_y()
            edges = parser.get_edges()
            del parser
            self.set_params(n, X, Y, edges)
            self._create_graph()

    @overrides
    def _create_graph(self) -> bool:
        """Materialize vertices, edges and initial attributes; returns True."""
        if self._n == 0 or self._edges is None:
            raise Exception("failed init graph params")
        # create vertices
        for i in range(self._n):
            self._graph_impl.add_node(i)
        # create edges
        for edge in self._edges:
            self._graph_impl.add_edge(edge[0], edge[1])
        # initial weights come from X; every vertex starts unmarked
        for i in range(self._n):
            self._graph_impl.node[i]["weight"] = self._X[i]
            self._graph_impl.node[i]["mark"] = False
        return True

    @overrides
    def print_graph(self, only_nodes=False):
        """Debug dump of node attributes (and, optionally, the edge list)."""
        print(list(self._graph_impl.nodes(data=True)))
        if not only_nodes:
            print(list(self._graph_impl.edges()))

    @overrides
    def interpretate_task_answer(self, answer: TaskAnswer) -> bool:
        """Replay *answer*'s transfer steps on the graph.

        Each step (source, dest, value) moves `value` weight along an
        existing edge.  Raises on an invalid step (insufficient weight,
        missing edge, negative value); returns whether the final weights
        match the target Y.
        """
        steps = answer.get_steps()
        for i, step in enumerate(steps):
            (source_ver, dest_ver, value) = step
            edges = self._graph_impl.edges()
            if self._graph_impl.node[source_ver]["weight"] < value:
                raise Exception("less zero during interpretation on step", i)
            elif not (source_ver, dest_ver) in edges and not (dest_ver, source_ver) in edges:
                raise Exception("graph doesn't contain edge", source_ver, dest_ver, "on step", i)
            elif value < 0:
                raise Exception("negative value on step", i)
            else:
                self._graph_impl.node[source_ver]["weight"] -= value
                self._graph_impl.node[dest_ver]["weight"] += value
        return self._check_required_statement()

    @overrides
    def _check_required_statement(self) -> bool:
        """True when every vertex weight equals its target within 1e-7."""
        for i in range(self._n):
            if abs(self._graph_impl.node[i]["weight"] - self._Y[i]) > 10 ** -7:
                return False
        return True

    @overrides
    def get_neighbours(self, vertex: int) -> list:
        """Neighbours of *vertex* (networkx may return an iterator)."""
        return self._graph_impl.neighbors(vertex)

    @overrides
    def make_tree(self):
        """Greedily remove edges, re-adding any whose removal disconnects
        the graph (bridges); the result is a spanning tree."""
        edges = list(self._graph_impl.edges)
        for edge in edges:
            self._graph_impl.remove_edge(*edge)
            components = nx.algorithms.number_connected_components(self._graph_impl)
            if components > 1:
                # removal disconnected the graph: this edge is a bridge, keep it
                self._graph_impl.add_edge(*edge)

    @overrides
    def mark_vertex(self, vertex: int):
        """Flag *vertex* as processed."""
        self._graph_impl.node[vertex]["mark"] = True

    @overrides
    def get_unmarked_leaves(self) -> list:
        """Unmarked vertices with at most one unmarked neighbour."""
        unmarked_leaves = []
        for node in self._graph_impl.nodes():
            if self._graph_impl.node[node]["mark"]:
                continue
            # a node qualifies with 0 or 1 unmarked neighbours
            if len(self.get_unmarked_neigbors(node)) < 2:
                unmarked_leaves.append(node)
        return unmarked_leaves

    @overrides
    def get_unmarked_neigbors(self, vertex: int) -> list:
        """All unmarked neighbours of *vertex*.

        Fix: dropped the unused ``count_unmark`` local from the original.
        """
        out = []
        for neighbor in self._graph_impl.neighbors(vertex):
            if not self._graph_impl.node[neighbor]["mark"]:
                out.append(neighbor)
        return out

    @overrides
    def is_mark(self, vertex: int) -> bool:
        """Whether *vertex* has been marked."""
        return self._graph_impl.node[vertex]["mark"]
6633326 | <filename>tools/demo.py<gh_stars>100-1000
import _init_paths
import argparse
import time
import os
import sys
import os.path as osp
from glob import glob
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import caffe
from mpi4py import MPI
from fast_rcnn.test_probe import demo_exfeat
from fast_rcnn.test_gallery import demo_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
def main(args):
    """Run the person-search demo (Python 2 / Caffe).

    Extracts a feature vector for the query person, then detects people in
    each gallery image, ranks them by cosine similarity to the query, and
    saves an annotated visualization per gallery image.
    """
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # Setup caffe (GPU when args.gpu >= 0, otherwise CPU)
    if args.gpu >= 0:
        caffe.mpi_init()
        caffe.set_mode_gpu()
        caffe.set_device(cfg.GPU_ID)
    else:
        caffe.mpi_init()
        caffe.set_mode_cpu()

    # Get query image and roi
    query_img = 'demo/query.jpg'
    query_roi = [0, 0, 466, 943]  # [x1, y1, x2, y2]

    # Extract feature of the query person
    net = caffe.Net(args.probe_def, args.caffemodel, caffe.TEST)
    query_feat = demo_exfeat(net, query_img, query_roi)
    del net  # Necessary to release cuDNN conv static workspace

    # Get gallery images
    gallery_imgs = sorted(glob('demo/gallery*.jpg'))

    # Detect and extract feature of persons in each gallery image
    net = caffe.Net(args.gallery_def, args.caffemodel, caffe.TEST)

    # Necessary to warm-up the net, otherwise the first image results are wrong
    # Don't know why. Possibly a bug in caffe's memory optimization.
    # Nevertheless, the results are correct after this warm-up.
    demo_detect(net, query_img)

    for gallery_img in gallery_imgs:
        print gallery_img, '...'
        boxes, features = demo_detect(net, gallery_img,
                                      threshold=args.det_thresh)
        if boxes is None:
            print gallery_img, 'no detections'
            continue
        # Compute pairwise cosine similarities,
        # equals to inner-products, as features are already L2-normed
        similarities = features.dot(query_feat)

        # Visualize the results: green box, thin white inner border, score label
        fig, ax = plt.subplots(figsize=(16, 9))
        ax.imshow(plt.imread(gallery_img))
        plt.axis('off')
        for box, sim in zip(boxes, similarities):
            x1, y1, x2, y2, _ = box
            ax.add_patch(
                plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                              fill=False, edgecolor='#4CAF50', linewidth=3.5))
            ax.add_patch(
                plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                              fill=False, edgecolor='white', linewidth=1))
            ax.text(x1 + 5, y1 - 18, '{:.2f}'.format(sim),
                    bbox=dict(facecolor='#4CAF50', linewidth=0),
                    fontsize=20, color='white')
        plt.tight_layout()
        fig.savefig(gallery_img.replace('gallery', 'result'))
        plt.show()
        plt.close(fig)
    del net
if __name__ == '__main__':
    # Command-line interface; defaults assume the repository's standard layout.
    parser = argparse.ArgumentParser(description='Person Search Demo')
    parser.add_argument('--gpu',
                        help='GPU id to be used, -1 for CPU. Default: 0',
                        type=int, default=0)
    parser.add_argument('--gallery_def',
                        help='prototxt file defining the gallery network',
                        default='models/psdb/resnet50/eval_gallery.prototxt')
    parser.add_argument('--probe_def',
                        help='prototxt file defining the probe network',
                        default='models/psdb/resnet50/eval_probe.prototxt')
    parser.add_argument('--net', dest='caffemodel',
                        help='path to trained caffemodel',
                        default='output/psdb_train/resnet50/resnet50_iter_50000.caffemodel')
    parser.add_argument('--det_thresh',
                        help="detection score threshold to be evaluated",
                        type=float, default=0.75)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default='experiments/cfgs/resnet50.yml')
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
3291018 | #!/usr/bin/env python
# Hostname of the MQTT broker.
mqtt_host = 'mqtt.home'

import sys
import paho.mqtt.client as paho
import os
import datetime
import threading
import serial
import serial.threaded
import serial.tools.list_ports
import re
import signal
import time

# Default serial device prefix -- not referenced below; the actual device is
# auto-detected by USB vid/pid in __main__.
serial_dev = '/dev/ttyACM'

#sys.stderr = open('proxy_error.log', 'a')

# Connect to the MQTT broker at import time (port 1883, 60 s keepalive).
mqtt = paho.Client()
mqtt.connect(mqtt_host, 1883, 60)

# Filled in at runtime: the MQTT client thread and the serial line protocol.
mqtt_client = None
tah_proxy = None
class mqttClient(threading.Thread):
    """Thread that subscribes to the bedroom IR actuator topics and forwards
    each command as a JSON line to the Tah via the module-global tah_proxy."""

    def __init__(self, mqtt):
        super(mqttClient, self).__init__()
        self.mqtt = mqtt
        # Alternates '0'/'1' to implement the 'toggle' payload for the
        # limpet light.
        self.toggle = '0'

    def run(self):
        # Register callbacks, subscribe, and block in the paho network loop.
        self.mqtt.on_message = self.onMessage
        self.mqtt.on_disconnect = self.onDisconnect
        self.mqtt.subscribe([('/actuator/bedroom/ir/#', 0)])
        self.mqtt.loop_forever()

    def onDisconnect(self, client, userdata, rc):
        """Retry once per second until the broker connection is restored."""
        print "Disconnected from MQTT server with code: %s" % rc
        while rc != 0:
            try:
                rc = self.mqtt.reconnect()
            except:
                time.sleep(1)
        print "Reconnected to MQTT server."

    def onMessage(self, mqtt, obj, msg):
        """Translate an incoming MQTT message into an IR command JSON line."""
        global tah_proxy
        print "{}:\t{}".format(msg.topic, msg.payload)
        if msg.topic.endswith("/ceiling_light"):
            tah_proxy.write_line('{"type":3,"value":8008,"bits":13}')
        elif msg.topic.endswith("/limpet_light"):
            if msg.payload == 'toggle':
                # Substitute the remembered state, then flip it for next time.
                msg.payload = self.toggle
                self.toggle = '1' if self.toggle == '0' else '0'
            if msg.payload == '1':
                tah_proxy.write_line('{"type":1,"value":16711935,"bits":32}')
            else:
                tah_proxy.write_line('{"type":1,"value":16744575,"bits":32}')
class tahProxy(serial.threaded.LineReader):
    """Serial line protocol for the Tah: publishes each received line to MQTT
    and registers itself in the module-global tah_proxy so mqttClient can
    write commands back."""
    TERMINATOR = b'\r\n'
    ENCODING = 'utf-8'
    UNICODE_HANDLING = 'replace'

    def __init__(self):
        super(tahProxy, self).__init__()
        global tah_proxy
        # Expose this instance for the MQTT side to write IR commands to.
        tah_proxy = self

    def handle_packet(self, packet):
        # Decode raw bytes and delegate to the line handler.
        self.handle_line(packet.decode(self.ENCODING, self.UNICODE_HANDLING))

    def handle_line(self, line):
        """Publish a decoded line from the device to the bedroom sensor topic."""
        global mqtt_client
        print "RX: {}".format(line)
        mqtt_client.mqtt.publish("/sensor/bedroom/ir", line)
if __name__ == '__main__':
    print "Starting"
    try:
        # For some reason, systemd passes HUP to the job when starting, so we
        # have to ignore HUP. Then it has trouble with INT, so best to
        # explicitly exit on INT, and configure systemd to use INT.
        def shutdown_handler(signum, frame):
            print "Setting running to False"
            shutdown_handler.running = False
        shutdown_handler.running = True
        signal.signal(signal.SIGINT, shutdown_handler)

        # Ignore HUP
        def ignore_handler(signum, frame):
            pass
        signal.signal(signal.SIGHUP, ignore_handler)

        # Find Leonardo (ie. Tah) by its USB vendor/product id; uses the
        # first match.
        dev = [port.device for port in serial.tools.list_ports.comports() if port.vid==0x2341 and port.pid==0x8036]
        ser = serial.serial_for_url(dev[0], baudrate=115200)
        t = serial.threaded.ReaderThread(ser, tahProxy)
        t.start()

        mqtt_client = mqttClient(mqtt=mqtt)
        mqtt_client.daemon = True
        mqtt_client.start()

        # Main loop: poll until the SIGINT handler flips the running flag.
        while shutdown_handler.running:
            time.sleep(1)
        sys.exit(0)
    except (KeyboardInterrupt, SystemExit) as e:
        print e
        sys.exit(1)
    # except OSError as e:
    #     t.reconnect()
    except Exception as e:
        # NOTE(review): all other errors are printed and swallowed, letting
        # the process fall through to a normal exit -- confirm intended.
        print e
        pass
| StarcoderdataPython |
3217430 | # Copyright 2020 The Microsoft DeepSpeed Team
"""
DeepSpeed runner is the main front-end to launching multi-worker
training jobs with DeepSpeed. By default this uses pdsh to parallel
ssh into multiple worker nodes and launch all the necessary processes
per rank for training.
"""
import os
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import torch.cuda
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..utils import logger
from ..autotuning import Autotuner
# Default hostfile location (used as the --hostfile default in parse_args).
DLTS_HOSTFILE = "/job/hostfile"
# Environment-variable name prefixes to forward to workers -- presumably
# consumed by the launch code later in this module.
EXPORT_ENVS = ["NCCL", "PYTHON", "MV2", "UCX"]
# Name of the optional per-user environment file, searched in $HOME and CWD.
DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env"
DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.']
# pdsh fan-out (max concurrent connections) -- TODO confirm where it's applied.
PDSH_MAX_FAN_OUT = 1024
def parse_args(args=None):
    """Build and run the DeepSpeed launcher's argument parser.

    Args:
        args: optional list of argument strings; None means sys.argv[1:].

    Returns:
        argparse.Namespace with the parsed launcher options.
    """
    parser = argparse.ArgumentParser(
        description="DeepSpeed runner to help launch distributed "
        "multi-node/multi-gpu training jobs.")
    parser.add_argument("-H",
                        "--hostfile",
                        type=str,
                        default=DLTS_HOSTFILE,
                        help="Hostfile path (in MPI style) that defines the "
                        "resource pool available to the job (e.g., "
                        "worker-0 slots=4)")
    parser.add_argument("-i",
                        "--include",
                        type=str,
                        default="",
                        help='''Specify hardware resources to use during execution.
                        String format is
                                NODE_SPEC[@NODE_SPEC ...],
                        where
                                NODE_SPEC=NAME[:SLOT[,SLOT ...]].
                        If :SLOT is omitted, include all slots on that host.
                        Example: -i "worker-0@worker-1:0,2" will use all slots
                        on worker-0 and slots [0, 2] on worker-1.
                        ''')
    parser.add_argument("-e",
                        "--exclude",
                        type=str,
                        default="",
                        help='''Specify hardware resources to NOT use during execution.
                        Mutually exclusive with --include. Resource formatting
                        is the same as --include.
                        Example: -e "worker-1:0" will use all available
                        resources except slot 0 on worker-1.
                        ''')
    parser.add_argument("--num_nodes",
                        type=int,
                        default=-1,
                        help="Total number of worker nodes to run on, this will use "
                        "the top N hosts from the given hostfile.")
    parser.add_argument("--num_gpus",
                        type=int,
                        default=-1,
                        help="Max number of GPUs to use on each node, will use "
                        "[0:N) GPU ids on each node.")
    parser.add_argument("--master_port",
                        default=TORCH_DISTRIBUTED_DEFAULT_PORT,
                        type=int,
                        help="(optional) Port used by PyTorch distributed for "
                        "communication during training.")
    parser.add_argument("--master_addr",
                        default="",
                        type=str,
                        help="(optional) IP address of node 0, will be "
                        "inferred via 'hostname -I' if not specified.")
    parser.add_argument("--launcher",
                        default=PDSH_LAUNCHER,
                        type=str,
                        help="(optional) choose launcher backend for multi-node "
                        "training. Options currently include PDSH, OpenMPI, MVAPICH.")
    parser.add_argument("--launcher_args",
                        default="",
                        type=str,
                        help="(optional) pass launcher specific arguments as a "
                        "single quoted argument.")
    parser.add_argument("--module",
                        action="store_true",
                        help="Change each process to interpret the launch "
                        "script as a Python module, executing with the same "
                        "behavior as 'python -m'.")
    parser.add_argument("--no_python",
                        action="store_true",
                        help="Skip prepending the training script with "
                        "'python' - just execute it directly.")
    parser.add_argument("--no_local_rank",
                        action="store_true",
                        help="Do not pass local_rank as an argument when calling "
                        "the user's training script.")
    parser.add_argument("--no_ssh_check",
                        action="store_true",
                        help="Do not perform ssh check in multi-node launcher model")
    parser.add_argument("--force_multi",
                        action="store_true",
                        help="Force multi-node launcher mode, helps in cases where user "
                        "wants to launch on single remote node.")
    parser.add_argument(
        "--save_pid",
        action="store_true",
        help="Save file containing launcher process id (pid) at /tmp/<main-pid>.ds, "
        "where <main-pid> is the pid of the first process that invoked `deepspeed`. "
        "Useful when launching deepspeed processes programmatically.")
    parser.add_argument(
        "--autotuning",
        default="",
        choices=["tune",
                 "run"],
        type=str,
        help="Run DeepSpeed autotuner to discover optimal configuration parameters "
        "before running job.")
    # Positional: the user's training script plus everything after it.
    parser.add_argument("user_script",
                        type=str,
                        help="User script to launch, followed by any required "
                        "arguments.")
    parser.add_argument('user_args', nargs=argparse.REMAINDER)
    return parser.parse_args(args=args)
def fetch_hostfile(hostfile_path):
    """Parse an MPI-style hostfile with one "<host> slots=<n>" entry per line.

    Returns an OrderedDict mapping hostname -> slot count, or None when the
    hostfile does not exist (single-node fallback).  Raises ValueError on a
    malformed line or a duplicated hostname.
    """
    if not os.path.isfile(hostfile_path):
        logger.warning("Unable to find hostfile, will proceed with training "
                       "with local resources only.")
        return None

    # e.g., worker-0 slots=16
    resource_pool = collections.OrderedDict()
    with open(hostfile_path, 'r') as fd:
        for raw_line in fd.readlines():
            entry = raw_line.strip()
            if not entry:
                # blank lines are allowed and ignored
                continue
            try:
                hostname, slots = entry.split()
                _, count_str = slots.split("=")
                slot_count = int(count_str)
            except ValueError as err:
                logger.error("Hostfile is not formatted correctly, unable to "
                             "proceed with training.")
                raise err
            if hostname in resource_pool:
                logger.error("Hostfile contains duplicate hosts, unable to "
                             "proceed with training.")
                raise ValueError(f"host {hostname} is already defined")
            resource_pool[hostname] = slot_count
    return resource_pool
def _stable_remove_duplicates(data):
# Create a new list in the same order as original but with duplicates
# removed, should never be more than ~16 elements so simple is best
new_list = []
for x in data:
if x not in new_list:
new_list.append(x)
return new_list
def parse_resource_filter(host_info, include_str="", exclude_str=""):
    '''Parse an inclusion or exclusion string and filter a hostfile dictionary.

    String format is NODE_SPEC[@NODE_SPEC ...], where
        NODE_SPEC = NAME[:SLOT[,SLOT ...]].
    If :SLOT is omitted, include/exclude all slots on that host.

    Examples:
        include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and
          slots [0, 2] on worker-1.
        exclude_str="worker-1:0" will use all available resources except
          slot 0 on worker-1.
    '''
    # Constants that define our syntax
    NODE_SEP = '@'
    SLOT_LIST_START = ':'
    SLOT_SEP = ','

    # Ensure include/exclude are mutually exclusive
    if (include_str != "") and (exclude_str != ""):
        raise ValueError('include_str and exclude_str are mutually exclusive.')

    # no-op: neither filter supplied, return the pool untouched
    if (include_str == "") and (exclude_str == ""):
        return host_info

    # Include builds the result from scratch; exclude starts from a deep copy
    # of the full pool so removals do not mutate the caller's dict.
    filtered_hosts = dict()
    if include_str:
        parse_str = include_str
    if exclude_str != "":
        filtered_hosts = deepcopy(host_info)
        parse_str = exclude_str

    # foreach node in the list
    for node_config in parse_str.split(NODE_SEP):
        # Node can either be alone or node:slot,slot,slot
        if SLOT_LIST_START in node_config:
            hostname, slots = node_config.split(SLOT_LIST_START)
            slots = [int(x) for x in slots.split(SLOT_SEP)]
            # sanity checks: host and every slot must exist in the pool
            if hostname not in host_info:
                raise ValueError(f"Hostname '{hostname}' not found in hostfile")
            for slot in slots:
                if slot not in host_info[hostname]:
                    raise ValueError(f"No slot '{slot}' specified on host '{hostname}'")
            # If include string, build the list from here
            if include_str:
                filtered_hosts[hostname] = slots
            elif exclude_str:
                for slot in slots:
                    logger.info(f'removing {slot} from {hostname}')
                    filtered_hosts[hostname].remove(slot)
        # User just specified the whole node
        else:
            hostname = node_config
            # sanity check hostname
            if hostname not in host_info:
                raise ValueError(f"Hostname '{hostname}' not found in hostfile")
            if include_str:
                filtered_hosts[hostname] = host_info[hostname]
            elif exclude_str:
                # excluding the whole node: mark empty so it is dropped below
                filtered_hosts[hostname] = []

    # Post-processing to remove duplicates and empty nodes
    del_keys = []
    for hostname in filtered_hosts:
        # Remove duplicates
        filtered_hosts[hostname] = _stable_remove_duplicates(filtered_hosts[hostname])
        # Remove empty hosts
        if len(filtered_hosts[hostname]) == 0:
            del_keys.append(hostname)
    for name in del_keys:
        del filtered_hosts[name]

    # Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure
    # we map ranks to nodes correctly by maintaining host_info ordering.
    ordered_hosts = collections.OrderedDict()
    for host in host_info:
        if host in filtered_hosts:
            ordered_hosts[host] = filtered_hosts[host]
    return ordered_hosts
def parse_inclusion_exclusion(resource_pool, inclusion, exclusion):
    """Expand slot counts into explicit slot-id lists, then apply the
    include/exclude filter strings via parse_resource_filter."""
    expanded = collections.OrderedDict(
        (hostname, list(range(slot_count)))
        for hostname, slot_count in resource_pool.items())
    return expanded if False else parse_resource_filter(
        expanded, include_str=inclusion, exclude_str=exclusion)
def encode_world_info(world_info):
    """Serialize *world_info* to JSON and return it URL-safe base64-encoded,
    so it can travel safely as a single command-line argument."""
    as_json = json.dumps(world_info)
    return base64.urlsafe_b64encode(as_json.encode('utf-8')).decode('utf-8')
def run_autotuning(args, active_resources):
    """Run the DeepSpeed autotuner over the active resources and, when
    ``--autotuning run`` was requested, launch the job with the best config."""
    autotuner = Autotuner(args, active_resources)
    logger.info("[Start] Running autotuning")
    autotuner.tune()
    autotuner.print_tuning_results()
    logger.info("[End] Running autotuning")
    if args.autotuning == "run":
        autotuner.run_after_tuning()
def main(args=None):
    """Entry point of the `deepspeed` launcher.

    Resolves the resource pool (hostfile / CUDA_VISIBLE_DEVICES / local GPUs),
    applies include/exclude filters, then either runs the autotuner or builds
    and spawns the actual launch command (single-node python -m launch, or a
    multi-node runner such as pdsh/OpenMPI/MVAPICH).
    """
    args = parse_args(args)
    resource_pool = fetch_hostfile(args.hostfile)
    # respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters
    cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    if not resource_pool and len(cuda_visible_devices):
        detected_str = f"Detected CUDA_VISIBLE_DEVICES={cuda_visible_devices}"
        if len(args.include) or len(
                args.exclude) or args.num_nodes > 1 or args.num_gpus > 0:
            # Explicit resource flags win over the environment variable.
            print(
                f"{detected_str} but ignoring it because one or several of --include/--exclude/--num_gpus/--num_nodes cl args were used. If you want to use CUDA_VISIBLE_DEVICES don't pass any of these arguments to deepspeed."
            )
        else:
            # Translate CUDA_VISIBLE_DEVICES into an equivalent --include filter.
            args.include = f"localhost:{cuda_visible_devices}"
            print(f"{detected_str}: setting --include={args.include}")
        # Unset so worker processes see the launcher-assigned device mapping.
        del os.environ["CUDA_VISIBLE_DEVICES"]

    if args.num_nodes >= 0 or args.num_gpus >= 0:
        if args.include != "" or args.exclude != "":
            raise ValueError("Cannot specify num_nodes/gpus with include/exclude")

    multi_node_exec = True
    if not resource_pool:
        # No hostfile: fall back to all local GPUs on this machine.
        resource_pool = {}
        device_count = torch.cuda.device_count()
        if device_count == 0:
            raise RuntimeError("Unable to proceed, no GPU resources available")
        resource_pool['localhost'] = device_count
        args.master_addr = "127.0.0.1"
        multi_node_exec = False

    if not multi_node_exec and args.num_nodes > 1:
        raise ValueError("Num nodes is >1 but no extra nodes available via hostfile")

    active_resources = parse_inclusion_exclusion(resource_pool,
                                                 args.include,
                                                 args.exclude)
    env = os.environ.copy()

    # validate that passwordless ssh is working properly with this hostfile
    if multi_node_exec and not args.no_ssh_check:
        first_host = list(active_resources.keys())[0]
        try:
            subprocess.check_call(
                f'ssh -o PasswordAuthentication=no {first_host} hostname',
                stderr=subprocess.DEVNULL,
                stdout=subprocess.DEVNULL,
                shell=True)
        except subprocess.CalledProcessError:
            raise RuntimeError(
                f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or setup passwordless ssh."
            )

    if not args.master_addr:
        # Resolve the master address from the first active host's reported IP.
        assert multi_node_exec
        first_host = list(active_resources.keys())[0]
        hostname_cmd = [f"ssh {first_host} hostname -I"]
        result = subprocess.check_output(hostname_cmd, shell=True)
        args.master_addr = result.decode('utf-8').split()[0]
        logger.info(f"Using IP address of {args.master_addr} for node {first_host}")

    if args.autotuning != "":
        # Autotuning replaces a normal launch entirely.
        run_autotuning(args, active_resources)
        return

    if args.num_nodes > 0:
        # Keep only the first --num_nodes hosts, preserving hostfile order.
        updated_active_resources = collections.OrderedDict()
        for count, hostname in enumerate(active_resources.keys()):
            if args.num_nodes == count:
                break
            updated_active_resources[hostname] = active_resources[hostname]
        active_resources = updated_active_resources

    if args.num_gpus > 0:
        # Force every host to expose exactly --num_gpus slots [0..num_gpus).
        updated_active_resources = collections.OrderedDict()
        for hostname in active_resources.keys():
            updated_active_resources[hostname] = list(range(args.num_gpus))
        active_resources = updated_active_resources

    # encode world info as base64 to make it easier to pass via command line
    world_info_base64 = encode_world_info(active_resources)

    multi_node_exec = args.force_multi or len(active_resources) > 1

    if not multi_node_exec:
        # Single node: exec deepspeed.launcher.launch directly in-process tree.
        deepspeed_launch = [
            sys.executable,
            "-u",
            "-m",
            "deepspeed.launcher.launch",
            f"--world_info={world_info_base64}",
            f"--master_addr={args.master_addr}",
            f"--master_port={args.master_port}"
        ]
        if args.no_python:
            deepspeed_launch.append("--no_python")
        if args.module:
            deepspeed_launch.append("--module")
        if args.no_local_rank:
            deepspeed_launch.append("--no_local_rank")
        if args.save_pid:
            deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
        cmd = deepspeed_launch + [args.user_script] + args.user_args
    else:
        # Multi node: delegate to the selected backend runner.
        args.launcher = args.launcher.lower()
        if args.launcher == PDSH_LAUNCHER:
            runner = PDSHRunner(args, world_info_base64)
        elif args.launcher == OPENMPI_LAUNCHER:
            runner = OpenMPIRunner(args, world_info_base64, resource_pool)
        elif args.launcher == MVAPICH_LAUNCHER:
            runner = MVAPICHRunner(args, world_info_base64, resource_pool)
        else:
            raise NotImplementedError(f"Unknown launcher {args.launcher}")

        if not runner.backend_exists():
            raise RuntimeError(f"launcher '{args.launcher}' not installed.")

        curr_path = os.path.abspath('.')
        if 'PYTHONPATH' in env:
            env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH']
        else:
            env['PYTHONPATH'] = curr_path

        # Forward whitelisted environment variables to the remote processes.
        exports = ""
        for var in env.keys():
            if any([var.startswith(name) for name in EXPORT_ENVS]):
                runner.add_export(var, env[var])

        # Also export KEY=VALUE pairs from any .deepspeed_env files found.
        for environ_path in DEEPSPEED_ENVIRONMENT_PATHS:
            environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME)
            if os.path.isfile(environ_file):
                with open(environ_file, 'r') as fd:
                    for var in fd.readlines():
                        key, val = var.split('=', maxsplit=1)
                        runner.add_export(key, val)

        cmd = runner.get_cmd(env, active_resources)

    logger.info(f"cmd = {' '.join(cmd)}")
    result = subprocess.Popen(cmd, env=env)
    result.wait()

    # In case of failure must propagate the error-condition back to the caller (usually shell). The
    # actual error and traceback should have been printed in the subprocess, so in order to avoid
    # unnecessary noise we just quietly exit here with the same code as the subprocess
    if result.returncode > 0:
        sys.exit(result.returncode)
# Allow direct `python runner.py ...` execution in addition to console-script entry.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4819657 | # Copyright (C) 2022 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import textwrap
import pytest
import capa.rules
def test_rule_scope_instruction():
    # Valid: instruction-level features are accepted at instruction scope.
    capa.rules.Rule.from_yaml(
        textwrap.dedent(
            """
            rule:
              meta:
                name: test rule
                scope: instruction
              features:
                - and:
                  - mnemonic: mov
                  - arch: i386
                  - os: windows
            """
        )
    )

    # Invalid: a file-level characteristic is rejected at instruction scope.
    with pytest.raises(capa.rules.InvalidRule):
        capa.rules.Rule.from_yaml(
            textwrap.dedent(
                """
                rule:
                  meta:
                    name: test rule
                    scope: instruction
                  features:
                    - characteristic: embedded pe
                """
            )
        )
def test_rule_subscope_instruction():
    # A function-scoped rule with an `instruction:` subscope should be split
    # into the visible rule plus a derived instruction-scope dependency.
    rules = capa.rules.RuleSet(
        [
            capa.rules.Rule.from_yaml(
                textwrap.dedent(
                    """
                    rule:
                      meta:
                        name: test rule
                        scope: function
                      features:
                        - and:
                          - instruction:
                            - and:
                              - mnemonic: mov
                              - arch: i386
                              - os: windows
                    """
                )
            )
        ]
    )
    # the function rule scope will have one rules:
    # - `test rule`
    assert len(rules.function_rules) == 1
    # the insn rule scope have one rule:
    # - the rule on which `test rule` depends (auto-generated subscope rule)
    assert len(rules.instruction_rules) == 1
def test_scope_instruction_implied_and():
    # A bare feature list directly under `instruction:` (no explicit `and:`)
    # must parse: the conjunction is implied.
    capa.rules.Rule.from_yaml(
        textwrap.dedent(
            """
            rule:
              meta:
                name: test rule
                scope: function
              features:
                - and:
                  - instruction:
                    - mnemonic: mov
                    - arch: i386
                    - os: windows
            """
        )
    )
def test_scope_instruction_description():
    # A `description:` entry inside an instruction subscope must be accepted.
    # NOTE(review): the two calls below parse what appears to be an identical
    # document — likely a copy-paste slip (the second case presumably intended
    # a different description placement); confirm against the upstream capa
    # test suite.
    capa.rules.Rule.from_yaml(
        textwrap.dedent(
            """
            rule:
              meta:
                name: test rule
                scope: function
              features:
                - and:
                  - instruction:
                    - description: foo
                    - mnemonic: mov
                    - arch: i386
                    - os: windows
            """
        )
    )
    capa.rules.Rule.from_yaml(
        textwrap.dedent(
            """
            rule:
              meta:
                name: test rule
                scope: function
              features:
                - and:
                  - instruction:
                    - description: foo
                    - mnemonic: mov
                    - arch: i386
                    - os: windows
            """
        )
    )
| StarcoderdataPython |
137082 | <filename>SC101Assignment/SC101_Assignment0/coin_flip_runs.py
"""
File: coin_flip_runs.py
Name: 洪禎蔚
-----------------------
This program should simulate coin flip(s)
with the number of runs input by users.
A 'run' is defined as consecutive results
on either 'H' or 'T'. For example, 'HHHHHTHTT'
is regarded as a 2-run result.
Your program should stop immediately after your
coin flip results reach the runs!
"""
import random as r
def main():
    """
    Repeatedly flip a virtual coin, stopping as soon as the accumulated
    sequence contains the user-requested number of completed runs, then
    print every flip except the final look-ahead one.
    """
    print('Let\'s flip a coin!')
    num_run = int(input('Number of runs: '))
    ht = ('H', 'T')  # the two possible outcomes
    result = r.choice(ht)
    while True:
        result += r.choice(ht)
        if finish(result, num_run):
            break
    # The last flip only served to confirm that the final run ended, so drop it.
    print(result[0: len(result)-1])
def finish(result, num_run):
    """
    :param result: str, the 'H'/'T' flip sequence collected so far
    :param num_run: int, the number of completed runs the player asked for
    :return: bool, truthy once *result* contains num_run completed runs

    A run "completes" at position i when result[i] == result[i+1] and the
    following character differs, i.e. a streak of length >= 2 just ended.
    """
    completed = 0
    for i in range(len(result) - 2):
        streak = result[i] == result[i + 1]
        ended = result[i + 1] != result[i + 2]
        if streak and ended:
            completed += 1
            if completed == num_run:
                return True
###### DO NOT EDIT CODE BELOW THIS LINE ######
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1897184 | import sys
from types import SimpleNamespace
from . import jupyter, non_tty, tty, void
# work around a bug on Windows' command prompt, where ANSI escape codes are disabled by default.
if sys.platform == 'win32':
    import os
    # An (empty) system() call is enough to switch the console into
    # VT-processing mode so escape sequences are honored.
    os.system('')
def _create(mod, interactive):
terminal = SimpleNamespace(
interactive=interactive,
cursor_up_1=mod.factory_cursor_up(1),
# from mod terminal impl.
write=mod.write,
flush=mod.flush,
cols=mod.cols,
carriage_return=mod.carriage_return,
clear_line=mod.clear_line,
clear_end=mod.clear_end,
hide_cursor=mod.hide_cursor,
show_cursor=mod.show_cursor,
factory_cursor_up=mod.factory_cursor_up,
)
return terminal
def _is_notebook():
"""This detection is tricky, because by design there's no way to tell which kind
of frontend is connected, there may even be more than one with different types!
Also, there may be other types I'm not aware of...
So, I've chosen what I thought it was the safest method, with a negative logic:
if it _isn't_ None or TerminalInteractiveShell, it should be the "jupyter" type.
The jupyter type does not emit any ANSI Escape Codes.
"""
if 'IPython' not in sys.modules:
# if IPython hasn't been imported, there's nothing to check.
return False
# noinspection PyPackageRequirements
from IPython import get_ipython
class_ = get_ipython().__class__.__name__
return class_ != 'TerminalInteractiveShell'
# Pre-built terminal facades: FULL emits control sequences (jupyter-safe when a
# notebook frontend is detected), NON_TTY targets pipes/redirected output, and
# VOID swallows everything.
FULL = _create(jupyter.BASE if _is_notebook() else tty.BASE, True)
NON_TTY = _create(non_tty.BASE, False)
VOID = _create(void, False)
| StarcoderdataPython |
5111703 | # -*- coding: utf-8 -*-
'''
saltpylint.version
~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`<NAME> (<EMAIL>)`
:copyright: © 2013-2018 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import Python libs
from __future__ import absolute_import
__version_info__ = (2019, 1, 11)
__version__ = '{0}.{1}.{2}'.format(*__version_info__)
| StarcoderdataPython |
1994676 | from typing import List
class TreeNode:
    """Minimal binary-tree node (LeetCode style)."""
    def __init__(self, x):
        self.val = x        # node value
        self.left = None    # left child: TreeNode or None
        self.right = None   # right child: TreeNode or None
class Solution:
    def trimBST(self, root: TreeNode, L: int, R: int) -> TreeNode:
        """Trim a BST so every node value lies in the inclusive range [L, R].

        Relies on the BST invariant: a node below the range can only have
        in-range descendants on its right, and vice versa.
        """
        def _trim(node):
            if node is None:
                return None
            if node.val < L:
                # Entire left subtree is also below range; keep the right side.
                return _trim(node.right)
            if node.val > R:
                # Entire right subtree is also above range; keep the left side.
                return _trim(node.left)
            node.left = _trim(node.left)
            node.right = _trim(node.right)
            return node

        return _trim(root)
# No demo/driver code; the module is only imported for its Solution class.
if __name__ == "__main__":
    pass
| StarcoderdataPython |
11340347 | <reponame>Brett777/Predict-Churn
import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import h2o
import numpy as np
import pandas as pd
from tabulate import tabulate
# initialize the model scoring server
# NOTE(review): single thread / 1 GB heap; strict_version_check disabled so the
# client tolerates minor h2o version mismatches — confirm these limits suit the
# deployment.
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
def predict_churn(State,AccountLength,AreaCode,Phone,IntlPlan,VMailPlan,VMailMessage,DayMins,DayCalls,DayCharge,EveMins,EveCalls,EveCharge,NightMins,NightCalls,NightCharge,IntlMins,IntlCalls,IntlCharge,CustServCalls):
    """Score one customer record with the saved AutoML leader model.

    Connects to the running H2O scoring service, loads the persisted model,
    builds a single-row frame from the call-record features, and returns a
    human-readable string with the predicted class and both class
    probabilities.
    """
    # connect to the model scoring service started at import time
    h2o.connect()
    # open the downloaded model
    churn_model = h2o.load_model(path='AutoML-leader')
    # single-row feature frame; column names must match the training data
    features = pd.DataFrame({'State': State,
                             'Account Length': AccountLength,
                             'Area Code': AreaCode,
                             'Phone': Phone,
                             'Int\'l Plan': IntlPlan,
                             'VMail Plan': VMailPlan,
                             'VMail Message': VMailMessage,
                             'Day Mins': DayMins,
                             'Day Calls': DayCalls,
                             'Day Charge': DayCharge,
                             'Eve Mins': EveMins,
                             'Eve Calls': EveCalls,
                             'Eve Charge': EveCharge,
                             'Night Mins': NightMins,
                             'Night Calls': NightCalls,
                             'Night Charge': NightCharge,
                             'Intl Mins': IntlMins,
                             'Intl Calls': IntlCalls,
                             'Intl Charge': IntlCharge,
                             'CustServ Calls': CustServCalls}, index=[0])
    # evaluate the feature vector using the model
    scored = churn_model.predict(h2o.H2OFrame(features))
    rows = h2o.as_list(scored, use_pandas=False)
    prediction = rows[1][0]
    probability_churn = rows[1][1]
    probability_retain = rows[1][2]
    return ("Prediction: " + str(prediction)
            + " |Probability to Churn: " + str(probability_churn)
            + " |Probability to Retain: " + str(probability_retain))
3533694 | <filename>util/cmd_loader.py
# -*- coding: UTF-8 -*-
import importlib
import os
from command import Cmd
from util import CmdLoader
def py_file_to_class_name(py_file):
    """Convert a python file name to its expected CamelCase class name.

    Underscores are dropped and the character that follows each one (as well
    as the very first character) is upper-cased; everything from the first
    '.' on is ignored.  E.g. ``cmd_loader.py`` -> ``CmdLoader``.

    :param py_file: python file name (not a path)
    :return: CamelCase class name
    """
    stem = py_file.split('.', 1)[0]
    pieces = []
    for part in stem.split('_'):
        if part:
            # Upper-case only the first character; preserve the rest as-is.
            pieces.append(part[0].upper() + part[1:])
    return ''.join(pieces)
class ModuleFileCmdLoader(CmdLoader):
    """CmdLoader implementation based on dynamic loading of python module files.

    Given a directory, a file-suffix filter and a package name, it imports each
    matching module and instantiates the single Cmd implementation class the
    file must contain.  The class name must equal the file name converted to
    CamelCase (underscores dropped, first letters capitalized).

    Note: the directory must correspond to a python package inside the current
    project; loading from an arbitrary filesystem location is not supported.
    """
    def __init__(self):
        self.__cmd_list = []             # loaded Cmd instances
        self.__cmd_dir = ''              # absolute path scanned for modules
        self.__module_name = 'command'   # package the modules belong to
        self.__file_suffix = '_cmd.py'   # only files with this suffix load

    def __find_cmd_py(self):
        """
        Scan the configured directory and return the matching py file names.
        :return: list of file names, never None (empty when nothing matches)
        """
        files = []
        for path in os.listdir(self.__cmd_dir):
            full_path = os.path.join(self.__cmd_dir, path)
            if path.endswith(self.__file_suffix) and os.path.isfile(full_path):
                files.append(path)
        return files

    def __load_module(self, py_file):
        """
        Import the module for *py_file* relative to the configured package.
        :param py_file: py file name
        :return: the imported module object
        """
        if py_file.endswith('.py'):
            # strip the '.py' extension before importing
            py_file = py_file[:len(py_file) - 3]
        return importlib.import_module('.' + py_file, self.__module_name)

    def __new_cmd_instance(self, py_file):
        """
        Load the class defined in *py_file* and create an instance of it.
        :return: the new instance, or None when the module/class is missing
        """
        module = self.__load_module(py_file)
        if not module:
            return None
        class_name = py_file_to_class_name(py_file)
        clazz = getattr(module, class_name)
        if clazz:
            return clazz()
        return None

    def set_cmd_dir(self, cmd_dir):
        """
        Set the absolute directory whose py files will be scanned.  Note that
        subdirectories are NOT scanned.
        :param cmd_dir: absolute path of the directory to load from
        :return: self, to allow chained calls
        """
        if cmd_dir and isinstance(cmd_dir, str):
            self.__cmd_dir = cmd_dir
        return self

    def set_module_name(self, module_name):
        """
        Set the package name corresponding to the scanned directory; use '.'
        to separate sub-packages.  Defaults to 'command'.
        :param module_name: package name
        :return: self, to allow chained calls
        """
        if module_name and isinstance(module_name, str):
            self.__module_name = module_name
        return self

    def set_file_suffix(self, file_suffix):
        """
        Set the file-name suffix filter; files without this suffix are
        ignored.  Defaults to '_cmd.py'.
        :param file_suffix: suffix of files to keep
        :return: self, to allow chained calls
        """
        if file_suffix and isinstance(file_suffix, str):
            self.__file_suffix = file_suffix
        return self

    def load(self):
        """
        Load the command implementation classes.  Finding no matching files
        ends the method early without raising; a file that is found but fails
        to load raises the underlying exception.
        NOTE(review): the early return yields None instead of self, which
        breaks chaining in that case — confirm whether that is intended.
        :return: self, to allow chained calls
        """
        files = self.__find_cmd_py()
        if not files or len(files) == 0:
            return
        for f in files:
            cmd_inst = self.__new_cmd_instance(f)
            if cmd_inst:
                self.__cmd_list.append(cmd_inst)
        return self

    def cmd_instances(self):
        """
        Get all loaded Cmd instances.
        :return: list of instances; empty when loading failed or found nothing
        """
        return self.__cmd_list
class SimpleCmdLoader(CmdLoader):
    """Trivial CmdLoader implementation: Cmd instances are registered by hand.

    Call add() once per command object; load() is a no-op kept only to satisfy
    the CmdLoader interface.
    """

    def __init__(self):
        self.__instances = []

    def add(self, cmd: Cmd):
        """
        Register a Cmd implementation instance.  Do not add the same object
        more than once.
        :param cmd: Cmd instance to register
        :return: self, to allow chained calls
        """
        self.__instances.append(cmd)
        return self

    def load(self):
        """
        No-op; present only for interface compatibility.
        :return: self, to allow chained calls
        """
        return self

    def cmd_instances(self):
        """
        Get every instance registered via add().
        :return: list of instances; empty when add() was never called
        """
        return self.__instances
| StarcoderdataPython |
8055840 | import spacy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster.vq import kmeans
from collections import Counter
def form_unique_text_lemmas(file_name, nlp):
    """Tokenize the UTF-16 text file at *file_name* and count qualifying tokens.

    A token qualifies when it is alphabetic, not a stop word, and its lemma is
    longer than two characters.  Returns a dict mapping each qualifying
    token's lower-cased surface form to its number of occurrences.

    NOTE(review): despite the function name, the counted keys are
    ``token.lower_`` (surface forms), not lemmas — confirm that is intended.
    """
    min_len = 2
    with open(file_name, 'r', encoding='utf-16') as file:
        text = file.read()
    doc = nlp(text)
    valid_tokens = [
        token.lower_ for token in doc
        if token.is_alpha and not token.is_stop and len(token.lemma_) > min_len
    ]
    # A Counter already maps element -> count; building it once replaces the
    # original double construction via dict(zip(Counter(...).keys(),
    # Counter(...).values())), which tokenized the list twice.
    return dict(Counter(valid_tokens))
def get_bag_of_words(file_names, nlp):
    """Merge the per-file token counts of *file_names* into one combined bag.

    :param file_names: iterable of paths handed to form_unique_text_lemmas
    :param nlp: spaCy-style pipeline used to tokenize each file
    :return: dict mapping token -> total occurrences across all files
    """
    bag_of_words = Counter()
    for file_name in file_names:
        # Counter.update adds counts for keys that appear in several files,
        # replacing the original manual "create-or-increment" merge loop.
        bag_of_words.update(form_unique_text_lemmas(file_name, nlp))
    return dict(bag_of_words)
def filter_bags_of_words(bags_of_words):
    """Resolve words shared between bags: each word survives only in the bag
    where it is most frequent (earlier bags win ties).

    Fixes two defects of the original implementation:
    * ``list.copy()`` was a shallow copy, so deletions mutated the caller's
      dicts; each inner dict is now copied as well.
    * bags were compared with ``!=`` (value equality); identity (``is not``)
      is used so a bag never deletes from itself, even when two distinct bags
      happen to be value-equal.

    :param bags_of_words: list of dicts mapping word -> count (left untouched)
    :return: new list of new dicts with cross-bag duplicates removed
    """
    filtered_bags_of_words = [dict(bag) for bag in bags_of_words]
    for bag_of_words in filtered_bags_of_words:
        for key in bag_of_words:
            for another_bag_of_words in filtered_bags_of_words:
                if key in another_bag_of_words:
                    if (bag_of_words[key] >= another_bag_of_words[key]
                            and bag_of_words is not another_bag_of_words):
                        del another_bag_of_words[key]
    return filtered_bags_of_words
def form_thesauruses(bags_of_words, min_occurence):
    """Build one thesaurus per bag, keeping only the words that occur at
    least *min_occurence* times after cross-bag filtering."""
    filtered_bags = filter_bags_of_words(bags_of_words)
    return [
        {word: count for word, count in bag.items() if count >= min_occurence}
        for bag in filtered_bags
    ]
def vectorize_files(thesauruses, file_names, nlp):
    """Represent each file as a count vector over the category thesauruses.

    vector[i] counts how many tokens of the file appear in thesauruses[i];
    one vector is produced per file, in file order.
    """
    vectors = []
    thesauruses_amount = len(thesauruses)
    for file_name in file_names:
        vector = [.0] * thesauruses_amount
        with open(file_name, 'r', encoding='utf-16') as file:
            text = file.read()
        doc = nlp(text)
        for token in doc:
            for i in range(thesauruses_amount):
                # membership test against the thesaurus dict's keys
                if token.lower_ in thesauruses[i]:
                    vector[i] += 1.0
        vectors.append(vector)
        # NOTE(review): debug print left in — consider removing for library use.
        print(vector)
    return vectors
def plot_results(vectors, texts_per_category, centroids):
    """3-D scatter plot of document vectors colored per category, with the
    k-means centroids marked as black crosses.

    Assumes vectors are 3-dimensional and grouped in blocks of
    *texts_per_category* per category (at most 3 categories — see `colors`).
    """
    ox = [vector[0] for vector in vectors]
    oy = [vector[1] for vector in vectors]
    oz = [vector[2] for vector in vectors]
    colors = ['r', 'g', 'b']
    n = int(len(vectors) / texts_per_category)  # number of category groups
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in range(n):
        offset = i * texts_per_category
        temp_x = ox[offset: offset + texts_per_category]
        temp_y = oy[offset: offset + texts_per_category]
        temp_z = oz[offset: offset + texts_per_category]
        ax.scatter(temp_x, temp_y, temp_z, c=colors[i], marker='o')
        # NOTE(review): pairs centroid i with category i, but kmeans does not
        # guarantee centroid ordering — confirm this association is acceptable.
        ax.scatter(
            centroids[i][0],
            centroids[i][1],
            centroids[i][2],
            c='k', marker='x'
        )
    ax.set_xlabel('Maths')
    ax.set_ylabel('Chemistry')
    ax.set_zlabel('Medicine')
    plt.show()
def main():
    """Build per-category thesauruses from the training texts, vectorize the
    test texts against them, run k-means, and plot the clusters."""
    file_names = [
        'maths_1.txt',
        'maths_2.txt',
        'maths_3.txt',
        'medicine_1.txt',
        'medicine_2.txt',
        'medicine_3.txt',
        'chemistry_1.txt',
        'chemistry_2.txt',
        'chemistry_3.txt'
    ]
    training_texts = ['training/' + file_name for file_name in file_names]
    test_texts = ['test/' + file_name for file_name in file_names]
    min_occurence = 3
    nlp = spacy.load('en_core_web_sm')
    maths_bag_of_words = get_bag_of_words(training_texts[:3], nlp)
    # BUG FIX: file_names[3:6] are the medicine texts and [6:9] the chemistry
    # texts; the original assigned them to the swapped category variables,
    # mislabelling two of the three thesauruses (and the plot axes).
    medicine_bag_of_words = get_bag_of_words(training_texts[3:6], nlp)
    chemistry_bag_of_words = get_bag_of_words(training_texts[6:9], nlp)
    thesauruses = form_thesauruses(
        [
            maths_bag_of_words,
            chemistry_bag_of_words,
            medicine_bag_of_words
        ],
        min_occurence
    )
    vectors = vectorize_files(thesauruses, test_texts, nlp)
    centroids, _ = kmeans(np.array(vectors), 3)
    plot_results(vectors, 3, centroids)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8131831 | from lxml import etree
def xml_to_string(xml_object):
    """Serialize an lxml element (or tree) and return it as a text string."""
    serialized = etree.tostring(xml_object)
    return serialized.decode()
| StarcoderdataPython |
8049437 | <gh_stars>10-100
#iris_save_load_predict_gridmodel.py
import joblib
from pandas import read_csv
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV,RandomizedSearchCV
from sklearn.datasets import load_iris
from sklearn.svm import SVC
# Load the Iris dataset; the CSV has no header so column names are supplied.
data_file = "iris.data"
iris_names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
df = read_csv(data_file, names=iris_names)
# Features are the four measurements; the label column is 'class'.
X = df.drop('class', axis =1)
y = df['class']
x_train, x_test, y_train, y_test \
    = train_test_split(X, y, test_size=0.20,
                       random_state=1, shuffle=True)
# Grid over SVM regularization strength (C) and RBF kernel width (gamma),
# selected by 5-fold cross-validation.
params = {"C":[0.001, 0.01, 1, 5, 10, 100],
          "gamma": [0.001, 0.01, 0.1, 1, 10, 100]}
model=SVC()
grid_cv=GridSearchCV(model,params, cv=5)
grid_cv.fit(x_train,y_train)
# Persist the best estimator, then reload it to demonstrate round-tripping.
joblib.dump(grid_cv.best_estimator_, "model.joblib")
loaded_model = joblib.load("model.joblib")
# Score one unseen flower (sepal-length, sepal-width, petal-length, petal-width).
x_new = [[5.6, 2.5, 3.9, 1.1]]
y_new = loaded_model.predict(x_new)
print("X=%s, Predicted=%s" % (x_new[0], y_new[0]))
4931039 | <gh_stars>10-100
import unittest
from mock import patch, Mock, call
from textmagic.rest.models import Users, MessagingStats, SpendingStats, Utils, Invoices
class TestUser(unittest.TestCase):
    """Unit tests for the Users resource wrapper (request routing / payloads)."""

    def setUp(self):
        self.resource = Users("uri", ("username", "token"))

    def test_get(self):
        # get() should issue a GET on the resource URI and hydrate the JSON body.
        with patch.object(self.resource, "request") as mock_request:
            with patch.object(self.resource, "load_instance") as mock_load:
                mock_request.return_value = (Mock(), "{json instance}")
                self.resource.get()
                mock_request.assert_called_with("GET", self.resource.uri)
                mock_load.assert_called_with("{json instance}")

    def test_update(self):
        # update() should send a PUT carrying the profile fields and report success.
        with patch.object(self.resource, "request") as mock:
            mock.return_value = (Mock(status=201), "{json instance}")
            result = self.resource.update(
                firstName="John",
                lastName="Doe",
                company="Company",
                timezone=1,
            )
            mock.assert_called_with("PUT", self.resource.uri, data={
                "firstName": "John",
                "lastName": "Doe",
                "company": "Company",
                "timezone": 1,
            })
            self.assertTrue(result)
class TestMessagingStat(unittest.TestCase):
    """Unit tests for the MessagingStats resource (grouped/date-filtered list)."""

    def setUp(self):
        self.resource = MessagingStats("uri", ("username", "token"))

    def test_list(self):
        # list() should GET with the query params and hydrate every returned item.
        with patch.object(self.resource, "request") as mock_request:
            with patch.object(self.resource, "load_instance") as mock_load:
                mock_request.return_value = (Mock(), ["a", "b", "c"])
                self.resource.list(
                    by="off",
                    start="start",
                    end="end",
                )
                mock_request.assert_called_with("GET", self.resource.uri, params={
                    "by": "off",
                    "start": "start",
                    "end": "end",
                })
                # one load_instance call per element, in order
                calls = [call("a"), call("b"), call("c")]
                mock_load.assert_has_calls(calls)
                assert mock_load.call_count == 3
class TestSpendingStat(unittest.TestCase):
    """Unit tests for the SpendingStats resource (paged, date-filtered list)."""

    def setUp(self):
        self.resource = SpendingStats("uri", ("username", "token"))

    def test_list(self):
        # list() should delegate to get_instances with pagination + date filters.
        with patch.object(self.resource, "get_instances") as mock:
            self.resource.list(
                limit=10,
                page=2,
                start="start",
                end="end"
            )
            mock.assert_called_with({
                "limit": 10,
                "page": 2,
                "start": "start",
                "end": "end",
                "search": False,
            })
class TestInvoices(unittest.TestCase):
    """Unit tests for the Invoices resource (paged listing)."""

    def setUp(self):
        self.resource = Invoices("uri", ("username", "token"))

    def test_list(self):
        # list() should delegate to get_instances with the pagination params.
        with patch.object(self.resource, "get_instances") as mock:
            self.resource.list(
                page=3,
                limit=100,
            )
            mock.assert_called_with({
                "limit": 100,
                "page": 3,
                "search": False
            })
class TestUtil(unittest.TestCase):
    """Unit tests for the Utils resource (service ping)."""

    def setUp(self):
        self.resource = Utils("uri", ("username", "token"))

    def test_ping(self):
        # ping() should GET <base_uri>/ping and return the response payload.
        with patch.object(self.resource, "request") as mock:
            mock.return_value = (Mock(), "data")
            result = self.resource.ping()
            mock.assert_called_with("GET", "%s/%s" % (self.resource.base_uri, "ping"))
            assert result == "data"
6584918 | <gh_stars>0
try:
from fields import field_classes as fc
from selection import concrete_expression as ce
except ImportError:
from . import field_classes as fc
from ..selection import concrete_expression as ce
def test_add_fields():
    """Check that `+` on fields builds the same group as fc.group()."""
    field_a = fc.field('a', int)
    field_b = fc.field('b', float)
    expected0 = fc.group(field_a, field_b)
    received0 = field_a + field_b
    assert received0 == expected0, '{} != {}'.format(received0, expected0)
    field_c = fc.field('c', str)
    expected1 = fc.group(field_a, field_b, field_c)
    # BUG FIX: the original also built `received1` with fc.group(...), making
    # the assertion compare two identical constructions; the chained `+`
    # operator is what this second case is meant to exercise.
    received1 = field_a + field_b + field_c
    assert received1 == expected1, '{} != {}'.format(received1, expected1)
def test_transfer_selection():
    # A trivial description of 'field_a' targeting Record items should expose
    # the field's declared type from the schema and pull the raw value out of
    # a record dict.
    expression_a = ce.TrivialDescription('field_a', target_item_type=ce.it.ItemType.Record)
    schema = fc.group(fc.field('field_a', float), fc.field('field_b', bool))
    assert expression_a.get_target_item_type() == ce.it.ItemType.Record
    assert expression_a.get_dict_output_field_types(schema) == {'field_a': fc.FieldType.Float}
    assert expression_a.get_value_from_item(dict(field_a=1.1, field_b=True)) == 1.1
def main():
    """Run this module's test functions when executed as a script."""
    test_add_fields()
    test_transfer_selection()


# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1805002 | """timvt.endpoints.factory: router factories."""
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Optional, Type
from buildpg.asyncpg import BuildPgPool
from morecantile import TileMatrixSet
from timvt.db.tiles import VectorTileReader
from timvt.dependencies import (
TableParams,
TileMatrixSetNames,
TileMatrixSetParams,
_get_db_pool,
)
from timvt.models.mapbox import TileJSON
from timvt.models.metadata import TableMetadata
from timvt.models.OGC import TileMatrixSetList
from timvt.resources.enums import MimeTypes
from fastapi import APIRouter, Depends, Path, Query
from starlette.requests import Request
from starlette.responses import Response
# Shared route kwargs: raw Response body with a protobuf content-type,
# used by every vector-tile endpoint below.
TILE_RESPONSE_PARAMS: Dict[str, Any] = {
    "responses": {200: {"content": {"application/x-protobuf": {}}}},
    "response_class": Response,
}
# ref: https://github.com/python/mypy/issues/5374
@dataclass  # type: ignore
class VectorTilerFactory:
    """VectorTiler Factory.

    Bundles the tile and tilejson endpoints onto one APIRouter, with every
    collaborator (reader, TMS/table/db-pool dependencies) swappable via the
    dataclass fields.
    """

    reader: Type[VectorTileReader] = field(default=VectorTileReader)
    # FastAPI router
    router: APIRouter = field(default_factory=APIRouter)
    # TileMatrixSet dependency
    tms_dependency: Callable[..., TileMatrixSet] = TileMatrixSetParams
    # Table dependency
    table_dependency: Callable[..., TableMetadata] = TableParams
    # Database pool dependency
    db_pool_dependency: Callable[..., BuildPgPool] = _get_db_pool
    # Router Prefix is needed to find the path for routes when prefixed
    # e.g if you mount the route with `/foo` prefix, set router_prefix to foo
    router_prefix: str = ""

    def __post_init__(self):
        """Post Init: register route and configure specific options."""
        self.register_routes()

    def register_routes(self):
        """Register Tiler Routes."""
        self.tile()
        self.tilejson()

    def url_for(self, request: Request, name: str, **path_params: Any) -> str:
        """Return full url (with prefix) for a specific endpoint."""
        url_path = self.router.url_path_for(name, **path_params)
        base_url = str(request.base_url)
        if self.router_prefix:
            base_url += self.router_prefix.lstrip("/")
        return url_path.make_absolute_url(base_url=base_url)

    ############################################################################
    # /tiles
    ############################################################################
    def tile(self):
        """Register /tiles endpoints."""

        @self.router.get("/tiles/{table}/{z}/{x}/{y}.pbf", **TILE_RESPONSE_PARAMS)
        @self.router.get(
            "/tiles/{TileMatrixSetId}/{table}/{z}/{x}/{y}.pbf", **TILE_RESPONSE_PARAMS
        )
        async def tile(
            z: int = Path(..., ge=0, le=30, description="Mercator tiles's zoom level"),
            x: int = Path(..., description="Mercator tiles's column"),
            y: int = Path(..., description="Mercator tiles's row"),
            tms: TileMatrixSet = Depends(self.tms_dependency),
            # NOTE(review): annotation is the dependency class; presumably it
            # should be TableMetadata (as in tilejson below) — confirm.
            table: TableParams = Depends(self.table_dependency),
            db_pool: BuildPgPool = Depends(self.db_pool_dependency),
            columns: str = None,
        ):
            """Return vector tile."""
            reader = self.reader(db_pool, table=table, tms=tms)
            content = await reader.tile(x, y, z, columns=columns)
            return Response(content, media_type=MimeTypes.pbf.value)

    def tilejson(self):
        """Register tilejson endpoints."""

        @self.router.get(
            "/{table}.json",
            response_model=TileJSON,
            responses={200: {"description": "Return a tilejson"}},
            response_model_exclude_none=True,
        )
        @self.router.get(
            "/{TileMatrixSetId}/{table}.json",
            response_model=TileJSON,
            responses={200: {"description": "Return a tilejson"}},
            response_model_exclude_none=True,
        )
        async def tilejson(
            request: Request,
            table: TableMetadata = Depends(self.table_dependency),
            tms: TileMatrixSet = Depends(self.tms_dependency),
            minzoom: Optional[int] = Query(
                None, description="Overwrite default minzoom."
            ),
            maxzoom: Optional[int] = Query(
                None, description="Overwrite default maxzoom."
            ),
        ):
            """Return TileJSON document."""
            kwargs = {
                "TileMatrixSetId": tms.identifier,
                "table": table.id,
                "z": "{z}",
                "x": "{x}",
                "y": "{y}",
            }
            tile_endpoint = self.url_for(request, "tile", **kwargs).replace("\\", "")
            # explicit query params win, then table metadata, then TMS defaults
            minzoom = minzoom if minzoom is not None else (table.minzoom or tms.minzoom)
            maxzoom = maxzoom if maxzoom is not None else (table.maxzoom or tms.maxzoom)
            return {
                "minzoom": minzoom,
                "maxzoom": maxzoom,
                "name": table.id,
                "bounds": table.bounds,
                "tiles": [tile_endpoint],
            }
@dataclass
class TMSFactory:
    """TileMatrixSet endpoints Factory."""

    # Enum of supported TMS
    supported_tms: Type[TileMatrixSetNames] = TileMatrixSetNames

    # TileMatrixSet dependency
    tms_dependency: Callable[..., TileMatrixSet] = TileMatrixSetParams

    # FastAPI router
    router: APIRouter = field(default_factory=APIRouter)

    # Router Prefix is needed to find the path for /tile if the TilerFactory.router is mounted
    # with other router (multiple `.../tile` routes).
    # e.g if you mount the route with `/cog` prefix, set router_prefix to cog and
    router_prefix: str = ""

    def __post_init__(self):
        """Post Init: register route and configure specific options."""
        self.register_routes()

    def url_for(self, request: Request, name: str, **path_params: Any) -> str:
        """Return full url (with prefix) for a specific endpoint."""
        url_path = self.router.url_path_for(name, **path_params)
        base_url = str(request.base_url)
        if self.router_prefix:
            # Append the mount prefix so generated links resolve correctly.
            base_url += self.router_prefix.lstrip("/")
        return url_path.make_absolute_url(base_url=base_url)

    def register_routes(self):
        """Register TMS endpoint routes."""

        @self.router.get(
            r"/tileMatrixSets",
            response_model=TileMatrixSetList,
            response_model_exclude_none=True,
        )
        async def TileMatrixSet_list(request: Request):
            """
            Return list of supported TileMatrixSets.

            Specs: http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets
            """
            # One entry per supported TMS, each linking to its info endpoint.
            return {
                "tileMatrixSets": [
                    {
                        "id": tms.name,
                        "title": tms.name,
                        "links": [
                            {
                                "href": self.url_for(
                                    request,
                                    "TileMatrixSet_info",
                                    TileMatrixSetId=tms.name,
                                ),
                                "rel": "item",
                                "type": "application/json",
                            }
                        ],
                    }
                    for tms in self.supported_tms
                ]
            }

        @self.router.get(
            r"/tileMatrixSets/{TileMatrixSetId}",
            response_model=TileMatrixSet,
            response_model_exclude_none=True,
        )
        async def TileMatrixSet_info(tms: TileMatrixSet = Depends(self.tms_dependency)):
            """Return TileMatrixSet JSON document."""
            return tms
| StarcoderdataPython |
1665917 | <gh_stars>0
from collections import deque
def read_file(test = True):
    """Load the puzzle input: one integer per line.

    When *test* is true the small fixture file is used, otherwise the
    real puzzle input.
    """
    path = '../tests/day1.txt' if test else '../input/day1.txt'
    with open(path) as handle:
        return [int(row.strip()) for row in handle]
def puzzle1(test = True):
    """Count how many measurements are larger than the previous one."""
    depths = read_file(test)
    # Pair each depth with its successor and count the increases.
    return sum(1 for before, after in zip(depths, depths[1:]) if after > before)
def puzzle2(test = True):
    """Count increases between sums of consecutive 3-measurement windows."""
    depths = read_file(test)
    # Adjacent windows share two terms, so
    # sum(depths[i-2:i+1]) > sum(depths[i-3:i]) iff depths[i] > depths[i-3].
    return sum(1 for i in range(3, len(depths)) if depths[i] > depths[i - 3])
# Script entry point: solve both parts against the real puzzle input.
print(puzzle1(False))
print(puzzle2(False))
| StarcoderdataPython |
9702764 |
def uninstall_base(context):
    """Drop the recorded 'install-base' upgrade step so the profile can be
    reinstalled without the setup tool considering it already applied."""
    setup_tool = context.getSite().portal_setup
    upgrades = setup_tool._profile_upgrade_versions
    profile_id = u'collective.talkflow:install-base'
    if profile_id in upgrades:
        # make safe for reinstall...
        del upgrades[profile_id]
class Scene:
    """A renderable scene: simply a collection of SceneObject instances."""

    def __init__(self):
        # Populated externally, e.g. scene.objects.append(sphere).
        self.objects = []
class SceneObject:
    """Base class for anything placed in a Scene.

    Attributes:
        position: Position in 3D space.
        color: Color in 3 channels using float 0 to 1.
    """

    def __init__(self, pos, color):
        """
        Object that belongs to a scene

        Args:
            pos(ndarray): Position in 3D space
            color(ndarray): Color in 3 channels using float 0 to 1
        """
        self.position = pos
        self.color = color

    def normal_at(self, p):
        """
        Get the normal at point p

        Args:
            p(ndarray): A point in the surface of the object

        Returns:
            ndarray: The normal at the given point

        Raises:
            NotImplementedError: subclasses must provide the
                geometry-specific normal.
        """
        # Previously this silently returned None, which would surface later
        # as a confusing error in shading code; failing loudly makes a
        # missing override immediately diagnosable.
        raise NotImplementedError("normal_at must be implemented by subclasses")
class Sphere(SceneObject):
    """Sphere described by its centre (position) and radius."""

    def __init__(self, pos, color, radius):
        super().__init__(pos, color)
        self.radius = radius

    def normal_at(self, p):
        # On the surface |p - centre| == radius, so dividing by the radius
        # yields a unit-length outward normal.
        return (p - self.position) / self.radius
class Plane(SceneObject):
    """Infinite plane defined by a point on it and a normal vector."""

    def __init__(self, pos, color, n):
        """
        Object that belongs to a scene

        Args:
            pos(ndarray): Position in 3D space
            color(ndarray): Color in 3 channels using float 0 to 1
            n(ndarray): Normal of the plane as a 3D vector
        """
        super().__init__(pos, color)
        self.n = n

    def normal_at(self, p):
        # A plane has the same normal everywhere, regardless of p.
        return self.n
| StarcoderdataPython |
5009508 | <filename>learn-py/regex/regex3.py
# Meta characters: ^ $ ( )
# * 0 or more
# + 1 or more, equivalent to {1,}
# ? 0 or 1
# {n}
# {min, max}
# {10,}  10 or more
# {,10}  zero to 10
# {10}   exactly 10
# {5,10} 5 to 10
# ()+ [a-zA-Z0-9]+
import re

# Sample Portuguese text used to exercise the quantifier patterns below.
texto = '''
João trouxe flores para sua amada namorada em 10 de janeiro de 1970,
Maria era o nome dela.
Foi um ano excelente na vida de joão. Teve 5 filhos, todos adultos atualmente.
maria, hoje sua esposa, ainda faz aquele café com pão de queijo nas tardes de
domingo. Também né! Sendo a boa mineira que é, nunca esquece seu famoso
pão de queijo.
Não canso de ouvir a Maria:
"Joooooooooãooooooo, o café tá prontinho aqui. Veeemm veeem veem vem"!
Jã
'''
# Match "João" with elongated vowels, case-insensitively ([o]+ / o{1,}).
print(re.findall(r'j[o]+ão+', texto, flags=re.I))
print(re.findall(r'jo{1,}ão{1,}', texto, flags=re.I))
# Exactly three 'e's followed by one or two 'm's ("veeem"/"veeemm").
print(re.findall(r've{3}m{1,2}', texto, flags=re.I))
# print(re.sub(r'jo{1,}ão{1,}', 'Felipe', texto, flags=re.I))

texto2 = 'João ama ser amado'
# "ama" optionally followed by up to two of 'o'/'d' ("ama", "amado").
print(re.findall(r'ama[od]{0,2}', texto2, flags=re.I))
3580864 | <filename>supriya/ugens/InRange.py
import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class InRange(UGen):
    """
    Tests if a signal is within a given range.

    ::

        >>> source = supriya.ugens.SinOsc.ar()
        >>> in_range = supriya.ugens.InRange.ar(
        ...     maximum=0.9,
        ...     minimum=0.1,
        ...     source=source,
        ...     )
        >>> in_range
        InRange.ar()

    """

    ### CLASS VARIABLES ###

    __documentation_section__ = "Trigger Utility UGens"

    # Ordered inputs define the UGen's positional argument layout and the
    # default value of each input.
    _ordered_input_names = collections.OrderedDict(
        [("source", 0), ("minimum", 0), ("maximum", 1)]
    )

    # This UGen may be instantiated at audio, control, or scalar rate.
    _valid_calculation_rates = (
        CalculationRate.AUDIO,
        CalculationRate.CONTROL,
        CalculationRate.SCALAR,
    )
| StarcoderdataPython |
9668621 | import sys
import os
# Reference snippet: PowerShell command that copies the profile file.
ps = """
cp `$PROFILE` ''
"""

# Everything after this marker in the profile is machine-local and not synced.
END_SYNC_SIGNAL = "# Do not Sync Below #"
# Lines containing this marker are synced using only the text after it.
REPLACE_SYNC_SIGNAL = "# Used to Sync:"
def syncWindowsProfile(save_path = '../windows/profile.ps1'):
    """Export the local PowerShell profile to *save_path*, honouring the
    END_SYNC_SIGNAL / REPLACE_SYNC_SIGNAL markers."""
    # Ask PowerShell for the profile location; strip the trailing newline.
    profile_path = os.popen("powershell echo $PROFILE").read()[:-1]
    print(f"Powershell Profile location: {profile_path}")
    with open(profile_path, 'r') as source, open(save_path, 'w') as target:
        for line in source:
            # Stop copying once the machine-local section begins.
            if line.startswith(END_SYNC_SIGNAL):
                break
            # Keep only the text after the sync marker, when present.
            if REPLACE_SYNC_SIGNAL in line:
                line = line.split(REPLACE_SYNC_SIGNAL)[-1]
            target.write(line)
if __name__ == "__main__":
    # Export the local PowerShell profile to the repo's default location.
    syncWindowsProfile()
| StarcoderdataPython |
3536568 | <reponame>chasebk/code_ISLO_ELM<gh_stars>1-10
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 10:23, 03/08/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from time import time
from pandas import read_csv
from permetrics.regression import Metrics
from keras.models import Sequential
from keras.layers import Dense, GRU
from utils.io_util import save_to_csv_dict, save_to_csv, save_results_to_csv
from utils.visual_util import draw_predict
from utils.timeseries_util import *
from config import Config, Exp
from numpy import reshape
import os
import platform
import tensorflow as tf
tf.config.threading.set_intra_op_parallelism_threads(2) # matrix multiplication and reductions
tf.config.threading.set_inter_op_parallelism_threads(2) # number of threads used by independent non-blocking operations
if platform.system() == "Linux": # Linux: "Linux", Mac: "Darwin", Windows: "Windows"
os.sched_setaffinity(0, {3})
def reshape_3d(data):
    """Append a singleton feature axis: (samples, steps) -> (samples, steps, 1),
    the input layout Keras recurrent layers expect."""
    rows, cols = data.shape[0], data.shape[1]
    return data.reshape(rows, cols, 1)
def fit_model(train, batch_size, nb_epoch, neurons, verbose=2):
    """Train a single-layer GRU regressor on supervised rows.

    *train* holds lag features in all columns but the last, which is the
    target. Returns the fitted model and the Keras training history.
    """
    features, targets = train[:, 0:-1], train[:, -1]
    # GRU encoder followed by a single-unit regression head.
    net = Sequential()
    net.add(GRU(neurons, input_shape=(None, 1), activation="relu"))
    net.add(Dense(units=1, activation="elu"))
    net.compile(loss="mean_squared_error", optimizer="adam")
    # shuffle=False keeps the temporal ordering of the series.
    history = net.fit(reshape_3d(features), targets, epochs=nb_epoch,
                      batch_size=batch_size, verbose=verbose, shuffle=False)
    return net, history
# run a repeated experiment
def experiment(trials, datadict, series, epochs, neurons, verbose):
    """Run *trials* train/evaluate cycles of the GRU forecaster on one series.

    Each trial trains a model on the differenced, scaled series, forecasts the
    test split, and writes predictions, training loss, metrics and a figure
    under the configured results directory.
    """
    time_prepare = time()
    lag = datadict["lags"]
    test_size = int(datadict["test_percent"] * len(series.values))
    batch_size = datadict["batch_size"]
    # transform data to be stationary
    raw_values = series.values
    diff_values = difference(raw_values, 1)
    # transform data to be supervised learning
    supervised = timeseries_to_supervised(diff_values, lag)
    supervised_values = supervised.values[lag:, :]
    # split data into train and test-sets
    train, test = supervised_values[0:-test_size], supervised_values[-test_size:]
    # transform the scale of the data
    scaler, train_scaled, test_scaled = scale(train, test)
    time_prepare = time() - time_prepare
    # run experiment
    for trial in range(trials):
        time_train_test = time()
        # fit the model
        time_train = time()
        train_trimmed = train_scaled[2:, :]
        model, loss = fit_model(train_trimmed, batch_size, epochs, neurons, verbose)
        time_train = time() - time_train
        # forecast test dataset
        test_reshaped = test_scaled[:, 0:-1]
        output = model.predict(reshape_3d(test_reshaped), batch_size=batch_size)
        test_pred = list()
        for i in range(len(output)):
            yhat = output[i, 0]
            X = test_scaled[i, 0:-1]
            # invert scaling
            yhat = invert_scale(scaler, X, yhat)
            # invert differencing
            yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
            # store forecast
            test_pred.append(yhat)
        test_true = array([raw_values[-test_size:]]).flatten()
        test_pred = array(test_pred).flatten()
        loss_train = loss.history["loss"]
        time_train_test = time() - time_train_test
        time_total = time_train_test + time_prepare
        ## Saving results
        # 1. Create path to save results
        path_general = f"{Config.DATA_RESULTS}/{datadict['dataname']}/{lag}-{datadict['test_percent']}-{trial}"
        filename = f"GRU-{neurons}-{epochs}-{batch_size}"
        # NOTE(review): the literal "(unknown)" in the output names below looks
        # like a redacted placeholder -- probably meant f"...-{filename}". Confirm.
        # 2. Saving performance of test set
        data = {"true": test_true, "predict": test_pred}
        save_to_csv_dict(data, f"predict-(unknown)", f"{path_general}/{Config.FOL_RES_MODEL}")
        # 3. Save loss train to csv file
        data = [list(range(1, len(loss_train) + 1)), loss_train]
        header = ["Epoch", "MSE"]
        save_to_csv(data, header, f"loss-(unknown)", f"{path_general}/{Config.FOL_RES_MODEL}")
        # 4. Calculate performance metrics and save it to csv file
        RM1 = Metrics(test_true, test_pred)
        list_paras = len(Config.METRICS_TEST_PHASE) * [{"decimal": 3}]
        mm1 = RM1.get_metrics_by_list_names(Config.METRICS_TEST_PHASE, list_paras)
        item = {'filename': filename, 'time_train': time_train, 'time_total': time_total}
        for metric_name, value in mm1.items():
            item[metric_name] = value
        save_results_to_csv(item, f"metrics-(unknown)", f"{path_general}/{Config.FOL_RES_MODEL}")
        # 5. Saving performance figure
        list_lines = [test_true[200:400], test_pred[200:400]]
        list_legends = ["Observed", "Predicted"]
        xy_labels = ["#Iteration", datadict["datatype"]]
        exts = [".png", ".pdf"]
        draw_predict(list_lines, list_legends, xy_labels, "", filename, f"{path_general}/{Config.FOL_RES_VISUAL}", exts, verbose)
# Run the full experiment for every configured dataset.
for dataname, datadict in Exp.LIST_DATASETS.items():
    # load dataset
    series = read_csv(f'{Config.DATA_APP}/{datadict["dataname"]}.csv', usecols=datadict["columns"])
    # experiment
    experiment(Exp.TRIAL, datadict, series, Exp.EPOCH[0], Exp.NN_NET, Exp.VERBOSE)
| StarcoderdataPython |
1996421 | import idc
import idautils
def enum_segments():
    """Yield (start, end, name) for every segment in the IDA database."""
    for start in idautils.Segments():
        yield start, idc.SegEnd(start), idc.SegName(start)
def find_pointers(start, end):
    """Yield (address, value) pairs where the dword at *address* looks like
    a pointer, i.e. its value lands inside some segment."""
    for addr in range(start, end - 0x4):
        value = idc.Dword(addr)
        if idc.SegStart(value) != idc.BADADDR:
            yield addr, value
def is_head(va):
    # A "head" is the first byte of a defined item (instruction or data).
    return idc.isHead(idc.GetFlags(va))
def get_head(va):
    """Return *va* itself if it starts an item, else the preceding head."""
    return va if is_head(va) else idc.PrevHead(va)
def is_code(va):
    """True if the item containing *va* is an instruction."""
    if not is_head(va):
        # Recurse on the head of the item containing this address.
        return is_code(get_head(va))
    return idc.isCode(idc.GetFlags(va))
# Snapshot of IDA's string list, computed once: Strings() is expensive and
# is consulted for every candidate pointer in is_in_string().
CACHED_STRINGS = list(idautils.Strings())
def is_in_string(va):
    """True if *va* falls within any known string item."""
    return any(s.ea <= va < s.ea + s.length for s in CACHED_STRINGS)
def is_defined(va):
    # Stub: never implemented and currently unused by main().
    pass
def is_unknown(va):
    # "Unknown" bytes are undefined/unexplored in the IDA database.
    return idc.isUnknown(idc.GetFlags(va))
def main():
    """Scan .text/.data for dword values that look like pointers and convert
    each into an offset operand, defining a byte at the target if needed."""
    for segstart, segend, segname in enum_segments():
        if segname not in ('.text', '.data'):
            continue
        for src, dst in find_pointers(segstart, segend):
            if is_code(src):
                # ignore instructions like:
                #
                #   call ds:__vbaGenerateBoundsError
                #print('code pointer: 0x%x -> 0x%x' % (src, dst))
                continue
            if is_in_string(src):
                # for example, the following contains 0x444974 (a common valid offset):
                #
                #   text:004245B0 aRequestid db 'requestID',
                #
                # enable or disable this behavior as you wish
                print('string pointer: 0x%x -> 0x%x' % (src, dst))
                pass
                #continue
            print('pointer from 0x%x to 0x%x' % (src, dst))
            # Make sure the destination address is (or becomes) a defined head.
            if is_unknown(dst):
                print('destination unknown, making byte: 0x%x' % (dst))
                idc.MakeByte(dst)
            elif is_head(dst):
                # things are good
                pass
            else:
                # need to undefine head, and make byte
                head_va = get_head(dst)
                print('destination overlaps with head: 0x%x' % (head_va))
                idc.MakeUnkn(head_va, dst - head_va)
                idc.MakeByte(head_va)
                idc.MakeByte(dst)
            # Redefine the source dword and mark its operand as an offset.
            idc.MakeUnkn(src, 4)
            idc.MakeDword(src)
            # this doesn't seem to always work :-(
            idc.OpOffset(src, 0)


if __name__ == '__main__':
    main()
159069 |
import scrapy
import json
import requests
import time
from spiderUtil import FbrefFixtureResponseWrapper, FbrefMatchResponseWrapper
from hashlib import md5
from dateSearch import DateSearch
from itertools import permutations
from scrapy.http import HtmlResponse
class FixturesSpider(scrapy.Spider):
    """Crawl fbref fixture lists and dump per-match data files.

    For each fixture row that already has a score, a follow-up request to
    the match report page is scheduled; its parsed contents are written to
    disk under data/matches/.
    """

    name = "Fixtures"

    custom_settings = {
        'DOWNLOAD_DELAY': 0.25,
    }

    # One season fixture list per run; earlier seasons kept for reference.
    start_urls = [
        #"https://fbref.com/de/comps/20/2109/schedule/2018-2019-Bundesliga-Fixtures",
        #"https://fbref.com/de/comps/20/1634/schedule/2017-2018-Bundesliga-Fixtures",
        #"https://fbref.com/de/comps/20/1529/schedule/2016-2017-Bundesliga-Fixtures",
        #"https://fbref.com/de/comps/20/1470/schedule/2015-2016-Bundesliga-Fixtures",
        #"https://fbref.com/de/comps/20/736/schedule/2014-2015-Bundesliga-Fixtures",
        #"https://fbref.com/de/comps/9/1889/schedule/2018-2019-Premier-League-Fixtures",
        #"https://fbref.com/de/comps/9/1631/schedule/2017-2018-Premier-League-Fixtures",
        #"https://fbref.com/de/comps/9/1526/schedule/2016-2017-Premier-League-Fixtures",
        #"https://fbref.com/de/comps/9/1467/schedule/2015-2016-Premier-League-Fixtures",
        #"https://fbref.com/de/comps/12/1886/schedule/2018-2019-La-Liga-Fixtures",
        #"https://fbref.com/de/comps/12/1652/schedule/2017-2018-La-Liga-Fixtures",
        #"https://fbref.com/de/comps/12/1547/schedule/2016-2017-La-Liga-Fixtures",
        #"https://fbref.com/de/comps/12/1488/schedule/2015-2016-La-Liga-Fixtures",
        #"https://fbref.com/de/comps/11/1896/schedule/2018-2019-Serie-A-Fixtures",
        #"https://fbref.com/de/comps/11/1640/schedule/2017-2018-Serie-A-Fixtures",
        #"https://fbref.com/de/comps/11/1535/schedule/2016-2017-Serie-A-Fixtures",
        #"https://fbref.com/de/comps/11/1476/schedule/2015-2016-Serie-A-Fixtures",
        "https://fbref.com/en/comps/13/2104/schedule/2018-2019-Ligue-1-Fixtures"
    ]

    # fbref league name fragment -> fifaindex league id (used by PlayerSpider).
    leagues = {
        "Premier-League": 13,
        "La-Liga": 53,
        "Bundesliga": 19,
        "Serie-A": 31,
        "Ligue-1": 16
    }

    def __getSeason(self, response) -> str:
        """Extract the season string (e.g. '2018-2019') from the page heading."""
        # get the full season string
        seasonStr = response.css("h1[itemprop='name']::text")[0].get()
        # extract the years
        return seasonStr.strip().split(" ")[0]

    def __dumpToFile(self, filePath, toDump):
        """Serialise *toDump* as JSON into *filePath*."""
        # Fix: a context manager guarantees the handle is closed even when
        # serialisation raises (the old open/write/close leaked on error).
        with open(filePath, "w+") as dFile:
            dFile.write(json.dumps(toDump))

    def parseTeams(self, response, fileName, matchDate, matchScore, league):
        """Parse one match report page and dump its data keyed by *fileName*."""
        plSpider = PlayerSpider(matchDate, league)
        wrapper = FbrefMatchResponseWrapper(response, plSpider, matchScore)
        if wrapper.hasField():
            self.__dumpToFile("data/matches/test/" + fileName + ".txt", wrapper.getData())

    def parse(self, response: HtmlResponse):
        """Iterate fixture rows; schedule a match-report request per played game."""
        season = self.__getSeason(response)
        respWrapper = FbrefFixtureResponseWrapper(response)
        while respWrapper.nextRow():
            # check if there is a score. If there is, there is also a match report
            if respWrapper.hasScore():
                data = respWrapper.extractData()
                # Unique file name: md5 of timestamp + season + score + teams.
                toHash = str(time.time()) + str(season) + data["score"] + data["team_a"] + data["team_b"]
                md5Hash = md5(toHash.encode('utf-8'))
                data["match_file"] = str(md5Hash.hexdigest())
                url = respWrapper.generateMatchURL()
                league = ""
                for key in self.leagues:
                    if key in response.url:
                        league = self.leagues[key]
                yield scrapy.Request(url, callback=self.parseTeams, cb_kwargs=dict(
                    fileName=data["match_file"], matchDate=data["date"],
                    matchScore=respWrapper.getMatchScore(), league=league))
class PlayerSpider:
    """Look up player ratings on fifaindex.com valid at a given match date."""

    start_url = "https://www.fifaindex.com/de/players/fifa{season}/"
    url_vars = "?name={name}&league={league}&order=desc"

    def __init__(self, date: str, league: str):
        self.__date = date
        self.__dSearch = DateSearch(date)
        self.__season = self.__dSearch.getSeason(date)
        self.__searchableDates = self.__getSearchableDates()
        self.__searchHref = self.__getSearchHref()
        self.__league = league

    def __getSearchableDates(self):
        """Map the dropdown's visible date labels to their link nodes."""
        response = requests.get(self.start_url.format(season=self.__season))
        basepage = HtmlResponse("", body=response.content, encoding=response.encoding)
        dates = basepage.css("div[class='dropdown-menu fade-out'] > a[class='dropdown-item']")
        result = {}
        for date in dates:
            result[date.css("a::text").get()] = date
        return result

    def permutateName(self, name: str):
        """
        permutates the given name

        The name is split on spaces, hyphens and apostrophes, and every
        ordering of the parts is returned (sites often list surname first).
        """
        import re
        # Fix: the original called str.split(" |-|'"), which searches for that
        # literal substring and never matches, so only the unsplit name was
        # ever permuted. The pattern must go through re.split to act as the
        # intended space/hyphen/apostrophe separator.
        return list(permutations(re.split(r" |-|'", name)))

    def __getSearchHref(self) -> str:
        """Walk candidate dates until one matches a dropdown entry; return its URL."""
        href = None
        keyList = self.__searchableDates.keys()
        while not href:
            try:
                date = self.__dSearch.getNextDate()
                if date in keyList:
                    return "https://www.fifaindex.com" + self.__searchableDates[date].css("a::attr(href)").get()
            except IndexError as err:
                print(err.args[0])
                print(self.__searchableDates.keys())
                break
        # NOTE(review): if getNextDate() raises on the very first iteration,
        # `date` is unbound here -- confirm DateSearch always yields one date.
        raise ValueError("Could not find matching date for " + date)

    def __getPlayerHrefs(self, name: str):
        """Build one search URL per permutation of the player's name parts."""
        nameAttempts = self.permutateName(name)
        result = []
        for name in nameAttempts:
            result.append(self.__searchHref + self.url_vars.format(name="+".join(name), league = self.__league))
        return result

    def __getPlayerStats(self, href: str):
        """Fetch a player page; return [personal info fields, last 34 ratings]."""
        response = requests.get(href)
        playerPage = HtmlResponse("", body=response.content, encoding=response.encoding)
        personalInfo = playerPage.css("div[class='card-body'] > p > span::text").getall()
        ratings = playerPage.css("div[class='card-body'] > p > span > span::text").getall()
        return [personalInfo, ratings[-34:]]

    def getPlayer(self, name: str):
        """Try each name permutation; return stats when exactly one result matches."""
        hrefs = self.__getPlayerHrefs(name)
        for href in hrefs:
            response = requests.get(href)
            searchPage = HtmlResponse("", body=response.content, encoding=response.encoding)
            hrefs = searchPage.css("td[data-title='Name'] > a::attr(href)").getall()
            if len(hrefs) == 1:
                return self.__getPlayerStats("https://www.fifaindex.com" + hrefs[0])
        return None
# scrapy runspider spiders.py | StarcoderdataPython |
11347331 | import bs4
import requests
import threading
from colorama import Fore
def get_html(episode_number: int, text_html: list) -> str:
    """Fetch one episode page and append (html, episode_number) to *text_html*."""
    print(Fore.YELLOW + f'Getting HTML for episode {episode_number}', flush=True)
    resp = requests.get(f'https://talkpython.fm/{episode_number}')
    resp.raise_for_status()
    text_html.append((resp.text, episode_number))
    return 'Done'
def get_title(html: str, episode_number: int) -> str:
    """Extract the first <h1> heading text from an episode page."""
    print(Fore.CYAN + f'Getting TITLE for episode {episode_number}', flush=True)
    soup = bs4.BeautifulSoup(html, 'html.parser')
    header = soup.select_one('h1')
    return header.text.strip() if header else 'Missing header'
def main():
    """Entry point: download the episode titles, then report completion."""
    get_title_range()
    print('Done')
def get_title_range():
    """Download episodes 150-159 concurrently, then print each title."""
    text_html = []
    # One daemon thread per episode; list.append is thread-safe in CPython.
    threads = [
        threading.Thread(target=get_html, args=(n, text_html), daemon=True)
        for n in range(150, 160)
    ]
    for worker in threads:
        worker.start()
    for worker in threads:
        worker.join()
    for html, episode in text_html:
        title = get_title(html, episode)
        print(Fore.WHITE + f'Title found: {title}', flush=True)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8181192 | <filename>MainFile_OptPRFF_RFF_Nyst_ORF.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 15:40:47 2017
@author: damodara
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 21 15:48:47 2016
@author: damodara
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 14:50:12 2016
@author: damodara
test PRFF_Romain
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn import datasets
import time
import copy
import sys
from time import clock
from revrand.basis_functions import RandomRBF, RandomLaplace, RandomCauchy, RandomMatern32, RandomMatern52, \
FastFoodRBF, OrthogonalRBF, FastFoodGM, BasisCat
from revrand import Parameter, Positive
#import matplotlib as mpl
#mpl.use('Agg')
#%%
#Dataset Load
#====================================
##census Data
# from DatasetLoad import census_dataload
# TrainData, train_label, TestData, test_label=census_dataload()
## cpu Data
#from DatasetLoad import cpu_dataload
#TrainData, train_label, TestData, test_label = cpu_dataload()
## YearPredictionMSD Data
# from DatasetLoad import YearPredictionMSD_dataload
# TrainData, train_label, TestData, test_label = YearPredictionMSD_dataload()
#from DatasetLoad import cadata_dataload
#Data, label = cadata_dataload()
#==============================================================================
#from DatasetLoad import iris_dataload
#Data, label=iris_dataload()
# digits dataset
from DatasetLoad import digits_dataload
Data, label=digits_dataload()
#==============================================================================
# Adult Data
#from DatasetLoad import adult_dataload
#TrainData, train_label, TestData, test_label=adult_dataload()
##
#MNIST Data
# NOTE(review): this load overwrites the digits Data/label loaded above --
# only the MNIST pair is actually used downstream. Confirm which dataset
# this run is meant to use and comment out the other.
from DatasetLoad import MNIST_dataload
Data, label=MNIST_dataload()
#
#from DatasetLoad import MNIST_official_split_dataload
#TrainData, train_label, TestData, test_label = MNIST_official_split_dataload()
###
##CIFAR-10 Dataset
#from DatasetLoad import cifar10_dataload
#TrainData, train_label, TestData, test_label=cifar10_dataload()
###
##Forest Data
#from DatasetLoad import forest_dataload
#Data, label= forest_dataload()
#%%
# Train and Test Data split
NData, Nfeat=np.shape(Data)
Nclass=len(np.unique(label))
from sklearn.cross_validation import train_test_split
indicies=np.arange(NData)
TrainData,TestData,train_label,test_label,tr_index,test_index=train_test_split(Data,label,indicies,test_size=0.14285,random_state=42)
# For extraction of validation data, to estimate bandwidth parameter, for error approximation, and CV for SGD classifier
indicies=np.arange(np.shape(TrainData)[0])
from sklearn.cross_validation import train_test_split
#TrainData, ValData, train_label, val_label,tr_index,val_index = train_test_split(TrainData,train_label,indicies,test_size=0.16667,random_state=42)
#TrainData, ValData, train_label, val_label,tr_index,val_index = train_test_split(TrainData,train_label,indicies,test_size=0.909,random_state=42)
TrainData, ValData, train_label, val_label,tr_index,val_index = train_test_split(TrainData,train_label,indicies,test_size=0.1,random_state=42)
## if already split, comment above
Ntrain, Nfeat=np.shape(TrainData)
Ntest=np.shape(TestData)[0]
Nval = np.shape(ValData)[0]
classnames=np.unique(train_label)
Nclass=len(np.unique(train_label))
train_label=np.squeeze(train_label)
test_label=np.squeeze(test_label)
val_label = np.squeeze(val_label)
#%%
normalize_bf_rff = True
# Data Normalization
if normalize_bf_rff:
from sklearn import preprocessing
NormParam=preprocessing.StandardScaler().fit(TrainData)
TrainData = NormParam.transform(TrainData)
TestData = NormParam.transform(TestData)
ValData = NormParam.transform(ValData)
#TrainData = preprocessing.maxabs_scale(TrainData, axis=1)
#TestData = preprocessing.maxabs_scale(TestData, axis=1)
#ValData = preprocessing.maxabs_scale(ValData, axis=1)
#TrainData=preprocessing.minmax_scale(TrainData, feature_range=(-1, 1), axis=0)
#TestData = preprocessing.minmax_scale(TestData, axis=1)
#ValData = preprocessing.minmax_scale(ValData, axis=1)
#TrainData = preprocessing.normalize(TrainData)
#TestData = preprocessing.normalize(TestData)
#ValData = preprocessing.normalize(ValData)
#%% Take a Subset of Data for the computation of PRFF approximation Error
if Nval>=5000:
Nsamples = 500
else:
Nsamples = Nval
Rshuffle = np.random.permutation(Nval)
Data = copy.copy(ValData[Rshuffle,])
Data = Data[0:Nsamples,]
#%%
# Calculation of sigma using 5 percentile
from scipy.spatial import distance
Pdist=distance.pdist(Data,metric='euclidean')
nsigma=np.percentile(Pdist,5)
#nsigma = Nfeat
bestgamma = 1/(2*nsigma**2)
#sigma=2*bestgamma
del Pdist
#bestgamma = 1e-15
#%%
import numpy
from sklearn.metrics.pairwise import rbf_kernel
from prff import PRFF
from prff_bharath import PRFF_Bharath
from sklearn.kernel_approximation import RBFSampler, Nystroem
from PRFGradient import LossfunctionForm2
from Nystroem_Method import Nystroem_ErrorApprox
from RFF_Classification import RFF_Form2_Classification
from Nystroem_Method import Nystroem_Classification
from orff import ORFF
#%%
gamma= copy.copy(bestgamma)
# Exact RBF Gram matrix on the subsample: ground truth for approximation error.
exact_gram = rbf_kernel(Data, gamma= gamma)
no_runs =1
n_components = 50
#alpha_range =[5.0]
# NOTE(review): each of these ranges is immediately overwritten by the
# shorter list below it; only the last assignment is effective.
alpha_range = [50, 25, 10, 5, 1, 0.1, 0.5, 0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005, 0.00001]
#alpha_range = [10.0]
alpha_range = [0.1]
lamda_range =[100, 50, 25, 10, 5, 0.9, 0.1, 0.01, 0.001, 0.0001, 0.0]
lamda_range =[0.001]
n_components_range = [10, 25, 50, 100, 200, 300, 400, 500]#np.arange(1, n_components+1, 10)
n_components_range = [10,25]
prff = True
if prff:
    # Result tensors indexed by (run, alpha, n_components, lambda).
    prff_p_error = np.zeros((no_runs,len(alpha_range), len(n_components_range), len(lamda_range)))
    prff_b_error = np.zeros((no_runs,len(alpha_range), len(n_components_range), len(lamda_range)))
    prff_p_sgd_accuracy = np.zeros((no_runs,len(alpha_range), len(n_components_range), len(lamda_range)))
    prff_b_sgd_accuracy = np.zeros((no_runs,len(alpha_range), len(n_components_range), len(lamda_range)))
    prff_p_ridge_accuracy = np.zeros((no_runs,len(alpha_range), len(n_components_range), len(lamda_range)))
    prff_b_ridge_accuracy = np.zeros((no_runs,len(alpha_range), len(n_components_range), len(lamda_range)))
rff = True
if rff:
    # NOTE(review): the original indentation was lost; assuming only the RFF
    # result arrays are guarded by the flag (mirroring the prff block above).
    rff_error = np.zeros((no_runs,len(n_components_range)))
    rff_sgd_accuracy=np.zeros((no_runs,len(n_components_range)))
    rff_ridge_accuracy=np.zeros((no_runs,len(n_components_range)))
nyst_error =np.zeros((no_runs,len(n_components_range)))
nyst_sgd_accuracy=np.zeros((no_runs,len(n_components_range)))
nyst_ridge_accuracy=np.zeros((no_runs,len(n_components_range)))
orff_error = np.zeros((no_runs,len(n_components_range)))
orff_sgd_accuracy=np.zeros((no_runs,len(n_components_range)))
orff_ridge_accuracy=np.zeros((no_runs,len(n_components_range)))
normalize = False
# ---- Main experiment loop: no_runs repeats x lamda x alpha x n_components --
for niter in range(no_runs):
    # Plain random Fourier features; the offset row is appended so the
    # combined matrix maps x -> phi(w.x + b) in the "Form2" feature map.
    RFF = RBFSampler(gamma=gamma, n_components=n_components, random_state=None)
    RFF_Sampler = RFF.fit(TrainData)
    RFF_W = np.concatenate((RFF_Sampler.random_weights_, RFF_Sampler.random_offset_.reshape((1,-1))), axis=0)
    #ORF
    # Orthogonal random features baseline, same weight/offset layout as RFF.
    ORF = ORFF(gamma=gamma, n_components=n_components,random_state=None)
    # ORF = OrthogonalRBF(Xdim=Nfeat, nbases=n_components,lenscale = Parameter(nsigma, Positive()))
    ORF_Sampler = ORF.fit(TrainData)
    ORF_W = np.concatenate((ORF_Sampler.random_weights_, ORF_Sampler.random_offset_.reshape((1,-1))), axis=0)
    # ORF_W = np.concatenate((ORF.W/nsigma, RFF_Sampler.random_offset_.reshape((1,-1))), axis=0)
    b=0
    sampler_iter =0
    n_comp_iter=0
    # option =1 # for classification
    option =2 # for regression
    #classifier_opt =1 # for logistic regression
    classifier_opt =2 # for ridge regression
    # classifier_opt =3 # for logistic and ridge regression
    lamda_iter =0
    for lbda in lamda_range:
        alpha_iter=0
        for alpha in alpha_range:
            # Caution with PRFF: if you generate figures (gen_fig=True), you should use update_b=False because otherwise
            # the objective function on w changes as b changes from one iteration to the next
            INDEX_OF_PRFF = 0
            INDEX_OF_PRFF_Bharath= 1
            if prff:
                # Fit the learned random features once per (lamda, alpha);
                # sub-grids of components are evaluated by slicing columns below.
                st_time = time.time()
                PRFF_sampler = PRFF_Bharath(gamma=gamma, n_components=n_components, random_state=0,
                                            alpha=alpha, lbda=lbda, n_pass=1, minibatch_size=128,
                                            max_iter=1000,update_b=True)
                PRFF_sampler.fit(TrainData)
                PRFF_Bh_W = np.concatenate((PRFF_sampler.random_weights_,
                                            PRFF_sampler.random_offset_.reshape((1,-1))), axis=0)
                end_time = time.time()
                # NOTE(review): the '%d' placeholder is never formatted -- the
                # format string and the values are printed as separate args.
                print('Time required to compute %d PRFF is =', n_components, end_time-st_time)
                print('Computation of PRFF Coefficients Completed')
                # PRFF_sampler_pupdate = PRFF_Bharath(gamma=gamma, n_components=n_components, random_state=None,
                #                                     alpha=alpha, lbda=lbda, n_pass=1, minibatch_size=128,
                #                                     max_iter=1000,update_b=True, philippe_update=True)
                # PRFF_sampler_pupdate.fit(TrainData)
                # PRFF_W_pupdate = np.concatenate((PRFF_sampler_pupdate.random_weights_,
                #                                  PRFF_sampler_pupdate.random_offset_.reshape((1,-1))), axis=0)
                # print('Computation of PRFF Coefficients Completed')
            #%%
            n_comp_iter=0
            for n_comp in n_components_range:
                if prff:
                    # Kernel-approximation error and downstream SGD/ridge
                    # performance using the first n_comp learned features.
                    prff_b_error[niter,alpha_iter, n_comp_iter, lamda_iter] =LossfunctionForm2(Data, PRFF_Bh_W[:,range(n_comp)], gamma, exact_gram)
                    prff_b_sgd_accuracy[niter,alpha_iter,n_comp_iter,lamda_iter],prff_b_ridge_accuracy[niter,alpha_iter,n_comp_iter, lamda_iter], cvparam = RFF_Form2_Classification(PRFF_Bh_W[:,range(n_comp)],b,TrainData,
                        ValData, TestData, train_label, val_label, test_label, option,classifier_opt= classifier_opt, normalize=normalize, loss ="hinge")
                    # prff_p_error[niter,alpha_iter, n_comp_iter, lamda_iter] =LossfunctionForm2(Data, PRFF_W_pupdate[:,range(n_comp)], gamma, exact_gram)
                    # prff_p_sgd_accuracy[niter,alpha_iter,n_comp_iter,lamda_iter],prff_p_ridge_accuracy[niter,alpha_iter,n_comp_iter, lamda_iter], cvparam = RFF_Form2_Classification(PRFF_W_pupdate[:,range(n_comp)],b,TrainData,
                    #     ValData, TestData, train_label, val_label, test_label, option,classifier_opt= classifier_opt, normalize=normalize,loss ="hinge")
                if lamda_iter==0 and alpha_iter==0 and rff:
                    # Baselines do not depend on (lamda, alpha), so evaluate
                    # them only once per run, on the first grid point.
                    rff_error[niter,n_comp_iter]= LossfunctionForm2(Data,RFF_W[:,range(n_comp)],gamma,exact_gram)
                    nyst_error[niter,n_comp_iter] = Nystroem_ErrorApprox(Data.copy(), n_components=n_comp, gamma=gamma, random_state=None, K=exact_gram)
                    orff_error[niter,n_comp_iter]= LossfunctionForm2(Data,ORF_W[:,range(n_comp)],gamma,exact_gram)
                    rff_sgd_accuracy[niter,n_comp_iter],rff_ridge_accuracy[niter,n_comp_iter], RFcvparam = RFF_Form2_Classification(RFF_W[:,range(n_comp)],b,TrainData,
                        ValData, TestData, train_label, val_label, test_label,option, classifier_opt= classifier_opt, normalize=normalize,loss ="hinge")
                    orff_sgd_accuracy[niter,n_comp_iter],orff_ridge_accuracy[niter,n_comp_iter], ORFcvparam = RFF_Form2_Classification(ORF_W[:,range(n_comp)],b,TrainData,
                        ValData, TestData, train_label, val_label, test_label,option,classifier_opt= classifier_opt, normalize=normalize,loss ="hinge")
                    nyst_sgd_accuracy[niter,n_comp_iter], nyst_ridge_accuracy[niter,n_comp_iter], Nyst_cvparam = Nystroem_Classification(TrainData,
                        ValData, TestData, train_label, val_label, test_label, n_components= n_comp, gamma=gamma, random_state=None,option=option,
                        classifier_opt= classifier_opt, normalize=normalize,loss ="hinge")
                n_comp_iter = n_comp_iter+1
                print('components iteration', n_comp_iter)
            #%%
            alpha_iter = alpha_iter+1
        lamda_iter = lamda_iter+1
        print('Lamda Interation', lamda_iter)
        print('================================')
    print('number of iterations', niter)
#%% Plot of error
# ---- Output configuration and (optional) result dump ------------------------
filesave =False  # set True to write .npz result files and .png figures
# NOTE(review): Windows path as a plain string -- '\P', '\L', '\R', '\c'
# happen not to be escape sequences, but a raw string r'...' would be safer.
pathname = 'D:\PostDocWork\LSML\RandomFourierFeatures\Results\check\cadata_oldgrad'
# Per-alpha plot styles, indexed by the alpha loop variable j below.
colours = ['b','g','r','y','k','b','g','r','m','y','k','b','g','r','m']
linestyle =['--','-','-','-','-','--','-.','-.','-.', '-','-','-','-','-','-','-']
#%% Save file
if filesave:
    filename = 'MNIST_PRFF_B_approxError.npz'
    fname = os.path.join(pathname,filename)
    Info = ['MNIST PRFF_B_ApproxError with different reg (lamda) parameter and learning rate (alpha)']
    # NOTE(review): prff_p_error is saved as an alias of prff_b_error here --
    # presumably because the "p" variant is commented out above; confirm.
    np.savez(fname, prff_b_error= prff_b_error, prff_p_error= prff_b_error, lamda = lamda_range,
             alpha = alpha_range, n_components = n_components_range, Info = Info)
    filename= 'MNIST_PRFF_B_Diff_Lamda_alpha_SGD_Ridge_Accuracy.npz'
    fname = os.path.join(pathname, filename)
    Info = ['MNIST PRFF_B_Classification Accuracy SGD and Ridge with different reg (lamda) parameter and learning rate (alpha)']
    np.savez(fname, prff_b_sgd_accuracy= prff_b_sgd_accuracy, prff_b_ridge_accuracy= prff_b_ridge_accuracy,
             prff_p_sgd_accuracy= prff_p_sgd_accuracy, prff_p_ridge_accuracy= prff_p_ridge_accuracy,
             lamda = lamda_range, alpha = alpha_range, n_components = n_components_range, Info = Info)
    if rff:
        # Baseline accuracies and approximation errors in separate files.
        filename= 'MNIST_RF_ORF_Nyst_SGD_Ridge_Accuracy.npz'
        fname = os.path.join(pathname, filename)
        Info = ['MNIST RF_ORF_Nyst_SGD_Ridge_Accuracy']
        np.savez(fname, rff_sgd_accuracy= rff_sgd_accuracy, rff_ridge_accuracy= rff_ridge_accuracy,
                 orff_sgd_accuracy= orff_sgd_accuracy, orff_ridge_accuracy= orff_ridge_accuracy,
                 nyst_ridge_accuracy=nyst_ridge_accuracy, nyst_sgd_accuracy=nyst_sgd_accuracy,
                 n_components = n_components_range, Info = Info)
        filename='MNIST_RF_ORF_Nyst_ApproxError.npz'
        fname = os.path.join(pathname, filename)
        Info = ['MNIST RF_Nyst_SGD_ApproxError']
        np.savez(fname, rff_error= rff_error, orff_error= orff_error, nyst_error= nyst_error,
                 n_components = n_components_range, Nsamples =Nsamples, Info = Info)
#%% Plot of error
# One figure per regularisation value lamda_range[i]; one PRFF curve per
# learning rate alpha_range[j], plus the RFF/ORFF/Nystroem baselines drawn
# once per figure (they do not depend on alpha).
for i in range(len(lamda_range)):
    for j in range(len(alpha_range)):
        if prff:
            plt.figure(num= i, figsize = (10,12), dpi=100)
            Xaxis = n_components_range
            # Mean/std of the approximation error across the no_runs repeats.
            prffb_mean_score = np.mean(prff_b_error[:,j,:,i],axis=0)
            prffb_std_score = np.std(prff_b_error[:,j,:,i],axis=0)
            plt.errorbar(Xaxis, prffb_mean_score,prffb_std_score,linestyle=linestyle[j], color=colours[j],
                         linewidth=3.0, label= 'PRFF_%s' % alpha_range[j])
            # prffp_mean_score = np.mean(prff_p_error[:,j,:,i],axis=0)
            # prffp_std_score = np.std(prff_p_error[:,j,:,i],axis=0)
            # plt.errorbar(Xaxis, prffp_mean_score,prffp_std_score,linestyle=linestyle[-j-1], color=colours[-j-1],
            #              linewidth=3.0, label= 'PRFF_P_%s' % alpha_range[j])
            plt.xlabel('Feature Expansions')
            plt.ylabel('Approximation Error')
    if rff:
        # Baseline curves, averaged over runs.
        rff_mean = np.mean(rff_error, axis=0)
        rff_std = np.std(rff_error, axis=0)
        Xaxis = n_components_range
        plt.errorbar(Xaxis, np.mean(rff_error, axis=0), np.std(rff_error, axis=0),marker ='s',
                     color ='b',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'RFF')
        plt.errorbar(Xaxis, np.mean(orff_error,axis=0),np.std(orff_error,axis=0), marker ='s',
                     color ='r',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'ORFF')
        plt.errorbar(Xaxis, np.mean(nyst_error,axis=0),np.std(nyst_error,axis=0), marker ='^',
                     color='g',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'Nyst')
    plt.title('PRFF_B_Error_Approx_lamda_%s' %lamda_range[i])
    plt.legend()
    plt.show()
    if filesave:
        filename = 'MNIST_PRFF_B_Approx_Error_lamda_%s' %lamda_range[i]
        plt.savefig(os.path.join(pathname,filename+'.png'))
        # plt.savefig(os.path.join(pathname,filename+'.eps'))
    plt.close()
#%% Classification Accuracy
# SGD-trained downstream model: accuracy (classification, option==1) or
# mean squared error (regression) versus the number of feature expansions.
for i in range(len(lamda_range)):
    for j in range(len(alpha_range)):
        if prff:
            plt.figure(num= 2, figsize = (10,12), dpi=100)
            Xaxis = n_components_range
            prff_b_sgd_mean = np.mean(prff_b_sgd_accuracy[:,j,:,i], axis=0)
            prff_b_sgd_std = np.std(prff_b_sgd_accuracy[:,j,:,i], axis=0)
            # prff_p_sgd_mean = np.mean(prff_p_sgd_accuracy[:,j,:,i], axis=0)
            # prff_p_sgd_std = np.std(prff_p_sgd_accuracy[:,j,:,i], axis=0)
            plt.errorbar(Xaxis, prff_b_sgd_mean,prff_b_sgd_std,linestyle=linestyle[j],
                         color=colours[j], marker ='o',linewidth=3.0, markersize=10.0, label= 'PRFF_%s ' % alpha_range[j])
            # plt.errorbar(Xaxis, prff_p_sgd_mean,prff_p_sgd_std,linestyle=linestyle[-j-1],
            #              color=colours[-j-1], marker ='p',linewidth=3.0,markersize=10.0, label= 'PRFF_P_%s' % alpha_range[j])
            plt.xlabel('Feature Expansions')
    if rff:
        # Baselines drawn once per figure.
        Xaxis = n_components_range
        rff_sgd_acc_mean = np.mean(rff_sgd_accuracy, axis=0)
        rff_sgd_acc_std = np.std(rff_sgd_accuracy, axis=0)
        orff_sgd_acc_mean = np.mean(orff_sgd_accuracy, axis=0)
        orff_sgd_acc_std = np.std(orff_sgd_accuracy, axis=0)
        nyst_sgd_acc_mean = np.mean(nyst_sgd_accuracy, axis=0)
        nyst_sgd_acc_std = np.std(nyst_sgd_accuracy, axis=0)
        plt.errorbar(Xaxis, rff_sgd_acc_mean,rff_sgd_acc_std, marker ='s',
                     color ='r',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'RFF')
        plt.errorbar(Xaxis, orff_sgd_acc_mean,orff_sgd_acc_std, marker ='*',
                     color ='r',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'ORFF')
        plt.errorbar(Xaxis, nyst_sgd_acc_mean,nyst_sgd_acc_std, marker ='^',
                     color ='g',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'nyst')
    if option ==1:
        plt.ylabel('Accuracy')
        plt.legend(loc=4)
        plt.title('PRFF_B_SGD_Classification_lamda_%s' %lamda_range[i])
    else:
        plt.ylabel('Mean square error')
        plt.legend(loc=1)
        plt.title('PRFF_B_SGD_Regression_lamda_%s' %lamda_range[i])
    plt.show()
    if filesave:
        filename = 'PRFF_B_SGD_Classification_lamda_%s' %lamda_range[i]
        plt.savefig(os.path.join(pathname, filename+'.png'))
        # plt.savefig(os.path.join(filename+'.eps'))
    plt.close()
#%% Ridge Classifier or Regression
# Same layout as the SGD plots above, but for the ridge-trained downstream model.
for i in range(len(lamda_range)):
    for j in range(len(alpha_range)):
        if prff:
            plt.figure(num= i, figsize = (10,12), dpi=100)
            Xaxis = n_components_range
            prff_b_ridge_mean = np.mean(prff_b_ridge_accuracy[:,j,:,i], axis=0)
            prff_b_ridge_std = np.std(prff_b_ridge_accuracy[:,j,:,i], axis=0)
            # Computed but only used by the commented-out "P" curve below.
            prff_p_ridge_mean = np.mean(prff_p_ridge_accuracy[:,j,:,i], axis=0)
            prff_p_ridge_std = np.std(prff_p_ridge_accuracy[:,j,:,i], axis=0)
            plt.errorbar(Xaxis, prff_b_ridge_mean,prff_b_ridge_std,linestyle=linestyle[j],
                         color=colours[j], marker ='o',linewidth=3.0, markersize=10.0, label= 'PRFF_%s ' % alpha_range[j])
            # plt.errorbar(Xaxis, prff_p_ridge_mean,prff_p_ridge_std,linestyle=linestyle[-j-1],
            #              color=colours[-j-1], marker ='p',linewidth=3.0,markersize=10.0, label= 'PRFF_P_%s' % alpha_range[j])
            plt.xlabel('Feature Expansions')
    if rff:
        # Baselines drawn once per figure.
        Xaxis = n_components_range
        rff_ridge_acc_mean = np.mean(rff_ridge_accuracy, axis=0)
        rff_ridge_acc_std = np.std(rff_ridge_accuracy, axis=0)
        orff_ridge_acc_mean = np.mean(orff_ridge_accuracy, axis=0)
        orff_ridge_acc_std = np.std(orff_ridge_accuracy, axis=0)
        nyst_ridge_acc_mean = np.mean(nyst_ridge_accuracy, axis=0)
        nyst_ridge_acc_std = np.std(nyst_ridge_accuracy, axis=0)
        plt.errorbar(Xaxis, rff_ridge_acc_mean,rff_ridge_acc_std, marker ='s',
                     color ='r',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'RFF')
        plt.errorbar(Xaxis, orff_ridge_acc_mean,orff_ridge_acc_std, marker ='*',
                     color ='r',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'ORFF')
        plt.errorbar(Xaxis, nyst_ridge_acc_mean,nyst_ridge_acc_std, marker ='^',
                     color ='g',linewidth=0.5, markersize=10.0,markeredgewidth=0.5,label = 'nyst')
    if option ==1:
        plt.ylabel('Accuracy')
        plt.legend(loc=4)
        plt.title('PRFF_B_Ridge_Classification_lamda_%s' %lamda_range[i])
    else:
        plt.ylabel('Mean square error')
        plt.legend(loc=1)
        plt.title('PRFF_B_Ridge_Regression_lamda_%s' %lamda_range[i])
    plt.show()
    if filesave:
        filename = 'PRFF_B_Ridge_Classification_lamda_%s' %lamda_range[i]
        plt.savefig(os.path.join(pathname, filename+'.png'))
        # plt.savefig(os.path.join(filename+'.eps'))
    plt.close()
| StarcoderdataPython |
1612119 | ## This file is made so that specific statements may be copied inside existing files. This is useful to copy
## import statements in __init__.py, or to complete model lists in the AUTO files.
##
## It is to be used as such:
## Put '# To replace in: "FILE_PATH"' in order to indicate the contents will be copied in the file at path FILE_PATH
## Put '# Below: "STATEMENT"' in order to copy the contents below **the first occurrence** of that line in the file at FILE_PATH
## Put '# Replace with:' followed by the lines containing the content to define the content
## End a statement with '# End.'. If starting a new statement without redefining the FILE_PATH, it will continue pasting
## content in that file.
##
## Put '## COMMENT' to comment on the file.
# To replace in: "src/transformers/__init__.py"
# Below: "if is_torch_available():" if generating PyTorch
# Replace with:
from .modeling_{{cookiecutter.lowercase_modelname}} import (
{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST,
{{cookiecutter.camelcase_modelname}}ForMaskedLM,
{{cookiecutter.camelcase_modelname}}ForMultipleChoice,
{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
{{cookiecutter.camelcase_modelname}}ForTokenClassification,
{{cookiecutter.camelcase_modelname}}Layer,
{{cookiecutter.camelcase_modelname}}Model,
{{cookiecutter.camelcase_modelname}}PreTrainedModel,
load_tf_weights_in_{{cookiecutter.lowercase_modelname}},
)
# End.
# Below: "if is_tf_available():" if generating TensorFlow
# Replace with:
from .modeling_tf_{{cookiecutter.lowercase_modelname}} import (
TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST,
TF{{cookiecutter.camelcase_modelname}}ForMaskedLM,
TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice,
TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
TF{{cookiecutter.camelcase_modelname}}ForTokenClassification,
TF{{cookiecutter.camelcase_modelname}}Layer,
TF{{cookiecutter.camelcase_modelname}}Model,
TF{{cookiecutter.camelcase_modelname}}PreTrainedModel,
)
# End.
# Below: "from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig"
# Replace with:
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP, {{cookiecutter.camelcase_modelname}}Config
# End.
# To replace in: "src/transformers/configuration_auto.py"
# Below: "# Add configs here"
# Replace with:
("{{cookiecutter.lowercase_modelname}}", {{cookiecutter.camelcase_modelname}}Config),
# End.
# Below: "# Add archive maps here"
# Replace with:
{{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP,
# End.
# Below: "from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig",
# Replace with:
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_CONFIG_ARCHIVE_MAP, {{cookiecutter.camelcase_modelname}}Config
# End.
# Below: "# Add full (and cased) model names here"
# Replace with:
("{{cookiecutter.lowercase_modelname}}", "{{cookiecutter.camelcase_modelname}}"),
# End.
# To replace in: "src/transformers/models/auto/modeling_auto.py" if generating PyTorch
# Below: "from .configuration_auto import ("
# Replace with:
{{cookiecutter.camelcase_modelname}}Config,
# End.
# Below: "# Add modeling imports here"
# Replace with:
from .modeling_{{cookiecutter.lowercase_modelname}} import (
{{cookiecutter.camelcase_modelname}}ForMaskedLM,
{{cookiecutter.camelcase_modelname}}ForMultipleChoice,
{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
{{cookiecutter.camelcase_modelname}}ForTokenClassification,
{{cookiecutter.camelcase_modelname}}Model,
)
# End.
# Below: "# Base model mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}Model),
# End.
# Below: "# Model with LM heads mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForMaskedLM),
# End.
# Below: "# Model for Masked LM mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForMaskedLM),
# End.
# Below: "# Model for Sequence Classification mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForSequenceClassification),
# End.
# Below: "# Model for Question Answering mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering),
# End.
# Below: "# Model for Token Classification mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForTokenClassification),
# End.
# Below: "# Model for Multiple Choice mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForMultipleChoice),
# End.
# To replace in: "src/transformers/models/auto/modeling_tf_auto.py" if generating TensorFlow
# Below: "from .configuration_auto import ("
# Replace with:
{{cookiecutter.camelcase_modelname}}Config,
# End.
# Below: "# Add modeling imports here"
# Replace with:
from .modeling_tf_{{cookiecutter.lowercase_modelname}} import (
TF{{cookiecutter.camelcase_modelname}}ForMaskedLM,
TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice,
TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering,
TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification,
TF{{cookiecutter.camelcase_modelname}}ForTokenClassification,
TF{{cookiecutter.camelcase_modelname}}Model,
)
# End.
# Below: "# Base model mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}Model),
# End.
# Below: "# Model with LM heads mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM),
# End.
# Below: "# Model for Masked LM mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM),
# End.
# Below: "# Model for Sequence Classification mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification),
# End.
# Below: "# Model for Question Answering mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering),
# End.
# Below: "# Model for Token Classification mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification),
# End.
# Below: "# Model for Multiple Choice mapping"
# Replace with:
({{cookiecutter.camelcase_modelname}}Config, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice),
# End.
| StarcoderdataPython |
3356699 | <reponame>sepideh-srj/flashNoflash
import torch
from .base_model import BaseModel
from . import networks
import matplotlib.pyplot as plt
import numpy as np
import numpy
import itertools
from util.image_pool import ImagePool
from torchvision import transforms
import cv2
import kornia
class SharedDecModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.add_argument('--ratio', type=float, default=1)
parser.add_argument('--lambda_comp', type=float, default=0, help='')
parser.add_argument('--lambda_color_uv', type=float, default=0, help='')
parser.add_argument('--D_flash', type= float, default=0)
parser.add_argument('--dslr_color_loss', type=float, default=0)
parser.add_argument('--dec_features_num', type=float, default=32, help='')
parser.add_argument('--anti_alias', action='store_true')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
parser.add_argument('--lambda_A', type=float, default=25.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=25.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--cycle_epoch', type=float, default=30, help='')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN_A', 'G_L1_A', 'G_GAN_B', 'G_L1_B', 'cycle_B', 'cycle_A', 'G_L1_A_comp', 'G_L1_B_comp', 'G_GAN_flash_A', 'G_GAN_flash_B','G_GAN_recB', 'G_GAN_recA', 'G_L1_A_comp_color', 'G_L1_B_comp_color', 'color_dslr_B', 'color_dslr_A']
if self.opt.D_flash:
self.loss_names += ['D_F']
else:
self.loss_names += ['D_A', 'D_B']
visual_names_B = ['real_A', 'fake_A']
visual_names_A = ['real_B', 'fake_B']
self.visual_names = visual_names_B + visual_names_A #+ visual_names_C # combine visualizations for A and B
self.model_names = ['G_Decompostion', 'G_Generation','F_Decoder']
if self.isTrain:
if self.opt.D_flash:
self.model_names += ['D_Flash']
else:
self.model_names += ['D_Decompostion', 'D_Generation']
if self.opt.midas:
self.netF_Decoder = networks.define_G(opt.input_nc+1, opt.dec_features_num, int(opt.dec_features_num*2),
'resnet_12blocks', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, anti_alias=self.opt.anti_alias)
else:
self.netF_Decoder = networks.define_G(opt.input_nc, opt.dec_features_num, int(opt.dec_features_num*2),
'resnet_12blocks',opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, anti_alias=self.opt.anti_alias)
self.netG_Decompostion = networks.define_G(opt.input_nc + opt.dec_features_num, opt.output_nc, opt.ngf,
'resnet_6blocks', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, anti_alias=self.opt.anti_alias)
self.netG_Generation = networks.define_G(opt.input_nc + opt.dec_features_num, opt.output_nc, opt.ngf,
'resnet_6blocks', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids, anti_alias=self.opt.anti_alias)
if self.isTrain:
if self.opt.D_flash:
self.netD_Flash = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
else:
self.netD_Decompostion = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_Generation = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
self.criterionCycle = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_Decompostion.parameters(), self.netG_Generation.parameters(), self.netF_Decoder.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
if self.opt.D_flash:
self.optimizer_D1 = torch.optim.Adam(itertools.chain(self.netD_Flash.parameters()), lr=opt.lr3, betas=(opt.beta1, 0.999))
self.optimizer_D2 = torch.optim.Adam(itertools.chain(self.netD_Flash.parameters()), lr=opt.lr3, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_D1)
self.optimizers.append(self.optimizer_D2)
else:
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_Decompostion.parameters(), self.netD_Generation.parameters()), lr=opt.lr3, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
self.real_A = input['A'].to(self.device)
self.real_B = input['B'].to(self.device)
self.image_paths = input['A_paths']
self.real_C = self.real_A - self.real_B
if self.opt.midas:
self.midas_A = input['depth_A'].to(self.device)
self.midas_B = input['depth_B'].to(self.device)
def applyratio(self,input,ratio):
output = (3*input*ratio + 9*ratio + 5*input + 3) / 4
return output
def forward_onedirection(self,real_A,real_B,midas_A,midas_B):
## Adding depth if needed
if self.opt.midas:
decomposition_input = torch.cat((real_A, midas_A), 1)
generation_input = torch.cat((real_B, midas_B), 1)
else:
decomposition_input = real_A
generation_input = real_B
## forward into networks
decomposition_features = self.netF_Decoder(decomposition_input)
generation_features = self.netF_Decoder(generation_input)
decomposition_features_and_input = torch.cat((real_A, decomposition_features), 1)
generation_features_and_input = torch.cat((real_B, generation_features), 1)
decomposition_output = self.netG_Decompostion(decomposition_features_and_input)
generation_output = self.netG_Generation(generation_features_and_input)
## applying ratio if needed
if self.opt.ratio:
fake_B = self.applyratio(self.real_A,decomposition_output)
fake_A = self.applyratio(self.real_B,generation_output)
else:
fake_B = decomposition_output
fake_A = generation_output
return fake_A, fake_B
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_A, self.fake_B = self.forward_onedirection(self.real_A, self.real_B, self.midas_A, self.midas_B)
## compute estimated flash
self.flash_from_decomposition = self.real_A - self.fake_B
self.flash_from_generation = self.fake_A - self.real_B
##### Cycle PATH
self.rec_A, self.rec_B = self.forward_onedirection(self.fake_A, self.fake_B, self.midas_B, self.midas_A)
def forward_bilateral(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
if self.opt.midas:
A_midas = torch.cat((self.real_A, self.midas_A), 1)
self.fake_B = self.netG_Decompostion(A_midas) # G_A(A)
B_midas = torch.cat((self.real_B, self.midas_B), 1)
self.fake_A = self.netG_Generation(B_midas) # G_B(B)
else:
self.fake_B = self.netG_Decompostion(self.real_A) # G_A(A)
self.fake_A = self.netG_Generation(self.real_B) # G_B(B)
def backward_D_basic(self, netD, real_A, real_B, fake):
# Fake
fake_AB = torch.cat((real_A, fake), 1)
pred_fake = netD(fake_AB.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
real_AB = torch.cat((real_A, real_B), 1)
pred_real = netD(real_AB)
loss_D_real = self.criterionGAN(pred_real, True)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
self.loss_D_A = self.backward_D_basic(self.netD_Decompostion, self.real_A, self.real_B, self.fake_B)
def backward_D_B(self):
self.loss_D_B = self.backward_D_basic(self.netD_Generation, self.real_B, self.real_A, self.fake_A)
def backward_D_F_1(self):
self.loss_D_F = self.backward_D_basic(self.netD_Flash, self.real_B, self.real_C, self.flash_from_generation)
def backward_D_F_2(self):
self.loss_D_F = self.backward_D_basic(self.netD_Flash, self.real_A, self.real_C, self.flash_from_decomposition)
def backward_G(self,epoch):
self.loss_G_GAN_A = 0
self.loss_G_GAN_B = 0
self.loss_G_GAN_flash_B = 0
self.loss_G_GAN_flash_A = 0
self.loss_G_GAN_recA = 0
self.loss_G_GAN_recB = 0
self.loss_cycle_A = 0
self.loss_cycle_B = 0
self.loss_G_L1_A_comp = 0
self.loss_G_L1_B_comp = 0
self.loss_G_L1_A_comp_color = 0
self.loss_G_L1_B_comp_color = 0
self.loss_color_dslr_A = 0
self.loss_color_dslr_B = 0
if self.opt.D_flash:
fake_AC = torch.cat((self.real_A, self.flash_from_decomposition), 1)
pred_fake = self.netD_Flash(fake_AC)
self.loss_G_GAN_flash_A = self.criterionGAN(pred_fake, True)
fake_BC = torch.cat((self.real_B, self.flash_from_generation), 1)
pred_fake = self.netD_Flash(fake_BC)
self.loss_G_GAN_flash_B = self.criterionGAN(pred_fake, True)
else:
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD_Decompostion(fake_AB)
self.loss_G_GAN_A = self.criterionGAN(pred_fake, True)
fake_AB_B = torch.cat((self.real_B, self.fake_A), 1)
pred_fake = self.netD_Generation(fake_AB_B)
self.loss_G_GAN_B = self.criterionGAN(pred_fake, True)
fake_AB = torch.cat((self.real_A, self.rec_A), 1)
pred_fake = self.netD_Decompostion(fake_AB)
self.loss_G_GAN_recA = self.criterionGAN(pred_fake, True)
fake_AB_B = torch.cat((self.real_B, self.rec_B), 1)
pred_fake = self.netD_Generation(fake_AB_B)
self.loss_G_GAN_recB = self.criterionGAN(pred_fake, True)
## Flash L1 loss
if self.opt.lambda_comp != 0:
self.loss_G_L1_A_comp = self.criterionL1(self.flash_from_decomposition, self.real_C) * self.opt.lambda_comp
self.loss_G_L1_B_comp = self.criterionL1(self.flash_from_generation, self.real_C) * self.opt.lambda_comp
## Flash Color Loss
if self.opt.lambda_color_uv != 0:
fake_C_A = kornia.rgb_to_yuv(self.flash_from_decomposition)[:,1:2,:,:]
fake_C_B = kornia.rgb_to_yuv(self.flash_from_generation)[:,1:2,:,:]
real_C_color = kornia.rgb_to_yuv(self.real_C)[:,1:2,:,:]
self.loss_G_L1_A_comp_color = self.criterionL1(fake_C_A, real_C_color) * self.opt.lambda_color_uv
self.loss_G_L1_B_comp_color = self.criterionL1(fake_C_B, real_C_color) * self.opt.lambda_color_uv
if epoch >= self.opt.cycle_epoch:
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * self.opt.lambda_A
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * self.opt.lambda_B
if self.opt.dslr_color_loss:
real_A_blurred = kornia.gaussian_blur2d(self.real_A, (21, 21), (3, 3))
real_B_blurred = kornia.gaussian_blur2d(self.real_B, (21, 21), (3, 3))
fake_A_blurred = kornia.gaussian_blur2d(self.fake_A, (21, 21), (3, 3))
fake_B_blurred = kornia.gaussian_blur2d(self.fake_B, (21, 21), (3, 3))
self.loss_color_dslr_A = self.criterionL1(real_A_blurred, fake_A_blurred) * self.opt.dslr_color_loss
self.loss_color_dslr_B = self.criterionL1(real_B_blurred, fake_B_blurred) * self.opt.dslr_color_loss
self.loss_G_L1_A = self.criterionL1(self.fake_A, self.real_A) * self.opt.lambda_L1
self.loss_G_L1_B = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
self.loss_G =self.loss_color_dslr_A + self.loss_color_dslr_B + \
self.loss_G_L1_B_comp_color + self.loss_G_L1_A_comp_color +\
self.loss_G_GAN_flash_A + self.loss_G_GAN_flash_B +\
self.loss_G_GAN_B + self.loss_G_GAN_A +\
self.loss_cycle_A + self.loss_cycle_B +\
self.loss_G_L1_A+ self.loss_G_L1_B +\
self.loss_G_L1_A_comp + self.loss_G_L1_B_comp
self.loss_G.backward()
def optimize_parameters(self, epoch):
self.forward() # compute fake images: G(A)
# update D
if self.opt.D_flash:
self.set_requires_grad([self.netD_Flash], True) # enable backprop for D
self.optimizer_D1.zero_grad() # set D's gradients to zero
self.optimizer_D2.zero_grad() # set D's gradients to zero
self.backward_D_F_1()
self.optimizer_D1.step()
self.backward_D_F_2()
self.optimizer_D2.step()
# update G
self.set_requires_grad([self.netD_Flash], False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G(epoch) # calculate graidents for G
self.optimizer_G.step()
else:
self.set_requires_grad([self.netD_Decompostion, self.netD_Generation], True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D_A() # calculate gradients for D_A
self.backward_D_B()
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad([self.netD_Decompostion, self.netD_Generation], False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G(epoch) # calculate graidents for G
self.optimizer_G.step() # udpate G's weights
def showImage(img,title=None):
image = (img + 1)/2
image = image.clone().detach().cpu().numpy().squeeze()
image = np.transpose(image,[1,2,0])
plt.imshow(image, cmap= 'inferno')
plt.colorbar()
if title is not None:
plt.title(title)
plt.show()
| StarcoderdataPython |
322794 | """
See if slowly raising discount factor can stablize DDPG.
"""
import random
from railrl.envs.mujoco.twod_point import TwoDPoint
from railrl.exploration_strategies.ou_strategy import OUStrategy
from railrl.launchers.launcher_util import (
run_experiment,
set_seed,
)
def experiment(variant):
    """Train DDPG on TwoDPoint with the settings bundled in *variant*.

    NOTE(review): imports are (re-)done inside the function body --
    presumably so the function stays self-contained when pickled and run
    remotely by run_experiment; confirm before removing the redundant
    set_seed import.
    """
    from railrl.torch.ddpg import DDPG
    from railrl.launchers.launcher_util import (
        set_seed,
    )
    seed = variant['seed']
    algo_params = variant['algo_params']
    env_params = variant['env_params']
    es_class = variant['es_class']
    es_params = variant['es_params']
    set_seed(seed)
    env = TwoDPoint(**env_params)
    # Exploration strategy is built from the env's action-space spec.
    es = es_class(
        env_spec=env.spec,
        **es_params
    )
    algorithm = DDPG(
        env,
        es,
        **algo_params
    )
    algorithm.train()
if __name__ == '__main__':
    n_seeds = 1
    mode = "here"
    exp_prefix = "dev-pytorch"
    # noinspection PyTypeChecker
    variant = dict(
        memory_dim=20,
        env_params=dict(
        ),
        algo_params=dict(
            subtraj_length=16,
        ),
        es_class=OUStrategy,
        es_params=dict(
            max_sigma=1,
            min_sigma=None,
        ),
    )
    exp_id = -1
    # Launch one run per seed; each run draws a fresh random seed and
    # records it in the variant for reproducibility.
    for _ in range(n_seeds):
        seed = random.randint(0, 99999)
        exp_id += 1
        set_seed(seed)
        variant['seed'] = seed
        variant['exp_id'] = exp_id
        run_experiment(
            experiment,
            exp_prefix=exp_prefix,
            seed=seed,
            mode=mode,
            variant=variant,
            exp_id=exp_id,
        )
| StarcoderdataPython |
12837522 | #!/usr/bin/env python2.7
#
# This file is part of peakAnalysis, http://github.com/alexjgriffith/peaks/,
# and is Copyright (C) University of Ottawa, 2014. It is Licensed under
# the three-clause BSD License; see doc/LICENSE.txt.
# Contact: <EMAIL>
#
# Created : AU1862014
# File : peak_functions
# Author : <NAME>
# Lab : Dr. Brand and Dr. Perkins
import sys
import numpy as np
import matplotlib.pyplot as plt
import random
import time
from strToListClass import strToList
def parseInputList(string):
    """Parse a bracket-delimited list literal using the strToList parser."""
    parser_options = {
        "newList": "[",
        "closeList": "]",
        "setState": ",",
        "skipChar": ["\"", " "],
    }
    return strToList(parser_options)(string)
def loadLines(fi):
    """Read a tab-separated definition file into {name: parsed_list}.

    Lines containing '#' are skipped. All fields after the first are
    joined and parsed with parseInputList.

    Fixes: the file handle is now closed deterministically (with-block);
    the diagnostic print uses function syntax (valid on py2.7 and py3);
    malformed lines with fewer than two columns are now skipped after the
    diagnostic instead of crashing with an IndexError.
    """
    seqs = {}
    with open(fi, "r") as f:
        for line in f:
            if "#" in line:
                continue
            fields = line.strip().split("\t")
            if len(fields) < 2:
                print(fields)  # diagnostic: malformed line (too few columns)
                continue
            if len(fields) > 2:
                seqs[fields[0]] = parseInputList("".join(fields[1:]))
            else:
                seqs[fields[0]] = parseInputList(fields[1])
    return seqs
def buildLines(lines):
    """Expand each entry of *lines*: items that are themselves keys of
    *lines* are replaced by their own value lists; other items are kept
    as-is.
    """
    expanded = {}
    for name, items in lines.items():
        expanded[name] = [lines[item] if item in lines else item
                          for item in items]
    return expanded
def getCombos(contexts, context):
    """Return every non-empty ordered combination of contexts[context].

    For each item, all existing combinations are extended with the item,
    then the singleton [item] is appended; the original insertion order
    is preserved.
    """
    combos = []
    for item in contexts[context]:
        # Materialize the extension list first: we must not iterate
        # `combos` while appending to it.
        combos.extend([prefix + [item] for prefix in combos])
        combos.append([item])
    return combos
def cdf(temp,cats,context,combinations,start,end,step):
    """Sweep overlap widths from *start* past *end* in *step* increments
    and, per combination, count peaks in multi-member overlap groups whose
    defining category matches the combination's first element.

    Returns {str(combination): [count per width, ...], "width": [widths]}.
    NOTE(review): *temp* is assumed to expose .overlap(width) and .data
    (groups with .define / .data members) -- confirm against the peak
    container class.
    """
    #combinations=getCombos(cats,context)
    comboHash={}
    for i in combinations:
        comboHash[str(i)]=[]
    comboHash["width"]=[]
    width=start
    initLen=len(temp.data)  # NOTE(review): unused after assignment
    while(True):
        # NOTE(review): shadows the *start* parameter; the timing value is
        # never read afterwards.
        start=time.time()
        temp.overlap(width)
        tlen=max([len(i.data) for i in temp.data])  # NOTE(review): unused
        if width>end:
            break
        if width % 10 == 0:
            # progress logging every 10 width units
            sys.stderr.write(str( width)+"\n")
        for combo in combinations:
            test=0
            for i in temp.data:
                # group must contain every combo member ...
                if all([j in i.define.data[context]for j in combo ]):
                    # ... have more than one peak ...
                    if len(i.data)>1:
                        for k in i.data:
                            # ... and count members defined by combo[0]
                            if k.define[context][0]==combo[0]:
                                test+=1
            comboHash[str(combo)].append(test)
        comboHash["width"].append(width)
        width+=step
    return comboHash
def accessDirectory(fileName, string):
    """Look up *string* in a two-column tab-separated directory file and
    return the associated value (second column).

    Lines containing '#' are treated as comments. If the key is missing
    the process exits with a message (historical behaviour, preserved).
    Fix: the file handle is now closed deterministically via a with-block.
    """
    # Replaces peak_processing.directory
    with open(fileName, 'r') as f:
        for line in f:
            if "#" in line:
                continue
            fields = line.strip().split('\t')
            if fields[0] == string:
                return fields[1]
    exit("there is no " + string + " in " + fileName + ".")
class chooseLogFile():
    """Resolve a log destination.

    "stderr"/"stdout" map to the corresponding standard stream; any other
    value is treated as a path and opened for writing. close() only closes
    handles this class opened itself -- standard streams are left open.
    """
    def __init__(self, logFile):
        if logFile == "stderr":
            self.f = sys.stderr
            self.storedStream = False
        elif logFile == "stdout":
            self.f = sys.stdout
            self.storedStream = False
        else:
            self.f = open(str(logFile), "w")
            self.storedStream = True

    def __call__(self):
        return self.f

    def write(self, output):
        self.f.write(output)

    def close(self):
        # Never close a borrowed standard stream.
        if self.storedStream:
            self.f.close()
class verboseOutput():
    """Conditional logger used by peakProcessing.

    Builds a closure over the requested log stream. When *verbose* is
    false the returned function silently ignores its argument; when true
    it writes tagged, separator-joined messages to the chosen stream.

    Args:
        verbose (bool): enable or disable logging.
        logFile (str, optional): "stderr", "stdout", or a file path.
            Default "stderr".
        tag (str, optional): prefix written before every message.
        sep (str, optional): separator appended after each message item.

    call() returns [log_function, stream]; stream is None when disabled.
    """
    def __init__(self, verbose, logFile="stderr", tag="", sep=" "):
        if not verbose:
            # Disabled: no stream is opened, messages are discarded.
            self.fun = lambda x: ()
            self.f = None
        else:
            self.sep = sep
            self.tag = tag
            self.fun = self.caseTrue
            # chooseLogFile abstracts the stderr/stdout/file choice.
            self.f = chooseLogFile(logFile)
            self.output = lambda message: self.f.write(message)

    def call(self):
        """Return [log_function, stream] as selected during __init__.

        If no modifications are necessary, use
        ``verboseOutput(verbose).call()`` directly.
        """
        return [self.fun, self.f]

    def caseTrue(self, message):
        """Write *message* (a string/scalar, or a list/tuple of items).

        Fix: scalar messages are converted with str() BEFORE the separator
        is appended; previously ``str(message + self.sep)`` raised
        TypeError for non-string scalars such as integers.
        """
        self.output(str(self.tag))
        if isinstance(message, (list, tuple)):
            for item in message:
                self.output(str(item) + self.sep)
        else:
            self.output(str(message) + self.sep)
        self.output("\n")
def large_scatter_plot_2(peaks_data, a_name, b_name, title=''):
    """Scatter-plot per-base read densities of two samples against each
    other and save the figure as "ppc<a_name>_<b_name>".

    Densities >= 300 in either sample are discarded and duplicate (x, y)
    points are removed via a complex-number encoding of the pairs.
    Fix: the debug print now uses function-call syntax, which is valid on
    both Python 2.7 and Python 3 (output unchanged for a single argument).
    """
    points = []
    for key in peaks_data.joint_peaks:
        a = key.contexts["reads"][a_name] / (key.end - key.start)
        b = key.contexts["reads"][b_name] / (key.end - key.start)
        if a < 300 and b < 300:
            points.append([a, b])
    array = np.array(points).T
    order = array[0, :].argsort()
    data = array[0:2].T[order]
    # Encode each (x, y) pair as x + y*j so np.unique can deduplicate rows.
    pairs = data.T[0] + data.T[1] * 1.0j
    print(pairs)  # NOTE(review): debug output -- consider removing
    plt.close('all')
    data = data[np.unique(pairs, return_index=True)[1]]
    plt.plot(data.T[0], data.T[1], 'o')
    plt.title(title)
    plt.xlabel(a_name)
    plt.ylabel(b_name)
    #plt.show()
    plt.savefig("ppc" + str(a_name) + "_" + str(b_name))
def loadPeakFile(file_n, root=""):
    """Read a peak description file into [(filename, {key: value}), ...].

    Each data line is: filename TAB key TAB value [TAB key TAB value ...].
    Lines containing '#' are skipped.

    Fixes: the file handle is closed deterministically; blank lines are
    actually skipped (the old ``line == ""`` test never matched because
    iterated lines keep their trailing newline); the per-line name no
    longer shadows the opened path variable.
    """
    peak_data = []
    with open(root + file_n, 'r') as f:
        for line in f:
            if "#" in line:
                continue
            if not line.strip():
                continue
            fields = line.strip().split('\t')
            name = fields[0]
            context = {}
            # consume (key, value) pairs after the leading name
            i = 1
            while i < len(fields) - 1:
                context[fields[i]] = fields[i + 1]
                i += 2
            peak_data.append((name, context))
    return peak_data
def buffer_assossiation(input_peaks):
    """Associate overlapping peaks by sliding a buffer over the peak list.

    For every peak, the buffer keeps earlier peaks on the same chromosome
    whose end extends past the current start; every pair inside such an
    overlap group then cross-assigns category values, storing the distance
    between their starts.
    NOTE(review): peaks are assumed to arrive sorted by
    (chro_order, start) -- confirm with callers.
    """
    buffer_1=[]
    k=0  # counts overlap groups of size > 1 (never read afterwards)
    #context="overlap"
    #catagories=["True","False"]
    for i in input_peaks:
        #i.addContext(context,catagories)
        #i.defining(context, None)
        buffer_2=[]
        start=int(i.start)
        end=int(i.end)
        chro=i.chro_order
        buffer_2.append(i)
        # carry over previous peaks that still overlap the current start
        if len(buffer_1)>0:
            for j in buffer_1:
                if j.chro_order == chro:
                    if int(j.end)>start:
                        buffer_2.append(j)
        del buffer_1
        buffer_1=[]
        buffer_1=buffer_2
        del buffer_2
        if len(buffer_1) >1:
            k+=1
        """context magic"""
        # every pair (j, x) in the group exchanges category values; the
        # stored value is the distance between their starts
        for j in buffer_1:
            #if len(buffer_1)>1:
            #    value=1
            #    j.assignCatagoryValue( "overlap", "True",value)
            for x in buffer_1:
                value=abs(j.start-x.start)
                for m in x.define.keys():
                    if x.define[m]:
                        comp=j.contexts[m][x.define[m][0]]
                        # membership test used as the comparison function
                        fun=lambda x,y: ( x in y)
                        if comp==None:
                            j.assignCatagoryValue( m,x.define[m][0],value)
                        else:
                            j.assignCatagoryValue( m,x.define[m][0],value,comp,fun,comp)
def loadContextFile(filename):
    """Read a context definition file into {context: [category, ...]}.

    Each data line is: context TAB category [TAB category ...]; lines
    containing '#' are skipped. Duplicate categories are collapsed, as
    before (they were previously dict keys).

    Fixes: the file handle is closed deterministically, and the category
    collection is returned as a list on both Python 2 and 3 (previously
    ``dict.keys()``, which is a view object on Python 3).
    """
    contexts = {}
    with open(filename, 'r') as f:
        for line in f:
            if "#" in line:
                continue
            fields = line.strip().split('\t')
            context_name = fields[0]
            categories = {}
            for cat in fields[1:]:
                categories[cat] = None
            contexts[context_name] = list(categories)
    return contexts
def loadChromosomeFile(filename):
    """Return the chromosome names (first tab field) of each non-comment
    line in *filename*.

    Fix: the file handle is now closed deterministically via a with-block.
    """
    chromosomes = []
    with open(filename, 'r') as f:
        for line in f:
            if "#" in line:
                continue
            chromosomes.append(line.strip().split('\t')[0])
    return chromosomes
def sortedMerge(peaks, temp):
    """Merge two peak lists already sorted by (chro_order, start).

    Ties (same chromosome order AND same start) emit the *peaks* entry
    first, then the *temp* entry, and consume both simultaneously --
    this exact tie handling is relied upon by callers.
    """
    merged = []
    i = 0  # cursor into peaks
    j = 0  # cursor into temp
    while i < len(peaks) and j < len(temp):
        p = peaks[i]
        t = temp[j]
        if t.chro_order < p.chro_order:
            merged.append(t)
            j += 1
        elif t.chro_order > p.chro_order:
            merged.append(p)
            i += 1
        elif t.start < p.start:
            merged.append(t)
            j += 1
        elif t.start > p.start:
            merged.append(p)
            i += 1
        else:
            # exact tie: peak first, then temp, advance both
            merged.append(p)
            merged.append(t)
            i += 1
            j += 1
    merged.extend(peaks[i:])
    merged.extend(temp[j:])
    return merged
| StarcoderdataPython |
4850898 | <reponame>barbagroup/cloud-repro
"""Plot the time-to-solution versus the number of nodes."""
import pathlib
from matplotlib import pyplot, gridspec
import numpy
def get_filepaths(casedir, prefix='stdout_run', suffix='.txt', niter=5):
    """Return the paths of the repeated-run output files.

    Parameters
    ----------
    casedir : pathlib.Path object
        Directory that contains the files to read.
    prefix : string (optional)
        Common prefix of the file names; default: 'stdout_run'.
    suffix : string (optional)
        Common suffix of the file names; default: '.txt'.
    niter : integer (optional)
        Number of repeated runs (i.e., number of files to read).

    Returns
    -------
    filepaths : list of pathlib.Path objects
        The file paths, numbered 1..niter.
    """
    return [casedir / f'{prefix}{run}{suffix}'
            for run in range(1, niter + 1)]
def amgxwrapper_poisson_read_runtimes(*filepaths):
    """Read the solve runtimes recorded in one or more log files.

    A runtime is the last space-separated token of every 'Solve Time:'
    line, except those immediately preceded by a 'Warm-up' line (warm-up
    solves are excluded).

    Parameters
    ----------
    filepaths : tuple of strings or pathlib.Path objects
        Path of the files to read.

    Returns
    -------
    runtimes : list of floats
        The runtimes, in file/line order.
    """
    runtimes = []
    for filepath in filepaths:
        with open(filepath, 'r') as infile:
            prev_line = ''
            for line in infile:
                if 'Solve Time:' in line and not prev_line.startswith('Warm-up'):
                    runtimes.append(float(line.split(' ')[-1]))
                prev_line = line
    return runtimes
def amgxwrapper_poisson_read_iterations(filepath):
    """Return the iteration count from the first 'Iterations' line.

    Parameters
    ----------
    filepath : string or pathlib.Path object
        Path of the file to read.

    Returns
    -------
    nites : integer
        The number of iterations (last space-separated token of the line).
        Raises UnboundLocalError when no 'Iterations' line exists,
        matching the historical behaviour.
    """
    with open(filepath, 'r') as infile:
        for line in infile:
            if 'Iterations' in line:
                nites = int(line.split(' ')[-1])
                break
    return nites
def get_runtime_stats(runtimes, scale=1.0):
    """Return the (smallest, biggest, mean) runtimes of a series.

    Parameters
    ----------
    runtimes : list of floats
        The runtimes of the series.
    scale : float (optional)
        Multiplicative coefficient applied to all three values;
        default: 1.0.

    Returns
    -------
    (min, max, mean) : tuple of 3 floats
        The smallest, biggest, and mean scaled runtimes.
    """
    smallest = min(runtimes) * scale
    biggest = max(runtimes) * scale
    mean = numpy.mean(runtimes) * scale
    return (smallest, biggest, mean)
def store_runtime_stats(data, nodes, stats):
    """Append one series' runtime stats to the accumulator dictionary.

    Parameters
    ----------
    data : dictionary
        Accumulator with 'nodes', 'min', 'max', 'means' list entries.
    nodes : integer
        Number of nodes used for the series.
    stats : tuple or list of 3 floats
        Smallest, biggest, and mean runtimes of the series.

    Returns
    -------
    data : dictionary
        The same (mutated) accumulator, for chaining.
    """
    smallest, biggest, mean = stats[0], stats[1], stats[2]
    data['nodes'].append(nodes)
    data['min'].append(smallest)
    data['max'].append(biggest)
    data['means'].append(mean)
    return data
def gather_arrays(data, keys=['nodes', 'min', 'max', 'means']):
    """Convert every entry of *data* to a numpy array (in place) and
    return the arrays for *keys*, in order.

    Parameters
    ----------
    data : dictionary
        Dictionary with the runtime data (lists).
    keys : list of strings (optional)
        Keys to include in the returned tuple, in order.

    Returns
    -------
    res : tuple of numpy.ndarray
        The gathered arrays.
    """
    # NOTE: the default `keys` list is only read, never mutated, so the
    # shared mutable default is harmless here.
    for key in data:
        data[key] = numpy.array(data[key])
    return tuple(data[key] for key in keys)
rootdir = pathlib.Path(__file__).absolute().parents[1]
data = {'Colonial One': {}, 'Azure': {}}
# Get runtimes data for PETSc runs on Colonial One.
nodes = [1, 2, 4, 8]
subdata = {'nodes': [], 'min': [], 'max': [], 'means': []}
for n in nodes:
    casedir = rootdir / f'colonialone/petsc/{n:0>2}_short/output'
    filepaths = get_filepaths(casedir)
    runtimes = amgxwrapper_poisson_read_runtimes(*filepaths)
    store_runtime_stats(subdata, n, get_runtime_stats(runtimes))
data['Colonial One']['PETSc'] = gather_arrays(subdata)
# Get runtimes data for AmgX runs on Colonial One.
# (AmgX runtimes are normalized by the iteration count: time/iteration.)
nodes = [1, 2, 4, 8]
subdata = {'nodes': [], 'min': [], 'max': [], 'means': []}
for n in nodes:
    casedir = rootdir / f'colonialone/amgx/{n:0>2}_ivygpu/output'
    filepaths = get_filepaths(casedir)
    runtimes = amgxwrapper_poisson_read_runtimes(*filepaths)
    nites = amgxwrapper_poisson_read_iterations(filepaths[0])
    store_runtime_stats(subdata, n, get_runtime_stats(runtimes,
                                                      scale=1.0 / nites))
data['Colonial One']['AmgX'] = gather_arrays(subdata)
# Get runtimes data for PETSc runs on Azure.
nodes = [1, 2, 4, 8]
subdata = {'nodes': [], 'min': [], 'max': [], 'means': []}
for n in nodes:
    casedir = rootdir / f'azure/petsc/{n:0>2}_h16r/output'
    filepaths = get_filepaths(casedir)
    runtimes = amgxwrapper_poisson_read_runtimes(*filepaths)
    store_runtime_stats(subdata, n, get_runtime_stats(runtimes))
data['Azure']['PETSc'] = gather_arrays(subdata)
# Get runtimes data for AmgX runs on Azure.
nodes = [1, 2, 4, 8]
subdata = {'nodes': [], 'min': [], 'max': [], 'means': []}
for n in nodes:
    casedir = rootdir / f'azure/amgx/{n:0>2}_nc24r/output'
    filepaths = get_filepaths(casedir)
    runtimes = amgxwrapper_poisson_read_runtimes(*filepaths)
    nites = amgxwrapper_poisson_read_iterations(filepaths[0])
    store_runtime_stats(subdata, n, get_runtime_stats(runtimes,
                                                      scale=1.0 / nites))
data['Azure']['AmgX'] = gather_arrays(subdata)
# Get runtimes data for AmgX runs on Azure (larger problem size).
nodes = [1, 2, 4, 8]
subdata = {'nodes': [], 'min': [], 'max': [], 'means': []}
for n in nodes:
    casedir = rootdir / f'azure/amgx/larger/{n:0>2}_nc24r/output'
    filepaths = get_filepaths(casedir)
    runtimes = amgxwrapper_poisson_read_runtimes(*filepaths)
    nites = amgxwrapper_poisson_read_iterations(filepaths[0])
    store_runtime_stats(subdata, n, get_runtime_stats(runtimes,
                                                      scale=1.0 / nites))
data['Azure']['AmgX-larger'] = gather_arrays(subdata)
# Create Matplotlib figures.
pyplot.rc('font', family='serif', size=14)
fig = pyplot.figure(figsize=(6.0, 6.0))
# Top row (full width): PETSc; bottom row: AmgX per-iteration times.
gs = gridspec.GridSpec(nrows=2, ncols=2, height_ratios=[2, 1])
# Plot time-to-solution versus number of nodes for PETSc runs.
ax1 = fig.add_subplot(gs[0, :])
ax1.text(numpy.log2(1.0), 2.5, 'PETSc')
ax1.set_xlabel('Number of nodes')
ax1.set_ylabel('Time (s)')
# Colonial One runs.
nodes, mins, maxs, means = data['Colonial One']['PETSc']
ax1.plot(numpy.log2(nodes), means, label='Colonial One', color='C0')
ax1.errorbar(numpy.log2(nodes), means,
             [means - mins, maxs - means],
             fmt='k', linewidth=0, ecolor='black', elinewidth=2,
             capthick=2, capsize=4, barsabove=True)
# Azure runs.
nodes, mins, maxs, means = data['Azure']['PETSc']
ax1.plot(numpy.log2(nodes), means, label='Azure', color='C1')
ax1.errorbar(numpy.log2(nodes), means,
             [means - mins, maxs - means],
             fmt='k', linewidth=0, ecolor='black', elinewidth=2,
             capthick=2, capsize=4, barsabove=True)
ax1.legend(frameon=False)
ax1.tick_params(axis='both')
# x values are log2(nodes); relabel ticks with the actual node counts.
ax1.set_xticks(numpy.log2([1, 2, 4, 8]))
ax1.set_xticklabels([1, 2, 4, 8])
ax1.set_ylim(0.0, 25.0)
ax1.set_yticks([0.0, 5.0, 10.0, 15.0, 20.0, 25.0])
ax1.set_yticklabels([0, 5, 10, 15, 20, 25])
# Plot time-to-solution versus number of nodes for AmgX runs.
ax2 = fig.add_subplot(gs[1, 0])
ax2.text(numpy.log2(1.0), 0.005, 'AmgX')
ax2.set_xlabel('Number of nodes')
ax2.set_ylabel('Time (s)')
nodes, mins, maxs, means = data['Colonial One']['AmgX']
ax2.plot(numpy.log2(nodes), means, label='Colonial One', color='C0')
ax2.errorbar(numpy.log2(nodes), means,
             [means - mins, maxs - means],
             fmt='k', linewidth=0, ecolor='black', elinewidth=2,
             capthick=2, capsize=4, barsabove=True)
nodes, mins, maxs, means = data['Azure']['AmgX']
ax2.plot(numpy.log2(nodes), means, label='Azure', color='C1')
ax2.errorbar(numpy.log2(nodes), means,
             [means - mins, maxs - means],
             fmt='k', linewidth=0, ecolor='black', elinewidth=2,
             capthick=2, capsize=4, barsabove=True)
ax2.tick_params(axis='both')
ax2.set_xticks(numpy.log2([1, 2, 4, 8]))
ax2.set_xticklabels([1, 2, 4, 8])
ax2.set_ylim(0.0, 0.05)
ax2.set_yticks([0.0, 0.025, 0.05])
ax2.set_yticklabels([0.0, 0.025, 0.05])
# Plot time-to-solution versus number of nodes for AmgX larger runs (Azure).
ax3 = fig.add_subplot(gs[1, 1])
ax3.text(numpy.log2(1.0), 0.02, 'AmgX')
ax3.set_xlabel('Number of nodes')
ax3.set_ylabel('Time (s)')
nodes, mins, maxs, means = data['Azure']['AmgX-larger']
ax3.plot(numpy.log2(nodes), means, label='Azure', color='C1')
ax3.errorbar(numpy.log2(nodes), means,
             [means - mins, maxs - means],
             fmt='k', linewidth=0, ecolor='black', elinewidth=2,
             capthick=2, capsize=4, barsabove=True)
ax3.tick_params(axis='both')
ax3.set_xticks(numpy.log2([1, 2, 4, 8]))
ax3.set_xticklabels([1, 2, 4, 8])
ax3.set_ylim(0.0, 0.2)
ax3.set_yticks([0.0, 0.1, 0.2])
ax3.set_yticklabels([0.0, 0.1, 0.2])
# Save the figure.
figdir = rootdir / 'figures'
figdir.mkdir(parents=True, exist_ok=True)
filepath = figdir / 'poisson_time_vs_nodes.pdf'
fig.tight_layout()
fig.savefig(str(filepath), dpi=300, bbox_inches='tight')
pyplot.show()
| StarcoderdataPython |
6574122 | from tecton import sql_transformation, TemporalFeaturePackage, DataSourceConfig, MaterializationConfig
from feature_repo.shared import entities as e, data_sources
from datetime import datetime
# @sql_transformation(inputs=data_sources.user_info, has_context=True)
# def user_age_years_sql_transform(context, table_name):
# return f"""
# select
# user_uuid,
# CAST(FLOOR(datediff(to_date('{context.feature_data_end_time}'), dob) / 365.25) as INT) AS age,
# to_timestamp('{context.feature_data_end_time}') as timestamp
# from
# {table_name}
# """
#
#
# user_age = TemporalFeaturePackage(
# name="user_age_years",
# description="Age of a user in years",
# transformation=user_age_years_sql_transform,
# entities=[e.user_entity],
#
# materialization=MaterializationConfig(
# schedule_interval='30day',
# online_enabled=True,
# offline_enabled=True,
# feature_start_time=datetime(2020, 6, 19),
# serving_ttl='60days',
# ),
#
# family='ad_serving',
# tags={'release': 'production'},
# owner="<EMAIL>",
# )
| StarcoderdataPython |
151348 | <filename>skworkorders/ConfigCenter.py<gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from .models import ConfigCenter
from django.contrib.auth.decorators import login_required
from skaccounts.permission import permission_verify
from .forms import ConfigCenter_form
from django.shortcuts import render
from django.template import RequestContext
from lib.com import get_object
import logging
from lib.lib_fabric import ssh_cmd_back
log = logging.getLogger('skworkorders')
@login_required()
@permission_verify()
def ConfigCenter_index(request):
    # List view: all ConfigCenter records. NOTE: locals() is passed as the
    # template context, so the local variable names (temp_name, tpl_all)
    # are part of the template contract -- do not rename them.
    temp_name = "skworkorders/skworkorders-header.html"
    tpl_all = ConfigCenter.objects.all()
    return render(request,'skworkorders/ConfigCenter_index.html', locals())
@login_required()
@permission_verify()
def ConfigCenter_add(request):
    # Create view: on POST, validate and save a new ConfigCenter record;
    # on GET, render an empty form. locals() is the template context, so
    # the local names (tips, display_control, tpl_ConfigCenter_form) are
    # part of the template contract.
    temp_name = "skworkorders/skworkorders-header.html"
    if request.method == "POST":
        tpl_ConfigCenter_form = ConfigCenter_form(request.POST)
        if tpl_ConfigCenter_form.is_valid():
            tpl_ConfigCenter_form.save()
            tips = "增加成功!"
            display_control = ""
        else:
            tips = "增加失败!"
            display_control = ""
        return render(request,"skworkorders/ConfigCenter_add.html", locals())
    else:
        display_control = "none"
        tpl_ConfigCenter_form = ConfigCenter_form()
        return render(request,"skworkorders/ConfigCenter_add.html", locals())
@login_required()
@permission_verify()
def ConfigCenter_del(request):
    # Delete view: removes the record whose id arrives as ?id=..., then
    # re-renders the index list. Deletion failures are logged, not raised.
    temp_name = "skworkorders/skworkorders-header.html"
    obj_id = request.GET.get('id', '')
    if obj_id:
        try:
            ConfigCenter.objects.filter(id=obj_id).delete()
        except Exception as tpl_error_msg:
            log.warning(tpl_error_msg)
    tpl_all = ConfigCenter.objects.all()
    return render(request,"skworkorders/ConfigCenter_index.html", locals())
@login_required()
@permission_verify()
def ConfigCenter_edit(request, ids):
    # Edit view. status codes read by the template (via locals()):
    # 0 = initial GET, 1 = saved OK, 2 = validation error.
    status = 0
    obj = get_object(ConfigCenter, id=ids)
    if request.method == 'POST':
        tpl_ConfigCenter_form = ConfigCenter_form(request.POST, instance=obj)
        if tpl_ConfigCenter_form.is_valid():
            tpl_ConfigCenter_form.save()
            status = 1
        else:
            status = 2
    else:
        tpl_ConfigCenter_form = ConfigCenter_form(instance=obj)
    return render(request,"skworkorders/ConfigCenter_edit.html", locals())
@login_required()
@permission_verify()
def ConfigCenter_check(request,ids):
    # Health-check view: SSH into the configured host, run `date`, and
    # show the command output plus a success/failure line in the template
    # (context is locals()).
    temp_name = "skworkorders/skworkorders-header.html"
    obj = get_object(ConfigCenter, id=ids)
    cmd = "date"
    ret,retcode = ssh_cmd_back(obj.ip,obj.port,obj.username,obj.password,cmd,obj.rsa_key)
    ret.insert(0,"SSH登陆验证:检测配置中心时间")
    if retcode == 0:
        ret.append("执行成功")
    else:
        ret.append("执行失败")
    return render(request,"skworkorders/ConfigCenter_check.html", locals())
| StarcoderdataPython |
1711710 | <reponame>ibestvina/dagpy
from . import utils
from . import blockio
from . import dag
from pkg_resources import resource_string
import os
import re
import nbformat
from nbformat.v4 import new_markdown_cell, new_code_cell, new_notebook
class FlowManager:
    """Manages the creation, parsing and saving of flow notebooks"""
    def __init__(self, dag, path):
        """Init from DAG object used for block lookup and .dagpy path to which block files are relative."""
        self._dag = dag
        self._path = path

    @staticmethod
    def read_nb(fpathname):
        """Read the iPython notebook file."""
        with open(fpathname, encoding='utf-8') as f:
            return nbformat.read(f, as_version=4)

    def _read_block_nb(self, block_id):
        """Read the block notebook file."""
        fname = self._dag.block_filename(block_id)
        fpathname = os.path.join(self._path, fname)
        return self.read_nb(fpathname)

    def _header_cell(self):
        """Create the header cell with custom DAGpy code, such as the start_new_dagpy_block function.
        Header cell is loaded from the header_cell.txt file.
        """
        source = resource_string(__name__, 'header_cell.txt').decode('UTF-8')
        #source = open('dagpy/header_cell.txt', 'r').read()
        return new_code_cell(source = source, metadata = {'dagpy': {'cell_type': blockio.HEADER_CELL_TYPE}})

    def _delimiter_cell(self, block_id):
        """Delimiter cell between blocks, specifying the following block metadata."""
        # Markdown headings encode the block attributes; they are read
        # back by parse_delimiter_cell().
        source = '### Block_id\n' + str(block_id) + '\n'
        source += '##### Description\n' + self._dag.get_block_att(block_id, 'description', default='') + '\n'
        source += '##### Parents\n' + ', '.join(self._dag.get_block_att(block_id, 'parents', default='')) + '\n'
        source += '##### Filter\n' + ', '.join(self._dag.get_block_att(block_id, 'filter', default='')) + '\n'
        source += '##### File\n' + self._dag.get_block_att(block_id, 'file', default='')
        return new_markdown_cell(source=source, metadata={'dagpy': {'cell_type': blockio.DELIMITER_CELL_TYPE, 'block_id': block_id}})

    def _create_notebook(self):
        """Create new flow notebook with title and header cells"""
        title_cell = new_markdown_cell(source='## DAGpy Flow notebook\nIf you want to start a new block, just run the start_new_dagpy_block() in the cell!', metadata={'dagpy': {'cell_type': blockio.TITLE_CELL_TYPE}})
        return new_notebook(cells=[self._header_cell(), title_cell])

    def merge_blocks(self, block_ids):
        """Merge blocks into a flow notebook."""
        merged = self._create_notebook()
        for block_id in block_ids:
            nb = self._read_block_nb(block_id)
            merged.cells.append(self._delimiter_cell(block_id))
            merged.cells.extend(nb.cells)
        merged.metadata.name = "dagpy_flow"
        return merged

    def flow_to_file(self, block_ids, filepathname):
        """Create flow and save it to file."""
        # Blocks are ordered so that every block follows its dependencies.
        block_ids_linearized = utils.linearize_dependencies(self._dag, block_ids)
        nb = self.merge_blocks(block_ids_linearized)
        nbformat.write(nb, filepathname)

    @staticmethod
    def parse_delimiter_cell_att(att_name, lines):
        """Parse a single block attribute from a delimiter cell.

        Returns None for unknown attribute names.
        """
        if att_name == 'block_id':
            return re.sub( '\s+', '', ''.join(lines))
        if att_name == 'description':
            return '\n'.join(lines)
        if att_name == 'file':
            return ''.join([line.strip() for line in lines])
        if att_name in ['parents', 'filter']:
            return re.sub( '[,\s]+', ' ', ' '.join(lines)).split()

    @staticmethod
    def parse_delimiter_cell(cell):
        """Parse a delimiter cell to retrieve block metadata attributes."""
        block_meta = {}
        lines = cell.source.splitlines()
        att_name = None
        att_lines = []
        for line in lines:
            if line and line[0] == '#':
                # a markdown heading starts a new attribute; flush the
                # previous one first
                if att_name:
                    block_meta[att_name] = FlowManager.parse_delimiter_cell_att(att_name, att_lines)
                att_name = re.sub( '[#\s]+', '', line).strip().lower()
                att_lines = []
            elif att_name:
                att_lines += [line]
        if att_name:
            block_meta[att_name] = FlowManager.parse_delimiter_cell_att(att_name, att_lines)
        # The cell metadata's block_id always wins over an edited heading.
        if 'block_id' in cell.metadata['dagpy']:
            # preexisting block
            if 'block_id' in block_meta and block_meta['block_id'] != cell.metadata['dagpy']['block_id']:
                # user has changed the block id
                # TODO: Decide if block_id can be changed in flow. Probably not.
                block_meta['block_id'] = cell.metadata['dagpy']['block_id']
            else:
                block_meta['block_id'] = cell.metadata['dagpy']['block_id']
        else:
            # new block
            if 'block_id' not in block_meta:
                print('ERROR: no block_id specified')
                block_meta['block_id'] = 'new_block'
        return block_meta

    def apply_flow_changes(self, filepathname):
        """Apply changes made in the flow to block files and the DAG."""
        cells = self.read_nb(filepathname).cells
        blocks_to_save = {}
        block_cells = []
        block_meta = {}
        blocks_meta = []
        for cell in cells:
            if 'dagpy' in cell.metadata:
                if cell.metadata['dagpy']['cell_type'] == blockio.DELIMITER_CELL_TYPE: # other dagpy cell types are ignored
                    # a delimiter starts a new block: flush the cells
                    # collected for the previous one
                    if block_meta:
                        blocks_meta += [block_meta]
                        blocks_to_save[block_meta['block_id']] = block_cells
                        block_cells = []
                    block_meta = FlowManager.parse_delimiter_cell(cell)
            else:
                block_cells += [cell]
        # flush the final block
        if block_meta:
            blocks_meta += [block_meta]
            blocks_to_save[block_meta['block_id']] = block_cells
        if blocks_meta:
            self._dag.update_blocks(blocks_meta)
        for block_id, block_cells in blocks_to_save.items():
            blockio.save_block(block_id, block_cells, self._dag)
| StarcoderdataPython |
9741193 | <gh_stars>0
from typing import List
from helpers import console
from .checks import guild_only, registered_guild_only, registered_guild_and_admin_or_mod_only, registered_guild_with_muted_role_and_admin_or_mod_only, _is_admin_or_mod
from .command_registration import register_command as _register_command
from .command_registration import on_command_error
from .command_definition import CommandDefinition
from .record_usage import record_usage
from .defer_cmd import defer_cmd
from .help_service import help_service
def register_commands(commands: List[CommandDefinition]):
    """Register each command definition, logging and skipping any whose
    configuration is invalid."""
    with console.status("[bold_green]Registering commands...[/bold_green]"):
        for command in commands:
            try:
                _register_command(command)
            except KeyError as err:
                console.critical(f"Failed to register {command}, invalid config. {err}")
    console.info(f"[bold_green]Registered {len(commands)} commands.[/bold_green]")
| StarcoderdataPython |
8116275 | """tokenize readable handle to tokens"""
def tokenize(handle):
    """Read *handle* one character at a time and yield lexical tokens.

    Token forms:
      (char, pos)        -- structural delimiter: [ ] { } , :
      ('v', pos, text)   -- bare value accumulated between delimiters
      ('S', pos, text)   -- quoted string ('...' or "...", backslash escapes)
    Positions are 0-based offsets of the token start.

    Fixes: a trailing bare value is now emitted at EOF instead of being
    silently dropped, and a pending bare value is flushed before a quoted
    string starts (previously it was overwritten and lost).
    """
    pos = -1
    mem = ""
    while True:
        pos += 1
        char = handle.read(1)
        if not char:
            break
        if char in ['[', ']', '{', '}', ",", ":"]:
            if mem.strip():
                yield ("v", pos - len(mem), mem)
                mem = ""
            yield (char, pos)  # structural token
        elif char in ['"', "'"]:
            # Fix: flush any pending bare value so text directly before a
            # quote is not lost.
            if mem.strip():
                yield ("v", pos - len(mem), mem)
            start_c = char
            pos_start = pos
            mem = ""
            while True:
                c_prev = char
                char = handle.read(1)
                pos += 1
                if not char:
                    break
                if char == start_c and c_prev != "\\":
                    break
                mem += char
            # An unterminated string at EOF is still yielded (as before).
            yield ('S', pos_start, mem)
            mem = ""
            if not char:
                break
        else:
            mem += char
    # Fix: emit a trailing bare value at EOF instead of dropping it.
    if mem.strip():
        yield ("v", pos - len(mem), mem)
1971209 | <gh_stars>10-100
from polyphony import testbench
def f():
    """Constant source: always produces 0."""
    return 0
def call02():
    # Forward the call to f(), which returns 0 in this module.
    return f()
@testbench
def test():
    # Testbench: the call chain must yield f()'s constant 0.
    assert 0 == call02()

test()
| StarcoderdataPython |
8083675 | from os import listdir
import glob
import json
def load_track(filename: str):
    """Rebuild a Tracker from a JSON file of previously saved operations."""
    with open(filename) as handle:
        saved_operations = json.load(handle)
    restored = Tracker(filename)
    restored.operations = saved_operations
    return restored
class Tracker:
    """Turns a stream of (operation, value) updates into a list of sound
    operations ("pitch" or "slice") that can be serialized to JSON."""
    def __init__(self, name:str, sounds_dir:str = "sounds/*.wav", mode:str = "pitch"):
        self.name = name
        # per-operation running state: octave / slice window / last value
        self.state = {}
        self.operations = []
        # available sample files; each new operation key gets the next
        # sound in round-robin order
        self.sounds = glob.glob(sounds_dir)
        self.sound_index = 0
        self.mode = mode

    def add(self, op: str, value, category: str=None):
        """Record one update for *op* and append the derived operation.

        NOTE(review): *category* is accepted but unused; raises IndexError
        when sounds_dir matched no files -- confirm intended.
        """
        # first sighting of this op: register defaults + assign a sound
        if not op in self.state:
            self.state[op] = {"octave": 1, "pos": 1000, "dur": 50, "value": value, "sound": self.sounds[self.sound_index]}
            self.sound_index += 1
            if self.sound_index >= len(self.sounds):
                self.sound_index = 0
        if self.mode == "pitch":
            # pitch mode: octave drifts +/-0.2 following the value trend
            if self.state[op]["value"] < value:
                self.state[op]["octave"] = self.state[op]["octave"] + .2
            elif self.state[op]["value"] > value:
                self.state[op]["octave"] = self.state[op]["octave"] - .2
            operation = {"type": self.mode, "octave": self.state[op]["octave"], "sound": self.state[op]["sound"]}
        elif self.mode == "slice":
            # slice mode: window position/duration move with the trend;
            # an unchanged value still nudges the window forward
            if self.state[op]["value"] < value:
                self.state[op]["pos"] = self.state[op]["pos"] - 2000
                self.state[op]["dur"] = self.state[op]["dur"] - 10
            elif self.state[op]["value"] > value:
                self.state[op]["pos"] = self.state[op]["pos"] + 2000
                self.state[op]["dur"] = self.state[op]["dur"] + 10
            else:
                self.state[op]["pos"] = self.state[op]["pos"] + 500
                self.state[op]["dur"] = self.state[op]["dur"] + 5
            operation = {"type": self.mode, "start": self.state[op]["pos"],
                         "end": self.state[op]["pos"] + self.state[op]["dur"], "sound": self.state[op]["sound"]}
        self.state[op]["value"] = value
        self.operations.append(operation)

    def list(self):
        # Returns the live operations list (not a copy).
        return self.operations

    def save(self, filename:str):
        # Serialize the accumulated operations as pretty-printed JSON.
        with open(filename, 'w') as outfile:
            json.dump(self.operations, outfile, indent=2)
| StarcoderdataPython |
3558324 | # File name : SakitoScrap.py
# Author : <NAME>
# Outline : Sakitoの自動化を行うクラス
# license : MIT
# Copyright (c) 2017, <NAME>
import requests
from bs4 import BeautifulSoup
class SakitoScrap:
    """Automates login and basic scraping of the Sakito site."""
    def __init__(self, email:str, password:str):
        self.email = email
        self.password = password
        # a logged-in requests.Session reused by every method
        self.__session = self.__login()

    def __login(self):
        """Log in and return the authenticated requests session."""
        s = requests.Session()
        # get authenticity_token
        response = s.get('https://sakito.cirkit.jp/user/sign_in')
        soup = BeautifulSoup(response.text, 'html.parser')
        authenticity_token = soup.body.findAll('form')[0].find(attrs={'name':'authenticity_token'})['value']
        # login
        login_payload = {
            'utf8': '✓',
            'authenticity_token': authenticity_token,
            'user[email]': self.email,
            'user[password]': self.password,
            'user[remember_me]': '0',
            'commit': 'ログイン'
        }
        s.post('https://sakito.cirkit.jp/user/sign_in', data=login_payload)
        return s

    def getPoint(self):
        """Return the user's point balance (first <h1> on the user page)."""
        s = self.__session
        response = s.get('https://sakito.cirkit.jp/user')
        soup = BeautifulSoup(response.text, 'html.parser')
        return int(soup.body.findAll('h1')[0].string)

    def checkNewQuestion(self):
        """Return headings of surveys whose footer marks them answerable."""
        s = self.__session
        response = s.get('https://sakito.cirkit.jp/surveys')
        soup = BeautifulSoup(response.text, 'html.parser')
        newQuestionList = []
        for row in soup.body.findAll(attrs={'class':'panel-success'}):
            if( row.find(attrs={'class':'panel-footer'}).text == '回答する' ):
                newQuestionList.append( row.find(attrs={'class':'panel-heading'}).text )
        return newQuestionList

    def gacha(self):
        """POST to the point endpoint with a fresh CSRF token ("gacha"
        action); the response is fetched but not returned."""
        s = self.__session
        response = s.get('https://sakito.cirkit.jp/user/point/new')
        soup = BeautifulSoup(response.text, 'html.parser')
        authenticity_token = soup.head.find(attrs={'name':'csrf-token'})['content']
        gacha_payload = {
            '_method': 'post',
            'authenticity_token': authenticity_token,
        }
        response = s.post('https://sakito.cirkit.jp/user/point', data=gacha_payload)
| StarcoderdataPython |
6427461 | """
New Zealand Business Number
Retrieve Entity Test Module
author: <EMAIL>
© Procuret Operating Pty Ltd
"""
from nzbn.tests.test import Test
from nzbn.tests.test_result import Success, TestResult, Failure
from nzbn.entity import Entity
class RetrieveEntity(Test):
    """Functional test: retrieve a single Entity from the sandbox NZBN API."""

    NAME = 'Retrieve an Entity'

    def execute(self) -> TestResult:
        """Fetch a known NZBN; return Success, or Failure if absent."""
        # Fixed NZBN of a known sandbox entity (presumably RocketWerkz, per
        # the variable name).  sandbox=True keeps this off the live API.
        rocketwerkz = Entity.retrieve(
            access_token=self.access_token,
            nzbn='9429032530384',
            sandbox=True
        )
        if rocketwerkz is None:
            return Failure('Expected Entity absent')
        assert isinstance(rocketwerkz, Entity)
        return Success()
| StarcoderdataPython |
1917874 | <filename>acky/ec2.py<gh_stars>0
from acky.api import (
AwsCollection,
AwsApiClient,
make_filters,
)
from itertools import chain
class EC2ApiClient(AwsApiClient):
    # Shared base for every EC2 collection: pins the AWS service name that
    # the underlying API client dispatches calls to.
    service_name = "ec2"
class EC2(EC2ApiClient):
    """Top-level EC2 facade: region helpers plus one property per
    resource collection (instances, volumes, security groups, ...)."""

    def regions(self, continent='us', include_gov=False):
        """Return region records from DescribeRegions.

        continent: keep only regions whose name starts with "<continent>-";
            pass "all" (or a falsy value) to skip the continent filter.
        include_gov: when False (the default), AWS GovCloud regions (names
            containing "-gov-", e.g. "us-gov-west-1") are filtered out.
            Previously this parameter was accepted but never used.
        """
        regions = self.call("DescribeRegions", response_data_key="Regions")
        if regions and continent and continent != "all":
            regions = [r for r in regions
                       if r['RegionName'].startswith("{}-".format(continent))]
        if regions and not include_gov:
            # GovCloud region names embed "-gov-" between continent and area.
            regions = [r for r in regions
                       if '-gov-' not in r['RegionName']]
        return regions

    def zones(self, region):
        # returns (string, ...)
        # DescribeAvailabilityZones
        raise NotImplementedError("aws.ec2.zones")

    @property
    def environment(self):
        """Parent environment dict, tagged with hoster='ec2'."""
        env = super(EC2, self).environment
        env['hoster'] = 'ec2'
        return env

    # One accessor per resource type; each returns a fresh collection bound
    # to the same underlying AWS connection.
    @property
    def ACLs(self):
        return ACLCollection(self._aws)

    @property
    def ACEs(self):
        return ACECollection(self._aws)

    @property
    def ElasticIPs(self):
        return ElasticIPCollection(self._aws)

    @property
    def Instances(self):
        return InstanceCollection(self._aws)

    @property
    def SecurityGroups(self):
        return SecurityGroupCollection(self._aws)

    @property
    def IpPermissions(self):
        return IpPermissionsCollection(self._aws)

    @property
    def Volumes(self):
        return VolumeCollection(self._aws)

    @property
    def Snapshots(self):
        return SnapshotCollection(self._aws)

    @property
    def Subnets(self):
        return SubnetCollection(self._aws)

    @property
    def VPCs(self):
        return VPCCollection(self._aws)

    @property
    def PlacementGroups(self):
        return PlacementGroupCollection(self._aws)

    @property
    def KeyPairs(self):
        return KeyPairCollection(self._aws)

    @property
    def Tags(self):
        return TagCollection(self._aws)

    @property
    def Images(self):
        return ImageCollection(self._aws)
class ACLCollection(AwsCollection, EC2ApiClient):
    """Network ACL operations — all stubs awaiting implementation."""

    def get(self, filters=None):
        # returns (acl_info, ...)
        # DescribeNetworkAcls
        raise NotImplementedError()

    def create(self, vpc):
        # returns acl_info
        # CreateNetworkAcl
        raise NotImplementedError()

    def destroy(self, acl):
        # returns bool
        # DeleteNetworkAcl
        raise NotImplementedError()


class ACECollection(AwsCollection, EC2ApiClient):
    """Network ACL *entry* operations — all stubs awaiting implementation."""

    def get(self, filters=None):
        # returns (ace_info, ...)
        # DescribeNetworkAcls
        raise NotImplementedError()

    def add(self, acl, ace_list):
        # returns ace_info
        # CreateNetworkAclEntry
        raise NotImplementedError()

    def remove(self, acl, ace_list):
        # returns bool
        # DeleteNetworkAclEntry
        raise NotImplementedError()

    def replace(self, acl, old, new):
        # returns ace_info
        # CreateNetworkAclEntry, DeleteNetworkAclEntry
        raise NotImplementedError()
class ElasticIPCollection(AwsCollection, EC2ApiClient):
    """Interface to get, create, destroy, associate, and disassociate EIPs for
    classic EC2 domains and VPCs. (Amazon EC2 API Version 2014-06-15)
    """
    def get(self, filters=None):
        """List EIPs and associated information."""
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeAddresses",
                         response_data_key="Addresses",
                         **params)

    def create(self, vpc=False):
        """Allocate a new EIP.

        Set vpc=True to allocate an EIP for a VPC instance (Domain="vpc");
        set vpc=False for an EC2-Classic instance (Domain="standard").
        (The previous docstring had these two cases reversed.)
        """
        return self.call("AllocateAddress",
                         Domain="vpc" if vpc else "standard")

    def destroy(self, eip_or_aid, disassociate=False):
        """Release an EIP. If the EIP was allocated for a VPC instance, an
        AllocationId(aid) must be provided instead of a PublicIp. Setting
        disassociate to True will attempt to disassociate the IP before
        releasing it (required for associated nondefault VPC instances).
        """
        # A dot distinguishes a dotted-quad PublicIp from an allocation id.
        if "." in eip_or_aid:  # If an IP is given (Classic)
            # NOTE: EIPs are automatically disassociated for Classic instances.
            return "true" == self.call("ReleaseAddress",
                                       response_data_key="return",
                                       PublicIp=eip_or_aid)
        else:  # If an AID is given (VPC)
            if disassociate:
                self.disassociate(eip_or_aid)
            return "true" == self.call("ReleaseAddress",
                                       response_data_key="return",
                                       AllocationId=eip_or_aid)

    def associate(self, eip_or_aid,
                  instance_id='', network_interface_id='', private_ip=''):
        """Associate an EIP with a given instance or network interface. If
        the EIP was allocated for a VPC instance, an AllocationId(aid) must
        be provided instead of a PublicIp.
        """
        if "." in eip_or_aid:  # If an IP is given (Classic)
            return self.call("AssociateAddress",
                             PublicIp=eip_or_aid,
                             InstanceId=instance_id,
                             NetworkInterfaceId=network_interface_id,
                             PrivateIpAddress=private_ip)
        else:  # If an AID is given (VPC)
            return self.call("AssociateAddress",
                             AllocationId=eip_or_aid,
                             InstanceId=instance_id,
                             NetworkInterfaceId=network_interface_id,
                             PrivateIpAddress=private_ip)

    def disassociate(self, eip_or_aid):
        """Disassociates an EIP. If the EIP was allocated for a VPC instance,
        an AllocationId(aid) must be provided instead of a PublicIp.
        """
        if "." in eip_or_aid:  # If an IP is given (Classic)
            return "true" == self.call("DisassociateAddress",
                                       response_data_key="return",
                                       PublicIp=eip_or_aid)
        else:  # If an AID is given (VPC)
            return "true" == self.call("DisassociateAddress",
                                       response_data_key="return",
                                       AllocationId=eip_or_aid)
class InstanceCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy/control operations for EC2 instances."""

    def get(self, instance_ids=None, filters=None):
        """List instance info, optionally narrowed by ids and/or filters."""
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        if instance_ids:
            params['InstanceIds'] = instance_ids
        reservations = self.call("DescribeInstances",
                                 response_data_key="Reservations",
                                 **params)
        if reservations:
            # DescribeInstances groups instances by reservation; flatten to
            # a single list of instance dicts.
            return list(chain(*(r["Instances"] for r in reservations)))
        return []

    def create(self, ami, count, config=None):
        """Create an instance using the launcher."""
        return self.Launcher(config=config).launch(ami, count)

    def destroy(self, instance_id):
        """Terminate a single given instance."""
        return self.control(instance_id, "terminate")

    def control(self, instances, action):
        """Apply *action* to one instance id or a list/tuple of them.

        Valid actions: start, stop, reboot, terminate, protect, and
        unprotect.  protect/unprotect toggle the disableApiTermination
        attribute one instance at a time and return the string "true";
        the other actions return the API response.
        """
        if not isinstance(instances, list) and\
           not isinstance(instances, tuple):
            instances = [instances]
        actions = {'start': {'operation': "StartInstances",
                             'response_data_key': "StartingInstances",
                             'InstanceIds': instances},
                   'stop': {'operation': "StopInstances",
                            'response_data_key': "StoppingInstances",
                            'InstanceIds': instances},
                   'reboot': {'operation': "RebootInstances",
                              'response_data_key': "return",
                              'InstanceIds': instances},
                   'terminate': {'operation': "TerminateInstances",
                                 'response_data_key': "TerminatingInstances",
                                 'InstanceIds': instances},
                   'protect': {'operation': "ModifyInstanceAttribute",
                               'response_data_key': "return",
                               'Attribute': 'disableApiTermination',
                               'Value': 'true'},
                   'unprotect': {'operation': "ModifyInstanceAttribute",
                                 'response_data_key': "return",
                                 'Attribute': 'disableApiTermination',
                                 'Value': 'false'}}
        if (action in ('protect', 'unprotect')):
            # ModifyInstanceAttribute takes a single InstanceId per call.
            for instance in instances:
                self.call(InstanceId=instance, **actions[action])
            return "true"
        else:
            return self.call(**actions[action])

    def Launcher(self, config=None):
        """Provides a configurable launcher for EC2 instances."""
        class _launcher(EC2ApiClient):
            """Configurable launcher for EC2 instances. Create the Launcher
            (passing an optional dict of its attributes), set its attributes
            (as described in the RunInstances API docs), then launch().
            """
            def __init__(self, aws, config):
                super(_launcher, self).__init__(aws)
                self.config = config
                # Snapshot the baseline attribute names so anything assigned
                # afterwards can be collected as RunInstances parameters.
                self._attr = list(self.__dict__.keys()) + ['_attr']

            def launch(self, ami, min_count, max_count=0):
                """Use given AMI to launch min_count instances with the
                current configuration. Returns instance info list.
                """
                params = config.copy()
                # Attributes set on the launcher after construction override
                # the base config.
                params.update(dict([i for i in self.__dict__.items()
                                    if i[0] not in self._attr]))
                return self.call("RunInstances",
                                 ImageId=ami,
                                 MinCount=min_count,
                                 MaxCount=max_count or min_count,
                                 response_data_key="Instances",
                                 **params)
        if not config:
            config = {}
        return _launcher(self._aws, config)

    def status(self, all_instances=None, instance_ids=None, filters=None):
        """List instance status records (DescribeInstanceStatus)."""
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        if instance_ids:
            params['InstanceIds'] = instance_ids
        if all_instances is not None:
            params['IncludeAllInstances'] = all_instances
        statuses = self.call("DescribeInstanceStatus",
                             response_data_key="InstanceStatuses",
                             **params)
        return statuses

    def events(self, all_instances=None, instance_ids=None, filters=None):
        """Return a list of scheduled-event dicts, each annotated with the
        InstanceId it belongs to."""
        # Fix: pass the arguments straight through.  The previous code
        # rebuilt request-style params ('InstanceIds' key, pre-made filters)
        # and splatted them into status(), which raised TypeError for
        # instance_ids and ran make_filters() twice on filters.
        statuses = self.status(all_instances, instance_ids=instance_ids,
                               filters=filters)
        event_list = []
        for status in statuses:
            if status.get("Events"):
                for event in status.get("Events"):
                    event[u"InstanceId"] = status.get('InstanceId')
                    event_list.append(event)
        return event_list
class KeyPairCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for EC2 SSH key pairs."""

    def get(self, filters=None):
        """Return key pair descriptions, optionally narrowed by *filters*."""
        params = {"filters": make_filters(filters)} if filters else {}
        return self.call("DescribeKeyPairs",
                         response_data_key="KeyPairs",
                         **params)

    def create(self, key_name):
        """Create and return a new key pair named *key_name*."""
        return self.call("CreateKeyPair", KeyName=key_name)

    def destroy(self, key_name):
        """Delete the key pair named *key_name*."""
        return self.call("DeleteKeyPair", KeyName=key_name)
class PlacementGroupCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for EC2 placement groups."""

    def get(self, filters=None):
        # returns (sg_info, ...)
        # DescribePlacementGroups
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribePlacementGroups",
                         response_data_key="PlacementGroups",
                         **params)

    def create(self, group_name, strategy="cluster"):
        # returns sg_info
        # NOTE(review): this method sends snake_case request keys
        # ("strategy", "group_name") where sibling collections use CamelCase
        # (e.g. "GroupName") — confirm which form self.call() expects.
        params = {
            "strategy": strategy
        }
        # CreatePlacementGroup
        # group_name may be a callable that derives the name from the
        # current environment dict.
        if callable(group_name):
            params['group_name'] = group_name(self.environment)
        else:
            params['group_name'] = group_name
        return self.call("CreatePlacementGroup", **params)

    def destroy(self, pg):
        # returns bool
        # DeletePlacementGroup
        return self.call("DeletePlacementGroup", group_name=pg)
class SecurityGroupCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for EC2 security groups."""

    def get(self, filters=None, exclude_vpc=False):
        # returns (sg_info, ...)
        # DescribeSecurityGroups
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        groups = self.call("DescribeSecurityGroups",
                           response_data_key="SecurityGroups",
                           **params)
        if groups and exclude_vpc:
            # Exclude any group that belongs to a VPC
            return [g for g in groups if not g.get('VpcId')]
        else:
            return groups

    def create(self, name, description, vpc=None):
        # returns sg_info
        params = {
            "Description": description,
        }
        # CreateSecurityGroup
        # name may be a callable that derives the group name from the
        # current environment dict.
        if callable(name):
            params['GroupName'] = name(self.environment)
        else:
            params['GroupName'] = name
        if vpc:
            params["VpcId"] = vpc
        return self.call("CreateSecurityGroup", **params)

    def destroy(self, sg):
        # returns bool
        # DeleteSecurityGroup
        return self.call("DeleteSecurityGroup", GroupId=sg)
class IpPermissionsCollection(AwsCollection, EC2ApiClient):
    """Security-group rule (IpPermission) management."""

    def get(self, filters=None):
        # returns (sgr_info, ...)
        # DescribeSecurityGroups
        raise NotImplementedError()

    def modify(self, api_action, sgid, other, proto_spec):
        """Make a change to a security group. api_action is an EC2 API name.
        Other is one of:
           - a group (sg-nnnnnnnn)
           - a group with account (<user id>/sg-nnnnnnnn)
           - a CIDR block (n.n.n.n/n)
        Proto spec is a triplet (<proto>, low_port, high_port)."""
        params = {'group_id': sgid, 'ip_permissions': []}
        perm = {}
        params['ip_permissions'].append(perm)
        proto, from_port, to_port = proto_spec
        perm['IpProtocol'] = proto
        # Missing ports widen the rule: ToPort falls back to FromPort first,
        # so (proto, 22, None) means just port 22 while (proto, None, None)
        # means the full 0-65535 range.
        perm['FromPort'] = from_port or 0
        perm['ToPort'] = to_port or from_port or 65535
        if other.startswith("sg-"):
            perm['UserIdGroupPairs'] = [{'GroupId': other}]
        elif "/sg-" in other:
            # "<account>/sg-..." — a group owned by another AWS account.
            account, group_id = other.split("/", 1)
            perm['UserIdGroupPairs'] = [{
                'UserId': account,
                'GroupId': group_id,
            }]
        else:
            perm['IpRanges'] = [{'CidrIp': other}]
        return self.call(api_action, **params)

    def add(self, sgid, other, proto_spec, direction="in"):
        """Add a security group rule to group <sgid>.
           Direction is either 'in' (ingress) or 'out' (egress).
           See modify() for other parameters."""
        # returns bool
        # AuthorizeSecurityGroupIngress, AuthorizeSecurityGroupEgress
        if direction == "in":
            api = "AuthorizeSecurityGroupIngress"
        elif direction == "out":
            api = "AuthorizeSecurityGroupEgress"
        else:
            raise ValueError("direction must be one of ('in', 'out')")
        return self.modify(api, sgid, other, proto_spec)

    def remove(self, sgid, other, proto_spec, direction="in"):
        """Remove a security group rule from group <sgid>.
           Direction is either 'in' (ingress) or 'out' (egress).
           See modify() for other parameters."""
        # returns (removed_sgr_info, ...)
        # RevokeSecurityGroupIngress, RevokeSecurityGroupEgress
        if direction == "in":
            api = "RevokeSecurityGroupIngress"
        elif direction == "out":
            api = "RevokeSecurityGroupEgress"
        else:
            raise ValueError("direction must be one of ('in', 'out')")
        return self.modify(api, sgid, other, proto_spec)
class VolumeCollection(AwsCollection, EC2ApiClient):
    """Interface to get, create, destroy, and attach for EBS Volumes.
    (Amazon EC2 API Version 2014-06-15)
    """
    def get(self, volume_ids=None, filters=None):
        """List EBS Volume info."""
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        # Accept a single id for convenience; the API wants a list.
        if isinstance(volume_ids, str):
            volume_ids = [volume_ids]
        return self.call("DescribeVolumes",
                         VolumeIds=volume_ids,
                         response_data_key="Volumes",
                         **params)

    def create(self, az, size_or_snap, volume_type=None, iops=None,
               encrypted=True):
        """Create an EBS Volume using an availability-zone and size_or_snap
        parameter, encrypted by default.
        If the volume is created from a snapshot, (str)size_or_snap denotes
        the snapshot id. Otherwise, (int)size_or_snap denotes the amount of
        GiB's to allocate. iops must be set if the volume type is io1.
        """
        kwargs = {}
        # NOTE(review): 'encrypted' is lowercase while the other request keys
        # are CamelCase ('VolumeType', 'Iops') — confirm self.call() accepts
        # this form.
        kwargs['encrypted'] = encrypted
        if volume_type:
            kwargs['VolumeType'] = volume_type
        if iops:
            kwargs['Iops'] = iops
        # An int (or int-like string) means a size in GiB; anything
        # non-numeric is treated as a snapshot id.
        is_snapshot_id = False
        try:
            size_or_snap = int(size_or_snap)
        except ValueError:
            is_snapshot_id = True
        if is_snapshot_id:
            return self.call("CreateVolume", AvailabilityZone=az,
                             SnapshotId=size_or_snap, **kwargs)
        return self.call("CreateVolume", AvailabilityZone=az,
                         Size=size_or_snap, **kwargs)

    def destroy(self, volume_id):
        """Delete a volume by volume-id and return success boolean."""
        return 'true' == self.call("DeleteVolume", VolumeId=volume_id,
                                   response_data_key="return")

    def attach(self, volume_id, instance_id, device_path):
        """Attach a volume to an instance, exposing it with a device name."""
        return self.call("AttachVolume",
                         VolumeId=volume_id, InstanceId=instance_id,
                         Device=device_path)

    def detach(self, volume_id, instance_id='', device_path='', force=False):
        """Detach a volume from an instance."""
        return self.call("DetachVolume",
                         VolumeId=volume_id, InstanceId=instance_id,
                         Device=device_path, force=force)
class SnapshotCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for EBS snapshots."""

    def get(self, filters=None):
        """Return snapshot descriptions, optionally narrowed by *filters*."""
        params = {"filters": make_filters(filters)} if filters else {}
        return self.call("DescribeSnapshots",
                         response_data_key="Snapshots",
                         **params)

    def create(self, volume_id, description=None):
        """Snapshot volume *volume_id*; returns the new snapshot's info."""
        return self.call("CreateSnapshot",
                         VolumeId=volume_id,
                         Description=description)

    def destroy(self, snapshot_id):
        """Delete snapshot *snapshot_id*."""
        return self.call("DeleteSnapshot", SnapshotId=snapshot_id)
class SubnetCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for VPC subnets."""

    def get(self, filters=None):
        """Return subnet descriptions, optionally narrowed by *filters*."""
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeSubnets",
                         response_data_key="Subnets",
                         **params)

    def create(self, vpc_id, cidr, availability_zone):
        """Create a subnet in *vpc_id* covering *cidr*; returns subnet info.

        availability_zone: zone to place the subnet in.  Fix: this argument
        was previously accepted but silently ignored, letting AWS pick an
        arbitrary zone (key casing follows VolumeCollection.create's
        AvailabilityZone usage).
        """
        params = {}
        if availability_zone:
            params["AvailabilityZone"] = availability_zone
        return self.call("CreateSubnet",
                         VpcId=vpc_id,
                         CidrBlock=cidr,
                         response_data_key="Subnet",
                         **params)

    def destroy(self, subnet_id):
        """Delete subnet *subnet_id*; True when the API reports success."""
        # The "return" field of the response is truthy on success.
        return bool(self.call("DeleteSubnet", SubnetId=subnet_id,
                              response_data_key="return"))
class VPCCollection(AwsCollection, EC2ApiClient):
    """VPC lookup; create/destroy are still unimplemented stubs."""

    def get(self, filters=None):
        # returns (vpc_info, ...)
        # DescribeVpcs
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeVpcs", response_data_key="Vpcs", **params)

    def create(self, cidr, tenancy="default"):
        # returns vpc_info
        # CreateVpc
        raise NotImplementedError()

    def destroy(self, vpc):
        # returns bool
        # DeleteVpc
        raise NotImplementedError()
class TagCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for resource tags."""

    def get(self, filters=None):
        # returns (tag_info, ...)
        # DescribeTags
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        return self.call("DescribeTags",
                         response_data_key="Tags",
                         **params)

    def create(self, resource_ids, tags):
        # returns bool
        # CreateTags — every tag is applied to every resource in resource_ids.
        # NOTE(review): snake_case request keys here ("resources", "tags")
        # vs CamelCase elsewhere — confirm self.call() normalizes them.
        return self.call("CreateTags", resources=resource_ids, tags=tags)

    def destroy(self, resource_ids, tags):
        # returns bool
        # DeleteTags
        return self.call("DeleteTags", resources=resource_ids, tags=tags)
class ImageCollection(AwsCollection, EC2ApiClient):
    """get/create/destroy operations for AMIs (machine images)."""

    def get(self, image_ids=None, owners=None, executable_users=None, filters=None):
        # returns (image_info, ...)
        # DescribeImages
        params = {}
        if filters:
            params["filters"] = make_filters(filters)
        if image_ids:
            params["ImageIds"] = image_ids
        if owners:
            params["Owners"] = owners
        if executable_users:
            params["ExecutableUsers"] = executable_users
        return self.call("DescribeImages",
                         response_data_key="Images",
                         **params)

    def create(self, instance_id, name, no_reboot=True, description=None, block_device_mappings=None):
        # returns image_id
        # CreateImage — no_reboot=True snapshots without stopping the instance.
        params = {
            "InstanceId": instance_id,
            "Name": name,
            "NoReboot": no_reboot
        }
        if description:
            params["Description"] = description
        if block_device_mappings:
            params["BlockDeviceMappings"] = block_device_mappings
        return self.call("CreateImage",
                         response_data_key="ImageId",
                         **params)

    def destroy(self, image_id):
        # returns bool
        # DeregisterImage (the previous comment wrongly said CreateImage)
        return self.call("DeregisterImage", ImageId=image_id)
| StarcoderdataPython |
275006 | <filename>permabots/utils.py
from telegram import emoji
from six import iteritems, PY2
def create_emoji_context():
    """Build a template context mapping snake_case emoji names to characters.

    Dunder attributes of telegram's Emoji namespace are skipped; on
    Python 2 the raw byte values are decoded to unicode first.
    """
    context = {}
    for name, symbol in iteritems(emoji.Emoji.__dict__):
        if '__' in name:
            continue
        if PY2:
            symbol = symbol.decode('utf-8')
        # e.g. "GRINNING FACE" -> "grinning_face"
        context[name.lower().replace(" ", "_")] = symbol
return context | StarcoderdataPython |
5095696 | <filename>Mapping.py
import pickle
# Python wrapper for MetaMap
from pymetamap import MetaMap  # MetaMap for identifying biomedical concepts
# each element is a list of indications for prescribing a plant
phenotypes = pickle.load(open('/home/balus/Metamap/1000 random phenotypes', 'rb'))
# creating a MetaMap instance from a locally installed MetaMap
mm = MetaMap.get_instance("/home/balus/Metamap/public_mm_linux_main_2016v2/public_mm/bin/metamap16")
# function definition to remove the non-ASCII values found in phenotypes[i]
def strip_non_ascii(string):
    """Return *string* keeping only characters with code points 1-126.

    NUL (0) and DEL (127) are dropped along with everything non-ASCII,
    matching the original 0 < ord(c) < 127 filter.
    """
    return ''.join(ch for ch in string if 0 < ord(ch) < 127)
# Strip non-ASCII characters in place from every phenotype string so
# MetaMap receives clean input.
for i in range(len(phenotypes)):
    for j in range(len(phenotypes[i])):
        phenotypes[i][j]=strip_non_ascii(phenotypes[i][j])
# Run MetaMap over each phenotype list and print the concepts it finds.
for i in range(len(phenotypes)):
    sents = phenotypes[i]
    print('\n{} are supposedly treated by a plant'.format(phenotypes[i]))
    # Second argument tags the batch with ids [1, len] — presumably sentence
    # identifiers; verify against the pymetamap extract_concepts signature.
    concepts,error = mm.extract_concepts(sents,[1,len(phenotypes[i])])
    for concept in concepts:
| StarcoderdataPython |
6402352 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# These two lines are necessary to find source files!!!
import sys
sys.path.append('../src')
from neuralNet import NeuralNet, DataSet
from files import files
if __name__ == '__main__':
    # Train on the "haberman" dataset entry using a 3/1/2 layer topology.
    f = files["haberman"]
    ds = DataSet(f)
    # numericalEvaluation=True — presumably switches on numeric scoring of
    # the net; confirm against NeuralNet's definition in ../src.
    n = NeuralNet([3, 1, 2], ds.dataMatrix, numericalEvaluation=True)
n.startTraining(1) | StarcoderdataPython |
3593647 | <filename>src/demographics.py
def birth_generation(birth_year):
    """
    Takes in a birth_year and returns the corresponding generation.
    Generations are checked oldest-first, so where ranges overlap the
    earliest corresponding generation is returned; years outside every
    range yield 'Unknown'.
    Parameters
    ----------
    birth_year: int
        The birth year in question.
    Returns
    -------
    generation: str
        The corresponding generation for the input birth_year
    """
    # Ordered oldest -> newest.  range() ends are exclusive, exactly as in
    # the original per-generation year tables.
    generations = (
        (range(1890, 1915), 'Lost Generation'),
        (range(1901, 1913), 'Interbellum Generation'),
        (range(1910, 1924), 'Greatest Generation'),
        (range(1925, 1945), 'Silent Generation'),
        (range(1946, 1964), 'Baby Boomers'),
        (range(1965, 1979), 'Generation X'),
        (range(1975, 1985), 'Xennials'),
        (range(1980, 1994), 'Millennials'),
        (range(1995, 2012), 'Generation Z'),
        (range(2013, 2025), 'Generation Alpha'),
    )
    for years, label in generations:
        if birth_year in years:
            return label
    return 'Unknown'
| StarcoderdataPython |
11252339 | <filename>tests/decorators/test_render_html.py
from fbv.decorators import render_html
from tests.utils import assert_response
def test_render_html(request):
    """render_html should render the template with the dict the view returns."""
    @render_html("test/template.html")
    def _(*args):
        return {"test": 123}
    response = _(request)
    # "asdf 123" implies template.html interpolates the 'test' value after a
    # literal "asdf" — verify against the test template file.
    assert_response(response, content="asdf 123")
| StarcoderdataPython |
5030947 | from primes import get_primes, is_prime
| StarcoderdataPython |
1874680 | import os
import subprocess
import logging
from subprocess import Popen
logger = logging.getLogger(__name__)
class SFTPUsers:
    """
    System sftp users name (which are usual unix account users)
    have format:
          <prefix><company_name>_<web_username>
    Examples:
       Company opel with two web users: opeladmin
       and xyz1 and given prefix is sftp_ will have
       following unix accounts:
       * sftp_opel_opeladmin
       * sftp_opel_xyz1
    This way unique unix users per tenant are guaranteed.
    """
    def __init__(self, prefix, company, gname):
        # gname: shared unix group for all sftp accounts of this deployment.
        self._gname = gname
        self._company = company
        self._prefix = prefix

    def group_add(self):
        """Create the shared unix group; -f makes it a no-op if it exists."""
        args = ["groupadd", "-f", self._gname]
        logger.debug(args)
        self.call(args)

    def prefixed(self, uname):
        """Return the full unix account name for web user *uname*."""
        username = f"{self._prefix}{self._company}_{uname}"
        return username

    def add(self, username, pw):
        """Create the unix account, its directories, and set its password."""
        self.user_add(username)
        self.mkdirs(username)
        self.passwd(username, pw)

    def user_exists(self, uname):
        """True if the account's home directory already exists."""
        username = self.prefixed(uname)
        return os.path.exists(
            f"/home/{username}"
        )

    def user_add(self, uname):
        """
        creates a user with prefix self._prefix. User belongs to
        the self._gname group.
        """
        # create group if it does not exist
        self.group_add()
        username = self.prefixed(uname)
        # /sbin/nologin: the account is sftp-only, no interactive shell.
        args = [
            "useradd",
            "-g",
            self._gname,
            "-d",
            "/home/{}".format(username),
            "-s",
            "/sbin/nologin",
            username
        ]
        logger.debug(args)
        self.call(args)

    def mkdirs(self, uname):
        """Create the home/upload layout for *uname*'s sftp account."""
        username = self.prefixed(uname)
        home_dir = "/home/{}".format(username)
        upload_dir = "/home/{}/upload".format(username)
        # Home is root-owned 755 while upload/ belongs to the user — this
        # matches sshd's ChrootDirectory ownership rules (confirm against
        # the sshd_config in use).
        arr = [
            ["mkdir", "-p", home_dir],
            ["chown", "root:root", home_dir],
            ["chmod", "755", home_dir],
            ["mkdir", "-p", upload_dir],
            ["chown", "{}:{}".format(username, self._gname), upload_dir]
        ]
        for args in arr:
            logger.debug(args)
            self.call(args)

    def passwd(self, uname, pw):
        """
        Set (sftp) password pw for system user uname.
        pw might be empty: initially when user is created
        his userprofile is created with both sftp_passwd1
        and sftp_passwd2 empty. Then a django signal is sent
        to user model and update it - and that update triggers
        this method - with pw empty.
        The idea is that users have sftp account disabled by default.
        They need to enabled it in user profile. When users
        update their user profile this method is triggered with
        pw - non empty.
        """
        username = self.prefixed(uname)
        if not pw:
            logger.info(
                f"sftp_user {uname} disabled."
            )
            return
        logger.debug(
            "Changing password for local user={}".format(username)
        )
        proc = Popen(
            ['/usr/bin/passwd', username],
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        # passwd prompts twice; both entries are piped to its stdin.
        proc.stdin.write(
            bytes(pw + "\n", encoding='utf-8')
        )
        # NOTE(review): the second entry lacks a trailing "\n" and relies on
        # communicate() closing stdin to terminate the prompt — confirm
        # passwd accepts this on the target distro.
        proc.stdin.write(
            bytes(pw, encoding='utf-8')
        )
        proc.stdin.flush()
        stdout, stderr = proc.communicate()
        logger.debug(f"stdout={stdout} stderr={stderr}")

    def call(self, args):
        """Run *args* as a subprocess (thin wrapper over subprocess.call)."""
        subprocess.call(args)
| StarcoderdataPython |
3218082 | from queue_with_stacks import Queue
def test_queue_with_stacks_tests():
    """Exercise enqueue/dequeue/size FIFO behavior of the two-stack Queue."""
    # Setup
    q = Queue()
    q.enqueue(1)
    q.enqueue(2)
    q.enqueue(3)
    # Test size
    assert(q.size() == 3)
    # Test dequeue
    assert(q.dequeue() == 1)
    # Test enqueue
    # Interleave an enqueue to check FIFO order survives the stack transfer.
    q.enqueue(4)
    assert (q.dequeue() == 2)
    assert (q.dequeue() == 3)
    assert (q.dequeue() == 4)
    q.enqueue(5)
    assert (q.size() == 1)
test_queue_with_stacks_tests() | StarcoderdataPython |
1940021 | <gh_stars>1-10
#class PopupPanel:
#
# # PopupImpl.onShow
# def onShowImpl(self, popup):
# frame = doc().createElement('iframe')
# frame.scrolling = 'no'
# frame.frameBorder = 0
# frame.style.position = 'absolute'
#
# popup.__frame = frame
# frame.__popup = popup
# frame.style.setExpression('left', 'this.__popup.offsetLeft')
# frame.style.setExpression('top', 'this.__popup.offsetTop')
# frame.style.setExpression('width', 'this.__popup.offsetWidth')
# frame.style.setExpression('height', 'this.__popup.offsetHeight')
# popup.parentElement.insertBefore(frame, popup)
#
# # PopupImpl.onHide
# def onHideImpl(self, popup):
# var frame = popup.__frame
# frame.parentElement.removeChild(frame)
# popup.__frame = None
# frame.__popup = None
| StarcoderdataPython |
397722 | <reponame>doctorblinch/leetcode<filename>1. Two Sum/two_sum.py
class Solution:
    def twoSum(self, nums: 'List[int]', target: int) -> 'List[int]':
        """Return indices [i, j] (i < j) such that nums[i] + nums[j] == target.

        Single pass with a hash map: for each value, first check whether it
        completes a previously seen number, then record the complement that
        would complete it.  O(n) time, O(n) extra space.

        Fix: the annotations are string literals because this file never
        imports ``List`` from ``typing`` — the bare ``List[int]`` annotations
        raised NameError when the class was defined.
        """
        # complement still needed -> index of the number waiting for it
        complements = {}
        for i, value in enumerate(nums):
            if value in complements:
                return [complements[value], i]
            complements[target - value] = i
| StarcoderdataPython |
3388058 | # -*- coding: utf-8 -*-
"""Checker of PEP-8 Class Constant Naming Conventions."""
import ast
__version__ = '0.1.2'
class ConstantNameChecker(ast.NodeVisitor):
    """AST visitor that records lowercase class-level constant names."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # (lineno, col_offset, message) tuples, one per violation found.
        self.issues = []

    def _ccn_101(self, t_id: str, lineno: int, col_offset: int) -> None:
        """Record a CCN101 violation for the identifier *t_id*."""
        message = "CCN101 Class constants must be uppercase {}".format(t_id)
        self.issues.append((lineno, col_offset, message))

    def visit_ClassDef(self, node) -> None:  # noqa: N802
        """Check class-body assignments, stopping at the first function seen."""
        for child in ast.walk(node):
            if isinstance(child, ast.FunctionDef):
                # A method means this is not a pure constants class; the
                # breadth-first walk has already covered assignments that
                # precede it in the class body.
                return
            if isinstance(child, ast.AnnAssign) and not child.target.id.isupper():
                self._ccn_101(child.target.id, child.lineno, child.col_offset)
            if isinstance(child, ast.Assign):
                for tgt in child.targets:
                    if not tgt.id.isupper():
                        self._ccn_101(tgt.id, child.lineno, child.col_offset)
class ConstantChecker(object):
    """flake8 plugin entry point for the class-constant naming check."""

    name = 'flake8_class_constants'  # noqa: CCN101
    options = None  # noqa: CCN101
    version = __version__  # noqa: CCN101

    def __init__(self, tree, filename: str):
        # flake8 hands every plugin the module AST and the file name.
        self.tree = tree
        self.filename = filename

    def run(self):
        """Yield (line, col, message, type) tuples in the shape flake8 expects."""
        visitor = ConstantNameChecker()
        visitor.visit(self.tree)
        for line, col, message in visitor.issues:
            yield (line, col, message, ConstantChecker)
| StarcoderdataPython |
6431328 | <gh_stars>0
import albumentations as a
import cv2,os,random
# Augment every positive image three ways — rotate, horizontal flip, and
# rotate + flip — writing each result under a unique numbered name.
path='E:\\pos_neg\\Positive'
write='E:\\augmented\\positive'
j=0
# Pass 1: random rotation by 15-30 degrees.
for file in os.listdir(path):
    img=cv2.imread(os.path.join(path,file))
    angle=random.randint(15,30)
    img=a.rotate(img,angle)
    cv2.imwrite(os.path.join(write,str(j)+file),img)
    j+=1
# Pass 2: horizontal flip.
for file in os.listdir(path):
    img=cv2.imread(os.path.join(path,file))
    angle=random.randint(15,30)  # NOTE(review): unused in this pass
    img=a.hflip(img)
    cv2.imwrite(os.path.join(write,str(j)+file),img)
    j+=1
# Pass 3: random rotation followed by horizontal flip.
for file in os.listdir(path):
    img=cv2.imread(os.path.join(path,file))
    angle=random.randint(15,30)
    img=a.rotate(img,angle)
    img=a.hflip(img)
    cv2.imwrite(os.path.join(write,str(j)+file),img)
    j+=1
| StarcoderdataPython |
3385335 | <filename>addons/test_mail/tests/test_mail_template.py<gh_stars>0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
from datetime import datetime, timedelta
from unittest.mock import patch
from odoo.addons.test_mail.tests.common import BaseFunctionalTest, MockEmails, TestRecipients
from odoo.addons.test_mail.tests.common import mail_new_test_user
from odoo.tools import mute_logger, DEFAULT_SERVER_DATETIME_FORMAT
class TestMailTemplate(BaseFunctionalTest, MockEmails, TestRecipients):
    def setUp(self):
        """Build the shared fixture: a mail template with two static
        attachments, mixed recipients (partner_to/email_to/email_cc), and
        the admin user switched to email delivery."""
        super(TestMailTemplate, self).setUp()
        # partner_manager rights let the employee create partners from the
        # raw email_to addresses during sending — TODO confirm.
        self.user_employee.write({
            'groups_id': [(4, self.env.ref('base.group_partner_manager').id)],
        })
        # Two static attachments hung off the admin partner record.
        self._attachments = [{
            'name': '_Test_First',
            'datas_fname': 'first.txt',
            'datas': base64.b64encode(b'My first attachment'),
            'res_model': 'res.partner',
            'res_id': self.user_admin.partner_id.id
        }, {
            'name': '_Test_Second',
            'datas_fname': 'second.txt',
            'datas': base64.b64encode(b'My second attachment'),
            'res_model': 'res.partner',
            'res_id': self.user_admin.partner_id.id
        }]
        self.email_1 = '<EMAIL>'
        self.email_2 = '<EMAIL>'
        self.email_3 = self.partner_1.email
        # Subject/body use template expressions rendered from the record.
        self.email_template = self.env['mail.template'].create({
            'model_id': self.env['ir.model']._get('mail.test.simple').id,
            'name': 'Pigs Template',
            'subject': '${object.name}',
            'body_html': '${object.email_from}',
            'user_signature': False,
            'attachment_ids': [(0, 0, self._attachments[0]), (0, 0, self._attachments[1])],
            'partner_to': '%s,%s' % (self.partner_2.id, self.user_admin.partner_id.id),
            'email_to': '%s, %s' % (self.email_1, self.email_2),
            'email_cc': '%s' % self.email_3})
        # admin should receive emails
        self.user_admin.write({'notification_type': 'email'})
    @mute_logger('odoo.addons.mail.models.mail_mail')
    def test_composer_w_template(self):
        """Comment-mode composer with a template: onchange values replace
        what was typed in the composer, and mail reaches every recipient
        with the rendered subject/body and both static attachments."""
        composer = self.env['mail.compose.message'].sudo(self.user_employee).with_context({
            'default_composition_mode': 'comment',
            'default_model': 'mail.test.simple',
            'default_res_id': self.test_record.id,
            'default_template_id': self.email_template.id,
        }).create({'subject': 'Forget me subject', 'body': 'Dummy body'})
        # perform onchange and send emails
        values = composer.onchange_template_id(self.email_template.id, 'comment', self.test_record._name, self.test_record.id)['value']
        composer.write(values)
        composer.send_mail()
        # email_1/email_2 had no partner records: sending must have created them.
        new_partners = self.env['res.partner'].search([('email', 'in', [self.email_1, self.email_2])])
        self.assertEmails(
            self.user_employee.partner_id,
            [[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
            subject=self.test_record.name,
            body_content=self.test_record.email_from,
            attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
    def test_composer_template_onchange_attachments(self):
        """Tests that all attachments are added to the composer,
        static attachments are not duplicated and while reports are re-generated,
        and that intermediary attachments are dropped."""
        composer = self.env['mail.compose.message'].with_context(default_attachment_ids=[]).create({})
        report_template = self.env.ref('web.action_report_externalpreview')
        # template_1: two static attachments (from setUp) + a report;
        # template_2: report only.
        template_1 = self.email_template.copy({
            'report_template': report_template.id,
        })
        template_2 = self.email_template.copy({
            'attachment_ids': False,
            'report_template': report_template.id,
        })
        # Switch templates back and forth, ending with no template at all.
        onchange_templates = [template_1, template_2, template_1, False]
        attachments_onchange = [composer.attachment_ids]
        # template_1 has two static attachments and one dynamically generated report,
        # template_2 only has the report, so we should get 3, 1, 3 attachments
        attachment_numbers = [0, 3, 1, 3, 0]
        with self.env.do_in_onchange():
            for template in onchange_templates:
                onchange = composer.onchange_template_id(
                    template.id if template else False, 'comment', self.test_record._name, self.test_record.id
                )
                # Convert the raw onchange payload so attachment_ids can be
                # counted as a recordset.
                values = composer._convert_to_record(composer._convert_to_cache(onchange['value']))
                attachments_onchange.append(values['attachment_ids'])
                composer.update(onchange['value'])
        self.assertEqual(
            [len(attachments) for attachments in attachments_onchange],
            attachment_numbers,
        )
        self.assertTrue(
            len(attachments_onchange[1] & attachments_onchange[3]) == 2,
            "The two static attachments on the template should be common to the two onchanges"
        )
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_post_post_w_template(self):
self.test_record.sudo(self.user_employee).message_post_with_template(self.email_template.id, composition_mode='comment')
new_partners = self.env['res.partner'].search([('email', 'in', [self.email_1, self.email_2])])
self.assertEmails(
self.user_employee.partner_id,
[[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
subject=self.test_record.name,
body_content=self.test_record.email_from,
attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
    @mute_logger('odoo.addons.mail.models.mail_mail')
    def test_composer_w_template_mass_mailing(self):
        # Mass mailing through the composer: one outgoing email per record in active_ids.
        test_record_2 = self.env['mail.test.simple'].with_context(BaseFunctionalTest._test_context).create({'name': 'Test2', 'email_from': '<EMAIL>'})
        composer = self.env['mail.compose.message'].sudo(self.user_employee).with_context({
            'default_composition_mode': 'mass_mail',
            # 'default_notify': True,
            'default_notify': False,
            'default_model': 'mail.test.simple',
            'default_res_id': self.test_record.id,
            'default_template_id': self.email_template.id,
            'active_ids': [self.test_record.id, test_record_2.id]
        }).create({})
        values = composer.onchange_template_id(self.email_template.id, 'mass_mail', 'mail.test.simple', self.test_record.id)['value']
        composer.write(values)
        composer.send_mail()
        new_partners = self.env['res.partner'].search([('email', 'in', [self.email_1, self.email_2])])
        # hack to use assertEmails: split the captured outgoing mails per record
        # using the "<res_id>-<model>" marker embedded in message_id, then run
        # assertEmails once per record by swapping self._mails
        self._mails_record1 = [dict(mail) for mail in self._mails if '%s-%s' % (self.test_record.id, self.test_record._name) in mail['message_id']]
        self._mails_record2 = [dict(mail) for mail in self._mails if '%s-%s' % (test_record_2.id, test_record_2._name) in mail['message_id']]
        self._mails = self._mails_record1
        self.assertEmails(
            self.user_employee.partner_id,
            [[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
            subject=self.test_record.name,
            body_content=self.test_record.email_from,
            attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
        self._mails = self._mails_record2
        self.assertEmails(
            self.user_employee.partner_id,
            [[self.partner_1], [self.partner_2], [new_partners[0]], [new_partners[1]], [self.partner_admin]],
            subject=test_record_2.name,
            body_content=test_record_2.email_from,
            attachments=[('first.txt', b'My first attachment', 'text/plain'), ('second.txt', b'My second attachment', 'text/plain')])
        message_1 = self.test_record.message_ids[0]
        message_2 = test_record_2.message_ids[0]
        # messages effectively posted on each record
        self.assertEqual(message_1.subject, self.test_record.name)
        self.assertEqual(message_2.subject, test_record_2.name)
        self.assertIn(self.test_record.email_from, message_1.body)
        self.assertIn(test_record_2.email_from, message_2.body)
def test_composer_template_save(self):
self.env['mail.compose.message'].with_context({
'default_composition_mode': 'comment',
'default_model': 'mail.test.simple',
'default_res_id': self.test_record.id,
}).create({
'subject': 'Forget me subject',
'body': '<p>Dummy body</p>'
}).save_as_template()
# Test: email_template subject, body_html, model
last_template = self.env['mail.template'].search([('model', '=', 'mail.test.simple'), ('subject', '=', 'Forget me subject')], limit=1)
self.assertEqual(last_template.body_html, '<p>Dummy body</p>', 'email_template incorrect body_html')
@mute_logger('odoo.addons.mail.models.mail_mail')
def test_template_send_email(self):
mail_id = self.email_template.send_mail(self.test_record.id)
mail = self.env['mail.mail'].browse(mail_id)
self.assertEqual(mail.subject, self.test_record.name)
self.assertEqual(mail.email_to, self.email_template.email_to)
self.assertEqual(mail.email_cc, self.email_template.email_cc)
self.assertEqual(mail.recipient_ids, self.partner_2 | self.user_admin.partner_id)
def test_template_add_context_action(self):
self.email_template.create_action()
# check template act_window has been updated
self.assertTrue(bool(self.email_template.ref_ir_act_window))
# check those records
action = self.email_template.ref_ir_act_window
self.assertEqual(action.name, 'Send Mail (%s)' % self.email_template.name)
self.assertEqual(action.binding_model_id.model, 'mail.test.simple')
# def test_template_scheduled_date(self):
# from unittest.mock import patch
# self.email_template_in_2_days = self.email_template.copy()
# with patch('odoo.addons.mail.tests.test_mail_template.datetime', wraps=datetime) as mock_datetime:
# mock_datetime.now.return_value = datetime(2017, 11, 15, 11, 30, 28)
# mock_datetime.side_effect = lambda *args, **kw: datetime(*args, **kw)
# self.email_template_in_2_days.write({
# 'scheduled_date': "${(datetime.datetime.now() + relativedelta(days=2)).strftime('%s')}" % DEFAULT_SERVER_DATETIME_FORMAT,
# })
# mail_now_id = self.email_template.send_mail(self.test_record.id)
# mail_in_2_days_id = self.email_template_in_2_days.send_mail(self.test_record.id)
# mail_now = self.env['mail.mail'].browse(mail_now_id)
# mail_in_2_days = self.env['mail.mail'].browse(mail_in_2_days_id)
# # mail preparation
# self.assertEqual(mail_now.exists() | mail_in_2_days.exists(), mail_now | mail_in_2_days)
# self.assertEqual(bool(mail_now.scheduled_date), False)
# self.assertEqual(mail_now.state, 'outgoing')
# self.assertEqual(mail_in_2_days.state, 'outgoing')
# scheduled_date = datetime.strptime(mail_in_2_days.scheduled_date, DEFAULT_SERVER_DATETIME_FORMAT)
# date_in_2_days = datetime.now() + timedelta(days = 2)
# self.assertEqual(scheduled_date, date_in_2_days)
# # self.assertEqual(scheduled_date.month, date_in_2_days.month)
# # self.assertEqual(scheduled_date.year, date_in_2_days.year)
# # Launch the scheduler on the first mail, it should be reported in self.mails
# # and the mail_mail is now deleted
# self.env['mail.mail'].process_email_queue()
# self.assertEqual(mail_now.exists() | mail_in_2_days.exists(), mail_in_2_days)
# # Launch the scheduler on the first mail, it's still in 'outgoing' state
# self.env['mail.mail'].process_email_queue(ids=[mail_in_2_days.id])
# self.assertEqual(mail_in_2_days.state, 'outgoing')
# self.assertEqual(mail_now.exists() | mail_in_2_days.exists(), mail_in_2_days)
    def test_create_partner_from_tracking_multicompany(self):
        # A partner auto-created from an email sent by a tracking template must
        # belong to the record's company, not the current user's company.
        company1 = self.env['res.company'].create({'name': 'company1'})
        self.env.user.write({'company_ids': [(4, company1.id, False)]})
        self.assertNotEqual(self.env.user.company_id, company1)
        email_new_partner = "<EMAIL>"
        Partner = self.env['res.partner']
        self.assertFalse(Partner.search([('email', '=', email_new_partner)]))
        template = self.env['mail.template'].create({
            'model_id': self.env['ir.model']._get('mail.test.track').id,
            'name': 'AutoTemplate',
            'subject': 'autoresponse',
            'email_from': self.env.user.email_formatted,
            'email_to': "${object.email_from}",
            'body_html': "<div>A nice body</div>",
        })
        # force the tracking hook to post with our template; args[0] is the
        # tracked record
        def patched_message_track_post_template(*args, **kwargs):
            args[0].message_post_with_template(template.id)
            return True
        with patch('odoo.addons.mail.models.mail_thread.MailThread._message_track_post_template', patched_message_track_post_template):
            self.env['mail.test.track'].create({
                'email_from': email_new_partner,
                'company_id': company1.id,
                'user_id': self.env.user.id, # trigger tracking,
            })
        new_partner = Partner.search([('email', '=', email_new_partner)])
        self.assertTrue(new_partner)
        self.assertEqual(new_partner.company_id, company1)
    def test_composer_template_onchange_attachments(self):
        """Tests that all attachments are added to the composer,
        static attachments are not duplicated and while reports are re-generated,
        and that intermediary attachments are dropped.

        NOTE(review): this redefines (and therefore shadows) the method of the
        same name earlier in this class — only this copy actually runs. The
        two differ only in the model argument ('mail.test.simple' here versus
        self.test_record._name above); the redundant definition should be
        removed."""
        composer = self.env['mail.compose.message'].with_context(default_attachment_ids=[]).create({})
        report_template = self.env.ref('web.action_report_externalpreview')
        # template_1 keeps the template's static attachments and adds a report
        template_1 = self.email_template.copy({
            'report_template': report_template.id,
        })
        # template_2 drops the static attachments and only keeps the report
        template_2 = self.email_template.copy({
            'attachment_ids': False,
            'report_template': report_template.id,
        })
        onchange_templates = [template_1, template_2, template_1, False]
        attachments_onchange = [composer.attachment_ids]
        # template_1 has two static attachments and one dynamically generated report,
        # template_2 only has the report, so we should get 3, 1, 3 attachments
        # and when there is no template, no attachments
        attachment_numbers = [0, 3, 1, 3, 0]
        with self.env.do_in_onchange():
            for template in onchange_templates:
                onchange = composer.onchange_template_id(
                    template.id if template else False, 'comment', 'mail.test.simple', self.test_record.id
                )
                # convert the raw onchange value dict so attachment_ids can be counted
                values = composer._convert_to_record(composer._convert_to_cache(onchange['value']))
                attachments_onchange.append(values['attachment_ids'])
                composer.update(onchange['value'])
        self.assertEqual(
            [len(attachments) for attachments in attachments_onchange],
            attachment_numbers,
        )
        self.assertTrue(
            len(attachments_onchange[1] & attachments_onchange[3]) == 2,
            "The two static attachments on the template should be common to the two onchanges"
        )
| StarcoderdataPython |
5136598 | <reponame>yun-mh/uniwalk
# Generated by Django 2.2.5 on 2020-01-14 01:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make every per-part template image field on Template unique.

    Each left/right shoe-part FileField gets the identical AlterField
    (blank/null allowed, unique, same upload path), so the operation list
    is built from the field names instead of being spelled out ten times.
    The order of operations matches the originally generated migration.
    """

    dependencies = [
        ('products', '0015_template'),
    ]

    operations = [
        migrations.AlterField(
            model_name='template',
            name=part_field,
            field=models.FileField(blank=True, null=True, unique=True, upload_to='product_templates'),
        )
        for part_field in [
            'midsole_left', 'midsole_right',
            'outsole_left', 'outsole_right',
            'shoelace_left', 'shoelace_right',
            'tongue_left', 'tongue_right',
            'uppersole_left', 'uppersole_right',
        ]
    ]
| StarcoderdataPython |
5035004 | from unittest import TestCase
from neuropower import BUM
import numpy as np
class TestBUM(TestCase):
    """Tests for neuropower.BUM: log-likelihood helpers and the BUM optimiser."""

    @staticmethod
    def _simulated_peaks():
        """Return a reproducible mixture of 10 uniform(0,1) and 10 uniform(0,0.2) p-values."""
        np.random.seed(seed=100)
        return np.vstack((np.random.uniform(0, 1, 10), np.random.uniform(0, 0.2, 10))).flatten()

    def test_fpLL(self):
        testpeaks = self._simulated_peaks()
        x = np.sum(BUM.fpLL([0.5, 0.5], testpeaks))
        self.assertEqual(np.around(x, decimals=2), 9.57)

    def test_fbumnLL(self):
        testpeaks = self._simulated_peaks()
        x = BUM.fbumnLL([0.5, 0.5], testpeaks)
        self.assertEqual(np.around(x, decimals=2)[0], -4.42)

    def test_bumOptim(self):
        # BUG FIX: this method was previously also named test_fbumnLL, which
        # shadowed the method above so the fbumnLL assertion never ran.
        testpeaks = self._simulated_peaks()
        x = BUM.bumOptim(testpeaks, starts=1, seed=100)
        self.assertEqual(np.around(x['pi1'], decimals=2), 0.29)
| StarcoderdataPython |
4894957 | import pickle
import numpy as np
from collections import deque
from format_data import format_data
def bfs(adj_dict, group_list, source_group, depth_limit):
    """Breadth-first search over groups, starting from source_group.

    group_list[g] is the list of member nodes of group g; adj_dict[node]
    lists the groups adjacent to that node. Returns the set of all groups
    reachable within depth_limit hops (the source counts as depth 0).
    """
    visited = {source_group}
    frontier = deque([(source_group, 0)])
    # stop expanding once the next group to pop is already at depth_limit
    while frontier and frontier[0][1] < depth_limit:
        current, level = frontier.popleft()
        print(f"Group {current}, depth {level}")
        for member in group_list[current]:
            for neighbour in adj_dict[member]:
                if neighbour not in visited:
                    visited.add(neighbour)
                    frontier.append((neighbour, level + 1))
    return visited
# Paths and parameters for the DBLP co-authorship subsampling run.
datafile = './socio_data/form_coauth-dblp.pk'
new_datafile = './socio_data/form_coauth-dblp_sub.pk'
seed = 42
depth_limit = 3  # BFS radius (in group hops) around the random source group
np.random.seed(seed)
# Load the full dataset: a list of groups plus a node -> groups adjacency map.
with open(datafile,'rb') as filename:
    data = pickle.load(filename)
group_list = data['group_list']
adj_dict = data['adj_dict']
#bfs subsample
# pick a random source group and keep every group within depth_limit hops
source_group = np.random.randint(0,len(group_list))
observed_group = bfs(adj_dict, group_list, source_group, depth_limit)
print("Number of groups : ",len(observed_group))
subgroup_list = [group_list[group] for group in observed_group]
# Re-format the subsample and persist it next to the original dataset.
results = format_data(subgroup_list)
with open(new_datafile, 'wb') as output:
    pickle.dump(results,output)
| StarcoderdataPython |
class Node:
    """A doubly linked list node holding a value and prev/next links."""

    def __init__(self, value):
        self.prev = None   # previous node, or None at the head
        self.value = value
        self.next = None   # next node, or None at the tail


class LinkedList:
    """Doubly linked list with positional insert/remove and in-place reverse."""

    def __init__(self):
        self.head = None
        self.tail = None
        # BUG FIX: length was only initialised by the first append(), so
        # reading .length (or calling prepend/insert) on a fresh list raised
        # AttributeError.
        self.length = 0

    def append(self, value):
        """Add value at the tail. O(1)."""
        new_node = Node(value)
        if self.head is None:
            self.head = new_node
            self.tail = new_node
        else:
            self.tail.next = new_node
            new_node.prev = self.tail
            self.tail = new_node
        self.length += 1

    def prepend(self, value):
        """Add value at the head. O(1)."""
        if self.head is None:
            # BUG FIX: the original dereferenced self.head.prev and crashed
            # when prepending to an empty list.
            self.append(value)
            return
        new_node = Node(value)
        new_node.next = self.head
        self.head.prev = new_node
        self.head = new_node
        self.length += 1

    def insert(self, index, value):
        """Insert value before position index; an index past the end appends."""
        if index >= self.length:
            self.append(value)
            return
        if index == 0:
            self.prepend(value)
            return
        before = self.head
        for _ in range(index - 1):
            before = before.next
        after = before.next
        new_node = Node(value)
        before.next = new_node
        new_node.prev = before
        new_node.next = after
        after.prev = new_node
        self.length += 1

    def remove(self, index):
        """Remove the node at index; indexes at/past the end remove the tail."""
        if self.length == 0:
            return
        if self.length == 1:
            # BUG FIX: the original crashed removing the only element
            # (holding_pointer stayed None).
            self.head = None
            self.tail = None
            self.length = 0
            return
        if index >= self.length - 1:
            # clamp: any out-of-range index drops the tail (original behavior)
            self.tail = self.tail.prev
            self.tail.next = None
        elif index == 0:
            self.head = self.head.next
            self.head.prev = None
        else:
            victim = self.head
            for _ in range(index):
                victim = victim.next
            victim.prev.next = victim.next
            victim.next.prev = victim.prev
        self.length -= 1

    def reverse(self):
        """Reverse the list in place by swapping each node's links. O(n).

        Replaces the original rebuild-from-a-negative-index-hack version
        (int("-" + str(i+1))) with a pointer swap; no nodes are recreated.
        """
        node = self.head
        self.head, self.tail = self.tail, self.head
        while node is not None:
            node.prev, node.next = node.next, node.prev
            node = node.prev  # .prev is the old .next after the swap

    def display(self):
        """Return the values from head to tail as a Python list."""
        values = []
        node = self.head
        while node is not None:
            values.append(node.value)
            node = node.next
        return values
# Demo / smoke test of the LinkedList API.
l = LinkedList()
l.append(10)
l.append(5)
l.append(16)
l.prepend(1)
l.insert(2, 99)   # -> [1, 10, 99, 5, 16]
l.insert(44, 51)  # index past the end appends -> [..., 16, 51]
l.remove(4)       # removes the value at index 4 (16)
l.reverse()
print(l.display())
print(l.head.value, l.tail.value)
print(l.length)
| StarcoderdataPython |
4809185 | <filename>test/testp4svn_actions.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Test cases of perforce to svn replication
'''
import os
import unittest
import tempfile
from testcommon import get_p4d_from_docker
from testcommon_p4svn import replicate_P4SvnReplicate, verify_replication
from lib.buildlogger import getLogger
from lib.buildcommon import generate_random_str
import testp4svn_samples
logger = getLogger(__name__)
class P4SvnActionRepTest(testp4svn_samples.P4SvnReplicationTest):
    """Perforce->svn replication tests for file/directory delete behaviour."""

    def p4svn_action_remove_setup_env(self, depot_dir, action, **kwargs):
        """Populate depot_dir with files, then apply a delete/add scenario.

        action selects the scenario (see the if/elif chain below); kwargs:
          levels_of_dir     -- depth of nested directories to create (default 0)
          place_holder_file -- nesting level at which to add a keep-alive file
                               (-1 = none)
        """
        levels_of_dir = kwargs.get('levels_of_dir', 0)
        place_holder_file = kwargs.get('place_holder_file', -1)
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        with get_p4d_from_docker(src_docker_cli, depot_dir) as p4:
            clientspec = p4.fetch_client(p4.client)
            ws_root = clientspec._root
            project_dir = 'a_dir'
            project_dir = tempfile.mkdtemp(prefix=project_dir, dir=ws_root)
            # add a file in project dir
            proj_file_path = os.path.join(project_dir, 'project_file')
            description = 'add a file with file name %s' % proj_file_path
            with open(proj_file_path, 'wt') as f:
                f.write('My name is %s!\n' % proj_file_path)
            p4.run_add('-f', proj_file_path)
            p4.run_submit('-d', description)
            # make a directory and add files in it
            file_names = ['a_file.txt', 'another_file.txt',
                          'yet_another_file.txt']
            test_dir = tempfile.mkdtemp(dir=project_dir)
            for i in range(levels_of_dir):
                test_dir = tempfile.mkdtemp(dir=test_dir, prefix='%s_' % i)
                if place_holder_file == i:
                    # keep-alive file so this level survives later deletions
                    file_path = os.path.join(test_dir, 'place_holder')
                    with open(file_path, 'wt') as f:
                        f.write('prevent deletion of dir!\n')
                    p4.run_add('-f', file_path)
                    p4.run_submit('-d', 'add a file to prevent deletion of dir')
            # add the files
            for fn in file_names:
                file_path = os.path.join(test_dir, fn)
                description = 'add a file with file name %s' % fn
                with open(file_path, 'wt') as f:
                    f.write('My name is %s!\n' % fn)
                p4.run_add('-f', file_path)
                p4.run_submit('-d', description)
            if action == 'remove_one_by_one':
                # remove all files one by one (one submit per file)
                for fn in file_names:
                    file_path = os.path.join(test_dir, fn)
                    description = 'remove %s' % fn
                    p4.run_delete(file_path)
                    p4.run_submit('-d', description)
            elif action == 'remove_all_in_one_change':
                # remove all files all together (single submit)
                description = ''
                for fn in file_names:
                    file_path = os.path.join(test_dir, fn)
                    description += 'remove %s\n' % fn
                    p4.run_delete(file_path)
                p4.run_submit('-d', description)
            elif action in ['remove_all_add_one',
                            'remove_all_add_one_in_parent']:
                # 1) remove_all_add_one
                # remove all files all together but add a new file in
                # the same directory, no directory should be deleted
                # 2) remove_all_add_on_in_parent
                # remove all files all together and add a new file in
                # the parent directory, current directory should be deleted
                description = ''
                for fn in file_names:
                    file_path = os.path.join(test_dir, fn)
                    description += 'remove %s\n' % fn
                    p4.run_delete(file_path)
                file_path = os.path.join(test_dir, 'fantastic_additional')
                if action == 'remove_all_add_one_in_parent':
                    test_dir_parent = os.path.split(test_dir)[0]
                    file_path = os.path.join(test_dir_parent, 'fantastic_additional')
                description = 'add a file with file name %s' % fn
                with open(file_path, 'wt') as f:
                    f.write('My name is %s!\n' % fn)
                p4.run_add('-f', file_path)
                p4.run_submit('-d', description)
            else:
                logger.error('"%s" not yet implemented' % action)
            # final edit on the project file so replication has a trailing change
            p4.run_edit(proj_file_path)
            with open(proj_file_path, 'a') as f:
                f.write('My name is %s!\n' % proj_file_path)
            p4.run_submit('-d', 'editing %s' % proj_file_path)

    def test_p4svn_action_remove_empty_dir_one_by_one(self):
        '''test that directory should be removed automatically if all files in
        it are removed one by one.
        '''
        test_case = 'p4svn_action_remove_empty_dir_one_by_one'
        depot_dir = '/depot/%s' % test_case
        # NOTE(review): src_docker_cli/dst_docker_cli are assigned but unused,
        # here and in the sibling tests below.
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir, action='remove_one_by_one')
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4svn_action_remove_empty_dir_all_files_in_one_change(self):
        '''test that directory should be removed automatically if all files in
        it are removed in one change.
        '''
        test_case = 'p4svn_action_remove_empty_dir_all_files_in_one_change'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_in_one_change')
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4svn_action_remove_empty_dir_all_files_in_one_change_multi_levels(self):
        '''test that directories should be removed recursivly if files in
        them are removed.
        '''
        test_case = 'p4svn_action_remove_empty_dir_all_files_in_one_change_multi_levels'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_in_one_change',
                                           levels_of_dir=2)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4svn_action_remove_empty_dir_one_by_one_multi_levels(self):
        '''test that directories should be removed recursivly if files in
        them are removed.
        '''
        test_case = 'p4svn_action_remove_empty_dir_one_by_one_multi_levels'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_one_by_one',
                                           levels_of_dir=2)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4svn_action_remove_empty_dir_one_by_one_multi_levels_place_holder(self):
        '''test that directory should not be removed automatically if some
        file in it is still there.
        '''
        test_case = 'p4svn_action_remove_empty_dir_one_by_one_multi_levels_place_holder'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_one_by_one',
                                           levels_of_dir=4,
                                           place_holder_file=1)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4svn_action_remove_empty_dir_remove_all_add_one(self):
        '''test that directory should not be removed automatically if new
        file is added to it.
        '''
        test_case = 'p4svn_action_remove_empty_dir_remove_all_add_one'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_add_one',
                                           levels_of_dir=4)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4svn_action_remove_empty_dir_remove_all_add_one_in_parent(self):
        '''test that directory should not be removed automatically if new
        file is added to it.
        '''
        test_case = 'p4svn_action_remove_empty_dir_remove_all_add_one_in_parent'
        depot_dir = '/depot/%s' % test_case
        src_docker_cli = self.docker_p4d_clients['p4d_0']
        dst_docker_cli = self.docker_svn_clients['svn_0']
        self.p4svn_action_remove_setup_env(depot_dir,
                                           action='remove_all_add_one_in_parent',
                                           levels_of_dir=4)
        self.replicate_sample_dir_withdocker(depot_dir)
        logger.passed(test_case)

    def test_p4_action_rep_special_commitmsgs(self):
        # NOTE(review): unimplemented stub — commitmsgs is built but unused
        # and the test body is just `pass`.
        commitmsgs = {'utf-8':u'I think, therefore I am.',
                      'cp1251':u'мыслю, следовательно существую.',
                      'gb2312':u'我思故我在.',
                      'latin1':u'La Santé',}
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
11365024 | import time
import unittest
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core.networking import HydrusNetwork
from hydrus.core.networking import HydrusNetworking
from hydrus.server import ServerDB
from hydrus.test import TestController
class TestServerDB( unittest.TestCase ):
def _read( self, action, *args, **kwargs ): return TestServerDB._db.Read( action, *args, **kwargs )
def _write( self, action, *args, **kwargs ): return TestServerDB._db.Write( action, True, *args, **kwargs )
    @classmethod
    def setUpClass( cls ):
        # One shared server DB instance for the whole test class.
        cls._db = ServerDB.DB( HG.test_controller, TestController.DB_DIR, 'server' )
    @classmethod
    def tearDownClass( cls ):
        cls._db.Shutdown()
        # The DB runs its own loop; wait for it to finish before dropping it.
        while not cls._db.LoopIsFinished():
            time.sleep( 0.1 )
        del cls._db
    def _test_account_creation( self ):
        """Exercise account type editing, registration keys and access keys."""
        result = sorted( self._read( 'account_types', self._tag_service_key, self._tag_service_account ), key = lambda at: at.GetTitle() )
        ( self._tag_service_admin_account_type, self._null_account_type ) = result
        self.assertEqual( self._tag_service_admin_account_type.GetTitle(), 'administrator' )
        self.assertEqual( self._null_account_type.GetTitle(), 'null account' )
        # add two new account types alongside the defaults
        self._regular_user_account_type = HydrusNetwork.AccountType.GenerateNewAccountType( 'regular user', { HC.CONTENT_TYPE_MAPPINGS : HC.PERMISSION_ACTION_CREATE }, HydrusNetworking.BandwidthRules() )
        self._deletee_user_account_type = HydrusNetwork.AccountType.GenerateNewAccountType( 'deletee user', {}, HydrusNetworking.BandwidthRules() )
        new_account_types = [ self._tag_service_admin_account_type, self._null_account_type, self._regular_user_account_type, self._deletee_user_account_type ]
        # write the new set and verify it round-trips
        self._write( 'account_types', self._tag_service_key, self._tag_service_account, new_account_types, {} )
        edited_account_types = self._read( 'account_types', self._tag_service_key, self._tag_service_account )
        self.assertEqual(
            { at.GetAccountTypeKey() for at in edited_account_types },
            { at.GetAccountTypeKey() for at in ( self._tag_service_admin_account_type, self._null_account_type, self._regular_user_account_type, self._deletee_user_account_type ) }
        )
        # create accounts of the deletee type via registration keys
        r_keys = self._read( 'registration_keys', self._tag_service_key, self._tag_service_account, 5, self._deletee_user_account_type.GetAccountTypeKey(), HydrusData.GetNow() + 86400 * 365 )
        access_keys = [ self._read( 'access_key', self._tag_service_key, r_key ) for r_key in r_keys ]
        account_keys = [ self._read( 'account_key_from_access_key', self._tag_service_key, access_key ) for access_key in access_keys ]
        accounts = [ self._read( 'account', self._tag_service_key, account_key ) for account_key in account_keys ]
        for account in accounts:
            self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._deletee_user_account_type.GetAccountTypeKey() )
        # delete the deletee type, mapping its accounts to the regular type
        deletee_account_type_keys_to_replacement_account_type_keys = { self._deletee_user_account_type.GetAccountTypeKey() : self._regular_user_account_type.GetAccountTypeKey() }
        new_account_types = [ self._tag_service_admin_account_type, self._null_account_type, self._regular_user_account_type ]
        self._write( 'account_types', self._tag_service_key, self._tag_service_account, new_account_types, deletee_account_type_keys_to_replacement_account_type_keys )
        accounts = [ self._read( 'account', self._tag_service_key, account_key ) for account_key in account_keys ]
        self._tag_service_regular_account = accounts[0]
        for account in accounts:
            self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._regular_user_account_type.GetAccountTypeKey() )
        # registration keys are single-use: fetching an access key twice
        # replaces the first one, and a used r_key can no longer be redeemed
        r_keys = self._read( 'registration_keys', self._tag_service_key, self._tag_service_account, 5, self._regular_user_account_type.GetAccountTypeKey(), HydrusData.GetNow() + 86400 * 365 )
        self.assertEqual( len( r_keys ), 5 )
        for r_key in r_keys: self.assertEqual( len( r_key ), 32 )
        r_key = r_keys[0]
        access_key = self._read( 'access_key', self._tag_service_key, r_key )
        access_key_2 = self._read( 'access_key', self._tag_service_key, r_key )
        self.assertNotEqual( access_key, access_key_2 )
        with self.assertRaises( HydrusExceptions.InsufficientCredentialsException ):
            # this access key has been replaced
            self._read( 'account_key_from_access_key', self._tag_service_key, access_key )
        account_key = self._read( 'account_key_from_access_key', self._tag_service_key, access_key_2 )
        with self.assertRaises( HydrusExceptions.InsufficientCredentialsException ):
            # this registration token has been deleted
            self._read( 'access_key', self._tag_service_key, r_key )
    def _test_account_modification( self ):
        """Exercise account type change, ban/unban, expiry and message setting."""
        regular_account_key = self._tag_service_regular_account.GetAccountKey()
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._regular_user_account_type.GetAccountTypeKey() )
        self._write( 'modify_account_account_type', self._tag_service_key, self._tag_service_account, regular_account_key, self._tag_service_admin_account_type.GetAccountTypeKey() )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._tag_service_admin_account_type.GetAccountTypeKey() )
        self._write( 'modify_account_account_type', self._tag_service_key, self._tag_service_account, regular_account_key, self._regular_user_account_type.GetAccountTypeKey() )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertEqual( account.GetAccountType().GetAccountTypeKey(), self._regular_user_account_type.GetAccountTypeKey() )
        # ban / unban
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertFalse( account.IsBanned() )
        ban_reason = 'oh no no no'
        self._write( 'modify_account_ban', self._tag_service_key, self._tag_service_account, regular_account_key, ban_reason, None )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertTrue( account.IsBanned() )
        ( reason, created, expires ) = account.GetBannedInfo()
        self.assertEqual( reason, ban_reason )
        # created timestamp should be "now" give or take a few seconds
        self.assertTrue( HydrusData.GetNow() - 5 < created < HydrusData.GetNow() + 5 )
        self.assertEqual( expires, None )
        ban_reason = 'just having a giggle m8'
        ban_expires = HydrusData.GetNow() + 86400
        self._write( 'modify_account_ban', self._tag_service_key, self._tag_service_account, regular_account_key, ban_reason, ban_expires )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertTrue( account.IsBanned() )
        ( reason, created, expires ) = account.GetBannedInfo()
        self.assertEqual( reason, ban_reason )
        self.assertTrue( HydrusData.GetNow() - 5 < created < HydrusData.GetNow() + 5 )
        self.assertEqual( expires, ban_expires )
        self._write( 'modify_account_unban', self._tag_service_key, self._tag_service_account, regular_account_key )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertFalse( account.IsBanned() )
        # expiry: past, future, then cleared (None = never expires)
        set_expires = HydrusData.GetNow() - 5
        self._write( 'modify_account_expires', self._tag_service_key, self._tag_service_account, regular_account_key, set_expires )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertTrue( account.IsExpired() )
        self.assertEqual( set_expires, account.GetExpires() )
        set_expires = HydrusData.GetNow() + 86400
        self._write( 'modify_account_expires', self._tag_service_key, self._tag_service_account, regular_account_key, set_expires )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertFalse( account.IsExpired() )
        self.assertEqual( set_expires, account.GetExpires() )
        set_expires = None
        self._write( 'modify_account_expires', self._tag_service_key, self._tag_service_account, regular_account_key, set_expires )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        self.assertFalse( account.IsExpired() )
        self.assertEqual( set_expires, account.GetExpires() )
        # account message: set then clear
        set_message = 'hello'
        self._write( 'modify_account_set_message', self._tag_service_key, self._tag_service_account, regular_account_key, set_message )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        ( message, created ) = account.GetMessageAndTimestamp()
        self.assertEqual( message, set_message )
        set_message = ''
        self._write( 'modify_account_set_message', self._tag_service_key, self._tag_service_account, regular_account_key, set_message )
        account = self._read( 'account', self._tag_service_key, regular_account_key )
        ( message, created ) = account.GetMessageAndTimestamp()
        self.assertEqual( message, set_message )
    def _test_content_creation( self ):
        """Pend a mapping via a client-to-server update and verify attribution."""
        tag = 'character:samus aran'
        hash = HydrusData.GenerateKey()
        mappings_content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, ( hash, ) ) )
        mapping_content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPING, ( tag, hash ) )
        client_to_server_update = HydrusNetwork.ClientToServerUpdate()
        client_to_server_update.AddContent( HC.CONTENT_UPDATE_PEND, mappings_content )
        self._write( 'update', self._tag_service_key, self._tag_service_regular_account, client_to_server_update, HydrusData.GetNow() )
        # can extend this to generate and fetch an actual update given a timespan
        # the submitted mapping should be attributed to the submitting account
        result = self._read( 'account_from_content', self._tag_service_key, mapping_content )
        self.assertEqual( result.GetAccountKey(), self._tag_service_regular_account.GetAccountKey() )
def _test_init_server_admin( self ):
    """Bootstrap the server admin: b'init' registration -> access key -> account key -> account."""
    access_key = self._read( 'access_key', HC.SERVER_ADMIN_KEY, b'init' )
    self.assertEqual( type( access_key ), bytes )
    self.assertEqual( len( access_key ), 32 )
    self._admin_access_key = access_key
    # resolve the access key to an account key
    account_key = self._read( 'account_key_from_access_key', HC.SERVER_ADMIN_KEY, self._admin_access_key )
    self.assertEqual( type( account_key ), bytes )
    self.assertEqual( len( account_key ), 32 )
    self._admin_account_key = account_key
    # and finally fetch the full account object
    account = self._read( 'account', HC.SERVER_ADMIN_KEY, self._admin_account_key )
    self.assertEqual( type( account ), HydrusNetwork.Account )
    self.assertEqual( account.GetAccountKey(), self._admin_account_key )
    self._admin_account = account
def _test_service_creation( self ):
    """Create a tag repo and a file repo, then resolve their admin accounts from the returned access keys."""
    self._tag_service_key = HydrusData.GenerateKey()
    self._file_service_key = HydrusData.GenerateKey()
    existing_services = self._read( 'services' )
    self._tag_service = HydrusNetwork.GenerateService( self._tag_service_key, HC.TAG_REPOSITORY, 'tag repo', 100 )
    self._file_service = HydrusNetwork.GenerateService( self._file_service_key, HC.FILE_REPOSITORY, 'file repo', 101 )
    updated_services = list( existing_services ) + [ self._tag_service, self._file_service ]
    # writing the new service list returns one fresh access key per new service
    access_keys_by_service = self._write( 'services', self._admin_account, updated_services )
    self.assertEqual( set( access_keys_by_service.keys() ), { self._tag_service_key, self._file_service_key } )
    self._tag_service_access_key = access_keys_by_service[ self._tag_service_key ]
    self._file_service_access_key = access_keys_by_service[ self._file_service_key ]
    self._tag_service_account_key = self._read( 'account_key_from_access_key', self._tag_service_key, self._tag_service_access_key )
    self._file_service_account_key = self._read( 'account_key_from_access_key', self._file_service_key, self._file_service_access_key )
    self._tag_service_account = self._read( 'account', self._tag_service_key, self._tag_service_account_key )
    self._file_service_account = self._read( 'account', self._file_service_key, self._file_service_account_key )
    self.assertEqual( self._tag_service_account.GetAccountKey(), self._tag_service_account_key )
    self.assertEqual( self._file_service_account.GetAccountKey(), self._file_service_account_key )
def test_server( self ):
    """Run the server stages in order; later stages depend on state set by earlier ones."""
    for stage in (
        self._test_init_server_admin,
        self._test_service_creation,
        self._test_account_creation,
        self._test_content_creation,
        self._test_account_modification,
    ):
        stage()
| StarcoderdataPython |
9664204 | import dash_bootstrap_components as dbc
from dash import Input, Output, State, html
# Demo layout: a toggle button, a target button, and a manually-controlled
# tooltip (trigger=None, so only the callback below opens/closes it).
toggle_button = dbc.Button(
    "Toggle",
    id="toggle",
    color="success",
    className="me-4",
    n_clicks=0,
)
target_button = dbc.Button("Target", id="target", color="danger", n_clicks=0)
manual_tooltip = dbc.Tooltip(
    "This is a tooltip",
    id="tooltip",
    is_open=False,
    target="target",
    trigger=None,
)
tooltip = html.Div([toggle_button, target_button, manual_tooltip])
@app.callback(
    Output("tooltip", "is_open"),
    [Input("toggle", "n_clicks")],
    [State("tooltip", "is_open")],
)
def toggle_tooltip(n, is_open):
    """Flip the tooltip's visibility on each click; keep state on the initial render (n == 0)."""
    return (not is_open) if n else is_open
| StarcoderdataPython |
import smtplib
from email.message import EmailMessage

# Build the message with the email package instead of hand-formatting a
# header string: SMTP.sendmail() encodes str payloads as ASCII, so the
# non-ASCII body line below raised UnicodeEncodeError.  EmailMessage picks
# an appropriate content-transfer-encoding (UTF-8) automatically, and
# send_message() derives the envelope addresses from the headers.
addr_from = '<EMAIL>'
addr_to = '<EMAIL>'

msg = EmailMessage()
msg['From'] = addr_from
msg['To'] = addr_to
msg.set_content(
    "...\n"
    "Beware the Ides of March.\n"
    "日本語も送れるかな?\n"
)

server = smtplib.SMTP('localhost')
try:
    server.send_message(msg)
finally:
    # Always close the SMTP session, even if sending fails.
    server.quit()
| StarcoderdataPython |
11212768 | <filename>dashboard/migrations/0002_author_auth_status.py
# Generated by Django 3.1.5 on 2021-04-24 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``auth_status`` field to ``dashboard.Author``."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('dashboard', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='author',
            name='auth_status',
            # null=True so existing rows need no default at migration time.
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| StarcoderdataPython |
import os
import sys

from faker import Factory

# Make the project modules (api, user) importable when the tests are run
# from the repository root.
sys.path.append(os.getcwd())

import api
import user

# Shared faker instance for generating test data.
fake = Factory.create()
# Fixture for creating a throwaway user record.  The angle-bracket values
# are anonymized placeholders from the public dataset dump, not real data.
MOCK_NEW_USER = {
    'name': '<NAME>',
    'user_id': '3141592654',
    'email': '<EMAIL>'
}

# Representative profile payload: a captured AWS Lambda environment.
# Credential-like values (<KEY>, <PASSWORD>) are redacted placeholders.
MOCK_SCAN = {
    "env": {
        "LANG": "en_US.UTF-8",
        "AWS_DEFAULT_REGION": "eu-central-1",
        "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "128",
        "AWS_LAMBDA_FUNCTION_NAME": "scheduled-serverless-profiler",
        "AWS_SECRET_ACCESS_KEY": "<KEY>",
        "AWS_LAMBDA_FUNCTION_VERSION": "$LATEST",
        "PYTHONPATH": "/var/runtime",
        "AWS_LAMBDA_LOG_GROUP_NAME": "/aws/lambda/scheduled-profiler",
        "AWS_REGION": "eu-central-1",
        "AWS_SESSION_TOKEN": "<PASSWORD>",
        "LAMBDA_TASK_ROOT": "/var/task",
        "AWS_EXECUTION_ENV": "AWS_Lambda_python2.7",
        "AWS_SECURITY_TOKEN": "<PASSWORD>",
        "LAMBDA_RUNTIME_DIR": "/var/runtime",
        "AWS_LAMBDA_LOG_STREAM_NAME": "2017/03/11/[$LATEST]d5e2ca93",
        "AWS_ACCESS_KEY_ID": "<KEY>",
        "PATH": "/usr/local/bin:/usr/bin/:/bin"
    }
}
def test_object_init():
    """An APIKey object can be constructed from a bare key string."""
    key = api.APIKey('123567890')
    assert key is not None
def test_api_key_location():
    """locate_user() resolves an API key back to its owning user record."""
    u = user.User(MOCK_NEW_USER)
    result = u.find_or_create_by()
    try:
        a = api.APIKey(result['api_key'])
        search_operation = a.locate_user()
    finally:
        # Always remove the fixture user, even if the lookup raises;
        # otherwise a failed run leaks test records into the backing store.
        u.destroy()
    assert search_operation['api_key'] is not None
    assert search_operation['user_id'] == MOCK_NEW_USER['user_id']
    assert search_operation['email'] == MOCK_NEW_USER['email']
    assert search_operation['disabled'] is False
def test_profile_object():
    """A Profiler built from a fresh user's API key authenticates."""
    u = user.User(MOCK_NEW_USER)
    result = u.find_or_create_by()
    try:
        p = api.Profiler(result['api_key'])
    finally:
        # Clean up the fixture user even if Profiler construction raises.
        u.destroy()
    assert p is not None
    assert p.authenticated is True
def test_profile_storage():
    """A profile payload can be stored and then destroyed via the Profiler."""
    u = user.User(MOCK_NEW_USER)
    created = u.find_or_create_by()
    try:
        p = api.Profiler(created['api_key'])
        result = p.store_profile(MOCK_SCAN)
        try:
            assert p is not None
            assert p.authenticated is True
            assert result is not None
        finally:
            # Remove the stored profile even if an assertion above fails;
            # the original only destroyed it after all asserts passed.
            p.destroy_profile(result)
    finally:
        # Always remove the fixture user.
        u.destroy()
| StarcoderdataPython |
import sys

# CLI contract: the first argument is "name=value"; scale the numeric value
# and print it as a small dict payload.
args = sys.argv
raw_reading = args[1].split('=')[1]
# 0.460646: fixed conversion factor applied to the raw reading
# (units unknown from this script alone).
print({"temp": float(raw_reading) * 0.460646})
391902 | <gh_stars>100-1000
import itertools
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from scipy import interpolate
from astropy import units
from astropy.io import fits
from astropy.convolution import convolve, Gaussian1DKernel
from matplotlib import pyplot as plt
# Imports for fast_running_median
from collections import deque
from itertools import islice
from bisect import insort, bisect_left
#from pydl.pydlutils import math
#from pydl.pydlutils import bspline
from pypeit.core import pydl
from pypeit import msgs
def quicksave(data, fname):
    """
    Save ``data`` as the primary HDU of a FITS file, overwriting any
    existing file.  No quality-control checks are performed; this is a
    quick debugging helper.

    Parameters
    ----------
    data : ndarray
        Array data to write as the primary HDU.
    fname : str
        Output filename.
    """
    # The original removed a pre-existing file by hand with os.path.exists /
    # os.remove, but `os` is never imported in this module, so that path
    # raised NameError.  astropy's overwrite=True handles it instead.
    hdu = fits.PrimaryHDU(data)
    hdulist = fits.HDUList([hdu])
    hdulist.writeto(fname, overwrite=True)
    return
def bspline_inner_knots(all_knots):
    """Trim a full breakpoint vector down to its interior knots.

    Originally written for bspline_magfit, but generic: the slice runs from
    the first strictly-increasing knot up to (but excluding) the last one.
    """
    # Locations where the knot value strictly increases over its predecessor
    # (np.roll wraps, so index 0 is compared against the final knot).
    increasing = np.where((all_knots - np.roll(all_knots, 1)) > 0.)[0]
    first, last = increasing[0], increasing[-1]
    return all_knots[first:last]
def func_fit(x, y, func, deg, x2=None, minx=None, maxx=None, minx2=None, maxx2=None,
             w=None, inmask=None, guesses=None, return_errors=False):
    """Fit a function of the requested type to the provided data.

    Args:
        x (`numpy.ndarray`_): Independent variable.
        y (`numpy.ndarray`_): Dependent data to fit.
        func (:obj:`str`):
            polynomial, legendre, chebyshev, gaussian, moffat; a name
            containing '2d' selects a 2-d fit (requires `x2`).
        deg (:obj:`int`):
            degree of the fit; for the gaussian branch this is the number
            of free parameters (2-5), for moffat it must be 3.
        x2 (`numpy.ndarray`_, optional):
            Second independent variable for 2-d fits.
        minx: Lower normalization bound for x (legendre/chebyshev/2d).
        maxx: Upper normalization bound for x.
        minx2: Lower normalization bound for x2 (2-d fits only).
        maxx2: Upper normalization bound for x2.
        w (`numpy.ndarray`_, optional):
            Fit weights; treated as 1/sigma in the curve_fit branches.
        inmask (`numpy.ndarray`_, optional):
            Boolean good-pixel mask; only True elements are fit.
        guesses: Optional initial parameters for gaussian/moffat fits.
        return_errors: Unsupported; an error is raised if True.

    Returns:
        PypeItFit: container holding the coefficients, covariance (only
        populated by the curve_fit branches), function name, and
        normalization ranges.
    """
    if return_errors:
        msgs.error("Need to deal with this")
    # Init
    pcov = None  # only the curve_fit (gaussian/moffat) branches set this
    # If the user provided an inmask apply it. The logic below of evaluating the fit only at the non-masked
    # pixels is preferable to the other approach of simply setting the weights to zero. The reason for that is that
    # the fits use a least-square optimization approach using matrix algebra, and lots of zero weights are
    # 1) more costly, and 2) will not produce exactly the same result (due to roundoff error) as actually
    # removing the locations you want to mask.
    if inmask is not None:
        x_out = x[inmask]
        y_out = y[inmask]
        if x2 is not None:
            x2_out = x2[inmask]
        else:
            x2_out = None
        if w is not None:
            w_out = w[inmask]
        else:
            w_out = None
    else:
        x_out = x
        y_out = y
        if x2 is not None:
            x2_out = x2
        else:
            x2_out = None
        if w is not None:
            w_out = w
        else:
            w_out = None
    # For two-d fits x = x, y = x2, y = z
    if ('2d' in func) and (x2_out is not None):
        # Is this a 2d fit?  Strip the '2d' suffix to get the 1-d basis name.
        fitc = polyfit2d_general(x_out, x2_out, y_out, deg, w=w_out, function=func[:-2],
                                 minx=minx, maxx=maxx, miny=minx2, maxy=maxx2)
    elif func == "polynomial":
        fitc = np.polynomial.polynomial.polyfit(x_out, y_out, deg, w=w_out)
    elif func == "legendre":
        # Map x onto [-1, 1], the natural domain of the Legendre basis.
        if minx is None or maxx is None:
            if np.size(x_out) == 1:
                xmin, xmax = -1.0, 1.0
            else:
                xmin, xmax = np.min(x_out), np.max(x_out)
        else:
            xmin, xmax = minx, maxx
        xv = 2.0 * (x_out-xmin)/(xmax-xmin) - 1.0
        fitc = np.polynomial.legendre.legfit(xv, y_out, deg, w=w_out)
    elif func == "chebyshev":
        # Same normalization as the legendre branch.
        if minx is None or maxx is None:
            if np.size(x_out) == 1:
                xmin, xmax = -1.0, 1.0
            else:
                xmin, xmax = np.min(x_out), np.max(x_out)
        else:
            xmin, xmax = minx, maxx
        xv = 2.0 * (x_out-xmin)/(xmax-xmin) - 1.0
        fitc = np.polynomial.chebyshev.chebfit(xv, y_out, deg, w=w_out)
    elif func == "bspline":
        msgs.error("Need to update whatever is calling this to call bspline instead..")
        #if bspline_par is None:
        #    bspline_par = {}
        ## TODO -- Deal with this kwargs-like kludge
        #fitc = bspline_fit(x_out, y_out, order=deg, w=w_out, **bspline_par)
    elif func == "gaussian":
        # Guesses
        if guesses is None:
            ampl, cent, sigma = guess_gauss(x_out, y_out)
            # As first guess choose slope and intercept to be zero
            b = 0
            m = 0
        else:
            # Unpack the guesses according to the number of free parameters.
            if deg == 2:
                ampl, sigma = guesses
            elif deg == 3:
                ampl, cent, sigma = guesses
            elif deg == 4:
                b, ampl, cent, sigma = guesses
            elif deg == 5:
                m, b, ampl, cent, sigma = guesses
        # Error: curve_fit expects per-point sigma, so invert the weights.
        if w_out is not None:
            sig_y = 1./w_out
        else:
            sig_y = None
        if deg == 2:  # 2 parameter fit
            fitc, pcov = curve_fit(gauss_2deg, x_out, y_out, p0=[ampl, sigma], sigma=sig_y)
        elif deg == 3:  # Standard 3 parameters
            fitc, pcov = curve_fit(gauss_3deg, x_out, y_out, p0=[ampl, cent, sigma],
                                   sigma=sig_y)
        elif deg == 4:  # 4 parameters
            fitc, pcov = curve_fit(gauss_4deg, x_out, y_out, p0=[b, ampl, cent, sigma], sigma=sig_y)
        elif deg == 5:  # 5 parameters
            fitc, pcov = curve_fit(gauss_5deg, x_out, y_out, p0=[m, b, ampl, cent, sigma], sigma=sig_y)
        else:
            msgs.error("Not prepared for deg={:d} for Gaussian fit".format(deg))
    elif func == "moffat":
        # Guesses
        if guesses is None:
            # Seed the Moffat parameters from a Gaussian estimate;
            # 2.355*sigma is the Gaussian FWHM.
            ampl, cent, sigma = guess_gauss(x_out, y_out)
            p0 = ampl
            p2 = 3.  # Standard guess
            p1 = (2.355*sigma)/(2*np.sqrt(2**(1./p2)-1))
        else:
            p0, p1, p2 = guesses
        # Error
        if w_out is not None:
            sig_y = 1./w_out
        else:
            sig_y = None
        if deg == 3:  # Standard 3 parameters
            fitc, pcov = curve_fit(moffat, x_out, y_out, p0=[p0, p1, p2], sigma=sig_y)
        else:
            msgs.error("Not prepared for deg={:d} for Moffat fit".format(deg))
    else:
        msgs.error("Fitting function '{0:s}' is not implemented yet" + msgs.newline() +
                   "Please choose from 'polynomial', 'legendre', 'chebyshev','bspline'")
    # DataContainer
    pypeitFit = PypeItFit(xval=x, yval=y, weights=w, fitc=fitc, fitcov=pcov, func=func,
                          minx=minx, maxx=maxx, minx2=minx2, maxx2=maxx2)
    if inmask is not None:
        # NOTE(review): the good-pixel mask is stored as ints, presumably
        # for serialization by the DataContainer — confirm.
        pypeitFit.gpm = inmask.astype(int)
    return pypeitFit
def bspline_fit(x, y, order=3, knots=None, everyn=20, xmin=None, xmax=None, w=None, bkspace=None):
    """B-spline fit to (x, y); should normally be called via func_fit.

    Parameters
    ----------
    x, y : ndarray
        Data to fit.
    order : int
        Spline degree (default 3, cubic).
    knots : ndarray, optional
        Internal knots only; the external ones are added by scipy.
    everyn : int
        Place a knot at every ``everyn``-th good pixel (used when neither
        ``knots`` nor ``bkspace`` is given).
    xmin, xmax : float, optional
        Interval endpoints passed to splrep (xb/xe).
    w : ndarray, optional
        Weights (1/sigma); points with w <= 0 are excluded from the fit.
    bkspace : float
        Breakpoint spacing in units of x (alternative knot recipe).

    Returns
    -------
    tuple
        The (t, c, k) tuple describing the b-spline.
    """
    # splrep "task": 0 lets splrep choose knots, -1 uses the supplied ones.
    task = 0
    if w is None:
        good = np.arange(x.size)
        weights = None
    else:
        good = np.where(w > 0.)[0]
        weights = w[good]
    ngood = good.size
    if knots is not None:
        # Caller supplied the interior knots directly.
        task = -1
    elif bkspace is not None:
        # Evenly spaced breakpoints across the good-data range.
        xgood = x[good]
        span = np.max(xgood) - np.min(xgood)
        start = np.min(xgood)
        nbkpts = max(int(span/bkspace) + 1, 2)
        spacing = span/(nbkpts-1)
        knots = np.arange(1, nbkpts-1)*spacing + start
        # Drop any knot with no data between it and its predecessor.
        keep = np.ones(len(knots), dtype=bool)
        for ii in range(1, len(knots)):
            if not np.any((xgood > knots[ii-1]) & (xgood < knots[ii])):
                keep[ii] = False
        knots = knots[keep]
    elif everyn is not None:
        # A knot every good N pixels.
        idx_knots = np.arange(everyn//2, ngood-everyn//2, everyn)
        knots = x[good[idx_knots]]
    else:
        msgs.error("No method specified to generate knots")
    # Generate spline
    try:
        tck = interpolate.splrep(x[good], y[good], w=weights, k=order, xb=xmin, xe=xmax,
                                 t=knots, task=task)
    except ValueError:
        # Knot problem (usually)
        msgs.warn("Problem in the bspline knot")
        raise ValueError("Crashing out of bspline fitting")
    return tck
# JFH Testing
def zerocross1d(x, y, getIndices=False):
    """
    Locate the zero crossings of discretely sampled 1-d data.

    A crossing between two samples with opposite signs is placed by linear
    interpolation.  A sample that is exactly zero counts as a crossing only
    if the sign flips across it; the first and last samples are never
    considered, zero or not.

    Parameters
    ----------
    x, y : arrays
        Ordinate and abscissa data values; `x` must be strictly ascending.
    getIndices : boolean, optional
        If True, also return the indices of the points preceding each
        crossing.  Default is False.

    Returns
    -------
    xvals : array
        Locations of the zero crossings.
    indices : array, optional
        Indices of the points preceding the crossings (only when
        `getIndices` is True).
    """
    # The interpolation below assumes strictly ascending x.
    if np.any((x[1:] - x[0:-1]) <= 0.0):
        msgs.error("The x-values must be sorted in ascending order!. Sort the data prior to calling zerocross1d.")
    # Indices preceding a sign change between consecutive samples.
    sign_change = np.where(y[1:] * y[0:-1] < 0.0)[0]
    # Linear interpolation for the crossing within each such interval.
    run = x[sign_change + 1] - x[sign_change]
    rise = y[sign_change + 1] - y[sign_change]
    interp_x = x[sign_change] - y[sign_change] * (run / rise)
    # Exactly-zero samples: keep only interior ones with a sign flip across.
    exact = np.where(y == 0.0)[0]
    exact = exact[np.where((exact > 0) & (exact < x.size - 1))]
    exact = exact[np.where(y[exact - 1] * y[exact + 1] < 0.0)]
    # Merge both kinds of crossing and sort by x location.
    crossings = np.concatenate((interp_x, x[exact]))
    preceding = np.concatenate((sign_change, exact))
    order = np.argsort(crossings)
    crossings, preceding = crossings[order], preceding[order]
    if getIndices:
        return crossings, preceding
    return crossings
def calc_offset(raA, decA, raB, decB, distance=False):
    """
    Calculate the offset in arcseconds between two sky coordinates.

    All coordinates must be given in decimal degrees.

    Parameters
    ----------
    raA, decA : float
        Right ascension and declination of point A (degrees).
    raB, decB : float
        Right ascension and declination of point B (degrees).
    distance : bool, optional
        If True, return the total separation instead of the component
        offsets.

    Returns
    -------
    tuple of float, or float
        ``(delRA, delDEC)`` in arcseconds, or the scalar separation when
        ``distance`` is True.
    """
    # Scale the RA offset by cos(dec) so it is a true on-sky angular offset.
    delRA = 3600.0*(raA-raB)*np.cos(decA*np.pi/180.0)
    delDEC = 3600.0*(decA-decB)
    # Idiom fix: test truthiness instead of the original `distance == True`.
    if distance:
        return np.sqrt(delRA**2 + delDEC**2)
    return delRA, delDEC
def func_der(coeffs, func, nderive=1):
    """Differentiate a coefficient vector in the named polynomial basis.

    Parameters
    ----------
    coeffs : array-like
        Coefficients in the basis named by `func`.
    func : str
        One of 'polynomial', 'legendre', 'chebyshev'.
    nderive : int, optional
        Order of the derivative to take (default 1).

    Returns
    -------
    ndarray
        Coefficients of the derivative in the same basis.
    """
    derivatives = {
        "polynomial": np.polynomial.polynomial.polyder,
        "legendre": np.polynomial.legendre.legder,
        "chebyshev": np.polynomial.chebyshev.chebder,
    }
    try:
        deriv = derivatives[func]
    except KeyError:
        msgs.error("Functional derivative '{0:s}' is not implemented yet"+msgs.newline() +
                   "Please choose from 'polynomial', 'legendre', 'chebyshev'")
    else:
        return deriv(coeffs, m=nderive)
def perturb(covar, bparams, nsim=1000):
    """Draw `nsim` parameter vectors consistent with a covariance matrix.

    Each column of the result is `bparams` plus a correlated Gaussian
    deviate generated via the Cholesky factor of `covar`.
    """
    npar = covar.shape[0]
    # Correlate unit normal draws with the Cholesky factor of the covariance.
    unit_draws = np.matrix(np.random.standard_normal((npar, nsim)))
    chol = np.linalg.cholesky(np.matrix(covar))
    correlated = chol * unit_draws
    # Replicate the best-fit parameters across columns, then perturb in place.
    perturbed = bparams.reshape(npar, 1).repeat(nsim, axis=1)
    perturbed += correlated
    return perturbed
def mask_polyfit(xarray, yarray, order, maxone=True, sigma=3.0):
    """Iteratively sigma-clip points about a polynomial fit.

    Parameters
    ----------
    xarray, yarray : ndarray
        Abscissa and ordinate data.
    order : int
        Polynomial order of the fit.
    maxone : bool, optional
        If True, reject at most the single most deviant point per
        iteration; otherwise reject every point beyond the threshold.
    sigma : float, optional
        Rejection threshold in units of the MAD-based standard deviation.

    Returns
    -------
    ndarray of int
        Mask array: 1 marks rejected points, 0 marks points kept.
    """
    # `np.int` was removed in NumPy 1.24; use the builtin int dtype instead.
    mask = np.zeros(xarray.size, dtype=int)
    mskcnt = 0
    while True:
        w = np.where(mask == 0)
        xfit = xarray[w]
        yfit = yarray[w]
        ct = np.polyfit(xfit, yfit, order)
        yrng = np.polyval(ct, xarray)
        # 1.4826 converts a median absolute deviation to a Gaussian sigma.
        sigmed = 1.4826*np.median(np.abs(yfit-yrng[w]))
        if xarray.size-np.sum(mask) <= order+2:
            msgs.warn("More parameters than data points - fit might be undesirable")
            break  # More data was masked than allowed by order
        if maxone:  # Only remove the most deviant point
            tst = np.abs(yarray[w]-yrng[w])
            m = np.argmax(tst)
            if tst[m] > sigma*sigmed:
                mask[w[0][m]] = 1
        else:
            # Reject every point beyond the threshold in one pass.
            w = np.where(np.abs(yarray-yrng) > sigma*sigmed)
            mask[w] = 1
        if mskcnt == np.sum(mask):
            break  # No new values have been included in the mask
        mskcnt = np.sum(mask)
    return mask
def poly_to_gauss(coeffs):
    """Convert quadratic log-space coefficients to Gaussian parameters.

    For ln(y) = c0 + c1*x + c2*x**2 (a Gaussian in log space, c2 < 0),
    return the equivalent amplitude, centroid and sigma.

    Parameters
    ----------
    coeffs : array-like
        Polynomial coefficients [c0, c1, c2].

    Returns
    -------
    list, bool
        ``[ampl, cent, sigm]`` and a failure flag.  On failure the
        parameters are ``[0.0, 0.0, 0.0]`` and the flag is True.
    """
    try:
        sigm = np.sqrt(-0.5/coeffs[2])
        cent = -0.5*coeffs[1]/coeffs[2]
        ampl = np.exp(coeffs[0] + 0.5*(cent/sigm)**2)
    except Exception:
        # The original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit.  `except Exception` still covers the expected failures
        # (ZeroDivisionError for c2 == 0, IndexError for short input, ...).
        return [0.0, 0.0, 0.0], True
    return [ampl, cent, sigm], False
def robust_regression(x, y, ordr, outfrac, maxiter=100, function='polynomial', min=None, max=None):
    """
    Deprecated

    Random-subset robust fit: fit a random inlier subset, then repeatedly
    try swapping the worst in-sample point with out-of-sample points,
    keeping exchanges that reduce the median absolute deviation.
    """
    # Deprecated: this aborts unconditionally via msgs.error() below, so the
    # fitting code that follows is effectively unreachable.
    msgs.bug("PypeIt using deprecated function")
    msgs.error("Please contact the authors")
    xsize = x.size
    # Fraction of points assumed to be inliers, floored at 50%.
    infrac = 1.0-outfrac
    if infrac < 0.5: infrac = 0.5
    slct = int(xsize*infrac)
    if slct == xsize: slct = xsize-1
    # Degenerate cases: too few points for the requested order.
    if ordr+1 >= slct:
        if xsize <= 1:
            msgs.error("At least 2 points are required for a statistical fit")
        elif xsize == 2:
            msgs.warn("Only a constant can be fit to 2 points")
            msgs.info("Fitting a constant instead")
            return func_fit(x, y, function, 0)
        elif ordr+1 >= xsize:
            msgs.warn("Not enough points ({0:d}) for a {1:d}th order fit".format(xsize, ordr))
            ordr = xsize-3
            slct = ordr+2
            msgs.info("Changing order to a {0:d} order {1:s} fucntion".format(ordr, function))
        else:
            slct = ordr
    # Start from a random subset of `slct` points.
    indx = np.arange(xsize)
    np.random.shuffle(indx)
    ind = indx[:slct]
    i = 0
    while True:
        # Fit the current subset and measure its median absolute deviation.
        tc = func_fit(x[ind], y[ind], function, ordr)
        diff = np.abs(y[ind]-func_val(tc, x[ind], function))
        mad = np.median(diff)
        w = np.argsort(diff)
        inds = -1
        # Trial-swap the worst in-sample point with each out-of-sample point,
        # remembering the exchange that most reduces the MAD.
        for j in range(0, xsize-slct):
            temp = ind[w[-1]]
            ind[w[-1]] = indx[slct+j]
            indx[slct+j] = temp
            diff = np.abs(y[ind]-func_val(tc, x[ind], function))
            if np.median(diff) < mad:
                inds = j
                mad = np.median(diff)
            # Switch it back
            temp = ind[w[-1]]
            ind[w[-1]] = indx[slct+j]
            indx[slct+j] = temp
        # Stop when no exchange helps or the iteration budget is exhausted.
        if inds == -1 or i > maxiter: break
        # Commit the best exchange found this pass.
        temp = ind[w[-1]]
        ind[w[-1]] = indx[slct+inds]
        indx[slct+inds] = temp
        i += 1
    return tc
def polyfit_integral(x, y, dx, deg, rcond=None, full=False, w=None):
    """Least-squares polynomial fit to bin-averaged data.

    Finds coefficients c such that the average of the polynomial over each
    bin [x - dx/2, x + dx/2] matches y: the design matrix holds
    (u**(k+1) - l**(k+1)) / ((k+1)*dx) for each monomial power k, where u
    and l are the bin edges.  The interface mirrors
    numpy.polynomial.polynomial.polyfit, with `dx` giving the bin widths
    and ``full=True`` also returning the lstsq diagnostics.
    """
    order = int(deg) + 1
    x = np.asarray(x)
    y = np.asarray(y)
    # Input validation (mirrors numpy's polyfit checks).
    if deg < 0:
        msgs.bug("Expected deg >= 0")
        msgs.error("Input of function arutils.polyfit_integral is incorrect")
    if x.ndim != 1:
        msgs.bug("Expected 1D vector for x")
        msgs.error("Input of function arutils.polyfit_integral is incorrect")
    if x.size == 0:
        msgs.bug("Expected non-empty vector for x")
        msgs.error("Input of function arutils.polyfit_integral is incorrect")
    if y.ndim < 1 or y.ndim > 2:
        msgs.bug("Expected 1D or 2D array for y")
        msgs.error("Input of function arutils.polyfit_integral is incorrect")
    if len(x) != len(y):
        msgs.bug("Expected x and y to have same length")
        msgs.error("Input of function arutils.polyfit_integral is incorrect")
    # Build the (transposed) design matrix of bin-averaged monomials.
    upper = np.polynomial.polynomial.polyvander(x + dx/2.0, deg + 1)
    lower = np.polynomial.polynomial.polyvander(x - dx/2.0, deg + 1)
    powers = np.arange(1., deg + 2.).reshape(1, deg + 1).repeat(x.size, axis=0)
    widths = dx.reshape(dx.size, 1).repeat(deg + 1, axis=1)
    design_t = ((upper - lower)[:, 1:] / (widths * powers)).T
    target_t = y.T
    if w is not None:
        w = np.asarray(w)
        if w.ndim != 1:
            msgs.bug("Expected 1D vector for weights in arutils.polyfit2d")
        if len(x) != len(w):
            msgs.bug("Expected x and weights to have same length in arutils.polyfit2d")
        # Apply weights without in-place ops (they can cause problems with NA).
        design_t = design_t * w
        target_t = target_t * w
    # Default rcond as in numpy's polyfit.
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps
    # Normalize the design-matrix columns for numerical stability.
    if issubclass(design_t.dtype.type, np.complexfloating):
        scale = np.sqrt((np.square(design_t.real) + np.square(design_t.imag)).sum(1))
    else:
        scale = np.sqrt(np.square(design_t).sum(1))
    scale[scale == 0] = 1
    # Solve the least squares problem and undo the column scaling.
    c, resids, rank, s = np.linalg.lstsq(design_t.T/scale, target_t.T, rcond)
    c = (c.T/scale).T
    # warn on rank reduction
    if rank != order and not full:
        msgs.warn("The fit result of the function arutils.polyfit_integral may be poorly conditioned")
    return (c, [resids, rank, s, rcond]) if full else c
def poly_iterfit(x, y, ordr, maxrej=5):
    """Iteratively reject the point whose removal most improves a polyfit.

    Uses leave-one-out refits: on each pass the point whose exclusion gives
    the lowest fractional chi-square is dropped, until the improvement
    stalls or ``maxrej`` points have been removed.
    """
    xdata = x.copy()
    ydata = y.copy()
    nrej = 0
    best_chisq = None
    while nrej < maxrej:
        prev_chisq = best_chisq
        indices = np.arange(xdata.size)
        loo_chisq = np.zeros(xdata.size)
        # Leave-one-out: refit with each point dropped in turn and record
        # the fractional chi-square of the remaining points.
        for i in indices:
            keep = np.delete(indices, i)
            c = np.polyfit(xdata[keep], ydata[keep], ordr)
            model = np.polyval(c, xdata[keep])
            loo_chisq[i] = np.sum(((ydata[keep]-model)/model)**2)
        worst = np.argmin(loo_chisq)
        best_chisq = loo_chisq[worst]
        # Stop once removing another point no longer improves chi-square.
        if prev_chisq is not None and prev_chisq-0.001 < best_chisq:
            break
        xdata = np.delete(xdata, worst)
        ydata = np.delete(ydata, worst)
        nrej += 1
    msgs.info("Robust regression identified {0:d} outliers".format(nrej))
    # NOTE(review): as in the original, `c` is the fit from the final
    # leave-one-out pass (last index excluded), not a refit on the cleaned
    # data — confirm this is intended.
    return c
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.