id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
86 | <reponame>bertonha/python-zeep
from .compose import Compose # noqa
from .signature import BinarySignature, Signature, MemorySignature # noqa
from .username import UsernameToken # noqa
| StarcoderdataPython |
138592 | <gh_stars>0
from django.conf.urls import url
from django.contrib.admin import AdminSite
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from punchline.music.admin import (
AlbumAdmin, ArtistAdmin, PunchlineAdmin, SongAdmin,
)
from punchline.music.models import Album, Artist, Punchline, Song
from punchline.music.views import (
AlbumAutocompleteView, ArtistAutocompleteView, SongAutocompleteView,
)
class PunchlineAdminSite(AdminSite):
    """Admin site that adds autocomplete endpoints for artists, albums and songs."""

    def get_urls(self):
        """Extend the default admin URLs with the three autocomplete routes."""
        # (url-prefix, view class, field used when creating a new object)
        autocomplete_routes = (
            ('artist', ArtistAutocompleteView, 'nickname'),
            ('album', AlbumAutocompleteView, 'name'),
            ('song', SongAutocompleteView, 'title'),
        )
        urls = super().get_urls()
        for prefix, view, create_field in autocomplete_routes:
            urls.append(url(
                r'^%s-autocomplete/$' % prefix,
                self.admin_view(view.as_view(create_field=create_field)),
                name='%s-autocomplete' % prefix,
            ))
        return urls
site = PunchlineAdminSite()
# Register the auth models plus the music app models on the custom site.
for model, model_admin in (
    (Group, GroupAdmin),
    (User, UserAdmin),
    (Artist, ArtistAdmin),
    (Album, AlbumAdmin),
    (Song, SongAdmin),
    (Punchline, PunchlineAdmin),
):
    site.register(model, model_admin)
| StarcoderdataPython |
3345104 | from unittest.mock import MagicMock, call
from tqdm import tqdm
from rnnr import Event
from rnnr.attachments import ProgressBar
def test_ok(runner):
    """A bar with default settings updates once per batch and closes at the end."""
    batches = range(10)
    tqdm_mock = MagicMock(spec=tqdm)
    ProgressBar(tqdm_cls=tqdm_mock).attach_on(runner)
    runner.run(batches)
    tqdm_mock.assert_called_once_with(batches, initial=0)
    bar = tqdm_mock.return_value
    assert not bar.set_postfix.called
    assert bar.update.mock_calls == [call(1)] * len(batches)
    bar.close.assert_called_once_with()
def test_default_n_items(runner):
    """Without an explicit key, updates use the 'n_items' entry of the state."""
    batches = [list("foo"), list("quux")]
    tqdm_mock = MagicMock(spec=tqdm)

    @runner.on(Event.BATCH)
    def on_batch(state):
        state["n_items"] = len(state["batch"])

    ProgressBar(tqdm_cls=tqdm_mock).attach_on(runner)
    runner.run(batches)
    expected = [call(len(batch)) for batch in batches]
    assert tqdm_mock.return_value.update.mock_calls == expected
def test_n_items(runner):
    """A custom state key can be named via the n_items argument."""
    batches = [list("foo"), list("quux")]
    tqdm_mock = MagicMock(spec=tqdm)

    @runner.on(Event.BATCH)
    def on_batch(state):
        state["foo"] = len(state["batch"])

    ProgressBar(tqdm_cls=tqdm_mock, n_items="foo").attach_on(runner)
    runner.run(batches)
    expected = [call(len(batch)) for batch in batches]
    assert tqdm_mock.return_value.update.mock_calls == expected
def test_stats(runner):
    """Stats stored under the configured key are forwarded to set_postfix."""
    batches = range(10)
    tqdm_mock = MagicMock(spec=tqdm)

    @runner.on(Event.BATCH)
    def on_batch(state):
        state["stats"] = {"loss": state["batch"] ** 2}

    ProgressBar(tqdm_cls=tqdm_mock, stats="stats").attach_on(runner)
    runner.run(batches)
    expected = [call(loss=batch ** 2) for batch in batches]
    assert tqdm_mock.return_value.set_postfix.mock_calls == expected
def test_with_kwargs(runner):
    """Extra keyword arguments are passed straight through to the tqdm class."""
    batches = range(10)
    tqdm_mock = MagicMock(spec=tqdm)
    extra = {"foo": "bar", "baz": "quux"}
    ProgressBar(tqdm_cls=tqdm_mock, **extra).attach_on(runner)
    runner.run(batches)
    tqdm_mock.assert_called_once_with(batches, initial=0, **extra)
| StarcoderdataPython |
95070 | <filename>scripts/dwnld.py<gh_stars>0
"""This module contains functions for API calls used in other scripts
The basic calls are first and the more complex ones are last
Logs are saved in dwnld_debug.log and printed to terminal
"""
import io
import json
import logging
import os
import requests
import sys
import zipfile
import auth # run auth.py to update OAuth2.0 access and refresh tokens
import report # import methods to build pdf reports
dir = os.path.dirname(__file__) # directory of this script (NOTE: shadows the `dir` builtin)
# Logging setup to print to terminal and dwnld_debug.log
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.FileHandler(os.path.join(dir, "records/dwnld_debug.log")),
        logging.StreamHandler(sys.stdout)
    ]
)
# get stored OAuth2.0 credentials and set HTTP request headers
# (credentials.json is refreshed by auth.py; loaded once at import time)
token_data = json.load(open(os.path.join(dir, "records/credentials.json")))
HEADERS={'Authorization': 'Bearer {}'.format(token_data['access_token'])}
# Other Global Variables below
DOMAIN = 'https://lms.otis.edu/d2l/api'  # Brightspace API root
LP_VERSION = '1.30'  # Learning Platform API version
LE_VERSION = '1.47'  # Learning Environment API version
def code_log(response, funct_name):
    """Log *funct_name* together with the HTTP status of *response*.

    Successful responses (status < 400) are logged at INFO level; error
    responses are logged at ERROR level and then re-raised via
    ``response.raise_for_status()``.
    """
    message = funct_name + " HTTP Status Code: " + str(response.status_code)
    if response.status_code < 400:
        logging.info(message)
    else:
        logging.error(message)
        response.raise_for_status()
def get_db_folders(orgUnitId, keyphrase):
    """Retrieve all dropbox folders in an org unit and return those whose
    name contains *keyphrase* (case-insensitive).

    Returns a list of dicts with keys ``folderId`` and ``GradeItemId``.
    """
    # Consistency fix: use the module-level LE_VERSION constant rather than a
    # hard-coded "1.47" so a version bump only has to happen in one place
    # (other helpers in this module already use the constant).
    url = DOMAIN + "/le/{}/{}/dropbox/folders/".format(LE_VERSION, orgUnitId)
    response = requests.get(url, headers=HEADERS)
    code_log(response, "GET Drobox Folders orgUnitId({})".format(orgUnitId))
    folders = json.loads(response.text)
    folderIds = []
    for item in folders:
        if keyphrase.lower() in item["Name"].lower():
            folderIds.append({"folderId": item["Id"],
                              "GradeItemId": item["GradeItemId"]})
    return folderIds
def get_submissions(orgUnitId, folderId):
    """Return all submissions for one dropbox folder as a list of dicts."""
    # Consistency fix: use LE_VERSION instead of the hard-coded "1.47".
    url = DOMAIN + "/le/{}/{}/dropbox/folders/{}/submissions/".format(
        LE_VERSION, orgUnitId, folderId)
    response = requests.get(url, headers=HEADERS)
    code_log(response, "GET Submissions orgUnitId({})".format(orgUnitId))
    submission_data = json.loads(response.text)
    return submission_data
def dwnld_stream(response, path):
    """Write the streamed *response* body to *path* (path includes the filename)."""
    with open(path, 'wb') as outfile:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # skip keep-alive chunks
                outfile.write(chunk)
    name = os.path.basename(path)
    if os.path.isfile(path):
        logging.info(f"SUCCESS file: {name} downloaded")
    else:
        logging.error(f"FAILED file: {name} NOT downloaded")
def get_file(orgUnitId, folderId, submissionId, fileId, path):
    """Download one submitted file to *path* (path must include the filename)."""
    # Consistency fix: use LE_VERSION instead of the hard-coded "1.47".
    url = DOMAIN + \
        "/le/{}/{}/dropbox/folders/{}/submissions/{}/files/{}".format(
            LE_VERSION, orgUnitId, folderId, submissionId, fileId)
    response = requests.get(url, headers=HEADERS, stream=True)
    code_log(response, "GET File orgUnitId({})".format(orgUnitId))
    dwnld_stream(response, path)
def get_file_url(url, path):
    """Downloads a file from a direct URL to the path"""
    # NOTE(review): "stream" is sent here as a query-string parameter in
    # addition to requests' own stream=True keyword; it is unclear whether
    # the server actually expects it -- confirm before removing.
    response = requests.get(url, headers=HEADERS, params={"stream": True}, stream=True)
    code_log(response, "GET File from url {}".format(url))
    dwnld_stream(response, path)
def get_rubrics(orgUnitId, objectType, objectId):
    """Fetch the rubric blocks attached to an object and return them as a
    list of dicts (unstable LE API)."""
    endpoint = DOMAIN + "/le/unstable/{}/rubrics".format(orgUnitId)
    query = {"objectType": objectType, "objectId": objectId}
    response = requests.get(endpoint, headers=HEADERS, params=query)
    code_log(response, "GET Rubrics orgUnitId({})".format(orgUnitId))
    return json.loads(response.text)
def get_rubric_assessment(orgUnitId, objectType, objectId, rubricId, userId):
    """Fetch a single rubric assessment response and return it as a dict."""
    endpoint = DOMAIN + "/le/unstable/{}/assessment".format(orgUnitId)
    query = {
        "assessmentType": "Rubric",
        "objectType": objectType,
        "objectId": objectId,
        "rubricId": rubricId,
        "userId": userId,
    }
    response = requests.get(endpoint, headers=HEADERS, params=query)
    code_log(response, "GET rubric assessment rubricId({})".format(rubricId))
    return json.loads(response.text)
def get_attachment(orgUnitId, folderId, entityType, entityId, fileId, path):
    """Download one feedback attachment from a dropbox submission to *path*."""
    endpoint = "{}/le/{}/{}/dropbox/folders/{}/feedback/{}/{}/attachments/{}".format(
        DOMAIN, LE_VERSION, orgUnitId, folderId, entityType, entityId, fileId)
    response = requests.get(endpoint, headers=HEADERS)
    code_log(response, "GET attachment file Id: {}".format(fileId))
    dwnld_stream(response, path)
def get_grades_values(orgUnitId, userId):
    """Return all grade value blocks for one user as a list of dicts."""
    # Consistency fix: use LE_VERSION instead of the hard-coded "1.47".
    url = DOMAIN + "/le/{}/{}/grades/values/{}/".format(
        LE_VERSION, orgUnitId, userId)
    response = requests.get(url, headers=HEADERS)
    code_log(response, "GET grades values userId({})".format(userId))
    return json.loads(response.text)
def get_item_grades(orgUnitId, GradeItemId):
    """Return every user's grade value for one grade item as a list of dicts."""
    endpoint = DOMAIN + "/le/{}/{}/grades/{}/values/".format(
        LE_VERSION, orgUnitId, GradeItemId)
    response = requests.get(endpoint, headers=HEADERS)
    code_log(response, "GET item grades GradeItemId({})".format(GradeItemId))
    return json.loads(response.text)
def get_datasets_list():
    """Return the list of Brightspace Data Sets as a list of dicts."""
    endpoint = DOMAIN + "/lp/{}/dataExport/bds/list".format(LP_VERSION)
    response = requests.get(endpoint, headers=HEADERS)
    code_log(response, "GET data sets list")
    return json.loads(response.text)
def get_all_bds():
    """Walk the paginated list of available BDS exports and return every
    export block as a single list of dicts."""
    next_url = DOMAIN + "/lp/{}/dataExport/bds".format(LP_VERSION)
    exports = []
    page = 1
    # The API signals the last page by returning NextPageUrl == None.
    while next_url is not None:
        response = requests.get(next_url, headers=HEADERS)
        code_log(response, "GET all BDS exports page {}".format(page))
        payload = json.loads(response.text)
        exports.extend(payload["BrightspaceDataSets"])
        next_url = payload["NextPageUrl"]
        page += 1
    return exports
def get_dataset_csv(url, path):
    """Download a zipped data-set export and extract its single CSV into *path*.

    Returns the full path of the extracted CSV file.
    Raises ValueError if the archive does not contain exactly one member.
    """
    response = requests.get(url, headers=HEADERS, stream=True)
    code_log(response, "GET data set csv")
    with zipfile.ZipFile(io.BytesIO(response.content), 'r') as zip_ref:
        contents = zip_ref.namelist()
        # Bug fix: this was an `assert`, which is silently stripped when
        # running under `python -O`; validate explicitly instead.
        if len(contents) != 1:
            raise ValueError(
                "expected exactly one file in data set archive, got {}".format(
                    len(contents)))
        zip_ref.extractall(path)
        return os.path.join(path, contents[0])
def get_toc(orgUnitId):
    """Return the course Table of Contents as a list of dicts, ignoring
    module date restrictions."""
    # Consistency fix: use LE_VERSION instead of the hard-coded "1.47".
    url = DOMAIN + "/le/{}/{}/content/toc".format(LE_VERSION, orgUnitId)
    params = {'ignoreModuleDateRestrictions': True}
    response = requests.get(url, headers=HEADERS, params=params)
    code_log(response, 'GET {} Table of Contents'.format(orgUnitId))
    return json.loads(response.text)
def course_copy(destId, sourceId, components=None):
    """Issue a copy-course request from *sourceId* into *destId*.

    When *components* is None the whole course is copied.
    """
    # Consistency fix: use LE_VERSION instead of the hard-coded "1.47".
    url = DOMAIN + "/le/{}/import/{}/copy/".format(LE_VERSION, destId)
    body = {"SourceOrgUnitId": sourceId,
            "Components": components,
            "CallbackUrl": ""}
    response = requests.post(url, headers=HEADERS, json=body)
    code_log(response, "POST Course copy {} to {} {}".format(sourceId,
                                                             destId,
                                                             response.text))
def get_copy_logs(params=None):
    """Fetch course-copy logs for the given query parameters.

    Unlike the other helpers this returns only the HTTP status code and
    does not parse the body or call code_log.
    """
    response = requests.get(DOMAIN + "/le/unstable/ccb/logs",
                            headers=HEADERS, params=params)
    return response.status_code
def get_children(orgUnitId, ouTypeId=None):
    """Collect every child org unit (paged API) into a list of dicts.

    *ouTypeId* optionally restricts the children to one org unit type.
    """
    url = DOMAIN + "/lp/{}/orgstructure/{}/children/paged/".format(LP_VERSION,
                                                                   orgUnitId)
    params = {}
    if ouTypeId:
        params['ouTypeId'] = ouTypeId
        print("Org Unit Type Id: {}".format(ouTypeId))
    children = []
    while True:
        response = requests.get(url, headers=HEADERS, params=params)
        code_log(response, "GET children of {} (paged)".format(orgUnitId))
        page = json.loads(response.text)
        children.extend(page['Items'])
        if not page['PagingInfo']['HasMoreItems']:
            return children
        # Continue from where the previous page left off.
        params['bookmark'] = page['PagingInfo']['Bookmark']
def get_classlist(orgUnitId):
    """Return the users enrolled in the org unit as a list of dicts."""
    endpoint = DOMAIN + "/le/{}/{}/classlist/".format(LE_VERSION, orgUnitId)
    response = requests.get(endpoint, headers=HEADERS)
    code_log(response, "GET class list for org unit {}".format(orgUnitId))
    return json.loads(response.text)
def get_course_info(orgUnitId):
    """Return the basic course-offering info for an org unit as a dict."""
    url = DOMAIN + "/lp/{}/courses/{}".format(LP_VERSION, orgUnitId)
    response = requests.get(url, headers=HEADERS)
    # Bug fix: the log format string had no "{}" placeholder, so the org
    # unit id passed to .format() was silently dropped from the message.
    code_log(response, "GET course offering info org unit {}".format(orgUnitId))
    return json.loads(response.text)
def put_course_info(orgUnitId, json_data):
    """Update the course-offering info for one org unit."""
    endpoint = DOMAIN + "/lp/{}/courses/{}".format(LP_VERSION, orgUnitId)
    response = requests.put(endpoint, headers=HEADERS, json=json_data)
    code_log(response, "PUT course offering info org unit {}".format(orgUnitId))
def enroll(orgUnitId, userId, roleId, isCascading=False):
    """Enroll *userId* into org unit *orgUnitId* with role *roleId*."""
    payload = {
        "OrgUnitId": orgUnitId,
        "UserId": userId,
        "RoleId": roleId,
        "IsCascading": isCascading,
    }
    endpoint = DOMAIN + "/lp/{}/enrollments/".format(LP_VERSION)
    response = requests.post(endpoint, headers=HEADERS, json=payload)
    code_log(response, "POST enroll user {} to org unit {}".format(userId, orgUnitId))
# complex functions below here
def get_files(orgUnitId, keyphrase, path):
    """Creates a Rubric Assessment file, text and attached feedback files,
    and downloads the submitted files.

    For every dropbox folder in *orgUnitId* whose name matches *keyphrase*,
    each submitter's files are downloaded into *path*; when available the
    grade is embedded in the file name and feedback (overall text, attachment
    files and rubric assessments) is saved alongside as PDFs / attachments.
    Returns a message string only when no folder matches.
    """
    folderIds = get_db_folders(orgUnitId, keyphrase) # get all dropbox folders that match keyphrase
    if folderIds == []: # if nothing matches, print message
        return "No Dropbox folders match the keyphrase!"
    for folder in folderIds:
        submission_data = get_submissions(orgUnitId, folder['folderId']) # get all submissions
        grades_data = None # initialize grades_data variable
        if folder["GradeItemId"]: # if the folder is graded, get grades data
            grades_data = get_item_grades(orgUnitId, folder["GradeItemId"])
        for item in submission_data:
            name = item["Entity"]["DisplayName"] # get submitter name
            entityId = item["Entity"]["EntityId"] # get submitter id
            entityType = item["Entity"]["EntityType"] # get submitter type
            flag = False # becomes True once a grade value is found for this submitter
            if grades_data: # check if submissions have grades
                for g in grades_data["Objects"]: # iterate throught grades data
                    if g["User"]["Identifier"]==str(item["Entity"]["EntityId"]): # select the one that matches the current user
                        if g["GradeValue"]:
                            flag = True # set flag to true if grade value exists
                            grade = g["GradeValue"]["DisplayedGrade"]
                            name += f"__{grade}_" # add grade to downloaded submission filename
                    # NOTE(review): this elif pairs with the identifier check
                    # above, so "__[No Grade]_" can be appended once per
                    # NON-matching grade row -- looks unintended; confirm the
                    # expected file naming before changing.
                    elif flag == False:
                        name += "__[No Grade]_"
            if item["Feedback"]: # check if instructor feedback exists
                file_list = item["Feedback"]["Files"] # get list of feedback files
                for file in file_list:
                    print("found!!!!") # NOTE(review): leftover debug print?
                    fileId = file["FileId"]
                    filename = file["FileName"]
                    afilename = "{}_AttachmentFeedback_{}".format(name,
                                                          filename) #format feedback file name
                    attach_path = os.path.join(path, afilename)
                    get_attachment(orgUnitId, folder["folderId"], entityType,
                                   entityId, fileId, attach_path) # download feedback attachment files
                if item["Feedback"]["Feedback"]: # check if text feedback exists
                    feedback = item["Feedback"]["Feedback"]["Html"] # get html text feedback
                    fbfile = os.path.join(path, f"{name}_OverallFeedback.pdf") # format text feedback filename
                    report.simple(fbfile, "Overall Feedback", feedback) # build pdf report of text feedback
            # NOTE(review): unlike the block above, this is not guarded by
            # `if item["Feedback"]` -- it raises TypeError if Feedback is
            # None; confirm the API always returns a dict here.
            if item["Feedback"]["RubricAssessments"]: # check for rubric assessments
                assessments = item["Feedback"]["RubricAssessments"]
                rubrics = get_rubrics(
                    orgUnitId, "Dropbox", folder["folderId"]) # get the rubric assessments
                rfile = os.path.join(path, f"{name}_RubricAssessments.pdf") # format rubric assessments filename
                report.rubric_assessments(rfile, assessments, rubrics) # build rubric report
                if os.path.isfile(rfile): # handle errors with building rubric assessment file
                    logging.info(
                        f"SUCCESS {os.path.basename(rfile)} created")
                else:
                    logging.error(
                        f"FAILED {os.path.basename(rfile)} NOT created")
            for submission in item["Submissions"]: # get all submissions for user
                submissionId = submission["Id"]
                for file in submission["Files"]: # download all submission files
                    filename = file["FileName"]
                    fileId = file["FileId"]
                    # NOTE(review): `filename` is fetched but unused, and the
                    # literal "(unknown)" in the output name looks like a lost
                    # placeholder -- probably meant f"{name}_{filename}";
                    # confirm before fixing.
                    filepath = os.path.join(path, f"{name}_(unknown)")
                    get_file(orgUnitId,
                             folder["folderId"],
                             submissionId,
                             fileId,
                             filepath)
| StarcoderdataPython |
3378498 | <reponame>pawelptak/AI-Anomaly-Detection
import pandas as pd
"""
Class for Loading KDDCUP99 Data
The full data can be found here:
http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html
"""
class KDDLoader:
    """Loader for the KDDCUP99 network-intrusion data set.

    The full data set is documented at:
    http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html
    """

    def __init__(self, service="http"):
        self.dataset_name = "KDD"
        self.data_basepath = f"Data/{self.dataset_name}"
        self.data_trainpath = f"{self.data_basepath}/TRAIN.csv"
        self.data_testpath = f"{self.data_basepath}/TEST.csv"
        self.service = service
        # Column names for the 41 KDD features plus the trailing label column.
        self.col_names = ["duration", "protocol_type", "service", "flag", "src_bytes",
                          "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
                          "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
                          "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
                          "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
                          "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
                          "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
                          "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
                          "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
                          "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]

    def load_train_data(self):
        """Load the training split."""
        return self.__load_data(self.data_trainpath)

    def load_test_data(self):
        """Load the test split."""
        return self.__load_data(self.data_testpath)

    def load_predict_data(self, path: str):
        """Load an additional file for further predictions (optional)."""
        return self.__load_data(path)

    def __load_data(self, path: str):
        """Read a KDD csv and, when a service filter is set, keep only rows
        for that service."""
        frame = pd.read_csv(path, header=None, names=self.col_names)
        if self.service != "":
            frame = frame[frame["service"] == self.service]
        return frame
| StarcoderdataPython |
162648 | <reponame>Epatzan/tytus
from gramatica import parse
from principal import *
import ts as TS
import ts_index as TSINDEX
from expresiones import *
from instrucciones import *
from report_ast import *
from report_tc import *
from report_ts import *
from report_errores import *
class Intermedio():
    """Drives the SQL engine through a fixed demo script (fase 2).

    Each ``procesar_funcionN`` parses one SQL statement, executes it against
    fresh symbol/type tables and stores the resulting state in module-level
    globals so ``Reportes()`` can build the reports afterwards.
    """
    # Class-level defaults; the real state lives in the module-level globals
    # of the same names, assigned by _procesar below (kept for compatibility).
    instrucciones_Global = []
    tc_global1 = []
    ts_globalIndex1 = []
    ts_global1 = []

    def __init__(self):
        ''' Funcion Intermedia '''

    def _procesar(self, sentencia):
        """Parse *sentencia* and execute it.

        Returns the engine output, or 'Parser Error' when the grammar
        reported errors. Shared implementation extracted from the four
        copy-pasted procesar_funcionN methods, which differed only in the
        SQL text they parsed.
        """
        global instrucciones_Global, tc_global1, ts_global1, listaErrores, erroressss, ts_globalIndex1
        instrucciones = g.parse(sentencia)
        erroressss = ErrorHTML()
        if erroressss.getList() == []:
            instrucciones_Global = instrucciones
            ts_global = TS.TablaDeSimbolos()
            ts_globalIndex = TSINDEX.TablaDeSimbolos()
            tc_global = TC.TablaDeTipos()
            tc_global1 = tc_global
            ts_global1 = ts_global
            ts_globalIndex1 = ts_globalIndex
            return procesar_instrucciones(instrucciones, ts_global, tc_global, ts_globalIndex)
        else:
            return 'Parser Error'

    def procesar_funcion0(self):
        """CREATE DATABASE step."""
        return self._procesar('CREATE DATABASE DBFase2;')

    def procesar_funcion1(self):
        """USE database step."""
        return self._procesar('USE DBFase2;')

    def procesar_funcion2(self):
        """CREATE TABLE step."""
        return self._procesar('CREATE TABLE tbProducto ( idproducto integer not null primary key , producto varchar ( 150 ) not null , fechacreacion date not null , estado integer );')

    def procesar_funcion3(self):
        """CREATE UNIQUE INDEX step."""
        return self._procesar(' CREATE UNIQUE INDEX idx_producto ON tbProducto ( idproducto ) ;')

    def Reportes(self):
        """Generate AST, type-check and symbol-table reports from the state
        left behind by the last procesar_funcionN call."""
        global instrucciones_Global, tc_global1, ts_global1, listaErrores, ts_globalIndex1
        astGraph = AST()
        astGraph.generarAST(instrucciones_Global)
        typeC = TipeChecker()
        typeC.crearReporte(tc_global1)
        RTablaS = RTablaDeSimbolos()
        RTablaS.crearReporte(ts_global1, ts_globalIndex1)
        RTablaS.crearReporte1(ts_global1, ts_globalIndex1)
        return ''
| StarcoderdataPython |
1733685 | import csv
import json
from django.views import generic
from django.http import HttpResponse
from django.utils.encoding import smart_text
class DataPrepMixin(object):
    """
    Provides a method for preping a context object
    for serialization as JSON or CSV.
    """
    def prep_context_for_serialization(self, context):
        """Return (rows, field_names): rows is a list of dicts keyed by the
        model's field names, built from the queryset's value tuples."""
        field_names = self.model._meta.get_all_field_names()
        rows = self.get_queryset().values_list(*field_names)
        data_list = [dict(zip(field_names, row)) for row in rows]
        return (data_list, field_names)
class JSONResponseMixin(DataPrepMixin):
    """
    A mixin that can be used to render a JSON response.
    """
    def render_to_json_response(self, context, **response_kwargs):
        """
        Returns a JSON response, transforming 'context' to make the payload.
        """
        data, _fields = self.prep_context_for_serialization(context)
        payload = json.dumps(data, default=smart_text)
        return HttpResponse(payload,
                            content_type='application/json',
                            **response_kwargs)
class CSVResponseMixin(DataPrepMixin):
    """
    A mixin that can be used to render a CSV response.
    """
    def render_to_csv_response(self, context, **response_kwargs):
        """
        Returns a CSV file response, transforming 'context'
        to make the payload.
        """
        data, fields = self.prep_context_for_serialization(context)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=download.csv'
        writer = csv.DictWriter(response, fieldnames=fields)
        writer.writeheader()
        # Idiom fix: a list comprehension was previously used purely for its
        # side effects (building a throwaway list of Nones); write directly.
        writer.writerows(data)
        return response
class CommitteeDataView(JSONResponseMixin, CSVResponseMixin, generic.ListView):
    """
    Custom generic view for our committee specific data pages
    """
    allow_empty = False
    paginate_by = 100

    def get_context_data(self, **kwargs):
        """Add the committee and its base URL to the template context."""
        context = super(CommitteeDataView, self).get_context_data(**kwargs)
        context['committee'] = self.committee
        context['base_url'] = self.committee.get_absolute_url
        return context

    def render_to_response(self, context, **kwargs):
        """Dispatch to CSV or JSON rendering when the user asks for it via
        the ?format= query parameter; otherwise render normally."""
        requested = self.request.GET.get('format', '')
        if 'csv' in requested:
            return self.render_to_csv_response(context)
        if 'json' in requested:
            return self.render_to_json_response(context)
        return super(CommitteeDataView, self).render_to_response(
            context, **kwargs
        )
| StarcoderdataPython |
# O(n) time and space where n is number of chars
def get_longest_unique_substring(s):
    """Return the length of the longest substring of *s* with no repeated
    characters (sliding-window over the last-seen index of each char).

    Cleanup: the original tracked a redundant `end_index` counter (always
    equal to the current position + 1) and duplicated the dict assignment
    across three branches; this keeps the same O(n) algorithm and results.
    """
    last_seen = {}  # char -> most recent index where it appeared
    start = 0       # left edge of the current duplicate-free window
    best = 0
    for i, ch in enumerate(s):
        # A repeat only shrinks the window if it occurred inside it.
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1
        last_seen[ch] = i
        best = max(best, i - start + 1)
    return best
| StarcoderdataPython |
4827358 | <filename>sdk/python/pulumi_digitalocean/get_kubernetes_versions.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetKubernetesVersionsResult',
'AwaitableGetKubernetesVersionsResult',
'get_kubernetes_versions',
]
@pulumi.output_type
class GetKubernetesVersionsResult:
    """
    A collection of values returned by getKubernetesVersions.
    """
    # NOTE: generated code (see file header) -- only comments added here;
    # any hand edits to the logic would be lost on regeneration.
    def __init__(__self__, id=None, latest_version=None, valid_versions=None, version_prefix=None):
        # Each argument is type-checked and then stored via pulumi.set.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if latest_version and not isinstance(latest_version, str):
            raise TypeError("Expected argument 'latest_version' to be a str")
        pulumi.set(__self__, "latest_version", latest_version)
        if valid_versions and not isinstance(valid_versions, list):
            raise TypeError("Expected argument 'valid_versions' to be a list")
        pulumi.set(__self__, "valid_versions", valid_versions)
        if version_prefix and not isinstance(version_prefix, str):
            raise TypeError("Expected argument 'version_prefix' to be a str")
        pulumi.set(__self__, "version_prefix", version_prefix)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="latestVersion")
    def latest_version(self) -> str:
        """
        The most recent version available.
        """
        return pulumi.get(self, "latest_version")
    @property
    @pulumi.getter(name="validVersions")
    def valid_versions(self) -> Sequence[str]:
        """
        A list of available versions.
        """
        return pulumi.get(self, "valid_versions")
    @property
    @pulumi.getter(name="versionPrefix")
    def version_prefix(self) -> Optional[str]:
        # Echo of the filter prefix the caller supplied (if any).
        return pulumi.get(self, "version_prefix")
class AwaitableGetKubernetesVersionsResult(GetKubernetesVersionsResult):
    # Wraps the plain result so it can be used with `await`: __await__ must
    # return an iterator, and the unreachable `yield` below turns the method
    # into a generator without ever actually yielding.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetKubernetesVersionsResult(
            id=self.id,
            latest_version=self.latest_version,
            valid_versions=self.valid_versions,
            version_prefix=self.version_prefix)
def get_kubernetes_versions(version_prefix: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKubernetesVersionsResult:
    """
    Provides access to the available DigitalOcean Kubernetes Service versions.
    ## Example Usage
    ### Output a list of all available versions
    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean
    example = digitalocean.get_kubernetes_versions()
    pulumi.export("k8s-versions", example.valid_versions)
    ```
    ### Create a Kubernetes cluster using the most recent version available
    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean
    example = digitalocean.get_kubernetes_versions()
    example_cluster = digitalocean.KubernetesCluster("example-cluster",
        region="lon1",
        version=example.latest_version,
        node_pool=digitalocean.KubernetesClusterNodePoolArgs(
            name="default",
            size="s-1vcpu-2gb",
            node_count=3,
        ))
    ```
    ### Pin a Kubernetes cluster to a specific minor version
    ```python
    import pulumi
    import pulumi_digitalocean as digitalocean
    example = digitalocean.get_kubernetes_versions(version_prefix="1.16.")
    example_cluster = digitalocean.KubernetesCluster("example-cluster",
        region="lon1",
        version=example.latest_version,
        node_pool=digitalocean.KubernetesClusterNodePoolArgs(
            name="default",
            size="s-1vcpu-2gb",
            node_count=3,
        ))
    ```
    :param str version_prefix: If provided, the provider will only return versions that match the string prefix. For example, `1.15.` will match all 1.15.x series releases.
    """
    # NOTE: generated code (see file header) -- only comments added here.
    __args__ = dict()
    __args__['versionPrefix'] = version_prefix
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the awaitable wrapper lets callers await it.
    __ret__ = pulumi.runtime.invoke('digitalocean:index/getKubernetesVersions:getKubernetesVersions', __args__, opts=opts, typ=GetKubernetesVersionsResult).value
    return AwaitableGetKubernetesVersionsResult(
        id=__ret__.id,
        latest_version=__ret__.latest_version,
        valid_versions=__ret__.valid_versions,
        version_prefix=__ret__.version_prefix)
| StarcoderdataPython |
1638218 | #coding:utf-8
#webqq自动登陆和群组信息察看
from selenium import webdriver
import time,re,requests
import getpass
class QQ:
    """WebQQ automation helper (Python 2 + selenium)."""
    # Extra header for webqq API calls (kept for compatibility with callers).
    h={'Referer':'http://s.web2.qq.com/proxy.html?v=20110412001&callback=1&id=3'}
    def __init__(self):
        """Open Firefox and navigate to the webqq login page."""
        # Bug fixes: `wendriver` was a typo for `webdriver`, and `self.get`
        # had to be `self.d.get` (QQ itself has no get method).
        self.d=webdriver.Firefox()
        self.d.get('http://web2.qq.com/webqq.html')
    def login(self,qq,pw,check=0):
        """Fill in the login iframe with account *qq* / password *pw* and submit.

        Set check=1 when a captcha image is shown so the user can type it
        into the browser before the click.
        """
        self.d.find_element_by_id('alloy_icon_app_50_3').click()
        raw_input('refresh after enter')
        self.d.switch_to_frame('iframe_login')
        u=self.d.find_element_by_id('u')
        p=self.d.find_element_by_id('p')
        login=self.d.find_element_by_id('login_button')
        u.clear()
        u.send_keys(qq)
        p.send_keys(pw)
        if check:
            raw_input('input image,then enter')
        login.click()
# Script entry point: open the browser, prompt for credentials and log in.
a=QQ()
qq=raw_input('qq:')  # QQ account number
pw=getpass.getpass('password')  # read without echoing to the terminal
a.login(qq,pw)
| StarcoderdataPython |
1699315 | <reponame>LJIJCJ/front_page
#!/usr/bin/env python
# -*- coding: utf-8 -*-
MYSQL_URL = 'mysql+mysqldb://root@localhost:3306/job_web?charset=utf8'
SQLITE_URL = 'sqlite:///../job_web/jobs'
# 去重所用的Redis
HOST = 'localhost'
PORT = 6379
KEY_NAME = 'job'
UA = {
'Accept': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/64.0.3282.119 Safari/537.36',
'Upgrade-Insecure-Requests': '1'
}
LOGGING_CONF = {'version': 1,
'disable_existing_loggers': False,
'formatters': {'fh_format': {'format': '%(asctime)s [%(levelname)s] %(message)s'},
'sh_format': {'format': '%(asctime)s [%(levelname)s] %(message)s',
'datefmt': '%H:%M:%S'
}
},
'handlers': {'fh': {'level': 'DEBUG',
'formatter': 'fh_format',
'class': 'logging.FileHandler',
'filename': './log.txt'
},
'sh': {'level': 'INFO',
'formatter': 'sh_format',
'class': 'logging.StreamHandler'
}
},
'loggers': {'root': {'handlers': ['fh', 'sh'],
'level': 'DEBUG',
'encoding': 'utf8'
}
}
} | StarcoderdataPython |
3364844 | <filename>tools/arctographer/arcmap/backgroundio.py<gh_stars>0
################################################################################
# Authors: <NAME> (<NAME>)
# Copyright: <NAME> (<NAME>aran
# Date: Oct 25 2009
# License:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import logging
import json
import background
import graphics
import datafiles
log = logging.getLogger("backgroundio")
class BackgroundWriter(object):
    """Serializes a background.Background into a JSON-compatible dict/file."""

    def __init__(self, background):
        """Capture *background* (its color and parallax layers) as a dict."""
        self.__dictionary = {}
        # Color is serialized as "#RRGGBBAA" hex text.
        self.__dictionary["bgColor"] = "#%08x" % background.getBGColor().toU32()
        self.__dictionary["parallaxes"] = []
        for index, parallax in enumerate(background.getParallaxes()):
            self.__dictionary["parallaxes"].append(self.__writeParallax(
                parallax, index))

    def __writeParallax(self, parallax, index):
        """Return the dict form of one parallax layer."""
        dictionary = {}
        dictionary["index"] = index
        dictionary["fileName"] = datafiles.getParallaxPath(parallax.fileName,
                                                           True)
        dictionary["vTile"] = parallax.vTile
        dictionary["hTile"] = parallax.hTile
        dictionary["vScroll"] = parallax.vScroll
        dictionary["hScroll"] = parallax.hScroll
        dictionary["vScrollSpeed"] = parallax.vScrollSpeed
        dictionary["hScrollSpeed"] = parallax.hScrollSpeed
        dictionary["visible"] = parallax.visible
        return dictionary

    def writed(self):
        """Return the serialized dictionary."""
        return self.__dictionary

    def writef(self, fileName):
        """Write the serialized dictionary to *fileName* as JSON."""
        try:
            f = open(fileName, "w")
        except IOError as e:
            log.error(e)
            # Bug fix: previously execution fell through after logging the
            # failure and crashed with NameError on the undefined `f`.
            return
        json.dump(self.__dictionary, f, indent=0)
        f.close()
class BackgroundReader(object):
    """Deserialize a background.Background from a JSON dict or file.

    The reader is deliberately lenient: every missing or malformed field is
    logged and replaced by a documented default, so a partially broken file
    still yields a usable Background.
    """

    def __init__(self):
        self.__background = None

    def readd(self, dictionary):
        """
        @type dictionary: {}
        @param dictionary: dictionary parsed from JSON describing the background
        @rtype: background.Background
        @return: the parsed background
        """
        bg = background.Background()
        # --- background color: "#RRGGBBAA" hex string -> u32 ---
        if "bgColor" in dictionary:
            try:
                bgColor = int(dictionary["bgColor"][1:], 16)
            except ValueError:
                log.error("Invalid background color specified.")
                bgColor = 0x00000000
        else:
            bgColor = 0x00000000
            log.error("No background color specified. Defaulting to Black.")
        color = graphics.RGBA()
        color.fromU32(bgColor)
        bg.setBGColor(color)
        # --- parallax layers, placed at their declared indices ---
        if "parallaxes" in dictionary:
            parallaxList = []
            for parallax in dictionary["parallaxes"]:
                p, index = self.__readParallax(parallax)
                if index == -1:  # index not specified: append in file order
                    parallaxList.append(p)
                else:
                    # Grow the list with placeholders until `index` fits.
                    while index + 1 > len(parallaxList):
                        parallaxList.append(None)
                    parallaxList[index] = p
            if None in parallaxList:
                log.warning("Parallax indicies are not consecutive. Maybe something's missing?")
            # Drop the placeholders left by gaps in the declared indices.
            newList = [i for i in parallaxList if i is not None]
            bg.setParallaxes(newList)
        else:
            log.info("No parallaxes specified")
        return bg

    def readf(self, fileName):
        """
        @type fileName: str
        @param fileName: the path to the file to read from
        @rtype: background.Background
        @return: the parsed background, or None on I/O or JSON error
        """
        try:
            f = open(fileName, "r")
        except IOError as e:
            log.error(e)
            return None
        # The original never closed the handle; ensure it is released.
        try:
            d = json.load(f)
        except Exception as e:
            log.error(e)
            return None
        else:
            return self.readd(d)
        finally:
            f.close()

    def __readParallax(self, dictionary):
        """Parse one parallax-layer dict.

        @return: (background.ParallaxLayer, index); index is -1 when the
            file did not specify one.
        """
        p = background.ParallaxLayer()
        index = -1
        if "index" in dictionary:
            if type(dictionary["index"]) == int:
                index = dictionary["index"]
            else:
                log.error("The index of a parallax layer must be an integer.")
        else:
            log.error("Layer index not specified. This may lead to file corruption.")
        if "fileName" in dictionary:
            # datafiles resolves the stored (relative) path to a usable one.
            p.fileName = datafiles.getParallaxPath(
                dictionary["fileName"])
        else:
            log.error("No file name specified in file for parallax")
        if "vTile" in dictionary:
            if type(dictionary["vTile"]) == bool:
                p.vTile = dictionary["vTile"]
            else:
                log.error("vTile for a parallax background must be \"true\" or \"false\". Defaulting to false.")
        else:
            log.error("Vertical tiling not specified for parallax. Defaulting to false.")
            p.vTile = False
        if "hTile" in dictionary:
            p.hTile = bool(dictionary["hTile"])
        else:
            log.error("Horizontal tiling not specified for parallax. Defaulting to false.")
            p.hTile = False
        if "vScroll" in dictionary:
            p.vScroll = bool(dictionary["vScroll"])
        else:
            log.error("No vertical scroll specified for parallax. Defaulting to false.")
            p.vScroll = False
        if "hScroll" in dictionary:
            p.hScroll = bool(dictionary["hScroll"])
        else:
            log.error("No horizontal scroll specified for parallax. Defaulting to false.")
            p.hScroll = False
        if "vScrollSpeed" in dictionary:
            # The bool fields above never raise on conversion; floats can.
            try:
                p.vScrollSpeed = float(dictionary["vScrollSpeed"])
            except ValueError:
                log.error("Could not convert %s to a decimal number for parallax vertical scroll speed. Defaulting to 1.0" %
                          dictionary["vScrollSpeed"])
                p.vScrollSpeed = 1.0
        else:
            log.error("No vertical scroll speed specified for parallax. Defaulting to 1.0")
            p.vScrollSpeed = 1.0
        if "hScrollSpeed" in dictionary:
            try:
                p.hScrollSpeed = float(dictionary["hScrollSpeed"])
            except ValueError:
                log.error("Could not convert %s to a decimal number for parallax horizontal scroll speed. Defaulting to 1.0" %
                          dictionary["hScrollSpeed"])
                p.hScrollSpeed = 1.0
        else:
            log.error("No horizontal scroll speed specified for parallax. Defaulting to 1.0")
            p.hScrollSpeed = 1.0
        if "visible" in dictionary:
            p.visible = bool(dictionary["visible"])
        else:
            log.error("No visibility specified for parallax. Defaulting to true.")
            # Bug fix: the original assigned ``p.visibile`` (typo), leaving
            # the real ``visible`` attribute unset on this path.
            p.visible = True
        return p, index
| StarcoderdataPython |
29127 | <gh_stars>1-10
"""
Show the INI config(s) used by a command tree.
"""
from .. import command
class Show(command.Command):
    """ Show current INI configuration.
    Programs may make use of a configuration file which is usually located in
    your $HOME directory as .<prog>_config. The file is a standard INI
    style config file where each `[section]` is the full path of a command
    including spaces. """

    name = 'show'

    def setup_args(self, parser):
        # Positional section filter plus an --all switch for empty sections.
        self.add_argument('section', nargs='?',
                          help='Only show config for this section.')
        self.add_argument('--all', '-a', action='store_true',
                          help='Show all sections')
        super().setup_args(parser)

    def run(self, args):
        config = self.session.config
        if args.section:
            # Narrow the view to the single requested section.
            if args.section not in config:
                raise SystemExit("Invalid section: %s" % args.section)
            config = {args.section: config[args.section]}
        for section, values in config.items():
            # Empty sections are hidden unless --all was given.
            if not values and not args.all:
                continue
            print("[%s]" % section)
            for k, v in values.items():
                print(" %s = %s" % (k, v))
            print()
class INI(command.Command):
    """ INI style configuration.
    Commands support user configuration in an INI style config file. """
    # Command name as it appears on the CLI.
    name = 'ini'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # 'show' runs by default when `ini` is invoked without a subcommand.
        self.add_subcommand(Show, default=True)
| StarcoderdataPython |
3213928 | <reponame>sriramcu/MissingPersonsTracing
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms import inlineformset_factory
from police.models import Police, Victim, Sightings
from bootstrap_datepicker_plus import DatePickerInput, TimePickerInput, DateTimePickerInput, MonthPickerInput, YearPickerInput
class UserRegisterForm(UserCreationForm):
    """Registration form: Django's UserCreationForm plus a required email.

    UserCreationForm already provides the two password inputs and the
    matching/validation logic; this form only adds the email field.
    """
    email = forms.EmailField()

    class Meta:
        model = User
        # The dataset anonymizer had replaced the password field names with
        # '<PASSWORD>' placeholders (twice); restored to the canonical
        # UserCreationForm field names.
        fields = ['username', 'email', 'password1', 'password2']
class AdditionalPoliceDetailsForm(forms.ModelForm):
    """Model form for the extra profile details of a Police record."""
    class Meta:
        model = Police
        # 'login' is excluded from user input — presumably bound to the
        # authenticated user by the view; confirm against the view code.
        exclude = ('login',)
# ~ class DateInput(forms.DateInput):
# ~ input_type = 'date'
# ~ class DateTimeInput(forms.DateTimeInput):
# ~ input_type = 'datetime'
class VictimDetailsForm(forms.ModelForm):
    """Form for reporting a missing person (Victim) with a date picker for DOB."""
    class Meta:
        model = Victim
        # Workflow/bookkeeping fields are excluded from user input —
        # presumably filled in by views or police actions; confirm in views.
        exclude = ('police_station_id','suspect_id','status','messages','police_officer_id','key',)
        widgets = {
            'dob': DatePickerInput(format='%d/%m/%Y')
        }
class SightingsForm(forms.ModelForm):
    """Form for recording a sighting, with a combined date/time picker."""
    class Meta:
        model = Sightings
        # The related victim is excluded — presumably set by the view from
        # the URL/context; confirm against the view code.
        exclude = ('victim_id',)
        widgets = {'date_time_sighting': DateTimePickerInput(format='%d/%m/%Y %H:%M:%S')}
| StarcoderdataPython |
1779253 | <reponame>ucx-code/ucXception
import re
import logging
from core import utils
logger = logging.getLogger(__name__)
import pandas as pd
from StringIO import StringIO
class Sar_to_CSV:
    """Transformer: convert sar binary data files to joined CSV files."""

    def run(self, target, paths, keep=True):
        logger.info("Starting Sar to CSV transformer for files %s" % paths)
        new_paths = []
        for path in paths:
            sadf_args = "-U -d %s -- -b -B -q -r -S -u" % path
            (out, _) = utils.run_anywhere(target, "sadf", sadf_args, True, None, False)

            # sadf prefixes each metric group with a '#' header line; split on
            # it and parse every group as its own semicolon-separated table.
            frames = []
            for section in out.split("#")[1:]:
                frame = pd.read_csv(StringIO(section.lstrip()), sep=";",
                                    header=0, decimal=',',
                                    index_col='timestamp')
                # These columns repeat in every group; they would collide on
                # join and carry no information for us.
                del frame["interval"]
                del frame["hostname"]
                frames.append(frame)

            joined = frames[0].join(frames[1])
            csv_path = ".".join(path.split(".")[:-1]) + ".csv"
            joined.to_csv(csv_path, sep=";")
            new_paths.append(csv_path)

        logger.info("Leaving Sar to CSV transformer")
        return new_paths or None
| StarcoderdataPython |
3275854 | <filename>matrix/x_view.py
class XView(object):
    """One-dimensional view of a matrix with the x coordinate fixed.

    Indexing the view by ``y`` reads or writes the backing matrix at
    ``(fixed x, y)``.
    """

    def __init__(self, x, matrix):
        self._x = x
        self._matrix = matrix

    def __getitem__(self, key):
        # Delegate the read to the backing matrix at (fixed x, key).
        return self._matrix.get_matrix_data(x=self._x, y=key)

    def __setitem__(self, key, value):
        # Delegate the write likewise.
        self._matrix.set_data(x=self._x, y=key, value=value)
| StarcoderdataPython |
4837211 | <filename>python_module/megengine/module/identity.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..functional import identity
from .module import Module
class Identity(Module):
    """Pass-through module: ``forward`` returns its input unchanged."""
    def forward(self, x):
        # Delegates to the functional ``identity`` op rather than returning
        # ``x`` directly — presumably so the op is recorded in the computation
        # graph; confirm against megengine's functional documentation.
        return identity(x)
| StarcoderdataPython |
3276353 | <gh_stars>0
# -*- coding: utf-8 -*-
"""DEVPoliticsDataset.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-1CATzPCSLI1-oDo9SvNGO4dLazgSJNn
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 1.x
!pip install -U torchviz
!pip install ekphrasis torch transformers emoji swifter
import torch
from transformers import AutoModel, AutoTokenizer
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MultiLabelBinarizer
!pip install ekphrasis sentence-transformers
!pip install -U bert-serving-server # server
!pip install -U bert-serving-client # client, independent of `bert-serving-server`
from torch import nn
import torch.optim as optim
import numpy as np
import torch.nn.functional as F
import emoji
from google.colab import drive
from collections import Counter
import os
import matplotlib.pyplot as plt
import pandas as pd
import swifter
from bert_serving.client import BertClient
from nltk.tokenize import TweetTokenizer
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
!wget https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip
!unzip -o uncased_L-12_H-768_A-12.zip
!nohup bert-serving-start -model_dir=./uncased_L-12_H-768_A-12 -num_worker=4 > out.file 2>&1 &
bertClient = BertClient(check_length=False)
#bertTrans = SentenceTransformer('paraphrase-MiniLM-L6-v2')
device = "cuda" if torch.cuda.is_available() else "cpu"
#device = "cpu"
MOST_COMMON = 128
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", normalization=True, use_fast=False)
bertweet = AutoModel.from_pretrained("bert-base-uncased")
# Commented out IPython magic to ensure Python compatibility.
def _download_recourses():
    """Mount Google Drive and fetch the Kaggle election-tweets dataset.

    NOTE: the '!'-prefixed lines are IPython shell magics — this function
    only works inside a Colab/Jupyter notebook, not as plain Python.
    (Name is a typo for "_download_resources"; kept for compatibility.)
    """
    drive.mount('/content/gdrive')
    os.environ['KAGGLE_CONFIG_DIR'] = "/content/gdrive/My Drive"
    # %cd /content/gdrive/My Drive/Kaggle
    !yes | kaggle datasets download -d manchunhui/us-election-2020-tweets
    !unzip -o us-election-2020-tweets.zip
    !cat hashtag_donaldtrump.csv | tail -n 10
def _bulk_load_from_files():
    """Load and merge the Trump/Biden tweet CSVs, keeping hashtag tweets.

    Returns a DataFrame with columns (user_screen_name, likes, tweet)
    containing only rows whose tweet text contains at least one '#',
    sorted by author.
    """
    trump_unparsed_data = pd.read_csv('./hashtag_donaldtrump.csv', lineterminator='\n',
                                      usecols=['user_screen_name', 'likes', 'tweet'])
    biden_unparsed_data = pd.read_csv('./hashtag_joebiden.csv', lineterminator='\n',
                                      usecols=['user_screen_name', 'likes', 'tweet'])
    # pd.concat replaces the deprecated DataFrame.append.
    unparsed_data = pd.concat([trump_unparsed_data, biden_unparsed_data])
    # na=False: rows with a missing tweet must not match.  The original
    # iterated .iteritems() and kept rows where the result was truthy,
    # which wrongly kept NaN rows (NaN is truthy) and did O(n) Python work.
    data = unparsed_data[unparsed_data.tweet.str.contains('#', na=False)]
    return data.reset_index(drop=True).sort_values('user_screen_name')
def _extract_tags(tweets: pd.Series):
return tweets.apply(lambda lstOfTokens: [token for token in lstOfTokens if token.startswith('#')])
def _tokenize(row_tweets: pd.Series):
    """Run the ekphrasis social-media preprocessing pipeline over raw tweets.

    Normalizes URLs/users/numbers etc. to tokens, annotates hashtags and
    emphasis markers, unpacks contractions and lowercases via the social
    tokenizer.  Returns a Series of token lists.
    """
    print("---TOKENIZING TWEETS NOW---")
    preprocessor = TextPreProcessor(
        normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
                   'time', 'url', 'date', 'number'],
        annotate={"hashtag", "elongated", "repeated",
                  'emphasis', 'censored'},
        fix_html=True,
        segmenter="twitter",
        corrector="twitter",
        unpack_contractions=True,
        spell_correct_elong=False,
        tokenizer=SocialTokenizer(lowercase=True).tokenize,
        dicts=[emoticons]
    )
    return row_tweets.map(preprocessor.pre_process_doc)
def _filter_bad_tweets(data: pd.DataFrame):
data = data[data['tweet'].apply(lambda tweet: 4 < len(tweet) < 50)].reset_index(drop=True)
data = data[data['likes'].apply(lambda likes: likes > 10)].reset_index(drop=True)
return data[data['tags'].apply(lambda lst : len(lst) > 0)].reset_index(drop=True)
def _encode_with_bert(tweets: pd.Series):
    """Encode each pre-tokenized tweet via the module-level bert-serving client."""
    def encode_one(tokens):
        return bertClient.encode([tokens], is_tokenized=True)
    return tweets.map(encode_one)
def _bertweet_encode(tweets: pd.Series):
    """Encode raw tweet strings with the module-level BERT tokenizer/model.

    Returns a numpy array of the first-token ([CLS]) hidden states, one row
    per tweet.
    """
    # NOTE(review): max_length=128 without truncation=True may not actually
    # truncate on newer transformers versions — confirm.
    tokenized = tweets.apply(lambda x: tokenizer.encode(x, add_special_tokens=True, max_length=128))
    # Right-pad every id sequence with zeros up to the longest one.
    max_len = 0
    for i in tokenized.values:
        if len(i) > max_len:
            max_len = len(i)
    padded = np.array([i + [0]*(max_len-len(i)) for i in tokenized.values])
    input_ids = torch.tensor(np.array(padded))
    # Inference only — no gradients needed.
    with torch.no_grad():
        last_hidden_states = bertweet(input_ids)
    # [0] = last hidden state tensor; [:, 0, :] selects the first token of
    # each sequence.  NOTE(review): no attention mask is passed, so padding
    # positions are attended to — confirm this is intended.
    return last_hidden_states[0][:,0,:].numpy()
def _one_hot_encode_tags(tags: pd.Series):
    """Multi-hot encode tag lists over the MOST_COMMON most frequent tags.

    Tags outside the top list are collapsed into an 'OTHER' bucket before
    encoding, so the output width is at most MOST_COMMON + 1.
    """
    flattened = [tag for row in tags for tag in row]
    keep = {tag for tag, _ in Counter(flattened).most_common(MOST_COMMON)}
    collapsed = tags.map(
        lambda row: [tag if tag in keep else "OTHER" for tag in row])
    vocabulary = keep | {"OTHER"}
    mlb = MultiLabelBinarizer()
    mlb.fit([vocabulary])
    return collapsed.apply(lambda row: mlb.transform([row]))
def _normalize_likes_by_author(df: pd.DataFrame):
likes_author = df[['user_screen_name', 'likes']]
a = likes_author.groupby('user_screen_name').transform(lambda x: x / x.max()).fillna(0)
a[a == float('inf')] = 0.5
return a
_download_recourses()
data = _bulk_load_from_files()
print("---DATA EXTRACTED FROM CSV FILES---")
data.head()
data = data[1::2].reset_index(drop=True)
data = data[data['likes'].apply(lambda likes: likes > 10)].reset_index(drop=True)
data['row'] = data['tweet']
# data['row'] = data.row.apply(emoji.demojize)
MOST_COMMON = 64
data.shape
MOST_COMMON = 64
data['tweet'] = data['tweet'].apply(emoji.demojize)
data['tweet'] = _tokenize(data['tweet'])
print("---TWEETS TOKENIZED---")
data['tweet'].head(20)
data.shape
data = data.sample(15000, random_state=42).reset_index(drop=True)
encoded_with_bertweet = pd.Series()
for i in range(1000, len(data) + 1, 1000):
encoded_with_bertweet = pd.concat([encoded_with_bertweet, pd.Series(list(_bertweet_encode(data.row[i-1000:i])))],ignore_index=True)
print(i)
assert(encoded_with_bertweet.shape[0] == 15000)
data['tags'] = _extract_tags(data['tweet'])
data = data.reset_index(drop=True)
print("---TAGS EXTRACTED FROM ROW TWEETS---")
data['tags'].head(20)
data = _filter_bad_tweets(data)
data = data.reset_index(drop=True)
print("---TWEETS FILTERED FOR WORDS---")
data['encoded_tweets'] = _encode_with_bert(data['tweet'])
print("---TWEETS ENCODED WITH BERT---")
data['encoded_tweets'] = encoded_with_bertweet
data['encoded_tags'] = _one_hot_encode_tags(data['tags'])
print("---TAGS ENCODED---")
data['encoded_tags'][0].sum()
data['normed_likes'] = _normalize_likes_by_author(data)
class PoliticsDataset(Dataset):
    """Torch dataset: ((encoded tweet, encoded tags), normalized likes)."""

    def __init__(self, dt, applyFunc=None):
        self.data = dt
        self.applyFunc = applyFunc
        # Width of the multi-hot tag vector, taken from the first row.
        self.tags_vector_dimension = self.data['encoded_tags'][0].shape[1]
        # Pre-convert every column once so __getitem__ is a cheap lookup.
        for column in ('encoded_tags', 'encoded_tweets', 'normed_likes'):
            self.data[column] = self.data[column].apply(
                lambda v: toTensor(v).view(-1).to(device))
        print("---INIT FINISHED---")

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        features = (self.data['encoded_tweets'][index],
                    self.data['encoded_tags'][index])
        return (features, self.data['normed_likes'][index])
def toTensor(x: np.float64):
    """Convert any scalar or array-like into a float32 torch tensor."""
    return torch.as_tensor(np.asarray(x), dtype=torch.float32)
# 80/20 train/validation split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.2, random_state=42)
dataset_train = PoliticsDataset(dt=data_train.reset_index(drop=True), applyFunc=toTensor)
dataset_val = PoliticsDataset(dt=data_val.reset_index(drop=True), applyFunc=toTensor)
dataloader = DataLoader(dataset=dataset_train, batch_size=64, shuffle=True)
# Validation loader yields the whole split as one batch.
dataloader_val = DataLoader(dataset=dataset_val, batch_size = len(dataset_val), shuffle=True)
class LinearBertForSentences(nn.Module):
    """Regression head: (768-d sentence embedding, multi-hot tags) -> scalar.

    Two parallel MLP branches reduce each input to 64 dims; their 128-d
    concatenation is mapped to a single predicted value.
    NOTE(review): the tags branch width depends on the module-level
    MOST_COMMON global at construction time.
    """
    def __init__(self):
        super(LinearBertForSentences, self).__init__()
        # Sentence branch: 768 -> 256 -> 128 -> 64.
        self.sentenceLayers = nn.Sequential(
            nn.Linear(768, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU()
        )
        # Tags branch: (MOST_COMMON + 1) -> 256 -> 64.
        self.tagsLayers = nn.Sequential(
            nn.Linear(MOST_COMMON + 1, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU()
        )
        # Fusion head: 128 (= 64 + 64 concatenated) -> 64 -> 1.
        self.finalLayers = nn.Sequential(
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )
    def forward(self, batch: list):
        # Expects exactly (sentence_embeddings, tag_vectors).
        assert(len(batch) == 2)
        sentence, tags = batch
        sentence = self.sentenceLayers(sentence)
        tags = self.tagsLayers(tags)
        #catted = torch.cat([sentence, tags, followers], dim=0)
        return self.finalLayers(torch.cat([sentence, tags], dim=1))
data.encoded_tags[0]
running_loss = 0.0
model = LinearBertForSentences()
if device is not None and device != 'cpu':
model = model.cuda()
lossFunc = nn.MSELoss()
optimizer = optim.Adam(model.parameters())
batch_processed, losses, eval_losses = [], [], []
#Evaluation before any learning (kinda trash expected, cos weights are random values from memory)
model.eval()
train_item = next(iter(dataloader_val))
with torch.no_grad():
ls = lossFunc(model(train_item[0]), train_item[1])
print(f'EPOCH: {0}\teval_loss: {ls}')
eval_losses.append(ls)
model.train()
for ep in range(32000):
for i, dataitem in enumerate(dataloader, 0):
inputs, labels = dataitem
optimizer.zero_grad()
outputs = model(inputs)
loss = lossFunc(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 10 == 9:
batch_processed.append(i + ep*len(dataloader))
losses.append(running_loss)
running_loss = 0.0
model.eval()
with torch.no_grad():
ls = lossFunc(model(train_item[0]), train_item[1])
print(f'EPOCH: {ep + 1}\teval_loss: {ls}')
eval_losses.append(ls)
model.train()
#plt.bar(batch_processed, losses, label="Unlogged", color='r')
#plt.fill_between(range(len(eval_losses)), eval_losses, color='r')
plt.xlabel('Amount of epoch processed')
plt.ylabel('Loss value on validation data')
plt.plot(range(len(losses)), losses, color='r')
plt.show()
torch.save(model.state_dict(), './model.bin')
all_tags = []
dataloader.dataset.data['tags'].apply(all_tags.extend)
top_tags, top_counts = zip(*Counter(all_tags).most_common(MOST_COMMON))
recognized_tags = set(top_tags)
recognized_tags
recognized_tags.add('OTHER')
mlb = MultiLabelBinarizer()
mlb.fit([recognized_tags])
toTensor(mlb.transform([[next(iter(recognized_tags))]]))
"while True:"
sent = input("Input sentence PLSLSLL:")
text_processor = TextPreProcessor(
normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
annotate={"hashtag",# "allcaps",
"elongated", "repeated",
'emphasis', 'censored'},
fix_html=True, # fix HTML tokens
segmenter="twitter",
corrector="twitter",
#unpack_hashtags=True, # perform word segmentation on hashtags
unpack_contractions=True, # Unpack contractions (can't -> can not)
spell_correct_elong=False, # spell correction for elongated words
tokenizer=SocialTokenizer(lowercase=True).tokenize,
dicts=[emoticons]
)
sent = toTensor(_bertweet_encode(pd.Series(sent))).to(device)
model.eval()
res = {}
items = []
with torch.no_grad():
for tag in recognized_tags:
nowRes = model([sent, toTensor(mlb.transform([[tag]])).to(device)])
res[nowRes.item()] = tag
items.append(nowRes.item())
items.sort(reverse=True)
[(k, res[k]) for k in items]
#dict(sorted(res.items(), key=lambda item: item[1]))
from torchviz import make_dot, make_dot_from_trace
graph = make_dot(model(next(iter(dataloader))[0]), params=dict(model.named_parameters()))
graph.format = 'png'
graph.render()
graph.save()
model(next(iter(dataloader))[0]) | StarcoderdataPython |
121542 |
from io import BytesIO
import qrcode.image.svg
# Combined path factory, fixes white space that may occur when zooming
factory = qrcode.image.svg.SvgPathImage


def get_qr_code_svg(data_string, include_xml_declaration=False):
    """Render *data_string* as a QR code and return the SVG markup.

    When include_xml_declaration is False (default), the leading
    '<?xml ...?>' line is stripped so the snippet can be inlined in HTML.
    """
    image = qrcode.make(data_string, image_factory=factory)
    with BytesIO() as buffer:
        image.save(buffer, kind='SVG')
        raw = buffer.getvalue()
    markup = raw.decode('utf-8')
    if include_xml_declaration:
        return markup
    return markup.split('?>\n')[-1]
| StarcoderdataPython |
import turtle
# White spiral on a black background.
turtle.bgcolor("black")
sq = turtle.Turtle()
# Per the turtle docs, speed values outside 0..10 are treated as 0
# (no animation delay), so 20 draws as fast as possible.
sq.speed(20)
sq.color("white")
# Each segment grows by 1 px and turns 91 degrees, so the near-square
# corners precess slowly around the centre, producing a spiral.
for i in range(500):
    sq.forward(i)
    sq.left(91)
| StarcoderdataPython |
1772217 | <reponame>matiaspizarro/pybsd-ezjail
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import unittest
import ipaddress
import unipath
from pybsd import AttachNonMasterError, InvalidUIDError, Jail, Master, System
from ..utils import extract_message
class JailTestCase(unittest.TestCase):
    """Unit tests for pybsd.Jail: attachment to a Master, derived hostname,
    jail type/status/jid, paths and network interfaces.

    setUp builds a fresh Master and attaches one Jail to it; tests that
    need an unattached jail rebuild one from a copy of ``params``.
    """
    # Constructor kwargs for the Master every test attaches to.
    master_params = {
        'name': 'master',
        'hostname': 'master.foo.bar',
        'ext_if': ('re0', ['172.16.17.32/24', '1c02:4f8:0f0:14e6::/110']),
        'int_if': ('eth0', ['192.168.0.0/24', 'fc00:e968:6179::de52:7100:1/110']),
        # 'lo_if': ('lo0', ['127.0.0.1/24', '::1/110']),
        'j_if': ('re0', ['10.0.2.0/24', '10.0.1.0/24', 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1/110', 'fdf8:f53e:61e4::18:1/110']),
        'jlo_if': ('lo1', ['127.0.2.0/24', '127.0.1.0/24', '::0:2:0:0/110', '::0:1:0:0/110']),
    }
    # Constructor kwargs for the Jail under test; 'master' is filled in setUp.
    params = {
        'name': 'system',
        'uid': 12,
        'hostname': 'system.foo.bar',
        'master': None,
        'auto_start': True,
        'jail_class': 'web',
    }
    def setUp(self):
        params = self.params.copy()
        self.master = params['master'] = Master(**self.master_params)
        self.system = Jail(**params)
    def test_bad_master(self):
        # A plain System (no j_if/jlo_if) must be rejected as a master.
        master_params = self.master_params.copy()
        del master_params['j_if']
        del master_params['jlo_if']
        params = self.params.copy()
        params['master'] = system = System(**master_params)
        with self.assertRaises(AttachNonMasterError) as context_manager:
            self.system = Jail(**params)
        self.assertEqual(context_manager.exception.message,
                          "Can't attach `{params[name]}` to `{system.name}`."
                          " `{system.name}` is not a master.".format(system=system, params=params))
    def test_clone_jail(self):
        jail2 = self.system.master.clone_jail(self.system, 'new_jail', 13)
        self.assertNotEqual(self.system, jail2)
    def test_idempotent_attach_jail(self):
        # Re-attaching an already attached jail returns the same object.
        jail2 = self.system.master.attach_jail(self.system)
        self.assertEqual(self.system, jail2)
    def test_no_name(self):
        params = self.params.copy()
        del params['name']
        with self.assertRaises(TypeError):
            Jail(**params)
    def test_name(self):
        self.assertEqual(self.system.name, 'system',
                         'incorrect name')
    def test_no_hostname_wo_master(self):
        params = self.params.copy()
        del params['hostname']
        system = Jail(**params)
        self.assertEqual(system.hostname, None,
                         'incorrect hostname')
    def test_no_hostname_w_master(self):
        # Without an explicit hostname, an attached jail derives
        # '<jail name>.<master hostname>'.
        params = self.params.copy()
        del params['hostname']
        params['master'] = Master(**self.master_params)
        system = Jail(**params)
        self.assertEqual(system.hostname, '{}.{}'.format(params['name'], self.master_params['hostname']),
                         'incorrect hostname')
    def test_hostname_wo_master(self):
        # The explicit hostname only becomes visible once attached.
        params = self.params.copy()
        system = Jail(**params)
        system.name = 'system2'
        # NOTE(review): uid is a string here but an int everywhere else —
        # confirm whether a str uid is intentionally exercised.
        system.uid = '11'
        system.hostname = 'system2.foo.bar'
        self.assertEqual(system.hostname, None,
                         'incorrect hostname')
        self.master.attach_jail(system)
        self.assertEqual(system.hostname, 'system2.foo.bar',
                         'incorrect hostname')
    def test_hostname_w_master(self):
        self.assertEqual(self.system.hostname, 'system.foo.bar',
                         'incorrect hostname')
    def test_base_hostname_unset(self):
        params = self.params.copy()
        del params['hostname']
        system = Jail(**params)
        self.assertEqual(system.base_hostname, None,
                         'incorrect base_hostname')
    def test_base_hostname_set(self):
        self.assertEqual(self.system.base_hostname, 'system.foo.bar',
                         'incorrect base_hostname')
    def test_is_attached_false(self):
        params = self.params.copy()
        system = Jail(**params)
        self.assertFalse(system.is_attached,
                         'incorrect is_attached value')
    def test_is_attached_true(self):
        self.assertTrue(self.system.is_attached,
                        'incorrect is_attached value')
    def test_handler_wo_master(self):
        params = self.params.copy()
        system = Jail(**params)
        self.assertEqual(system.handler, None,
                         'incorrect handler')
    def test_handler_w_master(self):
        self.assertEqual(self.system.handler, self.master.jail_handler,
                         'incorrect handler')
    def test_no_uid(self):
        params = self.params.copy()
        del params['uid']
        with self.assertRaises(TypeError):
            self.system = Jail(**params)
    def test_uid(self):
        self.assertEqual(self.system.uid, 12,
                         'incorrect uid')
    def test_unattached_jail_type(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.jail_type, None,
                         'incorrect jail_type')
    def test_attached_jail_type(self):
        # 'Z' is Master's default jail type (see test_attached_alt_jail_type).
        self.assertEqual(self.system.jail_type, 'Z',
                         'incorrect jail_type')
    def test_attached_alt_jail_type(self):
        # A master subclass can change the default type handed to its jails.
        class DummyMaster(Master):
            default_jail_type = 'D'
        params = self.params.copy()
        params['master'] = DummyMaster(**self.master_params)
        self.system = Jail(**params)
        self.assertEqual(self.system.jail_type, 'D',
                         'incorrect jail_type')
    def test_jail_type(self):
        self.assertEqual(self.system.jail_type, 'Z',
                         'incorrect jail_type')
    def test_no_auto_start(self):
        params = self.params.copy()
        del params['auto_start']
        self.system = Jail(**params)
        self.assertEqual(self.system.auto_start, False,
                         'incorrect auto_start')
    def test_auto_start(self):
        self.assertEqual(self.system.auto_start, True,
                         'incorrect auto_start')
    def test_attached_default_status(self):
        self.assertEqual(self.system.status, 'S',
                         'incorrect status')
    def test_unattached_default_status(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.status, 'D',
                         'incorrect jail_type')
    def test_unattached_class_id(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.jail_class_id, None,
                         'incorrect jail_class_id')
    def test_unattached_jid(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.jid, None,
                         'incorrect jid')
    def test_jid(self):
        self.assertEqual(self.system.jid, 1,
                         'incorrect jid')
    def test_no_master_path(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.path, None,
                         'incorrect path')
    def test_path(self):
        self.assertEqual(self.system.path, unipath.Path('/usr/jails/system'),
                         'incorrect path')
    def test_no_master_ext_if(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.ext_if, None,
                         'incorrect ext_if')
    def test_ext_if_name(self):
        self.assertEqual(self.system.ext_if.name, 're0',
                         'incorrect ext_if name')
    def test_ext_if_ifsv4(self):
        # Jail addresses are derived from the master's j_if networks + uid.
        self.assertSequenceEqual(self.system.ext_if.ifsv4, [ipaddress.IPv4Interface('10.0.2.12/24')],
                                 'incorrect ext_if ifsv4')
        self.assertSequenceEqual(self.system.ext_if.ifsv6, [ipaddress.IPv6Interface('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1/110')],
                                 'incorrect ext_if ifsv6')
    def test_ext_if_failed_assignement(self):
        # ext_if is a read-only property on a jail.
        with self.assertRaises(AttributeError) as context_manager:
            self.system.ext_if = ('re0', ['8.8.8.8/24'])
        self.assertEqual(extract_message(context_manager), u"can't set attribute")
    def test_no_master_lo_if(self):
        params = self.params.copy()
        del params['master']
        self.system = Jail(**params)
        self.assertEqual(self.system.lo_if, None,
                         'incorrect lo_if')
    def test_lo_if_name(self):
        self.assertEqual(self.system.lo_if.name, 'lo1',
                         'incorrect lo_if name')
    def test_lo_if_ifsv4(self):
        self.assertSequenceEqual(self.system.lo_if.ifsv4, [ipaddress.IPv4Interface('127.0.2.12/24')],
                                 'incorrect lo_if ifsv4')
        self.assertSequenceEqual(self.system.lo_if.ifsv6, [ipaddress.IPv6Interface('::0:2:12:1/110')],
                                 'incorrect lo_if ifsv6')
    def test_lo_if_failed_assignement(self):
        with self.assertRaises(AttributeError) as context_manager:
            self.system.lo_if = ('re0', ['8.8.8.8/24'])
        self.assertEqual(extract_message(context_manager), u"can't set attribute")
    def test_invalid_uid(self):
        params = self.params.copy()
        params['uid'] = 0
        with self.assertRaises(InvalidUIDError) as context_manager:
            self.system = Jail(**params)
        # NOTE(review): the message says ">= 0" yet uid 0 is rejected —
        # this asserts the library's current (inconsistent) wording.
        self.assertEqual(context_manager.exception.message,
                          "`system` on `None`: uid must be an integer >= 0.")
| StarcoderdataPython |
3371763 | #!/usr/bin/env python3
#
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from lambdas.common import all_execution_events
if len(sys.argv) < 2:
    print('Usage: %s <execution-arn>' % sys.argv[0])
    quit(1)
# All execution events keyed by id, so previousEventId chains can be walked.
events = {e['id']: e for e in all_execution_events(sys.argv[1])}
# Pair every TaskStateExited event with the TaskStateEntered event that
# started the same task by following previousEventId backwards.
finished = {}
for finished_event in events.values():
    if finished_event['type'] == 'TaskStateExited':
        started_event = finished_event
        while started_event['type'] != 'TaskStateEntered':
            started_event = events[started_event['previousEventId']]
        finished[started_event['id']] = started_event, finished_event
# Entered-but-never-exited tasks are still running (or were aborted).
# NOTE(review): the comprehension variable `id` shadows the builtin.
unfinished = {
    id: e for id, e in events.items()
    if id not in finished and e['type'] == 'TaskStateEntered'
}
def output(s, f, prev):
    """Print one report line for a task.

    s/f are the TaskStateEntered / TaskStateExited events for the task
    (f is None while the task is still running); prev is the event just
    before the exit, used only to pick a success/failure color.
    """
    details = s['stateEnteredEventDetails']
    name = details['name']
    # Skip noise tasks that would clutter the report.
    if name == 'HealthCheck' or name.startswith('PrepareTo'):
        return
    out = [name]
    # Renamed from `input`, which shadowed the builtin.
    payload = json.loads(details['input'])
    if isinstance(payload, dict):
        out.append(payload.get('version'))
        out.append(payload.get('platform'))
    if f:
        # Task duration; rstrip('0') trims trailing fractional zeros.
        elapsed = f['timestamp'] - s['timestamp']
        out.append('(' + str(elapsed).rstrip('0') + ')')
    prefix = ''
    if prev:
        # ANSI colors: green for success, red + marker for failure.
        if prev['type'].endswith('Succeeded'):
            prefix = '\033[32m'
        elif prev['type'].endswith('Failed'):
            prefix = '\033[31mFAILED: '
    print(' ' + prefix + ' '.join(o for o in out if o) + '\033[0m')
# Report finished tasks first (colored by the event just before the exit),
# then tasks that entered but never exited.
if finished:
    print('Finished tasks:')
    for s, f in finished.values():
        output(s, f, events[f['previousEventId']])
    print()
if unfinished:
    print('Unfinished tasks:')
    for s in unfinished.values():
        output(s, None, None)
    print()
| StarcoderdataPython |
1657842 | <filename>micropython_gy521/gy521.py
# -*- coding: utf-8 -*-
from struct import pack
"""Main module."""
# SDA=Pin(5)
# SCL=Pin(16)
# Int = None
DEFAULT_ADDR = 0x68
REG_WHO_AM_I = 0x75
class gy521:
    """Minimal MicroPython driver for a GY-521 (MPU-6050) breakout over I2C."""
    def __init__(self):
        # Imported lazily so the module can be parsed off-device.
        from machine import Pin, I2C
        # Pins 16 (SCL) / 5 (SDA) are hard-coded — presumably for a specific
        # ESP board layout; confirm against the wiring before reuse.
        self.bus = I2C(scl=Pin(16), sda=Pin(5), freq=400000)
        self.addr = DEFAULT_ADDR
    # def init(self, SCL=None, SDA=None, INT=None, addr=0x68):
    #     from machine import Pin,I2C
    #     (self.SCL, self.SDA, self.INT, self.addr) = (SCL, SDA, INT, addr)
    #     self.bus = I2C(scl=Pin(SCL), sda=Pin(SDA), freq=400000)
    def ping(self):
        # WHO_AM_I register must read back 0x68 when the chip is present.
        iam = self.bus.readfrom_mem( self.addr, REG_WHO_AM_I, 1 )
        return (iam == pack('B', 0x68))
    #
    # def deinit(self):
    #     self.bus.deinit()
| StarcoderdataPython |
61611 | <gh_stars>0
from rest_framework import viewsets, status, filters
from rest_framework.response import Response
from rest_framework.decorators import action
from django.http import Http404, HttpResponse
from django.core import serializers
import django_filters.rest_framework
from .serializers import OrderSerializer, OrderSpecifiedNIPSerializer
from .models import Order, Contractor
import json
class OrderViewSet(viewsets.ModelViewSet):
    """
    A viewset for viewing and editing order instances.

    Supports filtering on the listed order fields and ordering on the date
    and value fields.
    """
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    # NOTE(review): 'put' is omitted — presumably to force partial updates
    # via PATCH only; confirm that full updates are intentionally disabled.
    http_method_names = ['get', 'post', 'delete', 'patch', 'head']
    filter_backends = [django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter]
    filterset_fields = ['contractor_id', 'implementation_date', 'data_of_placing_the_order', 'order_value', 'status']
    ordering_fields = ['implementation_date', 'data_of_placing_the_order', 'order_value']
class OrderSpecifiedNIPViewSet(viewsets.ViewSet):
    """
    A viewset that returns all orders whose contractor has the posted
    NIP (tax identification number), as JSON.  404 when none match.
    """
    # NOTE(review): 'create' is already the default POST action of a
    # ViewSet; decorating it with @action is redundant and may register a
    # conflicting extra route — confirm the intended URL layout.
    @action(detail = False, methods = ['post'])
    def create(self, request, *args, **kwargs):
        # Validates the payload; raises a 400 response on a missing/invalid NIP.
        serializer = OrderSpecifiedNIPSerializer(data = request.data)
        serializer.is_valid(raise_exception = True)
        nip = serializer.validated_data['nip']
        orders = Order.objects.filter(contractor_id__nip = nip)
        if not orders:
            raise Http404
        # NOTE(review): django.core serializers produce a different JSON
        # shape (model/pk/fields) than the DRF serializers used elsewhere
        # in this module — confirm consumers expect that format.
        data = serializers.serialize('json', orders)
        return HttpResponse(data, content_type="application/json")
| StarcoderdataPython |
28701 | import sensor, image, time, pyb
from pid import PID
from pyb import Servo
from pyb import UART

# UART 3 at 19200 baud; the main loop reports detections over this link.
uart = UART(3, 19200)
usb = pyb.USB_VCP()
led_red = pyb.LED(1) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
led_green = pyb.LED(2)

pan_pid = PID(p=0.07, i=0, imax=90) # use this PID when running offline or with image transfer disabled
tilt_pid = PID(p=0.05, i=0, imax=90) # use this PID when running offline or with image transfer disabled
#pan_pid = PID(p=0.1, i=0, imax=90)# use this PID when debugging online
#tilt_pid = PID(p=0.1, i=0, imax=90)# use this PID when debugging online

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)

# NOTE(review): everything below duplicates the initialisation above —
# this looks like two scripts pasted together.  The uart object from the
# first section is still used by the main loop further down, so the
# duplicate cannot simply be deleted without checking.
import sensor, image, time, pyb
from pid import PID
from pyb import Servo
from pyb import Pin

usb = pyb.USB_VCP()
pan_servo=Servo(1)
tilt_servo=Servo(2)

led_red = pyb.LED(1) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
led_green = pyb.LED(2)

pan_pid = PID(p=0.07, i=0, imax=90) # use this PID when running offline or with image transfer disabled
tilt_pid = PID(p=0.05, i=0, imax=90) # use this PID when running offline or with image transfer disabled
#pan_pid = PID(p=0.1, i=0, imax=90)# use this PID when debugging online
#tilt_pid = PID(p=0.1, i=0, imax=90)# use this PID when debugging online
p_out = Pin('P7', Pin.OUT_PP)# configure p_out as a push-pull output pin

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Detection flags: set to 1 once the corresponding colour has been found.
dete_red = 0
dete_green = 0
# Sweep the pan servo and scan frames until one red and one green
# rectangle have each been detected once.
while(dete_green != 1 or dete_red != 1):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)
    pan_servo.angle(pan_servo.angle()+2)
    print(pan_servo.angle())
    #tilt_servo.angle(tilt_servo.angle()+2)
    #print(tilt_servo.angle())
    for r in img.find_rects(threshold = 10000):
        # for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
        # print(r)
        area = (r.x(), r.y(), r.w(), r.h())
        statistics = img.get_statistics(roi=area)# colour statistics of the pixels inside the rect
        # print(statistics)
        # Classify the rect by the mode of its LAB colour statistics.
        if 17<statistics.l_mode()<87 and 30<statistics.a_mode()<123 and -49<statistics.b_mode()<50 and dete_red == 0:#if the circle is red
            img.draw_rectangle(area, color = (255, 0, 0))
            dete_red = 1 # mark the red target as found (outlined with a red box)
            print("red")
            uart.write("red")
            j = 3
            while(j):
                p_out.high()# drive p_out high
                j=j-1
            p_out.low()
            i = 5
            while(i):
                led_red.on()
                time.sleep(1000)# NOTE(review): time.sleep() takes seconds on MicroPython, so this waits 1000 s per blink; time.sleep_ms()/pyb.delay() may have been intended — confirm
                led_red.off()
                i=i-1
        elif 24<statistics.l_mode()<48 and -48<statistics.a_mode()<-24 and -1<statistics.b_mode()<49 and dete_green == 0:
            img.draw_rectangle(area, color = (0, 255, 0))
            dete_green = 1
            print("green")
            uart.write("green")
            j = 3
            while(j):
                p_out.high()# drive p_out high
                j=j-1
            p_out.low()
            i = 5
            while(i):
                led_green.on()
                time.sleep(1000)# NOTE(review): see the note above — likely meant milliseconds
                led_green.off()
                i=i-1
    # print("FPS %f" % clock.fps())
| StarcoderdataPython |
40959 | from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import pickle as pi
class Classifier:
    """Trains a selection of sklearn classifiers and collects their results.

    Results accumulate in ``self.ergebnis`` as
    ``[model name, test accuracy, fitted estimator]`` triples across calls.
    """

    def __init__(self):
        # Accumulated [name, accuracy, estimator] triples for all models.
        self.ergebnis = []

    def train_models(self, X_train, X_test, y_train, y_test, models):
        """Fit every model named in *models* and record its test accuracy.

        Args:
            X_train, X_test, y_train, y_test: pre-split features and labels.
            models: iterable of model keys out of
                {'knn', 'dt', 'rf', 'svm', 'mlp'}; unknown keys are ignored.

        Returns:
            The accumulated ``ergebnis`` list.
        """
        for self.model in models:
            # -----------------------
            # k-nearest neighbours
            # -----------------------
            if self.model == 'knn':
                # Evaluate k = 1..39 on the test split to find the best k.
                error = []
                for i in range(1, 40):
                    knn = KNeighborsClassifier(n_neighbors=i)
                    knn.fit(X_train, y_train)
                    pred_i = knn.predict(X_test)
                    error.append(np.mean(pred_i != y_test))
                # BUG FIX: the error curve used to be computed and then
                # ignored (n_neighbors was hard-coded to 7); pick the k
                # with the lowest test error instead.
                best_k = int(np.argmin(error)) + 1
                knnclf = KNeighborsClassifier(n_neighbors=best_k)
                knnclf.fit(X_train, y_train)
                score = knnclf.score(X_test, y_test)
                self.ergebnis.append(['knn-classifier', score, knnclf])
            # -----------------------
            # Decision tree (grid-searched)
            # -----------------------
            elif self.model == 'dt':
                # Exhaustive 5-fold grid search over criterion/depth/min-split.
                dt = DecisionTreeClassifier()
                tree_para = {'criterion': ['gini', 'entropy'],
                             'max_depth': [i for i in range(1, 20)],
                             'min_samples_split': [i for i in range(2, 20)]}
                grd_clf = GridSearchCV(dt, tree_para, cv=5)
                grd_clf.fit(X_train, y_train)
                # Keep the best estimator found by the grid search.
                dt_clf = grd_clf.best_estimator_
                score = dt_clf.score(X_test, y_test)
                self.ergebnis.append(['decision tree', score, dt_clf])
            # -----------------------
            # Random forest
            # -----------------------
            elif self.model == 'rf':
                rf = RandomForestClassifier(n_estimators=100)
                rf.fit(X_train, y_train)
                score = rf.score(X_test, y_test)
                self.ergebnis.append(['random forest', score, rf])
            # -----------------------
            # Support vector machine
            # -----------------------
            elif self.model == 'svm':
                svm = SVC(kernel='poly')
                svm.fit(X_train, y_train)
                score = svm.score(X_test, y_test)
                self.ergebnis.append(['support vector machine', score, svm])
            # -----------------------
            # Multi-layer perceptron
            # -----------------------
            elif self.model == 'mlp':
                mlp = MLPClassifier(hidden_layer_sizes=[100, 100], max_iter=5000, solver='sgd',
                                    learning_rate='adaptive', learning_rate_init=0.01,
                                    n_iter_no_change=200, early_stopping=True)
                mlp.fit(X_train, y_train)
                score = mlp.score(X_test, y_test)
                self.ergebnis.append(['multi-layer perceptron', score, mlp])
                print("iterations: {}; layers: {}; loss: {}".format(mlp.n_iter_, mlp.n_layers_, mlp.loss_))
        return self.ergebnis
4819232 | import os
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
# NOTE(review): BASE_DIR and PROJECT_DIR are presumably defined in the
# settings module this fragment is combined with — confirm before reuse.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, "static"),
    os.path.join(BASE_DIR, "assets"),
)
# Hashed filenames via the static-files manifest (cache busting).
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
| StarcoderdataPython |
3216538 | from common import *
# Environment-specific overrides layered on top of the shared settings
# pulled in by the star-import above.
DEPLOYMENT_LEVEL = "hegenv"
STANDALONE = True
# NOTE(review): DEBUG=True and "*" in ALLOWED_HOSTS are development-only
# settings — this file must not be used for a production deployment.
DEBUG = True
RELOAD_TEMPLATES = True
ALLOWED_HOSTS = ["*","192.168.127.12","localhost","127.0.0.1","172.16.58.3"]
# Loads secrets for this deployment level (defined in the common module).
injectSecrets(DEPLOYMENT_LEVEL)
INSTALLED_APPS = [
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "ui_tags",
    "ezidapp",
]
| StarcoderdataPython |
1724190 | from __future__ import print_function
import sys
import toolshed as ts
import itertools as it
from operator import itemgetter
from concurrent.futures import ProcessPoolExecutor
# Input BED files, one per family, given on the command line.
fam_beds = sys.argv[1:]

# Candidate-region thresholds used by find_gcs below: the middle group
# must have min_sites..max_sites rows, each flanking group at least
# min_flank rows, and the flank-to-flank span under max_length (units per
# the BED coordinates — presumably base pairs; confirm).
min_sites = 2
min_flank = 20
max_sites = 20
max_length = 3000

# Header for the tab-separated candidate lines produced by find_gcs.
print("#chrom\tstart\tend\tsites-left\tsites-in\tsites-right\tparent_id\tfamily_id\tab\tdepth\tfile")
def get_ab(grp):
    """Comma-join the allele-balance strings of *grp*, dropping 'nan' tokens.

    Each row's 'family_allele_balance' field is a '|'-separated list; the
    'nan' entries are filtered out before the remaining pieces are
    re-joined with '|'.
    """
    def clean(row):
        tokens = row['family_allele_balance'].split("|")
        return "|".join(t for t in tokens if t != 'nan')

    return ",".join(clean(row) for row in grp)
def find_gcs(fam_bed):
    """Scan one family BED file for candidate gene-conversion intervals.

    Rows are grouped by consecutive runs of their 'same' field.  A middle
    group is reported once its two neighbours are known, provided it has
    min_sites..max_sites rows, both flanking groups have at least
    min_flank rows, and the distance between the flanking groups'
    boundaries is under max_length.

    Returns:
        list of tab-separated output lines matching the header printed at
        module level.
    """
    last_grp = None
    # ends[i]/nsites[i]: end coordinate and row count of the i-th group.
    ends = []
    nsites = []
    res = []
    for _, grp in it.groupby(ts.reader(fam_bed), itemgetter('same')):
        grp = list(grp)
        ends.append(int(grp[-1]['end']))
        nsites.append(len(grp))
        # cant find until we have at least 3 groups.
        if len(ends) < 3:
            last_grp = grp
            continue
        start = int(grp[0]['start'])
        # check num sites, distance between flanking regions and number of
        # sites in both flanks; last_grp is the middle group here.
        if min_sites <= len(last_grp) <= max_sites and (start - ends[-3]) < max_length and nsites[-1] >= min_flank and nsites[-3] >= min_flank:
            d = last_grp[0]
            res.append("\t".join(map(str, (d['chrom'], ends[-3], start, nsites[-3],
                nsites[-2], nsites[-1], d['parent_id'], d['family_id'],
                get_ab(last_grp),
                ",".join(x['family_depth'] for x in last_grp),
                fam_bed))))
            assert nsites[-2] == len(last_grp)
        # set it here, but still need to check if it's bounds are small enough.
        last_grp = grp
    return res
# Fan the per-family BED files out over 10 worker processes and stream
# any detected candidate intervals to stdout as they arrive.
with ProcessPoolExecutor(10) as p:
    for lines in p.map(find_gcs, (f for f in fam_beds)):
        if len(lines) > 0:
            print("\n".join(lines))
            sys.stdout.flush()
| StarcoderdataPython |
3238987 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
1607. Taxi
Time limit: 0.5 second
Memory limit: 64 MB
[Description]
Petr likes going by taxi. For him, it is not only the pleasure of a fast and
comfortable ride, but also the opportunity to bargain with the driver over
the fare. The bargaining between Petr and taxi drivers always follows the same
scheme:
— To the airport! I pay 150 roubles.
— No, I won't take you for 150. Let's go for 1000.
— Are you crazy? I haven't got that much on me! Ok, let it be 200.
— Are you laughing? I agree to 900.
— Well, I'll give 250.
— Guy, do you know how much gas is? Pay 800 and off we go!
…
Such a dialog continues until they agree on the fare. Petr always increases
his amount by the same number, and the taxi driver decreases it in the same way.
The driver would not ask a sum that is less than that offered by Petr. In this
case, he will agree with Petr's offer. Petr will act similarly.
[Input]
The single input line contains four integer numbers: the initial Petr's offer a,
Petr's raise to his offer b, the initial fare required by the driver c, and the
driver's reduction of his fare d; 1 ≤ a, b, c, d ≤ 10000.
[Output]
Output the amount of money that Petr will pay for the ride.
'''
import sys;
import math;
def calc():
a, b, c, d = sys.stdin.readline().strip('\r\n').split(' ')
a = int(a)
b = int(b)
c = int(c)
d = int(d)
turn = 0 # 0:Petr, 1:Driver
r = 0
while (a < c):
if (turn):
if (c - d > a):
c = c - d
turn = 0
else:
r = a
break
else:
if (a + b < c):
a = a + b
turn = 1
else:
r = c
break
if (r == 0):
r = a
print r
if __name__ == '__main__':
calc()
| StarcoderdataPython |
136535 | <filename>src/generative_playground/utils/fit.py<gh_stars>1-10
import torch
from torch.autograd import Variable
from generative_playground.utils.gpu_utils import to_gpu
def count_parameters(model):
    """Count the trainable (requires_grad) parameters of *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def to_variable(x):
    """Recursively convert *x* into a (GPU-resident) torch Variable.

    Tuples are converted element-wise; numpy arrays become GPU tensors via
    to_gpu; anything not already a Variable is wrapped in one; Variables
    pass through unchanged.

    NOTE(review): relies on the legacy torch.autograd.Variable API and on
    string-based type checks ('ndarray'/'Variable' in str(type(x))).  In
    modern torch, Variable is an alias of Tensor, so the third branch
    wraps plain tensors — confirm behaviour before porting.
    """
    if type(x) == tuple:
        return tuple([to_variable(xi) for xi in x])
    elif 'ndarray' in str(type(x)):
        return to_gpu(torch.from_numpy(x))
    elif 'Variable' not in str(type(x)):
        return Variable(x)
    else:
        return x
# The fit function is a generator, so one can call several of these in
# the sequence one desires
def fit(train_gen=None,
        valid_gen=None,
        model=None,
        optimizer=None,
        scheduler=None,
        epochs=None,
        loss_fn=None,
        batches_to_valid=9,
        grad_clip=5,
        # metric_monitor=None, # TODO: legacy, remove this in upstream code
        # checkpointer=None,
        callbacks=[]
        ):
    """Generator that trains *model*, yielding the loss of every training batch.

    Training and validation batches are interleaved: out of every
    ``batches_to_valid + valid_batches`` steps, the first ``batches_to_valid``
    use ``train_gen`` (with backprop) and the rest use ``valid_gen``.

    Args:
        train_gen, valid_gen: sized iterables of model inputs.
        model: the model to train; called directly on a batch.
        optimizer: torch optimizer stepped after every training batch.
        scheduler: LR scheduler; stepped once per epoch unless it is a
            ReduceLROnPlateau (detected by class name), in which case the
            per-epoch step is skipped here.
        epochs: number of passes over the interleaved batch schedule.
        loss_fn: callable mapping model outputs to a scalar loss; must also
            expose train()/eval() like a module.
        batches_to_valid: training batches per train/valid cycle.
        grad_clip: max gradient norm, or None to disable clipping.
        callbacks: optional callables invoked after every batch.
            NOTE(review): mutable default ([]) — harmless while it is only
            iterated, but risky if anything ever mutates it.

    Yields:
        float: loss of each training batch (validation losses not yielded).
    """
    # callbacks += [metric_monitor]
    print('setting up fit...')
    print('Number of model parameters:', count_parameters(model))
    # Validation batches per cycle, scaled so both generators are consumed
    # at roughly proportional rates; at least 1.
    valid_batches = max(1, int(batches_to_valid * len(valid_gen) / len(train_gen)))
    # ReduceLROnPlateau needs a metric, so it cannot be stepped per epoch.
    if 'ReduceLROnPlateau' in str(type(scheduler)):
        step_scheduler_after_val = True
    else:
        step_scheduler_after_val = False
    for epoch in range(epochs):
        print('epoch ', epoch)
        if not step_scheduler_after_val:
            scheduler.step()
        train_iter = train_gen.__iter__()
        valid_iter = valid_gen.__iter__()
        # Tracks exhaustion of each iterator, keyed by the `train` flag.
        done = {True: False, False: False}
        for n in range(len(train_gen) + len(valid_gen)):
            # First `batches_to_valid` steps of each cycle are training steps.
            if n % (batches_to_valid + valid_batches) < batches_to_valid:
                train = True
                data_iter = train_iter
                model.train()
                loss_fn.train()
            else:
                # NOTE(review): this no_grad block only wraps the mode
                # switches below — it is exited before the forward pass,
                # so validation batches still build a graph; confirm intent.
                with torch.no_grad():
                    train = False
                    data_iter = valid_iter
                    model.eval()
                    loss_fn.eval()
            # get the next pair (inputs, targets)
            try:
                # inputs_, targets_ = next(data_iter)
                inputs = next(data_iter)
            except StopIteration:
                # make sure we get all data from both iterators
                done[train] = True
                if done[True] and done[False]:
                    break
                else:
                    continue
            # inputs = inputs_; # to_variable(inputs_)
            # targets = targets_; # to_variable(targets_)
            outputs = model(inputs)
            loss = loss_fn(outputs)#, targets)
            this_loss = loss.data.item()
            if train:
                optimizer.zero_grad()
                loss.backward()
                if grad_clip is not None:
                    # Clip only the trainable parameters.
                    nice_params = filter(lambda p: p.requires_grad, model.parameters())
                    torch.nn.utils.clip_grad_norm_(nice_params, grad_clip)
                optimizer.step()
            else:
                pass
                # TODO: refactor this so the scheduler is part of checkpointer, to fit the
                # # general callback pattern?
                # avg_loss = checkpointer(None, model, outputs, loss_fn, loss)
                # if step_scheduler_after_val and avg_loss is not None:
                #     scheduler.step(avg_loss)
            for callback in callbacks:
                if callback is not None:
                    callback(inputs, model, outputs, loss_fn, loss)
            if train:
                yield this_loss
| StarcoderdataPython |
3253987 | import murraylab_tools.biotek as mt_biotek
import os

# Paths to the example plate-reader data shipped with murraylab_tools.
# NOTE(review): hard-coded absolute Windows paths — portable only on this
# machine; confirm before running elsewhere.
gitexamplepath = "C:\\Users\\Andrey\\Documents\\GitHub\\"+\
    "murraylab_tools\\examples\\biotek_examples\\"
data_filename = gitexamplepath+\
    "180515_big384wellplate.csv"
supplementary_filename = gitexamplepath+\
    "supp_inductiongrid.csv"

# One-off tidying step; commented out because the tidy CSV already exists.
#mt_biotek.tidy_biotek_data(data_filename, supplementary_filename, convert_to_uM = False)

import pandas as pd
tidy_filename = gitexamplepath+"180515_big384wellplate_tidy.csv"
df = pd.read_csv(tidy_filename)
#df.head()
#df.head()
#gdf = df.groupby(["Channel", "Gain", "Well"])
#gdf.head()
#df[df.Channel == "GFP"].head()

# Normalise fluorescence readings by the OD channel.
normdf = mt_biotek.normalize(df,norm_channel= "OD")
#normdf[normdf.Gain==100].head()
# Average readings over the 15-17 hour window.
end_df = mt_biotek.window_averages(normdf,15,17,"hours")
end_df.Excitation.unique()
# Slice out the pQi41 construct at gain 100 and 250 aTC.
slicedf = end_df[(end_df.Gain == 100 )&(end_df.Construct=="pQi41")&(end_df.aTC==250)]
end_df[(end_df.Gain == 100 )&(end_df.Construct=="pQi41")&(end_df.aTC==250)].head()
| StarcoderdataPython |
1609597 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetHubRouteTableResult',
'AwaitableGetHubRouteTableResult',
'get_hub_route_table',
]
# NOTE: generated by the Pulumi SDK generator — prefer regenerating over
# hand-editing (see the file header).
@pulumi.output_type
class GetHubRouteTableResult:
    """
    RouteTable resource in a virtual hub.

    Result type of get_hub_route_table(); all fields are read-only
    properties populated from the invoke response.
    """
    def __init__(__self__, associated_connections=None, etag=None, id=None, labels=None, name=None, propagating_connections=None, provisioning_state=None, routes=None, type=None):
        # Generated validate-then-set pattern: each argument is type-checked
        # and stored via pulumi.set for the @pulumi.output_type machinery.
        if associated_connections and not isinstance(associated_connections, list):
            raise TypeError("Expected argument 'associated_connections' to be a list")
        pulumi.set(__self__, "associated_connections", associated_connections)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if labels and not isinstance(labels, list):
            raise TypeError("Expected argument 'labels' to be a list")
        pulumi.set(__self__, "labels", labels)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if propagating_connections and not isinstance(propagating_connections, list):
            raise TypeError("Expected argument 'propagating_connections' to be a list")
        pulumi.set(__self__, "propagating_connections", propagating_connections)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if routes and not isinstance(routes, list):
            raise TypeError("Expected argument 'routes' to be a list")
        pulumi.set(__self__, "routes", routes)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="associatedConnections")
    def associated_connections(self) -> Sequence[str]:
        """
        List of all connections associated with this route table.
        """
        return pulumi.get(self, "associated_connections")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def labels(self) -> Optional[Sequence[str]]:
        """
        List of labels associated with this route table.
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="propagatingConnections")
    def propagating_connections(self) -> Sequence[str]:
        """
        List of all connections that advertise to this route table.
        """
        return pulumi.get(self, "propagating_connections")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the RouteTable resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def routes(self) -> Optional[Sequence['outputs.HubRouteResponse']]:
        """
        List of all routes.
        """
        return pulumi.get(self, "routes")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetHubRouteTableResult(GetHubRouteTableResult):
    """Awaitable variant of GetHubRouteTableResult (generated)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is
        # what `await` requires; it immediately returns a plain result copy.
        if False:
            yield self
        return GetHubRouteTableResult(
            associated_connections=self.associated_connections,
            etag=self.etag,
            id=self.id,
            labels=self.labels,
            name=self.name,
            propagating_connections=self.propagating_connections,
            provisioning_state=self.provisioning_state,
            routes=self.routes,
            type=self.type)
def get_hub_route_table(resource_group_name: Optional[str] = None,
                        route_table_name: Optional[str] = None,
                        virtual_hub_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHubRouteTableResult:
    """
    RouteTable resource in a virtual hub.

    Looks up an existing HubRouteTable via the azure-native provider and
    returns its state as an awaitable result object.

    :param str resource_group_name: The resource group name of the VirtualHub.
    :param str route_table_name: The name of the RouteTable.
    :param str virtual_hub_name: The name of the VirtualHub.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['routeTableName'] = route_table_name
    __args__['virtualHubName'] = virtual_hub_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; `typ=` tells Pulumi how to deserialise.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200601:getHubRouteTable', __args__, opts=opts, typ=GetHubRouteTableResult).value

    return AwaitableGetHubRouteTableResult(
        associated_connections=__ret__.associated_connections,
        etag=__ret__.etag,
        id=__ret__.id,
        labels=__ret__.labels,
        name=__ret__.name,
        propagating_connections=__ret__.propagating_connections,
        provisioning_state=__ret__.provisioning_state,
        routes=__ret__.routes,
        type=__ret__.type)
| StarcoderdataPython |
53279 | import math
from typing import Union, Dict
import numpy as np
import torch
import torchvision.transforms as transforms
from torchvision.transforms import InterpolationMode
import utility as utils
import utility.color as color
class Resolution:
    """Class representing the width and height of an image.

    BUG FIX: this docstring previously sat *after* ``__init__`` in the
    class body, where it was a discarded string expression rather than the
    class docstring (``Resolution.__doc__`` was None).
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def scale_to_height(self, height: int) -> "Resolution":
        """Scales this resolution while maintaining the aspect ratio.

        Args:
            height (int): The desired new height

        Returns:
            a resolution with the specified height but the same aspect ratio
        """
        # Integer width; rounds toward zero via floor division.
        width = self.width * height // self.height
        return Resolution(width, height)

    def square(self) -> "Resolution":
        """Returns a square version of this resolution (side = min(w, h))."""
        size = min(self.width, self.height)
        return Resolution(size, size)
class ImageWrap:
    """Mutable wrapper around an image plus a colour-space tag.

    NOTE(review): reorder() uses ndarray-style .transpose(1, 2, 0) while
    reshape() uses tensor-style .view()/.permute() — the wrapped image is
    presumably a numpy array for the former and a torch tensor for the
    latter; confirm against callers.
    """
    def __init__(self, img, space="bgr"):
        # img: the wrapped image; space: colour-space tag, e.g. "bgr".
        self.img = img
        self.space = space

    def reorder(self, input_order='HWC'):
        """Reorder ``self.img`` to 'HWC' order in place and return it.

        If ``self.img`` is (h, w), it becomes (h, w, 1);
        if *input_order* is 'CHW', (c, h, w) becomes (h, w, c);
        an 'HWC' image is returned unchanged.

        Args:
            input_order (str): 'HWC' or 'CHW' describing the current layout
                of ``self.img``.  Default: 'HWC'.

        Returns:
            the reordered image (also stored back on ``self.img``).

        Raises:
            ValueError: for any other *input_order*.
        """
        if input_order not in ['HWC', 'CHW']:
            raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'")
        if len(self.img.shape) == 2:
            self.img = self.img[..., None]
        if input_order == 'CHW':
            self.img = self.img.transpose(1, 2, 0)
        return self.img

    def reshape(self, target_shape):
        """Fold a flat (batch, pixels, 3) tensor into NCHW in place.

        The spatial size is scaled from *target_shape* = (ih, iw) by
        s = sqrt(pixels / (ih*iw)), giving (batch, 3, ih*s, iw*s).
        """
        ih, iw = target_shape
        s = math.sqrt(self.img.shape[1] / (ih * iw))
        shape = [self.img.shape[0], round(ih * s), round(iw * s), 3]
        self.img = self.img.view(*shape) \
            .permute(0, 3, 1, 2).contiguous()
        return self.img
def calc_dataset_stats(train_data):
    """Compute per-channel (RGB) mean and std over an image dataset.

    Makes two passes over *train_data*, a sized iterable of batches shaped
    like ``{"images": [CHW tensors]}``: one for the channel means, one for
    the standard deviations.

    BUG FIX: ``data_stats`` was previously used without ever being
    initialised (NameError), the running totals were not reset between the
    mean and std passes (corrupting the std), and ``tqdm`` was referenced
    without being imported in this module.  The unused ``dataset_size``
    (which required a ``.targets`` attribute) was dropped.

    Returns:
        dict with keys ``"mean"`` and ``"std"``, each a length-3 tensor.
    """
    data_stats = {"mean": torch.zeros(3), "std": torch.zeros(3)}
    total = {"R": 0.0, "G": 0.0, "B": 0.0}
    total_pixel = 0
    # First pass: per-channel sums for the means.
    for i, batch in enumerate(train_data):
        for img in batch["images"]:
            total_pixel = total_pixel + img.shape[1] * img.shape[2]
            total["R"] = total["R"] + torch.sum(img[0, :, :])
            total["G"] = total["G"] + torch.sum(img[1, :, :])
            total["B"] = total["B"] + torch.sum(img[2, :, :])
        if i > len(train_data):
            break
    data_stats["mean"][0] = total["R"] / total_pixel
    data_stats["mean"][1] = total["G"] / total_pixel
    data_stats["mean"][2] = total["B"] / total_pixel
    # Second pass: squared deviations from the means (totals reset first).
    total = {"R": 0.0, "G": 0.0, "B": 0.0}
    for i, batch in enumerate(train_data):
        for img in batch["images"]:
            total["R"] = total["R"] + torch.sum((img[0, :, :] - data_stats["mean"][0]) ** 2)
            total["G"] = total["G"] + torch.sum((img[1, :, :] - data_stats["mean"][1]) ** 2)
            total["B"] = total["B"] + torch.sum((img[2, :, :] - data_stats["mean"][2]) ** 2)
        if i > len(train_data):
            break
    data_stats["std"][0] = torch.sqrt(total["R"] / total_pixel)
    data_stats["std"][1] = torch.sqrt(total["G"] / total_pixel)
    data_stats["std"][2] = torch.sqrt(total["B"] / total_pixel)
    print(f'\nmeans:\n{data_stats["mean"]},std:\n{data_stats["std"]}')
    return data_stats
def make_img_coeff(data_norm):
    """Prepare per-channel normalisation coefficients as broadcastable tensors.

    *data_norm* maps 'inp'/'gt' to {'sub': ..., 'div': ...}.  None yields
    identity coefficients (sub 0, div 1).  'inp' coefficients are reshaped
    to (1, C, 1, 1) for NCHW inputs and 'gt' ones to (1, 1, C); tensors are
    moved to the GPU when available.
    """
    if data_norm is None:
        data_norm = {
            'inp': {'sub': 0, 'div': 1},
            'gt': {'sub': 0, 'div': 1}
        }
    try:
        result = data_norm.copy()
        # Convert every leaf value to a float tensor.
        result = utils.dict_apply(result,
                                  lambda x: utils.dict_apply(x,
                                                      lambda y: torch.FloatTensor(y))
                                  )
        result['inp'] = utils.dict_apply(result['inp'],
                                         lambda x: x.view(1, -1, 1, 1))
        result['gt'] = utils.dict_apply(result['gt'],
                                        lambda x: x.view(1, 1, -1))
        if torch.cuda.is_available():
            result = utils.dict_apply(result,
                                      lambda x: utils.dict_apply(x,
                                                          lambda y: y.cuda())
                                      )
        return result
    except Exception as e:
        # NOTE(review): any failure (e.g. scalar values that FloatTensor
        # rejects) falls back to an un-tensorised copy; the error is only
        # printed, never raised — confirm this best-effort behaviour.
        print(f"Img coeff fail:\n{e}")
        return data_norm.copy()
def reshape(pred, target_shape):
    """Fold a flat (batch, pixels, 3) prediction into NCHW layout.

    The spatial size is derived from *target_shape* = (ih, iw) via
    s = sqrt(pixels / (ih*iw)), producing (batch, 3, round(ih*s), round(iw*s)).
    """
    ih, iw = target_shape
    scale = math.sqrt(pred.shape[1] / (ih * iw))
    batch = pred.shape[0]
    folded = pred.view(batch, round(ih * scale), round(iw * scale), 3)
    return folded.permute(0, 3, 1, 2).contiguous()
def reorder_image(img, input_order='HWC'):
    """Return *img* in 'HWC' order.

    A 2-D (h, w) array first gains a trailing channel axis; a 'CHW' array
    is then transposed to (h, w, c); an 'HWC' array passes through as-is.

    Args:
        img (ndarray): Input image.
        input_order (str): 'HWC' or 'CHW' describing the current layout.
            Irrelevant for 2-D inputs apart from the transpose.  Default: 'HWC'.

    Returns:
        ndarray: reordered image.

    Raises:
        ValueError: for any other *input_order*.
    """
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'")
    # Note: both steps apply in sequence (not elif), matching the original.
    if img.ndim == 2:
        img = img[..., None]
    if input_order == 'CHW':
        img = img.transpose(1, 2, 0)
    return img
def resize_fn(img, size):
    """Resize a tensor image to *size* with PIL bicubic interpolation.

    The tensor is converted to a PIL image, resized, and converted back,
    so values are requantised through the PIL round-trip.
    """
    pil_img = transforms.ToPILImage()(img)
    resized = transforms.Resize(size, InterpolationMode.BICUBIC)(pil_img)
    return transforms.ToTensor()(resized)
def to_frequency_samples(f_img):
    """Flatten a 4-channel frequency image into (num_samples, 4) rows."""
    per_channel = f_img.view(4, -1)
    # Transpose so each row holds the 4 channel values of one sample.
    return per_channel.t()
def to_y_channel(img):
    """Change to Y channel of YCbCr.

    Args:
        img (ndarray): Images with range [0, 255].

    Returns:
        (ndarray): Images with range [0, 255] (float type) without round;
        3-channel BGR input becomes (h, w, 1), anything else passes
        through (as float32) unchanged.
    """
    scaled = img.astype(np.float32) / 255.
    is_bgr = scaled.ndim == 3 and scaled.shape[2] == 3
    if is_bgr:
        scaled = bgr2ycbcr(scaled, y_only=True)[..., None]
    return scaled * 255.
# ----
# COLOR SPACES | StarcoderdataPython |
14476 | <gh_stars>100-1000
from container.base import TimeBase
from container.array import TimeArray, TimeDtype
from container.timeseries import TimeSeries
from container.timeframe import TimeFrame | StarcoderdataPython |
def total_likes(days):
    """Return how many people liked the advert after *days* days.

    Day 1 reaches (and is liked by) 2 people; each following day reaches
    3/2 of the previous day's audience (integer division), all of whom
    also like it.
    """
    people_who_liked_advertise = 2
    current_people_reached = people_who_liked_advertise
    for _ in range(1, days):
        current_people_reached = (current_people_reached * 3) // 2
        people_who_liked_advertise += current_people_reached
    return people_who_liked_advertise


if __name__ == '__main__':
    # BUG FIX: the module-level input() made this file block on stdin when
    # imported; the main guard preserves the script's behaviour while
    # keeping the computation importable and testable.
    print(total_likes(int(input())))
| StarcoderdataPython |
140473 | <gh_stars>1-10
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics as KM
# Importing the base class
from KratosMultiphysics.CoSimulationApplication.base_classes.co_simulation_solver_wrapper import CoSimulationSolverWrapper
# Other imports
import KratosMultiphysics.CoSimulationApplication.co_simulation_tools as cs_tools
def Create(settings, solver_name):
    # Factory hook called by the CoSimulation framework to instantiate
    # this solver wrapper from its settings block.
    return FLOWerWrapper(settings, solver_name)
class FLOWerWrapper(CoSimulationSolverWrapper):
    """This class serves as wrapper for the CFD solver FLOWer.

    Model parts listed under "model_parts_read" are read from mdpa files;
    those under "model_parts_send"/"model_parts_recv" are exported to /
    imported from the co-simulation communication layer.
    """
    def __init__(self, settings, solver_name):
        super(FLOWerWrapper, self).__init__(settings, solver_name)

        # Ensure the three model-part maps exist (defaults are empty).
        settings_defaults = KM.Parameters("""{
            "model_parts_read" : { },
            "model_parts_send" : { },
            "model_parts_recv" : { }
        }""")

        self.settings["solver_wrapper_settings"].ValidateAndAssignDefaults(settings_defaults)

        # Create model parts and allocate variables for all coupling data.
        cs_tools.CreateMainModelPartsFromCouplingData(self.data_dict.values(), self.model, self.name)
        cs_tools.AllocateHistoricalVariablesFromCouplingData(self.data_dict.values(), self.model, self.name)

    def Initialize(self):
        """Read the configured mdpa files and set up coupling interfaces."""
        super(FLOWerWrapper, self).Initialize()

        for main_model_part_name, mdpa_file_name in self.settings["solver_wrapper_settings"]["model_parts_read"].items():
            KM.ModelPartIO(mdpa_file_name.GetString()).ReadModelPart(self.model[main_model_part_name])

        for model_part_name, comm_name in self.settings["solver_wrapper_settings"]["model_parts_send"].items():
            interface_config = {
                "comm_name" : comm_name.GetString(),
                "model_part_name" : model_part_name
            }
            self.ExportCouplingInterface(interface_config)

        for model_part_name, comm_name in self.settings["solver_wrapper_settings"]["model_parts_recv"].items():
            interface_config = {
                "comm_name" : comm_name.GetString(),
                "model_part_name" : model_part_name
            }
            self.ImportCouplingInterface(interface_config)

    def AdvanceInTime(self, current_time):
        return 0.0 # TODO find a better solution here... maybe get time from solver through IO

    def PrintInfo(self):
        cs_tools.cs_print_info(self._ClassName(), "printing info...")
        ## TODO print additional stuff with higher echo-level

    def _GetIOType(self):
        # IO type comes straight from the user-supplied io_settings.
        return self.settings["io_settings"]["type"].GetString()
1767651 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Read in Mechanical Turk results files to calculate demographic information
for various funding and regulatory agencies. Outputs an xlsx file with columns
of interest that is easy to do pivot tables for getting the necessary counts
"""
import argparse
from datetime import date, datetime
# import sqlite3
from typing import Union
from dateutil.parser import parse
from dateutil.tz import gettz, tzutc
import numpy as np
import pandas as pd
from ruamel.yaml import CLoader as Loader
from ruamel.yaml import load
# ANSI escape prefix; CSI + 'm' (SGR with no arguments) resets colours.
CSI = '\x1B['
reset = CSI+'m'

# tzinfos = {'EDT': -14400, 'EST': -18000}
# For some reason dateutil doesn't know PDT and PST?
# Map Pacific timezone abbreviations for dateutil's parser.
pactz = {'PDT': gettz('America/Los_Angeles'),
         'PST': gettz('America/Los_Angeles')}

parser = argparse.ArgumentParser(
    description='Load one or more MTurk results files and extract the NIH '
    'mandated demographic info')
parser.add_argument('-r', '--resultsfilelist', required=True,
                    help='(required) YAML file with list of results files to use')
parser.add_argument('-s', '--rawsubjects',
                    action='store_true',
                    help='Dump a raw file of all assignments without removing duplicate workers')
# parser.add_argument('-p', '--protocol', required=True,
#                     help='Specifiy the IRB protocol name')
args = parser.parse_args()

# Load the YAML experiment description; it must define the three keys
# validated below.
with open(args.resultsfilelist, 'r') as rfile:
    expdata = load(rfile, Loader=Loader)

abort = False
for k in ('resultsfiles', 'protocol', 'datebreaks'):
    if k not in expdata:
        print(f'{k} is a required key in HIT file!')
        abort = True
if abort:
    print('At least one required key missing; aborting HIT load')
    import sys
    sys.exit()

resfiles = expdata['resultsfiles']
protocol = expdata['protocol']
datebreaks = expdata['datebreaks']
# Every column name (old web-UI spelling and new API spelling) that any of the
# historical results files might use and that we want to keep.
# BUGFIX: 'RequesterAnnotation' was missing a trailing comma, so Python's
# implicit string concatenation fused it with 'assignmentduration' into the
# bogus entry 'RequesterAnnotationassignmentduration', silently dropping both
# intended names from the set.
columns_of_interest = {
    'hitid', 'HitId',
    # Some web UI downloaded files are missing 'HITTypeId' or 'hittypeid'
    'hittypeid', 'HITTypeId',
    'title', 'Title', 'HitTitle',
    'description', 'Description',
    'keywords', 'Keywords',
    'reward', 'Reward',
    'creationtime', 'CreationTime',
    'assignments', 'MaxAssignments',
    'numavailable', 'NumberofAssignmentsAvailable',
    'numpending', 'NumberofAssignmentsPending',
    'numcomplete', 'NumberofAssignmentsCompleted',
    'hitstatus', 'HITStatus',
    'reviewstatus', 'HITReviewStatus',
    'annotation', 'RequesterAnnotation',
    'assignmentduration', 'AssignmentDurationInSeconds',
    'autoapprovaltime', 'AutoApprovalTime',
    'autoapprovedelay', 'AutoApprovalDelayInSeconds',
    'hitlifetime', 'LifetimeInSeconds',
    'viewhit',
    'assignmentid', 'AssignmentId',
    'workerid', 'WorkerId',
    'assignmentstatus', 'AssignmentStatus',
    'assignmentaccepttime', 'AcceptTime',
    'assignmentsubmittime', 'SubmitTime',
    'assignmentapprovaltime', 'ApprovalTime',
    'assignmentrejecttime', 'RejectionTime',
    'deadline',
    'feedback',
    'reject',
    'Answer.experiment', 'Answer.Experiment', 'Experiment',
    'Experimenter',
    'Answer.list', 'Answer.List',
    'Answer.browser', 'Answer.browserid', 'Answer.Browser', 'Answer.userAgent',
    'Answer.rsrb.raceother',
    'Answer.rsrb.ethnicity',
    'Answer.rsrb.sex',
    'Answer.rsrb.age',
    # Ilker has a column for each race, others just have "Answer.rsrb.race"
    'Answer.rsrb.race.amerind',
    'Answer.rsrb.race.asian',
    'Answer.rsrb.race.black',
    'Answer.rsrb.race.other',
    'Answer.rsrb.race.pacif',
    'Answer.rsrb.race.unknown',
    'Answer.rsrb.race.white',
    'Answer.rsrb.race'
}
# Map the new-API column spellings to the canonical lowercase names used by
# the rest of this script.  (Both 'HITId' and 'HitId' are kept since different
# download paths produced different spellings.)
# CONSISTENCY FIX: added 'ApprovalTime' -> 'assignmentapprovaltime'; the column
# is selected via columns_of_interest but previously had no rename entry,
# unlike every other selected column.
name_map = {'HITId': 'hitid', 'HitId': 'hitid',
            'HITTypeId': 'hittypeid',
            'Title': 'title', 'HitTitle': 'title',
            'Description': 'description',
            'Keywords': 'keywords',
            'Reward': 'reward',
            'CreationTime': 'creationtime',
            'MaxAssignments': 'assignments',
            'NumberofAssignmentsAvailable': 'numavailable',
            'NumberofAssignmentsPending': 'numpending',
            'NumberofAssignmentsCompleted': 'numcomplete',
            'HITStatus': 'hitstatus',
            'HITReviewStatus': 'reviewstatus',
            'RequesterAnnotation': 'annotation',
            'AssignmentDurationInSeconds': 'assignmentduration',
            'AutoApprovalTime': 'autoapprovaltime',
            'AutoApprovalDelayInSeconds': 'autoapprovedelay',
            'LifetimeInSeconds': 'hitlifetime',
            'AssignmentId': 'assignmentid',
            'WorkerId': 'workerid',
            'AssignmentStatus': 'assignmentstatus',
            'AcceptTime': 'assignmentaccepttime',
            'SubmitTime': 'assignmentsubmittime',
            'ApprovalTime': 'assignmentapprovaltime',
            'RejectionTime': 'assignmentrejecttime',
            }
# Accumulate every listed results file into one DataFrame (`results`).
results = None
for r in resfiles:
    with open(r['file'], 'r') as resfile:
        print(f'Loading {r["file"]}')
        # Tab-separated is the historical default; the YAML entry may override.
        delim = '\t'
        if 'delimiter' in r:
            if r['delimiter'] == 'comma':
                delim = ','
        rdf: pd.DataFrame = pd.read_csv(resfile, delimiter=delim, parse_dates=True,
                                        low_memory=False)
        # Keep only the columns we know how to handle, then compute the rename
        # map for whatever subset of new-API spellings this file actually has.
        coi = rdf.columns.intersection(columns_of_interest)
        rename_keys = coi.intersection(name_map.keys())
        renames = {x[0]: x[1] for x in name_map.items() if x[0] in rename_keys}
        # Some really old ones have no demographic data
        # Color info from http://stackoverflow.com/a/21786287/3846301
        if 'Answer.rsrb.ethnicity' not in coi:
            print(CSI + '31;40m' + '✗' + CSI + '0m' + f'\t{resfile.name.split("/")[-1]} has no demographic information')
        else:
            print(CSI + '32;40m' + '✓' + CSI + '0m' + f'\t{resfile.name.split("/")[-1]} has demographic information')
        try:
            results_selected = rdf.loc[:, coi]
        except KeyError as e:
            # NOTE(review): if this ever triggers, `results_selected` is left
            # undefined (or stale from the previous file) — verify intent.
            print(f'KeyError: {e}')
            print(f'Columns of interest: {coi}')
            print(f'Actual columns: {rdf.columns}')
        # This is useful if you need to figure out which files are problematic
        # but if you don't comment it out, you can end up with duplicates
        # results_selected.loc[:, 'filename'] = resfile.name
        # 'WorkerId' present means new-API spellings: normalize them.
        if 'WorkerId' in coi:
            # print(f'Renaming columns: {rename_keys}')
            results_selected.rename(columns=renames, inplace=True)
        if 'Answer.experiment' in coi:
            results_selected.rename(columns={'Answer.experiment': 'Experiment'}, inplace=True)
        if 'Answer.Experiment' in coi:
            results_selected.rename(columns={'Answer.Experiment': 'Experiment'}, inplace=True)
        # Fall back to the YAML metadata when the file itself carries none.
        if not 'Experiment' in results_selected:
            results_selected['Experiment'] = r['name']
        if not 'Experimenter' in results_selected:
            results_selected['Experimenter'] = r['experimenter']
        if results is None:
            results = results_selected
        else:
            results = results.append(results_selected, ignore_index=True)
# cleanup
# FIXME: pd.to_datetime misses a lot of date formats, better off converting by
# hand using dateutil
# results['assignmentsubmittime'] = pd.to_datetime(results['assignmentsubmittime'])
# Canonicalize demographic column names, then normalize their values.
results.rename(columns={'Answer.rsrb.ethnicity': 'Ethnicity',
                        'Answer.rsrb.sex': 'Sex',
                        'Answer.rsrb.age': 'Age'},
               inplace=True)
# Sex values sometimes arrive as stringified one-element lists.
try:
    results.loc[results['Sex'] == "['Male']", 'Sex'] = 'Male'
    results.loc[results['Sex'] == "['Female']", 'Sex'] = 'Female'
except KeyError:
    # No Sex column at all: create an empty one so downstream code is uniform.
    results['Sex'] = pd.Series()
results['Sex'].fillna('unknown;', inplace=True)
try:
    results.loc[results['Ethnicity'] == "['Not Hispanic or Latino']", 'Ethnicity'] = 'NonHisp'
    results.loc[results['Ethnicity'] == "['Hispanic or Latino']", 'Ethnicity'] = 'Hisp'
    results.loc[results['Ethnicity'] == "['N/A']", 'Ethnicity'] = np.nan
except KeyError:
    results['Ethnicity'] = pd.Series()
results['Ethnicity'].fillna('unknown;', inplace=True)
# 'unknown;' is the sentinel normalize_race() later expands via the
# per-race boolean columns.
try:
    results['Answer.rsrb.race'].fillna('unknown;', inplace=True)
except KeyError:
    results['Answer.rsrb.race'] = pd.Series()
    results['Answer.rsrb.race'].fillna('unknown;', inplace=True)
# Ensure every per-race boolean column exists and has no NaNs.
for key in ('amerind', 'asian', 'black', 'other', 'pacif', 'unknown', 'white'):
    try:
        results[f'Answer.rsrb.race.{key}'].fillna(False, inplace=True)
    except KeyError:
        results[f'Answer.rsrb.race.{key}'] = pd.Series()
        results[f'Answer.rsrb.race.{key}'].fillna(False, inplace=True)
# Missing timestamps default to the Unix epoch so parse() always succeeds.
datedefault = datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc()).isoformat()
results['assignmentaccepttime'].fillna(datedefault, inplace=True)
results['assignmentsubmittime'].fillna(datedefault, inplace=True)
results['creationtime'].fillna(datedefault, inplace=True)
results['assignmentaccepttime'] = results['assignmentaccepttime'].apply(parse)
results['assignmentsubmittime'] = results['assignmentsubmittime'].apply(parse)
results['creationtime'] = results['creationtime'].apply(parse)
# results['duration'] = results['assignmentsubmittime'] - results['assignmentaccepttime']
# results['assignmentaccepttime'] = results['assignmentaccepttime'].astype('datetime64[ns]') # .tz_convert('US/Eastern')
# results['assignmentsubmittime'] = results['assignmentsubmittime'].astype('datetime64[ns]') # .tz_convert('US/Eastern')
# results['creationtime'] = results['creationtime'].astype('datetime64[ns]') # .tz_convert('US/Eastern')
# print(f"Accept time is of {results['assignmentaccepttime'].dtypes}")
# print(f"Submit time is of {results['assignmentsubmittime'].dtypes}")
# print(f"Creation time is of {results['creationtime'].dtypes}")
def normalize_race(row: pd.core.series.Series) -> str:
    """
    Take possible ways race could be specified in RSRB survey and reduce to desired format.

    For calculating racial demographics, the only values we want are:

    * 'American Indian / Alaska Native',
    * 'Asian',
    * 'Black or African American',
    * 'Other',
    * 'Native Hawaiian or Other Pacific Islander',
    * 'Unknown or Not Reported',
    * 'White'

    BUGFIX: previously fell off the end (implicitly returning None) for
    unrecognized race strings and on the AttributeError path; those rows are
    now reported as 'Unknown or Not Reported'.
    """
    try:
        if row['Answer.rsrb.race'] in ('amerind;', 'asian;', 'black;', 'other;',
                                       'pacif;', 'white;'):
            # Single race recorded as a 'token;' string.
            return {'amerind;': 'American Indian / Alaska Native',
                    'asian;': 'Asian',
                    'black;': 'Black or African American',
                    'other;': 'Other',
                    'pacif;': 'Native Hawaiian or Other Pacific Islander',
                    'white;': 'White'}[row['Answer.rsrb.race']]
        elif row['Answer.rsrb.race'].find('|') >= 0:
            # Multiple 'token;' entries joined by '|'.
            return 'More Than One Race'
        elif row['Answer.rsrb.race'] == 'unknown;':
            # Fall back to the per-race boolean columns (files that record one
            # column per race rather than a combined string).
            racecols = \
                (row['Answer.rsrb.race.amerind'],
                 row['Answer.rsrb.race.asian'],
                 row['Answer.rsrb.race.black'],
                 row['Answer.rsrb.race.other'],
                 row['Answer.rsrb.race.pacif'],
                 row['Answer.rsrb.race.unknown'],
                 row['Answer.rsrb.race.white'])
            numraces = len([x for x in racecols if x])
            if numraces > 1:
                return 'More Than One Race'
            elif numraces == 0:
                return 'Unknown or Not Reported'
            else:
                # Exactly one flag set: map its position to the label.
                return {
                    0: 'American Indian / Alaska Native',
                    1: 'Asian',
                    2: 'Black or African American',
                    3: 'Other',
                    4: 'Native Hawaiian or Other Pacific Islander',
                    5: 'Unknown or Not Reported',
                    6: 'White'}[racecols.index(True)]
    except AttributeError as e:
        # Non-string race value (e.g. a NaN that escaped fillna); log it and
        # fall through to the unknown bucket below.
        print(e)
        print(row)
    return 'Unknown or Not Reported'
def normalize_age(row: pd.core.series.Series) -> Union[int, str]:
    """Coerce the Age field to an int when possible; otherwise return it unchanged."""
    raw = row['Age']
    try:
        # Go through float first so values like '25.0' still convert cleanly.
        return int(float(raw))
    except ValueError:
        # Free-text ages (e.g. 'twenty-five') pass through untouched.
        return raw
def add_logical_year(row: pd.core.series.Series) -> str:
    """
    The 'year' for the purpose of a given report may not be equivalent to the
    calendar year. Take the breakpoints specified in the config file and set
    the year for each row based on those.

    Returns the matching year key from expdata['datebreaks'], or one of the
    sentinel strings 'Date unparseable' / 'Date out of range'.
    """
    # Submit times are either already-parsed datetimes or raw strings.
    if isinstance(row['assignmentsubmittime'], datetime):
        submitdate = row['assignmentsubmittime'].date()
    else:
        try:
            submitdate = parse(row['assignmentsubmittime'], tzinfos=pactz).date()
        except (ValueError, TypeError):
            print(f'Submit time unparseable: {row["assignmentsubmittime"]}')
            # print(f'Submit time unparseable: {row.to_dict()}')
            return 'Date unparseable'
    # NOTE(review): minyear/maxyear are tracked but never used after the loop
    # — looks like leftover diagnostics.
    minyear = maxyear = date.today().year
    for year, drange in expdata['datebreaks'].items():
        if submitdate.year < minyear:
            minyear = submitdate.year
        elif submitdate.year > maxyear:
            maxyear = submitdate.year
        # drange['start'] / drange['end'] are assumed to be datetime.date
        # values from the YAML config — TODO confirm.
        if submitdate >= drange['start'] and submitdate <= drange['end']:
            return year
    print(f'Date not in any range: {row["assignmentsubmittime"]}')
    # print(f'Date not in any range: {row.to_dict()}')
    return 'Date out of range'
# def normalize_experiment(row: pd.core.series.Series):
# """Reduce 'Answer.experiment' or 'Answer.Experiment' to 'Experiment'."""
# names = row[row.index.intersection(['Answer.experiment', 'Answer.Experiment'])].dropna()
# return ','.join(names) if names.any() else np.nan
def normalize_browser(row: pd.core.series.Series):
    """
    Reduce possible browser column names to 'Browser'.

    Various people over time have used different names for the same thing.
    Returns a comma-joined string of the non-null values, or NaN when none
    of the candidate columns carry a value.
    """
    candidate_cols = ['Answer.browser', 'Answer.browserid',
                      'Answer.Browser', 'Answer.userAgent']
    present = row.index.intersection(candidate_cols)
    values = row[present].dropna()
    if values.any():
        return ','.join(values)
    return np.nan
# def normalize_list(row):
# """
# Reduce possible experiment list columns to 'ExperimentList'.
# """
# experiment_list = row[row.index.intersection(['Answer.list', 'Answer.List'])].dropna()
# return ','.join(experiment_list) if experiment_list.any() else np.nan
# Derive the report columns, then dedupe workers and write the xlsx report.
results['Race'] = results.apply(normalize_race, axis=1)
results['Year'] = results.apply(add_logical_year, axis=1)
try:
    results['Age'] = results.apply(normalize_age, axis=1)
except KeyError:
    results['Age'] = pd.Series()
results['Age'].fillna(np.nan, inplace=True)
# results['Experiment'] = results.apply(normalize_experiment, axis=1)
try:
    results['Browser'] = results.apply(normalize_browser, axis=1)
except KeyError:
    results['Browser'] = pd.Series()
results['Browser'].fillna('Unknown', inplace=True)
# results['ExperimentList'] = results.apply(normalize_list, axis=1)
results.sort_values(['workerid', 'Year', ], inplace=True)
# Optional raw dump of every assignment before any deduplication.
if args.rawsubjects:
    results.to_csv(f'{protocol}_rawsubjects-{date.today().isoformat()}.csv',
                   # date_format='%Y-%m-%dT%H:%M:%S%z', # Use ISO 8601 format to make R happy
                   index=False)
print(f'Starting with {len(results)} rows.')
# get the oldest instance of each duplicated value
results.drop_duplicates(['workerid', 'Sex', 'Race', 'Ethnicity'], inplace=True)
print(f'After 1st pass removing duplicates there are {len(results)} rows.')
# Dump full results to a SQLite file
# sql_results = results[['workerid', 'hitid', 'hittypeid', 'assignmentid',
#                        'assignmentaccepttime', 'title', 'ExperimentList',
#                        'Sex', 'Race', 'Ethnicity', 'Age', 'Year', 'Experiment',
#                        'Browser']]
# conn = sqlite3.connect(f'{protocol}.{agency}.{date.today().isoformat()}.db')
# sql_results.to_sql(f'{protocol}_{agency}', conn, if_exists='replace')
# Only write out the important columns for final Excel file
core_cols = ('workerid', 'Sex', 'Race', 'Ethnicity', 'Year')
results = results.loc[:, core_cols]
#
# Try to drop more duplicated workers where values ostensibly mismatch
#
# We're going to put the 'Unknown or Not Reported' back at the end but it's
# easier to drop na values
results.replace(['Unknown', 'Unknown or Not Reported'], np.nan, inplace=True)
dupes = results.loc[results.duplicated('workerid', keep=False)] # default is to not mark 1st instance
# XXX: can this be done in a vectorized way?
for w in dupes['workerid'].unique():
    worker_rows = results[results['workerid'] == w]
    # For year, take lowest
    # NOTE(review): if every Year for a worker is NaN this indexes an empty
    # list (IndexError); also mixed int/str Years would break sorted() — verify.
    results.loc[worker_rows.index, 'Year'] = sorted(worker_rows['Year'].dropna().tolist())[0]
    # For sex, race, and ethnicity: if only one non-NA value set all to it
    for col in ('Sex', 'Race', 'Ethnicity'):
        vals = worker_rows[col].dropna().unique()
        if len(vals) == 1:
            results.loc[worker_rows.index, col] = vals.item()
results.drop_duplicates(['workerid', 'Sex', 'Race', 'Ethnicity'], inplace=True)
results.fillna('Unknown or Not Reported', inplace=True)
print(f'After 2nd pass removing duplicates there are {len(results)} rows.')
print(f'There are {len(results.workerid.unique())} unique workers out of {len(results)} rows')
outfile_name = f'{protocol}_report-{date.today().isoformat()}.xlsx'
writer = pd.ExcelWriter(outfile_name, engine='xlsxwriter',
                        options={'remove_timezone': True})
results.to_excel(writer, 'Demographic Data', index=False)
writer.save()
# Something in all this is triggering the error this documents:
# https://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-with-list-with-missing-labels-is-deprecated
| StarcoderdataPython |
import tensorflow as tf
import numpy as np
import cv2
import os
import rospy
from timeit import default_timer as timer
from styx_msgs.msg import TrafficLight
# Detection class id used for traffic lights when filtering boxes below
# (matches the model's label map — presumably COCO; confirm for your model).
CLASS_TRAFFIC_LIGHT = 10
# Paths are relative to the ROS node's working directory.
MODEL_DIR = 'light_classification/models/'
IMG_DIR = 'light_classification/img/'
DEBUG_DIR = 'light_classification/result/'
class TLClassifier(object):
    """Red-traffic-light classifier: a frozen TF object detector finds light
    boxes, then simple color heuristics decide whether the light is red."""

    def __init__(self):
        #TODO load classifier
        # object detection: faster_rcnn_inception_v2
        # from Tensorflow detection model zoo:
        # https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
        self.detector = MODEL_DIR + 'faster_rcnn_inception_v2.pb'
        self.sess= self.load_graph(self.detector)
        detection_graph = self.sess.graph
        if not os.path.exists(DEBUG_DIR): #check the result of light detection
            os.makedirs(DEBUG_DIR)
        # The input placeholder for the image.
        # 'get_tensor_by_name' returns the Tensor with the associated name in the Graph.
        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        # the first decoding
        # Warm-up run on a test image so the first real callback is fast.
        test_image = cv2.imread(IMG_DIR + 'image_test.jpg')
        image_np, box_coords, classes, scores = self.detect_tl(test_image)
        # Traditional traffic light classifier
        pred_image, is_red = self.classify_red_tl(image_np, box_coords, classes, scores)
        # rospy.loginfo("DEBUG: stage 4")
        if is_red:
            rospy.loginfo("Classifier: RED")
        else:
            rospy.loginfo("Classifier: NOT RED")
        cv2.imwrite(IMG_DIR + 'pred_image.png', pred_image)
        rospy.loginfo("TensorFlow Initiation: Done")
        self.num_image = 1

    def load_graph(self, graph_file, use_xla=False):
        """Load a frozen TF1 graph from `graph_file` and return a session for it."""
        config = tf.ConfigProto(log_device_placement=False)
        config.gpu_options.allow_growth = True
        # NOTE(review): this session is created but never used or closed —
        # looks like leftover code.
        session = tf.Session(config=config)
        # if use_xla:
        #     jit_level = tf.OptimizerOptions.ON_1
        #     config.graph_options.optimizer_options.global_jit_level = jit_level
        # NOTE(review): returning from inside the `with` block runs
        # tf.Session.__exit__, which closes `sess` on exit — verify that the
        # returned session is actually usable afterwards.
        with tf.Session(graph=tf.Graph(), config=config) as sess:
            gd = tf.GraphDef()
            with tf.gfile.Open(graph_file, 'rb') as f:
                data = f.read()
                gd.ParseFromString(data)
                tf.import_graph_def(gd, name='')
            ops = sess.graph.get_operations()
            n_ops = len(ops)
            print("number of operations = %d" % n_ops)
            return sess
            # return sess, ops

    def detect_tl(self, image):
        """Run the detector and return (image, traffic-light boxes in pixel
        coordinates, class ids, confidence scores)."""
        trt_image = np.copy(image)
        # Add a batch dimension as expected by the detection graph.
        image_np = np.expand_dims(np.asarray(trt_image, dtype=np.uint8), 0)
        # Actual detection.
        (boxes, scores, classes) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
                                                 feed_dict={self.image_tensor: image_np})
        # Remove unnecessary dimensions
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)
        confidence_cutoff = 0.8
        # Filter traffic light boxes with a confidence score less than `confidence_cutoff`
        boxes, scores, classes = self.filter_boxes(confidence_cutoff, boxes, scores, classes, keep_classes=[CLASS_TRAFFIC_LIGHT])
        # The current box coordinates are normalized to a range between 0 and 1.
        # This converts the coordinates actual location on the image.
        image_np = np.squeeze(image_np)
        width = image_np.shape[1]
        height = image_np.shape[0]
        box_coords = self.to_image_coords(boxes, height, width)
        return image_np, box_coords, classes, scores

    # Filter the boxes which detection confidence lower than the threshold
    def filter_boxes(self, min_score, boxes, scores, classes, keep_classes):
        """Keep boxes with score >= min_score whose class is in keep_classes
        (keep_classes=None keeps all classes)."""
        n = len(classes)
        idxs = []
        for i in range(n):
            if scores[i] >= min_score:
                if ((keep_classes is None) or (int(classes[i]) in keep_classes)):
                    idxs.append(i)
        filtered_boxes = boxes[idxs, ...]
        filtered_scores = scores[idxs, ...]
        filtered_classes = classes[idxs, ...]
        return filtered_boxes, filtered_scores, filtered_classes

    # Convert the normalized box coordinates (0~1) to image coordinates
    def to_image_coords(self, boxes, height, width):
        """Scale normalized [ymin, xmin, ymax, xmax] boxes to pixel coordinates."""
        box_coords = np.zeros_like(boxes)
        box_coords[:, 0] = boxes[:, 0] * height
        box_coords[:, 1] = boxes[:, 1] * width
        box_coords[:, 2] = boxes[:, 2] * height
        box_coords[:, 3] = boxes[:, 3] * width
        return box_coords

    #Draw bounding box on traffic light, and detect if it is RED
    def classify_red_tl(self, image_np, boxes, classes, scores, thickness=5):
        """Annotate each candidate box and return (annotated image, is_red).

        A light counts as red when enough bright "red/light-on" pixels cluster
        in the upper third of the (vertical) traffic-light box.
        """
        for i in range(len(boxes)):
            # rospy.loginfo("DEBUG: stage 3.1")
            # Boxes come from to_image_coords as [ymin, xmin, ymax, xmax];
            # NOTE(review): the names `bot`/`top` are therefore swapped
            # relative to image intuition (`bot` is the smaller y).
            bot, left, top, right = boxes[i, ...]
            class_id = int(classes[i])
            score = scores[i]
            h = top - bot
            w = right - left
            if h <= 1.5 * w:
                continue # Truncated Traffic Ligth box
            cv2.rectangle(image_np,(left, top), (right, bot), (255, 43, 255), thickness) # BGR format for color
            tl_img = image_np[int(bot):int(top), int(left):int(right)]
            tl_img_simu = self.select_red_simu(tl_img) # SELECT RED
            tl_img_real = self.select_lighton_real(tl_img) # SELECT TL
            # NOTE(review): uint8 addition wraps on overflow before the /2 and
            # the result dtype changes — confirm cvtColor accepts it.
            tl_img = (tl_img_simu + tl_img_real) / 2
            gray_tl_img = cv2.cvtColor(tl_img, cv2.COLOR_RGB2GRAY)
            nrows, ncols = gray_tl_img.shape[0], gray_tl_img.shape[1]
            # compute center of mass of RED points
            mean_row = 0
            mean_col = 0
            npoints = 0
            for row in range(nrows):
                for col in range(ncols):
                    if (gray_tl_img[row, col] > 0):
                        mean_row += row
                        mean_col += col
                        npoints += 1
            if npoints > 0:
                mean_row = float(mean_row / npoints) / nrows
                mean_col = float(mean_col / npoints) / ncols
            # Get the normalized center of mass of RED points
            # Use the location of light to detect the color, RED is in the upper part of the box
            if npoints > 10 and mean_row < 0.33:
                rospy.loginfo("RED Light Detection Confidance: %.2f", score)
                return image_np, True
        return image_np, False

    # select RED mask in simulation situation
    def select_red_simu(self, img): # BGR
        """Mask keeping only strongly red pixels (BGR thresholds, simulator camera)."""
        lower = np.array([  0,   0, 200], dtype="uint8")
        upper = np.array([ 55,  55, 255], dtype="uint8")
        red_mask = cv2.inRange(img, lower, upper)
        return cv2.bitwise_and(img, img, mask = red_mask)

    # select Traffic Lighton area(HLS: high L and high S) in real situation
    # for camera without polarization filter
    def select_lighton_real(self, img): # HLS for real
        """Mask keeping bright, saturated "light on" pixels via HLS thresholds."""
        hls_img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        lower = np.array([ 50, 150, 150], dtype="uint8")
        upper = np.array([100, 255, 255], dtype="uint8")
        tl_mask = cv2.inRange(hls_img, lower, upper)
        return cv2.bitwise_and(img, img, mask = tl_mask)

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        #implement light color prediction
        image_np, box_coords, classes, scores = self.detect_tl(image)
        # light color detection
        detected_image, is_red = self.classify_red_tl(image_np, box_coords, classes, scores)
        # fimage = DEBUG_DIR + 'detected_img_' + str(self.num_image) + '.png'
        # #output the predicted image
        # cv2.imwrite(fimage, detected_image)
        self.num_image += 1
        #return 'if it is a RED'
        if is_red:
            return TrafficLight.RED
        else:
            return TrafficLight.UNKNOWN
| StarcoderdataPython |
# app/api/models/Base.py
from abc import ABC, abstractmethod
class Base(ABC):
    """Abstract interface for a manageable resource (lifecycle operations).

    Concrete models must implement every method; each abstract method raises
    NotImplementedError so an accidental super() call fails loudly.
    """

    @abstractmethod
    def info(self):
        """Return information describing this resource."""
        raise NotImplementedError()

    @abstractmethod
    def create(self):
        """Create the resource."""
        raise NotImplementedError()

    @abstractmethod
    def delete(self):
        """Delete the resource."""
        raise NotImplementedError()

    @abstractmethod
    def start(self):
        """Start the resource."""
        raise NotImplementedError()

    @abstractmethod
    def stop(self):
        """Stop the resource."""
        raise NotImplementedError()

    @abstractmethod
    def restart(self):
        """Restart the resource."""
        raise NotImplementedError()

    @abstractmethod
    def update(self):
        """Update the resource's configuration."""
        raise NotImplementedError()

    @abstractmethod
    def move(self):
        """Move the resource (e.g. to another host)."""
        raise NotImplementedError()

    @abstractmethod
    def clone(self):
        """Clone the resource."""
        raise NotImplementedError()

    @abstractmethod
    def snapshot(self):
        """Snapshot the resource."""
        raise NotImplementedError()
| StarcoderdataPython |
# sphinxcontrib/dd/data_dictionary.py
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive as BaseDirective
from recommonmark.parser import CommonMarkParser
from . import yaml
def string_list(argument):
    """Parse a directive option into a list of trimmed strings.

    The value is comma-separated when it contains a comma, otherwise
    whitespace-separated.
    """
    # str.split(None) splits on runs of whitespace, so one expression
    # covers both delimiter styles.
    separator = ',' if ',' in argument else None
    return [piece.strip() for piece in argument.split(separator)]
class Parser(CommonMarkParser):
    """CommonMark parser that skips attaching the reporter's parse-message
    observers (deliberate no-ops below), avoiding docutils reporting hooks
    when parsing description snippets out of band."""

    def setup_parse(self, inputstring, document):
        """Initial parse setup. Call at start of `self.parse()`."""
        self.inputstring = inputstring
        self.document = document
        # document.reporter.attach_observer(document.note_parse_message)

    def finish_parse(self):
        """Finalize parse details. Call at end of `self.parse()`."""
        # self.document.reporter.detach_observer(
        #     self.document.note_parse_message)
        pass
class Directive(BaseDirective):
    """`data-dictionary` directive: render the tables described in a YAML
    spec file as docutils sections with description text and a column table."""

    required_arguments = 1  # Path to yml file
    optional_arguments = 1  # Path to yml definition file
    final_argument_whitespace = True
    has_content = False

    option_spec = {
        'widths': directives.positive_int_list,
        'headers': string_list,
        'columns': string_list,
    }

    def run(self):
        """Build and return the doctree nodes for every table in the spec."""
        env = self.state.document.settings.env
        app = env.app
        config = app.config
        # Directive options override the data_dictionary_* config defaults.
        widths = self.options.get(
            'widths',
            getattr(config, 'data_dictionary_{0}'.format('widths')),
        )
        headers = self.options.get(
            'headers',
            getattr(config, 'data_dictionary_{0}'.format('headers')),
        )
        columns = self.options.get(
            'columns',
            getattr(config, 'data_dictionary_{0}'.format('columns')),
        )
        rel_path, path = env.relfn2path(directives.path(self.arguments[0]))
        # The definition file argument is optional.
        def_rel_path = ''
        def_path = ''
        try:
            def_rel_path, def_path = env.relfn2path(
                directives.path(self.arguments[1])
            )
        except IndexError:
            pass
        # Add the file as a dependency to the current document.
        # That means the document will be rebuilt if the file is changed.
        env.note_dependency(rel_path)
        if def_rel_path:
            env.note_dependency(def_rel_path)
        spec = yaml.load(path=path, definition_path=def_path)
        data = []
        # One titled section + optional description + table per spec table.
        for name, entity in spec['tables'].items():
            data.append(self.create_section(name=name))
            data.extend(self.generate_description(entity=entity))
            table = self.create_table(
                entity=entity,
                columns=columns,
                headers=headers,
                widths=widths,
            )
            data.append(table)
        return data

    @staticmethod
    def parse_string(text):
        """Parse markdown `text` into docutils child nodes (empty list for falsy text)."""
        if not text:
            return []
        element = nodes.paragraph()
        # self.state.nested_parse(ViewList(str(text).splitlines()), 0, element)
        parser = Parser()
        parser.parse(str(text), element)
        return element.children

    def generate_description(self, entity):
        """Yield 'Name:' and description nodes for a table entity, when present."""
        name = entity.get('name', None)
        if name:
            paragraph = nodes.paragraph(text='Name: ')
            paragraph.extend([nodes.literal(text=entity['name'])])
            yield paragraph
        description = entity.get('description', None)
        if description:
            for each in self.parse_string(description):
                yield each

    def create_table(self, entity, columns=None, headers=None, widths=None):
        """Assemble the docutils table (colspecs + header + body) for an entity."""
        group = self.create_group(widths)
        group.append(self.create_header(data=headers))
        group.append(self.create_body(entity=entity, columns=columns))
        table = nodes.table(classes=['data-dictionary'])
        table.append(group)
        return table

    @staticmethod
    def create_group(widths):
        """Create the tgroup with one colspec per column width."""
        group = nodes.tgroup(cols=len(widths))
        group.extend(
            nodes.colspec(colwidth=width) for width in widths
        )
        return group

    def create_header(self, data):
        """Create the table header row from the header labels."""
        head = nodes.thead()
        head.append(self.create_row(data))
        return head

    def create_row(self, data):
        """Create one table row; each cell is parsed as markdown."""
        row = nodes.row()
        for datum in data:
            row.append(nodes.entry(datum, *self.parse_string(text=datum)))
        return row

    def create_body(self, entity, columns):
        """Create the table body: one row per column-property of the entity."""
        body = nodes.tbody()
        for k, v in entity['columns']['properties'].items():
            data = []
            for column in columns:
                # 'name' comes from the property key, not the property dict.
                if column == 'name':
                    data.append(k)
                    continue
                data.append(v.get(column, ''))
            body.append(self.create_row(data=data))
        return body

    @staticmethod
    def create_section(name):
        """Create a titled section node for one table."""
        section = nodes.section(ids=[name])
        section.append(nodes.title(text=name))
        return section
def setup(app):
    """Sphinx extension entry point: register config defaults and the directive."""
    defaults = {
        'widths': [1, 1, 1, 4],
        'headers': ['Name', 'Type', 'Length', 'Description'],
        'columns': ['name', 'type', 'maxLength', 'description'],
    }
    # Registration order matches the historical explicit calls
    # (widths, headers, columns); all values are rebuild-on-change ('env').
    for option, default in defaults.items():
        app.add_config_value(
            'data_dictionary_{0}'.format(option),
            default,
            'env',
        )
    app.add_directive('data-dictionary', Directive)
| StarcoderdataPython |
# BQ/train.py
import os
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
from keras.models import load_model
from stats_graph import stats_graph
from keras import backend as K
from keras.engine.topology import Layer
# 指定第一块GPU可用
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#++++++++++++++++++++++++++++++++++++000000++++++++++++++++++++
# Let TensorFlow grow GPU memory on demand instead of reserving it all,
# and install the session as Keras' backend session.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
KTF.set_session(sess)
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#tf.compat.v1.disable_eager_execution()
#config = tf.compat.v1.ConfigProto()
#config.gpu_options.allow_growth=True #不全部占满显存, 按需分配
#sess = tf.compat.v1.Session(config=config)
from keras.utils import multi_gpu_model
from keras.utils import plot_model
import data_helper
from keras.layers import Embedding, Input, Bidirectional, LSTM, Concatenate, Add, Dropout, Dense, \
BatchNormalization, Lambda, Activation, multiply, concatenate, Flatten, add, Dot,Permute, GlobalAveragePooling1D,MaxPooling1D, GlobalMaxPooling1D, TimeDistributed
from keras.models import Model
import keras.backend as K
from keras.callbacks import *
from tensorflow.python.ops.nn import softmax
# Sequence length and embedding dimension come from the data_helper module.
input_dim = data_helper.MAX_SEQUENCE_LENGTH
EMBDIM = data_helper.EMBDIM
# Pre-built embedding matrices: word/char, plus two 70-dim auxiliary matrices
# (presumably pinyin and radical features — confirm with data_helper).
embedding_matrix = data_helper.load_pickle('embedding_matrix.pkl')
py_embedding_matrix = data_helper.load_pickle('py_embedding_matrix.pkl')
rad_embedding_matrix = data_helper.load_pickle('rad_embedding_matrix.pkl')
model_data = data_helper.load_pickle('model_data.pkl')
# Frozen (non-trainable) embedding layers shared by both siamese branches.
embedding_layer = Embedding(embedding_matrix.shape[0], EMBDIM, weights = [embedding_matrix], trainable=False)
py_embedding_layer = Embedding(py_embedding_matrix.shape[0], 70, weights = [py_embedding_matrix], trainable=False)
rad_embedding_layer = Embedding(rad_embedding_matrix.shape[0], 70, weights = [rad_embedding_matrix], trainable=False)
def align(input_1, input_2):
    """Soft-align two sequence tensors via dot-product co-attention.

    Returns (input_1 aligned to input_2, input_2 aligned to input_1),
    i.e. each sequence re-expressed as attention-weighted sums of the other.
    """
    # Pairwise similarity between all timesteps of the two sequences.
    sim = Dot(axes=-1)([input_1, input_2])
    # Normalize over each axis to get the two attention distributions.
    attn_over_1 = Lambda(lambda t: softmax(t, axis=1))(sim)
    attn_over_2 = Permute((2, 1))(Lambda(lambda t: softmax(t, axis=2))(sim))
    aligned_2 = Dot(axes=1)([attn_over_1, input_1])
    aligned_1 = Dot(axes=1)([attn_over_2, input_2])
    return aligned_1, aligned_2
def inter_attention(input_1):
    """Self-attention with a residual connection.

    Attends input_1 over itself and adds the attended sequence back onto
    the original (same shape in, same shape out).
    """
    self_sim = Dot(axes=-1)([input_1, input_1])
    weights = Lambda(lambda t: softmax(t, axis=1))(self_sim)
    attended = Dot(axes=1)([weights, input_1])
    # Residual: keep the original signal alongside the attended one.
    return add([input_1, attended])
def f1_score(y_true, y_pred):
    """Keras metric: batch-wise F1 score for binary labels.

    BUGFIX: the old ``if c3 == 0: return 0`` compared a *symbolic tensor*
    with Python ``==``, which in TF1 graph mode never evaluates the values
    (the guard was dead code), so empty batches produced 0/0 = NaN.  The
    denominators are now smoothed with K.epsilon() instead.
    """
    # Count positive samples.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # How many selected items are relevant?
    precision = true_positives / (predicted_positives + K.epsilon())
    # How many relevant items are selected?
    recall = true_positives / (actual_positives + K.epsilon())
    # Calculate f1_score
    return 2 * (precision * recall) / (precision + recall + K.epsilon())
def recall(y_true, y_pred):
    """Keras metric: batch-wise recall for binary labels.

    BUGFIX: ``if c3 == 0`` on a symbolic tensor never triggers in TF1
    graph mode, so an all-negative batch yielded 0/0 = NaN; smooth the
    denominator with K.epsilon() instead.
    """
    # Count positive samples.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (actual_positives + K.epsilon())
def precision(y_true, y_pred):
    """Keras metric: batch-wise precision for binary labels.

    BUGFIX: ``if c3 == 0`` on a symbolic tensor never triggers in TF1
    graph mode (and guarded the wrong count anyway — precision's
    denominator is the *predicted* positives); smooth the denominator
    with K.epsilon() instead.
    """
    # Count positive samples.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # How many selected items are relevant?
    return true_positives / (predicted_positives + K.epsilon())
def matching(p, q):
    """Build the matching vector between two encoded sequences.

    Concatenates |p - q|, p * q and (p - q) along the feature axis
    (in that order, which downstream layers depend on).
    """
    absolute_difference = Lambda(lambda x: K.abs(x[0] - x[1]))([p, q])
    signed_difference = Lambda(lambda x: x[0] - x[1])([p, q])
    #cos_diff = Lambda(lambda x: K.cos(x[0] - x[1]))([p, q])
    elementwise_product = multiply([p, q])
    return concatenate([absolute_difference, elementwise_product, signed_difference])
def base_model(input_shape):
    """Shared encoder used by both siamese branches.

    Takes four parallel token-id inputs (word, char, and the two 70-dim
    auxiliary streams — presumably pinyin and radical; confirm), encodes each
    with a BiLSTM, cross-aligns every pair, and returns the enriched word,
    char, pinyin and radical sequences plus two fused interaction sequences.
    """
    w_input = Input(shape = input_shape)
    c_input = Input(shape = input_shape)
    py_input = Input(shape = input_shape)
    rad_input = Input(shape = input_shape)
    w_embedding = embedding_layer(w_input)
    c_embedding = embedding_layer(c_input)
    py_embedding = py_embedding_layer(py_input)
    rad_embedding = rad_embedding_layer(rad_input)
    # NOTE(review): return_sequences='True' is a string, not the bool True —
    # it happens to be truthy so sequences are returned, but should be True.
    w_l = Bidirectional(LSTM(300,return_sequences='True', dropout=0.55), merge_mode = 'sum')(w_embedding)
    c_l = Bidirectional(LSTM(300,return_sequences='True', dropout=0.55), merge_mode = 'sum')(c_embedding)
    py_l = Bidirectional(LSTM(70,return_sequences='True', dropout=0.55), merge_mode = 'sum')(py_embedding)
    rad_l = Bidirectional(LSTM(70,return_sequences='True', dropout=0.55), merge_mode = 'sum')(rad_embedding)
    # Project the 70-dim auxiliary encodings up to 300 dims to match w/c.
    pyd = Dense(300)(py_l)
    radd = Dense(300)(rad_l)
    # Pairwise co-attention between every stream combination.
    cpy, pyc = align(c_l, pyd)
    wpy, pyw= align(w_l, pyd)
    crad, radc = align(c_l, radd)
    wrad, radw = align(w_l, radd)
    # Second-order alignment between the already-aligned views.
    cpy_aligned, wpy_aligned = align(cpy, wpy)
    crad_aligned, wrad_aligned = align(crad, wrad)
    pyc_aligned, pyw_aligned = align(pyc, pyw)
    radc_aligned, radw_aligned = align(radc, radw)
    # Each output stream = raw encoding + its aligned views.
    w = concatenate([w_l, wpy, wrad, wpy_aligned, wrad_aligned])
    c = concatenate([c_l, cpy, crad, cpy_aligned, crad_aligned])
    py = concatenate([pyd, pyc, pyw, pyc_aligned, pyw_aligned])
    rad = concatenate([radd, radc, radw, radc_aligned, radw_aligned])
    inter_c = concatenate([cpy, pyc, crad, radc])
    inter_w = concatenate([wpy, pyw, wrad, radw])
    model = Model([w_input, c_input, py_input, rad_input],[w, c, py, rad, inter_c, inter_w], name = 'base_model')
    model.summary()
    return model
def siamese_model():
    """Full siamese sentence-matching network.

    Encodes both sentences (p and q) with one shared base_model, aligns and
    pools the resulting views, builds matching features per view, and ends
    in a sigmoid similarity score. Compiled with a custom margin loss and
    the module-level precision/recall/f1_score metrics.
    """
    input_shape = (input_dim,)
    # Eight inputs: word/char/pinyin/radical ids for sentence p and sentence q.
    input_pw = Input(shape = input_shape)
    input_pc = Input(shape = input_shape)
    input_qw = Input(shape = input_shape)
    input_qc = Input(shape = input_shape)
    input_ppy = Input(shape = input_shape)
    input_qpy = Input(shape = input_shape)
    input_prad = Input(shape = input_shape)
    input_qrad = Input(shape = input_shape)
    # One shared encoder => true siamese weights.
    base_net = base_model(input_shape)
    pw, pc, ppy, prad, p_inter_c, p_inter_w = base_net([input_pw, input_pc, input_ppy, input_prad])
    qw, qc, qpy, qrad, q_inter_c, q_inter_w = base_net([input_qw, input_qc, input_qpy, input_qrad])
    # Cross-sentence alignment of the interaction tensors.
    p_inter_c_align, q_inter_c_align = align(p_inter_c, q_inter_c)
    p_inter_w_align, q_inter_w_align = align(p_inter_w, q_inter_w)
    p_inter_c = inter_attention(concatenate([p_inter_c, p_inter_c_align]))
    q_inter_c = inter_attention(concatenate([q_inter_c, q_inter_c_align]))
    p_inter_w = inter_attention(concatenate([p_inter_w, p_inter_w_align]))
    q_inter_w = inter_attention(concatenate([q_inter_w, q_inter_w_align]))
    # Self-attention over each per-view representation.
    pw = inter_attention(pw)
    qw = inter_attention(qw)
    pc = inter_attention(pc)
    qc = inter_attention(qc)
    ppy = inter_attention(ppy)
    qpy = inter_attention(qpy)
    prad = inter_attention(prad)
    qrad = inter_attention(qrad)
    # Pool each sequence to a vector: max-pool + average-pool, summed.
    pw = add([GlobalMaxPooling1D()(pw), GlobalAveragePooling1D()(pw)])
    qw = add([GlobalMaxPooling1D()(qw), GlobalAveragePooling1D()(qw)])
    pc = add([GlobalMaxPooling1D()(pc), GlobalAveragePooling1D()(pc)])
    qc = add([GlobalMaxPooling1D()(qc), GlobalAveragePooling1D()(qc)])
    ppy = add([GlobalMaxPooling1D()(ppy), GlobalAveragePooling1D()(ppy)])
    qpy = add([GlobalMaxPooling1D()(qpy), GlobalAveragePooling1D()(qpy)])
    prad = add([GlobalMaxPooling1D()(prad), GlobalAveragePooling1D()(prad)])
    qrad = add([GlobalMaxPooling1D()(qrad), GlobalAveragePooling1D()(qrad)])
    p_inter_c = add([GlobalMaxPooling1D()(p_inter_c), GlobalAveragePooling1D()(p_inter_c)])
    q_inter_c = add([GlobalMaxPooling1D()(q_inter_c), GlobalAveragePooling1D()(q_inter_c)])
    p_inter_w = add([GlobalMaxPooling1D()(p_inter_w), GlobalAveragePooling1D()(p_inter_w)])
    q_inter_w = add([GlobalMaxPooling1D()(q_inter_w), GlobalAveragePooling1D()(q_inter_w)])
    # matching() builds [abs-diff, product, diff] features per view pair.
    all_diff1 = matching(pw,qw)
    all_diff2 = matching(pc,qc)
    all_diff3 = matching(ppy, qpy)
    all_diff4 = matching(prad, qrad)
    all_diff5 = matching(p_inter_c, q_inter_c)
    all_diff6 = matching(p_inter_w, q_inter_w)
    all_diff = concatenate([all_diff1, all_diff2, all_diff3, all_diff4, all_diff5, all_diff6])
    all_diff = Dropout(0.5)(all_diff)
    # Two-layer classifier head ending in a sigmoid similarity score.
    similarity = Dense(800)(all_diff)
    similarity = BatchNormalization()(similarity)
    similarity = Activation('relu')(similarity)
    similarity = Dense(600)(similarity)
    similarity = Dropout(0.5)(similarity)
    similarity = Activation('relu')(similarity)
    #
    similarity = Dense(1)(similarity)
    similarity = BatchNormalization()(similarity)
    similarity = Activation('sigmoid')(similarity)
    model = Model([input_pw, input_pc, input_qw, input_qc, input_ppy, input_qpy, input_prad, input_qrad], [similarity])
    # loss:binary_crossentropy;optimizer:adm,Adadelta
    model.summary()
    # Margin-gated cross entropy: examples already on the right side of the
    # 0.65 margin contribute no gradient.
    margin = 0.65
    theta = lambda t: (K.sign(t) + 1.) / 2.
    def loss(y_true, y_pred):
        return -(1 - theta(y_true - margin) * theta(y_pred - margin) - theta(1 - margin - y_true) * theta(
            1 - margin - y_pred)) * (y_true * K.log(y_pred + 1e-8) + (1 - y_true) * K.log(1 - y_pred + 1e-8))
    #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    model.compile(loss=loss, optimizer='adam', metrics=['accuracy', precision, recall, f1_score])
    return model
def train():
    """End-to-end training driver.

    Loads pre-tokenised train/dev/test splits from 'model_data.pkl', trains
    the siamese model with checkpointing / early stopping, evaluates both the
    final and the best-checkpoint weights on the test set, and appends both
    result lines to 'nni.txt'.
    """
    data = data_helper.load_pickle('model_data.pkl')
    # Train split: word/char/pinyin/radical ids for both sentences + labels.
    train_pw = data['train_pw']
    train_pc = data['train_pc']
    train_qw = data['train_qw']
    train_qc = data['train_qc']
    train_ppy = data['train_ppy']
    train_qpy = data['train_qpy']
    train_prad = data['train_prad']
    train_qrad = data['train_qrad']
    train_y = data['train_label']
    # Dev split (used for validation / model selection).
    dev_pw = data['dev_pw']
    dev_pc = data['dev_pc']
    dev_qw = data['dev_qw']
    dev_qc = data['dev_qc']
    dev_ppy = data['dev_ppy']
    dev_qpy = data['dev_qpy']
    dev_prad = data['dev_prad']
    dev_qrad = data['dev_qrad']
    dev_y = data['dev_label']
    # Test split (final evaluation only).
    test_pw = data['test_pw']
    test_pc = data['test_pc']
    test_qw = data['test_qw']
    test_qc = data['test_qc']
    test_ppy = data['test_ppy']
    test_qpy = data['test_qpy']
    test_prad = data['test_prad']
    test_qrad = data['test_qrad']
    test_y = data['test_label']
    #tensorboard_path = 'tensorboard'
    model = siamese_model()
    # Dump graph statistics (FLOPs/params) for the freshly built model.
    sess = K.get_session()
    graph = sess.graph
    stats_graph(graph)
    model_path = 'nni21.best.h5'
    # Keep only the weights with the best dev accuracy.
    checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', period=1)
    #tensorboard = TensorBoard(log_dir=tensorboard_path)
    earlystopping = EarlyStopping(monitor='val_acc', patience=10, verbose=0, mode='max')
    reduce_lr = ReduceLROnPlateau(monitor='val_acc', patience=5, mode='max')
    callbackslist = [checkpoint,earlystopping, reduce_lr]
    history = model.fit([train_pw, train_pc, train_qw, train_qc, train_ppy, train_qpy, train_prad, train_qrad], train_y,
                        batch_size=512,
                        epochs=200,
                        validation_data=([dev_pw, dev_pc, dev_qw, dev_qc, dev_ppy, dev_qpy, dev_prad, dev_qrad], dev_y),
                        callbacks=callbackslist)
    '''
    ## Add graphs here
    import matplotlib.pyplot as plt
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.plot(history.history['precision'])
    plt.plot(history.history['val_precision'])
    plt.plot(history.history['recall'])
    plt.plot(history.history['val_recall'])
    plt.plot(history.history['f1_score'])
    plt.plot(history.history['val_f1_score'])
    plt.xlabel('epoch')
    plt.legend(['train loss', 'val loss','train accuracy', 'val accuracy','train precision', 'val precision','train recall', 'val recall','train f1_score', 'val f1_score'], loc=3,
               bbox_to_anchor=(1.05,0),borderaxespad=0)
    pic = plt.gcf()
    pic.savefig ('pic.eps',format = 'eps',dpi=1000)
    plt.show()
    '''
    print('asd')
    # Evaluate the weights as of the last training epoch.
    loss, accuracy, precision, recall, f1_score = model.evaluate([test_pw, test_pc, test_qw, test_qc, test_ppy, test_qpy, test_prad, test_qrad], test_y, verbose=1, batch_size=256)
    print("model =loss: %.4f, accuracy:%.4f, precision:%.4f,recall: %.4f, f1_score:%.4f" % (
        loss, accuracy, precision, recall, f1_score))
    x = "model =loss: %.4f, accuracy:%.4f, precision:%.4f,recall: %.4f, f1_score:%.4f" % (
        loss, accuracy, precision, recall, f1_score)
    # Rebuild the model and load the best checkpointed weights for comparison.
    model = siamese_model()
    model.load_weights(model_path)
    loss, accuracy, precision, recall, f1_score = model.evaluate([test_pw, test_pc, test_qw, test_qc, test_ppy, test_qpy, test_prad, test_qrad], test_y, verbose=1, batch_size=256)
    y = "best model =loss: %.4f, accuracy:%.4f, precision:%.4f,recall: %.4f, f1_score:%.4f" % (loss, accuracy, precision, recall, f1_score)
    # Append (not overwrite) both result lines for later aggregation.
    with open('nni.txt', 'a') as f:
        f.write(x)
        f.write('\n')
        f.write(y)
        f.write('\n')
# Script entry point: kick off a full training run.
if __name__ == '__main__':
    train()
1628407 | <reponame>hawkowl/axiom
from setuptools import setup, find_packages
from setuptools.command.install import install as Install
import re
# Single-source the package version from axiom/_version.py instead of
# importing the package (which may not be importable at build time).
versionPattern = re.compile(r"""^__version__ = ['"](.*?)['"]$""", re.M)
with open("axiom/_version.py", "rt") as f:
    version = versionPattern.search(f.read()).group(1)
class InstallAndRegenerate(Install):
    """setuptools 'install' command that also rebuilds the Twisted plugin cache."""
    def run(self):
        """
        Runs the usual install logic, then regenerates the plugin cache.
        """
        Install.run(self)
        _regenerateCache()
def _regenerateCache():
    """Force Twisted to rescan and cache plugins (dropin.cache files)."""
    from twisted import plugin
    from axiom import plugins
    # Enumerating the plugins is what triggers the cache regeneration.
    list(plugin.getPlugins(plugin.IPlugin)) # Twisted
    list(plugin.getPlugins(plugin.IPlugin, plugins)) # Axiom
# Package metadata; 'version' is parsed above from axiom/_version.py.
setup(
    name="Axiom",
    version=version,
    description="An in-process object-relational database",
    url="https://github.com/twisted/axiom",
    maintainer="Divmod, Inc.",
    maintainer_email="<EMAIL>",
    install_requires=[
        "Twisted>=13.2.0",
        "Epsilon>=0.7.0"
        ],
    packages=find_packages() + ['twisted.plugins'],
    scripts=['bin/axiomatic'],
    # Custom install step regenerates the Twisted plugin cache (see above).
    cmdclass={
        "install": InstallAndRegenerate,
    },
    include_package_data=True,
    license="MIT",
    platforms=["any"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Twisted",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 2 :: Only",
        "Topic :: Database"])
| StarcoderdataPython |
1722687 | """
Communications
--------------
A module containing Windows functions related to communications.
"""
from pywincffi.core import dist
from pywincffi.core.checks import NON_ZERO, input_check, error_check
from pywincffi.wintypes import HANDLE, wintype_to_cdata
def ClearCommError(hFile):
    """
    Retrieves information about a communications error and reports the
    current status of a communications device.
    .. seealso::
        https://msdn.microsoft.com/en-us/aa363180
    :param pywincffi.wintypes.HANDLE hFile:
        A handle to the communications device, typically created by
        :func:`CreateFile`
    :rtype: tuple
    :return:
        Returns a two element tuple containing the ``lpErrors`` and
        ``lpStat`` result objects.
        * ``lpErrors`` - Contains the mast indicating the type of error
        * ``lpStat`` - A ``COMSTAT`` structure which contains the device's
          information.
    """
    input_check("hFile", hFile, HANDLE)
    ffi, library = dist.load()
    # cffi out-parameters: the Windows API fills these in.
    lpErrors = ffi.new("LPDWORD")
    lpStat = ffi.new("LPCOMSTAT")
    code = library.ClearCommError(wintype_to_cdata(hFile), lpErrors, lpStat)
    # Raises WindowsAPIError unless the call returned non-zero (success).
    error_check("ClearCommError", code=code, expected=NON_ZERO)
    # TODO: Build Python instance of COMSTAT here!
    return lpErrors, lpStat
| StarcoderdataPython |
3242385 | <reponame>siva-moturi/vessel-ml
import argparse
import logging
import sys
import os
import pandas as pd
from kubernetes import client, config, watch
from kfmd import metadata
from datetime import datetime
import pandas
# In-cluster address of the Kubeflow metadata service.
KUBEFLOW_METADATA_URL_PREFIX = "metadata-service.kubeflow:8080"
# Vessel Kubeflow metadata accessor
class R2d2(object):
    """Read-only accessor for artifacts stored in the Kubeflow metadata service.

    Connects to one metadata workspace and exposes the recorded Metrics and
    Model artifacts as pandas DataFrames, optionally filtered by model id.
    """
    def __init__(self, workspace_name="vessel-xgboost-example", desc=""):
        # NOTE(review): `desc` is accepted but unused, kept for interface
        # compatibility with existing callers.
        self._ws_name = workspace_name
        self._ws = metadata.Workspace(
            # Connect to metadata-service in namespace kubeflow in k8s cluster.
            backend_url_prefix=KUBEFLOW_METADATA_URL_PREFIX,
            name=self._ws_name)

    def _list_artifacts(self, artifact_type_name, model_id):
        # Shared helper: fetch all artifacts of one type as a DataFrame and
        # optionally keep only the rows belonging to `model_id`. Previously
        # this logic was duplicated (with inconsistent pandas aliases) in
        # both public getters.
        df = pd.DataFrame.from_dict(self._ws.list(artifact_type_name))
        if model_id is None:
            return df
        return df[df["model_id"] == str(model_id)]

    def get_metrics_data(self, model_id=None):
        """Return recorded Metrics artifacts, optionally filtered by model id."""
        return self._list_artifacts(metadata.Metrics.ARTIFACT_TYPE_NAME, model_id)

    def get_model_data(self, model_id=None):
        """Return recorded Model artifacts, optionally filtered by model id."""
        return self._list_artifacts(metadata.Model.ARTIFACT_TYPE_NAME, model_id)
class R2d2Logger(object):
    """Writer side of the Kubeflow metadata integration.

    On construction it opens a workspace, starts a run and an execution;
    log_model()/log_metrics() then attach artifacts to that execution.
    NOTE(review): every `desc` parameter below is accepted but the stored
    descriptions are hard-coded to "" -- confirm whether that is intended.
    """
    def __init__(self, workspace_name="vessel-xgboost-example",
                 owner="<EMAIL>",
                 execution_name_prefix="exec",
                 run_name_prefix ="run", desc=""):
        self.create_execution(workspace_name, owner, execution_name_prefix, run_name_prefix, desc)
    def create_execution(self, workspace_name, owner, execution_name_prefix, run_name_prefix, desc):
        """Open the workspace and create a uniquely named run + execution."""
        self._ws_name=workspace_name
        self._owner = owner
        self._ws= metadata.Workspace(
            # Connect to metadata-service in namespace kubeflow in k8s cluster.
            backend_url_prefix=KUBEFLOW_METADATA_URL_PREFIX,
            name=self._ws_name)
        # UTC timestamps make run/execution names unique per invocation.
        self._r = metadata.Run(
            workspace=self._ws,
            name="run" + "-" + run_name_prefix + "-" + datetime.utcnow().isoformat("T"),
            description="")
        self._exec = metadata.Execution(
            name = "execution" + "-" + execution_name_prefix + "-" + datetime.utcnow().isoformat("T"),
            workspace=self._ws,
            run=self._r,
            description="")
        # Set by log_model(); log_metrics() links metrics to this model's id.
        self._model= None
    def log_model(self, name, framework_dict, hyper_param_dict, desc="", model_file_uri="", model_type=""):
        """Record a trained model artifact and remember it for metric logging."""
        self._model = self._exec.log_output(metadata.Model(
            name=name,
            description=desc,
            owner=self._owner,
            uri=model_file_uri,
            model_type=model_type,
            training_framework=framework_dict,
            hyperparameters=hyper_param_dict,
            version=datetime.utcnow().isoformat("T")))
        return self._model
    def log_metrics(self, name, metrics_dict, desc="", uri="gcs://path/to/metrics"):
        """Record validation metrics linked to the most recently logged model.

        Assumes log_model() was called first (otherwise self._model is None
        and this raises AttributeError).
        """
        metrics = self._exec.log_output(metadata.Metrics(
            name= name,
            owner=self._owner,
            description= desc,
            uri=uri,
            model_id=self._model.id,
            metrics_type=metadata.Metrics.VALIDATION,
            values = metrics_dict))
        return metrics
| StarcoderdataPython |
1799216 | <filename>features/bdd/account_util.py
# -*- coding: utf-8 -*-
import json
import time
import logging
import requests
from datetime import datetime, timedelta
from bdd.client import RestClient
from bdd import util as bdd_util
class Corp(object):
    """Lightweight handle to a corp account, identified by its server-side id."""

    def __init__(self, id):
        self.id = id

    def join_platform(self, platform_name):
        """Register this corp as a member of the named platform."""
        payload = {
            'platform_name': platform_name,
            'corp_id': self.id,
        }
        response = RestClient().put("account.platform_member_corp", payload)
        assert response.is_success
def __create_corp(username, display_name, corp_type):
    """Create a corp account of the given type and wrap its id in a Corp."""
    payload = {
        'username': username,
        # The display name falls back to the username when not provided.
        'display_name': display_name or username,
        'password': '<PASSWORD>',
        'type': corp_type
    }
    resp = RestClient().put("account.corp", payload)
    assert resp.is_success
    return Corp(resp.data['id'])
def create_general_corp(username, display_name=None):
    """Create a corp of type 'general'."""
    return __create_corp(username, display_name, 'general')
def create_platform_corp(username, display_name=None):
    """Create a corp of type 'platform'."""
    return __create_corp(username, display_name, 'platform')
def create_supplier_corp(username, display_name=None):
    """Create a corp of type 'supplier'."""
    return __create_corp(username, display_name, 'supplier')
| StarcoderdataPython |
39422 | from django.apps import AppConfig
class SuperlistsConfig(AppConfig):
    """Django app configuration for the 'superlists' application."""
    name = 'superlists'
| StarcoderdataPython |
117852 | """
main.py
"""
import re
import os
import os.path
import sys
import multiprocessing
import importlib
import os.path
import timeit
import cProfile
import tsscraper
class Application(object):
    """CLI driver: scrape a target directory with TSScraper and hand the
    results to a user-selected exporter plugin found under ./exporters."""
    # Class-level defaults; instances overwrite the last three from argv.
    thread_count = 8
    threads = None  # NOTE(review): never assigned or read elsewhere in this class.
    target_directory = None
    target_exporter = None
    def print_usage(self):
        # Shown on missing/invalid command line arguments.
        print("Usage: '%s <exporter> <output directory> <target directories...>'" % sys.argv[0])
        print("Or: '%s exporters' for a list of known exporters." % sys.argv[0])
    def get_available_exporters(self):
        """Discover exporter plugins: every importable module in ./exporters
        (excluding __init__), keyed by module name."""
        exporters = { }
        for root, dirs, files in os.walk("exporters"):
            for filename in files:
                module_name, extension = os.path.splitext(filename)
                if (module_name == "__init__"):
                    continue
                try:
                    module = importlib.import_module('exporters.%s' % (module_name))
                    exporters[module_name] = module
                except ImportError as e:
                    # A broken plugin is reported but does not abort discovery.
                    print(e)
        return exporters
    def main(self):
        """
        The main entry point of the application. Parses sys.argv:
        argv[1] = exporter name (or the literal 'exporters' to list them),
        argv[2] = output directory, argv[3] = target directory.
        """
        if (len(sys.argv) < 2):
            self.print_usage()
            return
        exporters = self.get_available_exporters()
        if (sys.argv[1] == "exporters"):
            print("Available Exporters: ")
            for exporter in exporters:
                print("\t- %s" % exporter)
            print("\t- None")
            return
        elif(len(sys.argv) < 4):
            self.print_usage()
            return
        self.target_directory = sys.argv[3]
        self.output_directory = sys.argv[2]
        self.target_exporter = sys.argv[1]
        self.run()
    def run(self):
        """Process 'base' (if present) then the target directory, and export."""
        exporter = None
        # The literal exporter name "none" skips exporting entirely.
        if (self.target_exporter.lower() != "none"):
            exporters = self.get_available_exporters()
            try:
                exporter = exporters[self.target_exporter]
            except KeyError as e:
                print("Error: No such exporter '%s'." % self.target_exporter)
                self.print_usage()
                return
        # First, process base
        base_results = None
        if (os.path.isdir("base") is False):
            print("Warning: No local copy of base found! Some reference checks will report false positives.")
        else:
            print("INFO: Processing base ...")
            base_scraper = tsscraper.TSScraper("base", self.thread_count)
            base_results = base_scraper.process()
        print("INFO: Processing '%s' ..." % self.target_directory)
        # base_results (possibly None) feeds reference checks in the scraper.
        scraper = tsscraper.TSScraper(self.target_directory, self.thread_count, base_results)
        results = scraper.process()
        # Init the exporter
        print("INFO: Exporting data ...")
        if (exporter is not None):
            # Ensure that the output directory at least exists
            try:
                os.mkdir(self.output_directory)
            except OSError:
                # Already exists (or not creatable); exporter will surface real errors.
                pass
            output = exporter.Exporter(results, self.target_directory)
            output.write(self.output_directory)
if __name__ == "__main__":
print("Operation Completion-----------------------\n%f Seconds" % timeit.timeit("Application().main()", number=1, setup="from __main__ import Application"))
| StarcoderdataPython |
77144 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from typing import List
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded fallback secret key ships with the repo --
# production deployments must set DJANGO_SECRET_KEY.
SECRET_KEY = os.environ.get(
    "DJANGO_SECRET_KEY", "<KEY>"
)
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): os.environ.get returns a *string*; DEBUG="False" in the
# environment is truthy, so this cannot actually disable debug -- confirm.
DEBUG = os.environ.get("DEBUG", True)
ALLOWED_HOSTS: List[str] = [
    "app.so.spb.ru",
    "localhost",
    "127.0.0.1",
    "192.168.0.137",
    "f435-188-243-182-111.ngrok.io",
    "api.dev.matyagin.ru",
]
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.humanize",
    "django.contrib.staticfiles",
    "rest_framework",
    "reversion",  # https://github.com/etianen/django-reversion
    "reversion_compare",  # https://github.com/jedie/django-reversion-compare
    "rest_framework_swagger",
    "django_fsm",
    "fsm_admin",
    "django_fsm_log",
    "drf_yasg",
    "django_admin_listfilter_dropdown",
    "core",
    "user",
    "so",
    "event",
    "voting",
    "corsheaders",
    "channels",
    "django_select2",
    "report",
    "shtab",
    "area",
    "apply",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "reversion.middleware.RevisionMiddleware",
]
ROOT_URLCONF = "app.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [BASE_DIR, os.path.join(BASE_DIR, "templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
# DRF defaults: camelCase JSON on the wire, paginated responses.
REST_FRAMEWORK = {
    "DEFAULT_PAGINATION_CLASS": "core.pagination.StyledPagination",
    "PAGE_SIZE": 20,
    "DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
    "DATETIME_FORMAT": "%Y-%m-%dT%H:%M:%S.%fZ",
    "DEFAULT_RENDERER_CLASSES": (
        "djangorestframework_camel_case.render.CamelCaseJSONRenderer",
        "djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer",
    ),
    "DEFAULT_PARSER_CLASSES": (
        "djangorestframework_camel_case.parser.CamelCaseFormParser",
        "djangorestframework_camel_case.parser.CamelCaseMultiPartParser",
        "djangorestframework_camel_case.parser.CamelCaseJSONParser",
    ),
}
WSGI_APPLICATION = "app.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "HOST": os.getenv("DB_HOST"),
        "NAME": os.getenv("DB_NAME"),
        "USER": os.getenv("DB_USER"),
        "PASSWORD": os.getenv("DB_PASS"),
    },
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
# NOTE(review): all password validators are commented out -- any password is accepted.
# AUTH_PASSWORD_VALIDATORS = [
#     {
#         'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
#     },
# ]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "ru-RU"
TIME_ZONE = "Europe/Moscow"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
STATIC_ROOT = os.path.join(BASE_DIR, "/vol/web/static")
MEDIA_ROOT = os.path.join(BASE_DIR, "/vol/web/media")
AUTH_USER_MODEL = "core.User"
# Cross-origin access for the web/VK-mini-app front ends.
CORS_ALLOWED_ORIGINS = [
    "http://localhost:3000",
    "http://localhost:3001",
    "http://127.0.0.1:3000",
    "https://vk-mini-so.vercel.app",
    "https://app.so.spb.ru",
]
CORS_ALLOWED_ORIGIN_REGEXES = [r"^https://\w+\.ngrok\.io$"]
ADD_REVERSION_ADMIN = True
ASGI_APPLICATION = "app.asgi.application"
# Redis backs both the channels layer and the caches below.
REDIS_HOST = os.getenv("REDIS_HOST")
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [(REDIS_HOST, 6379)],
        },
    },
}
CACHES = {
    # … default cache config and others
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"redis://{REDIS_HOST}:6379/2",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    },
    "select2": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"redis://{REDIS_HOST}:6379/2",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    },
}
# Tell select2 which cache configuration to use:
SELECT2_CACHE_BACKEND = "select2"
| StarcoderdataPython |
3221761 | # set implementation maybe better keeping track of black tiles
# %%
import re
# Advent of Code 2020 day 24, part 1: follow each line of hex-direction
# steps from the origin and flip the destination tile.
# NOTE(review): file handle is never closed; harmless for a one-shot script.
f=open('test0.txt')
lines = [line.rstrip('\n') for line in f]
# Axial-ish hex coordinates as (row, col); east/west move 2 columns so all
# six neighbours stay on integer coordinates.
delta = {
    'e':(0,2),
    'se':(1,1),
    'sw':(1,-1),
    'w':(0,-2),
    'nw':(-1,-1),
    'ne':(-1,1)}
blacks = set()
for line in lines:
    # Greedy tokenisation: two-letter directions must come before 'e'/'w'.
    instructions = re.findall(r'(sw|se|e|w|ne|nw)',line)
    R = 0
    C = 0
    for ins in instructions:
        DR, DC = delta[ins]
        R += DR
        C += DC
    # Flip the destination tile: remove if black, add if white.
    if (R,C) in blacks:
        blacks.remove((R,C))
    else:
        blacks.add((R,C))
print(len(blacks))
#%%
# part 2
def getneigh(coor):
    """Return the set of the six hex-grid neighbours of *coor* (row, col)."""
    row, col = coor
    neighbours = set()
    for d_row, d_col in delta.values():
        neighbours.add((row + d_row, col + d_col))
    return neighbours
def count_neigh(coor):
    # Number of black neighbours of the tile at *coor*.
    return len(getneigh(coor) & blacks)
# Part 2: 100 days of Conway-style evolution on the hex grid.
# A white tile with exactly 2 black neighbours turns black; a black tile
# stays black only with 1 or 2 black neighbours.
for i in range(100):
    neighbors = set().union(*(getneigh(b) for b in blacks))
    neighbors -= blacks # only white neighbors
    newblacks = {n for n in neighbors if count_neigh(n) == 2}
    blacks = {b for b in blacks if 0 < count_neigh(b) <= 2} | newblacks
print(len(blacks))
# %%
| StarcoderdataPython |
19525 | # -*- coding: utf-8 -*-
import pickle
from sklearn.ensemble import RandomForestClassifier
from base_shallow_classifier import BaseShallowClassifier
class RFClassifier(BaseShallowClassifier):
    '''
    Image classification using random forest classifier (RFC).
    Can reach 87.82% accuracy on test set of FashionMNIST datasets
    using the following parameters:
    - n_estimators=160
    - min_samples_split=2
    Note: actual accuracy may vary based on intial seed.
    '''
    def __init__(self, load_data=True):
        '''
        Simply calls parent's constructor,
        which in turn calls load_data method (if needed).
        '''
        super().__init__(load_data)
    def get_algorithm(self):
        '''
        Returns a fresh, default-parameter RFC instance;
        used by the parent's cross_validation method.
        '''
        return RandomForestClassifier()
    def train_model(self, save_path, max_obs=None,
                    n_estimators=10, min_samples_split=2):
        '''
        Trains the model on the (optionally truncated to max_obs rows)
        training set of FashionMNIST and pickles it to save_path.
        Raises ValueError if the datasets were not loaded.
        '''
        if self.train_data is None or self.train_labels is None:
            raise ValueError('Fashion MNIST datasets is not loaded')
        # max_obs=None (or 0) means "use the whole training set".
        last_train_index = max_obs if max_obs else self.train_data.shape[0]
        train_data = self.train_data[:last_train_index]
        train_labels = self.train_labels[:last_train_index]
        self.model = RandomForestClassifier(n_estimators=n_estimators,
                                            min_samples_split=min_samples_split)
        self.model.fit(train_data, train_labels)
        # Persist the fitted model for later reuse.
        with open(save_path, 'wb') as f:
            f.write(pickle.dumps(self.model))
| StarcoderdataPython |
1694790 | #px8 / python cartridge
#version 1
#__python__
# Paths to the demo's bundled music track and sound effects.
CHIPTUNE_MUSIC = "./examples/assets/AmsterdamBoppe.kt"
CHIPTUNE_SOUND_1 = "./examples/assets/the_horror.ki"
CHIPTUNE_SOUND_2 = "./examples/assets/clap.ki"
class Button(object):
    """Clickable rectangular button for the chiptune demo UI.

    The rectangle spans (x1, y1)-(x2, y2). ``clicked`` tracks whether the
    last reported mouse position fell inside the rectangle; ``highlight``
    sets its initial state.
    """
    def __init__(self, x1, y1, x2, y2, color, text, highlight=False):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.color = color
        self.text = text
        # bool() coercion replaces the redundant `True if highlight else False`.
        self.clicked = bool(highlight)
    def update(self, x, y):
        """Set clicked iff (x, y) lies inside this button's rectangle."""
        self.clicked = (self.x1 <= x <= self.x2 and
                        self.y1 <= y <= self.y2)
    def draw(self):
        # rectfill/px8_print are px8 runtime builtins; text colour 3 when clicked.
        rectfill(self.x1, self.y1, self.x2, self.y2, self.color)
        i = 3 if self.clicked else 1
        px8_print(self.text, self.x1 + 1, self.y1, i)
    def is_click(self):
        """Return whether the button is currently in the clicked state."""
        return self.clicked
class Text(object):
    """Static label widget rendered at (x, y) in the given colour."""

    def __init__(self, x, y, color, text):
        self.x, self.y = x, y
        self.color = color
        self.text = text

    def update(self, x, y):
        # Labels do not react to mouse input.
        pass

    def draw(self):
        # NOTE(review): draws the live music position, not self.text --
        # the stored text seems to act only as a menu identifier; confirm.
        px8_print(str(music_position()), self.x, self.y, self.color)
class InteractiveNumber(object):
    """A 0..128 value with tiny plus/minus click targets left of (x, y).

    Clicking the minus box lowers the value by 10 (floored at 0); the plus
    box raises it by 10 (capped at 128). Any change is reported through
    the volume_fct callback with the new value.
    """

    def __init__(self, x, y, color, volume_fct):
        self.x = x
        self.y = y
        self.color = color
        self.value = 128
        self.text = 'Unknown'
        self.volume_fct = volume_fct
        left = self.x - 4
        top = self.y - 4
        # [x1, y1, x2, y2] hit boxes: minus at row y, plus 4 pixels above.
        self.rect_minus = [left, self.y, left + 2, self.y + 2]
        self.rect_plus = [left, top, left + 2, top + 2]

    @staticmethod
    def _hit(rect, x, y):
        # True when (x, y) lies inside the [x1, y1, x2, y2] rectangle.
        x1, y1, x2, y2 = rect
        return x1 <= x <= x2 and y1 <= y <= y2

    def update(self, x, y):
        """Apply a click at (x, y); notify the callback when the value moves."""
        minus_hit = self._hit(self.rect_minus, x, y)
        if minus_hit:
            self.value = max(0, self.value - 10)
        plus_hit = self._hit(self.rect_plus, x, y)
        if plus_hit:
            self.value = min(128, self.value + 10)
        if minus_hit or plus_hit:
            self.volume_fct(self.value)

    def draw(self):
        rectfill(self.rect_minus[0], self.rect_minus[1], self.rect_minus[2], self.rect_minus[3], self.color)
        rectfill(self.rect_plus[0], self.rect_plus[1], self.rect_plus[2], self.rect_plus[3], self.color)
        px8_print(str(self.value), self.rect_minus[0]-15, self.rect_minus[1]-4, 7)
# Static widget layout of the demo: transport buttons, a volume control,
# a live position read-out and two one-shot sound buttons.
CHIPTUNE_MENU = {
    'Volume': InteractiveNumber(18, 74, 7, music_volume),
    'Play': Button(20, 70, 40, 78, 7, 'Play'),
    'Stop': Button(42, 70, 62, 78, 7, 'Stop'),
    'Pause': Button(64, 70, 84, 78, 7, 'Pause'),
    'Resume': Button(86, 70, 110, 78, 7, 'Resume'),
    'Position': Text(8, 80, 7, 'Position'),
    'Sound1': Button(20, 80, 46, 88, 7, 'Sound1'),
    'Sound2': Button(48, 80, 74, 88, 7, 'Sound2'),
}
def _init():
    """px8 init callback: enable the mouse cursor."""
    show_mouse()
def _update():
    """px8 update callback: route mouse clicks to the matching menu action."""
    if mouse_state():
        mousex, mousey = mouse_x(), mouse_y()
        for item in CHIPTUNE_MENU.values():
            # Each widget decides for itself whether the click hit it.
            item.update(mousex, mousey)
            if item.text == 'Play' and item.is_click():
                music(-1, CHIPTUNE_MUSIC, 0, 0)
            elif item.text == 'Stop' and item.is_click():
                music_stop()
            elif item.text == 'Pause' and item.is_click():
                music_pause()
            elif item.text == 'Resume' and item.is_click():
                music_resume()
            elif item.text == 'Sound1' and item.is_click():
                sfx(-1, CHIPTUNE_SOUND_1)
            elif item.text == 'Sound2' and item.is_click():
                sfx(-1, CHIPTUNE_SOUND_2)
def _draw():
    """px8 draw callback: clear the screen and render title plus widgets."""
    cls()
    px8_print("CHIPTUNE", 10, 60, 7)
    for item in CHIPTUNE_MENU.values():
        item.draw()
60872 | """load cmat cmat10.
import matplotlib.pyplit as plt
import seaborn as sns
sns.set()
plt.ion() # interactive plot
plt.clf(); sns.heatmap(cmat, cmap="gist_earth_r").invert_yaxis()
plt.clf(); sns.heatmap(cmat, cmap="viridis_r").invert_yaxis()
"""
import pickle
from pathlib import Path
cdir = Path(__file__).parent.resolve()
# Load the cosine-similarity matrices shipped next to this module; the
# context managers close the file handles instead of leaking them (the
# original passed bare open(...) handles straight to pickle.load).
with open(cdir / "cos_matrix.pkl", "rb") as _f:
    cmat = pickle.load(_f)
with open(cdir / "cos_matrix10.pkl", "rb") as _f:
    cmat10 = pickle.load(_f)
| StarcoderdataPython |
188169 | <filename>models/search_result.py
# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from . import util
class SearchResult(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, name=None, full_name=None):  # noqa: E501
        """SearchResult - a model defined in Swagger
        :param name: The name of this SearchResult.  # noqa: E501
        :type name: str
        :param full_name: The full_name of this SearchResult.  # noqa: E501
        :type full_name: str
        """
        # Swagger serialization metadata: attribute -> python type ...
        self.swagger_types = {
            'name': str,
            'full_name': str
        }
        # ... and attribute -> JSON key mapping.
        self.attribute_map = {
            'name': 'name',
            'full_name': 'fullName'
        }
        # NOTE(review): assigning the private fields directly bypasses the
        # setters' None-checks, so None defaults are accepted here even
        # though the setters below reject None -- generator convention.
        self._name = name
        self._full_name = full_name
    @classmethod
    def from_dict(cls, dikt):
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The SearchResult of this SearchResult.  # noqa: E501
        :rtype: SearchResult
        """
        return util.deserialize_model(dikt, cls)
    @property
    def name(self):
        """Gets the name of this SearchResult.
        :return: The name of this SearchResult.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this SearchResult.
        :param name: The name of this SearchResult.
        :type name: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def full_name(self):
        """Gets the full_name of this SearchResult.
        A fully-describing name of the package  # noqa: E501
        :return: The full_name of this SearchResult.
        :rtype: str
        """
        return self._full_name
    @full_name.setter
    def full_name(self, full_name):
        """Sets the full_name of this SearchResult.
        A fully-describing name of the package  # noqa: E501
        :param full_name: The full_name of this SearchResult.
        :type full_name: str
        """
        if full_name is None:
            raise ValueError("Invalid value for `full_name`, must not be `None`")  # noqa: E501
        self._full_name = full_name
| StarcoderdataPython |
4807017 | <filename>app/migrations/0026_usersubmission_publication_datetime.py
# Generated by Django 3.2.6 on 2021-08-27 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0025_alter_report_embed_code'),
]
operations = [
migrations.AddField(
model_name='usersubmission',
name='publication_datetime',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
| StarcoderdataPython |
3369403 | <reponame>DanielSchuette/CalciumImagingAnalyzer
'''
HELPER FUNCTIONS AND CLASSES! CalciumImagingAnalyzer App
developed by Daniel (<EMAIL>)
-> runs with python 2.7.14 and python 3.6.x on macOS High Sierra
repository: https://github.com/DanielSchuette/CalciumImagingAnalyzer.git
'''
current_app_version = "v0.2"
#####################################
#### Import All Required Modules ####
#####################################
import warnings, timeit
from datetime import datetime
with warnings.catch_warnings(): # suppresses keras' annoying numpy warning
warnings.simplefilter("ignore")
import keras
from keras import layers
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import tifffile as tiff # module downloaded from https://github.com/blink1073/tifffile.git
import numpy as np
import pandas as pd
import matplotlib
import time
matplotlib.use("TkAgg") # otherwise matplotlib will crash the app
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import pylab
import sys
from sys import platform
if sys.version_info[0] < 3:
import Tkinter as tk
import ttk as ttk
import tkMessageBox
else:
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import os, errno
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
from skimage import measure
from skimage import filters
from skimage.feature import canny
from scipy import ndimage as ndi
from skimage.filters import sobel
from skimage.morphology import watershed
#######################################
#### FigureCanvas Class Definition ####
#######################################
class scrollableFigure():
    '''
    scrollableFigure can display scrollable figures within a master canvas (usually a toplevel or frame widget).
    Possible parameters are 'figure', 'master', *args and **kwargs (to configure canvas; does not inherit from anything!).
    '''
    def __init__(self, figure, master, *args, **kwargs):
        # put a figure into a master window
        self.createScrollableFigure(figure=figure, master=master, *args, **kwargs)
    ## bind mousepad scroll events to window scrolling
    # two callback functions to handle "swiping" with mousepad
    def scroll_vertical_pop(self, event):
        # Negative sign: natural scrolling direction on macOS trackpads.
        self.popup_canvas.yview_scroll(-1 * event.delta, 'units')
    def scroll_horizontal_pop(self, event):
        self.popup_canvas.xview_scroll(-1 * event.delta, 'units')
    def createScrollableFigure(self, figure, master, *args, **kwargs):
        """Embed *figure* (a matplotlib Figure) in a scrollable Tk canvas
        inside *master*, wiring scrollbars, a sizegrip and wheel events."""
        # create a canvas within the popup window
        self.popup_canvas = tk.Canvas(master, *args, **kwargs)
        self.popup_canvas.grid(row=0, column=0, sticky=tk.NSEW)
        # set up scrollbars
        xScrollbar = tk.Scrollbar(master, orient=tk.HORIZONTAL)
        yScrollbar = tk.Scrollbar(master, orient=tk.VERTICAL)
        xScrollbar.grid(row=1, column=0, sticky=tk.EW)
        yScrollbar.grid(row=0, column=1, sticky=tk.NS)
        # Two-way wiring: canvas reports position, scrollbar drives the view.
        self.popup_canvas.config(xscrollcommand=xScrollbar.set)
        xScrollbar.config(command=self.popup_canvas.xview)
        self.popup_canvas.config(yscrollcommand=yScrollbar.set)
        yScrollbar.config(command=self.popup_canvas.yview)
        # add a size grip
        sizegrip = ttk.Sizegrip(master)
        sizegrip.grid(row=1, column=1, sticky=tk.SE)
        # plug in the figure
        figure_agg = FigureCanvasTkAgg(figure, self.popup_canvas)
        figure_canvas = figure_agg.get_tk_widget()
        figure_canvas.grid(sticky=tk.NSEW)
        self.popup_canvas.bind('<MouseWheel>', self.scroll_vertical_pop) # probably just the figure-canvas needs to be bound
        self.popup_canvas.bind('<Shift-MouseWheel>', self.scroll_horizontal_pop)
        figure_canvas.bind('<MouseWheel>', self.scroll_vertical_pop)
        figure_canvas.bind('<Shift-MouseWheel>', self.scroll_horizontal_pop)
        # lastly, connect figure with scrolling region
        self.popup_canvas.create_window(0, 0, window=figure_canvas)
        self.popup_canvas.config(scrollregion=self.popup_canvas.bbox(tk.ALL))
##########################################
#### ScrollableFrame Class Definition ####
##########################################
class scrollableFrame(ttk.Frame):
    '''
    scrollableFrame inherits from ttk.Frame and can be used to create a scrollable frame in the root window.

    Children should be placed inside `self.interior`; the frame keeps the canvas
    scroll region in sync with the interior's requested size (with a minimum of
    925x760 pixels).
    '''
    # two callback functions to handle "swiping" with mousepad
    def scroll_vertical(self, event):
        # negative factor reproduces the "natural" trackpad scrolling direction
        self.canvas.yview_scroll(-1 * event.delta, 'units')

    def scroll_horizontal(self, event):
        self.canvas.xview_scroll(-1 * event.delta, 'units')

    # class __init__ method
    def __init__(self, parent, *args, **kwargs):
        ttk.Frame.__init__(self, parent, *args, **kwargs)
        # create a vertical scrollbar
        vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
        vscrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=False)
        # create a horizontal scrollbar
        hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
        hscrollbar.pack(fill=tk.X, side=tk.BOTTOM, expand=False)
        # add a size grip
        sizegrip = ttk.Sizegrip(self)
        sizegrip.pack(in_=vscrollbar, side=tk.BOTTOM)
        # Create a canvas object and associate the scrollbars with it
        self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set, xscrollcommand=hscrollbar.set)
        self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        # Associate scrollbars with canvas view
        vscrollbar.config(command=self.canvas.yview)
        hscrollbar.config(command=self.canvas.xview)
        # set the view to 0,0 at initialization
        self.canvas.xview_moveto(0)
        self.canvas.yview_moveto(0)
        # bind mousepad scroll events to window scrolling
        self.canvas.bind('<MouseWheel>', self.scroll_vertical)
        self.canvas.bind('<Shift-MouseWheel>', self.scroll_horizontal)
        # create an interior frame to be created inside the canvas
        self.interior = ttk.Frame(self.canvas)
        interior = self.interior
        interior_id = self.canvas.create_window(0, 0, window=interior, anchor=tk.NW)

        # track changes to the canvas and frame width and sync them,
        # also updating the scrollbar
        def _configure_interior(event):
            # update the scrollbars to match the size of the inner frame
            # (925x760 is the minimum scroll region, matching the app's base window size)
            size = (max(925, interior.winfo_reqwidth()), max(760, interior.winfo_reqheight()))
            self.canvas.config(scrollregion='0 0 %s %s' % size)
            if interior.winfo_reqwidth() != self.canvas.winfo_width():
                # update the canvas's width to fit the inner frame
                self.canvas.config(width=interior.winfo_reqwidth())
        interior.bind('<Configure>', _configure_interior)
################################################
#### ScrollablePopupWindow Class Definition ####
################################################
class PopupWindow(tk.Toplevel):
    '''
    A configured toplevel popup window, intended to host scrollableFigure
    content. Accepts a 'master' widget, an optional window 'title', and any
    extra tk.Toplevel arguments.
    '''
    def __init__(self, master, title="", *args, **kwargs):
        # set up the underlying toplevel widget (explicit base-class call for
        # Python 2 compatibility, where Tkinter classes are old-style)
        tk.Toplevel.__init__(self, master, *args, **kwargs)
        # basic window configuration: caption, keyboard focus, close handler
        self.title(title)
        self.focus_force()
        self.protocol("WM_DELETE_WINDOW", self.destroy)
        self.returnPopup()

    def returnPopup(self):
        # weight 1 on row/column 0 lets an embedded figure resize with the window
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        return self
####################################
#### GUI popup window functions ####
####################################
# help page and about page - popup windows
# Text displayed by open_help_popup(): points the user at the project README.
helptext = """Please refer to the README file accompanying the GitHub repository or this software at:
<EMAIL>:DanielSchuette/CalciumImagingAnalyzer.git"""
# Text displayed by open_about_popup(): the MIT license plus repository link.
abouttext = """MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
GitHub Repository: <EMAIL>:DanielSchuette/CalciumImagingAnalyzer.git
"""
def open_help_popup():
    '''Show the help text in a standard info message box (Py2/Py3 compatible).'''
    # the message-box module was imported under a different name per version
    if sys.version_info[0] >= 3:
        messagebox.showinfo("Help", helptext)
    else:
        tkMessageBox.showinfo("Help", helptext)
def open_about_popup(master):
    '''
    Open a read-only "About" popup over `master`, showing the MIT license
    text (module-level `abouttext`) in a scrollable text widget.
    '''
    window = PopupWindow(master)
    window.title("About")
    window.minsize(300, 250)
    window.maxsize(600, 400)
    # text widget on the left, scrollbar on the right (pack order matters)
    text_widget = tk.Text(window, height=15, width=80)
    text_widget.pack(side=tk.LEFT, fill=tk.Y)
    yscroll = tk.Scrollbar(window)
    yscroll.pack(side=tk.RIGHT, fill=tk.Y)
    # wire scrollbar and text widget together
    text_widget.config(yscrollcommand=yscroll.set)
    yscroll.config(command=text_widget.yview)
    # fill in the license text, then make the widget read-only
    text_widget.insert(tk.END, abouttext)
    text_widget.config(state=tk.DISABLED)
def create_new_directories(save_directory):
    '''
    Ensure the output folder structure exists inside `save_directory`.

    Creates the subdirectories 'tiffs/', 'figures/' and 'results/' if they are
    missing. Safe to call repeatedly (existing folders are left untouched).

    Parameters:
        save_directory -- base directory that will hold all output files.

    Raises:
        Exception -- if a missing subdirectory cannot be created for a reason
                     other than it already existing (e.g. permissions).
    '''
    # The three branches of the original implementation were identical except
    # for the folder name, so they are collapsed into one loop.
    for subfolder in ("tiffs", "figures", "results"):
        path = "{}/{}/".format(save_directory, subfolder)
        if not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as error:
                # a concurrent creation of the same folder is not an error
                if error.errno != errno.EEXIST:
                    raise Exception("Could not create a '{}/' folder!".format(subfolder))
def save_tiffs(save_directory, image, save_tiff_checkbox):
    '''
    Save `image` as a timestamped .tif file into '<save_directory>/tiffs/'.

    Parameters:
        save_directory     -- base directory (must contain a 'tiffs/' folder,
                              see create_new_directories()).
        image              -- image array handed to tifffile's imsave.
        save_tiff_checkbox -- truthy value enables saving; falsy skips it.

    Failures while saving are reported on stdout instead of raised, so a bad
    save path does not crash the GUI.
    '''
    if save_tiff_checkbox:
        try:
            tiff.imsave("{dir}/tiffs/{day}_{time}_{name}.tif".format(
                dir=save_directory,
                day=datetime.now().strftime("%Y_%m_%d"),
                time=datetime.now().strftime("%H.%M.%S"),
                name="analysis_input"), image)
            print("Image saved to: " + "{}/{}".format(save_directory, "tiffs"))
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the intended best-effort
            # behavior without masking interpreter-level signals
            print("You did not save a .tif! Check the specified save directory!")
    else:
        print("No .tif files written to {}/{}!".format(save_directory, "tiffs"))
def save_pdf(save_directory, figure, save_pdf_checkbox, name):
    '''
    Save a matplotlib `figure` as a timestamped .pdf into
    '<save_directory>/figures/'.

    Parameters:
        save_directory    -- base directory (must contain a 'figures/' folder).
        figure            -- matplotlib figure object (must support .savefig).
        save_pdf_checkbox -- truthy value enables saving; falsy skips it.
        name              -- suffix used in the generated file name.

    Failures while saving are reported on stdout instead of raised, so a bad
    save path does not crash the GUI.
    '''
    if save_pdf_checkbox:
        try:
            figure.savefig("{dir}/figures/{day}_{time}_{name}.pdf".format(
                dir=save_directory,
                day=datetime.now().strftime("%Y_%m_%d"),
                time=datetime.now().strftime("%H.%M.%S"),
                name=name))
            print("Figure saved to: " + "{}/{}".format(save_directory, "figures"))
        except Exception:
            # narrowed from a bare `except:` (which also caught SystemExit
            # and KeyboardInterrupt) to Exception
            print("You did not save a .pdf! Check the specified save directory!")
    else:
        print("No .pdf files written to {}/{}".format(save_directory, "figures"))
def save_txt(save_directory, matrix, save_txt_checkbox, name):
    '''
    Save a numeric `matrix` as a timestamped .txt file (np.savetxt format)
    into '<save_directory>/results/'.

    Parameters:
        save_directory    -- base directory (must contain a 'results/' folder).
        matrix            -- array-like handed to np.savetxt.
        save_txt_checkbox -- truthy value enables saving; falsy skips it.
        name              -- suffix used in the generated file name.

    Failures while saving are reported on stdout instead of raised, so a bad
    save path does not crash the GUI.
    '''
    if save_txt_checkbox:
        try:
            np.savetxt("{dir}/results/{day}_{time}_{name}.txt".format(
                dir=save_directory,
                day=datetime.now().strftime("%Y_%m_%d"),
                time=datetime.now().strftime("%H.%M.%S"),
                name=name), matrix)
            print("Text file saved to: " + "{}/{}".format(save_directory, "results"))
        except Exception:
            # narrowed from a bare `except:` (which also caught SystemExit
            # and KeyboardInterrupt) to Exception
            print("You did not save a .txt! Check the specified save directory!")
    else:
        print("No .txt files written to {}/{}".format(save_directory, "results"))
#############################
#### Analysis Function 1 ####
#############################
def preprocessingFunction(image_number, cutoff1, cutoff2, file_path, save_directory, save_tiff_checkbox, save_pdf_checkbox,
                          figure_size=(9, 9)):
    '''
    Analysis function 1 Doc String: Explore different filters / data pre-processing
    The following code reads a .lsm file (maybe batches in a future version) and
    analyses them. This includes a plot of useful statistics.

    Parameters:
        image_number       -- 1-based index of the frame to analyze in the stack.
        cutoff1, cutoff2   -- gray-scale thresholds shown/applied in the plots.
        file_path          -- path to the input .lsm file.
        save_directory     -- base directory for .tif / .pdf output.
        save_tiff_checkbox -- if truthy, the raw stack is re-saved as .tif.
        save_pdf_checkbox  -- if truthy, the resulting figure is saved as .pdf.
        figure_size        -- matplotlib figure size in inches.

    Returns:
        A matplotlib Figure on success, or False if no valid .lsm file was given.
    '''
    # disable popup windows (also no plt.show("hold") otherwise tkinter won't show the figure in canvas)
    matplotlib.interactive(False)
    # read in .lsm data and return a numpy array with certain dimensions:
    if file_path and file_path.endswith(".lsm"):
        try:
            image = tiff.imread(file_path)
            print("You successfully imported a .lsm file from:" + "\n" + str(file_path) + ".")
            # indexing assumes a (1, 1, frames, y, x) stack with frames at axis 2
            # and at least 512x512 pixels — TODO confirm for other microscopes
            selected_image = (image[0, 0, (int(image_number)-1), 0:512, 0:512])
            print("You selected image number {}.".format(str(image_number)))
        except Exception as error: # raise exception if user has no permission to write in directory!
            raise error
    else:
        print("Specify a .lsm file to upload!")
        return(False)
    # create new directories for output files and save tiffs if checkbox is checked
    create_new_directories(save_directory=save_directory)
    save_tiffs(save_directory=save_directory, image=image, save_tiff_checkbox=save_tiff_checkbox)
    # check image dimensions before plotting
    print("Image format is " + str(selected_image.dtype) + " with dimensions " + str(selected_image.shape) + ".")
    # plot your image (use .set_action methods for axes!)
    fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3, figsize=figure_size)
    fig.canvas.set_window_title("Figure of {}".format(str(file_path)))
    fig.subplots_adjust(wspace=0.2, hspace=0.2, right=0.98, left=0.10, bottom=0.07, top=0.93)
    # subplot (1, 1): the raw selected frame
    ax1.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
    im1 = ax1.imshow(selected_image, cmap="viridis", interpolation="bicubic")
    # colormaps "jet", "gray, "viridis" work
    # "bicubic" interpolation smoothes the edges, "nearest" leads to a more pixelated figure
    colbar_ax = fig.add_axes([0.02, 0.57, 0.035, 0.33])
    # Add axes for colorbar at [left, bottom, width, height] (quantities are in fractions of figure)
    fig.colorbar(im1, cax=colbar_ax)
    ax1.set_title("Image {} of {}".format(str(image_number), str(image.shape[2])))
    # create a contour figure that extracts prominent features (origin upper left corner)
    ax2.contour(selected_image, origin="image", cmap="gray")
    ax2.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False) # Hide the axis but leave the spine
    ax2.set_title("Feature Extraction without\nPrior Background Reduction")
    # analyze the effect of masking certain pixel values from the image:
    # first, a histogram helps to see the distribution of pixel values
    ax3.hist(selected_image.ravel(), bins=256, range=(0.0, 256.0), fc="k", ec="k")
    ax3.set_yscale("log", nonposy = "clip")
    ax3.set_title("Histogram of Gray Scale\nValues in Image {}".format(str(image_number)))
    ax3.tick_params(width=1.5, which="both", labelsize=12)
    ax3.axvline(x=cutoff1, color="darkred", linewidth=3, linestyle='--')
    ax3.axvline(x=cutoff2, color="darkblue", linewidth=3, linestyle='--')
    ax3.legend(["Cutoff 1 = {}".format(str(cutoff1)), "Cutoff 2 = {}".format(str(cutoff2))], loc="upper right")
    # second, a scatter plot demonstrating the number of pixels below certain cutoff
    pixel_values = []
    cutoffs = []
    tmp1 = np.copy(selected_image) # --> assignment statements in python do NOT copy objects but create binding
    for cutoff in range(selected_image.max()):
        mask = tmp1 < cutoff
        tmp1[mask] = 0
        pixel_values.append(pylab.mean(mask) * 100)
        cutoffs.append(cutoff)
    # create another subplot where subplot '4' would usually be and plot scatter plot with y axis break
    # also, determine optimal break point for the upper panel ax4_1 using the second smallest pixel value
    y_limits_top = (pixel_values[1] - 2, 102)
    y_limits_bottom = (-0.5, 2)
    ax4_1 = plt.subplot2grid((6, 3), (3, 0), rowspan=2) # 0-indexed!
    ax4_2 = plt.subplot(6, 3, 16)
    ax4_1.scatter(x=cutoffs, y=pixel_values, s=20, c="darkred")
    ax4_1.set_title("% of Pixels Below Gray Scale Cutoff")
    ax4_1.tick_params(width=1.5, labelsize=12)
    ax4_1.set_ylim(y_limits_top)
    ax4_1.axvline(x=cutoff1, color="darkred", linewidth=3, linestyle='--')
    ax4_1.axvline(x=cutoff2, color="darkblue", linewidth=3, linestyle='--')
    ax4_1.tick_params(bottom = False, labelbottom = False)
    ax4_2.scatter(x=cutoffs, y=pixel_values, s=20, c="darkred")
    ax4_2.tick_params(width=1.5, labelsize=12)
    ax4_2.set_ylim(y_limits_bottom)
    ax4_2.set_xlabel("Gray Scale Value Cutoff")
    ax4_2.axvline(x=cutoff1, color="darkred", linewidth=3, linestyle='--')
    ax4_2.axvline(x=cutoff2, color="darkblue", linewidth=3, linestyle='--')
    ax4_2.legend(["Cutoff 1 = {}".format(str(cutoff1)), "Cutoff 2 = {}".format(str(cutoff2))], loc="lower right")
    # hide spines:
    ax4_1.spines["bottom"].set_visible(False)
    ax4_2.spines["top"].set_visible(False)
    # unfortunately the y label is not centered...
    ax4_1.set_ylabel("Percentage of Pixels Below Cutoff")
    # add diagonal 'break' lines
    d = .025 # size of diagonal lines in axes coordinates
    # arguments to pass to plot, just so we don't keep repeating them
    kwargs = dict(transform=ax4_1.transAxes, color="black", clip_on=False, lw=3)
    ax4_1.plot((-d, +d), (0, 0), **kwargs) # top-left diagonal
    ax4_1.plot((1 - d, 1 + d), (0, 0), **kwargs) # top-right diagonal
    kwargs.update(transform=ax4_2.transAxes) # switch to the bottom axes
    ax4_2.plot((-d, +d), (1, 1), **kwargs) # bottom-left diagonal
    ax4_2.plot((1 - d, 1 + d), (1, 1), **kwargs) # bottom-right diagonal
    # mask different gray scale values from the image
    tmp2 = np.copy(selected_image)
    tmp2[tmp2 < cutoff1] = 0
    ax5.contour(tmp2, origin="image", cmap="gray")
    ax5.set_title("Gray Scale Cutoff = {}".format(str(cutoff1)))
    ax5.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
    tmp3 = np.copy(selected_image)
    tmp3[tmp3 < cutoff2] = 0
    ax6.contour(tmp3, origin="image", cmap="gray")
    ax6.set_title("Gray Scale Cutoff = {}".format(str(cutoff2)))
    ax6.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
    # change width of spine and spine color for some of the subplots
    # NOTE(review): ax5 is listed twice and the unused ax4 placeholder is absent
    # (it was replaced by ax4_1/ax4_2) — harmless, but looks unintentional
    subplots_list = [ax1, ax2, ax3, ax4_1, ax4_2, ax5, ax5, ax6]
    for axis in subplots_list:
        [i.set_linewidth(2) for i in axis.spines.values()]
    # if the checkbox is checked, save figure as pdf
    save_pdf(save_directory=save_directory, figure=fig, save_pdf_checkbox=save_pdf_checkbox, name="exploratory_data_analysis")
    return(fig)
############################
#### Analysis 2 - Class ####
############################
# skimage help page (http://www.scipy-lectures.org/packages/scikit-image/auto_examples/plot_labels.html)
# also useful: https://stackoverflow.com/questions/46441893/connected-component-labeling-in-python
class ConnectedComponentsLabeling():
    '''
    ConnectedComponentsLabeling can be used to analyze a gray scale image with
    respect to the connected components ("cells") it contains.

    Pipeline (run by __init__):
      1. binarize the image at `pixel_threshold` and label connected clusters
         (`method="ccl"`) or segment via a watershed (`method="segmentation"`),
      2. compute every cluster's pixel count,
      3. keep clusters whose size lies in [min_threshold, max_threshold],
      4. relabel the kept clusters 1..N into `self.im_with_cells` (0 = background).

    Attributes set: im_ccl, clust_list, clust_index, im_with_cells.
    '''
    def __init__(self, input_image, pixel_threshold=200, min_threshold=100, max_threshold=10000, skimage=True, fully_connected=True,
                 method="ccl"):
        # monitor elapsed time
        timer_start = timeit.default_timer()
        # transform input image to a labeled cluster image
        if method == "ccl":
            self.im_ccl = self.transformToClusterImage(input_im=input_image, pixel_threshold=pixel_threshold, skimage=skimage,
                                                       fully_connected=fully_connected)
        elif method == "segmentation":
            self.im_ccl = self.imageSegmentation(input_im=input_image, pixel_threshold=pixel_threshold)
        else:
            raise ValueError("Enter a valid cell identification method! ('ccl', 'segmentation')")
        # find clusters in ccl image
        print("Looking for cells...")
        self.clust_list = self.findClusterSize(input_im_ccl=self.im_ccl)
        print("Cells found!")
        # apply min/max size thresholds to obtain the labels of plausible cells
        print("Applying min/max size thresholds...")
        self.clust_index = self.findClusterIndex(input_list=self.clust_list, min_threshold=min_threshold, max_threshold=max_threshold)
        # lastly, relabel the original image so only the kept clusters remain
        self.im_with_cells = self.findCellsInClusters(input_im_ccl=self.im_ccl, cluster_index=self.clust_index)
        # end and print counter
        timer_end = timeit.default_timer()
        print("Done!")
        print("{} sec elapsed.".format(timer_end - timer_start))

    def CCL_algorithm(self, binary_image, fully_connected):
        '''
        !!!!!!!!!!!!!!
        ATTENTION: Does currently not work. No second-pass loop with 'union-find'
        implemented. Thus, labels are still a mess! Use skimage's built-in
        function for connected components labeling instead (skimage=True).
        !!!!!!!!!!!!!!
        First pass of a connected components labeling algorithm. Takes a binary
        image (0, 1) as input. 'fully_connected' selects 8- vs 4-connectivity:
        # i = row index
        # j = column index
        ### which positions to test ###
        ###
        ### [i-1, j-1]  [i-1, j]  [i-1, j+1]
        ###          \     |     /
        ###  [i, j-1] - [i, j]
        ###
        '''
        print("Start CCL algorithm.")
        # initialize an all-0 np.array and a counter
        cluster_counter = 0
        # `int` instead of the deprecated (and in numpy>=1.24 removed) `np.int`
        ccl_image = np.zeros(shape=binary_image.shape, dtype=int)
        # iterate over image (actual algorithm)
        for i in range(0, binary_image.shape[0]):
            for j in range(0, binary_image.shape[1]):
                # test elements
                if binary_image[i, j] == 1:
                    ## test all adjacent elements
                    # -- 1 --
                    if fully_connected and i != 0 and j != 0 and binary_image[i-1, j-1] == 1:
                        ccl_image[i, j] = ccl_image[i-1, j-1]
                    # -- 2 --
                    elif i != 0 and binary_image[i-1, j] == 1:
                        ccl_image[i, j] = ccl_image[i-1, j]
                    # -- 3 --
                    elif binary_image[i, j-1] == 1:
                        ccl_image[i, j] = ccl_image[i, j-1]
                    # -- 4 --
                    # test whether element is last in a row as well!
                    elif fully_connected and j < (binary_image.shape[1]-1) and i != 0 and binary_image[i-1, j+1] == 1:
                        ccl_image[i, j] = ccl_image[i-1, j+1]
                    # if none of them is a 'positive' neighbor, assign a new cluster number to the element
                    else:
                        cluster_counter += 1
                        ccl_image[i, j] = cluster_counter
        return(ccl_image)
        # TODO: second pass ('union-find' label merging) still needs to be
        # implemented before this can replace skimage's measure.label.

    def transformToClusterImage(self, input_im, pixel_threshold, skimage, fully_connected):
        '''
        Transform input image to a binary image and label connected components,
        either with skimage's measure.label (skimage=True, recommended) or with
        the home-made (incomplete) CCL_algorithm. Output is a np.array with the
        same shape as the input and cluster labels as elements.
        '''
        internal_copy = np.copy(input_im)
        internal_copy[internal_copy < pixel_threshold] = 0
        internal_copy[internal_copy != 0] = 1
        if skimage:
            copy1_ccl = measure.label(internal_copy)
        else:
            copy1_ccl = self.CCL_algorithm(internal_copy, fully_connected)
        return(copy1_ccl)

    def imageSegmentation(self, input_im, pixel_threshold):
        '''
        Watershed-based segmentation; might be more robust than CCL under
        certain circumstances.
        Resource: http://scikit-image.org/docs/dev/user_guide/tutorial_segmentation.html
        '''
        markers = np.zeros_like(input_im)
        markers[input_im < pixel_threshold] = 1 # set pixel values to marker values depending on 'pixel_threshold'
        markers[input_im >= pixel_threshold] = 2
        elevation_map = sobel(input_im) # compute an elevation map
        segmentation = watershed(elevation_map, markers) # apply watershed algorithm
        segmentation2 = ndi.binary_fill_holes(segmentation - 1) # fill small holes
        labeled_image, x = ndi.label(segmentation2) # label cells in image
        return(labeled_image)

    def findClusterSize(self, input_im_ccl):
        '''
        Return a list with the pixel count of every labeled cluster, in label
        order. np.unique(return_counts=True) replaces the original (deprecated)
        per-cluster python loop, which was quadratic in the number of clusters.
        '''
        unique_clusts, counts_clusts = np.unique(input_im_ccl, return_counts=True)
        cluster_list = list(counts_clusts)
        # drop the first entry — assumes label 0 (background) is present in the
        # image, which holds after thresholding unless every pixel is foreground
        cluster_list.pop(0)
        return(cluster_list)

    def findClusterIndex(self, input_list, min_threshold, max_threshold):
        '''
        Return the 1-based labels of all clusters whose size lies within
        [min_threshold, max_threshold].

        Bug fix: the previous implementation looked labels up via
        list.index(size), which always returns the FIRST occurrence of a value;
        two clusters of identical size were therefore mapped to the same label.
        enumerate() assigns each cluster its own position instead.

        Raises:
            ValueError -- if no cluster falls into the requested size range.
        '''
        cluster_index = [position + 1
                         for position, size in enumerate(input_list)
                         if min_threshold <= size <= max_threshold]
        if len(cluster_index) == 0:
            raise ValueError("No cells in range {min} - {max}!".format(min=min_threshold, max=max_threshold))
        else:
            return(cluster_index)

    def findCellsInClusters(self, input_im_ccl, cluster_index):
        '''
        Relabel the clusters listed in `cluster_index` to 1..N (in list order)
        and zero out everything else. Returns a new array; the input image is
        left untouched.
        '''
        # duplicate input image to make sure that it does not get changed during analysis
        input_im_ccl_2 = np.copy(input_im_ccl)
        # old label -> new (1-based) label; building the dict once replaces the
        # per-pixel set(cluster_index) + list.index() of the old implementation
        label_map = {old: new + 1 for new, old in enumerate(cluster_index)}
        for row in range(0, input_im_ccl_2.shape[0]):
            for col in range(0, input_im_ccl_2.shape[1]):
                input_im_ccl_2[row, col] = label_map.get(input_im_ccl[row, col], 0)
        return(input_im_ccl_2)
############################
#### Analysis 3 - Class ####
############################
class AnalyzeSingleCells():
    '''
    Extracts, normalizes and plots per-cell fluorescence traces.

    To initialize an instance of this class, pass in a .lsm 'movie'
    (indexed movie[0, 0, frame, y, x]) and a mask in form of a 'ccl_object'
    (anything exposing `im_with_cells`, e.g. ConnectedComponentsLabeling).
    start/stop define the frame span whose mean is used as the F0 baseline
    for normalization.

    Attributes set: single_cell_traces, normalized_traces, figure.
    '''
    def __init__(self, input_movie, ccl_object, start, stop, method="mean", legend=True):
        '''
        Calls all class functions and ultimately produces self.figure.
        '''
        self.single_cell_traces = self.subsetWithCclObject(input_mov=input_movie, ccl_object=ccl_object, method=method)
        self.normalized_traces = self.NormalizeCellTraces(cell_traces=self.single_cell_traces, start=start, stop=stop)
        self.figure = self.PlotCellTraces(cell_traces=self.normalized_traces, legend=legend)

    def subsetWithCclObject(self, input_mov, ccl_object, method):
        '''
        For every labeled cell in ccl_object.im_with_cells, aggregate the
        masked pixel values of each movie frame. `method` selects the
        aggregation: "mean" or "sum". Returns an array of shape
        (n_cells, n_frames).

        Raises:
            ValueError -- for an unknown `method`.
        '''
        # pick the aggregation function once; the two branches of the original
        # implementation were identical except for np.mean vs np.sum
        if method == "mean":
            aggregate = np.mean
        elif method == "sum":
            aggregate = np.sum
        else:
            raise ValueError("Specify a valid method! ('mean', 'sum')")
        clusters_list = list()
        # loop over all cells or clusters that were identified
        for i in range(1, (ccl_object.im_with_cells.max() + 1)):
            mask = (ccl_object.im_with_cells == i) # mask for a particular cluster i
            cells_list = list()
            # loop over all frames and aggregate pixel values for cluster i
            for j in range(0, input_mov.shape[2]):
                tmp_im = input_mov[0, 0, j, :, :]
                cells_list.append(aggregate(tmp_im[mask]))
            clusters_list.append(cells_list)
        # return aggregated pixel values per cell, one row per cell
        return(np.array(clusters_list))

    def NormalizeCellTraces(self, cell_traces, start, stop):
        '''
        Normalize calcium imaging traces row-wise: each row is divided by the
        mean of its values in frames [start, stop). Outputs a float array of
        the same shape.
        '''
        # `float` instead of the deprecated (and in numpy>=1.24 removed) `np.float`
        output_array = np.zeros(shape=cell_traces.shape, dtype=float)
        for i in range(cell_traces.shape[0]):
            output_array[i, :] = np.divide(cell_traces[i, :], np.mean(cell_traces[i, start:stop]))
        return(output_array)

    def PlotCellTraces(self, cell_traces, legend):
        '''
        Takes a np.array with one or multiple rows and plots it as a time
        course. Use normalized data with this function! Returns the figure.
        '''
        # create a time scale for the x axis (1-based frame numbers)
        time_scale = np.arange(1, (cell_traces.shape[1] + 1))
        # set up a figure and a list to save legend labels to
        fig = plt.figure(figsize=(10, 10))
        legend_labels = list()
        # loop over rows in input np.array to plot all traces
        for i in range(cell_traces.shape[0]):
            plt.plot(time_scale, cell_traces[i, :])
            legend_labels.append("cell_{number}".format(number=i))
        # add a legend and return figure
        if legend:
            plt.legend(legend_labels, loc="upper left")
        plt.title("Single Cell Traces")
        plt.ylabel("F / F0 (Relative Fluorescence)")
        # NOTE(review): the x axis is frame index, labeled as seconds —
        # presumably one frame per second; confirm against acquisition settings
        plt.xlabel("Time (Secs)")
        return(fig)
############################
#### Analysis 4 - Class ####
############################
class TransformAndFilter():
    '''
    Placeholder for time-series post-processing (Fourier transform, Kalman
    filtering, ...). It is meant to take a time series object as input; the
    individual transformations are not implemented yet.
    '''
    def __init__(self, *args, **kwargs):
        '''Accept and ignore all arguments; no processing is performed yet.'''
        pass
| StarcoderdataPython |
110945 | <filename>open_spiel/python/games/tt_tournament.py
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Kuhn Poker implemented in Python.
This is a simple demonstration of implementing a game in Python, featuring
chance and imperfect information.
Python games are significantly slower than C++, but it may still be suitable
for prototyping or for small games.
It is possible to run C++ algorithms on Python implemented games, This is likely
to have good performance if the algorithm simply extracts a game tree and then
works with that. It is likely to be poor if the algorithm relies on processing
and updating states as it goes, e.g. MCTS.
"""
import enum
import numpy as np
from copy import deepcopy
import pyspiel
from typing import Dict
from open_spiel.python.games.tt_utils import *
_NUM_PLAYERS = 2
# per titan: pick, remove, 25 place. also pass
# (the "25 place" comment implies NUM_TILES == 25 — defined in tt_utils, TODO confirm)
_NUM_ACTIONS_PER_TITAN = (1 + 1 + NUM_TILES)
# one action block per titan, plus a single global "pass" action at the end
_NUM_ACTIONS = NUM_TITANS*_NUM_ACTIONS_PER_TITAN + 1
_MAX_GAME_LENGTH = 30
# TODO: allow removals?
# Per-round move-count budget (informal; not enforced anywhere in this chunk):
"""
r0: (8 titans) * 2 = 16
r1: (place 2) * 2 = 4
r2: (place 1) * 2 = 2
r3: (place 1) * 2 = 2
r4: (place 1) * 2 = 2
r5: (remove 1 place 1) * 2 = 4
"""
#harcoding special actions
# "pass" occupies the last action id (see _NUM_ACTIONS above)
PASS_ACTION = NUM_TITANS*_NUM_ACTIONS_PER_TITAN
# Static game type metadata consumed by the OpenSpiel framework.
_GAME_TYPE = pyspiel.GameType(
    short_name="ttt",
    long_name="Tiny Titans Tournament",
    dynamics=pyspiel.GameType.Dynamics.SEQUENTIAL,
    chance_mode=pyspiel.GameType.ChanceMode.DETERMINISTIC,
    information=pyspiel.GameType.Information.IMPERFECT_INFORMATION,
    utility=pyspiel.GameType.Utility.ZERO_SUM,
    reward_model=pyspiel.GameType.RewardModel.TERMINAL,
    max_num_players=_NUM_PLAYERS,
    min_num_players=_NUM_PLAYERS,
    provides_information_state_string=True,
    provides_information_state_tensor=True,
    provides_observation_string=True,
    provides_observation_tensor=True,
    provides_factored_observation_string=True)
# Numeric bounds for the game tree; utilities slightly exceed +/-1
# (presumably a tie-break margin — confirm against the returns logic).
_GAME_INFO = pyspiel.GameInfo(
    num_distinct_actions=_NUM_ACTIONS,
    max_chance_outcomes=0,
    num_players=_NUM_PLAYERS,
    min_utility=-1.03,
    max_utility=1.03,
    utility_sum=0.0,
    max_game_length=_MAX_GAME_LENGTH)
class TTTGame(pyspiel.Game):
    """A Python implementation of the Tiny Titans tournament game."""

    def __init__(self, params=None):
        super().__init__(_GAME_TYPE, _GAME_INFO, params or {})

    def new_initial_state(self):
        """Build and return the state for the start of a fresh game."""
        return TTTState(self)

    def make_py_observer(self, iig_obs_type=None, params=None):
        """Return an observer object for viewing the game state."""
        obs_type = iig_obs_type or pyspiel.IIGObservationType(perfect_recall=False)
        return TTTObserver(obs_type, params)
class Titans:
    '''
    One player's roster: which titans have been picked and where each one is
    placed. Backed by a dict mapping titan index -> TitanInfo; a TitanInfo
    with tile_index == -1 is picked but not placed on the board.
    '''

    def __init__(self):
        self.titans: Dict[int, TitanInfo] = {}

    def __str__(self):
        # one TitanInfo line per picked titan, sorted by titan index
        return "\n".join([str(self.titans[t]) for t in sorted(self.titans.keys())])

    def num_titans(self):
        '''Number of titans picked so far.'''
        return len(self.titans)

    def pick_titan(self, titan_index):
        '''Add a titan to the roster; it starts out unplaced.'''
        self.titans[titan_index] = TitanInfo(titan_index)

    def place_titan(self, titan_index, tile_index):
        '''Put an already-picked titan on board tile `tile_index`.'''
        self.titans[titan_index].tile_index = tile_index

    def unplace_titan(self, titan_index):
        '''Take a titan off the board (it stays picked).'''
        self.titans[titan_index].tile_index = -1

    def picked_titans(self):
        '''Indices of all picked titans, in pick order.'''
        # iterating the dict directly is equivalent to .keys() but avoids
        # the throwaway generator of the previous implementation
        return list(self.titans)

    def unpicked_titans(self):
        '''Indices of all titans not yet picked, in ascending order.'''
        # set membership is O(1) vs O(n) for the previous list lookup
        picked = set(self.titans)
        return [t for t in range(NUM_TITANS) if t not in picked]

    def placed_titans(self):
        '''Indices of picked titans currently on the board.'''
        return [t.index for t in self.titans.values() if t.tile_index != -1]

    def unplaced_titans(self):
        '''Indices of picked titans not currently on the board.'''
        return [t.index for t in self.titans.values() if t.tile_index == -1]

    def used_tiles(self):
        '''Tile indices of all picked titans (-1 entries mean "unplaced").'''
        return [t.tile_index for t in self.titans.values()]

    def unused_tiles(self):
        '''Tile indices not occupied by any of this player's titans.'''
        used = set(self.used_tiles())  # O(1) membership; -1 never matches a real tile
        return [t for t in range(NUM_TILES) if t not in used]
class TTTState(pyspiel.State):
"""A python version of the tt state."""
    def __init__(self, game):
        """Constructor; should only be called by Game.new_initial_state."""
        super().__init__(game)
        # roster snapshot kept alongside the current rosters — purpose not
        # visible in this chunk; presumably used by the observer, TODO confirm
        self.titans_prev = [Titans(), Titans()]
        self.titans = [Titans(), Titans()]          # current roster per player
        self.round = 0                              # 0 = picking phase, 1+ = placement rounds
        self.score = [0, 0]                         # running score per player
        self.actions = []                           # full action history
        self._removes_left = [1, 1]                 # each player may remove one titan (used in round 5)
        self._next_player = 0                       # player to move next
        self._game_over = False                     # set when the game ends
def _cur_max_placed_titans(self):
if self.round == 0:
return 0
return min(self.round+1, MAX_TITANS)
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every sequential-move game with chance.
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self._game_over:
return pyspiel.PlayerId.TERMINAL
else:
return self._next_player
def _legal_actions(self, player):
# per titan: pick, remove, 25 place
ret = []
my_titans = self.titans[player]
if self.round == 0: # picking phase
unpicked_titans = my_titans.unpicked_titans()
for titan_index in unpicked_titans:
ret.append(titan_index*_NUM_ACTIONS_PER_TITAN)
elif len(my_titans.placed_titans()) < self._cur_max_placed_titans():
unplaced_titans = my_titans.unplaced_titans()
unused_tiles = my_titans.unused_tiles()
for titan_index in unplaced_titans:
for tile_index in unused_tiles:
ret.append(titan_index*_NUM_ACTIONS_PER_TITAN+2+tile_index)
if self.round == 5: # can remove on 5
placed_titans = my_titans.placed_titans()
if self._removes_left[player] > 0:
for titan_index in placed_titans:
ret.append(titan_index*_NUM_ACTIONS_PER_TITAN+1) # remove
ret.append(PASS_ACTION) # pass
return ret
def chance_outcomes(self):
"""Returns the possible chance outcomes and their probabilities."""
assert self.is_chance_node()
assert False, "not implemented"
return 0
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self.is_chance_node():
assert False, "Not Implemented"
return
else:
self.actions.append(action)
my_titans = self.titans[self._next_player]
if action != PASS_ACTION:
titan_index = action // _NUM_ACTIONS_PER_TITAN
action_type = action % _NUM_ACTIONS_PER_TITAN
if action_type == 0: # pick
assert self.round == 0
my_titans.pick_titan(titan_index)
elif action_type == 1: # remove
my_titans.unplace_titan(titan_index)
self._removes_left[self._next_player] -= 1
else:
my_titans.place_titan(titan_index, action_type-2)
if self.round == 0:
self.titans_prev = deepcopy(self.titans)
if my_titans.num_titans() == 8 and self._next_player == 1:
self.round += 1
self._next_player = 1-self._next_player
return
# not enough placed titans
if len(my_titans.placed_titans()) < self._cur_max_placed_titans():
return
# p1 hasnt gone
if self._next_player == 0:
self._next_player = 1
return
# both done, play a game
is_p0_win = check_server_win_tournament(self.titans)
if is_p0_win:
self.score[0] += 1
else:
self.score[1] += 1
# all info now public
self.titans_prev = deepcopy(self.titans)
# if a round ended
if self.score[0] != 3 and self.score[1] != 3:
self.round += 1
self._next_player = 0
return
# if is complete
self._game_over = True
def _action_to_string(self, player, action):
if action == PASS_ACTION:
cmd = "PASS"
else:
titan_index = action // _NUM_ACTIONS_PER_TITAN
action_type = action % _NUM_ACTIONS_PER_TITAN
if action_type == 0:
act_str = "PICK"
elif action_type == 1:
act_str = "REMOVE"
else:
act_str = f"TILE({index_to_tile(action_type-2)})"
cmd = f"{index_to_titan_name(titan_index)}({TITAN_IDS[titan_index]})-{act_str}"
return f"[p{player}-{cmd}]"
def is_terminal(self):
"""Returns True if the game is over."""
return self._game_over
def returns(self):
"""Total reward for each player over the course of the game so far."""
if all([s != 3 for s in self.score]): # only terminal for az
return [0, 0]
points_0 = self.score[0]//3 + self.score[0]*0.01
points_1 = self.score[1]//3 + self.score[1]*0.01
return [points_0-points_1, points_1-points_0]
def __str__(self):
"""String for debug purposes. No particular semantics are required."""
"""Observation of `state` from the PoV of `player`, as a string."""
ret = []
ret.append(f"Round {self.round}")
ret.append(f"Score {self.score}")
ret.append(f"p0 \n{self.titans[0]}")
ret.append(f"p1 \n{self.titans[1]}")
return "\n".join(ret)
class TTTObserver:
    """Observer, conforming to the PyObserver interface (see observation.py)."""
    def __init__(self, iig_obs_type, params):
        """Initializes an empty observation tensor."""
        if params:
            raise ValueError(f"Observation parameters not supported; passed {params}")
        assert (iig_obs_type.private_info == pyspiel.PrivateInfoType.SINGLE_PLAYER and iig_obs_type.public_info)
        if iig_obs_type.perfect_recall:
            # Perfect recall: one-hot encoding of the whole action history.
            self.tensor = np.zeros(_MAX_GAME_LENGTH*_NUM_ACTIONS, np.float32)
            self.dict = {"actions": self.tensor.reshape((_MAX_GAME_LENGTH, _NUM_ACTIONS))}
            return
        # Plane 0 holds the observing player's private info, plane 1 the
        # opponent's publicly revealed info.  Rows 0-4 of each titan's 6x5
        # slab are the board; row 5 carries scalar flags/values.
        self.tensor = np.zeros((2, NUM_TITANS, 6, 5), np.float32)
        self.dict = {"data": self.tensor}  # we do this bc az needs 3d matrix
        self.set_dict = {
            "private_titan_placement": self.tensor[0, :, :5, :],
            "private_titan_is_picked": self.tensor[0, :, 5, 0],
            "public_titan_placement_opponent": self.tensor[1, :, :5, :],
            # BUG FIX: originally sliced [1, :, :5, 0] (a 2-D slab), which did
            # not mirror the private per-titan flag layout [_, :, 5, 0].
            "public_titan_is_picked_opponent": self.tensor[1, :, 5, 0],
            "player": self.tensor[0, :2, 5, 4],
            "round": self.tensor[0, 2:3, 5, 4],
            "score": self.tensor[0, 3:5, 5, 4]
        }
    def set_from(self, state: TTTState, player):
        """Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
        self.tensor.fill(0)
        if "player" in self.set_dict:
            self.set_dict["player"][player] = 1
        if "round" in self.set_dict:
            self.set_dict["round"][0] = state.round
        if "score" in self.set_dict:
            self.set_dict["score"][0] = state.score[0]
            self.set_dict["score"][1] = state.score[1]
        if "private_titan_placement" in self.set_dict:
            for titan_info in state.titans[player].titans.values():
                tile_index = titan_info.tile_index
                if tile_index != -1:
                    self.set_dict["private_titan_placement"][titan_info.index][tile_index//5][tile_index % 5] = 1
                self.set_dict["private_titan_is_picked"][titan_info.index] = 1
        # BUG FIX: the original tested for the key "public_titan_placement",
        # which never exists in set_dict (the real key ends in "_opponent"),
        # so the opponent's revealed placements were never written.
        if "public_titan_placement_opponent" in self.set_dict:
            for titan_info in state.titans_prev[1-player].titans.values():
                tile_index = titan_info.tile_index
                if tile_index != -1:
                    self.set_dict["public_titan_placement_opponent"][titan_info.index][tile_index//5][tile_index % 5] = 1
                self.set_dict["public_titan_is_picked_opponent"][titan_info.index] = 1
        if "actions" in self.dict:
            for turn, action in enumerate(state.actions):
                self.dict["actions"][turn, action] = 1
    def string_from(self, state: TTTState, player):
        """Observation of `state` from the PoV of `player`, as a string."""
        assert False  # string observations are not supported for this game
# Register the game with the OpenSpiel library
pyspiel.register_game(_GAME_TYPE, TTTGame)
| StarcoderdataPython |
3296557 | <reponame>tgsoverly/rover
from adafruit_motorkit import MotorKit
from picamera import PiCamera
from time import sleep
import time
import tkinter
import os
import json
# Camera plus three Adafruit motor HATs, each on its own I2C address.
camera = PiCamera()
front_kit = MotorKit(address=0x60)
middle_kit = MotorKit(address=0x61)
rear_kit = MotorKit(address=0x62)
# Motor 1 on every HAT drives the right side, motor 3 the left side.
right_motors = [front_kit.motor1, middle_kit.motor1, rear_kit.motor1]
left_motors = [front_kit.motor3, middle_kit.motor3, rear_kit.motor3]
max_speed = 0.90  # throttle magnitude used for every move
commands = []  # queued command names, executed in order by enter_pressed()
def set_side_speed(side, speed):
    """Apply *speed* to every motor on one side of the rover.

    The left bank is mounted mirrored, so its throttle is negated to make
    a positive *speed* mean "forward" on both sides.
    """
    if side == 'left':
        motors, throttle = left_motors, -speed
    else:
        motors, throttle = right_motors, speed
    for m in motors:
        m.throttle = throttle
def forward():
    """Drive both sides forward for two seconds, then stop."""
    for side in ("left", "right"):
        set_side_speed(side, max_speed)
    time.sleep(2.0)
    for side in ("left", "right"):
        set_side_speed(side, 0)
def left():
    """Pivot left in place (left bank back, right bank forward) for 1 s."""
    set_side_speed("left", -max_speed)
    set_side_speed("right", max_speed)
    time.sleep(1.0)
    for side in ("left", "right"):
        set_side_speed(side, 0)
def reverse():
    """Drive both sides backward for two seconds, then stop."""
    for side in ("left", "right"):
        set_side_speed(side, -max_speed)
    time.sleep(2.0)
    for side in ("left", "right"):
        set_side_speed(side, 0)
def right():
    """Pivot right in place (left bank forward, right bank back) for 1 s."""
    set_side_speed("left", max_speed)
    set_side_speed("right", -max_speed)
    time.sleep(1.0)
    for side in ("left", "right"):
        set_side_speed(side, 0)
def picture():
    """Take a still photo with the Pi camera and save it to the desktop."""
    camera.start_preview()
    # Give the sensor time to adjust exposure/white balance before capture.
    sleep(5)
    camera.capture('/home/pi/Desktop/image.jpg')
    camera.stop_preview()
def _make_recorder(command_name):
    """Build a zero-argument button callback that queues *command_name*."""
    def _record():
        commands.append(command_name)
    return _record


# Button callbacks: each one queues its command for enter_pressed() to run.
forward_pressed = _make_recorder("forward")
reverse_pressed = _make_recorder("reverse")
left_pressed = _make_recorder("left")
right_pressed = _make_recorder("right")
picture_pressed = _make_recorder("picture")
def enter_pressed():
    """Execute every queued command in order, then clear the queue.

    Accepts both full command names and their single-letter shortcuts;
    unrecognized entries are silently skipped (same as the original
    if-chain).
    """
    print(commands)
    dispatch = {
        "forward": forward, "f": forward,
        "reverse": reverse, "b": reverse,
        "left": left, "l": left,
        "right": right, "r": right,
        "picture": picture, "p": picture,
    }
    for command in commands:
        action = dispatch.get(command)
        if action:
            action()
    commands.clear()
# Build the control window.  Grid layout:
#   row 0: [picture] [forward]
#   row 1: [left]    [enter]   [right]
#   row 2:           [reverse]
window = tkinter.Tk()
window.title("GUI")
button_height = 2
button_width = 15
b1 = tkinter.Button(window, text="forward", command=forward_pressed, height=button_height, width=button_width)
b1.grid(row=0, column=1)
b2 = tkinter.Button(window, text="reverse", command=reverse_pressed, height=button_height, width=button_width)
b2.grid(row=2, column=1)
b3 = tkinter.Button(window, text="left", command=left_pressed, height=button_height, width=button_width)
b3.grid(row=1, column=0)
b4 = tkinter.Button(window, text="right", command=right_pressed, height=button_height, width=button_width)
b4.grid(row=1, column=2)
b5 = tkinter.Button(window, text="enter", command=enter_pressed, height=button_height, width=button_width)
b5.grid(row=1, column=1)
b6 = tkinter.Button(window, text="picture", command=picture_pressed, height=button_height, width=button_width)
b6.grid(row=0, column=0)
# Blocks until the window is closed.
window.mainloop()
| StarcoderdataPython |
116596 | <filename>hash&heap/0685_First_Unique_Number_in_Data_Stream.py
'''
Description
Given a continuous stream of data, write a function that returns the first unique number (including the last number) when the terminating number arrives. If the terminating number is not found, return -1.
Example
Example1
Input:
[1, 2, 2, 1, 3, 4, 4, 5, 6]
5
Output: 3
Example2
Input:
[1, 2, 2, 1, 3, 4, 4, 5, 6]
7
Output: -1
Example3
Input:
[1, 2, 2, 1, 3, 4]
3
Output: 3
'''
class Solution:
    """
    @param nums: a continuous stream of numbers
    @param number: a number
    @return: returns the first unique number
    """
    def firstUniqueNumber(self, nums, number):
        """Return the first value occurring exactly once in the stream up to
        (and including) the first occurrence of *number*; -1 if *number*
        never arrives.

        Counts occurrences in an ordinary dict; since dicts preserve
        insertion order (Python 3.7+), the first key with count 1 is the
        first unique number — no index bookkeeping or the original float
        sentinel (2e30) needed.  O(n) time, O(n) space.
        """
        counts = {}
        found = False
        for x in nums:
            counts[x] = counts.get(x, 0) + 1
            if x == number:
                found = True
                break
        if not found:
            return -1
        for value, count in counts.items():
            if count == 1:
                return value
        # Unreachable: the scan stops at the terminator's FIRST occurrence,
        # so the terminator itself always has count 1 at this point.
        return -1
1650299 | #Write a program that lets the user enter a nonnegative integer then uses a loop
#to calculate the factorial of that number. Display the factorial.
total = 1  # running product; 1 is the multiplicative identity, so 0! == 1
integer = int(input('Enter any non negative number you want to calculate the '\
    'factorial of: '))
# Multiply total by every integer from 1 up to and including the input.
for number in range(1, integer + 1):
    total *= number
print('The factorial is: ', total)
| StarcoderdataPython |
4825440 | <filename>pysrc/labstreaminglayer_ros/Converter/__init__.py
import ConverterBase
import Bool
import Float32
import ExoDataArray
import Int32
import Image
import Transform
import TransformStamped
import EEGLiveAmp
| StarcoderdataPython |
130979 |
def calculate_nearest_5(percentage_list: list[float]):
    """For every total stake from 5 to 99, split it by *percentage_list* and
    round each share to the nearest multiple of 5.

    :param percentage_list: fractional shares (e.g. ``[0.6, 0.4]``)
    :return: accumulated absolute rounding error over all totals

    NOTE(review): the original body was left unfinished (the comprehension on
    ``adj_ports`` had no expression, a syntax error) and never returned
    anything.  The rounding rule below — halves round up — is the obvious
    completion; confirm it matches the intended bet-splitting behaviour.
    """
    total_errors = 0
    for i in range(5, 100):
        portions = [i * j for j in percentage_list]
        errors = [j % 5 for j in portions]
        # Round down when the remainder is under 2.5, otherwise round up.
        adj_ports = [j - k if k < 2.5 else j + (5 - k)
                     for j, k in zip(portions, errors)]
        total_errors += sum(abs(a - p) for a, p in zip(adj_ports, portions))
    return total_errors
"""
Make a matched bet calculator
Add in options for
1) Not winning the stake for the sportsbook bet
2) Calculating losing part of the commission on the betting exchange
3) Could do other such as weigh the win on the side of the sportsbook so you dont need to bet as much
"""
| StarcoderdataPython |
1645471 | """This module contains the Hook class to handle pipeline hooks."""
import weakref
class Hook(object):
    """Convenient class for handling hooks.

    :param key: str, unique identifier of the hook
    :param func: function to be called by the hook
        The function can not modify any items fed by its arguments.
    :param reuse: whether to reuse (share) an already-registered key instead
        of raising ``ValueError``
    :param default_kwargs: default `func` keyword argument values

    Example:

        def foo(x, verbose=False):
            if verbose:
                print('verbosity on')
            return x

        # init with default kwargs
        foo_hook = Hook('foo', foo, verbose=True)
        # and on the call
        foo_hook(x=None)  # prints 'verbosity on'
    """

    # Weak registry of all live hooks; entries disappear automatically when
    # a hook is garbage collected.
    __INSTANCES = weakref.WeakSet()

    def __init__(self, key: str, func, reuse=False, **default_kwargs):
        """Initialize the hook and register it in the class-wide registry."""
        if key in Hook.get_current_keys():
            if not reuse:
                raise ValueError("Hook with key `%s` already exists" % key)
            # TODO: share the existing hook instead of creating a new one
        # attr initialization
        self._key = str(key)
        self._func = func
        self._default_kwargs = default_kwargs
        # add the key to the class
        Hook.__INSTANCES.add(self)

    def __repr__(self):
        return "%s(key=%r)" % (type(self).__name__, self._key)

    @property
    def key(self):
        """Get hook key."""
        return self._key

    @property
    def default_kwargs(self):
        """Get hook default keyword arguments."""
        return self._default_kwargs

    @default_kwargs.setter
    def default_kwargs(self, kwargs):
        self._default_kwargs = kwargs

    def __call__(self, *args, **kwargs):
        """Call the hooked function."""
        return self._func(*args, **kwargs)

    @classmethod
    def get_current_hooks(cls) -> list:
        """Return live instances of this class."""
        return list(cls.__INSTANCES)

    @classmethod
    def get_current_keys(cls) -> set:
        """Return the keys of all live instances of this class."""
        return {hook.key for hook in cls.__INSTANCES}

    @classmethod
    def clear_current_instances(cls):
        """Clean up the references held by the class.

        This function is not usually called by user, mainly used for tests
        where cleanup is needed.
        """
        cls.__INSTANCES.clear()
| StarcoderdataPython |
3235321 | <reponame>Loracio/retrato-de-fases
from ..exceptions import exceptions
def is_number(x):
    """True when *x* is a plain int or float."""
    return isinstance(x, (int, float))
def is_range(U):
    """True when *U* is a list or tuple (a "range" in this module's sense)."""
    return isinstance(U, (list, tuple))
def construct_interval_1d(var):
    """Normalize *var* into a 1-D interval.

    A non-zero number ``n`` becomes ``sorted([-n, n])`` (so negative inputs
    also yield ``[-|n|, |n|]``); a list/tuple is returned unchanged.  Zero
    is rejected via :class:`exceptions.RangoInvalid`.  NOTE(review): a value
    that is neither a number nor a list/tuple falls through and implicitly
    returns None — confirm callers handle that.
    """
    try:
        if is_number(var):
            if var != 0:
                return sorted([-var, var])
            # Raised here so the except clause below wraps it uniformly.
            raise Exception('0 No es un rango válido')
        if is_range(var):
            return var
    except Exception as e:
        raise exceptions.RangoInvalid(f"{var} como rango 1d dio el error: "+str(e))
def construct_interval_2d(var, *, depth=0):
    """Normalize *var* into a 2-D interval ``[[x0, x1], [y0, y1]]``.

    Accepted shapes, tried in order via EAFP unpacking:
      * ``[[a, b], [c, d]]`` — used as-is;
      * ``(b, d)`` of ranges/numbers — each side expanded with
        :func:`construct_interval_1d`, or ``[b, d]`` reused for both axes;
      * a single non-zero number ``a`` — becomes ``[[0, a], [0, a]]``.
    Both axes are sorted before returning; anything else raises
    :class:`exceptions.RangoInvalid`.
    """
    try:
        [a, b], [c, d] = var
    except Exception:
        try:
            b, d = var
            if is_range(b) or is_range(d):
                [a, b], [c, d] = construct_interval_1d(b), construct_interval_1d(d)
            else:
                # Two bare numbers: the same [b, d] span on both axes.
                a = c = b
                b = d
        except Exception:
            try:
                a = var
                if a != 0:
                    b = d = a
                    a = c = 0
                else:
                    raise Exception
            except Exception as e:
                raise exceptions.RangoInvalid(f"{var} no es un rango 2d válido.")
    a1 = [a, b]
    a2 = [c, d]
    a1.sort()
    a2.sort()
    return [a1, a2]
def construct_interval_3d(var, *, depth=0):
    """Normalize *var* into a 3-D interval (list of three ``[lo, hi]`` pairs).

    At depth 0 a number ``n`` becomes ``[sorted([0, n])] * 3`` and a
    list/tuple is expanded element-wise at depth 1, where a number ``m``
    becomes ``sorted([-m, m])`` and a range is kept as-is.
    NOTE(review): ``[sorted([0, var])]*3`` repeats the SAME list object for
    all three axes, so mutating one axis later mutates all — confirm this
    aliasing is acceptable to callers.
    """
    try:
        if is_number(var):
            if depth == 0:
                return [sorted([0, var])]*3
            elif depth == 1:
                return sorted([-var, var])
        elif is_range(var):
            if depth == 0:
                return [construct_interval_3d(i, depth=depth+1) for i in var]
            if depth == 1:
                return var
    except Exception as e:
        raise exceptions.RangoInvalid(f"{var} como rango 3d dio el error: "+str(e))
def construct_interval(var, *, dim=None, depth=0):
    """Dispatch to the 1d/2d/3d interval builders and pad the result.

    :param var: number or nested list/tuple describing the interval
    :param dim: target dimensionality (1-3); inferred as ``len(var)`` when
        omitted, which only works for sequence inputs
    :param depth: recursion depth forwarded to the 2d/3d builders
    :return: the normalized interval, its last axis repeated until it has
        ``dim`` entries

    NOTE(review): a ``dim`` outside 1-3 leaves ``inter`` unbound and raises
    ``NameError`` — confirm whether that should be a ``RangoInvalid``.
    (This rewrite also strips a dataset-extraction artifact that had been
    fused onto the final ``return`` line.)
    """
    if not dim:
        dim = len(var)
    if dim == 1:
        inter = construct_interval_1d(var)
    elif dim == 2:
        inter = construct_interval_2d(var, depth=depth)
    elif dim == 3:
        inter = construct_interval_3d(var, depth=depth)
    # Pad by repeating the last axis until the requested dimension is reached.
    while len(inter) < dim:
        inter.append(inter[-1])
    return inter
1622254 | <reponame>soratidus999/allianceauth-signal-pings
# Generated by Django 2.2.12 on 2020-07-04 02:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('eveonline', '0012_index_additions'),
('signalpings', '0003_hrappsignal'),
]
operations = [
migrations.AddField(
model_name='timersignal',
name='corporation',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='eveonline.EveCorporationInfo'),
),
]
| StarcoderdataPython |
1724515 | <reponame>Jar-win/fts-rest
#!/usr/bin/env python
# Copyright notice:
# Copyright Members of the EMI Collaboration, 2013.
#
# See www.eu-emi.eu for details on the copyright holders
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from fts3rest.lib import api
from optparse import OptionParser
def write_resources(options, resources):
resource_index = os.path.join(options.output_directory, options.index)
swagger_resources = {
'swaggerVersion': '1.2',
'apis': resources,
'info': {
'title': 'FTS3 RESTful API',
'description': 'FTS3 RESTful API documentation',
'contact': '<EMAIL>',
'license': 'Apache 2.0',
'licenseUrl': 'http://www.apache.org/licenses/LICENSE-2.0.html'
}
}
open(resource_index, 'wt').write(json.dumps(swagger_resources, indent=2, sort_keys=True))
def write_apis(options, resources, apis, models):
for resource in resources:
resource_path = resource['id']
swagger_api = {
'swaggerVersion': '1.2',
'produces': ['application/json'],
'resourcePath': resource_path,
'authorizations': {},
'apis': apis.get(resource_path, []),
'models': models.get(resource_path, []),
}
api_path = os.path.join(options.output_directory, resource_path.strip('/'))
open(api_path, 'wt').write(json.dumps(swagger_api, indent=2, sort_keys = True))
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-d', '--directory', dest='output_directory', default=None,
help='Where to write the output files. This is mandatory.')
parser.add_option('-f', '--file', dest='index', default='resources.json',
help='Name of the resources file')
(options, args) = parser.parse_args()
if options.output_directory is None:
parser.print_help()
parser.exit(1)
resources, apis, models = api.introspect()
resources.sort(key=lambda r:r['id'])
for api in apis.values():
api.sort(key=lambda a:a['path'])
write_resources(options, resources)
write_apis(options, resources, apis, models)
| StarcoderdataPython |
3310162 | #!/usr/bin/env python
"""
Module for reading TXT files
This expects a segment from class derived in convert_text
"""
# do not delete - needed in time_aligned_text
from asrtoolkit.data_handlers.data_handlers_common import footer, header, separator
from asrtoolkit.data_structures.segment import segment
def format_segment(seg):
"""
Formats a segment assuming it's an instance of class segment with text element
"""
return seg.text
def read_in_memory(input_data):
"""
Reads input text
"""
segments = []
for line in input_data.splitlines():
segments.append(segment({"text": line.strip()}))
return segments
def read_file(file_name):
    """Read a TXT file and return its segments (one per line).

    :param file_name: path to a UTF-8 text file
    :return: list of ``segment`` objects produced by :func:`read_in_memory`
    """
    with open(file_name, encoding="utf-8") as f:
        return read_in_memory(f.read())
| StarcoderdataPython |
3349005 | <filename>tests/test_closed_universe.py
from copy import copy
from unittest import TestCase
from life import ClosedUniverse
class ClosedUniverseTestCase(TestCase):
    """Unit tests for ClosedUniverse: construction, geometry, neighbour
    lookup, bounds checking, copying, item access and factory methods."""
    def test_init(self):
        # Zero or negative dimensions are rejected.
        with self.assertRaises(ValueError):
            ClosedUniverse(0, 1)
        with self.assertRaises(ValueError):
            ClosedUniverse(1, 0)
        with self.assertRaises(ValueError):
            ClosedUniverse(-1, 1)
        with self.assertRaises(ValueError):
            ClosedUniverse(1, -1)
        self.assertIsInstance(ClosedUniverse(1, 1), ClosedUniverse)
    def test_width(self):
        universe = ClosedUniverse(2, 1)
        self.assertEqual(universe.width, 2)
    def test_height(self):
        universe = ClosedUniverse(1, 2)
        self.assertEqual(universe.height, 2)
    def test_through(self):
        # Iteration is row-major: x varies fastest.
        universe = ClosedUniverse(2, 2)
        self.assertEqual(list(universe.through()), [(0, 0), (1, 0), (0, 1), (1, 1)])
    def test_neighbours_of(self):
        universe = ClosedUniverse.from_data([
            [1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]
        ])
        # Corner cells have only 3 neighbours — a closed universe does not
        # wrap around its edges; neighbour order is fixed by the implementation.
        neighbours = universe.neighbours_of(0, 0)
        self.assertEqual(list(neighbours), [2, 5, 4])
        neighbours = universe.neighbours_of(2, 2)
        self.assertEqual(list(neighbours), [5, 6, 8])
        neighbours = universe.neighbours_of(1, 1)
        self.assertEqual(list(neighbours), [1, 2, 3, 6, 9, 8, 7, 4])
    def test_adjust_position(self):
        universe = ClosedUniverse(2, 2)
        # A closed universe never remaps coordinates: out of range raises.
        with self.assertRaises(IndexError):
            universe.adjust_position(-1, 0)
        with self.assertRaises(IndexError):
            universe.adjust_position(2, 0)
        with self.assertRaises(IndexError):
            universe.adjust_position(0, -1)
        with self.assertRaises(IndexError):
            universe.adjust_position(0, 2)
        self.assertEqual(universe.adjust_position(0, 0), (0, 0))
    def test_is_position_in_range(self):
        universe = ClosedUniverse(2, 2)
        self.assertFalse(universe.is_position_in_range(-1, 0))
        self.assertFalse(universe.is_position_in_range(2, 0))
        self.assertFalse(universe.is_position_in_range(0, -1))
        self.assertFalse(universe.is_position_in_range(0, 2))
        self.assertTrue(universe.is_position_in_range(0, 0))
    def test_copy(self):
        universe = ClosedUniverse.from_data([
            [1, 2],
            [3, list()]
        ])
        universe_copy = copy(universe)
        self.assertEqual(universe_copy.width, universe.width)
        self.assertEqual(universe_copy.height, universe.height)
        self.assertEqual(universe_copy, universe)
        # Shallow copy: cell objects are shared, not duplicated.
        self.assertIs(universe_copy[1, 1], universe[1, 1])
    def test_get_item(self):
        universe = ClosedUniverse.from_data([
            [1, 3],
            [2, None]
        ])
        self.assertEqual(universe[0, 0], 1)
        self.assertEqual(universe[1, 1], None)
        with self.assertRaises(IndexError):
            universe[-1, 0]
        with self.assertRaises(IndexError):
            universe[2, 0]
        with self.assertRaises(IndexError):
            universe[0, -1]
        with self.assertRaises(IndexError):
            universe[0, 2]
    def test_set_item(self):
        universe = ClosedUniverse(2, 2)
        universe[0, 0] = 1
        self.assertEqual(universe[0, 0], 1)
        universe[0, 0] = None
        self.assertEqual(universe[0, 0], None)
        with self.assertRaises(IndexError):
            universe[-1, 0] = 2
        with self.assertRaises(IndexError):
            universe[2, 0] = 3
        with self.assertRaises(IndexError):
            universe[0, -1] = 4
        with self.assertRaises(IndexError):
            universe[0, 2] = 5
    def test_str(self):
        universe = ClosedUniverse.from_data([
            [None, 3],
            [2, None]
        ])
        # Empty cells render as spaces; rows are newline-separated.
        self.assertMultiLineEqual(str(universe), ' 3\n2 ')
    def test_eq(self):
        left = ClosedUniverse.from_data([
            [1, 3],
            [2, None]
        ])
        right = ClosedUniverse.from_data([
            [1, 3],
            [2, None]
        ])
        self.assertEqual(left, right)
        left = ClosedUniverse.from_data([
            [1, 3],
            [2, None]
        ])
        right = ClosedUniverse.from_data([
            [1, 3],
            [2, 4]
        ])
        self.assertNotEqual(left, right)
    def test_from_data(self):
        # Falsy raw values become None; the width follows the first row, so
        # the trailing cell of the longer second row is ignored.
        universe = ClosedUniverse.from_data([
            [0, 1],
            [2, 3, 4]
        ])
        self.assertEqual(universe.width, 2)
        self.assertEqual(universe.height, 2)
        self.assertIsNone(universe[0, 0])
        self.assertEqual(universe[1, 0], 1)
        self.assertEqual(universe[0, 1], 2)
        self.assertEqual(universe[1, 1], 3)
        # An explicit predicate decides which raw values count as alive.
        universe = ClosedUniverse.from_data([
            [0, '*'],
            [2, '*', '*']
        ], lambda cell: cell == '*')
        self.assertEqual(universe.width, 2)
        self.assertEqual(universe.height, 2)
        self.assertIsNone(universe[0, 0])
        self.assertEqual(universe[1, 0], '*')
        self.assertIsNone(universe[0, 1])
        self.assertEqual(universe[1, 1], '*')
        with self.assertRaises(ValueError):
            universe = ClosedUniverse.from_data([])
        with self.assertRaises(ValueError):
            universe = ClosedUniverse.from_data([[]])
    def test_random(self):
        # The cell factory is called for every position.
        universe = ClosedUniverse.random(2, 2, lambda: 1)
        self.assertEqual(universe.width, 2)
        self.assertEqual(universe.height, 2)
        self.assertEqual(universe[0, 0], 1)
        self.assertEqual(universe[1, 0], 1)
        self.assertEqual(universe[0, 1], 1)
        self.assertEqual(universe[1, 1], 1)
if __name__ == '__main__':
    # BUG FIX: the module only imports TestCase from unittest, so the bare
    # `unittest.main()` raised NameError when the file was run as a script.
    import unittest
    unittest.main()
| StarcoderdataPython |
3228350 | <filename>Algorithm/Easy/1000+/1033Minimum Difference Between BST Nodes.py
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: the root
    @return: the minimum difference between the values of any two different nodes in the tree
    """
    def minDiffInBST(self, root):
        """Return the smallest difference between values of two BST nodes.

        An in-order walk of a BST yields its values in ascending order, so
        the answer is the minimum gap between consecutive values.  Fixes the
        original's leftover debug ``print`` and its use of ``sys.maxsize``
        without an ``import sys`` (a NameError at runtime); degenerate trees
        (< 2 nodes) now yield ``float('inf')`` as the sentinel instead.
        """
        nodeList = []
        self.inOrder(root, nodeList)
        return min(
            (b - a for a, b in zip(nodeList, nodeList[1:])),
            default=float('inf'),
        )
    def inOrder(self, root, nodeList):
        """Append the subtree's values to *nodeList* in ascending order."""
        if root is None:
            return
        self.inOrder(root.left, nodeList)
        nodeList.append(root.val)
        self.inOrder(root.right, nodeList)
| StarcoderdataPython |
115038 | <gh_stars>1-10
#!/usr/bin/env python3
import sys
import markdown
def main(argv):
    """Render a Markdown file (with the attr_list extension) to HTML on stdout.

    :param argv: ``sys.argv``-style list; ``argv[1]`` is the input .md path
    """
    if len(argv) != 2:
        print("Usage: {} data.md".format(argv[0]))
        # BUG FIX: use sys.exit rather than the site-injected exit(), which
        # is unavailable under `python -S` and in frozen builds.  64 = EX_USAGE.
        sys.exit(64)
    with open(argv[1], 'rt', encoding="utf-8") as stream:
        html = markdown.markdown(stream.read(), extensions=['attr_list'])
    print(html)


if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3390955 | <filename>compiler/autotest/cmd/test.py
from os import listdir, chdir
from os.path import dirname, abspath, isfile, join
from subprocess import call
import sys
def run_python(file):
    """Execute *file* with the python3 interpreter through run_cmd."""
    run_cmd("python3 {}".format(file))
def run_cmd(cmdline, expected_ret_code=0):
    """Run *cmdline* through the shell and check its exit status.

    :param cmdline: shell command line (executed with shell=True)
    :param expected_ret_code: expected exit code, or the string "nonzero"
        to accept any non-zero status
    Failures are reported on stderr in red; the command itself is echoed in
    grey.  OSError (command could not be started) is logged and re-raised.
    """
    if expected_ret_code:
        print ( "\033[90mRunning: '%s' \t(expecting %s ret code)\033[0m" % ( cmdline, expected_ret_code ) )
    else:
        print ( "\033[90mRunning: '%s'\033[0m" % ( cmdline, ) )
    try:
        retcode = call( cmdline, shell=True )
        if retcode < 0:
            # Negative return codes from subprocess.call mean "killed by signal".
            print ( "\033[0;41mTest failed, child was terminated by signal %s\033[0m" % ( -retcode, ), file=sys.stderr )
        else:
            if expected_ret_code == "nonzero":
                if retcode == 0:
                    print ( "\033[0;41mExpected nonzero return code but was %s\033[0m" % ( retcode, ), file=sys.stderr )
            elif retcode != expected_ret_code:
                print ( "\033[0;41mExpected return code %s but was %s\033[0m" % ( expected_ret_code, retcode ), file=sys.stderr )
    except OSError as e:
        print ( "Execution failed: %s" % ( e, ), file=sys.stderr )
        raise
# Smoke tests for the txc compiler driver: run from the script's own
# directory so relative paths are stable.
current_script_dir = dirname( abspath( __file__ ) )
chdir( current_script_dir )
# Common flags: quiet verbosity, no bytecode dump, no tx output.
options = """-vquiet -nobc -notx"""
# print options and quit
run_cmd( """txc -help >/dev/null""" )
# invalid options should return 1
run_cmd( """txc -foobar 2>/dev/null""", 1 )
# empty source file
run_cmd( """echo "" | txc -nojit """ + options)
# minimal source file containing syntax error, should return 1
run_cmd( """echo "unrecoverably bad syntax {" | txc -nojit """ + options + """ 2>/dev/null""", 1 )
# minimal source file containing semantic error, should return 2
run_cmd( """echo "X : Int = 1.1;" | txc -nojit -compileall """ + options + """ 2>/dev/null""", 2 )
# run minimal source file (implicit 'return 0')
run_cmd( """echo "main() : ;" | txc -jit """ + options )
# run minimal source file with explicit 'return 0'
run_cmd( """echo "main()->Int : return 0" | txc -jit """ + options )
# test assertions
run_cmd( """echo "main()->Int : { assert TRUE; return 0; }" | txc -jit """ + options )
run_cmd( """echo "main()->Int : { assert FALSE; return 0; }" | txc -jit """ + options + """ >/dev/null""", "nonzero" )
| StarcoderdataPython |
68312 | <reponame>SakshayMahna/fras
"""
Attendance based views
"""
from flask import current_app as app
from flask import (Flask, jsonify, request, session)
from flask.blueprints import Blueprint
from werkzeug.utils import secure_filename
from passlib.hash import pbkdf2_sha256
from common import *
from models.models import *
from playhouse.shortcuts import *
from datetime import datetime as DT
import logging
import json
import os
import csv
from pathlib import Path
from zipfile import ZipFile
import base64
from face_recognition_api import FaceRecognitionAPI
import numpy as np
from views.utils import _json_to_np, _ok_json, _error_json, _save_entity, _get_upload_folder, fapi
# Initialize the blueprint that groups the attendance views.
attendance_blueprint = Blueprint('attendance', __name__, template_folder='templates')
# Views
@auth_check
def mark_attendance():
    """Mark attendance from an uploaded group photo.

    Expects a multipart upload under the key ``group_photo``.  The photo is
    saved to the upload folder, faces are matched against all stored
    encodings, and a "Present" Attendance row is written for each person
    recognized.  Returns the recognition result minus the missing-names
    list, or an error JSON on any failure.
    """
    try:
        grp_ph = request.files['group_photo']
        if grp_ph.filename == '':
            return _error_json("No file supplied!")
        filename = secure_filename(grp_ph.filename)
        file_path = os.path.join(_get_upload_folder(), filename)
        grp_ph.save(file_path)
        # NOTE(review): _fetch_known_faces_encodings() returns a
        # (encodings, face_info) tuple passed as a single argument —
        # confirm that is what find_persons_in_photo expects.
        result = fapi.find_persons_in_photo(
            file_path, _fetch_known_faces_encodings())
        present = result["names_found"]
        for pp in present:
            # NOTE(review): lookup is by first name only, which breaks on
            # duplicate first names — verify against the recognizer's keys.
            u = User.get(User.first_name == pp)
            ua = Attendance(user=u, status="Present")
            _save_entity(ua)
        result.pop("names_missing")
        return _ok_json(result)
    except Exception as ex:
        msg = "Error when handling attendance marking request."
        logging.exception(msg)
        return _error_json("{0}: {1}".format(msg, ex))
@auth_check
def all_attendance():
    """Return every attendance record as JSON.

    Each entry carries the user's first/last name and the timestamp the
    record was inserted; failures are logged and reported as error JSON.
    """
    try:
        att = Attendance.select()
        return _ok_json([{"first_name": a.user.first_name,
                          "last_name": a.user.last_name,
                          "marked_on": a.insert_timestamp} for a in att])
    except Exception as ex:
        msg = "Error when fetching attendance."
        logging.exception(msg)
        return _error_json(msg)
# Helper functions
def _fetch_known_faces_encodings():
    """Load all stored face encodings together with their owners' info.

    :return: tuple ``(encodings, face_info)`` — parallel lists where
        ``encodings[i]`` is the decoded face encoding of ``face_info[i]``
        (a dict with the known-face id and the user's id and name).
    (Also strips a dataset-extraction artifact that had been fused onto the
    original's final ``return`` line.)
    """
    fenc = []
    face_info = []
    for fe in KnownFace.select():
        fenc.append(_json_to_np(fe.face_enc))
        face_info.append({
            "kf_id": fe.id,
            "first_name": fe.user.first_name,
            "last_name": fe.user.last_name,
            "user_id": fe.user.id,
        })
    return fenc, face_info
83130 | import requests
import os
import textwrap
import re
import json
import urllib
from bs4 import BeautifulSoup
import requests_toolbelt as rt
from requests_toolbelt import MultipartEncoderMonitor
from requests_toolbelt import MultipartEncoder
from functools import partial
import uuid
import time
from ProxyCloud import ProxyCloud
import socket
import socks
class CallingUpload:
    """Progress callback for a requests_toolbelt MultipartEncoderMonitor.

    Accumulates elapsed time between monitor ticks and, roughly once per
    second, forwards ``(filename, bytes_read, total_len, bytes_in_interval,
    args)`` to *func*.
    """
    def __init__(self, func, filename, args):
        self.func = func  # user callback; may be falsy to disable reporting
        self.args = args  # opaque payload passed through to func
        self.filename = filename
        self.time_start = time.time()  # start of the current timing interval
        self.time_total = 0  # seconds accumulated since the last report
        self.speed = 0  # bytes read since the last report
        self.last_read_byte = 0
    def __call__(self, monitor):
        # Bytes transferred since the previous tick.
        self.speed += monitor.bytes_read - self.last_read_byte
        self.last_read_byte = monitor.bytes_read
        tcurrent = time.time() - self.time_start
        self.time_total += tcurrent
        self.time_start = time.time()
        if self.time_total >= 1:
            # Report at most ~once per second.  NOTE(review): self.speed is
            # the byte count for the whole interval, not normalized to
            # bytes/second — confirm with consumers of this callback.
            if self.func:
                self.func(self.filename, monitor.bytes_read, monitor.len, self.speed, self.args)
            self.time_total = 0
            self.speed = 0
class MoodleClient(object):
def __init__(self, user,passw,host='',repo_id=4,proxy:ProxyCloud=None):
self.username = user
self.password = <PASSWORD>
self.session = requests.Session()
self.path = 'https://moodle.uclv.edu.cu/'
if host!='':
self.path = host
self.userdata = None
self.userid = ''
self.repo_id = repo_id
self.sesskey = ''
self.proxy = None
if proxy :
self.proxy = proxy.as_dict_proxy()
def getsession(self):
return self.session
def getUserData(self):
try:
tokenUrl = self.path+'login/token.php?service=moodle_mobile_app&username='+urllib.parse.quote(self.username)+'&password='+urllib.parse.quote(self.password)
resp = self.session.get(tokenUrl,proxies=self.proxy)
return self.parsejson(resp.text)
except:
return None
def getDirectUrl(self,url):
tokens = str(url).split('/')
direct = self.path+'webservice/pluginfile.php/'+tokens[4]+'/user/private/'+tokens[-1]+'?token='+self.data['token']
return direct
def getSessKey(self):
fileurl = self.path + 'my/#'
resp = self.session.get(fileurl,proxies=self.proxy)
soup = BeautifulSoup(resp.text,'html.parser')
sesskey = soup.find('input',attrs={'name':'sesskey'})['value']
return sesskey
def login(self):
try:
login = self.path+'login/index.php'
resp = self.session.get(login,proxies=self.proxy)
cookie = resp.cookies.get_dict()
soup = BeautifulSoup(resp.text,'html.parser')
anchor = soup.find('input',attrs={'name':'anchor'})['value']
logintoken = soup.find('input',attrs={'name':'logintoken'})['value']
username = self.username
password = <PASSWORD>
payload = {'anchor': '', 'logintoken': logintoken,'username': username, 'password': password, 'rememberusername': 1}
loginurl = self.path+'login/index.php'
resp2 = self.session.post(loginurl, data=payload,proxies=self.proxy)
soup = BeautifulSoup(resp2.text,'html.parser')
counter = 0
for i in resp2.text.splitlines():
if "loginerrors" in i or (0 < counter <= 3):
counter += 1
print(i)
if counter>0:
print('No pude iniciar sesion')
return False
else:
self.userid = soup.find('div',{'id':'nav-notification-popover-container'})['data-userid']
print('E iniciado sesion con exito')
self.userdata = self.getUserData()
self.sesskey = self.getSessKey()
return True
except Exception as ex:
pass
return False
def createEvidence(self, name, desc=''):
    """Create a new "evidence of prior learning" entry for the user.

    Scrapes the evidence-edit form to obtain the draft file-area id,
    posts the form, and returns a dict describing the new evidence (id,
    URL, empty file list) for later use with upload_file/saveEvidence.
    """
    evidenceurl = self.path + 'admin/tool/lp/user_evidence_edit.php?userid=' + self.userid
    resp = self.session.get(evidenceurl, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    sesskey = self.sesskey
    # The <object> tag embeds the file-picker URL; its ``itemid`` query
    # parameter identifies the draft file area of this form.
    files = self.extractQuery(soup.find('object')['data'])['itemid']
    saveevidence = self.path + 'admin/tool/lp/user_evidence_edit.php?id=&userid=' + self.userid + '&return='
    payload = {'userid': self.userid,
               'sesskey': sesskey,
               '_qf__tool_lp_form_user_evidence': 1,
               'name': name, 'description[text]': desc,
               'description[format]': 1,
               'url': '',
               'files': files,
               'submitbutton': 'Guardar+cambios'}
    resp = self.session.post(saveevidence, data=payload, proxies=self.proxy)
    # Moodle redirects to ...?id=<new id>; pull the id out of the URL.
    evidenceid = str(resp.url).split('?')[1].split('=')[1]
    return {'name': name, 'desc': desc, 'id': evidenceid, 'url': resp.url, 'files': []}
def createBlog(self, name, itemid, desc="<p+dir=\"ltr\"+style=\"text-align:+left;\">asd<br></p>"):
    """Publish a site-wide blog entry whose attachment area is *itemid*.

    Scrapes the blog-edit form for the attachment draft id, then posts
    the new-entry form. Returns the raw response object.
    """
    post_attach = f'{self.path}blog/edit.php?action=add&userid=' + self.userid
    resp = self.session.get(post_attach, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    attachment_filemanager = soup.find('input', {'id': 'id_attachment_filemanager'})['value']
    post_url = f'{self.path}blog/edit.php'
    payload = {'action': 'add',
               'entryid': '',
               'modid': 0,
               'courseid': 0,
               'sesskey': self.sesskey,
               '_qf__blog_edit_form': 1,
               'mform_isexpanded_id_general': 1,
               'mform_isexpanded_id_tagshdr': 1,
               'subject': name,
               'summary_editor[text]': desc,
               'summary_editor[format]': 1,
               'summary_editor[itemid]': itemid,
               'attachment_filemanager': attachment_filemanager,
               'publishstate': 'site',
               'tags': '_qf__force_multiselect_submission',
               'submitbutton': 'Guardar+cambios'}
    resp = self.session.post(post_url, data=payload, proxies=self.proxy)
    return resp
def saveEvidence(self, evidence):
    """Re-submit an existing evidence's edit form.

    Used after upload_file set ``evidence['files']`` to a draft itemid:
    posting the form makes Moodle attach the drafted files permanently.
    Returns the (unchanged) evidence dict.
    """
    evidenceurl = self.path + 'admin/tool/lp/user_evidence_edit.php?id=' + evidence['id'] + '&userid=' + self.userid + '&return=list'
    resp = self.session.get(evidenceurl, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    sesskey = soup.find('input', attrs={'name': 'sesskey'})['value']
    files = evidence['files']
    saveevidence = self.path + 'admin/tool/lp/user_evidence_edit.php?id=' + evidence['id'] + '&userid=' + self.userid + '&return=list'
    payload = {'userid': self.userid,
               'sesskey': sesskey,
               '_qf__tool_lp_form_user_evidence': 1,
               'name': evidence['name'], 'description[text]': evidence['desc'],
               'description[format]': 1, 'url': '',
               'files': files,
               'submitbutton': 'Guardar+cambios'}
    resp = self.session.post(saveevidence, data=payload, proxies=self.proxy)
    return evidence
def getEvidences(self):
    """List the user's evidence entries.

    Scrapes the evidence-list page and returns a list of dicts with the
    evidence name, id, URL and its attached files. Each file entry also
    carries a token-authenticated ``directurl`` when the mobile token is
    available, otherwise ``directurl`` equals the plain URL.
    """
    evidencesurl = self.path + 'admin/tool/lp/user_evidence_list.php?userid=' + self.userid
    resp = self.session.get(evidencesurl, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    nodes = soup.find_all('tr', {'data-region': 'user-evidence-node'})
    evidences = []  # renamed from ``list`` to stop shadowing the builtin
    for node in nodes:
        cells = node.find_all('td')
        evurl = cells[0].find('a')['href']
        evname = node.find('a').next
        evid = evurl.split('?')[1].split('=')[1]
        attachments = cells[1].find_all('a')
        files = []
        for f in attachments:
            url = str(f['href'])
            directurl = url
            try:
                # Token-authenticated direct link via the webservice
                # endpoint; falls back to the plain URL on any failure
                # (e.g. userdata is None before login()).
                directurl = url + '&token=' + self.userdata['token']
                directurl = str(directurl).replace('pluginfile.php', 'webservice/pluginfile.php')
            except Exception:
                pass
            files.append({'name': f.next, 'url': url, 'directurl': directurl})
        evidences.append({'name': evname, 'desc': '', 'id': evid, 'url': evurl, 'files': files})
    return evidences
def deleteEvidence(self, evidence):
    """Delete an evidence entry via Moodle's ajax service API.

    Scrapes a fresh sesskey, then posts the paired delete + list-refresh
    method calls that the web UI itself would issue. Returns None.
    """
    evidencesurl = self.path + 'admin/tool/lp/user_evidence_edit.php?userid=' + self.userid
    resp = self.session.get(evidencesurl, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    sesskey = soup.find('input', attrs={'name': 'sesskey'})['value']
    deleteUrl = self.path + 'lib/ajax/service.php?sesskey=' + sesskey + '&info=core_competency_delete_user_evidence,tool_lp_data_for_user_evidence_list_page'
    savejson = [{"index": 0, "methodname": "core_competency_delete_user_evidence", "args": {"id": evidence['id']}},
                {"index": 1, "methodname": "tool_lp_data_for_user_evidence_list_page", "args": {"userid": self.userid}}]
    headers = {'Content-type': 'application/json', 'Accept': 'application/json, text/javascript, */*; q=0.01'}
    resp = self.session.post(deleteUrl, json=savejson, headers=headers, proxies=self.proxy)
    pass
def upload_file(self, file, evidence=None, itemid=None, progressfunc=None, args=()):
    """Upload a local file into a Moodle draft file area.

    file: local path of the file to send.
    evidence: optional evidence dict (from createEvidence); on success
        its 'files' entry is set to the draft item id so saveEvidence
        can attach the upload.
    itemid: reuse an existing draft area instead of a freshly scraped one.
    progressfunc/args: optional callback (wrapped by CallingUpload) for
        byte-level upload progress reporting.
    Returns (itemid, response_text), or (None, None) on any failure.
    """
    try:
        # Scrape a fresh evidence form just to harvest the upload
        # parameters (draft itemid, client_id, context id, size limits).
        fileurl = self.path + 'admin/tool/lp/user_evidence_edit.php?userid=' + self.userid
        resp = self.session.get(fileurl, proxies=self.proxy)
        soup = BeautifulSoup(resp.text, 'html.parser')
        sesskey = self.sesskey
        _qf__user_files_form = 1
        query = self.extractQuery(soup.find('object', attrs={'type': 'text/html'})['data'])
        client_id = self.getclientid(resp.text)
        itempostid = query['itemid']
        if itemid:
            itempostid = itemid
        of = open(file, 'rb')
        b = uuid.uuid4().hex  # random multipart boundary
        upload_data = {
            'title': (None, ''),
            'author': (None, 'ObysoftDev'),
            'license': (None, 'allrightsreserved'),
            'itemid': (None, itempostid),
            'repo_id': (None, str(self.repo_id)),
            'p': (None, ''),
            'page': (None, ''),
            'env': (None, query['env']),
            'sesskey': (None, sesskey),
            'client_id': (None, client_id),
            'maxbytes': (None, query['maxbytes']),
            'areamaxbytes': (None, query['areamaxbytes']),
            'ctx_id': (None, query['ctx_id']),
            'savepath': (None, '/')}
        upload_file = {
            'repo_upload_file': (file, of, 'application/octet-stream'),
            **upload_data
        }
        post_file_url = self.path + 'repository/repository_ajax.php?action=upload'
        # Stream the multipart body through a monitor so progressfunc
        # receives progress callbacks while the upload runs.
        encoder = rt.MultipartEncoder(upload_file, boundary=b)
        progrescall = CallingUpload(progressfunc, file, args)
        callback = partial(progrescall)
        monitor = MultipartEncoderMonitor(encoder, callback=callback)
        resp2 = self.session.post(post_file_url, data=monitor, headers={"Content-Type": "multipart/form-data; boundary=" + b}, proxies=self.proxy)
        of.close()
        #save evidence
        if evidence:
            evidence['files'] = itempostid
        return itempostid, resp2.text
    except:
        # NOTE(review): bare except collapses every failure (scraping,
        # network, bad file path) into (None, None); consider narrowing.
        return None, None
def upload_file_draft(self, file, progressfunc=None, args=()):
    """Upload *file* to the user's private-files draft area.

    Returns (None, data) where data is the parsed upload response; its
    'url' is rewritten into a token-authenticated webservice URL when
    the mobile token is available.
    """
    file_edit = f'{self.path}user/files.php'
    #https://eduvirtual.uho.edu.cu/user/profile.php
    resp = self.session.get(file_edit, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    usertext = 'ObisoftDev'
    query = self.extractQuery(soup.find('object', attrs={'type': 'text/html'})['data'])
    # The filemanager div id is "filemanager-<client_id>".
    client_id = str(soup.find('div', {'class': 'filemanager'})['id']).replace('filemanager-', '')
    upload_file = f'{self.path}repository/repository_ajax.php?action=upload'
    of = open(file, 'rb')
    b = uuid.uuid4().hex  # random multipart boundary
    upload_data = {
        'title': (None, ''),
        'author': (None, 'ObysoftDev'),
        'license': (None, 'allrightsreserved'),
        'itemid': (None, query['itemid']),
        'repo_id': (None, str(self.repo_id)),
        'p': (None, ''),
        'page': (None, ''),
        'env': (None, query['env']),
        'sesskey': (None, self.sesskey),
        'client_id': (None, client_id),
        'maxbytes': (None, query['maxbytes']),
        'areamaxbytes': (None, query['areamaxbytes']),
        'ctx_id': (None, query['ctx_id']),
        'savepath': (None, '/')}
    # NOTE(review): this rebinding discards the URL string assigned to
    # ``upload_file`` above and shadows the method name — worth a rename.
    upload_file = {
        'repo_upload_file': (file, of, 'application/octet-stream'),
        **upload_data
    }
    post_file_url = self.path + 'repository/repository_ajax.php?action=upload'
    # Stream the multipart body with progress callbacks.
    encoder = rt.MultipartEncoder(upload_file, boundary=b)
    progrescall = CallingUpload(progressfunc, file, args)
    callback = partial(progrescall)
    monitor = MultipartEncoderMonitor(encoder, callback=callback)
    resp2 = self.session.post(post_file_url, data=monitor, headers={"Content-Type": "multipart/form-data; boundary=" + b}, proxies=self.proxy)
    of.close()
    data = self.parsejson(resp2.text)
    data['url'] = str(data['url']).replace('\\', '')
    if self.userdata:
        if 'token' in self.userdata:
            # Rewrite into a direct, token-authenticated download URL.
            data['url'] = str(data['url']).replace('pluginfile.php/', 'webservice/pluginfile.php/') + '?token=' + self.userdata['token']
    return None, data
def parsejson(self, json):
    """Crude parser for the flat, one-level JSON objects Moodle returns.

    Strips the braces, splits on commas and on the first colon of each
    pair, and removes double quotes from both sides. Not a real JSON
    parser: values containing commas or nested objects will be mangled.
    """
    parsed = {}
    body = str(json).replace('{', '').replace('}', '')
    for pair in body.split(','):
        pieces = str(pair).split(':', 1)
        key = str(pieces[0]).replace('"', '')
        parsed[key] = str(pieces[1]).replace('"', '')
    return parsed
def getclientid(self, html):
    """Extract the file-picker ``client_id`` embedded in a Moodle page.

    Locates the first occurrence of ``client_id`` in *html*, grabs a
    fixed 25-character window starting there, and strips the JSON prefix
    so only the id value remains. Raises ValueError when the marker is
    absent (same as the original ``str.index`` behavior).
    """
    start = str(html).index('client_id')
    snippet_len = 25  # renamed from ``max``, which shadowed the builtin
    snippet = html[start:(start + snippet_len)]
    return str(snippet).replace('client_id":"', '')
def extractQuery(self, url):
    """Split the query string of *url* into a {name: value} dict.

    Parameters without '=' map to None; for values containing '=' only
    the first segment after the name is kept (matching the historical
    behavior of this helper). Assumes *url* contains a '?'.
    """
    query = {}
    for pair in str(url).split('?')[1].split('&'):
        pieces = pair.split('=')
        if len(pieces) > 1:
            query[pieces[0]] = pieces[1]
        else:
            query[pieces[0]] = None
    return query
def getFiles(self):
    """List the files currently in the user's private draft area.

    Scrapes the private-files page for the session key, client id and
    draft itemid, then calls the draftfiles ajax endpoint and returns
    the decoded 'list' entry of its JSON response.
    """
    urlfiles = self.path + 'user/files.php'
    resp = self.session.get(urlfiles, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    sesskey = soup.find('input', attrs={'name': 'sesskey'})['value']
    client_id = self.getclientid(resp.text)
    filepath = '/'
    query = self.extractQuery(soup.find('object', attrs={'type': 'text/html'})['data'])
    payload = {'sesskey': sesskey, 'client_id': client_id, 'filepath': filepath, 'itemid': query['itemid']}
    postfiles = self.path + 'repository/draftfiles_ajax.php?action=list'
    resp = self.session.post(postfiles, data=payload, proxies=self.proxy)
    dec = json.JSONDecoder()
    jsondec = dec.decode(resp.text)
    return jsondec['list']
def delteFile(self, name):
    """Delete *name* from the private files area and persist the change.

    (Method name keeps its original typo for API compatibility.)
    Two steps: remove the file from the draft area via draftfiles_ajax,
    then submit the private-files dynamic form so the deletion is saved.
    Returns the response of the final save call.
    """
    urlfiles = self.path + 'user/files.php'
    resp = self.session.get(urlfiles, proxies=self.proxy)
    soup = BeautifulSoup(resp.text, 'html.parser')
    _qf__core_user_form_private_files = soup.find('input', {'name': '_qf__core_user_form_private_files'})['value']
    files_filemanager = soup.find('input', attrs={'name': 'files_filemanager'})['value']
    sesskey = soup.find('input', attrs={'name': 'sesskey'})['value']
    client_id = self.getclientid(resp.text)
    filepath = '/'
    query = self.extractQuery(soup.find('object', attrs={'type': 'text/html'})['data'])
    payload = {'sesskey': sesskey, 'client_id': client_id, 'filepath': filepath, 'itemid': query['itemid'], 'filename': name}
    postdelete = self.path + 'repository/draftfiles_ajax.php?action=delete'
    resp = self.session.post(postdelete, data=payload, proxies=self.proxy)
    #save file
    saveUrl = self.path + 'lib/ajax/service.php?sesskey=' + sesskey + '&info=core_form_dynamic_form'
    savejson = [{"index": 0, "methodname": "core_form_dynamic_form", "args": {"formdata": "sesskey=" + sesskey + "&_qf__core_user_form_private_files=" + _qf__core_user_form_private_files + "&files_filemanager=" + query['itemid'] + "", "form": "core_user\\form\\private_files"}}]
    headers = {'Content-type': 'application/json', 'Accept': 'application/json, text/javascript, */*; q=0.01'}
    resp3 = self.session.post(saveUrl, json=savejson, headers=headers, proxies=self.proxy)
    return resp3
def logout(self):
    """End the Moodle web session (requires the current sesskey)."""
    logouturl = self.path + 'login/logout.php?sesskey=' + self.sesskey
    self.session.post(logouturl, proxies=self.proxy)
| StarcoderdataPython |
# coding: utf-8
# Author: Xset
# Version: 2.0
# Last updated: 2017-03-24
import re
import os
import sys
import lzma
import struct
import sqlite3
import binascii
import subprocess
from PIL import Image, ImageDraw
from modules import *
compileDir = './compile/'
compiledDir = './compiled/'
findFiles = {}
picCount = 0
dbcon = sqlite3.connect("PixelData.db")
dbcon.isolation_level = None
dbcur = dbcon.cursor()
dbcon.row_factory = sqlite3.Row
dbcon.text_factory = str
def _(message):
print(u"[RELEASE] %s" % message)
def checkAlreadyExists(filename):
    """Return the PixelType row stored for *filename*, or None.

    Relies on the module-level ``dbcur`` cursor; ``fetchone()`` already
    yields None when the query matches nothing, so the result can be
    returned directly.
    """
    dbcur.execute('select * from PixelType where filename = ?', [filename])
    return dbcur.fetchone()
def file2bytes(fileName):
    """Read *fileName* and return its full contents as bytes.

    Opens read-only (the old "rb+" mode needlessly required write
    access) and uses a context manager so the handle is always closed.
    """
    with open(fileName, "rb") as handle:
        return handle.read()
def bytes2file(bytes, fileName):
    """Write *bytes* to *fileName*, truncating any existing file.

    The parameter name ``bytes`` (shadowing the builtin) is kept for
    backward compatibility with keyword callers. A context manager now
    guarantees the handle is flushed and closed.
    """
    with open(fileName, "wb") as handler:
        handler.write(bytes)
def convert_pixel(pixels, type):
    """Pack an RGB(A) pixel tuple into the integer layout for *type*.

    type 0 returns the tuple unchanged (RGBA8888); the 16-bit formats
    (2, 4, 6) and L8 (10) are packed into a single int. Unknown types
    raise Exception.
    """
    if type == 0:  # RGBA8888: stored as-is
        return pixels
    if type == 2:  # RGBA4444: top 4 bits of each channel
        return (((pixels[0] >> 4) << 12)
                | ((pixels[1] >> 4) << 8)
                | ((pixels[2] >> 4) << 4)
                | (pixels[3] >> 4))
    if type == 4:  # RGB565
        return (((pixels[0] >> 3) << 11)
                | ((pixels[1] >> 2) << 5)
                | (pixels[2] >> 3))
    if type == 6:  # LA88 layout as used by the game files
        return ((pixels[0] << 8)
                | (pixels[1] << 8)
                | (pixels[2] << 8)
                | pixels[3])
    if type == 10:  # L8: channels OR-ed into one byte
        return pixels[0] | pixels[1] | pixels[2]
    raise Exception("Unknown pixel type {}.".format(type))
def writeImage(file, baseName, fileName, pp = 1):
    """Re-encode a PNG into the game's ``*_tex.sc`` pixel-packet format.

    Looks up the original fileType/subType for *baseName* in the
    PixelData database (falling back to 1/0), optionally re-tiles the
    pixels into the 32x32 block order used by fileType 27/28, then
    writes a packet of converted pixels to *fileName* via BytesWriter.

    pp: number of textures that will share the final sprite file; it
    controls which temp file receives the trailing end-of-packet bytes.
    """
    _("Collecting information...")
    image = Image.open(file)
    data = checkAlreadyExists(baseName)
    if not data is None:
        fileType = data[1]
        subType = data[2]
    else:
        _("Sorry, we can\'t find this texture in out database... May be you changed filename?")
        _("We will use standart fileType, subType and headerBytes. (1, 0, None)" + "\n")
        fileType = 1
        subType = 0
    width = image.size[0]
    height = image.size[1]
    pixels = []
    iSrcPix = 0
    # Bytes per pixel for each sub-format: RGBA8888=4, 16-bit formats=2, L8=1.
    if subType == 0:
        BFPXFormat = 4
    elif subType == 2 or subType == 4 or subType == 6:
        BFPXFormat = 2
    elif subType == 10:
        BFPXFormat = 1
    else:
        _("Unknown pixel type %s" % (subType))
        sys.exit(0)
    if BFPXFormat:
        # +5 covers the header fields that follow the size: subType (1 byte)
        # plus width and height (2 bytes each).
        packetSize = ((width) * (height) * BFPXFormat) + 5;
        _("About: fileType %s, fileSize: %s, subType: %s, width: %s, height: %s" % (fileType, packetSize, subType, width, height))
    imgl = image.load()
    if fileType == 28 or fileType == 27:
        # These formats store pixels in 32x32 tiles; rebuild the pixel list
        # tile by tile: full tiles, then right edge, then bottom edge, then
        # the bottom-right corner remainder.
        for y in range(0, height):
            for x in range(0, width):
                c = image.getpixel((x, y))
                pixels.append(c)
        for l in range(int(height / 32)):
            for k in range(int(width / 32)):
                for j in range(32):
                    for h in range(32):
                        pixels[iSrcPix] = imgl[h + (k * 32), j + (l * 32)]
                        iSrcPix += 1
            for j in range(32):
                for h in range(width % 32):
                    pixels[iSrcPix] = imgl[h + (width - (width % 32)), j + (l * 32)]
                    iSrcPix += 1
        for k in range(int(width / 32)):
            for j in range(int(height % 32)):
                for h in range(32):
                    pixels[iSrcPix] = imgl[h + (k * 32), j + (height - (height % 32))]
                    iSrcPix += 1
        for j in range(height % 32):
            for h in range(width % 32):
                pixels[iSrcPix] = imgl[h + (width - (width % 32)), j + (height - (height % 32))]
                iSrcPix += 1
        image.putdata(pixels)
    # Create new packet
    p = BytesWriter(fileName)
    # Start Handler
    p.WStart()
    # Packet header: type tag, payload size, pixel format, dimensions.
    p.WByte(28)
    p.WUnsignedInt(packetSize)
    p.WByte(subType)
    p.WUnsignedShort(width)
    p.WUnsignedShort(height)
    for y in range(0, height):
        for x in range(0, width):
            pixels = image.getpixel((x, y))
            converted = convert_pixel(pixels, subType)
            if subType == 0:
                p.W4Bytes(*converted)
            elif subType == 2: # RGB4444 to RGB8888
                p.WUnsignedShort(converted)
            elif subType == 4: # RGB565 to RGB8888
                p.WUnsignedShort(converted)
            elif subType == 6: # RGB555 to RGB8888
                p.WUnsignedShort(converted)
            elif subType == 10:
                p.WUnsignedByte(converted)
    # Only the last texture of a multi-texture sprite gets the trailer.
    if fileName.endswith((pp * "_") + "tex.sc"):
        p.WByte(0)
        p.WByte(0)
        p.WByte(0)
        p.WByte(0)
        p.WByte(0)
    p.WStop()
def compressLZMA(fileName):
    """LZMA-compress temp_tex.sc and emit the final ``*_tex.sc`` sprite.

    Shells out to lzma.exe (Windows-only), then shortens the standard
    13-byte LZMA header to the game's 9-byte form by dropping bytes
    9..12 (the upper half of the 64-bit uncompressed-size field), and
    writes the result under ./compiled/.
    """
    _("Saving as sprite...")
    result = subprocess.check_output(["lzma.exe", "e", "temp_tex.sc", "temp_.tex_sc"])
    os.remove("temp_tex.sc")
    os.rename("temp_.tex_sc", "temp_tex.sc")
    # Module with LZMA
    with open("temp_tex.sc", "rb") as lz:
        lzModule = lz.read()
        # Keep props + 4 size bytes, skip the 4 high size bytes.
        lzModule = lzModule[0:9] + lzModule[13:]
        mModule = open("./compiled/" + re.sub("\_*tex\_*", "_tex.sc", fileName), "wb+")
        mModule.write(lzModule)
        mModule.close()
        lz.close()
    os.remove("temp_tex.sc")
    _("Saving completed" + "\n")
def generateFilesList(dir):
    """Recursively collect .png files under *dir* into global findFiles.

    findFiles maps each directory path (with trailing slash) to the list
    of png file names directly inside it; every subdirectory gets its own
    entry so subdirectory pngs can later be packed into one sprite.
    """
    if findFiles.get(dir) is None:
        findFiles[dir] = []
    toCompile = os.listdir(dir)
    for file in toCompile:
        fullname = dir + file
        if os.path.isdir(fullname):
            dir_ = fullname + "/"
            if findFiles.get(dir_) is None:
                findFiles[dir_] = []
            generateFilesList(dir_)
        else:
            if file.endswith("png"):
                findFiles[dir].append(file)
# --- Main compile pass -------------------------------------------------
# Walk ./compile/, then turn every collected png into a *_tex.sc sprite.
generateFilesList(compileDir)
for dirName in findFiles:
    for file in findFiles[dirName]:
        file = dirName + file
        baseName, ext = os.path.splitext(os.path.basename(file))
        # If we will compile 2 files in one
        if not dirName == compileDir:
            # Pngs in a subdirectory are packed together into one sprite;
            # each part is written to a uniquely named temp file first.
            fileName = "temp_" + ("_" * picCount) + "tex.sc"
            writeImage(file, baseName, fileName, len(findFiles[dirName]))
            picCount += 1
            # NOTE(review): picCount is never reset between directories —
            # presumably only one multi-texture subdirectory is expected;
            # verify before compiling several at once.
            if picCount == len(findFiles[dirName]):
                # All parts written: concatenate them and compress.
                b_bytes = b''
                for j in range(picCount):
                    f_name = "temp_" + ("_" * j) + "tex.sc"
                    b_bytes += file2bytes(f_name)
                    os.remove(f_name)
                bytes2file(b_bytes, "temp_tex.sc")
                compressLZMA(baseName)
        # if we have standart file (1 file)
        elif file.endswith("png"):
            fileName = "temp_tex.sc"
            writeImage(file, baseName, fileName)
            compressLZMA(baseName)
| StarcoderdataPython |
#!/usr/bin/env python3
"""Attach relevant information from small Files/Subseries to parent ndjson records.
"""
import json
import re
import sys
def load_series():
    """Load data/series.ndjson and return a uniqueID -> record dict."""
    out = {}
    # Context manager ensures the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open('data/series.ndjson') as handle:
        for line in handle:
            record = json.loads(line)
            out[record['uniqueID']] = record
    return out
if __name__ == '__main__':
    # Usage: script.py <input.ndjson> <output.ndjson>
    input_path, output_path = sys.argv[1:]
    id_to_series = load_series()
    with open(output_path, 'w') as out:
        for line in open(input_path):
            record = json.loads(line)
            id_ = record['uniqueID']
            if not record['title']:
                continue # filter out empty records
            if 'part_of_links' not in record:
                sys.stderr.write(id_ + '\n')
            # NOTE(review): the next line still indexes part_of_links even
            # when the key was reported missing above — presumably every
            # record has it in practice; verify against the input data.
            parents = record.get('part_of_links')[0]
            assert parents, id_
            (parent_id, parent_name) = parents
            record['parent_uniqueID'] = parent_id
            # Only File/Subseries parents get their metadata copied down.
            if re.match(r'^File|Subseries', parent_name):
                parent = id_to_series.get(parent_id)
                if not parent:
                    sys.stderr.write(
                        'Missing parent: %s: %s for %s\n' % (parent_id, parent_name, id_))
                else:
                    record['parent_date'] = parent['date']
                    record['parent_desc'] = parent.get('physical_desc', '')
                    record['parent_title'] = parent['title']
                    record['parent_scope'] = parent.get('scope', '')
            out.write(json.dumps(record))
            out.write('\n')
| StarcoderdataPython |
# test/obj_op_test.py
from logger import logger
from common_utils.common_types.point import Point2D, Point3D, Point2D_List, Point3D_List
from common_utils.common_types.keypoint import Keypoint2D, Keypoint3D, Keypoint2D_List, Keypoint3D_List
from common_utils.common_types.bbox import BBox
from common_utils.common_types.segmentation import Polygon, Segmentation
# Smoke tests for the arithmetic operator overloads (+, -, *, /) of the
# geometry types in common_utils: points, keypoints, bounding boxes,
# polygons and segmentations. Each section asserts the overloads against
# the equivalent element-wise expression, then prints a pass message.
const_int = 10
const_float = 20.0

# --- Point2D arithmetic -------------------------------------------------
pt2d_0 = Point2D(x=1.0, y=2.0)
pt2d_1 = Point2D(x=3.0, y=4.0)
assert pt2d_0 + pt2d_1 == Point2D(x=pt2d_0.x+pt2d_1.x, y=pt2d_0.y+pt2d_1.y)
assert pt2d_0 + const_int == Point2D(x=pt2d_0.x+const_int, y=pt2d_0.y+const_int)
assert pt2d_0 + const_float == Point2D(x=pt2d_0.x+const_float, y=pt2d_0.y+const_float)
assert pt2d_0 - pt2d_1 == Point2D(x=pt2d_0.x-pt2d_1.x, y=pt2d_0.y-pt2d_1.y)
assert pt2d_0 - const_int == Point2D(x=pt2d_0.x-const_int, y=pt2d_0.y-const_int)
assert pt2d_0 - const_float == Point2D(x=pt2d_0.x-const_float, y=pt2d_0.y-const_float)
assert pt2d_0 * const_int == Point2D(x=pt2d_0.x*const_int, y=pt2d_0.y*const_int)
assert pt2d_0 * const_float == Point2D(x=pt2d_0.x*const_float, y=pt2d_0.y*const_float)
assert pt2d_0 / const_int == Point2D(x=pt2d_0.x/const_int, y=pt2d_0.y/const_int)
assert pt2d_0 / const_float == Point2D(x=pt2d_0.x/const_float, y=pt2d_0.y/const_float)
# 3D points declared here; exercised in the Point3D section below.
pt3d_0 = Point3D(x=1.0, y=2.0, z=3.0)
pt3d_1 = Point3D(x=4.0, y=5.0, z=6.0)
print(f'Point2D Test Passed')

# --- Point2D_List: ops broadcast over every contained point -------------
pt2d_list_0 = Point2D_List(point_list=[pt2d_0 for i in range(5)])
assert pt2d_list_0 + pt2d_1 == Point2D_List([point+pt2d_1 for point in pt2d_list_0])
assert pt2d_list_0 + const_int == Point2D_List([point+const_int for point in pt2d_list_0])
assert pt2d_list_0 - pt2d_1 == Point2D_List([point-pt2d_1 for point in pt2d_list_0])
assert pt2d_list_0 - const_int == Point2D_List([point-const_int for point in pt2d_list_0])
assert pt2d_list_0 * const_int == Point2D_List([point*const_int for point in pt2d_list_0])
assert pt2d_list_0 / const_int == Point2D_List([point/const_int for point in pt2d_list_0])
print(f'Point2D_List Test Passed')

# --- Point3D arithmetic -------------------------------------------------
assert pt3d_0 + pt3d_1 == Point3D(x=pt3d_0.x+pt3d_1.x, y=pt3d_0.y+pt3d_1.y, z=pt3d_0.z+pt3d_1.z)
assert pt3d_0 + const_int == Point3D(x=pt3d_0.x+const_int, y=pt3d_0.y+const_int, z=pt3d_0.z+const_int)
assert pt3d_0 + const_float == Point3D(x=pt3d_0.x+const_float, y=pt3d_0.y+const_float, z=pt3d_0.z+const_float)
assert pt3d_0 - pt3d_1 == Point3D(x=pt3d_0.x-pt3d_1.x, y=pt3d_0.y-pt3d_1.y, z=pt3d_0.z-pt3d_1.z)
assert pt3d_0 - const_int == Point3D(x=pt3d_0.x-const_int, y=pt3d_0.y-const_int, z=pt3d_0.z-const_int)
assert pt3d_0 - const_float == Point3D(x=pt3d_0.x-const_float, y=pt3d_0.y-const_float, z=pt3d_0.z-const_float)
assert pt3d_0 * const_int == Point3D(x=pt3d_0.x*const_int, y=pt3d_0.y*const_int, z=pt3d_0.z*const_int)
assert pt3d_0 * const_float == Point3D(x=pt3d_0.x*const_float, y=pt3d_0.y*const_float, z=pt3d_0.z*const_float)
assert pt3d_0 / const_int == Point3D(x=pt3d_0.x/const_int, y=pt3d_0.y/const_int, z=pt3d_0.z/const_int)
assert pt3d_0 / const_float == Point3D(x=pt3d_0.x/const_float, y=pt3d_0.y/const_float, z=pt3d_0.z/const_float)
print(f'Point3D Test Passed')

# --- Point3D_List -------------------------------------------------------
pt3d_list_0 = Point3D_List(point_list=[pt3d_0 for i in range(5)])
assert pt3d_list_0 + pt3d_1 == Point3D_List([point+pt3d_1 for point in pt3d_list_0])
assert pt3d_list_0 + const_int == Point3D_List([point+const_int for point in pt3d_list_0])
assert pt3d_list_0 - pt3d_1 == Point3D_List([point-pt3d_1 for point in pt3d_list_0])
assert pt3d_list_0 - const_int == Point3D_List([point-const_int for point in pt3d_list_0])
assert pt3d_list_0 * const_int == Point3D_List([point*const_int for point in pt3d_list_0])
assert pt3d_list_0 / const_int == Point3D_List([point/const_int for point in pt3d_list_0])
print(f'Point3D_List Test Passed')

# --- Keypoint2D: point math; keypoint+keypoint takes max visibility -----
kpt2d_0 = Keypoint2D(point=pt2d_0.copy(), visibility=2)
kpt2d_1 = Keypoint2D(point=pt2d_1.copy(), visibility=2)
assert kpt2d_0 + pt2d_1 == Keypoint2D(point=kpt2d_0.point+pt2d_1, visibility=kpt2d_0.visibility)
assert kpt2d_0 + const_int == Keypoint2D(point=kpt2d_0.point+const_int, visibility=kpt2d_0.visibility)
assert kpt2d_0 + kpt2d_1 == Keypoint2D(point=kpt2d_0.point+kpt2d_1.point, visibility=int(max(kpt2d_0.visibility, kpt2d_1.visibility)))
assert kpt2d_0 - pt2d_1 == Keypoint2D(point=kpt2d_0.point-pt2d_1, visibility=kpt2d_0.visibility)
assert kpt2d_0 - const_int == Keypoint2D(point=kpt2d_0.point-const_int, visibility=kpt2d_0.visibility)
assert kpt2d_0 - kpt2d_1 == Keypoint2D(point=kpt2d_0.point-kpt2d_1.point, visibility=int(max(kpt2d_0.visibility, kpt2d_1.visibility)))
assert kpt2d_0 * const_int == Keypoint2D(point=kpt2d_0.point*const_int, visibility=kpt2d_0.visibility)
assert kpt2d_0 / const_int == Keypoint2D(point=kpt2d_0.point/const_int, visibility=kpt2d_0.visibility)
print(f'Keypoint2D Test Passed')

# --- Keypoint2D_List ----------------------------------------------------
kpt2d_list_0 = Keypoint2D_List(kpt_list=[kpt2d_0 for i in range(5)])
assert kpt2d_list_0 + kpt2d_1 == Keypoint2D_List([kpt+kpt2d_1 for kpt in kpt2d_list_0])
assert kpt2d_list_0 + pt2d_1 == Keypoint2D_List([kpt+pt2d_1 for kpt in kpt2d_list_0])
assert kpt2d_list_0 + const_int == Keypoint2D_List([kpt+const_int for kpt in kpt2d_list_0])
assert kpt2d_list_0 - kpt2d_1 == Keypoint2D_List([kpt-kpt2d_1 for kpt in kpt2d_list_0])
assert kpt2d_list_0 - pt2d_1 == Keypoint2D_List([kpt-pt2d_1 for kpt in kpt2d_list_0])
assert kpt2d_list_0 - const_int == Keypoint2D_List([kpt-const_int for kpt in kpt2d_list_0])
assert kpt2d_list_0 * const_int == Keypoint2D_List([kpt*const_int for kpt in kpt2d_list_0])
assert kpt2d_list_0 / const_int == Keypoint2D_List([kpt/const_int for kpt in kpt2d_list_0])
print(f'Keypoint2D_List Test Passed')

# --- Keypoint3D ---------------------------------------------------------
kpt3d_0 = Keypoint3D(point=pt3d_0.copy(), visibility=2)
kpt3d_1 = Keypoint3D(point=pt3d_1.copy(), visibility=2)
assert kpt3d_0 + pt3d_1 == Keypoint3D(point=kpt3d_0.point+pt3d_1, visibility=kpt3d_0.visibility)
assert kpt3d_0 + const_int == Keypoint3D(point=kpt3d_0.point+const_int, visibility=kpt3d_0.visibility)
assert kpt3d_0 + kpt3d_1 == Keypoint3D(point=kpt3d_0.point+kpt3d_1.point, visibility=int(max(kpt3d_0.visibility, kpt3d_1.visibility)))
assert kpt3d_0 - pt3d_1 == Keypoint3D(point=kpt3d_0.point-pt3d_1, visibility=kpt3d_0.visibility)
assert kpt3d_0 - const_int == Keypoint3D(point=kpt3d_0.point-const_int, visibility=kpt3d_0.visibility)
assert kpt3d_0 - kpt3d_1 == Keypoint3D(point=kpt3d_0.point-kpt3d_1.point, visibility=int(max(kpt3d_0.visibility, kpt3d_1.visibility)))
assert kpt3d_0 * const_int == Keypoint3D(point=kpt3d_0.point*const_int, visibility=kpt3d_0.visibility)
assert kpt3d_0 / const_int == Keypoint3D(point=kpt3d_0.point/const_int, visibility=kpt3d_0.visibility)
print(f'Keypoint3D Test Passed')

# --- Keypoint3D_List ----------------------------------------------------
kpt3d_list_0 = Keypoint3D_List(kpt_list=[kpt3d_0 for i in range(5)])
assert kpt3d_list_0 + kpt3d_1 == Keypoint3D_List([kpt+kpt3d_1 for kpt in kpt3d_list_0])
assert kpt3d_list_0 + pt3d_1 == Keypoint3D_List([kpt+pt3d_1 for kpt in kpt3d_list_0])
assert kpt3d_list_0 + const_int == Keypoint3D_List([kpt+const_int for kpt in kpt3d_list_0])
assert kpt3d_list_0 - kpt3d_1 == Keypoint3D_List([kpt-kpt3d_1 for kpt in kpt3d_list_0])
assert kpt3d_list_0 - pt3d_1 == Keypoint3D_List([kpt-pt3d_1 for kpt in kpt3d_list_0])
assert kpt3d_list_0 - const_int == Keypoint3D_List([kpt-const_int for kpt in kpt3d_list_0])
assert kpt3d_list_0 * const_int == Keypoint3D_List([kpt*const_int for kpt in kpt3d_list_0])
assert kpt3d_list_0 / const_int == Keypoint3D_List([kpt/const_int for kpt in kpt3d_list_0])
print(f'Keypoint3D_List Test Passed')

# --- BBox: bbox+bbox is the union; scalars/points shift all corners -----
bbox0 = BBox(xmin=0, ymin=1, xmax=2, ymax=3)
bbox1 = BBox(xmin=4, ymin=5, xmax=6, ymax=7)
assert bbox0 + bbox1 == BBox(xmin=0, ymin=1, xmax=6, ymax=7)
assert bbox0 + const_int == BBox(xmin=bbox0.xmin+const_int, ymin=bbox0.ymin+const_int, xmax=bbox0.xmax+const_int, ymax=bbox0.ymax+const_int)
assert bbox0 + const_float == BBox(xmin=bbox0.xmin+const_float, ymin=bbox0.ymin+const_float, xmax=bbox0.xmax+const_float, ymax=bbox0.ymax+const_float)
assert bbox0 + pt2d_0 == BBox(xmin=bbox0.xmin+pt2d_0.x, ymin=bbox0.ymin+pt2d_0.y, xmax=bbox0.xmax+pt2d_0.x, ymax=bbox0.ymax+pt2d_0.y)
assert bbox0 + kpt2d_0 == BBox(xmin=bbox0.xmin+kpt2d_0.point.x, ymin=bbox0.ymin+kpt2d_0.point.y, xmax=bbox0.xmax+kpt2d_0.point.x, ymax=bbox0.ymax+kpt2d_0.point.y)
assert bbox0 - const_int == BBox(xmin=bbox0.xmin-const_int, ymin=bbox0.ymin-const_int, xmax=bbox0.xmax-const_int, ymax=bbox0.ymax-const_int)
assert bbox0 - const_float == BBox(xmin=bbox0.xmin-const_float, ymin=bbox0.ymin-const_float, xmax=bbox0.xmax-const_float, ymax=bbox0.ymax-const_float)
assert bbox0 - pt2d_0 == BBox(xmin=bbox0.xmin-pt2d_0.x, ymin=bbox0.ymin-pt2d_0.y, xmax=bbox0.xmax-pt2d_0.x, ymax=bbox0.ymax-pt2d_0.y)
assert bbox0 - kpt2d_0 == BBox(xmin=bbox0.xmin-kpt2d_0.point.x, ymin=bbox0.ymin-kpt2d_0.point.y, xmax=bbox0.xmax-kpt2d_0.point.x, ymax=bbox0.ymax-kpt2d_0.point.y)
assert bbox0 * const_int == BBox(xmin=bbox0.xmin*const_int, ymin=bbox0.ymin*const_int, xmax=bbox0.xmax*const_int, ymax=bbox0.ymax*const_int)
assert bbox0 * const_float == BBox(xmin=bbox0.xmin*const_float, ymin=bbox0.ymin*const_float, xmax=bbox0.xmax*const_float, ymax=bbox0.ymax*const_float)
assert bbox0 / const_int == BBox(xmin=bbox0.xmin/const_int, ymin=bbox0.ymin/const_int, xmax=bbox0.xmax/const_int, ymax=bbox0.ymax/const_int)
assert bbox0 / const_float == BBox(xmin=bbox0.xmin/const_float, ymin=bbox0.ymin/const_float, xmax=bbox0.xmax/const_float, ymax=bbox0.ymax/const_float)
print('BBox Test Passed')

# --- Polygon: fixtures in 2D and 3D, then broadcast arithmetic ----------
poly2d_0 = Polygon.from_point2d_list(
    Point2D_List(
        [
            Point2D(0,0), Point2D(2,0), Point2D(2,3), Point2D(1, 1)
        ]
    )
)
poly2d_1 = Polygon.from_point2d_list(
    Point2D_List(
        [
            Point2D(0,10), Point2D(2,10), Point2D(2,13), Point2D(1, 11)
        ]
    )
)
poly2d_2 = Polygon.from_point2d_list(
    Point2D_List(
        [
            Point2D(0,20), Point2D(2,20), Point2D(2,23), Point2D(1, 21)
        ]
    )
)
poly3d_0 = Polygon.from_point3d_list(
    Point3D_List(
        [
            Point3D(0,0,0), Point3D(2,0,1), Point3D(2,3,0), Point3D(1, 1,1)
        ]
    )
)
assert poly2d_0 + kpt2d_0 == Polygon.from_point2d_list(Point2D_List([point+kpt2d_0.point for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 + kpt3d_0 == Polygon.from_point3d_list(Point3D_List([point+kpt3d_0.point for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 + pt2d_0 == Polygon.from_point2d_list(Point2D_List([point+pt2d_0 for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 + pt3d_0 == Polygon.from_point3d_list(Point3D_List([point+pt3d_0 for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 + const_float == Polygon.from_point2d_list(Point2D_List([point+const_float for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 + const_float == Polygon.from_point3d_list(Point3D_List([point+const_float for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 + const_int == Polygon.from_point2d_list(Point2D_List([point+const_int for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 + const_int == Polygon.from_point3d_list(Point3D_List([point+const_int for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 - kpt2d_0 == Polygon.from_point2d_list(Point2D_List([point-kpt2d_0.point for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 - kpt3d_0 == Polygon.from_point3d_list(Point3D_List([point-kpt3d_0.point for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 - pt2d_0 == Polygon.from_point2d_list(Point2D_List([point-pt2d_0 for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 - pt3d_0 == Polygon.from_point3d_list(Point3D_List([point-pt3d_0 for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 - const_float == Polygon.from_point2d_list(Point2D_List([point-const_float for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 - const_float == Polygon.from_point3d_list(Point3D_List([point-const_float for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 - const_int == Polygon.from_point2d_list(Point2D_List([point-const_int for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 - const_int == Polygon.from_point3d_list(Point3D_List([point-const_int for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 * const_float == Polygon.from_point2d_list(Point2D_List([point*const_float for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 * const_float == Polygon.from_point3d_list(Point3D_List([point*const_float for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 * const_int == Polygon.from_point2d_list(Point2D_List([point*const_int for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 * const_int == Polygon.from_point3d_list(Point3D_List([point*const_int for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 / const_float == Polygon.from_point2d_list(Point2D_List([point/const_float for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 / const_float == Polygon.from_point3d_list(Point3D_List([point/const_float for point in poly3d_0.to_point3d_list()]))
assert poly2d_0 / const_int == Polygon.from_point2d_list(Point2D_List([point/const_int for point in poly2d_0.to_point2d_list()]))
assert poly3d_0 / const_int == Polygon.from_point3d_list(Point3D_List([point/const_int for point in poly3d_0.to_point3d_list()]))
print('Polygon Test Passed')

# --- Segmentation: seg+seg concatenates; other ops broadcast ------------
seg0 = Segmentation([poly2d_0, poly2d_1])
seg1 = Segmentation([poly2d_2])
assert seg0 + seg1 == Segmentation(seg0.polygon_list + seg1.polygon_list)
assert seg0 + pt2d_0 == Segmentation([poly + pt2d_0 for poly in seg0])
assert seg0 + kpt2d_0 == Segmentation([poly + kpt2d_0 for poly in seg0])
assert seg0 + const_int == Segmentation([poly + const_int for poly in seg0])
assert seg0 + const_float == Segmentation([poly + const_float for poly in seg0])
assert seg0 - pt2d_0 == Segmentation([poly - pt2d_0 for poly in seg0])
assert seg0 - kpt2d_0 == Segmentation([poly - kpt2d_0 for poly in seg0])
assert seg0 - const_int == Segmentation([poly - const_int for poly in seg0])
assert seg0 - const_float == Segmentation([poly - const_float for poly in seg0])
assert seg0 * const_int == Segmentation([poly * const_int for poly in seg0])
assert seg0 * const_float == Segmentation([poly * const_float for poly in seg0])
assert seg0 / const_int == Segmentation([poly / const_int for poly in seg0])
assert seg0 / const_float == Segmentation([poly / const_float for poly in seg0])
print('Segmentation Test Passed')
import itertools
import requests
from ..constants import API_URLS
class H2HLeague():
    """
    A class representing a h2h league in the Fantasy Premier League.

    League metadata is downloaded from the public FPL API when the object
    is constructed; head-to-head fixtures additionally require an
    authenticated session (see :meth:`get_fixtures`).
    """
    def __init__(self, league_id, session=None):
        #: ID of the league as used by the FPL API.
        self.league_id = league_id
        self._information = self._get_information()
        self._league = self._information["league"]
        #: Session for H2H fixtures (must be authenticated; may be None).
        self._session = session
        #: A dictionary containing information about new entries to the league.
        self.new_entries = self._information["new_entries"]
        #: The name of the league.
        self.name = self._league["name"]
        #: Whether the league has started or not.
        self.has_started = self._league["has_started"]
        #: Whether or not the league can be deleted.
        self.can_delete = self._league["can_delete"]
        #: The shortname of the league.
        self.short_name = self._league["short_name"]
        #: The date the league was created.
        self.created = self._league["created"]
        #: Whether the league is closed or not.
        self.closed = self._league["closed"]
        #: Whether the league's forum is disabled.
        self.forum_disabled = self._league["forum_disabled"]
        #: Whether the league is public.
        self.is_public = self._league["make_code_public"]
        #: The league's rank.
        self.rank = self._league["rank"]
        #: The league's size.
        self.size = self._league["size"]
        #: The league's type.
        self.league_type = self._league["league_type"]
        #: The scoring system the league uses.
        self.scoring_system = self._league["_scoring"]
        #: Information about the knockout rounds.
        self.ko_rounds = self._league["ko_rounds"]
        #: Admin entry.
        self.admin_entry = self._league["admin_entry"]
        #: The gameweek the league started in.
        self.started = self._league["start_event"]
        #: The fixtures of the league; None until get_fixtures() succeeds.
        self.fixtures = None
    def _get_information(self):
        """Returns information about the given league (unauthenticated GET)."""
        return requests.get(API_URLS["league_h2h"].format(
            self.league_id)).json()
    def __str__(self):
        return "{} - {}".format(self.name, self.league_id)
    def get_fixtures(self):
        """Returns h2h results/fixtures for given league, login required.

        Silently returns None (leaving ``self.fixtures`` untouched) when no
        authenticated session was supplied at construction time.
        """
        if not self._session:
            return
        fixtures = []
        # Results are paginated; keep fetching pages until an empty one.
        for page in itertools.count(start=1):
            url = API_URLS["h2h"].format(self.league_id, page)
            page_results = self._session.get(url).json()["matches"]["results"]
            if page_results:
                fixtures.extend(page_results)
            else:
                self.fixtures = fixtures
                break
| StarcoderdataPython |
1792220 | from django import template
from django.contrib.auth.models import User
register = template.Library()
from django.shortcuts import render, get_object_or_404
from ..models import Analysis, ProjectComment, Module, Project, File, ParamsComment, Param
from ..forms import ProjectEditCommForm, ParamForm2, ModuleParamForm, ParamTextForm, ModuleForm, TextForm, ParamCommForm, ParamForm, ProjectPlanForm
from django.db.models.aggregates import Max
from django.forms import modelformset_factory
from django.forms import ModelForm, Textarea, NumberInput,Select
#render upload div
@register.simple_tag
def upload_js():
    """Return the jQuery-File-Upload client-side templates (upload and
    download rows) as raw markup for inclusion in a page.

    The ``{% ... %}`` markers inside the string are *tmpl.js* template
    syntax evaluated in the browser, not Django template tags.
    """
    return """
    <!-- The template to display files available for upload -->
    <script id="template-upload" type="text/x-tmpl">
    {% for (var i=0, file; file=o.files[i]; i++) { %}
    <tr class="template-upload fade">
        <td>
            <span class="preview"></span>
        </td>
        <td>
            <p class="name">{%=file.name%}</p>
            {% if (file.error) { %}
                <div><span class="label label-important">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
            {% } %}
        </td>
        <td>
            <p class="size">{%=o.formatFileSize(file.size)%}</p>
            {% if (!o.files.error) { %}
                <div class="progress progress-striped active" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"><div class="progress-bar progress-bar-success" style="width:0%;"></div></div>
            {% } %}
        </td>
        <td>
            {% if (!o.files.error && !i && !o.options.autoUpload) { %}
                <button class="btn btn-primary start">
                    <i class="glyphicon glyphicon-upload"></i>
                    <span>{%=locale.fileupload.start%}</span>
                </button>
            {% } %}
            {% if (!i) { %}
                <button class="btn btn-warning cancel">
                    <i class="glyphicon glyphicon-ban-circle"></i>
                    <span>{%=locale.fileupload.cancel%}</span>
                </button>
            {% } %}
        </td>
    </tr>
    {% } %}
    </script>
    <!-- The template to display files available for download -->
    <script id="template-download" type="text/x-tmpl">
    {% for (var i=0, file; file=o.files[i]; i++) { %}
    <tr class="template-download fade">
        <td>
            <span class="preview">
                {% if (file.thumbnailUrl) { %}
                    <a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" data-gallery><img src="{%=file.thumbnailUrl%}"></a>
                {% } %}
            </span>
        </td>
        <td>
            <p class="name">
                <a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" {%=file.thumbnailUrl?'data-gallery':''%}>{%=file.name%}</a>
            </p>
            {% if (file.error) { %}
                <div><span class="label label-important">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
            {% } %}
        </td>
        <td>
            <span class="size">{%=o.formatFileSize(file.size)%}</span>
        </td>
        <td>
            <button class="btn btn-danger delete" data-type="{%=file.deleteType%}" data-url="{%=file.deleteUrl%}"{% if (file.deleteWithCredentials) { %} data-xhr-fields='{"withCredentials":true}'{% } %}>
                <i class="glyphicon glyphicon-trash"></i>
                <span>{%=locale.fileupload.destroy%}</span>
            </button>
            <input type="checkbox" name="delete" value="1" class="toggle">
        </td>
    </tr>
    {% } %}
    </script>
    """
#render plan div
@register.simple_tag
def get_plan_html(plans, ancestor=True):
    """Recursively render the nested plan tree of a project as HTML.

    On the first (ancestor) level only root comments are rendered; child
    comments are emitted by the recursive call via ProjectComment.child.
    A leftover debug print was removed.
    """
    html = ""
    for plan in plans:
        # On the top level, skip comments that are children of another plan;
        # they are rendered by the recursive call below.
        if not (ancestor and plan.child):
            plan_class = plan.get_p_class_display()
            # "P" (pending) plans show the ok/error toggles; others show "repeat".
            if plan.p_class == "P":
                plan_span = """
                <span onclick="changeClassError({0})" class="{0}_plan1 glyphicon glyphicon-remove glyphicon-right glyphicon-red"></span>
                <span onclick="changeClassOk({0})" class="{0}_plan1 glyphicon glyphicon-ok glyphicon-right glyphicon-green"></span>
                <span style="display: none;" onclick="changeClassError({0})" class="{0}_plan2 glyphicon glyphicon-repeat glyphicon-right glyphicon-blue"></span>
                """.format(plan.id)
            else:
                plan_span = """
                <span style="display: none;" onclick="changeClassError({0})" class="{0}_plan1 glyphicon glyphicon-remove glyphicon-right glyphicon-red"></span>
                <span style="display: none;" onclick="changeClassOk({0})" class="{0}_plan1 glyphicon glyphicon-ok glyphicon-right glyphicon-green"></span>
                <span onclick="changeClassError({0})" class="{0}_plan2 glyphicon glyphicon-repeat glyphicon-right glyphicon-blue"></span>
                """.format(plan.id)
            html += """
            <li id="plan_{0}" class="placeholder-children col-xs-12" data-id="{0}" data-name="{0}">
                <div id="{0}" class="panel no_pad col-xs-12 {1}">
                    <div class="panel-heading col-xs-12 ">
                        <div class="panel_left col-xs-12 ">
                            <div class="col-xs-9 top_m"> {3} </div>
                            <div class="col-xs-3">
                                <span onclick="removePlan({0})" class="glyphicon glyphicon-right glyphicon-black glyphicon-trash"></span>
                                {2}
                            </div>
                        </div>
                    </div>
                </div>
                <div class="left_plan"></div>
                <ol>
            """.format(plan.id, plan_class, plan_span, plan.comment)
            children = ProjectComment.objects.filter(child = plan)
            if children:
                html += get_plan_html(children, False)
            html += "</ol> </li> <ol></ol>"
    return html
#get people who can see file (template tag)
@register.simple_tag
def get_obj(file):
    """Return a space-prefixed string of the active analysts ('X' role)
    on *file* (e.g. " alice bob"), or None when nobody has access."""
    analysts = User.objects.filter(
        file_user__file=file, file_user__role='X', file_user__is_active=True)
    if not analysts:
        return None
    # join preserves the historical leading-space format in one linear pass.
    return "".join(" " + user.username for user in analysts)
#get files belonging group (template tag)
@register.simple_tag
def get_files(group):
    """Return a space-prefixed string of the active file names in *group*
    (e.g. " a.txt b.csv"), or None when the group has no active files."""
    group_files = File.objects.filter(
        file_group__group=group, file_group__is_active=True)
    if not group_files:
        return None
    # join preserves the historical leading-space format in one linear pass.
    return "".join(" " + entry.user_name + entry.ext for entry in group_files)
#get creator of group (template tag)
@register.simple_tag
def get_creator(group):
    """Return the username of *group*'s active creator ('C' role).

    get_object_or_404 raises Http404 when no creator exists, so the old
    unreachable `if not obj` fallback has been removed.
    """
    creator = get_object_or_404(
        User, group_user__group=group, group_user__role='C',
        group_user__is_active=True)
    return creator.username
#get people who can see group (template tag)
@register.simple_tag
def get_group_analysts(group):
    """Return a space-prefixed string of the active analysts ('X' role)
    on *group* (e.g. " alice bob"), or None when nobody has access."""
    analysts = User.objects.filter(
        group_user__group=group, group_user__role='X', group_user__is_active=True)
    if not analysts:
        return None
    # join preserves the historical leading-space format in one linear pass.
    return "".join(" " + user.username for user in analysts)
#get comment form (template tag)
@register.simple_tag
def get_comm_form(comm):
    """Return a ParamCommForm bound to the newest ParamsComment for *comm*,
    or an empty form when no comment exists yet."""
    try:
        # latest('id') replaces the old Max('id') aggregate plus second
        # lookup with a single query; the bare `except:` was narrowed so
        # unrelated errors are no longer swallowed.
        newest = ParamsComment.objects.filter(params=comm).latest('id')
        return ParamCommForm(instance=newest)
    except ParamsComment.DoesNotExist:
        return ParamCommForm()
#get module comment form
@register.simple_tag
def get_module_form(mod):
    # NOTE(review): ModuleComment and ModuleCommentForm are not imported in
    # this module, so the try block always raises NameError, and the except
    # branch then raises NameError again -- this tag looks broken as-is.
    # Confirm the intended imports before relying on it.
    try:
        old_comm = ModuleComment.objects.filter(module = mod).values('module').annotate(max_id=Max('id'))
        comm_id = old_comm[0]['max_id']
        old_comm = ModuleComment.objects.get(pk = comm_id)
        new_module_com = ModuleCommentForm(instance = old_comm)
    except:
        new_module_com = ModuleCommentForm()
    return new_module_com
# Build an unbound module form for the given service (template tag).
@register.simple_tag
def get_init_module_form(service):
    """Return a fresh ModuleForm initialised for *service*."""
    return ModuleForm(service)
# Build an edit form for an existing project comment (template tag).
@register.simple_tag
def get_pro_comment(com):
    """Return a ProjectEditCommForm pre-filled from *com*."""
    return ProjectEditCommForm(instance=com)
# Build a module-parameters form for the given service (template tag).
@register.simple_tag
def get_param_module_form(service):
    """Return an unbound ModuleParamForm initialised for *service*."""
    return ModuleParamForm(service)
# Choose the right edit form for a parameter (template tag).
@register.simple_tag
def get_param_form(param):
    """Return ParamForm for numeric ("N") parameters, ParamTextForm otherwise,
    bound to *param*."""
    form_class = ParamForm if param.par_type == "N" else ParamTextForm
    return form_class(instance=param)
@register.simple_tag
def get_param_limit_form(param, project_name):
    # NOTE(review): unimplemented stub -- it only prints a marker and
    # implicitly returns None; the real work happens in
    # get_param_limit_formset below.  Confirm whether it can be removed.
    print(".")
#get parameters formset (template tag)
@register.simple_tag
def get_param_limit_formset(param_group, project_id):
    """Return a model formset over the active Params that belong to
    *param_group*, passing *project_id* through to every form.

    The unused `ile = total_form_count()` local was removed.
    """
    formset_class = modelformset_factory(Param, form=ParamForm2)
    return formset_class(
        form_kwargs={'project_id': project_id},
        queryset=Param.objects.filter(is_active=True, params__name=param_group.name),
        prefix='param_formset')
# Build a text form pre-filled with an init script's contents (template tag).
@register.simple_tag
def get_init_form(init):
    """Return a TextForm whose 'text' field is seeded from *init*'s file."""
    return TextForm(initial={'text': init.display_text_file()})
# List the analyses attached to a module (template tag).
@register.simple_tag
def get_service(module_id):
    """Return the Analysis queryset for the module with *module_id*
    (404 when the module does not exist)."""
    module = get_object_or_404(Module, pk=module_id)
    return Analysis.objects.filter(module=module)
# List a service's active modules within a project (template tag).
@register.simple_tag
def get_modules(service, project):
    """Return the active Module rows of *service* linked to *project*."""
    return Module.objects.filter(
        service=service, is_active=True, project_module__project=project)
# Template filter: sort a queryset by an order_by expression,
# e.g. {{ queryset|sort_by:"-created" }}.
@register.filter
def sort_by(queryset, order):
    return queryset.order_by(order)
#set global context (leftover debug prints removed)
@register.simple_tag(takes_context=True)
def set_global_context(context, key, value):
    """
    Sets a value to the global template context, so it can
    be accessible across blocks.
    Note that the block where the global context variable is set must appear
    before the other blocks using the variable IN THE BASE TEMPLATE. The order
    of the blocks in the extending template is not important.
    Usage::
        {% extends 'base.html' %}
        {% block first %}
            {% set_global_context 'foo' 'bar' %}
        {% endblock %}
        {% block second %}
            {{ foo }}
        {% endblock %}
    """
    # dicts[0] is the root context layer shared by every block.
    context.dicts[0][key] = value
    return ''
| StarcoderdataPython |
40049 | from django import forms
# Choice lists for the questionnaire below: (submitted value, display label).
gender = [('male', 'M'), ('female', 'F')]
response = [('1', 'yes'), ('0', 'no')]
time = [('1', 'one'), ('2', 'two'), ('3', 'three'), ('4', 'four')]
# Fixed: the label for value '1' was duplicated as 'zero'.
education = [('0', 'zero'), ('1', 'one'), ('2', 'two'), ('3', 'three'), ('4', 'four')]
rating = [('1', 'one'), ('2', 'two'), ('3', 'three'), ('4', 'four'), ('5', 'five')]
class ContactForm(forms.Form):
    """Student-profile questionnaire collecting the model's input features."""
    # Demographics.
    gender_choice = forms.CharField(label="Gender:", widget=forms.Select(choices=gender))
    age = forms.CharField(max_length=254)
    family_size = forms.CharField(max_length=254)
    # NOTE(review): this label reads "Health Status:" exactly like the
    # `health` field below -- looks like a copy/paste slip; confirm the
    # intended label for pstatus.
    pstatus = forms.CharField(label="Health Status:", widget=forms.Select(choices=response))
    # Parental education, on the 0-4 `education` scale.
    mother_education = forms.CharField(label="Mother's Education:", widget=forms.Select(choices=education))
    father_education = forms.CharField(label="Father's Education:", widget=forms.Select(choices=education))
    # Time-related inputs on the 1-4 `time` scale.
    travel_time_to_school = forms.CharField(label="Travel Time To School:", widget=forms.Select(choices=time))
    study_time = forms.CharField(label="Study Time To School:", widget=forms.Select(choices=time))
    failures = forms.CharField(max_length=254)
    # Yes/no flags.
    family_support = forms.CharField(label="Family Support:", widget=forms.Select(choices=response))
    internet = forms.CharField(label="Internet:", widget=forms.Select(choices=response))
    relationship = forms.CharField(label="Relationship:", widget=forms.Select(choices=response))
    # 1-5 ratings.
    family_relationship_quality = forms.CharField(label="Family Relationship Quality:", widget=forms.Select(choices=rating))
    time_after_school = forms.CharField(label="Free Time After School:", widget=forms.Select(choices=rating))
    going_out = forms.CharField(label="Going Out With Friends:", widget=forms.Select(choices=rating))
    health = forms.CharField(label="Health Status:", widget=forms.Select(choices=rating))
    absences = forms.CharField(max_length=254)
| StarcoderdataPython |
69416 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""QA_Struture工厂模式
"""
import pandas as pd
from QUANTAXIS.QAData.QADataStruct import QA_DataStruct_Index_day, QA_DataStruct_Index_min
from QUANTAXIS.QAData.QADataStruct import QA_DataStruct_Stock_day, QA_DataStruct_Stock_min
from QUANTAXIS.QAData.QADataStruct import QA_DataStruct_Future_day, QA_DataStruct_Future_min
from QUANTAXIS.QAUtil import DATABASE
class QhDataStructFactory(object):
    """Factory producing QUANTAXIS QA_DataStruct wrappers.

    The concrete struct class is chosen once in ``__init__`` from the
    instrument type and bar period; :meth:`dataStruct` then wraps raw
    DataFrames on demand.
    """
    def __init__(self, frequence=9, type='stock'):
        """
        Args:
            frequence: bar-period code.  Values >= 5 other than 8 select
                the day-level struct, everything else the minute-level one.
            type: 'stock', 'index' (incl. ETF) or 'future'; any other
                value raises.  ('auto' is not implemented here.)
        """
        day_structs = {
            'stock': QA_DataStruct_Stock_day,
            'index': QA_DataStruct_Index_day,
            'future': QA_DataStruct_Future_day,
        }
        min_structs = {
            'stock': QA_DataStruct_Stock_min,
            'index': QA_DataStruct_Index_min,
            'future': QA_DataStruct_Future_min,
        }
        if type not in day_structs:
            raise Exception("不支持的类型")
        # frequence >= 5 and != 8 -> day-level bars (same rule as before).
        struct_cls = day_structs[type] if 5 <= frequence != 8 else min_structs[type]
        self._dataStruct = (
            lambda df, dtype=type, if_fq='bfq', _cls=struct_cls:
                _cls(df, dtype=dtype, if_fq=if_fq))
    def dataStruct(self, df: pd.DataFrame, if_fq='bfq'):
        """Wrap *df* in the configured QA_DataStruct.

        Args:
            df: bar data.  A flat-indexed frame must contain 'date' and
                'code' columns; they become a (date, code) MultiIndex.
            if_fq: price-adjustment flag forwarded to the struct
                ('bfq' = unadjusted).
        Returns:
            The QA_DataStruct instance.
        """
        if df is not None and len(df.index.names) == 1:
            if df['date'].dtype != 'datetime64':
                df['date'] = pd.to_datetime(df['date'])
            # Index by (date, code) as QA_DataStruct expects.
            df = df.set_index(['date', 'code'], drop=True)
        # Bug fix: if_fq was previously dropped instead of forwarded.
        return self._dataStruct(df, if_fq=if_fq)
| StarcoderdataPython |
1680976 | import turicreate as tc
# Load the SFrame containing all images and split it 80/20 into
# train/test sets, saving each split back to disk.
data = tc.SFrame('Nolmyra2.sframe')
# NOTE(review): random_split is called without a seed here, so the split
# differs between runs -- confirm whether reproducibility matters.
train_set, test_set = data.random_split(0.8)
train_set.save("Train_Data.sframe")
test_set.save("Test_Data.sframe")
| StarcoderdataPython |
1786847 | <reponame>kozakusek/ipp-2020-testy<filename>z2/part2/interactive/jm/random_fuzzy_arrows_1/837840602.py
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 837840602
"""
"""
random actions, total chaos
"""
# Randomly generated fuzz scenario (uuid 837840602) for the gamma engine:
# replays a fixed move sequence on an 8x5 board with 6 players (max 5
# areas each) and asserts every intermediate engine response.
board = gamma_new(8, 5, 6, 5)
assert board is not None
assert gamma_move(board, 1, 7, 1) == 1
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 2, 7, 2) == 1
assert gamma_move(board, 4, 4, 4) == 1
assert gamma_move(board, 5, 4, 5) == 0
assert gamma_move(board, 6, 2, 4) == 1
assert gamma_free_fields(board, 6) == 35
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 5, 0) == 1
assert gamma_move(board, 2, 2, 3) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 5, 5, 0) == 0
assert gamma_move(board, 5, 2, 4) == 0
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 1, 5) == 0
assert gamma_move(board, 1, 3, 4) == 1
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 3, 2, 5) == 0
assert gamma_move(board, 4, 3, 3) == 1
assert gamma_move(board, 5, 2, 1) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 3, 1) == 1
assert gamma_free_fields(board, 1) == 27
assert gamma_move(board, 2, 4, 7) == 0
assert gamma_move(board, 3, 3, 5) == 0
# Snapshot check of the full rendered board.
board333242641 = gamma_board(board)
assert board333242641 is not None
assert board333242641 == ("2.614...\n"
"..24....\n"
".......2\n"
".156...1\n"
"...3.2..\n")
del board333242641
board333242641 = None
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 4, 7, 3) == 1
assert gamma_move(board, 5, 0, 0) == 1
assert gamma_move(board, 6, 5, 1) == 1
assert gamma_move(board, 6, 0, 3) == 1
assert gamma_free_fields(board, 6) == 23
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 4, 0, 1) == 1
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 2, 1) == 0
assert gamma_busy_fields(board, 5) == 2
assert gamma_move(board, 6, 2, 3) == 0
assert gamma_move(board, 6, 6, 4) == 1
assert gamma_busy_fields(board, 6) == 5
assert gamma_golden_move(board, 6, 3, 2) == 0
assert gamma_move(board, 1, 4, 7) == 0
assert gamma_free_fields(board, 1) == 21
assert gamma_move(board, 2, 4, 1) == 1
assert gamma_move(board, 2, 4, 2) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 4, 4, 7) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_free_fields(board, 4) == 19
assert gamma_golden_possible(board, 4) == 1
assert gamma_move(board, 5, 0, 7) == 0
assert gamma_move(board, 6, 3, 1) == 0
assert gamma_busy_fields(board, 6) == 5
assert gamma_move(board, 1, 4, 7) == 0
# Second board snapshot.
board797384596 = gamma_board(board)
assert board797384596 is not None
assert board797384596 == ("2.614.6.\n"
"6.24...4\n"
"....2..2\n"
"415626.1\n"
"5..3.2..\n")
del board797384596
board797384596 = None
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 1, 2) == 1
assert gamma_move(board, 5, 0, 7) == 0
assert gamma_move(board, 5, 6, 3) == 1
assert gamma_move(board, 6, 4, 1) == 0
assert gamma_move(board, 6, 5, 0) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 2, 5) == 0
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 4, 4, 2) == 0
assert gamma_move(board, 5, 2, 6) == 0
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 5, 3) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_busy_fields(board, 1) == 3
assert gamma_move(board, 2, 2, 5) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 3, 1) == 0
assert gamma_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 3) == 3
assert gamma_move(board, 4, 5, 0) == 0
assert gamma_move(board, 4, 7, 2) == 0
assert gamma_move(board, 5, 4, 5) == 0
assert gamma_busy_fields(board, 5) == 3
assert gamma_move(board, 6, 6, 4) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 1) == 4
assert gamma_busy_fields(board, 2) == 6
assert gamma_move(board, 3, 4, 5) == 0
assert gamma_move(board, 3, 5, 0) == 0
assert gamma_move(board, 4, 5, 4) == 1
assert gamma_move(board, 5, 0, 4) == 0
assert gamma_move(board, 5, 7, 2) == 0
assert gamma_move(board, 6, 2, 5) == 0
assert gamma_free_fields(board, 6) == 7
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_busy_fields(board, 2) == 6
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 4, 4, 2) == 0
assert gamma_move(board, 4, 4, 0) == 0
assert gamma_move(board, 5, 0, 7) == 0
assert gamma_move(board, 6, 4, 7) == 0
assert gamma_move(board, 6, 1, 1) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 1, 6, 1) == 1
assert gamma_move(board, 2, 0, 7) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 4, 0, 2) == 1
assert gamma_busy_fields(board, 4) == 8
assert gamma_move(board, 5, 2, 3) == 0
assert gamma_move(board, 5, 0, 4) == 0
assert gamma_free_fields(board, 5) == 8
assert gamma_move(board, 6, 1, 0) == 0
assert gamma_move(board, 6, 7, 4) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 1, 4, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 4, 1) == 0
# Free the engine-side board state.
gamma_delete(board)
| StarcoderdataPython |
30225 | #Faça um Programa que leia um vetor de 5 números inteiros e mostre-os.
lista=[]
for i in range(1, 6):
lista.append(int(input('Digite um número: ')))
print(lista)
| StarcoderdataPython |
98694 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, torch
from tqdm import tqdm
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
import tensorflow.compat.v1 as tf
from metrics.FVD.FVD import Embedder, preprocess, calculate_fvd
import numpy as np
def compute_fvd(real_videos, fake_videos, device=0):
    """Compute the Fréchet Video Distance between two sets of videos.

    Args:
        real_videos, fake_videos: lists of float tensors in [-1, 1] with
            shape (batch, time, channels, height, width); paired batchwise.
        device: index of the GPU to run the I3D embedder on.
    Returns:
        The FVD score as returned by calculate_fvd.
    """
    devs = tf.config.experimental.get_visible_devices("GPU")
    target_dev = [d for d in devs if d.name.endswith(str(device))][0]
    tf.config.experimental.set_visible_devices(target_dev, 'GPU')
    with tf.device("/gpu:0"):
        with tf.Graph().as_default():
            # Construct the I3D embedding graph for both video streams.
            sess = tf.Session()
            input_real = tf.placeholder(dtype=tf.float32, shape=(*real_videos[0].shape[:2], real_videos[0].shape[3],
                                                                 real_videos[0].shape[4], real_videos[0].shape[2]))
            input_fake = tf.placeholder(dtype=tf.float32, shape=(*real_videos[0].shape[:2], real_videos[0].shape[3],
                                                                 real_videos[0].shape[4], real_videos[0].shape[2]))
            real_pre = preprocess(input_real, (224, 224))
            emb_real = Embedder(real_pre)
            embed_real = emb_real.create_id3_embedding(real_pre)
            fake_pre = preprocess(input_fake, (224, 224))
            emb_fake = Embedder(fake_pre)
            embed_fake = emb_fake.create_id3_embedding(fake_pre)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            real, fake = [], []
            for rv, fv in tqdm(zip(real_videos, fake_videos)):
                # Map [-1, 1] tensors to [0, 255] NHWC arrays for I3D.
                real_batch = ((rv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                fake_batch = ((fv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                feed_dict = {input_real: real_batch, input_fake: fake_batch}
                # Bug fix: the fetch order used to be [embed_fake, embed_real],
                # so real/fake embeddings were collected swapped.
                r, f = sess.run([embed_real, embed_fake], feed_dict)
                real.append(r); fake.append(f)
            print('Compute FVD score')
            real = np.concatenate(real, axis=0)
            fake = np.concatenate(fake, axis=0)
            # Feed the collected embeddings back through the FVD graph.
            embed_real = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            embed_fake = tf.placeholder(dtype=tf.float32, shape=(real.shape[0], 400))
            result = calculate_fvd(embed_real, embed_fake)
            feed_dict = {embed_real: real, embed_fake: fake}
            result = sess.run(result, feed_dict)
            sess.close()
            tf.reset_default_graph()
            return result
def get_embeddings(fake_videos, device=0):
    """Compute I3D embeddings for a list of video batches.

    Args:
        fake_videos: list of float tensors in [-1, 1] with shape
            (batch, time, channels, height, width).
        device: index of the GPU to run the I3D embedder on.
    Returns:
        np.ndarray of shape (total_videos, 400), one embedding per video.
    """
    devs = tf.config.experimental.get_visible_devices("GPU")
    target_dev = [d for d in devs if d.name.endswith(str(device))][0]
    tf.config.experimental.set_visible_devices(target_dev, 'GPU')
    with tf.device("/gpu:0"):
        with tf.Graph().as_default():
            # Construct the I3D embedding graph.
            sess = tf.Session()
            input_fake = tf.placeholder(dtype=tf.float32, shape=(*fake_videos[0].shape[:2], fake_videos[0].shape[3],
                                                                 fake_videos[0].shape[4], fake_videos[0].shape[2]))
            fake_pre = preprocess(input_fake, (224, 224))
            emb_fake = Embedder(fake_pre)
            embed_fake = emb_fake.create_id3_embedding(fake_pre)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.tables_initializer())
            # (The unused `real` accumulator was removed.)
            fake = []
            for fv in tqdm(fake_videos):
                # Map [-1, 1] tensors to [0, 255] NHWC arrays for I3D.
                fake_batch = ((fv + 1.) * 127.5).permute(0, 1, 3, 4, 2).cpu().numpy()
                feed_dict = {input_fake: fake_batch}
                # Bug fix: fetching a one-element list made each entry a
                # [array] pair, so np.concatenate produced a 3-D result;
                # fetch the tensor directly to get (batch, 400) arrays.
                f = sess.run(embed_fake, feed_dict)
                fake.append(f)
            fake = np.concatenate(fake, axis=0)
            sess.close()
            tf.reset_default_graph()
return fake | StarcoderdataPython |
1792419 | """Admin API"""
import arrow
from purepage.ext import r, db, abort
class Admin:
    """
    后台管理
    $shared:
        user:
            id?str: 用户ID
            role?str: 角色
            email?email&optional: 邮箱
            github?url&optional: Github地址
            avatar?url&default="http://purepage.org/static/avatar-default.png": 头像
            date_create?datetime&optional: 创建时间
            date_modify?datetime&optional: 修改时间
            timestamp?int&optional: 安全时间戳
            lastlogin_date?datetime&optional: 最近登录时间
            lastlogin_ip?ipv4&optional: 最近登录IP
            lastlogin_ua?str&optional: 最近登录设备UserAgent
    """ # noqa
    # NOTE: the docstrings in this class double as runtime API schema
    # definitions ($input/$output/$error sections parsed by the framework);
    # keep their structure intact when editing.
    def put(self, id, role, email):
        """
        修改帐号信息
        $input:
            id?str: 用户ID
            role?str: 角色
            email?email: 邮箱
        $output: @message
        """
        # Never allow promoting an account to root through this endpoint.
        if role == "root":
            abort(403, "PermissionDeny", "不能设为root帐号")
        # Bump date_modify/timestamp so existing sessions are invalidated.
        db.run(
            r.table("user").get(id).update({
                "role": role,
                "email": email,
                "date_modify": arrow.utcnow().datetime,
                "timestamp": arrow.utcnow().timestamp
            })
        )
        return {"message": "OK"}
    def get(self, account):
        """
        查找帐号
        $input:
            account?str: 用户名或邮箱
        $output: @user
        $error:
            404.NotFound: 用户不存在
        """
        # Try a primary-key lookup first, then fall back to the email index.
        user = db.run(r.table("user").get(account))
        if not user:
            user = db.first(r.table("user").get_all(account, index="email"))
        if not user:
            abort(404, "NotFound", "用户不存在")
        return user
    def get_list(self, page, per_page):
        """
        查看所有用户
        $input: @pagging
        $output:
            - @user
        """
        # Paginated listing of every user record.
        return db.pagging(r.table("user"), page, per_page)
    def delete(self, id):
        """
        删除帐号
        $input:
            id?str: ID
        $output: @message
        """
        # The root account is protected from deletion.
        user = db.run(r.table("user").get(id))
        if user and user["role"] == "root":
            abort(403, "PermissionDeny", "root帐号无法删除")
        db.run(r.table("user").get(id).delete())
        return {"message": "OK"}
| StarcoderdataPython |
72542 | <filename>tests/test_pat_variation.py
from __future__ import division
import datetime
import os
import pytest
import numpy as np
import pandas as pd
from fasttrips import Assignment
from fasttrips import PathSet
from fasttrips import Run
# All paths are anchored at os.getcwd(), so these tests must be run from
# the repository root (the directory containing the `fasttrips` package).
EXAMPLE_DIR = os.path.join(os.getcwd(), 'fasttrips', 'Examples', 'Springfield')
# DIRECTORY LOCATIONS
INPUT_NETWORK = os.path.join(EXAMPLE_DIR, 'networks', 'vermont')
INPUT_DEMAND = os.path.join(EXAMPLE_DIR, 'demand', 'general')
# The "A.pat" config enables preferred-arrival-time (PAT) variation.
INPUT_CONFIG = os.path.join(EXAMPLE_DIR, 'configs', 'A.pat')
OUTPUT_DIR = os.path.join(EXAMPLE_DIR, 'output')
# INPUT FILE LOCATIONS
CONFIG_FILE = os.path.join(INPUT_CONFIG, 'config_ft.txt')
INPUT_WEIGHTS = os.path.join(INPUT_CONFIG, 'pathweight_ft.txt')
@pytest.mark.travis
@pytest.mark.pat
# @pytest.mark.skip(reason="Not working - need to fix")
def test_pat_before_and_after():
    """
    Test to ensure that some of the pathfinder trips are returned before preferred departure
    or after preferred arrival.
    """
    OUTPUT_FOLDER = 'pat_scenario'
    # Full assignment run with PAT enabled; output lands in OUTPUT_DIR/OUTPUT_FOLDER.
    r = Run.run_fasttrips(
        input_network_dir=INPUT_NETWORK,
        input_demand_dir=INPUT_DEMAND,
        run_config=CONFIG_FILE,
        input_weights=INPUT_WEIGHTS,
        output_dir=OUTPUT_DIR,
        output_folder=OUTPUT_FOLDER,
        pathfinding_type="stochastic",
        overlap_variable="count",
        iters=1,
        dispersion=0.50
    )
    links = pd.read_csv(
        os.path.join(OUTPUT_DIR, OUTPUT_FOLDER, 'pathset_links.csv'),
        usecols=['person_trip_id', 'pathnum', 'linkmode', 'linknum', 'new_A_time', 'new_B_time'],
        parse_dates=['new_A_time', 'new_B_time'],
        infer_datetime_format=True
    )
    trips = pd.read_csv(
        os.path.join(INPUT_DEMAND, 'trip_list.txt'),
        usecols=['person_trip_id', 'departure_time', 'arrival_time', 'time_target']
    )
    # First link of each path = departure leg; last transit link = arrival leg.
    departure_link = links.loc[links.groupby(['person_trip_id', 'pathnum'])['linknum'].idxmin()]
    # The C++ Pathfinder doesn't seem to respect the last egress leg from a preferred time perspective
    arrival_link = links.loc[
        links[links.linkmode == 'transit'].groupby(['person_trip_id', 'pathnum'])['linknum'].idxmax()]
    # Attach the network's calendar date to the HH:MM:SS preferred times.
    network_date = links['new_A_time'].dt.date.unique()[0]
    trips['arrival_time'] = trips['arrival_time'].apply(lambda x: parse_date(network_date, x))
    trips['departure_time'] = trips['departure_time'].apply(lambda x: parse_date(network_date, x))
    arrival_trips = trips[trips['time_target'] == 'arrival']
    departure_trips = trips[trips['time_target'] == 'departure']
    departures = pd.merge(
        departure_trips[['person_trip_id', 'departure_time']],
        departure_link[['person_trip_id', 'new_A_time']],
        on=['person_trip_id']
    )
    arrivals = pd.merge(
        arrival_trips[['person_trip_id', 'arrival_time']],
        arrival_link[['person_trip_id', 'new_B_time']],
        on=['person_trip_id']
    )
    # With PAT enabled, some departures must be early -- but never by more
    # than 10 minutes.
    early_departure = departures[departures['new_A_time'] < departures['departure_time']]
    size = early_departure.shape[0]
    assert size > 0
    confirm_size = early_departure[
        ((early_departure['departure_time'] - early_departure['new_A_time']) / np.timedelta64(1, 'm')) <= 10].shape[0]
    assert size == confirm_size
    # Likewise some arrivals must be late, again capped at 10 minutes.
    late_arrivals = arrivals[arrivals['new_B_time'] > arrivals['arrival_time']]
    size = late_arrivals.shape[0]
    assert size > 0
    confirm_size = \
        late_arrivals[((late_arrivals['new_B_time'] - late_arrivals['arrival_time']) / np.timedelta64(1, 'm')) <= 10].shape[
            0]
    assert size == confirm_size
def test_pat_off():
    """
    Test to ensure that none of the pathfinder trips are returned before preferred departure
    or after preferred arrival.
    """
    OUTPUT_FOLDER = 'pat_scenario_reg'
    # Config "A" (without the .pat suffix) has PAT variation disabled.
    in_cfg = os.path.join(EXAMPLE_DIR, 'configs', 'A')
    cfg_file = os.path.join(in_cfg, 'config_ft.txt')
    in_weights = os.path.join(in_cfg, 'pathweight_ft.txt')
    r = Run.run_fasttrips(
        input_network_dir=INPUT_NETWORK,
        input_demand_dir=INPUT_DEMAND,
        run_config=cfg_file,
        input_weights=in_weights,
        output_dir=OUTPUT_DIR,
        output_folder=OUTPUT_FOLDER,
        pathfinding_type="stochastic",
        overlap_variable="None",
        iters=1,
        dispersion=0.50
    )
    links = pd.read_csv(
        os.path.join(OUTPUT_DIR, OUTPUT_FOLDER, 'pathset_links.csv'),
        usecols=['person_trip_id', 'pathnum', 'linkmode', 'linknum', 'new_A_time', 'new_B_time'],
        parse_dates=['new_A_time', 'new_B_time'],
        infer_datetime_format=True
    )
    trips = pd.read_csv(
        os.path.join(INPUT_DEMAND, 'trip_list.txt'),
        usecols=['person_trip_id', 'departure_time', 'arrival_time', 'time_target']
    )
    # First link of each path = departure leg; last transit link = arrival leg.
    departure_link = links.loc[links.groupby(['person_trip_id', 'pathnum'])['linknum'].idxmin()]
    # The C++ Pathfinder doesn't seem to respect the last egress leg from a preferred time perspective
    arrival_link = links.loc[
        links[links.linkmode == 'transit'].groupby(['person_trip_id', 'pathnum'])['linknum'].idxmax()]
    # Attach the network's calendar date to the HH:MM:SS preferred times.
    network_date = links['new_A_time'].dt.date.unique()[0]
    trips['arrival_time'] = trips['arrival_time'].apply(lambda x: parse_date(network_date, x))
    trips['departure_time'] = trips['departure_time'].apply(lambda x: parse_date(network_date, x))
    arrival_trips = trips[trips['time_target'] == 'arrival']
    departure_trips = trips[trips['time_target'] == 'departure']
    departures = pd.merge(
        departure_trips[['person_trip_id', 'departure_time']],
        departure_link[['person_trip_id', 'new_A_time']],
        on=['person_trip_id']
    )
    arrivals = pd.merge(
        arrival_trips[['person_trip_id', 'arrival_time']],
        arrival_link[['person_trip_id', 'new_B_time']],
        on=['person_trip_id']
    )
    # With PAT disabled there must be no early departures at all.
    early_departure = departures[departures['new_A_time'] < departures['departure_time']]
    size = early_departure.shape[0]
    assert 0 == size
    confirm_size = early_departure[
        ((early_departure['departure_time'] - early_departure['new_A_time']) / np.timedelta64(1, 'm')) <= 10].shape[0]
    assert 0 == confirm_size
    # ...and no late arrivals either.
    late_arrivals = arrivals[arrivals['new_B_time'] > arrivals['arrival_time']]
    size = late_arrivals.shape[0]
    assert 0 == size
    confirm_size = \
        late_arrivals[((late_arrivals['new_B_time'] - late_arrivals['arrival_time']) / np.timedelta64(1, 'm')) <= 10].shape[
            0]
    assert 0 == confirm_size
def test_pat_growth_type_validation():
    """Weight tables with malformed growth-type modifiers must fail validation
    with the exact aggregated error message."""
    PathSet.WEIGHTS_FIXED_WIDTH = True
    Assignment.read_weights(INPUT_WEIGHTS)
    ok, message = PathSet.verify_weights(PathSet.WEIGHTS_DF)
    assert ok
    # Seven rows: 2 linear (no modifiers), 2 logarithmic, 3 logistic, with the
    # log_base / logistic_* modifiers deliberately missing or misplaced.
    bad_weights = pd.DataFrame({
        'user_class': ['all'] * 7,
        'purpose': ['other'] * 7,
        'demand_mode_type': ['access'] * 7,
        'demand_mode': ['walk'] * 7,
        'supply_mode': ['walk_access'] * 7,
        'weight_name': ['depart_early_min'] * 7,
        'weight_value': list(np.random.rand(7)),
        'growth_type': ['linear'] * 2 + ['logarithmic'] * 2 + ['logistic'] * 3,
        'log_base': [np.nan] * 3 + list(np.random.rand(2)) + [np.nan] * 2,
        'logistic_mid': [np.nan] * 5 + [1.3] + [np.nan],
        'logistic_max': [16] + [np.nan] * 5 + [1.3],
    })
    ok, message = PathSet.verify_weights(bad_weights)
    assert not ok
    expected_error = ('\n-------Errors: pathweight_ft.txt---------------\n'
                      'Logistic qualifier includes log_base modifier\n'
                      'Logarithmic qualifier missing necessary log_base modifier\n'
                      'Logistic qualifier missing necessary modifiers\n')
    assert expected_error == message
def parse_date(datepart, timestring):
    """Combine a date with an 'HH:MM:SS' time string into a datetime."""
    time_part = datetime.datetime.strptime(timestring, '%H:%M:%S').time()
    return datetime.datetime.combine(datepart, time_part)
| StarcoderdataPython |
4801293 | from django.test import SimpleTestCase
from django.urls import reverse, resolve
from posts.views import get_posts, post_detail, CommentUpdateView, CommentDeleteView
class TestUrls(SimpleTestCase):
    """URL-configuration resolution tests for the posts app."""

    def test_get_posts_url_is_resolved(self):
        """'posts:get_posts' must route to the get_posts view."""
        url = reverse('posts:get_posts')
        # Removed leftover debug print; assertEqual replaces the deprecated
        # assertEquals alias.
        self.assertEqual(resolve(url).func, get_posts)
| StarcoderdataPython |
122292 | <filename>moveNegative.py
# -*- coding: utf-8 -*-
# Two-pointer partition: a boundary index marks the end of the negative block
# collected so far; each negative found is swapped back to the boundary, so all
# negatives end up before the non-negatives (relative order of negatives kept).
class Solution(object):
    def moveNegative(self, nums):
        """Partition ``nums`` in place so every negative value precedes the
        non-negative ones, and return the list.

        Two-pointer scheme: ``boundary`` is the slot just past the negatives
        collected so far; each negative found at ``i`` is swapped back into
        that slot.  Relative order of the negatives is preserved; the
        non-negatives may be reordered by the swaps.  O(n) time, O(1) space.
        """
        boundary = 0
        # ``range`` (not Python-2-only ``xrange``) keeps this Python 3 compatible.
        for i in range(len(nums)):
            if nums[i] < 0:
                nums[i], nums[boundary] = nums[boundary], nums[i]
                boundary += 1
        return nums
test = Solution()
# Python 3 print call (the original Python 2 print statement is a SyntaxError
# on Python 3).  Expected output: [-1, -3, 2, 0, 0, 12]
print(test.moveNegative([2,0,-1,-3,0,12]))
3350269 | import os
import pytest
import testinfra.utils.ansible_runner
# Restrict this test module to the hosts that belong to the
# "molecule-coturn-debian10" Molecule scenario from the inventory file.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("molecule-coturn-debian10")
@pytest.mark.parametrize(
    "name", ["coturn"],
)
def test_packages(host, name):
    """The coturn package must be installed on the target host."""
    # Leftover debug print(testinfra_hosts) removed.
    item = host.package(name)
    assert item.is_installed
@pytest.mark.parametrize(
    "path",
    [
        "/etc/consul/consul.d/coturn-metrics.service.json",
        "/etc/consul/consul.d/coturn-ui.service.json",
        "/etc/default/coturn",
        "/etc/logrotate.d/rsyslog",
        "/etc/turnserver.conf",
        "/lib/systemd/system/coturn.service.d/user_group.conf",
    ],
)
def test_files(host, path):
    """Every file installed by the role must exist; sudo is needed because
    some of the paths are root-readable only."""
    with host.sudo():
        item = host.file(path)
        assert item.exists
def test_group(host):
    """The 'turnserver' system group must exist."""
    assert host.group("turnserver").exists
def test_user(host):
    """The 'turnserver' system user must exist."""
    assert host.user("turnserver").exists
@pytest.mark.parametrize("name", ["coturn"])
def test_services(host, name):
    """The coturn service is running and enabled at boot."""
    service = host.service(name)
    assert service.is_running
    assert service.is_enabled
def test_admin_ui(host):
    """The web admin UI on port 9090 must serve a page mentioning TURN."""
    with host.sudo():
        body = host.check_output("curl http://localhost:9090/")
        assert "TURN" in body, body
# TODO enable when https://github.com/coturn/coturn/pull/517 is released (version > 4.5.1.3)
# def test_metrics(host):
# with host.sudo():
# cmd = host.check_output("curl http://localhost:9641/")
# assert "# HELP turn_status" in cmd, cmd
| StarcoderdataPython |
# Round trip from node 1 using two *different* edges incident to it:
# leg out + shortest path between the two neighbours (not through node 1)
# + leg back.  Prints -1 when no such trip exists.
N, M = map(int, input().split())
# Adjacency matrix over all nodes; edges touching node 1 are kept separately
# in next_points so the two legs incident to it can be chosen explicitly.
input_route = [[float("inf") for _ in range(N)] for _ in range(N)]
next_points = []  # [neighbour of node 1 (0-indexed), edge length]
for _ in range(M):
    u, v, l = map(int, input().split())
    if u == 1:
        next_points.append([v - 1, l])
        continue
    if v == 1:
        next_points.append([u - 1, l])
        continue
    input_route[u - 1][v - 1] = l
    input_route[v - 1][u - 1] = l
for i in range(N):
    input_route[i][i] = 0
# SciPy's C implementation of all-pairs shortest paths replaces the
# commented-out hand-rolled O(N^3) Floyd-Warshall that used to live here.
from scipy.sparse.csgraph import floyd_warshall

route = floyd_warshall(input_route, directed=False)

from itertools import combinations

# Try every unordered pair of distinct node-1 edges and keep the cheapest
# round trip.  (The original materialized list(combinations(...)) and kept a
# dead ``cost`` variable; both removed.)
result = float("inf")
for start_choku, goal_choku in combinations(next_points, 2):
    result = min(result,
                 route[start_choku[0]][goal_choku[0]] + start_choku[1] + goal_choku[1])

print(-1 if result == float("inf") else int(result))
1771997 | ###File Name:get_distence.py
###Author:haicg
###Mail:<EMAIL>
###Created Time: Mon 07 Jul 2014 08:13:00 PM HKT
###File Name : get_distence.py
#!/usr/bin/python
import math
from geopy import distance
'''Return value is the distance with the unit of mile '''
# The previous implementation of this function could be slightly inaccurate
# and handled boundary cases too crudely, so it was replaced with GeoPy's
# distance implementation.
def calc_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in miles between two (lat, lon) points, via GeoPy."""
    point_a = (lat1, lon1)
    point_b = (lat2, lon2)
    # geopy's distance is symmetric, so the argument order does not matter.
    return distance.distance(point_b, point_a).miles
'''Return value is the distance with the unit of mile '''
def calc_points_distance(p1, p2):
    """Distance in miles between two point objects.

    Assumes ``p.x`` is latitude and ``p.y`` is longitude -- TODO confirm
    against the Point type used by callers.
    """
    return calc_distance(p1.x, p1.y, p2.x, p2.y)
def get_distance(begin_point, end_point):
    """Great-circle distance in kilometers between two GPS track points.

    Points must expose string/number ``gps_latitude`` and ``gps_longitude``
    attributes.
    """
    start_lat = float(begin_point.gps_latitude)
    start_lon = float(begin_point.gps_longitude)
    end_lat = float(end_point.gps_latitude)
    end_lon = float(end_point.gps_longitude)
    # calc_distance returns miles; 1 mile = 1.609344 km.
    return calc_distance(start_lat, start_lon, end_lat, end_lon) * 1.609344
| StarcoderdataPython |
1640621 | <gh_stars>0
from .synthetic import random, plane, constant, sine
__all__ = ["random", "plane", "constant", "sine"]
| StarcoderdataPython |
4837104 | <gh_stars>1-10
# Copyright 2000 - 2015 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib, json
class Load:
    """Thin client for the Neustar load-testing REST API (service /load/1.0).

    All HTTP work is delegated to the supplied ``connection`` object, which
    must expose ``get``/``put``/``post``/``delete`` helpers.
    """

    def __init__(self, connection, id):
        self.connection = connection
        # Preserve None as None: the original ``str(id)`` turned None into the
        # string 'None', silently defeating every "Missing id" guard below.
        self.id = str(id) if id is not None else None
        self.service = "/load/1.0"

    def _require_id(self):
        """Raise if this client was created without a load test ID."""
        if self.id is None:
            raise Exception("Missing id: This API requires a load test ID be supplied.")

    def echo(self, msg):
        """Echos a message back to the API caller.

        Arguments:
        msg -- The message to echo.
        """
        # Automatically encode the msg string to be used in a url.
        msg = urllib.quote_plus(msg)
        return self.connection.get(self.service + "/echo/" + msg)

    def who_am_i(self):
        """Retrieves the username associated with the credentials used to issue the API call."""
        return self.connection.get(self.service + "/whoami")

    def list_most_recent_jsonp(self, limit, callback):
        """Retrieves a list of recent load tests ordered by date in descending order.

        Arguments:
        limit -- The maximum number of load tests to retrieve.
        callback -- The name of a javascript function to be called when the JSONP result is received
        """
        params = {"limit": limit, "callback": callback}
        return self.connection.get(self.service + "/list/mostRecent", params)

    def list(self, limit):
        """Retrieves a list of recent load tests ordered by date in descending order.

        Arguments:
        limit -- The maximum number of load tests to retrieve.
        """
        params = {"limit": limit}
        return self.connection.get(self.service + "/list", params)

    def add_tag(self, tag_name):
        """Add a name tag to a load test.

        Arguments:
        tag_name -- The name tag to be added.
        """
        self._require_id()
        tag_name = urllib.quote_plus(tag_name)
        return self.connection.put(self.service + "/" + self.id + "/tag/" + tag_name)

    def remove_tag(self, tag_name):
        """Remove a name tag from a load test.

        Arguments:
        tag_name -- The name tag to be removed.
        """
        self._require_id()
        tag_name = urllib.quote_plus(tag_name)
        return self.connection.delete(self.service + "/" + self.id + "/tag/" + tag_name)

    def get(self):
        """Get a load test by its ID."""
        self._require_id()
        return self.connection.get(self.service + "/id/" + self.id)

    def delete(self):
        """Delete a load test."""
        self._require_id()
        return self.connection.delete(self.service + "/" + self.id + "/delete")

    def pause(self):
        """Pause a running load test."""
        self._require_id()
        return self.connection.put(self.service + "/" + self.id + "/pause")

    def resume(self):
        """Resume a paused load test."""
        # (Docstring fixed: the original said "Pause a running load test.")
        self._require_id()
        return self.connection.put(self.service + "/" + self.id + "/resume")

    def schedule(self, name, region, start, scripts, parts, override=None):
        """Schedule a load test.

        Arguments:
        name -- The name of the load test.
        region -- Source region for the generated traffic (US_WEST, US_EAST,
                  EU_WEST, AP_SOUTHEAST, AP_NORTHEAST, SA_EAST).
        start -- When the test should begin; must be at least ~10 minutes in
                 the future (defaults to about 10 minutes out if omitted).
        scripts -- A script dict or list of dicts:
                   { "percentage" : int, "scriptId" : string }
        parts -- A test-plan part dict or list of dicts:
                 { "duration" : int (minutes), "maxUsers" : int,
                   "type" : "RAMP" | "CONSTANT" }
        override -- Optional override code provided by Neustar that alters the
                    behaviour of the load test.
        """
        # Normalise single dicts to one-element lists so callers can pass either.
        if type(scripts) is not list:
            scripts = [scripts]
        if type(parts) is not list:
            parts = [parts]
        load_test = {"name": name, "region": region, "start": start, "scripts": scripts, "parts": parts}
        if override is not None:
            load_test.update({"overrideCode": override})
        return self.connection.post(self.service + "/schedule", json.dumps(load_test))
1780557 | import pytest
import copy
from formulabot.mep import Population, Solution
def test_Solution_init_params():
    """Constructor validation: bad ops lists and too-small sizes raise ValueError."""
    bad_args = [
        (None, 10, 10),   # ops must be a list
        ([], 10, 10),     # ops must be non-empty
        (['X'], 2, 10),   # at least 3 operations required
        (['X'], 3, 0),    # at least 4 operands required
    ]
    for ops, n_operations, n_operands in bad_args:
        with pytest.raises(ValueError):
            Solution(ops, n_operations, n_operands)
def test_Solution_init():
    """A fresh Solution seeds one pass-through row per parameter, then
    ``operations`` random rows each holding ``operands`` backward references."""
    parms = ['X','Y']
    operations = 5
    operands = 4
    s = Solution(parms, operations, operands)
    # the num of operations should be same as operations + params
    assert len(s.ops) == (operations + len(parms))
    # first operations are the parameters
    for i, p in enumerate(parms):
        assert p == s.ops[i][0]
        assert s.ops[i][1] == []
    for i, o in enumerate(s.ops[len(parms):]):
        # ensure the length of the operands is correct
        assert len(o[1]) == operands
        # ensure that the operation rows referenced in the operands list
        # is less than the current operations row number
        assert len([x for x in o[1] if 0 <= x < i+len(parms)]) == operands
        # make sure the Operator is 0 < x < len(Operator)
        # NOTE(review): the assert uses <=, so an opcode equal to
        # len(Solution.Operator) is accepted -- confirm whether the bound
        # should be strict.
        assert o[0] <= len(Solution.Operator)
def test_Solution_compute_contracts():
    """compute() rejects inputs that are not a fully-populated parameter dict."""
    sol = Solution(['X', 'Y', 'Z'], 10, 10)
    for bad_input in (None, {}, {'X'}):
        with pytest.raises(ValueError):
            sol.compute(bad_input)
def test_Solution_compute_basic_math():
    """Each opcode 1-6 (add, sub, mul, div, sqrt, negate) is exercised by
    installing a hand-written ops table and checking the final row's value."""
    s = Solution(['X','Y','Z'], 5, 4)
    # test addition (opcode 1)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (1, [0, 1, 1, 1]),
             (1, [1, 2, 0, 1]),
             (1, [2, 3, 0, 4]),
             (1, [0, 1, 3, 1]),
             (1, [2, 6, 0, 5])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 11
    # test subtraction (opcode 2)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (2, [0, 1, 1, 1]),
             (2, [1, 2, 0, 1]),
             (2, [2, 3, 0, 4]),
             (2, [2, 1, 3, 1]),
             (2, [6, 0, 0, 5])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 3
    # test multiplication (opcode 3)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (3, [0, 1, 1, 1]),
             (3, [1, 2, 0, 1]),
             (3, [2, 3, 0, 4]),
             (3, [2, 1, 3, 1]),
             (3, [6, 0, 0, 5])]
    x = {'X':2, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 42
    # test division (opcode 4)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (4, [0, 1, 1, 1]),
             (4, [1, 2, 0, 1]),
             (4, [2, 3, 0, 4]),
             (4, [2, 1, 3, 1]),
             (4, [2, 1, 0, 5])]
    x = {'X':2, 'Y':3, 'Z':6} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 2
    # test division by zero returns zero
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (4, [0, 1, 1, 1]),
             (4, [1, 2, 0, 1]),
             (4, [2, 3, 0, 4]),
             (4, [2, 1, 3, 1]),
             (4, [2, 0, 0, 5])]
    x = {'X':0, 'Y':3, 'Z':6} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 0
    # test squareroot (opcode 5): sqrt(64) == 8
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (5, [0, 1, 1, 1]),
             (5, [1, 2, 0, 1]),
             (5, [2, 3, 0, 4]),
             (5, [2, 1, 3, 1]),
             (5, [2, 0, 0, 5])]
    x = {'X':0, 'Y':3, 'Z':64} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 8
    # test squareroot, negative value: sqrt of the absolute value (|-81| -> 9)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (5, [0, 1, 1, 1]),
             (5, [1, 2, 0, 1]),
             (5, [2, 3, 0, 4]),
             (5, [2, 1, 3, 1]),
             (5, [1, 0, 0, 5])]
    x = {'X':0, 'Y':-81, 'Z':64} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 9
    # test negation (opcode 6): -(Y) == -3
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (6, [0, 1, 1, 1]),
             (6, [1, 2, 0, 1]),
             (6, [2, 3, 0, 4]),
             (6, [2, 1, 3, 1]),
             (6, [1, 0, 0, 5])]
    x = {'X':0, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == -3
def test_Solution_compute_trig():
    """Opcodes 7-9 (sin, cos, tan) applied to the first operand of the final
    row; expected values are radians-based trig results."""
    s = Solution(['X','Y','Z'], 5, 4)
    # test sine (opcode 7): sin(0) == 0
    # (comment fixed: this was mislabelled "test addition")
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (7, [0, 1, 1, 1]),
             (7, [1, 2, 0, 1]),
             (7, [2, 3, 0, 4]),
             (7, [0, 1, 3, 1]),
             (7, [0, 6, 0, 5])]
    x = {'X':0, 'Y':1, 'Z':90} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 0
    # test sine: sin(1) ~= 0.8415
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (7, [0, 1, 1, 1]),
             (7, [1, 2, 0, 1]),
             (7, [2, 3, 0, 4]),
             (7, [0, 1, 3, 1]),
             (7, [1, 1, 0, 5])]
    x = {'X':0, 'Y':1, 'Z':90} # test values that cant be combines unintentionally and have correct outcome
    assert round(s.compute(x), 4) == 0.8415
    # test cosine (opcode 8): cos(0) == 1
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (8, [0, 1, 1, 1]),
             (8, [1, 2, 0, 1]),
             (8, [2, 3, 0, 4]),
             (8, [0, 1, 3, 1]),
             (8, [0, 6, 0, 5])]
    x = {'X':0, 'Y':1, 'Z':90} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 1
    # test cosine: cos(1) ~= 0.5403
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (8, [0, 1, 1, 1]),
             (8, [1, 2, 0, 1]),
             (8, [2, 3, 0, 4]),
             (8, [0, 1, 3, 1]),
             (8, [1, 6, 0, 5])]
    x = {'X':0, 'Y':1, 'Z':90} # test values that cant be combines unintentionally and have correct outcome
    assert round(s.compute(x),4) == 0.5403
    # test tangent (opcode 9): tan(1) ~= 1.5574
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (9, [0, 1, 1, 1]),
             (9, [1, 2, 0, 1]),
             (9, [2, 3, 0, 4]),
             (9, [0, 1, 3, 1]),
             (9, [1, 6, 0, 5])]
    x = {'X':0, 'Y':1, 'Z':90} # test values that cant be combines unintentionally and have correct outcome
    assert round(s.compute(x),4) == 1.5574
def test_Solution_compute_conditionals():
    """Conditional opcodes: 10 selects based on greater-than, 11 on less-than.
    The final row decides which earlier row's value is returned."""
    s = Solution(['X','Y','Z'], 5, 4)
    # test if greater than (Z > Y is true -> returns Z == 7)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (10, [0, 1, 1, 1,]),
             (10, [1, 2, 0, 1]),
             (10, [2, 3, 0, 4]),
             (10, [0, 1, 3, 1]),
             (10, [2, 1, 2, 1])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 7
    # test if greater than (X > Y is false -> returns X == 1)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (10, [0, 1, 1, 1,]),
             (10, [1, 2, 0, 1]),
             (10, [2, 3, 0, 4]),
             (10, [0, 1, 3, 1]),
             (10, [0, 1, 2, 0])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 1
    # test if less than (X < Y is true -> returns Z == 7)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (11, [0, 1, 1, 1,]),
             (11, [1, 2, 0, 1]),
             (11, [2, 3, 0, 4]),
             (11, [0, 1, 3, 1]),
             (11, [0, 1, 2, 0])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 7
    # test if less than (Z < Y is false -> returns X == 1)
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (11, [0, 1, 1, 1,]),
             (11, [1, 2, 0, 1]),
             (11, [2, 3, 0, 4]),
             (11, [0, 1, 3, 1]),
             (11, [2, 1, 2, 0])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 1
    # test if equal
    # NOTE(review): the two "equal" cases below still use opcode 11
    # (less-than), so an equality operator is never actually exercised --
    # confirm the intended opcode against Solution.Operator.
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (11, [0, 1, 1, 1,]),
             (11, [1, 2, 0, 1]),
             (11, [2, 3, 0, 4]),
             (11, [0, 1, 3, 1]),
             (11, [2, 2, 1, 0])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 1
    # test if equal
    s.ops = [('X', []),
             ('Y', []),
             ('Z', []),
             (11, [0, 1, 1, 1,]),
             (11, [1, 2, 0, 1]),
             (11, [2, 3, 0, 4]),
             (11, [0, 1, 3, 1]),
             (11, [2, 1, 2, 0])]
    x = {'X':1, 'Y':3, 'Z':7} # test values that cant be combines unintentionally and have correct outcome
    assert s.compute(x) == 1
def test_Solution_mutate():
    """mutate() must actually change the operation table."""
    original = Solution(['X', 'Y', 'Z'], 100, 100)
    mutated = copy.deepcopy(original)
    mutated.mutate()
    assert not original.compare_operations(mutated)
def test_Solution_compare_func():
    """A solution matches itself and differs from a fresh random solution."""
    first = Solution(['X', 'Y', 'Z'], 10, 10)
    assert first.compare_operations(first)
    second = Solution(['X', 'Y', 'Z'], 10, 10)
    assert not first.compare_operations(second)
def test_Population_init():
    """Population() validates every hyper-parameter and raises ValueError."""
    X = [{'X': 0.836261492, 'Y': 0.000102515},
         {'X': 0.43432574700000004, 'Y': 0.00017887}]
    Y = [100., 200.]
    # A fully valid keyword set; each case below perturbs it into invalidity.
    base = dict(population_size=10,
                parameters=['X', 'Y'],
                operations_size=10,
                operands_size=4,
                epochs=10,
                crossover_rate=0.45,
                mutation_rate=0.10,
                kill_rate=0.20,
                error_calc='mae',
                inputs=X,
                outputs=Y)
    invalid_overrides = [
        # population_size < 10
        {'population_size': 9},
        # parameters not a list
        {'parameters': 2},
        # parameters < 1
        {'parameters': []},
        # operations size < 2
        {'operations_size': 1},
        # operands size < 4
        {'operands_size': 1},
        # epochs < 1
        {'operands_size': 10, 'epochs': 0},
        # crossover rate <= 0
        {'operands_size': 10, 'crossover_rate': 0.},
        # mutation rate < 0
        {'operands_size': 10, 'crossover_rate': 0.5, 'mutation_rate': -0.10},
        # kill rate < 0
        {'operands_size': 10, 'crossover_rate': 0., 'mutation_rate': 0.,
         'kill_rate': -0.20},
        # inputs len != outputs len
        {'operands_size': 10, 'crossover_rate': 0., 'mutation_rate': 0.,
         'kill_rate': -0.20,
         'inputs': [{'X': 100., 'Y': 200.}, {'X': 100., 'Y': 200.}],
         'outputs': [100.]},
        # inputs len != outputs len (empty inputs)
        {'operands_size': 10, 'crossover_rate': 0., 'mutation_rate': 0.,
         'kill_rate': -0.20, 'inputs': [], 'outputs': [100.]},
    ]
    for overrides in invalid_overrides:
        kwargs = dict(base, **overrides)
        with pytest.raises(ValueError):
            Population(**kwargs)
3355103 | <reponame>polfpilf/feedback-bot
import hmac
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Optional, Type, TypeVar
T = TypeVar('T')
def _utc_now():
"""Return current UTC timezone-aware datetime"""
return datetime.now(timezone.utc)
@dataclass(frozen=True, eq=True)
class TargetChat:
    """A chat that feedback messages are forwarded to."""

    # Telegram chat identifier.
    chat_id: int
    # Registration timestamp; default_factory gives each instance a fresh
    # timezone-aware UTC value.
    # NOTE(review): created_at participates in equality/hash (no compare=False,
    # unlike ForwardedMessage's extra fields), so two TargetChats for the same
    # chat_id created at different times compare unequal -- confirm intended.
    created_at: datetime = field(default_factory=_utc_now)
@dataclass(frozen=True, eq=True)
class Admin:
    """An authenticated administrator and the chat their feedback goes to."""

    user_id: int
    target_chat: TargetChat

    @classmethod
    def authenticate(
        cls: Type[T],
        user_id: int,
        chat_id: int,
        token: str,
        admin_token: str,
    ) -> Optional[T]:
        """Return an Admin for *user_id*/*chat_id* if *token* matches the
        configured admin token, else None.

        Uses hmac.compare_digest (constant-time) so the comparison does not
        leak how many leading characters of the token were guessed correctly.
        """
        if not hmac.compare_digest(token.encode(), admin_token.encode()):
            return None
        target_chat = TargetChat(chat_id=chat_id)
        return cls(user_id=user_id, target_chat=target_chat)
@dataclass(frozen=True, eq=True)
class ForwardedMessage:
    """Record linking a forwarded copy of a message back to its origin chat.

    Identity (equality and hash) is defined by ``forwarded_message_id`` and
    ``target_chat_id`` only; the remaining fields are excluded via
    ``hash=False, compare=False``.
    """

    forwarded_message_id: int
    target_chat_id: int
    origin_chat_id: int = field(hash=False, compare=False)
    created_at: datetime = field(hash=False, compare=False, default_factory=_utc_now)
| StarcoderdataPython |
64907 | # Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron._i18n import _
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_INTERFACE_MAPPINGS = []
# Default multicast group for the VXLAN interface.  Must be a multicast
# address (224.0.0.0/4); the previous value '192.168.127.12' was a unicast
# address and contradicted the vxlan_group help text below.
DEFAULT_VXLAN_GROUP = '224.0.0.1'
DEFAULT_KERNEL_HZ_VALUE = 250  # [Hz]
DEFAULT_TC_TBF_LATENCY = 50  # [ms]
# Options controlling the VXLAN overlay (registered under the [VXLAN] group).
vxlan_opts = [
    cfg.BoolOpt('enable_vxlan', default=True,
                help=_("Enable VXLAN on the agent. Can be enabled when "
                       "agent is managed by ml2 plugin using linuxbridge "
                       "mechanism driver")),
    cfg.IntOpt('ttl',
               help=_("TTL for vxlan interface protocol packets.")),
    cfg.IntOpt('tos',
               help=_("TOS for vxlan interface protocol packets.")),
    cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP,
               help=_("Multicast group(s) for vxlan interface. A range of "
                      "group addresses may be specified by using CIDR "
                      "notation. Specifying a range allows different VNIs to "
                      "use different group addresses, reducing or eliminating "
                      "spurious broadcast traffic to the tunnel endpoints. "
                      "To reserve a unique group for each possible "
                      # Example fixed: "172.16.31.10/8" was not a multicast
                      # range; VXLAN groups must fall inside 224.0.0.0/4.
                      "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This "
                      "setting must be the same on all the agents.")),
    cfg.IPOpt('local_ip', help=_("Local IP address of the VXLAN endpoints.")),
    cfg.BoolOpt('l2_population', default=False,
                help=_("Extension to use alongside ml2 plugin's l2population "
                       "mechanism driver. It enables the plugin to populate "
                       "VXLAN forwarding table.")),
    cfg.BoolOpt('arp_responder', default=False,
                help=_("Enable local ARP responder which provides local "
                       "responses instead of performing ARP broadcast into "
                       "the overlay. Enabling local ARP responder is not "
                       "fully compatible with the allowed-address-pairs "
                       "extension.")
                ),
]
# Agent-side physical connectivity mappings for flat/VLAN networking
# (registered under the [LINUX_BRIDGE] group).
bridge_opts = [
    cfg.ListOpt('physical_interface_mappings',
                default=DEFAULT_INTERFACE_MAPPINGS,
                help=_("Comma-separated list of "
                       "<physical_network>:<physical_interface> tuples "
                       "mapping physical network names to the agent's "
                       "node-specific physical network interfaces to be used "
                       "for flat and VLAN networks. All physical networks "
                       "listed in network_vlan_ranges on the server should "
                       "have mappings to appropriate interfaces on each "
                       "agent.")),
    cfg.ListOpt('bridge_mappings',
                default=DEFAULT_BRIDGE_MAPPINGS,
                help=_("List of <physical_network>:<physical_bridge>")),
]
# Tuning knobs used when computing tc-tbf burst/queue sizes for QoS
# bandwidth-limit rules (registered under the [QOS] group).
qos_options = [
    cfg.IntOpt('kernel_hz', default=DEFAULT_KERNEL_HZ_VALUE,
               help=_("Value of host kernel tick rate (hz) for calculating "
                      "minimum burst value in bandwidth limit rules for "
                      "a port with QoS. See kernel configuration file for "
                      "HZ value and tc-tbf manual for more information.")),
    cfg.IntOpt('tbf_latency', default=DEFAULT_TC_TBF_LATENCY,
               help=_("Value of latency (ms) for calculating size of queue "
                      "for a port with QoS. See tc-tbf manual for more "
                      "information."))
]
# Expose the option groups on the global configuration object at import time.
cfg.CONF.register_opts(vxlan_opts, "VXLAN")
cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE")
cfg.CONF.register_opts(qos_options, "QOS")
| StarcoderdataPython |
1798000 | <filename>src/nauka/utils/torch/__init__.py
# -*- coding: utf-8 -*-
from . import cuda, optim, random
| StarcoderdataPython |
20518 | import sys
sys.path.append('../')
from abc import ABCMeta, abstractmethod
# https://www.python-course.eu/python3_abstract_classes.php
import logging
import oandapyV20
from oandapyV20 import API
import oandapyV20.endpoints.orders as orders
from oandapyV20.contrib.requests import MarketOrderRequest
class ExecutionHandler(metaclass=ABCMeta):
    """
    Provides an abstract base class to handle all execution in the backtesting
    and live trading system.
    """
    # NOTE: the original assigned ``__metaclass__ = ABCMeta`` inside the class
    # body, which is Python 2 syntax and has no effect under Python 3 -- the
    # @abstractmethod below was therefore never enforced.  Declaring the
    # metaclass in the class header restores the intended behaviour.

    @abstractmethod
    def execute_order(self):
        """
        Send the order to the brokerage
        """
        raise NotImplementedError("Should implement execute_order()")
class SimulatedExecution(object):
    """
    No-op execution environment for simulations.

    This handler accepts order events but performs no work; in the current
    version fill handling is carried out by the Portfolio object instead
    (to be revisited in later versions).
    """

    def execute_order(self, event):
        pass
class OANDAExecutionHandler(ExecutionHandler):
    """Execution handler that submits market orders to OANDA through the
    oandapyV20 REST client."""

    def __init__(self, domain, access_token, account_id):
        self.domain = domain
        self.access_token = access_token
        self.account_id = account_id
        self.client = self.create_OADAN_client()
        self.logger = logging.getLogger(__name__)

    def create_OADAN_client(self):
        """Build the oandapyV20 API client from the stored access token.
        (Method name kept, typo and all, for backward compatibility.)"""
        return API(self.access_token)

    def execute_order(self, event):
        """Place a market order for the event's instrument/units and log the
        broker response.

        ``event`` must provide ``instrument`` (6-char pair, e.g. 'EURUSD')
        and ``units`` (signed position size).  Debug ``print`` calls were
        replaced with the logger this class already owns, so live trading
        does not spam stdout.
        """
        # OANDA expects 'EUR_USD' style instrument names.
        instrument = "%s_%s" % (event.instrument[:3], event.instrument[3:])
        units = event.units
        # Market order
        mo = MarketOrderRequest(instrument=instrument, units=units)
        self.logger.debug("market order payload: %s", mo.data)
        # Create order request
        request = orders.OrderCreate(self.account_id, data=mo.data)
        # perform the request
        rv = self.client.request(request)
        self.logger.debug(rv)
| StarcoderdataPython |
6559 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Trainers.LightGbmBinaryClassifier
"""
import numbers
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def trainers_lightgbmbinaryclassifier(
        training_data,
        predictor_model=None,
        number_of_iterations=100,
        learning_rate=None,
        number_of_leaves=None,
        minimum_example_count_per_leaf=None,
        feature_column_name='Features',
        booster=None,
        label_column_name='Label',
        example_weight_column_name=None,
        row_group_column_name=None,
        normalize_features='Auto',
        caching='Auto',
        unbalanced_sets=False,
        weight_of_positive_examples=1.0,
        sigmoid=0.5,
        evaluation_metric='Logloss',
        maximum_bin_count_per_feature=255,
        verbose=False,
        silent=True,
        number_of_threads=None,
        early_stopping_round=0,
        batch_size=1048576,
        use_categorical_split=None,
        handle_missing_value=True,
        use_zero_as_missing_value=False,
        minimum_example_count_per_group=100,
        maximum_categorical_split_point_count=32,
        categorical_smoothing=10.0,
        l2_categorical_regularization=10.0,
        seed=None,
        parallel_trainer=None,
        **params):
    """
    **Description**
        Train a LightGBM binary classification model.

    :param number_of_iterations: Number of iterations. (inputs).
    :param training_data: The data to be used for training (inputs).
    :param learning_rate: Shrinkage rate for trees, used to prevent
        over-fitting. Range: (0,1]. (inputs).
    :param number_of_leaves: Maximum leaves for trees. (inputs).
    :param minimum_example_count_per_leaf: Minimum number of
        instances needed in a child. (inputs).
    :param feature_column_name: Column to use for features (inputs).
    :param booster: Which booster to use, can be gbtree, gblinear or
        dart. gbtree and dart use tree based model while gblinear
        uses linear function. (inputs).
    :param label_column_name: Column to use for labels (inputs).
    :param example_weight_column_name: Column to use for example
        weight (inputs).
    :param row_group_column_name: Column to use for example groupId
        (inputs).
    :param normalize_features: Normalize option for the feature
        column (inputs).
    :param caching: Whether trainer should cache input training data
        (inputs).
    :param unbalanced_sets: Use for binary classification when
        training data is not balanced. (inputs).
    :param weight_of_positive_examples: Control the balance of
        positive and negative weights, useful for unbalanced classes.
        A typical value to consider: sum(negative cases) /
        sum(positive cases). (inputs).
    :param sigmoid: Parameter for the sigmoid function. (inputs).
    :param evaluation_metric: Evaluation metrics. (inputs).
    :param maximum_bin_count_per_feature: Maximum number of bucket
        bin for features. (inputs).
    :param verbose: Verbose (inputs).
    :param silent: Printing running messages. (inputs).
    :param number_of_threads: Number of parallel threads used to run
        LightGBM. (inputs).
    :param early_stopping_round: Rounds of early stopping, 0 will
        disable it. (inputs).
    :param batch_size: Number of entries in a batch when loading
        data. (inputs).
    :param use_categorical_split: Enable categorical split or not.
        (inputs).
    :param handle_missing_value: Enable special handling of missing
        value or not. (inputs).
    :param use_zero_as_missing_value: Enable usage of zero (0) as
        missing value. (inputs).
    :param minimum_example_count_per_group: Minimum number of
        instances per categorical group. (inputs).
    :param maximum_categorical_split_point_count: Max number of
        categorical thresholds. (inputs).
    :param categorical_smoothing: Lapalace smooth term in categorical
        feature spilt. Avoid the bias of small categories. (inputs).
    :param l2_categorical_regularization: L2 Regularization for
        categorical split. (inputs).
    :param seed: Sets the random seed for LightGBM to use. (inputs).
    :param parallel_trainer: Parallel LightGBM Learning Algorithm
        (inputs).
    :param predictor_model: The trained model (outputs).
    """
    # NOTE: this module is tool-generated (see the header comment); keep any
    # edits limited to comments so a regeneration does not silently diverge.
    entrypoint_name = 'Trainers.LightGbmBinaryClassifier'
    inputs = {}
    outputs = {}

    # Validate/coerce every supplied argument with try_set and record it
    # under its ML.NET entry-point name; None arguments are simply omitted.
    if number_of_iterations is not None:
        inputs['NumberOfIterations'] = try_set(
            obj=number_of_iterations,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if training_data is not None:
        inputs['TrainingData'] = try_set(
            obj=training_data,
            none_acceptable=False,
            is_of_type=str)
    if learning_rate is not None:
        inputs['LearningRate'] = try_set(
            obj=learning_rate,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if number_of_leaves is not None:
        inputs['NumberOfLeaves'] = try_set(
            obj=number_of_leaves,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if minimum_example_count_per_leaf is not None:
        inputs['MinimumExampleCountPerLeaf'] = try_set(
            obj=minimum_example_count_per_leaf,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if feature_column_name is not None:
        inputs['FeatureColumnName'] = try_set(
            obj=feature_column_name,
            none_acceptable=True,
            is_of_type=str,
            is_column=True)
    if booster is not None:
        inputs['Booster'] = try_set(
            obj=booster,
            none_acceptable=True,
            is_of_type=dict)
    if label_column_name is not None:
        inputs['LabelColumnName'] = try_set(
            obj=label_column_name,
            none_acceptable=True,
            is_of_type=str,
            is_column=True)
    if example_weight_column_name is not None:
        inputs['ExampleWeightColumnName'] = try_set(
            obj=example_weight_column_name,
            none_acceptable=True,
            is_of_type=str,
            is_column=True)
    if row_group_column_name is not None:
        inputs['RowGroupColumnName'] = try_set(
            obj=row_group_column_name,
            none_acceptable=True,
            is_of_type=str,
            is_column=True)
    if normalize_features is not None:
        inputs['NormalizeFeatures'] = try_set(
            obj=normalize_features,
            none_acceptable=True,
            is_of_type=str,
            values=[
                'No',
                'Warn',
                'Auto',
                'Yes'])
    if caching is not None:
        inputs['Caching'] = try_set(
            obj=caching,
            none_acceptable=True,
            is_of_type=str,
            values=[
                'Auto',
                'Memory',
                'None'])
    if unbalanced_sets is not None:
        inputs['UnbalancedSets'] = try_set(
            obj=unbalanced_sets,
            none_acceptable=True,
            is_of_type=bool)
    if weight_of_positive_examples is not None:
        inputs['WeightOfPositiveExamples'] = try_set(
            obj=weight_of_positive_examples,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if sigmoid is not None:
        inputs['Sigmoid'] = try_set(
            obj=sigmoid,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if evaluation_metric is not None:
        inputs['EvaluationMetric'] = try_set(
            obj=evaluation_metric,
            none_acceptable=True,
            is_of_type=str,
            values=[
                'None',
                'Default',
                'Logloss',
                'Error',
                'AreaUnderCurve'])
    if maximum_bin_count_per_feature is not None:
        inputs['MaximumBinCountPerFeature'] = try_set(
            obj=maximum_bin_count_per_feature,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if verbose is not None:
        inputs['Verbose'] = try_set(
            obj=verbose,
            none_acceptable=True,
            is_of_type=bool)
    if silent is not None:
        inputs['Silent'] = try_set(
            obj=silent,
            none_acceptable=True,
            is_of_type=bool)
    if number_of_threads is not None:
        inputs['NumberOfThreads'] = try_set(
            obj=number_of_threads,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if early_stopping_round is not None:
        inputs['EarlyStoppingRound'] = try_set(
            obj=early_stopping_round,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if batch_size is not None:
        inputs['BatchSize'] = try_set(
            obj=batch_size,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if use_categorical_split is not None:
        inputs['UseCategoricalSplit'] = try_set(
            obj=use_categorical_split, none_acceptable=True, is_of_type=bool)
    if handle_missing_value is not None:
        inputs['HandleMissingValue'] = try_set(
            obj=handle_missing_value,
            none_acceptable=True,
            is_of_type=bool)
    if use_zero_as_missing_value is not None:
        inputs['UseZeroAsMissingValue'] = try_set(
            obj=use_zero_as_missing_value,
            none_acceptable=True,
            is_of_type=bool)
    if minimum_example_count_per_group is not None:
        inputs['MinimumExampleCountPerGroup'] = try_set(
            obj=minimum_example_count_per_group,
            none_acceptable=True,
            is_of_type=numbers.Real,
            valid_range={
                'Inf': 0,
                'Max': 2147483647})
    if maximum_categorical_split_point_count is not None:
        inputs['MaximumCategoricalSplitPointCount'] = try_set(
            obj=maximum_categorical_split_point_count,
            none_acceptable=True,
            is_of_type=numbers.Real,
            valid_range={
                'Inf': 0,
                'Max': 2147483647})
    if categorical_smoothing is not None:
        inputs['CategoricalSmoothing'] = try_set(
            obj=categorical_smoothing,
            none_acceptable=True,
            is_of_type=numbers.Real, valid_range={'Min': 0.0})
    if l2_categorical_regularization is not None:
        inputs['L2CategoricalRegularization'] = try_set(
            obj=l2_categorical_regularization,
            none_acceptable=True,
            is_of_type=numbers.Real, valid_range={'Min': 0.0})
    if seed is not None:
        inputs['Seed'] = try_set(
            obj=seed,
            none_acceptable=True,
            is_of_type=numbers.Real)
    if parallel_trainer is not None:
        inputs['ParallelTrainer'] = try_set(
            obj=parallel_trainer,
            none_acceptable=True,
            is_of_type=dict)
    if predictor_model is not None:
        outputs['PredictorModel'] = try_set(
            obj=predictor_model, none_acceptable=False, is_of_type=str)

    # String values starting with "$" are collected as entry-point graph
    # variables rather than literal inputs/outputs.
    input_variables = {
        x for x in unlist(inputs.values())
        if isinstance(x, str) and x.startswith("$")}
    output_variables = {
        x for x in unlist(outputs.values())
        if isinstance(x, str) and x.startswith("$")}

    # Assemble the serializable entry-point node for the experiment graph.
    entrypoint = EntryPoint(
        name=entrypoint_name, inputs=inputs, outputs=outputs,
        input_variables=input_variables,
        output_variables=output_variables)
    return entrypoint
| StarcoderdataPython |
4807418 | <gh_stars>0
import re
import os, glob
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import pdb
# Public API of this module.  NOTE(review): only densenet121 is defined in
# this file's visible portion -- confirm the other names exist before
# relying on __all__.
__all__ = ['DenseNet', 'densenet121',
           'densenet169', 'densenet201', 'densenet161']
# Pretrained ImageNet checkpoint URLs keyed by architecture name.
model_urls = {
    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth'
}
# NOTE(review): creating the SummaryWriter at import time writes a runs/
# directory as a side effect even when TensorBoard logging is unused (the
# writer.add_image calls below are all commented out) -- confirm intended.
writer = SummaryWriter()
def densenet121(type, pretrained=False, **kwargs):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Args:
        type (str): which attention variant to build; one of "va-densenet",
            "reva-densenet", "fp-densenet", "start-densenet",
            "every-densenet", "sedensenet", "triplelossdensenet"
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        **kwargs: forwarded to the selected model class

    Raises:
        ValueError: if ``type`` is not one of the recognized variants.
    """
    if (type == "va-densenet"):
        model = DenseNetVa(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                           **kwargs)
    elif (type == "reva-densenet"):
        model = DenseNetReva(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                             **kwargs)
    elif (type == "fp-densenet"):
        model = DenseNetFP(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                           **kwargs)
    elif (type == "start-densenet"):
        model = DenseNetStart(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                              **kwargs)
    elif (type == "every-densenet"):
        model = DenseNetEvery(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                              **kwargs)
    elif (type == "sedensenet"):
        model = SEDenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                           **kwargs)
    elif (type == "triplelossdensenet"):
        model = TripleLossDenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16),
                                   **kwargs)
    else:
        # Previously an unrecognized type fell through and crashed later
        # with a NameError on `model`; fail fast with a clear message.
        raise ValueError("unknown densenet121 type: %r" % (type,))

    if pretrained:
        # '.'s are no longer allowed in module names, but pervious _DenseLayer
        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
        # They are also in the checkpoints in model_urls. This pattern is used
        # to find such keys.
        pattern = re.compile(r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        origin_model = model_zoo.load_url(model_urls['densenet121'])
        for key in list(origin_model.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                # [9:] strips the leading 'features.' prefix used by
                # torchvision's DenseNet; these variants register their
                # dense blocks at the top level instead -- TODO confirm.
                origin_model[new_key[9:]] = origin_model[key]
                del origin_model[key]
        model_dict = model.state_dict()
        # 1. filter out unnecessary keys
        origin_model = {k: v for k, v in origin_model.items() if k in model_dict}
        # 2. overwrite entries in the existing state dict
        model_dict.update(origin_model)
        # 3. load the new state dict
        model.load_state_dict(model_dict)
    return model
class _DenseLayer(nn.Sequential):
    """One DenseNet-BC layer: BN -> ReLU -> 1x1 bottleneck conv ->
    BN -> ReLU -> 3x3 conv, whose output is concatenated onto the input.
    """

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        # Submodule names ('norm1', 'conv1', ...) must stay exactly as-is:
        # they become state-dict keys that pretrained checkpoints are
        # remapped onto in densenet121().
        # NOTE(review): the trailing commas after each add_module call build
        # throwaway 1-tuples of None; harmless, but presumably unintended.
        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
        self.add_module('relu1', nn.ReLU(inplace=True)),
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
                                           growth_rate, kernel_size=1, stride=1, bias=False)),
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
        self.add_module('relu2', nn.ReLU(inplace=True)),
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1, bias=False)),
        # Dropout probability applied to the new features (0 disables it).
        self.drop_rate = drop_rate

    def forward(self, x):
        # Run the Sequential stack defined above on x.
        new_features = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(
                new_features, p=self.drop_rate, training=self.training)
        # Dense connectivity: concatenate input and output along channels.
        return torch.cat([x, new_features], 1)
class _DenseBlock(nn.Sequential):
    """A stack of densely connected layers.

    Each successive layer sees ``growth_rate`` more input channels than
    the previous one, because every layer concatenates its output onto
    its input.
    """

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for layer_idx in range(num_layers):
            in_channels = num_input_features + layer_idx * growth_rate
            dense_layer = _DenseLayer(in_channels, growth_rate, bn_size, drop_rate)
            # Names must remain 'denselayer<k>' so pretrained state-dict
            # keys line up during checkpoint loading.
            self.add_module('denselayer{}'.format(layer_idx + 1), dense_layer)
class _Transition(nn.Sequential):
    """Transition between dense blocks: BN -> ReLU -> 1x1 conv (channel
    reduction) -> 2x2 average pool (spatial halving).
    """

    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        # Submodule names are state-dict keys; keep them unchanged for
        # pretrained checkpoint compatibility.
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                          kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class _SEBlock(nn.Module):
def __init__(self, in_ch, r=16):
super(_SEBlock, self).__init__()
self.se_linear1 = nn.Linear(in_ch, in_ch//r)
self.se_linear2 = nn.Linear(in_ch//r, in_ch)
def forward(self,x):
input_x = x
x = x.view(*(x.shape[:-2]),-1).mean(-1)
x = F.relu(self.se_linear1(x), inplace=True)
x = self.se_linear2(x)
x = x.unsqueeze(-1).unsqueeze(-1)
x = torch.sigmoid(x)
x = torch.mul(input_x, x)
return x
def interpolate(x, multiplier=2, fixed_size=0, divider=2, absolute_channel=0, mode='nearest'):
    """Resize a 4D (B, C, H, W) tensor in both channel and spatial dims.

    The tensor is temporarily lifted to 5D so that F.interpolate treats the
    channel axis as a resizable "depth" dimension.  Channels become
    ``absolute_channel`` if given, else ``C // divider``; spatial dims become
    ``fixed_size`` if given, else the original size times ``multiplier``.
    """
    if absolute_channel != 0:
        target_channels = absolute_channel
    else:
        target_channels = x.size(1) // divider
    if fixed_size != 0:
        target_h = target_w = fixed_size
    else:
        target_h = x.size(2) * multiplier
        target_w = x.size(3) * multiplier
    batched = x.view(1, x.size(0), x.size(1), x.size(2), x.size(3))
    resized = F.interpolate(batched, size=(target_channels, target_h, target_w), mode=mode)
    return resized[0]
# VA Densenet
# ================================================================
class DenseNetVa(nn.Module):
    r"""Densenet-121 backbone with a vanilla spatial visual-attention head.

    Based on `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(DenseNetVa, self).__init__()

        # First convolution (stem): 7x7 conv /2, BN, ReLU, 3x3 max-pool /2.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 2
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 3
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 4
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5
        # NOTE(review): batchNorm5 is created but never used in forward() --
        # confirm whether it was meant to be applied before the final ReLU
        # (DenseNetFP does apply it).
        self.batchNorm5 = nn.BatchNorm2d(num_features)

        # Vanilla Linear Visual attention layer
        # self.valinear = nn.Linear(1024 * 7 * 7, 49)
        # 3x3 conv producing a single-channel spatial attention map with the
        # same spatial size as the final feature grid.
        self.valinear = nn.Conv2d(1024, 1, 3, 1, 1)

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Softmax Sigmoid
        # NOTE(review): softmax and sigmoid are unused in forward().
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x, epoch = -1):
        # =============================================================
        # Phase 1 Densenet backbone.
        x = self.denseblock1(self.features(x))
        x = self.denseblock2(self.transition1(x))
        x = self.denseblock3(self.transition2(x))
        x = self.denseblock4(self.transition3(x))

        # Vanilla visual attention: add the single-channel attention map as
        # a residual, broadcast over all feature channels.
        va = self.valinear(x)
        x = x + va.view(x.size()[0], 1, x.size()[2], x.size()[3])
        # if epoch != -1:
        #     writer.add_image('Image', vutils.make_grid(x, normalize=True, scale_each=True), epoch)
        x = F.relu(x, inplace=True)
        # Global average pool then classify.
        x = F.avg_pool2d(x, kernel_size=7, stride=1).view(x.size(0), -1)
        x = self.classifier(x)
        return x
# REVA Densenet
# ================================================================
class DenseNetReva(nn.Module):
    r"""Densenet-121 run twice: the first pass reconstructs a full-resolution
    residual via transposed convolutions which is added back onto the input,
    then the (weight-shared) backbone is run a second time for classification.

    Based on `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(DenseNetReva, self).__init__()

        # First convolution (stem).
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 2
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 3
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 4
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5
        # NOTE(review): batchNorm5 is created but never used in forward().
        self.batchNorm5 = nn.BatchNorm2d(num_features)

        # Feature pyramid lateral 1x1 convs.
        # NOTE(review): none of these conv2d1x1fp* layers is used in
        # forward() -- likely leftovers from the FP variant; confirm.
        self.conv2d1x1fp4 = nn.Conv2d(1024, 1024, 1, stride=1, padding=0)
        self.conv2d1x1fp3 = nn.Conv2d(1024, 1024, 1, stride=1, padding=0)
        self.conv2d1x1fp2 = nn.Conv2d(512, 512, 1, stride=1, padding=0)
        self.conv2d1x1fp1 = nn.Conv2d(256, 256, 1, stride=1, padding=0)

        # Transconv upsampling chain: 7x7 features back to a 3x224x224 map.
        # input size 7
        self.transconv1 = nn.ConvTranspose2d(1024, 1024, 4, stride=2, padding=1)  # output 14x14
        self.transconv2 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1)  # output 28x28
        self.transconv3 = nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1)  # output 56x56
        self.transconv4 = nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1)  # output 112x112
        self.transconv5 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1)  # output 224x224
        self.transconv6 = nn.ConvTranspose2d(64, 3, 3, stride=1, padding=1)  # output 224x224

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Softmax Sigmoid
        # NOTE(review): softmax and sigmoid are unused in forward().
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x, epoch = -1):
        # =============================================================
        # Pass 1: backbone, then decode the 7x7 features back up to an
        # image-sized residual that is added onto the raw input.
        features = self.denseblock1(self.features(x))
        features = self.denseblock2(self.transition1(features))
        features = self.denseblock3(self.transition2(features))
        features = self.denseblock4(self.transition3(features))
        features = self.transconv6(self.transconv5(self.transconv4(self.transconv3(self.transconv2(self.transconv1(features))))))
        x = x + features
        # Pass 2: run the same (weight-shared) backbone on the refined input.
        features = self.denseblock1(self.features(x))
        features = self.denseblock2(self.transition1(features))
        features = self.denseblock3(self.transition2(features))
        features = self.denseblock4(self.transition3(features))
        # if epoch != -1:
        #     writer.add_image('Image', vutils.make_grid(x, normalize=True, scale_each=True), epoch)
        x = F.relu(features, inplace=True)
        x = F.avg_pool2d(x, kernel_size=7, stride=1).view(x.size(0), -1)
        x = self.classifier(x)
        return x
# FP Densenet
# ================================================================
class DenseNetFP(nn.Module):
    r"""Densenet-121 with a feature-pyramid pre-pass: top-down transposed-conv
    upsampling plus 1x1 lateral connections build an image-sized residual
    that is added onto the input before a second, normal backbone pass.

    Based on `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(DenseNetFP, self).__init__()

        # First convolution (stem).
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 2
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 3
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 4
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5 (applied before the final ReLU in forward()).
        self.batchNorm5 = nn.BatchNorm2d(num_features)

        # Feature pyramid lateral 1x1 convs (channel-matched to f3/f2/f1).
        self.conv2d1x1fp3 = nn.Conv2d(1024, 1024, 1, stride=1, padding=0)
        self.conv2d1x1fp2 = nn.Conv2d(512, 512, 1, stride=1, padding=0)
        self.conv2d1x1fp1 = nn.Conv2d(256, 256, 1, stride=1, padding=0)

        # Transconv upsampling chain for the top-down pathway.
        # input size 7
        self.transconv1 = nn.ConvTranspose2d(1024, 1024, 4, stride=2, padding=1)  # output 14x14
        self.transconv2 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1)  # output 28x28
        self.transconv3 = nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1)  # output 56x56
        self.transconv4 = nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1)  # output 112x112
        self.transconv5 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1)  # output 224x224
        self.transconv6 = nn.ConvTranspose2d(64, 3, 3, stride=1, padding=1)  # output 224x224

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Softmax Sigmoid
        # NOTE(review): softmax and sigmoid are unused in forward().
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x, epoch = -1):
        # =============================================================
        # Phase 1: backbone pass, keeping each block's output for laterals.
        f1 = self.denseblock1(self.features(x))
        f2 = self.denseblock2(self.transition1(f1))
        f3 = self.denseblock3(self.transition2(f2))
        f4 = self.denseblock4(self.transition3(f3))
        # =============================================================
        # Phase 2: feature pyramid -- top-down transposed-conv upsampling
        # merged with 1x1 laterals, decoded back to a 3x224x224 residual.
        # fp3 = interpolate(f4, divider=1) + self.conv2d1x1fp3(f3)  # output 1024, 14, 14
        # fp2 = interpolate(f3) + self.conv2d1x1fp2(f2)  # output 512, 28, 28
        # fp1 = interpolate(f2) + self.conv2d1x1fp1(f1)  # output 256, 56, 56
        # fp1 = interpolate(fp1)  # output 128, 112, 112
        # fp1 = interpolate(fp1)  # output 64, 224, 224
        # fp1 = interpolate(fp1, multiplier = 1, absolute_channel = 3)  # output 3, 224, 224
        fp3 = self.transconv1(f4) + self.conv2d1x1fp3(f3)
        fp2 = self.transconv2(fp3) + self.conv2d1x1fp2(f2)
        fp1 = self.transconv3(fp2) + self.conv2d1x1fp1(f1)
        fp1 = self.transconv6(self.transconv5(self.transconv4(fp1)))
        x = x + fp1
        # =============================================================
        # Phase 3: normal Densenet sequence on the refined input.
        x = self.features(x)
        x = self.denseblock1(x)
        x = self.denseblock2(self.transition1(x))
        x = self.denseblock3(self.transition2(x))
        x = self.denseblock4(self.transition3(x))
        x = self.batchNorm5(x)
        # if epoch != -1:
        #     writer.add_image('Image', vutils.make_grid(x, normalize=True, scale_each=True), epoch)
        x = F.relu(x, inplace=True)
        x = F.avg_pool2d(x, kernel_size=7, stride=1).view(x.size(0), -1)
        x = self.classifier(x)
        return x
# Start Densenet
# ================================================================
class DenseNetStart(nn.Module):
    r"""Densenet-121 with an input-level attention: a 3->3 conv computes a
    residual that is added onto the raw image before the backbone runs.

    Based on `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """

    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        super(DenseNetStart, self).__init__()

        # First convolution (stem).
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 2
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 3
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 4
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5
        # NOTE(review): batchNorm5 is created but never used in forward().
        self.batchNorm5 = nn.BatchNorm2d(num_features)

        # Start VA: 3-channel residual attention applied to the raw input.
        self.startconv2d = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Softmax Sigmoid
        # NOTE(review): softmax and sigmoid are unused in forward().
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x, epoch = -1):
        # Input-level attention residual, then the standard backbone.
        attention = self.startconv2d(x)
        x = x + attention
        x = self.denseblock1(self.features(x))
        x = self.denseblock2(self.transition1(x))
        x = self.denseblock3(self.transition2(x))
        x = self.denseblock4(self.transition3(x))
        x = F.relu(x, inplace=True)
        x = F.avg_pool2d(x, kernel_size=7, stride=1).view(x.size(0), -1)
        x = self.classifier(x)
        return x
# Every Densenet: DenseNet-121 backbone with a residual conv "attention"
# refinement applied after every dense block.
# ================================================================
class DenseNetEvery(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Variant: after each of the four dense blocks, the block output is passed
    through a same-shape 3x3 convolution (ReLU) and added back residually
    before the next transition layer.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        # NOTE(review): ``block_config`` is accepted but ignored — the
        # per-block layer counts are hard-coded (6, 12, 24, 16) below.
        super(DenseNetEvery, self).__init__()
        # First convolution (stem): 7x7 conv -> BN -> ReLU -> 3x3 max pool.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1: 6 dense layers; each adds ``growth_rate`` channels
        # (64 -> 256 for the defaults).
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # Transition halves the channel count (and, per DenseNet, downsamples).
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 2: 12 dense layers (128 -> 512 channels for the defaults).
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 3: 24 dense layers (256 -> 1024 channels for the defaults).
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 4: 16 dense layers (512 -> 1024 channels for the defaults).
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5
        # NOTE(review): registered but never applied in forward() — confirm
        # whether the final batch norm was meant to be used before the head.
        self.batchNorm5 = nn.BatchNorm2d(num_features)
        # Every VA: per-block attention convolutions.  Channel counts match
        # the corresponding dense-block outputs (256, 512, 1024, 1024).
        # ``everyconv2dblock1`` (2816 = 256+512+1024+1024 -> 1024) feeds only
        # the disabled global-attention path commented out in forward().
        self.everyconv2dblock1 = nn.Conv2d(2816, 1024, kernel_size=1, stride=1, padding=0)
        self.everyconv2dblock256 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.everyconv2dblock512 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.everyconv2dblock1024_1 = nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1)
        self.everyconv2dblock1024_2 = nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1)
        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
        # Softmax Sigmoid
        # NOTE(review): both are unused on the active forward path (softmax
        # appears only in the disabled global-attention code below).
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x, epoch = -1):
        """Forward pass.  ``epoch`` is accepted for interface compatibility
        with the other variants in this file but is not used."""
        # =============================================================
        # Phase 1 Densenet
        # After every dense block, add a same-shape ReLU(conv3x3) map back
        # onto the block output before the next transition.
        db1 = self.denseblock1(self.features(x))
        attention1 = F.relu(self.everyconv2dblock256(db1))
        db1 = attention1 + db1
        db2 = self.denseblock2(self.transition1(db1))
        attention2 = F.relu(self.everyconv2dblock512(db2))
        db2 = attention2 + db2
        db3 = self.denseblock3(self.transition2(db2))
        attention3 = F.relu(self.everyconv2dblock1024_1(db3))
        db3 = attention3 + db3
        db4 = self.denseblock4(self.transition3(db3))
        attention4 = F.relu(self.everyconv2dblock1024_2(db4))
        db4 = attention4 + db4
        # Disabled experiment (kept for reference): resize all four attention
        # maps to the final 7x7 grid, concatenate to 2816 channels, squeeze to
        # 1024 via the 1x1 conv, softmax, and add onto db4.
        # def interpolate(x, multiplier=2, fixed_size=0, divider=2, absolute_channel = 0, mode='nearest'):
        # global_attention = torch.cat((interpolate(attention1, fixed_size = 7, absolute_channel = 256),
        # interpolate(attention2, fixed_size = 7, absolute_channel = 512),
        # interpolate(attention3, fixed_size = 7, absolute_channel = 1024),
        # attention4
        # ), dim = 1)
        # global_attention = self.everyconv2dblock1(global_attention)
        # global_attention = self.softmax(global_attention)
        # db4 = db4 + global_attention
        # Head: ReLU, 7x7 global average pool, flatten, linear classifier.
        db4 = F.relu(db4, inplace=True)
        db4 = F.avg_pool2d(db4, kernel_size=7, stride=1).view(x.size(0), -1)
        db4 = self.classifier(db4)
        return db4
# SE Densenet: DenseNet-121 backbone with a squeeze-and-excitation block
# after each transition layer.
# ================================================================
class SEDenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Variant: an ``_SEBlock`` (squeeze-and-excitation, reduction ratio 16)
    recalibrates channels after each of the first three transitions.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        # NOTE(review): ``block_config`` is accepted but ignored — the
        # per-block layer counts are hard-coded (6, 12, 24, 16) below.
        super(SEDenseNet, self).__init__()
        # First convolution (stem): 7x7 conv -> BN -> ReLU -> 3x3 max pool.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1 (6 layers), transition halves channels, then SE block.
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        self.seblock1 = _SEBlock(in_ch=num_features, r = 16)
        # Block 2 (12 layers), transition, SE block.
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        self.seblock2 = _SEBlock(in_ch=num_features, r = 16)
        # Block 3 (24 layers), transition, SE block.
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        self.seblock3 = _SEBlock(in_ch=num_features, r = 16)
        # Block 4 (16 layers); no transition/SE after the final block.
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5
        # NOTE(review): registered but never applied in forward() — confirm
        # whether the final batch norm was meant to be used before the head.
        self.batchNorm5 = nn.BatchNorm2d(num_features)
        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x, epoch = -1):
        """Forward pass.  ``epoch`` is accepted for interface compatibility
        with the other variants in this file but is not used."""
        # =============================================================
        # Phase 1 Densenet
        # Pipeline per stage: dense block -> transition -> SE recalibration.
        x = self.seblock1(self.transition1(self.denseblock1(self.features(x))))
        x = self.seblock2(self.transition2(self.denseblock2(x)))
        x = self.seblock3(self.transition3(self.denseblock3(x)))
        x = self.denseblock4(x)
        # Disabled tensorboard image logging (kept for reference):
        # if epoch != -1:
        # writer.add_image('Image', vutils.make_grid(x, normalize=True, scale_each=True), epoch)
        # Head: ReLU, 7x7 global average pool, flatten, linear classifier.
        x = F.relu(x, inplace=True)
        x = F.avg_pool2d(x, kernel_size=7, stride=1).view(x.size(0), -1)
        x = self.classifier(x)
        return x
# TripleLossDenseNet: two DenseNet passes linked by a feature-pyramid
# top-down pathway; the two classifier outputs are averaged.
# ================================================================
class TripleLossDenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_

    Variant with three phases in ``forward``:
      1. A standard DenseNet pass producing logits ``result_1``.
      2. A top-down feature-pyramid pathway (transposed convolutions plus 1x1
         lateral connections) that is decoded back to image space and added
         residually onto the input.
      3. A second pass through the *same* (weight-shared) dense blocks, with
         pyramid features added laterally at each stage, producing ``result_2``.
    The returned logits are the average of the two passes.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
        # NOTE(review): ``block_config`` is accepted but ignored — the
        # per-block layer counts are hard-coded (6, 12, 24, 16) below.
        super(TripleLossDenseNet, self).__init__()
        # First convolution (stem): 7x7 conv -> BN -> ReLU -> 3x3 max pool.
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, num_init_features,
                                kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        num_features = num_init_features
        # Block 1: 6 dense layers (64 -> 256 channels for the defaults).
        num_layers = 6
        self.denseblock1 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition1 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 2: 12 dense layers (128 -> 512 channels for the defaults).
        num_layers = 12
        self.denseblock2 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition2 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 3: 24 dense layers (256 -> 1024 channels for the defaults).
        num_layers = 24
        self.denseblock3 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        self.transition3 = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
        num_features = num_features // 2
        # Block 4: 16 dense layers (512 -> 1024 channels for the defaults).
        num_layers = 16
        self.denseblock4 = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                       bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
        num_features = num_features + num_layers * growth_rate
        # BatchNorm5: applied before each classification head (both passes).
        self.batchNorm5 = nn.BatchNorm2d(num_features)
        # Feature pyramid: 1x1 lateral connections from the first-pass block
        # outputs, and 3x3 convs adapting pyramid levels for the second pass.
        self.conv2d1x1fp3 = nn.Conv2d(1024, 1024, 1, stride=1, padding=0)
        self.conv2d1x1fp2 = nn.Conv2d(512, 512, 1, stride=1, padding=0)
        self.conv2d1x1fp1 = nn.Conv2d(256, 256, 1, stride=1, padding=0)
        self.conv2dlastblock1 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.conv2dlastblock2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
        self.conv2dlastblock3 = nn.Conv2d(1024, 1024, 3, stride=1, padding=1)
        # Transconv upsampling
        # input size 7 (assumes 224x224 input images — TODO confirm)
        self.transconv1 = nn.ConvTranspose2d(1024, 1024, 4, stride=2, padding=1) # output 14x14
        self.transconv2 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1) # output 28x28
        self.transconv3 = nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1) # output 56x56
        self.transconv4 = nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1) # output 112x112
        self.transconv5 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1) # output 224x224
        self.transconv6 = nn.ConvTranspose2d(64, 3, 3, stride=1, padding=1) # output 224x224
        # Linear layer (shared by both passes).
        self.classifier = nn.Linear(num_features, num_classes)
        # Softmax Sigmoid
        # NOTE(review): both are registered but unused in forward().
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x, epoch = -1):
        """Forward pass returning averaged logits from the two DenseNet
        passes.  ``epoch`` is accepted for interface compatibility with the
        other variants in this file but is not used."""
        # =============================================================
        # Phase 1 Densenet: plain pass, keeping each block output for the
        # pyramid (f1: 256ch, f2: 512ch, f3: 1024ch, f4: 1024ch).
        f1 = self.denseblock1(self.features(x))
        f2 = self.denseblock2(self.transition1(f1))
        f3 = self.denseblock3(self.transition2(f2))
        f4 = self.denseblock4(self.transition3(f3))
        result_1 = self.batchNorm5(f4)
        result_1 = F.relu(result_1, inplace=True)
        result_1 = F.avg_pool2d(result_1, kernel_size=7, stride=1).view(result_1.size(0), -1)
        result_1 = self.classifier(result_1)
        # =============================================================
        # Phase 2 Feature Pyramid: top-down upsampling with 1x1 laterals,
        # decoded to an image-shaped residual added onto the input.
        fp3 = self.transconv1(f4) + self.conv2d1x1fp3(f3)
        fp2 = self.transconv2(fp3) + self.conv2d1x1fp2(f2)
        fp1 = self.transconv3(fp2) + self.conv2d1x1fp1(f1)
        fpimage = self.transconv6(self.transconv5(self.transconv4(fp1)))
        x = x + fpimage
        # =============================================================
        # Phase 3 Second Densenet: same (weight-shared) blocks, with pyramid
        # features injected laterally at each stage.
        fd1 = self.features(x)
        fd1 = self.denseblock1(fd1) + self.conv2dlastblock1(fp1)
        fd2 = self.denseblock2(self.transition1(fd1)) + self.conv2dlastblock2(fp2)
        fd3 = self.denseblock3(self.transition2(fd2)) + self.conv2dlastblock3(fp3)
        fd4 = self.denseblock4(self.transition3(fd3))
        result_2 = self.batchNorm5(fd4)
        result_2 = F.relu(result_2, inplace=True)
        result_2 = F.avg_pool2d(result_2, kernel_size=7, stride=1).view(x.size(0), -1)
        result_2 = self.classifier(result_2)
        # Disabled triple-output form (presumably the source of the class
        # name — three loss targets); currently only the average is returned.
        # return [result_1, result_2, (result_1 + result_2) / 2]
        return (result_1 + result_2) / 2
# NOTE: trailing dataset-export artifact (non-Python text) replaced with this
# comment so the file remains valid Python.